Mirror of https://github.com/JetBrains/JetBrainsRuntime.git, synced 2026-01-10 10:31:39 +01:00
Compare commits: jb17-b503...vkempik/rv (4 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 2aac866edd |  |
|  | 0ccc55c4cf |  |
|  | c4117e06c9 |  |
|  | 2ce9c23da2 |  |
jb/project/docker/Dockerfile.riscv_on_x64 (Normal file, 23 lines)
@@ -0,0 +1,23 @@
# NOTE: This Dockerfile is meant to be used from the mkdocker_riscv_cross_x64.sh script.

# Pull a concrete version of Linux that does NOT receive updates after it's
# been created. This is so that the image is as stable as possible to make
# image creation reproducible.
# NB: this also means there may be no security-related fixes there; the
# version needs to be moved forward manually.
FROM ubuntu:jammy

# Install the necessary build tools
RUN apt-get update && apt-get install -y build-essential autoconf

# Set up boot JDK for building
COPY boot_jdk_amd64.tar.gz /jdk17/
RUN cd /jdk17 && tar --strip-components=1 -xzf boot_jdk_amd64.tar.gz && rm /jdk17/boot_jdk_amd64.tar.gz
ENV BOOT_JDK=/jdk17

COPY sdk-x86_64-linux-gnu-to-riscv64-linux-gnu-20220423.tar.gz /devkit/
RUN cd /devkit && tar --strip-components=1 -xzf sdk-x86_64-linux-gnu-to-riscv64-linux-gnu-20220423.tar.gz && rm /devkit/sdk-x86_64-linux-gnu-to-riscv64-linux-gnu-20220423.tar.gz
ENV DEVKIT_PATH=/devkit

RUN git config --global user.email "teamcity@jetbrains.com" && \
    git config --global user.name "builduser"
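
# Illustrative note (added; not part of the original patch): to pin the base
# image even more strictly than by tag, the FROM line could reference a content
# digest, e.g.
#   FROM ubuntu:jammy@sha256:<digest>
# where <digest> is a hypothetical placeholder for the digest actually in use.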
jb/project/docker/mkdocker_riscv_cross_x64.sh (Executable file, 26 lines)
@@ -0,0 +1,26 @@
#!/bin/bash -x

# This script creates a Docker image suitable for cross-building the riscv
# variant of the JetBrains Runtime version 17. The host is x86_64.

BOOT_JDK_REMOTE_FILE=zulu17.34.19-ca-jdk17.0.3-linux_x64.tar.gz
BOOT_JDK_SHA=caa17c167d045631f9fd85de246bc5313f29cef5ebb1c21524508d3e1196590c
BOOT_JDK_LOCAL_FILE=boot_jdk_amd64.tar.gz

if [ ! -f $BOOT_JDK_LOCAL_FILE ]; then
    # Obtain the "boot JDK" from outside of the container.
    wget -nc https://cdn.azul.com/zulu/bin/${BOOT_JDK_REMOTE_FILE} -O $BOOT_JDK_LOCAL_FILE
else
    echo "boot JDK \"$BOOT_JDK_LOCAL_FILE\" present, skipping download"
fi

# Verify that what we've downloaded can be trusted;
# fail the build if the checksum does not match.
sha256sum -c - <<EOF || exit 1
$BOOT_JDK_SHA *$BOOT_JDK_LOCAL_FILE
EOF

docker build -t jbr17buildenv -f Dockerfile.riscv_on_x64 .

# NB: the resulting container can (and should) be used without a network
# connection (--network none) during the build in order to reduce the chance
# of build contamination.
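
# Illustrative usage (added; not part of the original script, and the mount
# point and entry script are assumptions):
#   docker run --rm --network none -v "$(pwd)":/JetBrainsRuntime -w /JetBrainsRuntime \
#       jbr17buildenv bash jb/project/tools/linux/scripts/mkimages_riscv_crossx64.sh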
jb/project/tools/linux/scripts/mkimages_riscv_crossx64.sh (Executable file, 154 lines)
@@ -0,0 +1,154 @@
#!/bin/bash -x

# The following parameters must be specified:
#   build_number - number of the JetBrainsRuntime build
#   bundle_type  - bundle to be built; possible values:
#                  <empty> or nomod - the release bundles without any additional modules (jcef)
#                  jcef - the release bundles with jcef
#                  fd   - the fastdebug bundles, which also include the jcef module
#
# This script makes a test-image along with the JDK images when bundle_type is set to "jcef".
# If the character 't' is appended to bundle_type, it also makes a test-image along with the JDK images.
#
# Environment variables:
#   JDK_BUILD_NUMBER - update release of the OpenJDK build, i.e. the value of
#                      the --with-version-build argument to configure.
#                      By default JDK_BUILD_NUMBER is set to zero.
#   JCEF_PATH        - path to the directory with the JCEF binaries.
#                      By default the JCEF binaries are expected in ./jcef_linux_x64.
# An illustrative invocation is shown below.
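
# Example invocation (added for illustration; the values and the calling
# convention are assumptions, since common.sh may instead expect these as
# positional arguments):
#   build_number=1234 bundle_type=jcef JCEF_PATH=./jcef_linux_x64 \
#       bash jb/project/tools/linux/scripts/mkimages_riscv_crossx64.sh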

source jb/project/tools/common/scripts/common.sh

JCEF_PATH=${JCEF_PATH:=./jcef_linux_x64}

function do_configure {
  sh configure \
    $WITH_DEBUG_LEVEL \
    --with-vendor-name="$VENDOR_NAME" \
    --openjdk-target=riscv64-unknown-linux-gnu \
    --with-devkit="$DEVKIT_PATH" \
    --with-vendor-version-string="$VENDOR_VERSION_STRING" \
    --with-jvm-features=shenandoahgc \
    --with-version-pre= \
    --with-version-build="$JDK_BUILD_NUMBER" \
    --with-version-opt=b"$build_number" \
    --with-boot-jdk="$BOOT_JDK" \
    --with-build-jdk="$BUILD_JDK" \
    --disable-warnings-as-errors \
    --enable-cds=yes \
    $REPRODUCIBLE_BUILD_OPTS \
    $WITH_ZIPPED_NATIVE_DEBUG_SYMBOLS \
    || do_exit $?
}

function is_musl {
  # The old check doesn't work when cross-compiling, so always return 0.
  return 0
}

function create_image_bundle {
  __bundle_name=$1
  __arch_name=$2
  __modules_path=$3
  __modules=$4

  libc_type_suffix=''

  if is_musl; then libc_type_suffix='musl-' ; fi

  [ "$bundle_type" == "fd" ] && [ "$__arch_name" == "$JBRSDK_BUNDLE" ] && __bundle_name=$__arch_name && fastdebug_infix="fastdebug-"
  JBR=${__bundle_name}-${JBSDK_VERSION}-linux-${libc_type_suffix}x64-${fastdebug_infix}b${build_number}

  echo Running jlink....
  [ -d "$IMAGES_DIR"/"$__arch_name" ] && rm -rf "${IMAGES_DIR:?}"/"$__arch_name"
  $JSDK/bin/jlink \
    --module-path "$__modules_path" --no-man-pages --compress=2 \
    --add-modules "$__modules" --output "$IMAGES_DIR"/"$__arch_name"

  grep -v "^JAVA_VERSION" "$JSDK"/release | grep -v "^MODULES" >> "$IMAGES_DIR"/"$__arch_name"/release
  if [ "$__arch_name" == "$JBRSDK_BUNDLE" ]; then
    sed 's/JBR/JBRSDK/g' "$IMAGES_DIR"/"$__arch_name"/release > release
    mv release "$IMAGES_DIR"/"$__arch_name"/release
    cp $IMAGES_DIR/jdk/lib/src.zip "$IMAGES_DIR"/"$__arch_name"/lib
    copy_jmods "$__modules" "$__modules_path" "$IMAGES_DIR"/"$__arch_name"/jmods
    zip_native_debug_symbols $IMAGES_DIR/jdk "${JBR}_diz"
  fi

  # jmod does not preserve file permissions (JDK-8173610)
  [ -f "$IMAGES_DIR"/"$__arch_name"/lib/jcef_helper ] && chmod a+x "$IMAGES_DIR"/"$__arch_name"/lib/jcef_helper

  echo Creating "$JBR".tar.gz ...
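  # Note (comment added for clarity): the file list below is sorted bytewise
  # (LC_ALL=C) and passed to tar with --no-recursion, so the archive member
  # order is deterministic regardless of locale or filesystem order.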
  (cd "$IMAGES_DIR" &&
      find "$__arch_name" -print0 | LC_ALL=C sort -z | \
      tar $REPRODUCIBLE_TAR_OPTS \
          --no-recursion --null -T - -cf "$JBR".tar) || do_exit $?
  mv "$IMAGES_DIR"/"$JBR".tar ./"$JBR".tar
  [ -f "$JBR".tar.gz ] && rm "$JBR.tar.gz"
  touch -c -d "@$SOURCE_DATE_EPOCH" "$JBR".tar
  gzip "$JBR".tar || do_exit $?
  rm -rf "${IMAGES_DIR:?}"/"$__arch_name"
}

WITH_DEBUG_LEVEL="--with-debug-level=release"
RELEASE_NAME=linux-riscv-server-release

case "$bundle_type" in
  "jcef")
    do_reset_changes=1
    do_maketest=1
    ;;
  "nomod" | "")
    bundle_type=""
    ;;
  "fd")
    do_reset_changes=1
    WITH_DEBUG_LEVEL="--with-debug-level=fastdebug"
    RELEASE_NAME=linux-riscv-server-fastdebug
    ;;
esac

if [ -z "$INC_BUILD" ]; then
  do_configure || do_exit $?
  make clean CONF=$RELEASE_NAME || do_exit $?
fi
make images CONF=$RELEASE_NAME || do_exit $?

IMAGES_DIR=build/$RELEASE_NAME/images
JSDK=$IMAGES_DIR/jdk
JSDK_MODS_DIR=$IMAGES_DIR/jmods
JBRSDK_BUNDLE=jbrsdk

echo Fixing permissions
chmod -R a+r $JSDK

if [ "$bundle_type" == "jcef" ] || [ "$bundle_type" == "fd" ]; then
  git apply -p0 < jb/project/tools/patches/add_jcef_module.patch || do_exit $?
  update_jsdk_mods $JSDK $JCEF_PATH/jmods $JSDK/jmods $JSDK_MODS_DIR || do_exit $?
  cp $JCEF_PATH/jmods/* $JSDK_MODS_DIR # $JSDK/jmods is not changed

  jbr_name_postfix="_${bundle_type}"
fi

# create runtime image bundle
modules=$(xargs < jb/project/tools/common/modules.list | sed s/" "//g) || do_exit $?
create_image_bundle "jbr${jbr_name_postfix}" "jbr" $JSDK_MODS_DIR "$modules" || do_exit $?

# create sdk image bundle
modules=$(cat $JSDK/release | grep MODULES | sed s/MODULES=//g | sed s/' '/','/g | sed s/\"//g | sed s/\\n//g) || do_exit $?
if [ "$bundle_type" == "jcef" ] || [ "$bundle_type" == "fd" ] || [ "$bundle_type" == "$JBRSDK_BUNDLE" ]; then
  modules=${modules},$(get_mods_list "$JCEF_PATH"/jmods)
fi
create_image_bundle "$JBRSDK_BUNDLE${jbr_name_postfix}" $JBRSDK_BUNDLE $JSDK_MODS_DIR "$modules" || do_exit $?

if [ $do_maketest -eq 1 ]; then
  JBRSDK_TEST=${JBRSDK_BUNDLE}-${JBSDK_VERSION}-linux-${libc_type_suffix}test-x64-b${build_number}
  echo Creating "$JBRSDK_TEST" ...
  [ $do_reset_changes -eq 1 ] && git checkout HEAD jb/project/tools/common/modules.list src/java.desktop/share/classes/module-info.java
  make test-image CONF=$RELEASE_NAME || do_exit $?
  tar -pcf "$JBRSDK_TEST".tar -C $IMAGES_DIR --exclude='test/jdk/demos' test || do_exit $?
  [ -f "$JBRSDK_TEST.tar.gz" ] && rm "$JBRSDK_TEST.tar.gz"
  gzip "$JBRSDK_TEST".tar || do_exit $?
fi

do_exit 0
make/autoconf/build-aux/config.guess (vendored, 11 lines changed)
@@ -1,6 +1,6 @@
 #!/bin/sh
 #
-# Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
 # Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
@@ -111,6 +111,15 @@ if [ "x$OUT" = x ]; then
   fi
 fi

+# Test and fix RISC-V.
+if [ "x$OUT" = x ]; then
+  if [ `uname -s` = Linux ]; then
+    if [ `uname -m` = riscv64 ]; then
+      OUT=riscv64-unknown-linux-gnu
+    fi
+  fi
+fi
+
 # Test and fix cpu on macos-aarch64, uname -p reports arm, buildsys expects aarch64
 echo $OUT | grep arm-apple-darwin > /dev/null 2> /dev/null
 if test $? != 0; then
@@ -308,7 +308,8 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_SHENANDOAHGC],
   AC_MSG_CHECKING([if platform is supported by Shenandoah])
   if test "x$OPENJDK_TARGET_CPU_ARCH" = "xx86" || \
      test "x$OPENJDK_TARGET_CPU" = "xaarch64" || \
-     test "x$OPENJDK_TARGET_CPU" = "xppc64le"; then
+     test "x$OPENJDK_TARGET_CPU" = "xppc64le" || \
+     test "x$OPENJDK_TARGET_CPU" = "xriscv64"; then
     AC_MSG_RESULT([yes])
   else
     AC_MSG_RESULT([no, $OPENJDK_TARGET_CPU])
@@ -358,7 +359,8 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_ZGC],
       AC_MSG_RESULT([no, $OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU])
       AVAILABLE=false
     fi
-  elif test "x$OPENJDK_TARGET_CPU" = "xppc64le"; then
+  elif test "x$OPENJDK_TARGET_CPU" = "xppc64le" || \
+       test "x$OPENJDK_TARGET_CPU" = "xriscv64"; then
     if test "x$OPENJDK_TARGET_OS" = "xlinux"; then
       AC_MSG_RESULT([yes])
     else
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -146,6 +146,12 @@ AC_DEFUN_ONCE([LIB_SETUP_LIBRARIES],
     fi
   fi

+  # Because RISC-V only has word-sized atomics, it requires libatomic where
+  # other common architectures do not. So link libatomic by default.
+  if test "x$OPENJDK_TARGET_OS" = xlinux && test "x$OPENJDK_TARGET_CPU" = xriscv64; then
+    BASIC_JVM_LIBS="$BASIC_JVM_LIBS -latomic"
+  fi
+
   # perfstat lib
   if test "x$OPENJDK_TARGET_OS" = xaix; then
     BASIC_JVM_LIBS="$BASIC_JVM_LIBS -lperfstat"
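Note on the hunk above (annotation added for clarity; the compiler-behavior claim is our reading, not part of the patch): sub-word atomic operations, such as the 1- and 2-byte __atomic builtins HotSpot relies on, are lowered to libatomic library calls on riscv64, so the JVM would fail to link without -latomic.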
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -561,6 +561,8 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HELPER],
     HOTSPOT_$1_CPU_DEFINE=PPC64
   elif test "x$OPENJDK_$1_CPU" = xppc64le; then
     HOTSPOT_$1_CPU_DEFINE=PPC64
+  elif test "x$OPENJDK_$1_CPU" = xriscv64; then
+    HOTSPOT_$1_CPU_DEFINE=RISCV64

   # The cpu defines below are for zero, we don't support them directly.
   elif test "x$OPENJDK_$1_CPU" = xsparc; then
@@ -571,8 +573,6 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HELPER],
     HOTSPOT_$1_CPU_DEFINE=S390
   elif test "x$OPENJDK_$1_CPU" = xs390x; then
     HOTSPOT_$1_CPU_DEFINE=S390
-  elif test "x$OPENJDK_$1_CPU" = xriscv64; then
-    HOTSPOT_$1_CPU_DEFINE=RISCV
   elif test "x$OPENJDK_$1_CPU" != x; then
     HOTSPOT_$1_CPU_DEFINE=$(echo $OPENJDK_$1_CPU | tr a-z A-Z)
   fi
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -149,6 +149,13 @@ ifeq ($(call check-jvm-feature, compiler2), true)
     )))
   endif

+  ifeq ($(HOTSPOT_TARGET_CPU_ARCH), riscv)
+    AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
+        $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/$(HOTSPOT_TARGET_CPU_ARCH)_v.ad \
+        $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/$(HOTSPOT_TARGET_CPU_ARCH)_b.ad \
+      )))
+  endif
+
   ifeq ($(call check-jvm-feature, shenandoahgc), true)
     AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
         $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/shenandoah/shenandoah_$(HOTSPOT_TARGET_CPU).ad \
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1590,7 +1590,9 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
 }


-void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
+void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
+                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
+  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on aarch64");

   Assembler::Condition acond, ncond;
   switch (condition) {
@@ -34,14 +34,6 @@
     return "";
   }

-  // Returns address of n-th instruction preceding addr,
-  // NULL if no preceding instruction can be found.
-  // On ARM(aarch64), we assume a constant instruction length.
-  // It might be beneficial to check "is_readable" as we do on ppc and s390.
-  static address find_prev_instr(address addr, int n_instr) {
-    return addr - Assembler::instruction_size*n_instr;
-  }
-
   // special-case instruction decoding.
   // There may be cases where the binutils disassembler doesn't do
   // the perfect job. In those cases, decode_instruction0 may kick in
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1417,7 +1417,10 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
 }


-void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
+void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
+                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
+  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on arm");

   AsmCondition acond = al;
   AsmCondition ncond = nv;
   if (opr1 != opr2) {
@@ -33,14 +33,6 @@
     return "";
   }

-  // Returns address of n-th instruction preceding addr,
-  // NULL if no preceding instruction can be found.
-  // On ARM, we assume a constant instruction length.
-  // It might be beneficial to check "is_readable" as we do on ppc and s390.
-  static address find_prev_instr(address addr, int n_instr) {
-    return addr - Assembler::InstructionSize*n_instr;
-  }
-
   // special-case instruction decoding.
   // There may be cases where the binutils disassembler doesn't do
   // the perfect job. In those cases, decode_instruction0 may kick in
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1554,8 +1554,10 @@ inline void load_to_reg(LIR_Assembler *lasm, LIR_Opr src, LIR_Opr dst) {
   }
 }

-void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
+void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
+                          LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
+  assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on ppc");
+
   if (opr1->is_equal(opr2) || opr1->is_same_register(opr2)) {
     load_to_reg(this, opr1, result); // Condition doesn't matter.
     return;
@@ -87,22 +87,6 @@
   } \
 }

-address Disassembler::find_prev_instr(address here, int n_instr) {
-  if (!os::is_readable_pointer(here)) return NULL; // obviously a bad location to decode
-
-  // Find most distant possible starting point.
-  // Narrow down because we don't want to SEGV while printing.
-  address start = here - n_instr*Assembler::instr_maxlen(); // starting point can't be further away.
-  while ((start < here) && !os::is_readable_range(start, here)) {
-    start = align_down(start, os::min_page_size()) + os::min_page_size();
-  }
-  if (start >= here) {
-    // Strange. Can only happen with here on page boundary.
-    return NULL;
-  }
-  return start;
-}
-
 address Disassembler::decode_instruction0(address here, outputStream * st, address virtual_begin ) {
   if (is_abstract()) {
     // The disassembler library was not loaded (yet),
@@ -34,15 +34,6 @@
     return "ppc64";
   }

-  // Find preceding instruction.
-  //
-  // Starting at the passed location, the n-th preceding (towards lower addresses)
-  // location is searched, the contents of which - if interpreted as
-  // instructions - has the passed location as n-th successor.
-  // - If no such location exists, NULL is returned. The caller should then
-  //   terminate its search and react properly.
-  static address find_prev_instr(address here, int n_instr);
-
   // special-case instruction decoding.
   // There may be cases where the binutils disassembler doesn't do
   // the perfect job. In those cases, decode_instruction0 may kick in
src/hotspot/cpu/riscv/abstractInterpreter_riscv.cpp (Normal file, 177 lines)
@@ -0,0 +1,177 @@
/*
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/constMethod.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_LONG   : i = 5; break;
    case T_VOID   : i = 6; break;
    case T_FLOAT  : i = 7; break;
    case T_DOUBLE : i = 8; break;
    case T_OBJECT : i = 9; break;
    case T_ARRAY  : i = 9; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
         "index out of bounds");
  return i;
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved fp thru expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  assert_cond(method != NULL);
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return (overhead_size + method_stack + stub_code);
}

// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
                                         int temps,
                                         int extra_args,
                                         int monitors,
                                         int callee_params,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in TemplateInterpreterGenerator::generate_method_entry.

  // fixed size of an interpreter frame:
  int overhead = frame::sender_sp_offset -
                 frame::interpreter_frame_initial_sp_offset;
  // Our locals were accounted for by the caller (or last_frame_adjust
  // on the transition). Since the callee parameters already account
  // for the callee's params we only need to account for the extra
  // locals.
  int size = overhead +
             (callee_locals - callee_params) +
             monitors * frame::interpreter_frame_monitor_size() +
             // On the top frame, at all times SP <= ESP, and SP is
             // 16-aligned. We ensure this by adjusting SP on method
             // entry and re-entry to allow room for the maximum size of
             // the expression stack. When we call another method we bump
             // SP so that no stack space is wasted. So, only on the top
             // frame do we need to allow max_stack words.
             (is_top_frame ? max_stack : temps + extra_args);

  // On riscv we always keep the stack pointer 16-aligned, so we
  // must round up here.
  size = align_up(size, 2);

  return size;
}
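
// Illustrative arithmetic (comment added; the numbers are made up for the
// example): with overhead = 12 words, monitors = 2, temps = 3, extra_args = 1,
// callee_locals == callee_params and is_top_frame == false, the raw size is
// 12 + 0 + 2 * frame::interpreter_frame_monitor_size() + (3 + 1) words.
// align_up(size, 2) then rounds the word count up to an even number, so a
// frame made of 8-byte words always keeps SP 16-byte aligned.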

void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {
  // The frame interpreter_frame is guaranteed to be the right size,
  // as determined by a previous call to the size_activation() method.
  // It is also guaranteed to be walkable even though it is in a
  // skeletal state
  assert_cond(method != NULL && caller != NULL && interpreter_frame != NULL);
  int max_locals = method->max_locals() * Interpreter::stackElementWords;
  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
                     Interpreter::stackElementWords;

#ifdef ASSERT
  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
#endif

  interpreter_frame->interpreter_frame_set_method(method);
  // NOTE the difference in using sender_sp and interpreter_frame_sender_sp
  // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
  // and sender_sp is fp
  intptr_t* locals = NULL;
  if (caller->is_interpreted_frame()) {
    locals = caller->interpreter_frame_last_sp() + caller_actual_parameters - 1;
  } else {
    locals = interpreter_frame->sender_sp() + max_locals - 1;
  }

#ifdef ASSERT
  if (caller->is_interpreted_frame()) {
    assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
  }
#endif

  interpreter_frame->interpreter_frame_set_locals(locals);
  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
  BasicObjectLock* monbot = montop - moncount;
  interpreter_frame->interpreter_frame_set_monitor_end(monbot);

  // Set last_sp
  intptr_t* last_sp = (intptr_t*) monbot -
                      tempcount*Interpreter::stackElementWords -
                      popframe_extra_args;
  interpreter_frame->interpreter_frame_set_last_sp(last_sp);

  // All frames but the initial (oldest) interpreter frame we fill in have
  // a value for sender_sp that allows walking the stack but isn't
  // truly correct. Correct the value here.
  if (extra_locals != 0 &&
      interpreter_frame->sender_sp() ==
      interpreter_frame->interpreter_frame_sender_sp()) {
    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
                                                       extra_locals);
  }

  *interpreter_frame->interpreter_frame_cache_addr() =
    method->constants()->cache();
  *interpreter_frame->interpreter_frame_mirror_addr() =
    method->method_holder()->java_mirror();
}
src/hotspot/cpu/riscv/assembler_riscv.cpp (Normal file, 372 lines)
@@ -0,0 +1,372 @@
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include <stdio.h>
#include <sys/types.h>

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"

int AbstractAssembler::code_fill_byte() {
  return 0;
}

void Assembler::add(Register Rd, Register Rn, int64_t increment, Register temp) {
  if (is_imm_in_range(increment, 12, 0)) {
    addi(Rd, Rn, increment);
  } else {
    assert_different_registers(Rn, temp);
    li(temp, increment);
    add(Rd, Rn, temp);
  }
}
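
// Illustrative note (comment added; not in the original patch): an increment
// that fits in a signed 12-bit immediate folds into one instruction, while a
// larger one is materialized into the temp register first, e.g.
//   add(x5, x6, 2047, t0);  // emits: addi x5, x6, 2047
//   add(x5, x6, 4096, t0);  // emits: li t0, 4096; add x5, x6, t0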

void Assembler::addw(Register Rd, Register Rn, int64_t increment, Register temp) {
  if (is_imm_in_range(increment, 12, 0)) {
    addiw(Rd, Rn, increment);
  } else {
    assert_different_registers(Rn, temp);
    li(temp, increment);
    addw(Rd, Rn, temp);
  }
}

void Assembler::sub(Register Rd, Register Rn, int64_t decrement, Register temp) {
  if (is_imm_in_range(-decrement, 12, 0)) {
    addi(Rd, Rn, -decrement);
  } else {
    assert_different_registers(Rn, temp);
    li(temp, decrement);
    sub(Rd, Rn, temp);
  }
}

void Assembler::subw(Register Rd, Register Rn, int64_t decrement, Register temp) {
  if (is_imm_in_range(-decrement, 12, 0)) {
    addiw(Rd, Rn, -decrement);
  } else {
    assert_different_registers(Rn, temp);
    li(temp, decrement);
    subw(Rd, Rn, temp);
  }
}

void Assembler::zext_w(Register Rd, Register Rs) {
  add_uw(Rd, Rs, zr);
}

void Assembler::_li(Register Rd, int64_t imm) {
  // int64_t is in range 0x8000 0000 0000 0000 ~ 0x7fff ffff ffff ffff
  int shift = 12;
  int64_t upper = imm, lower = imm;
  // Split imm into a lower 12-bit sign-extended part and the remainder,
  // because addi will sign-extend the lower imm.
  lower = ((int32_t)imm << 20) >> 20;
  upper -= lower;

  // Test whether imm is a 32-bit integer.
  if (!(((imm) & ~(int64_t)0x7fffffff) == 0 ||
        (((imm) & ~(int64_t)0x7fffffff) == ~(int64_t)0x7fffffff))) {
    while (((upper >> shift) & 1) == 0) { shift++; }
    upper >>= shift;
    li(Rd, upper);
    slli(Rd, Rd, shift);
    if (lower != 0) {
      addi(Rd, Rd, lower);
    }
  } else {
    // 32-bit integer
    Register hi_Rd = zr;
    if (upper != 0) {
      lui(Rd, (int32_t)upper);
      hi_Rd = Rd;
    }
    if (lower != 0 || hi_Rd == zr) {
      addiw(Rd, hi_Rd, lower);
    }
  }
}
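
// Worked example (comment added; not in the original patch): for
// imm = 0x12345678 (a 32-bit value), lower = 0x678 and upper = 0x12345000,
// so _li emits lui(Rd, 0x12345000); addiw(Rd, Rd, 0x678). For imm = 0xfff
// the low 12 bits sign-extend to -1, so upper becomes 0x1000 and _li emits
// lui(Rd, 0x1000); addiw(Rd, Rd, -1).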

void Assembler::li64(Register Rd, int64_t imm) {
  // Load upper 32 bits. upper = imm[63:32], but if imm[31] == 1 or
  // (imm[31:20] == 0x7ff && imm[19] == 1), upper = imm[63:32] + 1.
  int64_t lower = imm & 0xffffffff;
  lower -= ((lower << 44) >> 44);
  int64_t tmp_imm = ((uint64_t)(imm & 0xffffffff00000000)) + (uint64_t)lower;
  int32_t upper = (tmp_imm - (int32_t)lower) >> 32;

  // Load upper 32 bits
  int64_t up = upper, lo = upper;
  lo = (lo << 52) >> 52;
  up -= lo;
  up = (int32_t)up;
  lui(Rd, up);
  addi(Rd, Rd, lo);

  // Load the remaining 32 bits.
  slli(Rd, Rd, 12);
  addi(Rd, Rd, (int32_t)lower >> 20);
  slli(Rd, Rd, 12);
  lower = ((int32_t)imm << 12) >> 20;
  addi(Rd, Rd, lower);
  slli(Rd, Rd, 8);
  lower = imm & 0xff;
  addi(Rd, Rd, lower);
}

void Assembler::li32(Register Rd, int32_t imm) {
  // int32_t is in range 0x8000 0000 ~ 0x7fff ffff, and imm[31] is the sign bit
  int64_t upper = imm, lower = imm;
  lower = (imm << 20) >> 20;
  upper -= lower;
  upper = (int32_t)upper;
  // lui Rd, imm[31:12] + imm[11]
  lui(Rd, upper);
  // use addiw to distinguish li32 from li64
  addiw(Rd, Rd, lower);
}

#define INSN(NAME, REGISTER)                                       \
  void Assembler::NAME(const address &dest, Register temp) {       \
    assert_cond(dest != NULL);                                     \
    int64_t distance = dest - pc();                                \
    if (is_imm_in_range(distance, 20, 1)) {                        \
      jal(REGISTER, distance);                                     \
    } else {                                                       \
      assert(temp != noreg, "temp must not be empty register!");   \
      int32_t offset = 0;                                          \
      movptr_with_offset(temp, dest, offset);                      \
      jalr(REGISTER, temp, offset);                                \
    }                                                              \
  }                                                                \
  void Assembler::NAME(Label &l, Register temp) {                  \
    jal(REGISTER, l, temp);                                        \
  }                                                                \

INSN(j,   x0);
INSN(jal, x1);

#undef INSN

#define INSN(NAME, REGISTER)            \
  void Assembler::NAME(Register Rs) {   \
    jalr(REGISTER, Rs, 0);              \
  }

INSN(jr,   x0);
INSN(jalr, x1);

#undef INSN

void Assembler::ret() {
  jalr(x0, x1, 0);
}

#define INSN(NAME, REGISTER)                                       \
  void Assembler::NAME(const address &dest, Register temp) {       \
    assert_cond(dest != NULL);                                     \
    assert(temp != noreg, "temp must not be empty register!");     \
    int64_t distance = dest - pc();                                \
    if (is_offset_in_range(distance, 32)) {                        \
      auipc(temp, distance + 0x800);                               \
      jalr(REGISTER, temp, ((int32_t)distance << 20) >> 20);       \
    } else {                                                       \
      int32_t offset = 0;                                          \
      movptr_with_offset(temp, dest, offset);                      \
      jalr(REGISTER, temp, offset);                                \
    }                                                              \
  }

INSN(call, x1);
INSN(tail, x0);

#undef INSN

#define INSN(NAME, REGISTER)                                       \
  void Assembler::NAME(const Address &adr, Register temp) {        \
    switch (adr.getMode()) {                                       \
      case Address::literal: {                                     \
        code_section()->relocate(pc(), adr.rspec());               \
        NAME(adr.target(), temp);                                  \
        break;                                                     \
      }                                                            \
      case Address::base_plus_offset: {                            \
        int32_t offset = 0;                                        \
        baseOffset(temp, adr, offset);                             \
        jalr(REGISTER, temp, offset);                              \
        break;                                                     \
      }                                                            \
      default:                                                     \
        ShouldNotReachHere();                                      \
    }                                                              \
  }

INSN(j,    x0);
INSN(jal,  x1);
INSN(call, x1);
INSN(tail, x0);

#undef INSN

void Assembler::wrap_label(Register r1, Register r2, Label &L, compare_and_branch_insn insn,
                           compare_and_branch_label_insn neg_insn, bool is_far) {
  if (is_far) {
    Label done;
    (this->*neg_insn)(r1, r2, done, /* is_far */ false);
    j(L);
    bind(done);
  } else {
    if (L.is_bound()) {
      (this->*insn)(r1, r2, target(L));
    } else {
      L.add_patch_at(code(), locator());
      (this->*insn)(r1, r2, pc());
    }
  }
}
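
// Illustrative expansion (comment added; not in the original patch): a far
// conditional branch such as "beq r1, r2, L" is emitted as the negated
// short-range branch over an unconditional jump:
//   bne r1, r2, done   // B-type reach is +/-4 KiB
//   j   L              // J-type reach is +/-1 MiB
//   done: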

void Assembler::wrap_label(Register Rt, Label &L, Register tmp, load_insn_by_temp insn) {
  if (L.is_bound()) {
    (this->*insn)(Rt, target(L), tmp);
  } else {
    L.add_patch_at(code(), locator());
    (this->*insn)(Rt, pc(), tmp);
  }
}

void Assembler::wrap_label(Register Rt, Label &L, jal_jalr_insn insn) {
  if (L.is_bound()) {
    (this->*insn)(Rt, target(L));
  } else {
    L.add_patch_at(code(), locator());
    (this->*insn)(Rt, pc());
  }
}

void Assembler::movptr_with_offset(Register Rd, address addr, int32_t &offset) {
  uintptr_t imm64 = (uintptr_t)addr;
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%" PRIx64, imm64);
    block_comment(buffer);
  }
#endif
  assert(is_unsigned_imm_in_range(imm64, 47, 0) || (imm64 == (uintptr_t)-1),
         "48-bit overflow in address constant");
  // Load the upper 32 bits
  int32_t imm = imm64 >> 16;
  int64_t upper = imm, lower = imm;
  lower = (lower << 52) >> 52;
  upper -= lower;
  upper = (int32_t)upper;
  lui(Rd, upper);
  addi(Rd, Rd, lower);

  // Load the remaining 16 bits.
  slli(Rd, Rd, 11);
  addi(Rd, Rd, (imm64 >> 5) & 0x7ff);
  slli(Rd, Rd, 5);

  // This offset will be used by a following jalr/ld.
  offset = imm64 & 0x1f;
}

void Assembler::movptr(Register Rd, uintptr_t imm64) {
  movptr(Rd, (address)imm64);
}

void Assembler::movptr(Register Rd, address addr) {
  int offset = 0;
  movptr_with_offset(Rd, addr, offset);
  addi(Rd, Rd, offset);
}

void Assembler::ifence() {
  fence_i();
  if (UseConservativeFence) {
    fence(ir, ir);
  }
}

#define INSN(NAME, NEG_INSN)                                                 \
  void Assembler::NAME(Register Rs, Register Rt, const address &dest) {     \
    NEG_INSN(Rt, Rs, dest);                                                  \
  }                                                                          \
  void Assembler::NAME(Register Rs, Register Rt, Label &l, bool is_far) {   \
    NEG_INSN(Rt, Rs, l, is_far);                                             \
  }

INSN(bgt,  blt);
INSN(ble,  bge);
INSN(bgtu, bltu);
INSN(bleu, bgeu);
#undef INSN

#undef __

Address::Address(address target, relocInfo::relocType rtype) : _base(noreg), _offset(0), _mode(literal) {
  _target = target;
  switch (rtype) {
    case relocInfo::oop_type:
    case relocInfo::metadata_type:
      // Oops are a special case. Normally they would be their own section
      // but in cases like icBuffer they are literals in the code stream that
      // we don't have a section for. We use none so that we get a literal address
      // which is always patchable.
      break;
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(target);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(target);
      break;
    case relocInfo::opt_virtual_call_type:
      _rspec = opt_virtual_call_Relocation::spec();
      break;
    case relocInfo::static_call_type:
      _rspec = static_call_Relocation::spec();
      break;
    case relocInfo::runtime_call_type:
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      _rspec = RelocationHolder::none;
      break;
    default:
      ShouldNotReachHere();
  }
}
src/hotspot/cpu/riscv/assembler_riscv.hpp (Normal file, 3050 lines)
File diff suppressed because it is too large.
src/hotspot/cpu/riscv/assembler_riscv.inline.hpp (Normal file, 47 lines)
@@ -0,0 +1,47 @@
/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_ASSEMBLER_RISCV_INLINE_HPP
#define CPU_RISCV_ASSEMBLER_RISCV_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"

inline bool is_imm_in_range(long value, unsigned bits, unsigned align_bits) {
  intx sign_bits = (value >> (bits + align_bits - 1));
  return ((value & right_n_bits(align_bits)) == 0) && ((sign_bits == 0) || (sign_bits == -1));
}
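
// Illustrative examples (comment added; not in the original patch):
//   is_imm_in_range(2047, 12, 0)   -> true   (largest signed 12-bit value)
//   is_imm_in_range(2048, 12, 0)   -> false  (overflows into the sign bit)
//   is_imm_in_range(-2048, 12, 0)  -> true   (smallest signed 12-bit value)
//   is_imm_in_range(6, 12, 2)      -> false  (not 4-byte aligned)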

inline bool is_unsigned_imm_in_range(intx value, unsigned bits, unsigned align_bits) {
  return (value >= 0) && ((value & right_n_bits(align_bits)) == 0) && ((value >> (align_bits + bits)) == 0);
}

inline bool is_offset_in_range(intx offset, unsigned bits) {
  return is_imm_in_range(offset, bits, 0);
}

#endif // CPU_RISCV_ASSEMBLER_RISCV_INLINE_HPP
src/hotspot/cpu/riscv/bytes_riscv.hpp (Normal file, 167 lines)
@@ -0,0 +1,167 @@
/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_BYTES_RISCV_HPP
#define CPU_RISCV_BYTES_RISCV_HPP

#include "memory/allStatic.hpp"

class Bytes: AllStatic {
 public:
  // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
  // RISCV needs to check for alignment.

  // Forward declarations of the compiler-dependent implementation
  static inline u2 swap_u2(u2 x);
  static inline u4 swap_u4(u4 x);
  static inline u8 swap_u8(u8 x);

  static inline u2 get_native_u2(address p) {
    if ((intptr_t(p) & 1) == 0) {
      return *(u2*)p;
    } else {
      return ((u2)(p[1]) << 8) |
             ((u2)(p[0]));
    }
  }
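
  // Illustrative note (comment added; not in the original patch): on a
  // misaligned address the halfword is assembled byte by byte in
  // little-endian order, e.g. p[0] = 0x34, p[1] = 0x12 yields 0x1234.
  // This sidesteps misaligned memory accesses, which RISC-V implementations
  // may trap on or emulate slowly.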

  static inline u4 get_native_u4(address p) {
    switch (intptr_t(p) & 3) {
      case 0:
        return *(u4*)p;

      case 2:
        return ((u4)(((u2*)p)[1]) << 16) |
               ((u4)(((u2*)p)[0]));

      default:
        return ((u4)(p[3]) << 24) |
               ((u4)(p[2]) << 16) |
               ((u4)(p[1]) <<  8) |
               ((u4)(p[0]));
    }
  }

  static inline u8 get_native_u8(address p) {
    switch (intptr_t(p) & 7) {
      case 0:
        return *(u8*)p;

      case 4:
        return ((u8)(((u4*)p)[1]) << 32) |
               ((u8)(((u4*)p)[0]));

      case 2:
        return ((u8)(((u2*)p)[3]) << 48) |
               ((u8)(((u2*)p)[2]) << 32) |
               ((u8)(((u2*)p)[1]) << 16) |
               ((u8)(((u2*)p)[0]));

      default:
        return ((u8)(p[7]) << 56) |
               ((u8)(p[6]) << 48) |
               ((u8)(p[5]) << 40) |
               ((u8)(p[4]) << 32) |
               ((u8)(p[3]) << 24) |
               ((u8)(p[2]) << 16) |
               ((u8)(p[1]) <<  8) |
               ((u8)(p[0]));
    }
  }

  static inline void put_native_u2(address p, u2 x) {
    if ((intptr_t(p) & 1) == 0) {
      *(u2*)p = x;
    } else {
      p[1] = x >> 8;
      p[0] = x;
    }
  }

  static inline void put_native_u4(address p, u4 x) {
    switch (intptr_t(p) & 3) {
      case 0:
        *(u4*)p = x;
        break;

      case 2:
        ((u2*)p)[1] = x >> 16;
        ((u2*)p)[0] = x;
        break;

      default:
        ((u1*)p)[3] = x >> 24;
        ((u1*)p)[2] = x >> 16;
        ((u1*)p)[1] = x >>  8;
        ((u1*)p)[0] = x;
        break;
    }
  }

  static inline void put_native_u8(address p, u8 x) {
    switch (intptr_t(p) & 7) {
      case 0:
        *(u8*)p = x;
        break;

      case 4:
        ((u4*)p)[1] = x >> 32;
        ((u4*)p)[0] = x;
        break;

      case 2:
        ((u2*)p)[3] = x >> 48;
        ((u2*)p)[2] = x >> 32;
        ((u2*)p)[1] = x >> 16;
        ((u2*)p)[0] = x;
        break;

      default:
        ((u1*)p)[7] = x >> 56;
        ((u1*)p)[6] = x >> 48;
        ((u1*)p)[5] = x >> 40;
        ((u1*)p)[4] = x >> 32;
        ((u1*)p)[3] = x >> 24;
        ((u1*)p)[2] = x >> 16;
        ((u1*)p)[1] = x >>  8;
        ((u1*)p)[0] = x;
        break;
    }
  }

  // Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering)
  static inline u2 get_Java_u2(address p) { return swap_u2(get_native_u2(p)); }
  static inline u4 get_Java_u4(address p) { return swap_u4(get_native_u4(p)); }
  static inline u8 get_Java_u8(address p) { return swap_u8(get_native_u8(p)); }

  static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, swap_u2(x)); }
  static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, swap_u4(x)); }
  static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, swap_u8(x)); }
};

#include OS_CPU_HEADER(bytes)

#endif // CPU_RISCV_BYTES_RISCV_HPP
src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp (Normal file, 353 lines)
@@ -0,0 +1,353 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "c1/c1_CodeStubs.hpp"
|
||||
#include "c1/c1_FrameMap.hpp"
|
||||
#include "c1/c1_LIRAssembler.hpp"
|
||||
#include "c1/c1_MacroAssembler.hpp"
|
||||
#include "c1/c1_Runtime1.hpp"
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "nativeInst_riscv.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "vmreg_riscv.inline.hpp"
|
||||
|
||||
|
||||
#define __ ce->masm()->
|
||||
|
||||
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
InternalAddress safepoint_pc(__ pc() - __ offset() + safepoint_offset());
|
||||
__ code_section()->relocate(__ pc(), safepoint_pc.rspec());
|
||||
__ la(t0, safepoint_pc.target());
|
||||
__ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));
|
||||
|
||||
assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
|
||||
"polling page return stub not created yet");
|
||||
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
|
||||
|
||||
__ far_jump(RuntimeAddress(stub));
|
||||
}
|
||||
|
||||
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
Metadata *m = _method->as_constant_ptr()->as_metadata();
|
||||
__ mov_metadata(t0, m);
|
||||
ce->store_parameter(t0, 1);
|
||||
ce->store_parameter(_bci, 0);
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
__ j(_continuation);
|
||||
}
|
||||
|
||||
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
|
||||
: _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
|
||||
assert(info != NULL, "must have info");
|
||||
_info = new CodeEmitInfo(info);
|
||||
}
|
||||
|
||||
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
|
||||
: _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
|
||||
assert(info != NULL, "must have info");
|
||||
_info = new CodeEmitInfo(info);
|
||||
}
|
||||
|
||||
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
if (_info->deoptimize_on_exception()) {
|
||||
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
|
||||
__ far_call(RuntimeAddress(a));
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
debug_only(__ should_not_reach_here());
|
||||
return;
|
||||
}
|
||||
|
||||
if (_index->is_cpu_register()) {
|
||||
__ mv(t0, _index->as_register());
|
||||
} else {
|
||||
__ mv(t0, _index->as_jint());
|
||||
}
|
||||
Runtime1::StubID stub_id;
|
||||
if (_throw_index_out_of_bounds_exception) {
|
||||
stub_id = Runtime1::throw_index_exception_id;
|
||||
} else {
|
||||
assert(_array != NULL, "sanity");
|
||||
__ mv(t1, _array->as_pointer_register());
|
||||
stub_id = Runtime1::throw_range_check_failed_id;
|
||||
}
|
||||
int32_t off = 0;
|
||||
__ la_patchable(ra, RuntimeAddress(Runtime1::entry_for(stub_id)), off);
|
||||
__ jalr(ra, ra, off);
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
debug_only(__ should_not_reach_here());
|
||||
}
|
||||
|
||||
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
|
||||
_info = new CodeEmitInfo(info);
|
||||
}
|
||||
|
||||
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
|
||||
__ far_call(RuntimeAddress(a));
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
debug_only(__ should_not_reach_here());
|
||||
}
|
||||
|
||||
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
|
||||
if (_offset != -1) {
|
||||
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
|
||||
}
|
||||
__ bind(_entry);
|
||||
__ far_call(Address(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type));
|
||||
ce->add_call_info_here(_info);
|
||||
ce->verify_oop_map(_info);
|
||||
#ifdef ASSERT
|
||||
__ should_not_reach_here();
|
||||
#endif
|
||||
}
|
||||
|
||||
// Implementation of NewInstanceStub
|
||||
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
|
||||
_result = result;
|
||||
_klass = klass;
|
||||
_klass_reg = klass_reg;
|
||||
_info = new CodeEmitInfo(info);
|
||||
assert(stub_id == Runtime1::new_instance_id ||
|
||||
stub_id == Runtime1::fast_new_instance_id ||
|
||||
stub_id == Runtime1::fast_new_instance_init_check_id,
|
||||
"need new_instance id");
|
||||
_stub_id = stub_id;
|
||||
}
|
||||
|
||||
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
|
||||
assert(__ rsp_offset() == 0, "frame size should be fixed");
|
||||
__ bind(_entry);
  __ mv(x13, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}

// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == x9, "length must be in x9");
  assert(_klass_reg->as_register() == x13, "klass_reg must be in x13");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}

// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == x9, "length must be in x9");
  assert(_klass_reg->as_register() == x13, "klass_reg must be in x13");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}

// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ j(_continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
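  // Load the continuation address into ra so the runtime stub "returns"
  // straight to _continuation after the tail jump below.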
  __ la(ra, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
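// Note: the scheme above is not used on RISCV; PatchingStub::emit_code below
// intentionally asserts, since this port does not do C1 runtime patching.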

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "RISCV should not use C1 runtime patching");
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a = NULL;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a tmp register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mv(t0, _obj->as_register());
  }
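  // t0 may now hold the exception object, so pass t1 as the scratch
  // register for the far call.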
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), NULL, t1);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  // ---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  const int args_num = 5;
  VMRegPair args[args_num];
  BasicType signature[args_num] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, args_num);

  // push parameters
  Register r[args_num];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int j = 0; j < args_num; j++) {
    VMReg r_1 = args[j].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ sd(r[j], Address(sp, st_off));
    } else {
      assert(r[j] == args[j].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
  if (call == NULL) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ la(t1, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
    __ add_memory_int32(Address(t1), 1);
  }
#endif

  __ j(_continuation);
}

#undef __
84
src/hotspot/cpu/riscv/c1_Defs_riscv.hpp
Normal file
@@ -0,0 +1,84 @@
/*
 * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_C1_DEFS_RISCV_HPP
#define CPU_RISCV_C1_DEFS_RISCV_HPP

// native word offsets from memory address (little endian)
enum {
  pd_lo_word_offset_in_bytes = 0,
  pd_hi_word_offset_in_bytes = BytesPerWord
};

// explicit rounding operations are required to implement the strictFP mode
enum {
  pd_strict_fp_requires_explicit_rounding = false
};

// registers
enum {
  pd_nof_cpu_regs_frame_map = RegisterImpl::number_of_registers,      // number of registers used during code emission
  pd_nof_fpu_regs_frame_map = FloatRegisterImpl::number_of_registers, // number of float registers used during code emission

  // caller saved
  pd_nof_caller_save_cpu_regs_frame_map = 13, // number of registers killed by calls
  pd_nof_caller_save_fpu_regs_frame_map = 32, // number of float registers killed by calls

  pd_first_callee_saved_reg = pd_nof_caller_save_cpu_regs_frame_map,
  pd_last_callee_saved_reg = 21,

  pd_last_allocatable_cpu_reg = pd_nof_caller_save_cpu_regs_frame_map - 1,

  pd_nof_cpu_regs_reg_alloc
    = pd_nof_caller_save_cpu_regs_frame_map, // number of registers that are visible to register allocator
  pd_nof_fpu_regs_reg_alloc = 32, // number of float registers that are visible to register allocator

  pd_nof_cpu_regs_linearscan = 32, // number of registers visible to linear scan
  pd_nof_fpu_regs_linearscan = pd_nof_fpu_regs_frame_map, // number of float registers visible to linear scan
  pd_nof_xmm_regs_linearscan = 0, // don't have vector registers

  pd_first_cpu_reg = 0,
  pd_last_cpu_reg = pd_nof_cpu_regs_reg_alloc - 1,
  pd_first_byte_reg = 0,
  pd_last_byte_reg = pd_nof_cpu_regs_reg_alloc - 1,

  pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
  pd_last_fpu_reg = pd_first_fpu_reg + 31,

  pd_first_callee_saved_fpu_reg_1 = 8 + pd_first_fpu_reg,
  pd_last_callee_saved_fpu_reg_1 = 9 + pd_first_fpu_reg,
  pd_first_callee_saved_fpu_reg_2 = 18 + pd_first_fpu_reg,
  pd_last_callee_saved_fpu_reg_2 = 27 + pd_first_fpu_reg
};


// Encoding of float value in debug info. This is true on x86 where
// floats are extended to doubles when stored in the stack, false for
// RISCV where floats and doubles are stored in their native form.
enum {
  pd_float_saved_as_double = false
};

#endif // CPU_RISCV_C1_DEFS_RISCV_HPP
30
src/hotspot/cpu/riscv/c1_FpuStackSim_riscv.cpp
Normal file
@@ -0,0 +1,30 @@
/*
 * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

//--------------------------------------------------------
//               FpuStackSim
//--------------------------------------------------------

// No FPU stack on RISCV
32
src/hotspot/cpu/riscv/c1_FpuStackSim_riscv.hpp
Normal file
@@ -0,0 +1,32 @@
/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_C1_FPUSTACKSIM_RISCV_HPP
#define CPU_RISCV_C1_FPUSTACKSIM_RISCV_HPP

// No FPU stack on RISCV
class FpuStackSim;

#endif // CPU_RISCV_C1_FPUSTACKSIM_RISCV_HPP
388
src/hotspot/cpu/riscv/c1_FrameMap_riscv.cpp
Normal file
@@ -0,0 +1,388 @@
/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_riscv.inline.hpp"

LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
  LIR_Opr opr = LIR_OprFact::illegalOpr;
  VMReg r_1 = reg->first();
  VMReg r_2 = reg->second();
  if (r_1->is_stack()) {
    // Convert stack slot to an SP offset
    // The calling convention does not count the SharedRuntime::out_preserve_stack_slots() value
    // so we must add it in here.
    int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
    opr = LIR_OprFact::address(new LIR_Address(sp_opr, st_off, type));
  } else if (r_1->is_Register()) {
    Register reg1 = r_1->as_Register();
    if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
      Register reg2 = r_2->as_Register();
      assert(reg2 == reg1, "must be same register");
      opr = as_long_opr(reg1);
    } else if (is_reference_type(type)) {
      opr = as_oop_opr(reg1);
    } else if (type == T_METADATA) {
      opr = as_metadata_opr(reg1);
    } else if (type == T_ADDRESS) {
      opr = as_address_opr(reg1);
    } else {
      opr = as_opr(reg1);
    }
  } else if (r_1->is_FloatRegister()) {
    assert(type == T_DOUBLE || type == T_FLOAT, "wrong type");
    int num = r_1->as_FloatRegister()->encoding();
    if (type == T_FLOAT) {
      opr = LIR_OprFact::single_fpu(num);
    } else {
      opr = LIR_OprFact::double_fpu(num);
    }
  } else {
    ShouldNotReachHere();
  }
  return opr;
}

LIR_Opr FrameMap::zr_opr;
LIR_Opr FrameMap::r1_opr;
LIR_Opr FrameMap::r2_opr;
LIR_Opr FrameMap::r3_opr;
LIR_Opr FrameMap::r4_opr;
LIR_Opr FrameMap::r5_opr;
LIR_Opr FrameMap::r6_opr;
LIR_Opr FrameMap::r7_opr;
LIR_Opr FrameMap::r8_opr;
LIR_Opr FrameMap::r9_opr;
LIR_Opr FrameMap::r10_opr;
LIR_Opr FrameMap::r11_opr;
LIR_Opr FrameMap::r12_opr;
LIR_Opr FrameMap::r13_opr;
LIR_Opr FrameMap::r14_opr;
LIR_Opr FrameMap::r15_opr;
LIR_Opr FrameMap::r16_opr;
LIR_Opr FrameMap::r17_opr;
LIR_Opr FrameMap::r18_opr;
LIR_Opr FrameMap::r19_opr;
LIR_Opr FrameMap::r20_opr;
LIR_Opr FrameMap::r21_opr;
LIR_Opr FrameMap::r22_opr;
LIR_Opr FrameMap::r23_opr;
LIR_Opr FrameMap::r24_opr;
LIR_Opr FrameMap::r25_opr;
LIR_Opr FrameMap::r26_opr;
LIR_Opr FrameMap::r27_opr;
LIR_Opr FrameMap::r28_opr;
LIR_Opr FrameMap::r29_opr;
LIR_Opr FrameMap::r30_opr;
LIR_Opr FrameMap::r31_opr;

LIR_Opr FrameMap::fp_opr;
LIR_Opr FrameMap::sp_opr;

LIR_Opr FrameMap::receiver_opr;

LIR_Opr FrameMap::zr_oop_opr;
LIR_Opr FrameMap::r1_oop_opr;
LIR_Opr FrameMap::r2_oop_opr;
LIR_Opr FrameMap::r3_oop_opr;
LIR_Opr FrameMap::r4_oop_opr;
LIR_Opr FrameMap::r5_oop_opr;
LIR_Opr FrameMap::r6_oop_opr;
LIR_Opr FrameMap::r7_oop_opr;
LIR_Opr FrameMap::r8_oop_opr;
LIR_Opr FrameMap::r9_oop_opr;
LIR_Opr FrameMap::r10_oop_opr;
LIR_Opr FrameMap::r11_oop_opr;
LIR_Opr FrameMap::r12_oop_opr;
LIR_Opr FrameMap::r13_oop_opr;
LIR_Opr FrameMap::r14_oop_opr;
LIR_Opr FrameMap::r15_oop_opr;
LIR_Opr FrameMap::r16_oop_opr;
LIR_Opr FrameMap::r17_oop_opr;
LIR_Opr FrameMap::r18_oop_opr;
LIR_Opr FrameMap::r19_oop_opr;
LIR_Opr FrameMap::r20_oop_opr;
LIR_Opr FrameMap::r21_oop_opr;
LIR_Opr FrameMap::r22_oop_opr;
LIR_Opr FrameMap::r23_oop_opr;
LIR_Opr FrameMap::r24_oop_opr;
LIR_Opr FrameMap::r25_oop_opr;
LIR_Opr FrameMap::r26_oop_opr;
LIR_Opr FrameMap::r27_oop_opr;
LIR_Opr FrameMap::r28_oop_opr;
LIR_Opr FrameMap::r29_oop_opr;
LIR_Opr FrameMap::r30_oop_opr;
LIR_Opr FrameMap::r31_oop_opr;

LIR_Opr FrameMap::t0_opr;
LIR_Opr FrameMap::t1_opr;
LIR_Opr FrameMap::t0_long_opr;
LIR_Opr FrameMap::t1_long_opr;

LIR_Opr FrameMap::r10_metadata_opr;
LIR_Opr FrameMap::r11_metadata_opr;
LIR_Opr FrameMap::r12_metadata_opr;
LIR_Opr FrameMap::r13_metadata_opr;
LIR_Opr FrameMap::r14_metadata_opr;
LIR_Opr FrameMap::r15_metadata_opr;

LIR_Opr FrameMap::long10_opr;
LIR_Opr FrameMap::long11_opr;
LIR_Opr FrameMap::fpu10_float_opr;
LIR_Opr FrameMap::fpu10_double_opr;

LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, };

//--------------------------------------------------------
//               FrameMap
//--------------------------------------------------------
// |---f31--|
// |---..---|
// |---f28--|
// |---f27--|<---pd_last_callee_saved_fpu_reg_2
// |---..---|
// |---f18--|<---pd_first_callee_saved_fpu_reg_2
// |---f17--|
// |---..---|
// |---f10--|
// |---f9---|<---pd_last_callee_saved_fpu_reg_1
// |---f8---|<---pd_first_callee_saved_fpu_reg_1
// |---f7---|
// |---..---|
// |---f0---|
// |---x27--|
// |---x23--|
// |---x8---|
// |---x4---|
// |---x3---|
// |---x2---|
// |---x1---|
// |---x0---|
// |---x26--|<---pd_last_callee_saved_reg
// |---..---|
// |---x18--|
// |---x9---|<---pd_first_callee_saved_reg
// |---x31--|
// |---..---|
// |---x28--|
// |---x17--|
// |---..---|
// |---x10--|
// |---x7---|

void FrameMap::initialize() {
  assert(!_init_done, "once");

  int i = 0;

  // caller save register
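  // These 13 entries (x7, x10..x17, x28..x31) must stay in sync with
  // pd_nof_caller_save_cpu_regs_frame_map in c1_Defs_riscv.hpp.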
  map_register(i, x7);  r7_opr  = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x10); r10_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x11); r11_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x12); r12_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x13); r13_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x14); r14_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x15); r15_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x16); r16_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x17); r17_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x28); r28_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x29); r29_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x30); r30_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x31); r31_opr = LIR_OprFact::single_cpu(i); i++;

  // callee save register
  map_register(i, x9);  r9_opr  = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x18); r18_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x19); r19_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x20); r20_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x21); r21_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x22); r22_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x24); r24_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x25); r25_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, x26); r26_opr = LIR_OprFact::single_cpu(i); i++;

  // special register
  map_register(i, x0);  zr_opr  = LIR_OprFact::single_cpu(i); i++; // zr
  map_register(i, x1);  r1_opr  = LIR_OprFact::single_cpu(i); i++; // ra
  map_register(i, x2);  r2_opr  = LIR_OprFact::single_cpu(i); i++; // sp
  map_register(i, x3);  r3_opr  = LIR_OprFact::single_cpu(i); i++; // gp
  map_register(i, x4);  r4_opr  = LIR_OprFact::single_cpu(i); i++; // thread
  map_register(i, x8);  r8_opr  = LIR_OprFact::single_cpu(i); i++; // fp
  map_register(i, x23); r23_opr = LIR_OprFact::single_cpu(i); i++; // java thread
  map_register(i, x27); r27_opr = LIR_OprFact::single_cpu(i); i++; // heapbase

  // tmp register
  map_register(i, x5); r5_opr = LIR_OprFact::single_cpu(i); i++; // t0
  map_register(i, x6); r6_opr = LIR_OprFact::single_cpu(i); i++; // t1

  t0_opr = r5_opr;
  t1_opr = r6_opr;
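  // On RV64 a jlong fits in one 64-bit GPR, so these "double cpu" operands
  // map both the low and the high half to the same register.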
  t0_long_opr = LIR_OprFact::double_cpu(r5_opr->cpu_regnr(), r5_opr->cpu_regnr());
  t1_long_opr = LIR_OprFact::double_cpu(r6_opr->cpu_regnr(), r6_opr->cpu_regnr());

  long10_opr = LIR_OprFact::double_cpu(r10_opr->cpu_regnr(), r10_opr->cpu_regnr());
  long11_opr = LIR_OprFact::double_cpu(r11_opr->cpu_regnr(), r11_opr->cpu_regnr());

  fpu10_float_opr  = LIR_OprFact::single_fpu(10);
  fpu10_double_opr = LIR_OprFact::double_fpu(10);

  i = 0;
  _caller_save_cpu_regs[i++] = r7_opr;
  _caller_save_cpu_regs[i++] = r10_opr;
  _caller_save_cpu_regs[i++] = r11_opr;
  _caller_save_cpu_regs[i++] = r12_opr;
  _caller_save_cpu_regs[i++] = r13_opr;
  _caller_save_cpu_regs[i++] = r14_opr;
  _caller_save_cpu_regs[i++] = r15_opr;
  _caller_save_cpu_regs[i++] = r16_opr;
  _caller_save_cpu_regs[i++] = r17_opr;
  _caller_save_cpu_regs[i++] = r28_opr;
  _caller_save_cpu_regs[i++] = r29_opr;
  _caller_save_cpu_regs[i++] = r30_opr;
  _caller_save_cpu_regs[i++] = r31_opr;

  _init_done = true;

  zr_oop_opr = as_oop_opr(x0);
  r1_oop_opr = as_oop_opr(x1);
  r2_oop_opr = as_oop_opr(x2);
  r3_oop_opr = as_oop_opr(x3);
  r4_oop_opr = as_oop_opr(x4);
  r5_oop_opr = as_oop_opr(x5);
  r6_oop_opr = as_oop_opr(x6);
  r7_oop_opr = as_oop_opr(x7);
  r8_oop_opr = as_oop_opr(x8);
  r9_oop_opr = as_oop_opr(x9);
  r10_oop_opr = as_oop_opr(x10);
  r11_oop_opr = as_oop_opr(x11);
  r12_oop_opr = as_oop_opr(x12);
  r13_oop_opr = as_oop_opr(x13);
  r14_oop_opr = as_oop_opr(x14);
  r15_oop_opr = as_oop_opr(x15);
  r16_oop_opr = as_oop_opr(x16);
  r17_oop_opr = as_oop_opr(x17);
  r18_oop_opr = as_oop_opr(x18);
  r19_oop_opr = as_oop_opr(x19);
  r20_oop_opr = as_oop_opr(x20);
  r21_oop_opr = as_oop_opr(x21);
  r22_oop_opr = as_oop_opr(x22);
  r23_oop_opr = as_oop_opr(x23);
  r24_oop_opr = as_oop_opr(x24);
  r25_oop_opr = as_oop_opr(x25);
  r26_oop_opr = as_oop_opr(x26);
  r27_oop_opr = as_oop_opr(x27);
  r28_oop_opr = as_oop_opr(x28);
  r29_oop_opr = as_oop_opr(x29);
  r30_oop_opr = as_oop_opr(x30);
  r31_oop_opr = as_oop_opr(x31);

  r10_metadata_opr = as_metadata_opr(x10);
  r11_metadata_opr = as_metadata_opr(x11);
  r12_metadata_opr = as_metadata_opr(x12);
  r13_metadata_opr = as_metadata_opr(x13);
  r14_metadata_opr = as_metadata_opr(x14);
  r15_metadata_opr = as_metadata_opr(x15);

  sp_opr = as_pointer_opr(sp);
  fp_opr = as_pointer_opr(fp);

  VMRegPair regs;
  BasicType sig_bt = T_OBJECT;
  SharedRuntime::java_calling_convention(&sig_bt, &regs, 1);
  receiver_opr = as_oop_opr(regs.first()->as_Register());

  for (i = 0; i < nof_caller_save_fpu_regs; i++) {
    _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
  }
}


Address FrameMap::make_new_address(ByteSize sp_offset) const {
  return Address(sp, in_bytes(sp_offset));
}


// ----------------mapping-----------------------
// all mapping is based on fp addressing, except for simple leaf methods where we access
// the locals sp based (and no frame is built)


// Frame for simple leaf methods (quick entries)
//
//   +----------+
//   | ret addr | <- TOS
//   +----------+
//   | args     |
//   | ......   |

// Frame for standard methods
//
//   | .........| <- TOS
//   | locals   |
//   +----------+
//   | old fp,  |
//   +----------+
//   | ret addr |
//   +----------+
//   | args     | <- FP
//   | .........|


// For OopMaps, map a local variable or spill index to a VMRegImpl name.
// This is the offset from sp() in the frame of the slot for the index,
// skewed by VMRegImpl::stack0 to indicate a stack location (vs. a register).
//
//           framesize +
//           stack0         stack0          0  <- VMReg
//             |              | <registers> |
//  ...........|..............|.............|
//      0 1 2 3 x x 4 5 6 ... |                <- local indices
//      ^           ^        sp()                 ( x x indicate link
//      |           |                               and return addr)
//  arguments   non-argument locals


VMReg FrameMap::fpu_regname (int n) {
  // Return the OptoReg name for the fpu stack slot "n"
  // A spilled fpu stack slot comprises two single-word OptoReg's.
  return as_FloatRegister(n)->as_VMReg();
}

LIR_Opr FrameMap::stack_pointer() {
  return FrameMap::sp_opr;
}

// JSR 292
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
  return LIR_OprFact::illegalOpr;  // Not needed on riscv
}

bool FrameMap::validate_frame() {
  return true;
}
148
src/hotspot/cpu/riscv/c1_FrameMap_riscv.hpp
Normal file
@@ -0,0 +1,148 @@
/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_C1_FRAMEMAP_RISCV_HPP
#define CPU_RISCV_C1_FRAMEMAP_RISCV_HPP

// On RISCV the frame looks as follows:
//
// +-----------------------------+---------+----------------------------------------+----------------+-----------
// | size_arguments-nof_reg_args | 2 words | size_locals-size_arguments+numreg_args | _size_monitors | spilling .
// +-----------------------------+---------+----------------------------------------+----------------+-----------

 public:
  static const int pd_c_runtime_reserved_arg_size;

  enum {
    first_available_sp_in_frame = 0,
    frame_pad_in_bytes = 16,
    nof_reg_args = 8
  };

 public:
  static LIR_Opr receiver_opr;

  static LIR_Opr zr_opr;
  static LIR_Opr r1_opr;
  static LIR_Opr r2_opr;
  static LIR_Opr r3_opr;
  static LIR_Opr r4_opr;
  static LIR_Opr r5_opr;
  static LIR_Opr r6_opr;
  static LIR_Opr r7_opr;
  static LIR_Opr r8_opr;
  static LIR_Opr r9_opr;
  static LIR_Opr r10_opr;
  static LIR_Opr r11_opr;
  static LIR_Opr r12_opr;
  static LIR_Opr r13_opr;
  static LIR_Opr r14_opr;
  static LIR_Opr r15_opr;
  static LIR_Opr r16_opr;
  static LIR_Opr r17_opr;
  static LIR_Opr r18_opr;
  static LIR_Opr r19_opr;
  static LIR_Opr r20_opr;
  static LIR_Opr r21_opr;
  static LIR_Opr r22_opr;
  static LIR_Opr r23_opr;
  static LIR_Opr r24_opr;
  static LIR_Opr r25_opr;
  static LIR_Opr r26_opr;
  static LIR_Opr r27_opr;
  static LIR_Opr r28_opr;
  static LIR_Opr r29_opr;
  static LIR_Opr r30_opr;
  static LIR_Opr r31_opr;
  static LIR_Opr fp_opr;
  static LIR_Opr sp_opr;

  static LIR_Opr zr_oop_opr;
  static LIR_Opr r1_oop_opr;
  static LIR_Opr r2_oop_opr;
  static LIR_Opr r3_oop_opr;
  static LIR_Opr r4_oop_opr;
  static LIR_Opr r5_oop_opr;
  static LIR_Opr r6_oop_opr;
  static LIR_Opr r7_oop_opr;
  static LIR_Opr r8_oop_opr;
  static LIR_Opr r9_oop_opr;
  static LIR_Opr r10_oop_opr;
  static LIR_Opr r11_oop_opr;
  static LIR_Opr r12_oop_opr;
  static LIR_Opr r13_oop_opr;
  static LIR_Opr r14_oop_opr;
  static LIR_Opr r15_oop_opr;
  static LIR_Opr r16_oop_opr;
  static LIR_Opr r17_oop_opr;
  static LIR_Opr r18_oop_opr;
  static LIR_Opr r19_oop_opr;
  static LIR_Opr r20_oop_opr;
  static LIR_Opr r21_oop_opr;
  static LIR_Opr r22_oop_opr;
  static LIR_Opr r23_oop_opr;
  static LIR_Opr r24_oop_opr;
  static LIR_Opr r25_oop_opr;
  static LIR_Opr r26_oop_opr;
  static LIR_Opr r27_oop_opr;
  static LIR_Opr r28_oop_opr;
  static LIR_Opr r29_oop_opr;
  static LIR_Opr r30_oop_opr;
  static LIR_Opr r31_oop_opr;

  static LIR_Opr t0_opr;
  static LIR_Opr t1_opr;
  static LIR_Opr t0_long_opr;
  static LIR_Opr t1_long_opr;

  static LIR_Opr r10_metadata_opr;
  static LIR_Opr r11_metadata_opr;
  static LIR_Opr r12_metadata_opr;
  static LIR_Opr r13_metadata_opr;
  static LIR_Opr r14_metadata_opr;
  static LIR_Opr r15_metadata_opr;

  static LIR_Opr long10_opr;
  static LIR_Opr long11_opr;
  static LIR_Opr fpu10_float_opr;
  static LIR_Opr fpu10_double_opr;

  static LIR_Opr as_long_opr(Register r) {
    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
  }
  static LIR_Opr as_pointer_opr(Register r) {
    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
  }

  // VMReg name for spilled physical FPU stack slot n
  static VMReg fpu_regname(int n);

  static bool is_caller_save_register(LIR_Opr opr) { return true; }
  static bool is_caller_save_register(Register r) { return true; }

  static int nof_caller_save_cpu_regs() { return pd_nof_caller_save_cpu_regs_frame_map; }
  static int last_cpu_reg() { return pd_last_cpu_reg; }

#endif // CPU_RISCV_C1_FRAMEMAP_RISCV_HPP
281
src/hotspot/cpu/riscv/c1_LIRAssembler_arith_riscv.cpp
Normal file
@@ -0,0 +1,281 @@
/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"

#ifndef PRODUCT
#define COMMENT(x) do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

#define __ _masm->

void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr illegal,
                                    LIR_Opr result, CodeEmitInfo* info) {
  // opcode check
  assert((code == lir_idiv) || (code == lir_irem), "opcode must be idiv or irem");
  bool is_irem = (code == lir_irem);
  // operand check
  assert(left->is_single_cpu(), "left must be a register");
  assert(right->is_single_cpu() || right->is_constant(), "right must be a register or constant");
  assert(result->is_single_cpu(), "result must be a register");
  Register lreg = left->as_register();
  Register dreg = result->as_register();

  // power-of-2 constant check and codegen
  if (right->is_constant()) {
    int c = right->as_constant_ptr()->as_jint();
    assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
    if (is_irem) {
      if (c == 1) {
        // move 0 to dreg if divisor is 1
        __ mv(dreg, zr);
      } else {
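        // Bias-and-mask remainder for a power-of-2 divisor: e.g. for
        // lreg == -7 and c == 4, t0 becomes the bias 3 (taken from the
        // sign bits), (-7 + 3) & 3 == 0, and 0 - 3 == -3 == -7 % 4.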
        unsigned int shift = exact_log2(c);
        __ sraiw(t0, lreg, 0x1f);
        __ srliw(t0, t0, BitsPerInt - shift);
        __ addw(t1, lreg, t0);
        if (is_imm_in_range(c - 1, 12, 0)) {
          __ andi(t1, t1, c - 1);
        } else {
          __ zero_extend(t1, t1, shift);
        }
        __ subw(dreg, t1, t0);
      }
    } else {
      if (c == 1) {
        // move lreg to dreg if divisor is 1
        __ mv(dreg, lreg);
      } else {
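        // Signed division by 2^shift rounds toward zero by adding (c - 1)
        // for negative dividends only: e.g. lreg == -7, c == 4 gives
        // (-7 + 3) >> 2 == -1, while lreg == 7 gives (7 + 0) >> 2 == 1.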
        unsigned int shift = exact_log2(c);
        __ sraiw(t0, lreg, 0x1f);
        if (is_imm_in_range(c - 1, 12, 0)) {
          __ andi(t0, t0, c - 1);
        } else {
          __ zero_extend(t0, t0, shift);
        }
        __ addw(dreg, t0, lreg);
        __ sraiw(dreg, dreg, shift);
      }
    }
  } else {
    Register rreg = right->as_register();
    __ corrected_idivl(dreg, lreg, rreg, is_irem);
  }
}

void LIR_Assembler::arith_op_single_cpu_right_constant(LIR_Code code, LIR_Opr left, LIR_Opr right,
                                                       Register lreg, Register dreg) {
  // cpu register - constant
  jlong c;

  switch (right->type()) {
    case T_LONG:
      c = right->as_constant_ptr()->as_jlong(); break;
    case T_INT:  // fall through
    case T_ADDRESS:
      c = right->as_constant_ptr()->as_jint(); break;
    default:
      ShouldNotReachHere();
      c = 0;  // unreachable
  }

  assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
  if (c == 0 && dreg == lreg) {
    COMMENT("effective nop elided");
    return;
  }
  switch (left->type()) {
    case T_INT:
      switch (code) {
        case lir_add: __ addw(dreg, lreg, c); break;
        case lir_sub: __ subw(dreg, lreg, c); break;
        default:      ShouldNotReachHere();
      }
      break;
    case T_OBJECT:  // fall through
    case T_ADDRESS:
      switch (code) {
        case lir_add: __ add(dreg, lreg, c); break;
        case lir_sub: __ sub(dreg, lreg, c); break;
        default:      ShouldNotReachHere();
      }
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::arith_op_single_cpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  Register lreg = left->as_register();
  Register dreg = as_reg(dest);

  if (right->is_single_cpu()) {
    // cpu register - cpu register
    assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT, "should be");
    Register rreg = right->as_register();
    switch (code) {
      case lir_add: __ addw(dest->as_register(), lreg, rreg); break;
      case lir_sub: __ subw(dest->as_register(), lreg, rreg); break;
      case lir_mul: __ mulw(dest->as_register(), lreg, rreg); break;
      default:      ShouldNotReachHere();
    }
  } else if (right->is_double_cpu()) {
    Register rreg = right->as_register_lo();
    // single_cpu + double_cpu; can happen with obj_long
    assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
    switch (code) {
      case lir_add: __ add(dreg, lreg, rreg); break;
      case lir_sub: __ sub(dreg, lreg, rreg); break;
      default:      ShouldNotReachHere();
    }
  } else if (right->is_constant()) {
    arith_op_single_cpu_right_constant(code, left, right, lreg, dreg);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::arith_op_double_cpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  Register lreg_lo = left->as_register_lo();

  if (right->is_double_cpu()) {
    // cpu register - cpu register
    Register rreg_lo = right->as_register_lo();
    switch (code) {
      case lir_add: __ add(dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_sub: __ sub(dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_mul: __ mul(dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false); break;
      case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true); break;
      default:
        ShouldNotReachHere();
    }
  } else if (right->is_constant()) {
    jlong c = right->as_constant_ptr()->as_jlong();
    Register dreg = as_reg(dest);
    switch (code) {
      case lir_add:  // fall through
      case lir_sub:
        if (c == 0 && dreg == lreg_lo) {
          COMMENT("effective nop elided");
          return;
        }
        code == lir_add ? __ add(dreg, lreg_lo, c) : __ sub(dreg, lreg_lo, c);
        break;
      case lir_div:
        assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
        if (c == 1) {
          // move lreg_lo to dreg if divisor is 1
          __ mv(dreg, lreg_lo);
        } else {
          unsigned int shift = exact_log2_long(c);
          // use t0 as intermediate result register
          __ srai(t0, lreg_lo, 0x3f);
          if (is_imm_in_range(c - 1, 12, 0)) {
            __ andi(t0, t0, c - 1);
          } else {
            __ zero_extend(t0, t0, shift);
          }
          __ add(dreg, t0, lreg_lo);
          __ srai(dreg, dreg, shift);
        }
        break;
      case lir_rem:
        assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
        if (c == 1) {
          // move 0 to dreg if divisor is 1
          __ mv(dreg, zr);
        } else {
          unsigned int shift = exact_log2_long(c);
          __ srai(t0, lreg_lo, 0x3f);
          __ srli(t0, t0, BitsPerLong - shift);
          __ add(t1, lreg_lo, t0);
          if (is_imm_in_range(c - 1, 12, 0)) {
            __ andi(t1, t1, c - 1);
          } else {
            __ zero_extend(t1, t1, shift);
          }
          __ sub(dreg, t1, t0);
        }
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::arith_op_single_fpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  assert(right->is_single_fpu(), "right hand side of float arithmetic needs to be a float register");
  switch (code) {
    case lir_add: __ fadd_s(dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_sub: __ fsub_s(dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_mul: __ fmul_s(dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_div: __ fdiv_s(dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::arith_op_double_fpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  if (right->is_double_fpu()) {
    // fpu register - fpu register
    switch (code) {
      case lir_add: __ fadd_d(dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_sub: __ fsub_d(dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_mul: __ fmul_d(dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_div: __ fdiv_d(dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
                             CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    arith_op_single_cpu(code, left, right, dest);
  } else if (left->is_double_cpu()) {
    arith_op_double_cpu(code, left, right, dest);
  } else if (left->is_single_fpu()) {
    arith_op_single_fpu(code, left, right, dest);
  } else if (left->is_double_fpu()) {
    arith_op_double_fpu(code, left, right, dest);
  } else {
    ShouldNotReachHere();
  }
}

#undef __
37
src/hotspot/cpu/riscv/c1_LIRAssembler_arith_riscv.hpp
Normal file
@@ -0,0 +1,37 @@
/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_C1_LIRASSEMBLER_ARITH_RISCV_HPP
#define CPU_RISCV_C1_LIRASSEMBLER_ARITH_RISCV_HPP

  // arith_op sub functions
  void arith_op_single_cpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest);
  void arith_op_double_cpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest);
  void arith_op_single_fpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest);
  void arith_op_double_fpu(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest);
  void arith_op_single_cpu_right_constant(LIR_Code code, LIR_Opr left, LIR_Opr right, Register lreg, Register dreg);
  void arithmetic_idiv(LIR_Op3* op, bool is_irem);

#endif // CPU_RISCV_C1_LIRASSEMBLER_ARITH_RISCV_HPP
388
src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp
Normal file
@@ -0,0 +1,388 @@
/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "ci/ciArrayKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/stubRoutines.hpp"

#define __ _masm->


void LIR_Assembler::generic_arraycopy(Register src, Register src_pos, Register length,
                                      Register dst, Register dst_pos, CodeStub *stub) {
  assert(src == x11 && src_pos == x12, "mismatch in calling convention");
  // Save the arguments in case the generic arraycopy fails and we
  // have to fall back to the JNI stub
  arraycopy_store_args(src, src_pos, length, dst, dst_pos);

  address copyfunc_addr = StubRoutines::generic_arraycopy();
  assert(copyfunc_addr != NULL, "generic arraycopy stub required");

  // The arguments are in java calling convention so we shift them
  // to C convention
  assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
  __ mv(c_rarg0, j_rarg0);
  assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
  __ mv(c_rarg1, j_rarg1);
  assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
  __ mv(c_rarg2, j_rarg2);
  assert_different_registers(c_rarg3, j_rarg4);
  __ mv(c_rarg3, j_rarg3);
  __ mv(c_rarg4, j_rarg4);
#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ add_memory_int32(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), 1);
  }
#endif
  __ far_call(RuntimeAddress(copyfunc_addr));
  __ beqz(x10, *stub->continuation());
  // Reload values from the stack so they are where the stub
  // expects them.
  arraycopy_load_args(src, src_pos, length, dst, dst_pos);

  // x10 is -1^K where K == partial copied count
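  // (i.e. x10 == ~K: if the stub copied 3 elements before failing,
  // x10 == ~3 == -4, and the xori below recovers K == 3)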
  __ xori(t0, x10, -1);
  // adjust length down and src/end pos up by partial copied count
  __ subw(length, length, t0);
  __ addw(src_pos, src_pos, t0);
  __ addw(dst_pos, dst_pos, t0);
  __ j(*stub->entry());

  __ bind(*stub->continuation());
}

void LIR_Assembler::arraycopy_simple_check(Register src, Register src_pos, Register length,
                                           Register dst, Register dst_pos, Register tmp,
                                           CodeStub *stub, int flags) {
  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ beqz(src, *stub->entry(), /* is_far */ true);
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ beqz(dst, *stub->entry(), /* is_far */ true);
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
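  // (array klasses have a negative layout helper, so a value >= the
  // neutral value below means "not an array" and we take the slow path)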
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      __ lw(t0, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ li(t1, Klass::_lh_neutral_value);
      __ bge(t0, t1, *stub->entry(), /* is_far */ true);
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ lw(t0, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ li(t1, Klass::_lh_neutral_value);
      __ bge(t0, t1, *stub->entry(), /* is_far */ true);
    }
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ bltz(src_pos, *stub->entry(), /* is_far */ true);
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ bltz(dst_pos, *stub->entry(), /* is_far */ true);
  }
  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ bltz(length, *stub->entry(), /* is_far */ true);
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ addw(tmp, src_pos, length);
    __ lwu(t0, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ bgtu(tmp, t0, *stub->entry(), /* is_far */ true);
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ addw(tmp, dst_pos, length);
    __ lwu(t0, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ bgtu(tmp, t0, *stub->entry(), /* is_far */ true);
  }
}

void LIR_Assembler::arraycopy_checkcast(Register src, Register src_pos, Register length,
                                        Register dst, Register dst_pos, Register tmp,
                                        CodeStub *stub, BasicType basic_type,
                                        address copyfunc_addr, int flags) {
  // src is not a sub class of dst so we have to do a
  // per-element check.
  int mask = LIR_OpArrayCopy::src_objarray | LIR_OpArrayCopy::dst_objarray;
  if ((flags & mask) != mask) {
    // Check that at least one of them is an object array.
    assert(flags & mask, "one of the two should be known to be an object array");

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
    } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
    }
    int lh_offset = in_bytes(Klass::layout_helper_offset());
    Address klass_lh_addr(tmp, lh_offset);
    jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ lw(t0, klass_lh_addr);
    __ mvw(t1, objArray_lh);
    __ bne(t0, t1, *stub->entry(), /* is_far */ true);
  }

  // Spill because stubs can use any register they like and it's
  // easier to restore just those that we care about.
  arraycopy_store_args(src, src_pos, length, dst, dst_pos);
  arraycopy_checkcast_prepare_params(src, src_pos, length, dst, dst_pos, basic_type);
  __ far_call(RuntimeAddress(copyfunc_addr));

#ifndef PRODUCT
  if (PrintC1Statistics) {
    Label failed;
    __ bnez(x10, failed);
    __ add_memory_int32(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt), 1);
    __ bind(failed);
  }
#endif

  __ beqz(x10, *stub->continuation());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ add_memory_int32(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt), 1);
  }
#endif
  assert_different_registers(dst, dst_pos, length, src_pos, src, x10, t0);

  // Restore previously spilled arguments
  arraycopy_load_args(src, src_pos, length, dst, dst_pos);

  // return value is -1^K where K is partial copied count
  __ xori(t0, x10, -1);
  // adjust length down and src/end pos up by partial copied count
  __ subw(length, length, t0);
  __ addw(src_pos, src_pos, t0);
  __ addw(dst_pos, dst_pos, t0);
}

void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Register length,
                                         Register dst, Register dst_pos, Register tmp,
                                         CodeStub *stub, BasicType basic_type, int flags) {
  // We don't know the array types are compatible
  if (basic_type != T_OBJECT) {
    // Simple test for basic type arrays
    if (UseCompressedClassPointers) {
      __ lwu(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
      __ lwu(t0, Address(dst, oopDesc::klass_offset_in_bytes()));
    } else {
      __ ld(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
      __ ld(t0, Address(dst, oopDesc::klass_offset_in_bytes()));
    }
    __ bne(tmp, t0, *stub->entry(), /* is_far */ true);
  } else {
    // For object arrays, if src is a sub class of dst then we can
    // safely do the copy.
    Label cont, slow;
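
    // PUSH/POP spill a register pair into a single 16-byte stack slot,
    // which also keeps sp aligned as the RISC-V ABI expects.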
|
||||
|
||||
#define PUSH(r1, r2) \
|
||||
__ addi(sp, sp, -2 * wordSize); \
|
||||
__ sd(r1, Address(sp, 1 * wordSize)); \
|
||||
__ sd(r2, Address(sp, 0));
|
||||
|
||||
#define POP(r1, r2) \
|
||||
__ ld(r1, Address(sp, 1 * wordSize)); \
|
||||
__ ld(r2, Address(sp, 0)); \
|
||||
__ addi(sp, sp, 2 * wordSize);
|
||||
|
||||
PUSH(src, dst);
|
||||
__ load_klass(src, src);
|
||||
__ load_klass(dst, dst);
|
||||
__ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
|
||||
|
||||
PUSH(src, dst);
|
||||
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
|
||||
POP(src, dst);
|
||||
__ bnez(dst, cont);
|
||||
|
||||
__ bind(slow);
|
||||
POP(src, dst);
|
||||
|
||||
address copyfunc_addr = StubRoutines::checkcast_arraycopy();
|
||||
if (copyfunc_addr != NULL) { // use stub if available
|
||||
arraycopy_checkcast(src, src_pos, length, dst, dst_pos, tmp, stub, basic_type, copyfunc_addr, flags);
|
||||
}
|
||||
|
||||
__ j(*stub->entry());
|
||||
__ bind(cont);
|
||||
POP(src, dst);
|
||||
}
|
||||
}
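Note: the PUSH/POP macros above keep a strict LIFO discipline. The object-array path pushes src/dst once to survive the load_klass clobber and once more around the slow subtype call, and every exit path (cont or slow) pops exactly as often as was pushed before it. A toy model of that pairing (illustrative C++ only):

#include <cassert>
#include <cstdint>
#include <vector>

struct ToyStack {
  std::vector<uint64_t> slots;
  void push2(uint64_t r1, uint64_t r2) {      // models PUSH(r1, r2)
    slots.push_back(r2);
    slots.push_back(r1);
  }
  void pop2(uint64_t &r1, uint64_t &r2) {     // models POP(r1, r2)
    r1 = slots.back(); slots.pop_back();
    r2 = slots.back(); slots.pop_back();
  }
};

int main() {
  ToyStack sp;
  uint64_t src = 1, dst = 2;                  // the original oops
  sp.push2(src, dst);                         // PUSH before load_klass clobbers
  src = 100; dst = 200;                       // load_klass overwrote both
  sp.push2(src, dst);                         // PUSH the klasses for the call
  sp.pop2(src, dst);                          // POP after far_call -> klasses
  assert(src == 100 && dst == 200);
  sp.pop2(src, dst);                          // POP at 'cont' -> original oops
  assert(src == 1 && dst == 2 && sp.slots.empty());
  return 0;
}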

void LIR_Assembler::arraycopy_assert(Register src, Register dst, Register tmp, ciArrayKlass *default_type, int flags) {
  assert(default_type != NULL, "NULL default_type!");
  BasicType basic_type = default_type->element_type()->basic_type();

  if (basic_type == T_ARRAY) { basic_type = T_OBJECT; }
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }

    if (basic_type != T_OBJECT) {
      if (UseCompressedClassPointers) {
        __ lwu(t0, Address(dst, oopDesc::klass_offset_in_bytes()));
      } else {
        __ ld(t0, Address(dst, oopDesc::klass_offset_in_bytes()));
      }
      __ bne(tmp, t0, halt);
      if (UseCompressedClassPointers) {
        __ lwu(t0, Address(src, oopDesc::klass_offset_in_bytes()));
      } else {
        __ ld(t0, Address(src, oopDesc::klass_offset_in_bytes()));
      }
      __ beq(tmp, t0, known_ok);
    } else {
      if (UseCompressedClassPointers) {
        __ lwu(t0, Address(dst, oopDesc::klass_offset_in_bytes()));
      } else {
        __ ld(t0, Address(dst, oopDesc::klass_offset_in_bytes()));
      }
      __ beq(tmp, t0, known_ok);
      __ beq(src, dst, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
}

void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass *default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (is_reference_type(basic_type)) { basic_type = T_OBJECT; }

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL) {
    generic_arraycopy(src, src_pos, length, dst, dst_pos, stub);
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(),
         "must be true at this point");

  arraycopy_simple_check(src, src_pos, length, dst, dst_pos, tmp, stub, flags);

  if (flags & LIR_OpArrayCopy::type_check) {
    arraycopy_type_check(src, src_pos, length, dst, dst_pos, tmp, stub, basic_type, flags);
  }

#ifdef ASSERT
  arraycopy_assert(src, dst, tmp, default_type, flags);
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ add_memory_int32(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)), 1);
  }
#endif
  arraycopy_prepare_params(src, src_pos, length, dst, dst_pos, basic_type);

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name = NULL;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  CodeBlob *cb = CodeCache::find_blob(entry);
  if (cb != NULL) {
    __ far_call(RuntimeAddress(entry));
  } else {
    const int args_num = 3;
    __ call_VM_leaf(entry, args_num);
  }

  __ bind(*stub->continuation());
}
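Note on the stub selection in emit_arraycopy: disjoint and aligned are each derived from the absence of a flag bit, and select_arraycopy_function then picks the matching copy stub. A minimal sketch of the flag decoding, with made-up bit values (the real LIR_OpArrayCopy flag enum lives in c1_LIR.hpp):

#include <cassert>

enum Flags { overlapping = 1 << 0, unaligned = 1 << 1 };  // illustrative bits

int main() {
  int flags = overlapping;                     // ranges may alias
  bool disjoint = (flags & overlapping) == 0;  // false: need a conjoint stub
  bool aligned  = (flags & unaligned) == 0;    // true: element-aligned copy ok
  assert(!disjoint && aligned);
  return 0;
}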

void LIR_Assembler::arraycopy_prepare_params(Register src, Register src_pos, Register length,
                                             Register dst, Register dst_pos, BasicType basic_type) {
  int scale = array_element_size(basic_type);
  __ shadd(c_rarg0, src_pos, src, t0, scale);
  __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ shadd(c_rarg1, dst_pos, dst, t0, scale);
  __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
  assert_different_registers(c_rarg1, dst, length);
  __ mv(c_rarg2, length);
  assert_different_registers(c_rarg2, dst);
}

void LIR_Assembler::arraycopy_checkcast_prepare_params(Register src, Register src_pos, Register length,
                                                       Register dst, Register dst_pos, BasicType basic_type) {
  arraycopy_prepare_params(src, src_pos, length, dst, dst_pos, basic_type);
  __ load_klass(c_rarg4, dst);
  __ ld(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
  __ lwu(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
}

void LIR_Assembler::arraycopy_store_args(Register src, Register src_pos, Register length,
                                         Register dst, Register dst_pos) {
  __ sd(dst_pos, Address(sp, 0));                // 0: dst_pos sp offset
  __ sd(dst, Address(sp, 1 * BytesPerWord));     // 1: dst sp offset
  __ sd(length, Address(sp, 2 * BytesPerWord));  // 2: length sp offset
  __ sd(src_pos, Address(sp, 3 * BytesPerWord)); // 3: src_pos sp offset
  __ sd(src, Address(sp, 4 * BytesPerWord));     // 4: src sp offset
}

void LIR_Assembler::arraycopy_load_args(Register src, Register src_pos, Register length,
                                        Register dst, Register dst_pos) {
  __ ld(dst_pos, Address(sp, 0));                // 0: dst_pos sp offset
  __ ld(dst, Address(sp, 1 * BytesPerWord));     // 1: dst sp offset
  __ ld(length, Address(sp, 2 * BytesPerWord));  // 2: length sp offset
  __ ld(src_pos, Address(sp, 3 * BytesPerWord)); // 3: src_pos sp offset
  __ ld(src, Address(sp, 4 * BytesPerWord));     // 4: src sp offset
}
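Note: arraycopy_store_args and arraycopy_load_args must agree slot-for-slot on the sp-relative layout, so a round trip through the five BytesPerWord-sized slots is the identity. A toy rendering of that contract (slot index = offset / BytesPerWord):

#include <cassert>
#include <cstdint>

uint64_t frame[5];  // stands in for the sp-relative spill area

void store_args(uint64_t src, uint64_t src_pos, uint64_t length,
                uint64_t dst, uint64_t dst_pos) {
  frame[0] = dst_pos; frame[1] = dst; frame[2] = length;
  frame[3] = src_pos; frame[4] = src;
}

void load_args(uint64_t &src, uint64_t &src_pos, uint64_t &length,
               uint64_t &dst, uint64_t &dst_pos) {
  dst_pos = frame[0]; dst = frame[1]; length = frame[2];
  src_pos = frame[3]; src = frame[4];
}

int main() {
  uint64_t a = 1, b = 2, c = 3, d = 4, e = 5;
  store_args(a, b, c, d, e);
  uint64_t a2, b2, c2, d2, e2;
  load_args(a2, b2, c2, d2, e2);
  assert(a2 == a && b2 == b && c2 == c && d2 == d && e2 == e);
  return 0;
}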

#undef __
52
src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.hpp
Normal file
@@ -0,0 +1,52 @@
/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_C1_LIRASSEMBLER_ARRAYCOPY_RISCV_HPP
#define CPU_RISCV_C1_LIRASSEMBLER_ARRAYCOPY_RISCV_HPP

// arraycopy sub functions
void generic_arraycopy(Register src, Register src_pos, Register length,
                       Register dst, Register dst_pos, CodeStub *stub);
void arraycopy_simple_check(Register src, Register src_pos, Register length,
                            Register dst, Register dst_pos, Register tmp,
                            CodeStub *stub, int flags);
void arraycopy_checkcast(Register src, Register src_pos, Register length,
                         Register dst, Register dst_pos, Register tmp,
                         CodeStub *stub, BasicType basic_type,
                         address copyfunc_addr, int flags);
void arraycopy_type_check(Register src, Register src_pos, Register length,
                          Register dst, Register dst_pos, Register tmp,
                          CodeStub *stub, BasicType basic_type, int flags);
void arraycopy_assert(Register src, Register dst, Register tmp, ciArrayKlass *default_type, int flags);
void arraycopy_prepare_params(Register src, Register src_pos, Register length,
                              Register dst, Register dst_pos, BasicType basic_type);
void arraycopy_checkcast_prepare_params(Register src, Register src_pos, Register length,
                                        Register dst, Register dst_pos, BasicType basic_type);
void arraycopy_store_args(Register src, Register src_pos, Register length,
                          Register dst, Register dst_pos);
void arraycopy_load_args(Register src, Register src_pos, Register length,
                         Register dst, Register dst_pos);

#endif // CPU_RISCV_C1_LIRASSEMBLER_ARRAYCOPY_RISCV_HPP
2260
src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
Normal file
File diff suppressed because it is too large
132
src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp
Normal file
@@ -0,0 +1,132 @@
/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_C1_LIRASSEMBLER_RISCV_HPP
#define CPU_RISCV_C1_LIRASSEMBLER_RISCV_HPP

// ArrayCopyStub needs access to bailout
friend class ArrayCopyStub;

 private:

#include "c1_LIRAssembler_arith_riscv.hpp"
#include "c1_LIRAssembler_arraycopy_riscv.hpp"

  int array_element_size(BasicType type) const;

  static Register as_reg(LIR_Opr op) {
    return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
  }

  Address as_Address(LIR_Address* addr, Register tmp);

  // Helper functions which check for overflow and set bailout if it
  // occurs. They always return a valid embeddable pointer, but in the
  // bailout case the pointer won't be to unique storage.
  address float_constant(float f);
  address double_constant(double d);
  address int_constant(jlong n);

  // Ensure we have a valid Address (base + offset) to a stack-slot.
  Address stack_slot_address(int index, uint shift, int adjust = 0);

  // Record the type of the receiver in ReceiverTypeData
  void type_profile_helper(Register mdo,
                           ciMethodData *md, ciProfileData *data,
                           Register recv, Label* update_done);

  void add_debug_info_for_branch(address adr, CodeEmitInfo* info);

  void casw(Register addr, Register newval, Register cmpval);
  void caswu(Register addr, Register newval, Register cmpval);
  void casl(Register addr, Register newval, Register cmpval);

  void poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info = NULL);

  void deoptimize_trap(CodeEmitInfo *info);

  enum {
    // See emit_static_call_stub for detail
    // CompiledStaticCall::to_interp_stub_size() (14) + CompiledStaticCall::to_trampoline_stub_size() (1 + 3 + address)
    _call_stub_size = 14 * NativeInstruction::instruction_size +
                      (NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size),
    // See emit_exception_handler for detail
    // verify_not_null_oop + far_call + should_not_reach_here + invalidate_registers(DEBUG_ONLY)
    _exception_handler_size = DEBUG_ONLY(584) NOT_DEBUG(548), // or smaller
    // See emit_deopt_handler for detail
    // auipc (1) + far_jump (6 or 2)
    _deopt_handler_size = 1 * NativeInstruction::instruction_size +
                          6 * NativeInstruction::instruction_size // or smaller
  };

  void check_conflict(ciKlass* exact_klass, intptr_t current_klass, Register tmp,
                      Label &next, Label &none, Address mdo_addr);
  void check_no_conflict(ciKlass* exact_klass, intptr_t current_klass, Register tmp, Address mdo_addr, Label &next);

  void check_exact_klass(Register tmp, ciKlass* exact_klass);

  void check_null(Register tmp, Label &update, intptr_t current_klass, Address mdo_addr, bool do_update, Label &next);

  void (MacroAssembler::*add)(Register prev, RegisterOrConstant incr, Register addr);
  void (MacroAssembler::*xchg)(Register prev, Register newv, Register addr);

  void get_op(BasicType type);

  // emit_typecheck_helper sub functions
  void data_check(LIR_OpTypeCheck *op, ciMethodData **md, ciProfileData **data);
  void typecheck_helper_slowcheck(ciKlass* k, Register obj, Register Rtmp1,
                                  Register k_RInfo, Register klass_RInfo,
                                  Label* failure_target, Label* success_target);
  void profile_object(ciMethodData* md, ciProfileData* data, Register obj,
                      Register klass_RInfo, Label* obj_is_null);
  void typecheck_loaded(LIR_OpTypeCheck* op, ciKlass* k, Register k_RInfo);

  // emit_opTypeCheck sub functions
  void typecheck_lir_store(LIR_OpTypeCheck* op, bool should_profile);

  void type_profile(Register obj, ciMethodData* md, Register klass_RInfo, Register k_RInfo,
                    ciProfileData* data, Label* success, Label* failure,
                    Label& profile_cast_success, Label& profile_cast_failure);

  void lir_store_slowcheck(Register k_RInfo, Register klass_RInfo, Register Rtmp1,
                           Label* success_target, Label* failure_target);

  void const2reg_helper(LIR_Opr src);

  void emit_branch(LIR_Condition cmp_flag, LIR_Opr cmp1, LIR_Opr cmp2, Label& label, bool is_far, bool is_unordered);

  void logic_op_reg32(Register dst, Register left, Register right, LIR_Code code);
  void logic_op_reg(Register dst, Register left, Register right, LIR_Code code);
  void logic_op_imm(Register dst, Register left, int right, LIR_Code code);

 public:

  void emit_cmove(LIR_Op4* op);

  void store_parameter(Register r, int offset_from_rsp_in_words);
  void store_parameter(jint c, int offset_from_rsp_in_words);

#endif // CPU_RISCV_C1_LIRASSEMBLER_RISCV_HPP
1078
src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp
Normal file
File diff suppressed because it is too large
55
src/hotspot/cpu/riscv/c1_LIR_riscv.cpp
Normal file
@@ -0,0 +1,55 @@
/*
 * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/register.hpp"
#include "c1/c1_LIR.hpp"

FloatRegister LIR_OprDesc::as_float_reg() const {
  return as_FloatRegister(fpu_regnr());
}

FloatRegister LIR_OprDesc::as_double_reg() const {
  return as_FloatRegister(fpu_regnrLo());
}

// Reg2 unused.
LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
  assert(as_FloatRegister(reg2) == fnoreg, "Not used on this platform");
  return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
                             (reg1 << LIR_OprDesc::reg2_shift) |
                             LIR_OprDesc::double_type |
                             LIR_OprDesc::fpu_register |
                             LIR_OprDesc::double_size);
}
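Note: double_fpu deliberately packs reg1 into both register fields, since on RISC-V a double occupies a single FP register (reg2 must be fnoreg). A toy decode of that packing, with illustrative shift/mask values that are not HotSpot's actual LIR_OprDesc layout:

#include <cassert>
#include <cstdint>

constexpr int reg1_shift = 3, reg2_shift = 10;  // illustrative only

intptr_t pack(int reg1) {
  // same register index stored in both fields, as in double_fpu above
  return ((intptr_t)reg1 << reg1_shift) | ((intptr_t)reg1 << reg2_shift);
}

int main() {
  intptr_t op = pack(7);
  assert(((op >> reg1_shift) & 0x7f) == 7);  // "lo" field decodes to f7
  assert(((op >> reg2_shift) & 0x7f) == 7);  // "hi" field decodes to f7 too
  return 0;
}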

#ifndef PRODUCT
void LIR_Address::verify() const {
  assert(base()->is_cpu_register(), "wrong base operand");
  assert(index()->is_illegal() || index()->is_double_cpu() || index()->is_single_cpu(), "wrong index operand");
  assert(base()->type() == T_ADDRESS || base()->type() == T_OBJECT || base()->type() == T_LONG ||
         base()->type() == T_METADATA, "wrong type for addresses");
}
#endif // PRODUCT
33
src/hotspot/cpu/riscv/c1_LinearScan_riscv.cpp
Normal file
@@ -0,0 +1,33 @@
/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LinearScan.hpp"
#include "utilities/bitMap.inline.hpp"

void LinearScan::allocate_fpu_stack() {
  // No FPU stack on RISCV
}
83
src/hotspot/cpu/riscv/c1_LinearScan_riscv.hpp
Normal file
@@ -0,0 +1,83 @@
/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_C1_LINEARSCAN_RISCV_HPP
#define CPU_RISCV_C1_LINEARSCAN_RISCV_HPP

inline bool LinearScan::is_processed_reg_num(int reg_num) {
  return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;
}

inline int LinearScan::num_physical_regs(BasicType type) {
  return 1;
}

inline bool LinearScan::requires_adjacent_regs(BasicType type) {
  return false;
}

inline bool LinearScan::is_caller_save(int assigned_reg) {
  assert(assigned_reg >= 0 && assigned_reg < nof_regs, "should call this only for registers");
  if (assigned_reg < pd_first_callee_saved_reg) {
    return true;
  }
  if (assigned_reg > pd_last_callee_saved_reg && assigned_reg < pd_first_callee_saved_fpu_reg_1) {
    return true;
  }
  if (assigned_reg > pd_last_callee_saved_fpu_reg_1 && assigned_reg < pd_first_callee_saved_fpu_reg_2) {
    return true;
  }
  if (assigned_reg > pd_last_callee_saved_fpu_reg_2 && assigned_reg < pd_last_fpu_reg) {
    return true;
  }
  return false;
}
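Note: is_caller_save treats a register as caller-save exactly when its linear-scan number falls outside every callee-saved window. A toy rendering of the same range logic, with made-up boundary values (the real pd_* constants come from the RISC-V FrameMap):

#include <cassert>

constexpr int first_cs = 10, last_cs = 17;        // callee-saved CPU window
constexpr int first_cs_f1 = 40, last_cs_f1 = 45;  // callee-saved FPU window 1
constexpr int first_cs_f2 = 50, last_cs_f2 = 55;  // callee-saved FPU window 2
constexpr int last_fpu = 63;

bool caller_save(int r) {
  if (r < first_cs) return true;                      // below CPU window
  if (r > last_cs && r < first_cs_f1) return true;    // between CPU and FPU 1
  if (r > last_cs_f1 && r < first_cs_f2) return true; // between FPU 1 and 2
  if (r > last_cs_f2 && r < last_fpu) return true;    // above FPU window 2
  return false;
}

int main() {
  assert(caller_save(5));    // below the CPU callee-saved window
  assert(!caller_save(12));  // inside it
  assert(caller_save(47));   // between the two FPU windows
  return 0;
}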

inline void LinearScan::pd_add_temps(LIR_Op* op) {
  // No special case behaviours yet
}

// Implementation of LinearScanWalker

inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
  if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::callee_saved)) {
    assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");
    _first_reg = pd_first_callee_saved_reg;
    _last_reg = pd_last_callee_saved_reg;
    return true;
  } else if (cur->type() == T_INT || cur->type() == T_LONG || cur->type() == T_OBJECT ||
             cur->type() == T_ADDRESS || cur->type() == T_METADATA) {
    _first_reg = pd_first_cpu_reg;
    _last_reg = pd_last_allocatable_cpu_reg;
    return true;
  }
  return false;
}

#endif // CPU_RISCV_C1_LINEARSCAN_RISCV_HPP
450
src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
Normal file
@@ -0,0 +1,450 @@
/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_LIR.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"

void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result,
                                  FloatRegister freg0, FloatRegister freg1,
                                  Register result) {
  if (is_float) {
    float_compare(result, freg0, freg1, unordered_result);
  } else {
    double_compare(result, freg0, freg1, unordered_result);
  }
}
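Note: unordered_result encodes the Java fcmpl/fcmpg distinction. When either input is NaN the comparison is unordered, and the caller-chosen value (-1 or +1) becomes the result. A scalar sketch of the convention the float_compare/double_compare helpers are expected to implement:

#include <cassert>
#include <cmath>

int fcmp(float a, float b, int unordered_result) {
  if (std::isnan(a) || std::isnan(b)) return unordered_result;  // unordered
  if (a < b) return -1;
  if (a > b) return 1;
  return 0;
}

int main() {
  assert(fcmp(1.0f, 2.0f, 1) == -1);
  assert(fcmp(NAN, 2.0f, 1) == 1);    // fcmpg-style: NaN compares "greater"
  assert(fcmp(NAN, 2.0f, -1) == -1);  // fcmpl-style: NaN compares "less"
  return 0;
}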

int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register tmp, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;
  int null_check_offset = -1;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  sd(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));

  null_check_offset = offset();

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(hdr, obj);
    lwu(hdr, Address(hdr, Klass::access_flags_offset()));
    andi(t0, hdr, JVM_ACC_IS_VALUE_BASED_CLASS);
    bnez(t0, slow_case, true /* is_far */);
  }

  if (UseBiasedLocking) {
    assert(tmp != noreg, "should have tmp register at this point");
    biased_locking_enter(disp_hdr, obj, hdr, tmp, false, done, &slow_case);
  }

  // Load object header
  ld(hdr, Address(obj, hdr_offset));
  // and mark it as unlocked
  ori(hdr, hdr, markWord::unlocked_value);
  // save unlocked object header into the displaced header location on the stack
  sd(hdr, Address(disp_hdr, 0));
  // test if object header is still the same (i.e. unlocked), and if so, store the
  // displaced header address in the object header - if it is not the same, get the
  // object header instead
  la(t1, Address(obj, hdr_offset));
  cmpxchgptr(hdr, disp_hdr, t1, t0, done, /*fallthrough*/NULL);
  // if the object header was the same, we're done
  // if the object header was not the same, it is now in the hdr register
  // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
  //
  // 1) (hdr & aligned_mask) == 0
  // 2) sp <= hdr
  // 3) hdr <= sp + page_size
  //
  // these 3 tests can be done by evaluating the following expression:
  //
  // (hdr - sp) & (aligned_mask - page_size)
  //
  // assuming both the stack pointer and page_size have their least
  // significant 2 bits cleared and page_size is a power of 2
  sub(hdr, hdr, sp);
  li(t0, aligned_mask - os::vm_page_size());
  andr(hdr, hdr, t0);
  // for recursive locking, the result is zero => save it in the displaced header
  // location (NULL in the displaced hdr location indicates recursive locking)
  sd(hdr, Address(disp_hdr, 0));
  // otherwise we don't care about the result and handle locking via runtime call
  bnez(hdr, slow_case, /* is_far */ true);
  bind(done);
  return null_check_offset;
}
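Note: the one-mask recursive-lock test deserves a worked example. With aligned_mask == 7 and a 4K page, (hdr - sp) & (aligned_mask - page_size) is zero exactly when hdr is word-aligned and lies within one page above sp. A host-side sketch with toy addresses:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t aligned_mask = 8 - 1;             // BytesPerWord - 1
  const intptr_t page_size = 4096;
  const intptr_t mask = aligned_mask - page_size;  // low 3 bits + bits >= 12 set

  intptr_t sp = 0x7ffff000;                  // page-aligned toy stack pointer
  intptr_t on_stack   = sp + 0x100;          // aligned, within one page of sp
  intptr_t off_stack  = sp + 2 * page_size;  // too far above sp
  intptr_t misaligned = sp + 0x101;          // low bits set

  assert(((on_stack   - sp) & mask) == 0);   // recursive case: stores NULL
  assert(((off_stack  - sp) & mask) != 0);   // not our stack frame
  assert(((misaligned - sp) & mask) != 0);   // not a displaced header address
  return 0;
}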

void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;

  if (UseBiasedLocking) {
    // load object
    ld(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    biased_locking_exit(obj, hdr, done);
  }

  // load displaced header
  ld(hdr, Address(disp_hdr, 0));
  // if the loaded hdr is NULL we had recursive locking
  // if we had recursive locking, we are done
  beqz(hdr, done);
  if (!UseBiasedLocking) {
    // load object
    ld(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
  }
  verify_oop(obj);
  // test if object header is pointing to the displaced header, and if so, restore
  // the displaced header in the object - if the object header is not pointing to
  // the displaced header, get the object header instead
  // if the object header was not pointing to the displaced header,
  // we do unlocking via runtime call
  if (hdr_offset) {
    la(t0, Address(obj, hdr_offset));
    cmpxchgptr(disp_hdr, hdr, t0, t1, done, &slow_case);
  } else {
    cmpxchgptr(disp_hdr, hdr, obj, t1, done, &slow_case);
  }
  bind(done);
}

// Defines obj, preserves var_size_in_bytes
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register tmp1, Register tmp2, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, tmp1, tmp2, slow_case, /* is_far */ true);
  } else {
    eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, tmp1, slow_case, /* is_far */ true);
  }
}

void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp1, Register tmp2) {
  assert_different_registers(obj, klass, len);
  if (UseBiasedLocking && !len->is_valid()) {
    assert_different_registers(obj, klass, len, tmp1, tmp2);
    ld(tmp1, Address(klass, Klass::prototype_header_offset()));
  } else {
    // This assumes that all prototype bits fit in an int32_t
    mv(tmp1, (int32_t)(intptr_t)markWord::prototype().value());
  }
  sd(tmp1, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseCompressedClassPointers) { // Take care not to kill klass
    encode_klass_not_null(tmp1, klass);
    sw(tmp1, Address(obj, oopDesc::klass_offset_in_bytes()));
  } else {
    sd(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
  }

  if (len->is_valid()) {
    sw(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  } else if (UseCompressedClassPointers) {
    store_klass_gap(obj, zr);
  }
}

// preserves obj, destroys len_in_bytes
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register tmp) {
  assert(hdr_size_in_bytes >= 0, "header size must be positive or 0");
  Label done;

  // len_in_bytes is positive and ptr sized
  sub(len_in_bytes, len_in_bytes, hdr_size_in_bytes);
  beqz(len_in_bytes, done);

  // Preserve obj
  if (hdr_size_in_bytes) {
    add(obj, obj, hdr_size_in_bytes);
  }
  zero_memory(obj, len_in_bytes, tmp);
  if (hdr_size_in_bytes) {
    sub(obj, obj, hdr_size_in_bytes);
  }

  bind(done);
}

void C1_MacroAssembler::allocate_object(Register obj, Register tmp1, Register tmp2, int header_size, int object_size, Register klass, Label& slow_case) {
  assert_different_registers(obj, tmp1, tmp2);
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");

  try_allocate(obj, noreg, object_size * BytesPerWord, tmp1, tmp2, slow_case);

  initialize_object(obj, klass, noreg, object_size * HeapWordSize, tmp1, tmp2, UseTLAB);
}

void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register tmp1, Register tmp2, bool is_tlab_allocated) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, tmp1, tmp2);

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    // clear rest of allocated space
    const Register index = tmp2;
    // 16: multiplier for threshold
    const int threshold = 16 * BytesPerWord; // approximate break even point for code size (see comments below)
    if (var_size_in_bytes != noreg) {
      mv(index, var_size_in_bytes);
      initialize_body(obj, index, hdr_size_in_bytes, tmp1);
    } else if (con_size_in_bytes <= threshold) {
      // use explicit null stores
      int i = hdr_size_in_bytes;
      if (i < con_size_in_bytes && (con_size_in_bytes % (2 * BytesPerWord))) { // 2: multiplier for BytesPerWord
        sd(zr, Address(obj, i));
        i += BytesPerWord;
      }
      for (; i < con_size_in_bytes; i += BytesPerWord) {
        sd(zr, Address(obj, i));
      }
    } else if (con_size_in_bytes > hdr_size_in_bytes) {
      block_comment("zero memory");
      // use loop to null out the fields
      int words = (con_size_in_bytes - hdr_size_in_bytes) / BytesPerWord;
      mv(index, words / 8); // 8: number of stores per unrolled iteration

      const int unroll = 8; // Number of sd(zr) instructions we'll unroll
      int remainder = words % unroll;
      la(t0, Address(obj, hdr_size_in_bytes + remainder * BytesPerWord));

      Label entry_point, loop;
      j(entry_point);

      bind(loop);
      sub(index, index, 1);
      for (int i = -unroll; i < 0; i++) {
        if (-i == remainder) {
          bind(entry_point);
        }
        sd(zr, Address(t0, i * wordSize));
      }
      if (remainder == 0) {
        bind(entry_point);
      }
      add(t0, t0, unroll * wordSize);
      bnez(index, loop);
    }
  }

  membar(MacroAssembler::StoreStore);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == x10, "must be");
    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}
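Note: the unrolled zeroing loop above handles the first partial group by jumping into the middle of an 8-way unrolled body, so no separate remainder loop is needed. A scalar C++ sketch of the same coverage (illustrative only; the real code pre-biases t0 and counts down in index):

#include <cassert>

void zero_words(long *base, int words) {
  const int unroll = 8;
  int iters = words / unroll;       // full 8-store groups, done in the loop
  int remainder = words % unroll;
  long *p = base + remainder;       // pre-biased like la(t0, obj + rem * wsz)
  // entry point: fall into the last `remainder` stores of one unroll group
  for (int i = -remainder; i < 0; i++) p[i] = 0;
  while (iters-- > 0) {
    for (int i = 0; i < unroll; i++) p[i] = 0;
    p += unroll;
  }
}

int main() {
  long buf[19];
  for (long &w : buf) w = -1;
  zero_words(buf, 19);              // 19 = 2 full groups + remainder 3
  for (long w : buf) assert(w == 0);
  return 0;
}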

void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int header_size, int f, Register klass, Label& slow_case) {
  assert_different_registers(obj, len, tmp1, tmp2, klass);

  // determine alignment mask
  assert(!(BytesPerWord & 1), "must be multiple of 2 for masking code to work");

  // check for negative or excessive length
  mv(t0, (int32_t)max_array_allocation_length);
  bgeu(len, t0, slow_case, /* is_far */ true);

  const Register arr_size = tmp2; // okay to be the same
  // align object end
  mv(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
  shadd(arr_size, len, arr_size, t0, f);
  andi(arr_size, arr_size, ~(uint)MinObjAlignmentInBytesMask);

  try_allocate(obj, arr_size, 0, tmp1, tmp2, slow_case);

  initialize_header(obj, klass, len, tmp1, tmp2);

  // clear rest of allocated space
  const Register len_zero = len;
  initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);

  membar(MacroAssembler::StoreStore);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == x10, "must be");
    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}
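Note: the arr_size computation above is the usual round-up-to-alignment pattern: add the alignment mask to the header size before adding the scaled payload, then clear the low bits. The same arithmetic in scalar form, with illustrative values (8-byte words, 8-byte object alignment, f = log2 of the element size):

#include <cassert>

int main() {
  const int BytesPerWord = 8, align_mask = 8 - 1;
  int header_size = 2, len = 5, f = 2;   // e.g. 5 ints behind a 16-byte header
  int arr_size = header_size * BytesPerWord + align_mask;
  arr_size += len << f;                  // shadd: size += len * 2^f
  arr_size &= ~align_mask;               // andi with the inverted mask
  assert(arr_size == 40);                // 16 + 20 = 36, rounded up to 40
  return 0;
}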

void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache, Label &L) {
  verify_oop(receiver);
  // explicit NULL check not needed since load from [klass_offset] causes a trap
  // check against inline cache
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
  cmp_klass(receiver, iCache, t0, L);
}

void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before creating a frame.
  generate_stack_overflow_check(bang_size_in_bytes);
  MacroAssembler::build_frame(framesize);

  // Insert nmethod entry barrier into frame.
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(this);
}

void C1_MacroAssembler::remove_frame(int framesize) {
  MacroAssembler::remove_frame(framesize);
}

void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
  // If we have to make this method not-entrant we'll overwrite its
  // first instruction with a jump. For this action to be legal we
  // must ensure that this first instruction is a J, JAL or NOP.
  // Make it a NOP.

  nop();
}

void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) {
  // fp + -2: link
  //    + -1: return address
  //    +  0: argument with offset 0
  //    +  1: argument with offset 1
  //    +  2: ...
  ld(reg, Address(fp, offset_in_words * BytesPerWord));
}

#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) {
    return;
  }
  verify_oop_addr(Address(sp, stack_offset), "oop");
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  Label not_null;
  bnez(r, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r);
}

void C1_MacroAssembler::invalidate_registers(bool inv_x10, bool inv_x9, bool inv_x12, bool inv_x13, bool inv_x14, bool inv_x15) {
#ifdef ASSERT
  static int nn;
  if (inv_x10) { mv(x10, 0xDEAD); }
  if (inv_x9)  { mv(x9, 0xDEAD); }
  if (inv_x12) { mv(x12, nn++); }
  if (inv_x13) { mv(x13, 0xDEAD); }
  if (inv_x14) { mv(x14, 0xDEAD); }
  if (inv_x15) { mv(x15, 0xDEAD); }
#endif // ASSERT
}
#endif // ifndef PRODUCT

typedef void (C1_MacroAssembler::*c1_cond_branch_insn)(Register op1, Register op2, Label& label, bool is_far);
typedef void (C1_MacroAssembler::*c1_float_cond_branch_insn)(FloatRegister op1, FloatRegister op2,
                                                             Label& label, bool is_far, bool is_unordered);

static c1_cond_branch_insn c1_cond_branch[] = {
  /* SHORT branches */
  (c1_cond_branch_insn)&Assembler::beq,
  (c1_cond_branch_insn)&Assembler::bne,
  (c1_cond_branch_insn)&Assembler::blt,
  (c1_cond_branch_insn)&Assembler::ble,
  (c1_cond_branch_insn)&Assembler::bge,
  (c1_cond_branch_insn)&Assembler::bgt,
  (c1_cond_branch_insn)&Assembler::bleu, // lir_cond_belowEqual
  (c1_cond_branch_insn)&Assembler::bgeu  // lir_cond_aboveEqual
};

static c1_float_cond_branch_insn c1_float_cond_branch[] = {
  /* FLOAT branches */
  (c1_float_cond_branch_insn)&MacroAssembler::float_beq,
  (c1_float_cond_branch_insn)&MacroAssembler::float_bne,
  (c1_float_cond_branch_insn)&MacroAssembler::float_blt,
  (c1_float_cond_branch_insn)&MacroAssembler::float_ble,
  (c1_float_cond_branch_insn)&MacroAssembler::float_bge,
  (c1_float_cond_branch_insn)&MacroAssembler::float_bgt,
  NULL, // lir_cond_belowEqual
  NULL, // lir_cond_aboveEqual

  /* DOUBLE branches */
  (c1_float_cond_branch_insn)&MacroAssembler::double_beq,
  (c1_float_cond_branch_insn)&MacroAssembler::double_bne,
  (c1_float_cond_branch_insn)&MacroAssembler::double_blt,
  (c1_float_cond_branch_insn)&MacroAssembler::double_ble,
  (c1_float_cond_branch_insn)&MacroAssembler::double_bge,
  (c1_float_cond_branch_insn)&MacroAssembler::double_bgt,
  NULL, // lir_cond_belowEqual
  NULL  // lir_cond_aboveEqual
};

void C1_MacroAssembler::c1_cmp_branch(int cmpFlag, Register op1, Register op2, Label& label,
                                      BasicType type, bool is_far) {
  if (type == T_OBJECT || type == T_ARRAY) {
    assert(cmpFlag == lir_cond_equal || cmpFlag == lir_cond_notEqual, "Should be equal or notEqual");
    if (cmpFlag == lir_cond_equal) {
      beq(op1, op2, label, is_far);
    } else {
      bne(op1, op2, label, is_far);
    }
  } else {
    assert(cmpFlag >= 0 && cmpFlag < (int)(sizeof(c1_cond_branch) / sizeof(c1_cond_branch[0])),
           "invalid c1 conditional branch index");
    (this->*c1_cond_branch[cmpFlag])(op1, op2, label, is_far);
  }
}
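Note: the branch emitters above dispatch through a table of member-function pointers indexed by the LIR condition code, avoiding a switch on the hot path. A stripped-down model of that pattern (toy class and table, not HotSpot's):

#include <cassert>

struct Asm {
  int last;               // records which "instruction" was emitted
  void beq(int, int) { last = 0; }
  void bne(int, int) { last = 1; }
  void blt(int, int) { last = 2; }
};

typedef void (Asm::*branch_insn)(int, int);
static branch_insn table[] = { &Asm::beq, &Asm::bne, &Asm::blt };

int main() {
  Asm a{ -1 };
  int cond = 1;                 // stands in for a lir_cond_* value
  (a.*table[cond])(0, 0);       // same shape as (this->*c1_cond_branch[...])
  assert(a.last == 1);
  return 0;
}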

void C1_MacroAssembler::c1_float_cmp_branch(int cmpFlag, FloatRegister op1, FloatRegister op2, Label& label,
                                            bool is_far, bool is_unordered) {
  assert(cmpFlag >= 0 &&
         cmpFlag < (int)(sizeof(c1_float_cond_branch) / sizeof(c1_float_cond_branch[0])),
         "invalid c1 float conditional branch index");
  (this->*c1_float_cond_branch[cmpFlag])(op1, op2, label, is_far, is_unordered);
}
121
src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp
Normal file
@@ -0,0 +1,121 @@
/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_C1_MACROASSEMBLER_RISCV_HPP
#define CPU_RISCV_C1_MACROASSEMBLER_RISCV_HPP

using MacroAssembler::build_frame;
using MacroAssembler::null_check;

// C1_MacroAssembler contains high-level macros for C1

 private:
  int _rsp_offset; // track rsp changes
  // initialization
  void pd_init() { _rsp_offset = 0; }

 public:
  void try_allocate(
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,      // object size in bytes if known at compile time
    Register tmp1,              // temp register
    Register tmp2,              // temp register
    Label& slow_case            // continuation point if fast allocation fails
  );

  void initialize_header(Register obj, Register klass, Register len, Register tmp1, Register tmp2);
  void initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register tmp);

  void float_cmp(bool is_float, int unordered_result,
                 FloatRegister f0, FloatRegister f1,
                 Register result);

  // locking
  // hdr     : must be x10, contents destroyed
  // obj     : must point to the object to lock, contents preserved
  // disp_hdr: must point to the displaced header location, contents preserved
  // tmp     : temporary register, contents destroyed
  // returns code offset at which to add null check debug information
  int lock_object(Register swap, Register obj, Register disp_hdr, Register tmp, Label& slow_case);

  // unlocking
  // hdr     : contents destroyed
  // obj     : must point to the object to lock, contents preserved
  // disp_hdr: must be x10 & must point to the displaced header location, contents destroyed
  void unlock_object(Register swap, Register obj, Register lock, Label& slow_case);

  void initialize_object(
    Register obj,               // result: pointer to object after successful allocation
    Register klass,             // object klass
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,      // object size in bytes if known at compile time
    Register tmp1,              // temp register
    Register tmp2,              // temp register
    bool is_tlab_allocated      // the object was allocated in a TLAB; relevant for the implementation of ZeroTLAB
  );

  // allocation of fixed-size objects
  // (can also be used to allocate fixed-size arrays, by setting
  // hdr_size correctly and storing the array length afterwards)
  // obj        : will contain pointer to allocated object
  // t1, t2     : temp registers - contents destroyed
  // header_size: size of object header in words
  // object_size: total size of object in words
  // slow_case  : exit to slow case implementation if fast allocation fails
  void allocate_object(Register obj, Register tmp1, Register tmp2, int header_size, int object_size, Register klass, Label& slow_case);

  enum {
    max_array_allocation_length = 0x00FFFFFF
  };

  // allocation of arrays
  // obj        : will contain pointer to allocated object
  // len        : array length in number of elements
  // t          : temp register - contents destroyed
  // header_size: size of object header in words
  // f          : element scale factor
  // slow_case  : exit to slow case implementation if fast allocation fails
  void allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int header_size, int f, Register klass, Label& slow_case);

  int rsp_offset() const { return _rsp_offset; }

  void invalidate_registers(bool inv_x10, bool inv_x9, bool inv_x12, bool inv_x13, bool inv_x14, bool inv_x15) PRODUCT_RETURN;

  // This platform only uses signal-based null checks. The Label is not needed.
  void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }

  void load_parameter(int offset_in_words, Register reg);

  void inline_cache_check(Register receiver, Register iCache, Label &L);

  static const int c1_double_branch_mask = 1 << 3; // depends on c1_float_cond_branch
  void c1_cmp_branch(int cmpFlag, Register op1, Register op2, Label& label, BasicType type, bool is_far);
  void c1_float_cmp_branch(int cmpFlag, FloatRegister op1, FloatRegister op2, Label& label,
                           bool is_far, bool is_unordered = false);

#endif // CPU_RISCV_C1_MACROASSEMBLER_RISCV_HPP
1172
src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
Normal file
File diff suppressed because it is too large
65
src/hotspot/cpu/riscv/c1_globals_riscv.hpp
Normal file
@@ -0,0 +1,65 @@
/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_C1_GLOBALS_RISCV_HPP
#define CPU_RISCV_C1_GLOBALS_RISCV_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)

#ifndef COMPILER2
define_pd_global(bool, BackgroundCompilation, true );
define_pd_global(bool, InlineIntrinsics, true );
define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps, false);
define_pd_global(bool, UseOnStackReplacement, true );
define_pd_global(bool, TieredCompilation, false);
define_pd_global(intx, CompileThreshold, 1500 );

define_pd_global(intx, OnStackReplacePercentage, 933 );
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
#endif // !COMPILER2
define_pd_global(bool, UseTypeProfile, false);

define_pd_global(bool, OptimizeSinglePrecision, true );
define_pd_global(bool, CSEArrayLength, false);
define_pd_global(bool, TwoOperandLIRForm, false);

#endif // CPU_RISCV_C1_GLOBALS_RISCV_HPP
1640
src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
Normal file
File diff suppressed because it is too large
193
src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp
Normal file
@@ -0,0 +1,193 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_C2_MACROASSEMBLER_RISCV_HPP
#define CPU_RISCV_C2_MACROASSEMBLER_RISCV_HPP

// C2_MacroAssembler contains high-level macros for C2

 private:
  void element_compare(Register r1, Register r2,
                       Register result, Register cnt,
                       Register tmp1, Register tmp2,
                       VectorRegister vr1, VectorRegister vr2,
                       VectorRegister vrs,
                       bool is_latin, Label& DONE);
 public:

  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      Register tmp1, Register tmp2, Register tmp3,
                      int ae);

  void string_indexof_char_short(Register str1, Register cnt1,
                                 Register ch, Register result,
                                 bool isL);

  void string_indexof_char(Register str1, Register cnt1,
                           Register ch, Register result,
                           Register tmp1, Register tmp2,
                           Register tmp3, Register tmp4,
                           bool isL);

  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      Register tmp1, Register tmp2,
                      Register tmp3, Register tmp4,
                      Register tmp5, Register tmp6,
                      Register result, int ae);

  void string_indexof_linearscan(Register haystack, Register needle,
                                 Register haystack_len, Register needle_len,
                                 Register tmp1, Register tmp2,
                                 Register tmp3, Register tmp4,
                                 int needle_con_cnt, Register result, int ae);

  void arrays_equals(Register r1, Register r2,
                     Register tmp3, Register tmp4,
                     Register tmp5, Register tmp6,
                     Register result, Register cnt1,
                     int elem_size);

  void string_equals(Register r1, Register r2,
                     Register result, Register cnt1,
                     int elem_size);

  // refer to conditional_branches and float_conditional_branches
  static const int bool_test_bits = 3;
  static const int neg_cond_bits = 2;
  static const int unsigned_branch_mask = 1 << bool_test_bits;
  static const int double_branch_mask = 1 << bool_test_bits;

  // cmp
  void cmp_branch(int cmpFlag,
                  Register op1, Register op2,
                  Label& label, bool is_far = false);

  void float_cmp_branch(int cmpFlag,
                        FloatRegister op1, FloatRegister op2,
                        Label& label, bool is_far = false);

  void enc_cmpUEqNeLeGt_imm0_branch(int cmpFlag, Register op,
                                    Label& L, bool is_far = false);

  void enc_cmpEqNe_imm0_branch(int cmpFlag, Register op,
                               Label& L, bool is_far = false);

  void enc_cmove(int cmpFlag,
                 Register op1, Register op2,
                 Register dst, Register src);

  void spill(Register r, bool is64, int offset) {
    is64 ? sd(r, Address(sp, offset))
         : sw(r, Address(sp, offset));
  }

  void spill(FloatRegister f, bool is64, int offset) {
    is64 ? fsd(f, Address(sp, offset))
         : fsw(f, Address(sp, offset));
  }

  void spill(VectorRegister v, int offset) {
    add(t0, sp, offset);
    vs1r_v(v, t0);
  }

  void unspill(Register r, bool is64, int offset) {
    is64 ? ld(r, Address(sp, offset))
         : lw(r, Address(sp, offset));
  }

  void unspillu(Register r, bool is64, int offset) {
    is64 ? ld(r, Address(sp, offset))
         : lwu(r, Address(sp, offset));
  }

  void unspill(FloatRegister f, bool is64, int offset) {
    is64 ? fld(f, Address(sp, offset))
         : flw(f, Address(sp, offset));
  }

  void unspill(VectorRegister v, int offset) {
    add(t0, sp, offset);
    vl1r_v(v, t0);
  }

  void spill_copy_vector_stack_to_stack(int src_offset, int dst_offset, int vec_reg_size_in_bytes) {
    assert(vec_reg_size_in_bytes % 16 == 0, "unexpected vector reg size");
    unspill(v0, src_offset);
    spill(v0, dst_offset);
  }
|
||||
|
||||
void minmax_FD(FloatRegister dst,
|
||||
FloatRegister src1, FloatRegister src2,
|
||||
bool is_double, bool is_min);
|
||||
|
||||
// intrinsic methods implemented by rvv instructions
|
||||
void string_equals_v(Register r1, Register r2,
|
||||
Register result, Register cnt1,
|
||||
int elem_size);
|
||||
|
||||
void arrays_equals_v(Register r1, Register r2,
|
||||
Register result, Register cnt1,
|
||||
int elem_size);
|
||||
|
||||
void string_compare_v(Register str1, Register str2,
|
||||
Register cnt1, Register cnt2,
|
||||
Register result,
|
||||
Register tmp1, Register tmp2,
|
||||
int encForm);
|
||||
|
||||
void clear_array_v(Register base, Register cnt);
|
||||
|
||||
void byte_array_inflate_v(Register src, Register dst,
|
||||
Register len, Register tmp);
|
||||
|
||||
void char_array_compress_v(Register src, Register dst,
|
||||
Register len, Register result,
|
||||
Register tmp);
|
||||
|
||||
void encode_iso_array_v(Register src, Register dst,
|
||||
Register len, Register result,
|
||||
Register tmp);
|
||||
|
||||
void has_negatives_v(Register ary, Register len,
|
||||
Register result, Register tmp);
|
||||
|
||||
void string_indexof_char_v(Register str1, Register cnt1,
|
||||
Register ch, Register result,
|
||||
Register tmp1, Register tmp2,
|
||||
bool isL);
|
||||
|
||||
void minmax_FD_v(VectorRegister dst,
|
||||
VectorRegister src1, VectorRegister src2,
|
||||
bool is_double, bool is_min);
|
||||
|
||||
void reduce_minmax_FD_v(FloatRegister dst,
|
||||
FloatRegister src1, VectorRegister src2,
|
||||
VectorRegister tmp1, VectorRegister tmp2,
|
||||
bool is_double, bool is_min);
|
||||
|
||||
#endif // CPU_RISCV_C2_MACROASSEMBLER_RISCV_HPP
|
||||
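An aside on the spill/unspill helpers above: each picks the load/store width from is64, and the vector variants go through t0 because the RVV whole-register load/store instructions take a base register rather than an addressing mode. A hypothetical usage sketch follows (illustration only, not part of this patch; it assumes the usual HotSpot stub-generation context with a C2_MacroAssembler and 16 bytes already reserved at sp — the registers and offsets are made up):

    // Hypothetical sketch: preserving x10 (64-bit GP) and f10 (32-bit float)
    // across a slow path, then restoring them. Offsets are illustrative.
    masm->spill(x10, /* is64 */ true,  0);   // sd  x10, 0(sp)
    masm->spill(f10, /* is64 */ false, 8);   // fsw f10, 8(sp)
    // ... slow-path code that may clobber x10/f10 ...
    masm->unspill(x10, /* is64 */ true,  0); // ld  x10, 0(sp)
    masm->unspill(f10, /* is64 */ false, 8); // flw f10, 8(sp)

Note that spill_copy_vector_stack_to_stack() composes exactly this pair (unspill into v0, then spill) to move a vector register's stack slot from one offset to another.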
85
src/hotspot/cpu/riscv/c2_globals_riscv.hpp
Normal file
@@ -0,0 +1,85 @@
/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_C2_GLOBALS_RISCV_HPP
#define CPU_RISCV_C2_GLOBALS_RISCV_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// Sets the default values for platform dependent flags used by the server compiler.
// (see c2_globals.hpp). Alpha-sorted.

define_pd_global(bool, BackgroundCompilation,        true);
define_pd_global(bool, CICompileOSR,                 true);
define_pd_global(bool, InlineIntrinsics,             true);
define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps,                 true);
define_pd_global(bool, UseOnStackReplacement,        true);
define_pd_global(bool, ProfileInterpreter,           true);
define_pd_global(bool, TieredCompilation,            COMPILER1_PRESENT(true) NOT_COMPILER1(false));
define_pd_global(intx, CompileThreshold,             10000);

define_pd_global(intx, OnStackReplacePercentage,     140);
define_pd_global(intx, ConditionalMoveLimit,         0);
define_pd_global(intx, FLOATPRESSURE,                32);
define_pd_global(intx, FreqInlineSize,               325);
define_pd_global(intx, MinJumpTableSize,             10);
define_pd_global(intx, INTPRESSURE,                  24);
define_pd_global(intx, InteriorEntryAlignment,       16);
define_pd_global(intx, NewSizeThreadIncrease,        ScaleForWordSize(4*K));
define_pd_global(intx, LoopUnrollLimit,              60);
define_pd_global(intx, LoopPercentProfileLimit,      10);
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize,         2496*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, CodeCacheExpansionSize,       64*K);

// Ergonomics related flags
define_pd_global(uint64_t, MaxRAM,                   128ULL*G);
define_pd_global(intx, RegisterCostAreaRatio,        16000);

// Peephole and CISC spilling both break the graph, and so make the
// scheduler sick.
define_pd_global(bool, OptoPeephole,                 false);
define_pd_global(bool, UseCISCSpill,                 false);
define_pd_global(bool, OptoScheduling,               true);
define_pd_global(bool, OptoBundling,                 false);
define_pd_global(bool, OptoRegScheduling,            false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis,  true);
define_pd_global(bool, IdealizeClearArrayNode,       true);

define_pd_global(intx,  ReservedCodeCacheSize,       48*M);
define_pd_global(intx,  NonProfiledCodeHeapSize,     21*M);
define_pd_global(intx,  ProfiledCodeHeapSize,        22*M);
define_pd_global(intx,  NonNMethodCodeHeapSize,      5*M);
define_pd_global(uintx, CodeCacheMinBlockLength,     6);
define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);

// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);

define_pd_global(bool, TrapBasedRangeChecks,         false); // Not needed.

#endif // CPU_RISCV_C2_GLOBALS_RISCV_HPP
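For readers unfamiliar with HotSpot's flag plumbing: each define_pd_global above supplies the platform default that the shared C2 flag table picks up, so these values surface as ordinary -XX: flags (e.g. -XX:LoopUnrollLimit=60 by default on riscv). A rough sketch of the mechanism, simplified from HotSpot's shared runtime globals headers (the expansion shown is an assumption about the macro's shape, not a quote):

    // Simplified sketch of what define_pd_global provides: a pd_-prefixed
    // constant that the shared flag declaration consumes as its default value.
    #define define_pd_global(type, name, value) const type pd_##name = value

    define_pd_global(long, LoopUnrollLimit, 60);
    // expands to: const long pd_LoopUnrollLimit = 60;
    // which the shared c2_globals.hpp then uses as the default for
    // -XX:LoopUnrollLimit on this platform.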
38
src/hotspot/cpu/riscv/c2_init_riscv.cpp
Normal file
@@ -0,0 +1,38 @@
/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"

// processor dependent initialization for riscv

extern void reg_mask_init();

void Compile::pd_compiler2_init() {
  guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "");
  reg_mask_init();
}
47
src/hotspot/cpu/riscv/c2_safepointPollStubTable_riscv.cpp
Normal file
@@ -0,0 +1,47 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ masm.
void C2SafepointPollStubTable::emit_stub_impl(MacroAssembler& masm, C2SafepointPollStub* entry) const {
  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
         "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  RuntimeAddress callback_addr(stub);

  __ bind(entry->_stub_label);
  InternalAddress safepoint_pc(masm.pc() - masm.offset() + entry->_safepoint_offset);
  masm.code_section()->relocate(masm.pc(), safepoint_pc.rspec());
  __ la(t0, safepoint_pc.target());
  __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));
  __ far_jump(callback_addr);
}
#undef __
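One detail worth spelling out: masm.pc() - masm.offset() is the start of the current code section, so adding the recorded _safepoint_offset reconstructs the absolute address of the polling instruction whose pc gets stashed in the thread. A standalone sketch of that arithmetic (plain C++, not HotSpot code; the buffer and offsets are made up for illustration):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t section[64];            // stand-in for the code section buffer
      uint8_t* pc = section + 40;     // current emit position
      int offset = 40;                // bytes emitted so far, i.e. pc - start
      int safepoint_offset = 12;      // recorded offset of the poll instruction

      uint8_t* section_start = pc - offset;      // recover the section start
      assert(section_start == section);
      uint8_t* safepoint_pc = section_start + safepoint_offset;
      assert(safepoint_pc == section + 12);      // absolute address of the poll
      return 0;
    }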
36
src/hotspot/cpu/riscv/codeBuffer_riscv.hpp
Normal file
@@ -0,0 +1,36 @@
/*
 * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_CODEBUFFER_RISCV_HPP
#define CPU_RISCV_CODEBUFFER_RISCV_HPP

private:
  void pd_initialize() {}

public:
  void flush_bundle(bool start_new_bundle) {}

#endif // CPU_RISCV_CODEBUFFER_RISCV_HPP
149
src/hotspot/cpu/riscv/compiledIC_riscv.cpp
Normal file
@@ -0,0 +1,149 @@
/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"

// ----------------------------------------------------------------------------

#define __ _masm.
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
  precond(cbuf.stubs()->start() != badAddress);
  precond(cbuf.stubs()->end() != badAddress);
  // Stub is fixed up when the corresponding call is converted from
  // calling compiled code to calling interpreted code.
  // mv xmethod, 0
  // jalr -4 # to self

  if (mark == NULL) {
    mark = cbuf.insts_mark();  // Get mark within main instrs section.
  }

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(to_interp_stub_size());
  int offset = __ offset();
  if (base == NULL) {
    return NULL;  // CodeBuffer::expand failed
  }
  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark));

  __ emit_static_call_stub();

  assert((__ offset() - offset) <= (int)to_interp_stub_size(), "stub too big");
  __ end_a_stub();
  return base;
}
#undef __

int CompiledStaticCall::to_interp_stub_size() {
  // fence_i + fence* + (lui, addi, slli, addi, slli, addi) + (lui, addi, slli, addi, slli) + jalr
  return NativeFenceI::instruction_size() + 12 * NativeInstruction::instruction_size;
}

int CompiledStaticCall::to_trampoline_stub_size() {
  // Somewhat pessimistically, we count 4 instructions here (although
  // there are only 3) because we sometimes emit an alignment nop.
  // Trampoline stubs are always word aligned.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

// Relocation entries for call stub, compiled java to interpreter.
int CompiledStaticCall::reloc_to_interp_stub() {
  return 4;  // 3 in emit_to_interp_stub + 1 in emit_call
}

void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
  address stub = find_stub();
  guarantee(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  p2i(instruction_address()),
                  callee->name_and_sig_as_C_string());
  }

  // Creation also verifies the object.
  NativeMovConstReg* method_holder
    = nativeMovConstReg_at(stub + NativeFenceI::instruction_size());
#ifdef ASSERT
  NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());

  verify_mt_safe(callee, entry, method_holder, jump);
#endif
  // Update stub.
  method_holder->set_data((intptr_t)callee());
  NativeGeneralJump::insert_unconditional(method_holder->next_instruction_address(), entry);
  ICache::invalidate_range(stub, to_interp_stub_size());
  // Update jump to call.
  set_destination_mt_safe(stub);
}

void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  // Reset stub.
  address stub = static_stub->addr();
  assert(stub != NULL, "stub not found");
  assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
  // Creation also verifies the object.
  NativeMovConstReg* method_holder
    = nativeMovConstReg_at(stub + NativeFenceI::instruction_size());
  method_holder->set_data(0);
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
  jump->set_jump_destination((address)-1);
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledDirectStaticCall::verify() {
  // Verify call.
  _call->verify();
  _call->verify_alignment();

  // Verify stub.
  address stub = find_stub();
  assert(stub != NULL, "no stub found for static call");
  // Creation also verifies the object.
  NativeMovConstReg* method_holder
    = nativeMovConstReg_at(stub + NativeFenceI::instruction_size());
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

  // Verify state.
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

#endif // !PRODUCT
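As a size cross-check for to_interp_stub_size(): its comment enumerates 12 ordinary instructions after the fence.i preamble — six to materialize the Method* (lui, addi, slli, addi, slli, addi), five for the jump target (lui, addi, slli, addi, slli), plus the jalr. Assuming 4-byte base instructions (no compressed encoding in stubs) and a two-instruction preamble — the preamble size is a placeholder, since NativeFenceI::instruction_size() is defined elsewhere in the port — the arithmetic works out as follows:

    #include <cassert>

    int main() {
      const int insn_size    = 4;               // base RV64 instruction width in bytes
      const int fence_i_size = 2 * insn_size;   // assumed preamble: fence.i plus a fence
      // 6 (Method* materialization) + 5 (jump target) + 1 (jalr) = 12 instructions
      const int stub_size    = fence_i_size + 12 * insn_size;
      assert(stub_size == 56);                  // stub footprint under these assumptions
      return 0;
    }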
143
src/hotspot/cpu/riscv/copy_riscv.hpp
Normal file
@@ -0,0 +1,143 @@
/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_COPY_RISCV_HPP
#define CPU_RISCV_COPY_RISCV_HPP

#include OS_CPU_HEADER(copy)

static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
  julong* to = (julong*) tohw;
  julong  v  = ((julong) value << 32) | value;
  while (count-- > 0) {
    *to++ = v;
  }
}

static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
  pd_fill_to_words(tohw, count, value);
}

static void pd_fill_to_bytes(void* to, size_t count, jubyte value) {
  (void)memset(to, value, count);
}

static void pd_zero_to_words(HeapWord* tohw, size_t count) {
  pd_fill_to_words(tohw, count, 0);
}

static void pd_zero_to_bytes(void* to, size_t count) {
  (void)memset(to, 0, count);
}

static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
  (void)memmove(to, from, count * HeapWordSize);
}

static inline void pd_disjoint_words_helper(const HeapWord* from, HeapWord* to, size_t count, bool is_atomic) {
  switch (count) {
    case 8:  to[7] = from[7];  // fall through
    case 7:  to[6] = from[6];  // fall through
    case 6:  to[5] = from[5];  // fall through
    case 5:  to[4] = from[4];  // fall through
    case 4:  to[3] = from[3];  // fall through
    case 3:  to[2] = from[2];  // fall through
    case 2:  to[1] = from[1];  // fall through
    case 1:  to[0] = from[0];  // fall through
    case 0:  break;
    default:
      if (is_atomic) {
        while (count-- > 0) { *to++ = *from++; }
      } else {
        memcpy(to, from, count * HeapWordSize);
      }
  }
}

static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
  pd_disjoint_words_helper(from, to, count, false);
}

static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
  pd_disjoint_words_helper(from, to, count, true);
}

static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
  pd_conjoint_words(from, to, count);
}

static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
  pd_disjoint_words(from, to, count);
}

static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
  (void)memmove(to, from, count);
}

static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
  pd_conjoint_bytes(from, to, count);
}

static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
  _Copy_conjoint_jshorts_atomic(from, to, count);
}

static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
  _Copy_conjoint_jints_atomic(from, to, count);
}

static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
  _Copy_conjoint_jlongs_atomic(from, to, count);
}

static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
  assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size.");
  _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
}

static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
  _Copy_arrayof_conjoint_bytes(from, to, count);
}

static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) {
  _Copy_arrayof_conjoint_jshorts(from, to, count);
}

static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
  _Copy_arrayof_conjoint_jints(from, to, count);
}

static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
  _Copy_arrayof_conjoint_jlongs(from, to, count);
}

static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
  assert(!UseCompressedOops, "foo!");
  assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
  _Copy_arrayof_conjoint_jlongs(from, to, count);
}

#endif // CPU_RISCV_COPY_RISCV_HPP
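The word-fill above relies on a small bit trick: replicating a 32-bit pattern into both halves of a 64-bit word so that each store fills two 32-bit slots at once. A self-contained check of that construction (plain C++, values made up for illustration):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t value = 0xDEADBEEF;
      // Same construction as pd_fill_to_words: high half shifted in, low half OR'd.
      uint64_t v = ((uint64_t)value << 32) | value;
      assert(v == 0xDEADBEEFDEADBEEFULL);
      return 0;
    }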
50
src/hotspot/cpu/riscv/disassembler_riscv.hpp
Normal file
@@ -0,0 +1,50 @@
/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_DISASSEMBLER_RISCV_HPP
#define CPU_RISCV_DISASSEMBLER_RISCV_HPP

static int pd_instruction_alignment() {
  return 1;
}

static const char* pd_cpu_opts() {
  return "";
}

// Special-case instruction decoding.
// There may be cases where the binutils disassembler doesn't do
// a perfect job. In those cases, decode_instruction0 may kick in
// and do it right.
// If nothing had to be done, just return "here", otherwise return "here + instr_len(here)".
static address decode_instruction0(address here, outputStream* st, address virtual_begin = NULL) {
  return here;
}

// Platform-specific instruction annotations (like the value of loaded constants).
static void annotate(address pc, outputStream* st) {}

#endif // CPU_RISCV_DISASSEMBLER_RISCV_HPP
44
src/hotspot/cpu/riscv/foreign_globals_riscv.cpp
Normal file
@@ -0,0 +1,44 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "prims/foreign_globals.hpp"
#include "utilities/debug.hpp"

// Stubbed out, implement later
const ABIDescriptor ForeignGlobals::parse_abi_descriptor_impl(jobject jabi) const {
  Unimplemented();
  return {};
}

const BufferLayout ForeignGlobals::parse_buffer_layout_impl(jobject jlayout) const {
  Unimplemented();
  return {};
}

const CallRegs ForeignGlobals::parse_call_regs_impl(jobject jconv) const {
  ShouldNotCallThis();
  return {};
}
32
src/hotspot/cpu/riscv/foreign_globals_riscv.hpp
Normal file
@@ -0,0 +1,32 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_FOREIGN_GLOBALS_RISCV_HPP
#define CPU_RISCV_FOREIGN_GLOBALS_RISCV_HPP

class ABIDescriptor {};
class BufferLayout {};

#endif // CPU_RISCV_FOREIGN_GLOBALS_RISCV_HPP
698
src/hotspot/cpu/riscv/frame_riscv.cpp
Normal file
@@ -0,0 +1,698 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
|
||||
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "compiler/oopMap.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "oops/markWord.hpp"
|
||||
#include "oops/method.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/javaCalls.hpp"
|
||||
#include "runtime/monitorChunk.hpp"
|
||||
#include "runtime/os.inline.hpp"
|
||||
#include "runtime/signature.hpp"
|
||||
#include "runtime/stackWatermarkSet.hpp"
|
||||
#include "runtime/stubCodeGenerator.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "vmreg_riscv.inline.hpp"
|
||||
#ifdef COMPILER1
|
||||
#include "c1/c1_Runtime1.hpp"
|
||||
#include "runtime/vframeArray.hpp"
|
||||
#endif
|
||||
|
||||
#ifdef ASSERT
|
||||
void RegisterMap::check_location_valid() {
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
// Profiling/safepoint support
|
||||
|
||||
bool frame::safe_for_sender(JavaThread *thread) {
|
||||
address addr_sp = (address)_sp;
|
||||
address addr_fp = (address)_fp;
|
||||
address unextended_sp = (address)_unextended_sp;
|
||||
|
||||
// consider stack guards when trying to determine "safe" stack pointers
|
||||
// sp must be within the usable part of the stack (not in guards)
|
||||
if (!thread->is_in_usable_stack(addr_sp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// When we are running interpreted code the machine stack pointer, SP, is
|
||||
// set low enough so that the Java expression stack can grow and shrink
|
||||
// without ever exceeding the machine stack bounds. So, ESP >= SP.
|
||||
|
||||
// When we call out of an interpreted method, SP is incremented so that
|
||||
// the space between SP and ESP is removed. The SP saved in the callee's
|
||||
// frame is the SP *before* this increment. So, when we walk a stack of
|
||||
// interpreter frames the sender's SP saved in a frame might be less than
|
||||
// the SP at the point of call.
|
||||
|
||||
// So unextended sp must be within the stack but we need not to check
|
||||
// that unextended sp >= sp
|
||||
|
||||
if (!thread->is_in_full_stack_checked(unextended_sp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// an fp must be within the stack and above (but not equal) sp
|
||||
// second evaluation on fp+ is added to handle situation where fp is -1
|
||||
bool fp_safe = thread->is_in_stack_range_excl(addr_fp, addr_sp) &&
|
||||
thread->is_in_full_stack_checked(addr_fp + (return_addr_offset * sizeof(void*)));
|
||||
|
||||
// We know sp/unextended_sp are safe only fp is questionable here
|
||||
|
||||
// If the current frame is known to the code cache then we can attempt to
|
||||
// to construct the sender and do some validation of it. This goes a long way
|
||||
// toward eliminating issues when we get in frame construction code
|
||||
|
||||
if (_cb != NULL) {
|
||||
|
||||
// First check if frame is complete and tester is reliable
|
||||
// Unfortunately we can only check frame complete for runtime stubs and nmethod
|
||||
// other generic buffer blobs are more problematic so we just assume they are
|
||||
// ok. adapter blobs never have a frame complete and are never ok.
|
||||
|
||||
if (!_cb->is_frame_complete_at(_pc)) {
|
||||
if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Could just be some random pointer within the codeBlob
|
||||
if (!_cb->code_contains(_pc)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Entry frame checks
|
||||
if (is_entry_frame()) {
|
||||
// an entry frame must have a valid fp.
|
||||
return fp_safe && is_entry_frame_valid(thread);
|
||||
}
|
||||
|
||||
intptr_t* sender_sp = NULL;
|
||||
intptr_t* sender_unextended_sp = NULL;
|
||||
address sender_pc = NULL;
|
||||
intptr_t* saved_fp = NULL;
|
||||
|
||||
if (is_interpreted_frame()) {
|
||||
// fp must be safe
|
||||
if (!fp_safe) {
|
||||
return false;
|
||||
}
|
||||
|
||||
sender_pc = (address)this->fp()[return_addr_offset];
|
||||
// for interpreted frames, the value below is the sender "raw" sp,
|
||||
// which can be different from the sender unextended sp (the sp seen
|
||||
// by the sender) because of current frame local variables
|
||||
sender_sp = (intptr_t*) addr_at(sender_sp_offset);
|
||||
sender_unextended_sp = (intptr_t*) this->fp()[interpreter_frame_sender_sp_offset];
|
||||
saved_fp = (intptr_t*) this->fp()[link_offset];
|
||||
} else {
|
||||
// must be some sort of compiled/runtime frame
|
||||
// fp does not have to be safe (although it could be check for c1?)
|
||||
|
||||
// check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
|
||||
if (_cb->frame_size() <= 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
sender_sp = _unextended_sp + _cb->frame_size();
|
||||
// Is sender_sp safe?
|
||||
if (!thread->is_in_full_stack_checked((address)sender_sp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
sender_unextended_sp = sender_sp;
|
||||
sender_pc = (address) *(sender_sp - 1);
|
||||
saved_fp = (intptr_t*) *(sender_sp - 2);
|
||||
}
|
||||
|
||||
|
||||
// If the potential sender is the interpreter then we can do some more checking
|
||||
if (Interpreter::contains(sender_pc)) {
|
||||
|
||||
// fp is always saved in a recognizable place in any code we generate. However
|
||||
// only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved fp
|
||||
// is really a frame pointer.
|
||||
if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// construct the potential sender
|
||||
frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);
|
||||
|
||||
return sender.is_interpreted_frame_valid(thread);
|
||||
}
|
||||
|
||||
// We must always be able to find a recognizable pc
|
||||
CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
|
||||
if (sender_pc == NULL || sender_blob == NULL) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Could be a zombie method
|
||||
if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Could just be some random pointer within the codeBlob
|
||||
if (!sender_blob->code_contains(sender_pc)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// We should never be able to see an adapter if the current frame is something from code cache
|
||||
if (sender_blob->is_adapter_blob()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Could be the call_stub
|
||||
if (StubRoutines::returns_to_call_stub(sender_pc)) {
|
||||
if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// construct the potential sender
|
||||
frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);
|
||||
|
||||
// Validate the JavaCallWrapper an entry frame must have
|
||||
address jcw = (address)sender.entry_frame_call_wrapper();
|
||||
|
||||
bool jcw_safe = (jcw < thread->stack_base()) && (jcw > (address)sender.fp());
|
||||
|
||||
return jcw_safe;
|
||||
}
|
||||
|
||||
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
|
||||
if (nm != NULL) {
|
||||
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
|
||||
nm->method()->is_method_handle_intrinsic()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
|
||||
// because the return address counts against the callee's frame.
|
||||
if (sender_blob->frame_size() <= 0) {
|
||||
assert(!sender_blob->is_compiled(), "should count return address at least");
|
||||
return false;
|
||||
}
|
||||
|
||||
// We should never be able to see anything here except an nmethod. If something in the
|
||||
// code cache (current frame) is called by an entity within the code cache that entity
|
||||
// should not be anything but the call stub (already covered), the interpreter (already covered)
|
||||
// or an nmethod.
|
||||
if (!sender_blob->is_compiled()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Could put some more validation for the potential non-interpreted sender
|
||||
// frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
|
||||
|
||||
// One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb
|
||||
|
||||
// We've validated the potential sender that would be created
|
||||
return true;
|
||||
}
|
||||
|
||||
// Must be native-compiled frame. Since sender will try and use fp to find
|
||||
// linkages it must be safe
|
||||
if (!fp_safe) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Will the pc we fetch be non-zero (which we'll find at the oldest frame)
|
||||
if ((address)this->fp()[return_addr_offset] == NULL) { return false; }
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void frame::patch_pc(Thread* thread, address pc) {
|
||||
assert(_cb == CodeCache::find_blob(pc), "unexpected pc");
|
||||
address* pc_addr = &(((address*) sp())[-1]);
|
||||
if (TracePcPatching) {
|
||||
tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]",
|
||||
p2i(pc_addr), p2i(*pc_addr), p2i(pc));
|
||||
}
|
||||
// Either the return address is the original one or we are going to
|
||||
// patch in the same address that's already there.
|
||||
assert(_pc == *pc_addr || pc == *pc_addr, "must be");
|
||||
*pc_addr = pc;
|
||||
address original_pc = CompiledMethod::get_deopt_original_pc(this);
|
||||
if (original_pc != NULL) {
|
||||
assert(original_pc == _pc, "expected original PC to be stored before patching");
|
||||
_deopt_state = is_deoptimized;
|
||||
// leave _pc as is
|
||||
} else {
|
||||
_deopt_state = not_deoptimized;
|
||||
_pc = pc;
|
||||
}
|
||||
}
|
||||
|
||||
bool frame::is_interpreted_frame() const {
|
||||
return Interpreter::contains(pc());
|
||||
}
|
||||
|
||||
int frame::frame_size(RegisterMap* map) const {
|
||||
frame sender = this->sender(map);
|
||||
return sender.sp() - sp();
|
||||
}
|
||||
|
||||
intptr_t* frame::entry_frame_argument_at(int offset) const {
|
||||
// convert offset to index to deal with tsi
|
||||
int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
|
||||
// Entry frame's arguments are always in relation to unextended_sp()
|
||||
return &unextended_sp()[index];
|
||||
}
|
||||
|
||||
// sender_sp
|
||||
intptr_t* frame::interpreter_frame_sender_sp() const {
|
||||
assert(is_interpreted_frame(), "interpreted frame expected");
|
||||
return (intptr_t*) at(interpreter_frame_sender_sp_offset);
|
||||
}
|
||||
|
||||
void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
|
||||
assert(is_interpreted_frame(), "interpreted frame expected");
|
||||
ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
|
||||
}
|
||||
|
||||
|
||||
// monitor elements
|
||||
|
||||
BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
|
||||
return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
|
||||
}
|
||||
|
||||
BasicObjectLock* frame::interpreter_frame_monitor_end() const {
|
||||
BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
|
||||
// make sure the pointer points inside the frame
|
||||
assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
|
||||
assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer");
|
||||
return result;
|
||||
}
|
||||
|
||||
void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
|
||||
*((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value;
|
||||
}
|
||||
|
||||
// Used by template based interpreter deoptimization
|
||||
void frame::interpreter_frame_set_last_sp(intptr_t* last_sp) {
|
||||
*((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = last_sp;
|
||||
}
|
||||
|
||||
frame frame::sender_for_entry_frame(RegisterMap* map) const {
|
||||
assert(map != NULL, "map must be set");
|
||||
// Java frame called from C; skip all C frames and return top C
|
||||
// frame of that chunk as the sender
|
||||
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
|
||||
assert(!entry_frame_is_first(), "next Java fp must be non zero");
|
||||
assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
|
||||
// Since we are walking the stack now this nested anchor is obviously walkable
|
||||
// even if it wasn't when it was stacked.
|
||||
if (!jfa->walkable()) {
|
||||
// Capture _last_Java_pc (if needed) and mark anchor walkable.
|
||||
jfa->capture_last_Java_pc();
|
||||
}
|
||||
map->clear();
|
||||
assert(map->include_argument_oops(), "should be set by clear");
|
||||
vmassert(jfa->last_Java_pc() != NULL, "not walkable");
|
||||
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
|
||||
return fr;
|
||||
}
|
||||
|
||||
OptimizedEntryBlob::FrameData* OptimizedEntryBlob::frame_data_for_frame(const frame& frame) const {
|
||||
ShouldNotCallThis();
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool frame::optimized_entry_frame_is_first() const {
|
||||
ShouldNotCallThis();
|
||||
return false;
|
||||
}
|
||||
|
||||
frame frame::sender_for_optimized_entry_frame(RegisterMap* map) const {
|
||||
ShouldNotCallThis();
|
||||
return {};
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::verify_deopt_original_pc
|
||||
//
|
||||
// Verifies the calculated original PC of a deoptimization PC for the
|
||||
// given unextended SP.
|
||||
#ifdef ASSERT
|
||||
void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
|
||||
frame fr;
|
||||
|
||||
// This is ugly but it's better than to change {get,set}_original_pc
|
||||
// to take an SP value as argument. And it's only a debugging
|
||||
// method anyway.
|
||||
fr._unextended_sp = unextended_sp;
|
||||
|
||||
assert_cond(nm != NULL);
|
||||
address original_pc = nm->get_original_pc(&fr);
|
||||
assert(nm->insts_contains_inclusive(original_pc),
|
||||
"original PC must be in the main code section of the the compiled method (or must be immediately following it)");
|
||||
}
|
||||
#endif
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::adjust_unextended_sp
|
||||
void frame::adjust_unextended_sp() {
|
||||
// On riscv, sites calling method handle intrinsics and lambda forms are treated
|
||||
// as any other call site. Therefore, no special action is needed when we are
|
||||
// returning to any of these call sites.
|
||||
|
||||
if (_cb != NULL) {
|
||||
CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
|
||||
if (sender_cm != NULL) {
|
||||
// If the sender PC is a deoptimization point, get the original PC.
|
||||
if (sender_cm->is_deopt_entry(_pc) ||
|
||||
sender_cm->is_deopt_mh_entry(_pc)) {
|
||||
DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::update_map_with_saved_link
|
||||
void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
|
||||
// The interpreter and compiler(s) always save fp in a known
|
||||
// location on entry. We must record where that location is
|
||||
// so that if fp was live on callout from c2 we can find
|
||||
// the saved copy no matter what it called.
|
||||
|
||||
// Since the interpreter always saves fp if we record where it is then
|
||||
// we don't have to always save fp on entry and exit to c2 compiled
|
||||
// code, on entry will be enough.
|
||||
assert(map != NULL, "map must be set");
|
||||
map->set_location(::fp->as_VMReg(), (address) link_addr);
|
||||
// this is weird "H" ought to be at a higher address however the
|
||||
// oopMaps seems to have the "H" regs at the same address and the
|
||||
// vanilla register.
|
||||
map->set_location(::fp->as_VMReg()->next(), (address) link_addr);
|
||||
}
|
||||
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::sender_for_interpreter_frame
|
||||
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
|
||||
// SP is the raw SP from the sender after adapter or interpreter
|
||||
// extension.
|
||||
intptr_t* sender_sp = this->sender_sp();
|
||||
|
||||
// This is the sp before any possible extension (adapter/locals).
|
||||
intptr_t* unextended_sp = interpreter_frame_sender_sp();
|
||||
|
||||
#ifdef COMPILER2
|
||||
assert(map != NULL, "map must be set");
|
||||
if (map->update_map()) {
|
||||
update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
|
||||
}
|
||||
#endif // COMPILER2
|
||||
|
||||
return frame(sender_sp, unextended_sp, link(), sender_pc());
|
||||
}
|
||||
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::sender_for_compiled_frame
|
||||
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
|
||||
// we cannot rely upon the last fp having been saved to the thread
|
||||
// in C2 code but it will have been pushed onto the stack. so we
|
||||
// have to find it relative to the unextended sp
|
||||
|
||||
assert(_cb->frame_size() >= 0, "must have non-zero frame size");
|
||||
intptr_t* l_sender_sp = unextended_sp() + _cb->frame_size();
|
||||
intptr_t* unextended_sp = l_sender_sp;
|
||||
|
||||
// the return_address is always the word on the stack
|
||||
address sender_pc = (address) *(l_sender_sp + frame::return_addr_offset);
|
||||
|
||||
intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp + frame::link_offset);
|
||||
|
||||
assert(map != NULL, "map must be set");
|
||||
if (map->update_map()) {
|
||||
// Tell GC to use argument oopmaps for some runtime stubs that need it.
|
||||
// For C1, the runtime stub might not have oop maps, so set this flag
|
||||
// outside of update_register_map.
|
||||
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
|
||||
if (_cb->oop_maps() != NULL) {
|
||||
OopMapSet::update_register_map(this, map);
|
||||
}
|
||||
|
||||
// Since the prolog does the save and restore of FP there is no
|
||||
// oopmap for it so we must fill in its location as if there was
|
||||
// an oopmap entry since if our caller was compiled code there
|
||||
// could be live jvm state in it.
|
||||
update_map_with_saved_link(map, saved_fp_addr);
|
||||
}
|
||||
|
||||
return frame(l_sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// frame::sender_raw
|
||||
frame frame::sender_raw(RegisterMap* map) const {
|
||||
// Default is we done have to follow them. The sender_for_xxx will
|
||||
// update it accordingly
|
||||
assert(map != NULL, "map must be set");
|
||||
map->set_include_argument_oops(false);
|
||||
|
||||
if (is_entry_frame()) {
|
||||
return sender_for_entry_frame(map);
|
||||
}
|
||||
if (is_interpreted_frame()) {
|
||||
return sender_for_interpreter_frame(map);
|
||||
}
|
||||
assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
|
||||
|
||||
// This test looks odd: why is it not is_compiled_frame() ? That's
|
||||
// because stubs also have OOP maps.
|
||||
if (_cb != NULL) {
|
||||
return sender_for_compiled_frame(map);
|
||||
}
|
||||
|
||||
// Must be native-compiled frame, i.e. the marshaling code for native
|
||||
// methods that exists in the core system.
|
||||
return frame(sender_sp(), link(), sender_pc());
|
||||
}
|
||||
|
||||
frame frame::sender(RegisterMap* map) const {
|
||||
frame result = sender_raw(map);
|
||||
|
||||
if (map->process_frames()) {
|
||||
StackWatermarkSet::on_iteration(map->thread(), result);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
|
||||
assert(is_interpreted_frame(), "Not an interpreted frame");
|
||||
// These are reasonable sanity checks
|
||||
if (fp() == NULL || (intptr_t(fp()) & (wordSize-1)) != 0) {
|
||||
return false;
|
||||
}
|
||||
if (sp() == NULL || (intptr_t(sp()) & (wordSize-1)) != 0) {
|
||||
return false;
|
||||
}
|
||||
if (fp() + interpreter_frame_initial_sp_offset < sp()) {
|
||||
return false;
|
||||
}
|
||||
// These are hacks to keep us out of trouble.
|
||||
// The problem with these is that they mask other problems
|
||||
if (fp() <= sp()) { // this attempts to deal with unsigned comparison above
|
||||
return false;
|
||||
}
|
||||
|
||||
// do some validation of frame elements
|
||||
|
||||
// first the method
|
||||
Method* m = *interpreter_frame_method_addr();
|
||||
// validate the method we'd find in this potential sender
|
||||
if (!Method::is_valid_method(m)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// stack frames shouldn't be much larger than max_stack elements
|
||||
// this test requires the use of unextended_sp which is the sp as seen by
|
||||
// the current frame, and not sp which is the "raw" pc which could point
|
||||
// further because of local variables of the callee method inserted after
|
||||
// method arguments
|
||||
if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// validate bci/bcx
|
||||
address bcp = interpreter_frame_bcp();
|
||||
if (m->validate_bci_from_bcp(bcp) < 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// validate constantPoolCache*
|
||||
ConstantPoolCache* cp = *interpreter_frame_cache_addr();
|
||||
if (MetaspaceObj::is_valid(cp) == false) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// validate locals
|
||||
address locals = (address) *interpreter_frame_locals_addr();
|
||||
if (locals > thread->stack_base() || locals < (address) fp()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// We'd have to be pretty unlucky to be mislead at this point
|
||||
return true;
|
||||
}
|
||||
|
||||
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  intptr_t* tos_addr = NULL;
  if (method->is_native()) {
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
      // This is because we do a push(ltos) after push(dtos) in generate_native_entry.
      tos_addr += 2 * Interpreter::stackElementWords;
    }
  } else {
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
  }

  switch (type) {
    case T_OBJECT :
    case T_ARRAY  : {
      oop obj;
      if (method->is_native()) {
        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
      } else {
        oop* obj_p = (oop*)tos_addr;
        obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
      }
      assert(Universe::is_in_heap_or_null(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)tos_addr;    break;
    case T_CHAR    : value_result->c = *(jchar*)tos_addr;    break;
    case T_SHORT   : value_result->s = *(jshort*)tos_addr;   break;
    case T_INT     : value_result->i = *(jint*)tos_addr;     break;
    case T_LONG    : value_result->j = *(jlong*)tos_addr;    break;
    case T_FLOAT   : {
      value_result->f = *(jfloat*)tos_addr;
      break;
    }
    case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr;  break;
    case T_VOID    : /* Nothing to do */                     break;
    default        : ShouldNotReachHere();
  }

  return type;
}


intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
  int index = (Interpreter::expr_offset_in_bytes(offset) / wordSize);
  return &interpreter_frame_tos_address()[index];
}

#ifndef PRODUCT

#define DESCRIBE_FP_OFFSET(name) \
  values.describe(frame_no, fp() + frame::name##_offset, #name)

void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_method);
    DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
    DESCRIBE_FP_OFFSET(interpreter_frame_mirror);
    DESCRIBE_FP_OFFSET(interpreter_frame_cache);
    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
    DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  }
}
#endif

intptr_t* frame::initial_deoptimization_info() {
  // Not used on riscv, but we must return something.
  return NULL;
}

intptr_t* frame::real_fp() const {
  if (_cb != NULL) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(!is_compiled_frame(), "unknown compiled frame size");
  return fp();
}

#undef DESCRIBE_FP_OFFSET

#ifndef PRODUCT
// This is a generic constructor which is only used by pns() in debug.cpp.
frame::frame(void* ptr_sp, void* ptr_fp, void* pc) {
  init((intptr_t*)ptr_sp, (intptr_t*)ptr_fp, (address)pc);
}

void frame::pd_ps() {}
#endif

void JavaFrameAnchor::make_walkable(JavaThread* thread) {
  // last frame set?
  if (last_Java_sp() == NULL) { return; }
  // already walkable?
  if (walkable()) { return; }
  vmassert(Thread::current() == (Thread*)thread, "not current thread");
  vmassert(last_Java_sp() != NULL, "not called from Java code?");
  vmassert(last_Java_pc() == NULL, "already walkable");
  capture_last_Java_pc();
  vmassert(walkable(), "something went wrong");
}

void JavaFrameAnchor::capture_last_Java_pc() {
  vmassert(_last_Java_sp != NULL, "no last frame set");
  vmassert(_last_Java_pc == NULL, "already walkable");
  _last_Java_pc = (address)_last_Java_sp[-1];
}
202
src/hotspot/cpu/riscv/frame_riscv.hpp
Normal file
@@ -0,0 +1,202 @@
/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_FRAME_RISCV_HPP
#define CPU_RISCV_FRAME_RISCV_HPP

#include "runtime/synchronizer.hpp"

// A frame represents a physical stack frame (an activation). Frames can be
// C or Java frames, and the Java frames can be interpreted or compiled.
// In contrast, vframes represent source-level activations, so that one physical frame
// can correspond to multiple source level frames because of inlining.
// A frame is comprised of {pc, fp, sp}
// ------------------------------ Asm interpreter ----------------------------------------
// Layout of asm interpreter frame:
//    [expression stack       ] * <- sp

//    [monitors[0]            ]   \
//     ...                        | monitor block size = k
//    [monitors[k-1]          ]   /
//    [frame initial esp      ] ( == &monitors[0], initially here)      initial_sp_offset
//    [byte code index/pointer]                   = bcx()               bcx_offset

//    [pointer to locals      ]                   = locals()            locals_offset
//    [constant pool cache    ]                   = cache()             cache_offset

//    [klass of method        ]                   = mirror()            mirror_offset
//    [padding                ]

//    [methodData             ]                   = mdp()               mdx_offset
//    [Method                 ]                   = method()            method_offset

//    [last esp               ]                   = last_sp()           last_sp_offset
//    [old stack pointer      ]                     (sender_sp)         sender_sp_offset

//    [old frame pointer      ]
//    [return pc              ]

//    [last sp                ]   <- fp           = link()
//    [oop temp               ]                     (only for native calls)

//    [padding                ] (to preserve machine SP alignment)
//    [locals and parameters  ]
//                               <- sender sp
// ------------------------------ Asm interpreter ----------------------------------------

// ------------------------------ C Frame ------------------------------------------------
// Stack: gcc with -fno-omit-frame-pointer
//                    .
//                    .
//               +->  .
//               |  +-----------------+   |
//               |  | return address  |   |
//               |  |   previous fp ------+
//               |  | saved registers |
//               |  | local variables |
//               |  |       ...       | <-+
//               |  +-----------------+   |
//               |  | return address  |   |
//               +------ previous fp  |   |
//                  | saved registers |   |
//                  | local variables |   |
//               +->|       ...       |   |
//               |  +-----------------+   |
//               |  | return address  |   |
//               |  |   previous fp ------+
//               |  | saved registers |
//               |  | local variables |
//               |  |       ...       | <-+
//               |  +-----------------+   |
//               |  | return address  |   |
//               +------ previous fp  |   |
//                  | saved registers |   |
//                  | local variables |   |
//  $fp -->         |       ...       |   |
//                  +-----------------+   |
//                  | return address  |   |
//                  |   previous fp ------+
//                  | saved registers |
//  $sp -->         | local variables |
//                  +-----------------+
// ------------------------------ C Frame ------------------------------------------------

 public:
  enum {
    pc_return_offset                              =  0,
    // All frames
    link_offset                                   = -2,
    return_addr_offset                            = -1,
    sender_sp_offset                              =  0,
    // Interpreter frames
    interpreter_frame_oop_temp_offset             =  1, // for native calls only

    interpreter_frame_sender_sp_offset            = -3,
    // outgoing sp before a call to an invoked method
    interpreter_frame_last_sp_offset              = interpreter_frame_sender_sp_offset - 1,
    interpreter_frame_method_offset               = interpreter_frame_last_sp_offset - 1,
    interpreter_frame_mdp_offset                  = interpreter_frame_method_offset - 1,
    interpreter_frame_padding_offset              = interpreter_frame_mdp_offset - 1,
    interpreter_frame_mirror_offset               = interpreter_frame_padding_offset - 1,
    interpreter_frame_cache_offset                = interpreter_frame_mirror_offset - 1,
    interpreter_frame_locals_offset               = interpreter_frame_cache_offset - 1,
    interpreter_frame_bcp_offset                  = interpreter_frame_locals_offset - 1,
    interpreter_frame_initial_sp_offset           = interpreter_frame_bcp_offset - 1,

    interpreter_frame_monitor_block_top_offset    = interpreter_frame_initial_sp_offset,
    interpreter_frame_monitor_block_bottom_offset = interpreter_frame_initial_sp_offset,

    // Entry frames
    // n.b. these values are determined by the layout defined in
    // stubGenerator for the Java call stub
    entry_frame_after_call_words                  = 22,
    entry_frame_call_wrapper_offset               = -10,

    // we don't need a save area
    arg_reg_save_area_bytes                       = 0
  };
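  // Illustrative sketch, not part of the port: the offsets above are word
  // indices relative to fp, so a slot of an interpreted frame is reached as,
  // for example,
  //
  //   Method** m_addr = (Method**) addr_at(interpreter_frame_method_offset);
  //
  // which is exactly what the interpreter_frame_*_addr() accessors in
  // frame_riscv.inline.hpp expand to.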
  intptr_t ptr_at(int offset) const {
    return *ptr_at_addr(offset);
  }

  void ptr_at_put(int offset, intptr_t value) {
    *ptr_at_addr(offset) = value;
  }

 private:
  // an additional field beyond _sp and _pc:
  intptr_t* _fp; // frame pointer
  // The interpreter and adapters will extend the frame of the caller.
  // Since oopMaps are based on the sp of the caller before extension
  // we need to know that value. However in order to compute the address
  // of the return address we need the real "raw" sp. Since sparc already
  // uses sp() to mean "raw" sp and unextended_sp() to mean the caller's
  // original sp we use that convention.

  intptr_t* _unextended_sp;
  void adjust_unextended_sp();

  intptr_t* ptr_at_addr(int offset) const {
    return (intptr_t*) addr_at(offset);
  }

#ifdef ASSERT
  // Used in frame::sender_for_{interpreter,compiled}_frame
  static void verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp);
#endif

 public:
  // Constructors

  frame(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc);

  frame(intptr_t* ptr_sp, intptr_t* unextended_sp, intptr_t* ptr_fp, address pc);

  frame(intptr_t* ptr_sp, intptr_t* ptr_fp);

  void init(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc);

  // accessors for the instance variables
  // Note: not necessarily the real 'frame pointer' (see real_fp)
  intptr_t* fp() const { return _fp; }

  inline address* sender_pc_addr() const;

  // expression stack tos if we are nested in a java call
  intptr_t* interpreter_frame_last_sp() const;

  // helper to update a map with callee-saved fp
  static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr);

  // deoptimization support
  void interpreter_frame_set_last_sp(intptr_t* last_sp);

  static jint interpreter_frame_expression_stack_direction() { return -1; }

  // returns the sending frame, without applying any barriers
  frame sender_raw(RegisterMap* map) const;

#endif // CPU_RISCV_FRAME_RISCV_HPP
241
src/hotspot/cpu/riscv/frame_riscv.inline.hpp
Normal file
@@ -0,0 +1,241 @@
/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_FRAME_RISCV_INLINE_HPP
#define CPU_RISCV_FRAME_RISCV_INLINE_HPP

#include "code/codeCache.hpp"
#include "code/vmreg.inline.hpp"

// Inline functions for RISCV frames:

// Constructors:

inline frame::frame() {
  _pc = NULL;
  _sp = NULL;
  _unextended_sp = NULL;
  _fp = NULL;
  _cb = NULL;
  _deopt_state = unknown;
}

static int spin;

inline void frame::init(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc) {
  intptr_t a = intptr_t(ptr_sp);
  intptr_t b = intptr_t(ptr_fp);
  _sp = ptr_sp;
  _unextended_sp = ptr_sp;
  _fp = ptr_fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();

  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

inline frame::frame(intptr_t* ptr_sp, intptr_t* ptr_fp, address pc) {
  init(ptr_sp, ptr_fp, pc);
}

inline frame::frame(intptr_t* ptr_sp, intptr_t* unextended_sp, intptr_t* ptr_fp, address pc) {
  intptr_t a = intptr_t(ptr_sp);
  intptr_t b = intptr_t(ptr_fp);
  _sp = ptr_sp;
  _unextended_sp = unextended_sp;
  _fp = ptr_fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();

  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    assert(_cb->as_compiled_method()->insts_contains_inclusive(_pc),
           "original PC must be in the main code section of the compiled method (or must be immediately following it)");
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

inline frame::frame(intptr_t* ptr_sp, intptr_t* ptr_fp) {
  intptr_t a = intptr_t(ptr_sp);
  intptr_t b = intptr_t(ptr_fp);
  _sp = ptr_sp;
  _unextended_sp = ptr_sp;
  _fp = ptr_fp;
  _pc = (address)(ptr_sp[-1]);

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk. If we are truly
  // unlucky the junk value could point to a zombie method and we'll die on the
  // find_blob call. This is also why we can have no asserts on the validity
  // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However this assert is of somewhat dubious
  // value.

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

// Accessors

inline bool frame::equal(frame other) const {
  bool ret = sp() == other.sp() &&
             unextended_sp() == other.unextended_sp() &&
             fp() == other.fp() &&
             pc() == other.pc();
  assert(!ret || (cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
  return ret;
}

// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const {
  assert(this->id() != NULL && id != NULL, "NULL frame id");
  return this->id() > id;
}

inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }

inline intptr_t* frame::unextended_sp() const { return _unextended_sp; }

// Return address
inline address* frame::sender_pc_addr() const { return (address*) addr_at(return_addr_offset); }
inline address  frame::sender_pc() const      { return *sender_pc_addr(); }
inline intptr_t* frame::sender_sp() const     { return addr_at(sender_sp_offset); }

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  return (intptr_t**)addr_at(interpreter_frame_locals_offset);
}

inline intptr_t* frame::interpreter_frame_last_sp() const {
  return *(intptr_t**)addr_at(interpreter_frame_last_sp_offset);
}

inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}


// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == NULL) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter. At least
    // check that we don't fall behind the legal region.
    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}
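// Illustrative note, not part of the port: when last_sp is NULL (no nested
// call in progress) the expression-stack top is simply sp(), so a result
// probe such as the one in frame::interpreter_frame_result reduces to e.g.
//
//   jint v = *(jint*) interpreter_frame_tos_address();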
inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}


// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end - 1;
}


// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}


// Compiled frames

inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(x10->as_VMReg());
  guarantee(result_adr != NULL, "bad register save location");
  return (*result_adr);
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(x10->as_VMReg());
  guarantee(result_adr != NULL, "bad register save location");
  *result_adr = obj;
}

#endif // CPU_RISCV_FRAME_RISCV_INLINE_HPP
484
src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp
Normal file
@@ -0,0 +1,484 @@
/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif

#define __ masm->

void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count, RegSet saved_regs) {
  assert_cond(masm != NULL);
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
  if (!dest_uninitialized) {
    Label done;
    Address in_progress(xthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));

    // Is marking active?
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ lwu(t0, in_progress);
    } else {
      assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ lbu(t0, in_progress);
    }
    __ beqz(t0, done);

    __ push_reg(saved_regs, sp);
    if (count == c_rarg0) {
      if (addr == c_rarg1) {
        // exactly backwards!!
        __ mv(t0, c_rarg0);
        __ mv(c_rarg0, c_rarg1);
        __ mv(c_rarg1, t0);
      } else {
        __ mv(c_rarg1, count);
        __ mv(c_rarg0, addr);
      }
    } else {
      __ mv(c_rarg0, addr);
      __ mv(c_rarg1, count);
    }
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), 2);
    }
    __ pop_reg(saved_regs, sp);

    __ bind(done);
  }
}

void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register start, Register count, Register tmp, RegSet saved_regs) {
  assert_cond(masm != NULL);
  __ push_reg(saved_regs, sp);
  assert_different_registers(start, count, tmp);
  assert_different_registers(c_rarg0, count);
  __ mv(c_rarg0, start);
  __ mv(c_rarg1, count);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), 2);
  __ pop_reg(saved_regs, sp);
}

void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
                                                 Register obj,
                                                 Register pre_val,
                                                 Register thread,
                                                 Register tmp,
                                                 bool tosca_live,
                                                 bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  assert_cond(masm != NULL);
  assert(thread == xthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp, t0);
  assert(pre_val != noreg && tmp != noreg, "expecting a register");

  Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { // 4-byte width
    __ lwu(tmp, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ lbu(tmp, in_progress);
  }
  __ beqz(tmp, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ beqz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ ld(tmp, index);          // tmp := *index_adr
  __ beqz(tmp, runtime);      // tmp == 0? If yes, goto runtime

  __ sub(tmp, tmp, wordSize); // tmp := tmp - wordSize
  __ sd(tmp, index);          // *index_adr := tmp
  __ ld(t0, buffer);
  __ add(tmp, tmp, t0);       // tmp := tmp + *buffer_adr

  // Record the previous value
  __ sd(pre_val, Address(tmp, 0));
  __ j(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) { saved += RegSet::of(x10); }
  if (obj != noreg) { saved += RegSet::of(obj); }

  __ push_reg(saved, sp);

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop_reg(saved, sp);

  __ bind(done);
}
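// Shape of the SATB pre-barrier fast path above, as C-like pseudo-code
// (illustrative sketch only; names approximate the G1ThreadLocalData fields):
//
//   if (!satb_queue_active) return;
//   oop pre_val = (obj != NULL) ? *obj : pre_val;  // previous field value
//   if (pre_val == NULL) return;                   // nothing to record
//   if (index == 0) goto runtime;                  // thread-local buffer full
//   index -= wordSize;
//   *(oop*)(buffer + index) = pre_val;             // enqueue previous value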
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
                                                  Register store_addr,
                                                  Register new_val,
                                                  Register thread,
                                                  Register tmp,
                                                  Register tmp2) {
  assert_cond(masm != NULL);
  assert(thread == xthread, "must be");
  assert_different_registers(store_addr, new_val, thread, tmp, tmp2, t0);
  assert(store_addr != noreg && new_val != noreg && tmp != noreg &&
         tmp2 != noreg, "expecting a register");

  Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  BarrierSet* bs = BarrierSet::barrier_set();
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();

  Label done;
  Label runtime;

  // Does store cross heap regions?

  __ xorr(tmp, store_addr, new_val);
  __ srli(tmp, tmp, HeapRegion::LogOfHRGrainBytes);
  __ beqz(tmp, done);

  // crosses regions, storing NULL?

  __ beqz(new_val, done);

  // storing region crossing non-NULL, is card already dirty?

  ExternalAddress cardtable((address) ct->byte_map_base());
  const Register card_addr = tmp;

  __ srli(card_addr, store_addr, CardTable::card_shift);

  // get the address of the card
  __ load_byte_map_base(tmp2);
  __ add(card_addr, card_addr, tmp2);
  __ lbu(tmp2, Address(card_addr));
  __ mv(t0, (int)G1CardTable::g1_young_card_val());
  __ beq(tmp2, t0, done);

  assert((int)CardTable::dirty_card_val() == 0, "must be 0");

  __ membar(MacroAssembler::StoreLoad);

  __ lbu(tmp2, Address(card_addr));
  __ beqz(tmp2, done);

  // storing a region crossing, non-NULL oop, card is clean.
  // dirty card and log.

  __ sb(zr, Address(card_addr));

  __ ld(t0, queue_index);
  __ beqz(t0, runtime);
  __ sub(t0, t0, wordSize);
  __ sd(t0, queue_index);

  __ ld(tmp2, buffer);
  __ add(t0, tmp2, t0);
  __ sd(card_addr, Address(t0, 0));
  __ j(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(store_addr);
  __ push_reg(saved, sp);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
  __ pop_reg(saved, sp);

  __ bind(done);
}
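// The post-barrier fast path above, as C-like pseudo-code (illustrative
// sketch only; the card table maps every 2^card_shift-byte chunk of heap to
// one byte):
//
//   if (((uintptr_t)store_addr ^ (uintptr_t)new_val) >> LogOfHRGrainBytes == 0) return;
//   if (new_val == NULL) return;
//   volatile jbyte* card = byte_map_base + ((uintptr_t)store_addr >> card_shift);
//   if (*card == g1_young_card_val) return;
//   StoreLoad_fence();
//   if (*card == dirty_card_val) return;  // dirty_card_val == 0
//   *card = dirty_card_val;               // dirty the card, then enqueue it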
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    Register dst, Address src, Register tmp1, Register tmp_thread) {
  assert_cond(masm != NULL);
  bool on_oop = is_reference_type(type);
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  if (on_oop && on_reference) {
    // RA is live. It must be saved around calls.
    __ enter(); // barrier may call runtime
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    g1_write_barrier_pre(masm /* masm */,
                         noreg /* obj */,
                         dst /* pre_val */,
                         xthread /* thread */,
                         tmp1 /* tmp */,
                         true /* tosca_live */,
                         true /* expand_call */);
    __ leave();
  }
}

void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Address dst, Register val, Register tmp1, Register tmp2) {
  assert_cond(masm != NULL);
  // flatten object address if needed
  if (dst.offset() == 0) {
    if (dst.base() != x13) {
      __ mv(x13, dst.base());
    }
  } else {
    __ la(x13, dst);
  }

  g1_write_barrier_pre(masm,
                       x13 /* obj */,
                       tmp2 /* pre_val */,
                       xthread /* thread */,
                       tmp1 /* tmp */,
                       val != noreg /* tosca_live */,
                       false /* expand_call */);

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(x13, 0), noreg, noreg, noreg);
  } else {
    // G1 barrier needs uncompressed oop for region cross check.
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = t1;
      __ mv(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(x13, 0), val, noreg, noreg);
    g1_write_barrier_post(masm,
                          x13 /* store_adr */,
                          new_val /* new_val */,
                          xthread /* thread */,
                          tmp1 /* tmp */,
                          tmp2 /* tmp2 */);
  }
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();

  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /* wide */, false /* unaligned */);
  }
  __ beqz(pre_val_reg, *stub->continuation(), /* is_far */ true);
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ j(*stub->continuation());
}

void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  assert(stub->addr()->is_register(), "Precondition");
  assert(stub->new_val()->is_register(), "Precondition");
  Register new_val_reg = stub->new_val()->as_register();
  __ beqz(new_val_reg, *stub->continuation(), /* is_far */ true);
  ce->store_parameter(stub->addr()->as_pointer_register(), 0);
  __ far_call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
  __ j(*stub->continuation());
}

#undef __

#define __ sasm->

void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("g1_pre_barrier", false);

  BarrierSet* bs = BarrierSet::barrier_set();

  // arg0 : previous value of memory
  const Register pre_val = x10;
  const Register thread = xthread;
  const Register tmp = t0;

  Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { // 4-byte width
    __ lwu(tmp, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ lbu(tmp, in_progress);
  }
  __ beqz(tmp, done);

  // Can we store original value in the thread's buffer?
  __ ld(tmp, queue_index);
  __ beqz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ sd(tmp, queue_index);
  __ ld(t1, buffer);
  __ add(tmp, tmp, t1);
  __ load_parameter(0, t1);
  __ sd(t1, Address(tmp, 0));
  __ j(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("g1_post_barrier", false);

  // arg0 : store_address
  Address store_addr(fp, 2 * BytesPerWord); // 2 BytesPerWord from fp

  BarrierSet* bs = BarrierSet::barrier_set();
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();

  Label done;
  Label runtime;

  // At this point we know new_value is non-NULL and the new_value crosses regions.
  // Must check to see if card is already dirty
  const Register thread = xthread;

  Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  const Register card_offset = t1;
  // RA is free here, so we can use it to hold the byte_map_base.
  const Register byte_map_base = ra;

  assert_different_registers(card_offset, byte_map_base, t0);

  __ load_parameter(0, card_offset);
  __ srli(card_offset, card_offset, CardTable::card_shift);
  __ load_byte_map_base(byte_map_base);

  // Convert card offset into an address in card_addr
  Register card_addr = card_offset;
  __ add(card_addr, byte_map_base, card_addr);

  __ lbu(t0, Address(card_addr, 0));
  __ sub(t0, t0, (int)G1CardTable::g1_young_card_val());
  __ beqz(t0, done);

  assert((int)CardTable::dirty_card_val() == 0, "must be 0");

  __ membar(MacroAssembler::StoreLoad);
  __ lbu(t0, Address(card_addr, 0));
  __ beqz(t0, done);

  // storing region crossing non-NULL, card is clean.
  // dirty card and log.
  __ sb(zr, Address(card_addr, 0));

  __ ld(t0, queue_index);
  __ beqz(t0, runtime);
  __ sub(t0, t0, wordSize);
  __ sd(t0, queue_index);

  // Reuse RA to hold buffer_addr
  const Register buffer_addr = ra;

  __ ld(buffer_addr, buffer);
  __ add(t0, buffer_addr, t0);
  __ sd(card_addr, Address(t0, 0));
  __ j(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);
  __ epilogue();
}

#undef __

#endif // COMPILER1
78
src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.hpp
Normal file
@@ -0,0 +1,78 @@
/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_GC_G1_G1BARRIERSETASSEMBLER_RISCV_HPP
#define CPU_RISCV_GC_G1_G1BARRIERSETASSEMBLER_RISCV_HPP

#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
#include "utilities/macros.hpp"

#ifdef COMPILER1
class LIR_Assembler;
#endif
class StubAssembler;
class G1PreBarrierStub;
class G1PostBarrierStub;

class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
 protected:
  void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                       Register addr, Register count, RegSet saved_regs);
  void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                        Register start, Register count, Register tmp, RegSet saved_regs);

  void g1_write_barrier_pre(MacroAssembler* masm,
                            Register obj,
                            Register pre_val,
                            Register thread,
                            Register tmp,
                            bool tosca_live,
                            bool expand_call);

  void g1_write_barrier_post(MacroAssembler* masm,
                             Register store_addr,
                             Register new_val,
                             Register thread,
                             Register tmp,
                             Register tmp2);

  virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                            Address dst, Register val, Register tmp1, Register tmp2);

 public:
#ifdef COMPILER1
  void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
  void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);

  void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
  void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
#endif

  void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
               Register dst, Address src, Register tmp1, Register tmp_thread);
};

#endif // CPU_RISCV_GC_G1_G1BARRIERSETASSEMBLER_RISCV_HPP
31
src/hotspot/cpu/riscv/gc/g1/g1Globals_riscv.hpp
Normal file
@@ -0,0 +1,31 @@
/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_GC_G1_G1GLOBALS_RISCV_HPP
#define CPU_RISCV_GC_G1_G1GLOBALS_RISCV_HPP

const size_t G1MergeHeapRootsPrefetchCacheSize = 16;

#endif // CPU_RISCV_GC_G1_G1GLOBALS_RISCV_HPP
302
src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp
Normal file
@@ -0,0 +1,302 @@
/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"

#define __ masm->

void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp_thread) {
  assert_cond(masm != NULL);

  // RA is live. It must be saved around calls.

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  switch (type) {
    case T_OBJECT: // fall through
    case T_ARRAY: {
      if (in_heap) {
        if (UseCompressedOops) {
          __ lwu(dst, src);
          if (is_not_null) {
            __ decode_heap_oop_not_null(dst);
          } else {
            __ decode_heap_oop(dst);
          }
        } else {
          __ ld(dst, src);
        }
      } else {
        assert(in_native, "why else?");
        __ ld(dst, src);
      }
      break;
    }
    case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
    case T_BYTE:    __ load_signed_byte   (dst, src); break;
    case T_CHAR:    __ load_unsigned_short(dst, src); break;
    case T_SHORT:   __ load_signed_short  (dst, src); break;
    case T_INT:     __ lw                 (dst, src); break;
    case T_LONG:    __ ld                 (dst, src); break;
    case T_ADDRESS: __ ld                 (dst, src); break;
    case T_FLOAT:   __ flw                (f10, src); break;
    case T_DOUBLE:  __ fld                (f10, src); break;
    default: Unimplemented();
  }
}

void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2) {
  assert_cond(masm != NULL);
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
    case T_OBJECT: // fall through
    case T_ARRAY: {
      val = val == noreg ? zr : val;
      if (in_heap) {
        if (UseCompressedOops) {
          assert(!dst.uses(val), "not enough registers");
          if (val != zr) {
            __ encode_heap_oop(val);
          }
          __ sw(val, dst);
        } else {
          __ sd(val, dst);
        }
      } else {
        assert(in_native, "why else?");
        __ sd(val, dst);
      }
      break;
    }
    case T_BOOLEAN:
      __ andi(val, val, 0x1); // boolean is true if LSB is 1
      __ sb(val, dst);
      break;
    case T_BYTE:    __ sb(val, dst);  break;
    case T_CHAR:    __ sh(val, dst);  break;
    case T_SHORT:   __ sh(val, dst);  break;
    case T_INT:     __ sw(val, dst);  break;
    case T_LONG:    __ sd(val, dst);  break;
    case T_ADDRESS: __ sd(val, dst);  break;
    case T_FLOAT:   __ fsw(f10, dst); break;
    case T_DOUBLE:  __ fsd(f10, dst); break;
    default: Unimplemented();
  }
}

void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  assert_cond(masm != NULL);
  // If mask changes we need to ensure that the inverse is still encodable as an immediate
  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
  __ andi(obj, obj, ~JNIHandles::weak_tag_mask);
  __ ld(obj, Address(obj, 0)); // *obj
}

// Defines obj, preserves var_size_in_bytes, okay for tmp2 == var_size_in_bytes.
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register tmp1,
                                        Register tmp2,
                                        Label& slow_case,
                                        bool is_far) {
  assert_cond(masm != NULL);
  assert_different_registers(obj, tmp2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = tmp2;

  __ ld(obj, Address(xthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ la(end, Address(obj, con_size_in_bytes));
  } else {
    __ add(end, obj, var_size_in_bytes);
  }
  __ ld(t0, Address(xthread, JavaThread::tlab_end_offset()));
  __ bgtu(end, t0, slow_case, is_far);

  // update the tlab top pointer
  __ sd(end, Address(xthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
}
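// TLAB fast path above, as C-like pseudo-code (illustrative sketch only):
//
//   HeapWord* obj = thread->tlab_top();
//   HeapWord* end = obj + size;                    // size in bytes
//   if (end > thread->tlab_end()) goto slow_case;  // unsigned compare (bgtu)
//   thread->set_tlab_top(end);                     // obj is the new object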
// Defines obj, preserves var_size_in_bytes
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register tmp1,
                                        Label& slow_case,
                                        bool is_far) {
  assert_cond(masm != NULL);
  assert_different_registers(obj, var_size_in_bytes, tmp1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ j(slow_case);
  } else {
    Register end = tmp1;
    Label retry;
    __ bind(retry);

    // Get the current end of the heap
    ExternalAddress address_end((address) Universe::heap()->end_addr());
    {
      int32_t offset;
      __ la_patchable(t1, address_end, offset);
      __ ld(t1, Address(t1, offset));
    }

    // Get the current top of the heap
    ExternalAddress address_top((address) Universe::heap()->top_addr());
    {
      int32_t offset;
      __ la_patchable(t0, address_top, offset);
      __ addi(t0, t0, offset);
      __ lr_d(obj, t0, Assembler::aqrl);
    }

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      __ la(end, Address(obj, con_size_in_bytes));
    } else {
      __ add(end, obj, var_size_in_bytes);
    }

    // if end < obj then we wrapped around high memory
    __ bltu(end, obj, slow_case, is_far);

    __ bgtu(end, t1, slow_case, is_far);

    // If heap_top hasn't been changed by some other thread, update it.
    __ sc_d(t1, end, t0, Assembler::rl);
    __ bnez(t1, retry);

    incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, tmp1);
  }
}
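// The eden allocation above is a classic LR/SC retry loop; in C11-atomics
// terms it is roughly (illustrative sketch only):
//
//   do {
//     obj = atomic_load(heap_top_addr);
//     end = obj + size;
//     if (end < obj || end > heap_end) goto slow_case;  // wrap or overflow
//   } while (!atomic_compare_exchange_weak(heap_top_addr, &obj, end));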
void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register tmp1) {
  assert_cond(masm != NULL);
  assert(tmp1->is_valid(), "need temp reg");

  __ ld(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    __ add(tmp1, tmp1, var_size_in_bytes);
  } else {
    __ add(tmp1, tmp1, con_size_in_bytes);
  }
  __ sd(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset())));
}

void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  if (bs_nm == NULL) {
    return;
  }

  // RISCV atomic operations require that the memory address be naturally aligned.
  __ align(4);

  Label skip, guard;
  Address thread_disarmed_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset()));

  __ lwu(t0, guard);

  // Subsequent loads of oops must occur after load of guard value.
  // BarrierSetNMethod::disarm sets guard with release semantics.
  __ membar(MacroAssembler::LoadLoad);
  __ lwu(t1, thread_disarmed_addr);
  __ beq(t0, t1, skip);

  int32_t offset = 0;
  __ movptr_with_offset(t0, StubRoutines::riscv::method_entry_barrier(), offset);
  __ jalr(ra, t0, offset);
  __ j(skip);

  __ bind(guard);

  assert(__ offset() % 4 == 0, "bad alignment");
  __ emit_int32(0); // nmethod guard value. Skipped over in common case.

  __ bind(skip);
}
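// The nmethod entry barrier above, in pseudo-code (illustrative sketch only):
//
//   int guard = *guard_addr;                 // word embedded in the code blob
//   load_load_fence();                       // pairs with the releasing disarm
//   if (guard != thread_disarmed_value) {
//     StubRoutines::riscv::method_entry_barrier();  // slow path, may patch guard
//   }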
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs == NULL) {
    return;
  }

  Label bad_call;
  __ beqz(xmethod, bad_call);

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(t0, xmethod);

  // Is it a strong CLD?
  __ lwu(t1, Address(t0, ClassLoaderData::keep_alive_offset()));
  __ bnez(t1, method_live);

  // Is it a weak but alive CLD?
  __ push_reg(RegSet::of(x28, x29), sp);

  __ ld(x28, Address(t0, ClassLoaderData::holder_offset()));

  // Uses x28 & x29, so we must pass new temporaries.
  __ resolve_weak_handle(x28, x29);
  __ mv(t0, x28);

  __ pop_reg(RegSet::of(x28, x29), sp);

  __ bnez(t0, method_live);

  __ bind(bad_call);

  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(method_live);
}
79
src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.hpp
Normal file
@@ -0,0 +1,79 @@
/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_GC_SHARED_BARRIERSETASSEMBLER_RISCV_HPP
#define CPU_RISCV_GC_SHARED_BARRIERSETASSEMBLER_RISCV_HPP

#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"

class BarrierSetAssembler: public CHeapObj<mtGC> {
 private:
  void incr_allocated_bytes(MacroAssembler* masm,
                            Register var_size_in_bytes, int con_size_in_bytes,
                            Register t1 = noreg);

 public:
  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                  Register src, Register dst, Register count, RegSet saved_regs) {}
  virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                  Register start, Register end, Register tmp, RegSet saved_regs) {}
  virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                       Register dst, Address src, Register tmp1, Register tmp_thread);
  virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                        Address dst, Register val, Register tmp1, Register tmp2);

  virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                             Register obj, Register tmp, Label& slowpath);

  virtual void tlab_allocate(MacroAssembler* masm,
                             Register obj,              // result: pointer to object after successful allocation
                             Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
                             int con_size_in_bytes,     // object size in bytes if known at compile time
                             Register tmp1,             // temp register
                             Register tmp2,             // temp register
                             Label& slow_case,          // continuation point if fast allocation fails
                             bool is_far = false
  );

  void eden_allocate(MacroAssembler* masm,
                     Register obj,               // result: pointer to object after successful allocation
                     Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
                     int con_size_in_bytes,      // object size in bytes if known at compile time
                     Register tmp1,              // temp register
                     Label& slow_case,           // continuation point if fast allocation fails
                     bool is_far = false
  );
  virtual void barrier_stubs_init() {}

  virtual void nmethod_entry_barrier(MacroAssembler* masm);
  virtual void c2i_entry_barrier(MacroAssembler* masm);
  virtual ~BarrierSetAssembler() {}
};

#endif // CPU_RISCV_GC_SHARED_BARRIERSETASSEMBLER_RISCV_HPP
171
src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp
Normal file
@@ -0,0 +1,171 @@
/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

class NativeNMethodBarrier: public NativeInstruction {
  address instruction_address() const { return addr_at(0); }

  int *guard_addr() {
    /* auipc + lwu + fence + lwu + beq + lui + addi + slli + addi + slli + jalr + j */
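    /* That is 12 instructions of 4 bytes each, so the guard word sits 48 bytes
       past the barrier's first instruction. */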
    return reinterpret_cast<int*>(instruction_address() + 12 * 4);
  }

public:
  int get_value() {
    return Atomic::load_acquire(guard_addr());
  }

  void set_value(int value) {
    Atomic::release_store(guard_addr(), value);
  }

  void verify() const;
};

// Store the instruction bitmask, bits and name for checking the barrier.
struct CheckInsn {
  uint32_t mask;
  uint32_t bits;
  const char *name;
};

static const struct CheckInsn barrierInsn[] = {
  { 0x00000fff, 0x00000297, "auipc  t0, 0           "},
  { 0x000fffff, 0x0002e283, "lwu    t0, 48(t0)      "},
  { 0xffffffff, 0x0aa0000f, "fence  ir, ir          "},
  { 0x000fffff, 0x000be303, "lwu    t1, 112(xthread)"},
  { 0x01fff07f, 0x00628063, "beq    t0, t1, skip    "},
  { 0x00000fff, 0x000002b7, "lui    t0, imm0        "},
  { 0x000fffff, 0x00028293, "addi   t0, t0, imm1    "},
  { 0xffffffff, 0x00b29293, "slli   t0, t0, 11      "},
  { 0x000fffff, 0x00028293, "addi   t0, t0, imm2    "},
  { 0xffffffff, 0x00529293, "slli   t0, t0, 5       "},
  { 0x000fffff, 0x000280e7, "jalr   ra, imm3(t0)    "},
  { 0x00000fff, 0x0000006f, "j      skip            "}
  /* guard: */
  /* 32bit nmethod guard value */
  /* skip: */
};

// The encodings must match the instructions emitted by
// BarrierSetAssembler::nmethod_entry_barrier. The matching ignores the specific
// register numbers and immediate values in the encoding.
void NativeNMethodBarrier::verify() const {
  intptr_t addr = (intptr_t) instruction_address();
  for (unsigned int i = 0; i < sizeof(barrierInsn) / sizeof(struct CheckInsn); i++) {
    uint32_t inst = *((uint32_t*) addr);
    if ((inst & barrierInsn[i].mask) != barrierInsn[i].bits) {
      tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", addr, inst);
      fatal("not an %s instruction.", barrierInsn[i].name);
    }
    addr += 4;
  }
}


/* We're called from an nmethod when we need to deoptimize it. We do
   this by throwing away the nmethod's frame and jumping to the
   ic_miss stub. This looks like there has been an IC miss at the
   entry of the nmethod, so we resolve the call, which will fall back
   to the interpreter if the nmethod has been unloaded. */
void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {

  typedef struct {
    intptr_t *sp; intptr_t *fp; address ra; address pc;
  } frame_pointers_t;

  frame_pointers_t *new_frame = (frame_pointers_t *)(return_address_ptr - 5);
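  // Note (assumption): this overlays stack words just below the saved return
  // address, which the generator of StubRoutines::riscv::method_entry_barrier()
  // is expected to have reserved; filling in {sp, fp, ra, pc} below redirects
  // the returning frame into the handle_wrong_method stub.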

  JavaThread *thread = JavaThread::current();
  RegisterMap reg_map(thread, false);
  frame frame = thread->last_frame();

  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
  assert(frame.cb() == nm, "must be");
  frame = frame.sender(&reg_map);

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->ra = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}

// This is the offset of the entry barrier from where the frame is completed.
// If any code changes between the end of the verified entry where the entry
// barrier resides, and the completion of the frame, then
// NativeNMethodBarrier::verify() will immediately complain when it does
// not find the expected native instruction at this offset, and the offset
// will need updating.
// Note that this offset is invariant of PreserveFramePointer.

// see BarrierSetAssembler::nmethod_entry_barrier
// auipc + lwu + fence + lwu + beq + movptr_with_offset(5 instructions) + jalr + j + int32
static const int entry_barrier_offset = -4 * 13;
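// 13 words of 4 bytes each: the 12 matched instructions plus the trailing
// 32-bit guard value, i.e. the barrier begins 52 bytes before frame_complete_offset.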

static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) {
  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset;
  NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
  debug_only(barrier->verify());
  return barrier;
}

void BarrierSetNMethod::disarm(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  // Disarms the nmethod guard emitted by BarrierSetAssembler::nmethod_entry_barrier.
  NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);

  barrier->set_value(disarmed_value());
}

bool BarrierSetNMethod::is_armed(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return false;
  }

  NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
  return barrier->get_value() != disarmed_value();
}
@@ -0,0 +1,111 @@
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableBarrierSetAssembler.hpp"
#include "gc/shared/gc_globals.hpp"
#include "interpreter/interp_masm.hpp"

#define __ masm->


void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Register tmp) {
  assert_cond(masm != NULL);
  assert_different_registers(obj, tmp);
  BarrierSet* bs = BarrierSet::barrier_set();
  assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind");

  __ srli(obj, obj, CardTable::card_shift);

  assert(CardTable::dirty_card_val() == 0, "must be");

  __ load_byte_map_base(tmp);
  __ add(tmp, obj, tmp);
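  // tmp now holds the address of the card covering the store:
  // &byte_map_base[addr >> card_shift].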

  if (UseCondCardMark) {
    Label L_already_dirty;
    __ membar(MacroAssembler::StoreLoad);
    __ lbu(t1, Address(tmp));
    __ beqz(t1, L_already_dirty);
    __ sb(zr, Address(tmp));
    __ bind(L_already_dirty);
  } else {
    __ sb(zr, Address(tmp));
  }
}

void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                    Register start, Register count, Register tmp, RegSet saved_regs) {
  assert_cond(masm != NULL);
  assert_different_registers(start, tmp);
  assert_different_registers(count, tmp);

  Label L_loop, L_done;
  const Register end = count;

  __ beqz(count, L_done); // zero count - nothing to do
  // end = start + count << LogBytesPerHeapOop
  __ shadd(end, count, start, count, LogBytesPerHeapOop);
  __ sub(end, end, BytesPerHeapOop); // last element address to make inclusive

  __ srli(start, start, CardTable::card_shift);
  __ srli(end, end, CardTable::card_shift);
  __ sub(count, end, start); // index of the last card to dirty; the loop below counts down inclusively

  __ load_byte_map_base(tmp);
  __ add(start, start, tmp);

  __ bind(L_loop);
  __ add(tmp, start, count);
  __ sb(zr, Address(tmp));
  __ sub(count, count, 1);
  __ bgez(count, L_loop);
  __ bind(L_done);
}

void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                Address dst, Register val, Register tmp1, Register tmp2) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool is_array = (decorators & IS_ARRAY) != 0;
  bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool precise = is_array || on_anonymous;

  bool needs_post_barrier = val != noreg && in_heap;
  BarrierSetAssembler::store_at(masm, decorators, type, dst, val, noreg, noreg);
  if (needs_post_barrier) {
    // flatten object address if needed
    if (!precise || dst.offset() == 0) {
      store_check(masm, dst.base(), x13);
    } else {
      assert_cond(masm != NULL);
      __ la(x13, dst);
      store_check(masm, x13, t0);
    }
  }
}
@@ -0,0 +1,42 @@
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_RISCV_HPP
#define CPU_RISCV_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_RISCV_HPP

#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"

class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
  void store_check(MacroAssembler* masm, Register obj, Register tmp);

  virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                Register start, Register count, Register tmp, RegSet saved_regs);
  virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                            Address dst, Register val, Register tmp1, Register tmp2);
};

#endif // CPU_RISCV_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_RISCV_HPP
@@ -0,0 +1,55 @@
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"

#define __ masm->

void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                   Register src, Register dst, Register count, RegSet saved_regs) {

  if (is_oop) {
    gen_write_ref_array_pre_barrier(masm, decorators, dst, count, saved_regs);
  }
}

void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                   Register start, Register count, Register tmp,
                                                   RegSet saved_regs) {
  if (is_oop) {
    gen_write_ref_array_post_barrier(masm, decorators, start, count, tmp, saved_regs);
  }
}

void ModRefBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Address dst, Register val, Register tmp1, Register tmp2) {
  if (is_reference_type(type)) {
    oop_store_at(masm, decorators, type, dst, val, tmp1, tmp2);
  } else {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
  }
}
@@ -0,0 +1,55 @@
/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_GC_SHARED_MODREFBARRIERSETASSEMBLER_RISCV_HPP
#define CPU_RISCV_GC_SHARED_MODREFBARRIERSETASSEMBLER_RISCV_HPP

#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"

// The ModRefBarrierSetAssembler filters away accesses on BasicTypes other
// than T_OBJECT/T_ARRAY (oops). The oop accesses call one of the protected
// accesses, which are overridden in the concrete BarrierSetAssembler.

class ModRefBarrierSetAssembler: public BarrierSetAssembler {
protected:
  virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                               Register addr, Register count, RegSet saved_regs) {}
  virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                Register start, Register count, Register tmp, RegSet saved_regs) {}

  virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                            Address dst, Register val, Register tmp1, Register tmp2) = 0;

public:
  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                  Register src, Register dst, Register count, RegSet saved_regs);
  virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                  Register start, Register count, Register tmp, RegSet saved_regs);
  virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                        Address dst, Register val, Register tmp1, Register tmp2);
};

#endif // CPU_RISCV_GC_SHARED_MODREFBARRIERSETASSEMBLER_RISCV_HPP
@@ -0,0 +1,117 @@
/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"

#define __ masm->masm()->

void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
  Register addr = _addr->as_register_lo();
  Register newval = _new_value->as_register();
  Register cmpval = _cmp_value->as_register();
  Register tmp1 = _tmp1->as_register();
  Register tmp2 = _tmp2->as_register();
  Register result = result_opr()->as_register();

  ShenandoahBarrierSet::assembler()->iu_barrier(masm->masm(), newval, t1);

  if (UseCompressedOops) {
    __ encode_heap_oop(tmp1, cmpval);
    cmpval = tmp1;
    __ encode_heap_oop(tmp2, newval);
    newval = tmp2;
  }
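  // With compressed oops enabled the CAS below operates on 32-bit narrow oops,
  // so both the compare value and the new value are encoded up front.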

  ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /* acquire */ Assembler::aq,
                                                 /* release */ Assembler::rl, /* is_cae */ false, result);
}

#undef __

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
  BasicType bt = access.type();
  if (access.is_oop()) {
    LIRGenerator *gen = access.gen();
    if (ShenandoahSATBBarrier) {
      pre_barrier(gen, access.access_emit_info(), access.decorators(), access.resolved_addr(),
                  LIR_OprFact::illegalOpr /* pre_val */);
    }
    if (ShenandoahCASBarrier) {
      cmp_value.load_item();
      new_value.load_item();

      LIR_Opr tmp1 = gen->new_register(T_OBJECT);
      LIR_Opr tmp2 = gen->new_register(T_OBJECT);
      LIR_Opr addr = access.resolved_addr()->as_address_ptr()->base();
      LIR_Opr result = gen->new_register(T_INT);

      __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), tmp1, tmp2, result));
      return result;
    }
  }
  return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
}

LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
  LIRGenerator* gen = access.gen();
  BasicType type = access.type();

  LIR_Opr result = gen->new_register(type);
  value.load_item();
  LIR_Opr value_opr = value.result();

  if (access.is_oop()) {
    value_opr = iu_barrier(access.gen(), value_opr, access.access_emit_info(), access.decorators());
  }

  assert(type == T_INT || is_reference_type(type) LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = gen->new_register(T_INT);
  __ xchg(access.resolved_addr(), value_opr, result, tmp);

  if (access.is_oop()) {
    result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0), access.decorators());
    LIR_Opr tmp_opr = gen->new_register(type);
    __ move(result, tmp_opr);
    result = tmp_opr;
    if (ShenandoahSATBBarrier) {
      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
                  result /* pre_val */);
    }
  }

  return result;
}
@@ -0,0 +1,712 @@
/*
 * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register src, Register dst, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahIUBarrier || ShenandoahLoadRefBarrier) {

      Label done;

      // Avoid calling runtime if count == 0
      __ beqz(count, done);

      // Is GC active?
      Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      assert_different_registers(src, dst, count, t0);

      __ lbu(t0, gc_state);
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        __ andi(t0, t0, ShenandoahHeap::HAS_FORWARDED);
        __ beqz(t0, done);
      } else {
        __ andi(t0, t0, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
        __ beqz(t0, done);
      }
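      // For an uninitialized destination no SATB (previous-value) work is
      // needed, so the runtime call is only taken while old copies may still
      // be forwarded; otherwise it must also run during concurrent marking.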

      __ push_reg(saved_regs, sp);
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry),
                        src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count);
      }
      __ pop_reg(saved_regs, sp);
      __ bind(done);
    }
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {
  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
  assert(thread == xthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp, t0);
  assert(pre_val != noreg && tmp != noreg, "expecting a register");

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ lwu(tmp, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ lbu(tmp, in_progress);
  }
  __ beqz(tmp, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ beqz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
  __ ld(tmp, index);     // tmp := *index_adr
  __ beqz(tmp, runtime); // tmp == 0? If yes, goto runtime

  __ sub(tmp, tmp, wordSize); // tmp := tmp - wordSize
  __ sd(tmp, index);          // *index_adr := tmp
  __ ld(t0, buffer);
  __ add(tmp, tmp, t0); // tmp := tmp + *buffer_adr
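  // tmp now points at the slot just claimed in the thread-local SATB buffer.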

  // Record the previous value
  __ sd(pre_val, Address(tmp, 0));
  __ j(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(x10);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push_reg(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then the frame pointer might be
  // pointing to the caller frame and so this check will most likely fail
  // at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.
  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop_reg(saved, sp);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");

  Label is_null;
  __ beqz(dst, is_null);
  resolve_forward_pointer_not_null(masm, dst, tmp);
  __ bind(is_null);
}

// IMPORTANT: This must preserve all registers, even t0 and t1, except those explicitly
// passed in.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  // The below loads the mark word, checks if the lowest two bits are
  // set, and if so, clear the lowest two bits and copy the result
  // to dst. Otherwise it leaves dst alone.
  // Implementing this is surprisingly awkward. I do it here by:
  // - Inverting the mark word
  // - Test lowest two bits == 0
  // - If so, set the lowest two bits
  // - Invert the result back, and copy to dst
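  // Worked example: a forwarded mark word ends in 0b11, so ~mark ends in 0b00
  // and the branch below falls through; (~mark | 0b11) inverted back yields
  // mark & ~0b11, i.e. the forwardee address with the tag bits cleared.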
  RegSet saved_regs = RegSet::of(t2);
  bool borrow_reg = (tmp == noreg);
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = t0;
    if (tmp == dst) {
      tmp = t1;
    }
    saved_regs += RegSet::of(tmp);
  }

  assert_different_registers(tmp, dst, t2);
  __ push_reg(saved_regs, sp);

  Label done;
  __ ld(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ xori(tmp, tmp, -1); // eon with 0 is equivalent to XOR with -1
  __ andi(t2, tmp, markWord::lock_mask_in_place);
  __ bnez(t2, done);
  __ ori(tmp, tmp, markWord::marked_value);
  __ xori(dst, tmp, -1); // eon with 0 is equivalent to XOR with -1
  __ bind(done);

  __ pop_reg(saved_regs, sp);
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
                                                           Register dst,
                                                           Address load_addr,
                                                           DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
  assert(dst != t1 && load_addr.base() != t1, "need t1");
  assert_different_registers(load_addr.base(), t0, t1);

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;
  __ enter();
  Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ lbu(t1, gc_state);

  // Check for heap stability
  if (is_strong) {
    __ andi(t1, t1, ShenandoahHeap::HAS_FORWARDED);
    __ beqz(t1, heap_stable);
  } else {
    Label lrb;
    __ andi(t0, t1, ShenandoahHeap::WEAK_ROOTS);
    __ bnez(t0, lrb);
    __ andi(t0, t1, ShenandoahHeap::HAS_FORWARDED);
    __ beqz(t0, heap_stable);
    __ bind(lrb);
  }

  // use x11 for load address
  Register result_dst = dst;
  if (dst == x11) {
    __ mv(t1, dst);
    dst = t1;
  }

  // Save x10 and x11, unless it is an output register
  RegSet saved_regs = RegSet::of(x10, x11) - result_dst;
  __ push_reg(saved_regs, sp);
  __ la(x11, load_addr);
  __ mv(x10, dst);

  // Test for in-cset
  if (is_strong) {
    __ li(t1, (uint64_t)ShenandoahHeap::in_cset_fast_test_addr());
    __ srli(t0, x10, ShenandoahHeapRegion::region_size_bytes_shift_jint());
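    // t0 := heap-region index of the oop; the collection-set map holds one
    // byte per region.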
    __ add(t1, t1, t0);
    __ lbu(t1, Address(t1));
    __ andi(t0, t1, 1);
    __ beqz(t0, not_cset);
  }

  __ push_call_clobbered_registers();
  if (is_strong) {
    if (is_narrow) {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_strong_narrow);
    } else {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_strong);
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_phantom);
  }
  __ jalr(ra);
  __ mv(t0, x10);
  __ pop_call_clobbered_registers();
  __ mv(x10, t0);
  __ bind(not_cset);
  __ mv(result_dst, x10);
  __ pop_reg(saved_regs, sp);

  __ bind(heap_stable);
  __ leave();
}

void ShenandoahBarrierSetAssembler::iu_barrier(MacroAssembler* masm, Register dst, Register tmp) {
  if (ShenandoahIUBarrier) {
    __ push_call_clobbered_registers();

    satb_write_barrier_pre(masm, noreg, dst, xthread, tmp, true, false);

    __ pop_call_clobbered_registers();
  }
}

//
// Arguments:
//
// Inputs:
//   src: oop location to load from, might be clobbered
//
// Output:
//   dst: oop loaded from src location
//
// Kill:
//   x30 (tmp reg)
//
// Alias:
//   dst: x30 (might use x30 as temporary output register to avoid clobbering src)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm,
                                            DecoratorSet decorators,
                                            BasicType type,
                                            Register dst,
                                            Address src,
                                            Register tmp1,
                                            Register tmp_thread) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    return;
  }

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;

    // Preserve src location for LRB
    RegSet saved_regs;
    if (dst == src.base()) {
      dst = (src.base() == x28) ? x29 : x28;
      saved_regs = RegSet::of(dst);
      __ push_reg(saved_regs, sp);
    }
    assert_different_registers(dst, src.base());

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);

    load_reference_barrier(masm, dst, src, decorators);

    if (dst != result_dst) {
      __ mv(result_dst, dst);
      dst = result_dst;
    }

    if (saved_regs.bits() != 0) {
      __ pop_reg(saved_regs, sp);
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    __ enter();
    __ push_call_clobbered_registers();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           xthread /* thread */,
                           tmp1 /* tmp */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ pop_call_clobbered_registers();
    __ leave();
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2) {
  bool on_oop = is_reference_type(type);
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
    return;
  }

  // flatten object address if needed
  if (dst.offset() == 0) {
    if (dst.base() != x13) {
      __ mv(x13, dst.base());
    }
  } else {
    __ la(x13, dst);
  }

  shenandoah_write_barrier_pre(masm,
                               x13 /* obj */,
                               tmp2 /* pre_val */,
                               xthread /* thread */,
                               tmp1 /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(x13, 0), noreg, noreg, noreg);
  } else {
    iu_barrier(masm, val, tmp1);
    // G1 barrier needs uncompressed oop for region cross check.
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = t1;
      __ mv(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(x13, 0), val, noreg, noreg);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ beqz(obj, done);

  assert(obj != t1, "need t1");
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
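  // jni_env lives inside the JavaThread, so gc_state is reachable from it at a
  // fixed offset without materializing the thread pointer.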
  __ lbu(t1, gc_state);

  // Check for heap in evacuation phase
  __ andi(t0, t1, ShenandoahHeap::EVACUATION);
  __ bnez(t0, slowpath);

  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. The service is more complex than a
// traditional CAS operation because the CAS operation is intended to
// succeed if the reference at addr exactly matches expected or if the
// reference at addr holds a pointer to a from-space object that has
// been relocated to the location named by expected. There are two
// races that must be addressed:
// a) A parallel thread may mutate the contents of addr so that it points
//    to a different object. In this case, the CAS operation should fail.
// b) A parallel thread may heal the contents of addr, replacing a
//    from-space pointer held in addr with the to-space pointer
//    representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals NULL
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.
//
// By default the value held in the result register following execution
// of the generated code sequence is 0 to indicate failure of CAS,
// non-zero to indicate success. If is_cae, the result is the value most
// recently fetched from addr rather than a boolean success indicator.
//
// Clobbers t0, t1
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register addr,
                                                Register expected,
                                                Register new_val,
                                                Assembler::Aqrl acquire,
                                                Assembler::Aqrl release,
                                                bool is_cae,
                                                Register result) {
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::uint32 : Assembler::int64;

  assert_different_registers(addr, expected, t0, t1);
  assert_different_registers(addr, new_val, t0, t1);

  Label retry, success, fail, done;

  __ bind(retry);

  // Step 1: Try to CAS.
  __ cmpxchg(addr, expected, new_val, size, acquire, release, /* result */ t1);

  // If success, then we are done.
  __ beq(expected, t1, success);

  // Step 2: CAS failed, check the forwarded pointer.
  __ mv(t0, t1);

  if (is_narrow) {
    __ decode_heap_oop(t0, t0);
  }
  resolve_forward_pointer(masm, t0);

  __ encode_heap_oop(t0, t0);

  // Report failure when the forwarded oop was not expected.
  __ bne(t0, expected, fail);

  // Step 3: CAS again using the forwarded oop.
  __ cmpxchg(addr, t1, new_val, size, acquire, release, /* result */ t0);

  // Retry when failed.
  __ bne(t0, t1, retry);

  __ bind(success);
  if (is_cae) {
    __ mv(result, expected);
  } else {
    __ addi(result, zr, 1);
  }
  __ j(done);

  __ bind(fail);
  if (is_cae) {
    __ mv(result, t0);
  } else {
    __ mv(result, zr);
  }

  __ bind(done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /* wide */, false /* unaligned */);
  }
  __ beqz(pre_val_reg, *stub->continuation(), /* is_far */ true);
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ j(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce,
                                                                    ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();

  assert(res == x10, "result must arrive in x10");
  assert_different_registers(tmp1, tmp2, t0);

  if (res != obj) {
    __ mv(res, obj);
  }

  if (is_strong) {
    // Check for object in cset.
    __ mv(tmp2, ShenandoahHeap::in_cset_fast_test_addr());
    __ srli(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ add(tmp2, tmp2, tmp1);
    __ lbu(tmp2, Address(tmp2));
    __ beqz(tmp2, *stub->continuation(), true /* is_far */);
  }

  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);

  if (is_strong) {
    if (is_native) {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ far_call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }

  __ j(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = x10;
  const Register thread = xthread;
  const Register tmp = t0;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ lb(tmp, gc_state);
  __ andi(tmp, tmp, ShenandoahHeap::MARKING);
  __ beqz(tmp, done);

  // Can we store original value in the thread's buffer?
  __ ld(tmp, queue_index);
  __ beqz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ sd(tmp, queue_index);
  __ ld(t1, buffer);
  __ add(tmp, tmp, t1);
  __ load_parameter(0, t1);
  __ sd(t1, Address(tmp, 0));
  __ j(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm,
                                                                                    DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ push_call_clobbered_registers();
  __ load_parameter(0, x10);
  __ load_parameter(1, x11);

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
  if (is_strong) {
    if (is_native) {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_strong);
    } else {
      if (UseCompressedOops) {
        __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_strong_narrow);
      } else {
        __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_strong);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_phantom);
  }
  __ jalr(ra);
  __ mv(t0, x10);
  __ pop_call_clobbered_registers();
  __ mv(x10, t0);

  __ epilogue();
}

#undef __

#endif // COMPILER1
@@ -0,0 +1,88 @@
/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_RISCV_HPP
#define CPU_RISCV_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_RISCV_HPP

#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#ifdef COMPILER1
class LIR_Assembler;
class ShenandoahPreBarrierStub;
class ShenandoahLoadReferenceBarrierStub;
class StubAssembler;
#endif
class StubCodeGenerator;

class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:

  void satb_write_barrier_pre(MacroAssembler* masm,
                              Register obj,
                              Register pre_val,
                              Register thread,
                              Register tmp,
                              bool tosca_live,
                              bool expand_call);
  void shenandoah_write_barrier_pre(MacroAssembler* masm,
                                    Register obj,
                                    Register pre_val,
                                    Register thread,
                                    Register tmp,
                                    bool tosca_live,
                                    bool expand_call);

  void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg);
  void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);
  void load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr, DecoratorSet decorators);

public:

  void iu_barrier(MacroAssembler* masm, Register dst, Register tmp);

#ifdef COMPILER1
  void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
  void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub);
  void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
  void generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators);
#endif

  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                  Register src, Register dst, Register count, RegSet saved_regs);

  virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                       Register dst, Address src, Register tmp1, Register tmp_thread);
  virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                        Address dst, Register val, Register tmp1, Register tmp2);

  virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                             Register obj, Register tmp, Label& slowpath);

  void cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
                   Assembler::Aqrl acquire, Assembler::Aqrl release, bool is_cae, Register result);
};

#endif // CPU_RISCV_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_RISCV_HPP
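The `is_cae` flag in the `cmpxchg_oop` declaration above selects between two result conventions for the same primitive. A minimal sketch of that convention in plain C++ (the function and its signature are illustrative, not HotSpot code):

```cpp
#include <atomic>
#include <cstdint>

// Sketch of the is_cae convention: the same compare-and-swap either reports
// success as a boolean (CAS) or returns the value it found at the address
// (CAE, "compare and exchange").
uintptr_t cas_or_cae(std::atomic<uintptr_t>& addr, uintptr_t expected,
                     uintptr_t newval, bool is_cae) {
  uintptr_t witness = expected;
  const bool ok = addr.compare_exchange_strong(witness, newval);
  return is_cae ? witness          // CAE: old value found at the address
                : (ok ? 1u : 0u);  // CAS: boolean result in a register
}
```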
285
src/hotspot/cpu/riscv/gc/shenandoah/shenandoah_riscv64.ad
Normal file
@@ -0,0 +1,285 @@
//
// Copyright (c) 2018, Red Hat, Inc. All rights reserved.
// Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

source_hpp %{
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
%}

instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
  match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(10 * DEFAULT_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchg_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp, #@compareAndSwapP_shenandoah"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mv(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   Assembler::relaxed /* acquire */, Assembler::rl /* release */,
                                                   false /* is_cae */, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
  match(Set res (ShenandoahCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(10 * DEFAULT_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchgw_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp, #@compareAndSwapN_shenandoah"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mv(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   Assembler::relaxed /* acquire */, Assembler::rl /* release */,
                                                   false /* is_cae */, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
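The "Must not clobber oldval" copy above exists because Shenandoah's cmpxchg_oop can fail spuriously when the memory holds a from-space copy of the expected object, in which case it compares forwardees and retries, re-reading the original expected value. A hedged C++ sketch of that shape (the forwarding lookup is a stub here; real Shenandoah reads the mark word):

```cpp
#include <atomic>
#include <cstdint>

// Stand-in for Shenandoah's forwarding-pointer lookup; identity stub so
// this compiles. The real code resolves a from-space copy to its forwardee.
static uintptr_t resolve_forwardee(uintptr_t obj) { return obj; }

// Why `expected` must survive: after a failed CAS, the barrier compares the
// witnessed value's forwardee against the *original* expected value and may
// retry, so `expected` is read again after the first CAS.
bool shenandoah_style_cas(std::atomic<uintptr_t>& addr,
                          uintptr_t expected, uintptr_t newval) {
  uintptr_t witness = expected;  // the CAS overwrites this copy only
  if (addr.compare_exchange_strong(witness, newval)) {
    return true;
  }
  if (resolve_forwardee(witness) == resolve_forwardee(expected)) {
    // False negative: same object, stale copy. Retry against the witness.
    return addr.compare_exchange_strong(witness, newval);
  }
  return false;
}
```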

instruct compareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_reserved(n));
  match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(10 * DEFAULT_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchg_acq_shenandoah_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp, #@compareAndSwapPAcq_shenandoah"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mv(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   Assembler::aq /* acquire */, Assembler::rl /* release */,
                                                   false /* is_cae */, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_reserved(n));
  match(Set res (ShenandoahCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(10 * DEFAULT_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchgw_acq_shenandoah_narrow_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp, #@compareAndSwapNAcq_shenandoah"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mv(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   Assembler::aq /* acquire */, Assembler::rl /* release */,
                                                   false /* is_cae */, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeN_shenandoah(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
  match(Set res (ShenandoahCompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(10 * DEFAULT_COST);
  effect(TEMP_DEF res, TEMP tmp, KILL cr);

  format %{
    "cmpxchgw_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeN_shenandoah"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mv(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   Assembler::relaxed /* acquire */, Assembler::rl /* release */,
                                                   true /* is_cae */, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeP_shenandoah(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
  match(Set res (ShenandoahCompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(10 * DEFAULT_COST);

  effect(TEMP_DEF res, TEMP tmp, KILL cr);
  format %{
    "cmpxchg_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp, #@compareAndExchangeP_shenandoah"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mv(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   Assembler::relaxed /* acquire */, Assembler::rl /* release */,
                                                   true /* is_cae */, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
  match(Set res (ShenandoahWeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(10 * DEFAULT_COST);

  effect(TEMP tmp, KILL cr);
  format %{
    "cmpxchgw_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@weakCompareAndSwapN_shenandoah"
    "mv $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mv(tmp, $oldval$$Register); // Must not clobber oldval.
    // Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   Assembler::relaxed /* acquire */, Assembler::rl /* release */,
                                                   false /* is_cae */, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeNAcq_shenandoah(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_reserved(n));
  match(Set res (ShenandoahCompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(10 * DEFAULT_COST);

  effect(TEMP_DEF res, TEMP tmp, KILL cr);
  format %{
    "cmpxchgw_acq_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeNAcq_shenandoah"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mv(tmp, $oldval$$Register);
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   Assembler::aq /* acquire */, Assembler::rl /* release */,
                                                   true /* is_cae */, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}

instruct compareAndExchangePAcq_shenandoah(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_reserved(n));
  match(Set res (ShenandoahCompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(10 * DEFAULT_COST);

  effect(TEMP_DEF res, TEMP tmp, KILL cr);
  format %{
    "cmpxchg_acq_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangePAcq_shenandoah"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mv(tmp, $oldval$$Register);
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   Assembler::aq /* acquire */, Assembler::rl /* release */,
                                                   true /* is_cae */, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
  match(Set res (ShenandoahWeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(10 * DEFAULT_COST);

  effect(TEMP tmp, KILL cr);
  format %{
    "cmpxchg_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@weakCompareAndSwapP_shenandoah"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mv(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   Assembler::relaxed /* acquire */, Assembler::rl /* release */,
                                                   false /* is_cae */, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_reserved(n));
  match(Set res (ShenandoahWeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(10 * DEFAULT_COST);

  effect(TEMP tmp, KILL cr);
  format %{
    "cmpxchgw_acq_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@weakCompareAndSwapNAcq_shenandoah"
    "mv $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mv(tmp, $oldval$$Register); // Must not clobber oldval.
    // Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   Assembler::aq /* acquire */, Assembler::rl /* release */,
                                                   false /* is_cae */, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_reserved(n));
  match(Set res (ShenandoahWeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(10 * DEFAULT_COST);

  effect(TEMP tmp, KILL cr);
  format %{
    "cmpxchg_acq_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@weakCompareAndSwapPAcq_shenandoah"
    "mv $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mv(tmp, $oldval$$Register); // Must not clobber oldval.
    // Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   Assembler::aq /* acquire */, Assembler::rl /* release */,
                                                   false /* is_cae */, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
441
src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp
Normal file
@@ -0,0 +1,441 @@
/*
 * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif // COMPILER2

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#undef __
#define __ masm->

void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
                                   DecoratorSet decorators,
                                   BasicType type,
                                   Register dst,
                                   Address src,
                                   Register tmp1,
                                   Register tmp_thread) {
  if (!ZBarrierSet::barrier_needed(decorators, type)) {
    // Barrier not needed
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    return;
  }

  assert_different_registers(t1, src.base());
  assert_different_registers(t0, t1, dst);

  Label done;

  // Load bad mask into temp register.
  __ la(t0, src);
  __ ld(t1, address_bad_mask_from_thread(xthread));
  __ ld(dst, Address(t0));

  // Test reference against bad mask. If mask bad, then we need to fix it up.
  __ andr(t1, dst, t1);
  __ beqz(t1, done);

  __ enter();

  __ push_call_clobbered_registers_except(RegSet::of(dst));

  if (c_rarg0 != dst) {
    __ mv(c_rarg0, dst);
  }

  __ mv(c_rarg1, t0);

  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);

  // Make sure dst has the return value.
  if (dst != x10) {
    __ mv(dst, x10);
  }

  __ pop_call_clobbered_registers_except(RegSet::of(dst));
  __ leave();

  __ bind(done);
}
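The fast path above boils down to a single AND of the loaded reference against a per-thread "bad mask" that the GC flips between phases. A hedged C++ sketch of that shape, with stand-in names for ZThreadLocalData and the runtime slow path:

```cpp
#include <cstdint>

// Illustrative stand-in for ZThreadLocalData's per-thread GC state.
struct ThreadLocalGCData {
  uintptr_t address_bad_mask;  // flips between marking/relocation phases
};

static void* heal_reference(void* ref, void** load_addr) {
  // The real runtime call remaps/relocates the object and self-heals
  // *load_addr; this stub just returns the reference unchanged.
  (void)load_addr;
  return ref;
}

void* z_load_barrier_fast_path(ThreadLocalGCData& tl, void** load_addr) {
  void* ref = *load_addr;
  if ((reinterpret_cast<uintptr_t>(ref) & tl.address_bad_mask) != 0) {
    ref = heal_reference(ref, load_addr);  // slow path, out of line
  }
  return ref;
}
```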

#ifdef ASSERT

void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
                                    DecoratorSet decorators,
                                    BasicType type,
                                    Address dst,
                                    Register val,
                                    Register tmp1,
                                    Register tmp2) {
  // Verify value
  if (is_reference_type(type)) {
    // Note that src could be noreg, which means we
    // are storing null and can skip verification.
    if (val != noreg) {
      Label done;

      // tmp1 and tmp2 are often set to noreg.
      RegSet savedRegs = RegSet::of(t0);
      __ push_reg(savedRegs, sp);

      __ ld(t0, address_bad_mask_from_thread(xthread));
      __ andr(t0, val, t0);
      __ beqz(t0, done);
      __ stop("Verify oop store failed");
      __ should_not_reach_here();
      __ bind(done);
      __ pop_reg(savedRegs, sp);
    }
  }

  // Store value
  BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
}

#endif // ASSERT

void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
                                              DecoratorSet decorators,
                                              bool is_oop,
                                              Register src,
                                              Register dst,
                                              Register count,
                                              RegSet saved_regs) {
  if (!is_oop) {
    // Barrier not needed
    return;
  }

  BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {");

  assert_different_registers(src, count, t0);

  __ push_reg(saved_regs, sp);

  if (count == c_rarg0 && src == c_rarg1) {
    // exactly backwards!!
    __ xorr(c_rarg0, c_rarg0, c_rarg1);
    __ xorr(c_rarg1, c_rarg0, c_rarg1);
    __ xorr(c_rarg0, c_rarg0, c_rarg1);
  } else {
    __ mv(c_rarg0, src);
    __ mv(c_rarg1, count);
  }

  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2);

  __ pop_reg(saved_regs, sp);

  BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
}
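The "exactly backwards!!" branch above swaps the two argument registers without needing a scratch register, using the classic three-XOR trick. The same trick in plain, runnable C++ for reference:

```cpp
#include <cstdint>
#include <iostream>

// Swap two values in place with three XORs, no temporary needed.
// (Only safe when a and b are distinct objects, as they are here.)
void xor_swap(uintptr_t& a, uintptr_t& b) {
  a ^= b;
  b ^= a;  // b now holds the original a
  a ^= b;  // a now holds the original b
}

int main() {
  uintptr_t a = 1, b = 2;
  xor_swap(a, b);
  std::cout << a << " " << b << "\n";  // prints: 2 1
}
```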

void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
                                                         Register jni_env,
                                                         Register robj,
                                                         Register tmp,
                                                         Label& slowpath) {
  BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");

  assert_different_registers(jni_env, robj, tmp);

  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath);

  // Compute the offset of address bad mask from the field of jni_environment
  long int bad_mask_relative_offset = (long int) (in_bytes(ZThreadLocalData::address_bad_mask_offset()) -
                                                  in_bytes(JavaThread::jni_environment_offset()));

  // Load the address bad mask
  __ ld(tmp, Address(jni_env, bad_mask_relative_offset));

  // Check address bad mask
  __ andr(tmp, robj, tmp);
  __ bnez(tmp, slowpath);

  BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
}

#ifdef COMPILER2

OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }

  const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
  if (vm_reg->is_FloatRegister()) {
    return opto_reg & ~1;
  }

  return opto_reg;
}

#undef __
#define __ _masm->

class ZSaveLiveRegisters {
private:
  MacroAssembler* const _masm;
  RegSet                _gp_regs;
  FloatRegSet           _fp_regs;
  VectorRegSet          _vp_regs;

public:
  void initialize(ZLoadBarrierStubC2* stub) {
    // Record registers that need to be saved/restored
    RegMaskIterator rmi(stub->live());
    while (rmi.has_next()) {
      const OptoReg::Name opto_reg = rmi.next();
      if (OptoReg::is_reg(opto_reg)) {
        const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
        if (vm_reg->is_Register()) {
          _gp_regs += RegSet::of(vm_reg->as_Register());
        } else if (vm_reg->is_FloatRegister()) {
          _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
        } else if (vm_reg->is_VectorRegister()) {
          const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~(VectorRegisterImpl::max_slots_per_register - 1));
          _vp_regs += VectorRegSet::of(vm_reg_base->as_VectorRegister());
        } else {
          fatal("Unknown register type");
        }
      }
    }

    // Remove C-ABI SOE registers, tmp regs and _ref register that will be updated
    _gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2) + RegSet::of(x8, x9) + RegSet::of(x5, stub->ref());
  }

  ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _gp_regs(),
      _fp_regs(),
      _vp_regs() {
    // Figure out what registers to save/restore
    initialize(stub);

    // Save registers
    __ push_reg(_gp_regs, sp);
    __ push_fp(_fp_regs, sp);
    __ push_vp(_vp_regs, sp);
  }

  ~ZSaveLiveRegisters() {
    // Restore registers
    __ pop_vp(_vp_regs, sp);
    __ pop_fp(_fp_regs, sp);
    __ pop_reg(_gp_regs, sp);
  }
};

class ZSetupArguments {
private:
  MacroAssembler* const _masm;
  const Register        _ref;
  const Address         _ref_addr;

public:
  ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _ref(stub->ref()),
      _ref_addr(stub->ref_addr()) {

    // Setup arguments
    if (_ref_addr.base() == noreg) {
      // No self healing
      if (_ref != c_rarg0) {
        __ mv(c_rarg0, _ref);
      }
      __ mv(c_rarg1, zr);
    } else {
      // Self healing
      if (_ref == c_rarg0) {
        // _ref is already at correct place
        __ la(c_rarg1, _ref_addr);
      } else if (_ref != c_rarg1) {
        // _ref is in wrong place, but not in c_rarg1, so fix it first
        __ la(c_rarg1, _ref_addr);
        __ mv(c_rarg0, _ref);
      } else if (_ref_addr.base() != c_rarg0) {
        assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0");
        __ mv(c_rarg0, _ref);
        __ la(c_rarg1, _ref_addr);
      } else {
        assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0");
        if (_ref_addr.base() == c_rarg0) {
          __ mv(t1, c_rarg1);
          __ la(c_rarg1, _ref_addr);
          __ mv(c_rarg0, t1);
        } else {
          ShouldNotReachHere();
        }
      }
    }
  }

  ~ZSetupArguments() {
    // Transfer result
    if (_ref != x10) {
      __ mv(_ref, x10);
    }
  }
};

#undef __
#define __ masm->

void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
  BLOCK_COMMENT("ZLoadBarrierStubC2");

  // Stub entry
  __ bind(*stub->entry());

  {
    ZSaveLiveRegisters save_live_registers(masm, stub);
    ZSetupArguments setup_arguments(masm, stub);
    int32_t offset = 0;
    __ la_patchable(t0, stub->slow_path(), offset);
    __ jalr(x1, t0, offset);
  }

  // Stub exit
  __ j(*stub->continuation());
}

#undef __

#endif // COMPILER2

#ifdef COMPILER1
#undef __
#define __ ce->masm()->

void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
                                                         LIR_Opr ref) const {
  assert_different_registers(xthread, ref->as_register(), t1);
  __ ld(t1, address_bad_mask_from_thread(xthread));
  __ andr(t1, t1, ref->as_register());
}

void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
                                                         ZLoadBarrierStubC1* stub) const {
  // Stub entry
  __ bind(*stub->entry());

  Register ref = stub->ref()->as_register();
  Register ref_addr = noreg;
  Register tmp = noreg;

  if (stub->tmp()->is_valid()) {
    // Load address into tmp register
    ce->leal(stub->ref_addr(), stub->tmp());
    ref_addr = tmp = stub->tmp()->as_pointer_register();
  } else {
    // Address already in register
    ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
  }

  assert_different_registers(ref, ref_addr, noreg);

  // Save x10 unless it is the result or tmp register
  // Set up SP to accommodate parameters and maybe x10.
  if (ref != x10 && tmp != x10) {
    __ sub(sp, sp, 32);
    __ sd(x10, Address(sp, 16));
  } else {
    __ sub(sp, sp, 16);
  }

  // Setup arguments and call runtime stub
  ce->store_parameter(ref_addr, 1);
  ce->store_parameter(ref, 0);

  __ far_call(stub->runtime_stub());

  // Verify result
  __ verify_oop(x10, "Bad oop");

  // Move result into place
  if (ref != x10) {
    __ mv(ref, x10);
  }

  // Restore x10 unless it is the result or tmp register
  if (ref != x10 && tmp != x10) {
    __ ld(x10, Address(sp, 16));
    __ add(sp, sp, 32);
  } else {
    __ add(sp, sp, 16);
  }

  // Stub exit
  __ j(*stub->continuation());
}

#undef __
#define __ sasm->

void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
                                                                 DecoratorSet decorators) const {
  __ prologue("zgc_load_barrier stub", false);

  __ push_call_clobbered_registers_except(RegSet::of(x10));

  // Setup arguments
  __ load_parameter(0, c_rarg0);
  __ load_parameter(1, c_rarg1);

  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);

  __ pop_call_clobbered_registers_except(RegSet::of(x10));

  __ epilogue();
}

#undef __
#endif // COMPILER1
102
src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp
Normal file
@@ -0,0 +1,102 @@
/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_GC_Z_ZBARRIERSETASSEMBLER_RISCV_HPP
#define CPU_RISCV_GC_Z_ZBARRIERSETASSEMBLER_RISCV_HPP

#include "code/vmreg.hpp"
#include "oops/accessDecorators.hpp"
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif // COMPILER2

#ifdef COMPILER1
class LIR_Assembler;
class LIR_OprDesc;
typedef LIR_OprDesc* LIR_Opr;
class StubAssembler;
class ZLoadBarrierStubC1;
#endif // COMPILER1

#ifdef COMPILER2
class Node;
class ZLoadBarrierStubC2;
#endif // COMPILER2

class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
public:
  virtual void load_at(MacroAssembler* masm,
                       DecoratorSet decorators,
                       BasicType type,
                       Register dst,
                       Address src,
                       Register tmp1,
                       Register tmp_thread);

#ifdef ASSERT
  virtual void store_at(MacroAssembler* masm,
                        DecoratorSet decorators,
                        BasicType type,
                        Address dst,
                        Register val,
                        Register tmp1,
                        Register tmp2);
#endif // ASSERT

  virtual void arraycopy_prologue(MacroAssembler* masm,
                                  DecoratorSet decorators,
                                  bool is_oop,
                                  Register src,
                                  Register dst,
                                  Register count,
                                  RegSet saved_regs);

  virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
                                             Register jni_env,
                                             Register robj,
                                             Register tmp,
                                             Label& slowpath);

#ifdef COMPILER1
  void generate_c1_load_barrier_test(LIR_Assembler* ce,
                                     LIR_Opr ref) const;

  void generate_c1_load_barrier_stub(LIR_Assembler* ce,
                                     ZLoadBarrierStubC1* stub) const;

  void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
                                             DecoratorSet decorators) const;
#endif // COMPILER1

#ifdef COMPILER2
  OptoReg::Name refine_register(const Node* node,
                                OptoReg::Name opto_reg);

  void generate_c2_load_barrier_stub(MacroAssembler* masm,
                                     ZLoadBarrierStubC2* stub) const;
#endif // COMPILER2
};

#endif // CPU_RISCV_GC_Z_ZBARRIERSETASSEMBLER_RISCV_HPP
212
src/hotspot/cpu/riscv/gc/z/zGlobals_riscv.cpp
Normal file
@@ -0,0 +1,212 @@
/*
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/z/zGlobals.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"

#ifdef LINUX
#include <sys/mman.h>
#endif // LINUX

//
// The heap can have three different layouts, depending on the max heap size.
//
// Address Space & Pointer Layout 1
// --------------------------------
//
//  +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
//  .                                .
//  .                                .
//  .                                .
//  +--------------------------------+ 0x0000014000000000 (20TB)
//  |         Remapped View          |
//  +--------------------------------+ 0x0000010000000000 (16TB)
//  .                                .
//  +--------------------------------+ 0x00000c0000000000 (12TB)
//  |         Marked1 View           |
//  +--------------------------------+ 0x0000080000000000 (8TB)
//  |         Marked0 View           |
//  +--------------------------------+ 0x0000040000000000 (4TB)
//  .                                .
//  +--------------------------------+ 0x0000000000000000
//
//   6                  4 4  4 4
//   3                  6 5  2 1                                             0
//  +--------------------+----+-----------------------------------------------+
//  |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111|
//  +--------------------+----+-----------------------------------------------+
//  |                    |    |
//  |                    |    * 41-0 Object Offset (42-bits, 4TB address space)
//  |                    |
//  |                    * 45-42 Metadata Bits (4-bits)  0001 = Marked0      (Address view 4-8TB)
//  |                                                    0010 = Marked1      (Address view 8-12TB)
//  |                                                    0100 = Remapped     (Address view 16-20TB)
//  |                                                    1000 = Finalizable  (Address view N/A)
//  |
//  * 63-46 Fixed (18-bits, always zero)
//
//
// Address Space & Pointer Layout 2
// --------------------------------
//
//  +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
//  .                                .
//  .                                .
//  .                                .
//  +--------------------------------+ 0x0000280000000000 (40TB)
//  |         Remapped View          |
//  +--------------------------------+ 0x0000200000000000 (32TB)
//  .                                .
//  +--------------------------------+ 0x0000180000000000 (24TB)
//  |         Marked1 View           |
//  +--------------------------------+ 0x0000100000000000 (16TB)
//  |         Marked0 View           |
//  +--------------------------------+ 0x0000080000000000 (8TB)
//  .                                .
//  +--------------------------------+ 0x0000000000000000
//
//   6                 4 4  4 4
//   3                 7 6  3 2                                              0
//  +------------------+-----+------------------------------------------------+
//  |00000000 00000000 0|1111|111 11111111 11111111 11111111 11111111 11111111|
//  +-------------------+----+------------------------------------------------+
//  |                   |    |
//  |                   |    * 42-0 Object Offset (43-bits, 8TB address space)
//  |                   |
//  |                   * 46-43 Metadata Bits (4-bits)  0001 = Marked0      (Address view 8-16TB)
//  |                                                   0010 = Marked1      (Address view 16-24TB)
//  |                                                   0100 = Remapped     (Address view 32-40TB)
//  |                                                   1000 = Finalizable  (Address view N/A)
//  |
//  * 63-47 Fixed (17-bits, always zero)
//
//
// Address Space & Pointer Layout 3
// --------------------------------
//
//  +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
//  .                                .
//  .                                .
//  .                                .
//  +--------------------------------+ 0x0000500000000000 (80TB)
//  |         Remapped View          |
//  +--------------------------------+ 0x0000400000000000 (64TB)
//  .                                .
//  +--------------------------------+ 0x0000300000000000 (48TB)
//  |         Marked1 View           |
//  +--------------------------------+ 0x0000200000000000 (32TB)
//  |         Marked0 View           |
//  +--------------------------------+ 0x0000100000000000 (16TB)
//  .                                .
//  +--------------------------------+ 0x0000000000000000
//
//   6                4 4  4 4
//   3                8 7  4 3                                               0
//  +------------------+----+-------------------------------------------------+
//  |00000000 00000000 |1111|1111 11111111 11111111 11111111 11111111 11111111|
//  +------------------+----+-------------------------------------------------+
//  |                  |    |
//  |                  |    * 43-0 Object Offset (44-bits, 16TB address space)
//  |                  |
//  |                  * 47-44 Metadata Bits (4-bits)  0001 = Marked0      (Address view 16-32TB)
//  |                                                  0010 = Marked1      (Address view 32-48TB)
//  |                                                  0100 = Remapped     (Address view 64-80TB)
//  |                                                  1000 = Finalizable  (Address view N/A)
//  |
//  * 63-48 Fixed (16-bits, always zero)
//

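For concreteness, the metadata-bit arithmetic described by Layout 1 above can be exercised in a few lines of standalone C++. The constants below are derived from the comment (42 offset bits); they are illustrative, not the actual zGlobals definitions.

```cpp
#include <cstdint>
#include <cstdio>

// Field positions for Layout 1 above (42 offset bits); the other layouts
// only shift the metadata bits up by one or two positions.
constexpr unsigned  kOffsetBits = 42;
constexpr uintptr_t kOffsetMask = (uintptr_t(1) << kOffsetBits) - 1;
constexpr uintptr_t kMarked0    = uintptr_t(1) << (kOffsetBits + 0);
constexpr uintptr_t kMarked1    = uintptr_t(1) << (kOffsetBits + 1);
constexpr uintptr_t kRemapped   = uintptr_t(1) << (kOffsetBits + 2);

int main() {
  // A colored pointer into the Remapped view at heap offset 0x1234.
  const uintptr_t colored = kRemapped | 0x1234;
  std::printf("offset   = 0x%lx\n", (unsigned long)(colored & kOffsetMask));
  std::printf("remapped = %d\n", (colored & kRemapped) != 0);
  std::printf("marked0  = %d\n", (colored & kMarked0) != 0);
}
```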
// Default value if probing is not implemented for a certain platform: 128TB
static const size_t DEFAULT_MAX_ADDRESS_BIT = 47;
// Minimum value returned, if probing fails: 64GB
static const size_t MINIMUM_MAX_ADDRESS_BIT = 36;

static size_t probe_valid_max_address_bit() {
#ifdef LINUX
  size_t max_address_bit = 0;
  const size_t page_size = os::vm_page_size();
  for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) {
    const uintptr_t base_addr = ((uintptr_t) 1U) << i;
    if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) {
      // msync succeeded, the address is valid, and maybe even already mapped.
      max_address_bit = i;
      break;
    }
    if (errno != ENOMEM) {
      // Some error occurred. This should never happen, but msync
      // has some undefined behavior, hence ignore this bit.
#ifdef ASSERT
      fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
#else // ASSERT
      log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
#endif // ASSERT
      continue;
    }
    // Since msync failed with ENOMEM, the page might not be mapped.
    // Try to map it, to see if the address is valid.
    void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
    if (result_addr != MAP_FAILED) {
      munmap(result_addr, page_size);
    }
    if ((uintptr_t) result_addr == base_addr) {
      // address is valid
      max_address_bit = i;
      break;
    }
  }
  if (max_address_bit == 0) {
    // probing failed, allocate a very high page and take that bit as the maximum
    const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT;
    void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
    if (result_addr != MAP_FAILED) {
      max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1;
      munmap(result_addr, page_size);
    }
  }
  log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit);
  return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT);
#else // LINUX
  return DEFAULT_MAX_ADDRESS_BIT;
#endif // LINUX
}

size_t ZPlatformAddressOffsetBits() {
  const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
  const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
  const size_t min_address_offset_bits = max_address_offset_bits - 2;
  const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
  const size_t address_offset_bits = log2i_exact(address_offset);
  return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}

size_t ZPlatformAddressMetadataShift() {
  return ZPlatformAddressOffsetBits();
}
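The clamping in ZPlatformAddressOffsetBits above can be checked with plain arithmetic. A minimal sketch, assuming a probed maximum bit of 47 (the Linux default) and treating the heap reservation as a precomputed log2 value; the helper name and parameters are illustrative:

```cpp
#include <algorithm>
#include <cstddef>

// Given the highest usable address bit found by probing, subtract 3 bits of
// headroom for the three heap views, then size the offset field to fit the
// virtual reservation, clamped into a 2-bit-wide window.
size_t address_offset_bits(size_t probed_max_bit, size_t heap_reserve_log2) {
  const size_t max_bits = (probed_max_bit + 1) - 3;
  const size_t min_bits = max_bits - 2;
  return std::clamp(heap_reserve_log2, min_bits, max_bits);
}
// e.g. probed_max_bit = 47 and a 16GB virtual reservation (log2 = 34)
// yields clamp(34, 43, 45) = 43 offset bits.
```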
36
src/hotspot/cpu/riscv/gc/z/zGlobals_riscv.hpp
Normal file
@@ -0,0 +1,36 @@
/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_GC_Z_ZGLOBALS_RISCV_HPP
#define CPU_RISCV_GC_Z_ZGLOBALS_RISCV_HPP

const size_t ZPlatformGranuleSizeShift = 21; // 2MB
const size_t ZPlatformHeapViews        = 3;
const size_t ZPlatformCacheLineSize    = 64;

size_t ZPlatformAddressOffsetBits();
size_t ZPlatformAddressMetadataShift();

#endif // CPU_RISCV_GC_Z_ZGLOBALS_RISCV_HPP
233
src/hotspot/cpu/riscv/gc/z/z_riscv64.ad
Normal file
@@ -0,0 +1,233 @@
//
// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//

source_hpp %{

#include "gc/shared/gc_globals.hpp"
#include "gc/z/c2/zBarrierSetC2.hpp"
#include "gc/z/zThreadLocalData.hpp"

%}

source %{

static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, int barrier_data) {
  if (barrier_data == ZLoadBarrierElided) {
    return;
  }
  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
  __ ld(tmp, Address(xthread, ZThreadLocalData::address_bad_mask_offset()));
  __ andr(tmp, tmp, ref);
  __ bnez(tmp, *stub->entry(), true /* far */);
  __ bind(*stub->continuation());
}

static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, ZLoadBarrierStrong);
  __ j(*stub->entry());
  __ bind(*stub->continuation());
}

%}

// Load Pointer
instruct zLoadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(UseZGC && (n->as_Load()->barrier_data() != 0));
  effect(TEMP dst);

  ins_cost(4 * DEFAULT_COST);

  format %{ "ld  $dst, $mem, #@zLoadP" %}

  ins_encode %{
    const Address ref_addr (as_Register($mem$$base), $mem$$disp);
    __ ld($dst$$Register, ref_addr);
    z_load_barrier(_masm, this, ref_addr, $dst$$Register, t0 /* tmp */, barrier_data());
  %}

  ins_pipe(iload_reg_mem);
%}

instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
  effect(KILL cr, TEMP_DEF res);

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapP\n\t"
            "mv $res, $res == $oldval" %}

  ins_encode %{
    Label failed;
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
               Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
               true /* result_as_bool */);
    __ beqz($res$$Register, failed);
    __ mv(t0, $oldval$$Register);
    __ bind(failed);
    if (barrier_data() != ZLoadBarrierElided) {
      Label good;
      __ ld(t1, Address(xthread, ZThreadLocalData::address_bad_mask_offset()), t1 /* tmp */);
      __ andr(t1, t1, t0);
      __ beqz(t1, good);
      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */);
      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
                 Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
                 true /* result_as_bool */);
      __ bind(good);
    }
  %}

  ins_pipe(pipe_slow);
%}

instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  predicate(UseZGC && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong));
  effect(KILL cr, TEMP_DEF res);

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapPAcq\n\t"
            "mv $res, $res == $oldval" %}

  ins_encode %{
    Label failed;
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
               Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
               true /* result_as_bool */);
    __ beqz($res$$Register, failed);
    __ mv(t0, $oldval$$Register);
    __ bind(failed);
    if (barrier_data() != ZLoadBarrierElided) {
      Label good;
      __ ld(t1, Address(xthread, ZThreadLocalData::address_bad_mask_offset()), t1 /* tmp */);
      __ andr(t1, t1, t0);
      __ beqz(t1, good);
      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */);
      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
                 Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
                 true /* result_as_bool */);
      __ bind(good);
    }
  %}

  ins_pipe(pipe_slow);
%}

instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
  effect(TEMP_DEF res);

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangeP" %}

  ins_encode %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
               Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
    if (barrier_data() != ZLoadBarrierElided) {
      Label good;
      __ ld(t0, Address(xthread, ZThreadLocalData::address_bad_mask_offset()));
      __ andr(t0, t0, $res$$Register);
      __ beqz(t0, good);
      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */);
      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
                 Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
      __ bind(good);
    }
  %}

  ins_pipe(pipe_slow);
%}

instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  predicate(UseZGC && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
  effect(TEMP_DEF res);

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangePAcq" %}

  ins_encode %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
               Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
    if (barrier_data() != ZLoadBarrierElided) {
      Label good;
      __ ld(t0, Address(xthread, ZThreadLocalData::address_bad_mask_offset()));
      __ andr(t0, t0, $res$$Register);
      __ beqz(t0, good);
      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */);
      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
                 Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
      __ bind(good);
    }
  %}

  ins_pipe(pipe_slow);
%}

instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
  match(Set prev (GetAndSetP mem newv));
  predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
  effect(TEMP_DEF prev, KILL cr);

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "atomic_xchg $prev, $newv, [$mem], #@zGetAndSetP" %}

  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
    z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data());
  %}

  ins_pipe(pipe_serial);
%}

instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
  match(Set prev (GetAndSetP mem newv));
  predicate(UseZGC && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() != 0));
  effect(TEMP_DEF prev, KILL cr);

  ins_cost(VOLATILE_REF_COST);

  format %{ "atomic_xchg_acq $prev, $newv, [$mem], #@zGetAndSetPAcq" %}

  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
    z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data());
  %}
  ins_pipe(pipe_serial);
%}
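The CAS instructs above share one pattern: a failing CAS may merely have witnessed a stale-colored pointer, so the value is healed through the load barrier and the CAS is retried once. A hedged, standalone C++ sketch of that control flow (the heal stub stands in for the C2 slow-path call, which in real ZGC also self-heals the memory location):

```cpp
#include <atomic>
#include <cstdint>

static void heal(uintptr_t ref) { (void)ref; }  // stand-in for the C2 stub

// Sketch of zCompareAndSwapP's flow: fail fast on a genuine mismatch, but
// give the CAS a second chance after healing a bad-colored witness.
bool z_style_cas(std::atomic<uintptr_t>& addr, uintptr_t oldv,
                 uintptr_t newv, uintptr_t bad_mask) {
  uintptr_t witness = oldv;
  if (addr.compare_exchange_strong(witness, newv)) {
    return true;
  }
  if ((witness & bad_mask) == 0) {
    return false;  // the value really differs: a genuine CAS failure
  }
  heal(witness);   // real ZGC also rewrites *addr with the healed pointer
  witness = oldv;
  return addr.compare_exchange_strong(witness, newv);
}
```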
52
src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp
Normal file
@@ -0,0 +1,52 @@
/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_GLOBALDEFINITIONS_RISCV_HPP
#define CPU_RISCV_GLOBALDEFINITIONS_RISCV_HPP

const int StackAlignmentInBytes = 16;

// Indicates whether the C calling conventions require that
// 32-bit integer argument values are extended to 64 bits.
const bool CCallingConventionRequiresIntsAsLongs = false;

// RISCV has adopted a multicopy atomic model closely following
// that of ARMv8.
#define CPU_MULTI_COPY_ATOMIC

// To be safe, we deoptimize when we come across an access that needs
// patching. This is similar to what is done on aarch64.
#define DEOPTIMIZE_WHEN_PATCHING

#define SUPPORTS_NATIVE_CX8

#define SUPPORT_RESERVED_STACK_AREA

#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false

#define USE_POINTERS_TO_REGISTER_IMPL_ARRAY

#endif // CPU_RISCV_GLOBALDEFINITIONS_RISCV_HPP
101
src/hotspot/cpu/riscv/globals_riscv.hpp
Normal file
@@ -0,0 +1,101 @@
/*
 * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_GLOBALS_RISCV_HPP
#define CPU_RISCV_GLOBALS_RISCV_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)

define_pd_global(bool, ImplicitNullChecks,    true);  // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks,   false);
define_pd_global(bool, UncommonNullCast,      true);  // Uncommon-trap NULLs passed to check cast

define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
define_pd_global(intx, CodeEntryAlignment,    64);
define_pd_global(intx, OptoLoopAlignment,     16);
define_pd_global(intx, InlineFrequencyCount,  100);

#define DEFAULT_STACK_YELLOW_PAGES   (2)
#define DEFAULT_STACK_RED_PAGES      (1)
// Java_java_net_SocketOutputStream_socketWrite0() uses a 64k buffer on the
// stack if compiled for unix and LP64. To pass stack overflow tests we need
// 20 shadow pages.
#define DEFAULT_STACK_SHADOW_PAGES   (20 DEBUG_ONLY(+5))
#define DEFAULT_STACK_RESERVED_PAGES (1)

#define MIN_STACK_YELLOW_PAGES   DEFAULT_STACK_YELLOW_PAGES
#define MIN_STACK_RED_PAGES      DEFAULT_STACK_RED_PAGES
#define MIN_STACK_SHADOW_PAGES   DEFAULT_STACK_SHADOW_PAGES
#define MIN_STACK_RESERVED_PAGES (0)

define_pd_global(intx, StackYellowPages,   DEFAULT_STACK_YELLOW_PAGES);
define_pd_global(intx, StackRedPages,      DEFAULT_STACK_RED_PAGES);
define_pd_global(intx, StackShadowPages,   DEFAULT_STACK_SHADOW_PAGES);
define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
|
||||
define_pd_global(bool, RewriteBytecodes, true);
|
||||
define_pd_global(bool, RewriteFrequentPairs, true);
|
||||
|
||||
define_pd_global(bool, PreserveFramePointer, false);
|
||||
|
||||
define_pd_global(uintx, TypeProfileLevel, 111);
|
||||
|
||||
define_pd_global(bool, CompactStrings, true);
|
||||
|
||||
// Clear short arrays bigger than one word in an arch-specific way
|
||||
define_pd_global(intx, InitArrayShortSize, BytesPerLong);
|
||||
|
||||
define_pd_global(intx, InlineSmallCode, 1000);
|
||||
|
||||
#define ARCH_FLAGS(develop, \
|
||||
product, \
|
||||
notproduct, \
|
||||
range, \
|
||||
constraint) \
|
||||
\
|
||||
product(bool, NearCpool, true, \
|
||||
"constant pool is close to instructions") \
|
||||
product(intx, BlockZeroingLowLimit, 256, \
|
||||
"Minimum size in bytes when block zeroing will be used") \
|
||||
range(1, max_jint) \
|
||||
product(bool, TraceTraps, false, "Trace all traps the signal handler") \
|
||||
/* For now we're going to be safe and add the I/O bits to userspace fences. */ \
|
||||
product(bool, UseConservativeFence, true, \
|
||||
"Extend i for r and o for w in the pred/succ flags of fence;" \
|
||||
"Extend fence.i to fence.i + fence.") \
|
||||
product(bool, AvoidUnalignedAccesses, true, \
|
||||
"Avoid generating unaligned memory accesses") \
|
||||
product(bool, UseRVV, false, EXPERIMENTAL, "Use RVV instructions") \
|
||||
product(bool, UseRVC, false, EXPERIMENTAL, "Use RVC instructions") \
|
||||
product(bool, UseZba, false, EXPERIMENTAL, "Use Zba instructions") \
|
||||
product(bool, UseZbb, false, EXPERIMENTAL, "Use Zbb instructions") \
|
||||
product(bool, UseRVVForBigIntegerShiftIntrinsics, true, \
|
||||
"Use RVV instructions for left/right shift of BigInteger")
|
||||
|
||||
#endif // CPU_RISCV_GLOBALS_RISCV_HPP
|
||||
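Note that the flags declared EXPERIMENTAL above (UseRVV, UseRVC, UseZba, UseZbb) default to off and, per the usual HotSpot convention for experimental flags, can only be turned on together with -XX:+UnlockExperimentalVMOptions.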
79
src/hotspot/cpu/riscv/icBuffer_riscv.cpp
Normal file
@@ -0,0 +1,79 @@
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/oop.inline.hpp"

int InlineCacheBuffer::ic_stub_code_size() {
  // 6: auipc + ld + auipc + jalr + address(2 * instruction_size)
  // 5: auipc + ld + j + address(2 * instruction_size)
  return (MacroAssembler::far_branches() ? 6 : 5) * NativeInstruction::instruction_size;
}

#define __ masm->

void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
  assert_cond(code_begin != NULL && entry_point != NULL);
  ResourceMark rm;
  CodeBuffer code(code_begin, ic_stub_code_size());
  MacroAssembler* masm = new MacroAssembler(&code);
  // Note: even though the code contains an embedded value, we do not need reloc info
  // because
  // (1) the value is old (i.e., doesn't matter for scavenges)
  // (2) these ICStubs are removed *before* a GC happens, so the roots disappear

  address start = __ pc();
  Label l;
  __ ld(t1, l);
  __ far_jump(ExternalAddress(entry_point));
  __ align(wordSize);
  __ bind(l);
  __ emit_int64((intptr_t)cached_value);
  // Only need to invalidate the 1st two instructions - not the whole ic stub
  ICache::invalidate_range(code_begin, InlineCacheBuffer::ic_stub_code_size());
  assert(__ pc() - start == ic_stub_code_size(), "must be");
}

address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
  NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
  NativeJump* jump = nativeJump_at(move->next_instruction_address());
  return jump->jump_destination();
}


void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
  // The word containing the cached value is at the end of this IC buffer
  uintptr_t *p = (uintptr_t *)(code_begin + ic_stub_code_size() - wordSize);
  void* o = (void*)*p;
  return o;
}
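A quick standalone check of the stub sizing above, assuming the usual 4-byte RISC-V instruction size (illustrative only, not part of the patch):

#include <cstdio>

int main() {
  const int instruction_size = 4; // assumed NativeInstruction::instruction_size on RV64
  // far: auipc + ld + auipc + jalr, plus a 2-instruction-sized embedded address
  std::printf("far branches:  %d bytes\n", 6 * instruction_size);
  // near: auipc + ld + j, plus the same embedded address
  std::printf("near branches: %d bytes\n", 5 * instruction_size);
  return 0;
}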
51
src/hotspot/cpu/riscv/icache_riscv.cpp
Normal file
@@ -0,0 +1,51 @@
/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "runtime/icache.hpp"

#define __ _masm->

static int icache_flush(address addr, int lines, int magic) {
  os::icache_flush((long int) addr, (long int) (addr + (lines << ICache::log2_line_size)));
  return magic;
}

void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) {
  address start = (address)icache_flush;
  *flush_icache_stub = (ICache::flush_icache_stub_t)start;

  // ICache::invalidate_range() contains explicit condition that the first
  // call is invoked on the generated icache flush stub code range.
  ICache::invalidate_range(start, 0);

  {
    StubCodeMark mark(this, "ICache", "fake_stub_for_inlined_icache_flush");
    __ ret();
  }
}

#undef __
42
src/hotspot/cpu/riscv/icache_riscv.hpp
Normal file
@@ -0,0 +1,42 @@
/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_ICACHE_RISCV_HPP
#define CPU_RISCV_ICACHE_RISCV_HPP

// Interface for updating the instruction cache. Whenever the VM
// modifies code, part of the processor instruction cache potentially
// has to be flushed.

class ICache : public AbstractICache {
public:
  enum {
    stub_size      = 16,              // Size of the icache flush stub in bytes
    line_size      = BytesPerWord,    // conservative
    log2_line_size = LogBytesPerWord  // log2(line_size)
  };
};

#endif // CPU_RISCV_ICACHE_RISCV_HPP
1948
src/hotspot/cpu/riscv/interp_masm_riscv.cpp
Normal file
File diff suppressed because it is too large
285
src/hotspot/cpu/riscv/interp_masm_riscv.hpp
Normal file
@@ -0,0 +1,285 @@
/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_INTERP_MASM_RISCV_HPP
#define CPU_RISCV_INTERP_MASM_RISCV_HPP

#include "asm/macroAssembler.hpp"
#include "interpreter/invocationCounter.hpp"
#include "runtime/frame.hpp"

// This file specializes the assembler with interpreter-specific macros

typedef ByteSize (*OffsetFunction)(uint);

class InterpreterMacroAssembler: public MacroAssembler {
 protected:
  // Interpreter specific version of call_VM_base
  using MacroAssembler::call_VM_leaf_base;

  virtual void call_VM_leaf_base(address entry_point,
                                 int number_of_arguments);

  virtual void call_VM_base(Register oop_result,
                            Register java_thread,
                            Register last_java_sp,
                            address entry_point,
                            int number_of_arguments,
                            bool check_exceptions);

  // base routine for all dispatches
  void dispatch_base(TosState state, address* table, bool verifyoop = true,
                     bool generate_poll = false, Register Rs = t0);

 public:
  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
  virtual ~InterpreterMacroAssembler() {}

  void load_earlyret_value(TosState state);

  void jump_to_entry(address entry);

  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  // Interpreter-specific registers
  void save_bcp() {
    sd(xbcp, Address(fp, frame::interpreter_frame_bcp_offset * wordSize));
  }

  void restore_bcp() {
    ld(xbcp, Address(fp, frame::interpreter_frame_bcp_offset * wordSize));
  }

  void restore_locals() {
    ld(xlocals, Address(fp, frame::interpreter_frame_locals_offset * wordSize));
  }

  void restore_constant_pool_cache() {
    ld(xcpool, Address(fp, frame::interpreter_frame_cache_offset * wordSize));
  }

  void get_dispatch();

  // Helpers for runtime call arguments/results
  void get_method(Register reg) {
    ld(reg, Address(fp, frame::interpreter_frame_method_offset * wordSize));
  }

  void get_const(Register reg) {
    get_method(reg);
    ld(reg, Address(reg, in_bytes(Method::const_offset())));
  }

  void get_constant_pool(Register reg) {
    get_const(reg);
    ld(reg, Address(reg, in_bytes(ConstMethod::constants_offset())));
  }

  void get_constant_pool_cache(Register reg) {
    get_constant_pool(reg);
    ld(reg, Address(reg, ConstantPool::cache_offset_in_bytes()));
  }

  void get_cpool_and_tags(Register cpool, Register tags) {
    get_constant_pool(cpool);
    ld(tags, Address(cpool, ConstantPool::tags_offset_in_bytes()));
  }

  void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
  void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
  void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
  void get_method_counters(Register method, Register mcs, Label& skip);

  // Load cpool->resolved_references(index).
  void load_resolved_reference_at_index(Register result, Register index, Register tmp = x15);

  // Load cpool->resolved_klass_at(index).
  void load_resolved_klass_at_offset(Register cpool, Register index, Register klass, Register temp);

  void load_resolved_method_at_index(int byte_no, Register method, Register cache);

  void pop_ptr(Register r = x10);
  void pop_i(Register r = x10);
  void pop_l(Register r = x10);
  void pop_f(FloatRegister r = f10);
  void pop_d(FloatRegister r = f10);
  void push_ptr(Register r = x10);
  void push_i(Register r = x10);
  void push_l(Register r = x10);
  void push_f(FloatRegister r = f10);
  void push_d(FloatRegister r = f10);

  void pop(TosState state); // transition vtos -> state
  void push(TosState state); // transition state -> vtos

  void empty_expression_stack() {
    ld(esp, Address(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    // NULL last_sp until next java call
    sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  }

  // Helpers for swap and dup
  void load_ptr(int n, Register val);
  void store_ptr(int n, Register val);

  // Load float value from 'address'. The value is loaded onto the FPU register v0.
  void load_float(Address src);
  void load_double(Address src);

  // Generate a subtype check: branch to ok_is_subtype if sub_klass is
  // a subtype of super_klass.
  void gen_subtype_check( Register sub_klass, Label &ok_is_subtype );

  // Dispatching
  void dispatch_prolog(TosState state, int step = 0);
  void dispatch_epilog(TosState state, int step = 0);
  // dispatch via t0
  void dispatch_only(TosState state, bool generate_poll = false, Register Rs = t0);
  // dispatch normal table via t0 (assume t0 is loaded already)
  void dispatch_only_normal(TosState state, Register Rs = t0);
  void dispatch_only_noverify(TosState state, Register Rs = t0);
  // load t0 from [xbcp + step] and dispatch via t0
  void dispatch_next(TosState state, int step = 0, bool generate_poll = false);
  // load t0 from [xbcp] and dispatch via t0 and table
  void dispatch_via (TosState state, address* table);

  // jump to an invoked target
  void prepare_to_jump_from_interpreted();
  void jump_from_interpreted(Register method);


  // Returning from interpreted functions
  //
  // Removes the current activation (incl. unlocking of monitors)
  // and sets up the return address. This code is also used for
  // exception unwinding. In that case, we do not want to throw
  // IllegalMonitorStateExceptions, since that might get us into an
  // infinite rethrow exception loop.
  // Additionally this code is used for popFrame and earlyReturn.
  // In popFrame case we want to skip throwing an exception,
  // installing an exception, and notifying jvmdi.
  // In earlyReturn case we only want to skip throwing an exception
  // and installing an exception.
  void remove_activation(TosState state,
                         bool throw_monitor_exception = true,
                         bool install_monitor_exception = true,
                         bool notify_jvmdi = true);

  // FIXME: Give us a valid frame at a null check.
  virtual void null_check(Register reg, int offset = -1) {
    MacroAssembler::null_check(reg, offset);
  }

  // Object locking
  void lock_object  (Register lock_reg);
  void unlock_object(Register lock_reg);

  // Interpreter profiling operations
  void set_method_data_pointer_for_bcp();
  void test_method_data_pointer(Register mdp, Label& zero_continue);
  void verify_method_data_pointer();

  void set_mdp_data_at(Register mdp_in, int constant, Register value);
  void increment_mdp_data_at(Address data, bool decrement = false);
  void increment_mdp_data_at(Register mdp_in, int constant,
                             bool decrement = false);
  void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
                             bool decrement = false);
  void increment_mask_and_jump(Address counter_addr,
                               int increment, Address mask,
                               Register tmp1, Register tmp2,
                               bool preloaded, Label* where);

  void set_mdp_flag_at(Register mdp_in, int flag_constant);
  void test_mdp_data_at(Register mdp_in, int offset, Register value,
                        Register test_value_out,
                        Label& not_equal_continue);

  void record_klass_in_profile(Register receiver, Register mdp,
                               Register reg2, bool is_virtual_call);
  void record_klass_in_profile_helper(Register receiver, Register mdp,
                                      Register reg2,
                                      Label& done, bool is_virtual_call);
  void record_item_in_profile_helper(Register item, Register mdp,
                                     Register reg2, int start_row, Label& done, int total_rows,
                                     OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
                                     int non_profiled_offset);

  void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
  void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
  void update_mdp_by_constant(Register mdp_in, int constant);
  void update_mdp_for_ret(Register return_bci);

  // narrow int return value
  void narrow(Register result);

  void profile_taken_branch(Register mdp, Register bumped_count);
  void profile_not_taken_branch(Register mdp);
  void profile_call(Register mdp);
  void profile_final_call(Register mdp);
  void profile_virtual_call(Register receiver, Register mdp,
                            Register t1,
                            bool receiver_can_be_null = false);
  void profile_ret(Register return_bci, Register mdp);
  void profile_null_seen(Register mdp);
  void profile_typecheck(Register mdp, Register klass, Register temp);
  void profile_typecheck_failed(Register mdp);
  void profile_switch_default(Register mdp);
  void profile_switch_case(Register index_in_scratch, Register mdp,
                           Register temp);

  void profile_obj_type(Register obj, const Address& mdo_addr, Register tmp);
  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
  void profile_return_type(Register mdp, Register ret, Register tmp);
  void profile_parameters_type(Register mdp, Register tmp1, Register tmp2, Register tmp3);

  // Debugging
  // only if +VerifyFPU && (state == ftos || state == dtos)
  void verify_FPU(int stack_depth, TosState state = ftos);

  typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;

  // support for jvmti/dtrace
  void notify_method_entry();
  void notify_method_exit(TosState state, NotifyMethodExitMode mode);

  virtual void _call_Unimplemented(address call_site) {
    save_bcp();
    set_last_Java_frame(esp, fp, (address) pc(), t0);
    MacroAssembler::_call_Unimplemented(call_site);
  }

#ifdef ASSERT
  void verify_access_flags(Register access_flags, uint32_t flag_bits,
                           const char* msg, bool stop_by_hit = true);
  void verify_frame_setup();
#endif
};

#endif // CPU_RISCV_INTERP_MASM_RISCV_HPP
295
src/hotspot/cpu/riscv/interpreterRT_riscv.cpp
Normal file
@@ -0,0 +1,295 @@
/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/signature.hpp"

#define __ _masm->

// Implementation of SignatureHandlerGenerator
Register InterpreterRuntime::SignatureHandlerGenerator::from() { return xlocals; }
Register InterpreterRuntime::SignatureHandlerGenerator::to()   { return sp; }
Register InterpreterRuntime::SignatureHandlerGenerator::temp() { return t0; }

Register InterpreterRuntime::SignatureHandlerGenerator::next_gpr() {
  if (_num_reg_int_args < Argument::n_int_register_parameters_c - 1) {
    return g_INTArgReg[++_num_reg_int_args];
  }
  return noreg;
}

FloatRegister InterpreterRuntime::SignatureHandlerGenerator::next_fpr() {
  if (_num_reg_fp_args < Argument::n_float_register_parameters_c) {
    return g_FPArgReg[_num_reg_fp_args++];
  } else {
    return fnoreg;
  }
}

int InterpreterRuntime::SignatureHandlerGenerator::next_stack_offset() {
  int ret = _stack_offset;
  _stack_offset += wordSize;
  return ret;
}

InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(
    const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
  _masm = new MacroAssembler(buffer); // allocate on resource area by default
  _num_reg_int_args = (method->is_static() ? 1 : 0);
  _num_reg_fp_args = 0;
  _stack_offset = 0;
}

void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
  const Address src(from(), Interpreter::local_offset_in_bytes(offset()));

  Register reg = next_gpr();
  if (reg != noreg) {
    __ lw(reg, src);
  } else {
    __ lw(x10, src);
    __ sw(x10, Address(to(), next_stack_offset()));
  }
}

void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
  const Address src(from(), Interpreter::local_offset_in_bytes(offset() + 1));

  Register reg = next_gpr();
  if (reg != noreg) {
    __ ld(reg, src);
  } else {
    __ ld(x10, src);
    __ sd(x10, Address(to(), next_stack_offset()));
  }
}

void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
  const Address src(from(), Interpreter::local_offset_in_bytes(offset()));

  FloatRegister reg = next_fpr();
  if (reg != fnoreg) {
    __ flw(reg, src);
  } else {
    // a floating-point argument is passed according to the integer calling
    // convention if no floating-point argument register available
    pass_int();
  }
}

void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
  const Address src(from(), Interpreter::local_offset_in_bytes(offset() + 1));

  FloatRegister reg = next_fpr();
  if (reg != fnoreg) {
    __ fld(reg, src);
  } else {
    // a floating-point argument is passed according to the integer calling
    // convention if no floating-point argument register available
    pass_long();
  }
}

void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
  Register reg = next_gpr();
  if (reg == c_rarg1) {
    assert(offset() == 0, "argument register 1 can only be (non-null) receiver");
    __ addi(c_rarg1, from(), Interpreter::local_offset_in_bytes(offset()));
  } else if (reg != noreg) {
    // c_rarg2-c_rarg7
    __ addi(x10, from(), Interpreter::local_offset_in_bytes(offset()));
    __ mv(reg, zr); //_num_reg_int_args:c_rarg -> 1:c_rarg2, 2:c_rarg3...
    __ ld(temp(), x10);
    Label L;
    __ beqz(temp(), L);
    __ mv(reg, x10);
    __ bind(L);
  } else {
    //to stack
    __ addi(x10, from(), Interpreter::local_offset_in_bytes(offset()));
    __ ld(temp(), x10);
    Label L;
    __ bnez(temp(), L);
    __ mv(x10, zr);
    __ bind(L);
    assert(sizeof(jobject) == wordSize, "");
    __ sd(x10, Address(to(), next_stack_offset()));
  }
}

void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) {
  // generate code to handle arguments
  iterate(fingerprint);

  // return result handler
  __ la(x10, ExternalAddress(Interpreter::result_handler(method()->result_type())));
  __ ret();

  __ flush();
}


// Implementation of SignatureHandlerLibrary

void SignatureHandlerLibrary::pd_set_handler(address handler) {}


class SlowSignatureHandler
  : public NativeSignatureIterator {
 private:
  address   _from;
  intptr_t* _to;
  intptr_t* _int_args;
  intptr_t* _fp_args;
  intptr_t* _fp_identifiers;
  unsigned int _num_reg_int_args;
  unsigned int _num_reg_fp_args;

  intptr_t* single_slot_addr() {
    intptr_t* from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
    _from -= Interpreter::stackElementSize;
    return from_addr;
  }

  intptr_t* double_slot_addr() {
    intptr_t* from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(1));
    _from -= 2 * Interpreter::stackElementSize;
    return from_addr;
  }

  int pass_gpr(intptr_t value) {
    if (_num_reg_int_args < Argument::n_int_register_parameters_c - 1) {
      *_int_args++ = value;
      return _num_reg_int_args++;
    }
    return -1;
  }

  int pass_fpr(intptr_t value) {
    if (_num_reg_fp_args < Argument::n_float_register_parameters_c) {
      *_fp_args++ = value;
      return _num_reg_fp_args++;
    }
    return -1;
  }

  void pass_stack(intptr_t value) {
    *_to++ = value;
  }

  virtual void pass_int() {
    jint value = *(jint*)single_slot_addr();
    if (pass_gpr(value) < 0) {
      pass_stack(value);
    }
  }

  virtual void pass_long() {
    intptr_t value = *double_slot_addr();
    if (pass_gpr(value) < 0) {
      pass_stack(value);
    }
  }

  virtual void pass_object() {
    intptr_t* addr = single_slot_addr();
    intptr_t value = *addr == 0 ? NULL : (intptr_t)addr;
    if (pass_gpr(value) < 0) {
      pass_stack(value);
    }
  }

  virtual void pass_float() {
    jint value = *(jint*) single_slot_addr();
    // a floating-point argument is passed according to the integer calling
    // convention if no floating-point argument register available
    if (pass_fpr(value) < 0 && pass_gpr(value) < 0) {
      pass_stack(value);
    }
  }

  virtual void pass_double() {
    intptr_t value = *double_slot_addr();
    int arg = pass_fpr(value);
    if (0 <= arg) {
      *_fp_identifiers |= (1ull << arg); // mark as double
    } else if (pass_gpr(value) < 0) { // no need to mark if passing by integer registers or stack
      pass_stack(value);
    }
  }

 public:
  SlowSignatureHandler(const methodHandle& method, address from, intptr_t* to)
    : NativeSignatureIterator(method)
  {
    _from = from;
    _to   = to;

    _int_args = to - (method->is_static() ? 16 : 17);
    _fp_args  = to - 8;
    _fp_identifiers = to - 9;
    *(int*) _fp_identifiers = 0;
    _num_reg_int_args = (method->is_static() ? 1 : 0);
    _num_reg_fp_args = 0;
  }

  ~SlowSignatureHandler()
  {
    _from           = NULL;
    _to             = NULL;
    _int_args       = NULL;
    _fp_args        = NULL;
    _fp_identifiers = NULL;
  }
};


JRT_ENTRY(address,
          InterpreterRuntime::slow_signature_handler(JavaThread* current,
                                                     Method* method,
                                                     intptr_t* from,
                                                     intptr_t* to))
  methodHandle m(current, (Method*)method);
  assert(m->is_native(), "sanity check");

  // handle arguments
  SlowSignatureHandler ssh(m, (address)from, to);
  ssh.iterate(UCONST64(-1));

  // return result handler
  return Interpreter::result_handler(m->result_type());
JRT_END
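The register/stack split implemented by pass_gpr()/pass_stack() above can be modeled in a few lines. A standalone sketch under the assumptions the code itself suggests (8 integer argument registers a0-a7, a0 reserved for JNIEnv*, and one more register reserved up front for a static method's class argument):

#include <cstdio>

int main() {
  const unsigned n_int_register_parameters_c = 8; // a0-a7; a0 carries JNIEnv*
  bool is_static = true;                          // a static method burns one more slot
  unsigned num_reg_int_args = is_static ? 1 : 0;
  for (int arg = 0; arg < 10; ++arg) {
    if (num_reg_int_args < n_int_register_parameters_c - 1) {
      std::printf("arg %d -> a%u\n", arg, ++num_reg_int_args); // register-passed
    } else {
      std::printf("arg %d -> stack\n", arg);                   // spilled
    }
  }
  return 0;
}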
68
src/hotspot/cpu/riscv/interpreterRT_riscv.hpp
Normal file
@@ -0,0 +1,68 @@
/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_INTERPRETERRT_RISCV_HPP
#define CPU_RISCV_INTERPRETERRT_RISCV_HPP

// This is included in the middle of class Interpreter.
// Do not include files here.

// native method calls

class SignatureHandlerGenerator: public NativeSignatureIterator {
 private:
  MacroAssembler* _masm;
  unsigned int _num_reg_fp_args;
  unsigned int _num_reg_int_args;
  int _stack_offset;

  void pass_int();
  void pass_long();
  void pass_float();
  void pass_double();
  void pass_object();

  Register next_gpr();
  FloatRegister next_fpr();
  int next_stack_offset();

 public:
  // Creation
  SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer);
  virtual ~SignatureHandlerGenerator() {
    _masm = NULL;
  }

  // Code generation
  void generate(uint64_t fingerprint);

  // Code generation support
  static Register from();
  static Register to();
  static Register temp();
};

#endif // CPU_RISCV_INTERPRETERRT_RISCV_HPP
86
src/hotspot/cpu/riscv/javaFrameAnchor_riscv.hpp
Normal file
@@ -0,0 +1,86 @@
/*
 * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_JAVAFRAMEANCHOR_RISCV_HPP
#define CPU_RISCV_JAVAFRAMEANCHOR_RISCV_HPP

private:

  // FP value associated with _last_Java_sp:
  intptr_t* volatile _last_Java_fp; // pointer is volatile not what it points to

public:
  // Each arch must define reset, save, restore
  // These are used by objects that only care about:
  //  1 - initializing a new state (thread creation, javaCalls)
  //  2 - saving a current state (javaCalls)
  //  3 - restoring an old state (javaCalls)

  void clear(void) {
    // clearing _last_Java_sp must be first
    _last_Java_sp = NULL;
    OrderAccess::release();
    _last_Java_fp = NULL;
    _last_Java_pc = NULL;
  }

  void copy(JavaFrameAnchor* src) {
    // In order to make sure the transition state is valid for "this"
    // We must clear _last_Java_sp before copying the rest of the new data
    //
    // Hack Alert: Temporary bugfix for 4717480/4721647
    // To act like previous version (pd_cache_state) don't NULL _last_Java_sp
    // unless the value is changing
    //
    assert(src != NULL, "Src should not be NULL.");
    if (_last_Java_sp != src->_last_Java_sp) {
      _last_Java_sp = NULL;
      OrderAccess::release();
    }
    _last_Java_fp = src->_last_Java_fp;
    _last_Java_pc = src->_last_Java_pc;
    // Must be last so profiler will always see valid frame if has_last_frame() is true
    _last_Java_sp = src->_last_Java_sp;
  }

  bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; }
  void make_walkable(JavaThread* thread);
  void capture_last_Java_pc(void);

  intptr_t* last_Java_sp(void) const { return _last_Java_sp; }

  const address last_Java_pc(void) { return _last_Java_pc; }

private:

  static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); }

public:

  void set_last_Java_sp(intptr_t* java_sp) { _last_Java_sp = java_sp; OrderAccess::release(); }

  intptr_t* last_Java_fp(void) { return _last_Java_fp; }

#endif // CPU_RISCV_JAVAFRAMEANCHOR_RISCV_HPP
214
src/hotspot/cpu/riscv/jniFastGetField_riscv.cpp
Normal file
@@ -0,0 +1,214 @@
/*
 * Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/safepoint.hpp"

#define __ masm->

#define BUFFER_SIZE 30*wordSize

// Instead of issuing a LoadLoad barrier we create an address
// dependency between loads; this might be more efficient.

// Common register usage:
// x10/f10: result
// c_rarg0: jni env
// c_rarg1: obj
// c_rarg2: jfield id

static const Register robj          = x13;
static const Register rcounter      = x14;
static const Register roffset       = x15;
static const Register rcounter_addr = x16;
static const Register result        = x17;

address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
  const char *name;
  switch (type) {
    case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
    case T_BYTE:    name = "jni_fast_GetByteField";    break;
    case T_CHAR:    name = "jni_fast_GetCharField";    break;
    case T_SHORT:   name = "jni_fast_GetShortField";   break;
    case T_INT:     name = "jni_fast_GetIntField";     break;
    case T_LONG:    name = "jni_fast_GetLongField";    break;
    case T_FLOAT:   name = "jni_fast_GetFloatField";   break;
    case T_DOUBLE:  name = "jni_fast_GetDoubleField";  break;
    default:        ShouldNotReachHere();
      name = NULL; // unreachable
  }
  ResourceMark rm;
  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
  CodeBuffer cbuf(blob);
  MacroAssembler* masm = new MacroAssembler(&cbuf);
  address fast_entry = __ pc();

  Label slow;
  int32_t offset = 0;
  __ la_patchable(rcounter_addr, SafepointSynchronize::safepoint_counter_addr(), offset);
  __ addi(rcounter_addr, rcounter_addr, offset);

  Address safepoint_counter_addr(rcounter_addr, 0);
  __ lwu(rcounter, safepoint_counter_addr);
  // An even value means there are no ongoing safepoint operations
  __ andi(t0, rcounter, 1);
  __ bnez(t0, slow);

  if (JvmtiExport::can_post_field_access()) {
    // Using barrier to order wrt. JVMTI check and load of result.
    __ membar(MacroAssembler::LoadLoad);

    // Check to see if a field access watch has been set before we
    // take the fast path.
    int32_t offset2;
    __ la_patchable(result,
                    ExternalAddress((address) JvmtiExport::get_field_access_count_addr()),
                    offset2);
    __ lwu(result, Address(result, offset2));
    __ bnez(result, slow);

    __ mv(robj, c_rarg1);
  } else {
    // Using address dependency to order wrt. load of result.
    __ xorr(robj, c_rarg1, rcounter);
    __ xorr(robj, robj, rcounter); // obj, since
                                   // robj ^ rcounter ^ rcounter == robj
                                   // robj is address dependent on rcounter.
  }

  // Both robj and t0 are clobbered by try_resolve_jobject_in_native.
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  assert_cond(bs != NULL);
  bs->try_resolve_jobject_in_native(masm, c_rarg0, robj, t0, slow);

  __ srli(roffset, c_rarg2, 2); // offset

  assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
  speculative_load_pclist[count] = __ pc(); // Used by the segfault handler
  __ add(roffset, robj, roffset);

  switch (type) {
    case T_BOOLEAN: __ lbu(result, Address(roffset, 0)); break;
    case T_BYTE:    __ lb(result, Address(roffset, 0));  break;
    case T_CHAR:    __ lhu(result, Address(roffset, 0)); break;
    case T_SHORT:   __ lh(result, Address(roffset, 0));  break;
    case T_INT:     __ lw(result, Address(roffset, 0));  break;
    case T_LONG:    __ ld(result, Address(roffset, 0));  break;
    case T_FLOAT: {
      __ flw(f28, Address(roffset, 0)); // f28 as a temporary
      __ fmv_x_w(result, f28); // f{31--0}-->x
      break;
    }
    case T_DOUBLE: {
      __ fld(f28, Address(roffset, 0)); // f28 as a temporary
      __ fmv_x_d(result, f28); // d{63--0}-->x
      break;
    }
    default: ShouldNotReachHere();
  }

  // Using acquire: Order JVMTI check and load of result wrt. succeeding check
  // (LoadStore for volatile field).
  __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);

  __ lw(t0, safepoint_counter_addr);
  __ bne(rcounter, t0, slow);

  switch (type) {
    case T_FLOAT:  __ fmv_w_x(f10, result); break;
    case T_DOUBLE: __ fmv_d_x(f10, result); break;
    default:       __ mv(x10, result);      break;
  }
  __ ret();

  slowcase_entry_pclist[count++] = __ pc();
  __ bind(slow);
  address slow_case_addr;
  switch (type) {
    case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
    case T_BYTE:    slow_case_addr = jni_GetByteField_addr();    break;
    case T_CHAR:    slow_case_addr = jni_GetCharField_addr();    break;
    case T_SHORT:   slow_case_addr = jni_GetShortField_addr();   break;
    case T_INT:     slow_case_addr = jni_GetIntField_addr();     break;
    case T_LONG:    slow_case_addr = jni_GetLongField_addr();    break;
    case T_FLOAT:   slow_case_addr = jni_GetFloatField_addr();   break;
    case T_DOUBLE:  slow_case_addr = jni_GetDoubleField_addr();  break;
    default:        ShouldNotReachHere();
      slow_case_addr = NULL; // unreachable
  }

  {
    __ enter();
    int32_t tmp_offset = 0;
    __ la_patchable(t0, ExternalAddress(slow_case_addr), tmp_offset);
    __ jalr(x1, t0, tmp_offset);
    __ leave();
    __ ret();
  }
  __ flush();

  return fast_entry;
}


address JNI_FastGetField::generate_fast_get_boolean_field() {
  return generate_fast_get_int_field0(T_BOOLEAN);
}

address JNI_FastGetField::generate_fast_get_byte_field() {
  return generate_fast_get_int_field0(T_BYTE);
}

address JNI_FastGetField::generate_fast_get_char_field() {
  return generate_fast_get_int_field0(T_CHAR);
}

address JNI_FastGetField::generate_fast_get_short_field() {
  return generate_fast_get_int_field0(T_SHORT);
}

address JNI_FastGetField::generate_fast_get_int_field() {
  return generate_fast_get_int_field0(T_INT);
}

address JNI_FastGetField::generate_fast_get_long_field() {
  return generate_fast_get_int_field0(T_LONG);
}

address JNI_FastGetField::generate_fast_get_float_field() {
  return generate_fast_get_int_field0(T_FLOAT);
}

address JNI_FastGetField::generate_fast_get_double_field() {
  return generate_fast_get_int_field0(T_DOUBLE);
}
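The xor sequence in the non-JVMTI path above is worth spelling out. A standalone sketch of the identity it relies on (illustrative only, not part of the patch):

#include <cstdint>

// obj ^ counter ^ counter == obj, but the result is now data-dependent on
// the safepoint-counter load, so a field load through it cannot be hoisted
// above that load on hardware that respects address dependencies; this
// stands in for an explicit LoadLoad fence.
uintptr_t make_address_dependent(uintptr_t obj, uintptr_t counter) {
  uintptr_t r = obj ^ counter;
  return r ^ counter; // same value, dependency established
}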
106
src/hotspot/cpu/riscv/jniTypes_riscv.hpp
Normal file
@@ -0,0 +1,106 @@
/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_JNITYPES_RISCV_HPP
#define CPU_RISCV_JNITYPES_RISCV_HPP

#include "jni.h"
#include "memory/allStatic.hpp"
#include "oops/oop.hpp"

// This file holds platform-dependent routines used to write primitive jni
// types to the array of arguments passed into JavaCalls::call

class JNITypes : private AllStatic {
  // These functions write a java primitive type (in native format)
  // to a java stack slot array to be passed as an argument to JavaCalls:calls.
  // I.e., they are functionally 'push' operations if they have a 'pos'
  // formal parameter. Note that jlong's and jdouble's are written
  // _in reverse_ of the order in which they appear in the interpreter
  // stack. This is because call stubs (see stubGenerator_sparc.cpp)
  // reverse the argument list constructed by JavaCallArguments (see
  // javaCalls.hpp).

public:
  // Ints are stored in native format in one JavaCallArgument slot at *to.
  static inline void put_int(jint  from, intptr_t *to)           { *(jint *)(to +   0  ) =  from; }
  static inline void put_int(jint  from, intptr_t *to, int& pos) { *(jint *)(to + pos++) =  from; }
  static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; }

  // Longs are stored in native format in one JavaCallArgument slot at
  // *(to+1).
  static inline void put_long(jlong  from, intptr_t *to) {
    *(jlong*) (to + 1) = from;
  }

  static inline void put_long(jlong  from, intptr_t *to, int& pos) {
    *(jlong*) (to + 1 + pos) = from;
    pos += 2;
  }

  static inline void put_long(jlong *from, intptr_t *to, int& pos) {
    *(jlong*) (to + 1 + pos) = *from;
    pos += 2;
  }

  // Oops are stored in native format in one JavaCallArgument slot at *to.
  static inline void put_obj(const Handle& from_handle, intptr_t *to, int& pos) { *(to + pos++) = (intptr_t)from_handle.raw_value(); }
  static inline void put_obj(jobject from_handle, intptr_t *to, int& pos) { *(to + pos++) = (intptr_t)from_handle; }

  // Floats are stored in native format in one JavaCallArgument slot at *to.
  static inline void put_float(jfloat  from, intptr_t *to)           { *(jfloat *)(to +   0  ) =  from; }
  static inline void put_float(jfloat  from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) =  from; }
  static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }

#undef _JNI_SLOT_OFFSET
#define _JNI_SLOT_OFFSET 1
  // Doubles are stored in native word format in one JavaCallArgument
  // slot at *(to+1).
  static inline void put_double(jdouble  from, intptr_t *to) {
    *(jdouble*) (to + 1) = from;
  }

  static inline void put_double(jdouble  from, intptr_t *to, int& pos) {
    *(jdouble*) (to + 1 + pos) = from;
    pos += 2;
  }

  static inline void put_double(jdouble *from, intptr_t *to, int& pos) {
    *(jdouble*) (to + 1 + pos) = *from;
    pos += 2;
  }

  // The get_xxx routines, on the other hand, actually _do_ fetch
  // java primitive types from the interpreter stack.
  // No need to worry about alignment on Intel.
  static inline jint    get_int   (intptr_t *from) { return *(jint *)    from; }
  static inline jlong   get_long  (intptr_t *from) { return *(jlong *)  (from + _JNI_SLOT_OFFSET); }
  static inline oop     get_obj   (intptr_t *from) { return *(oop *)     from; }
  static inline jfloat  get_float (intptr_t *from) { return *(jfloat *)  from; }
  static inline jdouble get_double(intptr_t *from) { return *(jdouble *)(from + _JNI_SLOT_OFFSET); }
#undef _JNI_SLOT_OFFSET
};

#endif // CPU_RISCV_JNITYPES_RISCV_HPP
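The two-slot convention for jlong/jdouble above is easy to sanity-check in isolation (standalone sketch, not part of the patch):

#include <cstdint>
#include <cstdio>

int main() {
  intptr_t slots[4] = {0, 0, 0, 0};
  int pos = 0;
  int64_t v = 0x1122334455667788LL;
  *(int64_t*)(slots + 1 + pos) = v;  // put_long(v, slots, pos): value lands in the second slot
  pos += 2;                          // a long consumes two argument slots
  std::printf("%llx\n", (long long)*(int64_t*)(slots + 1)); // get_long reads from + 1
  return 0;
}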
4238
src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
Normal file
File diff suppressed because it is too large
884
src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
Normal file
@@ -0,0 +1,884 @@
/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_MACROASSEMBLER_RISCV_HPP
#define CPU_RISCV_MACROASSEMBLER_RISCV_HPP

#include "asm/assembler.hpp"
#include "metaprogramming/enableIf.hpp"
#include "oops/compressedOops.hpp"
#include "utilities/powerOfTwo.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {
  }
  virtual ~MacroAssembler() {}

  void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod);

  // Place a fence.i after code may have been modified due to a safepoint.
  void safepoint_ifence();

  // Alignment
  void align(int modulus, int extra_offset = 0);

  // Stack frame creation/removal
  // Note that SP must be updated to the right place before saving/restoring RA and FP
  // because signal based thread suspend/resume could happen asynchronously.
  void enter() {
    addi(sp, sp, - 2 * wordSize);
    sd(ra, Address(sp, wordSize));
    sd(fp, Address(sp));
    addi(fp, sp, 2 * wordSize);
  }

  void leave() {
    addi(sp, fp, - 2 * wordSize);
    ld(fp, Address(sp));
    ld(ra, Address(sp, wordSize));
    addi(sp, sp, 2 * wordSize);
  }

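  // For orientation, the frame produced by enter() above (offsets inferred
  // from the code; an editorial sketch, not part of the patch):
  //
  //   old sp --> +----------------+ <-- fp  (fp = new sp + 2 * wordSize)
  //              | saved ra       |     at new sp + wordSize
  //              | saved fp       |     at new sp
  //   new sp --> +----------------+
  //
  // leave() inverts this: it rewinds sp to fp - 2 * wordSize, reloads fp and
  // ra from the two saved words, then pops them.
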
  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result(Register oop_result, Register java_thread);
  void get_vm_result_2(Register metadata_result, Register java_thread);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_0);
  void call_VM_leaf(address entry_point,
                    Register arg_0, Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_0, Register arg_1, Register arg_2);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point, Register arg_0);
  void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register last_java_sp, Register last_java_fp, address last_java_pc, Register tmp);
  void set_last_Java_frame(Register last_java_sp, Register last_java_fp, Label &last_java_pc, Register tmp);
  void set_last_Java_frame(Register last_java_sp, Register last_java_fp, Register last_java_pc, Register tmp);

  // thread in the default location (xthread)
  void reset_last_Java_frame(bool clear_fp);

  void call_native(address entry_point,
                   Register arg_0);
  void call_native_base(
    address entry_point,          // the entry point
    Label*  retaddr = NULL
  );

  virtual void call_VM_leaf_base(
    address entry_point,          // the entry point
    int     number_of_arguments,  // the number of arguments to pop after the call
    Label*  retaddr = NULL
  );

  virtual void call_VM_leaf_base(
    address entry_point,          // the entry point
    int     number_of_arguments,  // the number of arguments to pop after the call
    Label&  retaddr) {
    call_VM_leaf_base(entry_point, number_of_arguments, &retaddr);
  }

  virtual void call_VM_base(      // returns the register containing the thread upon return
    Register oop_result,          // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,         // the thread if computed before; use noreg otherwise
    Register last_java_sp,        // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,         // the entry point
    int      number_of_arguments, // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions     // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);

  virtual void check_and_handle_earlyret(Register java_thread);
  virtual void check_and_handle_popframe(Register java_thread);

  void resolve_weak_handle(Register result, Register tmp);
  void resolve_oop_handle(Register result, Register tmp = x15);
  void resolve_jobject(Register value, Register thread, Register tmp);

  void movoop(Register dst, jobject obj, bool immediate = false);
  void mov_metadata(Register dst, Metadata* obj);
  void bang_stack_size(Register size, Register tmp);
  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_klass(Register dst, Klass* k);

  void load_mirror(Register dst, Register method, Register tmp = x15);
  void access_load_at(BasicType type, DecoratorSet decorators, Register dst,
                      Address src, Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst,
                       Register src, Register tmp1, Register thread_tmp);
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);
  void cmp_klass(Register oop, Register trial_klass, Register tmp, Label &L);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src, Register tmp = xheapbase);
  void decode_klass_not_null(Register dst, Register src, Register tmp = xheapbase);
  void decode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop(Register d, Register s);
  void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
  void encode_heap_oop(Register d, Register s);
  void encode_heap_oop(Register r) { encode_heap_oop(r, r); };
  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
                      Register thread_tmp = noreg, DecoratorSet decorators = 0);

  void store_klass_gap(Register dst, Register src);

  // currently unimplemented
  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  virtual void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // idiv variant which deals with MINLONG as dividend and -1 as divisor
  int corrected_idivl(Register result, Register rs1, Register rs2,
                      bool want_remainder);
  int corrected_idivq(Register result, Register rs1, Register rs2,
                      bool want_remainder);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_tmp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  // n.b. x86 allows RegisterOrConstant for vtable_index
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Form an address from base + offset in Rd. Rd may or may not
  // actually be used: you must use the Address that is returned. It
  // is up to you to ensure that the shift provided matches the size
  // of your data.
  Address form_address(Register Rd, Register base, long byte_offset);

  // allocation
  void tlab_allocate(
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes, // object size in bytes if known at compile time
    Register tmp1,              // temp register
    Register tmp2,              // temp register
    Label&   slow_case,         // continuation point if fast allocation fails
    bool     is_far = false
  );

  void eden_allocate(
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes, // object size in bytes if known at compile time
    Register tmp,               // temp register
    Label&   slow_case,         // continuation point if fast allocation fails
    bool     is_far = false
  );

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except tmp_reg
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register tmp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     Register super_check_offset = noreg);

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The tmp1_reg and tmp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register tmp1_reg,
                                     Register tmp2_reg,
                                     Label* L_success,
                                     Label* L_failure);

  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register tmp_reg,
                           Label& L_success);

  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // only if +VerifyOops
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // A more convenient access to fence for our purposes
  // We use four bits to encode the read and write bits of the predecessor and
  // successor sets, and extend i for r and o for w when UseConservativeFence
  // is enabled.
  enum Membar_mask_bits {
    StoreStore = 0b0101,               // (pred = ow   + succ = ow)
    LoadStore  = 0b1001,               // (pred = ir   + succ = ow)
    StoreLoad  = 0b0110,               // (pred = ow   + succ = ir)
    LoadLoad   = 0b1010,               // (pred = ir   + succ = ir)
    AnyAny     = LoadStore | StoreLoad // (pred = iorw + succ = iorw)
  };

  void membar(uint32_t order_constraint);

  static void membar_mask_to_pred_succ(uint32_t order_constraint, uint32_t& predecessor, uint32_t& successor) {
    predecessor = (order_constraint >> 2) & 0x3;
    successor = order_constraint & 0x3;

    // extend rw -> iorw:
    // 01(w) -> 0101(ow)
    // 10(r) -> 1010(ir)
    // 11(rw)-> 1111(iorw)
    if (UseConservativeFence) {
      predecessor |= predecessor << 2;
      successor |= successor << 2;
    }
  }

  static int pred_succ_to_membar_mask(uint32_t predecessor, uint32_t successor) {
    return ((predecessor & 0x3) << 2) | (successor & 0x3);
  }

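To make the four-bit fence encoding concrete, here is a standalone sketch of the two helpers above (illustrative only, not part of the patch; UseConservativeFence is modeled as always enabled):

#include <cassert>
#include <cstdint>

enum Membar_mask_bits {
  StoreStore = 0b0101, LoadStore = 0b1001,
  StoreLoad  = 0b0110, LoadLoad  = 0b1010,
  AnyAny     = LoadStore | StoreLoad   // 0b1111
};

// Mirrors membar_mask_to_pred_succ with the conservative rw -> iorw widening.
static void mask_to_pred_succ(uint32_t mask, uint32_t& pred, uint32_t& succ) {
  pred = (mask >> 2) & 0x3;
  succ = mask & 0x3;
  pred |= pred << 2;   // extend r/w with i/o
  succ |= succ << 2;
}

int main() {
  uint32_t pred = 0, succ = 0;
  mask_to_pred_succ(StoreLoad, pred, succ);
  assert(pred == 0b0101);  // 'w' widened to 'ow'
  assert(succ == 0b1010);  // 'r' widened to 'ir'  -> emits "fence ow, ir"
  return 0;
}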
  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  static void debug64(char* msg, int64_t pc, int64_t regs[]);

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  static address target_addr_for_insn(address insn_addr);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  static int pd_patch_instruction_size(address branch, address target);
  static void pd_patch_instruction(address branch, address target, const char* file = NULL, int line = 0) {
    pd_patch_instruction_size(branch, target);
  }
  static address pd_call_destination(address branch) {
    return target_addr_for_insn(branch);
  }

  static int patch_oop(address insn_addr, address o);
  address emit_trampoline_stub(int insts_call_instruction_offset, address target);
  void emit_static_call_stub();

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

 public:
  // Standard pseudoinstructions
  void nop();
  void mv(Register Rd, Register Rs);
  void notr(Register Rd, Register Rs);
  void neg(Register Rd, Register Rs);
  void negw(Register Rd, Register Rs);
  void sext_w(Register Rd, Register Rs);
  void zext_b(Register Rd, Register Rs);
  void seqz(Register Rd, Register Rs);          // set if = zero
  void snez(Register Rd, Register Rs);          // set if != zero
  void sltz(Register Rd, Register Rs);          // set if < zero
  void sgtz(Register Rd, Register Rs);          // set if > zero

  // Float pseudoinstructions
  void fmv_s(FloatRegister Rd, FloatRegister Rs);
  void fabs_s(FloatRegister Rd, FloatRegister Rs);    // single-precision absolute value
  void fneg_s(FloatRegister Rd, FloatRegister Rs);

  // Double pseudoinstructions
  void fmv_d(FloatRegister Rd, FloatRegister Rs);
  void fabs_d(FloatRegister Rd, FloatRegister Rs);
  void fneg_d(FloatRegister Rd, FloatRegister Rs);

  // Pseudoinstructions for control and status registers
  void rdinstret(Register Rd);                  // read instruction-retired counter
  void rdcycle(Register Rd);                    // read cycle counter
  void rdtime(Register Rd);                     // read time
  void csrr(Register Rd, unsigned csr);         // read csr
  void csrw(unsigned csr, Register Rs);         // write csr
  void csrs(unsigned csr, Register Rs);         // set bits in csr
  void csrc(unsigned csr, Register Rs);         // clear bits in csr
  void csrwi(unsigned csr, unsigned imm);
  void csrsi(unsigned csr, unsigned imm);
  void csrci(unsigned csr, unsigned imm);
  void frcsr(Register Rd);                      // read floating-point csr
  void fscsr(Register Rd, Register Rs);         // swap floating-point csr
  void fscsr(Register Rs);                      // write floating-point csr
  void frrm(Register Rd);                       // read floating-point rounding mode
  void fsrm(Register Rd, Register Rs);          // swap floating-point rounding mode
  void fsrm(Register Rs);                       // write floating-point rounding mode
  void fsrmi(Register Rd, unsigned imm);
  void fsrmi(unsigned imm);
  void frflags(Register Rd);                    // read floating-point exception flags
  void fsflags(Register Rd, Register Rs);       // swap floating-point exception flags
  void fsflags(Register Rs);                    // write floating-point exception flags
  void fsflagsi(Register Rd, unsigned imm);
  void fsflagsi(unsigned imm);

  void beqz(Register Rs, const address &dest);
  void bnez(Register Rs, const address &dest);
  void blez(Register Rs, const address &dest);
  void bgez(Register Rs, const address &dest);
  void bltz(Register Rs, const address &dest);
  void bgtz(Register Rs, const address &dest);
  void la(Register Rd, Label &label);
  void la(Register Rd, const address &dest);
  void la(Register Rd, const Address &adr);
  // label
  void beqz(Register Rs, Label &l, bool is_far = false);
  void bnez(Register Rs, Label &l, bool is_far = false);
  void blez(Register Rs, Label &l, bool is_far = false);
  void bgez(Register Rs, Label &l, bool is_far = false);
  void bltz(Register Rs, Label &l, bool is_far = false);
  void bgtz(Register Rs, Label &l, bool is_far = false);
  void float_beq(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void float_bne(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void float_ble(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void float_bge(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void float_blt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void float_bgt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void double_beq(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void double_bne(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void double_ble(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void double_bge(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void double_blt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);
  void double_bgt(FloatRegister Rs1, FloatRegister Rs2, Label &l, bool is_far = false, bool is_unordered = false);

  void push_reg(RegSet regs, Register stack) { if (regs.bits()) { push_reg(regs.bits(), stack); } }
  void pop_reg(RegSet regs, Register stack)  { if (regs.bits()) { pop_reg(regs.bits(), stack); } }
  void push_reg(Register Rs);
  void pop_reg(Register Rd);
  int  push_reg(unsigned int bitset, Register stack);
  int  pop_reg(unsigned int bitset, Register stack);
  void push_fp(FloatRegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); }
  void pop_fp(FloatRegSet regs, Register stack)  { if (regs.bits()) pop_fp(regs.bits(), stack); }
#ifdef COMPILER2
  void push_vp(VectorRegSet regs, Register stack) { if (regs.bits()) push_vp(regs.bits(), stack); }
  void pop_vp(VectorRegSet regs, Register stack)  { if (regs.bits()) pop_vp(regs.bits(), stack); }
#endif // COMPILER2

  // Push and pop everything that might be clobbered by a native
  // runtime call except t0 and t1. (They are always
  // temporary registers, so we don't have to protect them.)
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude);
  void pop_call_clobbered_registers_except(RegSet exclude);

  void push_call_clobbered_registers() {
    push_call_clobbered_registers_except(RegSet());
  }
  void pop_call_clobbered_registers() {
    pop_call_clobbered_registers_except(RegSet());
  }

  void pusha();
  void popa();
  void push_CPU_state(bool save_vectors = false, int vector_size_in_bytes = 0);
  void pop_CPU_state(bool restore_vectors = false, int vector_size_in_bytes = 0);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  void bind(Label& L) {
    Assembler::bind(L);
    // fences across basic blocks should not be merged
    code()->clear_last_insn();
  }

  // mv
  template<typename T, ENABLE_IF(std::is_integral<T>::value)>
  inline void mv(Register Rd, T o) {
    li(Rd, (int64_t)o);
  }

  inline void mvw(Register Rd, int32_t imm32) { mv(Rd, imm32); }

  void mv(Register Rd, Address dest);
  void mv(Register Rd, address addr);
  void mv(Register Rd, RegisterOrConstant src);

  // logic
  void andrw(Register Rd, Register Rs1, Register Rs2);
  void orrw(Register Rd, Register Rs1, Register Rs2);
  void xorrw(Register Rd, Register Rs1, Register Rs2);

  // revb
  void revb_h_h(Register Rd, Register Rs, Register tmp = t0);                           // reverse bytes in halfword in lower 16 bits, sign-extend
  void revb_w_w(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);      // reverse bytes in lower word, sign-extend
  void revb_h_h_u(Register Rd, Register Rs, Register tmp = t0);                         // reverse bytes in halfword in lower 16 bits, zero-extend
  void revb_h_w_u(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);    // reverse bytes in halfwords in lower 32 bits, zero-extend
  void revb_h_helper(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1); // reverse bytes in upper 16 bits (48:63) and move to lower
  void revb_h(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);        // reverse bytes in each halfword
  void revb_w(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);        // reverse bytes in each word
  void revb(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);          // reverse bytes in doubleword

  void ror_imm(Register dst, Register src, uint32_t shift, Register tmp = t0);
  void andi(Register Rd, Register Rn, int64_t imm, Register tmp = t0);
  void orptr(Address adr, RegisterOrConstant src, Register tmp1 = t0, Register tmp2 = t1);

  void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp, Label &succeed, Label *fail);
  void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, Label &succeed, Label *fail);
  void cmpxchg(Register addr, Register expected,
               Register new_val,
               enum operand_size size,
               Assembler::Aqrl acquire, Assembler::Aqrl release,
               Register result, bool result_as_bool = false);
  void cmpxchg_weak(Register addr, Register expected,
                    Register new_val,
                    enum operand_size size,
                    Assembler::Aqrl acquire, Assembler::Aqrl release,
                    Register result);
  void cmpxchg_narrow_value_helper(Register addr, Register expected,
                                   Register new_val,
                                   enum operand_size size,
                                   Register tmp1, Register tmp2, Register tmp3);
  void cmpxchg_narrow_value(Register addr, Register expected,
                            Register new_val,
                            enum operand_size size,
                            Assembler::Aqrl acquire, Assembler::Aqrl release,
                            Register result, bool result_as_bool,
                            Register tmp1, Register tmp2, Register tmp3);
  void weak_cmpxchg_narrow_value(Register addr, Register expected,
                                 Register new_val,
                                 enum operand_size size,
                                 Assembler::Aqrl acquire, Assembler::Aqrl release,
                                 Register result,
                                 Register tmp1, Register tmp2, Register tmp3);

  void atomic_add(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addw(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addal(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addalw(Register prev, RegisterOrConstant incr, Register addr);

  void atomic_xchg(Register prev, Register newv, Register addr);
  void atomic_xchgw(Register prev, Register newv, Register addr);
  void atomic_xchgal(Register prev, Register newv, Register addr);
  void atomic_xchgalw(Register prev, Register newv, Register addr);
  void atomic_xchgwu(Register prev, Register newv, Register addr);
  void atomic_xchgalwu(Register prev, Register newv, Register addr);

  void atomic_incw(Register counter_addr, Register tmp);
  void atomic_incw(Address counter_addr, Register tmp1, Register tmp2) {
    la(tmp1, counter_addr);
    atomic_incw(tmp1, tmp2);
  }

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg is killed.
  // tmp_reg must be supplied and must not be t0 or t1
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  void biased_locking_enter(Register lock_reg, Register obj_reg,
                            Register swap_reg, Register tmp_reg,
                            bool swap_reg_contains_mark,
                            Label& done, Label* slow_case = NULL,
                            BiasedLockingCounters* counters = NULL,
                            Register flag = noreg);
  void biased_locking_exit(Register obj_reg, Register tmp_reg, Label& done, Register flag = noreg);

  static bool far_branches() {
    return ReservedCodeCacheSize > branch_range;
  }

  // Jumps that can reach anywhere in the code cache.
  // Trashes tmp.
  void far_call(Address entry, CodeBuffer *cbuf = NULL, Register tmp = t0);
  void far_jump(Address entry, CodeBuffer *cbuf = NULL, Register tmp = t0);

  static int far_branch_size() {
    if (far_branches()) {
      return 2 * 4;  // auipc + jalr, see far_call() & far_jump()
    } else {
      return 4;
    }
  }

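  // Why 2 * 4 above (a sketch of the reasoning, not part of the patch): a
  // single jal reaches only about +-1 MiB, so when the code cache may span
  // more than branch_range, far_call()/far_jump() emit an auipc (upper 20
  // bits of the offset) followed by a jalr (low 12 bits), i.e. two 4-byte
  // instructions instead of one.
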
  void load_byte_map_base(Register reg);

  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    sub(t0, sp, offset);
    sd(zr, Address(t0));
  }

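The probe above writes one zero word a fixed distance below sp; in plain terms the effect is roughly the following (hypothetical helper, for illustration only):

#include <cstdint>

// Touch the location 'offset' bytes below the stack pointer so the OS maps
// in (or faults on) that guard region before the frame is actually used.
// The caller passes a positive offset; the probe address grows downward,
// hence sp - offset.
static void bang_stack_sketch(volatile char* sp, int offset) {
  *(sp - offset) = 0;  // corresponds to sub(t0, sp, offset); sd(zr, Address(t0))
}
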
  void la_patchable(Register reg1, const Address &dest, int32_t &offset);

  virtual void _call_Unimplemented(address call_site) {
    mv(t1, call_site);
  }

#define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)

  // Frame creation and destruction shared between JITs.
  void build_frame(int framesize);
  void remove_frame(int framesize);

  void reserved_stack_check();

  void get_polling_page(Register dest, relocInfo::relocType rtype);
  address read_polling_page(Register r, int32_t offset, relocInfo::relocType rtype);

  address trampoline_call(Address entry, CodeBuffer* cbuf = NULL);
  address ic_call(address entry, jint method_index = 0);

  void add_memory_int64(const Address dst, int64_t imm);
  void add_memory_int32(const Address dst, int32_t imm);

  void cmpptr(Register src1, Address src2, Label& equal);

  void clinit_barrier(Register klass, Register tmp, Label* L_fast_path = NULL, Label* L_slow_path = NULL);
  void load_method_holder_cld(Register result, Register method);
  void load_method_holder(Register holder, Register method);

  void compute_index(Register str1, Register trailing_zeros, Register match_mask,
                     Register result, Register char_tmp, Register tmp,
                     bool haystack_isL);
  void compute_match_mask(Register src, Register pattern, Register match_mask,
                          Register mask1, Register mask2);

#ifdef COMPILER2
  void mul_add(Register out, Register in, Register offset,
               Register len, Register k, Register tmp);
  void cad(Register dst, Register src1, Register src2, Register carry);
  void cadc(Register dst, Register src1, Register src2, Register carry);
  void adc(Register dst, Register src1, Register src2, Register carry);
  void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                       Register src1, Register src2, Register carry);
  void multiply_32_x_32_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_128_x_128_loop(Register y, Register z,
                               Register carry, Register carry2,
                               Register idx, Register jdx,
                               Register yz_idx1, Register yz_idx2,
                               Register tmp, Register tmp3, Register tmp4,
                               Register tmp6, Register product_hi);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen,
                       Register z, Register zlen,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                       Register tmp5, Register tmp6, Register product_hi);
#endif

  void inflate_lo32(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);
  void inflate_hi32(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1);

  void ctzc_bit(Register Rd, Register Rs, bool isLL = false, Register tmp1 = t0, Register tmp2 = t1);

  void zero_words(Register base, u_int64_t cnt);
  address zero_words(Register ptr, Register cnt);
  void fill_words(Register base, Register cnt, Register value);
  void zero_memory(Register addr, Register len, Register tmp);

  // shift left by shamt and add
  void shadd(Register Rd, Register Rs1, Register Rs2, Register tmp, int shamt);

  // The *_safe variants of the float conversion instructions below deal with
  // exceptional inputs: converting NaN, +Inf or -Inf to an integer would
  // otherwise produce an incorrect result, so these helpers handle those
  // cases explicitly to get correct results.
  void fcvt_w_s_safe(Register dst, FloatRegister src, Register tmp = t0);
  void fcvt_l_s_safe(Register dst, FloatRegister src, Register tmp = t0);
  void fcvt_w_d_safe(Register dst, FloatRegister src, Register tmp = t0);
  void fcvt_l_d_safe(Register dst, FloatRegister src, Register tmp = t0);

  // vector load/store unit-stride instructions
  void vlex_v(VectorRegister vd, Register base, Assembler::SEW sew, VectorMask vm = unmasked) {
    switch (sew) {
      case Assembler::e64:
        vle64_v(vd, base, vm);
        break;
      case Assembler::e32:
        vle32_v(vd, base, vm);
        break;
      case Assembler::e16:
        vle16_v(vd, base, vm);
        break;
      case Assembler::e8: // fall through
      default:
        vle8_v(vd, base, vm);
        break;
    }
  }

  void vsex_v(VectorRegister store_data, Register base, Assembler::SEW sew, VectorMask vm = unmasked) {
    switch (sew) {
      case Assembler::e64:
        vse64_v(store_data, base, vm);
        break;
      case Assembler::e32:
        vse32_v(store_data, base, vm);
        break;
      case Assembler::e16:
        vse16_v(store_data, base, vm);
        break;
      case Assembler::e8: // fall through
      default:
        vse8_v(store_data, base, vm);
        break;
    }
  }

  static const int zero_words_block_size;

  void cast_primitive_type(BasicType type, Register Rt) {
    switch (type) {
      case T_BOOLEAN:
        sltu(Rt, zr, Rt);
        break;
      case T_CHAR   :
        zero_extend(Rt, Rt, 16);
        break;
      case T_BYTE   :
        sign_extend(Rt, Rt, 8);
        break;
      case T_SHORT  :
        sign_extend(Rt, Rt, 16);
        break;
      case T_INT    :
        addw(Rt, Rt, zr);
        break;
      case T_LONG   : /* nothing to do */ break;
      case T_VOID   : /* nothing to do */ break;
      case T_FLOAT  : /* nothing to do */ break;
      case T_DOUBLE : /* nothing to do */ break;
      default: ShouldNotReachHere();
    }
  }

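  // Note on the T_BOOLEAN arm above: sltu(Rt, zr, Rt) computes the unsigned
  // comparison Rt = (0 < Rt), so zero stays zero and every other bit pattern
  // collapses to exactly 1 -- a canonical Java boolean. In plain C++ terms
  // (sketch only): uint64_t normalize_boolean(uint64_t rt) { return 0 < rt; }
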
  // float cmp with unordered_result
  void float_compare(Register result, FloatRegister Rs1, FloatRegister Rs2, int unordered_result);
  void double_compare(Register result, FloatRegister Rs1, FloatRegister Rs2, int unordered_result);

  // Zero/Sign-extend
  void zero_extend(Register dst, Register src, int bits);
  void sign_extend(Register dst, Register src, int bits);

  // compare src1 and src2 and get -1/0/1 in dst.
  // if [src1 > src2], dst = 1;
  // if [src1 == src2], dst = 0;
  // if [src1 < src2], dst = -1;
  void cmp_l2i(Register dst, Register src1, Register src2, Register tmp = t0);

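A branch-free reference version of the -1/0/1 contract documented above (a sketch of the semantics only, not the emitted instruction sequence):

#include <cstdint>

// Matches the cmp_l2i comment: 1 if src1 > src2, 0 if equal, -1 if less.
static inline int cmp_l2i_reference(int64_t src1, int64_t src2) {
  return (src1 > src2) - (src1 < src2);
}
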
  int push_fp(unsigned int bitset, Register stack);
  int pop_fp(unsigned int bitset, Register stack);

  int push_vp(unsigned int bitset, Register stack);
  int pop_vp(unsigned int bitset, Register stack);

  // vext
  void vmnot_m(VectorRegister vd, VectorRegister vs);
  void vncvt_x_x_w(VectorRegister vd, VectorRegister vs, VectorMask vm = unmasked);
  void vfneg_v(VectorRegister vd, VectorRegister vs);

 private:

#ifdef ASSERT
  // Template short-hand support to clean up after a failed call to trampoline
  // call generation (see trampoline_call() below), when a set of Labels must
  // be reset (before returning).
  template<typename Label, typename... More>
  void reset_labels(Label& lbl, More&... more) {
    lbl.reset(); reset_labels(more...);
  }
  template<typename Label>
  void reset_labels(Label& lbl) {
    lbl.reset();
  }
#endif
  void repne_scan(Register addr, Register value, Register count, Register tmp);

  // Return true if an address is within the 48-bit RISCV64 address space.
  bool is_valid_riscv64_address(address addr) {
    return ((uintptr_t)addr >> 48) == 0;
  }

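  // The shift by 48 above corresponds to an Sv48 virtual address space: the
  // check accepts only pointers whose top 16 bits are clear, i.e. the lower
  // (user-space) half of the address range. Equivalent predicate in
  // isolation (sketch): bool ok = ((uintptr_t)addr >> 48) == 0;
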
  void ld_constant(Register dest, const Address &const_addr) {
    if (NearCpool) {
      ld(dest, const_addr);
    } else {
      int32_t offset = 0;
      la_patchable(dest, InternalAddress(const_addr.target()), offset);
      ld(dest, Address(dest, offset));
    }
  }

  int bitset_to_regs(unsigned int bitset, unsigned char* regs);
  Address add_memory_helper(const Address dst);

  void load_reserved(Register addr, enum operand_size size, Assembler::Aqrl acquire);
  void store_conditional(Register addr, Register new_val, enum operand_size size, Assembler::Aqrl release);

  // Check that the current thread doesn't need a cross-modify fence.
  void verify_cross_modify_fence_not_required() PRODUCT_RETURN;

  void load_prototype_header(Register dst, Register src);
};

#ifdef ASSERT
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};

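// Typical usage (a hedged sketch; DTraceMethodProbes is one flag commonly
// guarded this way in HotSpot, but any const bool* works):
//
//   {
//     SkipIfEqual skip(_masm, &DTraceMethodProbes, false);
//     // ... assembly emitted here is jumped over at run time whenever
//     // DTraceMethodProbes == false ...
//   }
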
#endif // CPU_RISCV_MACROASSEMBLER_RISCV_HPP
31
src/hotspot/cpu/riscv/macroAssembler_riscv.inline.hpp
Normal file
@@ -0,0 +1,31 @@
/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_MACROASSEMBLER_RISCV_INLINE_HPP
#define CPU_RISCV_MACROASSEMBLER_RISCV_INLINE_HPP

// Still empty.

#endif // CPU_RISCV_MACROASSEMBLER_RISCV_INLINE_HPP
169
src/hotspot/cpu/riscv/matcher_riscv.hpp
Normal file
@@ -0,0 +1,169 @@
/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_MATCHER_RISCV_HPP
#define CPU_RISCV_MATCHER_RISCV_HPP

// Defined within class Matcher

  // false => size gets scaled to BytesPerLong, ok.
  static const bool init_array_count_is_in_bytes = false;

  // Whether this platform implements the scalable vector feature
  static const bool implements_scalable_vector = true;

  static const bool supports_scalable_vector() {
    return UseRVV;
  }

  // riscv supports misaligned vector stores/loads.
  static constexpr bool misaligned_vectors_ok() {
    return true;
  }

  // Whether code generation needs accurate ConvI2L types.
  static const bool convi2l_type_required = false;

  // Does the CPU require late expand (see block.cpp for description of late expand)?
  static const bool require_postalloc_expand = false;

  // Do we need to mask the count passed to shift instructions or does
  // the cpu only look at the lower 5/6 bits anyway?
  static const bool need_masked_shift_count = false;

  // No support for generic vector operands.
  static const bool supports_generic_vector_operands = false;

  static constexpr bool isSimpleConstant64(jlong value) {
    // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
    // Probably always true, even if a temp register is required.
    return true;
  }

  // Use conditional move (CMOVL)
  static constexpr int long_cmove_cost() {
    // long cmoves are no more expensive than int cmoves
    return 0;
  }

  static constexpr int float_cmove_cost() {
    // float cmoves are no more expensive than int cmoves
    return 0;
  }

  // This affects two different things:
  //  - how Decode nodes are matched
  //  - how ImplicitNullCheck opportunities are recognized
  // If true, the matcher will try to remove all Decodes and match them
  // (as operands) into nodes. NullChecks are not prepared to deal with
  // Decodes by final_graph_reshaping().
  // If false, final_graph_reshaping() forces the decode behind the Cmp
  // for a NullCheck. The matcher matches the Decode node into a register.
  // Implicit_null_check optimization moves the Decode along with the
  // memory operation back up before the NullCheck.
  static bool narrow_oop_use_complex_address() {
    return CompressedOops::shift() == 0;
  }

  static bool narrow_klass_use_complex_address() {
    return false;
  }

  static bool const_oop_prefer_decode() {
    // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
    return CompressedOops::base() == NULL;
  }

  static bool const_klass_prefer_decode() {
    // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
    return CompressedKlassPointers::base() == NULL;
  }

  // Is it better to copy float constants, or load them directly from
  // memory? Intel can load a float constant from a direct address,
  // requiring no extra registers. Most RISCs will have to materialize
  // an address into a register first, so they would do better to copy
  // the constant from stack.
  static const bool rematerialize_float_constants = false;

  // If CPU can load and store mis-aligned doubles directly then no
  // fixup is needed. Else we split the double into 2 integer pieces
  // and move it piece-by-piece. Only happens when passing doubles into
  // C code as the Java calling convention forces doubles to be aligned.
  static const bool misaligned_doubles_ok = true;

  // Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
  static const bool strict_fp_requires_explicit_rounding = false;

  // Are floats converted to double when stored to stack during
  // deoptimization?
  static constexpr bool float_in_double() { return false; }

  // Do ints take an entire long register or just half?
  // The relevant question is how the int is callee-saved:
  // the whole long is written but de-opt'ing will have to extract
  // the relevant 32 bits.
  static const bool int_in_long = true;

  // Does the CPU support vector variable shift instructions?
  static constexpr bool supports_vector_variable_shifts(void) {
    return false;
  }

  // Does the CPU support vector variable rotate instructions?
  static constexpr bool supports_vector_variable_rotates(void) {
    return false;
  }

  // Does the CPU support vector constant rotate instructions?
  static constexpr bool supports_vector_constant_rotates(int shift) {
    return false;
  }

  // Does the CPU support vector unsigned comparison instructions?
  static const bool supports_vector_comparison_unsigned(int vlen, BasicType bt) {
    return false;
  }

  // Some microarchitectures have mask registers used on vectors
  static const bool has_predicated_vectors(void) {
    return false;
  }

  // true means we have fast l2f conversion
  // false means that conversion is done by runtime call
  static constexpr bool convL2FSupported(void) {
    return true;
  }

  // Implements a variant of EncodeISOArrayNode that encodes ASCII only
  static const bool supports_encode_ascii_array = false;

  // Returns pre-selection estimated size of a vector operation.
  static int vector_op_pre_select_sz_estimate(int vopc, BasicType ety, int vlen) {
    return 0;
  }

#endif // CPU_RISCV_MATCHER_RISCV_HPP
461
src/hotspot/cpu/riscv/methodHandles_riscv.cpp
Normal file
@@ -0,0 +1,461 @@
/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmClasses.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/stubRoutines.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
  assert_cond(_masm != NULL);
  if (VerifyMethodHandles) {
    verify_klass(_masm, klass_reg, VM_CLASS_ID(java_lang_Class),
                 "MH argument is a Class");
  }
  __ ld(klass_reg, Address(klass_reg, java_lang_Class::klass_offset()));
}

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, "%s should be nonzero", xname);
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT

#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, vmClassID klass_id,
                                 const char* error_message) {
  assert_cond(_masm != NULL);
  InstanceKlass** klass_addr = vmClasses::klass_addr_at(klass_id);
  Klass* klass = vmClasses::klass_at(klass_id);
  Register temp = t1;
  Register temp2 = t0; // used by MacroAssembler::cmpptr
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj);
  __ beqz(obj, L_bad);
  __ push_reg(RegSet::of(temp, temp2), sp);
  __ load_klass(temp, obj);
  __ cmpptr(temp, ExternalAddress((address) klass_addr), L_ok);
  intptr_t super_check_offset = klass->super_check_offset();
  __ ld(temp, Address(temp, super_check_offset));
  __ cmpptr(temp, ExternalAddress((address) klass_addr), L_ok);
  __ pop_reg(RegSet::of(temp, temp2), sp);
  __ bind(L_bad);
  __ stop(error_message);
  __ BIND(L_ok);
  __ pop_reg(RegSet::of(temp, temp2), sp);
  BLOCK_COMMENT("} verify_klass");
}

void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {}

#endif //ASSERT

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
                                            bool for_compiler_entry) {
  assert_cond(_masm != NULL);
  assert(method == xmethod, "interpreter calling convention");
  Label L_no_such_method;
  __ beqz(xmethod, L_no_such_method);
  __ verify_method_ptr(method);

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.

    __ lwu(t0, Address(xthread, JavaThread::interp_only_mode_offset()));
    __ beqz(t0, run_compiled_code);
    __ ld(t0, Address(method, Method::interpreter_entry_offset()));
    __ jr(t0);
    __ BIND(run_compiled_code);
  }

  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
  __ ld(t0, Address(method, entry_offset));
  __ jr(t0);
  __ bind(L_no_such_method);
  __ far_jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
}

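// In outline (editorial summary, not part of the patch), the function above
// tail-calls through the Method*:
//   entry = for_compiler_entry ? Method::from_compiled_offset()
//                              : Method::from_interpreted_offset();
//   ld t0, [method + entry]; jr t0
// with a fallback far_jump to the AbstractMethodError stub when the method
// register is null.
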
|
||||
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
|
||||
Register recv, Register method_temp,
|
||||
Register temp2,
|
||||
bool for_compiler_entry) {
|
||||
assert_cond(_masm != NULL);
|
||||
BLOCK_COMMENT("jump_to_lambda_form {");
|
||||
// This is the initial entry point of a lazy method handle.
|
||||
// After type checking, it picks up the invoker from the LambdaForm.
|
||||
assert_different_registers(recv, method_temp, temp2);
|
||||
assert(recv != noreg, "required register");
|
||||
assert(method_temp == xmethod, "required register for loading method");
|
||||
|
||||
// Load the invoker, as MH -> MH.form -> LF.vmentry
|
||||
__ verify_oop(recv);
|
||||
__ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset())), temp2);
|
||||
__ verify_oop(method_temp);
|
||||
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset())), temp2);
|
||||
__ verify_oop(method_temp);
|
||||
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset())), temp2);
|
||||
__ verify_oop(method_temp);
|
||||
__ access_load_at(T_ADDRESS, IN_HEAP, method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset())), noreg, noreg);
|
||||
|
||||
if (VerifyMethodHandles && !for_compiler_entry) {
|
||||
// make sure recv is already on stack
|
||||
__ ld(temp2, Address(method_temp, Method::const_offset()));
|
||||
__ load_sized_value(temp2,
|
||||
Address(temp2, ConstMethod::size_of_parameters_offset()),
|
||||
sizeof(u2), /*is_signed*/ false);
|
||||
Label L;
|
||||
__ ld(t0, __ argument_address(temp2, -1));
|
||||
__ beq(recv, t0, L);
|
||||
__ ld(x10, __ argument_address(temp2, -1));
|
||||
__ ebreak();
|
||||
__ BIND(L);
|
||||
}
|
||||
|
||||
jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
|
||||
BLOCK_COMMENT("} jump_to_lambda_form");
|
||||
}
|
||||
|
||||
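// Added sketch: the four loads above walk a fixed object graph,
// MH.form -> LambdaForm.vmentry -> MemberName.method -> ResolvedMethodName.vmtarget.
// The helper below is hypothetical (not a HotSpot function); it only restates
// the traversal in straight-line C++ using the raw oop field accessors.
// static Method* resolve_lambda_form_target(oop mh) {
//   oop form    = mh->obj_field(java_lang_invoke_MethodHandle::form_offset());
//   oop vmentry = form->obj_field(java_lang_invoke_LambdaForm::vmentry_offset());
//   oop rmname  = vmentry->obj_field(java_lang_invoke_MemberName::method_offset());
//   // vmtarget is an injected field holding a raw Method*.
//   return (Method*)rmname->address_field(java_lang_invoke_ResolvedMethodName::vmtarget_offset());
// }
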
// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  assert_cond(_masm != NULL);
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
    __ ebreak();  // empty stubs make SG sick
    return NULL;
  }

  // No need in interpreter entry for linkToNative for now.
  // Interpreter calls compiled entry through i2c.
  if (iid == vmIntrinsics::_linkToNative) {
    __ ebreak();
    return NULL;
  }

  // x30: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // xmethod: Method*
  // x13: argument locator (parameter slot count, added to sp)
  // x11: used as temp to hold mh or receiver
  // x10, x29: garbage temps, blown away
  Register argp = x13;  // argument list ptr, live on error paths
  Register mh   = x11;  // MH receiver; dies quickly and is recycled

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    __ lhu(t0, Address(xmethod, Method::intrinsic_id_offset_in_bytes()));
    __ mv(t1, (int) iid);
    __ beq(t0, t1, L);
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*::intrinsic_id");
    }
    __ ebreak();
    __ bind(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }

  // First task: Find out how big the argument list is.
  Address x13_first_arg_addr;
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
    __ ld(argp, Address(xmethod, Method::const_offset()));
    __ load_sized_value(argp,
                        Address(argp, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    x13_first_arg_addr = __ argument_address(argp, -1);
  } else {
    DEBUG_ONLY(argp = noreg);
  }

  if (!is_signature_polymorphic_static(iid)) {
    __ ld(mh, x13_first_arg_addr);
    DEBUG_ONLY(argp = noreg);
  }

  // x13_first_arg_addr is live!

  trace_method_handle_interpreter_entry(_masm, iid);
  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, mh, noreg, not_for_compiler_entry);
  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ ld(recv = x12, x13_first_arg_addr);
    }
    DEBUG_ONLY(argp = noreg);
    Register xmember = xmethod;  // MemberName ptr; incoming method ptr is dead now
    __ pop_reg(xmember);         // extract last argument
    generate_method_handle_dispatch(_masm, iid, recv, xmember, not_for_compiler_entry);
  }

  return entry_point;
}

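// Added illustration of the argument locator, under the assumption that
// interpreter arguments are word-sized stack slots with the last argument at
// sp (the helper is hypothetical, not HotSpot API): with n parameter slots,
// the leftmost argument -- the MethodHandle or receiver -- sits n - 1 slots
// above sp, which is what `__ argument_address(argp, -1)` addresses.
// static intptr_t* first_arg_slot(intptr_t* sp, int size_of_parameters) {
//   return sp + (size_of_parameters - 1);
// }
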
void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert_cond(_masm != NULL);
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  // temps used in this code are not used in *either* compiled or interpreted calling sequences
  Register temp1 = x7;
  Register temp2 = x28;
  Register temp3 = x29;  // x30 is live by this point: it contains the sender SP
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
    assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
    assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
    assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
  }

  assert_different_registers(temp1, temp2, temp3, receiver_reg);
  assert_different_registers(temp1, temp2, temp3, member_reg);

  if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
    if (iid == vmIntrinsics::_linkToNative) {
      assert(for_compiler_entry, "only compiler entry is supported");
    }
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, xmethod, temp1, for_compiler_entry);
  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, VM_CLASS_ID(java_lang_invoke_MemberName),
                   "MemberName required for invokeVirtual etc.");
    }

    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset()));
    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset()));
    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::method_offset()));
    Address vmtarget_method( xmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset()));

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check(receiver_reg);
      } else {
        // load receiver klass itself
        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop(temp2_defc, member_clazz, temp3);
        load_klass_from_Class(_masm, temp2_defc);
        __ verify_klass_ptr(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
        // If we get here, the type check failed!
        __ ebreak();
        __ bind(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  x30 - interpreter linkage (if interpreted)
    //  x11 ... x10 - compiler arguments (if compiled)

    Label L_incompatible_class_change_error;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
      __ load_heap_oop(xmethod, member_vmtarget);
      __ access_load_at(T_ADDRESS, IN_HEAP, xmethod, vmtarget_method, noreg, noreg);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
      __ load_heap_oop(xmethod, member_vmtarget);
      __ access_load_at(T_ADDRESS, IN_HEAP, xmethod, vmtarget_method, noreg, noreg);
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ access_load_at(T_ADDRESS, IN_HEAP, temp2_index, member_vmindex, noreg, noreg);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ bgez(temp2_index, L_index_ok);
        __ ebreak();
        __ BIND(L_index_ok);
      }

      // Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point. And VerifyMethodHandles has already checked clazz, if needed.

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, xmethod);
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
      }

      Register temp3_intf = temp3;
      __ load_heap_oop(temp3_intf, member_clazz);
      load_klass_from_Class(_masm, temp3_intf);
      __ verify_klass_ptr(temp3_intf);

      Register rindex = xmethod;
      __ access_load_at(T_ADDRESS, IN_HEAP, rindex, member_vmindex, noreg, noreg);
      if (VerifyMethodHandles) {
        Label L;
        __ bgez(rindex, L);
        __ ebreak();
        __ bind(L);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
                                 // note: next two args must be the same:
                                 rindex, xmethod,
                                 temp2,
                                 L_incompatible_class_change_error);
      break;
    }

    default:
      fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
      break;
    }

    // live at this point: xmethod, x30 (if interpreted)

    // After figuring out which concrete method to call, jump into it.
    // Note that this works in the interpreter with no data motion.
    // But the compiled version will require that x12_recv be shifted out.
    __ verify_method_ptr(xmethod);
    jump_from_method_handle(_masm, xmethod, temp1, for_compiler_entry);
    if (iid == vmIntrinsics::_linkToInterface) {
      __ bind(L_incompatible_class_change_error);
      __ far_jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
    }
  }

}

#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
                              oopDesc* mh,
                              intptr_t* saved_regs,
                              intptr_t* entry_sp) { }

// The stub wraps the arguments in a struct on the stack to avoid
// dealing with the different calling conventions for passing 6
// arguments.
struct MethodHandleStubArguments {
  const char* adaptername;
  oopDesc* mh;
  intptr_t* saved_regs;
  intptr_t* entry_sp;
};
void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) { }

void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) { }
#endif //PRODUCT
57
src/hotspot/cpu/riscv/methodHandles_riscv.hpp
Normal file
@@ -0,0 +1,57 @@
/*
 * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Platform-specific definitions for method handles.
// These definitions are inlined into class MethodHandles.

// Adapters
enum /* platform_dependent_constants */ {
  adapter_code_size = 32000 DEBUG_ONLY(+ 120000)
};

public:

  static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg);

  static void verify_klass(MacroAssembler* _masm,
                           Register obj, vmClassID klass_id,
                           const char* error_message = "wrong klass") NOT_DEBUG_RETURN;

  static void verify_method_handle(MacroAssembler* _masm, Register mh_reg) {
    verify_klass(_masm, mh_reg, VM_CLASS_ID(java_lang_invoke_MethodHandle),
                 "reference is a MH");
  }

  static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN;

  // Similar to InterpreterMacroAssembler::jump_from_interpreted.
  // Takes care of special dispatch from single stepping too.
  static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
                                      bool for_compiler_entry);

  static void jump_to_lambda_form(MacroAssembler* _masm,
                                  Register recv, Register method_temp,
                                  Register temp2,
                                  bool for_compiler_entry);
429
src/hotspot/cpu/riscv/nativeInst_riscv.cpp
Normal file
@@ -0,0 +1,429 @@
/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/compiledIC.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

Register NativeInstruction::extract_rs1(address instr) {
  assert_cond(instr != NULL);
  return as_Register(Assembler::extract(((unsigned*)instr)[0], 19, 15));
}

Register NativeInstruction::extract_rs2(address instr) {
  assert_cond(instr != NULL);
  return as_Register(Assembler::extract(((unsigned*)instr)[0], 24, 20));
}

Register NativeInstruction::extract_rd(address instr) {
  assert_cond(instr != NULL);
  return as_Register(Assembler::extract(((unsigned*)instr)[0], 11, 7));
}

uint32_t NativeInstruction::extract_opcode(address instr) {
  assert_cond(instr != NULL);
  return Assembler::extract(((unsigned*)instr)[0], 6, 0);
}

uint32_t NativeInstruction::extract_funct3(address instr) {
  assert_cond(instr != NULL);
  return Assembler::extract(((unsigned*)instr)[0], 14, 12);
}

bool NativeInstruction::is_pc_relative_at(address instr) {
  // auipc + jalr
  // auipc + addi
  // auipc + load
  // auipc + float_load
  return (is_auipc_at(instr)) &&
         (is_addi_at(instr + instruction_size) ||
          is_jalr_at(instr + instruction_size) ||
          is_load_at(instr + instruction_size) ||
          is_float_load_at(instr + instruction_size)) &&
         check_pc_relative_data_dependency(instr);
}

// i.e. ld(Rd, Label)
bool NativeInstruction::is_load_pc_relative_at(address instr) {
  return is_auipc_at(instr) &&                 // auipc
         is_ld_at(instr + instruction_size) && // ld
         check_load_pc_relative_data_dependency(instr);
}

bool NativeInstruction::is_movptr_at(address instr) {
  return is_lui_at(instr) &&                                   // Lui
         is_addi_at(instr + instruction_size) &&               // Addi
         is_slli_shift_at(instr + instruction_size * 2, 11) && // Slli Rd, Rs, 11
         is_addi_at(instr + instruction_size * 3) &&           // Addi
         is_slli_shift_at(instr + instruction_size * 4, 5) &&  // Slli Rd, Rs, 5
         (is_addi_at(instr + instruction_size * 5) ||
          is_jalr_at(instr + instruction_size * 5) ||
          is_load_at(instr + instruction_size * 5)) &&         // Addi/Jalr/Load
         check_movptr_data_dependency(instr);
}

bool NativeInstruction::is_li32_at(address instr) {
  return is_lui_at(instr) &&                   // lui
         is_addiw_at(instr + instruction_size) && // addiw
         check_li32_data_dependency(instr);
}

bool NativeInstruction::is_li64_at(address instr) {
  return is_lui_at(instr) &&                                   // lui
         is_addi_at(instr + instruction_size) &&               // addi
         is_slli_shift_at(instr + instruction_size * 2, 12) && // Slli Rd, Rs, 12
         is_addi_at(instr + instruction_size * 3) &&           // addi
         is_slli_shift_at(instr + instruction_size * 4, 12) && // Slli Rd, Rs, 12
         is_addi_at(instr + instruction_size * 5) &&           // addi
         is_slli_shift_at(instr + instruction_size * 6, 8) &&  // Slli Rd, Rs, 8
         is_addi_at(instr + instruction_size * 7) &&           // addi
         check_li64_data_dependency(instr);
}

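// Added sketch: the shift amounts these recognizers check imply a simple bit
// budget for the two constant-materialization patterns. This arithmetic is
// inferred from the shifts alone (the MacroAssembler is the authority on the
// actual split): movptr builds a ~32-bit upper chunk with lui+addi, then the
// two slli+addi steps append 11 and 5 more bits, with the final
// addi/jalr/load immediate supplying the low bits of a 48-bit VA.
static_assert(32 + 11 + 5 == 48, "movptr covers a 48-bit virtual address");
// li64 appends 12, 12 and 8 bits after its lui+addi pair:
static_assert(32 + 12 + 12 + 8 == 64, "li64 covers a full 64-bit constant");
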
void NativeCall::verify() {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

address NativeCall::destination() const {
  address addr = (address)this;
  assert(NativeInstruction::is_jal_at(instruction_address()), "inst must be jal.");
  address destination = MacroAssembler::target_addr_for_insn(instruction_address());

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);  // Else we get assertion if nmethod is zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm != NULL && nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// Add parameter assert_lock to switch off assertion
// during code generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
         CompiledICLocker::is_safe(addr_at(0)),
         "concurrent code patching");

  ResourceMark rm;
  address addr_call = addr_at(0);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

  // Patch the constant in the call's trampoline stub.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert(!is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }

  // Patch the call.
  if (Assembler::reachable_from_branch_at(addr_call, dest)) {
    set_destination(dest);
  } else {
    assert(trampoline_stub_addr != NULL, "we need a trampoline");
    set_destination(trampoline_stub_addr);
  }

  ICache::invalidate_range(addr_call, instruction_size);
}

address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address jal_destination = MacroAssembler::pd_call_destination(call_addr);
  if (code != NULL && code->contains(jal_destination) && is_NativeCallTrampolineStub_at(jal_destination)) {
    return jal_destination;
  }

  if (code != NULL && code->is_nmethod()) {
    return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
  }

  return NULL;
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  if (!(nativeInstruction_at(instruction_address())->is_movptr() ||
        is_auipc_at(instruction_address()))) {
    fatal("should be MOVPTR or AUIPC");
  }
}

intptr_t NativeMovConstReg::data() const {
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    return *(intptr_t*)addr;
  } else {
    return (intptr_t)addr;
  }
}

void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    // Store x into the instruction stream.
    MacroAssembler::pd_patch_instruction_size(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), movptr_instruction_size);
  }

  // Find and replace the oop/metadata corresponding to this
  // instruction in oops section.
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop* oop_addr = iter.oop_reloc()->oop_addr();
        *oop_addr = cast_to_oop(x);
        break;
      } else if (iter.type() == relocInfo::metadata_type) {
        Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
        *metadata_addr = (Metadata*)x;
        break;
      }
    }
  }
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

int NativeMovRegMem::offset() const {
  Unimplemented();
  return 0;
}

void NativeMovRegMem::set_offset(int x) { Unimplemented(); }

void NativeMovRegMem::verify() {
  Unimplemented();
}

//--------------------------------------------------------------------------------

void NativeJump::verify() { }


void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}


address NativeJump::jump_destination() const {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  // As a special case we also use sequence movptr_with_offset(r,0), jalr(r,0)
  // i.e. jump to 0 when we need to leave space for a wide immediate
  // load

  // return -1 if jump to self or to 0
  if ((dest == (address) this) || dest == 0) {
    dest = (address) -1;
  }

  return dest;
}

void NativeJump::set_jump_destination(address dest) {
  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1)
    dest = instruction_address();

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
}

//-------------------------------------------------------------------

address NativeGeneralJump::jump_destination() const {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
  address dest = (address) move->data();

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  // As a special case we also use jump to 0 when first generating
  // a general jump

  // return -1 if jump to self or to 0
  if ((dest == (address) this) || dest == 0) {
    dest = (address) -1;
  }

  return dest;
}

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  return is_lwu_to_zr(address(this));
}

bool NativeInstruction::is_lwu_to_zr(address instr) {
  assert_cond(instr != NULL);
  return (extract_opcode(instr) == 0b0000011 &&
          extract_funct3(instr) == 0b110 &&
          extract_rd(instr) == zr);  // zr
}

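// Added note on the poll shape checked above: a safepoint poll is
// `lwu zr, off(rN)` -- opcode 0b0000011 (LOAD) with funct3 0b110 (LWU) and
// rd = zr -- so the loaded value is discarded; only the memory access, and
// the fault it takes when the polling page is armed, matters.
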
// A 16-bit instruction with all bits ones is permanently reserved as an illegal instruction.
bool NativeInstruction::is_sigill_zombie_not_entrant() {
  // jvmci
  return uint_at(0) == 0xffffffff;
}

void NativeIllegalInstruction::insert(address code_pos) {
  assert_cond(code_pos != NULL);
  *(juint*)code_pos = 0xffffffff;  // all bits ones is permanently reserved as an illegal instruction
}

bool NativeInstruction::is_stop() {
  return uint_at(0) == 0xffffffff;  // an illegal instruction
}

//-------------------------------------------------------------------

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");

  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop() ||
         nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
         "riscv cannot replace non-jump with jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t offset = dest - verified_entry;
    guarantee(is_imm_in_range(offset, 20, 1), "offset is too large to be patched in one jal instruction.");  // 1M

    uint32_t insn = 0;
    address pInsn = (address)&insn;
    Assembler::patch(pInsn, 31, 31, (offset >> 20) & 0x1);
    Assembler::patch(pInsn, 30, 21, (offset >> 1) & 0x3ff);
    Assembler::patch(pInsn, 20, 20, (offset >> 11) & 0x1);
    Assembler::patch(pInsn, 19, 12, (offset >> 12) & 0xff);
    Assembler::patch(pInsn, 11, 7, 0);        // zero, no link jump
    Assembler::patch(pInsn, 6, 0, 0b1101111); // j, (jal x0 offset)
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

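// Added stand-alone illustration of the patching above (a hypothetical helper,
// not HotSpot code): build `jal x0, offset` by scattering the J-type immediate
// imm[20|10:1|11|19:12] into bits [31|30:21|20|19:12]; rd (bits 11:7) stays
// zero for a plain jump.
static uint32_t encode_jal_x0(ptrdiff_t offset) {
  uint32_t insn = 0b1101111;                         // jal opcode
  insn |= (uint32_t)((offset >> 20) & 0x1)   << 31;  // imm[20]
  insn |= (uint32_t)((offset >> 1)  & 0x3ff) << 21;  // imm[10:1]
  insn |= (uint32_t)((offset >> 11) & 0x1)   << 20;  // imm[11]
  insn |= (uint32_t)((offset >> 12) & 0xff)  << 12;  // imm[19:12]
  return insn;
}
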
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  int32_t offset = 0;
  a.movptr_with_offset(t0, entry, offset); // lui, addi, slli, addi, slli
  a.jalr(x0, t0, offset);                  // jalr

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}


address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  OrderAccess::release();
}

uint32_t NativeMembar::get_kind() {
  uint32_t insn = uint_at(0);

  uint32_t predecessor = Assembler::extract(insn, 27, 24);
  uint32_t successor = Assembler::extract(insn, 23, 20);

  return MacroAssembler::pred_succ_to_membar_mask(predecessor, successor);
}

void NativeMembar::set_kind(uint32_t order_kind) {
  uint32_t predecessor = 0;
  uint32_t successor = 0;

  MacroAssembler::membar_mask_to_pred_succ(order_kind, predecessor, successor);

  uint32_t insn = uint_at(0);
  address pInsn = (address) &insn;
  Assembler::patch(pInsn, 27, 24, predecessor);
  Assembler::patch(pInsn, 23, 20, successor);

  address membar = addr_at(0);
  *(unsigned int*) membar = insn;
}
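// Added context for the bit ranges used above: in the RISC-V `fence`
// encoding, bits 27:24 hold the predecessor set and bits 23:20 the successor
// set, each an {i,o,r,w} bitmask (i=8, o=4, r=2, w=1); e.g. `fence rw,rw`
// carries pred = succ = 0b0011. A bare decoder, sketched for illustration:
struct FenceSets { uint32_t pred; uint32_t succ; };
static FenceSets decode_fence_sets(uint32_t insn) {
  return { (insn >> 24) & 0xf, (insn >> 20) & 0xf };
}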
572
src/hotspot/cpu/riscv/nativeInst_riscv.hpp
Normal file
@@ -0,0 +1,572 @@
/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_NATIVEINST_RISCV_HPP
#define CPU_RISCV_NATIVEINST_RISCV_HPP

#include "asm/assembler.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovRegMem
// - - NativeJump
// - - NativeGeneralJump
// - - NativeIllegalInstruction
// - - NativeCallTrampolineStub
// - - NativeMembar
// - - NativeFenceI

// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.

class NativeCall;

class NativeInstruction {
  friend class Relocation;
  friend bool is_NativeCallTrampolineStub_at(address);
 public:
  enum {
    instruction_size = 4,
    compressed_instruction_size = 2,
  };

  juint encoding() const {
    return uint_at(0);
  }

  bool is_jal()    const { return is_jal_at(addr_at(0)); }
  bool is_movptr() const { return is_movptr_at(addr_at(0)); }
  bool is_call()   const { return is_call_at(addr_at(0)); }
  bool is_jump()   const { return is_jump_at(addr_at(0)); }

  static bool is_jal_at(address instr)        { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1101111; }
  static bool is_jalr_at(address instr)       { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1100111 && extract_funct3(instr) == 0b000; }
  static bool is_branch_at(address instr)     { assert_cond(instr != NULL); return extract_opcode(instr) == 0b1100011; }
  static bool is_ld_at(address instr)         { assert_cond(instr != NULL); return is_load_at(instr) && extract_funct3(instr) == 0b011; }
  static bool is_load_at(address instr)       { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0000011; }
  static bool is_float_load_at(address instr) { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0000111; }
  static bool is_auipc_at(address instr)      { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0010111; }
  static bool is_jump_at(address instr)       { assert_cond(instr != NULL); return is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr); }
  static bool is_addi_at(address instr)       { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0010011 && extract_funct3(instr) == 0b000; }
  static bool is_addiw_at(address instr)      { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0011011 && extract_funct3(instr) == 0b000; }
  static bool is_lui_at(address instr)        { assert_cond(instr != NULL); return extract_opcode(instr) == 0b0110111; }
  static bool is_slli_shift_at(address instr, uint32_t shift) {
    assert_cond(instr != NULL);
    return (extract_opcode(instr) == 0b0010011 &&                         // opcode field
            extract_funct3(instr) == 0b001 &&                             // funct3 field, select the type of operation
            Assembler::extract(((unsigned*)instr)[0], 25, 20) == shift);  // shamt field
  }

  static Register extract_rs1(address instr);
  static Register extract_rs2(address instr);
  static Register extract_rd(address instr);
  static uint32_t extract_opcode(address instr);
  static uint32_t extract_funct3(address instr);

  // the instruction sequence of movptr is as below:
  //   lui
  //   addi
  //   slli
  //   addi
  //   slli
  //   addi/jalr/load
  static bool check_movptr_data_dependency(address instr) {
    address lui = instr;
    address addi1 = lui + instruction_size;
    address slli1 = addi1 + instruction_size;
    address addi2 = slli1 + instruction_size;
    address slli2 = addi2 + instruction_size;
    address last_instr = slli2 + instruction_size;
    return extract_rs1(addi1) == extract_rd(lui) &&
           extract_rs1(addi1) == extract_rd(addi1) &&
           extract_rs1(slli1) == extract_rd(addi1) &&
           extract_rs1(slli1) == extract_rd(slli1) &&
           extract_rs1(addi2) == extract_rd(slli1) &&
           extract_rs1(addi2) == extract_rd(addi2) &&
           extract_rs1(slli2) == extract_rd(addi2) &&
           extract_rs1(slli2) == extract_rd(slli2) &&
           extract_rs1(last_instr) == extract_rd(slli2);
  }

  // the instruction sequence of li64 is as below:
  //   lui
  //   addi
  //   slli
  //   addi
  //   slli
  //   addi
  //   slli
  //   addi
  static bool check_li64_data_dependency(address instr) {
    address lui = instr;
    address addi1 = lui + instruction_size;
    address slli1 = addi1 + instruction_size;
    address addi2 = slli1 + instruction_size;
    address slli2 = addi2 + instruction_size;
    address addi3 = slli2 + instruction_size;
    address slli3 = addi3 + instruction_size;
    address addi4 = slli3 + instruction_size;
    return extract_rs1(addi1) == extract_rd(lui) &&
           extract_rs1(addi1) == extract_rd(addi1) &&
           extract_rs1(slli1) == extract_rd(addi1) &&
           extract_rs1(slli1) == extract_rd(slli1) &&
           extract_rs1(addi2) == extract_rd(slli1) &&
           extract_rs1(addi2) == extract_rd(addi2) &&
           extract_rs1(slli2) == extract_rd(addi2) &&
           extract_rs1(slli2) == extract_rd(slli2) &&
           extract_rs1(addi3) == extract_rd(slli2) &&
           extract_rs1(addi3) == extract_rd(addi3) &&
           extract_rs1(slli3) == extract_rd(addi3) &&
           extract_rs1(slli3) == extract_rd(slli3) &&
           extract_rs1(addi4) == extract_rd(slli3) &&
           extract_rs1(addi4) == extract_rd(addi4);
  }

  // the instruction sequence of li32 is as below:
  //   lui
  //   addiw
  static bool check_li32_data_dependency(address instr) {
    address lui = instr;
    address addiw = lui + instruction_size;

    return extract_rs1(addiw) == extract_rd(lui) &&
           extract_rs1(addiw) == extract_rd(addiw);
  }

  // the instruction sequence of pc-relative is as below:
  //   auipc
  //   jalr/addi/load/float_load
  static bool check_pc_relative_data_dependency(address instr) {
    address auipc = instr;
    address last_instr = auipc + instruction_size;

    return extract_rs1(last_instr) == extract_rd(auipc);
  }

  // the instruction sequence of load_label is as below:
  //   auipc
  //   load
  static bool check_load_pc_relative_data_dependency(address instr) {
    address auipc = instr;
    address load = auipc + instruction_size;

    return extract_rd(load) == extract_rd(auipc) &&
           extract_rs1(load) == extract_rd(load);
  }

  static bool is_movptr_at(address instr);
  static bool is_li32_at(address instr);
  static bool is_li64_at(address instr);
  static bool is_pc_relative_at(address branch);
  static bool is_load_pc_relative_at(address branch);

  static bool is_call_at(address instr) {
    if (is_jal_at(instr) || is_jalr_at(instr)) {
      return true;
    }
    return false;
  }
  static bool is_lwu_to_zr(address instr);

  inline bool is_nop();
  inline bool is_jump_or_nop();
  bool is_safepoint_poll();
  bool is_sigill_zombie_not_entrant();
  bool is_stop();

 protected:
  address addr_at(int offset) const { return address(this) + offset; }

  jint int_at(int offset) const   { return *(jint*) addr_at(offset); }
  juint uint_at(int offset) const { return *(juint*) addr_at(offset); }

  address ptr_at(int offset) const { return *(address*) addr_at(offset); }

  oop oop_at(int offset) const { return *(oop*) addr_at(offset); }


  void set_int_at(int offset, jint i)      { *(jint*)addr_at(offset) = i; }
  void set_uint_at(int offset, jint i)     { *(juint*)addr_at(offset) = i; }
  void set_ptr_at(int offset, address ptr) { *(address*) addr_at(offset) = ptr; }
  void set_oop_at(int offset, oop o)       { *(oop*) addr_at(offset) = o; }

 public:

  inline friend NativeInstruction* nativeInstruction_at(address addr);

  static bool maybe_cpool_ref(address instr) {
    return is_auipc_at(instr);
  }

  bool is_membar() {
    return (uint_at(0) & 0x7f) == 0b1111 && extract_funct3(addr_at(0)) == 0;
  }
};

inline NativeInstruction* nativeInstruction_at(address addr) {
  return (NativeInstruction*)addr;
}

// The natural type of a RISC-V instruction is uint32_t
inline NativeInstruction* nativeInstruction_at(uint32_t *addr) {
  return (NativeInstruction*)addr;
}

inline NativeCall* nativeCall_at(address addr);
// The NativeCall is an abstraction for accessing/manipulating native
// call instructions (used to manipulate inline caches, primitive &
// DSO calls, etc.).

class NativeCall: public NativeInstruction {
 public:
  enum RISCV_specific_constants {
    instruction_size = 4,
    instruction_offset = 0,
    displacement_offset = 0,
    return_address_offset = 4
  };

  address instruction_address() const      { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(return_address_offset); }
  address return_address() const           { return addr_at(return_address_offset); }
  address destination() const;

  void set_destination(address dest) {
    assert(is_jal(), "Should be jal instruction!");
    intptr_t offset = (intptr_t)(dest - instruction_address());
    assert((offset & 0x1) == 0, "bad alignment");
    assert(is_imm_in_range(offset, 20, 1), "encoding constraint");
    unsigned int insn = 0b1101111; // jal
    address pInsn = (address)(&insn);
    Assembler::patch(pInsn, 31, 31, (offset >> 20) & 0x1);
    Assembler::patch(pInsn, 30, 21, (offset >> 1) & 0x3ff);
    Assembler::patch(pInsn, 20, 20, (offset >> 11) & 0x1);
    Assembler::patch(pInsn, 19, 12, (offset >> 12) & 0xff);
    Assembler::patch(pInsn, 11, 7, ra->encoding()); // Rd must be x1, need ra
    set_int_at(displacement_offset, insn);
  }

  void verify_alignment() {} // do nothing on riscv
  void verify();
  void print();

  // Creation
  inline friend NativeCall* nativeCall_at(address addr);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);

  // Similar to replace_mt_safe, but just changes the destination. The
  // important thing is that free-running threads are able to execute
  // this call instruction at all times. If the call is an immediate BL
  // instruction we can simply rely on atomicity of 32-bit writes to
  // make sure other threads will see no intermediate states.

  // We cannot rely on locks here, since the free-running threads must run at
  // full speed.
  //
  // Used in the runtime linkage of calls; see class CompiledIC.
  // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)

  // The parameter assert_lock disables the assertion during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  address get_trampoline();
};

inline NativeCall* nativeCall_at(address addr) {
  assert_cond(addr != NULL);
  NativeCall* call = (NativeCall*)(addr - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  assert_cond(return_address != NULL);
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

// An interface for accessing/manipulating native mov reg, imm instructions.
// (used to manipulate inlined 64-bit data calls, etc.)
class NativeMovConstReg: public NativeInstruction {
 public:
  enum RISCV_specific_constants {
    movptr_instruction_size             = 6 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli, addi. See movptr().
    movptr_with_offset_instruction_size = 5 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli. See movptr_with_offset().
    load_pc_relative_instruction_size   = 2 * NativeInstruction::instruction_size, // auipc, ld
    instruction_offset = 0,
    displacement_offset = 0
  };

  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const {
    // if the instruction at 5 * instruction_size is addi,
    // it means a lui + addi + slli + addi + slli + addi instruction sequence,
    // and the next instruction address should be addr_at(6 * instruction_size).
    // However, when the instruction at 5 * instruction_size isn't addi,
    // the next instruction address should be addr_at(5 * instruction_size)
    if (nativeInstruction_at(instruction_address())->is_movptr()) {
      if (is_addi_at(addr_at(movptr_with_offset_instruction_size))) {
        // Assume: lui, addi, slli, addi, slli, addi
        return addr_at(movptr_instruction_size);
      } else {
        // Assume: lui, addi, slli, addi, slli
        return addr_at(movptr_with_offset_instruction_size);
      }
    } else if (is_load_pc_relative_at(instruction_address())) {
      // Assume: auipc, ld
      return addr_at(load_pc_relative_instruction_size);
    }
    guarantee(false, "Unknown instruction in NativeMovConstReg");
    return NULL;
  }

  intptr_t data() const;
  void set_data(intptr_t x);

  void flush() {
    if (!maybe_cpool_ref(instruction_address())) {
      ICache::invalidate_range(instruction_address(), movptr_instruction_size);
    }
  }

  void verify();
  void print();

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address addr);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address addr);
};

inline NativeMovConstReg* nativeMovConstReg_at(address addr) {
  assert_cond(addr != NULL);
  NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

inline NativeMovConstReg* nativeMovConstReg_before(address addr) {
  assert_cond(addr != NULL);
  NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

// RISCV should not use C1 runtime patching, so just leave NativeMovRegMem Unimplemented.
class NativeMovRegMem: public NativeInstruction {
 public:
  int instruction_start() const {
    Unimplemented();
    return 0;
  }

  address instruction_address() const {
    Unimplemented();
    return NULL;
  }

  int num_bytes_to_end_of_patch() const {
    Unimplemented();
    return 0;
  }

  int offset() const;

  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) { Unimplemented(); }

  void verify();
  void print();

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at (address addr);
};

inline NativeMovRegMem* nativeMovRegMem_at (address addr) {
  Unimplemented();
  return NULL;
}

class NativeJump: public NativeInstruction {
 public:
  enum RISCV_specific_constants {
    instruction_size = NativeInstruction::instruction_size,
    instruction_offset = 0,
    data_offset = 0,
    next_instruction_offset = NativeInstruction::instruction_size
  };

  address instruction_address() const      { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(instruction_size); }
  address jump_destination() const;
  void set_jump_destination(address dest);

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address addr) {
  NativeJump* jump = (NativeJump*)(addr - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

class NativeGeneralJump: public NativeJump {
 public:
  enum RISCV_specific_constants {
    instruction_size = 6 * NativeInstruction::instruction_size,        // lui, addi, slli, addi, slli, jalr
    instruction_offset = 0,
    data_offset = 0,
    next_instruction_offset = 6 * NativeInstruction::instruction_size // lui, addi, slli, addi, slli, jalr
  };

  address jump_destination() const;

  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);
};

inline NativeGeneralJump* nativeGeneralJump_at(address addr) {
  assert_cond(addr != NULL);
  NativeGeneralJump* jump = (NativeGeneralJump*)(addr);
  debug_only(jump->verify();)
  return jump;
}

class NativeIllegalInstruction: public NativeInstruction {
 public:
  // Insert an illegal opcode at a specific address
  static void insert(address code_pos);
};

inline bool NativeInstruction::is_nop() {
  uint32_t insn = *(uint32_t*)addr_at(0);
  return insn == 0x13;
}

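// Added note: the canonical RISC-V nop is `addi x0, x0, 0`; its only nonzero
// field is the opcode 0b0010011 = 0x13 (rd, rs1 and the immediate are all
// zero), which is why the bare 0x13 comparison above suffices.
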
inline bool NativeInstruction::is_jump_or_nop() {
  return is_nop() || is_jump();
}

// Call trampoline stubs.
class NativeCallTrampolineStub : public NativeInstruction {
 public:

  enum RISCV_specific_constants {
    // Refer to function emit_trampoline_stub.
    instruction_size = 3 * NativeInstruction::instruction_size + wordSize, // auipc + ld + jr + target address
    data_offset      = 3 * NativeInstruction::instruction_size,           // auipc + ld + jr
  };

  address destination(nmethod *nm = NULL) const;
  void set_destination(address new_destination);
  ptrdiff_t destination_offset() const;
};

inline bool is_NativeCallTrampolineStub_at(address addr) {
  // Ensure that the stub is exactly
  //   ld   t0, L--->auipc + ld
  //   jr   t0
  // L:

  // judge inst + register + imm
  // 1). check the instructions: auipc + ld + jalr
  // 2). check if auipc[11:7] == t0 and ld[11:7] == t0 and ld[19:15] == t0 && jr[19:15] == t0
  // 3). check if the offset in ld[31:20] equals the data_offset
  assert_cond(addr != NULL);
  const int instr_size = NativeInstruction::instruction_size;
  if (NativeInstruction::is_auipc_at(addr) &&
      NativeInstruction::is_ld_at(addr + instr_size) &&
      NativeInstruction::is_jalr_at(addr + 2 * instr_size) &&
      (NativeInstruction::extract_rd(addr)                   == x5) &&
      (NativeInstruction::extract_rd(addr + instr_size)      == x5) &&
      (NativeInstruction::extract_rs1(addr + instr_size)     == x5) &&
      (NativeInstruction::extract_rs1(addr + 2 * instr_size) == x5) &&
      (Assembler::extract(((unsigned*)addr)[1], 31, 20) == NativeCallTrampolineStub::data_offset)) {
    return true;
  }
  return false;
}

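// Added layout recap (byte offsets assume the 4-byte instructions and
// data_offset = 12 defined above):
//   stub + 0:  auipc t0, 0       ; t0 = address of the stub
//   stub + 4:  ld    t0, 12(t0)  ; load the 8-byte target from the data slot
//   stub + 8:  jr    t0
//   stub + 12: <target address>  ; the slot set_destination() patches
// Hypothetical helper locating that slot, for illustration only:
inline address* trampoline_target_slot(address stub) {
  return (address*)(stub + NativeCallTrampolineStub::data_offset);
}
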
inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
  assert_cond(addr != NULL);
  assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found");
  return (NativeCallTrampolineStub*)addr;
}

class NativeMembar : public NativeInstruction {
 public:
  uint32_t get_kind();
  void set_kind(uint32_t order_kind);
};

inline NativeMembar *NativeMembar_at(address addr) {
  assert_cond(addr != NULL);
  assert(nativeInstruction_at(addr)->is_membar(), "no membar found");
  return (NativeMembar*)addr;
}

class NativeFenceI : public NativeInstruction {
 public:
  static inline int instruction_size() {
    // 2 for fence.i + fence
    return (UseConservativeFence ? 2 : 1) * NativeInstruction::instruction_size;
  }
};

#endif // CPU_RISCV_NATIVEINST_RISCV_HPP
45
src/hotspot/cpu/riscv/registerMap_riscv.cpp
Normal file
@@ -0,0 +1,45 @@
/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/registerMap.hpp"
#include "vmreg_riscv.inline.hpp"

address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const {
  if (base_reg->is_VectorRegister()) {
    assert(base_reg->is_concrete(), "must pass base reg");
    int base_reg_enc = (base_reg->value() - ConcreteRegisterImpl::max_fpr) /
                       VectorRegisterImpl::max_slots_per_register;
    intptr_t offset_in_bytes = slot_idx * VMRegImpl::stack_slot_size;
    address base_location = location(base_reg);
    if (base_location != NULL) {
      return base_location + offset_in_bytes;
    } else {
      return NULL;
    }
  } else {
    return location(base_reg->next(slot_idx));
  }
}
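pd_location() above resolves slot slot_idx of a saved vector register to an address: the register's base save location plus slot_idx stack slots. A minimal sketch of that arithmetic, assuming the port's values of 4 slots per vector register and 4-byte stack slots (the array and names here are hypothetical, not HotSpot types):

#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr int kSlotsPerVector = 4;  // VectorRegisterImpl::max_slots_per_register
constexpr int kStackSlotSize  = 4;  // VMRegImpl::stack_slot_size

int main() {
  // Pretend one vector register was saved starting at this base address.
  uint8_t saved[kSlotsPerVector * kStackSlotSize] = {0};
  uint8_t* base_location = saved;

  for (int slot_idx = 0; slot_idx < kSlotsPerVector; slot_idx++) {
    intptr_t offset_in_bytes = slot_idx * kStackSlotSize;  // same formula as pd_location
    printf("slot %d -> base + %ld\n", slot_idx, (long)offset_in_bytes);
    assert(base_location + offset_in_bytes < saved + sizeof(saved));
  }
  return 0;
}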
43
src/hotspot/cpu/riscv/registerMap_riscv.hpp
Normal file
@@ -0,0 +1,43 @@
/*
 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_REGISTERMAP_RISCV_HPP
#define CPU_RISCV_REGISTERMAP_RISCV_HPP

// machine-dependent implementation for register maps
  friend class frame;

 private:
  // This is the hook for finding a register in a "well-known" location,
  // such as a register block of a predetermined format.
  address pd_location(VMReg reg) const { return NULL; }
  address pd_location(VMReg base_reg, int slot_idx) const;

  // no PD state to clear or copy:
  void pd_clear() {}
  void pd_initialize() {}
  void pd_initialize_from(const RegisterMap* map) {}

#endif // CPU_RISCV_REGISTERMAP_RISCV_HPP
193
src/hotspot/cpu/riscv/register_definitions_riscv.cpp
Normal file
@@ -0,0 +1,193 @@
/*
 * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/register.hpp"
#include "interp_masm_riscv.hpp"
#include "register_riscv.hpp"

REGISTER_DEFINITION(Register, noreg);

REGISTER_DEFINITION(Register, x0);
REGISTER_DEFINITION(Register, x1);
REGISTER_DEFINITION(Register, x2);
REGISTER_DEFINITION(Register, x3);
REGISTER_DEFINITION(Register, x4);
REGISTER_DEFINITION(Register, x5);
REGISTER_DEFINITION(Register, x6);
REGISTER_DEFINITION(Register, x7);
REGISTER_DEFINITION(Register, x8);
REGISTER_DEFINITION(Register, x9);
REGISTER_DEFINITION(Register, x10);
REGISTER_DEFINITION(Register, x11);
REGISTER_DEFINITION(Register, x12);
REGISTER_DEFINITION(Register, x13);
REGISTER_DEFINITION(Register, x14);
REGISTER_DEFINITION(Register, x15);
REGISTER_DEFINITION(Register, x16);
REGISTER_DEFINITION(Register, x17);
REGISTER_DEFINITION(Register, x18);
REGISTER_DEFINITION(Register, x19);
REGISTER_DEFINITION(Register, x20);
REGISTER_DEFINITION(Register, x21);
REGISTER_DEFINITION(Register, x22);
REGISTER_DEFINITION(Register, x23);
REGISTER_DEFINITION(Register, x24);
REGISTER_DEFINITION(Register, x25);
REGISTER_DEFINITION(Register, x26);
REGISTER_DEFINITION(Register, x27);
REGISTER_DEFINITION(Register, x28);
REGISTER_DEFINITION(Register, x29);
REGISTER_DEFINITION(Register, x30);
REGISTER_DEFINITION(Register, x31);

REGISTER_DEFINITION(FloatRegister, fnoreg);

REGISTER_DEFINITION(FloatRegister, f0);
REGISTER_DEFINITION(FloatRegister, f1);
REGISTER_DEFINITION(FloatRegister, f2);
REGISTER_DEFINITION(FloatRegister, f3);
REGISTER_DEFINITION(FloatRegister, f4);
REGISTER_DEFINITION(FloatRegister, f5);
REGISTER_DEFINITION(FloatRegister, f6);
REGISTER_DEFINITION(FloatRegister, f7);
REGISTER_DEFINITION(FloatRegister, f8);
REGISTER_DEFINITION(FloatRegister, f9);
REGISTER_DEFINITION(FloatRegister, f10);
REGISTER_DEFINITION(FloatRegister, f11);
REGISTER_DEFINITION(FloatRegister, f12);
REGISTER_DEFINITION(FloatRegister, f13);
REGISTER_DEFINITION(FloatRegister, f14);
REGISTER_DEFINITION(FloatRegister, f15);
REGISTER_DEFINITION(FloatRegister, f16);
REGISTER_DEFINITION(FloatRegister, f17);
REGISTER_DEFINITION(FloatRegister, f18);
REGISTER_DEFINITION(FloatRegister, f19);
REGISTER_DEFINITION(FloatRegister, f20);
REGISTER_DEFINITION(FloatRegister, f21);
REGISTER_DEFINITION(FloatRegister, f22);
REGISTER_DEFINITION(FloatRegister, f23);
REGISTER_DEFINITION(FloatRegister, f24);
REGISTER_DEFINITION(FloatRegister, f25);
REGISTER_DEFINITION(FloatRegister, f26);
REGISTER_DEFINITION(FloatRegister, f27);
REGISTER_DEFINITION(FloatRegister, f28);
REGISTER_DEFINITION(FloatRegister, f29);
REGISTER_DEFINITION(FloatRegister, f30);
REGISTER_DEFINITION(FloatRegister, f31);

REGISTER_DEFINITION(VectorRegister, vnoreg);

REGISTER_DEFINITION(VectorRegister, v0);
REGISTER_DEFINITION(VectorRegister, v1);
REGISTER_DEFINITION(VectorRegister, v2);
REGISTER_DEFINITION(VectorRegister, v3);
REGISTER_DEFINITION(VectorRegister, v4);
REGISTER_DEFINITION(VectorRegister, v5);
REGISTER_DEFINITION(VectorRegister, v6);
REGISTER_DEFINITION(VectorRegister, v7);
REGISTER_DEFINITION(VectorRegister, v8);
REGISTER_DEFINITION(VectorRegister, v9);
REGISTER_DEFINITION(VectorRegister, v10);
REGISTER_DEFINITION(VectorRegister, v11);
REGISTER_DEFINITION(VectorRegister, v12);
REGISTER_DEFINITION(VectorRegister, v13);
REGISTER_DEFINITION(VectorRegister, v14);
REGISTER_DEFINITION(VectorRegister, v15);
REGISTER_DEFINITION(VectorRegister, v16);
REGISTER_DEFINITION(VectorRegister, v17);
REGISTER_DEFINITION(VectorRegister, v18);
REGISTER_DEFINITION(VectorRegister, v19);
REGISTER_DEFINITION(VectorRegister, v20);
REGISTER_DEFINITION(VectorRegister, v21);
REGISTER_DEFINITION(VectorRegister, v22);
REGISTER_DEFINITION(VectorRegister, v23);
REGISTER_DEFINITION(VectorRegister, v24);
REGISTER_DEFINITION(VectorRegister, v25);
REGISTER_DEFINITION(VectorRegister, v26);
REGISTER_DEFINITION(VectorRegister, v27);
REGISTER_DEFINITION(VectorRegister, v28);
REGISTER_DEFINITION(VectorRegister, v29);
REGISTER_DEFINITION(VectorRegister, v30);
REGISTER_DEFINITION(VectorRegister, v31);

REGISTER_DEFINITION(Register, c_rarg0);
REGISTER_DEFINITION(Register, c_rarg1);
REGISTER_DEFINITION(Register, c_rarg2);
REGISTER_DEFINITION(Register, c_rarg3);
REGISTER_DEFINITION(Register, c_rarg4);
REGISTER_DEFINITION(Register, c_rarg5);
REGISTER_DEFINITION(Register, c_rarg6);
REGISTER_DEFINITION(Register, c_rarg7);

REGISTER_DEFINITION(FloatRegister, c_farg0);
REGISTER_DEFINITION(FloatRegister, c_farg1);
REGISTER_DEFINITION(FloatRegister, c_farg2);
REGISTER_DEFINITION(FloatRegister, c_farg3);
REGISTER_DEFINITION(FloatRegister, c_farg4);
REGISTER_DEFINITION(FloatRegister, c_farg5);
REGISTER_DEFINITION(FloatRegister, c_farg6);
REGISTER_DEFINITION(FloatRegister, c_farg7);

REGISTER_DEFINITION(Register, j_rarg0);
REGISTER_DEFINITION(Register, j_rarg1);
REGISTER_DEFINITION(Register, j_rarg2);
REGISTER_DEFINITION(Register, j_rarg3);
REGISTER_DEFINITION(Register, j_rarg4);
REGISTER_DEFINITION(Register, j_rarg5);
REGISTER_DEFINITION(Register, j_rarg6);
REGISTER_DEFINITION(Register, j_rarg7);

REGISTER_DEFINITION(FloatRegister, j_farg0);
REGISTER_DEFINITION(FloatRegister, j_farg1);
REGISTER_DEFINITION(FloatRegister, j_farg2);
REGISTER_DEFINITION(FloatRegister, j_farg3);
REGISTER_DEFINITION(FloatRegister, j_farg4);
REGISTER_DEFINITION(FloatRegister, j_farg5);
REGISTER_DEFINITION(FloatRegister, j_farg6);
REGISTER_DEFINITION(FloatRegister, j_farg7);

REGISTER_DEFINITION(Register, zr);
REGISTER_DEFINITION(Register, gp);
REGISTER_DEFINITION(Register, tp);
REGISTER_DEFINITION(Register, xmethod);
REGISTER_DEFINITION(Register, ra);
REGISTER_DEFINITION(Register, sp);
REGISTER_DEFINITION(Register, fp);
REGISTER_DEFINITION(Register, xheapbase);
REGISTER_DEFINITION(Register, xcpool);
REGISTER_DEFINITION(Register, xmonitors);
REGISTER_DEFINITION(Register, xlocals);
REGISTER_DEFINITION(Register, xthread);
REGISTER_DEFINITION(Register, xbcp);
REGISTER_DEFINITION(Register, xdispatch);
REGISTER_DEFINITION(Register, esp);

REGISTER_DEFINITION(Register, t0);
REGISTER_DEFINITION(Register, t1);
REGISTER_DEFINITION(Register, t2);
69
src/hotspot/cpu/riscv/register_riscv.cpp
Normal file
@@ -0,0 +1,69 @@
/*
 * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "register_riscv.hpp"

const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers *
                                          RegisterImpl::max_slots_per_register;

const int ConcreteRegisterImpl::max_fpr =
  ConcreteRegisterImpl::max_gpr +
  FloatRegisterImpl::number_of_registers * FloatRegisterImpl::max_slots_per_register;

const int ConcreteRegisterImpl::max_vpr =
  ConcreteRegisterImpl::max_fpr +
  VectorRegisterImpl::number_of_registers * VectorRegisterImpl::max_slots_per_register;

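These constants partition the VMReg slot space: GPRs occupy [0, max_gpr), FPRs [max_gpr, max_fpr), and vector registers [max_fpr, max_vpr). With 32 registers per file and 2/2/4 slots each (per register_riscv.hpp below), a quick standalone check of the resulting bounds; the names are local stand-ins, not the HotSpot types:

// Sanity-check the slot layout implied by the constants above.
constexpr int gpr_count = 32, fpr_count = 32, vpr_count = 32;
constexpr int gpr_slots = 2,  fpr_slots = 2,  vpr_slots = 4;

constexpr int max_gpr = gpr_count * gpr_slots;            // 64
constexpr int max_fpr = max_gpr + fpr_count * fpr_slots;  // 128
constexpr int max_vpr = max_fpr + vpr_count * vpr_slots;  // 256

static_assert(max_gpr == 64 && max_fpr == 128 && max_vpr == 256,
              "VMReg slot ranges: GPRs [0,64), FPRs [64,128), vectors [128,256)");

int main() { return 0; }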
const char* RegisterImpl::name() const {
  static const char *const names[number_of_registers] = {
    "zr", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "fp", "x9",
    "c_rarg0", "c_rarg1", "c_rarg2", "c_rarg3", "c_rarg4", "c_rarg5", "c_rarg6", "c_rarg7",
    "x18", "x19", "esp", "xdispatch", "xbcp", "xthread", "xlocals",
    "xmonitors", "xcpool", "xheapbase", "x28", "x29", "x30", "xmethod"
  };
  return is_valid() ? names[encoding()] : "noreg";
}

const char* FloatRegisterImpl::name() const {
  static const char *const names[number_of_registers] = {
    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
  };
  return is_valid() ? names[encoding()] : "noreg";
}

const char* VectorRegisterImpl::name() const {
  static const char *const names[number_of_registers] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
    "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
  };
  return is_valid() ? names[encoding()] : "noreg";
}
385
src/hotspot/cpu/riscv/register_riscv.hpp
Normal file
@@ -0,0 +1,385 @@
/*
 * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_REGISTER_RISCV_HPP
#define CPU_RISCV_REGISTER_RISCV_HPP

#include "asm/register.hpp"

#define CSR_FFLAGS   0x001  // Floating-Point Accrued Exceptions.
#define CSR_FRM      0x002  // Floating-Point Dynamic Rounding Mode.
#define CSR_FCSR     0x003  // Floating-Point Control and Status Register (frm + fflags).
#define CSR_VSTART   0x008  // Vector start position
#define CSR_VXSAT    0x009  // Fixed-Point Saturate Flag
#define CSR_VXRM     0x00A  // Fixed-Point Rounding Mode
#define CSR_VCSR     0x00F  // Vector control and status register
#define CSR_VL       0xC20  // Vector length
#define CSR_VTYPE    0xC21  // Vector data type register
#define CSR_VLENB    0xC22  // VLEN/8 (vector register length in bytes)
#define CSR_CYCLE    0xc00  // Cycle counter for RDCYCLE instruction.
#define CSR_TIME     0xc01  // Timer for RDTIME instruction.
#define CSR_INSTERT  0xc02  // Instructions-retired counter for RDINSTRET instruction.

class VMRegImpl;
typedef VMRegImpl* VMReg;

// Use Register as shortcut
class RegisterImpl;
typedef RegisterImpl* Register;

inline Register as_Register(int encoding) {
  return (Register)(intptr_t) encoding;
}

class RegisterImpl: public AbstractRegisterImpl {

 public:
  enum {
    number_of_registers      = 32,
    max_slots_per_register   = 2,

    // integer registers x8 - x15 and floating-point registers f8 - f15 are allocatable
    // for compressed instructions. See Table 17.2 in spec.
    compressed_register_base = 8,
    compressed_register_top  = 15,
  };

  // derived registers, offsets, and addresses
  Register successor() const { return as_Register(encoding() + 1); }

  // construction
  inline friend Register as_Register(int encoding);

  VMReg as_VMReg();

  // accessors
  int encoding() const { assert(is_valid(), "invalid register"); return encoding_nocheck(); }
  bool is_valid() const { return 0 <= encoding_nocheck() && encoding_nocheck() < number_of_registers; }
  const char* name() const;
  int encoding_nocheck() const { return (intptr_t)this; }

  // Return the bit which represents this register. This is intended
  // to be ORed into a bitmask: for usage see class AbstractRegSet below.
  unsigned long bit(bool should_set = true) const { return should_set ? 1 << encoding() : 0; }

  // for rvc
  int compressed_encoding() const {
    assert(is_compressed_valid(), "invalid compressed register");
    return encoding() - compressed_register_base;
  }

  int compressed_encoding_nocheck() const {
    return encoding_nocheck() - compressed_register_base;
  }

  bool is_compressed_valid() const {
    return encoding_nocheck() >= compressed_register_base &&
           encoding_nocheck() <= compressed_register_top;
  }
};
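The compressed_encoding() accessors above implement the RVC rule that only x8..x15 (and f8..f15) fit the 3-bit register fields of compressed instructions, encoded simply as enc - 8. A self-contained sketch of that mapping, with local stand-ins for the class members:

#include <cassert>

constexpr int kCompressedBase = 8;   // compressed_register_base
constexpr int kCompressedTop  = 15;  // compressed_register_top

bool is_compressed_valid(int enc) {
  return enc >= kCompressedBase && enc <= kCompressedTop;
}

int compressed_encoding(int enc) {
  assert(is_compressed_valid(enc) && "invalid compressed register");
  return enc - kCompressedBase;    // same subtraction as the accessor above
}

int main() {
  assert(compressed_encoding(8)  == 0);  // x8 (fp) -> 0b000
  assert(compressed_encoding(15) == 7);  // x15     -> 0b111
  assert(!is_compressed_valid(16));      // x16 is not RVC-addressable
  return 0;
}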

// The integer registers of the RISCV architecture

CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));

CONSTANT_REGISTER_DECLARATION(Register, x0,  (0));
CONSTANT_REGISTER_DECLARATION(Register, x1,  (1));
CONSTANT_REGISTER_DECLARATION(Register, x2,  (2));
CONSTANT_REGISTER_DECLARATION(Register, x3,  (3));
CONSTANT_REGISTER_DECLARATION(Register, x4,  (4));
CONSTANT_REGISTER_DECLARATION(Register, x5,  (5));
CONSTANT_REGISTER_DECLARATION(Register, x6,  (6));
CONSTANT_REGISTER_DECLARATION(Register, x7,  (7));
CONSTANT_REGISTER_DECLARATION(Register, x8,  (8));
CONSTANT_REGISTER_DECLARATION(Register, x9,  (9));
CONSTANT_REGISTER_DECLARATION(Register, x10, (10));
CONSTANT_REGISTER_DECLARATION(Register, x11, (11));
CONSTANT_REGISTER_DECLARATION(Register, x12, (12));
CONSTANT_REGISTER_DECLARATION(Register, x13, (13));
CONSTANT_REGISTER_DECLARATION(Register, x14, (14));
CONSTANT_REGISTER_DECLARATION(Register, x15, (15));
CONSTANT_REGISTER_DECLARATION(Register, x16, (16));
CONSTANT_REGISTER_DECLARATION(Register, x17, (17));
CONSTANT_REGISTER_DECLARATION(Register, x18, (18));
CONSTANT_REGISTER_DECLARATION(Register, x19, (19));
CONSTANT_REGISTER_DECLARATION(Register, x20, (20));
CONSTANT_REGISTER_DECLARATION(Register, x21, (21));
CONSTANT_REGISTER_DECLARATION(Register, x22, (22));
CONSTANT_REGISTER_DECLARATION(Register, x23, (23));
CONSTANT_REGISTER_DECLARATION(Register, x24, (24));
CONSTANT_REGISTER_DECLARATION(Register, x25, (25));
CONSTANT_REGISTER_DECLARATION(Register, x26, (26));
CONSTANT_REGISTER_DECLARATION(Register, x27, (27));
CONSTANT_REGISTER_DECLARATION(Register, x28, (28));
CONSTANT_REGISTER_DECLARATION(Register, x29, (29));
CONSTANT_REGISTER_DECLARATION(Register, x30, (30));
CONSTANT_REGISTER_DECLARATION(Register, x31, (31));

// Use FloatRegister as shortcut
class FloatRegisterImpl;
typedef FloatRegisterImpl* FloatRegister;

inline FloatRegister as_FloatRegister(int encoding) {
  return (FloatRegister)(intptr_t) encoding;
}

// The implementation of floating point registers for the architecture
class FloatRegisterImpl: public AbstractRegisterImpl {

 public:
  enum {
    number_of_registers      = 32,
    max_slots_per_register   = 2,

    // floating-point registers f8 - f15 are allocatable for compressed
    // instructions (RVC). See Table 16.2 in spec.
    compressed_register_base = 8,
    compressed_register_top  = 15,
  };

  // construction
  inline friend FloatRegister as_FloatRegister(int encoding);

  VMReg as_VMReg();

  // derived registers, offsets, and addresses
  FloatRegister successor() const {
    return as_FloatRegister((encoding() + 1));
  }

  // accessors
  int encoding() const { assert(is_valid(), "invalid register"); return encoding_nocheck(); }
  int encoding_nocheck() const { return (intptr_t)this; }
  int is_valid() const { return 0 <= encoding_nocheck() && encoding_nocheck() < number_of_registers; }
  const char* name() const;

  // for rvc
  int compressed_encoding() const {
    assert(is_compressed_valid(), "invalid compressed register");
    return encoding() - compressed_register_base;
  }

  int compressed_encoding_nocheck() const {
    return encoding_nocheck() - compressed_register_base;
  }

  bool is_compressed_valid() const {
    return encoding_nocheck() >= compressed_register_base &&
           encoding_nocheck() <= compressed_register_top;
  }
};

// The float registers of the RISCV architecture

CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg , (-1));

CONSTANT_REGISTER_DECLARATION(FloatRegister, f0  , ( 0));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f1  , ( 1));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f2  , ( 2));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f3  , ( 3));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f4  , ( 4));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f5  , ( 5));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f6  , ( 6));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f7  , ( 7));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f8  , ( 8));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f9  , ( 9));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f10 , (10));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f11 , (11));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f12 , (12));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f13 , (13));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f14 , (14));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f15 , (15));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f16 , (16));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f17 , (17));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f18 , (18));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f19 , (19));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f20 , (20));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f21 , (21));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f22 , (22));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f23 , (23));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f24 , (24));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f25 , (25));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f26 , (26));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f27 , (27));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f28 , (28));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f29 , (29));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f30 , (30));
CONSTANT_REGISTER_DECLARATION(FloatRegister, f31 , (31));

// Use VectorRegister as shortcut
class VectorRegisterImpl;
typedef VectorRegisterImpl* VectorRegister;

inline VectorRegister as_VectorRegister(int encoding) {
  return (VectorRegister)(intptr_t) encoding;
}

// The implementation of vector registers for RVV
class VectorRegisterImpl: public AbstractRegisterImpl {

 public:
  enum {
    number_of_registers    = 32,
    max_slots_per_register = 4
  };

  // construction
  inline friend VectorRegister as_VectorRegister(int encoding);

  VMReg as_VMReg();

  // derived registers, offsets, and addresses
  VectorRegister successor() const { return as_VectorRegister(encoding() + 1); }

  // accessors
  int encoding() const { assert(is_valid(), "invalid register"); return encoding_nocheck(); }
  int encoding_nocheck() const { return (intptr_t)this; }
  bool is_valid() const { return 0 <= encoding_nocheck() && encoding_nocheck() < number_of_registers; }
  const char* name() const;

};

// The vector registers of RVV
CONSTANT_REGISTER_DECLARATION(VectorRegister, vnoreg , (-1));

CONSTANT_REGISTER_DECLARATION(VectorRegister, v0  , ( 0));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v1  , ( 1));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v2  , ( 2));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v3  , ( 3));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v4  , ( 4));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v5  , ( 5));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v6  , ( 6));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v7  , ( 7));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v8  , ( 8));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v9  , ( 9));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v10 , (10));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v11 , (11));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v12 , (12));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v13 , (13));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v14 , (14));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v15 , (15));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v16 , (16));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v17 , (17));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v18 , (18));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v19 , (19));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v20 , (20));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v21 , (21));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v22 , (22));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v23 , (23));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v24 , (24));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v25 , (25));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v26 , (26));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v27 , (27));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v28 , (28));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v29 , (29));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v30 , (30));
CONSTANT_REGISTER_DECLARATION(VectorRegister, v31 , (31));


// Need to know the total number of registers of all sorts for SharedInfo.
// Define a class that exports it.
class ConcreteRegisterImpl : public AbstractRegisterImpl {
 public:
  enum {
    // A big enough number for C2: all the registers plus flags
    // This number must be large enough to cover REG_COUNT (defined by c2) registers.
    // There is no requirement that any ordering here matches any ordering c2 gives
    // its optoregs.

    number_of_registers = (RegisterImpl::max_slots_per_register * RegisterImpl::number_of_registers +
                           FloatRegisterImpl::max_slots_per_register * FloatRegisterImpl::number_of_registers +
                           VectorRegisterImpl::max_slots_per_register * VectorRegisterImpl::number_of_registers)
  };

  // added to make it compile
  static const int max_gpr;
  static const int max_fpr;
  static const int max_vpr;
};

// A set of registers
template<class RegImpl>
class AbstractRegSet {
  uint32_t _bitset;

  AbstractRegSet(uint32_t bitset) : _bitset(bitset) { }

 public:
  AbstractRegSet() : _bitset(0) { }

  AbstractRegSet(RegImpl r1) : _bitset(1 << r1->encoding()) { }

  AbstractRegSet operator+(const AbstractRegSet aSet) const {
    AbstractRegSet result(_bitset | aSet._bitset);
    return result;
  }

  AbstractRegSet operator-(const AbstractRegSet aSet) const {
    AbstractRegSet result(_bitset & ~aSet._bitset);
    return result;
  }

  AbstractRegSet &operator+=(const AbstractRegSet aSet) {
    *this = *this + aSet;
    return *this;
  }

  AbstractRegSet &operator-=(const AbstractRegSet aSet) {
    *this = *this - aSet;
    return *this;
  }

  static AbstractRegSet of(RegImpl r1) {
    return AbstractRegSet(r1);
  }

  static AbstractRegSet of(RegImpl r1, RegImpl r2) {
    return of(r1) + r2;
  }

  static AbstractRegSet of(RegImpl r1, RegImpl r2, RegImpl r3) {
    return of(r1, r2) + r3;
  }

  static AbstractRegSet of(RegImpl r1, RegImpl r2, RegImpl r3, RegImpl r4) {
    return of(r1, r2, r3) + r4;
  }

  static AbstractRegSet range(RegImpl start, RegImpl end) {
    uint32_t bits = ~0;
    bits <<= start->encoding();
    bits <<= (31 - end->encoding());
    bits >>= (31 - end->encoding());

    return AbstractRegSet(bits);
  }

  uint32_t bits() const { return _bitset; }
};
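range(start, end) builds a contiguous mask by shifting ~0 left past start, then shifting up by 31 - end and back down to clear everything above end. The same trick applied to raw encodings, with a concrete check:

#include <cassert>
#include <cstdint>

// The mask construction from AbstractRegSet::range(start, end), using raw
// encodings instead of register handles: set exactly bits [start, end].
uint32_t range_bits(int start, int end) {
  uint32_t bits = ~0u;
  bits <<= start;        // clear bits below 'start'
  bits <<= (31 - end);   // push bits above 'end' off the top...
  bits >>= (31 - end);   // ...then shift back, clearing them
  return bits;
}

int main() {
  // Registers x10..x12 -> bits 10, 11, 12 set (0x1C00).
  assert(range_bits(10, 12) == 0x1C00u);
  // A single-register range degenerates to one bit.
  assert(range_bits(5, 5) == (1u << 5));
  return 0;
}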

typedef AbstractRegSet<Register> RegSet;
typedef AbstractRegSet<FloatRegister> FloatRegSet;
typedef AbstractRegSet<VectorRegister> VectorRegSet;

#endif // CPU_RISCV_REGISTER_RISCV_HPP
113
src/hotspot/cpu/riscv/relocInfo_riscv.cpp
Normal file
@@ -0,0 +1,113 @@
/*
 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"

void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
  if (verify_only) {
    return;
  }

  int bytes;

  switch (type()) {
    case relocInfo::oop_type: {
      oop_Relocation *reloc = (oop_Relocation *)this;
      // in movoop when BarrierSet::barrier_set()->barrier_set_nmethod() != NULL || !immediate
      if (NativeInstruction::is_load_pc_relative_at(addr())) {
        address constptr = (address)code()->oop_addr_at(reloc->oop_index());
        bytes = MacroAssembler::pd_patch_instruction_size(addr(), constptr);
        assert(*(address*)constptr == x, "error in oop relocation");
      } else {
        bytes = MacroAssembler::patch_oop(addr(), x);
      }
      break;
    }
    default:
      bytes = MacroAssembler::pd_patch_instruction_size(addr(), x);
      break;
  }
  ICache::invalidate_range(addr(), bytes);
}

address Relocation::pd_call_destination(address orig_addr) {
  assert(is_call(), "should be an address instruction here");
  if (NativeCall::is_call_at(addr())) {
    address trampoline = nativeCall_at(addr())->get_trampoline();
    if (trampoline != NULL) {
      return nativeCallTrampolineStub_at(trampoline)->destination();
    }
  }
  if (orig_addr != NULL) {
    // the extracted address from the instructions in address orig_addr
    address new_addr = MacroAssembler::pd_call_destination(orig_addr);
    // If call is branch to self, don't try to relocate it, just leave it
    // as branch to self. This happens during code generation if the code
    // buffer expands. It will be relocated to the trampoline above once
    // code generation is complete.
    new_addr = (new_addr == orig_addr) ? addr() : new_addr;
    return new_addr;
  }
  return MacroAssembler::pd_call_destination(addr());
}

void Relocation::pd_set_call_destination(address x) {
  assert(is_call(), "should be an address instruction here");
  if (NativeCall::is_call_at(addr())) {
    address trampoline = nativeCall_at(addr())->get_trampoline();
    if (trampoline != NULL) {
      nativeCall_at(addr())->set_destination_mt_safe(x, /* assert_lock */false);
      return;
    }
  }
  MacroAssembler::pd_patch_instruction_size(addr(), x);
  address pd_call = pd_call_destination(addr());
  assert(pd_call == x, "fail in reloc");
}

address* Relocation::pd_address_in_code() {
  assert(NativeCall::is_load_pc_relative_at(addr()), "Not the expected instruction sequence!");
  return (address*)(MacroAssembler::target_addr_for_insn(addr()));
}

address Relocation::pd_get_address_from_code() {
  return MacroAssembler::pd_call_destination(addr());
}

void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
  if (NativeInstruction::maybe_cpool_ref(addr())) {
    address old_addr = old_addr_for(addr(), src, dest);
    MacroAssembler::pd_patch_instruction_size(addr(), MacroAssembler::target_addr_for_insn(old_addr));
  }
}

void metadata_Relocation::pd_fix_value(address x) {
}
44
src/hotspot/cpu/riscv/relocInfo_riscv.hpp
Normal file
@@ -0,0 +1,44 @@
/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_RISCV_RELOCINFO_RISCV_HPP
#define CPU_RISCV_RELOCINFO_RISCV_HPP

  // machine-dependent parts of class relocInfo
 private:
  enum {
    // Relocations are byte-aligned.
    offset_unit = 1,
    // Must be at least 1 for RelocInfo::narrow_oop_in_const.
    format_width = 1
  };

 public:

  // This platform has no oops in the code that are not also
  // listed in the oop section.
  static bool mustIterateImmediateOopsInCode() { return false; }

#endif // CPU_RISCV_RELOCINFO_RISCV_HPP
Some files were not shown because too many files have changed in this diff.