Compare commits


2 Commits

2a7a497d3a  Nikita Provotorov  2021-10-05 09:39:45 +07:00
    Merge pull request #65 from bell-sw/windows-aarch64
    8248238: Windows AArch64 Support

4321d03d52  bell-sw  2021-06-22 22:18:53 +03:00
    8248238: Windows AArch64 Support
91 changed files with 2888 additions and 550 deletions

View File

@@ -672,6 +672,16 @@ AC_DEFUN([BASIC_EVAL_DEVKIT_VARIABLE],
fi
])
###############################################################################
# Evaluates platform specific overrides for build devkit variables.
# $1: Name of variable
AC_DEFUN([BASIC_EVAL_BUILD_DEVKIT_VARIABLE],
[
if test "x[$]$1" = x; then
eval $1="\${$1_${OPENJDK_BUILD_CPU}}"
fi
])
###############################################################################
AC_DEFUN_ONCE([BASIC_SETUP_DEVKIT],
[

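The new BASIC_EVAL_BUILD_DEVKIT_VARIABLE differs from the existing BASIC_EVAL_DEVKIT_VARIABLE only in keying the fallback off OPENJDK_BUILD_CPU rather than the target CPU. A minimal shell sketch of what the eval resolves to, with hypothetical values (the _<cpu> suffix convention matches the DEVKIT_*_x86_64 and DEVKIT_*_aarch64 entries written by the devkit scripts later in this change):

    # devkit.info-style per-CPU variable (hypothetical value)
    BUILD_DEVKIT_VS_LIB_x86_64="/devkit/VC/lib/x64;/devkit/10/lib/x64"
    OPENJDK_BUILD_CPU=x86_64

    BUILD_DEVKIT_VS_LIB=""                  # not set explicitly...
    if test "x$BUILD_DEVKIT_VS_LIB" = x; then
      # ...so fall back to the variant suffixed with the *build* CPU
      eval BUILD_DEVKIT_VS_LIB="\${BUILD_DEVKIT_VS_LIB_${OPENJDK_BUILD_CPU}}"
    fi
    echo "$BUILD_DEVKIT_VS_LIB"             # /devkit/VC/lib/x64;/devkit/10/lib/x64
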
View File

@@ -112,7 +112,6 @@ AC_DEFUN([BASIC_FIXUP_PATH_CYGWIN],
# unix format.
path="[$]$1"
new_path=`$CYGPATH -u "$path"`
# Cygwin tries to hide some aspects of the Windows file system, such that binaries are
# named .exe but called without that suffix. Therefore, "foo" and "foo.exe" are considered
# the same file, most of the time (as in "test -f"). But not when running cygpath -s, then
@@ -402,8 +401,19 @@ AC_DEFUN_ONCE([BASIC_COMPILE_FIXPATH],
BASIC_WINDOWS_REWRITE_AS_WINDOWS_MIXED_PATH([FIXPATH_BIN_W])
$RM -rf $FIXPATH_BIN $FIXPATH_DIR
$MKDIR -p $FIXPATH_DIR $CONFIGURESUPPORT_OUTPUTDIR/bin
# simple cross compilation solution for fixpath for aarch64
# Final solution should be backport of 8257679
if test "x$COMPILE_TYPE-$OPENJDK_TARGET_CPU" = xcross-aarch64; then
FIXPATH_CC=`$ECHO "$CC"|$SED 's|/arm64/cl|/x64/cl|'`
FIXPATH_LIB=`$ECHO "$LIB"|$SED 's|arm64|x64|gI'`
else
FIXPATH_CC=$CC
FIXPATH_LIB=$LIB
fi
OLDLIB=$LIB
export LIB="$FIXPATH_LIB"
cd $FIXPATH_DIR
$CC $FIXPATH_SRC_W -Fe$FIXPATH_BIN_W > $FIXPATH_DIR/fixpath1.log 2>&1
$FIXPATH_CC $FIXPATH_SRC_W -Fe$FIXPATH_BIN_W > $FIXPATH_DIR/fixpath1.log 2>&1
cd $CURDIR
if test ! -x $FIXPATH_BIN; then
@@ -414,9 +424,10 @@ AC_DEFUN_ONCE([BASIC_COMPILE_FIXPATH],
AC_MSG_RESULT([yes])
AC_MSG_CHECKING([if fixpath.exe works])
cd $FIXPATH_DIR
$FIXPATH $CC $FIXPATH_SRC -Fe$FIXPATH_DIR/fixpath2.exe \
$FIXPATH $FIXPATH_CC $FIXPATH_SRC -Fe$FIXPATH_DIR/fixpath2.exe \
> $FIXPATH_DIR/fixpath2.log 2>&1
cd $CURDIR
export LIB="$OLDLIB"
if test ! -x $FIXPATH_DIR/fixpath2.exe; then
AC_MSG_RESULT([no])
cat $FIXPATH_DIR/fixpath2.log

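fixpath.exe must run on the build machine, so for a windows-aarch64 cross build configure rewrites the compiler path and the LIB environment from the arm64 toolchain directories to their x64 siblings before compiling it. The two substitutions in isolation, with hypothetical paths (the gI flags, every occurrence and case-insensitive, are GNU sed):

    CC="/devkit/VC/bin/arm64/cl.exe"
    LIB="C:\devkit\VC\lib\arm64;C:\devkit\10\lib\ARM64"

    FIXPATH_CC=`echo "$CC" | sed 's|/arm64/cl|/x64/cl|'`
    FIXPATH_LIB=`echo "$LIB" | sed 's|arm64|x64|gI'`

    echo "$FIXPATH_CC"     # /devkit/VC/bin/x64/cl.exe
    echo "$FIXPATH_LIB"    # C:\devkit\VC\lib\x64;C:\devkit\10\lib\x64
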
View File

@@ -30,13 +30,13 @@
# First include the real base spec.gmk file
include @SPEC@
CC := @BUILD_CC@
CXX := @BUILD_CXX@
LD := @BUILD_LD@
LDCXX := @BUILD_LDCXX@
AS := @BUILD_AS@
CC := @FIXPATH@ @BUILD_CC@
CXX := @FIXPATH@ @BUILD_CXX@
LD := @FIXPATH@ @BUILD_LD@
LDCXX := @FIXPATH@ @BUILD_LDCXX@
AS := @FIXPATH@ @BUILD_AS@
NM := @BUILD_NM@
AR := @BUILD_AR@
AR := @FIXPATH@ @BUILD_AR@
OBJCOPY := @BUILD_OBJCOPY@
STRIP := @BUILD_STRIP@
SYSROOT_CFLAGS := @BUILD_SYSROOT_CFLAGS@

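Routing the BUILD_* tools through @FIXPATH@ matters here because, in the windows-aarch64 cross build, the buildjdk is produced with the x64 Windows toolchain, and cl.exe/link.exe need Windows-style paths for their arguments. A hedged illustration of what one substituted line in the generated spec could look like (all paths hypothetical):

    # CC := @FIXPATH@ @BUILD_CC@   may resolve to something like:
    CC="/cygdrive/c/build/configure-support/bin/fixpath -c /cygdrive/c/devkit/VC/bin/x64/cl.exe"
    $CC -nologo -c hello.c   # fixpath rewrites the unix-style paths before exec'ing cl.exe
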
View File

@@ -705,7 +705,9 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP],
$1_DEFINES_CPU_JDK="${$1_DEFINES_CPU_JDK} -DcpuIntel -Di586 -D$FLAGS_CPU_LEGACY_LIB"
fi
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
if test "x$FLAGS_CPU" = xx86_64; then
if test "x$FLAGS_CPU" = xaarch64; then
$1_DEFINES_CPU_JDK="${$1_DEFINES_CPU_JDK} -D_ARM64_ -Darm64"
elif test "x$FLAGS_CPU" = xx86_64; then
$1_DEFINES_CPU_JDK="${$1_DEFINES_CPU_JDK} -D_AMD64_ -Damd64"
else
$1_DEFINES_CPU_JDK="${$1_DEFINES_CPU_JDK} -D_X86_ -Dx86"

View File

@@ -188,20 +188,19 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_CPU_DEP],
fi
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
if test "x${OPENJDK_$1_CPU_BITS}" = "x32"; then
$1_CPU_EXECUTABLE_LDFLAGS="-stack:327680"
elif test "x${OPENJDK_$1_CPU_BITS}" = "x64"; then
$1_CPU_EXECUTABLE_LDFLAGS="-stack:1048576"
fi
if test "x${OPENJDK_$1_CPU}" = "xx86"; then
$1_CPU_LDFLAGS="-safeseh"
# NOTE: Old build added -machine. Probably not needed.
$1_CPU_LDFLAGS_JVM_ONLY="-machine:I386"
$1_CPU_EXECUTABLE_LDFLAGS="-stack:327680"
else
$1_CPU_LDFLAGS_JVM_ONLY="-machine:AMD64"
$1_CPU_EXECUTABLE_LDFLAGS="-stack:1048576"
fi
fi
# JVM_VARIANT_PATH depends on if this is build or target...
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
$1_LDFLAGS_JDK_LIBPATH="-libpath:${OUTPUTDIR}/support/modules_libs/java.base"
$1_LDFLAGS_JDK_LIBPATH="-libpath:\${SUPPORT_OUTPUTDIR}/modules_libs/java.base"
else
$1_LDFLAGS_JDK_LIBPATH="-L\$(SUPPORT_OUTPUTDIR)/modules_libs/java.base \
-L\$(SUPPORT_OUTPUTDIR)/modules_libs/java.base/${$1_JVM_VARIANT_PATH}"

View File

@@ -221,7 +221,7 @@ AC_DEFUN_ONCE([HOTSPOT_ENABLE_DISABLE_AOT],
if test "x$ENABLE_AOT" = "xtrue"; then
# Only enable AOT on X64 platforms.
if test "x$OPENJDK_TARGET_CPU" = "xx86_64" || test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then
if test "x$OPENJDK_TARGET_CPU" = "xx86_64" || test "x$OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU" = "xlinux-aarch64" ; then
if test -e "${TOPDIR}/src/jdk.aot"; then
if test -e "${TOPDIR}/src/jdk.internal.vm.compiler"; then
ENABLE_AOT="true"
@@ -240,7 +240,7 @@ AC_DEFUN_ONCE([HOTSPOT_ENABLE_DISABLE_AOT],
else
ENABLE_AOT="false"
if test "x$enable_aot" = "xyes"; then
AC_MSG_ERROR([AOT is currently only supported on x86_64 and aarch64. Remove --enable-aot.])
AC_MSG_ERROR([AOT is currently only supported on x86_64 and linux-aarch64. Remove --enable-aot.])
fi
fi
fi
@@ -364,6 +364,10 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
AC_MSG_CHECKING([if shenandoah can be built])
if HOTSPOT_CHECK_JVM_FEATURE(shenandoahgc); then
if test "x$OPENJDK_TARGET_CPU_ARCH" = "xx86" || test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then
# Filter out Shenandoah from user requested features, as it's already in non-minimal set
if HOTSPOT_CHECK_JVM_VARIANT(minimal); then
BASIC_GET_NON_MATCHING_VALUES(JVM_FEATURES, $JVM_FEATURES, shenandoahgc)
fi
AC_MSG_RESULT([yes])
else
DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES shenandoahgc"
@@ -371,6 +375,7 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
fi
else
DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES shenandoahgc"
AC_MSG_RESULT([no, not enabled by default])
fi
# Only enable ZGC on supported platforms
@@ -420,7 +425,8 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
# Only enable jvmci on x86_64, sparcv9 and aarch64
if test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \
test "x$OPENJDK_TARGET_CPU" = "xsparcv9" || \
test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then
test "x$OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU" = "xwindows-aarch64" || \
test "x$OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU" = "xlinux-aarch64" ; then
AC_MSG_RESULT([yes])
JVM_FEATURES_jvmci="jvmci"
INCLUDE_JVMCI="true"
@@ -451,10 +457,11 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
JVM_FEATURES_graal="graal"
INCLUDE_GRAAL="true"
else
# By default enable graal build on x64 or where AOT is available.
# By default enable graal build on x64/aarch64 or where AOT is available.
# graal build requires jvmci.
if test "x$JVM_FEATURES_jvmci" = "xjvmci" && \
(test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \
test "x$OPENJDK_TARGET_CPU" = "xaarch64" || \
test "x$ENABLE_AOT" = "xtrue") ; then
AC_MSG_RESULT([yes])
JVM_FEATURES_graal="graal"

View File

@@ -918,14 +918,18 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_BUILD_COMPILERS],
. $CONFIGURESUPPORT_OUTPUTDIR/build-devkit.info
# This potentially sets the following:
# A descriptive name of the devkit
BASIC_EVAL_DEVKIT_VARIABLE([BUILD_DEVKIT_NAME])
BASIC_EVAL_BUILD_DEVKIT_VARIABLE([BUILD_DEVKIT_NAME])
# Corresponds to --with-extra-path
BASIC_EVAL_DEVKIT_VARIABLE([BUILD_DEVKIT_EXTRA_PATH])
BASIC_EVAL_BUILD_DEVKIT_VARIABLE([BUILD_DEVKIT_EXTRA_PATH])
# Corresponds to --with-toolchain-path
BASIC_EVAL_DEVKIT_VARIABLE([BUILD_DEVKIT_TOOLCHAIN_PATH])
BASIC_EVAL_BUILD_DEVKIT_VARIABLE([BUILD_DEVKIT_TOOLCHAIN_PATH])
# Corresponds to --with-sysroot
BASIC_EVAL_DEVKIT_VARIABLE([BUILD_DEVKIT_SYSROOT])
# Skip the Windows-specific parts
BASIC_EVAL_BUILD_DEVKIT_VARIABLE([BUILD_DEVKIT_SYSROOT])
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
BASIC_EVAL_BUILD_DEVKIT_VARIABLE([BUILD_DEVKIT_VS_INCLUDE])
BASIC_EVAL_BUILD_DEVKIT_VARIABLE([BUILD_DEVKIT_VS_LIB])
fi
fi
AC_MSG_CHECKING([for build platform devkit])
@@ -935,13 +939,37 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_BUILD_COMPILERS],
AC_MSG_RESULT([$BUILD_DEVKIT_ROOT])
fi
BUILD_SYSROOT="$BUILD_DEVKIT_SYSROOT"
PATH="$BUILD_DEVKIT_EXTRA_PATH:$PATH"
# Fallback default of just /bin if DEVKIT_PATH is not defined
if test "x$BUILD_DEVKIT_TOOLCHAIN_PATH" = x; then
BUILD_DEVKIT_TOOLCHAIN_PATH="$BUILD_DEVKIT_ROOT/bin"
fi
PATH="$BUILD_DEVKIT_TOOLCHAIN_PATH:$BUILD_DEVKIT_EXTRA_PATH"
PATH="$BUILD_DEVKIT_TOOLCHAIN_PATH:$PATH"
BUILD_SYSROOT="$BUILD_DEVKIT_SYSROOT"
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
BUILD_VS_INCLUDE="$BUILD_DEVKIT_VS_INCLUDE"
BUILD_VS_LIB="$BUILD_DEVKIT_VS_LIB"
TOOLCHAIN_SETUP_VISUAL_STUDIO_SYSROOT_FLAGS([BUILD_])
fi
fi
else
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
if test "x$OPENJDK_TARGET_CPU" = "xaarch64"; then
# try to guess compiler layout. Final solution should be
# backport of 8257679. Try primitive approach for now
BUILD_CC=`$ECHO "$CC"|$SED 's|/arm64/cl|/x64/cl|'`
BUILD_CXX=`$ECHO "$CXX"|$SED 's|/arm64/cl|/x64/cl|'`
BUILD_AR=`$ECHO "$AR"|$SED 's|/arm64/lib|/x64/lib|'`
BUILD_LD=`$ECHO "$LD"|$SED 's|/arm64/link|/x64/link|'`
BUILD_SYSROOT_CFLAGS=`$ECHO "$SYSROOT_CFLAGS"|$SED 's|/arm64|/x64|g'`
BUILD_SYSROOT_LDFLAGS=`$ECHO "$SYSROOT_LDFLAGS"|$SED 's|/arm64|/x64|g'`
else
AC_MSG_ERROR([only windows-aarch64 cross compilation is supported so far])
fi
fi
fi
@@ -967,9 +995,37 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_BUILD_COMPILERS],
BASIC_FIXUP_EXECUTABLE(BUILD_STRIP)
# Assume the C compiler is the assembler
BUILD_AS="$BUILD_CC -c"
# Just like for the target compiler, use the compiler as linker
BUILD_LD="$BUILD_CC"
BUILD_LDCXX="$BUILD_CXX"
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
# In the Microsoft toolchain we have a separate LD command "link".
# Make sure we reject /usr/bin/link (as determined in CYGWIN_LINK), which is
# a cygwin program for something completely different.
AC_CHECK_PROG([BUILD_LD], [link$EXE_SUFFIX],[link$EXE_SUFFIX],,, [$CYGWIN_LINK])
BASIC_FIXUP_EXECUTABLE(BUILD_LD)
# Verify that we indeed succeeded with this trick.
AC_MSG_CHECKING([if the found link.exe is actually the Visual Studio linker])
# Reset PATH since it can contain a mix of WSL/linux paths and Windows paths from VS,
# which, in combination with WSLENV, will make the WSL layer complain
old_path="$PATH"
PATH=
"$BUILD_LD" --version > /dev/null
if test $? -eq 0 ; then
AC_MSG_RESULT([no])
AC_MSG_ERROR([This is the Cygwin link tool. Please check your PATH and rerun configure.])
else
AC_MSG_RESULT([yes])
fi
PATH="$old_path"
BUILD_LDCXX="$BUILD_LD"
else
# Just like for the target compiler, use the compiler as linker
BUILD_LD="$BUILD_CC"
BUILD_LDCXX="$BUILD_CXX"
fi
PATH="$OLDPATH"
@@ -1025,6 +1081,10 @@ AC_DEFUN_ONCE([TOOLCHAIN_MISC_CHECKS],
if test "x$COMPILER_CPU_TEST" != "xx64"; then
AC_MSG_ERROR([Target CPU mismatch. We are building for $OPENJDK_TARGET_CPU but CL is for "$COMPILER_CPU_TEST"; expected "x64".])
fi
elif test "x$OPENJDK_TARGET_CPU" = "xaarch64"; then
if test "x$COMPILER_CPU_TEST" != "xARM64"; then
AC_MSG_ERROR([Target CPU mismatch. We are building for $OPENJDK_TARGET_CPU but CL is for "$COMPILER_CPU_TEST"; expected "ARM64".])
fi
fi
fi

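The probe above leans on an asymmetry: /usr/bin/link from GNU coreutils accepts --version and exits zero, while the Visual Studio linker does not, so a zero exit status means the wrong tool was found. The same check in isolation (a sketch; emptying PATH, as the diff does, additionally keeps WSLENV from mixing WSL and Windows path styles):

    BUILD_LD=`command -v link`           # whichever link(.exe) is first on PATH
    "$BUILD_LD" --version > /dev/null 2>&1
    if test $? -eq 0; then
      echo "GNU/Cygwin link(1); fix PATH and rerun configure" >&2
    else
      echo "non-zero exit; plausibly the Visual Studio linker"
    fi
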
View File

@@ -25,7 +25,7 @@
################################################################################
# The order of these defines the priority by which we try to find them.
VALID_VS_VERSIONS="2017 2013 2015 2012 2010"
VALID_VS_VERSIONS="2019 2017 2013 2015 2012 2010"
VS_DESCRIPTION_2010="Microsoft Visual Studio 2010"
VS_VERSION_INTERNAL_2010=100
@@ -80,7 +80,7 @@ VS_VERSION_INTERNAL_2017=141
VS_MSVCR_2017=vcruntime140.dll
VS_MSVCP_2017=msvcp140.dll
VS_ENVVAR_2017="VS150COMNTOOLS"
VS_USE_UCRT_2017="true"
VS_USE_UCRT_2017="false"
VS_VS_INSTALLDIR_2017="Microsoft Visual Studio/2017"
VS_EDITIONS_2017="BuildTools Community Professional Enterprise"
VS_SDK_INSTALLDIR_2017=
@@ -88,6 +88,19 @@ VS_VS_PLATFORM_NAME_2017="v141"
VS_SDK_PLATFORM_NAME_2017=
VS_SUPPORTED_2017=true
VS_DESCRIPTION_2019="Microsoft Visual Studio 2019"
VS_VERSION_INTERNAL_2019=142
VS_MSVCR_2019=vcruntime140.dll
VS_MSVCP_2019=msvcp140.dll
VS_ENVVAR_2019="VS160COMNTOOLS"
VS_USE_UCRT_2019="false"
VS_VS_INSTALLDIR_2019="Microsoft Visual Studio/2019"
VS_EDITIONS_2019="BuildTools Community Professional Enterprise"
VS_SDK_INSTALLDIR_2019=
VS_VS_PLATFORM_NAME_2019="v142"
VS_SDK_PLATFORM_NAME_2019=
VS_SUPPORTED_2019=true
################################################################################
AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT],
@@ -111,11 +124,15 @@ AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT],
if test -d "$VS_BASE"; then
AC_MSG_NOTICE([Found Visual Studio installation at $VS_BASE using $METHOD])
if test "x$OPENJDK_TARGET_CPU_BITS" = x32; then
if test "x$OPENJDK_TARGET_CPU" = xx86; then
VCVARSFILES="vc/bin/vcvars32.bat vc/auxiliary/build/vcvars32.bat"
else
elif test "x$OPENJDK_TARGET_CPU" = xx86_64; then
VCVARSFILES="vc/bin/amd64/vcvars64.bat vc/bin/x86_amd64/vcvarsx86_amd64.bat \
vc/auxiliary/build/vcvarsx86_amd64.bat vc/auxiliary/build/vcvars64.bat"
elif test "x$OPENJDK_TARGET_CPU" = xaarch64; then
# for host x86-64, target aarch64
VCVARSFILES="vc/auxiliary/build/vcvarsamd64_arm64.bat \
vc/auxiliary/build/vcvarsx86_arm64.bat"
fi
for VCVARSFILE in $VCVARSFILES; do
@@ -155,10 +172,12 @@ AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_WIN_SDK_ROOT],
elif test -f "$WIN_SDK_BASE/Bin/SetEnv.Cmd"; then
AC_MSG_NOTICE([Found Windows SDK installation at $WIN_SDK_BASE using $METHOD])
VS_ENV_CMD="$WIN_SDK_BASE/Bin/SetEnv.Cmd"
if test "x$OPENJDK_TARGET_CPU_BITS" = x32; then
if test "x$OPENJDK_TARGET_CPU" = xx86; then
VS_ENV_ARGS="/x86"
else
elif test "x$OPENJDK_TARGET_CPU" = xx86_64; then
VS_ENV_ARGS="/x64"
elif test "x$OPENJDK_TARGET_CPU" = xaarch64; then
VS_ENV_ARGS="/arm64"
fi
# PLATFORM_TOOLSET is used during the compilation of the freetype sources (see
# 'LIB_BUILD_FREETYPE' in libraries.m4) and must be 'Windows7.1SDK' for Windows7.1SDK
@@ -455,41 +474,7 @@ AC_DEFUN([TOOLCHAIN_SETUP_VISUAL_STUDIO_ENV],
AC_SUBST(VS_INCLUDE)
AC_SUBST(VS_LIB)
# Convert VS_INCLUDE into SYSROOT_CFLAGS
OLDIFS="$IFS"
IFS=";"
for i in $VS_INCLUDE; do
ipath=$i
# Only process non-empty elements
if test "x$ipath" != x; then
IFS="$OLDIFS"
# Check that directory exists before calling fixup_path
testpath=$ipath
BASIC_WINDOWS_REWRITE_AS_UNIX_PATH([testpath])
if test -d "$testpath"; then
BASIC_FIXUP_PATH([ipath])
SYSROOT_CFLAGS="$SYSROOT_CFLAGS -I$ipath"
fi
IFS=";"
fi
done
# Convert VS_LIB into SYSROOT_LDFLAGS
for i in $VS_LIB; do
libpath=$i
# Only process non-empty elements
if test "x$libpath" != x; then
IFS="$OLDIFS"
# Check that directory exists before calling fixup_path
testpath=$libpath
BASIC_WINDOWS_REWRITE_AS_UNIX_PATH([testpath])
if test -d "$testpath"; then
BASIC_FIXUP_PATH([libpath])
SYSROOT_LDFLAGS="$SYSROOT_LDFLAGS -libpath:$libpath"
fi
IFS=";"
fi
done
IFS="$OLDIFS"
TOOLCHAIN_SETUP_VISUAL_STUDIO_SYSROOT_FLAGS
fi
else
AC_MSG_RESULT([not found])
@@ -526,10 +511,15 @@ AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_MSVC_DLL],
CORRECT_MSVCR_ARCH="PE32+ executable"
fi
else
if test "x$OPENJDK_TARGET_CPU_BITS" = x32; then
if test "x$OPENJDK_TARGET_CPU" = xx86; then
CORRECT_MSVCR_ARCH=386
else
elif test "x$OPENJDK_TARGET_CPU" = xx86_64; then
CORRECT_MSVCR_ARCH=x86-64
elif test "x$OPENJDK_TARGET_CPU" = xaarch64; then
# The cygwin 'file' command only returns "PE32+ executable (DLL) (console), for MS Windows",
# without specifying which architecture it is for specifically. This has been fixed upstream.
# https://github.com/file/file/commit/b849b1af098ddd530094bf779b58431395db2e10#diff-ff2eced09e6860de75057dd731d092aeR142
CORRECT_MSVCR_ARCH="PE32+ executable"
fi
fi
if $ECHO "$MSVC_DLL_FILETYPE" | $GREP "$CORRECT_MSVCR_ARCH" 2>&1 > /dev/null; then
@@ -549,24 +539,24 @@ AC_DEFUN([TOOLCHAIN_SETUP_MSVC_DLL],
DLL_NAME="$1"
MSVC_DLL=
if test "x$OPENJDK_TARGET_CPU" = xx86; then
vs_target_cpu=x86
elif test "x$OPENJDK_TARGET_CPU" = xx86_64; then
vs_target_cpu=x64
elif test "x$OPENJDK_TARGET_CPU" = xaarch64; then
vs_target_cpu=arm64
fi
if test "x$MSVC_DLL" = x; then
if test "x$VCINSTALLDIR" != x; then
CYGWIN_VC_INSTALL_DIR="$VCINSTALLDIR"
BASIC_FIXUP_PATH(CYGWIN_VC_INSTALL_DIR)
if test "$VS_VERSION" -lt 2017; then
# Probe: Using well-known location from Visual Studio 12.0 and older
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
POSSIBLE_MSVC_DLL="$CYGWIN_VC_INSTALL_DIR/redist/x64/Microsoft.VC${VS_VERSION_INTERNAL}.CRT/$DLL_NAME"
else
POSSIBLE_MSVC_DLL="$CYGWIN_VC_INSTALL_DIR/redist/x86/Microsoft.VC${VS_VERSION_INTERNAL}.CRT/$DLL_NAME"
fi
POSSIBLE_MSVC_DLL="$CYGWIN_VC_INSTALL_DIR/redist/$vs_target_cpu/Microsoft.VC${VS_VERSION_INTERNAL}.CRT/$DLL_NAME"
else
# Probe: Using well-known location from VS 2017
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
POSSIBLE_MSVC_DLL="`ls $CYGWIN_VC_INSTALL_DIR/Redist/MSVC/*/x64/Microsoft.VC${VS_VERSION_INTERNAL}.CRT/$DLL_NAME`"
else
POSSIBLE_MSVC_DLL="`ls $CYGWIN_VC_INSTALL_DIR/Redist/MSVC/*/x86/Microsoft.VC${VS_VERSION_INTERNAL}.CRT/$DLL_NAME`"
fi
POSSIBLE_MSVC_DLL="`ls $CYGWIN_VC_INSTALL_DIR/Redist/MSVC/*/$vs_target_cpu/Microsoft.VC${VS_VERSION_INTERNAL}.CRT/$DLL_NAME`"
fi
# In case any of the above finds more than one file, loop over them.
for possible_msvc_dll in $POSSIBLE_MSVC_DLL; do
@@ -598,13 +588,8 @@ AC_DEFUN([TOOLCHAIN_SETUP_MSVC_DLL],
if test "x$VS100COMNTOOLS" != x; then
CYGWIN_VS_TOOLS_DIR="$VS100COMNTOOLS/.."
BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(CYGWIN_VS_TOOLS_DIR)
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
POSSIBLE_MSVC_DLL=`$FIND "$CYGWIN_VS_TOOLS_DIR" -name $DLL_NAME \
| $GREP -i /x64/ | $HEAD --lines 1`
else
POSSIBLE_MSVC_DLL=`$FIND "$CYGWIN_VS_TOOLS_DIR" -name $DLL_NAME \
| $GREP -i /x86/ | $HEAD --lines 1`
fi
POSSIBLE_MSVC_DLL=`$FIND "$CYGWIN_VS_TOOLS_DIR" -name $DLL_NAME \
| $GREP -i /$vs_target_cpu/ | $HEAD --lines 1`
TOOLCHAIN_CHECK_POSSIBLE_MSVC_DLL([$DLL_NAME], [$POSSIBLE_MSVC_DLL],
[search of VS100COMNTOOLS])
fi
@@ -614,17 +599,17 @@ AC_DEFUN([TOOLCHAIN_SETUP_MSVC_DLL],
# Probe: Search wildly in the VCINSTALLDIR. We've probably lost by now.
# (This was the original behaviour; kept since it might turn something up)
if test "x$CYGWIN_VC_INSTALL_DIR" != x; then
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
if test "x$OPENJDK_TARGET_CPU" = xx86; then
POSSIBLE_MSVC_DLL=`$FIND "$CYGWIN_VC_INSTALL_DIR" -name $DLL_NAME \
| $GREP x64 | $HEAD --lines 1`
else
POSSIBLE_MSVC_DLL=`$FIND "$CYGWIN_VC_INSTALL_DIR" -name $DLL_NAME \
| $GREP x86 | $GREP -v ia64 | $GREP -v x64 | $HEAD --lines 1`
| $GREP x86 | $GREP -v ia64 | $GREP -v x64 | $GREP -v arm64 | $HEAD --lines 1`
if test "x$POSSIBLE_MSVC_DLL" = x; then
# We're grasping at straws now...
POSSIBLE_MSVC_DLL=`$FIND "$CYGWIN_VC_INSTALL_DIR" -name $DLL_NAME \
| $HEAD --lines 1`
fi
else
POSSIBLE_MSVC_DLL=`$FIND "$CYGWIN_VC_INSTALL_DIR" -name $DLL_NAME \
| $GREP $vs_target_cpu | $HEAD --lines 1`
fi
TOOLCHAIN_CHECK_POSSIBLE_MSVC_DLL([$DLL_NAME], [$POSSIBLE_MSVC_DLL],
@@ -708,8 +693,12 @@ AC_DEFUN([TOOLCHAIN_SETUP_VS_RUNTIME_DLLS],
CYGWIN_WINDOWSSDKDIR="${WINDOWSSDKDIR}"
BASIC_FIXUP_PATH([CYGWIN_WINDOWSSDKDIR])
dll_subdir=$OPENJDK_TARGET_CPU
if test "x$dll_subdir" = "xx86_64"; then
if test "x$OPENJDK_TARGET_CPU" = "xaarch64"; then
dll_subdir="arm64"
elif test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
dll_subdir="x64"
elif test "x$OPENJDK_TARGET_CPU" = "xx86"; then
dll_subdir="x86"
fi
UCRT_DLL_DIR="$CYGWIN_WINDOWSSDKDIR/Redist/ucrt/DLLs/$dll_subdir"
if test -z "$(ls -d "$UCRT_DLL_DIR/"*.dll 2> /dev/null)"; then
@@ -732,3 +721,49 @@ AC_DEFUN([TOOLCHAIN_SETUP_VS_RUNTIME_DLLS],
fi
AC_SUBST(UCRT_DLL_DIR)
])
# Setup the sysroot flags and add them to global CFLAGS and LDFLAGS so
# that configure can use them while detecting compilers.
# TOOLCHAIN_TYPE is available here.
# Param 1 - Optional prefix to all variables. (e.g. BUILD_)
AC_DEFUN([TOOLCHAIN_SETUP_VISUAL_STUDIO_SYSROOT_FLAGS],
[
# Convert $1VS_INCLUDE into $1SYSROOT_CFLAGS
OLDIFS="$IFS"
IFS=";"
for i in [$]$1VS_INCLUDE; do
ipath=$i
# Only process non-empty elements
if test "x$ipath" != x; then
IFS="$OLDIFS"
# Check that directory exists before calling fixup_path
testpath=$ipath
BASIC_WINDOWS_REWRITE_AS_UNIX_PATH([testpath])
if test -d "$testpath"; then
BASIC_FIXUP_PATH([ipath])
$1SYSROOT_CFLAGS="[$]$1SYSROOT_CFLAGS -I$ipath"
fi
IFS=";"
fi
done
# Convert $1VS_LIB into $1SYSROOT_LDFLAGS
for i in [$]$1VS_LIB; do
libpath=$i
# Only process non-empty elements
if test "x$libpath" != x; then
IFS="$OLDIFS"
# Check that directory exists before calling fixup_path
testpath=$libpath
BASIC_WINDOWS_REWRITE_AS_UNIX_PATH([testpath])
if test -d "$testpath"; then
BASIC_FIXUP_PATH([libpath])
$1SYSROOT_LDFLAGS="[$]$1SYSROOT_LDFLAGS -libpath:$libpath"
fi
IFS=";"
fi
done
IFS="$OLDIFS"
AC_SUBST($1SYSROOT_CFLAGS)
AC_SUBST($1SYSROOT_LDFLAGS)
])

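Once the $1 prefix is expanded, the macro body is plain shell: split the semicolon-separated VS_INCLUDE/VS_LIB lists and turn every existing directory into a -I or -libpath: flag. A prefix-free sketch with hypothetical paths (the BASIC_* cygwin path fixups are omitted):

    VS_INCLUDE="/devkit/VC/include;/devkit/10/include/ucrt;"
    SYSROOT_CFLAGS=""
    OLDIFS="$IFS"
    IFS=";"
    for i in $VS_INCLUDE; do
      ipath=$i
      if test "x$ipath" != x; then       # skip empty list elements
        IFS="$OLDIFS"
        if test -d "$ipath"; then        # only existing directories become flags
          SYSROOT_CFLAGS="$SYSROOT_CFLAGS -I$ipath"
        fi
        IFS=";"
      fi
    done
    IFS="$OLDIFS"
    echo "$SYSROOT_CFLAGS"               # -I/devkit/VC/include -I/devkit/10/include/ucrt
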
View File

@@ -44,37 +44,44 @@ endif
################################################################################
# Copy the microsoft runtime libraries on windows
################################################################################
# Copy the microsoft runtime libraries on windows, but only if we are not
# creating a buildjdk. If we are, the provided runtime libraries are made for
# the target platform, not the build platform (and we should not need to bundle
# anything with the minimalistic, locally-only buildjdk.)
ifeq ($(OPENJDK_TARGET_OS), windows)
ifneq ($(CREATING_BUILDJDK), true)
    # Chmod to avoid permission issues if bundles are unpacked on unix platforms.
    define copy-and-chmod
      $(install-file)
      $(CHMOD) a+rx $@
    endef

    # Use separate macro calls in case the source files are not in the same
    # directory.
    $(eval $(call SetupCopyFiles,COPY_MSVCR, \
        DEST := $(LIB_DST_DIR), \
        FILES := $(MSVCR_DLL), \
        MACRO := copy-and-chmod))

    $(eval $(call SetupCopyFiles,COPY_MSVCP, \
        DEST := $(LIB_DST_DIR), \
        FILES := $(MSVCP_DLL), \
        MACRO := copy-and-chmod))

    TARGETS += $(COPY_MSVCR) $(COPY_MSVCP)

    ifneq ($(UCRT_DLL_DIR), )
      $(eval $(call SetupCopyFiles, COPY_UCRT_DLLS, \
          DEST := $(LIB_DST_DIR), \
          SRC := $(UCRT_DLL_DIR), \
          FILES := $(wildcard $(UCRT_DLL_DIR)/*.dll), \
          MACRO := copy-and-chmod, \
      ))
      TARGETS += $(COPY_UCRT_DLLS)
    endif
endif
endif
endif

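For reference, the copy-and-chmod recipe amounts to the following per-DLL shell steps (names hypothetical; install-file is the build system's copy helper):

    LIB_DST_DIR=jdk/bin                  # hypothetical destination
    mkdir -p "$LIB_DST_DIR"
    cp /devkit/VC/redist/arm64/Microsoft.VC142.CRT/vcruntime140.dll "$LIB_DST_DIR/"
    # a+rx keeps the DLL usable if the bundle is unpacked on a unix filesystem
    chmod a+rx "$LIB_DST_DIR/vcruntime140.dll"
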
View File

@@ -89,19 +89,23 @@ if [ ! -d $DEVKIT_ROOT/VC ]; then
REDIST_SUBDIR="VC/Redist/MSVC/14.12.25810"
echo "Copying VC..."
mkdir -p $DEVKIT_ROOT/VC/bin
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostx64/arm64" $DEVKIT_ROOT/VC/bin/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostx64/x64" $DEVKIT_ROOT/VC/bin/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostx86/x86" $DEVKIT_ROOT/VC/bin/
mkdir -p $DEVKIT_ROOT/VC/lib
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/lib/arm64" $DEVKIT_ROOT/VC/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/lib/x64" $DEVKIT_ROOT/VC/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/lib/x86" $DEVKIT_ROOT/VC/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/include" $DEVKIT_ROOT/VC/
mkdir -p $DEVKIT_ROOT/VC/atlmfc/lib
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/atlmfc/lib/arm64" $DEVKIT_ROOT/VC/atlmfc/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/atlmfc/lib/x64" $DEVKIT_ROOT/VC/atlmfc/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/atlmfc/lib/x86" $DEVKIT_ROOT/VC/atlmfc/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/atlmfc/include" $DEVKIT_ROOT/VC/atlmfc/
mkdir -p $DEVKIT_ROOT/VC/Auxiliary
cp -r "$VS_INSTALL_DIR/VC/Auxiliary/Build" $DEVKIT_ROOT/VC/Auxiliary/
mkdir -p $DEVKIT_ROOT/VC/redist
cp -r "$VS_INSTALL_DIR/$REDIST_SUBDIR/arm64" $DEVKIT_ROOT/VC/redist/
cp -r "$VS_INSTALL_DIR/$REDIST_SUBDIR/x64" $DEVKIT_ROOT/VC/redist/
cp -r "$VS_INSTALL_DIR/$REDIST_SUBDIR/x86" $DEVKIT_ROOT/VC/redist/
@@ -111,6 +115,8 @@ if [ ! -d $DEVKIT_ROOT/VC ]; then
cp $DEVKIT_ROOT/VC/redist/x86/$MSVCP_DLL $DEVKIT_ROOT/VC/bin/x86
cp $DEVKIT_ROOT/VC/redist/x64/$MSVCR_DLL $DEVKIT_ROOT/VC/bin/x64
cp $DEVKIT_ROOT/VC/redist/x64/$MSVCP_DLL $DEVKIT_ROOT/VC/bin/x64
cp $DEVKIT_ROOT/VC/redist/arm64/$MSVCR_DLL $DEVKIT_ROOT/VC/bin/arm64
cp $DEVKIT_ROOT/VC/redist/arm64/$MSVCP_DLL $DEVKIT_ROOT/VC/bin/arm64
fi
################################################################################
@@ -128,8 +134,10 @@ if [ ! -d $DEVKIT_ROOT/$SDK_VERSION ]; then
cp -r "$SDK_INSTALL_DIR/bin/$SDK_FULL_VERSION/x64" $DEVKIT_ROOT/$SDK_VERSION/bin/
cp -r "$SDK_INSTALL_DIR/bin/$SDK_FULL_VERSION/x86" $DEVKIT_ROOT/$SDK_VERSION/bin/
mkdir -p $DEVKIT_ROOT/$SDK_VERSION/lib
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/um/arm64" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/um/x64" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/um/x86" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/ucrt/arm64" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/ucrt/x64" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/ucrt/x86" $DEVKIT_ROOT/$SDK_VERSION/lib/
mkdir -p $DEVKIT_ROOT/$SDK_VERSION/Redist
@@ -164,6 +172,13 @@ echo-info "DEVKIT_VS_LIB_x86_64=\"\$DEVKIT_ROOT/VC/lib/x64;\$DEVKIT_ROOT/VC/atlm
echo-info "DEVKIT_MSVCR_DLL_x86_64=\"\$DEVKIT_ROOT/VC/redist/x64/$MSVCR_DLL\""
echo-info "DEVKIT_MSVCP_DLL_x86_64=\"\$DEVKIT_ROOT/VC/redist/x64/$MSVCP_DLL\""
echo-info "DEVKIT_UCRT_DLL_DIR_x86_64=\"\$DEVKIT_ROOT/10/Redist/ucrt/DLLs/x64\""
echo-info ""
echo-info "DEVKIT_TOOLCHAIN_PATH_aarch64=\"\$DEVKIT_ROOT/VC/bin/arm64:\$DEVKIT_ROOT/$SDK_VERSION/bin/x64:\$DEVKIT_ROOT/$SDK_VERSION/bin/x86\""
echo-info "DEVKIT_VS_INCLUDE_aarch64=\"\$DEVKIT_ROOT/VC/include;\$DEVKIT_ROOT/VC/atlmfc/include;\$DEVKIT_ROOT/$SDK_VERSION/include/shared;\$DEVKIT_ROOT/$SDK_VERSION/include/ucrt;\$DEVKIT_ROOT/$SDK_VERSION/include/um;\$DEVKIT_ROOT/$SDK_VERSION/include/winrt\""
echo-info "DEVKIT_VS_LIB_aarch64=\"\$DEVKIT_ROOT/VC/lib/arm64;\$DEVKIT_ROOT/VC/atlmfc/lib/arm64;\$DEVKIT_ROOT/$SDK_VERSION/lib/arm64\""
echo-info "DEVKIT_MSVCR_DLL_aarch64=\"\$DEVKIT_ROOT/VC/redist/arm64/$MSVCR_DLL\""
echo-info "DEVKIT_MSVCP_DLL_aarch64=\"\$DEVKIT_ROOT/VC/redist/arm64/$MSVCP_DLL\""
echo-info "DEVKIT_UCRT_DLL_DIR_aarch64=\"\$DEVKIT_ROOT/10/Redist/ucrt/DLLs/arm64\""
################################################################################
# Copy this script

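These _aarch64 entries pair with the per-CPU eval fallback from basics.m4 at the top of this change: configure sources devkit.info and then selects the suffix matching the CPU it is configuring for. A sketch, assuming an unpacked devkit:

    DEVKIT_ROOT=/devkit                  # hypothetical unpack location
    . "$DEVKIT_ROOT/devkit.info"         # defines DEVKIT_VS_LIB_aarch64 etc.
    OPENJDK_TARGET_CPU=aarch64
    eval DEVKIT_VS_LIB="\${DEVKIT_VS_LIB_${OPENJDK_TARGET_CPU}}"
    echo "$DEVKIT_VS_LIB"                # the arm64 lib dirs from the lines above
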
View File

@@ -0,0 +1,227 @@
#!/bin/bash
#
# Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# This script copies parts of a Visual Studio installation into a devkit
# suitable for building OpenJDK and OracleJDK. Needs to run in Cygwin or WSL.
# erik.joelsson@oracle.com
VS_VERSION="2019"
VS_VERSION_NUM_NODOT="160"
VS_DLL_VERSION="140"
SDK_VERSION="10"
SDK_FULL_VERSION="10.0.17763.0"
MSVC_DIR="Microsoft.VC142.CRT"
MSVC_FULL_VERSION="14.12.27508"
REDIST_FULL_VERSION="14.20.27508"
SCRIPT_DIR="$(cd "$(dirname $0)" > /dev/null && pwd)"
BUILD_DIR="${SCRIPT_DIR}/../../build/devkit"
################################################################################
# Prepare settings
UNAME_SYSTEM=`uname -s`
UNAME_RELEASE=`uname -r`
# Detect cygwin or WSL
IS_CYGWIN=`echo $UNAME_SYSTEM | grep -i CYGWIN`
IS_WSL=`echo $UNAME_RELEASE | grep Microsoft`
if test "x$IS_CYGWIN" != "x"; then
BUILD_ENV="cygwin"
elif test "x$IS_WSL" != "x"; then
BUILD_ENV="wsl"
else
echo "Unknown environment; only Cygwin and WSL are supported."
exit 1
fi
if test "x$BUILD_ENV" = "xcygwin"; then
WINDOWS_PATH_TO_UNIX_PATH="cygpath -u"
elif test "x$BUILD_ENV" = "xwsl"; then
WINDOWS_PATH_TO_UNIX_PATH="wslpath -u"
fi
# Work around the insanely named ProgramFiles(x86) env variable
PROGRAMFILES_X86="$($WINDOWS_PATH_TO_UNIX_PATH "$(cmd.exe /c set | sed -n 's/^ProgramFiles(x86)=//p' | tr -d '\r')")"
# Find Visual Studio installation dir
VSNNNCOMNTOOLS=`cmd.exe /c echo %VS${VS_VERSION_NUM_NODOT}COMNTOOLS% | tr -d '\r'`
if [ -d "$VSNNNCOMNTOOLS" ]; then
VS_INSTALL_DIR="$($WINDOWS_PATH_TO_UNIX_PATH "$VSNNNCOMNTOOLS/../..")"
else
VS_INSTALL_DIR="${PROGRAMFILES_X86}/Microsoft Visual Studio/2019"
VS_INSTALL_DIR="$(ls -d "${VS_INSTALL_DIR}/"{Community,Professional,Enterprise} 2>/dev/null | head -n1)"
fi
echo "VS_INSTALL_DIR: $VS_INSTALL_DIR"
# Extract semantic version
POTENTIAL_INI_FILES="Common7/IDE/wdexpress.isolation.ini Common7/IDE/devenv.isolation.ini"
for f in $POTENTIAL_INI_FILES; do
if [ -f "$VS_INSTALL_DIR/$f" ]; then
VS_VERSION_SP="$(grep ^SemanticVersion= "$VS_INSTALL_DIR/$f")"
# Remove SemanticVersion=
VS_VERSION_SP="${VS_VERSION_SP#*=}"
# Remove suffix of too detailed numbering starting with +
VS_VERSION_SP="${VS_VERSION_SP%+*}"
break
fi
done
if [ -z "$VS_VERSION_SP" ]; then
echo "Failed to find SP version"
exit 1
fi
echo "Found Version SP: $VS_VERSION_SP"
# Setup output dirs
DEVKIT_ROOT="${BUILD_DIR}/VS${VS_VERSION}-${VS_VERSION_SP}-devkit"
DEVKIT_BUNDLE="${DEVKIT_ROOT}.tar.gz"
echo "Creating devkit in $DEVKIT_ROOT"
MSVCR_DLL=${MSVC_DIR}/vcruntime${VS_DLL_VERSION}.dll
VCRUNTIME_1_DLL=${MSVC_DIR}/vcruntime${VS_DLL_VERSION}_1.dll
MSVCP_DLL=${MSVC_DIR}/msvcp${VS_DLL_VERSION}.dll
################################################################################
# Copy Visual Studio files
TOOLS_VERSION="$(ls "$VS_INSTALL_DIR/VC/Tools/MSVC" | sort -r -n | head -n1)"
echo "Found Tools version: $TOOLS_VERSION"
VC_SUBDIR="VC/Tools/MSVC/$TOOLS_VERSION"
REDIST_VERSION="$(ls "$VS_INSTALL_DIR/VC/Redist/MSVC" | sort -r -n | head -n1)"
echo "Found Redist version: $REDIST_VERSION"
REDIST_SUBDIR="VC/Redist/MSVC/$REDIST_VERSION"
echo "Copying VC..."
rm -rf $DEVKIT_ROOT/VC
mkdir -p $DEVKIT_ROOT/VC/bin
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostx64/arm64" $DEVKIT_ROOT/VC/bin/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostx64/x64" $DEVKIT_ROOT/VC/bin/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostx86/x86" $DEVKIT_ROOT/VC/bin/
mkdir -p $DEVKIT_ROOT/VC/lib
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/lib/arm64" $DEVKIT_ROOT/VC/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/lib/x64" $DEVKIT_ROOT/VC/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/lib/x86" $DEVKIT_ROOT/VC/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/include" $DEVKIT_ROOT/VC/
mkdir -p $DEVKIT_ROOT/VC/atlmfc/lib
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/atlmfc/lib/arm64" $DEVKIT_ROOT/VC/atlmfc/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/atlmfc/lib/x64" $DEVKIT_ROOT/VC/atlmfc/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/atlmfc/lib/x86" $DEVKIT_ROOT/VC/atlmfc/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/atlmfc/include" $DEVKIT_ROOT/VC/atlmfc/
mkdir -p $DEVKIT_ROOT/VC/Auxiliary
cp -r "$VS_INSTALL_DIR/VC/Auxiliary/Build" $DEVKIT_ROOT/VC/Auxiliary/
mkdir -p $DEVKIT_ROOT/VC/redist
cp -r "$VS_INSTALL_DIR/$REDIST_SUBDIR/arm64" $DEVKIT_ROOT/VC/redist/
cp -r "$VS_INSTALL_DIR/$REDIST_SUBDIR/x64" $DEVKIT_ROOT/VC/redist/
cp -r "$VS_INSTALL_DIR/$REDIST_SUBDIR/x86" $DEVKIT_ROOT/VC/redist/
# The redist runtime libs are needed to run the compiler but may not be
# installed on the machine where the devkit will be used.
cp $DEVKIT_ROOT/VC/redist/x86/$MSVCR_DLL $DEVKIT_ROOT/VC/bin/x86
cp $DEVKIT_ROOT/VC/redist/x86/$MSVCP_DLL $DEVKIT_ROOT/VC/bin/x86
cp $DEVKIT_ROOT/VC/redist/x64/$MSVCR_DLL $DEVKIT_ROOT/VC/bin/x64
cp $DEVKIT_ROOT/VC/redist/x64/$MSVCP_DLL $DEVKIT_ROOT/VC/bin/x64
cp $DEVKIT_ROOT/VC/redist/arm64/$MSVCR_DLL $DEVKIT_ROOT/VC/bin/arm64
cp $DEVKIT_ROOT/VC/redist/arm64/$MSVCP_DLL $DEVKIT_ROOT/VC/bin/arm64
################################################################################
# Copy SDK files
SDK_INSTALL_DIR="$PROGRAMFILES_X86/Windows Kits/$SDK_VERSION"
echo "SDK_INSTALL_DIR: $SDK_INSTALL_DIR"
SDK_FULL_VERSION="$(ls "$SDK_INSTALL_DIR/bin" | sort -r -n | head -n1)"
echo "Found SDK version: $SDK_FULL_VERSION"
UCRT_VERSION="$(ls "$SDK_INSTALL_DIR/Redist" | grep $SDK_VERSION | sort -r -n | head -n1)"
echo "Found UCRT version: $UCRT_VERSION"
echo "Copying SDK..."
rm -rf $DEVKIT_ROOT/$SDK_VERSION
mkdir -p $DEVKIT_ROOT/$SDK_VERSION/bin
cp -r "$SDK_INSTALL_DIR/bin/$SDK_FULL_VERSION/x64" $DEVKIT_ROOT/$SDK_VERSION/bin/
cp -r "$SDK_INSTALL_DIR/bin/$SDK_FULL_VERSION/x86" $DEVKIT_ROOT/$SDK_VERSION/bin/
mkdir -p $DEVKIT_ROOT/$SDK_VERSION/lib
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/um/arm64" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/um/x64" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/um/x86" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/ucrt/arm64" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/ucrt/x64" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/ucrt/x86" $DEVKIT_ROOT/$SDK_VERSION/lib/
mkdir -p $DEVKIT_ROOT/$SDK_VERSION/Redist
cp -r "$SDK_INSTALL_DIR/Redist/$UCRT_VERSION/ucrt" $DEVKIT_ROOT/$SDK_VERSION/Redist/
mkdir -p $DEVKIT_ROOT/$SDK_VERSION/include
cp -r "$SDK_INSTALL_DIR/include/$SDK_FULL_VERSION/"* $DEVKIT_ROOT/$SDK_VERSION/include/
################################################################################
# Generate devkit.info
echo-info() {
echo "$1" >> $DEVKIT_ROOT/devkit.info
}
echo "Generating devkit.info..."
rm -f $DEVKIT_ROOT/devkit.info
echo-info "# This file describes to configure how to interpret the contents of this devkit"
echo-info "DEVKIT_NAME=\"Microsoft Visual Studio $VS_VERSION $VS_VERSION_SP (devkit)\""
echo-info "DEVKIT_VS_VERSION=\"$VS_VERSION\""
echo-info ""
echo-info "DEVKIT_TOOLCHAIN_PATH_x86=\"\$DEVKIT_ROOT/VC/bin/x86:\$DEVKIT_ROOT/$SDK_VERSION/bin/x86\""
echo-info "DEVKIT_VS_INCLUDE_x86=\"\$DEVKIT_ROOT/VC/include;\$DEVKIT_ROOT/VC/atlmfc/include;\$DEVKIT_ROOT/$SDK_VERSION/include/shared;\$DEVKIT_ROOT/$SDK_VERSION/include/ucrt;\$DEVKIT_ROOT/$SDK_VERSION/include/um;\$DEVKIT_ROOT/$SDK_VERSION/include/winrt\""
echo-info "DEVKIT_VS_LIB_x86=\"\$DEVKIT_ROOT/VC/lib/x86;\$DEVKIT_ROOT/VC/atlmfc/lib/x86;\$DEVKIT_ROOT/$SDK_VERSION/lib/x86\""
echo-info "DEVKIT_MSVCR_DLL_x86=\"\$DEVKIT_ROOT/VC/redist/x86/$MSVCR_DLL\""
echo-info "DEVKIT_MSVCP_DLL_x86=\"\$DEVKIT_ROOT/VC/redist/x86/$MSVCP_DLL\""
echo-info "DEVKIT_UCRT_DLL_DIR_x86=\"\$DEVKIT_ROOT/10/Redist/ucrt/DLLs/x86\""
echo-info ""
echo-info "DEVKIT_TOOLCHAIN_PATH_x86_64=\"\$DEVKIT_ROOT/VC/bin/x64:\$DEVKIT_ROOT/$SDK_VERSION/bin/x64:\$DEVKIT_ROOT/$SDK_VERSION/bin/x86\""
echo-info "DEVKIT_VS_INCLUDE_x86_64=\"\$DEVKIT_ROOT/VC/include;\$DEVKIT_ROOT/VC/atlmfc/include;\$DEVKIT_ROOT/$SDK_VERSION/include/shared;\$DEVKIT_ROOT/$SDK_VERSION/include/ucrt;\$DEVKIT_ROOT/$SDK_VERSION/include/um;\$DEVKIT_ROOT/$SDK_VERSION/include/winrt\""
echo-info "DEVKIT_VS_LIB_x86_64=\"\$DEVKIT_ROOT/VC/lib/x64;\$DEVKIT_ROOT/VC/atlmfc/lib/x64;\$DEVKIT_ROOT/$SDK_VERSION/lib/x64\""
echo-info "DEVKIT_MSVCR_DLL_x86_64=\"\$DEVKIT_ROOT/VC/redist/x64/$MSVCR_DLL\""
echo-info "DEVKIT_VCRUNTIME_1_DLL_x86_64=\"\$DEVKIT_ROOT/VC/redist/x64/$VCRUNTIME_1_DLL\""
echo-info "DEVKIT_MSVCP_DLL_x86_64=\"\$DEVKIT_ROOT/VC/redist/x64/$MSVCP_DLL\""
echo-info "DEVKIT_UCRT_DLL_DIR_x86_64=\"\$DEVKIT_ROOT/10/Redist/ucrt/DLLs/x64\""
echo-info ""
echo-info "DEVKIT_TOOLCHAIN_PATH_aarch64=\"\$DEVKIT_ROOT/VC/bin/arm64:\$DEVKIT_ROOT/$SDK_VERSION/bin/x64:\$DEVKIT_ROOT/$SDK_VERSION/bin/x86\""
echo-info "DEVKIT_VS_INCLUDE_aarch64=\"\$DEVKIT_ROOT/VC/include;\$DEVKIT_ROOT/VC/atlmfc/include;\$DEVKIT_ROOT/$SDK_VERSION/include/shared;\$DEVKIT_ROOT/$SDK_VERSION/include/ucrt;\$DEVKIT_ROOT/$SDK_VERSION/include/um;\$DEVKIT_ROOT/$SDK_VERSION/include/winrt\""
echo-info "DEVKIT_VS_LIB_aarch64=\"\$DEVKIT_ROOT/VC/lib/arm64;\$DEVKIT_ROOT/VC/atlmfc/lib/arm64;\$DEVKIT_ROOT/$SDK_VERSION/lib/arm64\""
echo-info "DEVKIT_MSVCR_DLL_aarch64=\"\$DEVKIT_ROOT/VC/redist/arm64/$MSVCR_DLL\""
echo-info "DEVKIT_VCRUNTIME_1_DLL_aarch64=\"\$DEVKIT_ROOT/VC/redist/arm64/$VCRUNTIME_1_DLL\""
echo-info "DEVKIT_MSVCP_DLL_aarch64=\"\$DEVKIT_ROOT/VC/redist/arm64/$MSVCP_DLL\""
echo-info "DEVKIT_UCRT_DLL_DIR_aarch64=\"\$DEVKIT_ROOT/10/Redist/ucrt/DLLs/arm64\""
echo-info ""
echo-info "DEVKIT_TOOLS_VERSION=\"$TOOLS_VERSION\""
echo-info "DEVKIT_REDIST_VERSION=\"$REDIST_VERSION\""
echo-info "DEVKIT_SDK_VERSION=\"$SDK_FULL_VERSION\""
echo-info "DEVKIT_UCRT_VERSION=\"$UCRT_VERSION\""
################################################################################
# Copy this script
echo "Copying this script..."
cp $0 $DEVKIT_ROOT/
################################################################################
# Create bundle
echo "Creating bundle: $DEVKIT_BUNDLE"
(cd "$DEVKIT_ROOT" && tar zcf "$DEVKIT_BUNDLE" .)

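A likely invocation, from a Cygwin or WSL shell on a machine with Visual Studio 2019 and the Windows 10 SDK installed (the script name here is assumed from its 2017 sibling):

    bash createWindowsDevkit2019.sh
    # devkit tree and bundle land under ../../build/devkit relative to the script
    ls ../../build/devkit/VS2019-*-devkit.tar.gz
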
View File

@@ -63,6 +63,12 @@ ifneq ($(filter $(TOOLCHAIN_TYPE), gcc clang), )
CPP_FLAGS += -x c
else ifeq ($(TOOLCHAIN_TYPE), microsoft)
CPP_FLAGS += -nologo
ifeq ($(OPENJDK_TARGET_CPU),aarch64)
# cl.exe only recognizes a few file extensions as valid input (e.g. .c, .h, .cpp), so
# make sure *.java.template files are recognized as valid input files
CPP_FILEPREFIX = -Tc
endif
endif
# Generate a java source file from a template through the C preprocessor for the
@@ -75,7 +81,7 @@ endif
define generate-preproc-src
$(call MakeDir, $(@D))
( $(NAWK) '/@@END_COPYRIGHT@@/{exit}1' $< && \
$(CPP) $(CPP_FLAGS) $(SYSROOT_CFLAGS) $(CFLAGS_JDKLIB) $< \
$(CPP) $(CPP_FLAGS) $(SYSROOT_CFLAGS) $(CFLAGS_JDKLIB) $(CPP_FILEPREFIX) $< \
2> >($(GREP) -v '^$(<F)$$' >&2) \
| $(NAWK) '/@@START_HERE@@/,0' \
| $(SED) -e 's/@@START_HERE@@/\/\/ AUTOMATICALLY GENERATED FILE - DO NOT EDIT/' \

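-Tc is the MSVC switch for 'treat this file as C source regardless of extension'; without it cl.exe refuses the *.java.template inputs. Roughly what the recipe then runs for one template (a sketch; the file name is hypothetical and the real CPP flags are elided):

    cl -nologo -EP -Tc CharacterDataLatin1.java.template > CharacterDataLatin1.java
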
View File

@@ -99,6 +99,13 @@ ifeq ($(call check-jvm-feature, compiler2), true)
ADLCFLAGS += -DAIX=1
else ifeq ($(OPENJDK_TARGET_OS), macosx)
ADLCFLAGS += -D_ALLBSD_SOURCE=1 -D_GNU_SOURCE=1
else ifeq ($(OPENJDK_TARGET_OS), windows)
ifeq ($(call isTargetCpuBits, 64), true)
ADLCFLAGS += -D_WIN64=1
endif
ifeq ($(HOTSPOT_TARGET_CPU_ARCH), aarch64)
ADLCFLAGS += -DR18_RESERVED=1
endif
endif
ifneq ($(OPENJDK_TARGET_OS), windows)

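Net effect for a windows-aarch64 build: adlc is invoked with both _WIN64 and R18_RESERVED defined. A shell rendering of the conditional above:

    OPENJDK_TARGET_OS=windows
    HOTSPOT_TARGET_CPU_ARCH=aarch64
    TARGET_CPU_BITS=64                     # stands in for the isTargetCpuBits check
    ADLCFLAGS=""
    if test "$OPENJDK_TARGET_OS" = windows; then
      test "$TARGET_CPU_BITS" = 64 && ADLCFLAGS="$ADLCFLAGS -D_WIN64=1"
      test "$HOTSPOT_TARGET_CPU_ARCH" = aarch64 && ADLCFLAGS="$ADLCFLAGS -DR18_RESERVED=1"
    fi
    echo "$ADLCFLAGS"                      # -D_WIN64=1 -DR18_RESERVED=1
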
View File

@@ -78,6 +78,8 @@ $(eval $(call SetupNativeCompilation, BUILD_GTEST_LIBJVM, \
DISABLED_WARNINGS_clang := undef switch format-nonliteral \
tautological-undefined-compare $(BUILD_LIBJVM_DISABLED_WARNINGS_clang), \
DISABLED_WARNINGS_solstudio := identexpected, \
DISABLED_WARNINGS_microsoft := 4100 4127 4146 4201 4244 4291 4351 \
4511 4512 4514 4624 4996, \
LDFLAGS := $(JVM_LDFLAGS), \
LDFLAGS_solaris := -library=stlport4 $(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS := $(JVM_LIBS), \

View File

@@ -164,6 +164,8 @@ $(eval $(call SetupNativeCompilation, BUILD_LIBJVM, \
DISABLED_WARNINGS_solstudio := $(DISABLED_WARNINGS_solstudio), \
DISABLED_WARNINGS_xlc := 1540-0216 1540-0198 1540-1090 1540-1639 \
1540-1088 1500-010, \
DISABLED_WARNINGS_microsoft := 4100 4127 4146 4201 4244 4291 4351 \
4511 4512 4514 4624 4996, \
ASFLAGS := $(JVM_ASFLAGS), \
LDFLAGS := $(JVM_LDFLAGS), \
LIBS := $(JVM_LIBS), \

View File

@@ -110,8 +110,8 @@ reg_def R16 ( SOC, SOC, Op_RegI, 16, r16->as_VMReg() );
reg_def R16_H ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17 ( SOC, SOC, Op_RegI, 17, r17->as_VMReg() );
reg_def R17_H ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18 ( SOC, SOC, Op_RegI, 18, r18->as_VMReg() );
reg_def R18_H ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
reg_def R18 ( SOC, SOC, Op_RegI, 18, r18_reserved->as_VMReg() );
reg_def R18_H ( SOC, SOC, Op_RegI, 18, r18_reserved->as_VMReg()->next());
reg_def R19 ( SOC, SOE, Op_RegI, 19, r19->as_VMReg() );
reg_def R19_H ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20 ( SOC, SOE, Op_RegI, 20, r20->as_VMReg() ); // caller esp
@@ -352,7 +352,6 @@ alloc_class chunk0(
R15, R15_H,
R16, R16_H,
R17, R17_H,
R18, R18_H,
// arg registers
R0, R0_H,
@@ -375,7 +374,7 @@ alloc_class chunk0(
R26, R26_H,
// non-allocatable registers
R18, R18_H, // platform
R27, R27_H, // heapbase
R28, R28_H, // thread
R29, R29_H, // fp
@@ -533,7 +532,8 @@ reg_class no_special_reg32_no_fp(
R15,
R16,
R17,
#ifdef __APPLE__
#ifndef R18_RESERVED
// See comment in register_aarch64.hpp
R18,
#endif
R19,
@@ -568,7 +568,8 @@ reg_class no_special_reg32_with_fp(
R15,
R16,
R17,
#ifdef __APPLE__
#ifndef R18_RESERVED
// See comment in register_aarch64.hpp
R18,
#endif
R19,
@@ -606,7 +607,8 @@ reg_class no_special_reg_no_fp(
R15, R15_H,
R16, R16_H,
R17, R17_H,
#ifdef __APPLE__
#ifndef R18_RESERVED
// See comment in register_aarch64.hpp
R18, R18_H,
#endif
R19, R19_H,
@@ -641,7 +643,8 @@ reg_class no_special_reg_with_fp(
R15, R15_H,
R16, R16_H,
R17, R17_H,
#ifdef __APPLE__
#ifndef R18_RESERVED
// See comment in register_aarch64.hpp
R18, R18_H,
#endif
R19, R19_H,
@@ -783,7 +786,10 @@ reg_class no_special_ptr_reg(
R15, R15_H,
R16, R16_H,
R17, R17_H,
#ifndef R18_RESERVED
// See comment in register_aarch64.hpp
R18, R18_H,
#endif
R19, R19_H,
R20, R20_H,
R21, R21_H,
@@ -2048,11 +2054,6 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
const bool Matcher::match_rule_supported(int opcode) {
switch (opcode) {
default:
break;
}
if (!has_match_rule(opcode)) {
return false;
}
@@ -2975,7 +2976,7 @@ encode %{
enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
MacroAssembler _masm(&cbuf);
u_int32_t con = (u_int32_t)$src$$constant;
uint32_t con = (uint32_t)$src$$constant;
Register dst_reg = as_Register($dst$$reg);
if (con == 0) {
__ movw(dst_reg, zr);
@@ -2987,7 +2988,7 @@ encode %{
enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
u_int64_t con = (u_int64_t)$src$$constant;
uint64_t con = (uint64_t)$src$$constant;
if (con == 0) {
__ mov(dst_reg, zr);
} else {
@@ -3012,7 +3013,7 @@ encode %{
if (con < (address)(uintptr_t)os::vm_page_size()) {
__ mov(dst_reg, con);
} else {
uint64_t offset;
uintptr_t offset;
__ adrp(dst_reg, con, offset);
__ add(dst_reg, dst_reg, offset);
}
@@ -3029,7 +3030,7 @@ encode %{
enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
__ mov(dst_reg, (u_int64_t)1);
__ mov(dst_reg, (uint64_t)1);
%}
enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
@@ -3163,7 +3164,7 @@ encode %{
enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
u_int32_t val = (u_int32_t)$src2$$constant;
uint32_t val = (uint32_t)$src2$$constant;
__ movw(rscratch1, val);
__ cmpw(reg1, rscratch1);
%}
@@ -3185,7 +3186,7 @@ encode %{
__ adds(zr, reg, -val);
} else {
// aargh, Long.MIN_VALUE is a special case
__ orr(rscratch1, zr, (u_int64_t)val);
__ orr(rscratch1, zr, (uint64_t)val);
__ subs(zr, reg, rscratch1);
}
%}
@@ -3193,7 +3194,7 @@ encode %{
enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
u_int64_t val = (u_int64_t)$src2$$constant;
uint64_t val = (uint64_t)$src2$$constant;
__ mov(rscratch1, val);
__ cmp(reg1, rscratch1);
%}
@@ -5411,7 +5412,7 @@ pipeline %{
attributes %{
// ARM instructions are of fixed length
fixed_size_instructions; // Fixed size instructions TODO does
max_instructions_per_bundle = 2; // A53 = 2, A57 = 4
max_instructions_per_bundle = 4; // A53 = 2, A57 = 4
// ARM instructions come in 32-bit word units
instruction_unit_size = 4; // An instruction is 4 bytes long
instruction_fetch_unit_size = 64; // The processor fetches one line
@@ -9652,7 +9653,7 @@ instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
ins_encode %{
__ sbfiz(as_Register($dst$$reg),
as_Register($src$$reg),
$scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
$scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
%}
ins_pipe(ialu_reg_shift);
@@ -13519,8 +13520,8 @@ instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlag
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
predicate((u_int64_t)n->in(2)->get_long()
< (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
predicate((uint64_t)n->in(2)->get_long()
< (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
match(Set dummy (ClearArray cnt base));
effect(USE_KILL base);
@@ -13528,7 +13529,7 @@ instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg
format %{ "ClearArray $cnt, $base" %}
ins_encode %{
__ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
__ zero_words($base$$Register, (uint64_t)$cnt$$constant);
%}
ins_pipe(pipe_class_memory);

View File

@@ -1247,7 +1247,7 @@ void Address::lea(MacroAssembler *as, Register r) const {
break;
}
case base_plus_offset_reg: {
__ add(r, _base, _index, _ext.op(), MAX(_ext.shift(), 0));
__ add(r, _base, _index, _ext.op(), MAX2(_ext.shift(), 0));
break;
}
case literal: {
@@ -1513,3 +1513,7 @@ static float unpack(unsigned value) {
ival = fp_immediate_for_encoding(value, 0);
return val;
}
address Assembler::locate_next_instruction(address inst) {
return inst + Assembler::instruction_size;
}

View File

@@ -28,6 +28,19 @@
#include "asm/register.hpp"
#ifdef __GNUC__
// __nop needs volatile so that compiler doesn't optimize it away
#define NOP() asm volatile ("nop");
#elif defined(_MSC_VER)
// Use MSVC intrinsic: https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics?view=vs-2019#I
#define NOP() __nop();
#endif
// definitions of various symbolic names for machine registers
// First intercalls between C and Java which use 8 general registers
@@ -201,7 +214,7 @@ public:
static void patch(address a, int msb, int lsb, uint64_t val) {
int nbits = msb - lsb + 1;
guarantee(val < (1U << nbits), "Field too big for insn");
guarantee(val < (1ULL << nbits), "Field too big for insn");
assert_cond(msb >= lsb);
unsigned mask = checked_cast<unsigned>(right_n_bits(nbits));
val <<= lsb;
@@ -421,8 +434,8 @@ class Address {
}
Register base() const {
guarantee((_mode == base_plus_offset | _mode == base_plus_offset_reg
| _mode == post | _mode == post_reg),
guarantee((_mode == base_plus_offset || _mode == base_plus_offset_reg
|| _mode == post || _mode == post_reg),
"wrong mode");
return _base;
}
@@ -614,7 +627,7 @@ class Assembler : public AbstractAssembler {
void emit_long(jint x) {
if ((uintptr_t)pc() == asm_bp)
asm volatile ("nop");
NOP();
AbstractAssembler::emit_int32(x);
}
#else
@@ -646,6 +659,8 @@ public:
return Address(Post(base, idx));
}
static address locate_next_instruction(address inst);
Instruction_aarch64* current;
void set_current(Instruction_aarch64* i) { current = i; }
@@ -1511,6 +1526,11 @@ public:
#undef INSN
#ifdef _WIN64
// In MSVC, `mvn` is defined as a macro and it affects compilation
#undef mvn
#endif
// Aliases for short forms of orn
void mvn(Register Rd, Register Rm,
enum shift_kind kind = LSL, unsigned shift = 0) {

View File

@@ -32,7 +32,7 @@
inline bool Address::offset_ok_for_immed(int64_t offset, uint shift) {
uint mask = (1 << shift) - 1;
unsigned mask = (1 << shift) - 1;
if (offset < 0 || (offset & mask) != 0) {
// Unscaled signed offset, encoded in a signed imm9 field.
return Assembler::is_simm9(offset);

View File

@@ -44,13 +44,13 @@ enum {
pd_nof_cpu_regs_frame_map = RegisterImpl::number_of_registers, // number of registers used during code emission
pd_nof_fpu_regs_frame_map = FloatRegisterImpl::number_of_registers, // number of registers used during code emission
pd_nof_caller_save_cpu_regs_frame_map = 19 - 2 BSD_ONLY(- 1 /* r18 */), // number of registers killed by calls
pd_nof_caller_save_cpu_regs_frame_map = 19 - 2 /* rscratch1 and rscratch2 */ R18_RESERVED_ONLY(- 1), // number of registers killed by calls
pd_nof_caller_save_fpu_regs_frame_map = 32, // number of registers killed by calls
pd_first_callee_saved_reg = 19 - 2 BSD_ONLY(- 1 /* r18 */),
pd_last_callee_saved_reg = 26 - 2 BSD_ONLY(- 1 /* r18 */),
pd_first_callee_saved_reg = 19 - 2 /* rscratch1 and rscratch2 */ R18_RESERVED_ONLY(- 1),
pd_last_callee_saved_reg = 26 - 2 /* rscratch1 and rscratch2 */ R18_RESERVED_ONLY(- 1),
pd_last_allocatable_cpu_reg = 16 BSD_ONLY(- 1 /* r18 */),
pd_last_allocatable_cpu_reg = 16 R18_RESERVED_ONLY(- 1),
pd_nof_cpu_regs_reg_alloc
= pd_last_allocatable_cpu_reg + 1, // number of registers that are visible to register allocator
@@ -60,9 +60,9 @@ enum {
pd_nof_fpu_regs_linearscan = pd_nof_fpu_regs_frame_map, // number of registers visible to linear scan
pd_nof_xmm_regs_linearscan = 0, // like sparc we don't have any of these
pd_first_cpu_reg = 0,
pd_last_cpu_reg = 16 BSD_ONLY(- 1 /* r18 */),
pd_last_cpu_reg = 16 R18_RESERVED_ONLY(- 1),
pd_first_byte_reg = 0,
pd_last_byte_reg = 16 BSD_ONLY(- 1 /* r18 */),
pd_last_byte_reg = 16 R18_RESERVED_ONLY(- 1),
pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
pd_last_fpu_reg = pd_first_fpu_reg + 31,

View File

@@ -28,3 +28,4 @@
//--------------------------------------------------------
// No FPU stack on AARCH64
#include "precompiled.hpp"

View File

@@ -181,8 +181,9 @@ void FrameMap::initialize() {
map_register(i, r15); r15_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r16); r16_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r17); r17_opr = LIR_OprFact::single_cpu(i); i++;
#ifndef __APPLE__
map_register(i, r18); r18_opr = LIR_OprFact::single_cpu(i); i++;
#ifndef R18_RESERVED
// See comment in register_aarch64.hpp
map_register(i, r18_reserved); r18_opr = LIR_OprFact::single_cpu(i); i++;
#endif
map_register(i, r19); r19_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r20); r20_opr = LIR_OprFact::single_cpu(i); i++;
@@ -201,8 +202,9 @@ void FrameMap::initialize() {
map_register(i, r8); r8_opr = LIR_OprFact::single_cpu(i); i++; // rscratch1
map_register(i, r9); r9_opr = LIR_OprFact::single_cpu(i); i++; // rscratch2
#ifdef __APPLE__
map_register(i, r18); r18_opr = LIR_OprFact::single_cpu(i); i++; // rscratch2
#ifdef R18_RESERVED
// See comment in register_aarch64.hpp
map_register(i, r18_reserved); r18_opr = LIR_OprFact::single_cpu(i); i++;
#endif
rscratch1_opr = r8_opr;
@@ -233,7 +235,8 @@ void FrameMap::initialize() {
_caller_save_cpu_regs[13] = r15_opr;
_caller_save_cpu_regs[14] = r16_opr;
_caller_save_cpu_regs[15] = r17_opr;
#ifndef __APPLE__
#ifndef R18_RESERVED
// See comment in register_aarch64.hpp
_caller_save_cpu_regs[16] = r18_opr;
#endif
@@ -261,7 +264,7 @@ void FrameMap::initialize() {
r15_oop_opr = as_oop_opr(r15);
r16_oop_opr = as_oop_opr(r16);
r17_oop_opr = as_oop_opr(r17);
r18_oop_opr = as_oop_opr(r18);
r18_oop_opr = as_oop_opr(r18_reserved);
r19_oop_opr = as_oop_opr(r19);
r20_oop_opr = as_oop_opr(r20);
r21_oop_opr = as_oop_opr(r21);

View File

@@ -2042,7 +2042,7 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
} else if (code == lir_cmp_l2i) {
Label done;
__ cmp(left->as_register_lo(), right->as_register_lo());
__ mov(dst->as_register(), (u_int64_t)-1L);
__ mov(dst->as_register(), (uint64_t)-1L);
__ br(Assembler::LT, done);
__ csinc(dst->as_register(), zr, zr, Assembler::EQ);
__ bind(done);
@@ -2314,7 +2314,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
int elem_size = type2aelembytes(basic_type);
int shift_amount;
int scale = exact_log2(elem_size);
Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());

View File

@@ -290,7 +290,7 @@ void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr bas
}
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
if (is_power_of_2(c - 1)) {
__ shift_left(left, exact_log2(c - 1), tmp);

View File

@@ -670,17 +670,18 @@ intptr_t* frame::real_fp() const {
#undef DESCRIBE_FP_OFFSET
#define DESCRIBE_FP_OFFSET(name) \
{ \
uintptr_t *p = (uintptr_t *)fp; \
printf("0x%016lx 0x%016lx %s\n", (uintptr_t)(p + frame::name##_offset), \
p[frame::name##_offset], #name); \
#define DESCRIBE_FP_OFFSET(name) \
{ \
uintptr_t *p = (uintptr_t *)fp; \
printf(INTPTR_FORMAT " " INTPTR_FORMAT " %s\n", \
(uintptr_t)(p + frame::name##_offset), \
p[frame::name##_offset], #name); \
}
static __thread uintptr_t nextfp;
static __thread uintptr_t nextpc;
static __thread uintptr_t nextsp;
static __thread RegisterMap *reg_map;
static THREAD_LOCAL uintptr_t nextfp;
static THREAD_LOCAL uintptr_t nextpc;
static THREAD_LOCAL uintptr_t nextsp;
static THREAD_LOCAL RegisterMap *reg_map;
static void printbc(Method *m, intptr_t bcx) {
const char *name;

View File

@@ -53,4 +53,13 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#define THREAD_LOCAL_POLL
#if defined(_WIN64)
#define R18_RESERVED
#define R18_RESERVED_ONLY(code) code
#define NOT_R18_RESERVED(code)
#else
#define R18_RESERVED_ONLY(code)
#define NOT_R18_RESERVED(code) code
#endif
#endif // CPU_AARCH64_VM_GLOBALDEFINITIONS_AARCH64_HPP

View File

@@ -26,19 +26,6 @@
#ifndef CPU_AARCH64_VM_ICACHE_AARCH64_HPP
#define CPU_AARCH64_VM_ICACHE_AARCH64_HPP
// Interface for updating the instruction cache. Whenever the VM
// modifies code, part of the processor instruction cache potentially
// has to be flushed.
class ICache : public AbstractICache {
public:
static void initialize();
static void invalidate_word(address addr) {
__clear_cache((char *)addr, (char *)(addr + 3));
}
static void invalidate_range(address start, int nbytes) {
__clear_cache((char *)start, (char *)(start + nbytes));
}
};
#include OS_CPU_HEADER(icache)
#endif // CPU_AARCH64_VM_ICACHE_AARCH64_HPP

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,10 @@
*/
#include <stdlib.h>
#include <stdint.h>
#include "precompiled.hpp"
#include "utilities/globalDefinitions.hpp"
#include "immediate_aarch64.hpp"
// there are at most 2^13 possible logical immediate encodings
@@ -34,14 +38,14 @@ static int li_table_entry_count;
// for forward lookup we just use a direct array lookup
// and assume that the client has supplied a valid encoding
// table[encoding] = immediate
static u_int64_t LITable[LI_TABLE_SIZE];
static uint64_t LITable[LI_TABLE_SIZE];
// for reverse lookup we need a sparse map so we store a table of
// immediate and encoding pairs sorted by immediate value
struct li_pair {
u_int64_t immediate;
u_int32_t encoding;
uint64_t immediate;
uint32_t encoding;
};
static struct li_pair InverseLITable[LI_TABLE_SIZE];
@@ -63,9 +67,9 @@ int compare_immediate_pair(const void *i1, const void *i2)
// helper functions used by expandLogicalImmediate
// for i = 1, ... N result<i-1> = 1 other bits are zero
static inline u_int64_t ones(int N)
static inline uint64_t ones(int N)
{
return (N == 64 ? (u_int64_t)-1UL : ((1UL << N) - 1));
return (N == 64 ? -1ULL : (1ULL << N) - 1);
}
/*
@@ -73,49 +77,49 @@ static inline u_int64_t ones(int N)
*/
// 32 bit mask with bits [hi,...,lo] set
static inline u_int32_t mask32(int hi = 31, int lo = 0)
static inline uint32_t mask32(int hi = 31, int lo = 0)
{
int nbits = (hi + 1) - lo;
return ((1 << nbits) - 1) << lo;
}
static inline u_int64_t mask64(int hi = 63, int lo = 0)
static inline uint64_t mask64(int hi = 63, int lo = 0)
{
int nbits = (hi + 1) - lo;
return ((1L << nbits) - 1) << lo;
}
// pick bits [hi,...,lo] from val
static inline u_int32_t pick32(u_int32_t val, int hi = 31, int lo = 0)
static inline uint32_t pick32(uint32_t val, int hi = 31, int lo = 0)
{
return (val & mask32(hi, lo));
}
// pick bits [hi,...,lo] from val
static inline u_int64_t pick64(u_int64_t val, int hi = 31, int lo = 0)
static inline uint64_t pick64(uint64_t val, int hi = 31, int lo = 0)
{
return (val & mask64(hi, lo));
}
// mask [hi,lo] and shift down to start at bit 0
static inline u_int32_t pickbits32(u_int32_t val, int hi = 31, int lo = 0)
static inline uint32_t pickbits32(uint32_t val, int hi = 31, int lo = 0)
{
return (pick32(val, hi, lo) >> lo);
}
// mask [hi,lo] and shift down to start at bit 0
static inline u_int64_t pickbits64(u_int64_t val, int hi = 63, int lo = 0)
static inline uint64_t pickbits64(uint64_t val, int hi = 63, int lo = 0)
{
return (pick64(val, hi, lo) >> lo);
}
// result<0> to val<N>
static inline u_int64_t pickbit(u_int64_t val, int N)
static inline uint64_t pickbit(uint64_t val, int N)
{
return pickbits64(val, N, N);
}
static inline u_int32_t uimm(u_int32_t val, int hi, int lo)
static inline uint32_t uimm(uint32_t val, int hi, int lo)
{
return pickbits32(val, hi, lo);
}
@@ -123,11 +127,11 @@ static inline u_int32_t uimm(u_int32_t val, int hi, int lo)
// SPEC bits(M*N) Replicate(bits(M) x, integer N);
// this is just an educated guess
u_int64_t replicate(u_int64_t bits, int nbits, int count)
uint64_t replicate(uint64_t bits, int nbits, int count)
{
u_int64_t result = 0;
uint64_t result = 0;
// nbits may be 64 in which case we want mask to be -1
u_int64_t mask = ones(nbits);
uint64_t mask = ones(nbits);
for (int i = 0; i < count ; i++) {
result <<= nbits;
result |= (bits & mask);
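As a sanity check on the spec guess above, a self-contained C++ rendering of the same replicate() logic with a worked example (illustrative only; replicate_demo is a hypothetical name):
#include <cassert>
#include <cstdint>

// Concatenate `count` copies of the low `nbits` bits of `bits`.
static uint64_t replicate_demo(uint64_t bits, int nbits, int count) {
  uint64_t mask = (nbits == 64) ? ~0ULL : ((1ULL << nbits) - 1);
  uint64_t result = 0;
  for (int i = 0; i < count; i++) {
    result = (result << nbits) | (bits & mask);
  }
  return result;
}

int main() {
  // 0b101 repeated four times is 0b101101101101 == 0xB6D
  assert(replicate_demo(0b101, 3, 4) == 0xB6D);
  return 0;
}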
@@ -140,24 +144,24 @@ u_int64_t replicate(u_int64_t bits, int nbits, int count)
// encoding must be treated as an UNALLOC instruction
// construct a 32 bit immediate value for a logical immediate operation
int expandLogicalImmediate(u_int32_t immN, u_int32_t immr,
u_int32_t imms, u_int64_t &bimm)
int expandLogicalImmediate(uint32_t immN, uint32_t immr,
uint32_t imms, uint64_t &bimm)
{
int len; // ought to be <= 6
u_int32_t levels; // 6 bits
u_int32_t tmask_and; // 6 bits
u_int32_t wmask_and; // 6 bits
u_int32_t tmask_or; // 6 bits
u_int32_t wmask_or; // 6 bits
u_int64_t imm64; // 64 bits
u_int64_t tmask, wmask; // 64 bits
u_int32_t S, R, diff; // 6 bits?
int len; // ought to be <= 6
uint32_t levels; // 6 bits
uint32_t tmask_and; // 6 bits
uint32_t wmask_and; // 6 bits
uint32_t tmask_or; // 6 bits
uint32_t wmask_or; // 6 bits
uint64_t imm64; // 64 bits
uint64_t tmask, wmask; // 64 bits
uint32_t S, R, diff; // 6 bits?
if (immN == 1) {
len = 6; // looks like 7 given the spec above but this cannot be!
} else {
len = 0;
u_int32_t val = (~imms & 0x3f);
uint32_t val = (~imms & 0x3f);
for (int i = 5; i > 0; i--) {
if (val & (1 << i)) {
len = i;
@@ -170,7 +174,7 @@ int expandLogicalImmediate(u_int32_t immN, u_int32_t immr,
// for valid inputs leading 1s in immr must be less than leading
// zeros in imms
int len2 = 0; // ought to be < len
u_int32_t val2 = (~immr & 0x3f);
uint32_t val2 = (~immr & 0x3f);
for (int i = 5; i > 0; i--) {
if (!(val2 & (1 << i))) {
len2 = i;
@@ -199,12 +203,12 @@ int expandLogicalImmediate(u_int32_t immN, u_int32_t immr,
for (int i = 0; i < 6; i++) {
int nbits = 1 << i;
u_int64_t and_bit = pickbit(tmask_and, i);
u_int64_t or_bit = pickbit(tmask_or, i);
u_int64_t and_bits_sub = replicate(and_bit, 1, nbits);
u_int64_t or_bits_sub = replicate(or_bit, 1, nbits);
u_int64_t and_bits_top = (and_bits_sub << nbits) | ones(nbits);
u_int64_t or_bits_top = (0 << nbits) | or_bits_sub;
uint64_t and_bit = pickbit(tmask_and, i);
uint64_t or_bit = pickbit(tmask_or, i);
uint64_t and_bits_sub = replicate(and_bit, 1, nbits);
uint64_t or_bits_sub = replicate(or_bit, 1, nbits);
uint64_t and_bits_top = (and_bits_sub << nbits) | ones(nbits);
uint64_t or_bits_top = (0 << nbits) | or_bits_sub;
tmask = ((tmask
& (replicate(and_bits_top, 2 * nbits, 32 / nbits)))
@@ -218,12 +222,12 @@ int expandLogicalImmediate(u_int32_t immN, u_int32_t immr,
for (int i = 0; i < 6; i++) {
int nbits = 1 << i;
u_int64_t and_bit = pickbit(wmask_and, i);
u_int64_t or_bit = pickbit(wmask_or, i);
u_int64_t and_bits_sub = replicate(and_bit, 1, nbits);
u_int64_t or_bits_sub = replicate(or_bit, 1, nbits);
u_int64_t and_bits_top = (ones(nbits) << nbits) | and_bits_sub;
u_int64_t or_bits_top = (or_bits_sub << nbits) | 0;
uint64_t and_bit = pickbit(wmask_and, i);
uint64_t or_bit = pickbit(wmask_or, i);
uint64_t and_bits_sub = replicate(and_bit, 1, nbits);
uint64_t or_bits_sub = replicate(or_bit, 1, nbits);
uint64_t and_bits_top = (ones(nbits) << nbits) | and_bits_sub;
uint64_t or_bits_top = (or_bits_sub << nbits) | 0;
wmask = ((wmask
& (replicate(and_bits_top, 2 * nbits, 32 / nbits)))
@@ -243,14 +247,17 @@ int expandLogicalImmediate(u_int32_t immN, u_int32_t immr,
// constructor to initialise the lookup tables
static void initLITables() __attribute__ ((constructor));
static void initLITables();
// Use an empty struct with a constructor as MSVC doesn't support `__attribute__ ((constructor))`
// See https://stackoverflow.com/questions/1113409/attribute-constructor-equivalent-in-vc
static struct initLITables_t { initLITables_t(void) { initLITables(); } } _initLITables;
static void initLITables()
{
li_table_entry_count = 0;
for (unsigned index = 0; index < LI_TABLE_SIZE; index++) {
u_int32_t N = uimm(index, 12, 12);
u_int32_t immr = uimm(index, 11, 6);
u_int32_t imms = uimm(index, 5, 0);
uint32_t N = uimm(index, 12, 12);
uint32_t immr = uimm(index, 11, 6);
uint32_t imms = uimm(index, 5, 0);
if (expandLogicalImmediate(N, immr, imms, LITable[index])) {
InverseLITable[li_table_entry_count].immediate = LITable[index];
InverseLITable[li_table_entry_count].encoding = index;
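The struct-with-constructor idiom introduced here is the portable stand-in for GCC's constructor attribute; a minimal standalone illustration of the pattern (hypothetical names):
#include <cstdio>

static void init_tables() { std::puts("tables initialized"); }

// A namespace-scope object of a struct whose constructor does the work is
// initialized before main() on GCC, Clang and MSVC alike.
static struct InitTablesRunner {
  InitTablesRunner() { init_tables(); }
} _init_tables_runner;

int main() { return 0; }  // "tables initialized" has already been printed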
@@ -264,12 +271,12 @@ static void initLITables()
// public APIs provided for logical immediate lookup and reverse lookup
u_int64_t logical_immediate_for_encoding(u_int32_t encoding)
uint64_t logical_immediate_for_encoding(uint32_t encoding)
{
return LITable[encoding];
}
u_int32_t encoding_for_logical_immediate(u_int64_t immediate)
uint32_t encoding_for_logical_immediate(uint64_t immediate)
{
struct li_pair pair;
struct li_pair *result;
@@ -293,15 +300,15 @@ u_int32_t encoding_for_logical_immediate(u_int64_t immediate)
// fpimm[3:0] = fraction (assuming leading 1)
// i.e. F = s * 1.f * 2^(e - b)
u_int64_t fp_immediate_for_encoding(u_int32_t imm8, int is_dp)
uint64_t fp_immediate_for_encoding(uint32_t imm8, int is_dp)
{
union {
float fpval;
double dpval;
u_int64_t val;
uint64_t val;
};
u_int32_t s, e, f;
uint32_t s, e, f;
s = (imm8 >> 7 ) & 0x1;
e = (imm8 >> 4) & 0x7;
f = imm8 & 0xf;
@@ -329,7 +336,7 @@ u_int64_t fp_immediate_for_encoding(u_int32_t imm8, int is_dp)
return val;
}
u_int32_t encoding_for_fp_immediate(float immediate)
uint32_t encoding_for_fp_immediate(float immediate)
{
// given a float which is of the form
//
@@ -341,10 +348,10 @@ u_int32_t encoding_for_fp_immediate(float immediate)
union {
float fpval;
u_int32_t val;
uint32_t val;
};
fpval = immediate;
u_int32_t s, r, f, res;
uint32_t s, r, f, res;
// sign bit is 31
s = (val >> 31) & 0x1;
// exponent is bits 30-23 but we only want the bottom 3 bits

View File

@@ -46,9 +46,9 @@
* encoding then a map lookup will return 0xffffffff.
*/
u_int64_t logical_immediate_for_encoding(u_int32_t encoding);
u_int32_t encoding_for_logical_immediate(u_int64_t immediate);
u_int64_t fp_immediate_for_encoding(u_int32_t imm8, int is_dp);
u_int32_t encoding_for_fp_immediate(float immediate);
uint64_t logical_immediate_for_encoding(uint32_t encoding);
uint32_t encoding_for_logical_immediate(uint64_t immediate);
uint64_t fp_immediate_for_encoding(uint32_t imm8, int is_dp);
uint32_t encoding_for_fp_immediate(float immediate);
#endif // _IMMEDIATE_H

View File

@@ -21,6 +21,7 @@
* questions.
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "jvmci/jvmciCodeInstaller.hpp"
#include "jvmci/jvmciRuntime.hpp"

View File

@@ -40,16 +40,21 @@
#include "oops/accessDecorators.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/node.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/node.hpp"
#endif
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
@@ -88,7 +93,7 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
offset = target-branch;
int shift = Instruction_aarch64::extract(insn, 31, 31);
if (shift) {
u_int64_t dest = (u_int64_t)target;
uint64_t dest = (uint64_t)target;
uint64_t pc_page = (uint64_t)branch >> 12;
uint64_t adr_page = (uint64_t)target >> 12;
unsigned offset_lo = dest & 0xfff;
@@ -141,7 +146,7 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
Instruction_aarch64::spatch(branch, 23, 5, offset);
Instruction_aarch64::patch(branch, 30, 29, offset_lo);
} else if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010100) {
u_int64_t dest = (u_int64_t)target;
uint64_t dest = (uint64_t)target;
// Move wide constant
assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch");
assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch");
@@ -267,13 +272,13 @@ address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
ShouldNotReachHere();
}
} else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
u_int32_t *insns = (u_int32_t *)insn_addr;
uint32_t *insns = (uint32_t *)insn_addr;
// Move wide constant: movz, movk, movk. See movptr().
assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
+ (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
+ (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
return address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
+ (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
+ (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
} else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
return 0;
@@ -735,12 +740,15 @@ address MacroAssembler::trampoline_call(Address entry, CodeBuffer *cbuf) {
// We need a trampoline if branches are far.
if (far_branches()) {
bool in_scratch_emit_size = false;
#ifdef COMPILER2
// We don't want to emit a trampoline if C2 is generating dummy
// code during its branch shortening phase.
CompileTask* task = ciEnv::current()->task();
bool in_scratch_emit_size =
in_scratch_emit_size =
(task != NULL && is_c2_compile(task->comp_level()) &&
Compile::current()->in_scratch_emit_size());
#endif
if (!in_scratch_emit_size) {
address stub = emit_trampoline_stub(offset(), entry.target());
if (stub == NULL) {
@@ -774,7 +782,9 @@ address MacroAssembler::trampoline_call(Address entry, CodeBuffer *cbuf) {
address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
address dest) {
address stub = start_a_stub(Compile::MAX_stubs_size/2);
// Max stub size: alignment nop, TrampolineStub.
address stub = start_a_stub(NativeInstruction::instruction_size
+ NativeCallTrampolineStub::instruction_size);
if (stub == NULL) {
return NULL; // CodeBuffer::expand failed
}
@@ -1468,7 +1478,7 @@ void MacroAssembler::null_check(Register reg, int offset) {
void MacroAssembler::mov(Register r, Address dest) {
code_section()->relocate(pc(), dest.rspec());
u_int64_t imm64 = (u_int64_t)dest.target();
uint64_t imm64 = (uint64_t)dest.target();
movptr(r, imm64);
}
@@ -1480,7 +1490,7 @@ void MacroAssembler::movptr(Register r, uintptr_t imm64) {
#ifndef PRODUCT
{
char buffer[64];
snprintf(buffer, sizeof(buffer), "0x%"PRIX64, (uint64_t)imm64);
snprintf(buffer, sizeof(buffer), PTR64_FORMAT, imm64);
block_comment(buffer);
}
#endif
@@ -1501,20 +1511,20 @@ void MacroAssembler::movptr(Register r, uintptr_t imm64) {
// imm32 == hex abcdefgh T2S: Vd = abcdefghabcdefgh
// imm32 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh
// T1D/T2D: invalid
void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint32_t imm32) {
assert(T != T1D && T != T2D, "invalid arrangement");
if (T == T8B || T == T16B) {
assert((imm32 & ~0xff) == 0, "extraneous bits in unsigned imm32 (T8B/T16B)");
movi(Vd, T, imm32 & 0xff, 0);
return;
}
u_int32_t nimm32 = ~imm32;
uint32_t nimm32 = ~imm32;
if (T == T4H || T == T8H) {
assert((imm32 & ~0xffff) == 0, "extraneous bits in unsigned imm32 (T4H/T8H)");
imm32 &= 0xffff;
nimm32 &= 0xffff;
}
u_int32_t x = imm32;
uint32_t x = imm32;
int movi_cnt = 0;
int movn_cnt = 0;
while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
@@ -1538,12 +1548,12 @@ void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32)
}
}
void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64)
{
#ifndef PRODUCT
{
char buffer[64];
snprintf(buffer, sizeof(buffer), "0x%"PRIX64, (uint64_t)imm64);
snprintf(buffer, sizeof(buffer), PTR64_FORMAT, imm64);
block_comment(buffer);
}
#endif
@@ -1552,7 +1562,7 @@ void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
} else {
// we can use a combination of MOVZ or MOVN with
// MOVK to build up the constant
u_int64_t imm_h[4];
uint64_t imm_h[4];
int zero_count = 0;
int neg_count = 0;
int i;
@@ -1573,7 +1583,7 @@ void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
} else if (zero_count == 3) {
for (i = 0; i < 4; i++) {
if (imm_h[i] != 0L) {
movz(dst, (u_int32_t)imm_h[i], (i << 4));
movz(dst, (uint32_t)imm_h[i], (i << 4));
break;
}
}
@@ -1581,7 +1591,7 @@ void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
// one MOVN will do
for (int i = 0; i < 4; i++) {
if (imm_h[i] != 0xffffL) {
movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
break;
}
}
@@ -1589,74 +1599,74 @@ void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
// one MOVZ and one MOVK will do
for (i = 0; i < 3; i++) {
if (imm_h[i] != 0L) {
movz(dst, (u_int32_t)imm_h[i], (i << 4));
movz(dst, (uint32_t)imm_h[i], (i << 4));
i++;
break;
}
}
for (;i < 4; i++) {
if (imm_h[i] != 0L) {
movk(dst, (u_int32_t)imm_h[i], (i << 4));
movk(dst, (uint32_t)imm_h[i], (i << 4));
}
}
} else if (neg_count == 2) {
// one MOVN and one MOVK will do
for (i = 0; i < 4; i++) {
if (imm_h[i] != 0xffffL) {
movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
i++;
break;
}
}
for (;i < 4; i++) {
if (imm_h[i] != 0xffffL) {
movk(dst, (u_int32_t)imm_h[i], (i << 4));
movk(dst, (uint32_t)imm_h[i], (i << 4));
}
}
} else if (zero_count == 1) {
// one MOVZ and two MOVKs will do
for (i = 0; i < 4; i++) {
if (imm_h[i] != 0L) {
movz(dst, (u_int32_t)imm_h[i], (i << 4));
movz(dst, (uint32_t)imm_h[i], (i << 4));
i++;
break;
}
}
for (;i < 4; i++) {
if (imm_h[i] != 0x0L) {
movk(dst, (u_int32_t)imm_h[i], (i << 4));
movk(dst, (uint32_t)imm_h[i], (i << 4));
}
}
} else if (neg_count == 1) {
// one MOVN and two MOVKs will do
for (i = 0; i < 4; i++) {
if (imm_h[i] != 0xffffL) {
movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
i++;
break;
}
}
for (;i < 4; i++) {
if (imm_h[i] != 0xffffL) {
movk(dst, (u_int32_t)imm_h[i], (i << 4));
movk(dst, (uint32_t)imm_h[i], (i << 4));
}
}
} else {
// use a MOVZ and 3 MOVKs (makes it easier to debug)
movz(dst, (u_int32_t)imm_h[0], 0);
movz(dst, (uint32_t)imm_h[0], 0);
for (i = 1; i < 4; i++) {
movk(dst, (u_int32_t)imm_h[i], (i << 4));
movk(dst, (uint32_t)imm_h[i], (i << 4));
}
}
}
}
void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32)
{
#ifndef PRODUCT
{
char buffer[64];
snprintf(buffer, sizeof(buffer), "0x%"PRIX32, imm32);
snprintf(buffer, sizeof(buffer), PTR32_FORMAT, imm32);
block_comment(buffer);
}
#endif
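A worked example of the halfword counting in mov_immediate64 above (illustrative): imm64 = 0x0000123400005678 splits into halfwords {0x5678, 0x0000, 0x1234, 0x0000}, so zero_count == 2 and the "one MOVZ and one MOVK will do" branch emits:
// movz dst, #0x5678, lsl #0     // sets halfword 0, zeroes the rest
// movk dst, #0x1234, lsl #32    // patches halfword 2 in place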
@@ -1665,7 +1675,7 @@ void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
} else {
// we can use MOVZ, MOVN or two calls to MOVK to build up the
// constant
u_int32_t imm_h[2];
uint32_t imm_h[2];
imm_h[0] = imm32 & 0xffff;
imm_h[1] = ((imm32 >> 16) & 0xffff);
if (imm_h[0] == 0) {
@@ -1705,7 +1715,7 @@ Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_of
{
uint64_t word_offset = byte_offset >> shift;
uint64_t masked_offset = word_offset & 0xfff000;
if (Address::offset_ok_for_immed(word_offset - masked_offset, 0)
if (Address::offset_ok_for_immed(word_offset - masked_offset)
&& Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
add(Rd, base, masked_offset << shift);
word_offset -= masked_offset;
@@ -1820,7 +1830,7 @@ bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size
return true;
} else {
assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported.");
const unsigned mask = size_in_bytes - 1;
const uint64_t mask = size_in_bytes - 1;
if (adr.getMode() == Address::base_plus_offset &&
(adr.offset() & mask) == 0) { // only supports base_plus_offset.
code()->set_last_insn(pc());
@@ -2530,9 +2540,17 @@ void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
}
}
RegSet MacroAssembler::call_clobbered_registers() {
RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
#ifndef R18_RESERVED
regs += r18_reserved;
#endif
return regs;
}
void MacroAssembler::push_call_clobbered_registers() {
int step = 4 * wordSize;
push(RegSet::range(r0, r18) - RegSet::of(rscratch1, rscratch2) BSD_ONLY(- r18), sp);
push(call_clobbered_registers() - RegSet::of(rscratch1, rscratch2), sp);
sub(sp, sp, step);
mov(rscratch1, -step);
// Push v0-v7, v16-v31.
@@ -2552,7 +2570,7 @@ void MacroAssembler::pop_call_clobbered_registers() {
as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize)));
}
pop(RegSet::range(r0, r18) - RegSet::of(rscratch1, rscratch2) BSD_ONLY(- r18), sp);
pop(call_clobbered_registers() - RegSet::of(rscratch1, rscratch2), sp);
}
void MacroAssembler::push_CPU_state(bool save_vectors) {
@@ -2738,7 +2756,7 @@ void MacroAssembler::merge_ldst(Register rt,
// Overwrite previous generated binary.
code_section()->set_end(prev);
const int sz = prev_ldst->size_in_bytes();
const size_t sz = prev_ldst->size_in_bytes();
assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
if (!is_store) {
BLOCK_COMMENT("merged ldr pair");
@@ -4277,6 +4295,7 @@ void MacroAssembler::remove_frame(int framesize) {
}
}
#ifdef COMPILER2
typedef void (MacroAssembler::* chr_insn)(Register Rt, const Address &adr);
// Search for str1 in str2 and return index or -1
@@ -4782,7 +4801,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
Register cnt1, Register cnt2, Register result, Register tmp1, Register tmp2,
FloatRegister vtmp1, FloatRegister vtmp2, FloatRegister vtmp3, int ae) {
Label DONE, SHORT_LOOP, SHORT_STRING, SHORT_LAST, TAIL, STUB,
DIFFERENCE, NEXT_WORD, SHORT_LOOP_TAIL, SHORT_LAST2, SHORT_LAST_INIT,
DIFF, NEXT_WORD, SHORT_LOOP_TAIL, SHORT_LAST2, SHORT_LAST_INIT,
SHORT_LOOP_START, TAIL_CHECK;
const int STUB_THRESHOLD = 64 + 8;
@@ -4869,7 +4888,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
adds(cnt2, cnt2, isUL ? 4 : 8);
br(GE, TAIL);
eor(rscratch2, tmp1, tmp2);
cbnz(rscratch2, DIFFERENCE);
cbnz(rscratch2, DIFF);
// main loop
bind(NEXT_WORD);
if (str1_isL == str2_isL) {
@@ -4895,10 +4914,10 @@ void MacroAssembler::string_compare(Register str1, Register str2,
eor(rscratch2, tmp1, tmp2);
cbz(rscratch2, NEXT_WORD);
b(DIFFERENCE);
b(DIFF);
bind(TAIL);
eor(rscratch2, tmp1, tmp2);
cbnz(rscratch2, DIFFERENCE);
cbnz(rscratch2, DIFF);
// Last longword. In the case where length == 4 we compare the
// same longword twice, but that's still faster than another
// conditional branch.
@@ -4922,7 +4941,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
// Find the first different characters in the longwords and
// compute their difference.
bind(DIFFERENCE);
bind(DIFF);
rev(rscratch2, rscratch2);
clz(rscratch2, rscratch2);
andr(rscratch2, rscratch2, isLL ? -8 : -16);
@@ -5002,6 +5021,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
BLOCK_COMMENT("} string_compare");
}
#endif // COMPILER2
// This method checks if provided byte array contains byte with highest bit set.
void MacroAssembler::has_negatives(Register ary1, Register len, Register result) {
@@ -5400,7 +5420,7 @@ void MacroAssembler::zero_words(Register ptr, Register cnt)
// base: Address of a buffer to be zeroed, 8 bytes aligned.
// cnt: Immediate count in HeapWords.
#define SmallArraySize (18 * BytesPerLong)
void MacroAssembler::zero_words(Register base, u_int64_t cnt)
void MacroAssembler::zero_words(Register base, uint64_t cnt)
{
BLOCK_COMMENT("zero_words {");
int i = cnt & 1; // store any odd word to start

View File

@@ -88,7 +88,7 @@ class MacroAssembler: public Assembler {
= (operand_valid_for_logical_immediate(false /*is32*/,
(uint64_t)Universe::narrow_klass_base())
&& ((uint64_t)Universe::narrow_klass_base()
> (1UL << log2_intptr((uintptr_t)Universe::narrow_klass_range()))));
> (1ULL << log2_intptr(Universe::narrow_klass_range()))));
}
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
@@ -185,7 +185,15 @@ class MacroAssembler: public Assembler {
mov(rscratch2, call_site);
}
// Microsoft's MSVC team considers __FUNCSIG__ approximately equivalent (modulo calling conventions) to __PRETTY_FUNCTION__
// Also, from Clang patch: "It is very similar to GCC's PRETTY_FUNCTION, except it prints the calling convention."
// https://reviews.llvm.org/D3311
#ifdef _WIN64
#define call_Unimplemented() _call_Unimplemented((address)__FUNCSIG__)
#else
#define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
#endif
// aliases defined in AARCH64 spec
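A hedged sketch of the effect (hypothetical function, not from the patch): both macros expand to a readable signature string, MSVC's including the calling convention:
#include <cstdio>

#ifdef _WIN64
  #define FUNC_NAME __FUNCSIG__         // e.g. "void __cdecl f(void)"
#else
  #define FUNC_NAME __PRETTY_FUNCTION__ // e.g. "void f()"
#endif

void f() { std::printf("%s\n", FUNC_NAME); }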
@@ -448,8 +456,8 @@ class MacroAssembler: public Assembler {
// first two private routines for loading 32 bit or 64 bit constants
private:
void mov_immediate64(Register dst, u_int64_t imm64);
void mov_immediate32(Register dst, u_int32_t imm32);
void mov_immediate64(Register dst, uint64_t imm64);
void mov_immediate32(Register dst, uint32_t imm32);
int push(unsigned int bitset, Register stack);
int pop(unsigned int bitset, Register stack);
@@ -460,6 +468,8 @@ public:
void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }
static RegSet call_clobbered_registers();
// Push and pop everything that might be clobbered by a native
// runtime call except rscratch1 and rscratch2. (They are always
// scratch, so we don't have to protect them.) Only save the lower
@@ -479,7 +489,7 @@ public:
inline void mov(Register dst, unsigned long imm64) { mov_immediate64(dst, (uint64_t)imm64); }
inline void mov(Register dst, unsigned long long imm64) { mov_immediate64(dst, (uint64_t)imm64); }
inline void movw(Register dst, u_int32_t imm32)
inline void movw(Register dst, uint32_t imm32)
{
mov_immediate32(dst, imm32);
}
@@ -493,7 +503,7 @@ public:
void movptr(Register r, uintptr_t imm64);
void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32);
void mov(FloatRegister Vd, SIMD_Arrangement T, uint32_t imm32);
void mov(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
orr(Vd, T, Vn, Vn);
@@ -503,10 +513,10 @@ public:
// Generalized Test Bit And Branch, including a "far" variety which
// spans more than 32KiB.
void tbr(Condition cond, Register Rt, int bitpos, Label &dest, bool far = false) {
void tbr(Condition cond, Register Rt, int bitpos, Label &dest, bool isfar = false) {
assert(cond == EQ || cond == NE, "must be");
if (far)
if (isfar)
cond = ~cond;
void (Assembler::* branch)(Register Rt, int bitpos, Label &L);
@@ -515,7 +525,7 @@ public:
else
branch = &Assembler::tbnz;
if (far) {
if (isfar) {
Label L;
(this->*branch)(Rt, bitpos, L);
b(dest);
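The far variant works by inverting the condition so the short-range test-bit branch merely skips over a full-range unconditional branch; in emitted form (sketch):
//     tbnz  Rt, #bitpos, L   // inverted test: taken when the original
//     b     dest             //   condition is false, skipping the far branch
//   L:                       // tbz/tbnz themselves reach only +/-32KiB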
@@ -1209,7 +1219,7 @@ public:
int elem_size);
void fill_words(Register base, Register cnt, Register value);
void zero_words(Register base, u_int64_t cnt);
void zero_words(Register base, uint64_t cnt);
void zero_words(Register ptr, Register cnt);
void zero_dcache_blocks(Register base, Register cnt);

View File

@@ -65,7 +65,7 @@
// Table with p(r) polynomial coefficients
// and table representation of logarithm values (hi and low parts)
__attribute__ ((aligned(64))) juint _L_tbl[] =
ATTRIBUTE_ALIGNED(64) juint _L_tbl[] =
{
// coefficients of p(r) polynomial:
// _coeff[]
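ATTRIBUTE_ALIGNED replaces the GCC-only spelling because MSVC has no __attribute__; a hedged sketch of how such a macro is commonly defined (the actual HotSpot definition lives in shared headers and may differ):
#if defined(_MSC_VER)
  #define ATTRIBUTE_ALIGNED(x) __declspec(align(x))
#else
  #define ATTRIBUTE_ALIGNED(x) __attribute__((aligned(x)))
#endif

// MSVC's __declspec must precede the declarator, which is why the macro is
// moved to the front of each declaration in this patch.
ATTRIBUTE_ALIGNED(64) static unsigned int table[256];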

View File

@@ -689,7 +689,7 @@ void MacroAssembler::generate__kernel_rem_pio2(address two_over_pi, address pio2
RECOMP_FOR1_CHECK;
Register tmp2 = r1, n = r2, jv = r4, tmp5 = r5, jx = r6,
tmp3 = r7, iqBase = r10, ih = r11, tmp4 = r12, tmp1 = r13,
jz = r14, j = r15, twoOverPiBase = r16, i = r17, qBase = r20;
jz = r14, j = r15, twoOverPiBase = r16, i = r17, qBase = r19;
// jp = jk == init_jk[prec] = init_jk[2] == {2,3,4,6}[2] == 4
// jx = nx - 1
lea(twoOverPiBase, ExternalAddress(two_over_pi));
@@ -1421,6 +1421,12 @@ void MacroAssembler::generate_dsin_dcos(bool isCos, address npio2_hw,
Label DONE, ARG_REDUCTION, TINY_X, RETURN_SIN, EARLY_CASE;
Register X = r0, absX = r1, n = r2, ix = r3;
FloatRegister y0 = v4, y1 = v5;
enter();
// r19 is used in TemplateInterpreterGenerator::generate_math_entry
RegSet saved_regs = RegSet::of(r19);
push (saved_regs, sp);
block_comment("check |x| ~< pi/4, NaN, Inf and |x| < 2**-27 cases"); {
fmovd(X, v0);
mov(rscratch2, 0x3e400000);
@@ -1438,14 +1444,14 @@ void MacroAssembler::generate_dsin_dcos(bool isCos, address npio2_hw,
// Set last bit unconditionally to make it NaN
orr(r10, r10, 1);
fmovd(v0, r10);
ret(lr);
b(DONE);
}
block_comment("kernel_sin/kernel_cos: if(ix<0x3e400000) {<fast return>}"); {
bind(TINY_X);
if (isCos) {
fmovd(v0, 1.0);
}
ret(lr);
b(DONE);
}
bind(ARG_REDUCTION); /* argument reduction needed */
block_comment("n = __ieee754_rem_pio2(x,y);"); {
@@ -1465,7 +1471,7 @@ void MacroAssembler::generate_dsin_dcos(bool isCos, address npio2_hw,
tbz(n, 1, DONE);
}
fnegd(v0, v0);
ret(lr);
b(DONE);
bind(RETURN_SIN);
generate_kernel_sin(y0, true, dsin_coef);
if (isCos) {
@@ -1474,7 +1480,7 @@ void MacroAssembler::generate_dsin_dcos(bool isCos, address npio2_hw,
tbz(n, 1, DONE);
}
fnegd(v0, v0);
ret(lr);
b(DONE);
}
bind(EARLY_CASE);
eor(y1, T8B, y1, y1);
@@ -1484,5 +1490,7 @@ void MacroAssembler::generate_dsin_dcos(bool isCos, address npio2_hw,
generate_kernel_sin(v0, false, dsin_coef);
}
bind(DONE);
pop(saved_regs, sp);
leave();
ret(lr);
}

View File

@@ -681,7 +681,7 @@ public:
return 0;
}
}
size_t size_in_bytes() { return 1 << size(); }
size_t size_in_bytes() { return 1ULL << size(); }
bool is_not_pre_post_index() { return (is_ldst_ur() || is_ldst_unsigned_offset()); }
bool is_load() {
assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 ||

View File

@@ -36,7 +36,7 @@ const char* RegisterImpl::name() const {
"c_rarg0", "c_rarg1", "c_rarg2", "c_rarg3", "c_rarg4", "c_rarg5", "c_rarg6", "c_rarg7",
"rscratch1", "rscratch2",
"r10", "r11", "r12", "r13", "r14", "r15", "r16",
"r17", "r18", "r19",
"r17", "r18_tls", "r19",
"resp", "rdispatch", "rbcp", "r23", "rlocals", "rmonitors", "rcpool", "rheapbase",
"rthread", "rfp", "lr", "sp"
};

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -90,7 +90,18 @@ CONSTANT_REGISTER_DECLARATION(Register, r14, (14));
CONSTANT_REGISTER_DECLARATION(Register, r15, (15));
CONSTANT_REGISTER_DECLARATION(Register, r16, (16));
CONSTANT_REGISTER_DECLARATION(Register, r17, (17));
CONSTANT_REGISTER_DECLARATION(Register, r18, (18));
// In the ABI for Windows+AArch64 the register r18 is used to store the pointer
// to the current thread's TEB (where TLS variables are stored). We could
// carefully save and restore r18 at key places, however Win32 Structured
// Exception Handling (SEH) is using TLS to unwind the stack. If r18 is used
// for any other purpose at the time of an exception happening, SEH would not
// be able to unwind the stack properly and most likely crash.
//
// It's easier to avoid allocating r18 altogether.
//
// See https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=vs-2019#integer-registers
CONSTANT_REGISTER_DECLARATION(Register, r18_reserved, (18));
CONSTANT_REGISTER_DECLARATION(Register, r19, (19));
CONSTANT_REGISTER_DECLARATION(Register, r20, (20));
CONSTANT_REGISTER_DECLARATION(Register, r21, (21));
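For context (not from this patch): the OS itself reads x18 as the TEB pointer on Windows/ARM64; for instance NtCurrentTeb() compiles down to a read of x18, so a stale value would corrupt every TLS access and SEH unwind until it is restored:
#include <windows.h>

void* current_teb() {
  // On ARM64 this is essentially "mov x0, x18"; if the JIT had used x18 as a
  // scratch register, the value returned here would be garbage.
  return NtCurrentTeb();
}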
@@ -203,6 +214,8 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {
static const int max_fpr;
};
class RegSetIterator;
// A set of registers
class RegSet {
uint32_t _bitset;
@@ -230,6 +243,11 @@ public:
return *this;
}
RegSet &operator-=(const RegSet aSet) {
*this = *this - aSet;
return *this;
}
static RegSet of(Register r1) {
return RegSet(r1);
}
@@ -256,6 +274,49 @@ public:
}
uint32_t bits() const { return _bitset; }
private:
Register first() {
uint32_t first = _bitset & -_bitset;
return first ? as_Register(exact_log2(first)) : noreg;
}
public:
friend class RegSetIterator;
RegSetIterator begin();
};
class RegSetIterator {
RegSet _regs;
public:
RegSetIterator(RegSet x): _regs(x) {}
RegSetIterator(const RegSetIterator& mit) : _regs(mit._regs) {}
RegSetIterator& operator++() {
Register r = _regs.first();
if (r != noreg)
_regs -= r;
return *this;
}
bool operator==(const RegSetIterator& rhs) const {
return _regs.bits() == rhs._regs.bits();
}
bool operator!=(const RegSetIterator& rhs) const {
return ! (rhs == *this);
}
Register operator*() {
return _regs.first();
}
};
inline RegSetIterator RegSet::begin() {
return RegSetIterator(*this);
}
#endif // CPU_AARCH64_VM_REGISTER_AARCH64_HPP
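Usage mirrors the stub generator later in this patch; a small sketch (do_something is a placeholder):
RegSet regs = RegSet::range(r0, r5) - RegSet::of(r3);
for (RegSetIterator it = regs.begin(); *it != noreg; ++it) {
  do_something(*it);  // visits r0, r1, r2, r4, r5, lowest encoding first
}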

View File

@@ -50,7 +50,7 @@ REGISTER_DEFINITION(Register, r14);
REGISTER_DEFINITION(Register, r15);
REGISTER_DEFINITION(Register, r16);
REGISTER_DEFINITION(Register, r17);
REGISTER_DEFINITION(Register, r18);
REGISTER_DEFINITION(Register, r18_reserved); // see comment in register_aarch64.hpp
REGISTER_DEFINITION(Register, r19);
REGISTER_DEFINITION(Register, r20);
REGISTER_DEFINITION(Register, r21);

View File

@@ -1485,7 +1485,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Generate stack overflow check
if (UseStackBanging) {
__ bang_stack_with_offset(JavaThread::stack_shadow_zone_size());
__ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
} else {
Unimplemented();
}
@@ -2433,7 +2433,7 @@ void SharedRuntime::generate_deopt_blob() {
__ sub(sp, sp, r19);
// Push interpreter frames in a loop
__ mov(rscratch1, (address)0xDEADDEAD); // Make a recognizable pattern
__ mov(rscratch1, (uint64_t)0xDEADDEAD); // Make a recognizable pattern
__ mov(rscratch2, rscratch1);
Label loop;
__ bind(loop);

View File

@@ -681,7 +681,6 @@ class StubGenerator: public StubCodeGenerator {
int unit = wordSize * direction;
int bias = (UseSIMDForMemoryOps ? 4:2) * wordSize;
int offset;
const Register t0 = r3, t1 = r4, t2 = r5, t3 = r6,
t4 = r7, t5 = r10, t6 = r11, t7 = r12;
const Register stride = r13;
@@ -1285,23 +1284,27 @@ class StubGenerator: public StubCodeGenerator {
void clobber_registers() {
#ifdef ASSERT
RegSet clobbered
= MacroAssembler::call_clobbered_registers() - rscratch1;
__ mov(rscratch1, (uint64_t)0xdeadbeef);
__ orr(rscratch1, rscratch1, rscratch1, Assembler::LSL, 32);
for (Register r = r3; r <= NOT_BSD(r18) BSD_ONLY(r17); r++)
if (r != rscratch1) __ mov(r, rscratch1);
for (RegSetIterator it = clobbered.begin(); *it != noreg; ++it) {
__ mov(*it, rscratch1);
}
#endif
}
// Scan over array at a for count oops, verifying each one.
// Preserves a and count, clobbers rscratch1 and rscratch2.
void verify_oop_array (size_t size, Register a, Register count, Register temp) {
void verify_oop_array (int size, Register a, Register count, Register temp) {
Label loop, end;
__ mov(rscratch1, a);
__ mov(rscratch2, zr);
__ bind(loop);
__ cmp(rscratch2, count);
__ br(Assembler::HS, end);
if (size == (size_t)wordSize) {
if (size == wordSize) {
__ ldr(temp, Address(a, rscratch2, Address::lsl(exact_log2(size))));
__ verify_oop(temp);
} else {
@@ -1332,7 +1335,7 @@ class StubGenerator: public StubCodeGenerator {
// disjoint_int_copy_entry is set to the no-overlap entry point
// used by generate_conjoint_int_oop_copy().
//
address generate_disjoint_copy(size_t size, bool aligned, bool is_oop, address *entry,
address generate_disjoint_copy(int size, bool aligned, bool is_oop, address *entry,
const char *name, bool dest_uninitialized = false) {
Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
RegSet saved_reg = RegSet::of(s, d, count);
@@ -1393,7 +1396,7 @@ class StubGenerator: public StubCodeGenerator {
// the hardware handle it. The two dwords within qwords that span
// cache line boundaries will still be loaded and stored atomically.
//
address generate_conjoint_copy(size_t size, bool aligned, bool is_oop, address nooverlap_target,
address generate_conjoint_copy(int size, bool aligned, bool is_oop, address nooverlap_target,
address *entry, const char *name,
bool dest_uninitialized = false) {
Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
@@ -1639,7 +1642,7 @@ class StubGenerator: public StubCodeGenerator {
address generate_disjoint_oop_copy(bool aligned, address *entry,
const char *name, bool dest_uninitialized) {
const bool is_oop = true;
const size_t size = UseCompressedOops ? sizeof (jint) : sizeof (jlong);
const int size = UseCompressedOops ? sizeof (jint) : sizeof (jlong);
return generate_disjoint_copy(size, aligned, is_oop, entry, name, dest_uninitialized);
}
@@ -1657,7 +1660,7 @@ class StubGenerator: public StubCodeGenerator {
address nooverlap_target, address *entry,
const char *name, bool dest_uninitialized) {
const bool is_oop = true;
const size_t size = UseCompressedOops ? sizeof (jint) : sizeof (jlong);
const int size = UseCompressedOops ? sizeof (jint) : sizeof (jlong);
return generate_conjoint_copy(size, aligned, is_oop, nooverlap_target, entry,
name, dest_uninitialized);
}
@@ -1752,7 +1755,6 @@ class StubGenerator: public StubCodeGenerator {
// Empty array: Nothing to do.
__ cbz(count, L_done);
__ push(RegSet::of(r19, r20, r21, r22), sp);
#ifdef ASSERT
@@ -4065,7 +4067,7 @@ class StubGenerator: public StubCodeGenerator {
FloatRegister vtmpZ = v0, vtmp = v1, vtmp3 = v2;
RegSet spilled_regs = RegSet::of(tmp3, tmp4);
int prefetchLoopExitCondition = MAX(64, SoftwarePrefetchHintDistance/2);
int prefetchLoopExitCondition = MAX2(64, SoftwarePrefetchHintDistance/2);
__ eor(vtmpZ, __ T16B, vtmpZ, vtmpZ);
// cnt2 == amount of characters left to compare
@@ -4181,7 +4183,7 @@ class StubGenerator: public StubCodeGenerator {
DIFF_LAST_POSITION, DIFF_LAST_POSITION2;
// exit from large loop when less than 64 bytes left to read or we're about
// to prefetch memory behind array border
int largeLoopExitCondition = MAX(64, SoftwarePrefetchHintDistance)/(isLL ? 1 : 2);
int largeLoopExitCondition = MAX2(64, SoftwarePrefetchHintDistance)/(isLL ? 1 : 2);
// cnt1/cnt2 contains amount of characters to compare. cnt1 can be re-used
// update cnt2 counter with already loaded 8 bytes
__ sub(cnt2, cnt2, wordSize/(isLL ? 1 : 2));
@@ -4606,7 +4608,7 @@ class StubGenerator: public StubCodeGenerator {
address entry = __ pc();
Label LOOP, LOOP_START, LOOP_PRFM, LOOP_PRFM_START, DONE;
Register src = r0, dst = r1, len = r2, octetCounter = r3;
const int large_loop_threshold = MAX(64, SoftwarePrefetchHintDistance)/8 + 4;
const int large_loop_threshold = MAX2(64, SoftwarePrefetchHintDistance)/8 + 4;
// do one more 8-byte read to have address 16-byte aligned in most cases
// also use single store instruction
@@ -4851,54 +4853,45 @@ class StubGenerator: public StubCodeGenerator {
// Register allocation
Register reg = c_rarg0;
Pa_base = reg; // Argument registers
RegSetIterator regs = (RegSet::range(r0, r26) - r18_reserved).begin();
Pa_base = *regs; // Argument registers
if (squaring)
Pb_base = Pa_base;
else
Pb_base = next_reg(reg);
Pn_base = next_reg(reg);
Rlen= next_reg(reg);
inv = next_reg(reg);
Pm_base = next_reg(reg);
Pb_base = *++regs;
Pn_base = *++regs;
Rlen= *++regs;
inv = *++regs;
Pm_base = *++regs;
// Working registers:
Ra = next_reg(reg); // The current digit of a, b, n, and m.
Rb = next_reg(reg);
Rm = next_reg(reg);
Rn = next_reg(reg);
Ra = *++regs; // The current digit of a, b, n, and m.
Rb = *++regs;
Rm = *++regs;
Rn = *++regs;
Pa = next_reg(reg); // Pointers to the current/next digit of a, b, n, and m.
Pb = next_reg(reg);
Pm = next_reg(reg);
Pn = next_reg(reg);
Pa = *++regs; // Pointers to the current/next digit of a, b, n, and m.
Pb = *++regs;
Pm = *++regs;
Pn = *++regs;
t0 = next_reg(reg); // Three registers which form a
t1 = next_reg(reg); // triple-precision accumulator.
t2 = next_reg(reg);
t0 = *++regs; // Three registers which form a
t1 = *++regs; // triple-precision accumulator.
t2 = *++regs;
Ri = next_reg(reg); // Inner and outer loop indexes.
Rj = next_reg(reg);
Ri = *++regs; // Inner and outer loop indexes.
Rj = *++regs;
Rhi_ab = next_reg(reg); // Product registers: low and high parts
Rlo_ab = next_reg(reg); // of a*b and m*n.
Rhi_mn = next_reg(reg);
Rlo_mn = next_reg(reg);
Rhi_ab = *++regs; // Product registers: low and high parts
Rlo_ab = *++regs; // of a*b and m*n.
Rhi_mn = *++regs;
Rlo_mn = *++regs;
// r19 and up are callee-saved.
_toSave = RegSet::range(r19, reg) + Pm_base;
_toSave = RegSet::range(r19, *regs) + Pm_base;
}
private:
Register next_reg(Register &reg) {
#ifdef __APPLE__
// skip r18 on macOSs, it should not be used
return ++reg == r18 ? ++reg : reg;
#else
return ++reg;
#endif
}
void save_regs() {
push(_toSave, sp);
}
@@ -5762,6 +5755,7 @@ class StubGenerator: public StubCodeGenerator {
// byte_array_inflate stub for large arrays.
StubRoutines::aarch64::_large_byte_array_inflate = generate_large_byte_array_inflate();
#ifdef COMPILER2
if (UseMultiplyToLenIntrinsic) {
StubRoutines::_multiplyToLen = generate_multiplyToLen();
}
@@ -5787,6 +5781,7 @@ class StubGenerator: public StubCodeGenerator {
// because it's faster for the sizes of modulus we care about.
StubRoutines::_montgomerySquare = g.generate_multiply();
}
#endif // COMPILER2
// generate GHASH intrinsics code
if (UseGHASHIntrinsics) {

View File

@@ -61,7 +61,7 @@ bool StubRoutines::aarch64::_completed = false;
/**
* crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.5/crc32.h
*/
juint StubRoutines::aarch64::_crc_table[] ATTRIBUTE_ALIGNED(4096) =
ATTRIBUTE_ALIGNED(4096) juint StubRoutines::aarch64::_crc_table[] =
{
// Table 0
0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL,
@@ -287,7 +287,7 @@ juint StubRoutines::aarch64::_crc_table[] ATTRIBUTE_ALIGNED(4096) =
0xD502ED78UL, 0xAE7D62EDUL, // byte swap of word swap
};
juint StubRoutines::aarch64::_npio2_hw[] __attribute__ ((aligned(64))) = {
ATTRIBUTE_ALIGNED(64) juint StubRoutines::aarch64::_npio2_hw[] = {
// first, various coefficient values: 0.5, invpio2, pio2_1, pio2_1t, pio2_2,
// pio2_2t, pio2_3, pio2_3t
// This is a small optimization which keeps double[8] values in an int[] table
@@ -319,7 +319,7 @@ juint StubRoutines::aarch64::_npio2_hw[] __attribute__ ((aligned(64))) = {
// Coefficients for sin(x) polynomial approximation: S1..S6.
// See kernel_sin comments in macroAssembler_aarch64_trig.cpp for details
jdouble StubRoutines::aarch64::_dsin_coef[] __attribute__ ((aligned(64))) = {
ATTRIBUTE_ALIGNED(64) jdouble StubRoutines::aarch64::_dsin_coef[] = {
-1.66666666666666324348e-01, // 0xBFC5555555555549
8.33333333332248946124e-03, // 0x3F8111111110F8A6
-1.98412698298579493134e-04, // 0xBF2A01A019C161D5
@@ -330,7 +330,7 @@ jdouble StubRoutines::aarch64::_dsin_coef[] __attribute__ ((aligned(64))) = {
// Coefficients for cos(x) polynomial approximation: C1..C6.
// See kernel_cos comments in macroAssembler_aarch64_trig.cpp for details
jdouble StubRoutines::aarch64::_dcos_coef[] __attribute__ ((aligned(64))) = {
ATTRIBUTE_ALIGNED(64) jdouble StubRoutines::aarch64::_dcos_coef[] = {
4.16666666666666019037e-02, // c0x3FA555555555554C
-1.38888888888741095749e-03, // 0xBF56C16C16C15177
2.48015872894767294178e-05, // 0x3EFA01A019CB1590
@@ -345,7 +345,7 @@ jdouble StubRoutines::aarch64::_dcos_coef[] __attribute__ ((aligned(64))) = {
// Converted to double to avoid unnecessary conversion in code
// NOTE: table looks like original int table: {0xA2F983, 0x6E4E44,...} with
// only (double) conversion added
jdouble StubRoutines::aarch64::_two_over_pi[] __attribute__ ((aligned(64))) = {
ATTRIBUTE_ALIGNED(64) jdouble StubRoutines::aarch64::_two_over_pi[] = {
(double)0xA2F983, (double)0x6E4E44, (double)0x1529FC, (double)0x2757D1, (double)0xF534DD, (double)0xC0DB62,
(double)0x95993C, (double)0x439041, (double)0xFE5163, (double)0xABDEBB, (double)0xC561B7, (double)0x246E3A,
(double)0x424DD2, (double)0xE00649, (double)0x2EEA09, (double)0xD1921C, (double)0xFE1DEB, (double)0x1CB129,
@@ -360,7 +360,7 @@ jdouble StubRoutines::aarch64::_two_over_pi[] __attribute__ ((aligned(64))) = {
};
// Pi over 2 value
jdouble StubRoutines::aarch64::_pio2[] __attribute__ ((aligned(64))) = {
ATTRIBUTE_ALIGNED(64) jdouble StubRoutines::aarch64::_pio2[] = {
1.57079625129699707031e+00, // 0x3FF921FB40000000
7.54978941586159635335e-08, // 0x3E74442D00000000
5.39030252995776476554e-15, // 0x3CF8469880000000

View File

@@ -1125,7 +1125,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// an interpreter frame with greater than a page of locals, so each page
// needs to be checked. Only true for non-native.
if (UseStackBanging) {
const int n_shadow_pages = JavaThread::stack_shadow_zone_size() / os::vm_page_size();
const int n_shadow_pages = (int)(JavaThread::stack_shadow_zone_size() / os::vm_page_size());
const int start_page = native_call ? n_shadow_pages : 1;
const int page_size = os::vm_page_size();
for (int pages = start_page; pages <= n_shadow_pages ; pages++) {
@@ -1593,9 +1593,7 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
__ add(rlocals, esp, r2, ext::uxtx, 3);
__ sub(rlocals, rlocals, wordSize);
// Make room for locals
__ sub(rscratch1, esp, r3, ext::uxtx, 3);
__ andr(sp, rscratch1, -16);
__ mov(rscratch1, esp);
// r3 - # of additional locals
// allocate space for locals
@@ -1605,12 +1603,16 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
__ ands(zr, r3, r3);
__ br(Assembler::LE, exit); // do nothing if r3 <= 0
__ bind(loop);
__ str(zr, Address(__ post(rscratch1, wordSize)));
__ str(zr, Address(__ pre(rscratch1, -wordSize)));
__ sub(r3, r3, 1); // until everything initialized
__ cbnz(r3, loop);
__ bind(exit);
}
// Padding between locals and fixed part of activation frame to ensure
// SP is always 16-byte aligned.
__ andr(sp, rscratch1, -16);
// And the base dispatch table
__ get_dispatch();

View File

@@ -1705,7 +1705,7 @@ void TemplateTable::lcmp()
Label done;
__ pop_l(r1);
__ cmp(r1, r0);
__ mov(r0, (u_int64_t)-1L);
__ mov(r0, (uint64_t)-1L);
__ br(Assembler::LT, done);
// __ mov(r0, 1UL);
// __ csel(r0, r0, zr, Assembler::NE);
@@ -1729,7 +1729,7 @@ void TemplateTable::float_cmp(bool is_float, int unordered_result)
if (unordered_result < 0) {
// we want -1 for unordered or less than, 0 for equal and 1 for
// greater than.
__ mov(r0, (u_int64_t)-1L);
__ mov(r0, (uint64_t)-1L);
// for FP LT tests less than or unordered
__ br(Assembler::LT, done);
// install 0 for EQ otherwise 1

View File

@@ -117,9 +117,11 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
}
#ifdef COMPILER2
if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
FLAG_SET_DEFAULT(UseFPUForSpilling, true);
}
#endif
}
// Cortex A53
@@ -300,6 +302,16 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
}
if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) {
UseBarriersForVolatile = (_features & CPU_DMB_ATOMICS) != 0;
}
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
UsePopCountInstruction = true;
}
#ifdef COMPILER2
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
UseMultiplyToLenIntrinsic = true;
}
@@ -312,14 +324,6 @@ void VM_Version::initialize() {
UseMulAddIntrinsic = true;
}
if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) {
UseBarriersForVolatile = (_features & CPU_DMB_ATOMICS) != 0;
}
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
UsePopCountInstruction = true;
}
if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
UseMontgomeryMultiplyIntrinsic = true;
}
@@ -327,7 +331,6 @@ void VM_Version::initialize() {
UseMontgomerySquareIntrinsic = true;
}
#ifdef COMPILER2
if (FLAG_IS_DEFAULT(OptoScheduling)) {
OptoScheduling = true;
}

View File

@@ -22,6 +22,7 @@
*
*/
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/os.inline.hpp"

View File

@@ -436,7 +436,7 @@ void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr bas
}
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
assert(left != result, "should be different registers");
if (is_power_of_2(c + 1)) {
#ifdef AARCH64

View File

@@ -289,7 +289,7 @@ void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr bas
}
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
assert(left != result, "should be different registers");
if (is_power_of_2(c + 1)) {
__ shift_left(left, log2_int(c + 1), result);

View File

@@ -223,7 +223,7 @@ void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr bas
__ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
if (tmp->is_valid()) {
if (is_power_of_2(c + 1)) {
__ move(left, tmp);

View File

@@ -31,6 +31,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
@@ -120,12 +121,19 @@ static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;
#ifdef _M_AMD64
#if defined(_M_ARM64)
#define __CPU__ aarch64
#elif defined(_M_AMD64)
#define __CPU__ amd64
#else
#define __CPU__ i486
#endif
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
PVOID topLevelVectoredExceptionHandler = NULL;
LPTOP_LEVEL_EXCEPTION_FILTER previousUnhandledExceptionFilter = NULL;
#endif
// save DLL module handle, used by GetModuleFileName
HINSTANCE vm_lib_handle;
@@ -144,6 +152,12 @@ BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
if (ForceTimeHighResolution) {
timeEndPeriod(1L);
}
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
if (topLevelVectoredExceptionHandler != NULL) {
RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
topLevelVectoredExceptionHandler = NULL;
}
#endif
break;
default:
break;
@@ -453,6 +467,12 @@ static unsigned __stdcall thread_native_entry(Thread* thread) {
log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
#ifdef USE_VECTORED_EXCEPTION_HANDLING
// Any exception is caught by the Vectored Exception Handler, so VM can
// generate error dump when an exception occurred in non-Java thread
// (e.g. VM thread).
thread->call_run();
#else
// Install a win32 structured exception handler around every thread created
// by VM, so VM can generate error dump when an exception occurred in non-
// Java thread (e.g. VM thread).
@@ -462,6 +482,7 @@ static unsigned __stdcall thread_native_entry(Thread* thread) {
(_EXCEPTION_POINTERS*)_exception_info())) {
// Nothing to do.
}
#endif
// Note: at this point the thread object may already have deleted itself.
// Do not dereference it from here on out.
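A minimal sketch of the Win32 vectored-exception API used by this path (standard Win32 calls; the handler body is a placeholder):
#include <windows.h>

static LONG WINAPI vm_veh(PEXCEPTION_POINTERS info) {
  // Inspect info->ExceptionRecord / info->ContextRecord here and return
  // EXCEPTION_CONTINUE_EXECUTION to resume, or pass the exception on:
  return EXCEPTION_CONTINUE_SEARCH;
}

void install() {
  // First = 1: called before any frame-based (SEH) handlers.
  PVOID h = AddVectoredExceptionHandler(1, vm_veh);
  // ... later, e.g. in DllMain on process detach:
  RemoveVectoredExceptionHandler(h);
}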
@@ -1516,15 +1537,18 @@ void * os::dll_load(const char *utf8_name, char *ebuf, int ebuflen) {
static const arch_t arch_array[] = {
{IMAGE_FILE_MACHINE_I386, (char*)"IA 32"},
{IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"}
{IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"},
{IMAGE_FILE_MACHINE_ARM64, (char*)"ARM 64"}
};
#if (defined _M_AMD64)
#if (defined _M_ARM64)
static const uint16_t running_arch = IMAGE_FILE_MACHINE_ARM64;
#elif (defined _M_AMD64)
static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
#error Method os::dll_load requires that one of following \
is defined :_M_AMD64 or _M_IX86
is defined :_M_AMD64 or _M_IX86 or _M_ARM64
#endif
@@ -1819,7 +1843,8 @@ void os::win32::print_windows_version(outputStream* st) {
SYSTEM_INFO si;
ZeroMemory(&si, sizeof(SYSTEM_INFO));
GetNativeSystemInfo(&si);
if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
if ((si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) ||
(si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_ARM64)) {
st->print(" , 64 bit");
}
@@ -2227,7 +2252,14 @@ LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
address handler) {
JavaThread* thread = (JavaThread*) Thread::current_or_null();
// Save pc in thread
#ifdef _M_AMD64
#if defined(_M_ARM64)
// Do not blow up if no thread info available.
if (thread) {
thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Pc);
}
// Set pc to handler
exceptionInfo->ContextRecord->Pc = (DWORD64)handler;
#elif defined(_M_AMD64)
// Do not blow up if no thread info available.
if (thread) {
thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
@@ -2325,7 +2357,17 @@ const char* os::exception_name(int exception_code, char *buf, size_t size) {
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
// handle exception caused by idiv; should only happen for -MinInt/-1
// (division by zero is handled explicitly)
#ifdef _M_AMD64
#if defined(_M_ARM64)
PCONTEXT ctx = exceptionInfo->ContextRecord;
address pc = (address)ctx->Sp;
assert(pc[0] == 0x83, "not an sdiv opcode"); // FIXME: did I get the right opcode?
assert(ctx->X4 == min_jint, "unexpected idiv exception");
// set correct result values and continue after idiv instruction
ctx->Pc = (uint64_t)pc + 4; // idiv reg, reg, reg is 4 bytes
ctx->X4 = (uint64_t)min_jint; // result
ctx->X5 = (uint64_t)0; // remainder
// Continue the execution
#elif defined(_M_AMD64)
PCONTEXT ctx = exceptionInfo->ContextRecord;
address pc = (address)ctx->Rip;
assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
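The fixup above encodes the JVM's required wrap-around semantics; in portable C++ terms (illustrative — plain C++ division of INT32_MIN by -1 is undefined behavior, which is why the guard comes first):
#include <climits>
#include <cstdint>

int32_t java_idiv(int32_t a, int32_t b) {
  if (a == INT32_MIN && b == -1) return INT32_MIN;  // wraps; remainder is 0
  return a / b;
}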
@@ -2356,6 +2398,7 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
return EXCEPTION_CONTINUE_EXECUTION;
}
#if defined(_M_AMD64) || defined(_M_IX86)
//-----------------------------------------------------------------------------
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
PCONTEXT ctx = exceptionInfo->ContextRecord;
@@ -2401,6 +2444,7 @@ LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
return EXCEPTION_CONTINUE_SEARCH;
}
#endif
static inline void report_error(Thread* t, DWORD exception_code,
address addr, void* siginfo, void* context) {
@@ -2410,46 +2454,14 @@ static inline void report_error(Thread* t, DWORD exception_code,
// somewhere where we can find it in the minidump.
}
bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (Interpreter::contains(pc)) {
*fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
if (!fr->is_first_java_frame()) {
// get_frame_at_stack_banging_point() is only called when we
// have well defined stacks so java_sender() calls do not need
// to assert safe_for_sender() first.
*fr = fr->java_sender();
}
} else {
// more complex code with compiled code
assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
CodeBlob* cb = CodeCache::find_blob(pc);
if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
// Not sure where the pc points to, fallback to default
// stack overflow handling
return false;
} else {
*fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
// in compiled code, the stack banging is performed just after the return pc
// has been pushed on the stack
*fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
if (!fr->is_java_frame()) {
// See java_sender() comment above.
*fr = fr->java_sender();
}
}
}
assert(fr->is_java_frame(), "Safety check");
return true;
}
//-----------------------------------------------------------------------------
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_AMD64
PEXCEPTION_RECORD exception_record = exceptionInfo->ExceptionRecord;
DWORD exception_code = exception_record->ExceptionCode;
#if defined(_M_ARM64)
address pc = (address) exceptionInfo->ContextRecord->Pc;
#elif defined(_M_AMD64)
address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
address pc = (address) exceptionInfo->ContextRecord->Eip;
@@ -2467,9 +2479,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// This is safe to do because we have a new/unique ExceptionInformation
// code for this condition.
if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
address addr = (address) exceptionRecord->ExceptionInformation[1];
int exception_subcode = (int) exception_record->ExceptionInformation[0];
address addr = (address) exception_record->ExceptionInformation[1];
if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
int page_size = os::vm_page_size();
@@ -2533,8 +2544,10 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// Last unguard failed or not unguarding
tty->print_raw_cr("Execution protection violation");
report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
report_error(t, exception_code, addr, exception_record,
exceptionInfo->ContextRecord);
#endif
return EXCEPTION_CONTINUE_SEARCH;
}
}
@@ -2547,8 +2560,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
if (t != NULL && t->is_Java_thread()) {
JavaThread* thread = (JavaThread*) t;
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
address addr = (address) exception_record->ExceptionInformation[1];
if (os::is_memory_serialize_page(thread, addr)) {
// Block current thread until the memory serialize page permission restored.
os::block_on_serialize_page_trap();
@@ -2557,23 +2569,25 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
}
}
#if defined(_M_AMD64) || defined(_M_IX86)
if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
VM_Version::is_cpuinfo_segv_addr(pc)) {
// Verify that OS save/restore AVX registers.
return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
}
#endif
if (t != NULL && t->is_Java_thread()) {
JavaThread* thread = (JavaThread*) t;
bool in_java = thread->thread_state() == _thread_in_Java;
bool in_native = thread->thread_state() == _thread_in_native;
bool in_vm = thread->thread_state() == _thread_in_vm;
// Handle potential stack overflows up front.
if (exception_code == EXCEPTION_STACK_OVERFLOW) {
if (thread->stack_guards_enabled()) {
if (in_java) {
frame fr;
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
assert(fr.is_java_frame(), "Must be a Java frame");
SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
@@ -2582,7 +2596,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// Yellow zone violation. The o/s has unprotected the first yellow
// zone page for us. Note: must call disable_stack_yellow_zone to
// update the enabled status, even if the zone contains only one page.
assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
assert(!in_vm, "Undersized StackShadowPages");
thread->disable_stack_yellow_reserved_zone();
// If not in java code, return and hope for the best.
return in_java
@@ -2592,15 +2606,16 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// Fatal red zone violation.
thread->disable_stack_red_zone();
tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
report_error(t, exception_code, pc, exception_record,
exceptionInfo->ContextRecord);
#endif
return EXCEPTION_CONTINUE_SEARCH;
}
} else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
// Either stack overflow or null pointer exception.
if (in_java) {
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
// Either stack overflow or null pointer exception.
address addr = (address) exception_record->ExceptionInformation[1];
address stack_end = thread->stack_end();
if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
// Stack overflow.
@@ -2619,48 +2634,41 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
return Handle_Exception(exceptionInfo, stub);
}
}
{
#ifdef _WIN64
// If it's a legal stack address map the entire region in
//
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
addr = (address)((uintptr_t)addr &
(~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
os::commit_memory((char *)addr, thread->stack_base() - addr,
!ExecMem);
return EXCEPTION_CONTINUE_EXECUTION;
} else
#endif
{
// Null pointer exception.
if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
}
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
exceptionInfo->ContextRecord);
return EXCEPTION_CONTINUE_SEARCH;
}
// If it's a legal stack address map the entire region in
if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
addr = (address)((uintptr_t)addr &
(~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
os::commit_memory((char *)addr, thread->stack_base() - addr,
!ExecMem);
return EXCEPTION_CONTINUE_EXECUTION;
}
#endif
// Null pointer exception.
if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
}
report_error(t, exception_code, pc, exception_record,
exceptionInfo->ContextRecord);
return EXCEPTION_CONTINUE_SEARCH;
}
#ifdef _WIN64
// Special care for fast JNI field accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
// in and the heap gets shrunk before the field access.
if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
address addr = JNI_FastGetField::find_slowcase_pc(pc);
if (addr != (address)-1) {
return Handle_Exception(exceptionInfo, addr);
}
address slowcase_pc = JNI_FastGetField::find_slowcase_pc(pc);
if (slowcase_pc != (address)-1) {
return Handle_Exception(exceptionInfo, slowcase_pc);
}
#endif
// Stack overflow or null pointer exception in native code.
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
report_error(t, exception_code, pc, exception_record,
exceptionInfo->ContextRecord);
#endif
return EXCEPTION_CONTINUE_SEARCH;
} // /EXCEPTION_ACCESS_VIOLATION
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -2672,13 +2680,25 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
}
if ((thread->thread_state() == _thread_in_vm &&
thread->doing_unsafe_access()) ||
if ((in_vm && thread->doing_unsafe_access()) ||
(nm != NULL && nm->has_unsafe_access())) {
return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, (address)Assembler::locate_next_instruction(pc)));
}
}
#ifdef _M_ARM64
if (in_java &&
(exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
if (nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
if (TraceTraps) {
tty->print_cr("trap: zombie_not_entrant");
}
return Handle_Exception(exceptionInfo, SharedRuntime::get_handle_wrong_method_stub());
}
}
#endif
if (in_java) {
switch (exception_code) {
case EXCEPTION_INT_DIVIDE_BY_ZERO:
@@ -2689,20 +2709,74 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
} // switch
}
if (((thread->thread_state() == _thread_in_Java) ||
(thread->thread_state() == _thread_in_native)) &&
exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
#if defined(_M_AMD64) || defined(_M_IX86)
if ((in_java || in_native) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
LONG result=Handle_FLT_Exception(exceptionInfo);
if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
}
#endif
}
#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
if (exception_code != EXCEPTION_BREAKPOINT) {
report_error(t, exception_code, pc, exception_record,
exceptionInfo->ContextRecord);
}
#endif
return EXCEPTION_CONTINUE_SEARCH;
}
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
#if defined(_M_ARM64)
address pc = (address) exceptionInfo->ContextRecord->Pc;
#elif defined(_M_AMD64)
address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
// Fast path for code part of the code cache
if (CodeCache::low_bound() <= pc && pc < CodeCache::high_bound()) {
return topLevelExceptionFilter(exceptionInfo);
}
// Handle the case where we get an implicit exception in AOT-generated
// code. Loaded AOT DLLs are not registered for structured exceptions.
// If the exception occurred in the code cache or AOT code, pass control
// to our normal exception handler.
CodeBlob* cb = CodeCache::find_blob(pc);
if (cb != NULL) {
return topLevelExceptionFilter(exceptionInfo);
}
return EXCEPTION_CONTINUE_SEARCH;
}
#endif
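For context, here is a minimal standalone sketch of how a vectored exception handler like the one above gets registered; the names are illustrative demo code, not part of the patch:

#include <windows.h>
#include <stdio.h>

static LONG WINAPI demoVectoredFilter(struct _EXCEPTION_POINTERS* info) {
  // A vectored handler sees the exception before any frame-based (SEH)
  // handlers; returning EXCEPTION_CONTINUE_SEARCH passes it on.
  printf("exception code 0x%lx\n", info->ExceptionRecord->ExceptionCode);
  return EXCEPTION_CONTINUE_SEARCH;
}

int main() {
  // First argument 1 = call this handler first, matching the
  // AddVectoredExceptionHandler(1, ...) call in os::init_2 below.
  PVOID handle = AddVectoredExceptionHandler(1, demoVectoredFilter);
  // ... run code that may fault ...
  RemoveVectoredExceptionHandler(handle);
  return 0;
}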
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
LONG WINAPI topLevelUnhandledExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
if (InterceptOSException) goto exit;
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#if defined(_M_ARM64)
address pc = (address)exceptionInfo->ContextRecord->Pc;
#elif defined(_M_AMD64)
address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
Thread* t = Thread::current_or_null_safe();
if (exception_code != EXCEPTION_BREAKPOINT) {
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
exceptionInfo->ContextRecord);
}
return EXCEPTION_CONTINUE_SEARCH;
exit:
return previousUnhandledExceptionFilter ? previousUnhandledExceptionFilter(exceptionInfo) : EXCEPTION_CONTINUE_SEARCH;
}
#endif
#ifndef _WIN64
// Special care for fast JNI accessors.
@@ -3544,7 +3618,12 @@ char* os::non_memory_address_word() {
// Must never look like an address returned by reserve_memory,
// even in its subfields (as defined by the CPU immediate fields,
// if the CPU splits constants across multiple instructions).
#ifdef _M_ARM64
// AArch64 has a maximum addressable space of 48-bits
return (char*)((1ull << 48) - 1);
#else
return (char*)-1;
#endif
}
#define MAX_ERROR_COUNT 100
@@ -4218,6 +4297,11 @@ static jint initSock();
jint os::init_2(void) {
// Setup Windows Exceptions
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
topLevelVectoredExceptionHandler = AddVectoredExceptionHandler(1, topLevelVectoredExceptionFilter);
previousUnhandledExceptionFilter = SetUnhandledExceptionFilter(topLevelUnhandledExceptionFilter);
#endif
// for debugging float code generation bugs
if (ForceFloatExceptions) {
#ifndef _WIN64
@@ -5561,7 +5645,7 @@ int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
#define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
#elif defined(AMD64) || defined(_M_ARM64)
#define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif


@@ -35,10 +35,10 @@
// See threadCritical.hpp for details of this class.
//
static bool initialized = false;
static volatile int lock_count = -1;
static INIT_ONCE initialized = INIT_ONCE_STATIC_INIT;
static int lock_count = 0;
static HANDLE lock_event;
static DWORD lock_owner = -1;
static DWORD lock_owner = 0;
//
// Note that Microsoft's critical region code contains a race
@@ -51,48 +51,36 @@ static DWORD lock_owner = -1;
// and found them ~30 times slower than the critical region code.
//
ThreadCritical::ThreadCritical() {
DWORD current_thread = GetCurrentThreadId();
static BOOL WINAPI initialize(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *Context) {
lock_event = CreateEvent(NULL, false, true, NULL);
assert(lock_event != NULL, "unexpected return value from CreateEvent");
return true;
}
ThreadCritical::ThreadCritical() {
InitOnceExecuteOnce(&initialized, &initialize, NULL, NULL);
DWORD current_thread = GetCurrentThreadId();
if (lock_owner != current_thread) {
// Grab the lock before doing anything.
while (Atomic::cmpxchg(0, &lock_count, -1) != -1) {
if (initialized) {
DWORD ret = WaitForSingleObject(lock_event, INFINITE);
assert(ret == WAIT_OBJECT_0, "unexpected return value from WaitForSingleObject");
}
}
// Make sure the event object is allocated.
if (!initialized) {
// Locking will not work correctly unless this is autoreset.
lock_event = CreateEvent(NULL, false, false, NULL);
initialized = true;
}
assert(lock_owner == -1, "Lock acquired illegally.");
DWORD ret = WaitForSingleObject(lock_event, INFINITE);
assert(ret == WAIT_OBJECT_0, "unexpected return value from WaitForSingleObject");
lock_owner = current_thread;
} else {
// Atomicity isn't required. Bump the recursion count.
lock_count++;
}
assert(lock_owner == GetCurrentThreadId(), "Lock acquired illegally.");
// Atomicity isn't required. Bump the recursion count.
lock_count++;
}
ThreadCritical::~ThreadCritical() {
assert(lock_owner == GetCurrentThreadId(), "unlock attempt by wrong thread");
assert(lock_count >= 0, "Attempt to unlock when already unlocked");
lock_count--;
if (lock_count == 0) {
// We're going to unlock
lock_owner = -1;
lock_count = -1;
lock_owner = 0;
// No lost wakeups, lock_event stays signaled until reset.
DWORD ret = SetEvent(lock_event);
assert(ret != 0, "unexpected return value from SetEvent");
} else {
// Just unwinding a recursive lock;
lock_count--;
}
}
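The replacement above leans on Win32 one-time initialization plus an auto-reset event. A self-contained sketch of that idiom, with demo names rather than HotSpot code:

#include <windows.h>

static INIT_ONCE once = INIT_ONCE_STATIC_INIT;
static HANDLE lock_evt;

static BOOL WINAPI init_lock(PINIT_ONCE, PVOID, PVOID*) {
  // Auto-reset, created signaled: the first successful wait "takes" the
  // lock and resets the event; SetEvent later hands it to exactly one waiter.
  lock_evt = CreateEvent(NULL, FALSE, TRUE, NULL);
  return lock_evt != NULL;
}

void demo_lock() {
  InitOnceExecuteOnce(&once, init_lock, NULL, NULL);  // race-free init
  WaitForSingleObject(lock_evt, INFINITE);            // acquire
}

void demo_unlock() {
  SetEvent(lock_evt);                                 // release to one waiter
}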


@@ -0,0 +1,44 @@
/*
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_AARCH64_ICACHE_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_ICACHE_AARCH64_HPP
// Interface for updating the instruction cache. Whenever the VM
// modifies code, part of the processor instruction cache potentially
// has to be flushed.
class ICache : public AbstractICache {
public:
static void initialize();
static void invalidate_word(address addr) {
__builtin___clear_cache((char *)addr, (char *)(addr + 4));
}
static void invalidate_range(address start, int nbytes) {
__builtin___clear_cache((char *)start, (char *)(start + nbytes));
}
};
#endif // OS_CPU_LINUX_AARCH64_ICACHE_AARCH64_HPP


@@ -0,0 +1,26 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// nothing required here
#include "precompiled.hpp"


@@ -0,0 +1,108 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_AARCH64_ATOMIC_WINDOWS_AARCH64_HPP
#define OS_CPU_WINDOWS_AARCH64_ATOMIC_WINDOWS_AARCH64_HPP
#include <intrin.h>
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
// As per atomic.hpp, all read-modify-write operations have to provide two-way
// barrier semantics. The memory_order parameter is ignored - we always provide
// the strongest/most-conservative ordering.
//
// For AARCH64 we add explicit barriers in the stubs.
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
template<typename I, typename D>
D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
};
// The Interlocked* APIs only take long and will not accept __int32. That is
// acceptable on Windows, since long is a 32-bit integer type.
#define DEFINE_INTRINSIC_ADD(IntrinsicName, IntrinsicType) \
template<> \
template<typename I, typename D> \
inline D Atomic::PlatformAdd<sizeof(IntrinsicType)>::add_and_fetch(I add_value, \
D volatile* dest, \
atomic_memory_order order) const { \
STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(D)); \
return PrimitiveConversions::cast<D>( \
IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \
PrimitiveConversions::cast<IntrinsicType>(add_value))); \
}
DEFINE_INTRINSIC_ADD(InterlockedAdd, long)
DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64)
#undef DEFINE_INTRINSIC_ADD
#define DEFINE_INTRINSIC_XCHG(IntrinsicName, IntrinsicType) \
template<> \
template<typename T> \
inline T Atomic::PlatformXchg<sizeof(IntrinsicType)>::operator()(T exchange_value, \
T volatile* dest, \
atomic_memory_order order) const { \
STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \
return PrimitiveConversions::cast<T>( \
IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \
PrimitiveConversions::cast<IntrinsicType>(exchange_value))); \
}
DEFINE_INTRINSIC_XCHG(InterlockedExchange, long)
DEFINE_INTRINSIC_XCHG(InterlockedExchange64, __int64)
#undef DEFINE_INTRINSIC_XCHG
// Note: the order of the parameters is different between
// Atomic::PlatformCmpxchg<*>::operator() and the
// InterlockedCompareExchange* API.
#define DEFINE_INTRINSIC_CMPXCHG(IntrinsicName, IntrinsicType) \
template<> \
template<typename T> \
inline T Atomic::PlatformCmpxchg<sizeof(IntrinsicType)>::operator()(T exchange_value, \
T volatile* dest, \
T compare_value, \
atomic_memory_order order) const { \
STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \
return PrimitiveConversions::cast<T>( \
IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \
PrimitiveConversions::cast<IntrinsicType>(exchange_value), \
PrimitiveConversions::cast<IntrinsicType>(compare_value))); \
}
DEFINE_INTRINSIC_CMPXCHG(_InterlockedCompareExchange8, char) // Use the intrinsic as InterlockedCompareExchange8 does not exist
DEFINE_INTRINSIC_CMPXCHG(InterlockedCompareExchange, long)
DEFINE_INTRINSIC_CMPXCHG(InterlockedCompareExchange64, __int64)
#undef DEFINE_INTRINSIC_CMPXCHG
#endif // OS_CPU_WINDOWS_AARCH64_ATOMIC_WINDOWS_AARCH64_HPP
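To make the parameter-order note above concrete, here is a small user-level sketch (illustrative only, `counter` is a demo variable) of the Interlocked calling convention these macros adapt to HotSpot's interface:

#include <windows.h>
#include <stdio.h>

int main() {
  volatile long counter = 5;

  // Argument order is (dest, exchange, comparand), and the return value is
  // the value observed at dest before the operation. HotSpot's
  // PlatformCmpxchg takes (exchange, dest, compare) instead, hence the
  // adapter macros above.
  long prev = InterlockedCompareExchange(&counter, 10, 5);
  printf("prev=%ld counter=%ld\n", prev, counter);   // prev=5 counter=10

  long old = InterlockedExchange(&counter, 42);
  printf("old=%ld counter=%ld\n", old, counter);     // old=10 counter=42
  return 0;
}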


@@ -0,0 +1,46 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_AARCH64_BYTES_WINDOWS_AARCH64_INLINE_HPP
#define OS_CPU_WINDOWS_AARCH64_BYTES_WINDOWS_AARCH64_INLINE_HPP
#include <stdlib.h>
// Efficient swapping of data bytes from Java byte
// ordering to native byte ordering and vice versa.
inline u2 Bytes::swap_u2(u2 x) {
return _byteswap_ushort(x);
}
inline u4 Bytes::swap_u4(u4 x) {
return _byteswap_ulong(x);
}
inline u8 Bytes::swap_u8(u8 x) {
return _byteswap_uint64(x);
}
#pragma warning(default: 4035) // Enable warning 4035: no return value
#endif // OS_CPU_WINDOWS_AARCH64_BYTES_WINDOWS_AARCH64_INLINE_HPP
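A quick standalone check of the MSVC byte-swap intrinsics used above (the values are illustrative):

#include <stdlib.h>
#include <stdio.h>

int main() {
  unsigned short   u2 = 0x1234;
  unsigned long    u4 = 0x12345678UL;
  unsigned __int64 u8 = 0x123456789abcdef0ULL;

  printf("%04x\n",    _byteswap_ushort(u2));  // 3412
  printf("%08lx\n",   _byteswap_ulong(u4));   // 78563412
  printf("%016llx\n", _byteswap_uint64(u8));  // f0debc9a78563412
  return 0;
}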


@@ -0,0 +1,158 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_AARCH64_COPY_WINDOWS_AARCH64_INLINE_HPP
#define OS_CPU_WINDOWS_AARCH64_COPY_WINDOWS_AARCH64_INLINE_HPP
#include <string.h>
static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
case 6: to[5] = from[5];
case 5: to[4] = from[4];
case 4: to[3] = from[3];
case 3: to[2] = from[2];
case 2: to[1] = from[1];
case 1: to[0] = from[0];
case 0: break;
default:
(void)memcpy(to, from, count * HeapWordSize);
break;
}
}
static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
case 6: to[5] = from[5];
case 5: to[4] = from[4];
case 4: to[3] = from[3];
case 3: to[2] = from[2];
case 2: to[1] = from[1];
case 1: to[0] = from[0];
case 0: break;
default: while (count-- > 0) {
*to++ = *from++;
}
break;
}
}
static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
// pd_conjoint_words(from, to, count);
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
(void)memmove(to, from, count);
}
static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
pd_conjoint_bytes(from, to, count);
}
static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
*to++ = *from++;
}
} else {
from += count - 1;
to += count - 1;
while (count-- > 0) {
// Copy backwards
*to-- = *from--;
}
}
}
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
*to++ = *from++;
}
} else {
from += count - 1;
to += count - 1;
while (count-- > 0) {
// Copy backwards
*to-- = *from--;
}
}
}
static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count);
}
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
*to++ = *from++;
}
} else {
from += count - 1;
to += count - 1;
while (count-- > 0) {
// Copy backwards
*to-- = *from--;
}
}
}
static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_bytes_atomic(from, to, count);
}
static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count);
}
static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
}
static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
}
static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count);
}
#endif // OS_CPU_WINDOWS_AARCH64_COPY_WINDOWS_AARCH64_INLINE_HPP
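The conjoint copies above pick the copy direction from the relative position of the two ranges so that overlapping copies stay correct. A simplified standalone version of that pattern, with demo names:

#include <stdio.h>
#include <stddef.h>

// Overlap-safe element-wise copy: copy forwards when the destination lies
// below the source, backwards when it lies above, so overlapping ranges
// survive intact (the same rule the pd_conjoint_* routines apply).
static void conjoint_copy(const int* from, int* to, size_t count) {
  if (from == to || count == 0) return;
  if (from > to) {
    while (count-- > 0) *to++ = *from++;      // copy forwards
  } else {
    from += count - 1;
    to   += count - 1;
    while (count-- > 0) *to-- = *from--;      // copy backwards
  }
}

int main() {
  int a[6] = {1, 2, 3, 4, 5, 6};
  conjoint_copy(a, a + 2, 4);                 // shift the first four right
  for (int i = 0; i < 6; i++) printf("%d ", a[i]);  // prints: 1 2 1 2 3 4
  printf("\n");
  return 0;
}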


@@ -0,0 +1,50 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_AARCH64_GLOBALS_WINDOWS_AARCH64_HPP
#define OS_CPU_WINDOWS_AARCH64_GLOBALS_WINDOWS_AARCH64_HPP
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
define_pd_global(bool, DontYieldALot, false);
// Default stack size on Windows is determined by the executable (java.exe
// has a default value of 320K/1MB [32bit/64bit]). Depending on Windows version, changing
// ThreadStackSize to non-zero may have significant impact on memory usage.
// See comments in os_windows.cpp.
define_pd_global(intx, ThreadStackSize, 0); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 0);
#ifdef ASSERT
define_pd_global(intx, CompilerThreadStackSize, 1024);
#else
define_pd_global(intx, CompilerThreadStackSize, 0);
#endif
define_pd_global(uintx,JVMInvokeMethodSlack, 8192);
// Used on 64 bit platforms for UseCompressedOops base address
define_pd_global(uintx,HeapBaseMinAddress, 2*G);
#endif // OS_CPU_WINDOWS_AARCH64_GLOBALS_WINDOWS_AARCH64_HPP


@@ -0,0 +1,44 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_AARCH64_ICACHE_AARCH64_HPP
#define OS_CPU_WINDOWS_AARCH64_ICACHE_AARCH64_HPP
// Interface for updating the instruction cache. Whenever the VM
// modifies code, part of the processor instruction cache potentially
// has to be flushed.
class ICache : public AbstractICache {
public:
static void initialize();
static void invalidate_word(address addr) {
invalidate_range(addr, 4);
}
static void invalidate_range(address start, int nbytes) {
FlushInstructionCache((HANDLE)GetCurrentProcess(), start, (SIZE_T)(nbytes));
}
};
#endif // OS_CPU_WINDOWS_AARCH64_ICACHE_AARCH64_HPP
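Unlike the Linux port above, which relies on __builtin___clear_cache, this port defers to the OS. A hedged sketch of the same call outside HotSpot; the helper name and buffer management are assumptions:

#include <windows.h>

// Assumes 'code' points at nbytes of just-written instructions in a page
// allocated with PAGE_EXECUTE_READWRITE (e.g. via VirtualAlloc).
void flush_patched_code(void* code, size_t nbytes) {
  FlushInstructionCache(GetCurrentProcess(), code, (SIZE_T)nbytes);
}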


@@ -0,0 +1,58 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_AARCH64_ORDERACCESS_WINDOWS_AARCH64_HPP
#define OS_CPU_WINDOWS_AARCH64_ORDERACCESS_WINDOWS_AARCH64_HPP
// Included in orderAccess.hpp header file.
#include <atomic>
using std::atomic_thread_fence;
#include <intrin.h>
#include "vm_version_aarch64.hpp"
#include "runtime/vm_version.hpp"
// Implementation of class OrderAccess.
inline void OrderAccess::loadload() { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore() { acquire(); }
inline void OrderAccess::storeload() { fence(); }
#define READ_MEM_BARRIER atomic_thread_fence(std::memory_order_acquire);
#define WRITE_MEM_BARRIER atomic_thread_fence(std::memory_order_release);
#define FULL_MEM_BARRIER atomic_thread_fence(std::memory_order_seq_cst);
inline void OrderAccess::acquire() {
READ_MEM_BARRIER;
}
inline void OrderAccess::release() {
WRITE_MEM_BARRIER;
}
inline void OrderAccess::fence() {
FULL_MEM_BARRIER;
}
#endif // OS_CPU_WINDOWS_AARCH64_ORDERACCESS_WINDOWS_AARCH64_HPP
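The fence-based mapping above corresponds to the standard C++ release/acquire idiom. A minimal two-thread sketch (demo code, not from the patch) showing the same WRITE_MEM_BARRIER/READ_MEM_BARRIER pairing:

#include <atomic>
#include <thread>
#include <cassert>

int data = 0;
std::atomic<bool> ready(false);

void producer() {
  data = 42;
  std::atomic_thread_fence(std::memory_order_release);  // WRITE_MEM_BARRIER
  ready.store(true, std::memory_order_relaxed);
}

void consumer() {
  while (!ready.load(std::memory_order_relaxed)) { }
  std::atomic_thread_fence(std::memory_order_acquire);  // READ_MEM_BARRIER
  assert(data == 42);  // the fences order data before/after the flag
}

int main() {
  std::thread t1(producer), t2(consumer);
  t1.join(); t2.join();
  return 0;
}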


@@ -0,0 +1,312 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "code/nativeInst.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "unwind_windows_aarch64.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
// put OS-includes here
# include <sys/types.h>
# include <signal.h>
# include <errno.h>
# include <stdlib.h>
# include <stdio.h>
# include <intrin.h>
void os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method, JavaCallArguments* args, Thread* thread) {
f(value, method, args, thread);
}
#pragma warning(disable: 4172)
// Returns an estimate of the current stack pointer. Result must be guaranteed
// to point into the calling thread's stack, and be no lower than the current
// stack pointer.
address os::current_stack_pointer() {
int dummy;
address sp = (address)&dummy;
return sp;
}
#pragma warning(default: 4172)
ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
intptr_t** ret_sp, intptr_t** ret_fp) {
ExtendedPC epc;
CONTEXT* uc = (CONTEXT*)ucVoid;
if (uc != NULL) {
epc = ExtendedPC((address)uc->Pc);
if (ret_sp) *ret_sp = (intptr_t*)uc->Sp;
if (ret_fp) *ret_fp = (intptr_t*)uc->Fp;
} else {
// construct empty ExtendedPC for return value checking
epc = NULL;
if (ret_sp) *ret_sp = (intptr_t *)NULL;
if (ret_fp) *ret_fp = (intptr_t *)NULL;
}
return epc;
}
frame os::fetch_frame_from_context(const void* ucVoid) {
intptr_t* sp;
intptr_t* fp;
ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
return frame(sp, fp, epc.pc());
}
bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (Interpreter::contains(pc)) {
*fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
if (!fr->is_first_java_frame()) {
// get_frame_at_stack_banging_point() is only called when we
// have well defined stacks so java_sender() calls do not need
// to assert safe_for_sender() first.
*fr = fr->java_sender();
}
} else {
// more complex code with compiled code
assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
CodeBlob* cb = CodeCache::find_blob(pc);
if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
// Not sure where the pc points to, fallback to default
// stack overflow handling
return false;
} else {
// In compiled code, the stack banging is performed before LR
// has been saved in the frame. LR is live, and SP and FP
// belong to the caller.
intptr_t* fp = (intptr_t*)exceptionInfo->ContextRecord->Fp;
intptr_t* sp = (intptr_t*)exceptionInfo->ContextRecord->Sp;
address pc = (address)(exceptionInfo->ContextRecord->Lr
- NativeInstruction::instruction_size);
*fr = frame(sp, fp, pc);
if (!fr->is_java_frame()) {
assert(fr->safe_for_sender(thread), "Safety check");
assert(!fr->is_first_frame(), "Safety check");
*fr = fr->java_sender();
}
}
}
assert(fr->is_java_frame(), "Safety check");
return true;
}
// By default, gcc always saves the frame pointer rfp on the stack. This
// may get turned off by -fomit-frame-pointer.
frame os::get_sender_for_C_frame(frame* fr) {
return frame(fr->link(), fr->link(), fr->sender_pc());
}
frame os::current_frame() {
typedef intptr_t* get_fp_func ();
get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
StubRoutines::aarch64::get_previous_fp_entry());
if (func == NULL) return frame();
intptr_t* fp = (*func)();
if (fp == NULL) {
return frame();
}
frame myframe((intptr_t*)os::current_stack_pointer(),
(intptr_t*)fp,
CAST_FROM_FN_PTR(address, os::current_frame));
if (os::is_first_C_frame(&myframe)) {
// stack is not walkable
return frame();
} else {
return os::get_sender_for_C_frame(&myframe);
}
}
////////////////////////////////////////////////////////////////////////////////
// thread stack
// Minimum usable stack sizes required to get to user code. Space for
// HotSpot guard pages is added later.
/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler
void os::print_context(outputStream *st, const void *context) {
if (context == NULL) return;
const CONTEXT* uc = (const CONTEXT*)context;
st->print_cr("Registers:");
st->print( "X0 =" INTPTR_FORMAT, uc->X0);
st->print(", X1 =" INTPTR_FORMAT, uc->X1);
st->print(", X2 =" INTPTR_FORMAT, uc->X2);
st->print(", X3 =" INTPTR_FORMAT, uc->X3);
st->cr();
st->print( "X4 =" INTPTR_FORMAT, uc->X4);
st->print(", X5 =" INTPTR_FORMAT, uc->X5);
st->print(", X6 =" INTPTR_FORMAT, uc->X6);
st->print(", X7 =" INTPTR_FORMAT, uc->X7);
st->cr();
st->print( "X8 =" INTPTR_FORMAT, uc->X8);
st->print(", X9 =" INTPTR_FORMAT, uc->X9);
st->print(", X10=" INTPTR_FORMAT, uc->X10);
st->print(", X11=" INTPTR_FORMAT, uc->X11);
st->cr();
st->print( "X12=" INTPTR_FORMAT, uc->X12);
st->print(", X13=" INTPTR_FORMAT, uc->X13);
st->print(", X14=" INTPTR_FORMAT, uc->X14);
st->print(", X15=" INTPTR_FORMAT, uc->X15);
st->cr();
st->print( "X16=" INTPTR_FORMAT, uc->X16);
st->print(", X17=" INTPTR_FORMAT, uc->X17);
st->print(", X18=" INTPTR_FORMAT, uc->X18);
st->print(", X19=" INTPTR_FORMAT, uc->X19);
st->cr();
st->print(", X20=" INTPTR_FORMAT, uc->X20);
st->print(", X21=" INTPTR_FORMAT, uc->X21);
st->print(", X22=" INTPTR_FORMAT, uc->X22);
st->print(", X23=" INTPTR_FORMAT, uc->X23);
st->cr();
st->print(", X24=" INTPTR_FORMAT, uc->X24);
st->print(", X25=" INTPTR_FORMAT, uc->X25);
st->print(", X26=" INTPTR_FORMAT, uc->X26);
st->print(", X27=" INTPTR_FORMAT, uc->X27);
st->print(", X28=" INTPTR_FORMAT, uc->X28);
st->cr();
st->cr();
intptr_t *sp = (intptr_t *)uc->Sp;
st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t));
st->cr();
// Note: it may be unsafe to inspect memory near pc. For example, pc may
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = (address)uc->Pc;
st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
st->cr();
}
void os::print_register_info(outputStream *st, const void *context) {
if (context == NULL) return;
const CONTEXT* uc = (const CONTEXT*)context;
st->print_cr("Register to memory mapping:");
st->cr();
// this is only for the "general purpose" registers
st->print(" X0="); print_location(st, uc->X0);
st->print(" X1="); print_location(st, uc->X1);
st->print(" X2="); print_location(st, uc->X2);
st->print(" X3="); print_location(st, uc->X3);
st->cr();
st->print(" X4="); print_location(st, uc->X4);
st->print(" X5="); print_location(st, uc->X5);
st->print(" X6="); print_location(st, uc->X6);
st->print(" X7="); print_location(st, uc->X7);
st->cr();
st->print(" X8="); print_location(st, uc->X8);
st->print(" X9="); print_location(st, uc->X9);
st->print("X10="); print_location(st, uc->X10);
st->print("X11="); print_location(st, uc->X11);
st->cr();
st->print("X12="); print_location(st, uc->X12);
st->print("X13="); print_location(st, uc->X13);
st->print("X14="); print_location(st, uc->X14);
st->print("X15="); print_location(st, uc->X15);
st->cr();
st->print("X16="); print_location(st, uc->X16);
st->print("X17="); print_location(st, uc->X17);
st->print("X18="); print_location(st, uc->X18);
st->print("X19="); print_location(st, uc->X19);
st->cr();
st->print("X20="); print_location(st, uc->X20);
st->print("X21="); print_location(st, uc->X21);
st->print("X22="); print_location(st, uc->X22);
st->print("X23="); print_location(st, uc->X23);
st->cr();
st->print("X24="); print_location(st, uc->X24);
st->print("X25="); print_location(st, uc->X25);
st->print("X26="); print_location(st, uc->X26);
st->print("X27="); print_location(st, uc->X27);
st->print("X28="); print_location(st, uc->X28);
st->cr();
}
void os::setup_fpu() {
}
bool os::supports_sse() {
return true;
}
#ifndef PRODUCT
void os::verify_stack_alignment() {
assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
}
#endif
int os::extra_bang_size_in_bytes() {
// AArch64 does not require the additional stack bang.
return 0;
}
extern "C" {
int SpinPause() {
return 0;
}
};


@@ -0,0 +1,42 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_AARCH64_OS_WINDOWS_AARCH64_HPP
#define OS_CPU_WINDOWS_AARCH64_OS_WINDOWS_AARCH64_HPP
static void setup_fpu();
static bool supports_sse();
static bool register_code_area(char *low, char *high) {
// Using Vectored Exception Handling
return true;
}
private:
static void current_thread_enable_wx_impl(WXMode mode) { }
public:
#endif // OS_CPU_WINDOWS_AARCH64_OS_WINDOWS_AARCH64_HPP


@@ -0,0 +1,30 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_AARCH64_OS_WINDOWS_AARCH64_INLINE_HPP
#define OS_CPU_WINDOWS_AARCH64_OS_WINDOWS_AARCH64_INLINE_HPP
#include "runtime/os.hpp"
#endif // OS_CPU_WINDOWS_AARCH64_OS_WINDOWS_AARCH64_INLINE_HPP


@@ -0,0 +1,37 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_AARCH64_PREFETCH_WINDOWS_AARCH64_INLINE_HPP
#define OS_CPU_WINDOWS_AARCH64_PREFETCH_WINDOWS_AARCH64_INLINE_HPP
#include "runtime/prefetch.hpp"
inline void Prefetch::read (void *loc, intx interval) {
}
inline void Prefetch::write(void *loc, intx interval) {
}
#endif // OS_CPU_WINDOWS_AARCH64_PREFETCH_WINDOWS_AARCH64_INLINE_HPP


@@ -0,0 +1,100 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
frame JavaThread::pd_last_frame() {
assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
}
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
void* ucontext, bool isInJava) {
assert(Thread::current() == this, "caller must be current thread");
return pd_get_top_frame(fr_addr, ucontext, isInJava);
}
bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
return pd_get_top_frame(fr_addr, ucontext, isInJava);
}
bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
assert(this->is_Java_thread(), "must be JavaThread");
JavaThread* jt = (JavaThread *)this;
// If we have a last_Java_frame, then we should use it even if
// isInJava == true. It should be more reliable than CONTEXT info.
if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) {
*fr_addr = jt->pd_last_frame();
return true;
}
// At this point, we don't have a last_Java_frame, so
// we try to glean some information out of the CONTEXT
// if we were running Java code when SIGPROF came in.
if (isInJava) {
frame ret_frame = os::fetch_frame_from_context(ucontext);
if (ret_frame.pc() == NULL || ret_frame.sp() == NULL ) {
// CONTEXT wasn't useful
return false;
}
if (MetaspaceShared::is_in_trampoline_frame(ret_frame.pc())) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
if (!ret_frame.safe_for_sender(jt)) {
#if COMPILER2_OR_JVMCI
// C2 and JVMCI use fp as a general register; see if a NULL fp helps
frame ret_frame2(ret_frame.sp(), NULL, ret_frame.pc());
if (!ret_frame2.safe_for_sender(jt)) {
// nothing else to try if the frame isn't good
return false;
}
ret_frame = ret_frame2;
#else
// nothing else to try if the frame isn't good
return false;
#endif // COMPILER2_OR_JVMCI
}
*fr_addr = ret_frame;
return true;
}
// nothing else to try
return false;
}
void JavaThread::cache_global_variables() { }


@@ -0,0 +1,79 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_AARCH64_THREAD_WINDOWS_AARCH64_HPP
#define OS_CPU_WINDOWS_AARCH64_THREAD_WINDOWS_AARCH64_HPP
private:
#ifdef ASSERT
// spill stack holds N callee-save registers at each Java call and
// grows downwards towards limit
// we need limit to check we have space for a spill and base so we
// can identify all live spill frames at GC (eventually)
address _spill_stack;
address _spill_stack_base;
address _spill_stack_limit;
#endif // ASSERT
void pd_initialize() {
_anchor.clear();
}
frame pd_last_frame();
public:
// Mutators are highly dangerous....
intptr_t* last_Java_fp() { return _anchor.last_Java_fp(); }
void set_last_Java_fp(intptr_t* fp) { _anchor.set_last_Java_fp(fp); }
void set_base_of_stack_pointer(intptr_t* base_sp) {}
static ByteSize last_Java_fp_offset() {
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
intptr_t* base_of_stack_pointer() { return NULL; }
void record_base_of_stack_pointer() {}
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);
bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
private:
bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
public:
static Thread *aarch64_get_thread_helper() {
return Thread::current();
}
public:
// These routines are only used on cpu architectures that
// have separate register stacks (Itanium).
static bool register_stack_overflow() { return false; }
static void enable_register_stack_guard() {}
static void disable_register_stack_guard() {}
#endif // OS_CPU_WINDOWS_AARCH64_THREAD_WINDOWS_AARCH64_HPP


@@ -0,0 +1,102 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_AARCH64_UNWIND_WINDOWS_AARCH64_HPP
#define OS_CPU_WINDOWS_AARCH64_UNWIND_WINDOWS_AARCH64_HPP
typedef unsigned char UBYTE;
#if _MSC_VER < 1700
/* Not needed for VS2012 compiler, comes from winnt.h. */
#define UNW_FLAG_EHANDLER 0x01
#define UNW_FLAG_UHANDLER 0x02
#define UNW_FLAG_CHAININFO 0x04
#endif
// See https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling#xdata-records
typedef struct _UNWIND_INFO_EH_ONLY {
DWORD FunctionLength : 18;
DWORD Version : 2;
DWORD X : 1; // = 1
DWORD E : 1; // = 1
DWORD EpilogCount : 5; // = 0
DWORD CodeWords : 5; // = 1
DWORD UnwindCode0 : 8;
DWORD UnwindCode1 : 8;
DWORD UnwindCode2 : 8;
DWORD UnwindCode3 : 8;
DWORD ExceptionHandler;
} UNWIND_INFO_EH_ONLY, *PUNWIND_INFO_EH_ONLY;
/*
typedef struct _RUNTIME_FUNCTION {
DWORD BeginAddress;
union {
DWORD UnwindData;
struct {
DWORD Flag : 2;
DWORD FunctionLength : 11;
DWORD RegF : 3;
DWORD RegI : 4;
DWORD H : 1;
DWORD CR : 2;
DWORD FrameSize : 9;
} DUMMYSTRUCTNAME;
} DUMMYUNIONNAME;
} RUNTIME_FUNCTION, *PRUNTIME_FUNCTION;
*/
#if _MSC_VER < 1700
/* Not needed for VS2012 compiler, comes from winnt.h. */
typedef struct _DISPATCHER_CONTEXT {
ULONG64 ControlPc;
ULONG64 ImageBase;
PRUNTIME_FUNCTION FunctionEntry;
ULONG64 EstablisherFrame;
ULONG64 TargetIp;
PCONTEXT ContextRecord;
// PEXCEPTION_ROUTINE LanguageHandler;
char * LanguageHandler; // double dependency problem
PVOID HandlerData;
} DISPATCHER_CONTEXT, *PDISPATCHER_CONTEXT;
#endif
#if _MSC_VER < 1500
/* Not needed for VS2008 compiler, comes from winnt.h. */
typedef EXCEPTION_DISPOSITION (*PEXCEPTION_ROUTINE) (
IN PEXCEPTION_RECORD ExceptionRecord,
IN ULONG64 EstablisherFrame,
IN OUT PCONTEXT ContextRecord,
IN OUT PDISPATCHER_CONTEXT DispatcherContext
);
#endif
#endif // OS_CPU_WINDOWS_AARCH64_UNWIND_WINDOWS_AARCH64_HPP


@@ -0,0 +1,49 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_AARCH64_VMSTRUCTS_WINDOWS_AARCH64_HPP
#define OS_CPU_WINDOWS_AARCH64_VMSTRUCTS_WINDOWS_AARCH64_HPP
// These are the OS and CPU-specific fields, types and integer
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
\
/******************************/ \
/* Threads (NOTE: incomplete) */ \
/******************************/ \
\
nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t) \
unchecked_nonstatic_field(OSThread, _thread_handle, sizeof(HANDLE)) /* NOTE: no type */
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
\
declare_unsigned_integer_type(OSThread::thread_id_t)
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // OS_CPU_WINDOWS_AARCH64_VMSTRUCTS_WINDOWS_AARCH64_HPP


@@ -0,0 +1,87 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
void VM_Version::get_os_cpu_info() {
if (IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE)) _features |= CPU_CRC32;
if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE)) _features |= CPU_AES | CPU_SHA1 | CPU_SHA2;
if (IsProcessorFeaturePresent(PF_ARM_VFP_32_REGISTERS_AVAILABLE)) _features |= CPU_ASIMD;
// No check for CPU_PMULL
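// DCZID_EL0 layout: bit 4 (DZP) prohibits DC ZVA when set; bits [3:0] (BS)
// hold log2 of the zero-block size in 4-byte words, so the size in bytes is 4 << BS.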
__int64 dczid_el0 = _ReadStatusReg(0x5807 /* ARM64_DCZID_EL0 */);
if (!(dczid_el0 & 0x10)) {
_zva_length = 4 << (dczid_el0 & 0xf);
}
{
PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer = NULL;
DWORD returnLength = 0;
// See https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getlogicalprocessorinformation
GetLogicalProcessorInformation(NULL, &returnLength);
assert(GetLastError() == ERROR_INSUFFICIENT_BUFFER, "Unexpected return from GetLogicalProcessorInformation");
buffer = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)os::malloc(returnLength, mtInternal);
BOOL rc = GetLogicalProcessorInformation(buffer, &returnLength);
assert(rc, "Unexpected return from GetLogicalProcessorInformation");
_icache_line_size = _dcache_line_size = -1;
for (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION ptr = buffer; ptr < buffer + returnLength / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION); ptr++) {
switch (ptr->Relationship) {
case RelationCache:
// Cache data is in ptr->Cache, one CACHE_DESCRIPTOR structure for each cache.
PCACHE_DESCRIPTOR Cache = &ptr->Cache;
if (Cache->Level == 1) {
_icache_line_size = _dcache_line_size = Cache->LineSize;
}
break;
}
}
os::free(buffer);
}
{
char* buf = ::getenv("PROCESSOR_IDENTIFIER");
if (buf && strstr(buf, "Ampere(TM)") != NULL) {
_cpu = CPU_AMCC;
} else if (buf && strstr(buf, "Cavium Inc.") != NULL) {
_cpu = CPU_CAVIUM;
} else {
log_info(os)("VM_Version: unknown CPU model");
}
if (_cpu) {
SYSTEM_INFO si;
GetSystemInfo(&si);
_model = si.wProcessorLevel;
_variant = si.wProcessorRevision / 0xFF;
_revision = si.wProcessorRevision & 0xFF;
}
}
}
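The lookup above follows the documented two-call protocol for GetLogicalProcessorInformation: a NULL buffer fails with ERROR_INSUFFICIENT_BUFFER and reports the required size. A minimal standalone sketch of that protocol (plain malloc/free stand in for HotSpot's os:: wrappers; error handling abbreviated):
#include <windows.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
  DWORD len = 0;
  // First call: NULL buffer, so the API fails and writes the required size.
  GetLogicalProcessorInformation(NULL, &len);
  if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) return 1;
  PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buf =
      (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION) malloc(len);
  if (buf == NULL || !GetLogicalProcessorInformation(buf, &len)) return 1;
  // Second call filled the buffer; len is a byte count, so derive the entry count.
  DWORD n = len / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
  for (DWORD i = 0; i < n; i++) {
    if (buf[i].Relationship == RelationCache && buf[i].Cache.Level == 1) {
      printf("L1 line size: %u bytes\n", (unsigned) buf[i].Cache.LineSize);
    }
  }
  free(buf);
  return 0;
}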

View File

@@ -487,6 +487,41 @@ address os::current_stack_pointer() {
}
#endif
bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (Interpreter::contains(pc)) {
*fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
if (!fr->is_first_java_frame()) {
// get_frame_at_stack_banging_point() is only called when we
// have well defined stacks so java_sender() calls do not need
// to assert safe_for_sender() first.
*fr = fr->java_sender();
}
} else {
// More complex case: the pc is in compiled code
assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
CodeBlob* cb = CodeCache::find_blob(pc);
if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
// Not sure where the pc points to; fall back to default
// stack overflow handling
return false;
} else {
// in compiled code, the stack banging is performed just after the return pc
// has been pushed on the stack
intptr_t* fp = (intptr_t*)exceptionInfo->ContextRecord->REG_FP;
intptr_t* sp = (intptr_t*)exceptionInfo->ContextRecord->REG_SP;
*fr = frame(sp + 1, fp, (address)*sp);
if (!fr->is_java_frame()) {
// See java_sender() comment above.
*fr = fr->java_sender();
}
}
}
assert(fr->is_java_frame(), "Safety check");
return true;
}
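For context: "stack banging" means the compiled prologue deliberately touches memory below the stack pointer so an overflow traps at a predictable point, which is what makes the frame reconstruction above safe. A hedged, illustrative sketch of the idea (simplified; the names and page scheme are not HotSpot's):
#include <stddef.h>

// Probe a few pages below sp; if any probe lands on the guard page, the
// write faults here, at a well-defined program point that the handler can
// map back to a Java frame.
static void bang_shadow_pages(volatile char* sp, size_t page_size, int pages) {
  for (int i = 1; i <= pages; i++) {
    sp[-(ptrdiff_t)(i * page_size)] = 0;
  }
}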
#ifndef AMD64
intptr_t* _get_previous_fp() {

View File

@@ -63,33 +63,22 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
// we try to glean some information out of the CONTEXT
// if we were running Java code when SIGPROF came in.
if (isInJava) {
CONTEXT* uc = (CONTEXT*)ucontext;
#ifdef AMD64
intptr_t* ret_fp = (intptr_t*) uc->Rbp;
intptr_t* ret_sp = (intptr_t*) uc->Rsp;
ExtendedPC addr = ExtendedPC((address)uc->Rip);
#else
intptr_t* ret_fp = (intptr_t*) uc->Ebp;
intptr_t* ret_sp = (intptr_t*) uc->Esp;
ExtendedPC addr = ExtendedPC((address)uc->Eip);
#endif // AMD64
if (addr.pc() == NULL || ret_sp == NULL ) {
frame ret_frame = os::fetch_frame_from_context(ucontext);
if (ret_frame.pc() == NULL || ret_frame.sp() == NULL ) {
// CONTEXT wasn't useful
return false;
}
if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
if (MetaspaceShared::is_in_trampoline_frame(ret_frame.pc())) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
frame ret_frame(ret_sp, ret_fp, addr.pc());
if (!ret_frame.safe_for_sender(jt)) {
#if COMPILER2_OR_JVMCI
// C2 and JVMCI use ebp as a general register; see if a NULL fp helps
frame ret_frame2(ret_sp, NULL, addr.pc());
frame ret_frame2(ret_frame.sp(), NULL, ret_frame.pc());
if (!ret_frame2.safe_for_sender(jt)) {
// nothing else to try if the frame isn't good
return false;

View File

@@ -96,7 +96,7 @@
static jint CurrentVersion = JNI_VERSION_10;
#ifdef _WIN32
#if defined(_WIN32) && !defined(USE_VECTORED_EXCEPTION_HANDLING)
extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS* );
#endif
@@ -4076,11 +4076,11 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) {
_JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, void *args) {
jint result = JNI_ERR;
// On Windows, let CreateJavaVM run with SEH protection
#ifdef _WIN32
#if defined(_WIN32) && !defined(USE_VECTORED_EXCEPTION_HANDLING)
__try {
#endif
result = JNI_CreateJavaVM_inner(vm, penv, args);
#ifdef _WIN32
#if defined(_WIN32) && !defined(USE_VECTORED_EXCEPTION_HANDLING)
} __except(topLevelExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
// Nothing to do.
}
@@ -4149,11 +4149,11 @@ static jint JNICALL jni_DestroyJavaVM_inner(JavaVM *vm) {
jint JNICALL jni_DestroyJavaVM(JavaVM *vm) {
jint result = JNI_ERR;
// On Windows, we need SEH protection
#ifdef _WIN32
#if defined(_WIN32) && !defined(USE_VECTORED_EXCEPTION_HANDLING)
__try {
#endif
result = jni_DestroyJavaVM_inner(vm);
#ifdef _WIN32
#if defined(_WIN32) && !defined(USE_VECTORED_EXCEPTION_HANDLING)
} __except(topLevelExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
// Nothing to do.
}
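These guards reflect that ARM64 Windows builds define USE_VECTORED_EXCEPTION_HANDLING (added for _M_ARM64 in a hunk further down), so SEH __try/__except cannot be used here. A minimal sketch of the vectored alternative, using only the documented Win32 API; the handler body is a placeholder, not HotSpot's logic:
#include <windows.h>

static LONG CALLBACK vectoredHandler(PEXCEPTION_POINTERS info) {
  // Inspect info->ExceptionRecord and info->ContextRecord here.
  return EXCEPTION_CONTINUE_SEARCH;  // defer to any later handlers
}

static void installHandler() {
  // First = 1 puts the handler at the head of the chain; unlike frame-based
  // SEH, it sees exceptions raised anywhere in the process.
  AddVectoredExceptionHandler(1, vectoredHandler);
}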

View File

@@ -169,7 +169,7 @@ void universe_post_module_init(); // must happen after call_initPhase2
#ifndef USE_LIBRARY_BASED_TLS_ONLY
// Current thread is maintained as a thread-local variable
THREAD_LOCAL_DECL Thread* Thread::_thr_current = NULL;
THREAD_LOCAL Thread* Thread::_thr_current = NULL;
#endif
// ======= Thread ========

View File

@@ -118,7 +118,7 @@ class Thread: public ThreadShadow {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
// Current thread is maintained as a thread-local variable
static THREAD_LOCAL_DECL Thread* _thr_current;
static THREAD_LOCAL Thread* _thr_current;
#endif
private:

View File

@@ -264,9 +264,7 @@ inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
#define JLONG_FORMAT "%ld"
#endif // _LP64 && __APPLE__
#ifndef USE_LIBRARY_BASED_TLS_ONLY
#define THREAD_LOCAL_DECL __thread
#endif
#define THREAD_LOCAL __thread
// Inlining support
#define NOINLINE __attribute__ ((noinline))

View File

@@ -159,9 +159,7 @@ inline int g_isfinite(jdouble f) { return _finite(f); }
#define offset_of(klass,field) offsetof(klass,field)
#ifndef USE_LIBRARY_BASED_TLS_ONLY
#define THREAD_LOCAL_DECL __declspec( thread )
#endif
#define THREAD_LOCAL __declspec(thread)
// Inlining support
// MSVC has '__declspec(noinline)' but according to the official documentation
@@ -173,4 +171,8 @@ inline int g_isfinite(jdouble f) { return _finite(f); }
// Alignment
#define ATTRIBUTE_ALIGNED(x) __declspec(align(x))
#ifdef _M_ARM64
#define USE_VECTORED_EXCEPTION_HANDLING
#endif
#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_VISCPP_HPP
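Net effect of this hunk and the matching gcc one above: the conditionally-defined THREAD_LOCAL_DECL becomes an unconditional THREAD_LOCAL. A small sketch of the resulting portable pattern, assuming MSVC or a GCC-compatible compiler:
#if defined(_MSC_VER)
  #define THREAD_LOCAL __declspec(thread)
#else
  #define THREAD_LOCAL __thread
#endif

static THREAD_LOCAL int t_depth = 0;  // each thread gets its own copy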

View File

@@ -147,10 +147,6 @@ inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
// AIX 5.3 has buggy __thread support. (see JDK-8176442).
#define USE_LIBRARY_BASED_TLS_ONLY 1
#ifndef USE_LIBRARY_BASED_TLS_ONLY
#define THREAD_LOCAL_DECL __thread
#endif
// Inlining support
//
// Be aware that for function/method declarations, xlC only supports the following

View File

@@ -576,6 +576,8 @@ GetJavaProperties(JNIEnv* env)
sprops.os_arch = "amd64";
#elif _X86_
sprops.os_arch = "x86";
#elif defined(_M_ARM64)
sprops.os_arch = "aarch64";
#else
sprops.os_arch = "unknown";
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,6 @@
* questions.
*/
#include "awt.h"
#include "awt_ole.h"
#include "awt_DCHolder.h" // main symbols

View File

@@ -23,18 +23,16 @@
* questions.
*/
#include "awt.h"
#include <shlwapi.h>
#include <shellapi.h>
#include <memory.h>
#include "awt_DataTransferer.h"
#include "awt_Toolkit.h"
#include "java_awt_dnd_DnDConstants.h"
#include "sun_awt_windows_WDropTargetContextPeer.h"
#include "awt_Container.h"
#include "alloc.h"
#include "awt_ole.h"
#include "awt_Toolkit.h"
#include "awt_DnDDT.h"
#include "awt_DnDDS.h"

View File

@@ -23,6 +23,7 @@
* questions.
*/
#include <comdef.h>
#include "awt.h"
#include "awt_FileDialog.h"
#include "awt_Dialog.h"

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,10 @@
#ifndef AWT_OLE_H
#define AWT_OLE_H
#include "awt.h"
#include <ole2.h>
#include <comdef.h>
#include <comutil.h>
#include "awt.h"
#ifdef _DEBUG
#define _SUN_DEBUG

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,7 @@ public class AttachProviderImpl extends HotSpotAttachProvider {
"This provider is not supported on this version of Windows");
}
String arch = System.getProperty("os.arch");
if (!arch.equals("x86") && !arch.equals("amd64")) {
if (!arch.equals("x86") && !arch.equals("amd64") && !arch.equals("aarch64")) {
throw new RuntimeException(
"This provider is not supported on this processor architecture");
}

View File

@@ -555,8 +555,10 @@ public class HotSpotAgent {
machDesc = new MachineDescriptionIntelX86();
} else if (cpu.equals("amd64")) {
machDesc = new MachineDescriptionAMD64();
} else if (cpu.equals("aarch64")) {
machDesc = new MachineDescriptionAArch64();
} else {
throw new DebuggerException("Win32 supported under x86 and amd64 only");
throw new DebuggerException("Win32 supported under x86, amd64 and aarch64 only");
}
// Note we do not use a cache for the local debugger in server

View File

@@ -28,8 +28,10 @@ import java.io.*;
import java.net.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.amd64.*;
import sun.jvm.hotspot.debugger.x86.*;
import sun.jvm.hotspot.debugger.windbg.aarch64.*;
import sun.jvm.hotspot.debugger.windbg.amd64.*;
import sun.jvm.hotspot.debugger.windbg.x86.*;
import sun.jvm.hotspot.debugger.win32.coff.*;
@@ -113,6 +115,8 @@ public class WindbgDebuggerLocal extends DebuggerBase implements WindbgDebugger
threadFactory = new WindbgX86ThreadFactory(this);
} else if (cpu.equals("amd64")) {
threadFactory = new WindbgAMD64ThreadFactory(this);
} else if (cpu.equals("aarch64")) {
threadFactory = new WindbgAARCH64ThreadFactory(this);
}
if (useCache) {

View File

@@ -0,0 +1,94 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.debugger.windbg.aarch64;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.windbg.*;
class WindbgAARCH64Thread implements ThreadProxy {
private WindbgDebugger debugger;
private long sysId;
private boolean gotID;
private long id;
// The address argument must be the address of the OSThread::_thread_id
WindbgAARCH64Thread(WindbgDebugger debugger, Address addr) {
this.debugger = debugger;
this.sysId = (long)addr.getCIntegerAt(0, 4, true);
gotID = false;
}
WindbgAARCH64Thread(WindbgDebugger debugger, long sysId) {
this.debugger = debugger;
this.sysId = sysId;
gotID = false;
}
public ThreadContext getContext() throws IllegalThreadStateException {
long[] data = debugger.getThreadIntegerRegisterSet(getThreadID());
WindbgAARCH64ThreadContext context = new WindbgAARCH64ThreadContext(debugger);
for (int i = 0; i < data.length; i++) {
context.setRegister(i, data[i]);
}
return context;
}
public boolean canSetContext() throws DebuggerException {
return false;
}
public void setContext(ThreadContext thrCtx)
throws IllegalThreadStateException, DebuggerException {
throw new DebuggerException("Unimplemented");
}
public boolean equals(Object obj) {
if ((obj == null) || !(obj instanceof WindbgAARCH64Thread)) {
return false;
}
return (((WindbgAARCH64Thread) obj).getThreadID() == getThreadID());
}
public int hashCode() {
return (int) getThreadID();
}
public String toString() {
return Long.toString(getThreadID());
}
/** Retrieves the thread ID of this thread by examining the Thread
Information Block. */
private long getThreadID() {
if (!gotID) {
id = debugger.getThreadIdFromSysId(sysId);
gotID = true; // remember the result so later calls skip the debugger lookup
}
return id;
}
}

View File

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.debugger.windbg.aarch64;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.windbg.*;
class WindbgAARCH64ThreadContext extends AARCH64ThreadContext {
private WindbgDebugger debugger;
public WindbgAARCH64ThreadContext(WindbgDebugger debugger) {
super();
this.debugger = debugger;
}
public void setRegisterAsAddress(int index, Address value) {
setRegister(index, debugger.getAddressValue(value));
}
public Address getRegisterAsAddress(int index) {
return debugger.newAddress(getRegister(index));
}
}

View File

@@ -0,0 +1,45 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.debugger.windbg.aarch64;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.windbg.*;
public class WindbgAARCH64ThreadFactory implements WindbgThreadFactory {
private WindbgDebugger debugger;
public WindbgAARCH64ThreadFactory(WindbgDebugger debugger) {
this.debugger = debugger;
}
public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) {
return new WindbgAARCH64Thread(debugger, threadIdentifierAddr);
}
public ThreadProxy createThreadWrapper(long id) {
return new WindbgAARCH64Thread(debugger, id);
}
}

View File

@@ -28,11 +28,13 @@ import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.runtime.solaris_sparc.SolarisSPARCJavaThreadPDAccess;
import sun.jvm.hotspot.runtime.solaris_x86.SolarisX86JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.solaris_amd64.SolarisAMD64JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.win32_amd64.Win32AMD64JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.win32_x86.Win32X86JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.win32_amd64.Win32AMD64JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.win32_aarch64.Win32AARCH64JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.linux_x86.LinuxX86JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.linux_amd64.LinuxAMD64JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.linux_aarch64.LinuxAARCH64JavaThreadPDAccess;
@@ -83,6 +85,8 @@ public class Threads {
access = new Win32X86JavaThreadPDAccess();
} else if (cpu.equals("amd64")) {
access = new Win32AMD64JavaThreadPDAccess();
} else if (cpu.equals("aarch64")) {
access = new Win32AARCH64JavaThreadPDAccess();
}
} else if (os.equals("linux")) {
if (cpu.equals("x86")) {

View File

@@ -0,0 +1,138 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.runtime.win32_aarch64;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.runtime.aarch64.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
/** This class is only public to allow using the VMObjectFactory to
instantiate these.
*/
public class Win32AARCH64JavaThreadPDAccess implements JavaThreadPDAccess {
private static AddressField lastJavaFPField;
private static AddressField osThreadField;
// Field from OSThread
private static Field osThreadThreadIDField;
// This is currently unneeded but is being kept in case we change
// the currentFrameGuess algorithm
private static final long GUESS_SCAN_RANGE = 128 * 1024;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("JavaThread");
osThreadField = type.getAddressField("_osthread");
Type anchorType = db.lookupType("JavaFrameAnchor");
lastJavaFPField = anchorType.getAddressField("_last_Java_fp");
Type osThreadType = db.lookupType("OSThread");
osThreadThreadIDField = osThreadType.getField("_thread_id");
}
public Address getLastJavaFP(Address addr) {
return lastJavaFPField.getValue(addr.addOffsetTo(sun.jvm.hotspot.runtime.JavaThread.getAnchorField().getOffset()));
}
public Address getLastJavaPC(Address addr) {
return null;
}
public Address getBaseOfStackPointer(Address addr) {
return null;
}
public Frame getLastFramePD(JavaThread thread, Address addr) {
Address fp = thread.getLastJavaFP();
if (fp == null) {
return null; // no information
}
Address pc = thread.getLastJavaPC();
if ( pc != null ) {
return new AARCH64Frame(thread.getLastJavaSP(), fp, pc);
} else {
return new AARCH64Frame(thread.getLastJavaSP(), fp);
}
}
public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) {
return new AARCH64RegisterMap(thread, updateMap);
}
public Frame getCurrentFrameGuess(JavaThread thread, Address addr) {
ThreadProxy t = getThreadProxy(addr);
AARCH64ThreadContext context = (AARCH64ThreadContext) t.getContext();
AARCH64CurrentFrameGuess guesser = new AARCH64CurrentFrameGuess(context, thread);
if (!guesser.run(GUESS_SCAN_RANGE)) {
return null;
}
if (guesser.getPC() == null) {
return new AARCH64Frame(guesser.getSP(), guesser.getFP());
} else {
return new AARCH64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
}
}
public void printThreadIDOn(Address addr, PrintStream tty) {
tty.print(getThreadProxy(addr));
}
public void printInfoOn(Address threadAddr, PrintStream tty) {
}
public Address getLastSP(Address addr) {
ThreadProxy t = getThreadProxy(addr);
AARCH64ThreadContext context = (AARCH64ThreadContext) t.getContext();
return context.getRegisterAsAddress(AARCH64ThreadContext.SP);
}
public ThreadProxy getThreadProxy(Address addr) {
// Addr is the address of the JavaThread.
// Fetch the OSThread (for now and for simplicity, not making a
// separate "OSThread" class in this package)
Address osThreadAddr = osThreadField.getValue(addr);
// Get the address of the thread_id within the OSThread
Address threadIdAddr = osThreadAddr.addOffsetTo(osThreadThreadIDField.getOffset());
JVMDebugger debugger = VM.getVM().getDebugger();
return debugger.getThreadForIdentifierAddress(threadIdAddr);
}
}

View File

@@ -36,6 +36,9 @@
#elif _M_AMD64
#include "sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext.h"
#define NPRGREG sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext_NPRGREG
#elif _M_ARM64
#include "sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext.h"
#define NPRGREG sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_NPRGREG
#else
#error "SA windbg back-end is not supported for your cpu!"
#endif

View File

@@ -128,7 +128,11 @@ public class AArch64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFac
}
protected RegisterConfig createRegisterConfig(AArch64HotSpotVMConfig config, TargetDescription target) {
return new AArch64HotSpotRegisterConfig(target, config.useCompressedOops);
// ARMv8 makes r18 available to the platform ABI. Windows and Darwin
// reserve it for that purpose; Linux does not assign it, so there r18
// can be used as an additional allocatable register.
boolean canUsePlatformRegister = config.linuxOs;
return new AArch64HotSpotRegisterConfig(target, config.useCompressedOops, canUsePlatformRegister);
}
protected HotSpotCodeCacheProvider createCodeCache(HotSpotJVMCIRuntime runtime, TargetDescription target, RegisterConfig regConfig) {

View File

@@ -34,6 +34,7 @@ import static jdk.vm.ci.aarch64.AArch64.r7;
import static jdk.vm.ci.aarch64.AArch64.rscratch1;
import static jdk.vm.ci.aarch64.AArch64.rscratch2;
import static jdk.vm.ci.aarch64.AArch64.r12;
import static jdk.vm.ci.aarch64.AArch64.r18;
import static jdk.vm.ci.aarch64.AArch64.r27;
import static jdk.vm.ci.aarch64.AArch64.r28;
import static jdk.vm.ci.aarch64.AArch64.r29;
@@ -122,6 +123,12 @@ public class AArch64HotSpotRegisterConfig implements RegisterConfig {
*/
public static final Register metaspaceMethodRegister = r12;
/**
* The platform ABI can use r18 to carry inter-procedural state (e.g. thread
* context). If not defined as such by the platform ABI, it can be used as
* an additional temporary register.
*/
public static final Register platformRegister = r18;
public static final Register heapBaseRegister = r27;
public static final Register threadRegister = r28;
public static final Register fp = r29;
@@ -129,9 +136,9 @@ public class AArch64HotSpotRegisterConfig implements RegisterConfig {
private static final RegisterArray reservedRegisters
= new RegisterArray(rscratch1, rscratch2, threadRegister, fp, lr, r31, zr, sp);
private static RegisterArray initAllocatable(Architecture arch, boolean reserveForHeapBase) {
private static RegisterArray initAllocatable(Architecture arch, boolean reserveForHeapBase, boolean canUsePlatformRegister) {
RegisterArray allRegisters = arch.getAvailableValueRegisters();
Register[] registers = new Register[allRegisters.size() - reservedRegisters.size() - (reserveForHeapBase ? 1 : 0)];
Register[] registers = new Register[allRegisters.size() - reservedRegisters.size() - (reserveForHeapBase ? 1 : 0) - (!canUsePlatformRegister ? 1 : 0)];
List<Register> reservedRegistersList = reservedRegisters.asList();
int idx = 0;
@@ -140,6 +147,9 @@ public class AArch64HotSpotRegisterConfig implements RegisterConfig {
// skip reserved registers
continue;
}
if (!canUsePlatformRegister && reg.equals(platformRegister)) {
continue;
}
assert !(reg.equals(threadRegister) || reg.equals(fp) || reg.equals(lr) || reg.equals(r31) || reg.equals(zr) || reg.equals(sp));
if (reserveForHeapBase && reg.equals(heapBaseRegister)) {
// skip heap base register
@@ -153,8 +163,8 @@ public class AArch64HotSpotRegisterConfig implements RegisterConfig {
return new RegisterArray(registers);
}
public AArch64HotSpotRegisterConfig(TargetDescription target, boolean useCompressedOops) {
this(target, initAllocatable(target.arch, useCompressedOops));
public AArch64HotSpotRegisterConfig(TargetDescription target, boolean useCompressedOops, boolean canUsePlatformRegister) {
this(target, initAllocatable(target.arch, useCompressedOops, canUsePlatformRegister));
assert callerSaved.size() >= allocatable.size();
}