diff --git a/.github/actions/get-bootjdk/action.yml b/.github/actions/get-bootjdk/action.yml
index 19c3a0128f4d4..1e569dd47c570 100644
--- a/.github/actions/get-bootjdk/action.yml
+++ b/.github/actions/get-bootjdk/action.yml
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -104,6 +104,6 @@ runs:
- name: 'Export path to where BootJDK is installed'
id: path-name
run: |
- # Export the path
- echo 'path=bootjdk/jdk' >> $GITHUB_OUTPUT
+ # Export the absolute path
+ echo "path=`pwd`/bootjdk/jdk" >> $GITHUB_OUTPUT
shell: bash
diff --git a/.github/actions/get-gtest/action.yml b/.github/actions/get-gtest/action.yml
index e8d70699c8fcb..5de2b10cd3209 100644
--- a/.github/actions/get-gtest/action.yml
+++ b/.github/actions/get-gtest/action.yml
@@ -40,7 +40,7 @@ runs:
var: GTEST_VERSION
- name: 'Checkout GTest source'
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: google/googletest
ref: 'v${{ steps.version.outputs.value }}'
diff --git a/.github/actions/get-jtreg/action.yml b/.github/actions/get-jtreg/action.yml
index 9e1435513997b..a45c0c1e6a959 100644
--- a/.github/actions/get-jtreg/action.yml
+++ b/.github/actions/get-jtreg/action.yml
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@ runs:
key: jtreg-${{ steps.version.outputs.value }}
- name: 'Checkout the JTReg source'
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: openjdk/jtreg
ref: jtreg-${{ steps.version.outputs.value }}
@@ -56,8 +56,14 @@ runs:
- name: 'Build JTReg'
run: |
+ # If the runner architecture is X64, use JAVA_HOME_17_X64; otherwise use JAVA_HOME_17_arm64
+ if [[ '${{ runner.arch }}' == 'X64' ]]; then
+ JDK="$JAVA_HOME_17_X64"
+ else
+ JDK="$JAVA_HOME_17_arm64"
+ fi
# Build JTReg and move files to the proper locations
- bash make/build.sh --jdk "$JAVA_HOME_17_X64"
+ bash make/build.sh --jdk "$JDK"
mkdir ../installed
mv build/images/jtreg/* ../installed
working-directory: jtreg/src
diff --git a/.github/scripts/gen-test-results.sh b/.github/scripts/gen-test-results.sh
index 73edb8b3d11fe..9e85eef4dc08d 100644
--- a/.github/scripts/gen-test-results.sh
+++ b/.github/scripts/gen-test-results.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -44,8 +44,8 @@ for test in $failures $errors; do
base_path="$(echo "$test" | tr '#' '_')"
report_file="$report_dir/$base_path.jtr"
hs_err_files=$(ls $report_dir/$base_path/hs_err*.log 2> /dev/null || true)
+ replay_files=$(ls $report_dir/$base_path/replay*.log 2> /dev/null || true)
echo "#### $test"
-
echo '<details><summary>View test results</summary>'
echo ''
echo '```'
@@ -73,6 +73,20 @@ for test in $failures $errors; do
echo ''
fi
+ if [[ "$replay_files" != "" ]]; then
+ echo '<details><summary>View HotSpot replay file</summary>'
+ echo ''
+ for replay in $replay_files; do
+ echo '```'
+ echo "$replay:"
+ echo ''
+ cat "$replay"
+ echo '```'
+ done
+
+ echo '</details>'
+ echo ''
+ fi
done >> $GITHUB_STEP_SUMMARY
# With many failures, the summary can easily exceed 1024 kB, the limit set by Github
diff --git a/.github/scripts/gen-test-summary.sh b/.github/scripts/gen-test-summary.sh
index d016cb38649fd..a612bed552779 100644
--- a/.github/scripts/gen-test-summary.sh
+++ b/.github/scripts/gen-test-summary.sh
@@ -42,6 +42,7 @@ error_count=$(echo $errors | wc -w || true)
if [[ "$failures" = "" && "$errors" = "" ]]; then
# We know something went wrong, but not what
+ echo 'failure=true' >> $GITHUB_OUTPUT
echo 'error-message=Unspecified test suite failure. Please see log for job for details.' >> $GITHUB_OUTPUT
exit 0
fi
diff --git a/.github/workflows/build-cross-compile.yml b/.github/workflows/build-cross-compile.yml
index e27b7e2c05258..5db69f07d98c5 100644
--- a/.github/workflows/build-cross-compile.yml
+++ b/.github/workflows/build-cross-compile.yml
@@ -80,13 +80,12 @@ jobs:
- target-cpu: riscv64
gnu-arch: riscv64
debian-arch: riscv64
- debian-repository: https://deb.debian.org/debian-ports
- debian-keyring: /usr/share/keyrings/debian-ports-archive-keyring.gpg
+ debian-repository: https://httpredir.debian.org/debian/
debian-version: sid
steps:
- name: 'Checkout the JDK source'
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: 'Get the BootJDK'
id: bootjdk
@@ -101,6 +100,10 @@ jobs:
with:
platform: linux-x64
+ - name: 'Get GTest'
+ id: gtest
+ uses: ./.github/actions/get-gtest
+
# Upgrading apt to solve libc6 installation bugs, see JDK-8260460.
- name: 'Install toolchain and dependencies'
run: |
@@ -112,8 +115,7 @@ jobs:
g++-${{ inputs.gcc-major-version }} \
gcc-${{ inputs.gcc-major-version }}-${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}} \
g++-${{ inputs.gcc-major-version }}-${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}} \
- libxrandr-dev libxtst-dev libcups2-dev libasound2-dev \
- debian-ports-archive-keyring
+ libxrandr-dev libxtst-dev libcups2-dev libasound2-dev
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${{ inputs.gcc-major-version }} 100 --slave /usr/bin/g++ g++ /usr/bin/g++-${{ inputs.gcc-major-version }}
- name: 'Check cache for sysroot'
@@ -132,9 +134,9 @@ jobs:
sudo debootstrap
--arch=${{ matrix.debian-arch }}
--verbose
- --include=fakeroot,symlinks,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxrandr-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng-dev
+ --include=fakeroot,symlinks,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxrandr-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype-dev,libpng-dev
--resolve-deps
- $(test -n "${{ matrix.debian-keyring }}" && echo "--keyring=${{ matrix.debian-keyring }}")
+ --variant=minbase
${{ matrix.debian-version }}
sysroot
${{ matrix.debian-repository }}
@@ -147,7 +149,8 @@ jobs:
sudo chown ${USER} -R sysroot
rm -rf sysroot/{dev,proc,run,sys,var}
rm -rf sysroot/usr/{sbin,bin,share}
- rm -rf sysroot/usr/lib/{apt,udev,systemd}
+ rm -rf sysroot/usr/lib/{apt,gcc,udev,systemd}
+ rm -rf sysroot/usr/libexec/gcc
if: steps.get-cached-sysroot.outputs.cache-hit != 'true'
- name: 'Configure'
@@ -156,6 +159,7 @@ jobs:
--with-conf-name=linux-${{ matrix.target-cpu }}
--with-version-opt=${GITHUB_ACTOR}-${GITHUB_SHA}
--with-boot-jdk=${{ steps.bootjdk.outputs.path }}
+ --with-gtest=${{ steps.gtest.outputs.path }}
--with-zlib=system
--enable-debug
--disable-precompiled-headers
diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml
index 72b7cfb0613a1..f3ea4e4fb6ad8 100644
--- a/.github/workflows/build-linux.yml
+++ b/.github/workflows/build-linux.yml
@@ -78,7 +78,7 @@ jobs:
steps:
- name: 'Checkout the JDK source'
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: 'Get the BootJDK'
id: bootjdk
diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml
index cbe501ea60640..90bb6af044ff8 100644
--- a/.github/workflows/build-macos.yml
+++ b/.github/workflows/build-macos.yml
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,9 @@ on:
platform:
required: true
type: string
+ runs-on:
+ required: true
+ type: string
extra-conf-options:
required: false
type: string
@@ -55,7 +58,7 @@ on:
jobs:
build-macos:
name: build
- runs-on: macos-11
+ runs-on: ${{ inputs.runs-on }}
strategy:
fail-fast: false
@@ -68,13 +71,13 @@ jobs:
steps:
- name: 'Checkout the JDK source'
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: 'Get the BootJDK'
id: bootjdk
uses: ./.github/actions/get-bootjdk
with:
- platform: macos-x64
+ platform: ${{ inputs.platform }}
- name: 'Get JTReg'
id: jtreg
@@ -87,7 +90,7 @@ jobs:
- name: 'Install toolchain and dependencies'
run: |
# Run Homebrew installation and xcode-select
- brew install make
+ brew install autoconf make
sudo xcode-select --switch /Applications/Xcode_${{ inputs.xcode-toolset-version }}.app/Contents/Developer
# This will make GNU make available as 'make' and not only as 'gmake'
echo '/usr/local/opt/make/libexec/gnubin' >> $GITHUB_PATH
diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml
index 6a56df295baf3..d02ef91ad86f6 100644
--- a/.github/workflows/build-windows.yml
+++ b/.github/workflows/build-windows.yml
@@ -79,7 +79,7 @@ jobs:
steps:
- name: 'Checkout the JDK source'
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: 'Get MSYS2'
uses: ./.github/actions/get-msys2
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index ac8f886cbe26b..1704102f6f0f9 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -146,7 +146,7 @@ jobs:
apt-architecture: 'i386'
# Some multilib libraries do not have proper inter-dependencies, so we have to
# install their dependencies manually.
- apt-extra-packages: 'libfreetype6-dev:i386 libtiff-dev:i386 libcupsimage2-dev:i386 libc6-i386 libgcc-s1:i386 libstdc++6:i386'
+ apt-extra-packages: 'libfreetype-dev:i386 libtiff-dev:i386 libcupsimage2-dev:i386 libc6-i386 libgcc-s1:i386 libstdc++6:i386'
extra-conf-options: '--with-target-bits=32'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
@@ -227,7 +227,8 @@ jobs:
uses: ./.github/workflows/build-macos.yml
with:
platform: macos-x64
- xcode-toolset-version: '12.5.1'
+ runs-on: 'macos-13'
+ xcode-toolset-version: '14.3.1'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
if: needs.select.outputs.macos-x64 == 'true'
@@ -238,8 +239,8 @@ jobs:
uses: ./.github/workflows/build-macos.yml
with:
platform: macos-aarch64
- xcode-toolset-version: '12.5.1'
- extra-conf-options: '--openjdk-target=aarch64-apple-darwin'
+ runs-on: 'macos-14'
+ xcode-toolset-version: '14.3.1'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
if: needs.select.outputs.macos-aarch64 == 'true'
@@ -328,7 +329,17 @@ jobs:
with:
platform: macos-x64
bootjdk-platform: macos-x64
- runs-on: macos-11
+ runs-on: macos-13
+
+ test-macos-aarch64:
+ name: macos-aarch64
+ needs:
+ - build-macos-aarch64
+ uses: ./.github/workflows/test.yml
+ with:
+ platform: macos-aarch64
+ bootjdk-platform: macos-aarch64
+ runs-on: macos-14
test-windows-x64:
name: windows-x64
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index f4d4e93c1875c..8808ab80d0ef6 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,10 @@ jobs:
- 'jdk/tier1 part 3'
- 'langtools/tier1'
- 'hs/tier1 common'
- - 'hs/tier1 compiler'
+ - 'hs/tier1 compiler part 1'
+ - 'hs/tier1 compiler part 2'
+ - 'hs/tier1 compiler part 3'
+ - 'hs/tier1 compiler not-xcomp'
- 'hs/tier1 gc'
- 'hs/tier1 runtime'
- 'hs/tier1 serviceability'
@@ -83,8 +86,20 @@ jobs:
test-suite: 'test/hotspot/jtreg/:tier1_common'
debug-suffix: -debug
- - test-name: 'hs/tier1 compiler'
- test-suite: 'test/hotspot/jtreg/:tier1_compiler'
+ - test-name: 'hs/tier1 compiler part 1'
+ test-suite: 'test/hotspot/jtreg/:tier1_compiler_1'
+ debug-suffix: -debug
+
+ - test-name: 'hs/tier1 compiler part 2'
+ test-suite: 'test/hotspot/jtreg/:tier1_compiler_2'
+ debug-suffix: -debug
+
+ - test-name: 'hs/tier1 compiler part 3'
+ test-suite: 'test/hotspot/jtreg/:tier1_compiler_3'
+ debug-suffix: -debug
+
+ - test-name: 'hs/tier1 compiler not-xcomp'
+ test-suite: 'test/hotspot/jtreg/:tier1_compiler_not_xcomp'
debug-suffix: -debug
- test-name: 'hs/tier1 gc'
@@ -105,7 +120,7 @@ jobs:
steps:
- name: 'Checkout the JDK source'
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: 'Get MSYS2'
uses: ./.github/actions/get-msys2
@@ -132,7 +147,7 @@ jobs:
run: |
# On macOS we need to install some dependencies for testing
brew install make
- sudo xcode-select --switch /Applications/Xcode_11.7.app/Contents/Developer
+ sudo xcode-select --switch /Applications/Xcode_14.3.1.app/Contents/Developer
# This will make GNU make available as 'make' and not only as 'gmake'
echo '/usr/local/opt/make/libexec/gnubin' >> $GITHUB_PATH
if: runner.os == 'macOS'
diff --git a/.jcheck/conf b/.jcheck/conf
index 8993c274fe0c9..e2ca212ab3a23 100644
--- a/.jcheck/conf
+++ b/.jcheck/conf
@@ -1,7 +1,7 @@
[general]
-project=jdk
+project=jdk-updates
jbs=JDK
-version=21
+version=21.0.3
[checks]
error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists
diff --git a/doc/testing.html b/doc/testing.html
index 0f81647ecae7f..19d937df1ead1 100644
--- a/doc/testing.html
+++ b/doc/testing.html
@@ -577,12 +577,15 @@ PKCS11 Tests
are hard to diagnose. For example,
sun/security/pkcs11/Secmod/AddTrustedCert.java may fail on Ubuntu 18.04
with the default NSS version in the system. To run these tests
-correctly, the system property <code>test.nss.lib.paths</code> is
-required on Ubuntu 18.04 to specify the alternative NSS lib
-directories.
+correctly, the system property
+<code>jdk.test.lib.artifacts.&lt;NAME&gt;</code> is required on Ubuntu
+18.04 to specify the alternative NSS lib directory. The
+<code>&lt;NAME&gt;</code> component should be replaced with the name
+element of the appropriate <code>@Artifact</code> class. (See
+<code>test/jdk/sun/security/pkcs11/PKCS11Test.java</code>)
For example:
$ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
- JTREG="JAVA_OPTIONS=-Dtest.nss.lib.paths=/path/to/your/latest/NSS-libs"
+ JTREG="JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs"
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.
Client UI Tests
diff --git a/doc/testing.md b/doc/testing.md
index 764fec15c8da7..9756a691a8c46 100644
--- a/doc/testing.md
+++ b/doc/testing.md
@@ -600,14 +600,16 @@ It is highly recommended to use the latest NSS version when running PKCS11
tests. Improper NSS version may lead to unexpected failures which are hard to
diagnose. For example, sun/security/pkcs11/Secmod/AddTrustedCert.java may fail
on Ubuntu 18.04 with the default NSS version in the system. To run these tests
-correctly, the system property `test.nss.lib.paths` is required on Ubuntu 18.04
-to specify the alternative NSS lib directories.
+correctly, the system property `jdk.test.lib.artifacts.<NAME>` is required on
+Ubuntu 18.04 to specify the alternative NSS lib directory. The `<NAME>`
+component should be replaced with the name element of the appropriate
+`@Artifact` class. (See `test/jdk/sun/security/pkcs11/PKCS11Test.java`)
For example:
```
$ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
- JTREG="JAVA_OPTIONS=-Dtest.nss.lib.paths=/path/to/your/latest/NSS-libs"
+ JTREG="JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs"
```
For more notes about the PKCS11 tests, please refer to
diff --git a/make/CreateJmods.gmk b/make/CreateJmods.gmk
index 2901930cf8814..6fbaef6bdf1c4 100644
--- a/make/CreateJmods.gmk
+++ b/make/CreateJmods.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -81,13 +81,11 @@ endif
ifneq ($(CMDS_DIR), )
DEPS += $(call FindFiles, $(CMDS_DIR))
ifeq ($(call isTargetOs, windows)+$(SHIP_DEBUG_SYMBOLS), true+public)
- # For public debug symbols on Windows, we have to use stripped pdbs, rename them
- # and filter out a few launcher pdbs where there's a lib that goes by the same name
+ # For public debug symbols on Windows, we have to use stripped pdbs and rename them
rename_stripped = $(patsubst %.stripped.pdb,%.pdb,$1)
CMDS_DIR_FILTERED := $(subst modules_cmds,modules_cmds_filtered, $(CMDS_DIR))
FILES_CMDS := $(filter-out %.pdb, $(call FindFiles, $(CMDS_DIR))) \
- $(filter-out %jimage.stripped.pdb %jpackage.stripped.pdb %java.stripped.pdb, \
- $(filter %.stripped.pdb, $(call FindFiles, $(CMDS_DIR))))
+ $(filter %.stripped.pdb, $(call FindFiles, $(CMDS_DIR)))
$(eval $(call SetupCopyFiles, COPY_FILTERED_CMDS, \
SRC := $(CMDS_DIR), \
DEST := $(CMDS_DIR_FILTERED), \
@@ -96,18 +94,6 @@ ifneq ($(CMDS_DIR), )
))
DEPS += $(COPY_FILTERED_CMDS)
JMOD_FLAGS += --cmds $(CMDS_DIR_FILTERED)
- else ifeq ($(call isTargetOs, windows)+$(SHIP_DEBUG_SYMBOLS), true+full)
- # For full debug symbols on Windows, we have to filter out a few launcher pdbs
- # where there's a lib that goes by the same name
- CMDS_DIR_FILTERED := $(subst modules_cmds,modules_cmds_filtered, $(CMDS_DIR))
- $(eval $(call SetupCopyFiles, COPY_FILTERED_CMDS, \
- SRC := $(CMDS_DIR), \
- DEST := $(CMDS_DIR_FILTERED), \
- FILES := $(filter-out %jimage.pdb %jpackage.pdb %java.pdb, \
- $(call FindFiles, $(CMDS_DIR))), \
- ))
- DEPS += $(COPY_FILTERED_CMDS)
- JMOD_FLAGS += --cmds $(CMDS_DIR_FILTERED)
else
JMOD_FLAGS += --cmds $(CMDS_DIR)
endif
diff --git a/make/Images.gmk b/make/Images.gmk
index 5b2f776155f0c..225d9a93d4f72 100644
--- a/make/Images.gmk
+++ b/make/Images.gmk
@@ -267,9 +267,6 @@ else
endif
endif
-FILTERED_PDBS := %jimage.stripped.pdb %jpackage.stripped.pdb %java.stripped.pdb \
- %jimage.pdb %jpackage.pdb %java.pdb %jimage.map %jpackage.map %java.map
-
# Param 1 - either JDK or JRE
SetupCopyDebuginfo = \
$(foreach m, $(ALL_$1_MODULES), \
@@ -283,8 +280,8 @@ SetupCopyDebuginfo = \
$(eval $(call SetupCopyFiles, COPY_$1_CMDS_DEBUGINFO_$m, \
SRC := $(SUPPORT_OUTPUTDIR)/modules_cmds/$m, \
DEST := $($1_IMAGE_DIR)/$(CMDS_TARGET_SUBDIR), \
- FILES := $(filter-out $(FILTERED_PDBS), $(call FindDebuginfoFiles, \
- $(SUPPORT_OUTPUTDIR)/modules_cmds/$m)), \
+ FILES := $(call FindDebuginfoFiles, \
+ $(SUPPORT_OUTPUTDIR)/modules_cmds/$m), \
)) \
$(eval $1_TARGETS += $$(COPY_$1_CMDS_DEBUGINFO_$m)) \
)
diff --git a/make/Init.gmk b/make/Init.gmk
index 151127d578803..61846217ecc34 100644
--- a/make/Init.gmk
+++ b/make/Init.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -138,7 +138,10 @@ ifeq ($(HAS_SPEC),)
# The spec files depend on the autoconf source code. This check makes sure
# the configuration is up to date after changes to configure.
$(SPECS): $(wildcard $(topdir)/make/autoconf/*) \
- $(if $(CUSTOM_CONFIG_DIR), $(wildcard $(CUSTOM_CONFIG_DIR)/*))
+ $(if $(CUSTOM_CONFIG_DIR), $(wildcard $(CUSTOM_CONFIG_DIR)/*)) \
+ $(addprefix $(topdir)/make/conf/, version-numbers.conf branding.conf) \
+ $(if $(CUSTOM_CONF_DIR), $(wildcard $(addprefix $(CUSTOM_CONF_DIR)/, \
+ version-numbers.conf branding.conf)))
ifeq ($(CONF_CHECK), fail)
@echo Error: The configuration is not up to date for \
"'$(lastword $(subst /, , $(dir $@)))'."
diff --git a/make/JrtfsJar.gmk b/make/JrtfsJar.gmk
index 278b7657a82d2..50ae82ca56567 100644
--- a/make/JrtfsJar.gmk
+++ b/make/JrtfsJar.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -46,8 +46,10 @@ JIMAGE_PKGS := \
jdk/internal/jrtfs \
#
+# Compile jrt-fs.jar with the interim compiler, as it ends up in the
+# image; this ensures reproducible classes
$(eval $(call SetupJavaCompilation, BUILD_JRTFS, \
- COMPILER := bootjdk, \
+ COMPILER := interim, \
DISABLED_WARNINGS := options, \
TARGET_RELEASE := $(TARGET_RELEASE_JDK8), \
SRC := $(TOPDIR)/src/java.base/share/classes, \
diff --git a/make/RunTests.gmk b/make/RunTests.gmk
index 770b81af8aca0..b82d2b2b6015d 100644
--- a/make/RunTests.gmk
+++ b/make/RunTests.gmk
@@ -178,7 +178,8 @@ ifeq ($(TEST_JOBS), 0)
c = c * $(TEST_JOBS_FACTOR_JDL); \
c = c * $(TEST_JOBS_FACTOR_MACHINE); \
if (c < 1) c = 1; \
- printf "%.0f", c; \
+ c = c + 0.5; \
+ printf "%d", c; \
}')
endif
@@ -356,7 +357,7 @@ ExpandJtregPath = \
# with test id: dir/Test.java#selection -> Test.java#selection -> .java#selection -> #selection
# without: dir/Test.java -> Test.java -> .java -> <>
TestID = \
- $(subst .sh,,$(subst .html,,$(subst .java,,$(suffix $(notdir $1)))))
+ $(subst .jasm,,$(subst .sh,,$(subst .html,,$(subst .java,,$(suffix $(notdir $1))))))
# The test id starting with a hash (#testid) will be stripped by all
# evals in ParseJtregTestSelectionInner and will be reinserted by calling
@@ -800,8 +801,10 @@ define SetupRunJtregTestBody
$1_JTREG_BASIC_OPTIONS += -e:JIB_DATA_DIR
# If running on Windows, propagate the _NT_SYMBOL_PATH to enable
# symbol lookup in hserr files
+ # The minidumps are disabled by default on client Windows, so enable them
ifeq ($$(call isTargetOs, windows), true)
$1_JTREG_BASIC_OPTIONS += -e:_NT_SYMBOL_PATH
+ $1_JTREG_BASIC_OPTIONS += -vmoption:-XX:+CreateCoredumpOnCrash
else ifeq ($$(call isTargetOs, linux), true)
$1_JTREG_BASIC_OPTIONS += -e:_JVM_DWARF_PATH=$$(SYMBOLS_IMAGE_DIR)
endif
@@ -860,11 +863,12 @@ define SetupRunJtregTestBody
$$(eval $$(call SetupRunJtregTestCustom, $1))
- clean-workdir-$1:
+ clean-outputdirs-$1:
$$(RM) -r $$($1_TEST_SUPPORT_DIR)
+ $$(RM) -r $$($1_TEST_RESULTS_DIR)
$1_COMMAND_LINE := \
- $$(JAVA) $$($1_JTREG_LAUNCHER_OPTIONS) \
+ $$(JTREG_JAVA) $$($1_JTREG_LAUNCHER_OPTIONS) \
-Dprogram=jtreg -jar $$(JT_HOME)/lib/jtreg.jar \
$$($1_JTREG_BASIC_OPTIONS) \
-testjdk:$$(JDK_UNDER_TEST) \
@@ -907,7 +911,7 @@ define SetupRunJtregTestBody
done
endif
- run-test-$1: pre-run-test clean-workdir-$1
+ run-test-$1: pre-run-test clean-outputdirs-$1
$$(call LogWarn)
$$(call LogWarn, Running test '$$($1_TEST)')
$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR) \
@@ -944,9 +948,9 @@ define SetupRunJtregTestBody
$$(eval $1_TOTAL := 1) \
)
- $1: run-test-$1 parse-test-$1 clean-workdir-$1
+ $1: run-test-$1 parse-test-$1 clean-outputdirs-$1
- TARGETS += $1 run-test-$1 parse-test-$1 clean-workdir-$1
+ TARGETS += $1 run-test-$1 parse-test-$1 clean-outputdirs-$1
TEST_TARGETS += parse-test-$1
endef
diff --git a/make/RunTestsPrebuilt.gmk b/make/RunTestsPrebuilt.gmk
index ca20ccf26ad20..93febe5ed31d9 100644
--- a/make/RunTestsPrebuilt.gmk
+++ b/make/RunTestsPrebuilt.gmk
@@ -122,6 +122,7 @@ $(eval $(call SetupVariable,JT_HOME))
$(eval $(call SetupVariable,JDK_IMAGE_DIR,$(OUTPUTDIR)/images/jdk))
$(eval $(call SetupVariable,TEST_IMAGE_DIR,$(OUTPUTDIR)/images/test))
$(eval $(call SetupVariable,SYMBOLS_IMAGE_DIR,$(OUTPUTDIR)/images/symbols,NO_CHECK))
+$(eval $(call SetupVariable,JTREG_JDK,$(BOOT_JDK)))
# Provide default values for tools that we need
$(eval $(call SetupVariable,MAKE,make,NO_CHECK))
@@ -157,6 +158,10 @@ ifeq ($(UNAME_OS), CYGWIN)
OPENJDK_TARGET_OS := windows
OPENJDK_TARGET_OS_TYPE := windows
OPENJDK_TARGET_OS_ENV := windows.cygwin
+else ifeq ($(UNAME_OS), MINGW64)
+ OPENJDK_TARGET_OS := windows
+ OPENJDK_TARGET_OS_TYPE := windows
+ OPENJDK_TARGET_OS_ENV := windows.msys2
else
OPENJDK_TARGET_OS_TYPE:=unix
ifeq ($(UNAME_OS), Linux)
@@ -169,6 +174,9 @@ else
OPENJDK_TARGET_OS_ENV := $(OPENJDK_TARGET_OS)
endif
+# Sanity check env detection
+$(info Detected target OS, type and env: [$(OPENJDK_TARGET_OS)] [$(OPENJDK_TARGET_OS_TYPE)] [$(OPENJDK_TARGET_OS_ENV)])
+
# Assume little endian unless otherwise specified
OPENJDK_TARGET_CPU_ENDIAN := little
@@ -248,6 +256,7 @@ $(call CreateNewSpec, $(NEW_SPEC), \
TOPDIR := $(TOPDIR), \
OUTPUTDIR := $(OUTPUTDIR), \
BOOT_JDK := $(BOOT_JDK), \
+ JTREG_JDK := $(JTREG_JDK), \
JT_HOME := $(JT_HOME), \
JDK_IMAGE_DIR := $(JDK_IMAGE_DIR), \
JCOV_IMAGE_DIR := $(JCOV_IMAGE_DIR), \
diff --git a/make/RunTestsPrebuiltSpec.gmk b/make/RunTestsPrebuiltSpec.gmk
index 585fd24fda337..7fcaf56ff527e 100644
--- a/make/RunTestsPrebuiltSpec.gmk
+++ b/make/RunTestsPrebuiltSpec.gmk
@@ -124,6 +124,8 @@ JAR := $(FIXPATH) $(JAR_CMD)
JLINK := $(FIXPATH) $(JLINK_CMD)
JMOD := $(FIXPATH) $(JMOD_CMD)
+JTREG_JAVA := $(FIXPATH) $(JTREG_JDK)/bin/java $(JAVA_FLAGS_BIG) $(JAVA_FLAGS)
+
BUILD_JAVA := $(JDK_IMAGE_DIR)/bin/JAVA
################################################################################
# Some common tools. Assume most common name and no path.
diff --git a/make/ZipSecurity.gmk b/make/ZipSecurity.gmk
index f7435f05efe6f..489bbef49f68b 100644
--- a/make/ZipSecurity.gmk
+++ b/make/ZipSecurity.gmk
@@ -88,9 +88,9 @@ ifeq ($(call isTargetOs, windows), true)
$(eval $(call SetupZipArchive,BUILD_JGSS_BIN_ZIP, \
SRC := $(SUPPORT_OUTPUTDIR), \
INCLUDE_FILES := modules_libs/java.security.jgss/w2k_lsa_auth.dll \
- modules_libs/java.security.jgss/w2k_lsa_auth.diz \
- modules_libs/java.security.jgss/w2k_lsa_auth.map \
- modules_libs/java.security.jgss/w2k_lsa_auth.pdb, \
+ modules_libs/java.security.jgss/w2k_lsa_auth.dll.diz \
+ modules_libs/java.security.jgss/w2k_lsa_auth.dll.map \
+ modules_libs/java.security.jgss/w2k_lsa_auth.dll.pdb, \
ZIP := $(IMAGES_OUTPUTDIR)/$(JGSS_ZIP_NAME)))
TARGETS += $(IMAGES_OUTPUTDIR)/$(JGSS_ZIP_NAME)
diff --git a/make/autoconf/basic.m4 b/make/autoconf/basic.m4
index f02c0e3370761..02c477f4c9fda 100644
--- a/make/autoconf/basic.m4
+++ b/make/autoconf/basic.m4
@@ -406,9 +406,9 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
# WARNING: This might be a bad thing to do. You need to be sure you want to
# have a configuration in this directory. Do some sanity checks!
- if test ! -e "$OUTPUTDIR/spec.gmk"; then
- # If we have a spec.gmk, we have run here before and we are OK. Otherwise, check for
- # other files
+ if test ! -e "$OUTPUTDIR/spec.gmk" && test ! -e "$OUTPUTDIR/configure-support/generated-configure.sh"; then
+ # If we have a spec.gmk or configure-support/generated-configure.sh,
+ # we have run here before and we are OK. Otherwise, check for other files
files_present=`$LS $OUTPUTDIR`
# Configure has already touched config.log and confdefs.h in the current dir when this check
# is performed.
@@ -423,8 +423,9 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
AC_MSG_NOTICE([Current directory is $CONFIGURE_START_DIR.])
AC_MSG_NOTICE([Since this is not the source root, configure will output the configuration here])
AC_MSG_NOTICE([(as opposed to creating a configuration in /build/).])
- AC_MSG_NOTICE([However, this directory is not empty. This is not allowed, since it could])
- AC_MSG_NOTICE([seriously mess up just about everything.])
+ AC_MSG_NOTICE([However, this directory is not empty, additionally to some allowed files])
+ AC_MSG_NOTICE([it contains $filtered_files.])
+ AC_MSG_NOTICE([This is not allowed, since it could seriously mess up just about everything.])
AC_MSG_NOTICE([Try 'cd $TOPDIR' and restart configure])
AC_MSG_NOTICE([(or create a new empty directory and cd to it).])
AC_MSG_ERROR([Will not continue creating configuration in $CONFIGURE_START_DIR])
diff --git a/make/autoconf/flags-cflags.m4 b/make/autoconf/flags-cflags.m4
index f8fbe14cc38d3..06a62c9a8f148 100644
--- a/make/autoconf/flags-cflags.m4
+++ b/make/autoconf/flags-cflags.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -117,6 +117,11 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${DEBUG_PREFIX_CFLAGS}],
IF_FALSE: [
DEBUG_PREFIX_CFLAGS=
+ ],
+ IF_TRUE: [
+ # Add debug prefix map gcc system include paths, as they cause
+ # non-deterministic debug paths depending on gcc path location.
+ DEBUG_PREFIX_MAP_GCC_INCLUDE_PATHS
]
)
fi
@@ -158,6 +163,55 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
AC_SUBST(ASFLAGS_DEBUG_SYMBOLS)
])
+# gcc will embed the full system include paths in the debug info
+# resulting in non-deterministic debug symbol files and thus
+# non-reproducible native libraries if gcc includes are located
+# in different paths.
+# Add -fdebug-prefix-map'ings for root and gcc include paths,
+# pointing to a common set of folders so that the binaries are deterministic:
+# root include : /usr/include
+# gcc include : /usr/local/gcc_include
+# g++ include : /usr/local/gxx_include
+AC_DEFUN([DEBUG_PREFIX_MAP_GCC_INCLUDE_PATHS],
+[
+ # Determine gcc system include paths.
+ # Assume default roots to start with:
+ GCC_ROOT_INCLUDE="/usr/include"
+
+ # Determine if a sysroot or devkit is specified
+ if test "x$SYSROOT" != "x"; then
+ GCC_ROOT_INCLUDE="${SYSROOT%/}/usr/include"
+ fi
+
+ # Add root include mapping => /usr/include
+ GCC_INCLUDE_DEBUG_MAP_FLAGS="-fdebug-prefix-map=${GCC_ROOT_INCLUDE}/=/usr/include/"
+
+ # Add gcc system include mapping => /usr/local/gcc_include
+ # Find location of stddef.h using build C compiler
+ GCC_SYSTEM_INCLUDE=`$ECHO "#include <stddef.h>" | \
+ $CC $CFLAGS -v -E - 2>&1 | \
+ $GREP stddef | $TAIL -1 | $TR -s " " | $CUT -d'"' -f2`
+ if test "x$GCC_SYSTEM_INCLUDE" != "x"; then
+ GCC_SYSTEM_INCLUDE=`$DIRNAME $GCC_SYSTEM_INCLUDE`
+ GCC_INCLUDE_DEBUG_MAP_FLAGS="$GCC_INCLUDE_DEBUG_MAP_FLAGS \
+ -fdebug-prefix-map=${GCC_SYSTEM_INCLUDE}/=/usr/local/gcc_include/"
+ fi
+
+ # Add g++ system include mapping => /usr/local/gxx_include
+ # Find location of cstddef using build C++ compiler
+ GXX_SYSTEM_INCLUDE=`$ECHO "#include <cstddef>" | \
+ $CXX $CXXFLAGS -v -E -x c++ - 2>&1 | \
+ $GREP cstddef | $TAIL -1 | $TR -s " " | $CUT -d'"' -f2`
+ if test "x$GXX_SYSTEM_INCLUDE" != "x"; then
+ GXX_SYSTEM_INCLUDE=`$DIRNAME $GXX_SYSTEM_INCLUDE`
+ GCC_INCLUDE_DEBUG_MAP_FLAGS="$GCC_INCLUDE_DEBUG_MAP_FLAGS \
+ -fdebug-prefix-map=${GXX_SYSTEM_INCLUDE}/=/usr/local/gxx_include/"
+ fi
+
+ # Add to debug prefix cflags
+ DEBUG_PREFIX_CFLAGS="$DEBUG_PREFIX_CFLAGS $GCC_INCLUDE_DEBUG_MAP_FLAGS"
+])
+
AC_DEFUN([FLAGS_SETUP_WARNINGS],
[
# Set default value.
@@ -425,7 +479,7 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
[
#### OS DEFINES, these should be independent on toolchain
if test "x$OPENJDK_TARGET_OS" = xlinux; then
- CFLAGS_OS_DEF_JVM="-DLINUX"
+ CFLAGS_OS_DEF_JVM="-DLINUX -D_FILE_OFFSET_BITS=64"
CFLAGS_OS_DEF_JDK="-D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE"
elif test "x$OPENJDK_TARGET_OS" = xmacosx; then
CFLAGS_OS_DEF_JVM="-D_ALLBSD_SOURCE -D_DARWIN_C_SOURCE -D_XOPEN_SOURCE"
@@ -816,6 +870,7 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP],
REPRODUCIBLE_CFLAGS=
]
)
+ AC_SUBST(REPRODUCIBLE_CFLAGS)
fi
# Prevent the __FILE__ macro from generating absolute paths into the built
@@ -849,6 +904,22 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP],
FILE_MACRO_CFLAGS=
]
)
+ if test "x$FILE_MACRO_CFLAGS" != x; then
+ # Add -pathmap for all VS system include paths using Windows
+ # full Long path name that is generated by the compiler
+ # Not enabled under WSL as there is no easy way to obtain the
+ # Windows full long paths, thus reproducible WSL builds will
+ # depend on building with the same VS toolchain install location.
+ if test "x$OPENJDK_BUILD_OS_ENV" != "xwindows.wsl1" && test "x$OPENJDK_BUILD_OS_ENV" != "xwindows.wsl2"; then
+ for ipath in ${$3SYSROOT_CFLAGS}; do
+ if test "x${ipath:0:2}" == "x-I"; then
+ ipath_path=${ipath#"-I"}
+ UTIL_FIXUP_WIN_LONG_PATH(ipath_path)
+ FILE_MACRO_CFLAGS="$FILE_MACRO_CFLAGS -pathmap:\"$ipath_path\"=vsi"
+ fi
+ done
+ fi
+ fi
fi
AC_MSG_CHECKING([how to prevent absolute paths in output])
diff --git a/make/autoconf/flags-other.m4 b/make/autoconf/flags-other.m4
index 8062a32601f3d..0af7c02cff6c2 100644
--- a/make/autoconf/flags-other.m4
+++ b/make/autoconf/flags-other.m4
@@ -88,6 +88,16 @@ AC_DEFUN([FLAGS_SETUP_RCFLAGS],
AC_SUBST(RCFLAGS)
])
+AC_DEFUN([FLAGS_SETUP_NMFLAGS],
+[
+ # On AIX, we need to set NM flag -X64 for processing 64bit object files
+ if test "x$OPENJDK_TARGET_OS" = xaix; then
+ NMFLAGS="-X64"
+ fi
+
+ AC_SUBST(NMFLAGS)
+])
+
################################################################################
# platform independent
AC_DEFUN([FLAGS_SETUP_ASFLAGS],
diff --git a/make/autoconf/flags.m4 b/make/autoconf/flags.m4
index ddb2b4c8e0abc..7060edeff73b2 100644
--- a/make/autoconf/flags.m4
+++ b/make/autoconf/flags.m4
@@ -428,6 +428,7 @@ AC_DEFUN([FLAGS_SETUP_FLAGS],
FLAGS_SETUP_ARFLAGS
FLAGS_SETUP_STRIPFLAGS
FLAGS_SETUP_RCFLAGS
+ FLAGS_SETUP_NMFLAGS
FLAGS_SETUP_ASFLAGS
FLAGS_SETUP_ASFLAGS_CPU_DEP([TARGET])
diff --git a/make/autoconf/jdk-options.m4 b/make/autoconf/jdk-options.m4
index f08cc6ddd4150..f56081223a600 100644
--- a/make/autoconf/jdk-options.m4
+++ b/make/autoconf/jdk-options.m4
@@ -822,6 +822,9 @@ AC_DEFUN([JDKOPT_CHECK_CODESIGN_PARAMS],
$RM "$CODESIGN_TESTFILE"
$TOUCH "$CODESIGN_TESTFILE"
CODESIGN_SUCCESS=false
+
+ $ECHO "check codesign, calling $CODESIGN $PARAMS $CODESIGN_TESTFILE" >&AS_MESSAGE_LOG_FD
+
eval \"$CODESIGN\" $PARAMS \"$CODESIGN_TESTFILE\" 2>&AS_MESSAGE_LOG_FD \
>&AS_MESSAGE_LOG_FD && CODESIGN_SUCCESS=true
$RM "$CODESIGN_TESTFILE"
diff --git a/make/autoconf/jdk-version.m4 b/make/autoconf/jdk-version.m4
index 6a7662556fdaf..7c9ecad77791b 100644
--- a/make/autoconf/jdk-version.m4
+++ b/make/autoconf/jdk-version.m4
@@ -110,6 +110,15 @@ AC_DEFUN_ONCE([JDKVER_SETUP_JDK_VERSION_NUMBERS],
CHECK_VALUE: [UTIL_CHECK_STRING_NON_EMPTY_PRINTABLE])
AC_SUBST(COMPANY_NAME)
+ # Set the JDK RC Company name
+ # Otherwise uses the value set for "vendor-name".
+ UTIL_ARG_WITH(NAME: jdk-rc-company-name, TYPE: string,
+ DEFAULT: $COMPANY_NAME,
+ DESC: [Set JDK RC company name. This is used for CompanyName properties of MS Windows binaries.],
+ DEFAULT_DESC: [from branding.conf],
+ CHECK_VALUE: [UTIL_CHECK_STRING_NON_EMPTY_PRINTABLE])
+ AC_SUBST(JDK_RC_COMPANY_NAME)
+
# The vendor URL, if any
# Only set VENDOR_URL if '--with-vendor-url' was used and is not empty.
# Otherwise we will use the value from "branding.conf" included above.
diff --git a/make/autoconf/lib-cups.m4 b/make/autoconf/lib-cups.m4
index 0a7df8b381be2..27d5efbc8c9e5 100644
--- a/make/autoconf/lib-cups.m4
+++ b/make/autoconf/lib-cups.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -68,12 +68,20 @@ AC_DEFUN_ONCE([LIB_SETUP_CUPS],
fi
fi
if test "x$CUPS_FOUND" = xno; then
- # Are the cups headers installed in the default /usr/include location?
- AC_CHECK_HEADERS([cups/cups.h cups/ppd.h], [
- CUPS_FOUND=yes
- CUPS_CFLAGS=
- DEFAULT_CUPS=yes
- ])
+ # Are the cups headers installed in the default AIX or /usr/include location?
+ if test "x$OPENJDK_TARGET_OS" = "xaix"; then
+ AC_CHECK_HEADERS([/opt/freeware/include/cups/cups.h /opt/freeware/include/cups/ppd.h], [
+ CUPS_FOUND=yes
+ CUPS_CFLAGS="-I/opt/freeware/include"
+ DEFAULT_CUPS=yes
+ ])
+ else
+ AC_CHECK_HEADERS([cups/cups.h cups/ppd.h], [
+ CUPS_FOUND=yes
+ CUPS_CFLAGS=
+ DEFAULT_CUPS=yes
+ ])
+ fi
fi
if test "x$CUPS_FOUND" = xno; then
HELP_MSG_MISSING_DEPENDENCY([cups])
diff --git a/make/autoconf/lib-tests.m4 b/make/autoconf/lib-tests.m4
index aa02ac4ef976f..f44a64e18b443 100644
--- a/make/autoconf/lib-tests.m4
+++ b/make/autoconf/lib-tests.m4
@@ -28,7 +28,7 @@
################################################################################
# Minimum supported versions
-JTREG_MINIMUM_VERSION=7.2
+JTREG_MINIMUM_VERSION=7.3.1
GTEST_MINIMUM_VERSION=1.13.0
###############################################################################
@@ -227,12 +227,47 @@ AC_DEFUN_ONCE([LIB_TESTS_SETUP_JTREG],
UTIL_FIXUP_PATH(JT_HOME)
AC_SUBST(JT_HOME)
+ # Specify a JDK for running jtreg. Defaults to the BOOT_JDK.
+ AC_ARG_WITH(jtreg-jdk, [AS_HELP_STRING([--with-jtreg-jdk],
+ [path to JDK for running jtreg @<:@BOOT_JDK@:>@])])
+
+ AC_MSG_CHECKING([for jtreg jdk])
+ if test "x${with_jtreg_jdk}" != x; then
+ if test "x${with_jtreg_jdk}" = xno; then
+ AC_MSG_RESULT([no, jtreg jdk not specified])
+ elif test "x${with_jtreg_jdk}" = xyes; then
+ AC_MSG_RESULT([not specified])
+ AC_MSG_ERROR([--with-jtreg-jdk needs a value])
+ else
+ JTREG_JDK="${with_jtreg_jdk}"
+ AC_MSG_RESULT([$JTREG_JDK])
+ UTIL_FIXUP_PATH(JTREG_JDK)
+ if test ! -f "$JTREG_JDK/bin/java"; then
+ AC_MSG_ERROR([Could not find jtreg java at $JTREG_JDK/bin/java])
+ fi
+ fi
+ else
+ JTREG_JDK="${BOOT_JDK}"
+ AC_MSG_RESULT([no, using BOOT_JDK])
+ fi
+
+ UTIL_FIXUP_PATH(JTREG_JDK)
+ AC_SUBST([JTREG_JDK])
+ # For use in the configure script
+ JTREG_JAVA="$FIXPATH $JTREG_JDK/bin/java"
+
# Verify jtreg version
if test "x$JT_HOME" != x; then
+ AC_MSG_CHECKING([jtreg jar existence])
+ if test ! -f "$JT_HOME/lib/jtreg.jar"; then
+ AC_MSG_ERROR([Could not find jtreg jar at $JT_HOME/lib/jtreg.jar])
+ fi
+
AC_MSG_CHECKING([jtreg version number])
# jtreg -version looks like this: "jtreg 6.1+1-19"
# Extract actual version part ("6.1" in this case)
- jtreg_version_full=`$JAVA -jar $JT_HOME/lib/jtreg.jar -version | $HEAD -n 1 | $CUT -d ' ' -f 2`
+ jtreg_version_full=$($JTREG_JAVA -jar $JT_HOME/lib/jtreg.jar -version | $HEAD -n 1 | $CUT -d ' ' -f 2)
+
jtreg_version=${jtreg_version_full/%+*}
AC_MSG_RESULT([$jtreg_version])
diff --git a/make/autoconf/lib-x11.m4 b/make/autoconf/lib-x11.m4
index 97a3f24a2dbf7..b1902a432a1e0 100644
--- a/make/autoconf/lib-x11.m4
+++ b/make/autoconf/lib-x11.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,7 @@ AC_DEFUN_ONCE([LIB_SETUP_X11],
X_CFLAGS=
X_LIBS=
else
+ x_libraries_orig="$x_libraries"
if test "x${with_x}" = xno; then
AC_MSG_ERROR([It is not possible to disable the use of X11. Remove the --without-x option.])
@@ -48,6 +49,7 @@ AC_DEFUN_ONCE([LIB_SETUP_X11],
fi
if test "x$x_libraries" = xNONE; then
x_libraries="${with_x}/lib"
+ x_libraries_orig="$x_libraries"
fi
else
# Check if the user has specified sysroot, but not --with-x, --x-includes or --x-libraries.
@@ -82,8 +84,8 @@ AC_DEFUN_ONCE([LIB_SETUP_X11],
AC_PATH_XTRA
# AC_PATH_XTRA creates X_LIBS and sometimes adds -R flags. When cross compiling
- # this doesn't make sense so we remove it.
- if test "x$COMPILE_TYPE" = xcross; then
+ # this doesn't make sense so we remove it; same for sysroot (devkit).
+ if test "x$COMPILE_TYPE" = xcross || (test "x$SYSROOT" != "x" && test "x$x_libraries_orig" = xNONE); then
X_LIBS=`$ECHO $X_LIBS | $SED 's/-R \{0,1\}[[^ ]]*//g'`
fi
diff --git a/make/autoconf/libraries.m4 b/make/autoconf/libraries.m4
index ebad69d9dcf75..feb0bcf3e753e 100644
--- a/make/autoconf/libraries.m4
+++ b/make/autoconf/libraries.m4
@@ -109,12 +109,6 @@ AC_DEFUN([LIB_SETUP_JVM_LIBS],
BASIC_JVM_LIBS_$1="$BASIC_JVM_LIBS_$1 -latomic"
fi
fi
-
- # Because RISC-V only has word-sized atomics, it requires libatomic where
- # other common architectures do not, so link libatomic by default.
- if test "x$OPENJDK_$1_OS" = xlinux && test "x$OPENJDK_$1_CPU" = xriscv64; then
- BASIC_JVM_LIBS_$1="$BASIC_JVM_LIBS_$1 -latomic"
- fi
])
################################################################################
diff --git a/make/autoconf/platform.m4 b/make/autoconf/platform.m4
index 0c987e9e9ab14..df610cc489bd3 100644
--- a/make/autoconf/platform.m4
+++ b/make/autoconf/platform.m4
@@ -567,8 +567,6 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HELPER],
HOTSPOT_$1_CPU_DEFINE=PPC64
elif test "x$OPENJDK_$1_CPU" = xppc64le; then
HOTSPOT_$1_CPU_DEFINE=PPC64
- elif test "x$OPENJDK_$1_CPU" = xriscv32; then
- HOTSPOT_$1_CPU_DEFINE=RISCV32
elif test "x$OPENJDK_$1_CPU" = xriscv64; then
HOTSPOT_$1_CPU_DEFINE=RISCV64
@@ -577,10 +575,14 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HELPER],
HOTSPOT_$1_CPU_DEFINE=SPARC
elif test "x$OPENJDK_$1_CPU" = xppc; then
HOTSPOT_$1_CPU_DEFINE=PPC32
+ elif test "x$OPENJDK_$1_CPU" = xriscv32; then
+ HOTSPOT_$1_CPU_DEFINE=RISCV32
elif test "x$OPENJDK_$1_CPU" = xs390; then
HOTSPOT_$1_CPU_DEFINE=S390
elif test "x$OPENJDK_$1_CPU" = xs390x; then
HOTSPOT_$1_CPU_DEFINE=S390
+ elif test "x$OPENJDK_$1_CPU" = xloongarch64; then
+ HOTSPOT_$1_CPU_DEFINE=LOONGARCH64
elif test "x$OPENJDK_$1_CPU" != x; then
HOTSPOT_$1_CPU_DEFINE=$(echo $OPENJDK_$1_CPU | tr a-z A-Z)
fi
diff --git a/make/autoconf/spec.gmk.in b/make/autoconf/spec.gmk.in
index 9419562b654e5..4d7abc334277c 100644
--- a/make/autoconf/spec.gmk.in
+++ b/make/autoconf/spec.gmk.in
@@ -191,6 +191,7 @@ PRODUCT_NAME:=@PRODUCT_NAME@
PRODUCT_SUFFIX:=@PRODUCT_SUFFIX@
JDK_RC_PLATFORM_NAME:=@JDK_RC_PLATFORM_NAME@
JDK_RC_NAME:=@JDK_RC_NAME@
+JDK_RC_COMPANY_NAME:=@JDK_RC_COMPANY_NAME@
COMPANY_NAME:=@COMPANY_NAME@
HOTSPOT_VM_DISTRO:=@HOTSPOT_VM_DISTRO@
MACOSX_BUNDLE_NAME_BASE=@MACOSX_BUNDLE_NAME_BASE@
@@ -424,6 +425,7 @@ LIBFFI_CFLAGS:=@LIBFFI_CFLAGS@
ENABLE_LIBFFI_BUNDLING:=@ENABLE_LIBFFI_BUNDLING@
LIBFFI_LIB_FILE:=@LIBFFI_LIB_FILE@
FILE_MACRO_CFLAGS := @FILE_MACRO_CFLAGS@
+REPRODUCIBLE_CFLAGS := @REPRODUCIBLE_CFLAGS@
BRANCH_PROTECTION_CFLAGS := @BRANCH_PROTECTION_CFLAGS@
STATIC_LIBS_CFLAGS := @STATIC_LIBS_CFLAGS@
@@ -601,6 +603,7 @@ AR := @AR@
ARFLAGS:=@ARFLAGS@
NM:=@NM@
+NMFLAGS:=@NMFLAGS@
STRIP:=@STRIP@
OBJDUMP:=@OBJDUMP@
CXXFILT:=@CXXFILT@
@@ -678,6 +681,9 @@ JAR = $(JAR_CMD)
JLINK = $(JLINK_CMD)
JMOD = $(JMOD_CMD)
+JTREG_JDK := @JTREG_JDK@
+JTREG_JAVA = @FIXPATH@ $(JTREG_JDK)/bin/java $(JAVA_FLAGS_BIG) $(JAVA_FLAGS)
+
BUILD_JAVA_FLAGS := @BOOTCYCLE_JVM_ARGS_BIG@
BUILD_JAVA=@FIXPATH@ $(BUILD_JDK)/bin/java $(BUILD_JAVA_FLAGS)
BUILD_JAVAC=@FIXPATH@ $(BUILD_JDK)/bin/javac
diff --git a/make/autoconf/toolchain.m4 b/make/autoconf/toolchain.m4
index c89aab11d4bcb..7a24815d163f5 100644
--- a/make/autoconf/toolchain.m4
+++ b/make/autoconf/toolchain.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -389,6 +389,10 @@ AC_DEFUN_ONCE([TOOLCHAIN_POST_DETECTION],
# This is necessary since AC_PROG_CC defaults CFLAGS to "-g -O2"
CFLAGS="$ORG_CFLAGS"
CXXFLAGS="$ORG_CXXFLAGS"
+
+ # filter out some unwanted additions autoconf may add to CXX; we saw this on macOS with autoconf 2.72
+ UTIL_GET_NON_MATCHING_VALUES(cxx_filtered, $CXX, -std=c++11 -std=gnu++11)
+ CXX="$cxx_filtered"
])
# Check if a compiler is of the toolchain type we expect, and save the version
@@ -804,7 +808,11 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_EXTRA],
case $TOOLCHAIN_TYPE in
gcc|clang)
- UTIL_REQUIRE_TOOLCHAIN_PROGS(CXXFILT, c++filt)
+ if test "x$OPENJDK_TARGET_OS" = xaix; then
+ UTIL_REQUIRE_TOOLCHAIN_PROGS(CXXFILT, ibm-llvm-cxxfilt)
+ else
+ UTIL_REQUIRE_TOOLCHAIN_PROGS(CXXFILT, c++filt)
+ fi
;;
esac
])
diff --git a/make/autoconf/util.m4 b/make/autoconf/util.m4
index 83349aea99d0b..76426005f81d0 100644
--- a/make/autoconf/util.m4
+++ b/make/autoconf/util.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -199,7 +199,7 @@ AC_DEFUN([UTIL_GET_NON_MATCHING_VALUES],
if test -z "$legal_values"; then
$1="$2"
else
- result=`$GREP -Fvx "$legal_values" <<< "$values_to_check" | $GREP -v '^$'`
+ result=`$GREP -Fvx -- "$legal_values" <<< "$values_to_check" | $GREP -v '^$'`
$1=${result//$'\n'/ }
fi
])
@@ -226,7 +226,7 @@ AC_DEFUN([UTIL_GET_MATCHING_VALUES],
if test -z "$illegal_values"; then
$1=""
else
- result=`$GREP -Fx "$illegal_values" <<< "$values_to_check" | $GREP -v '^$'`
+ result=`$GREP -Fx -- "$illegal_values" <<< "$values_to_check" | $GREP -v '^$'`
$1=${result//$'\n'/ }
fi
])
diff --git a/make/autoconf/util_paths.m4 b/make/autoconf/util_paths.m4
index 3dd6ea7b489a2..8b2c776397bf6 100644
--- a/make/autoconf/util_paths.m4
+++ b/make/autoconf/util_paths.m4
@@ -118,6 +118,24 @@ AC_DEFUN([UTIL_FIXUP_PATH],
fi
])
+##############################################################################
+# Fixup path to be a Windows full long path
+# Note: Only supported with cygwin/msys2 (cygpath tool)
+AC_DEFUN([UTIL_FIXUP_WIN_LONG_PATH],
+[
+ # Only process if variable expands to non-empty
+ path="[$]$1"
+ if test "x$path" != x; then
+ if test "x$OPENJDK_BUILD_OS" = "xwindows"; then
+ win_path=$($PATHTOOL -wl "$path")
+ if test "x$win_path" != "x$path"; then
+ $1="$win_path"
+ fi
+ fi
+ fi
+])
+
+
###############################################################################
# Check if the given file is a unix-style or windows-style executable, that is,
# if it expects paths in unix-style or windows-style.
diff --git a/make/common/JdkNativeCompilation.gmk b/make/common/JdkNativeCompilation.gmk
index 6a963ac2c498a..1a1333cf51707 100644
--- a/make/common/JdkNativeCompilation.gmk
+++ b/make/common/JdkNativeCompilation.gmk
@@ -98,7 +98,7 @@ GLOBAL_VERSION_INFO_RESOURCE := $(TOPDIR)/src/java.base/windows/native/common/ve
JDK_RCFLAGS=$(RCFLAGS) \
-D"JDK_VERSION_STRING=$(VERSION_STRING)" \
- -D"JDK_COMPANY=$(COMPANY_NAME)" \
+ -D"JDK_COMPANY=$(JDK_RC_COMPANY_NAME)" \
-D"JDK_VER=$(VERSION_NUMBER_FOUR_POSITIONS)" \
-D"JDK_COPYRIGHT=Copyright \xA9 $(COPYRIGHT_YEAR)" \
-D"JDK_NAME=$(JDK_RC_NAME) $(VERSION_SHORT)" \
diff --git a/make/common/NativeCompilation.gmk b/make/common/NativeCompilation.gmk
index 0d7ab6a7ef339..65843829e4fdd 100644
--- a/make/common/NativeCompilation.gmk
+++ b/make/common/NativeCompilation.gmk
@@ -48,12 +48,12 @@ define GetSymbols
$(SED) -e 's/#.*//;s/global://;s/local://;s/\;//;s/^[ ]*/_/;/^_$$$$/d' | \
$(EGREP) -v "JNI_OnLoad|JNI_OnUnload|Agent_OnLoad|Agent_OnUnload|Agent_OnAttach" > \
$$(@D)/$$(basename $$(@F)).symbols || true; \
- $(NM) $$($1_TARGET) | $(GREP) " T " | \
+ $(NM) $(NMFLAGS) $$($1_TARGET) | $(GREP) " T " | \
$(EGREP) "JNI_OnLoad|JNI_OnUnload|Agent_OnLoad|Agent_OnUnload|Agent_OnAttach" | \
$(CUT) -d ' ' -f 3 >> $$(@D)/$$(basename $$(@F)).symbols || true;\
else \
$(ECHO) "Getting symbols from nm"; \
- $(NM) -m $$($1_TARGET) | $(GREP) "__TEXT" | \
+ $(NM) $(NMFLAGS) -m $$($1_TARGET) | $(GREP) "__TEXT" | \
$(EGREP) -v "non-external|private extern|__TEXT,__eh_frame" | \
$(SED) -e 's/.* //' > $$(@D)/$$(basename $$(@F)).symbols; \
fi
@@ -1050,13 +1050,13 @@ define SetupNativeCompilationBody
ifneq ($$($1_TYPE), STATIC_LIBRARY)
# Generate debuginfo files.
ifeq ($(call isTargetOs, windows), true)
- $1_EXTRA_LDFLAGS += -debug "-pdb:$$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).pdb" \
- "-map:$$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).map"
+ $1_EXTRA_LDFLAGS += -debug "-pdb:$$($1_SYMBOLS_DIR)/$$($1_BASENAME).pdb" \
+ "-map:$$($1_SYMBOLS_DIR)/$$($1_BASENAME).map"
ifeq ($(SHIP_DEBUG_SYMBOLS), public)
- $1_EXTRA_LDFLAGS += "-pdbstripped:$$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).stripped.pdb"
+ $1_EXTRA_LDFLAGS += "-pdbstripped:$$($1_SYMBOLS_DIR)/$$($1_BASENAME).stripped.pdb"
endif
- $1_DEBUGINFO_FILES := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).pdb \
- $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).map
+ $1_DEBUGINFO_FILES := $$($1_SYMBOLS_DIR)/$$($1_BASENAME).pdb \
+ $$($1_SYMBOLS_DIR)/$$($1_BASENAME).map
else ifeq ($(call isTargetOs, linux), true)
$1_DEBUGINFO_FILES := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).debuginfo
@@ -1104,7 +1104,11 @@ define SetupNativeCompilationBody
$1 += $$($1_DEBUGINFO_FILES)
ifeq ($$($1_ZIP_EXTERNAL_DEBUG_SYMBOLS), true)
- $1_DEBUGINFO_ZIP := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).diz
+ ifeq ($(call isTargetOs, windows), true)
+ $1_DEBUGINFO_ZIP := $$($1_SYMBOLS_DIR)/$$($1_BASENAME).diz
+ else
+ $1_DEBUGINFO_ZIP := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).diz
+ endif
$1 += $$($1_DEBUGINFO_ZIP)
# The dependency on TARGET is needed for debuginfo files
diff --git a/make/conf/github-actions.conf b/make/conf/github-actions.conf
index 35d26baaccb3f..3a08380a8b6aa 100644
--- a/make/conf/github-actions.conf
+++ b/make/conf/github-actions.conf
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
# Versions and download locations for dependencies used by GitHub Actions (GHA)
GTEST_VERSION=1.13.0
-JTREG_VERSION=7.2+1
+JTREG_VERSION=7.3.1+1
LINUX_X64_BOOT_JDK_EXT=tar.gz
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_linux-x64_bin.tar.gz
@@ -36,6 +36,10 @@ MACOS_X64_BOOT_JDK_EXT=tar.gz
MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_macos-x64_bin.tar.gz
MACOS_X64_BOOT_JDK_SHA256=47cf960d9bb89dbe987535a389f7e26c42de7c984ef5108612d77c81aa8cc6a4
+MACOS_AARCH64_BOOT_JDK_EXT=tar.gz
+MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_macos-aarch64_bin.tar.gz
+MACOS_AARCH64_BOOT_JDK_SHA256=d020f5c512c043cfb7119a591bc7e599a5bfd76d866d939f5562891d9db7c9b3
+
WINDOWS_X64_BOOT_JDK_EXT=zip
WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_windows-x64_bin.zip
WINDOWS_X64_BOOT_JDK_SHA256=c92fae5e42b9aecf444a66c8ec563c652f60b1e231dfdd33a4f5a3e3603058fb
diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js
index da8f2f87a62e1..cf9097eee57c9 100644
--- a/make/conf/jib-profiles.js
+++ b/make/conf/jib-profiles.js
@@ -945,10 +945,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: input.build_os,
target_cpu: input.build_cpu,
dependencies: [ "jtreg", "gnumake", "boot_jdk", "devkit", "jib" ],
- labels: "test",
- environment: {
- "JT_JAVA": common.boot_jdk_home
- }
+ labels: "test"
}
};
profiles = concatObjects(profiles, testOnlyProfiles);
@@ -1188,9 +1185,9 @@ var getJibProfilesDependencies = function (input, common) {
jtreg: {
server: "jpg",
product: "jtreg",
- version: "7.2",
+ version: "7.3.1",
build_number: "1",
- file: "bundles/jtreg-7.2+1.zip",
+ file: "bundles/jtreg-7.3.1+1.zip",
environment_name: "JT_HOME",
environment_path: input.get("jtreg", "home_path") + "/bin",
configure_args: "--with-jtreg=" + input.get("jtreg", "home_path"),
diff --git a/make/conf/version-numbers.conf b/make/conf/version-numbers.conf
index 985f986fc1ab3..b4571b544e2e7 100644
--- a/make/conf/version-numbers.conf
+++ b/make/conf/version-numbers.conf
@@ -28,15 +28,15 @@
DEFAULT_VERSION_FEATURE=21
DEFAULT_VERSION_INTERIM=0
-DEFAULT_VERSION_UPDATE=0
+DEFAULT_VERSION_UPDATE=3
DEFAULT_VERSION_PATCH=0
DEFAULT_VERSION_EXTRA1=0
DEFAULT_VERSION_EXTRA2=0
DEFAULT_VERSION_EXTRA3=0
-DEFAULT_VERSION_DATE=2023-09-19
+DEFAULT_VERSION_DATE=2024-04-16
DEFAULT_VERSION_CLASSFILE_MAJOR=65 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_VERSION_DOCS_API_SINCE=11
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="20 21"
DEFAULT_JDK_SOURCE_TARGET_VERSION=21
-DEFAULT_PROMOTED_VERSION_PRE=
+DEFAULT_PROMOTED_VERSION_PRE=ea
diff --git a/make/data/charsetmapping/MS950_HKSCS.map b/make/data/charsetmapping/MS950_HKSCS.map
new file mode 100644
index 0000000000000..77e2f014dc0dc
--- /dev/null
+++ b/make/data/charsetmapping/MS950_HKSCS.map
@@ -0,0 +1,5019 @@
+#
+# http://www.ogcio.gov.hk/ccli/eng/hskcs/mapping_table_2008.html
+#
+# comment out the following entries
+#
+# 88A3 <00EA,0304>
+# 88A5 <00EA,030C>
+# 8862 <00CA,0304>
+# 8864 <00CA,030C>
+#
+8740 43F0
+8741 4C32
+8742 4603
+8743 45A6
+8744 4578
+8745 27267
+8746 4D77
+8747 45B3
+8748 27CB1
+8749 4CE2
+874A 27CC5
+874B 3B95
+874C 4736
+874D 4744
+874E 4C47
+874F 4C40
+8750 242BF
+8751 23617
+8752 27352
+8753 26E8B
+8754 270D2
+8755 4C57
+8756 2A351
+8757 474F
+8758 45DA
+8759 4C85
+875A 27C6C
+875B 4D07
+875C 4AA4
+875D 46A1
+875E 26B23
+875F 7225
+8760 25A54
+8761 21A63
+8762 23E06
+8763 23F61
+8764 664D
+8765 56FB
+8767 7D95
+8768 591D
+8769 28BB9
+876A 3DF4
+876B 9734
+876C 27BEF
+876D 5BDB
+876E 21D5E
+876F 5AA4
+8770 3625
+8771 29EB0
+8772 5AD1
+8773 5BB7
+8774 5CFC
+8775 676E
+8776 8593
+8777 29945
+8778 7461
+8779 749D
+877A 3875
+877B 21D53
+877C 2369E
+877D 26021
+877E 3EEC
+87A1 258DE
+87A2 3AF5
+87A3 7AFC
+87A4 9F97
+87A5 24161
+87A6 2890D
+87A7 231EA
+87A8 20A8A
+87A9 2325E
+87AA 430A
+87AB 8484
+87AC 9F96
+87AD 942F
+87AE 4930
+87AF 8613
+87B0 5896
+87B1 974A
+87B2 9218
+87B3 79D0
+87B4 7A32
+87B5 6660
+87B6 6A29
+87B7 889D
+87B8 744C
+87B9 7BC5
+87BA 6782
+87BB 7A2C
+87BC 524F
+87BD 9046
+87BE 34E6
+87BF 73C4
+87C0 25DB9
+87C1 74C6
+87C2 9FC7
+87C3 57B3
+87C4 492F
+87C5 544C
+87C6 4131
+87C7 2368E
+87C8 5818
+87C9 7A72
+87CA 27B65
+87CB 8B8F
+87CC 46AE
+87CD 26E88
+87CE 4181
+87CF 25D99
+87D0 7BAE
+87D1 224BC
+87D2 9FC8
+87D3 224C1
+87D4 224C9
+87D5 224CC
+87D6 9FC9
+87D7 8504
+87D8 235BB
+87D9 40B4
+87DA 9FCA
+87DB 44E1
+87DC 2ADFF
+87DD 62C1
+87DE 706E
+87DF 9FCB
+8840 31C0
+8841 31C1
+8842 31C2
+8843 31C3
+8844 31C4
+8845 2010C
+8846 31C5
+8847 200D1
+8848 200CD
+8849 31C6
+884A 31C7
+884B 200CB
+884C 21FE8
+884D 31C8
+884E 200CA
+884F 31C9
+8850 31CA
+8851 31CB
+8852 31CC
+8853 2010E
+8854 31CD
+8855 31CE
+8856 0100
+8857 00C1
+8858 01CD
+8859 00C0
+885A 0112
+885B 00C9
+885C 011A
+885D 00C8
+885E 014C
+885F 00D3
+8860 01D1
+8861 00D2
+#8862 <00CA,0304>
+8863 1EBE
+#8864 <00CA,030C>
+8865 1EC0
+8866 00CA
+8867 0101
+8868 00E1
+8869 01CE
+886A 00E0
+886B 0251
+886C 0113
+886D 00E9
+886E 011B
+886F 00E8
+8870 012B
+8871 00ED
+8872 01D0
+8873 00EC
+8874 014D
+8875 00F3
+8876 01D2
+8877 00F2
+8878 016B
+8879 00FA
+887A 01D4
+887B 00F9
+887C 01D6
+887D 01D8
+887E 01DA
+88A1 01DC
+88A2 00FC
+#88A3 <00EA,0304>
+88A4 1EBF
+#88A5 <00EA,030C>
+88A6 1EC1
+88A7 00EA
+88A8 0261
+88A9 23DA
+88AA 23DB
+8940 2A3A9
+8941 21145
+8943 650A
+8946 4E3D
+8947 6EDD
+8948 9D4E
+8949 91DF
+894C 27735
+894D 6491
+894E 4F1A
+894F 4F28
+8950 4FA8
+8951 5156
+8952 5174
+8953 519C
+8954 51E4
+8955 52A1
+8956 52A8
+8957 533B
+8958 534E
+8959 53D1
+895A 53D8
+895B 56E2
+895C 58F0
+895D 5904
+895E 5907
+895F 5932
+8960 5934
+8961 5B66
+8962 5B9E
+8963 5B9F
+8964 5C9A
+8965 5E86
+8966 603B
+8967 6589
+8968 67FE
+8969 6804
+896A 6865
+896B 6D4E
+896C 70BC
+896D 7535
+896E 7EA4
+896F 7EAC
+8970 7EBA
+8971 7EC7
+8972 7ECF
+8973 7EDF
+8974 7F06
+8975 7F37
+8976 827A
+8977 82CF
+8978 836F
+8979 89C6
+897A 8BBE
+897B 8BE2
+897C 8F66
+897D 8F67
+897E 8F6E
+89A1 7411
+89A2 7CFC
+89A3 7DCD
+89A4 6946
+89A5 7AC9
+89A6 5227
+89AB 918C
+89AC 78B8
+89AD 915E
+89AE 80BC
+89B0 8D0B
+89B1 80F6
+89B2 209E7
+89B5 809F
+89B6 9EC7
+89B7 4CCD
+89B8 9DC9
+89B9 9E0C
+89BA 4C3E
+89BB 29DF6
+89BC 2700E
+89BD 9E0A
+89BE 2A133
+89BF 35C1
+89C1 6E9A
+89C2 823E
+89C3 7519
+89C5 4911
+89C6 9A6C
+89C7 9A8F
+89C8 9F99
+89C9 7987
+89CA 2846C
+89CB 21DCA
+89CC 205D0
+89CD 22AE6
+89CE 4E24
+89CF 4E81
+89D0 4E80
+89D1 4E87
+89D2 4EBF
+89D3 4EEB
+89D4 4F37
+89D5 344C
+89D6 4FBD
+89D7 3E48
+89D8 5003
+89D9 5088
+89DA 347D
+89DB 3493
+89DC 34A5
+89DD 5186
+89DE 5905
+89DF 51DB
+89E0 51FC
+89E1 5205
+89E2 4E89
+89E3 5279
+89E4 5290
+89E5 5327
+89E6 35C7
+89E7 53A9
+89E8 3551
+89E9 53B0
+89EA 3553
+89EB 53C2
+89EC 5423
+89ED 356D
+89EE 3572
+89EF 3681
+89F0 5493
+89F1 54A3
+89F2 54B4
+89F3 54B9
+89F4 54D0
+89F5 54EF
+89F6 5518
+89F7 5523
+89F8 5528
+89F9 3598
+89FA 553F
+89FB 35A5
+89FC 35BF
+89FD 55D7
+89FE 35C5
+8A40 27D84
+8A41 5525
+8A43 20C42
+8A44 20D15
+8A45 2512B
+8A46 5590
+8A47 22CC6
+8A48 39EC
+8A49 20341
+8A4A 8E46
+8A4B 24DB8
+8A4C 294E5
+8A4D 4053
+8A4E 280BE
+8A4F 777A
+8A50 22C38
+8A51 3A34
+8A52 47D5
+8A53 2815D
+8A54 269F2
+8A55 24DEA
+8A56 64DD
+8A57 20D7C
+8A58 20FB4
+8A59 20CD5
+8A5A 210F4
+8A5B 648D
+8A5C 8E7E
+8A5D 20E96
+8A5E 20C0B
+8A5F 20F64
+8A60 22CA9
+8A61 28256
+8A62 244D3
+8A64 20D46
+8A65 29A4D
+8A66 280E9
+8A67 47F4
+8A68 24EA7
+8A69 22CC2
+8A6A 9AB2
+8A6B 3A67
+8A6C 295F4
+8A6D 3FED
+8A6E 3506
+8A6F 252C7
+8A70 297D4
+8A71 278C8
+8A72 22D44
+8A73 9D6E
+8A74 9815
+8A76 43D9
+8A77 260A5
+8A78 64B4
+8A79 54E3
+8A7A 22D4C
+8A7B 22BCA
+8A7C 21077
+8A7D 39FB
+8A7E 2106F
+8AA1 266DA
+8AA2 26716
+8AA3 279A0
+8AA4 64EA
+8AA5 25052
+8AA6 20C43
+8AA7 8E68
+8AA8 221A1
+8AA9 28B4C
+8AAA 20731
+8AAC 480B
+8AAD 201A9
+8AAE 3FFA
+8AAF 5873
+8AB0 22D8D
+8AB2 245C8
+8AB3 204FC
+8AB4 26097
+8AB5 20F4C
+8AB6 20D96
+8AB7 5579
+8AB8 40BB
+8AB9 43BA
+8ABB 4AB4
+8ABC 22A66
+8ABD 2109D
+8ABE 81AA
+8ABF 98F5
+8AC0 20D9C
+8AC1 6379
+8AC2 39FE
+8AC3 22775
+8AC4 8DC0
+8AC5 56A1
+8AC6 647C
+8AC7 3E43
+8AC9 2A601
+8ACA 20E09
+8ACB 22ACF
+8ACC 22CC9
+8ACE 210C8
+8ACF 239C2
+8AD0 3992
+8AD1 3A06
+8AD2 2829B
+8AD3 3578
+8AD4 25E49
+8AD5 220C7
+8AD6 5652
+8AD7 20F31
+8AD8 22CB2
+8AD9 29720
+8ADA 34BC
+8ADB 6C3D
+8ADC 24E3B
+8ADF 27574
+8AE0 22E8B
+8AE1 22208
+8AE2 2A65B
+8AE3 28CCD
+8AE4 20E7A
+8AE5 20C34
+8AE6 2681C
+8AE7 7F93
+8AE8 210CF
+8AE9 22803
+8AEA 22939
+8AEB 35FB
+8AEC 251E3
+8AED 20E8C
+8AEE 20F8D
+8AEF 20EAA
+8AF0 3F93
+8AF1 20F30
+8AF2 20D47
+8AF3 2114F
+8AF4 20E4C
+8AF6 20EAB
+8AF7 20BA9
+8AF8 20D48
+8AF9 210C0
+8AFA 2113D
+8AFB 3FF9
+8AFC 22696
+8AFD 6432
+8AFE 20FAD
+8B40 233F4
+8B41 27639
+8B42 22BCE
+8B43 20D7E
+8B44 20D7F
+8B45 22C51
+8B46 22C55
+8B47 3A18
+8B48 20E98
+8B49 210C7
+8B4A 20F2E
+8B4B 2A632
+8B4C 26B50
+8B4D 28CD2
+8B4E 28D99
+8B4F 28CCA
+8B50 95AA
+8B51 54CC
+8B52 82C4
+8B53 55B9
+8B55 29EC3
+8B56 9C26
+8B57 9AB6
+8B58 2775E
+8B59 22DEE
+8B5A 7140
+8B5B 816D
+8B5C 80EC
+8B5D 5C1C
+8B5E 26572
+8B5F 8134
+8B60 3797
+8B61 535F
+8B62 280BD
+8B63 91B6
+8B64 20EFA
+8B65 20E0F
+8B66 20E77
+8B67 20EFB
+8B68 35DD
+8B69 24DEB
+8B6A 3609
+8B6B 20CD6
+8B6C 56AF
+8B6D 227B5
+8B6E 210C9
+8B6F 20E10
+8B70 20E78
+8B71 21078
+8B72 21148
+8B73 28207
+8B74 21455
+8B75 20E79
+8B76 24E50
+8B77 22DA4
+8B78 5A54
+8B79 2101D
+8B7A 2101E
+8B7B 210F5
+8B7C 210F6
+8B7D 579C
+8B7E 20E11
+8BA1 27694
+8BA2 282CD
+8BA3 20FB5
+8BA4 20E7B
+8BA5 2517E
+8BA6 3703
+8BA7 20FB6
+8BA8 21180
+8BA9 252D8
+8BAA 2A2BD
+8BAB 249DA
+8BAC 2183A
+8BAD 24177
+8BAE 2827C
+8BAF 5899
+8BB0 5268
+8BB1 361A
+8BB2 2573D
+8BB3 7BB2
+8BB4 5B68
+8BB5 4800
+8BB6 4B2C
+8BB7 9F27
+8BB8 49E7
+8BB9 9C1F
+8BBA 9B8D
+8BBB 25B74
+8BBC 2313D
+8BBD 55FB
+8BBE 35F2
+8BBF 5689
+8BC0 4E28
+8BC1 5902
+8BC2 21BC1
+8BC3 2F878
+8BC4 9751
+8BC5 20086
+8BC6 4E5B
+8BC7 4EBB
+8BC8 353E
+8BC9 5C23
+8BCA 5F51
+8BCB 5FC4
+8BCC 38FA
+8BCD 624C
+8BCE 6535
+8BCF 6B7A
+8BD0 6C35
+8BD1 6C3A
+8BD2 706C
+8BD3 722B
+8BD4 4E2C
+8BD5 72AD
+8BD6 248E9
+8BD7 7F52
+8BD8 793B
+8BD9 7CF9
+8BDA 7F53
+8BDB 2626A
+8BDC 34C1
+8BDE 2634B
+8BDF 8002
+8BE0 8080
+8BE1 26612
+8BE2 26951
+8BE3 535D
+8BE4 8864
+8BE5 89C1
+8BE6 278B2
+8BE7 8BA0
+8BE8 8D1D
+8BE9 9485
+8BEA 9578
+8BEB 957F
+8BEC 95E8
+8BED 28E0F
+8BEE 97E6
+8BEF 9875
+8BF0 98CE
+8BF1 98DE
+8BF2 9963
+8BF3 29810
+8BF4 9C7C
+8BF5 9E1F
+8BF6 9EC4
+8BF7 6B6F
+8BF8 F907
+8BF9 4E37
+8BFA 20087
+8BFB 961D
+8BFC 6237
+8BFD 94A2
+8C40 503B
+8C41 6DFE
+8C42 29C73
+8C43 9FA6
+8C44 3DC9
+8C45 888F
+8C46 2414E
+8C47 7077
+8C48 5CF5
+8C49 4B20
+8C4A 251CD
+8C4B 3559
+8C4C 25D30
+8C4D 6122
+8C4E 28A32
+8C4F 8FA7
+8C50 91F6
+8C51 7191
+8C52 6719
+8C53 73BA
+8C54 23281
+8C55 2A107
+8C56 3C8B
+8C57 21980
+8C58 4B10
+8C59 78E4
+8C5A 7402
+8C5B 51AE
+8C5C 2870F
+8C5D 4009
+8C5E 6A63
+8C5F 2A2BA
+8C60 4223
+8C61 860F
+8C62 20A6F
+8C63 7A2A
+8C64 29947
+8C65 28AEA
+8C66 9755
+8C67 704D
+8C68 5324
+8C69 2207E
+8C6A 93F4
+8C6B 76D9
+8C6C 289E3
+8C6D 9FA7
+8C6E 77DD
+8C6F 4EA3
+8C70 4FF0
+8C71 50BC
+8C72 4E2F
+8C73 4F17
+8C74 9FA8
+8C75 5434
+8C76 7D8B
+8C77 5892
+8C78 58D0
+8C79 21DB6
+8C7A 5E92
+8C7B 5E99
+8C7C 5FC2
+8C7D 22712
+8C7E 658B
+8CA1 233F9
+8CA2 6919
+8CA3 6A43
+8CA4 23C63
+8CA5 6CFF
+8CA7 7200
+8CA8 24505
+8CA9 738C
+8CAA 3EDB
+8CAB 24A13
+8CAC 5B15
+8CAD 74B9
+8CAE 8B83
+8CAF 25CA4
+8CB0 25695
+8CB1 7A93
+8CB2 7BEC
+8CB3 7CC3
+8CB4 7E6C
+8CB5 82F8
+8CB6 8597
+8CB7 9FA9
+8CB8 8890
+8CB9 9FAA
+8CBA 8EB9
+8CBB 9FAB
+8CBC 8FCF
+8CBD 855F
+8CBE 99E0
+8CBF 9221
+8CC0 9FAC
+8CC1 28DB9
+8CC2 2143F
+8CC3 4071
+8CC4 42A2
+8CC5 5A1A
+8CC9 9868
+8CCA 676B
+8CCB 4276
+8CCC 573D
+8CCE 85D6
+8CCF 2497B
+8CD0 82BF
+8CD1 2710D
+8CD2 4C81
+8CD3 26D74
+8CD4 5D7B
+8CD5 26B15
+8CD6 26FBE
+8CD7 9FAD
+8CD8 9FAE
+8CD9 5B96
+8CDA 9FAF
+8CDB 66E7
+8CDC 7E5B
+8CDD 6E57
+8CDE 79CA
+8CDF 3D88
+8CE0 44C3
+8CE1 23256
+8CE2 22796
+8CE3 439A
+8CE4 4536
+8CE6 5CD5
+8CE7 23B1A
+8CE8 8AF9
+8CE9 5C78
+8CEA 3D12
+8CEB 23551
+8CEC 5D78
+8CED 9FB2
+8CEE 7157
+8CEF 4558
+8CF0 240EC
+8CF1 21E23
+8CF2 4C77
+8CF3 3978
+8CF4 344A
+8CF5 201A4
+8CF6 26C41
+8CF7 8ACC
+8CF8 4FB4
+8CF9 20239
+8CFA 59BF
+8CFB 816C
+8CFC 9856
+8CFD 298FA
+8CFE 5F3B
+8D40 20B9F
+8D42 221C1
+8D43 2896D
+8D44 4102
+8D45 46BB
+8D46 29079
+8D47 3F07
+8D48 9FB3
+8D49 2A1B5
+8D4A 40F8
+8D4B 37D6
+8D4C 46F7
+8D4D 26C46
+8D4E 417C
+8D4F 286B2
+8D50 273FF
+8D51 456D
+8D52 38D4
+8D53 2549A
+8D54 4561
+8D55 451B
+8D56 4D89
+8D57 4C7B
+8D58 4D76
+8D59 45EA
+8D5A 3FC8
+8D5B 24B0F
+8D5C 3661
+8D5D 44DE
+8D5E 44BD
+8D5F 41ED
+8D60 5D3E
+8D61 5D48
+8D62 5D56
+8D63 3DFC
+8D64 380F
+8D65 5DA4
+8D66 5DB9
+8D67 3820
+8D68 3838
+8D69 5E42
+8D6A 5EBD
+8D6B 5F25
+8D6C 5F83
+8D6D 3908
+8D6E 3914
+8D6F 393F
+8D70 394D
+8D71 60D7
+8D72 613D
+8D73 5CE5
+8D74 3989
+8D75 61B7
+8D76 61B9
+8D77 61CF
+8D78 39B8
+8D79 622C
+8D7A 6290
+8D7B 62E5
+8D7C 6318
+8D7D 39F8
+8D7E 56B1
+8DA1 3A03
+8DA2 63E2
+8DA3 63FB
+8DA4 6407
+8DA5 645A
+8DA6 3A4B
+8DA7 64C0
+8DA8 5D15
+8DA9 5621
+8DAA 9F9F
+8DAB 3A97
+8DAC 6586
+8DAD 3ABD
+8DAE 65FF
+8DAF 6653
+8DB0 3AF2
+8DB1 6692
+8DB2 3B22
+8DB3 6716
+8DB4 3B42
+8DB5 67A4
+8DB6 6800
+8DB7 3B58
+8DB8 684A
+8DB9 6884
+8DBA 3B72
+8DBB 3B71
+8DBC 3B7B
+8DBD 6909
+8DBE 6943
+8DBF 725C
+8DC0 6964
+8DC1 699F
+8DC2 6985
+8DC3 3BBC
+8DC4 69D6
+8DC5 3BDD
+8DC6 6A65
+8DC7 6A74
+8DC8 6A71
+8DC9 6A82
+8DCA 3BEC
+8DCB 6A99
+8DCC 3BF2
+8DCD 6AAB
+8DCE 6AB5
+8DCF 6AD4
+8DD0 6AF6
+8DD1 6B81
+8DD2 6BC1
+8DD3 6BEA
+8DD4 6C75
+8DD5 6CAA
+8DD6 3CCB
+8DD7 6D02
+8DD8 6D06
+8DD9 6D26
+8DDA 6D81
+8DDB 3CEF
+8DDC 6DA4
+8DDD 6DB1
+8DDE 6E15
+8DDF 6E18
+8DE0 6E29
+8DE1 6E86
+8DE2 289C0
+8DE3 6EBB
+8DE4 6EE2
+8DE5 6EDA
+8DE6 9F7F
+8DE7 6EE8
+8DE8 6EE9
+8DE9 6F24
+8DEA 6F34
+8DEB 3D46
+8DEC 23F41
+8DED 6F81
+8DEE 6FBE
+8DEF 3D6A
+8DF0 3D75
+8DF1 71B7
+8DF2 5C99
+8DF3 3D8A
+8DF4 702C
+8DF5 3D91
+8DF6 7050
+8DF7 7054
+8DF8 706F
+8DF9 707F
+8DFA 7089
+8DFB 20325
+8DFC 43C1
+8DFD 35F1
+8DFE 20ED8
+8E40 23ED7
+8E41 57BE
+8E42 26ED3
+8E43 713E
+8E44 257E0
+8E45 364E
+8E46 69A2
+8E47 28BE9
+8E48 5B74
+8E49 7A49
+8E4A 258E1
+8E4B 294D9
+8E4C 7A65
+8E4D 7A7D
+8E4E 259AC
+8E4F 7ABB
+8E50 7AB0
+8E51 7AC2
+8E52 7AC3
+8E53 71D1
+8E54 2648D
+8E55 41CA
+8E56 7ADA
+8E57 7ADD
+8E58 7AEA
+8E59 41EF
+8E5A 54B2
+8E5B 25C01
+8E5C 7B0B
+8E5D 7B55
+8E5E 7B29
+8E5F 2530E
+8E60 25CFE
+8E61 7BA2
+8E62 7B6F
+8E63 839C
+8E64 25BB4
+8E65 26C7F
+8E66 7BD0
+8E67 8421
+8E68 7B92
+8E6A 25D20
+8E6B 3DAD
+8E6C 25C65
+8E6D 8492
+8E6E 7BFA
+8E70 7C35
+8E71 25CC1
+8E72 7C44
+8E73 7C83
+8E74 24882
+8E75 7CA6
+8E76 667D
+8E77 24578
+8E78 7CC9
+8E79 7CC7
+8E7A 7CE6
+8E7B 7C74
+8E7C 7CF3
+8E7D 7CF5
+8EA1 7E67
+8EA2 451D
+8EA3 26E44
+8EA4 7D5D
+8EA5 26ED6
+8EA6 748D
+8EA7 7D89
+8EA8 7DAB
+8EA9 7135
+8EAA 7DB3
+8EAC 24057
+8EAD 26029
+8EAE 7DE4
+8EAF 3D13
+8EB0 7DF5
+8EB1 217F9
+8EB2 7DE5
+8EB3 2836D
+8EB5 26121
+8EB6 2615A
+8EB7 7E6E
+8EB8 7E92
+8EB9 432B
+8EBA 946C
+8EBB 7E27
+8EBC 7F40
+8EBD 7F41
+8EBE 7F47
+8EBF 7936
+8EC0 262D0
+8EC1 99E1
+8EC2 7F97
+8EC3 26351
+8EC4 7FA3
+8EC5 21661
+8EC6 20068
+8EC7 455C
+8EC8 23766
+8EC9 4503
+8ECA 2833A
+8ECB 7FFA
+8ECC 26489
+8ECE 8008
+8ECF 801D
+8ED1 802F
+8ED2 2A087
+8ED3 26CC3
+8ED4 803B
+8ED5 803C
+8ED6 8061
+8ED7 22714
+8ED8 4989
+8ED9 26626
+8EDA 23DE3
+8EDB 266E8
+8EDC 6725
+8EDD 80A7
+8EDE 28A48
+8EDF 8107
+8EE0 811A
+8EE1 58B0
+8EE2 226F6
+8EE3 6C7F
+8EE4 26498
+8EE5 24FB8
+8EE6 64E7
+8EE7 2148A
+8EE8 8218
+8EE9 2185E
+8EEA 6A53
+8EEB 24A65
+8EEC 24A95
+8EED 447A
+8EEE 8229
+8EEF 20B0D
+8EF0 26A52
+8EF1 23D7E
+8EF2 4FF9
+8EF3 214FD
+8EF4 84E2
+8EF5 8362
+8EF6 26B0A
+8EF7 249A7
+8EF8 23530
+8EF9 21773
+8EFA 23DF8
+8EFB 82AA
+8EFC 691B
+8EFD 2F994
+8EFE 41DB
+8F40 854B
+8F41 82D0
+8F42 831A
+8F43 20E16
+8F44 217B4
+8F45 36C1
+8F46 2317D
+8F47 2355A
+8F48 827B
+8F49 82E2
+8F4A 8318
+8F4B 23E8B
+8F4C 26DA3
+8F4D 26B05
+8F4E 26B97
+8F4F 235CE
+8F50 3DBF
+8F51 831D
+8F52 55EC
+8F53 8385
+8F54 450B
+8F55 26DA5
+8F56 83AC
+8F58 83D3
+8F59 347E
+8F5A 26ED4
+8F5B 6A57
+8F5C 855A
+8F5D 3496
+8F5E 26E42
+8F5F 22EEF
+8F60 8458
+8F61 25BE4
+8F62 8471
+8F63 3DD3
+8F64 44E4
+8F65 6AA7
+8F66 844A
+8F67 23CB5
+8F68 7958
+8F6A 26B96
+8F6B 26E77
+8F6C 26E43
+8F6D 84DE
+8F6F 8391
+8F70 44A0
+8F71 8493
+8F72 84E4
+8F73 25C91
+8F74 4240
+8F75 25CC0
+8F76 4543
+8F77 8534
+8F78 5AF2
+8F79 26E99
+8F7A 4527
+8F7B 8573
+8F7C 4516
+8F7D 67BF
+8F7E 8616
+8FA1 28625
+8FA2 2863B
+8FA3 85C1
+8FA4 27088
+8FA5 8602
+8FA6 21582
+8FA7 270CD
+8FA8 2F9B2
+8FA9 456A
+8FAA 8628
+8FAB 3648
+8FAC 218A2
+8FAD 53F7
+8FAE 2739A
+8FAF 867E
+8FB0 8771
+8FB1 2A0F8
+8FB2 87EE
+8FB3 22C27
+8FB4 87B1
+8FB5 87DA
+8FB6 880F
+8FB7 5661
+8FB8 866C
+8FB9 6856
+8FBA 460F
+8FBB 8845
+8FBC 8846
+8FBD 275E0
+8FBE 23DB9
+8FBF 275E4
+8FC0 885E
+8FC1 889C
+8FC2 465B
+8FC3 88B4
+8FC4 88B5
+8FC5 63C1
+8FC6 88C5
+8FC7 7777
+8FC8 2770F
+8FC9 8987
+8FCA 898A
+8FCD 89A7
+8FCE 89BC
+8FCF 28A25
+8FD0 89E7
+8FD1 27924
+8FD2 27ABD
+8FD3 8A9C
+8FD4 7793
+8FD5 91FE
+8FD6 8A90
+8FD7 27A59
+8FD8 7AE9
+8FD9 27B3A
+8FDA 23F8F
+8FDB 4713
+8FDC 27B38
+8FDD 717C
+8FDE 8B0C
+8FDF 8B1F
+8FE0 25430
+8FE1 25565
+8FE2 8B3F
+8FE3 8B4C
+8FE4 8B4D
+8FE5 8AA9
+8FE6 24A7A
+8FE7 8B90
+8FE8 8B9B
+8FE9 8AAF
+8FEA 216DF
+8FEB 4615
+8FEC 884F
+8FED 8C9B
+8FEE 27D54
+8FEF 27D8F
+8FF0 2F9D4
+8FF1 3725
+8FF2 27D53
+8FF3 8CD6
+8FF4 27D98
+8FF5 27DBD
+8FF6 8D12
+8FF7 8D03
+8FF8 21910
+8FF9 8CDB
+8FFA 705C
+8FFB 8D11
+8FFC 24CC9
+8FFD 3ED0
+9040 8DA9
+9041 28002
+9042 21014
+9043 2498A
+9044 3B7C
+9045 281BC
+9046 2710C
+9047 7AE7
+9048 8EAD
+9049 8EB6
+904A 8EC3
+904B 92D4
+904C 8F19
+904D 8F2D
+904E 28365
+904F 28412
+9050 8FA5
+9051 9303
+9052 2A29F
+9053 20A50
+9054 8FB3
+9055 492A
+9056 289DE
+9057 2853D
+9058 23DBB
+9059 5EF8
+905A 23262
+905B 8FF9
+905C 2A014
+905D 286BC
+905E 28501
+905F 22325
+9060 3980
+9061 26ED7
+9062 9037
+9063 2853C
+9064 27ABE
+9065 9061
+9066 2856C
+9067 2860B
+9068 90A8
+9069 28713
+906A 90C4
+906B 286E6
+906C 90AE
+906E 9167
+906F 3AF0
+9070 91A9
+9071 91C4
+9072 7CAC
+9073 28933
+9074 21E89
+9075 920E
+9076 6C9F
+9077 9241
+9078 9262
+9079 255B9
+907B 28AC6
+907C 23C9B
+907D 28B0C
+907E 255DB
+90A1 20D31
+90A2 932C
+90A3 936B
+90A4 28AE1
+90A5 28BEB
+90A6 708F
+90A7 5AC3
+90A8 28AE2
+90A9 28AE5
+90AA 4965
+90AB 9244
+90AC 28BEC
+90AD 28C39
+90AE 28BFF
+90AF 9373
+90B0 945B
+90B1 8EBC
+90B2 9585
+90B3 95A6
+90B4 9426
+90B5 95A0
+90B6 6FF6
+90B7 42B9
+90B8 2267A
+90B9 286D8
+90BA 2127C
+90BB 23E2E
+90BC 49DF
+90BD 6C1C
+90BE 967B
+90BF 9696
+90C0 416C
+90C1 96A3
+90C2 26ED5
+90C3 61DA
+90C4 96B6
+90C5 78F5
+90C6 28AE0
+90C7 96BD
+90C8 53CC
+90C9 49A1
+90CA 26CB8
+90CB 20274
+90CC 26410
+90CD 290AF
+90CE 290E5
+90CF 24AD1
+90D0 21915
+90D1 2330A
+90D2 9731
+90D3 8642
+90D4 9736
+90D5 4A0F
+90D6 453D
+90D7 4585
+90D8 24AE9
+90D9 7075
+90DA 5B41
+90DB 971B
+90DD 291D5
+90DE 9757
+90DF 5B4A
+90E0 291EB
+90E1 975F
+90E2 9425
+90E3 50D0
+90E4 230B7
+90E5 230BC
+90E6 9789
+90E7 979F
+90E8 97B1
+90E9 97BE
+90EA 97C0
+90EB 97D2
+90EC 97E0
+90ED 2546C
+90EE 97EE
+90EF 741C
+90F0 29433
+90F2 97F5
+90F3 2941D
+90F4 2797A
+90F5 4AD1
+90F6 9834
+90F7 9833
+90F8 984B
+90F9 9866
+90FA 3B0E
+90FB 27175
+90FC 3D51
+90FD 20630
+90FE 2415C
+9140 25706
+9141 98CA
+9142 98B7
+9143 98C8
+9144 98C7
+9145 4AFF
+9146 26D27
+9147 216D3
+9148 55B0
+9149 98E1
+914A 98E6
+914B 98EC
+914C 9378
+914D 9939
+914E 24A29
+914F 4B72
+9150 29857
+9151 29905
+9152 99F5
+9153 9A0C
+9154 9A3B
+9155 9A10
+9156 9A58
+9157 25725
+9158 36C4
+9159 290B1
+915A 29BD5
+915B 9AE0
+915C 9AE2
+915D 29B05
+915E 9AF4
+915F 4C0E
+9160 9B14
+9161 9B2D
+9162 28600
+9163 5034
+9164 9B34
+9165 269A8
+9166 38C3
+9167 2307D
+9168 9B50
+9169 9B40
+916A 29D3E
+916B 5A45
+916C 21863
+916D 9B8E
+916E 2424B
+916F 9C02
+9170 9BFF
+9171 9C0C
+9172 29E68
+9173 9DD4
+9174 29FB7
+9175 2A192
+9176 2A1AB
+9177 2A0E1
+9178 2A123
+9179 2A1DF
+917A 9D7E
+917B 9D83
+917C 2A134
+917D 9E0E
+917E 6888
+91A1 9DC4
+91A2 2215B
+91A3 2A193
+91A4 2A220
+91A5 2193B
+91A6 2A233
+91A7 9D39
+91A8 2A0B9
+91A9 2A2B4
+91AA 9E90
+91AB 9E95
+91AC 9E9E
+91AD 9EA2
+91AE 4D34
+91AF 9EAA
+91B0 9EAF
+91B1 24364
+91B2 9EC1
+91B3 3B60
+91B4 39E5
+91B5 3D1D
+91B6 4F32
+91B7 37BE
+91B8 28C2B
+91B9 9F02
+91BA 9F08
+91BB 4B96
+91BC 9424
+91BD 26DA2
+91BE 9F17
+91C0 9F39
+91C1 569F
+91C2 568A
+91C3 9F45
+91C4 99B8
+91C5 2908B
+91C6 97F2
+91C7 847F
+91C8 9F62
+91C9 9F69
+91CA 7ADC
+91CB 9F8E
+91CC 7216
+91CD 4BBE
+91CE 24975
+91CF 249BB
+91D0 7177
+91D1 249F8
+91D2 24348
+91D3 24A51
+91D4 739E
+91D5 28BDA
+91D6 218FA
+91D7 799F
+91D8 2897E
+91D9 28E36
+91DA 9369
+91DB 93F3
+91DC 28A44
+91DD 92EC
+91DE 9381
+91DF 93CB
+91E0 2896C
+91E1 244B9
+91E2 7217
+91E3 3EEB
+91E4 7772
+91E5 7A43
+91E6 70D0
+91E7 24473
+91E8 243F8
+91E9 717E
+91EA 217EF
+91EB 70A3
+91EC 218BE
+91ED 23599
+91EE 3EC7
+91EF 21885
+91F0 2542F
+91F1 217F8
+91F2 3722
+91F3 216FB
+91F4 21839
+91F5 36E1
+91F6 21774
+91F7 218D1
+91F8 25F4B
+91F9 3723
+91FA 216C0
+91FB 575B
+91FC 24A25
+91FD 213FE
+91FE 212A8
+9240 213C6
+9241 214B6
+9242 8503
+9243 236A6
+9245 8455
+9246 24994
+9247 27165
+9248 23E31
+9249 2555C
+924A 23EFB
+924B 27052
+924C 44F4
+924D 236EE
+924E 2999D
+924F 26F26
+9250 67F9
+9251 3733
+9252 3C15
+9253 3DE7
+9254 586C
+9255 21922
+9256 6810
+9257 4057
+9258 2373F
+9259 240E1
+925A 2408B
+925B 2410F
+925C 26C21
+925D 54CB
+925E 569E
+925F 266B1
+9260 5692
+9261 20FDF
+9262 20BA8
+9263 20E0D
+9264 93C6
+9265 28B13
+9266 939C
+9267 4EF8
+9268 512B
+9269 3819
+926A 24436
+926B 4EBC
+926C 20465
+926D 2037F
+926E 4F4B
+926F 4F8A
+9270 25651
+9271 5A68
+9272 201AB
+9273 203CB
+9274 3999
+9275 2030A
+9276 20414
+9277 3435
+9278 4F29
+9279 202C0
+927A 28EB3
+927B 20275
+927C 8ADA
+927D 2020C
+927E 4E98
+92A1 50CD
+92A2 510D
+92A3 4FA2
+92A4 4F03
+92A5 24A0E
+92A6 23E8A
+92A7 4F42
+92A8 502E
+92A9 506C
+92AA 5081
+92AB 4FCC
+92AC 4FE5
+92AD 5058
+92AE 50FC
+92B3 6E76
+92B4 23595
+92B5 23E39
+92B6 23EBF
+92B7 6D72
+92B8 21884
+92B9 23E89
+92BA 51A8
+92BB 51C3
+92BC 205E0
+92BD 44DD
+92BE 204A3
+92BF 20492
+92C0 20491
+92C1 8D7A
+92C2 28A9C
+92C3 2070E
+92C4 5259
+92C5 52A4
+92C6 20873
+92C7 52E1
+92C9 467A
+92CA 718C
+92CB 2438C
+92CC 20C20
+92CD 249AC
+92CE 210E4
+92CF 69D1
+92D0 20E1D
+92D2 3EDE
+92D3 7499
+92D4 7414
+92D5 7456
+92D6 7398
+92D7 4B8E
+92D8 24ABC
+92D9 2408D
+92DA 53D0
+92DB 3584
+92DC 720F
+92DD 240C9
+92DE 55B4
+92DF 20345
+92E0 54CD
+92E1 20BC6
+92E2 571D
+92E3 925D
+92E4 96F4
+92E5 9366
+92E6 57DD
+92E7 578D
+92E8 577F
+92E9 363E
+92EA 58CB
+92EB 5A99
+92EC 28A46
+92ED 216FA
+92EE 2176F
+92EF 21710
+92F0 5A2C
+92F1 59B8
+92F2 928F
+92F3 5A7E
+92F4 5ACF
+92F5 5A12
+92F6 25946
+92F7 219F3
+92F8 21861
+92F9 24295
+92FA 36F5
+92FB 6D05
+92FC 7443
+92FD 5A21
+92FE 25E83
+9340 5A81
+9341 28BD7
+9342 20413
+9343 93E0
+9344 748C
+9345 21303
+9346 7105
+9347 4972
+9348 9408
+9349 289FB
+934A 93BD
+934B 37A0
+934C 5C1E
+934D 5C9E
+934E 5E5E
+934F 5E48
+9350 21996
+9351 2197C
+9352 23AEE
+9353 5ECD
+9354 5B4F
+9355 21903
+9356 21904
+9357 3701
+9358 218A0
+9359 36DD
+935A 216FE
+935B 36D3
+935C 812A
+935D 28A47
+935E 21DBA
+935F 23472
+9360 289A8
+9361 5F0C
+9362 5F0E
+9363 21927
+9364 217AB
+9365 5A6B
+9366 2173B
+9367 5B44
+9368 8614
+9369 275FD
+936A 8860
+936B 607E
+936C 22860
+936D 2262B
+936E 5FDB
+936F 3EB8
+9370 225AF
+9371 225BE
+9372 29088
+9373 26F73
+9374 61C0
+9375 2003E
+9376 20046
+9377 2261B
+9378 6199
+9379 6198
+937A 6075
+937B 22C9B
+937C 22D07
+937D 246D4
+937E 2914D
+93A1 6471
+93A2 24665
+93A3 22B6A
+93A4 3A29
+93A5 22B22
+93A6 23450
+93A7 298EA
+93A8 22E78
+93A9 6337
+93AA 2A45B
+93AB 64B6
+93AC 6331
+93AD 63D1
+93AE 249E3
+93AF 22D67
+93B0 62A4
+93B1 22CA1
+93B2 643B
+93B3 656B
+93B4 6972
+93B5 3BF4
+93B6 2308E
+93B7 232AD
+93B8 24989
+93B9 232AB
+93BA 550D
+93BB 232E0
+93BC 218D9
+93BD 2943F
+93BE 66CE
+93BF 23289
+93C0 231B3
+93C1 3AE0
+93C2 4190
+93C3 25584
+93C4 28B22
+93C5 2558F
+93C6 216FC
+93C7 2555B
+93C8 25425
+93C9 78EE
+93CA 23103
+93CB 2182A
+93CC 23234
+93CD 3464
+93CE 2320F
+93CF 23182
+93D0 242C9
+93D1 668E
+93D2 26D24
+93D3 666B
+93D4 4B93
+93D5 6630
+93D6 27870
+93D7 21DEB
+93D8 6663
+93D9 232D2
+93DA 232E1
+93DB 661E
+93DC 25872
+93DD 38D1
+93DE 2383A
+93DF 237BC
+93E0 3B99
+93E1 237A2
+93E2 233FE
+93E3 74D0
+93E4 3B96
+93E5 678F
+93E6 2462A
+93E7 68B6
+93E8 681E
+93E9 3BC4
+93EA 6ABE
+93EB 3863
+93EC 237D5
+93ED 24487
+93EE 6A33
+93EF 6A52
+93F0 6AC9
+93F1 6B05
+93F2 21912
+93F3 6511
+93F4 6898
+93F5 6A4C
+93F6 3BD7
+93F7 6A7A
+93F8 6B57
+93F9 23FC0
+93FA 23C9A
+93FB 93A0
+93FC 92F2
+93FD 28BEA
+93FE 28ACB
+9440 9289
+9441 2801E
+9442 289DC
+9443 9467
+9444 6DA5
+9445 6F0B
+9446 249EC
+9448 23F7F
+9449 3D8F
+944A 6E04
+944B 2403C
+944C 5A3D
+944D 6E0A
+944E 5847
+944F 6D24
+9450 7842
+9451 713B
+9452 2431A
+9453 24276
+9454 70F1
+9455 7250
+9456 7287
+9457 7294
+9458 2478F
+9459 24725
+945A 5179
+945B 24AA4
+945C 205EB
+945D 747A
+945E 23EF8
+945F 2365F
+9460 24A4A
+9461 24917
+9462 25FE1
+9463 3F06
+9464 3EB1
+9465 24ADF
+9466 28C23
+9467 23F35
+9468 60A7
+9469 3EF3
+946A 74CC
+946B 743C
+946C 9387
+946D 7437
+946E 449F
+946F 26DEA
+9470 4551
+9471 7583
+9472 3F63
+9473 24CD9
+9474 24D06
+9475 3F58
+9476 7555
+9477 7673
+9478 2A5C6
+9479 3B19
+947A 7468
+947B 28ACC
+947C 249AB
+947D 2498E
+947E 3AFB
+94A1 3DCD
+94A2 24A4E
+94A3 3EFF
+94A4 249C5
+94A5 248F3
+94A6 91FA
+94A7 5732
+94A8 9342
+94A9 28AE3
+94AA 21864
+94AB 50DF
+94AC 25221
+94AD 251E7
+94AE 7778
+94AF 23232
+94B0 770E
+94B1 770F
+94B2 777B
+94B3 24697
+94B4 23781
+94B5 3A5E
+94B6 248F0
+94B7 7438
+94B8 749B
+94B9 3EBF
+94BA 24ABA
+94BB 24AC7
+94BC 40C8
+94BD 24A96
+94BE 261AE
+94BF 9307
+94C0 25581
+94C1 781E
+94C2 788D
+94C3 7888
+94C4 78D2
+94C5 73D0
+94C6 7959
+94C7 27741
+94C8 256E3
+94C9 410E
+94CB 8496
+94CC 79A5
+94CD 6A2D
+94CE 23EFA
+94CF 7A3A
+94D0 79F4
+94D1 416E
+94D2 216E6
+94D3 4132
+94D4 9235
+94D5 79F1
+94D6 20D4C
+94D7 2498C
+94D8 20299
+94D9 23DBA
+94DA 2176E
+94DB 3597
+94DC 556B
+94DD 3570
+94DE 36AA
+94DF 201D4
+94E0 20C0D
+94E1 7AE2
+94E2 5A59
+94E3 226F5
+94E4 25AAF
+94E5 25A9C
+94E6 5A0D
+94E7 2025B
+94E8 78F0
+94E9 5A2A
+94EA 25BC6
+94EB 7AFE
+94EC 41F9
+94ED 7C5D
+94EE 7C6D
+94EF 4211
+94F0 25BB3
+94F1 25EBC
+94F2 25EA6
+94F3 7CCD
+94F4 249F9
+94F5 217B0
+94F6 7C8E
+94F7 7C7C
+94F8 7CAE
+94F9 6AB2
+94FA 7DDC
+94FB 7E07
+94FC 7DD3
+94FD 7F4E
+94FE 26261
+9540 2615C
+9541 27B48
+9542 7D97
+9543 25E82
+9544 426A
+9545 26B75
+9546 20916
+9547 67D6
+9548 2004E
+9549 235CF
+954A 57C4
+954B 26412
+954C 263F8
+954D 24962
+954E 7FDD
+954F 7B27
+9550 2082C
+9551 25AE9
+9552 25D43
+9553 7B0C
+9554 25E0E
+9555 99E6
+9556 8645
+9557 9A63
+9558 6A1C
+9559 2343F
+955A 39E2
+955B 249F7
+955C 265AD
+955D 9A1F
+955E 265A0
+955F 8480
+9560 27127
+9561 26CD1
+9562 44EA
+9563 8137
+9564 4402
+9565 80C6
+9566 8109
+9567 8142
+9568 267B4
+9569 98C3
+956A 26A42
+956B 8262
+956C 8265
+956D 26A51
+956E 8453
+956F 26DA7
+9570 8610
+9571 2721B
+9572 5A86
+9573 417F
+9574 21840
+9575 5B2B
+9576 218A1
+9577 5AE4
+9578 218D8
+9579 86A0
+957A 2F9BC
+957B 23D8F
+957C 882D
+957D 27422
+957E 5A02
+95A1 886E
+95A2 4F45
+95A3 8887
+95A4 88BF
+95A5 88E6
+95A6 8965
+95A7 894D
+95A8 25683
+95A9 8954
+95AA 27785
+95AB 27784
+95AC 28BF5
+95AD 28BD9
+95AE 28B9C
+95AF 289F9
+95B0 3EAD
+95B1 84A3
+95B2 46F5
+95B3 46CF
+95B4 37F2
+95B5 8A3D
+95B6 8A1C
+95B7 29448
+95B8 5F4D
+95B9 922B
+95BA 24284
+95BB 65D4
+95BC 7129
+95BD 70C4
+95BE 21845
+95BF 9D6D
+95C0 8C9F
+95C1 8CE9
+95C2 27DDC
+95C3 599A
+95C4 77C3
+95C5 59F0
+95C6 436E
+95C7 36D4
+95C8 8E2A
+95C9 8EA7
+95CA 24C09
+95CB 8F30
+95CC 8F4A
+95CD 42F4
+95CE 6C58
+95CF 6FBB
+95D0 22321
+95D1 489B
+95D2 6F79
+95D3 6E8B
+95D4 217DA
+95D5 9BE9
+95D6 36B5
+95D7 2492F
+95D8 90BB
+95DA 5571
+95DB 4906
+95DC 91BB
+95DD 9404
+95DE 28A4B
+95DF 4062
+95E0 28AFC
+95E1 9427
+95E2 28C1D
+95E3 28C3B
+95E4 84E5
+95E5 8A2B
+95E6 9599
+95E7 95A7
+95E8 9597
+95E9 9596
+95EA 28D34
+95EB 7445
+95EC 3EC2
+95ED 248FF
+95EE 24A42
+95EF 243EA
+95F0 3EE7
+95F1 23225
+95F2 968F
+95F3 28EE7
+95F4 28E66
+95F5 28E65
+95F6 3ECC
+95F7 249ED
+95F8 24A78
+95F9 23FEE
+95FA 7412
+95FB 746B
+95FC 3EFC
+95FD 9741
+95FE 290B0
+9640 6847
+9641 4A1D
+9642 29093
+9643 257DF
+9645 9368
+9646 28989
+9647 28C26
+9648 28B2F
+9649 263BE
+964A 92BA
+964B 5B11
+964C 8B69
+964D 493C
+964E 73F9
+964F 2421B
+9650 979B
+9651 9771
+9652 9938
+9653 20F26
+9654 5DC1
+9655 28BC5
+9656 24AB2
+9657 981F
+9658 294DA
+9659 92F6
+965A 295D7
+965B 91E5
+965C 44C0
+965D 28B50
+965E 24A67
+965F 28B64
+9660 98DC
+9661 28A45
+9662 3F00
+9663 922A
+9664 4925
+9665 8414
+9666 993B
+9667 994D
+9668 27B06
+9669 3DFD
+966A 999B
+966B 4B6F
+966C 99AA
+966D 9A5C
+966E 28B65
+966F 258C8
+9670 6A8F
+9671 9A21
+9672 5AFE
+9673 9A2F
+9674 298F1
+9675 4B90
+9676 29948
+9677 99BC
+9678 4BBD
+9679 4B97
+967A 937D
+967B 5872
+967C 21302
+967D 5822
+967E 249B8
+96A1 214E8
+96A2 7844
+96A3 2271F
+96A4 23DB8
+96A5 68C5
+96A6 3D7D
+96A7 9458
+96A8 3927
+96A9 6150
+96AA 22781
+96AB 2296B
+96AC 6107
+96AD 9C4F
+96AE 9C53
+96AF 9C7B
+96B0 9C35
+96B1 9C10
+96B2 9B7F
+96B3 9BCF
+96B4 29E2D
+96B5 9B9F
+96B6 2A1F5
+96B7 2A0FE
+96B8 9D21
+96B9 4CAE
+96BA 24104
+96BB 9E18
+96BC 4CB0
+96BD 9D0C
+96BE 2A1B4
+96BF 2A0ED
+96C0 2A0F3
+96C1 2992F
+96C2 9DA5
+96C3 84BD
+96C4 26E12
+96C5 26FDF
+96C6 26B82
+96C7 85FC
+96C8 4533
+96C9 26DA4
+96CA 26E84
+96CB 26DF0
+96CC 8420
+96CD 85EE
+96CE 26E00
+96CF 237D7
+96D0 26064
+96D1 79E2
+96D2 2359C
+96D3 23640
+96D4 492D
+96D5 249DE
+96D6 3D62
+96D7 93DB
+96D8 92BE
+96D9 9348
+96DA 202BF
+96DB 78B9
+96DC 9277
+96DD 944D
+96DE 4FE4
+96DF 3440
+96E0 9064
+96E1 2555D
+96E2 783D
+96E3 7854
+96E4 78B6
+96E5 784B
+96E6 21757
+96E7 231C9
+96E8 24941
+96E9 369A
+96EA 4F72
+96EB 6FDA
+96EC 6FD9
+96EE 701E
+96EF 5414
+96F0 241B5
+96F1 57BB
+96F2 58F3
+96F3 578A
+96F4 9D16
+96F5 57D7
+96F6 7134
+96F7 34AF
+96F8 241AC
+96F9 71EB
+96FA 26C40
+96FB 24F97
+96FD 217B5
+96FE 28A49
+9740 610C
+9741 5ACE
+9742 5A0B
+9743 42BC
+9744 24488
+9745 372C
+9746 4B7B
+9747 289FC
+9748 93BB
+9749 93B8
+974A 218D6
+974B 20F1D
+974C 8472
+974D 26CC0
+974E 21413
+974F 242FA
+9750 22C26
+9751 243C1
+9752 5994
+9753 23DB7
+9754 26741
+9755 7DA8
+9756 2615B
+9757 260A4
+9758 249B9
+9759 2498B
+975A 289FA
+975B 92E5
+975C 73E2
+975D 3EE9
+975E 74B4
+975F 28B63
+9760 2189F
+9761 3EE1
+9762 24AB3
+9763 6AD8
+9764 73F3
+9765 73FB
+9766 3ED6
+9767 24A3E
+9768 24A94
+9769 217D9
+976A 24A66
+976B 203A7
+976C 21424
+976D 249E5
+976E 7448
+976F 24916
+9770 70A5
+9771 24976
+9772 9284
+9773 73E6
+9774 935F
+9775 204FE
+9776 9331
+9777 28ACE
+9778 28A16
+9779 9386
+977A 28BE7
+977B 255D5
+977C 4935
+977D 28A82
+977E 716B
+97A1 24943
+97A2 20CFF
+97A3 56A4
+97A4 2061A
+97A5 20BEB
+97A6 20CB8
+97A7 5502
+97A8 79C4
+97A9 217FA
+97AA 7DFE
+97AB 216C2
+97AC 24A50
+97AD 21852
+97AE 452E
+97AF 9401
+97B0 370A
+97B1 28AC0
+97B2 249AD
+97B3 59B0
+97B4 218BF
+97B5 21883
+97B6 27484
+97B7 5AA1
+97B8 36E2
+97B9 23D5B
+97BA 36B0
+97BB 925F
+97BC 5A79
+97BD 28A81
+97BE 21862
+97BF 9374
+97C0 3CCD
+97C1 20AB4
+97C2 4A96
+97C3 398A
+97C4 50F4
+97C5 3D69
+97C6 3D4C
+97C7 2139C
+97C8 7175
+97C9 42FB
+97CA 28218
+97CB 6E0F
+97CC 290E4
+97CD 44EB
+97CE 6D57
+97CF 27E4F
+97D0 7067
+97D1 6CAF
+97D2 3CD6
+97D3 23FED
+97D4 23E2D
+97D5 6E02
+97D6 6F0C
+97D7 3D6F
+97D8 203F5
+97D9 7551
+97DA 36BC
+97DB 34C8
+97DC 4680
+97DD 3EDA
+97DE 4871
+97DF 59C4
+97E0 926E
+97E1 493E
+97E2 8F41
+97E3 28C1C
+97E4 26BC0
+97E5 5812
+97E6 57C8
+97E7 36D6
+97E8 21452
+97E9 70FE
+97EA 24362
+97EB 24A71
+97EC 22FE3
+97ED 212B0
+97EE 223BD
+97EF 68B9
+97F0 6967
+97F1 21398
+97F2 234E5
+97F3 27BF4
+97F4 236DF
+97F5 28A83
+97F6 237D6
+97F7 233FA
+97F8 24C9F
+97F9 6A1A
+97FA 236AD
+97FB 26CB7
+97FC 843E
+97FD 44DF
+97FE 44CE
+9840 26D26
+9841 26D51
+9842 26C82
+9843 26FDE
+9844 6F17
+9845 27109
+9846 833D
+9847 2173A
+9848 83ED
+9849 26C80
+984A 27053
+984B 217DB
+984C 5989
+984D 5A82
+984E 217B3
+984F 5A61
+9850 5A71
+9851 21905
+9852 241FC
+9853 372D
+9854 59EF
+9855 2173C
+9856 36C7
+9857 718E
+9858 9390
+9859 669A
+985A 242A5
+985B 5A6E
+985C 5A2B
+985D 24293
+985E 6A2B
+985F 23EF9
+9860 27736
+9861 2445B
+9862 242CA
+9863 711D
+9864 24259
+9865 289E1
+9866 4FB0
+9867 26D28
+9868 5CC2
+9869 244CE
+986A 27E4D
+986B 243BD
+986C 6A0C
+986D 24256
+986E 21304
+986F 70A6
+9870 7133
+9871 243E9
+9872 3DA5
+9873 6CDF
+9874 2F825
+9875 24A4F
+9876 7E65
+9877 59EB
+9878 5D2F
+9879 3DF3
+987A 5F5C
+987B 24A5D
+987C 217DF
+987D 7DA4
+987E 8426
+98A1 5485
+98A2 23AFA
+98A3 23300
+98A4 20214
+98A5 577E
+98A6 208D5
+98A7 20619
+98A8 3FE5
+98A9 21F9E
+98AA 2A2B6
+98AB 7003
+98AC 2915B
+98AD 5D70
+98AE 738F
+98AF 7CD3
+98B0 28A59
+98B1 29420
+98B2 4FC8
+98B3 7FE7
+98B4 72CD
+98B5 7310
+98B6 27AF4
+98B7 7338
+98B8 7339
+98B9 256F6
+98BA 7341
+98BB 7348
+98BC 3EA9
+98BD 27B18
+98BE 906C
+98BF 71F5
+98C0 248F2
+98C1 73E1
+98C2 81F6
+98C3 3ECA
+98C4 770C
+98C5 3ED1
+98C6 6CA2
+98C7 56FD
+98C8 7419
+98C9 741E
+98CA 741F
+98CB 3EE2
+98CC 3EF0
+98CD 3EF4
+98CE 3EFA
+98CF 74D3
+98D0 3F0E
+98D1 3F53
+98D2 7542
+98D3 756D
+98D4 7572
+98D5 758D
+98D6 3F7C
+98D7 75C8
+98D8 75DC
+98D9 3FC0
+98DA 764D
+98DB 3FD7
+98DC 7674
+98DD 3FDC
+98DE 767A
+98DF 24F5C
+98E0 7188
+98E1 5623
+98E2 8980
+98E3 5869
+98E4 401D
+98E5 7743
+98E6 4039
+98E7 6761
+98E8 4045
+98E9 35DB
+98EA 7798
+98EB 406A
+98EC 406F
+98ED 5C5E
+98EE 77BE
+98EF 77CB
+98F0 58F2
+98F1 7818
+98F2 70B9
+98F3 781C
+98F4 40A8
+98F5 7839
+98F6 7847
+98F7 7851
+98F8 7866
+98F9 8448
+98FA 25535
+98FB 7933
+98FC 6803
+98FD 7932
+98FE 4103
+9940 4109
+9941 7991
+9942 7999
+9943 8FBB
+9944 7A06
+9945 8FBC
+9946 4167
+9947 7A91
+9948 41B2
+9949 7ABC
+994A 8279
+994B 41C4
+994C 7ACF
+994D 7ADB
+994E 41CF
+994F 4E21
+9950 7B62
+9951 7B6C
+9952 7B7B
+9953 7C12
+9954 7C1B
+9955 4260
+9956 427A
+9957 7C7B
+9958 7C9C
+9959 428C
+995A 7CB8
+995B 4294
+995C 7CED
+995D 8F93
+995E 70C0
+995F 20CCF
+9960 7DCF
+9961 7DD4
+9962 7DD0
+9963 7DFD
+9964 7FAE
+9965 7FB4
+9966 729F
+9967 4397
+9968 8020
+9969 8025
+996A 7B39
+996B 802E
+996C 8031
+996D 8054
+996E 3DCC
+996F 57B4
+9970 70A0
+9971 80B7
+9972 80E9
+9973 43ED
+9974 810C
+9975 732A
+9976 810E
+9977 8112
+9978 7560
+9979 8114
+997A 4401
+997B 3B39
+997C 8156
+997D 8159
+997E 815A
+99A1 4413
+99A2 583A
+99A3 817C
+99A4 8184
+99A5 4425
+99A6 8193
+99A7 442D
+99A8 81A5
+99A9 57EF
+99AA 81C1
+99AB 81E4
+99AC 8254
+99AD 448F
+99AE 82A6
+99AF 8276
+99B0 82CA
+99B1 82D8
+99B2 82FF
+99B3 44B0
+99B4 8357
+99B5 9669
+99B6 698A
+99B7 8405
+99B8 70F5
+99B9 8464
+99BA 60E3
+99BB 8488
+99BC 4504
+99BD 84BE
+99BE 84E1
+99BF 84F8
+99C0 8510
+99C1 8538
+99C2 8552
+99C3 453B
+99C4 856F
+99C5 8570
+99C6 85E0
+99C7 4577
+99C8 8672
+99C9 8692
+99CA 86B2
+99CB 86EF
+99CC 9645
+99CD 878B
+99CE 4606
+99CF 4617
+99D0 88AE
+99D1 88FF
+99D2 8924
+99D3 8947
+99D4 8991
+99D5 27967
+99D6 8A29
+99D7 8A38
+99D8 8A94
+99D9 8AB4
+99DA 8C51
+99DB 8CD4
+99DC 8CF2
+99DD 8D1C
+99DE 4798
+99DF 585F
+99E0 8DC3
+99E1 47ED
+99E2 4EEE
+99E3 8E3A
+99E4 55D8
+99E5 5754
+99E6 8E71
+99E7 55F5
+99E8 8EB0
+99E9 4837
+99EA 8ECE
+99EB 8EE2
+99EC 8EE4
+99ED 8EED
+99EE 8EF2
+99EF 8FB7
+99F0 8FC1
+99F1 8FCA
+99F2 8FCC
+99F3 9033
+99F4 99C4
+99F5 48AD
+99F6 98E0
+99F7 9213
+99F8 491E
+99F9 9228
+99FA 9258
+99FB 926B
+99FC 92B1
+99FD 92AE
+99FE 92BF
+9A40 92E3
+9A41 92EB
+9A42 92F3
+9A43 92F4
+9A44 92FD
+9A45 9343
+9A46 9384
+9A47 93AD
+9A48 4945
+9A49 4951
+9A4A 9EBF
+9A4B 9417
+9A4C 5301
+9A4D 941D
+9A4E 942D
+9A4F 943E
+9A50 496A
+9A51 9454
+9A52 9479
+9A53 952D
+9A54 95A2
+9A55 49A7
+9A56 95F4
+9A57 9633
+9A58 49E5
+9A59 67A0
+9A5A 4A24
+9A5B 9740
+9A5C 4A35
+9A5D 97B2
+9A5E 97C2
+9A5F 5654
+9A60 4AE4
+9A61 60E8
+9A62 98B9
+9A63 4B19
+9A64 98F1
+9A65 5844
+9A66 990E
+9A67 9919
+9A68 51B4
+9A69 991C
+9A6A 9937
+9A6B 9942
+9A6C 995D
+9A6D 9962
+9A6E 4B70
+9A6F 99C5
+9A70 4B9D
+9A71 9A3C
+9A72 9B0F
+9A73 7A83
+9A74 9B69
+9A75 9B81
+9A76 9BDD
+9A77 9BF1
+9A78 9BF4
+9A79 4C6D
+9A7A 9C20
+9A7B 376F
+9A7C 21BC2
+9A7D 9D49
+9A7E 9C3A
+9AA1 9EFE
+9AA2 5650
+9AA3 9D93
+9AA4 9DBD
+9AA5 9DC0
+9AA6 9DFC
+9AA7 94F6
+9AA8 8FB6
+9AA9 9E7B
+9AAA 9EAC
+9AAB 9EB1
+9AAC 9EBD
+9AAD 9EC6
+9AAE 94DC
+9AAF 9EE2
+9AB0 9EF1
+9AB1 9EF8
+9AB2 7AC8
+9AB3 9F44
+9AB4 20094
+9AB5 202B7
+9AB6 203A0
+9AB7 691A
+9AB8 94C3
+9AB9 59AC
+9ABA 204D7
+9ABB 5840
+9ABC 94C1
+9ABD 37B9
+9ABE 205D5
+9ABF 20615
+9AC0 20676
+9AC1 216BA
+9AC2 5757
+9AC3 7173
+9AC4 20AC2
+9AC5 20ACD
+9AC6 20BBF
+9AC7 546A
+9AC8 2F83B
+9AC9 20BCB
+9ACA 549E
+9ACB 20BFB
+9ACC 20C3B
+9ACD 20C53
+9ACE 20C65
+9ACF 20C7C
+9AD0 60E7
+9AD1 20C8D
+9AD2 567A
+9AD3 20CB5
+9AD4 20CDD
+9AD5 20CED
+9AD6 20D6F
+9AD7 20DB2
+9AD8 20DC8
+9AD9 6955
+9ADA 9C2F
+9ADB 87A5
+9ADC 20E04
+9ADD 20E0E
+9ADE 20ED7
+9ADF 20F90
+9AE0 20F2D
+9AE1 20E73
+9AE2 5C20
+9AE3 20FBC
+9AE4 5E0B
+9AE5 2105C
+9AE6 2104F
+9AE7 21076
+9AE8 671E
+9AE9 2107B
+9AEA 21088
+9AEB 21096
+9AEC 3647
+9AED 210BF
+9AEE 210D3
+9AEF 2112F
+9AF0 2113B
+9AF1 5364
+9AF2 84AD
+9AF3 212E3
+9AF4 21375
+9AF5 21336
+9AF6 8B81
+9AF7 21577
+9AF8 21619
+9AF9 217C3
+9AFA 217C7
+9AFB 4E78
+9AFC 70BB
+9AFD 2182D
+9AFE 2196A
+9B40 21A2D
+9B41 21A45
+9B42 21C2A
+9B43 21C70
+9B44 21CAC
+9B45 21EC8
+9B46 62C3
+9B47 21ED5
+9B48 21F15
+9B49 7198
+9B4A 6855
+9B4B 22045
+9B4C 69E9
+9B4D 36C8
+9B4E 2227C
+9B4F 223D7
+9B50 223FA
+9B51 2272A
+9B52 22871
+9B53 2294F
+9B54 82FD
+9B55 22967
+9B56 22993
+9B57 22AD5
+9B58 89A5
+9B59 22AE8
+9B5A 8FA0
+9B5B 22B0E
+9B5C 97B8
+9B5D 22B3F
+9B5E 9847
+9B5F 9ABD
+9B60 22C4C
+9B62 22C88
+9B63 22CB7
+9B64 25BE8
+9B65 22D08
+9B66 22D12
+9B67 22DB7
+9B68 22D95
+9B69 22E42
+9B6A 22F74
+9B6B 22FCC
+9B6C 23033
+9B6D 23066
+9B6E 2331F
+9B6F 233DE
+9B70 5FB1
+9B71 6648
+9B72 66BF
+9B73 27A79
+9B74 23567
+9B75 235F3
+9B77 249BA
+9B79 2361A
+9B7A 23716
+9B7C 20346
+9B7D 58B5
+9B7E 670E
+9BA1 6918
+9BA2 23AA7
+9BA3 27657
+9BA4 25FE2
+9BA5 23E11
+9BA6 23EB9
+9BA7 275FE
+9BA8 2209A
+9BA9 48D0
+9BAA 4AB8
+9BAB 24119
+9BAC 28A9A
+9BAD 242EE
+9BAE 2430D
+9BAF 2403B
+9BB0 24334
+9BB1 24396
+9BB2 24A45
+9BB3 205CA
+9BB4 51D2
+9BB5 20611
+9BB6 599F
+9BB7 21EA8
+9BB8 3BBE
+9BB9 23CFF
+9BBA 24404
+9BBB 244D6
+9BBC 5788
+9BBD 24674
+9BBE 399B
+9BBF 2472F
+9BC0 285E8
+9BC1 299C9
+9BC2 3762
+9BC3 221C3
+9BC4 8B5E
+9BC5 28B4E
+9BC7 24812
+9BC8 248FB
+9BC9 24A15
+9BCA 7209
+9BCB 24AC0
+9BCC 20C78
+9BCD 5965
+9BCE 24EA5
+9BCF 24F86
+9BD0 20779
+9BD1 8EDA
+9BD2 2502C
+9BD3 528F
+9BD4 573F
+9BD5 7171
+9BD6 25299
+9BD7 25419
+9BD8 23F4A
+9BD9 24AA7
+9BDA 55BC
+9BDB 25446
+9BDC 2546E
+9BDD 26B52
+9BDF 3473
+9BE0 2553F
+9BE1 27632
+9BE2 2555E
+9BE3 4718
+9BE4 25562
+9BE5 25566
+9BE6 257C7
+9BE7 2493F
+9BE8 2585D
+9BE9 5066
+9BEA 34FB
+9BEB 233CC
+9BED 25903
+9BEE 477C
+9BEF 28948
+9BF0 25AAE
+9BF1 25B89
+9BF2 25C06
+9BF3 21D90
+9BF4 57A1
+9BF5 7151
+9BF7 26102
+9BF8 27C12
+9BF9 9056
+9BFA 261B2
+9BFB 24F9A
+9BFC 8B62
+9BFD 26402
+9BFE 2644A
+9C40 5D5B
+9C41 26BF7
+9C43 26484
+9C44 2191C
+9C45 8AEA
+9C46 249F6
+9C47 26488
+9C48 23FEF
+9C49 26512
+9C4A 4BC0
+9C4B 265BF
+9C4C 266B5
+9C4D 2271B
+9C4E 9465
+9C4F 257E1
+9C50 6195
+9C51 5A27
+9C52 2F8CD
+9C54 56B9
+9C55 24521
+9C56 266FC
+9C57 4E6A
+9C58 24934
+9C59 9656
+9C5A 6D8F
+9C5B 26CBD
+9C5C 3618
+9C5D 8977
+9C5E 26799
+9C5F 2686E
+9C60 26411
+9C61 2685E
+9C63 268C7
+9C64 7B42
+9C65 290C0
+9C66 20A11
+9C67 26926
+9C69 26939
+9C6A 7A45
+9C6C 269FA
+9C6D 9A26
+9C6E 26A2D
+9C6F 365F
+9C70 26469
+9C71 20021
+9C72 7983
+9C73 26A34
+9C74 26B5B
+9C75 5D2C
+9C76 23519
+9C78 26B9D
+9C79 46D0
+9C7A 26CA4
+9C7B 753B
+9C7C 8865
+9C7D 26DAE
+9C7E 58B6
+9CA1 371C
+9CA2 2258D
+9CA3 2704B
+9CA4 271CD
+9CA5 3C54
+9CA6 27280
+9CA7 27285
+9CA8 9281
+9CA9 2217A
+9CAA 2728B
+9CAB 9330
+9CAC 272E6
+9CAD 249D0
+9CAE 6C39
+9CAF 949F
+9CB0 27450
+9CB1 20EF8
+9CB2 8827
+9CB3 88F5
+9CB4 22926
+9CB5 28473
+9CB6 217B1
+9CB7 6EB8
+9CB8 24A2A
+9CB9 21820
+9CBA 39A4
+9CBB 36B9
+9CBE 453F
+9CBF 66B6
+9CC0 29CAD
+9CC1 298A4
+9CC2 8943
+9CC3 277CC
+9CC4 27858
+9CC5 56D6
+9CC6 40DF
+9CC7 2160A
+9CC8 39A1
+9CC9 2372F
+9CCA 280E8
+9CCB 213C5
+9CCC 71AD
+9CCD 8366
+9CCE 279DD
+9CCF 291A8
+9CD1 4CB7
+9CD2 270AF
+9CD3 289AB
+9CD4 279FD
+9CD5 27A0A
+9CD6 27B0B
+9CD7 27D66
+9CD8 2417A
+9CD9 7B43
+9CDA 797E
+9CDB 28009
+9CDC 6FB5
+9CDD 2A2DF
+9CDE 6A03
+9CDF 28318
+9CE0 53A2
+9CE1 26E07
+9CE2 93BF
+9CE3 6836
+9CE4 975D
+9CE5 2816F
+9CE6 28023
+9CE7 269B5
+9CE8 213ED
+9CE9 2322F
+9CEA 28048
+9CEB 5D85
+9CEC 28C30
+9CED 28083
+9CEE 5715
+9CEF 9823
+9CF0 28949
+9CF1 5DAB
+9CF2 24988
+9CF3 65BE
+9CF4 69D5
+9CF5 53D2
+9CF6 24AA5
+9CF7 23F81
+9CF8 3C11
+9CF9 6736
+9CFA 28090
+9CFB 280F4
+9CFC 2812E
+9CFD 21FA1
+9CFE 2814F
+9D40 28189
+9D41 281AF
+9D42 2821A
+9D43 28306
+9D44 2832F
+9D45 2838A
+9D46 35CA
+9D47 28468
+9D48 286AA
+9D49 48FA
+9D4A 63E6
+9D4B 28956
+9D4C 7808
+9D4D 9255
+9D4E 289B8
+9D4F 43F2
+9D50 289E7
+9D51 43DF
+9D52 289E8
+9D53 28B46
+9D54 28BD4
+9D55 59F8
+9D56 28C09
+9D58 28FC5
+9D59 290EC
+9D5B 29110
+9D5C 2913C
+9D5D 3DF7
+9D5E 2915E
+9D5F 24ACA
+9D60 8FD0
+9D61 728F
+9D62 568B
+9D63 294E7
+9D64 295E9
+9D65 295B0
+9D66 295B8
+9D67 29732
+9D68 298D1
+9D69 29949
+9D6A 2996A
+9D6B 299C3
+9D6C 29A28
+9D6D 29B0E
+9D6E 29D5A
+9D6F 29D9B
+9D70 7E9F
+9D71 29EF8
+9D72 29F23
+9D73 4CA4
+9D74 9547
+9D75 2A293
+9D76 71A2
+9D77 2A2FF
+9D78 4D91
+9D79 9012
+9D7A 2A5CB
+9D7B 4D9C
+9D7C 20C9C
+9D7D 8FBE
+9D7E 55C1
+9DA1 8FBA
+9DA2 224B0
+9DA3 8FB9
+9DA4 24A93
+9DA5 4509
+9DA6 7E7F
+9DA7 6F56
+9DA8 6AB1
+9DA9 4EEA
+9DAA 34E4
+9DAB 28B2C
+9DAC 2789D
+9DAD 373A
+9DAE 8E80
+9DAF 217F5
+9DB0 28024
+9DB1 28B6C
+9DB2 28B99
+9DB3 27A3E
+9DB4 266AF
+9DB5 3DEB
+9DB6 27655
+9DB7 23CB7
+9DB8 25635
+9DB9 25956
+9DBA 4E9A
+9DBB 25E81
+9DBC 26258
+9DBD 56BF
+9DBE 20E6D
+9DBF 8E0E
+9DC0 5B6D
+9DC1 23E88
+9DC2 24C9E
+9DC3 63DE
+9DC5 217F6
+9DC6 2187B
+9DC7 6530
+9DC8 562D
+9DC9 25C4A
+9DCA 541A
+9DCB 25311
+9DCC 3DC6
+9DCD 29D98
+9DCE 4C7D
+9DCF 5622
+9DD0 561E
+9DD1 7F49
+9DD2 25ED8
+9DD3 5975
+9DD4 23D40
+9DD5 8770
+9DD6 4E1C
+9DD7 20FEA
+9DD8 20D49
+9DD9 236BA
+9DDA 8117
+9DDB 9D5E
+9DDC 8D18
+9DDD 763B
+9DDE 9C45
+9DDF 764E
+9DE0 77B9
+9DE1 9345
+9DE2 5432
+9DE3 8148
+9DE4 82F7
+9DE5 5625
+9DE6 8132
+9DE7 8418
+9DE8 80BD
+9DE9 55EA
+9DEA 7962
+9DEB 5643
+9DEC 5416
+9DED 20E9D
+9DEE 35CE
+9DEF 5605
+9DF0 55F1
+9DF1 66F1
+9DF2 282E2
+9DF3 362D
+9DF4 7534
+9DF5 55F0
+9DF6 55BA
+9DF7 5497
+9DF8 5572
+9DF9 20C41
+9DFA 20C96
+9DFB 5ED0
+9DFC 25148
+9DFD 20E76
+9DFE 22C62
+9E40 20EA2
+9E41 9EAB
+9E42 7D5A
+9E43 55DE
+9E44 21075
+9E45 629D
+9E46 976D
+9E47 5494
+9E48 8CCD
+9E49 71F6
+9E4A 9176
+9E4B 63FC
+9E4C 63B9
+9E4D 63FE
+9E4E 5569
+9E4F 22B43
+9E50 9C72
+9E51 22EB3
+9E52 519A
+9E53 34DF
+9E54 20DA7
+9E55 51A7
+9E56 544D
+9E57 551E
+9E58 5513
+9E59 7666
+9E5A 8E2D
+9E5B 2688A
+9E5C 75B1
+9E5D 80B6
+9E5E 8804
+9E5F 8786
+9E60 88C7
+9E61 81B6
+9E62 841C
+9E63 210C1
+9E64 44EC
+9E65 7304
+9E66 24706
+9E67 5B90
+9E68 830B
+9E69 26893
+9E6A 567B
+9E6B 226F4
+9E6C 27D2F
+9E6D 241A3
+9E6E 27D73
+9E6F 26ED0
+9E70 272B6
+9E71 9170
+9E72 211D9
+9E73 9208
+9E74 23CFC
+9E75 2A6A9
+9E76 20EAC
+9E77 20EF9
+9E78 7266
+9E79 21CA2
+9E7A 474E
+9E7B 24FC2
+9E7C 27FF9
+9E7D 20FEB
+9E7E 40FA
+9EA1 9C5D
+9EA2 651F
+9EA3 22DA0
+9EA4 48F3
+9EA5 247E0
+9EA6 29D7C
+9EA7 20FEC
+9EA8 20E0A
+9EAA 275A3
+9EAB 20FED
+9EAD 26048
+9EAE 21187
+9EAF 71A3
+9EB0 7E8E
+9EB1 9D50
+9EB2 4E1A
+9EB3 4E04
+9EB4 3577
+9EB5 5B0D
+9EB6 6CB2
+9EB7 5367
+9EB8 36AC
+9EB9 39DC
+9EBA 537D
+9EBB 36A5
+9EBC 24618
+9EBD 589A
+9EBE 24B6E
+9EBF 822D
+9EC0 544B
+9EC1 57AA
+9EC2 25A95
+9EC3 20979
+9EC5 3A52
+9EC6 22465
+9EC7 7374
+9EC8 29EAC
+9EC9 4D09
+9ECA 9BED
+9ECB 23CFE
+9ECC 29F30
+9ECD 4C5B
+9ECE 24FA9
+9ECF 2959E
+9ED0 29FDE
+9ED1 845C
+9ED2 23DB6
+9ED3 272B2
+9ED4 267B3
+9ED5 23720
+9ED6 632E
+9ED7 7D25
+9ED8 23EF7
+9ED9 23E2C
+9EDA 3A2A
+9EDB 9008
+9EDC 52CC
+9EDD 3E74
+9EDE 367A
+9EDF 45E9
+9EE0 2048E
+9EE1 7640
+9EE2 5AF0
+9EE3 20EB6
+9EE4 787A
+9EE5 27F2E
+9EE6 58A7
+9EE7 40BF
+9EE8 567C
+9EE9 9B8B
+9EEA 5D74
+9EEB 7654
+9EEC 2A434
+9EED 9E85
+9EEE 4CE1
+9EF0 37FB
+9EF1 6119
+9EF2 230DA
+9EF3 243F2
+9EF5 565D
+9EF6 212A9
+9EF7 57A7
+9EF8 24963
+9EF9 29E06
+9EFA 5234
+9EFB 270AE
+9EFC 35AD
+9EFE 9D7C
+9F40 7C56
+9F41 9B39
+9F42 57DE
+9F43 2176C
+9F44 5C53
+9F45 64D3
+9F46 294D0
+9F47 26335
+9F48 27164
+9F49 86AD
+9F4A 20D28
+9F4B 26D22
+9F4C 24AE2
+9F4D 20D71
+9F4F 51FE
+9F50 21F0F
+9F51 5D8E
+9F52 9703
+9F53 21DD1
+9F54 9E81
+9F55 904C
+9F56 7B1F
+9F57 9B02
+9F58 5CD1
+9F59 7BA3
+9F5A 6268
+9F5B 6335
+9F5C 9AFF
+9F5D 7BCF
+9F5E 9B2A
+9F5F 7C7E
+9F61 7C42
+9F62 7C86
+9F63 9C15
+9F64 7BFC
+9F65 9B09
+9F67 9C1B
+9F68 2493E
+9F69 9F5A
+9F6A 5573
+9F6B 5BC3
+9F6C 4FFD
+9F6D 9E98
+9F6E 4FF2
+9F6F 5260
+9F70 3E06
+9F71 52D1
+9F72 5767
+9F73 5056
+9F74 59B7
+9F75 5E12
+9F76 97C8
+9F77 9DAB
+9F78 8F5C
+9F79 5469
+9F7A 97B4
+9F7B 9940
+9F7C 97BA
+9F7D 532C
+9F7E 6130
+9FA1 692C
+9FA2 53DA
+9FA3 9C0A
+9FA4 9D02
+9FA5 4C3B
+9FA6 9641
+9FA7 6980
+9FA8 50A6
+9FA9 7546
+9FAA 2176D
+9FAB 99DA
+9FAC 5273
+9FAE 9159
+9FAF 9681
+9FB0 915C
+9FB2 9151
+9FB3 28E97
+9FB4 637F
+9FB5 26D23
+9FB6 6ACA
+9FB7 5611
+9FB8 918E
+9FB9 757A
+9FBA 6285
+9FBB 203FC
+9FBC 734F
+9FBD 7C70
+9FBE 25C21
+9FBF 23CFD
+9FC1 24919
+9FC2 76D6
+9FC3 9B9D
+9FC4 4E2A
+9FC5 20CD4
+9FC6 83BE
+9FC7 8842
+9FC9 5C4A
+9FCA 69C0
+9FCC 577A
+9FCD 521F
+9FCE 5DF5
+9FCF 4ECE
+9FD0 6C31
+9FD1 201F2
+9FD2 4F39
+9FD3 549C
+9FD4 54DA
+9FD5 529A
+9FD6 8D82
+9FD7 35FE
+9FD9 35F3
+9FDB 6B52
+9FDC 917C
+9FDD 9FA5
+9FDE 9B97
+9FDF 982E
+9FE0 98B4
+9FE1 9ABA
+9FE2 9EA8
+9FE3 9E84
+9FE4 717A
+9FE5 7B14
+9FE7 6BFA
+9FE8 8818
+9FE9 7F78
+9FEB 5620
+9FEC 2A64A
+9FED 8E77
+9FEE 9F53
+9FF0 8DD4
+9FF1 8E4F
+9FF2 9E1C
+9FF3 8E01
+9FF4 6282
+9FF5 2837D
+9FF6 8E28
+9FF7 8E75
+9FF8 7AD3
+9FF9 24A77
+9FFA 7A3E
+9FFB 78D8
+9FFC 6CEA
+9FFD 8A67
+9FFE 7607
+A040 28A5A
+A041 9F26
+A042 6CCE
+A043 87D6
+A044 75C3
+A045 2A2B2
+A046 7853
+A047 2F840
+A048 8D0C
+A049 72E2
+A04A 7371
+A04B 8B2D
+A04C 7302
+A04D 74F1
+A04E 8CEB
+A04F 24ABB
+A050 862F
+A051 5FBA
+A052 88A0
+A053 44B7
+A055 2183B
+A056 26E05
+A058 8A7E
+A059 2251B
+A05B 60FD
+A05C 7667
+A05D 9AD7
+A05E 9D44
+A05F 936E
+A060 9B8F
+A061 87F5
+A064 8CF7
+A065 732C
+A066 9721
+A067 9BB0
+A068 35D6
+A069 72B2
+A06A 4C07
+A06B 7C51
+A06C 994A
+A06D 26159
+A06E 6159
+A06F 4C04
+A070 9E96
+A071 617D
+A073 575F
+A074 616F
+A075 62A6
+A076 6239
+A078 3A5C
+A079 61E2
+A07A 53AA
+A07B 233F5
+A07C 6364
+A07D 6802
+A07E 35D2
+A0A1 5D57
+A0A2 28BC2
+A0A3 8FDA
+A0A4 28E39
+A0A6 50D9
+A0A7 21D46
+A0A8 7906
+A0A9 5332
+A0AA 9638
+A0AB 20F3B
+A0AC 4065
+A0AE 77FE
+A0B0 7CC2
+A0B1 25F1A
+A0B2 7CDA
+A0B3 7A2D
+A0B4 8066
+A0B5 8063
+A0B6 7D4D
+A0B7 7505
+A0B8 74F2
+A0B9 8994
+A0BA 821A
+A0BB 670C
+A0BC 8062
+A0BD 27486
+A0BE 805B
+A0BF 74F0
+A0C0 8103
+A0C1 7724
+A0C2 8989
+A0C3 267CC
+A0C4 7553
+A0C5 26ED1
+A0C6 87A9
+A0C7 87CE
+A0C8 81C8
+A0C9 878C
+A0CA 8A49
+A0CB 8CAD
+A0CC 8B43
+A0CD 772B
+A0CE 74F8
+A0CF 84DA
+A0D0 3635
+A0D1 69B2
+A0D2 8DA6
+A0D4 89A9
+A0D6 6DB9
+A0D7 87C1
+A0D8 24011
+A0D9 74E7
+A0DA 3DDB
+A0DB 7176
+A0DC 60A4
+A0DD 619C
+A0DE 3CD1
+A0E0 6077
+A0E2 7F71
+A0E3 28B2D
+A0E5 60E9
+A0E6 4B7E
+A0E7 5220
+A0E8 3C18
+A0E9 23CC7
+A0EA 25ED7
+A0EB 27656
+A0EC 25531
+A0ED 21944
+A0EE 212FE
+A0EF 29903
+A0F0 26DDC
+A0F1 270AD
+A0F2 5CC1
+A0F3 261AD
+A0F4 28A0F
+A0F5 23677
+A0F6 200EE
+A0F7 26846
+A0F8 24F0E
+A0F9 4562
+A0FA 5B1F
+A0FB 2634C
+A0FC 9F50
+A0FD 9EA6
+A0FE 2626B
+C6A1 2460
+C6A2 2461
+C6A3 2462
+C6A4 2463
+C6A5 2464
+C6A6 2465
+C6A7 2466
+C6A8 2467
+C6A9 2468
+C6AA 2469
+C6AB 2474
+C6AC 2475
+C6AD 2476
+C6AE 2477
+C6AF 2478
+C6B0 2479
+C6B1 247A
+C6B2 247B
+C6B3 247C
+C6B4 247D
+C6B5 2170
+C6B6 2171
+C6B7 2172
+C6B8 2173
+C6B9 2174
+C6BA 2175
+C6BB 2176
+C6BC 2177
+C6BD 2178
+C6BE 2179
+C6BF 4E36
+C6C0 4E3F
+C6C1 4E85
+C6C2 4EA0
+C6C3 5182
+C6C4 5196
+C6C5 51AB
+C6C6 52F9
+C6C7 5338
+C6C8 5369
+C6C9 53B6
+C6CA 590A
+C6CB 5B80
+C6CC 5DDB
+C6CD 2F33
+C6CE 5E7F
+C6D0 5F50
+C6D1 5F61
+C6D2 6534
+C6D4 7592
+C6D6 8FB5
+C6D8 00A8
+C6D9 02C6
+C6DA 30FD
+C6DB 30FE
+C6DC 309D
+C6DD 309E
+C6E0 3005
+C6E1 3006
+C6E2 3007
+C6E3 30FC
+C6E4 FF3B
+C6E5 FF3D
+C6E6 273D
+C6E7 3041
+C6E8 3042
+C6E9 3043
+C6EA 3044
+C6EB 3045
+C6EC 3046
+C6ED 3047
+C6EE 3048
+C6EF 3049
+C6F0 304A
+C6F1 304B
+C6F2 304C
+C6F3 304D
+C6F4 304E
+C6F5 304F
+C6F6 3050
+C6F7 3051
+C6F8 3052
+C6F9 3053
+C6FA 3054
+C6FB 3055
+C6FC 3056
+C6FD 3057
+C6FE 3058
+C740 3059
+C741 305A
+C742 305B
+C743 305C
+C744 305D
+C745 305E
+C746 305F
+C747 3060
+C748 3061
+C749 3062
+C74A 3063
+C74B 3064
+C74C 3065
+C74D 3066
+C74E 3067
+C74F 3068
+C750 3069
+C751 306A
+C752 306B
+C753 306C
+C754 306D
+C755 306E
+C756 306F
+C757 3070
+C758 3071
+C759 3072
+C75A 3073
+C75B 3074
+C75C 3075
+C75D 3076
+C75E 3077
+C75F 3078
+C760 3079
+C761 307A
+C762 307B
+C763 307C
+C764 307D
+C765 307E
+C766 307F
+C767 3080
+C768 3081
+C769 3082
+C76A 3083
+C76B 3084
+C76C 3085
+C76D 3086
+C76E 3087
+C76F 3088
+C770 3089
+C771 308A
+C772 308B
+C773 308C
+C774 308D
+C775 308E
+C776 308F
+C777 3090
+C778 3091
+C779 3092
+C77A 3093
+C77B 30A1
+C77C 30A2
+C77D 30A3
+C77E 30A4
+C7A1 30A5
+C7A2 30A6
+C7A3 30A7
+C7A4 30A8
+C7A5 30A9
+C7A6 30AA
+C7A7 30AB
+C7A8 30AC
+C7A9 30AD
+C7AA 30AE
+C7AB 30AF
+C7AC 30B0
+C7AD 30B1
+C7AE 30B2
+C7AF 30B3
+C7B0 30B4
+C7B1 30B5
+C7B2 30B6
+C7B3 30B7
+C7B4 30B8
+C7B5 30B9
+C7B6 30BA
+C7B7 30BB
+C7B8 30BC
+C7B9 30BD
+C7BA 30BE
+C7BB 30BF
+C7BC 30C0
+C7BD 30C1
+C7BE 30C2
+C7BF 30C3
+C7C0 30C4
+C7C1 30C5
+C7C2 30C6
+C7C3 30C7
+C7C4 30C8
+C7C5 30C9
+C7C6 30CA
+C7C7 30CB
+C7C8 30CC
+C7C9 30CD
+C7CA 30CE
+C7CB 30CF
+C7CC 30D0
+C7CD 30D1
+C7CE 30D2
+C7CF 30D3
+C7D0 30D4
+C7D1 30D5
+C7D2 30D6
+C7D3 30D7
+C7D4 30D8
+C7D5 30D9
+C7D6 30DA
+C7D7 30DB
+C7D8 30DC
+C7D9 30DD
+C7DA 30DE
+C7DB 30DF
+C7DC 30E0
+C7DD 30E1
+C7DE 30E2
+C7DF 30E3
+C7E0 30E4
+C7E1 30E5
+C7E2 30E6
+C7E3 30E7
+C7E4 30E8
+C7E5 30E9
+C7E6 30EA
+C7E7 30EB
+C7E8 30EC
+C7E9 30ED
+C7EA 30EE
+C7EB 30EF
+C7EC 30F0
+C7ED 30F1
+C7EE 30F2
+C7EF 30F3
+C7F0 30F4
+C7F1 30F5
+C7F2 30F6
+C7F3 0410
+C7F4 0411
+C7F5 0412
+C7F6 0413
+C7F7 0414
+C7F8 0415
+C7F9 0401
+C7FA 0416
+C7FB 0417
+C7FC 0418
+C7FD 0419
+C7FE 041A
+C840 041B
+C841 041C
+C842 041D
+C843 041E
+C844 041F
+C845 0420
+C846 0421
+C847 0422
+C848 0423
+C849 0424
+C84A 0425
+C84B 0426
+C84C 0427
+C84D 0428
+C84E 0429
+C84F 042A
+C850 042B
+C851 042C
+C852 042D
+C853 042E
+C854 042F
+C855 0430
+C856 0431
+C857 0432
+C858 0433
+C859 0434
+C85A 0435
+C85B 0451
+C85C 0436
+C85D 0437
+C85E 0438
+C85F 0439
+C860 043A
+C861 043B
+C862 043C
+C863 043D
+C864 043E
+C865 043F
+C866 0440
+C867 0441
+C868 0442
+C869 0443
+C86A 0444
+C86B 0445
+C86C 0446
+C86D 0447
+C86E 0448
+C86F 0449
+C870 044A
+C871 044B
+C872 044C
+C873 044D
+C874 044E
+C875 044F
+C876 21E7
+C877 21B8
+C878 21B9
+C879 31CF
+C87A 200CC
+C87B 4E5A
+C87C 2008A
+C87D 5202
+C87E 4491
+C8A1 9FB0
+C8A2 5188
+C8A3 9FB1
+C8A4 27607
+C8CD FFE2
+C8CE FFE4
+C8CF FF07
+C8D0 FF02
+C8D1 3231
+C8D2 2116
+C8D3 2121
+C8D4 309B
+C8D5 309C
+C8D6 2E80
+C8D7 2E84
+C8D8 2E86
+C8D9 2E87
+C8DA 2E88
+C8DB 2E8A
+C8DC 2E8C
+C8DD 2E8D
+C8DE 2E95
+C8DF 2E9C
+C8E0 2E9D
+C8E1 2EA5
+C8E2 2EA7
+C8E3 2EAA
+C8E4 2EAC
+C8E5 2EAE
+C8E6 2EB6
+C8E7 2EBC
+C8E8 2EBE
+C8E9 2EC6
+C8EA 2ECA
+C8EB 2ECC
+C8EC 2ECD
+C8ED 2ECF
+C8EE 2ED6
+C8EF 2ED7
+C8F0 2EDE
+C8F1 2EE3
+C8F5 0283
+C8F6 0250
+C8F7 025B
+C8F8 0254
+C8F9 0275
+C8FA 0153
+C8FB 00F8
+C8FC 014B
+C8FD 028A
+C8FE 026A
+F9D6 7881
+F9D7 92B9
+F9D8 88CF
+F9D9 58BB
+F9DA 6052
+F9DB 7CA7
+F9DC 5AFA
+F9DD 2554
+F9DE 2566
+F9DF 2557
+F9E0 2560
+F9E1 256C
+F9E2 2563
+F9E3 255A
+F9E4 2569
+F9E5 255D
+F9E6 2552
+F9E7 2564
+F9E8 2555
+F9E9 255E
+F9EA 256A
+F9EB 2561
+F9EC 2558
+F9ED 2567
+F9EE 255B
+F9EF 2553
+F9F0 2565
+F9F1 2556
+F9F2 255F
+F9F3 256B
+F9F4 2562
+F9F5 2559
+F9F6 2568
+F9F7 255C
+F9F8 2551
+F9F9 2550
+F9FA 256D
+F9FB 256E
+F9FC 2570
+F9FD 256F
+F9FE FFED
+FA40 20547
+FA41 92DB
+FA42 205DF
+FA43 23FC5
+FA44 854C
+FA45 42B5
+FA46 73EF
+FA47 51B5
+FA48 3649
+FA49 24942
+FA4A 289E4
+FA4B 9344
+FA4C 219DB
+FA4D 82EE
+FA4E 23CC8
+FA4F 783C
+FA50 6744
+FA51 62DF
+FA52 24933
+FA53 289AA
+FA54 202A0
+FA55 26BB3
+FA56 21305
+FA57 4FAB
+FA58 224ED
+FA59 5008
+FA5A 26D29
+FA5B 27A84
+FA5C 23600
+FA5D 24AB1
+FA5E 22513
+FA60 2037E
+FA61 5FA4
+FA62 20380
+FA63 20347
+FA64 6EDB
+FA65 2041F
+FA67 5101
+FA68 347A
+FA69 510E
+FA6A 986C
+FA6B 3743
+FA6C 8416
+FA6D 249A4
+FA6E 20487
+FA6F 5160
+FA70 233B4
+FA71 516A
+FA72 20BFF
+FA73 220FC
+FA74 202E5
+FA75 22530
+FA76 2058E
+FA77 23233
+FA78 21983
+FA79 5B82
+FA7A 877D
+FA7B 205B3
+FA7C 23C99
+FA7D 51B2
+FA7E 51B8
+FAA1 9D34
+FAA2 51C9
+FAA3 51CF
+FAA4 51D1
+FAA5 3CDC
+FAA6 51D3
+FAA7 24AA6
+FAA8 51B3
+FAA9 51E2
+FAAA 5342
+FAAB 51ED
+FAAC 83CD
+FAAD 693E
+FAAE 2372D
+FAAF 5F7B
+FAB0 520B
+FAB1 5226
+FAB2 523C
+FAB3 52B5
+FAB4 5257
+FAB5 5294
+FAB6 52B9
+FAB7 52C5
+FAB8 7C15
+FAB9 8542
+FABA 52E0
+FABB 860D
+FABC 26B13
+FABE 28ADE
+FABF 5549
+FAC0 6ED9
+FAC1 23F80
+FAC2 20954
+FAC3 23FEC
+FAC4 5333
+FAC6 20BE2
+FAC7 6CCB
+FAC8 21726
+FAC9 681B
+FACA 73D5
+FACB 604A
+FACC 3EAA
+FACD 38CC
+FACE 216E8
+FACF 71DD
+FAD0 44A2
+FAD1 536D
+FAD2 5374
+FAD3 286AB
+FAD4 537E
+FAD6 21596
+FAD7 21613
+FAD8 77E6
+FAD9 5393
+FADA 28A9B
+FADB 53A0
+FADC 53AB
+FADD 53AE
+FADE 73A7
+FADF 25772
+FAE0 3F59
+FAE1 739C
+FAE2 53C1
+FAE3 53C5
+FAE4 6C49
+FAE5 4E49
+FAE6 57FE
+FAE7 53D9
+FAE8 3AAB
+FAE9 20B8F
+FAEA 53E0
+FAEB 23FEB
+FAEC 22DA3
+FAED 53F6
+FAEE 20C77
+FAEF 5413
+FAF0 7079
+FAF1 552B
+FAF2 6657
+FAF3 6D5B
+FAF4 546D
+FAF5 26B53
+FAF6 20D74
+FAF7 555D
+FAF8 548F
+FAF9 54A4
+FAFA 47A6
+FAFB 2170D
+FAFC 20EDD
+FAFD 3DB4
+FAFE 20D4D
+FB40 289BC
+FB41 22698
+FB42 5547
+FB43 4CED
+FB44 542F
+FB45 7417
+FB46 5586
+FB47 55A9
+FB49 218D7
+FB4A 2403A
+FB4B 4552
+FB4C 24435
+FB4D 66B3
+FB4E 210B4
+FB4F 5637
+FB50 66CD
+FB51 2328A
+FB52 66A4
+FB53 66AD
+FB54 564D
+FB55 564F
+FB56 78F1
+FB57 56F1
+FB58 9787
+FB59 53FE
+FB5A 5700
+FB5B 56EF
+FB5C 56ED
+FB5D 28B66
+FB5E 3623
+FB5F 2124F
+FB60 5746
+FB61 241A5
+FB62 6C6E
+FB63 708B
+FB64 5742
+FB65 36B1
+FB66 26C7E
+FB67 57E6
+FB68 21416
+FB69 5803
+FB6A 21454
+FB6B 24363
+FB6C 5826
+FB6D 24BF5
+FB6E 585C
+FB6F 58AA
+FB70 3561
+FB71 58E0
+FB72 58DC
+FB73 2123C
+FB74 58FB
+FB75 5BFF
+FB76 5743
+FB77 2A150
+FB78 24278
+FB79 93D3
+FB7A 35A1
+FB7B 591F
+FB7C 68A6
+FB7D 36C3
+FB7E 6E59
+FBA1 2163E
+FBA2 5A24
+FBA3 5553
+FBA4 21692
+FBA5 8505
+FBA6 59C9
+FBA7 20D4E
+FBA8 26C81
+FBA9 26D2A
+FBAA 217DC
+FBAB 59D9
+FBAC 217FB
+FBAD 217B2
+FBAE 26DA6
+FBAF 6D71
+FBB0 21828
+FBB1 216D5
+FBB2 59F9
+FBB3 26E45
+FBB4 5AAB
+FBB5 5A63
+FBB6 36E6
+FBB7 249A9
+FBB9 3708
+FBBA 5A96
+FBBB 7465
+FBBC 5AD3
+FBBD 26FA1
+FBBE 22554
+FBBF 3D85
+FBC0 21911
+FBC1 3732
+FBC2 216B8
+FBC3 5E83
+FBC4 52D0
+FBC5 5B76
+FBC6 6588
+FBC7 5B7C
+FBC8 27A0E
+FBC9 4004
+FBCA 485D
+FBCB 20204
+FBCC 5BD5
+FBCD 6160
+FBCE 21A34
+FBCF 259CC
+FBD0 205A5
+FBD1 5BF3
+FBD2 5B9D
+FBD3 4D10
+FBD4 5C05
+FBD5 21B44
+FBD6 5C13
+FBD7 73CE
+FBD8 5C14
+FBD9 21CA5
+FBDA 26B28
+FBDB 5C49
+FBDC 48DD
+FBDD 5C85
+FBDE 5CE9
+FBDF 5CEF
+FBE0 5D8B
+FBE1 21DF9
+FBE2 21E37
+FBE3 5D10
+FBE4 5D18
+FBE5 5D46
+FBE6 21EA4
+FBE7 5CBA
+FBE8 5DD7
+FBE9 82FC
+FBEA 382D
+FBEB 24901
+FBEC 22049
+FBED 22173
+FBEE 8287
+FBEF 3836
+FBF0 3BC2
+FBF1 5E2E
+FBF2 6A8A
+FBF4 5E7A
+FBF5 244BC
+FBF6 20CD3
+FBF7 53A6
+FBF8 4EB7
+FBFA 53A8
+FBFB 21771
+FBFC 5E09
+FBFD 5EF4
+FBFE 28482
+FC40 5EF9
+FC41 5EFB
+FC42 38A0
+FC43 5EFC
+FC44 683E
+FC45 941B
+FC46 5F0D
+FC47 201C1
+FC48 2F894
+FC49 3ADE
+FC4A 48AE
+FC4B 2133A
+FC4C 5F3A
+FC4D 26888
+FC4E 223D0
+FC50 22471
+FC51 5F63
+FC52 97BD
+FC53 26E6E
+FC54 5F72
+FC55 9340
+FC56 28A36
+FC57 5FA7
+FC58 5DB6
+FC59 3D5F
+FC5A 25250
+FC5B 21F6A
+FC5C 270F8
+FC5D 22668
+FC5E 91D6
+FC5F 2029E
+FC60 28A29
+FC61 6031
+FC62 6685
+FC63 21877
+FC64 3963
+FC65 3DC7
+FC66 3639
+FC67 5790
+FC68 227B4
+FC69 7971
+FC6A 3E40
+FC6B 609E
+FC6D 60B3
+FC6E 24982
+FC6F 2498F
+FC70 27A53
+FC71 74A4
+FC72 50E1
+FC73 5AA0
+FC74 6164
+FC75 8424
+FC76 6142
+FC77 2F8A6
+FC78 26ED2
+FC79 6181
+FC7A 51F4
+FC7B 20656
+FC7C 6187
+FC7D 5BAA
+FC7E 23FB7
+FCA1 2285F
+FCA2 61D3
+FCA3 28B9D
+FCA4 2995D
+FCA5 61D0
+FCA6 3932
+FCA7 22980
+FCA8 228C1
+FCA9 6023
+FCAA 615C
+FCAB 651E
+FCAC 638B
+FCAD 20118
+FCAE 62C5
+FCAF 21770
+FCB0 62D5
+FCB1 22E0D
+FCB2 636C
+FCB3 249DF
+FCB4 3A17
+FCB5 6438
+FCB6 63F8
+FCB7 2138E
+FCB8 217FC
+FCBA 6F8A
+FCBB 22E36
+FCBC 9814
+FCBD 2408C
+FCBE 2571D
+FCBF 64E1
+FCC0 64E5
+FCC1 947B
+FCC2 3A66
+FCC3 643A
+FCC4 3A57
+FCC5 654D
+FCC6 6F16
+FCC7 24A28
+FCC8 24A23
+FCC9 6585
+FCCA 656D
+FCCB 655F
+FCCC 2307E
+FCCD 65B5
+FCCE 24940
+FCCF 4B37
+FCD0 65D1
+FCD1 40D8
+FCD2 21829
+FCD3 65E0
+FCD4 65E3
+FCD5 5FDF
+FCD6 23400
+FCD7 6618
+FCD8 231F7
+FCD9 231F8
+FCDA 6644
+FCDB 231A4
+FCDC 231A5
+FCDD 664B
+FCDE 20E75
+FCDF 6667
+FCE0 251E6
+FCE1 6673
+FCE3 21E3D
+FCE4 23231
+FCE5 285F4
+FCE6 231C8
+FCE7 25313
+FCE8 77C5
+FCE9 228F7
+FCEA 99A4
+FCEB 6702
+FCEC 2439C
+FCED 24A21
+FCEE 3B2B
+FCEF 69FA
+FCF0 237C2
+FCF2 6767
+FCF3 6762
+FCF4 241CD
+FCF5 290ED
+FCF6 67D7
+FCF7 44E9
+FCF8 6822
+FCF9 6E50
+FCFA 923C
+FCFB 6801
+FCFC 233E6
+FCFD 26DA0
+FCFE 685D
+FD40 2346F
+FD41 69E1
+FD42 6A0B
+FD43 28ADF
+FD44 6973
+FD45 68C3
+FD46 235CD
+FD47 6901
+FD48 6900
+FD49 3D32
+FD4A 3A01
+FD4B 2363C
+FD4C 3B80
+FD4D 67AC
+FD4E 6961
+FD4F 28A4A
+FD50 42FC
+FD51 6936
+FD52 6998
+FD53 3BA1
+FD54 203C9
+FD55 8363
+FD56 5090
+FD57 69F9
+FD58 23659
+FD59 2212A
+FD5A 6A45
+FD5B 23703
+FD5C 6A9D
+FD5D 3BF3
+FD5E 67B1
+FD5F 6AC8
+FD60 2919C
+FD61 3C0D
+FD62 6B1D
+FD63 20923
+FD64 60DE
+FD65 6B35
+FD66 6B74
+FD67 227CD
+FD68 6EB5
+FD69 23ADB
+FD6A 203B5
+FD6B 21958
+FD6C 3740
+FD6D 5421
+FD6E 23B5A
+FD6F 6BE1
+FD70 23EFC
+FD71 6BDC
+FD72 6C37
+FD73 2248B
+FD74 248F1
+FD75 26B51
+FD76 6C5A
+FD77 8226
+FD78 6C79
+FD79 23DBC
+FD7A 44C5
+FD7B 23DBD
+FD7C 241A4
+FD7D 2490C
+FD7E 24900
+FDA1 23CC9
+FDA2 36E5
+FDA3 3CEB
+FDA4 20D32
+FDA5 9B83
+FDA6 231F9
+FDA7 22491
+FDA8 7F8F
+FDA9 6837
+FDAA 26D25
+FDAB 26DA1
+FDAC 26DEB
+FDAD 6D96
+FDAE 6D5C
+FDAF 6E7C
+FDB0 6F04
+FDB1 2497F
+FDB2 24085
+FDB3 26E72
+FDB4 8533
+FDB5 26F74
+FDB6 51C7
+FDB9 842E
+FDBA 28B21
+FDBC 23E2F
+FDBD 7453
+FDBE 23F82
+FDBF 79CC
+FDC0 6E4F
+FDC1 5A91
+FDC2 2304B
+FDC3 6FF8
+FDC4 370D
+FDC5 6F9D
+FDC6 23E30
+FDC7 6EFA
+FDC8 21497
+FDC9 2403D
+FDCA 4555
+FDCB 93F0
+FDCC 6F44
+FDCD 6F5C
+FDCE 3D4E
+FDCF 6F74
+FDD0 29170
+FDD1 3D3B
+FDD2 6F9F
+FDD3 24144
+FDD4 6FD3
+FDD5 24091
+FDD6 24155
+FDD7 24039
+FDD8 23FF0
+FDD9 23FB4
+FDDA 2413F
+FDDB 51DF
+FDDC 24156
+FDDD 24157
+FDDE 24140
+FDDF 261DD
+FDE0 704B
+FDE1 707E
+FDE2 70A7
+FDE3 7081
+FDE4 70CC
+FDE5 70D5
+FDE6 70D6
+FDE7 70DF
+FDE8 4104
+FDE9 3DE8
+FDEA 71B4
+FDEB 7196
+FDEC 24277
+FDED 712B
+FDEE 7145
+FDEF 5A88
+FDF0 714A
+FDF2 5C9C
+FDF3 24365
+FDF4 714F
+FDF5 9362
+FDF6 242C1
+FDF7 712C
+FDF8 2445A
+FDF9 24A27
+FDFA 24A22
+FDFB 71BA
+FDFC 28BE8
+FDFD 70BD
+FDFE 720E
+FE40 9442
+FE41 7215
+FE42 5911
+FE43 9443
+FE44 7224
+FE45 9341
+FE46 25605
+FE47 722E
+FE48 7240
+FE49 24974
+FE4A 68BD
+FE4B 7255
+FE4C 7257
+FE4D 3E55
+FE4E 23044
+FE4F 680D
+FE50 6F3D
+FE51 7282
+FE53 732B
+FE54 24823
+FE55 2882B
+FE56 48ED
+FE57 28804
+FE58 7328
+FE59 732E
+FE5A 73CF
+FE5B 73AA
+FE5C 20C3A
+FE5D 26A2E
+FE5E 73C9
+FE5F 7449
+FE60 241E2
+FE61 216E7
+FE62 24A24
+FE63 6623
+FE64 36C5
+FE65 249B7
+FE66 2498D
+FE67 249FB
+FE68 73F7
+FE69 7415
+FE6A 6903
+FE6B 24A26
+FE6C 7439
+FE6D 205C3
+FE6E 3ED7
+FE70 228AD
+FE71 7460
+FE72 28EB2
+FE73 7447
+FE74 73E4
+FE75 7476
+FE76 83B9
+FE77 746C
+FE78 3730
+FE79 7474
+FE7A 93F1
+FE7B 6A2C
+FE7C 7482
+FE7D 4953
+FE7E 24A8C
+FEA1 2415F
+FEA2 24A79
+FEA3 28B8F
+FEA4 5B46
+FEA5 28C03
+FEA6 2189E
+FEA7 74C8
+FEA8 21988
+FEA9 750E
+FEAB 751E
+FEAC 28ED9
+FEAD 21A4B
+FEAE 5BD7
+FEAF 28EAC
+FEB0 9385
+FEB1 754D
+FEB2 754A
+FEB3 7567
+FEB4 756E
+FEB5 24F82
+FEB6 3F04
+FEB7 24D13
+FEB8 758E
+FEB9 745D
+FEBA 759E
+FEBB 75B4
+FEBC 7602
+FEBD 762C
+FEBE 7651
+FEBF 764F
+FEC0 766F
+FEC1 7676
+FEC2 263F5
+FEC3 7690
+FEC4 81EF
+FEC5 37F8
+FEC6 26911
+FEC7 2690E
+FEC8 76A1
+FEC9 76A5
+FECA 76B7
+FECB 76CC
+FECC 26F9F
+FECD 8462
+FECE 2509D
+FECF 2517D
+FED0 21E1C
+FED1 771E
+FED2 7726
+FED3 7740
+FED4 64AF
+FED5 25220
+FED6 7758
+FED7 232AC
+FED8 77AF
+FED9 28964
+FEDA 28968
+FEDB 216C1
+FEDC 77F4
+FEDE 21376
+FEDF 24A12
+FEE0 68CA
+FEE1 78AF
+FEE2 78C7
+FEE3 78D3
+FEE4 96A5
+FEE5 792E
+FEE6 255E0
+FEE7 78D7
+FEE8 7934
+FEE9 78B1
+FEEA 2760C
+FEEB 8FB8
+FEEC 8884
+FEED 28B2B
+FEEE 26083
+FEEF 2261C
+FEF0 7986
+FEF1 8900
+FEF2 6902
+FEF3 7980
+FEF4 25857
+FEF5 799D
+FEF6 27B39
+FEF7 793C
+FEF8 79A9
+FEF9 6E2A
+FEFA 27126
+FEFB 3EA8
+FEFC 79C6
+FEFD 2910D
+FEFE 79D4
diff --git a/make/devkit/Tools.gmk b/make/devkit/Tools.gmk
index af37f12f39dc2..187320ca26ed4 100644
--- a/make/devkit/Tools.gmk
+++ b/make/devkit/Tools.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -539,6 +539,7 @@ $(BUILDDIR)/$(gcc_ver)/Makefile \
$(PATHPRE) $(ENVS) $(GCC_CFG) $(EXTRA_CFLAGS) \
$(CONFIG) \
--with-sysroot=$(SYSROOT) \
+ --with-debug-prefix-map=$(OUTPUT_ROOT)=devkit \
--enable-languages=c,c++ \
--enable-shared \
--disable-nls \
diff --git a/make/devkit/createJMHBundle.sh b/make/devkit/createJMHBundle.sh
index af29a0917d606..c3c97947dabf0 100644
--- a/make/devkit/createJMHBundle.sh
+++ b/make/devkit/createJMHBundle.sh
@@ -26,8 +26,8 @@
# Create a bundle in the build directory, containing what's needed to
# build and run JMH microbenchmarks from the OpenJDK build.
-JMH_VERSION=1.36
-COMMONS_MATH3_VERSION=3.2
+JMH_VERSION=1.37
+COMMONS_MATH3_VERSION=3.6.1
JOPT_SIMPLE_VERSION=5.0.4
BUNDLE_NAME=jmh-$JMH_VERSION.tar.gz
diff --git a/make/hotspot/gensrc/GensrcAdlc.gmk b/make/hotspot/gensrc/GensrcAdlc.gmk
index 427a0dfd34b79..0898d91e1c2a0 100644
--- a/make/hotspot/gensrc/GensrcAdlc.gmk
+++ b/make/hotspot/gensrc/GensrcAdlc.gmk
@@ -62,7 +62,7 @@ ifeq ($(call check-jvm-feature, compiler2), true)
ADLC_CFLAGS += -I$(TOPDIR)/src/hotspot/share
# Add file macro mappings
- ADLC_CFLAGS += $(FILE_MACRO_CFLAGS)
+ ADLC_CFLAGS += $(FILE_MACRO_CFLAGS) $(REPRODUCIBLE_CFLAGS)
ifeq ($(UBSAN_ENABLED), true)
ADLC_CFLAGS += $(UBSAN_CFLAGS)
@@ -133,6 +133,21 @@ ifeq ($(call check-jvm-feature, compiler2), true)
ADLCFLAGS += -DARM=1
endif
+ # Set ASSERT, NDEBUG and PRODUCT flags just like in JvmFlags.gmk
+ ifeq ($(DEBUG_LEVEL), release)
+ # release builds disable uses of assert macro from <assert.h>.
+ ADLCFLAGS += -DNDEBUG
+ # For hotspot, release builds differ internally between "optimized" and "product"
+ # in that "optimize" does not define PRODUCT.
+ ifneq ($(HOTSPOT_DEBUG_LEVEL), optimized)
+ ADLCFLAGS += -DPRODUCT
+ endif
+ else ifeq ($(DEBUG_LEVEL), fastdebug)
+ ADLCFLAGS += -DASSERT
+ else ifeq ($(DEBUG_LEVEL), slowdebug)
+ ADLCFLAGS += -DASSERT
+ endif
+
##############################################################################
# Concatenate all ad source files into a single file, which will be fed to
# adlc. Also include a #line directive at the start of every included file
diff --git a/make/hotspot/lib/CompileJvm.gmk b/make/hotspot/lib/CompileJvm.gmk
index adb964d05389a..d21e7f99c63a7 100644
--- a/make/hotspot/lib/CompileJvm.gmk
+++ b/make/hotspot/lib/CompileJvm.gmk
@@ -276,10 +276,10 @@ ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
define SetupOperatorNewDeleteCheck
$1.op_check: $1
$$(call ExecuteWithLog, $1.op_check, \
- $$(NM) $$< 2>&1 | $$(GREP) $$(addprefix -e , $$(MANGLED_SYMS)) | $$(GREP) $$(UNDEF_PATTERN) > $1.op_check || true)
+ $$(NM) $$(NMFLAGS) $$< 2>&1 | $$(GREP) $$(addprefix -e , $$(MANGLED_SYMS)) | $$(GREP) $$(UNDEF_PATTERN) > $1.op_check || true)
if [ -s $1.op_check ]; then \
$$(ECHO) "$$(notdir $$<): Error: Use of global operators new and delete is not allowed in Hotspot:"; \
- $$(NM) $$< | $$(CXXFILT) | $$(EGREP) '$$(DEMANGLED_REGEXP)' | $$(GREP) $$(UNDEF_PATTERN); \
+ $$(NM) $$(NMFLAGS) $$< | $$(CXXFILT) | $$(EGREP) '$$(DEMANGLED_REGEXP)' | $$(GREP) $$(UNDEF_PATTERN); \
$$(ECHO) "See: $$(TOPDIR)/make/hotspot/lib/CompileJvm.gmk"; \
exit 1; \
fi
diff --git a/make/hotspot/lib/JvmMapfile.gmk b/make/hotspot/lib/JvmMapfile.gmk
index d80b804a2f6ca..2808ac2af0372 100644
--- a/make/hotspot/lib/JvmMapfile.gmk
+++ b/make/hotspot/lib/JvmMapfile.gmk
@@ -53,7 +53,7 @@ endif
# platform dependent.
ifeq ($(call isTargetOs, linux), true)
- DUMP_SYMBOLS_CMD := $(NM) --defined-only *$(OBJ_SUFFIX)
+ DUMP_SYMBOLS_CMD := $(NM) $(NMFLAGS) --defined-only *$(OBJ_SUFFIX)
ifneq ($(FILTER_SYMBOLS_PATTERN), )
FILTER_SYMBOLS_PATTERN := $(FILTER_SYMBOLS_PATTERN)|
endif
@@ -67,7 +67,7 @@ ifeq ($(call isTargetOs, linux), true)
else ifeq ($(call isTargetOs, macosx), true)
# nm on macosx prints out "warning: nm: no name list" to stderr for
# files without symbols. Hide this, even at the expense of hiding real errors.
- DUMP_SYMBOLS_CMD := $(NM) -Uj *$(OBJ_SUFFIX) 2> /dev/null
+ DUMP_SYMBOLS_CMD := $(NM) $(NMFLAGS) -Uj *$(OBJ_SUFFIX) 2> /dev/null
ifneq ($(FILTER_SYMBOLS_PATTERN), )
FILTER_SYMBOLS_PATTERN := $(FILTER_SYMBOLS_PATTERN)|
endif
@@ -89,7 +89,7 @@ else ifeq ($(call isTargetOs, aix), true)
# which may be installed under /opt/freeware/bin. So better use an absolute path here!
# NM=/usr/bin/nm
- DUMP_SYMBOLS_CMD := $(NM) -X64 -B -C *$(OBJ_SUFFIX)
+ DUMP_SYMBOLS_CMD := $(NM) $(NMFLAGS) -B -C *$(OBJ_SUFFIX)
FILTER_SYMBOLS_AWK_SCRIPT := \
'{ \
if (($$2="d" || $$2="D") && ($$3 ~ /^__vft/ || $$3 ~ /^gHotSpotVM/)) print $$3; \
diff --git a/make/hotspot/test/GtestImage.gmk b/make/hotspot/test/GtestImage.gmk
index d216328e5674e..9b2a37962cddd 100644
--- a/make/hotspot/test/GtestImage.gmk
+++ b/make/hotspot/test/GtestImage.gmk
@@ -61,7 +61,7 @@ ifeq ($(call isTargetOs, windows), true)
$(eval $(call SetupCopyFiles, COPY_GTEST_PDB_$v, \
SRC := $(HOTSPOT_OUTPUTDIR)/variant-$v/libjvm/gtest, \
DEST := $(TEST_IMAGE_DIR)/hotspot/gtest/$v, \
- FILES := jvm.pdb gtestLauncher.pdb, \
+ FILES := jvm.dll.pdb gtestLauncher.exe.pdb, \
)) \
$(eval TARGETS += $$(COPY_GTEST_PDB_$v)) \
) \
diff --git a/make/ide/visualstudio/hotspot/src/classes/build/tools/projectcreator/WinGammaPlatformVC10.java b/make/ide/visualstudio/hotspot/src/classes/build/tools/projectcreator/WinGammaPlatformVC10.java
index 092e5afd3e8fe..ed085dae09562 100644
--- a/make/ide/visualstudio/hotspot/src/classes/build/tools/projectcreator/WinGammaPlatformVC10.java
+++ b/make/ide/visualstudio/hotspot/src/classes/build/tools/projectcreator/WinGammaPlatformVC10.java
@@ -329,7 +329,7 @@ Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir) {
addAttr(rv, "PrecompiledHeaderOutputFile", outDir+Util.sep+"vm.pch");
addAttr(rv, "AssemblerListingLocation", outDir);
addAttr(rv, "ObjectFileName", outDir+Util.sep);
- addAttr(rv, "ProgramDataBaseFileName", outDir+Util.sep+"jvm.pdb");
+ addAttr(rv, "ProgramDataBaseFileName", outDir+Util.sep+"jvm.dll.pdb");
// Set /nologo option
addAttr(rv, "SuppressStartupBanner", "true");
// Surpass the default /Tc or /Tp.
@@ -409,7 +409,7 @@ Vector getBaseLinkerFlags(String outDir, String outDll, String platformName) {
addAttr(rv, "OutputFile", outDll);
addAttr(rv, "SuppressStartupBanner", "true");
addAttr(rv, "ModuleDefinitionFile", outDir+Util.sep+"vm.def");
- addAttr(rv, "ProgramDatabaseFile", outDir+Util.sep+"jvm.pdb");
+ addAttr(rv, "ProgramDatabaseFile", outDir+Util.sep+"jvm.dll.pdb");
addAttr(rv, "SubSystem", "Windows");
addAttr(rv, "BaseAddress", "0x8000000");
addAttr(rv, "ImportLibrary", outDir+Util.sep+"jvm.lib");
diff --git a/make/jdk/src/classes/build/tools/generatecurrencydata/GenerateCurrencyData.java b/make/jdk/src/classes/build/tools/generatecurrencydata/GenerateCurrencyData.java
index 561edbef0346a..9655e08016c5a 100644
--- a/make/jdk/src/classes/build/tools/generatecurrencydata/GenerateCurrencyData.java
+++ b/make/jdk/src/classes/build/tools/generatecurrencydata/GenerateCurrencyData.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
import java.io.FileOutputStream;
import java.io.InputStream;
import java.text.SimpleDateFormat;
+import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.Locale;
@@ -339,9 +340,15 @@ private static void buildOtherTables() {
validCurrencyCodes.substring(i * 7 + 3, i * 7 + 6));
checkCurrencyCode(currencyCode);
int tableEntry = mainTable[(currencyCode.charAt(0) - 'A') * A_TO_Z + (currencyCode.charAt(1) - 'A')];
- if (tableEntry == INVALID_COUNTRY_ENTRY ||
- (tableEntry & SPECIAL_CASE_COUNTRY_MASK) != 0 ||
- (tableEntry & SIMPLE_CASE_COUNTRY_FINAL_CHAR_MASK) != (currencyCode.charAt(2) - 'A')) {
+
+ // Do not allow a future currency to be classified as an otherCurrency,
+ // otherwise it will leak out into Currency::getAvailableCurrencies
+ boolean futureCurrency = Arrays.asList(specialCaseNewCurrencies).contains(currencyCode);
+ boolean simpleCurrency = (tableEntry & SIMPLE_CASE_COUNTRY_FINAL_CHAR_MASK) == (currencyCode.charAt(2) - 'A');
+
+ // If it is neither a simple currency nor one defined for the future,
+ // then the current currency can be added to the otherTable
+ if (!futureCurrency && !simpleCurrency) {
if (otherCurrenciesCount == maxOtherCurrencies) {
throw new RuntimeException("too many other currencies");
}
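For clarity, the new classification boils down to the following check; this standalone sketch uses hypothetical names and is a restatement, not the build tool's actual code.

```java
// Illustrative-only restatement of the new otherTable filter: a currency that
// appears among the special-case "future" currencies, or that is already a
// simple-case currency, is kept out of the other-currencies table.
import java.util.List;

class OtherCurrencyFilter {
    static boolean belongsInOtherTable(String currencyCode,
                                       List<String> specialCaseNewCurrencies,
                                       char simpleCaseFinalChar) {
        boolean futureCurrency = specialCaseNewCurrencies.contains(currencyCode);
        boolean simpleCurrency = simpleCaseFinalChar == currencyCode.charAt(2);
        return !futureCurrency && !simpleCurrency;
    }
}
```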
diff --git a/make/langtools/tools/javacserver/client/Client.java b/make/langtools/tools/javacserver/client/Client.java
index b3b4a8543a059..9576a9923a4e5 100644
--- a/make/langtools/tools/javacserver/client/Client.java
+++ b/make/langtools/tools/javacserver/client/Client.java
@@ -51,9 +51,9 @@
public class Client {
private static final Log.Level LOG_LEVEL = Log.Level.INFO;
- // Wait 2 seconds for response, before giving up on javac server.
- private static final int CONNECTION_TIMEOUT = 2000;
- private static final int MAX_CONNECT_ATTEMPTS = 3;
+ // Wait 4 seconds for response, before giving up on javac server.
+ private static final int CONNECTION_TIMEOUT = 4000;
+ private static final int MAX_CONNECT_ATTEMPTS = 10;
private static final int WAIT_BETWEEN_CONNECT_ATTEMPTS = 2000;
private final ClientConfiguration conf;
@@ -130,7 +130,7 @@ private Socket tryConnect() throws IOException, InterruptedException {
Log.error("Connection attempt failed: " + ex.getMessage());
if (attempt >= MAX_CONNECT_ATTEMPTS) {
Log.error("Giving up");
- throw new IOException("Could not connect to server", ex);
+ throw new IOException("Could not connect to server after " + MAX_CONNECT_ATTEMPTS + " attempts with timeout " + CONNECTION_TIMEOUT, ex);
}
}
Thread.sleep(WAIT_BETWEEN_CONNECT_ATTEMPTS);
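The constants above drive a connect-retry loop; a self-contained sketch of that pattern (hypothetical class and method names, not the javacserver code) looks roughly like this:

```java
// Hedged sketch of a connect-with-retries loop using the same constants;
// host/port handling is simplified and purely illustrative.
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

class RetryingConnector {
    private static final int CONNECTION_TIMEOUT = 4000;            // ms per attempt
    private static final int MAX_CONNECT_ATTEMPTS = 10;
    private static final int WAIT_BETWEEN_CONNECT_ATTEMPTS = 2000; // ms between attempts

    static Socket connect(String host, int port) throws IOException, InterruptedException {
        for (int attempt = 1; attempt <= MAX_CONNECT_ATTEMPTS; attempt++) {
            try {
                Socket socket = new Socket();
                socket.connect(new InetSocketAddress(host, port), CONNECTION_TIMEOUT);
                return socket;
            } catch (IOException ex) {
                if (attempt == MAX_CONNECT_ATTEMPTS) {
                    throw new IOException("Could not connect to server after "
                            + MAX_CONNECT_ATTEMPTS + " attempts with timeout "
                            + CONNECTION_TIMEOUT, ex);
                }
                Thread.sleep(WAIT_BETWEEN_CONNECT_ATTEMPTS);
            }
        }
        throw new IOException("unreachable"); // loop always returns or throws first
    }
}
```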
diff --git a/make/modules/java.base/gensrc/GensrcMisc.gmk b/make/modules/java.base/gensrc/GensrcMisc.gmk
index e37aa50d41cb1..578adce4e9da6 100644
--- a/make/modules/java.base/gensrc/GensrcMisc.gmk
+++ b/make/modules/java.base/gensrc/GensrcMisc.gmk
@@ -52,9 +52,7 @@ $(eval $(call SetupTextFileProcessing, BUILD_VERSION_JAVA, \
# Normalize OPENJDK_TARGET_CPU name to match jdk.internal.util.Architecture enum
-ifneq ($(filter $(OPENJDK_TARGET_CPU), ppc64le), )
- OPENJDK_TARGET_ARCH_CANONICAL = ppc64
-else ifneq ($(filter $(OPENJDK_TARGET_CPU), s390x), )
+ifneq ($(filter $(OPENJDK_TARGET_CPU), s390x), )
OPENJDK_TARGET_ARCH_CANONICAL = s390
else ifneq ($(filter $(OPENJDK_TARGET_CPU), x86_64 amd64), )
OPENJDK_TARGET_ARCH_CANONICAL = x64
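The effect of dropping the ppc64le mapping is easiest to see as a small lookup; the sketch below is illustrative and is neither the makefile logic nor the jdk.internal.util.Architecture implementation.

```java
// Illustrative canonicalization after this change: s390x and x86_64/amd64
// are still folded, while ppc64le now passes through unchanged.
class ArchCanonicalizer {
    static String canonicalize(String targetCpu) {
        switch (targetCpu) {
            case "s390x":  return "s390";
            case "x86_64":
            case "amd64":  return "x64";
            default:       return targetCpu; // includes ppc64le
        }
    }
}
```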
diff --git a/make/modules/java.desktop/lib/Awt2dLibraries.gmk b/make/modules/java.desktop/lib/Awt2dLibraries.gmk
index 62b4477b8bfcf..d6a4e6df4fce3 100644
--- a/make/modules/java.desktop/lib/Awt2dLibraries.gmk
+++ b/make/modules/java.desktop/lib/Awt2dLibraries.gmk
@@ -237,6 +237,8 @@ ifeq ($(call isTargetOs, windows macosx), false)
DISABLED_WARNINGS_gcc_gtk3_interface.c := parentheses type-limits unused-function, \
DISABLED_WARNINGS_gcc_OGLBufImgOps.c := format-nonliteral, \
DISABLED_WARNINGS_gcc_OGLPaints.c := format-nonliteral, \
+ DISABLED_WARNINGS_gcc_screencast_pipewire.c := undef, \
+ DISABLED_WARNINGS_gcc_screencast_portal.c := undef, \
DISABLED_WARNINGS_gcc_sun_awt_X11_GtkFileDialogPeer.c := parentheses, \
DISABLED_WARNINGS_gcc_X11SurfaceData.c := implicit-fallthrough pointer-to-int-cast, \
DISABLED_WARNINGS_gcc_XlibWrapper.c := type-limits pointer-to-int-cast, \
@@ -465,11 +467,18 @@ else
# hb-ft.cc is not presently needed, and requires freetype 2.4.2 or later.
LIBFONTMANAGER_EXCLUDE_FILES += libharfbuzz/hb-ft.cc
+ # List of disabled warnings and the compilers for which each warning was specifically added.
+ # array-bounds -> GCC 12 on Alpine Linux
+ # parentheses -> GCC 6
+ # range-loop-analysis -> clang on Xcode12
+
HARFBUZZ_DISABLED_WARNINGS_gcc := missing-field-initializers strict-aliasing \
- unused-result array-bounds
+ unused-result array-bounds parentheses
# noexcept-type required for GCC 7 builds. Not required for GCC 8+.
# expansion-to-defined required for GCC 9 builds. Not required for GCC 10+.
- HARFBUZZ_DISABLED_WARNINGS_CXX_gcc := class-memaccess noexcept-type expansion-to-defined dangling-reference
+ # maybe-uninitialized required for GCC 8 builds. Not required for GCC 9+.
+ HARFBUZZ_DISABLED_WARNINGS_CXX_gcc := class-memaccess noexcept-type \
+ expansion-to-defined dangling-reference maybe-uninitialized
HARFBUZZ_DISABLED_WARNINGS_clang := missing-field-initializers range-loop-analysis
HARFBUZZ_DISABLED_WARNINGS_microsoft := 4267 4244
diff --git a/make/scripts/compare_exceptions.sh.incl b/make/scripts/compare_exceptions.sh.incl
index d9f62aa113222..d5043637145b2 100644
--- a/make/scripts/compare_exceptions.sh.incl
+++ b/make/scripts/compare_exceptions.sh.incl
@@ -49,8 +49,8 @@ elif [ "$OPENJDK_TARGET_OS" = "windows" ]; then
SKIP_BIN_DIFF="true"
SKIP_FULLDUMP_DIFF="true"
ACCEPTED_JARZIP_CONTENTS="
- /modules_libs/java.security.jgss/w2k_lsa_auth.pdb
- /modules_libs/java.security.jgss/w2k_lsa_auth.map
+ /modules_libs/java.security.jgss/w2k_lsa_auth.dll.pdb
+ /modules_libs/java.security.jgss/w2k_lsa_auth.dll.map
/modules_libs/java.security.jgss/w2k_lsa_auth.dll
"
elif [ "$OPENJDK_TARGET_OS" = "macosx" ]; then
diff --git a/make/test/JtregNativeJdk.gmk b/make/test/JtregNativeJdk.gmk
index 5f945e90dd288..94c8810bba90c 100644
--- a/make/test/JtregNativeJdk.gmk
+++ b/make/test/JtregNativeJdk.gmk
@@ -132,6 +132,8 @@ ifeq ($(call isTargetOs, linux), true)
# stripping during the test libraries' build.
BUILD_JDK_JTREG_LIBRARIES_CFLAGS_libFib := -g
BUILD_JDK_JTREG_LIBRARIES_STRIP_SYMBOLS_libFib := false
+ # nio tests' libCreationTimeHelper native library needs the -ldl linker flag
+ BUILD_JDK_JTREG_LIBRARIES_LIBS_libCreationTimeHelper := -ldl
endif
ifeq ($(ASAN_ENABLED), true)
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index fea6a3d7d115e..a9aa3cab3004e 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -3809,202 +3809,6 @@ encode %{
__ br(target_reg);
%}
- enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
- C2_MacroAssembler _masm(&cbuf);
- Register oop = as_Register($object$$reg);
- Register box = as_Register($box$$reg);
- Register disp_hdr = as_Register($tmp$$reg);
- Register tmp = as_Register($tmp2$$reg);
- Label cont;
- Label object_has_monitor;
- Label count, no_count;
-
- assert_different_registers(oop, box, tmp, disp_hdr);
-
- // Load markWord from object into displaced_header.
- __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
-
- if (DiagnoseSyncOnValueBasedClasses != 0) {
- __ load_klass(tmp, oop);
- __ ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
- __ tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
- __ br(Assembler::NE, cont);
- }
-
- // Check for existing monitor
- __ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
-
- if (LockingMode == LM_MONITOR) {
- __ tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
- __ b(cont);
- } else if (LockingMode == LM_LEGACY) {
- // Set tmp to be (markWord of object | UNLOCK_VALUE).
- __ orr(tmp, disp_hdr, markWord::unlocked_value);
-
- // Initialize the box. (Must happen before we update the object mark!)
- __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
-
- // Compare object markWord with an unlocked value (tmp) and if
- // equal exchange the stack address of our box with object markWord.
- // On failure disp_hdr contains the possibly locked markWord.
- __ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
- /*release*/ true, /*weak*/ false, disp_hdr);
- __ br(Assembler::EQ, cont);
-
- assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
-
- // If the compare-and-exchange succeeded, then we found an unlocked
- // object, have now locked it, and will continue at label cont
-
- // Check if the owner is self by comparing the value in the
- // markWord of object (disp_hdr) with the stack pointer.
- __ mov(rscratch1, sp);
- __ sub(disp_hdr, disp_hdr, rscratch1);
- __ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
- // If condition is true we are cont and hence we can store 0 as the
- // displaced header in the box, which indicates that it is a recursive lock.
- __ ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
- __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
- __ b(cont);
- } else {
- assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- __ fast_lock(oop, disp_hdr, tmp, rscratch1, no_count);
- __ b(count);
- }
-
- // Handle existing monitor.
- __ bind(object_has_monitor);
-
- // The object's monitor m is unlocked iff m->owner == NULL,
- // otherwise m->owner may contain a thread or a stack address.
- //
- // Try to CAS m->owner from NULL to current thread.
- __ add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset())-markWord::monitor_value));
- __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
- /*release*/ true, /*weak*/ false, rscratch1); // Sets flags for result
-
- if (LockingMode != LM_LIGHTWEIGHT) {
- // Store a non-null value into the box to avoid looking like a re-entrant
- // lock. The fast-path monitor unlock code checks for
- // markWord::monitor_value so use markWord::unused_mark which has the
- // relevant bit set, and also matches ObjectSynchronizer::enter.
- __ mov(tmp, (address)markWord::unused_mark().value());
- __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
- }
- __ br(Assembler::EQ, cont); // CAS success means locking succeeded
-
- __ cmp(rscratch1, rthread);
- __ br(Assembler::NE, cont); // Check for recursive locking
-
- // Recursive lock case
- __ increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1);
- // flag == EQ still from the cmp above, checking if this is a reentrant lock
-
- __ bind(cont);
- // flag == EQ indicates success
- // flag == NE indicates failure
- __ br(Assembler::NE, no_count);
-
- __ bind(count);
- __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
-
- __ bind(no_count);
- %}
-
- enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
- C2_MacroAssembler _masm(&cbuf);
- Register oop = as_Register($object$$reg);
- Register box = as_Register($box$$reg);
- Register disp_hdr = as_Register($tmp$$reg);
- Register tmp = as_Register($tmp2$$reg);
- Label cont;
- Label object_has_monitor;
- Label count, no_count;
-
- assert_different_registers(oop, box, tmp, disp_hdr);
-
- if (LockingMode == LM_LEGACY) {
- // Find the lock address and load the displaced header from the stack.
- __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
-
- // If the displaced header is 0, we have a recursive unlock.
- __ cmp(disp_hdr, zr);
- __ br(Assembler::EQ, cont);
- }
-
- // Handle existing monitor.
- __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
- __ tbnz(tmp, exact_log2(markWord::monitor_value), object_has_monitor);
-
- if (LockingMode == LM_MONITOR) {
- __ tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
- __ b(cont);
- } else if (LockingMode == LM_LEGACY) {
- // Check if it is still a light weight lock, this is true if we
- // see the stack address of the basicLock in the markWord of the
- // object.
-
- __ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
- /*release*/ true, /*weak*/ false, tmp);
- __ b(cont);
- } else {
- assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- __ fast_unlock(oop, tmp, box, disp_hdr, no_count);
- __ b(count);
- }
-
- assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
-
- // Handle existing monitor.
- __ bind(object_has_monitor);
- STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
- __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
-
- if (LockingMode == LM_LIGHTWEIGHT) {
- // If the owner is anonymous, we need to fix it -- in an outline stub.
- Register tmp2 = disp_hdr;
- __ ldr(tmp2, Address(tmp, ObjectMonitor::owner_offset()));
- // We cannot use tbnz here, the target might be too far away and cannot
- // be encoded.
- __ tst(tmp2, (uint64_t)ObjectMonitor::ANONYMOUS_OWNER);
- C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmp, tmp2);
- Compile::current()->output()->add_stub(stub);
- __ br(Assembler::NE, stub->entry());
- __ bind(stub->continuation());
- }
-
- __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
-
- Label notRecursive;
- __ cbz(disp_hdr, notRecursive);
-
- // Recursive lock
- __ sub(disp_hdr, disp_hdr, 1u);
- __ str(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
- __ cmp(disp_hdr, disp_hdr); // Sets flags for result
- __ b(cont);
-
- __ bind(notRecursive);
- __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset()));
- __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset()));
- __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
- __ cmp(rscratch1, zr); // Sets flags for result
- __ cbnz(rscratch1, cont);
- // need a release store here
- __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset()));
- __ stlr(zr, tmp); // set unowned
-
- __ bind(cont);
- // flag == EQ indicates success
- // flag == NE indicates failure
- __ br(Assembler::NE, no_count);
-
- __ bind(count);
- __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
-
- __ bind(no_count);
- %}
-
%}
//----------FRAME--------------------------------------------------------------
@@ -16609,17 +16413,19 @@ instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
// ============================================================================
// inlined locking and unlocking
-instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
+instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
match(Set cr (FastLock object box));
- effect(TEMP tmp, TEMP tmp2);
+ effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
// TODO
// identify correct cost
ins_cost(5 * INSN_COST);
format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}
- ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2));
+ ins_encode %{
+ __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
+ %}
ins_pipe(pipe_serial);
%}
@@ -16632,7 +16438,9 @@ instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRe
ins_cost(5 * INSN_COST);
format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}
- ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2));
+ ins_encode %{
+ __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
+ %}
ins_pipe(pipe_serial);
%}
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index 4bf7fee936bac..656dbdf7ae8cb 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -282,7 +282,8 @@ void LIR_Assembler::osr_entry() {
__ bind(L);
}
#endif
- __ ldp(r19, r20, Address(OSR_buf, slot_offset));
+ __ ldr(r19, Address(OSR_buf, slot_offset));
+ __ ldr(r20, Address(OSR_buf, slot_offset + BytesPerWord));
__ str(r19, frame_map()->address_for_monitor_lock(i));
__ str(r20, frame_map()->address_for_monitor_object(i));
}
@@ -434,7 +435,7 @@ int LIR_Assembler::emit_unwind_handler() {
if (LockingMode == LM_MONITOR) {
__ b(*stub->entry());
} else {
- __ unlock_object(r5, r4, r0, *stub->entry());
+ __ unlock_object(r5, r4, r0, r6, *stub->entry());
}
__ bind(*stub->continuation());
}
@@ -2558,6 +2559,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register obj = op->obj_opr()->as_register(); // may not be an oop
Register hdr = op->hdr_opr()->as_register();
Register lock = op->lock_opr()->as_register();
+ Register temp = op->scratch_opr()->as_register();
if (LockingMode == LM_MONITOR) {
if (op->info() != nullptr) {
add_debug_info_for_null_check_here(op->info());
@@ -2567,14 +2569,14 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
} else if (op->code() == lir_lock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
// add debug info for NullPointerException only if one is possible
- int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
+ int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry());
if (op->info() != nullptr) {
add_debug_info_for_null_check(null_check_offset, op->info());
}
// done
} else if (op->code() == lir_unlock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
- __ unlock_object(hdr, obj, lock, *op->stub()->entry());
+ __ unlock_object(hdr, obj, lock, temp, *op->stub()->entry());
} else {
Unimplemented();
}
@@ -2722,7 +2724,10 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
__ verify_oop(obj);
if (tmp != obj) {
+ assert_different_registers(obj, tmp, rscratch1, rscratch2, mdo_addr.base(), mdo_addr.index());
__ mov(tmp, obj);
+ } else {
+ assert_different_registers(obj, rscratch1, rscratch2, mdo_addr.base(), mdo_addr.index());
}
if (do_null) {
__ cbnz(tmp, update);
@@ -2779,10 +2784,11 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
__ cbz(rscratch2, none);
__ cmp(rscratch2, (u1)TypeEntries::null_seen);
__ br(Assembler::EQ, none);
- // There is a chance that the checks above (re-reading profiling
- // data from memory) fail if another thread has just set the
+ // There is a chance that the checks above
+ // fail if another thread has just set the
// profiling to this obj's klass
__ dmb(Assembler::ISHLD);
+ __ eor(tmp, tmp, rscratch2); // get back original value before XOR
__ ldr(rscratch2, mdo_addr);
__ eor(tmp, tmp, rscratch2);
__ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
@@ -2807,6 +2813,10 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
__ bind(none);
// first time here. Set profile type.
__ str(tmp, mdo_addr);
+#ifdef ASSERT
+ __ andr(tmp, tmp, TypeEntries::type_mask);
+ __ verify_klass_ptr(tmp);
+#endif
}
} else {
// There's a single possible klass at this profile point
@@ -2838,6 +2848,10 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
#endif
// first time here. Set profile type.
__ str(tmp, mdo_addr);
+#ifdef ASSERT
+ __ andr(tmp, tmp, TypeEntries::type_mask);
+ __ verify_klass_ptr(tmp);
+#endif
} else {
assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
index c32f975946375..952e060ed212a 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
@@ -314,6 +314,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
// "lock" stores the address of the monitor stack slot, so this is not an oop
LIR_Opr lock = new_register(T_INT);
+ LIR_Opr scratch = new_register(T_INT);
CodeEmitInfo* info_for_exception = nullptr;
if (x->needs_null_check()) {
@@ -322,7 +323,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
// this CodeEmitInfo must not have the xhandlers because here the
// object is already locked (xhandlers expect object to be unlocked)
CodeEmitInfo* info = state_for(x, x->state(), true);
- monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
+ monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
x->monitor_no(), info_for_exception, info);
}
@@ -335,8 +336,9 @@ void LIRGenerator::do_MonitorExit(MonitorExit* x) {
LIR_Opr lock = new_register(T_INT);
LIR_Opr obj_temp = new_register(T_INT);
+ LIR_Opr scratch = new_register(T_INT);
set_no_result(x);
- monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
+ monitor_exit(obj_temp, lock, syncTempOpr(), scratch, x->monitor_no());
}
void LIRGenerator::do_NegateOp(NegateOp* x) {
diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
index a847289e3ab2f..d3a746178f14e 100644
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
@@ -60,10 +60,10 @@ void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result,
}
}
-int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
+int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
const int aligned_mask = BytesPerWord -1;
const int hdr_offset = oopDesc::mark_offset_in_bytes();
- assert_different_registers(hdr, obj, disp_hdr);
+ assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2);
int null_check_offset = -1;
verify_oop(obj);
@@ -83,7 +83,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
// Load object header
ldr(hdr, Address(obj, hdr_offset));
if (LockingMode == LM_LIGHTWEIGHT) {
- fast_lock(obj, hdr, rscratch1, rscratch2, slow_case);
+ lightweight_lock(obj, hdr, temp, rscratch2, slow_case);
} else if (LockingMode == LM_LEGACY) {
Label done;
// and mark it as unlocked
@@ -125,10 +125,10 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
}
-void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
+void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
const int aligned_mask = BytesPerWord -1;
const int hdr_offset = oopDesc::mark_offset_in_bytes();
- assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
+ assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2);
Label done;
if (LockingMode != LM_LIGHTWEIGHT) {
@@ -149,7 +149,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
// be encoded.
tst(hdr, markWord::monitor_value);
br(Assembler::NE, slow_case);
- fast_unlock(obj, hdr, rscratch1, rscratch2, slow_case);
+ lightweight_unlock(obj, hdr, temp, rscratch2, slow_case);
} else if (LockingMode == LM_LEGACY) {
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to
diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp
index 98cffb4552406..4aa6206aa6073 100644
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp
@@ -58,14 +58,16 @@ using MacroAssembler::null_check;
// hdr : must be r0, contents destroyed
// obj : must point to the object to lock, contents preserved
// disp_hdr: must point to the displaced header location, contents preserved
+ // temp : temporary register, must not be rscratch1 or rscratch2
// returns code offset at which to add null check debug information
- int lock_object (Register swap, Register obj, Register disp_hdr, Label& slow_case);
+ int lock_object (Register swap, Register obj, Register disp_hdr, Register temp, Label& slow_case);
// unlocking
// hdr : contents destroyed
// obj : must point to the object to lock, contents preserved
// disp_hdr: must be r0 & must point to the displaced header location, contents destroyed
- void unlock_object(Register swap, Register obj, Register lock, Label& slow_case);
+ // temp : temporary register, must not be rscratch1 or rscratch2
+ void unlock_object(Register swap, Register obj, Register lock, Register temp, Label& slow_case);
void initialize_object(
Register obj, // result: pointer to object after successful allocation
diff --git a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
index bdba111f6df8b..4b7d2959a631a 100644
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
@@ -478,6 +478,15 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
const Register exception_pc = r3;
const Register handler_addr = r1;
+ if (AbortVMOnException) {
+ __ mov(rscratch1, exception_oop);
+ __ enter();
+ save_live_registers(sasm);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), rscratch1);
+ restore_live_registers(sasm);
+ __ leave();
+ }
+
// verify that only r0, is valid at this time
__ invalidate_registers(false, true, true, true, true, true);
diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
index dbe64f8f9ca74..cdf3ec7567ac9 100644
--- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
@@ -45,6 +45,202 @@
typedef void (MacroAssembler::* chr_insn)(Register Rt, const Address &adr);
+void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register tmpReg,
+ Register tmp2Reg, Register tmp3Reg) {
+ Register oop = objectReg;
+ Register box = boxReg;
+ Register disp_hdr = tmpReg;
+ Register tmp = tmp2Reg;
+ Label cont;
+ Label object_has_monitor;
+ Label count, no_count;
+
+ assert_different_registers(oop, box, tmp, disp_hdr);
+
+ // Load markWord from object into displaced_header.
+ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
+
+ if (DiagnoseSyncOnValueBasedClasses != 0) {
+ load_klass(tmp, oop);
+ ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
+ tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
+ br(Assembler::NE, cont);
+ }
+
+ // Check for existing monitor
+ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
+
+ if (LockingMode == LM_MONITOR) {
+ tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
+ b(cont);
+ } else if (LockingMode == LM_LEGACY) {
+ // Set tmp to be (markWord of object | UNLOCK_VALUE).
+ orr(tmp, disp_hdr, markWord::unlocked_value);
+
+ // Initialize the box. (Must happen before we update the object mark!)
+ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+
+ // Compare object markWord with an unlocked value (tmp) and if
+ // equal exchange the stack address of our box with object markWord.
+ // On failure disp_hdr contains the possibly locked markWord.
+ cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
+ /*release*/ true, /*weak*/ false, disp_hdr);
+ br(Assembler::EQ, cont);
+
+ assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+
+ // If the compare-and-exchange succeeded, then we found an unlocked
+ // object, have now locked it, and will continue at label cont
+
+ // Check if the owner is self by comparing the value in the
+ // markWord of object (disp_hdr) with the stack pointer.
+ mov(rscratch1, sp);
+ sub(disp_hdr, disp_hdr, rscratch1);
+ mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
+ // If condition is true we are cont and hence we can store 0 as the
+ // displaced header in the box, which indicates that it is a recursive lock.
+ ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
+ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+ b(cont);
+ } else {
+ assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+ lightweight_lock(oop, disp_hdr, tmp, tmp3Reg, no_count);
+ b(count);
+ }
+
+ // Handle existing monitor.
+ bind(object_has_monitor);
+
+ // The object's monitor m is unlocked iff m->owner == NULL,
+ // otherwise m->owner may contain a thread or a stack address.
+ //
+ // Try to CAS m->owner from NULL to current thread.
+ add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset())-markWord::monitor_value));
+ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
+ /*release*/ true, /*weak*/ false, rscratch1); // Sets flags for result
+
+ if (LockingMode != LM_LIGHTWEIGHT) {
+ // Store a non-null value into the box to avoid looking like a re-entrant
+ // lock. The fast-path monitor unlock code checks for
+ // markWord::monitor_value so use markWord::unused_mark which has the
+ // relevant bit set, and also matches ObjectSynchronizer::enter.
+ mov(tmp, (address)markWord::unused_mark().value());
+ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+ }
+ br(Assembler::EQ, cont); // CAS success means locking succeeded
+
+ cmp(rscratch1, rthread);
+ br(Assembler::NE, cont); // Check for recursive locking
+
+ // Recursive lock case
+ increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1);
+ // flag == EQ still from the cmp above, checking if this is a reentrant lock
+
+ bind(cont);
+ // flag == EQ indicates success
+ // flag == NE indicates failure
+ br(Assembler::NE, no_count);
+
+ bind(count);
+ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
+
+ bind(no_count);
+}
+
+void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Register tmpReg,
+ Register tmp2Reg) {
+ Register oop = objectReg;
+ Register box = boxReg;
+ Register disp_hdr = tmpReg;
+ Register tmp = tmp2Reg;
+ Label cont;
+ Label object_has_monitor;
+ Label count, no_count;
+
+ assert_different_registers(oop, box, tmp, disp_hdr);
+
+ if (LockingMode == LM_LEGACY) {
+ // Find the lock address and load the displaced header from the stack.
+ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+
+ // If the displaced header is 0, we have a recursive unlock.
+ cmp(disp_hdr, zr);
+ br(Assembler::EQ, cont);
+ }
+
+ // Handle existing monitor.
+ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
+ tbnz(tmp, exact_log2(markWord::monitor_value), object_has_monitor);
+
+ if (LockingMode == LM_MONITOR) {
+ tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
+ b(cont);
+ } else if (LockingMode == LM_LEGACY) {
+ // Check if it is still a light weight lock, this is true if we
+ // see the stack address of the basicLock in the markWord of the
+ // object.
+
+ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
+ /*release*/ true, /*weak*/ false, tmp);
+ b(cont);
+ } else {
+ assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+ lightweight_unlock(oop, tmp, box, disp_hdr, no_count);
+ b(count);
+ }
+
+ assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+
+ // Handle existing monitor.
+ bind(object_has_monitor);
+ STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
+ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
+
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ // If the owner is anonymous, we need to fix it -- in an outline stub.
+ Register tmp2 = disp_hdr;
+ ldr(tmp2, Address(tmp, ObjectMonitor::owner_offset()));
+ // We cannot use tbnz here, the target might be too far away and cannot
+ // be encoded.
+ tst(tmp2, (uint64_t)ObjectMonitor::ANONYMOUS_OWNER);
+ C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmp, tmp2);
+ Compile::current()->output()->add_stub(stub);
+ br(Assembler::NE, stub->entry());
+ bind(stub->continuation());
+ }
+
+ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
+
+ Label notRecursive;
+ cbz(disp_hdr, notRecursive);
+
+ // Recursive lock
+ sub(disp_hdr, disp_hdr, 1u);
+ str(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
+ cmp(disp_hdr, disp_hdr); // Sets flags for result
+ b(cont);
+
+ bind(notRecursive);
+ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset()));
+ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset()));
+ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
+ cmp(rscratch1, zr); // Sets flags for result
+ cbnz(rscratch1, cont);
+ // need a release store here
+ lea(tmp, Address(tmp, ObjectMonitor::owner_offset()));
+ stlr(zr, tmp); // set unowned
+
+ bind(cont);
+ // flag == EQ indicates success
+ // flag == NE indicates failure
+ br(Assembler::NE, no_count);
+
+ bind(count);
+ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
+
+ bind(no_count);
+}
+
// Search for str1 in str2 and return index or -1
// Clobbers: rscratch1, rscratch2, rflags. May also clobber v0-v1, when icnt1==-1.
void C2_MacroAssembler::string_indexof(Register str2, Register str1,
diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
index ccfd60b1a8b25..f342ca3c977b9 100644
--- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
@@ -35,6 +35,11 @@
enum shift_kind kind = Assembler::LSL, unsigned shift = 0);
public:
+ // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
+ // See full description in macroAssembler_aarch64.cpp.
+ void fast_lock(Register object, Register box, Register tmp, Register tmp2, Register tmp3);
+ void fast_unlock(Register object, Register box, Register tmp, Register tmp2);
+
void string_compare(Register str1, Register str2,
Register cnt1, Register cnt2, Register result,
Register tmp1, Register tmp2, FloatRegister vtmp1,
diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.cpp b/src/hotspot/cpu/aarch64/frame_aarch64.cpp
index ea76c9d20c158..2357721ed3d4c 100644
--- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp
@@ -508,7 +508,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// first the method
- Method* m = *interpreter_frame_method_addr();
+ Method* m = safe_interpreter_frame_method();
// validate the method we'd find in this potential sender
if (!Method::is_valid_method(m)) return false;
diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.hpp b/src/hotspot/cpu/aarch64/frame_aarch64.hpp
index 3d1f588359fa8..e58d66d5cf50e 100644
--- a/src/hotspot/cpu/aarch64/frame_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.hpp
@@ -165,7 +165,7 @@
frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc);
- frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb);
+ frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, bool allow_cb_null = false);
// used for fast frame construction by continuations
frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map, bool on_heap);
diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp
index b969e180e4ab9..39dc16d2748e6 100644
--- a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp
@@ -92,7 +92,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
init(sp, fp, pc);
}
-inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb) {
+inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, bool allow_cb_null) {
assert(pauth_ptr_is_raw(pc), "cannot be signed");
intptr_t a = intptr_t(sp);
intptr_t b = intptr_t(fp);
@@ -103,7 +103,7 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
assert(pc != nullptr, "no pc?");
_cb = cb;
_oop_map = nullptr;
- assert(_cb != nullptr, "pc: " INTPTR_FORMAT, p2i(pc));
+ assert(_cb != nullptr || allow_cb_null, "pc: " INTPTR_FORMAT, p2i(pc));
_on_heap = false;
DEBUG_ONLY(_frame_index = -1;)
diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
index 3a9e5db2ef7ae..ebaf18299728d 100644
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
@@ -1290,6 +1290,9 @@ static bool aarch64_test_and_branch_reachable(int branch_offset, int target_offs
return test_and_branch_to_trampoline_delta < test_and_branch_delta_limit;
}
+ZLoadBarrierStubC2Aarch64::ZLoadBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register ref)
+ : ZLoadBarrierStubC2(node, ref_addr, ref), _test_and_branch_reachable_entry(), _offset(), _deferred_emit(false), _test_and_branch_reachable(false) {}
+
ZLoadBarrierStubC2Aarch64::ZLoadBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register ref, int offset)
: ZLoadBarrierStubC2(node, ref_addr, ref), _test_and_branch_reachable_entry(), _offset(offset), _deferred_emit(false), _test_and_branch_reachable(false) {
PhaseOutput* const output = Compile::current()->output();
@@ -1319,6 +1322,12 @@ int ZLoadBarrierStubC2Aarch64::get_stub_size() {
return cb.insts_size();
}
+ZLoadBarrierStubC2Aarch64* ZLoadBarrierStubC2Aarch64::create(const MachNode* node, Address ref_addr, Register ref) {
+ ZLoadBarrierStubC2Aarch64* const stub = new (Compile::current()->comp_arena()) ZLoadBarrierStubC2Aarch64(node, ref_addr, ref);
+ register_stub(stub);
+ return stub;
+}
+
ZLoadBarrierStubC2Aarch64* ZLoadBarrierStubC2Aarch64::create(const MachNode* node, Address ref_addr, Register ref, int offset) {
ZLoadBarrierStubC2Aarch64* const stub = new (Compile::current()->comp_arena()) ZLoadBarrierStubC2Aarch64(node, ref_addr, ref, offset);
register_stub(stub);
diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp
index 00714e5c0c04b..82334b34adeca 100644
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp
@@ -265,10 +265,12 @@ class ZLoadBarrierStubC2Aarch64 : public ZLoadBarrierStubC2 {
bool _deferred_emit;
bool _test_and_branch_reachable;
+ ZLoadBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register ref);
ZLoadBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register ref, int offset);
int get_stub_size();
public:
+ static ZLoadBarrierStubC2Aarch64* create(const MachNode* node, Address ref_addr, Register ref);
static ZLoadBarrierStubC2Aarch64* create(const MachNode* node, Address ref_addr, Register ref, int offset);
virtual void emit_code(MacroAssembler& masm);
diff --git a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
index 8c698635ad0f5..23564a3f23c38 100644
--- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
+++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
@@ -48,7 +48,7 @@ static void z_keep_alive_load_barrier(MacroAssembler& _masm, const MachNode* nod
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadBeforeMov);
__ movzw(tmp, barrier_Relocation::unpatched);
__ tst(ref, tmp);
- ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
+ ZLoadBarrierStubC2Aarch64* const stub = ZLoadBarrierStubC2Aarch64::create(node, ref_addr, ref);
__ br(Assembler::NE, *stub->entry());
z_uncolor(_masm, node, ref);
__ bind(*stub->continuation());
diff --git a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
index 94f26b6d062c6..1146324e19cb7 100644
--- a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
@@ -41,6 +41,8 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
// and Operational Models for ARMv8"
#define CPU_MULTI_COPY_ATOMIC
+#define DEFAULT_CACHE_LINE_SIZE 64
+
// According to the ARMv8 ARM, "Concurrent modification and execution
// of instructions can lead to the resulting instruction performing
// any behavior that can be achieved by executing any sequence of
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
index bda7e4c1438b6..9e69c913a7b59 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -622,7 +622,7 @@ void InterpreterMacroAssembler::remove_activation(
// Check that all monitors are unlocked
{
Label loop, exception, entry, restart;
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
const Address monitor_block_top(
rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
const Address monitor_block_bot(
@@ -692,6 +692,12 @@ void InterpreterMacroAssembler::remove_activation(
// testing if reserved zone needs to be re-enabled
Label no_reserved_zone_enabling;
+ // check if already enabled - if so no re-enabling needed
+ assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
+ ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
+ cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
+ br(Assembler::EQ, no_reserved_zone_enabling);
+
// look for an overflow into the stack reserved zone, i.e.
// interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
@@ -725,7 +731,7 @@ void InterpreterMacroAssembler::remove_activation(
//
// Kills:
// r0
-// c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
+// c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
// rscratch1, rscratch2 (scratch regs)
void InterpreterMacroAssembler::lock_object(Register lock_reg)
{
@@ -740,6 +746,8 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
const Register swap_reg = r0;
const Register tmp = c_rarg2;
const Register obj_reg = c_rarg3; // Will contain the oop
+ const Register tmp2 = c_rarg4;
+ const Register tmp3 = c_rarg5;
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
@@ -760,7 +768,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
if (LockingMode == LM_LIGHTWEIGHT) {
ldr(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- fast_lock(obj_reg, tmp, rscratch1, rscratch2, slow_case);
+ lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
b(count);
} else if (LockingMode == LM_LEGACY) {
// Load (object->mark() | 1) into swap_reg
@@ -858,6 +866,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
const Register swap_reg = r0;
const Register header_reg = c_rarg2; // Will contain the old oopMark
const Register obj_reg = c_rarg3; // Will contain the oop
+ const Register tmp_reg = c_rarg4; // Temporary used by lightweight_unlock
save_bcp(); // Save in case of exception
@@ -891,7 +900,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
ldr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
tbnz(header_reg, exact_log2(markWord::monitor_value), slow_case);
- fast_unlock(obj_reg, header_reg, swap_reg, rscratch1, slow_case);
+ lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
b(count);
bind(slow_case);
} else if (LockingMode == LM_LEGACY) {
@@ -1660,7 +1669,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
}
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
- assert_different_registers(obj, rscratch1);
+ assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
Label update, next, none;
verify_oop(obj);
@@ -1682,13 +1691,13 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
tbnz(obj, exact_log2(TypeEntries::type_unknown), next);
// already unknown. Nothing to do anymore.
- ldr(rscratch1, mdo_addr);
cbz(rscratch1, none);
cmp(rscratch1, (u1)TypeEntries::null_seen);
br(Assembler::EQ, none);
- // There is a chance that the checks above (re-reading profiling
- // data from memory) fail if another thread has just set the
+ // There is a chance that the checks above
+ // fail if another thread has just set the
// profiling to this obj's klass
+ eor(obj, obj, rscratch1); // get back original value before XOR
ldr(rscratch1, mdo_addr);
eor(obj, obj, rscratch1);
tst(obj, TypeEntries::type_klass_mask);
@@ -1701,6 +1710,10 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
bind(none);
// first time here. Set profile type.
str(obj, mdo_addr);
+#ifdef ASSERT
+ andr(obj, obj, TypeEntries::type_mask);
+ verify_klass_ptr(obj);
+#endif
bind(next);
}
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 3be34786f19c3..5a90cf189ce3d 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -2737,6 +2737,10 @@ void MacroAssembler::cmpxchg(Register addr, Register expected,
mov(result, expected);
lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
compare_eq(result, expected, size);
+#ifdef ASSERT
+ // Poison rscratch1 which is written on !UseLSE branch
+ mov(rscratch1, 0x1f1f1f1f1f1f1f1f);
+#endif
} else {
Label retry_load, done;
prfm(Address(addr), PSTL1STRM);
@@ -4152,108 +4156,117 @@ void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, R
}
add(table, table, table_offset);
+ // Registers v0..v7 are used as data registers.
+ // Registers v16..v31 are used as tmp registers.
sub(buf, buf, 0x10);
- ldrq(v1, Address(buf, 0x10));
- ldrq(v2, Address(buf, 0x20));
- ldrq(v3, Address(buf, 0x30));
- ldrq(v4, Address(buf, 0x40));
- ldrq(v5, Address(buf, 0x50));
- ldrq(v6, Address(buf, 0x60));
- ldrq(v7, Address(buf, 0x70));
- ldrq(v8, Address(pre(buf, 0x80)));
-
- movi(v25, T4S, 0);
- mov(v25, S, 0, crc);
- eor(v1, T16B, v1, v25);
-
- ldrq(v0, Address(table));
+ ldrq(v0, Address(buf, 0x10));
+ ldrq(v1, Address(buf, 0x20));
+ ldrq(v2, Address(buf, 0x30));
+ ldrq(v3, Address(buf, 0x40));
+ ldrq(v4, Address(buf, 0x50));
+ ldrq(v5, Address(buf, 0x60));
+ ldrq(v6, Address(buf, 0x70));
+ ldrq(v7, Address(pre(buf, 0x80)));
+
+ movi(v31, T4S, 0);
+ mov(v31, S, 0, crc);
+ eor(v0, T16B, v0, v31);
+
+ // Register v16 contains constants from the crc table.
+ ldrq(v16, Address(table));
b(CRC_by128_loop);
align(OptoLoopAlignment);
BIND(CRC_by128_loop);
- pmull (v9, T1Q, v1, v0, T1D);
- pmull2(v10, T1Q, v1, v0, T2D);
- ldrq(v1, Address(buf, 0x10));
- eor3(v1, T16B, v9, v10, v1);
-
- pmull (v11, T1Q, v2, v0, T1D);
- pmull2(v12, T1Q, v2, v0, T2D);
- ldrq(v2, Address(buf, 0x20));
- eor3(v2, T16B, v11, v12, v2);
-
- pmull (v13, T1Q, v3, v0, T1D);
- pmull2(v14, T1Q, v3, v0, T2D);
- ldrq(v3, Address(buf, 0x30));
- eor3(v3, T16B, v13, v14, v3);
-
- pmull (v15, T1Q, v4, v0, T1D);
- pmull2(v16, T1Q, v4, v0, T2D);
- ldrq(v4, Address(buf, 0x40));
- eor3(v4, T16B, v15, v16, v4);
-
- pmull (v17, T1Q, v5, v0, T1D);
- pmull2(v18, T1Q, v5, v0, T2D);
- ldrq(v5, Address(buf, 0x50));
- eor3(v5, T16B, v17, v18, v5);
-
- pmull (v19, T1Q, v6, v0, T1D);
- pmull2(v20, T1Q, v6, v0, T2D);
- ldrq(v6, Address(buf, 0x60));
- eor3(v6, T16B, v19, v20, v6);
-
- pmull (v21, T1Q, v7, v0, T1D);
- pmull2(v22, T1Q, v7, v0, T2D);
- ldrq(v7, Address(buf, 0x70));
- eor3(v7, T16B, v21, v22, v7);
-
- pmull (v23, T1Q, v8, v0, T1D);
- pmull2(v24, T1Q, v8, v0, T2D);
- ldrq(v8, Address(pre(buf, 0x80)));
- eor3(v8, T16B, v23, v24, v8);
+ pmull (v17, T1Q, v0, v16, T1D);
+ pmull2(v18, T1Q, v0, v16, T2D);
+ ldrq(v0, Address(buf, 0x10));
+ eor3(v0, T16B, v17, v18, v0);
+
+ pmull (v19, T1Q, v1, v16, T1D);
+ pmull2(v20, T1Q, v1, v16, T2D);
+ ldrq(v1, Address(buf, 0x20));
+ eor3(v1, T16B, v19, v20, v1);
+
+ pmull (v21, T1Q, v2, v16, T1D);
+ pmull2(v22, T1Q, v2, v16, T2D);
+ ldrq(v2, Address(buf, 0x30));
+ eor3(v2, T16B, v21, v22, v2);
+
+ pmull (v23, T1Q, v3, v16, T1D);
+ pmull2(v24, T1Q, v3, v16, T2D);
+ ldrq(v3, Address(buf, 0x40));
+ eor3(v3, T16B, v23, v24, v3);
+
+ pmull (v25, T1Q, v4, v16, T1D);
+ pmull2(v26, T1Q, v4, v16, T2D);
+ ldrq(v4, Address(buf, 0x50));
+ eor3(v4, T16B, v25, v26, v4);
+
+ pmull (v27, T1Q, v5, v16, T1D);
+ pmull2(v28, T1Q, v5, v16, T2D);
+ ldrq(v5, Address(buf, 0x60));
+ eor3(v5, T16B, v27, v28, v5);
+
+ pmull (v29, T1Q, v6, v16, T1D);
+ pmull2(v30, T1Q, v6, v16, T2D);
+ ldrq(v6, Address(buf, 0x70));
+ eor3(v6, T16B, v29, v30, v6);
+
+ // Reuse registers v23, v24.
+ // Using them won't block the first instruction of the next iteration.
+ pmull (v23, T1Q, v7, v16, T1D);
+ pmull2(v24, T1Q, v7, v16, T2D);
+ ldrq(v7, Address(pre(buf, 0x80)));
+ eor3(v7, T16B, v23, v24, v7);
subs(len, len, 0x80);
br(Assembler::GE, CRC_by128_loop);
// fold into 512 bits
- ldrq(v0, Address(table, 0x10));
+ // Use v31 for constants because v16 may still be in use.
+ ldrq(v31, Address(table, 0x10));
- pmull (v10, T1Q, v1, v0, T1D);
- pmull2(v11, T1Q, v1, v0, T2D);
- eor3(v1, T16B, v10, v11, v5);
+ pmull (v17, T1Q, v0, v31, T1D);
+ pmull2(v18, T1Q, v0, v31, T2D);
+ eor3(v0, T16B, v17, v18, v4);
- pmull (v12, T1Q, v2, v0, T1D);
- pmull2(v13, T1Q, v2, v0, T2D);
- eor3(v2, T16B, v12, v13, v6);
+ pmull (v19, T1Q, v1, v31, T1D);
+ pmull2(v20, T1Q, v1, v31, T2D);
+ eor3(v1, T16B, v19, v20, v5);
- pmull (v14, T1Q, v3, v0, T1D);
- pmull2(v15, T1Q, v3, v0, T2D);
- eor3(v3, T16B, v14, v15, v7);
+ pmull (v21, T1Q, v2, v31, T1D);
+ pmull2(v22, T1Q, v2, v31, T2D);
+ eor3(v2, T16B, v21, v22, v6);
- pmull (v16, T1Q, v4, v0, T1D);
- pmull2(v17, T1Q, v4, v0, T2D);
- eor3(v4, T16B, v16, v17, v8);
+ pmull (v23, T1Q, v3, v31, T1D);
+ pmull2(v24, T1Q, v3, v31, T2D);
+ eor3(v3, T16B, v23, v24, v7);
// fold into 128 bits
- ldrq(v5, Address(table, 0x20));
- pmull (v10, T1Q, v1, v5, T1D);
- pmull2(v11, T1Q, v1, v5, T2D);
- eor3(v4, T16B, v4, v10, v11);
-
- ldrq(v6, Address(table, 0x30));
- pmull (v12, T1Q, v2, v6, T1D);
- pmull2(v13, T1Q, v2, v6, T2D);
- eor3(v4, T16B, v4, v12, v13);
-
- ldrq(v7, Address(table, 0x40));
- pmull (v14, T1Q, v3, v7, T1D);
- pmull2(v15, T1Q, v3, v7, T2D);
- eor3(v1, T16B, v4, v14, v15);
+ // Use v17 for constants because v31 may still be in use.
+ ldrq(v17, Address(table, 0x20));
+ pmull (v25, T1Q, v0, v17, T1D);
+ pmull2(v26, T1Q, v0, v17, T2D);
+ eor3(v3, T16B, v3, v25, v26);
+
+ // Use v18 for constants because v17 may still be in use.
+ ldrq(v18, Address(table, 0x30));
+ pmull (v27, T1Q, v1, v18, T1D);
+ pmull2(v28, T1Q, v1, v18, T2D);
+ eor3(v3, T16B, v3, v27, v28);
+
+ // Use v19 for constants because v18 may still be in use.
+ ldrq(v19, Address(table, 0x40));
+ pmull (v29, T1Q, v2, v19, T1D);
+ pmull2(v30, T1Q, v2, v19, T2D);
+ eor3(v0, T16B, v3, v29, v30);
add(len, len, 0x80);
add(buf, buf, 0x10);
- mov(tmp0, v1, D, 0);
- mov(tmp1, v1, D, 1);
+ mov(tmp0, v0, D, 0);
+ mov(tmp1, v0, D, 1);
}
SkipIfEqual::SkipIfEqual(
@@ -6212,16 +6225,16 @@ void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
}
}
-// Implements fast-locking.
+// Implements lightweight-locking.
// Branches to slow upon failure to lock the object, with ZF cleared.
// Falls through upon success with ZF set.
//
// - obj: the object to be locked
// - hdr: the header, already loaded from obj, will be destroyed
// - t1, t2: temporary registers, will be destroyed
-void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
+void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
- assert_different_registers(obj, hdr, t1, t2);
+ assert_different_registers(obj, hdr, t1, t2, rscratch1);
// Check if we would have space on lock-stack for the object.
ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
@@ -6233,6 +6246,7 @@ void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Register
// Clear lock-bits, into t2
eor(t2, hdr, markWord::unlocked_value);
// Try to swing header from unlocked to locked
+ // Clobbers rscratch1 when UseLSE is false
cmpxchg(/*addr*/ obj, /*expected*/ hdr, /*new*/ t2, Assembler::xword,
/*acquire*/ true, /*release*/ true, /*weak*/ false, t1);
br(Assembler::NE, slow);
@@ -6244,16 +6258,16 @@ void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Register
strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
}
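For readers less familiar with LM_LIGHTWEIGHT, here is a deliberately simplified, self-contained C++ model of the fast path this function emits (the 8-entry lock stack, the type names and the single unlocked bit are assumptions; the real code operates on the object's mark word and JavaThread's lock stack):

#include <atomic>
#include <cstdint>

constexpr uintptr_t UNLOCKED_VALUE = 1;   // models markWord::unlocked_value
constexpr int       LOCK_STACK_CAP = 8;   // capacity is illustrative only

struct ObjModel    { std::atomic<uintptr_t> mark; };
struct ThreadModel { ObjModel* lock_stack[LOCK_STACK_CAP]; int lock_stack_top = 0; };

// Returns true on success (the assembly falls through with ZF set);
// false corresponds to branching to the slow path.
bool try_lightweight_lock(ThreadModel* t, ObjModel* obj) {
  if (t->lock_stack_top == LOCK_STACK_CAP) return false;   // no room on the lock stack
  uintptr_t hdr = obj->mark.load();
  if ((hdr & UNLOCKED_VALUE) == 0) return false;            // already locked or inflated
  uintptr_t locked = hdr ^ UNLOCKED_VALUE;                   // clear the lock bits, like eor() above
  if (!obj->mark.compare_exchange_strong(hdr, locked)) return false;
  t->lock_stack[t->lock_stack_top++] = obj;                  // push obj, bump lock_stack_top
  return true;
}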
-// Implements fast-unlocking.
+// Implements lightweight-unlocking.
// Branches to slow upon failure, with ZF cleared.
// Falls through upon success, with ZF set.
//
// - obj: the object to be unlocked
// - hdr: the (pre-loaded) header of the object
// - t1, t2: temporary registers
-void MacroAssembler::fast_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
+void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
- assert_different_registers(obj, hdr, t1, t2);
+ assert_different_registers(obj, hdr, t1, t2, rscratch1);
#ifdef ASSERT
{
@@ -6293,6 +6307,7 @@ void MacroAssembler::fast_unlock(Register obj, Register hdr, Register t1, Regist
orr(t1, hdr, markWord::unlocked_value);
// Try to swing header from locked to unlocked
+ // Clobbers rscratch1 when UseLSE is false
cmpxchg(obj, hdr, t1, Assembler::xword,
/*acquire*/ true, /*release*/ true, /*weak*/ false, t2);
br(Assembler::NE, slow);
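The matching unlock fast path, in the same simplified model as the locking sketch above (again an illustration, not HotSpot code; 0x2 mirrors markWord::monitor_value):

bool try_lightweight_unlock(ThreadModel* t, ObjModel* obj) {
  uintptr_t hdr = obj->mark.load();
  if (hdr & 0x2) return false;                               // inflated monitor -> slow path
  uintptr_t unlocked = hdr | UNLOCKED_VALUE;                 // restore the unlocked bit, like orr() above
  if (!obj->mark.compare_exchange_strong(hdr, unlocked)) return false;
  t->lock_stack_top--;                                       // pop the matching lock-stack entry
  return true;
}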
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index 6b45be8ce43ad..81146c5449979 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -1582,8 +1582,8 @@ class MacroAssembler: public Assembler {
// Code for java.lang.Thread::onSpinWait() intrinsic.
void spin_wait();
- void fast_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
- void fast_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
+ void lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
+ void lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
private:
// Check the current thread doesn't need a cross modify fence.
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index 47dbe0cd97a24..2335a70c9feeb 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -1759,6 +1759,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
const Register obj_reg = r19; // Will contain the oop
const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
const Register old_hdr = r13; // value of old header at unlock time
+ const Register lock_tmp = r14; // Temporary used by lightweight_lock/unlock
const Register tmp = lr;
Label slow_path_lock;
@@ -1812,7 +1813,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ ldr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- __ fast_lock(obj_reg, swap_reg, tmp, rscratch1, slow_path_lock);
+ __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
}
__ bind(count);
__ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
@@ -1953,7 +1954,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
assert(LockingMode == LM_LIGHTWEIGHT, "");
__ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ tbnz(old_hdr, exact_log2(markWord::monitor_value), slow_path_unlock);
- __ fast_unlock(obj_reg, old_hdr, swap_reg, rscratch1, slow_path_unlock);
+ __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
__ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
}
diff --git a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
index 5384af5f215c8..469edaef233ce 100644
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
@@ -688,7 +688,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
// monitor entry size: see picture of stack set
// (generate_method_entry) and frame_amd64.hpp
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
// total overhead size: entry_size + (saved rbp through expr stack
// bottom). be sure to change this if you add/subtract anything
@@ -769,7 +769,7 @@ void TemplateInterpreterGenerator::lock_method() {
const Address monitor_block_top(
rfp,
frame::interpreter_frame_monitor_block_top_offset * wordSize);
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
#ifdef ASSERT
{
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index 46c649d77b910..5d3585a5e5a43 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -3813,7 +3813,7 @@ void TemplateTable::monitorenter()
rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
const Address monitor_block_bot(
rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
Label allocated;
@@ -3916,7 +3916,7 @@ void TemplateTable::monitorexit()
rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
const Address monitor_block_bot(
rfp, frame::interpreter_frame_initial_sp_offset * wordSize);
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
Label found;
diff --git a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
index cbe9057f6a22c..c09e54e0e57ad 100644
--- a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
@@ -219,7 +219,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
Register t2 = hdr; // blow
Register t3 = Rtemp; // blow
- fast_lock_2(obj /* obj */, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
+ lightweight_lock(obj /* obj */, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
// Success: fall through
} else if (LockingMode == LM_LEGACY) {
@@ -282,8 +282,8 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
Register t2 = hdr; // blow
Register t3 = Rtemp; // blow
- fast_unlock_2(obj /* object */, t1, t2, t3, 1 /* savemask (save t1) */,
- slow_case);
+ lightweight_unlock(obj /* object */, t1, t2, t3, 1 /* savemask (save t1) */,
+ slow_case);
// Success: Fall through
} else if (LockingMode == LM_LEGACY) {
diff --git a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
index 537bf98c3e443..62faa6170833b 100644
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
@@ -350,6 +350,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
void Runtime1::generate_unwind_exception(StubAssembler* sasm) {
+
+ if (AbortVMOnException) {
+ save_live_registers(sasm);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), Rexception_obj);
+ restore_live_registers(sasm);
+ }
+
// FP no longer used to find the frame start
// on entry, remove_frame() has already been called (restoring FP and LR)
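The same AbortVMOnException hook is added to the PPC and RISC-V unwind stubs later in this patch. What the leaf call buys you, modelled in plain C++ (check_abort_on_vm_exception itself lives in shared C1/runtime code; the flag handling below is a simplified assumption, not its actual source):

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Simplified model: abort the VM as soon as an exception whose class name
// matches -XX:AbortVMOnException=<name> is about to be unwound, so the point
// of the throw is preserved in the resulting hs_err/core file.
void check_abort_on_vm_exception_model(const char* exception_class_name,
                                       const char* abort_vm_on_exception) {
  if (abort_vm_on_exception != nullptr &&
      std::strstr(exception_class_name, abort_vm_on_exception) != nullptr) {
    std::fprintf(stderr, "Saw %s, aborting\n", exception_class_name);
    std::abort();
  }
}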
diff --git a/src/hotspot/cpu/arm/c2_MacroAssembler_arm.cpp b/src/hotspot/cpu/arm/c2_MacroAssembler_arm.cpp
index f887f5d889cc8..1db30ce5c685d 100644
--- a/src/hotspot/cpu/arm/c2_MacroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c2_MacroAssembler_arm.cpp
@@ -93,8 +93,8 @@ void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratc
if (LockingMode == LM_LIGHTWEIGHT) {
- fast_lock_2(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
- 1 /* savemask (save t1) */, done);
+ lightweight_lock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
+ 1 /* savemask (save t1) */, done);
// Success: set Z
cmp(Roop, Roop);
@@ -143,8 +143,8 @@ void C2_MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscra
if (LockingMode == LM_LIGHTWEIGHT) {
- fast_unlock_2(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
- 1 /* savemask (save t1) */, done);
+ lightweight_unlock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
+ 1 /* savemask (save t1) */, done);
cmp(Roop, Roop); // Success: Set Z
// Fall through
diff --git a/src/hotspot/cpu/arm/frame_arm.cpp b/src/hotspot/cpu/arm/frame_arm.cpp
index de2df56f59df2..d923e1f43ad43 100644
--- a/src/hotspot/cpu/arm/frame_arm.cpp
+++ b/src/hotspot/cpu/arm/frame_arm.cpp
@@ -279,7 +279,6 @@ BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
}
-// Pointer beyond the "oldest/deepest" BasicObjectLock on stack.
BasicObjectLock* frame::interpreter_frame_monitor_end() const {
BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
// make sure the pointer points inside the frame
@@ -421,7 +420,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// first the method
- Method* m = *interpreter_frame_method_addr();
+ Method* m = safe_interpreter_frame_method();
// validate the method we'd find in this potential sender
if (!Method::is_valid_method(m)) return false;
diff --git a/src/hotspot/cpu/arm/globalDefinitions_arm.hpp b/src/hotspot/cpu/arm/globalDefinitions_arm.hpp
index 0a400d0086477..ba180fb0f8718 100644
--- a/src/hotspot/cpu/arm/globalDefinitions_arm.hpp
+++ b/src/hotspot/cpu/arm/globalDefinitions_arm.hpp
@@ -48,6 +48,8 @@ const bool HaveVFP = true;
// arm32 is not specified as multi-copy-atomic
// So we must not #define CPU_MULTI_COPY_ATOMIC
+#define DEFAULT_CACHE_LINE_SIZE 64
+
#define STUBROUTINES_MD_HPP "stubRoutines_arm.hpp"
#define INTERP_MASM_MD_HPP "interp_masm_arm.hpp"
#define TEMPLATETABLE_MD_HPP "templateTable_arm.hpp"
diff --git a/src/hotspot/cpu/arm/interp_masm_arm.cpp b/src/hotspot/cpu/arm/interp_masm_arm.cpp
index 2dc0960cd8291..f49b6ed06ecec 100644
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp
@@ -814,7 +814,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
{
Label loop;
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
const Register Rbottom = R3;
const Register Rcur_obj = Rtemp;
@@ -911,7 +911,7 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
}
if (LockingMode == LM_LIGHTWEIGHT) {
- fast_lock_2(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
+ lightweight_lock(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
b(done);
} else if (LockingMode == LM_LEGACY) {
// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
@@ -1033,8 +1033,8 @@ void InterpreterMacroAssembler::unlock_object(Register Rlock) {
cmpoop(Rtemp, Robj);
b(slow_case, ne);
- fast_unlock_2(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
- 1 /* savemask (save t1) */, slow_case);
+ lightweight_unlock(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
+ 1 /* savemask (save t1) */, slow_case);
b(done);
diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.cpp b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
index 9a7735a94b8c4..b827e69d02233 100644
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
@@ -1748,14 +1748,14 @@ void MacroAssembler::read_polling_page(Register dest, relocInfo::relocType rtype
POISON_REG(mask, 1, R2, poison) \
POISON_REG(mask, 2, R3, poison)
-// Attempt to fast-lock an object
+// Attempt to lightweight-lock an object
// Registers:
// - obj: the object to be locked
// - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
// Result:
// - Success: fallthrough
// - Error: break to slow, Z cleared.
-void MacroAssembler::fast_lock_2(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
+void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, t1, t2, t3);
@@ -1806,14 +1806,14 @@ void MacroAssembler::fast_lock_2(Register obj, Register t1, Register t2, Registe
// Success: fall through
}
-// Attempt to fast-unlock an object
+// Attempt to lightweight-unlock an object
// Registers:
// - obj: the object to be unlocked
// - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
// Result:
// - Success: fallthrough
// - Error: break to slow, Z cleared.
-void MacroAssembler::fast_unlock_2(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
+void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, t1, t2, t3);
diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.hpp b/src/hotspot/cpu/arm/macroAssembler_arm.hpp
index 359ad93b91bd2..9a855dee8f668 100644
--- a/src/hotspot/cpu/arm/macroAssembler_arm.hpp
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.hpp
@@ -1009,23 +1009,23 @@ class MacroAssembler: public Assembler {
void cas_for_lock_acquire(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
void cas_for_lock_release(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
- // Attempt to fast-lock an object
+ // Attempt to lightweight-lock an object
// Registers:
// - obj: the object to be locked
// - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
// Result:
// - Success: fallthrough
// - Error: break to slow, Z cleared.
- void fast_lock_2(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
+ void lightweight_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
- // Attempt to fast-unlock an object
+ // Attempt to lightweight-unlock an object
// Registers:
// - obj: the object to be unlocked
// - t1, t2, t3: temp registers. If corresponding bit in savemask is set, they get saved, otherwise blown.
// Result:
// - Success: fallthrough
// - Error: break to slow, Z cleared.
- void fast_unlock_2(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
+ void lightweight_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow);
#ifndef PRODUCT
// Preserves flags and all registers.
diff --git a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
index 5916305a9d27c..e4f4107da0fb6 100644
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
@@ -1155,8 +1155,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
if (LockingMode == LM_LIGHTWEIGHT) {
log_trace(fastlock)("SharedRuntime lock fast");
- __ fast_lock_2(sync_obj /* object */, disp_hdr /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
- 0x7 /* savemask */, slow_lock);
+ __ lightweight_lock(sync_obj /* object */, disp_hdr /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
+ 0x7 /* savemask */, slow_lock);
// Fall through to lock_done
} else if (LockingMode == LM_LEGACY) {
const Register mark = tmp;
@@ -1242,8 +1242,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
if (method->is_synchronized()) {
if (LockingMode == LM_LIGHTWEIGHT) {
log_trace(fastlock)("SharedRuntime unlock fast");
- __ fast_unlock_2(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
- 7 /* savemask */, slow_unlock);
+ __ lightweight_unlock(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
+ 7 /* savemask */, slow_unlock);
// Fall through
} else if (LockingMode == LM_LEGACY) {
// See C1_MacroAssembler::unlock_object() for more comments
diff --git a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp
index d1963ebfd6902..848a265263391 100644
--- a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp
+++ b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp
@@ -530,7 +530,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
const Register RmaxStack = R2;
// monitor entry size
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
// total overhead size: entry_size + (saved registers, thru expr stack bottom).
// be sure to change this if you add/subtract anything to/from the overhead area
@@ -569,7 +569,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
void TemplateInterpreterGenerator::lock_method() {
// synchronize method
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
assert ((entry_size % StackAlignmentInBytes) == 0, "should keep stack alignment");
#ifdef ASSERT
diff --git a/src/hotspot/cpu/arm/templateTable_arm.cpp b/src/hotspot/cpu/arm/templateTable_arm.cpp
index f52875af6e38c..1bb92092e7a1d 100644
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp
@@ -4270,7 +4270,7 @@ void TemplateTable::monitorenter() {
// check for null object
__ null_check(Robj, Rtemp);
- const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
+ const int entry_size = (frame::interpreter_frame_monitor_size_in_bytes());
assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
Label allocate_monitor, allocated;
@@ -4381,7 +4381,7 @@ void TemplateTable::monitorexit() {
// check for null object
__ null_check(Robj, Rtemp);
- const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
+ const int entry_size = (frame::interpreter_frame_monitor_size_in_bytes());
Label found, throw_exception;
// find matching slot
diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
index ce347fe66d974..3f4b8520b9fef 100644
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
@@ -2635,6 +2635,13 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
Unimplemented();
}
+ // There might be a volatile load before this Unsafe CAS.
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ sync();
+ } else {
+ __ lwsync();
+ }
+
if (is_64bit) {
__ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
MacroAssembler::MemBarNone,
@@ -2996,9 +3003,24 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
assert(addr->disp() == 0 && addr->index()->is_illegal(), "use leal!");
const Register Rptr = addr->base()->as_pointer_register(),
Rtmp = tmp->as_register();
- Register Rco = noreg;
- if (UseCompressedOops && data->is_oop()) {
- Rco = __ encode_heap_oop(Rtmp, data->as_register());
+ Register Robj = noreg;
+ if (data->is_oop()) {
+ if (UseCompressedOops) {
+ Robj = __ encode_heap_oop(Rtmp, data->as_register());
+ } else {
+ Robj = data->as_register();
+ if (Robj == dest->as_register()) { // May happen with ZGC.
+ __ mr(Rtmp, Robj);
+ Robj = Rtmp;
+ }
+ }
+ }
+
+ // There might be a volatile load before this Unsafe OP.
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ sync();
+ } else {
+ __ lwsync();
}
Label Lretry;
@@ -3018,18 +3040,11 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
} else if (data->is_oop()) {
assert(code == lir_xchg, "xadd for oops");
const Register Rold = dest->as_register();
+ assert_different_registers(Rptr, Rold, Robj);
if (UseCompressedOops) {
- assert_different_registers(Rptr, Rold, Rco);
__ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
- __ stwcx_(Rco, Rptr);
+ __ stwcx_(Robj, Rptr);
} else {
- Register Robj = data->as_register();
- assert_different_registers(Rptr, Rold, Rtmp);
- assert_different_registers(Rptr, Robj, Rtmp);
- if (Robj == Rold) { // May happen with ZGC.
- __ mr(Rtmp, Robj);
- Robj = Rtmp;
- }
__ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
__ stdcx_(Robj, Rptr);
}
@@ -3057,6 +3072,12 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
if (UseCompressedOops && data->is_oop()) {
__ decode_heap_oop(dest->as_register());
}
+
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ isync();
+ } else {
+ __ sync();
+ }
}
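Together with the LIRGenerator and Shenandoah changes below (which drop the membars formerly emitted around these operations), the fences now sit directly around the LL/SC sequence. A placement sketch; emit() is a stand-in for the real instruction emission, while support_IRIW_for_not_multiple_copy_atomic_cpu is the actual HotSpot flag:

#include <cstdio>

static void emit(const char* insn) { std::puts(insn); }  // stand-in for the emitter

// Fence placement now produced around an Unsafe CAS/xchg/xadd on PPC64.
void emit_unsafe_atomic_op(bool support_IRIW_for_not_multiple_copy_atomic_cpu) {
  // leading fence: also covers a volatile load that may precede the Unsafe op
  emit(support_IRIW_for_not_multiple_copy_atomic_cpu ? "sync" : "lwsync");
  emit("lwarx/ldarx ... stwcx./stdcx. retry loop");       // the atomic update itself
  // trailing fence
  emit(support_IRIW_for_not_multiple_copy_atomic_cpu ? "isync" : "sync");
}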
@@ -3141,7 +3162,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
// Klass seen before, nothing to do (regardless of unknown bit).
//beq(CCR1, do_nothing);
- __ andi_(R0, klass, TypeEntries::type_unknown);
+ __ andi_(R0, tmp, TypeEntries::type_unknown);
// Already unknown. Nothing to do anymore.
//bne(CCR0, do_nothing);
__ crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
diff --git a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
index ecc40d6fde7c1..32aab91c7d374 100644
--- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
@@ -639,13 +639,6 @@ LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_
cmp_value.load_item();
new_value.load_item();
- // Volatile load may be followed by Unsafe CAS.
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ membar();
- } else {
- __ membar_release();
- }
-
if (is_reference_type(type)) {
if (UseCompressedOops) {
t1 = new_register(T_OBJECT);
@@ -670,21 +663,7 @@ LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value)
LIR_Opr tmp = FrameMap::R0_opr;
value.load_item();
-
- // Volatile load may be followed by Unsafe CAS.
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ membar();
- } else {
- __ membar_release();
- }
-
__ xchg(addr, value.result(), result, tmp);
-
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ membar_acquire();
- } else {
- __ membar();
- }
return result;
}
@@ -694,21 +673,7 @@ LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
LIR_Opr tmp = FrameMap::R0_opr;
value.load_item();
-
- // Volatile load may be followed by Unsafe CAS.
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ membar(); // To be safe. Unsafe semantics are unclear.
- } else {
- __ membar_release();
- }
-
__ xadd(addr, value.result(), result, tmp);
-
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ membar_acquire();
- } else {
- __ membar();
- }
return result;
}
diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
index bf0122ee737c1..577dcae25f4bc 100644
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
@@ -115,7 +115,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
}
if (LockingMode == LM_LIGHTWEIGHT) {
- fast_lock(Roop, Rmark, Rscratch, slow_int);
+ lightweight_lock(Roop, Rmark, Rscratch, slow_int);
} else if (LockingMode == LM_LEGACY) {
// ... and mark it unlocked.
ori(Rmark, Rmark, markWord::unlocked_value);
@@ -181,7 +181,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
ld(Rmark, oopDesc::mark_offset_in_bytes(), Roop);
andi_(R0, Rmark, markWord::monitor_value);
bne(CCR0, slow_int);
- fast_unlock(Roop, Rmark, slow_int);
+ lightweight_unlock(Roop, Rmark, slow_int);
} else if (LockingMode == LM_LEGACY) {
// Check if it is still a light weight lock, this is true if we see
// the stack address of the basicLock in the markWord of the object.
diff --git a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
index 348de609901ca..2ba6a6bca4e03 100644
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
@@ -552,6 +552,12 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Rexception_save = R31, Rcaller_sp = R30;
__ set_info("unwind_exception", dont_gc_arguments);
+ if (AbortVMOnException) {
+ save_live_registers(sasm);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), Rexception);
+ restore_live_registers(sasm, noreg, noreg);
+ }
+
__ ld(Rcaller_sp, 0, R1_SP);
__ push_frame_reg_args(0, R0); // dummy frame for C call
__ mr(Rexception_save, Rexception); // save over C call
diff --git a/src/hotspot/cpu/ppc/frame_ppc.cpp b/src/hotspot/cpu/ppc/frame_ppc.cpp
index 2dcf9975477e8..316db829b58a5 100644
--- a/src/hotspot/cpu/ppc/frame_ppc.cpp
+++ b/src/hotspot/cpu/ppc/frame_ppc.cpp
@@ -324,7 +324,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// first the method
- Method* m = *interpreter_frame_method_addr();
+ Method* m = safe_interpreter_frame_method();
// validate the method we'd find in this potential sender
if (!Method::is_valid_method(m)) return false;
@@ -454,7 +454,6 @@ intptr_t *frame::initial_deoptimization_info() {
frame::frame(void* sp, void* fp, void* pc) : frame((intptr_t*)sp, (address)pc) {}
#endif
-// Pointer beyond the "oldest/deepest" BasicObjectLock on stack.
BasicObjectLock* frame::interpreter_frame_monitor_end() const {
BasicObjectLock* result = (BasicObjectLock*) at(ijava_idx(monitors));
// make sure the pointer points inside the frame
diff --git a/src/hotspot/cpu/ppc/frame_ppc.hpp b/src/hotspot/cpu/ppc/frame_ppc.hpp
index a4ec15d5aa075..e2e2b9d015dde 100644
--- a/src/hotspot/cpu/ppc/frame_ppc.hpp
+++ b/src/hotspot/cpu/ppc/frame_ppc.hpp
@@ -424,9 +424,6 @@
template <typename RegisterMapT>
static void update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr);
- // Size of a monitor in bytes.
- static int interpreter_frame_monitor_size_in_bytes();
-
// The size of a cInterpreter object.
static inline int interpreter_frame_cinterpreterstate_size_in_bytes();
diff --git a/src/hotspot/cpu/ppc/frame_ppc.inline.hpp b/src/hotspot/cpu/ppc/frame_ppc.inline.hpp
index 478c0a9081aef..c711882189a12 100644
--- a/src/hotspot/cpu/ppc/frame_ppc.inline.hpp
+++ b/src/hotspot/cpu/ppc/frame_ppc.inline.hpp
@@ -250,10 +250,6 @@ inline int frame::interpreter_frame_monitor_size() {
WordsPerLong); // number of stack slots for a Java long
}
-inline int frame::interpreter_frame_monitor_size_in_bytes() {
- return frame::interpreter_frame_monitor_size() * wordSize;
-}
-
// entry frames
inline intptr_t* frame::entry_frame_argument_at(int offset) const {
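The call sites converted throughout this patch (aarch64, arm, ppc) rely on a shared interpreter_frame_monitor_size_in_bytes(); presumably it is equivalent to the platform copy deleted here, i.e.:

// Assumed shared definition, identical in effect to the removed PPC version:
// size of one BasicObjectLock slot in an interpreter frame, in bytes.
inline int frame::interpreter_frame_monitor_size_in_bytes() {
  return frame::interpreter_frame_monitor_size() * wordSize;
}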
diff --git a/src/hotspot/cpu/ppc/gc/shenandoah/c1/shenandoahBarrierSetC1_ppc.cpp b/src/hotspot/cpu/ppc/gc/shenandoah/c1/shenandoahBarrierSetC1_ppc.cpp
index fc06e1b71e0b8..6d9a1db1ed4b8 100644
--- a/src/hotspot/cpu/ppc/gc/shenandoah/c1/shenandoahBarrierSetC1_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/shenandoah/c1/shenandoahBarrierSetC1_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
- * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
+ * Copyright (c) 2018, 2023, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2012, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,8 +53,13 @@ void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler *masm) {
__ encode_heap_oop(new_val, new_val);
}
- // Due to the memory barriers emitted in ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved,
- // there is no need to specify stronger memory semantics.
+ // There might be a volatile load before this Unsafe CAS.
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ sync();
+ } else {
+ __ lwsync();
+ }
+
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmp_val, new_val, tmp1, tmp2,
false, result);
@@ -63,6 +68,12 @@ void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler *masm) {
__ decode_heap_oop(new_val);
}
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ isync();
+ } else {
+ __ sync();
+ }
+
__ block_comment("} LIR_OpShenandoahCompareAndSwap (shenandaohgc)");
}
@@ -80,14 +91,6 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess &access, LI
if (access.is_oop()) {
LIRGenerator* gen = access.gen();
- if (ShenandoahCASBarrier) {
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ membar();
- } else {
- __ membar_release();
- }
- }
-
if (ShenandoahSATBBarrier) {
pre_barrier(gen, access.access_emit_info(), access.decorators(), access.resolved_addr(),
LIR_OprFact::illegalOpr);
@@ -104,12 +107,6 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess &access, LI
__ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), t1, t2, result));
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ membar_acquire();
- } else {
- __ membar();
- }
-
return result;
}
}
@@ -125,12 +122,6 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess &access, LIRIt
value.load_item();
LIR_Opr value_opr = value.result();
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ membar();
- } else {
- __ membar_release();
- }
-
if (access.is_oop()) {
value_opr = iu_barrier(access.gen(), value_opr, access.access_emit_info(), access.decorators());
}
@@ -152,11 +143,5 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess &access, LIRIt
}
}
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ membar_acquire();
- } else {
- __ membar();
- }
-
return result;
}
diff --git a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp
index 1d07c6d573a17..9ed47b688ff26 100644
--- a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp
@@ -340,11 +340,12 @@ void ZBarrierSetAssembler::store_barrier_medium(MacroAssembler* masm,
}
__ ld(R0, in_bytes(ZThreadLocalData::store_good_mask_offset()), R16_thread);
__ cmpxchgd(CCR0, tmp, (intptr_t)0, R0, ref_base,
- MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update());
+ MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
+ noreg, need_restore ? nullptr : &slow_path);
if (need_restore) {
__ subf(ref_base, ind_or_offs, ref_base);
+ __ bne(CCR0, slow_path);
}
- __ bne(CCR0, slow_path);
} else {
// A non-atomic relocatable object won't get to the medium fast path due to a
// raw null in the young generation. We only get here because the field is bad.
diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
index 97eb07dec7348..2627ca1ad3446 100644
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
@@ -887,6 +887,12 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
// Test if reserved zone needs to be enabled.
Label no_reserved_zone_enabling;
+ // check if already enabled - if so no re-enabling needed
+ assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
+ lwz(R0, in_bytes(JavaThread::stack_guard_state_offset()), R16_thread);
+ cmpwi(CCR0, R0, StackOverflow::stack_guard_enabled);
+ beq_predict_taken(CCR0, no_reserved_zone_enabling);
+
// Compare frame pointers. There is no good stack pointer, as with stack
// frame compression we can get different SPs when we do calls. A subsequent
// call could have a smaller SP, so that this compare succeeds for an
@@ -961,7 +967,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
}
if (LockingMode == LM_LIGHTWEIGHT) {
- fast_lock(object, /* mark word */ header, tmp, slow_case);
+ lightweight_lock(object, /* mark word */ header, tmp, slow_case);
b(count_locking);
} else if (LockingMode == LM_LEGACY) {
@@ -1111,7 +1117,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
ld(header, oopDesc::mark_offset_in_bytes(), object);
andi_(R0, header, markWord::monitor_value);
bne(CCR0, slow_case);
- fast_unlock(object, header, slow_case);
+ lightweight_unlock(object, header, slow_case);
} else {
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
@@ -1777,7 +1783,7 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr
// Klass seen before, nothing to do (regardless of unknown bit).
//beq(CCR1, do_nothing);
- andi_(R0, klass, TypeEntries::type_unknown);
+ andi_(R0, tmp, TypeEntries::type_unknown);
// Already unknown. Nothing to do anymore.
//bne(CCR0, do_nothing);
crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
@@ -1976,7 +1982,7 @@ void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register
}
}
-// Add a InterpMonitorElem to stack (see frame_sparc.hpp).
+// Add a monitor (see frame_ppc.hpp).
void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2) {
// Very-local scratch registers.
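The early-out added to remove_activation() earlier in this file boils down to the following shape (the enum below is an illustrative stand-in for StackOverflow::StackGuardState):

#include <cstdint>

enum class GuardState : uint32_t { enabled, reserved_disabled /* illustrative subset */ };

// Only do the frame-pointer comparison and the re-enabling runtime call when
// the guard pages are not already in the fully enabled state.
bool needs_reserved_zone_check(GuardState state) {
  return state != GuardState::enabled;
}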
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
index 027c6fe4ce833..bb8711c6aa68a 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
@@ -2707,7 +2707,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
b(failure);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- fast_lock(oop, displaced_header, temp, failure);
+ lightweight_lock(oop, displaced_header, temp, failure);
b(success);
}
@@ -2819,7 +2819,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
b(success);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- fast_unlock(oop, current_header, failure);
+ lightweight_unlock(oop, current_header, failure);
b(success);
}
@@ -4491,14 +4491,14 @@ void MacroAssembler::atomically_flip_locked_state(bool is_unlock, Register obj,
}
}
-// Implements fast-locking.
+// Implements lightweight-locking.
// Branches to slow upon failure to lock the object, with CCR0 NE.
// Falls through upon success with CCR0 EQ.
//
// - obj: the object to be locked
// - hdr: the header, already loaded from obj, will be destroyed
// - t1: temporary register
-void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Label& slow) {
+void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, hdr, t1);
@@ -4524,13 +4524,13 @@ void MacroAssembler::fast_lock(Register obj, Register hdr, Register t1, Label& s
stw(t1, in_bytes(JavaThread::lock_stack_top_offset()), R16_thread);
}
-// Implements fast-unlocking.
+// Implements lightweight-unlocking.
// Branches to slow upon failure, with CCR0 NE.
// Falls through upon success, with CCR0 EQ.
//
// - obj: the object to be unlocked
// - hdr: the (pre-loaded) header of the object, will be destroyed
-void MacroAssembler::fast_unlock(Register obj, Register hdr, Label& slow) {
+void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
assert_different_registers(obj, hdr);
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
index 902edda0039ed..9947644475ec0 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
@@ -607,8 +607,8 @@ class MacroAssembler: public Assembler {
void inc_held_monitor_count(Register tmp);
void dec_held_monitor_count(Register tmp);
void atomically_flip_locked_state(bool is_unlock, Register obj, Register tmp, Label& failed, int semantics);
- void fast_lock(Register obj, Register hdr, Register t1, Label& slow);
- void fast_unlock(Register obj, Register hdr, Label& slow);
+ void lightweight_lock(Register obj, Register hdr, Register t1, Label& slow);
+ void lightweight_unlock(Register obj, Register hdr, Label& slow);
// allocation (for C1)
void tlab_allocate(
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index 542856c144416..401d4f4efa840 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -2457,7 +2457,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// --------------------------------------------------------------------------
if (method->is_synchronized()) {
- ConditionRegister r_flag = CCR1;
Register r_oop = r_temp_4;
const Register r_box = r_temp_5;
Label done, locked;
@@ -2472,8 +2471,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Try fastpath for locking.
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
- __ compiler_fast_lock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
- __ beq(r_flag, locked);
+ __ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+ __ beq(CCR0, locked);
// None of the above fast optimizations worked so we have to get into the
// slow case of monitor enter. Inline a special case of call_VM that
@@ -2666,8 +2665,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// --------------------------------------------------------------------------
if (method->is_synchronized()) {
-
- ConditionRegister r_flag = CCR1;
const Register r_oop = r_temp_4;
const Register r_box = r_temp_5;
const Register r_exception = r_temp_6;
@@ -2684,8 +2681,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ addi(r_box, R1_SP, lock_offset);
// Try fastpath for unlocking.
- __ compiler_fast_unlock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
- __ beq(r_flag, done);
+ __ compiler_fast_unlock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+ __ beq(CCR0, done);
// Save and restore any potential method result value around the unlocking operation.
save_native_result(masm, ret_type, workspace_slot_offset);
diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
index 006896c9f9b86..2ed731926b6dd 100644
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
@@ -4106,79 +4106,67 @@ void TemplateTable::athrow() {
// at next monitor exit.
void TemplateTable::monitorenter() {
transition(atos, vtos);
-
__ verify_oop(R17_tos);
- Register Rcurrent_monitor = R11_scratch1,
- Rcurrent_obj = R12_scratch2,
+ Register Rcurrent_monitor = R3_ARG1,
+ Rcurrent_obj = R4_ARG2,
Robj_to_lock = R17_tos,
- Rscratch1 = R3_ARG1,
- Rscratch2 = R4_ARG2,
- Rscratch3 = R5_ARG3,
- Rcurrent_obj_addr = R6_ARG4;
+ Rscratch1 = R11_scratch1,
+ Rscratch2 = R12_scratch2,
+ Rbot = R5_ARG3,
+ Rfree_slot = R6_ARG4;
+
+ Label Lfound, Lallocate_new;
+
+ __ ld(Rscratch1, _abi0(callers_sp), R1_SP); // load FP
+ __ li(Rfree_slot, 0); // Points to free slot or null.
+
+ // Set up search loop - start with topmost monitor.
+ __ mr(Rcurrent_monitor, R26_monitor);
+ __ addi(Rbot, Rscratch1, -frame::ijava_state_size);
// ------------------------------------------------------------------------------
// Null pointer exception.
- __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
+ __ null_check_throw(Robj_to_lock, -1, Rscratch1);
- // Try to acquire a lock on the object.
- // Repeat until succeeded (i.e., until monitorenter returns true).
+ // Check if any slot is present => short cut to allocation if not.
+ __ cmpld(CCR0, Rcurrent_monitor, Rbot);
+ __ beq(CCR0, Lallocate_new);
// ------------------------------------------------------------------------------
// Find a free slot in the monitor block.
- Label Lfound, Lexit, Lallocate_new;
- ConditionRegister found_free_slot = CCR0,
- found_same_obj = CCR1,
- reached_limit = CCR6;
+ // Note: The order of the monitors is important for C2 OSR which derives the
+ // unlock order from it (see comments for interpreter_frame_monitor_*).
{
- Label Lloop;
- Register Rlimit = Rcurrent_monitor;
-
- // Set up search loop - start with topmost monitor.
- __ addi(Rcurrent_obj_addr, R26_monitor, in_bytes(BasicObjectLock::obj_offset()));
+ Label Lloop, LnotFree, Lexit;
- __ ld(Rlimit, 0, R1_SP);
- __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - in_bytes(BasicObjectLock::obj_offset()))); // Monitor base
+ __ bind(Lloop);
+ __ ld(Rcurrent_obj, in_bytes(BasicObjectLock::obj_offset()), Rcurrent_monitor);
+ // Exit if current entry is for same object; this guarantees that the new monitor
+ // used for a recursive lock is above the older one.
+ __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
+ __ beq(CCR0, Lexit); // recursive locking
- // Check if any slot is present => short cut to allocation if not.
- __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
- __ bgt(reached_limit, Lallocate_new);
+ __ cmpdi(CCR0, Rcurrent_obj, 0);
+ __ bne(CCR0, LnotFree);
+ __ mr(Rfree_slot, Rcurrent_monitor); // remember free slot closest to the bottom
+ __ bind(LnotFree);
- // Pre-load topmost slot.
- __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
- __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
- // The search loop.
- __ bind(Lloop);
- // Found free slot?
- __ cmpdi(found_free_slot, Rcurrent_obj, 0);
- // Is this entry for same obj? If so, stop the search and take the found
- // free slot or allocate a new one to enable recursive locking.
- __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
- __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
- __ beq(found_free_slot, Lexit);
- __ beq(found_same_obj, Lallocate_new);
- __ bgt(reached_limit, Lallocate_new);
- // Check if last allocated BasicLockObj reached.
- __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
- __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
- // Next iteration if unchecked BasicObjectLocks exist on the stack.
- __ b(Lloop);
+ __ addi(Rcurrent_monitor, Rcurrent_monitor, frame::interpreter_frame_monitor_size_in_bytes());
+ __ cmpld(CCR0, Rcurrent_monitor, Rbot);
+ __ bne(CCR0, Lloop);
+ __ bind(Lexit);
}
// ------------------------------------------------------------------------------
// Check if we found a free slot.
- __ bind(Lexit);
-
- __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - in_bytes(BasicObjectLock::obj_offset()));
- __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
- __ b(Lfound);
+ __ cmpdi(CCR0, Rfree_slot, 0);
+ __ bne(CCR0, Lfound);
// We didn't find a free BasicObjLock => allocate one.
- __ align(32, 12);
__ bind(Lallocate_new);
__ add_monitor_to_stack(false, Rscratch1, Rscratch2);
- __ mr(Rcurrent_monitor, R26_monitor);
- __ addi(Rcurrent_obj_addr, R26_monitor, in_bytes(BasicObjectLock::obj_offset()));
+ __ mr(Rfree_slot, R26_monitor);
// ------------------------------------------------------------------------------
// We now have a slot to lock.
@@ -4188,8 +4176,8 @@ void TemplateTable::monitorenter() {
// The object has already been popped from the stack, so the expression stack looks correct.
__ addi(R14_bcp, R14_bcp, 1);
- __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
- __ lock_object(Rcurrent_monitor, Robj_to_lock);
+ __ std(Robj_to_lock, in_bytes(BasicObjectLock::obj_offset()), Rfree_slot);
+ __ lock_object(Rfree_slot, Robj_to_lock);
// Check if there's enough space on the stack for the monitors after locking.
// This emits a single store.
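Restated in C++ for readability, the new slot-search loop behaves roughly as follows (BasicObjectLockModel and the pointer stepping are simplifications; in the real frame the monitors are BasicObjectLock slots laid out between R26_monitor and the frame bottom):

struct BasicObjectLockModel { void* obj; /* plus the lock word in the real BasicObjectLock */ };

// Walk from the innermost (most recently added) monitor towards the bottom of
// the monitor block. Stopping early on a recursive lock keeps the new slot
// above the older entry, which is what the C2 OSR ordering note above requires.
BasicObjectLockModel* select_slot(BasicObjectLockModel* top, BasicObjectLockModel* bottom,
                                  void* obj_to_lock) {
  BasicObjectLockModel* free_slot = nullptr;
  for (BasicObjectLockModel* cur = top; cur != bottom; ++cur) {
    if (cur->obj == obj_to_lock) break;        // recursive locking: keep only slots above it
    if (cur->obj == nullptr) free_slot = cur;  // remember the free slot closest to the bottom
  }
  return free_slot;  // nullptr -> caller allocates a fresh monitor on top via add_monitor_to_stack
}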
@@ -4203,46 +4191,40 @@ void TemplateTable::monitorexit() {
transition(atos, vtos);
__ verify_oop(R17_tos);
- Register Rcurrent_monitor = R11_scratch1,
- Rcurrent_obj = R12_scratch2,
+ Register Rcurrent_monitor = R3_ARG1,
+ Rcurrent_obj = R4_ARG2,
Robj_to_lock = R17_tos,
- Rcurrent_obj_addr = R3_ARG1,
- Rlimit = R4_ARG2;
+ Rscratch = R11_scratch1,
+ Rbot = R12_scratch2;
+
Label Lfound, Lillegal_monitor_state;
- // Check corner case: unbalanced monitorEnter / Exit.
- __ ld(Rlimit, 0, R1_SP);
- __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
+ __ ld(Rscratch, _abi0(callers_sp), R1_SP); // load FP
+
+ // Set up search loop - start with topmost monitor.
+ __ mr(Rcurrent_monitor, R26_monitor);
+ __ addi(Rbot, Rscratch, -frame::ijava_state_size);
// Null pointer check.
- __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
+ __ null_check_throw(Robj_to_lock, -1, Rscratch);
- __ cmpld(CCR0, R26_monitor, Rlimit);
- __ bgt(CCR0, Lillegal_monitor_state);
+ // Check corner case: unbalanced monitorEnter / Exit.
+ __ cmpld(CCR0, Rcurrent_monitor, Rbot);
+ __ beq(CCR0, Lillegal_monitor_state);
// Find the corresponding slot in the monitors stack section.
{
Label Lloop;
- // Start with topmost monitor.
- __ addi(Rcurrent_obj_addr, R26_monitor, in_bytes(BasicObjectLock::obj_offset()));
- __ addi(Rlimit, Rlimit, in_bytes(BasicObjectLock::obj_offset()));
- __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
- __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
-
__ bind(Lloop);
+ __ ld(Rcurrent_obj, in_bytes(BasicObjectLock::obj_offset()), Rcurrent_monitor);
// Is this entry for same obj?
__ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
__ beq(CCR0, Lfound);
- // Check if last allocated BasicLockObj reached.
-
- __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
- __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
- __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
-
- // Next iteration if unchecked BasicObjectLocks exist on the stack.
- __ ble(CCR0, Lloop);
+ __ addi(Rcurrent_monitor, Rcurrent_monitor, frame::interpreter_frame_monitor_size_in_bytes());
+ __ cmpld(CCR0, Rcurrent_monitor, Rbot);
+ __ bne(CCR0, Lloop);
}
// Fell through without finding the basic obj lock => throw up!
@@ -4252,8 +4234,6 @@ void TemplateTable::monitorexit() {
__ align(32, 12);
__ bind(Lfound);
- __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
- -(frame::interpreter_frame_monitor_size() * wordSize) - in_bytes(BasicObjectLock::obj_offset()));
__ unlock_object(Rcurrent_monitor);
}
diff --git a/src/hotspot/cpu/riscv/assembler_riscv.hpp b/src/hotspot/cpu/riscv/assembler_riscv.hpp
index 02fbf5bfbf7d8..0988056f15aeb 100644
--- a/src/hotspot/cpu/riscv/assembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/assembler_riscv.hpp
@@ -2787,7 +2787,13 @@ enum Nf {
c_slli(Rd, shamt); \
return; \
} \
- _slli(Rd, Rs1, shamt); \
+ if (shamt != 0) { \
+ _slli(Rd, Rs1, shamt); \
+ } else { \
+ if (Rd != Rs1) { \
+ addi(Rd, Rs1, 0); \
+ } \
+ } \
}
INSN(slli);
@@ -2802,7 +2808,13 @@ enum Nf {
C_NAME(Rd, shamt); \
return; \
} \
- NORMAL_NAME(Rd, Rs1, shamt); \
+ if (shamt != 0) { \
+ NORMAL_NAME(Rd, Rs1, shamt); \
+ } else { \
+ if (Rd != Rs1) { \
+ addi(Rd, Rs1, 0); \
+ } \
+ } \
}
INSN(srai, c_srai, _srai);
@@ -2902,6 +2914,17 @@ enum Nf {
return uabs(target - branch) < branch_range;
}
+ // Decode the given instruction, checking if it's a 16-bit compressed
+ // instruction and return the address of the next instruction.
+ static address locate_next_instruction(address inst) {
+ // Instruction wider than 16 bits has the two least-significant bits set.
+ if ((0x3 & *inst) == 0x3) {
+ return inst + instruction_size;
+ } else {
+ return inst + compressed_instruction_size;
+ }
+ }
+
Assembler(CodeBuffer* code) : AbstractAssembler(code), _in_compressible_region(true) {}
};
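A typical, purely hypothetical use of the new helper is walking a code range one instruction at a time without assuming a fixed 4-byte width (begin and end are placeholders for some generated range):

// Usage sketch: handles RVC-compressed and full-width instructions alike.
for (address pc = begin; pc < end; pc = Assembler::locate_next_instruction(pc)) {
  // inspect or patch the instruction starting at pc
}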
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
index 99c683c2f4d1d..8e52a6775254e 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
@@ -363,7 +363,7 @@ int LIR_Assembler::emit_unwind_handler() {
if (LockingMode == LM_MONITOR) {
__ j(*stub->entry());
} else {
- __ unlock_object(x15, x14, x10, *stub->entry());
+ __ unlock_object(x15, x14, x10, x16, *stub->entry());
}
__ bind(*stub->continuation());
}
@@ -1506,22 +1506,23 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register obj = op->obj_opr()->as_register(); // may not be an oop
Register hdr = op->hdr_opr()->as_register();
Register lock = op->lock_opr()->as_register();
+ Register temp = op->scratch_opr()->as_register();
if (LockingMode == LM_MONITOR) {
if (op->info() != nullptr) {
add_debug_info_for_null_check_here(op->info());
- __ null_check(obj);
+ __ null_check(obj, -1);
}
__ j(*op->stub()->entry());
} else if (op->code() == lir_lock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
// add debug info for NullPointerException only if one is possible
- int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
+ int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry());
if (op->info() != nullptr) {
add_debug_info_for_null_check(null_check_offset, op->info());
}
} else if (op->code() == lir_unlock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
- __ unlock_object(hdr, obj, lock, *op->stub()->entry());
+ __ unlock_object(hdr, obj, lock, temp, *op->stub()->entry());
} else {
Unimplemented();
}
@@ -1647,10 +1648,11 @@ void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
__ beqz(t1, none);
__ mv(t0, (u1)TypeEntries::null_seen);
__ beq(t0, t1, none);
- // There is a chance that the checks above (re-reading profiling
- // data from memory) fail if another thread has just set the
+ // There is a chance that the checks above
+ // fail if another thread has just set the
// profiling to this obj's klass
__ membar(MacroAssembler::LoadLoad);
+ __ xorr(tmp, tmp, t1); // get back original value before XOR
__ ld(t1, mdo_addr);
__ xorr(tmp, tmp, t1);
__ andi(t0, tmp, TypeEntries::type_klass_mask);
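The added xorr works because XOR is its own inverse; a tiny standalone check of the algebra (the values are arbitrary):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t original = 0x1234, old_mdo = 0xabcd, new_mdo = 0xef01;
  uint64_t tmp = original ^ old_mdo;   // state on entry (t1 still holds old_mdo)
  tmp ^= old_mdo;                      // the added xorr: back to the original value
  tmp ^= new_mdo;                      // the existing xorr against the re-read cell
  assert(tmp == (original ^ new_mdo)); // now a comparison against the fresh mdo value
  return 0;
}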
@@ -1677,6 +1679,10 @@ void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
__ bind(none);
// first time here. Set profile type.
__ sd(tmp, mdo_addr);
+#ifdef ASSERT
+ __ andi(tmp, tmp, TypeEntries::type_mask);
+ __ verify_klass_ptr(tmp);
+#endif
}
}
@@ -1711,6 +1717,10 @@ void LIR_Assembler::check_no_conflict(ciKlass* exact_klass, intptr_t current_kla
#endif
// first time here. Set profile type.
__ sd(tmp, mdo_addr);
+#ifdef ASSERT
+ __ andi(tmp, tmp, TypeEntries::type_mask);
+ __ verify_klass_ptr(tmp);
+#endif
} else {
assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
diff --git a/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp
index 12df04749fab0..c3928b8b318dc 100644
--- a/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp
@@ -274,6 +274,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
// "lock" stores the address of the monitor stack slot, so this is not an oop
LIR_Opr lock = new_register(T_INT);
+ LIR_Opr scratch = new_register(T_INT);
CodeEmitInfo* info_for_exception = nullptr;
if (x->needs_null_check()) {
@@ -282,7 +283,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
// this CodeEmitInfo must not have the xhandlers because here the
// object is already locked (xhandlers expect object to be unlocked)
CodeEmitInfo* info = state_for(x, x->state(), true);
- monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
+ monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
x->monitor_no(), info_for_exception, info);
}
@@ -294,8 +295,9 @@ void LIRGenerator::do_MonitorExit(MonitorExit* x) {
LIR_Opr lock = new_register(T_INT);
LIR_Opr obj_temp = new_register(T_INT);
+ LIR_Opr scratch = new_register(T_INT);
set_no_result(x);
- monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
+ monitor_exit(obj_temp, lock, syncTempOpr(), scratch, x->monitor_no());
}
// neg
@@ -801,7 +803,7 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
}
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
- fatal("vectorizedMismatch intrinsic is not implemented on this platform");
+ ShouldNotReachHere();
}
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
index 2af629a67cd07..6c1dce0de1598 100644
--- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
@@ -49,10 +49,10 @@ void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result,
}
}
-int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
+int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
const int aligned_mask = BytesPerWord - 1;
const int hdr_offset = oopDesc::mark_offset_in_bytes();
- assert_different_registers(hdr, obj, disp_hdr);
+ assert_different_registers(hdr, obj, disp_hdr, temp, t0, t1);
int null_check_offset = -1;
verify_oop(obj);
@@ -65,15 +65,15 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(hdr, obj);
lwu(hdr, Address(hdr, Klass::access_flags_offset()));
- test_bit(t0, hdr, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
- bnez(t0, slow_case, true /* is_far */);
+ test_bit(temp, hdr, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
+ bnez(temp, slow_case, true /* is_far */);
}
// Load object header
ld(hdr, Address(obj, hdr_offset));
if (LockingMode == LM_LIGHTWEIGHT) {
- fast_lock(obj, hdr, t0, t1, slow_case);
+ lightweight_lock(obj, hdr, temp, t1, slow_case);
} else if (LockingMode == LM_LEGACY) {
Label done;
// and mark it as unlocked
@@ -83,8 +83,8 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
// test if object header is still the same (i.e. unlocked), and if so, store the
// displaced header address in the object header - if it is not the same, get the
// object header instead
- la(t1, Address(obj, hdr_offset));
- cmpxchgptr(hdr, disp_hdr, t1, t0, done, /*fallthough*/nullptr);
+ la(temp, Address(obj, hdr_offset));
+ cmpxchgptr(hdr, disp_hdr, temp, t1, done, /*fallthrough*/nullptr);
// if the object header was the same, we're done
// if the object header was not the same, it is now in the hdr register
// => test if it is a stack pointer into the same stack (recursive locking), i.e.:
@@ -100,8 +100,8 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
// assuming both the stack pointer and page_size have their least
// significant 2 bits cleared and page_size is a power of 2
sub(hdr, hdr, sp);
- mv(t0, aligned_mask - (int)os::vm_page_size());
- andr(hdr, hdr, t0);
+ mv(temp, aligned_mask - (int)os::vm_page_size());
+ andr(hdr, hdr, temp);
// for recursive locking, the result is zero => save it in the displaced header
// location (null in the displaced hdr location indicates recursive locking)
sd(hdr, Address(disp_hdr, 0));
@@ -115,10 +115,10 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
return null_check_offset;
}
-void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
+void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
const int aligned_mask = BytesPerWord - 1;
const int hdr_offset = oopDesc::mark_offset_in_bytes();
- assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
+ assert_different_registers(hdr, obj, disp_hdr, temp, t0, t1);
Label done;
if (LockingMode != LM_LIGHTWEIGHT) {
@@ -135,9 +135,9 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
if (LockingMode == LM_LIGHTWEIGHT) {
ld(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
- test_bit(t0, hdr, exact_log2(markWord::monitor_value));
- bnez(t0, slow_case, /* is_far */ true);
- fast_unlock(obj, hdr, t0, t1, slow_case);
+ test_bit(temp, hdr, exact_log2(markWord::monitor_value));
+ bnez(temp, slow_case, /* is_far */ true);
+ lightweight_unlock(obj, hdr, temp, t1, slow_case);
} else if (LockingMode == LM_LEGACY) {
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to
@@ -145,8 +145,8 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
// if the object header was not pointing to the displaced header,
// we do unlocking via runtime call
if (hdr_offset) {
- la(t0, Address(obj, hdr_offset));
- cmpxchgptr(disp_hdr, hdr, t0, t1, done, &slow_case);
+ la(temp, Address(obj, hdr_offset));
+ cmpxchgptr(disp_hdr, hdr, temp, t1, done, &slow_case);
} else {
cmpxchgptr(disp_hdr, hdr, obj, t1, done, &slow_case);
}
diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp
index 738dac78f9df3..b737a438511c8 100644
--- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp
@@ -59,14 +59,16 @@ using MacroAssembler::null_check;
// hdr : must be x10, contents destroyed
// obj : must point to the object to lock, contents preserved
// disp_hdr: must point to the displaced header location, contents preserved
+ // temp : temporary register, must not be scratch register t0 or t1
// returns code offset at which to add null check debug information
- int lock_object (Register swap, Register obj, Register disp_hdr, Label& slow_case);
+ int lock_object(Register swap, Register obj, Register disp_hdr, Register temp, Label& slow_case);
// unlocking
// hdr : contents destroyed
// obj : must point to the object to lock, contents preserved
// disp_hdr: must be x10 & must point to the displaced header location, contents destroyed
- void unlock_object(Register swap, Register obj, Register lock, Label& slow_case);
+ // temp : temporary register, must not be scratch register t0 or t1
+ void unlock_object(Register swap, Register obj, Register lock, Register temp, Label& slow_case);
void initialize_object(
Register obj, // result: pointer to object after successful allocation
diff --git a/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp b/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
index 80555b87729bb..ea086d46bdac2 100644
--- a/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
@@ -498,6 +498,14 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// other registers used in this stub
const Register handler_addr = x11;
+ if (AbortVMOnException) {
+ __ enter();
+ save_live_registers(sasm);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), x10);
+ restore_live_registers(sasm);
+ __ leave();
+ }
+
// verify that only x10, is valid at this time
__ invalidate_registers(false, true, true, true, true, true);
diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
index a45fe0538ea44..a83be3b8f7548 100644
--- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
@@ -43,6 +43,223 @@
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg,
+ Register tmp1Reg, Register tmp2Reg, Register tmp3Reg) {
+ // Use cr register to indicate the fast_lock result: zero for success; non-zero for failure.
+ Register flag = t1;
+ Register oop = objectReg;
+ Register box = boxReg;
+ Register disp_hdr = tmp1Reg;
+ Register tmp = tmp2Reg;
+ Label cont;
+ Label object_has_monitor;
+ Label count, no_count;
+
+ assert_different_registers(oop, box, tmp, disp_hdr, flag, tmp3Reg, t0);
+
+ // Load markWord from object into displaced_header.
+ ld(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
+
+ if (DiagnoseSyncOnValueBasedClasses != 0) {
+ load_klass(flag, oop);
+ lwu(flag, Address(flag, Klass::access_flags_offset()));
+ test_bit(flag, flag, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
+ bnez(flag, cont, true /* is_far */);
+ }
+
+ // Check for existing monitor
+ test_bit(t0, disp_hdr, exact_log2(markWord::monitor_value));
+ bnez(t0, object_has_monitor);
+
+ if (LockingMode == LM_MONITOR) {
+ mv(flag, 1); // Set non-zero flag to indicate 'failure' -> take slow-path
+ j(cont);
+ } else if (LockingMode == LM_LEGACY) {
+ // Set tmp to be (markWord of object | UNLOCK_VALUE).
+ ori(tmp, disp_hdr, markWord::unlocked_value);
+
+ // Initialize the box. (Must happen before we update the object mark!)
+ sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+
+ // Compare object markWord with an unlocked value (tmp) and if
+ // equal exchange the stack address of our box with object markWord.
+ // On failure disp_hdr contains the possibly locked markWord.
+ cmpxchg(/*memory address*/oop, /*expected value*/tmp, /*new value*/box, Assembler::int64, Assembler::aq,
+ Assembler::rl, /*result*/disp_hdr);
+ mv(flag, zr);
+ beq(disp_hdr, tmp, cont); // prepare zero flag and goto cont if we won the cas
+
+ assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+
+ // If the compare-and-exchange succeeded, then we found an unlocked
+ // object, have now locked it, and will continue at label cont.
+ // Otherwise we did not see an unlocked object, so try the fast recursive case.
+
+ // Check if the owner is self by comparing the value in the
+ // markWord of object (disp_hdr) with the stack pointer.
+ sub(disp_hdr, disp_hdr, sp);
+ mv(tmp, (intptr_t) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
+ // If (mark & lock_mask) == 0 and mark - sp < page_size, we are stack-locking and goto cont,
+ // hence we can store 0 as the displaced header in the box, which indicates that it is a
+ // recursive lock.
+ andr(tmp/*==0?*/, disp_hdr, tmp);
+ sd(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+ mv(flag, tmp); // we can use the value of tmp as the result here
+ j(cont);
+ } else {
+ assert(LockingMode == LM_LIGHTWEIGHT, "");
+ Label slow;
+ lightweight_lock(oop, disp_hdr, tmp, tmp3Reg, slow);
+
+ // Indicate success on completion.
+ mv(flag, zr);
+ j(count);
+ bind(slow);
+ mv(flag, 1); // Set non-zero flag to indicate 'failure' -> take slow-path
+ j(no_count);
+ }
+
+ // Handle existing monitor.
+ bind(object_has_monitor);
+ // The object's monitor m is unlocked iff m->owner == NULL,
+ // otherwise m->owner may contain a thread or a stack address.
+ //
+ // Try to CAS m->owner from NULL to current thread.
+ add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value));
+ cmpxchg(/*memory address*/tmp, /*expected value*/zr, /*new value*/xthread, Assembler::int64, Assembler::aq,
+ Assembler::rl, /*result*/flag); // cas succeeds if flag == zr(expected)
+
+ if (LockingMode != LM_LIGHTWEIGHT) {
+ // Store a non-null value into the box to avoid looking like a re-entrant
+ // lock. The fast-path monitor unlock code checks for
+ // markWord::monitor_value so use markWord::unused_mark which has the
+ // relevant bit set, and also matches ObjectSynchronizer::slow_enter.
+ mv(tmp, (address)markWord::unused_mark().value());
+ sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+ }
+
+ beqz(flag, cont); // CAS success means locking succeeded
+
+ bne(flag, xthread, cont); // Check for recursive locking
+
+ // Recursive lock case
+ mv(flag, zr);
+ increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1, t0, tmp);
+
+ bind(cont);
+ // zero flag indicates success
+ // non-zero flag indicates failure
+ bnez(flag, no_count);
+
+ bind(count);
+ increment(Address(xthread, JavaThread::held_monitor_count_offset()), 1, t0, tmp);
+
+ bind(no_count);
+}
+
+void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg,
+ Register tmp1Reg, Register tmp2Reg) {
+ // Use cr register to indicate the fast_unlock result: zero for success; non-zero for failure.
+ Register flag = t1;
+ Register oop = objectReg;
+ Register box = boxReg;
+ Register disp_hdr = tmp1Reg;
+ Register tmp = tmp2Reg;
+ Label cont;
+ Label object_has_monitor;
+ Label count, no_count;
+
+ assert_different_registers(oop, box, tmp, disp_hdr, flag, t0);
+
+ if (LockingMode == LM_LEGACY) {
+ // Find the lock address and load the displaced header from the stack.
+ ld(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+
+ // If the displaced header is 0, we have a recursive unlock.
+ mv(flag, disp_hdr);
+ beqz(disp_hdr, cont);
+ }
+
+ // Handle existing monitor.
+ ld(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
+ test_bit(t0, tmp, exact_log2(markWord::monitor_value));
+ bnez(t0, object_has_monitor);
+
+ if (LockingMode == LM_MONITOR) {
+ mv(flag, 1); // Set non-zero flag to indicate 'failure' -> take slow path
+ j(cont);
+ } else if (LockingMode == LM_LEGACY) {
+ // Check if it is still a lightweight lock; this is true if we
+ // see the stack address of the basicLock in the markWord of the
+ // object.
+
+ cmpxchg(/*memory address*/oop, /*expected value*/box, /*new value*/disp_hdr, Assembler::int64, Assembler::relaxed,
+ Assembler::rl, /*result*/tmp);
+ xorr(flag, box, tmp); // box == tmp if cas succeeds
+ j(cont);
+ } else {
+ assert(LockingMode == LM_LIGHTWEIGHT, "");
+ Label slow;
+ lightweight_unlock(oop, tmp, box, disp_hdr, slow);
+
+ // Indicate success on completion.
+ mv(flag, zr);
+ j(count);
+ bind(slow);
+ mv(flag, 1); // Set non-zero flag to indicate 'failure' -> take slow path
+ j(no_count);
+ }
+
+ assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+
+ // Handle existing monitor.
+ bind(object_has_monitor);
+ STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
+ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
+
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ // If the owner is anonymous, we need to fix it -- in an outline stub.
+ Register tmp2 = disp_hdr;
+ ld(tmp2, Address(tmp, ObjectMonitor::owner_offset()));
+ test_bit(t0, tmp2, exact_log2(ObjectMonitor::ANONYMOUS_OWNER));
+ C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmp, tmp2);
+ Compile::current()->output()->add_stub(stub);
+ bnez(t0, stub->entry(), /* is_far */ true);
+ bind(stub->continuation());
+ }
+
+ ld(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
+
+ Label notRecursive;
+ beqz(disp_hdr, notRecursive); // Will be 0 if not recursive.
+
+ // Recursive lock
+ addi(disp_hdr, disp_hdr, -1);
+ sd(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
+ mv(flag, zr);
+ j(cont);
+
+ bind(notRecursive);
+ ld(flag, Address(tmp, ObjectMonitor::EntryList_offset()));
+ ld(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset()));
+ orr(flag, flag, disp_hdr); // Will be 0 if both are 0.
+ bnez(flag, cont);
+ // need a release store here
+ la(tmp, Address(tmp, ObjectMonitor::owner_offset()));
+ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
+ sd(zr, Address(tmp)); // set unowned
+
+ bind(cont);
+ // zero flag indicates success
+ // non-zero flag indicates failure
+ bnez(flag, no_count);
+
+ bind(count);
+ decrement(Address(xthread, JavaThread::held_monitor_count_offset()), 1, t0, tmp);
+
+ bind(no_count);
+}
+
// short string
// StringUTF16.indexOfChar
// StringLatin1.indexOfChar
@@ -491,7 +708,9 @@ void C2_MacroAssembler::string_indexof(Register haystack, Register needle,
}
bne(tmp3, skipch, BMSKIP); // if not equal, skipch is bad char
add(result, haystack, isLL ? nlen_tmp : ch2);
- ld(ch2, Address(result)); // load 8 bytes from source string
+ // load 8 bytes from source string
+ // if isLL is false, the read granularity can be 2
+ load_long_misaligned(ch2, Address(result), ch1, isLL ? 1 : 2); // can use ch1 as temp register here as it will be trashed by next mv anyway
mv(ch1, tmp6);
if (isLL) {
j(BMLOOPSTR1_AFTER_LOAD);
@@ -679,10 +898,30 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
slli(tmp3, result_tmp, haystack_chr_shift); // result as tmp
add(haystack, haystack, tmp3);
neg(hlen_neg, tmp3);
+ if (AvoidUnalignedAccesses) {
+ // preload the first window, then read one character per iteration instead of four,
+ // shifting the previous ch2 right by the size of a character in bits
+ add(tmp3, haystack, hlen_neg);
+ (this->*load_4chr)(ch2, Address(tmp3), noreg);
+ if (isLL) {
+ // erase the most significant byte of the 32-bit value in ch2
+ slli(ch2, ch2, 40);
+ srli(ch2, ch2, 32);
+ } else {
+ slli(ch2, ch2, 16); // 2 most significant bytes will be erased by this operation
+ }
+ }
bind(CH1_LOOP);
- add(ch2, haystack, hlen_neg);
- (this->*load_4chr)(ch2, Address(ch2), noreg);
+ add(tmp3, haystack, hlen_neg);
+ if (AvoidUnalignedAccesses) {
+ srli(ch2, ch2, isLL ? 8 : 16);
+ (this->*haystack_load_1chr)(tmp3, Address(tmp3, isLL ? 3 : 6), noreg);
+ slli(tmp3, tmp3, isLL ? 24 : 48);
+ add(ch2, ch2, tmp3);
+ } else {
+ (this->*load_4chr)(ch2, Address(tmp3), noreg);
+ }
beq(ch1, ch2, MATCH);
add(hlen_neg, hlen_neg, haystack_chr_size);
blez(hlen_neg, CH1_LOOP);
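
For reference, a rough C++ model of the AvoidUnalignedAccesses path above, assuming the Latin-1 (isLL) case and a 4-character needle packed little-endian; contains4_latin1 and its parameters are illustrative names, not part of the patch:

#include <cstdint>
#include <cstddef>
// needle4 = n[0] | (n[1] << 8) | (n[2] << 16) | (n[3] << 24)
static inline bool contains4_latin1(const uint8_t* hay, size_t hlen, uint32_t needle4) {
  if (hlen < 4) return false;
  // preload the first three characters of the window
  uint32_t window = (uint32_t)hay[0] | ((uint32_t)hay[1] << 8) | ((uint32_t)hay[2] << 16);
  for (size_t i = 3; i < hlen; i++) {
    window |= (uint32_t)hay[i] << 24;   // load one new character per iteration
    if (window == needle4) return true;
    window >>= 8;                       // slide the 4-character window by one
  }
  return false;
}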
@@ -700,10 +939,23 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
slli(tmp3, result_tmp, haystack_chr_shift);
add(haystack, haystack, tmp3);
neg(hlen_neg, tmp3);
-
+ if (AvoidUnalignedAccesses) {
+ // preload the first window, then read one character per iteration instead of two,
+ // shifting the previous ch2 right by the size of a character in bits
+ add(tmp3, haystack, hlen_neg);
+ (this->*haystack_load_1chr)(ch2, Address(tmp3), noreg);
+ slli(ch2, ch2, isLL ? 8 : 16);
+ }
bind(CH1_LOOP);
add(tmp3, haystack, hlen_neg);
- (this->*load_2chr)(ch2, Address(tmp3), noreg);
+ if (AvoidUnalignedAccesses) {
+ srli(ch2, ch2, isLL ? 8 : 16);
+ (this->*haystack_load_1chr)(tmp3, Address(tmp3, isLL ? 1 : 2), noreg);
+ slli(tmp3, tmp3, isLL ? 8 : 16);
+ add(ch2, ch2, tmp3);
+ } else {
+ (this->*load_2chr)(ch2, Address(tmp3), noreg);
+ }
beq(ch1, ch2, MATCH);
add(hlen_neg, hlen_neg, haystack_chr_size);
blez(hlen_neg, CH1_LOOP);
@@ -727,7 +979,14 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
bind(FIRST_LOOP);
add(ch2, haystack, hlen_neg);
- (this->*load_2chr)(ch2, Address(ch2), noreg);
+ if (AvoidUnalignedAccesses) {
+ (this->*haystack_load_1chr)(tmp2, Address(ch2, isLL ? 1 : 2), noreg); // we need a temp register; hlen_tmp (an alias of tmp2) is safe to use here
+ (this->*haystack_load_1chr)(ch2, Address(ch2), noreg);
+ slli(tmp2, tmp2, isLL ? 8 : 16);
+ add(ch2, ch2, tmp2);
+ } else {
+ (this->*load_2chr)(ch2, Address(ch2), noreg);
+ }
beq(first, ch2, STR1_LOOP);
bind(STR2_NEXT);
@@ -751,10 +1010,7 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
bind(DO1);
(this->*needle_load_1chr)(ch1, Address(needle), noreg);
sub(result_tmp, haystack_len, 1);
- mv(tmp3, result_tmp);
- if (haystack_chr_shift) {
- slli(tmp3, result_tmp, haystack_chr_shift);
- }
+ slli(tmp3, result_tmp, haystack_chr_shift);
add(haystack, haystack, tmp3);
neg(hlen_neg, tmp3);
@@ -829,9 +1085,10 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
// load first parts of strings and finish initialization while loading
{
if (str1_isL == str2_isL) { // LL or UU
+ // check if str1 and str2 is same pointer
+ beq(str1, str2, DONE);
// load 8 bytes once to compare
ld(tmp1, Address(str1));
- beq(str1, str2, DONE);
ld(tmp2, Address(str2));
mv(t0, STUB_THRESHOLD);
bge(cnt2, t0, STUB);
@@ -874,9 +1131,8 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
addi(cnt1, cnt1, 8);
}
addi(cnt2, cnt2, isUL ? 4 : 8);
+ bne(tmp1, tmp2, DIFFERENCE);
bgez(cnt2, TAIL);
- xorr(tmp3, tmp1, tmp2);
- bnez(tmp3, DIFFERENCE);
// main loop
bind(NEXT_WORD);
@@ -905,38 +1161,30 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
addi(cnt1, cnt1, 8);
addi(cnt2, cnt2, 4);
}
- bgez(cnt2, TAIL);
-
- xorr(tmp3, tmp1, tmp2);
- beqz(tmp3, NEXT_WORD);
- j(DIFFERENCE);
+ bne(tmp1, tmp2, DIFFERENCE);
+ bltz(cnt2, NEXT_WORD);
bind(TAIL);
- xorr(tmp3, tmp1, tmp2);
- bnez(tmp3, DIFFERENCE);
- // Last longword. In the case where length == 4 we compare the
- // same longword twice, but that's still faster than another
- // conditional branch.
if (str1_isL == str2_isL) { // LL or UU
- ld(tmp1, Address(str1));
- ld(tmp2, Address(str2));
+ load_long_misaligned(tmp1, Address(str1), tmp3, isLL ? 1 : 2);
+ load_long_misaligned(tmp2, Address(str2), tmp3, isLL ? 1 : 2);
} else if (isLU) { // LU case
- lwu(tmp1, Address(str1));
- ld(tmp2, Address(str2));
+ load_int_misaligned(tmp1, Address(str1), tmp3, false);
+ load_long_misaligned(tmp2, Address(str2), tmp3, 2);
inflate_lo32(tmp3, tmp1);
mv(tmp1, tmp3);
} else { // UL case
- lwu(tmp2, Address(str2));
- ld(tmp1, Address(str1));
+ load_int_misaligned(tmp2, Address(str2), tmp3, false);
+ load_long_misaligned(tmp1, Address(str1), tmp3, 2);
inflate_lo32(tmp3, tmp2);
mv(tmp2, tmp3);
}
bind(TAIL_CHECK);
- xorr(tmp3, tmp1, tmp2);
- beqz(tmp3, DONE);
+ beq(tmp1, tmp2, DONE);
// Find the first different characters in the longwords and
// compute their difference.
bind(DIFFERENCE);
+ xorr(tmp3, tmp1, tmp2);
ctzc_bit(result, tmp3, isLL); // count zero from lsb to msb
srl(tmp1, tmp1, result);
srl(tmp2, tmp2, result);
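
A small sketch of the DIFFERENCE sequence above for the Latin-1 case: the XOR of the two 8-byte chunks is non-zero, a count-trailing-zeros locates the first differing byte, and both chunks are shifted so the differing characters can be subtracted. first_diff_latin1 is an illustrative helper (uses the GCC/Clang ctz builtin), not part of the patch:

#include <cstdint>
static inline int first_diff_latin1(uint64_t a, uint64_t b) {
  uint64_t x = a ^ b;                 // caller guarantees a != b
  int bit = __builtin_ctzll(x) & ~7;  // round down to the start of the differing byte
  int ca = (int)((a >> bit) & 0xFF);
  int cb = (int)((b >> bit) & 0xFF);
  return ca - cb;                     // the sign of the result is the compare result
}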
@@ -1867,13 +2115,13 @@ void C2_MacroAssembler::integer_narrow_v(VectorRegister dst, BasicType dst_bt, i
}
}
} else if (src_bt == T_INT) {
- // T_SHORT
- vsetvli(t0, t0, Assembler::e16, Assembler::mf2);
- vncvt_x_x_w(dst, src);
- if (dst_bt == T_BYTE) {
- vsetvli(t0, t0, Assembler::e8, Assembler::mf2);
- vncvt_x_x_w(dst, dst);
- }
+ // T_SHORT
+ vsetvli(t0, t0, Assembler::e16, Assembler::mf2);
+ vncvt_x_x_w(dst, src);
+ if (dst_bt == T_BYTE) {
+ vsetvli(t0, t0, Assembler::e8, Assembler::mf2);
+ vncvt_x_x_w(dst, dst);
+ }
} else if (src_bt == T_SHORT) {
vsetvli(t0, t0, Assembler::e8, Assembler::mf2);
vncvt_x_x_w(dst, src);
@@ -1889,8 +2137,6 @@ void C2_MacroAssembler::VFLOATCVT##_safe(VectorRegister dst, VectorRegister src)
}
VFCVT_SAFE(vfcvt_rtz_x_f_v);
-VFCVT_SAFE(vfwcvt_rtz_x_f_v);
-VFCVT_SAFE(vfncvt_rtz_x_f_w);
#undef VFCVT_SAFE
@@ -1932,4 +2178,4 @@ void C2_MacroAssembler::extract_fp_v(FloatRegister dst, VectorRegister src, Basi
vslidedown_vx(tmp, src, t0);
vfmv_f_s(dst, tmp);
}
-}
\ No newline at end of file
+}
diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp
index c0de12c320571..f14f572be8570 100644
--- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp
@@ -39,6 +39,11 @@
VectorRegister vrs,
bool is_latin, Label& DONE);
public:
+ // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
+ // See full description in macroAssembler_riscv.cpp.
+ void fast_lock(Register object, Register box, Register tmp1, Register tmp2, Register tmp3);
+ void fast_unlock(Register object, Register box, Register tmp1, Register tmp2);
+
void string_compare(Register str1, Register str2,
Register cnt1, Register cnt2, Register result,
Register tmp1, Register tmp2, Register tmp3,
@@ -242,8 +247,6 @@
VectorRegister src, BasicType src_bt);
void vfcvt_rtz_x_f_v_safe(VectorRegister dst, VectorRegister src);
- void vfwcvt_rtz_x_f_v_safe(VectorRegister dst, VectorRegister src);
- void vfncvt_rtz_x_f_w_safe(VectorRegister dst, VectorRegister src);
void extract_v(Register dst, VectorRegister src, BasicType bt, int idx, VectorRegister tmp);
void extract_fp_v(FloatRegister dst, VectorRegister src, BasicType bt, int idx, VectorRegister tmp);
diff --git a/src/hotspot/cpu/riscv/frame_riscv.cpp b/src/hotspot/cpu/riscv/frame_riscv.cpp
index 54e59d2d0f48f..f1518724608a5 100644
--- a/src/hotspot/cpu/riscv/frame_riscv.cpp
+++ b/src/hotspot/cpu/riscv/frame_riscv.cpp
@@ -478,7 +478,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// do some validation of frame elements
// first the method
- Method* m = *interpreter_frame_method_addr();
+ Method* m = safe_interpreter_frame_method();
// validate the method we'd find in this potential sender
if (!Method::is_valid_method(m)) {
return false;
diff --git a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoah_riscv64.ad b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoah_riscv.ad
similarity index 100%
rename from src/hotspot/cpu/riscv/gc/shenandoah/shenandoah_riscv64.ad
rename to src/hotspot/cpu/riscv/gc/shenandoah/shenandoah_riscv.ad
diff --git a/src/hotspot/cpu/riscv/gc/x/x_riscv64.ad b/src/hotspot/cpu/riscv/gc/x/x_riscv.ad
similarity index 100%
rename from src/hotspot/cpu/riscv/gc/x/x_riscv64.ad
rename to src/hotspot/cpu/riscv/gc/x/x_riscv.ad
diff --git a/src/hotspot/cpu/riscv/gc/z/z_riscv64.ad b/src/hotspot/cpu/riscv/gc/z/z_riscv.ad
similarity index 100%
rename from src/hotspot/cpu/riscv/gc/z/z_riscv64.ad
rename to src/hotspot/cpu/riscv/gc/z/z_riscv.ad
diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
index 34d6eb87564c2..b5c834dc1d0d5 100644
--- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
+++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
@@ -690,7 +690,7 @@ void InterpreterMacroAssembler::remove_activation(
// Check that all monitors are unlocked
{
Label loop, exception, entry, restart;
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
const Address monitor_block_top(
fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
const Address monitor_block_bot(
@@ -763,6 +763,12 @@ void InterpreterMacroAssembler::remove_activation(
// testing if reserved zone needs to be re-enabled
Label no_reserved_zone_enabling;
+ // check if already enabled - if so no re-enabling needed
+ assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
+ lw(t0, Address(xthread, JavaThread::stack_guard_state_offset()));
+ subw(t0, t0, StackOverflow::stack_guard_enabled);
+ beqz(t0, no_reserved_zone_enabling);
+
ld(t0, Address(xthread, JavaThread::reserved_stack_activation_offset()));
ble(t1, t0, no_reserved_zone_enabling);
@@ -794,7 +800,7 @@ void InterpreterMacroAssembler::remove_activation(
//
// Kills:
// x10
-// c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
+// c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, .. (param regs)
// t0, t1 (temp regs)
void InterpreterMacroAssembler::lock_object(Register lock_reg)
{
@@ -809,6 +815,8 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
const Register swap_reg = x10;
const Register tmp = c_rarg2;
const Register obj_reg = c_rarg3; // Will contain the oop
+ const Register tmp2 = c_rarg4;
+ const Register tmp3 = c_rarg5;
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
@@ -829,7 +837,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
if (LockingMode == LM_LIGHTWEIGHT) {
ld(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- fast_lock(obj_reg, tmp, t0, t1, slow_case);
+ lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
j(count);
} else if (LockingMode == LM_LEGACY) {
// Load (object->mark() | 1) into swap_reg
@@ -893,7 +901,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
//
// Kills:
// x10
-// c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
+// c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, ... (param regs)
// t0, t1 (temp regs)
void InterpreterMacroAssembler::unlock_object(Register lock_reg)
{
@@ -907,6 +915,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
const Register swap_reg = x10;
const Register header_reg = c_rarg2; // Will contain the old oopMark
const Register obj_reg = c_rarg3; // Will contain the oop
+ const Register tmp_reg = c_rarg4; // Temporary used by lightweight_unlock
save_bcp(); // Save in case of exception
@@ -942,7 +951,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
ld(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
test_bit(t0, header_reg, exact_log2(markWord::monitor_value));
bnez(t0, slow_case);
- fast_unlock(obj_reg, header_reg, swap_reg, t0, slow_case);
+ lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
j(count);
bind(slow_case);
@@ -1723,8 +1732,8 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
bind(update);
load_klass(obj, obj);
- ld(t0, mdo_addr);
- xorr(obj, obj, t0);
+ ld(tmp, mdo_addr);
+ xorr(obj, obj, tmp);
andi(t0, obj, TypeEntries::type_klass_mask);
beqz(t0, next); // klass seen before, nothing to
// do. The unknown bit may have been
@@ -1734,15 +1743,15 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
bnez(t0, next);
// already unknown. Nothing to do anymore.
- ld(t0, mdo_addr);
- beqz(t0, none);
- mv(tmp, (u1)TypeEntries::null_seen);
- beq(t0, tmp, none);
- // There is a chance that the checks above (re-reading profiling
- // data from memory) fail if another thread has just set the
+ beqz(tmp, none);
+ mv(t0, (u1)TypeEntries::null_seen);
+ beq(tmp, t0, none);
+ // There is a chance that the checks above
+ // fail if another thread has just set the
// profiling to this obj's klass
- ld(t0, mdo_addr);
- xorr(obj, obj, t0);
+ xorr(obj, obj, tmp); // get back original value before XOR
+ ld(tmp, mdo_addr);
+ xorr(obj, obj, tmp);
andi(t0, obj, TypeEntries::type_klass_mask);
beqz(t0, next);
@@ -1753,6 +1762,10 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
bind(none);
// first time here. Set profile type.
sd(obj, mdo_addr);
+#ifdef ASSERT
+ andi(obj, obj, TypeEntries::type_mask);
+ verify_klass_ptr(obj);
+#endif
bind(next);
}
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index eb82ae2adeb30..fd5739d3d405f 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -1655,6 +1655,28 @@ void MacroAssembler::xorrw(Register Rd, Register Rs1, Register Rs2) {
sign_extend(Rd, Rd, 32);
}
+// Rd = Rs1 & (~Rs2)
+void MacroAssembler::andn(Register Rd, Register Rs1, Register Rs2) {
+ if (UseZbb) {
+ Assembler::andn(Rd, Rs1, Rs2);
+ return;
+ }
+
+ notr(Rd, Rs2);
+ andr(Rd, Rs1, Rd);
+}
+
+// Rd = Rs1 | (~Rs2)
+void MacroAssembler::orn(Register Rd, Register Rs1, Register Rs2) {
+ if (UseZbb) {
+ Assembler::orn(Rd, Rs1, Rs2);
+ return;
+ }
+
+ notr(Rd, Rs2);
+ orr(Rd, Rs1, Rd);
+}
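
The semantics of the two helpers, as a trivial reference (illustrative names, not part of the patch):

#include <cstdint>
static inline uint64_t andn_ref(uint64_t rs1, uint64_t rs2) { return rs1 & ~rs2; }
static inline uint64_t orn_ref (uint64_t rs1, uint64_t rs2) { return rs1 | ~rs2; }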
+
// Note: load_unsigned_short used to be called load_unsigned_word.
int MacroAssembler::load_unsigned_short(Register dst, Address src) {
int off = offset();
@@ -1700,12 +1722,29 @@ void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in
}
}
-// granularity is 1, 2 bytes per load
+// granularity is 1 or 2 bytes per load; dst and src.base() are allowed to be the same register
+void MacroAssembler::load_short_misaligned(Register dst, Address src, Register tmp, bool is_signed, int granularity) {
+ if (granularity != 1 && granularity != 2) {
+ ShouldNotReachHere();
+ }
+ if (AvoidUnalignedAccesses && (granularity != 2)) {
+ assert_different_registers(dst, tmp);
+ assert_different_registers(tmp, src.base());
+ is_signed ? lb(tmp, Address(src.base(), src.offset() + 1)) : lbu(tmp, Address(src.base(), src.offset() + 1));
+ slli(tmp, tmp, 8);
+ lbu(dst, src);
+ add(dst, dst, tmp);
+ } else {
+ is_signed ? lh(dst, src) : lhu(dst, src);
+ }
+}
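
As a reference for the unsigned granularity-1 path above (illustrative helper; byte order is little-endian, as on RISC-V):

#include <cstdint>
static inline uint16_t load_u16_bytewise(const uint8_t* p) {
  uint16_t lo = p[0];                 // lbu dst, src
  uint16_t hi = p[1];                 // lbu/lb tmp, src + 1
  return (uint16_t)(lo | (hi << 8));  // slli tmp, tmp, 8; add dst, dst, tmp
}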
+
+// granularity is 1, 2 or 4 bytes per load; if granularity is 2 or 4, dst and src.base() are allowed to be the same register
void MacroAssembler::load_int_misaligned(Register dst, Address src, Register tmp, bool is_signed, int granularity) {
if (AvoidUnalignedAccesses && (granularity != 4)) {
- assert_different_registers(dst, tmp, src.base());
switch(granularity) {
case 1:
+ assert_different_registers(dst, tmp, src.base());
lbu(dst, src);
lbu(tmp, Address(src.base(), src.offset() + 1));
slli(tmp, tmp, 8);
@@ -1718,9 +1757,11 @@ void MacroAssembler::load_int_misaligned(Register dst, Address src, Register tmp
add(dst, dst, tmp);
break;
case 2:
- lhu(dst, src);
+ assert_different_registers(dst, tmp);
+ assert_different_registers(tmp, src.base());
is_signed ? lh(tmp, Address(src.base(), src.offset() + 2)) : lhu(tmp, Address(src.base(), src.offset() + 2));
slli(tmp, tmp, 16);
+ lhu(dst, src);
add(dst, dst, tmp);
break;
default:
@@ -1731,12 +1772,12 @@ void MacroAssembler::load_int_misaligned(Register dst, Address src, Register tmp
}
}
-// granularity is 1, 2 or 4 bytes per load
+// granularity is 1, 2, 4 or 8 bytes per load; if granularity is 4 or 8, dst and src.base() are allowed to be the same register
void MacroAssembler::load_long_misaligned(Register dst, Address src, Register tmp, int granularity) {
if (AvoidUnalignedAccesses && (granularity != 8)) {
- assert_different_registers(dst, tmp, src.base());
switch(granularity){
case 1:
+ assert_different_registers(dst, tmp, src.base());
lbu(dst, src);
lbu(tmp, Address(src.base(), src.offset() + 1));
slli(tmp, tmp, 8);
@@ -1761,6 +1802,7 @@ void MacroAssembler::load_long_misaligned(Register dst, Address src, Register tm
add(dst, dst, tmp);
break;
case 2:
+ assert_different_registers(dst, tmp, src.base());
lhu(dst, src);
lhu(tmp, Address(src.base(), src.offset() + 2));
slli(tmp, tmp, 16);
@@ -1773,9 +1815,11 @@ void MacroAssembler::load_long_misaligned(Register dst, Address src, Register tm
add(dst, dst, tmp);
break;
case 4:
- lwu(dst, src);
+ assert_different_registers(dst, tmp);
+ assert_different_registers(tmp, src.base());
lwu(tmp, Address(src.base(), src.offset() + 4));
slli(tmp, tmp, 32);
+ lwu(dst, src);
add(dst, dst, tmp);
break;
default:
@@ -1947,6 +1991,22 @@ void MacroAssembler::ror_imm(Register dst, Register src, uint32_t shift, Registe
orr(dst, dst, tmp);
}
+// rotate left with shift bits, 32-bit version
+void MacroAssembler::rolw_imm(Register dst, Register src, uint32_t shift, Register tmp) {
+ if (UseZbb) {
+ // no roliw available
+ roriw(dst, src, 32 - shift);
+ return;
+ }
+
+ assert_different_registers(dst, tmp);
+ assert_different_registers(src, tmp);
+ assert(shift < 32, "shift amount must be < 32");
+ srliw(tmp, src, 32 - shift);
+ slliw(dst, src, shift);
+ orr(dst, dst, tmp);
+}
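
A reference model of the 32-bit rotate (illustrative helper; assumes 0 < shift < 32, as the assert requires):

#include <cstdint>
static inline uint32_t rolw_ref(uint32_t x, unsigned shift) {
  return (x << shift) | (x >> (32 - shift));  // slliw + srliw + orr; roriw(32 - shift) with Zbb
}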
+
void MacroAssembler::andi(Register Rd, Register Rn, int64_t imm, Register tmp) {
if (is_simm12(imm)) {
and_imm12(Rd, Rn, imm);
@@ -3946,18 +4006,17 @@ void MacroAssembler::ctzc_bit(Register Rd, Register Rs, bool isLL, Register tmp1
void MacroAssembler::inflate_lo32(Register Rd, Register Rs, Register tmp1, Register tmp2) {
assert_different_registers(Rd, Rs, tmp1, tmp2);
- mv(tmp1, 0xFF);
- mv(Rd, zr);
- for (int i = 0; i <= 3; i++) {
+ mv(tmp1, 0xFF000000); // first byte mask at lower word
+ andr(Rd, Rs, tmp1);
+ for (int i = 0; i < 2; i++) {
+ slli(Rd, Rd, wordSize);
+ srli(tmp1, tmp1, wordSize);
andr(tmp2, Rs, tmp1);
- if (i) {
- slli(tmp2, tmp2, i * 8);
- }
orr(Rd, Rd, tmp2);
- if (i != 3) {
- slli(tmp1, tmp1, 8);
- }
}
+ slli(Rd, Rd, wordSize);
+ andi(tmp2, Rs, 0xFF); // last byte mask at lower word
+ orr(Rd, Rd, tmp2);
}
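
A reference model of what the rewritten inflate_lo32 computes (illustrative helper): the low four Latin-1 bytes of Rs are widened into four 16-bit characters.

#include <cstdint>
static inline uint64_t inflate_lo32_ref(uint64_t rs) {
  uint64_t rd = 0;
  for (int i = 3; i >= 0; i--) {
    rd = (rd << 16) | ((rs >> (i * 8)) & 0xFF);  // move byte i into its own 16-bit lane
  }
  return rd;  // e.g. 0x....A3A2A1A0 -> 0x00A300A200A100A0
}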
// This instruction reads adjacent 4 bytes from the upper half of source register,
@@ -3966,17 +4025,8 @@ void MacroAssembler::inflate_lo32(Register Rd, Register Rs, Register tmp1, Regis
// Rd: 00A700A600A500A4
void MacroAssembler::inflate_hi32(Register Rd, Register Rs, Register tmp1, Register tmp2) {
assert_different_registers(Rd, Rs, tmp1, tmp2);
-
- mv(tmp1, 0xFF00000000);
- mv(Rd, zr);
- for (int i = 0; i <= 3; i++) {
- andr(tmp2, Rs, tmp1);
- orr(Rd, Rd, tmp2);
- srli(Rd, Rd, 8);
- if (i != 3) {
- slli(tmp1, tmp1, 8);
- }
- }
+ srli(Rs, Rs, 32); // only upper 32 bits are needed
+ inflate_lo32(Rd, Rs, tmp1, tmp2);
}
// The size of the blocks erased by the zero_blocks stub. We must
@@ -4170,6 +4220,57 @@ void MacroAssembler::zero_dcache_blocks(Register base, Register cnt, Register tm
bge(cnt, tmp1, loop);
}
+// java.lang.Math.round(float a)
+// Returns the closest int to the argument, with ties rounding to positive infinity.
+void MacroAssembler::java_round_float(Register dst, FloatRegister src, FloatRegister ftmp) {
+ // this instruction sequence provides a performance improvement on all tested devices;
+ // don't change it without re-verification
+ Label done;
+ mv(t0, jint_cast(0.5f));
+ fmv_w_x(ftmp, t0);
+
+ // dst = 0 if NaN
+ feq_s(t0, src, src); // replacing fclass with feq as a performance optimization
+ mv(dst, zr);
+ beqz(t0, done);
+
+ // dst = (src + 0.5f) rounded down towards negative infinity
+ // Adding 0.5f to some floats exceeds the precision limits for a float and rounding takes place.
+ // RDN is required for fadd_s, RNE gives incorrect results:
+ // --------------------------------------------------------------------
+ // fadd.s rne (src + 0.5f): src = 8388609.000000 ftmp = 8388610.000000
+ // fcvt.w.s rdn: ftmp = 8388610.000000 dst = 8388610
+ // --------------------------------------------------------------------
+ // fadd.s rdn (src + 0.5f): src = 8388609.000000 ftmp = 8388609.000000
+ // fcvt.w.s rdn: ftmp = 8388609.000000 dst = 8388609
+ // --------------------------------------------------------------------
+ fadd_s(ftmp, src, ftmp, RoundingMode::rdn);
+ fcvt_w_s(dst, ftmp, RoundingMode::rdn);
+
+ bind(done);
+}
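
For reference, a C++ model of the Math.round(float) semantics the sequence above implements, assuming the saturating behaviour of fcvt.w.s for out-of-range inputs; java_round_float_ref is an illustrative name, not part of the patch:

#include <cmath>
#include <cstdint>
#include <limits>
static inline int32_t java_round_float_ref(float x) {
  if (std::isnan(x)) return 0;             // the feq.s NaN check above
  double r = std::floor((double)x + 0.5);  // exact in double for every float in the int range
  if (r <= (double)std::numeric_limits<int32_t>::min()) return std::numeric_limits<int32_t>::min();
  if (r >= (double)std::numeric_limits<int32_t>::max()) return std::numeric_limits<int32_t>::max();
  return (int32_t)r;                       // fcvt.w.s saturates the same way
}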
+
+// java.lang.Math.round(double a)
+// Returns the closest long to the argument, with ties rounding to positive infinity.
+void MacroAssembler::java_round_double(Register dst, FloatRegister src, FloatRegister ftmp) {
+ // this instruction sequence provides a performance improvement on all tested devices;
+ // don't change it without re-verification
+ Label done;
+ mv(t0, julong_cast(0.5));
+ fmv_d_x(ftmp, t0);
+
+ // dst = 0 if NaN
+ feq_d(t0, src, src); // replacing fclass with feq as a performance optimization
+ mv(dst, zr);
+ beqz(t0, done);
+
+ // dst = (src + 0.5) rounded down towards negative infinity
+ fadd_d(ftmp, src, ftmp, RoundingMode::rdn); // RDN is required here otherwise some inputs produce incorrect results
+ fcvt_l_d(dst, ftmp, RoundingMode::rdn);
+
+ bind(done);
+}
+
#define FCVT_SAFE(FLOATCVT, FLOATSIG) \
void MacroAssembler::FLOATCVT##_safe(Register dst, FloatRegister src, Register tmp) { \
Label done; \
@@ -4586,25 +4687,31 @@ void MacroAssembler::rt_call(address dest, Register tmp) {
}
}
-void MacroAssembler::test_bit(Register Rd, Register Rs, uint32_t bit_pos, Register tmp) {
+void MacroAssembler::test_bit(Register Rd, Register Rs, uint32_t bit_pos) {
assert(bit_pos < 64, "invalid bit range");
if (UseZbs) {
bexti(Rd, Rs, bit_pos);
return;
}
- andi(Rd, Rs, 1UL << bit_pos, tmp);
+ int64_t imm = (int64_t)(1UL << bit_pos);
+ if (is_simm12(imm)) {
+ and_imm12(Rd, Rs, imm);
+ } else {
+ srli(Rd, Rs, bit_pos);
+ and_imm12(Rd, Rd, 1);
+ }
}
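
The fallback is equivalent to the following one-liner (illustrative):

#include <cstdint>
static inline uint64_t test_bit_ref(uint64_t rs, unsigned pos) {
  return (rs >> pos) & 1;  // srli + andi fallback; bexti does this in one instruction with Zbs
}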
-// Implements fast-locking.
+// Implements lightweight-locking.
// Branches to slow upon failure to lock the object.
// Falls through upon success.
//
// - obj: the object to be locked
// - hdr: the header, already loaded from obj, will be destroyed
// - tmp1, tmp2: temporary registers, will be destroyed
-void MacroAssembler::fast_lock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
+void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
- assert_different_registers(obj, hdr, tmp1, tmp2);
+ assert_different_registers(obj, hdr, tmp1, tmp2, t0);
// Check if we would have space on lock-stack for the object.
lwu(tmp1, Address(xthread, JavaThread::lock_stack_top_offset()));
@@ -4629,16 +4736,16 @@ void MacroAssembler::fast_lock(Register obj, Register hdr, Register tmp1, Regist
sw(tmp1, Address(xthread, JavaThread::lock_stack_top_offset()));
}
-// Implements fast-unlocking.
+// Implements lightweight-unlocking.
// Branches to slow upon failure.
// Falls through upon success.
//
// - obj: the object to be unlocked
// - hdr: the (pre-loaded) header of the object
// - tmp1, tmp2: temporary registers
-void MacroAssembler::fast_unlock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
+void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
- assert_different_registers(obj, hdr, tmp1, tmp2);
+ assert_different_registers(obj, hdr, tmp1, tmp2, t0);
#ifdef ASSERT
{
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
index 83688b88846c6..21f64f4b20e45 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
@@ -431,6 +431,7 @@ class MacroAssembler: public Assembler {
void store_sized_value(Address dst, Register src, size_t size_in_bytes);
// Misaligned loads, will use the best way, according to the AvoidUnalignedAccess flag
+ void load_short_misaligned(Register dst, Address src, Register tmp, bool is_signed, int granularity = 1);
void load_int_misaligned(Register dst, Address src, Register tmp, bool is_signed, int granularity = 1);
void load_long_misaligned(Register dst, Address src, Register tmp, int granularity = 1);
@@ -595,7 +596,9 @@ class MacroAssembler: public Assembler {
void NAME(Register Rs1, Register Rs2, const address dest) { \
assert_cond(dest != nullptr); \
int64_t offset = dest - pc(); \
- guarantee(is_simm13(offset) && ((offset % 2) == 0), "offset is invalid."); \
+ guarantee(is_simm13(offset) && is_even(offset), \
+ "offset is invalid: is_simm_13: %s offset: " INT64_FORMAT, \
+ BOOL_TO_STR(is_simm13(offset)), offset); \
Assembler::NAME(Rs1, Rs2, offset); \
} \
INSN_ENTRY_RELOC(void, NAME(Register Rs1, Register Rs2, address dest, relocInfo::relocType rtype)) \
@@ -760,6 +763,10 @@ class MacroAssembler: public Assembler {
void orrw(Register Rd, Register Rs1, Register Rs2);
void xorrw(Register Rd, Register Rs1, Register Rs2);
+ // logic with negate
+ void andn(Register Rd, Register Rs1, Register Rs2);
+ void orn(Register Rd, Register Rs1, Register Rs2);
+
// revb
void revb_h_h(Register Rd, Register Rs, Register tmp = t0); // reverse bytes in halfword in lower 16 bits, sign-extend
void revb_w_w(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1); // reverse bytes in lower word, sign-extend
@@ -771,6 +778,7 @@ class MacroAssembler: public Assembler {
void revb(Register Rd, Register Rs, Register tmp1 = t0, Register tmp2 = t1); // reverse bytes in doubleword
void ror_imm(Register dst, Register src, uint32_t shift, Register tmp = t0);
+ void rolw_imm(Register dst, Register src, uint32_t shift, Register tmp = t0);
void andi(Register Rd, Register Rn, int64_t imm, Register tmp = t0);
void orptr(Address adr, RegisterOrConstant src, Register tmp1 = t0, Register tmp2 = t1);
@@ -1216,7 +1224,7 @@ class MacroAssembler: public Assembler {
void shadd(Register Rd, Register Rs1, Register Rs2, Register tmp, int shamt);
// test single bit in Rs, result is set to Rd
- void test_bit(Register Rd, Register Rs, uint32_t bit_pos, Register tmp = t0);
+ void test_bit(Register Rd, Register Rs, uint32_t bit_pos);
// Here the float instructions with safe deal with some exceptions.
// e.g. convert from NaN, +Inf, -Inf to int, float, double
@@ -1227,6 +1235,9 @@ class MacroAssembler: public Assembler {
void fcvt_w_d_safe(Register dst, FloatRegister src, Register tmp = t0);
void fcvt_l_d_safe(Register dst, FloatRegister src, Register tmp = t0);
+ void java_round_float(Register dst, FloatRegister src, FloatRegister ftmp);
+ void java_round_double(Register dst, FloatRegister src, FloatRegister ftmp);
+
// vector load/store unit-stride instructions
void vlex_v(VectorRegister vd, Register base, Assembler::SEW sew, VectorMask vm = unmasked) {
switch (sew) {
@@ -1433,8 +1444,8 @@ class MacroAssembler: public Assembler {
void store_conditional(Register addr, Register new_val, enum operand_size size, Assembler::Aqrl release);
public:
- void fast_lock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow);
- void fast_unlock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow);
+ void lightweight_lock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow);
+ void lightweight_unlock(Register obj, Register hdr, Register tmp1, Register tmp2, Label& slow);
};
#ifdef ASSERT
diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad
index 6678d591625f5..e63ff695a86c7 100644
--- a/src/hotspot/cpu/riscv/riscv.ad
+++ b/src/hotspot/cpu/riscv/riscv.ad
@@ -981,6 +981,7 @@ definitions %{
int_def LOAD_COST ( 300, 3 * DEFAULT_COST); // load, fpload
int_def STORE_COST ( 100, 1 * DEFAULT_COST); // store, fpstore
int_def XFER_COST ( 300, 3 * DEFAULT_COST); // mfc, mtc, fcvt, fmove, fcmp
+ int_def FMVX_COST ( 100, 1 * DEFAULT_COST); // shuffles with no conversion
int_def BRANCH_COST ( 200, 2 * DEFAULT_COST); // branch, jmp, call
int_def IMUL_COST ( 1000, 10 * DEFAULT_COST); // imul
int_def IDIVSI_COST ( 3400, 34 * DEFAULT_COST); // idivdi
@@ -2176,7 +2177,7 @@ bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack,
encode %{
// BEGIN Non-volatile memory access
- enc_class riscv_enc_li_imm(iRegIorL dst, immIorL src) %{
+ enc_class riscv_enc_mov_imm(iRegIorL dst, immIorL src) %{
C2_MacroAssembler _masm(&cbuf);
int64_t con = (int64_t)$src$$constant;
Register dst_reg = as_Register($dst$$reg);
@@ -2352,6 +2353,11 @@ encode %{
ciEnv::current()->record_failure("CodeCache is full");
return;
}
+ } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
+ // The NOP here is purely to ensure that eliding a call to
+ // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
+ __ nop();
+ __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
} else {
int method_index = resolved_method_index(cbuf);
RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
@@ -2431,223 +2437,6 @@ encode %{
}
%}
- // Use cr register to indicate the fast_lock result: zero for success; non-zero for failure.
- enc_class riscv_enc_fast_lock(iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2) %{
- C2_MacroAssembler _masm(&cbuf);
- Register flag = t1;
- Register oop = as_Register($object$$reg);
- Register box = as_Register($box$$reg);
- Register disp_hdr = as_Register($tmp1$$reg);
- Register tmp = as_Register($tmp2$$reg);
- Label cont;
- Label object_has_monitor;
- Label count, no_count;
-
- assert_different_registers(oop, box, tmp, disp_hdr, t0);
-
- // Load markWord from object into displaced_header.
- __ ld(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
-
- if (DiagnoseSyncOnValueBasedClasses != 0) {
- __ load_klass(flag, oop);
- __ lwu(flag, Address(flag, Klass::access_flags_offset()));
- __ test_bit(flag, flag, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS), tmp /* tmp */);
- __ bnez(flag, cont, true /* is_far */);
- }
-
- // Check for existing monitor
- __ test_bit(t0, disp_hdr, exact_log2(markWord::monitor_value));
- __ bnez(t0, object_has_monitor);
-
- if (LockingMode == LM_MONITOR) {
- __ mv(flag, 1); // Set non-zero flag to indicate 'failure' -> take slow-path
- __ j(cont);
- } else if (LockingMode == LM_LEGACY) {
- // Set tmp to be (markWord of object | UNLOCK_VALUE).
- __ ori(tmp, disp_hdr, markWord::unlocked_value);
-
- // Initialize the box. (Must happen before we update the object mark!)
- __ sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
-
- // Compare object markWord with an unlocked value (tmp) and if
- // equal exchange the stack address of our box with object markWord.
- // On failure disp_hdr contains the possibly locked markWord.
- __ cmpxchg(/*memory address*/oop, /*expected value*/tmp, /*new value*/box, Assembler::int64, Assembler::aq,
- Assembler::rl, /*result*/disp_hdr);
- __ mv(flag, zr);
- __ beq(disp_hdr, tmp, cont); // prepare zero flag and goto cont if we won the cas
-
- assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
-
- // If the compare-and-exchange succeeded, then we found an unlocked
- // object, will have now locked it will continue at label cont
- // We did not see an unlocked object so try the fast recursive case.
-
- // Check if the owner is self by comparing the value in the
- // markWord of object (disp_hdr) with the stack pointer.
- __ sub(disp_hdr, disp_hdr, sp);
- __ mv(tmp, (intptr_t) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
- // If (mark & lock_mask) == 0 and mark - sp < page_size, we are stack-locking and goto cont,
- // hence we can store 0 as the displaced header in the box, which indicates that it is a
- // recursive lock.
- __ andr(tmp/*==0?*/, disp_hdr, tmp);
- __ sd(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
- __ mv(flag, tmp); // we can use the value of tmp as the result here
- __ j(cont);
- } else {
- assert(LockingMode == LM_LIGHTWEIGHT, "");
- Label slow;
- __ fast_lock(oop, disp_hdr, tmp, t0, slow);
-
- // Indicate success on completion.
- __ mv(flag, zr);
- __ j(count);
- __ bind(slow);
- __ mv(flag, 1); // Set non-zero flag to indicate 'failure' -> take slow-path
- __ j(no_count);
- }
-
- // Handle existing monitor.
- __ bind(object_has_monitor);
- // The object's monitor m is unlocked iff m->owner == NULL,
- // otherwise m->owner may contain a thread or a stack address.
- //
- // Try to CAS m->owner from NULL to current thread.
- __ add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value));
- __ cmpxchg(/*memory address*/tmp, /*expected value*/zr, /*new value*/xthread, Assembler::int64, Assembler::aq,
- Assembler::rl, /*result*/flag); // cas succeeds if flag == zr(expected)
-
- if (LockingMode != LM_LIGHTWEIGHT) {
- // Store a non-null value into the box to avoid looking like a re-entrant
- // lock. The fast-path monitor unlock code checks for
- // markWord::monitor_value so use markWord::unused_mark which has the
- // relevant bit set, and also matches ObjectSynchronizer::slow_enter.
- __ mv(tmp, (address)markWord::unused_mark().value());
- __ sd(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
- }
-
- __ beqz(flag, cont); // CAS success means locking succeeded
-
- __ bne(flag, xthread, cont); // Check for recursive locking
-
- // Recursive lock case
- __ mv(flag, zr);
- __ increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1, t0, tmp);
-
- __ bind(cont);
- // zero flag indicates success
- // non-zero flag indicates failure
- __ bnez(flag, no_count);
-
- __ bind(count);
- __ increment(Address(xthread, JavaThread::held_monitor_count_offset()), 1, t0, tmp);
-
- __ bind(no_count);
- %}
-
- // Use cr register to indicate the fast_unlock result: zero for success; non-zero for failure.
- enc_class riscv_enc_fast_unlock(iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2) %{
- C2_MacroAssembler _masm(&cbuf);
- Register flag = t1;
- Register oop = as_Register($object$$reg);
- Register box = as_Register($box$$reg);
- Register disp_hdr = as_Register($tmp1$$reg);
- Register tmp = as_Register($tmp2$$reg);
- Label cont;
- Label object_has_monitor;
- Label count, no_count;
-
- assert_different_registers(oop, box, tmp, disp_hdr, flag);
-
- if (LockingMode == LM_LEGACY) {
- // Find the lock address and load the displaced header from the stack.
- __ ld(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
-
- // If the displaced header is 0, we have a recursive unlock.
- __ mv(flag, disp_hdr);
- __ beqz(disp_hdr, cont);
- }
-
- // Handle existing monitor.
- __ ld(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
- __ test_bit(t0, tmp, exact_log2(markWord::monitor_value));
- __ bnez(t0, object_has_monitor);
-
- if (LockingMode == LM_MONITOR) {
- __ mv(flag, 1); // Set non-zero flag to indicate 'failure' -> take slow path
- __ j(cont);
- } else if (LockingMode == LM_LEGACY) {
- // Check if it is still a light weight lock, this is true if we
- // see the stack address of the basicLock in the markWord of the
- // object.
-
- __ cmpxchg(/*memory address*/oop, /*expected value*/box, /*new value*/disp_hdr, Assembler::int64, Assembler::relaxed,
- Assembler::rl, /*result*/tmp);
- __ xorr(flag, box, tmp); // box == tmp if cas succeeds
- __ j(cont);
- } else {
- assert(LockingMode == LM_LIGHTWEIGHT, "");
- Label slow;
- __ fast_unlock(oop, tmp, box, disp_hdr, slow);
-
- // Indicate success on completion.
- __ mv(flag, zr);
- __ j(count);
- __ bind(slow);
- __ mv(flag, 1); // Set non-zero flag to indicate 'failure' -> take slow path
- __ j(no_count);
- }
-
- assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
-
- // Handle existing monitor.
- __ bind(object_has_monitor);
- STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
- __ add(tmp, tmp, -(int)markWord::monitor_value); // monitor
-
- if (LockingMode == LM_LIGHTWEIGHT) {
- // If the owner is anonymous, we need to fix it -- in an outline stub.
- Register tmp2 = disp_hdr;
- __ ld(tmp2, Address(tmp, ObjectMonitor::owner_offset()));
- __ test_bit(t0, tmp2, exact_log2(ObjectMonitor::ANONYMOUS_OWNER));
- C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmp, tmp2);
- Compile::current()->output()->add_stub(stub);
- __ bnez(t0, stub->entry(), /* is_far */ true);
- __ bind(stub->continuation());
- }
-
- __ ld(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
-
- Label notRecursive;
- __ beqz(disp_hdr, notRecursive); // Will be 0 if not recursive.
-
- // Recursive lock
- __ addi(disp_hdr, disp_hdr, -1);
- __ sd(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
- __ mv(flag, zr);
- __ j(cont);
-
- __ bind(notRecursive);
- __ ld(flag, Address(tmp, ObjectMonitor::EntryList_offset()));
- __ ld(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset()));
- __ orr(flag, flag, disp_hdr); // Will be 0 if both are 0.
- __ bnez(flag, cont);
- // need a release store here
- __ la(tmp, Address(tmp, ObjectMonitor::owner_offset()));
- __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
- __ sd(zr, Address(tmp)); // set unowned
-
- __ bind(cont);
- // zero flag indicates success
- // non-zero flag indicates failure
- __ bnez(flag, no_count);
-
- __ bind(count);
- __ decrement(Address(xthread, JavaThread::held_monitor_count_offset()), 1, t0, tmp);
-
- __ bind(no_count);
- %}
-
// arithmetic encodings
enc_class riscv_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
@@ -4965,9 +4754,9 @@ instruct loadConI(iRegINoSp dst, immI src)
match(Set dst src);
ins_cost(ALU_COST);
- format %{ "li $dst, $src\t# int, #@loadConI" %}
+ format %{ "mv $dst, $src\t# int, #@loadConI" %}
- ins_encode(riscv_enc_li_imm(dst, src));
+ ins_encode(riscv_enc_mov_imm(dst, src));
ins_pipe(ialu_imm);
%}
@@ -4978,9 +4767,9 @@ instruct loadConL(iRegLNoSp dst, immL src)
match(Set dst src);
ins_cost(ALU_COST);
- format %{ "li $dst, $src\t# long, #@loadConL" %}
+ format %{ "mv $dst, $src\t# long, #@loadConL" %}
- ins_encode(riscv_enc_li_imm(dst, src));
+ ins_encode(riscv_enc_mov_imm(dst, src));
ins_pipe(ialu_imm);
%}
@@ -6859,6 +6648,21 @@ instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
ins_pipe(lmul_reg_reg);
%}
+instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
+%{
+ match(Set dst (UMulHiL src1 src2));
+ ins_cost(IMUL_COST);
+ format %{ "mulhu $dst, $src1, $src2\t# umulhi, #@umulHiL_rReg" %}
+
+ ins_encode %{
+ __ mulhu(as_Register($dst$$reg),
+ as_Register($src1$$reg),
+ as_Register($src2$$reg));
+ %}
+
+ ins_pipe(lmul_reg_reg);
+%}
+
// Integer Divide
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
@@ -8423,6 +8227,34 @@ instruct convN2I(iRegINoSp dst, iRegN src)
ins_pipe(ialu_reg);
%}
+instruct round_double_reg(iRegLNoSp dst, fRegD src, fRegD ftmp) %{
+ match(Set dst (RoundD src));
+
+ ins_cost(XFER_COST + BRANCH_COST);
+ effect(TEMP ftmp);
+ format %{ "java_round_double $dst, $src\t#@round_double_reg" %}
+
+ ins_encode %{
+ __ java_round_double($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
+ %}
+
+ ins_pipe(pipe_slow);
+%}
+
+instruct round_float_reg(iRegINoSp dst, fRegF src, fRegF ftmp) %{
+ match(Set dst (RoundF src));
+
+ ins_cost(XFER_COST + BRANCH_COST);
+ effect(TEMP ftmp);
+ format %{ "java_round_float $dst, $src\t#@round_float_reg" %}
+
+ ins_encode %{
+ __ java_round_float($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
+ %}
+
+ ins_pipe(pipe_slow);
+%}
+
// Convert oop pointer into compressed form
instruct encodeHeapOop(iRegNNoSp dst, iRegP src) %{
match(Set dst (EncodeP src));
@@ -8652,7 +8484,7 @@ instruct MoveF2I_reg_reg(iRegINoSp dst, fRegF src) %{
effect(DEF dst, USE src);
- ins_cost(XFER_COST);
+ ins_cost(FMVX_COST);
format %{ "fmv.x.w $dst, $src\t#@MoveL2D_reg_stack" %}
@@ -8670,7 +8502,7 @@ instruct MoveI2F_reg_reg(fRegF dst, iRegI src) %{
effect(DEF dst, USE src);
- ins_cost(XFER_COST);
+ ins_cost(FMVX_COST);
format %{ "fmv.w.x $dst, $src\t#@MoveI2F_reg_reg" %}
@@ -8688,7 +8520,7 @@ instruct MoveD2L_reg_reg(iRegLNoSp dst, fRegD src) %{
effect(DEF dst, USE src);
- ins_cost(XFER_COST);
+ ins_cost(FMVX_COST);
format %{ "fmv.x.d $dst, $src\t#@MoveD2L_reg_reg" %}
@@ -8706,7 +8538,7 @@ instruct MoveL2D_reg_reg(fRegD dst, iRegL src) %{
effect(DEF dst, USE src);
- ins_cost(XFER_COST);
+ ins_cost(FMVX_COST);
format %{ "fmv.d.x $dst, $src\t#@MoveD2L_reg_reg" %}
@@ -10374,15 +10206,17 @@ instruct tlsLoadP(javaThread_RegP dst)
// inlined locking and unlocking
// using t1 as the 'flag' register to bridge the BoolNode producers and consumers
-instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2)
+instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
match(Set cr (FastLock object box));
- effect(TEMP tmp1, TEMP tmp2);
+ effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
ins_cost(LOAD_COST * 2 + STORE_COST * 3 + ALU_COST * 6 + BRANCH_COST * 3);
- format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2, #@cmpFastLock" %}
+ format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3, #@cmpFastLock" %}
- ins_encode(riscv_enc_fast_lock(object, box, tmp1, tmp2));
+ ins_encode %{
+ __ fast_lock($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
+ %}
ins_pipe(pipe_serial);
%}
@@ -10396,7 +10230,9 @@ instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iR
ins_cost(LOAD_COST * 2 + STORE_COST + ALU_COST * 2 + BRANCH_COST * 4);
format %{ "fastunlock $object,$box\t! kills $tmp1, $tmp2, #@cmpFastUnlock" %}
- ins_encode(riscv_enc_fast_unlock(object, box, tmp1, tmp2));
+ ins_encode %{
+ __ fast_unlock($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
+ %}
ins_pipe(pipe_serial);
%}
diff --git a/src/hotspot/cpu/riscv/riscv_v.ad b/src/hotspot/cpu/riscv/riscv_v.ad
index 81a9b8a011481..06826ecf04601 100644
--- a/src/hotspot/cpu/riscv/riscv_v.ad
+++ b/src/hotspot/cpu/riscv/riscv_v.ad
@@ -934,7 +934,7 @@ instruct vmla(vReg dst_src1, vReg src2, vReg src3) %{
match(Set dst_src1 (AddVI dst_src1 (MulVI src2 src3)));
match(Set dst_src1 (AddVL dst_src1 (MulVL src2 src3)));
ins_cost(VEC_COST);
- format %{ "vmla $dst_src1, $dst_src1, src2, src3" %}
+ format %{ "vmla $dst_src1, $dst_src1, $src2, $src3" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
__ vsetvli_helper(bt, Matcher::vector_length(this));
@@ -970,7 +970,7 @@ instruct vmls(vReg dst_src1, vReg src2, vReg src3) %{
match(Set dst_src1 (SubVI dst_src1 (MulVI src2 src3)));
match(Set dst_src1 (SubVL dst_src1 (MulVL src2 src3)));
ins_cost(VEC_COST);
- format %{ "vmls $dst_src1, $dst_src1, src2, src3" %}
+ format %{ "vmls $dst_src1, $dst_src1, $src2, $src3" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
__ vsetvli_helper(bt, Matcher::vector_length(this));
@@ -2954,11 +2954,9 @@ instruct vmask_gen_I(vRegMask dst, iRegI src) %{
format %{ "vmask_gen_I $dst, $src" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
- Assembler::SEW sew = Assembler::elemtype_to_sew(bt);
__ vsetvli_helper(bt, Matcher::vector_length(this));
- __ vmclr_m(as_VectorRegister($dst$$reg));
- __ vsetvli(t0, $src$$Register, sew);
- __ vmset_m(as_VectorRegister($dst$$reg));
+ __ vid_v(as_VectorRegister($dst$$reg));
+ __ vmsltu_vx(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg), $src$$Register);
%}
ins_pipe(pipe_slow);
%}
@@ -2968,26 +2966,30 @@ instruct vmask_gen_L(vRegMask dst, iRegL src) %{
format %{ "vmask_gen_L $dst, $src" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
- Assembler::SEW sew = Assembler::elemtype_to_sew(bt);
__ vsetvli_helper(bt, Matcher::vector_length(this));
- __ vmclr_m(as_VectorRegister($dst$$reg));
- __ vsetvli(t0, $src$$Register, sew);
- __ vmset_m(as_VectorRegister($dst$$reg));
+ __ vid_v(as_VectorRegister($dst$$reg));
+ __ vmsltu_vx(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg), $src$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct vmask_gen_imm(vRegMask dst, immL con) %{
+ predicate(n->in(1)->get_long() <= 16 ||
+ n->in(1)->get_long() == Matcher::vector_length(n));
match(Set dst (VectorMaskGen con));
format %{ "vmask_gen_imm $dst, $con" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
- if ($con$$constant != Matcher::vector_length(this)) {
- __ vsetvli_helper(bt, Matcher::vector_length(this));
+ __ vsetvli_helper(bt, Matcher::vector_length(this));
+ if ((uint)($con$$constant) == 0) {
__ vmclr_m(as_VectorRegister($dst$$reg));
+ } else if ((uint)($con$$constant) == Matcher::vector_length(this)) {
+ __ vmset_m(as_VectorRegister($dst$$reg));
+ } else {
+ assert((uint)($con$$constant) < Matcher::vector_length(this), "unsupported input lane_cnt");
+ __ vid_v(as_VectorRegister($dst$$reg));
+ __ vmsleu_vi(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg), (uint)($con$$constant) - 1);
}
- __ vsetvli_helper(bt, (uint)($con$$constant));
- __ vmset_m(as_VectorRegister($dst$$reg));
%}
ins_pipe(pipe_slow);
%}
@@ -3197,13 +3199,12 @@ instruct vcvtStoX_fp_extend(vReg dst, vReg src) %{
effect(TEMP_DEF dst);
format %{ "vcvtStoX_fp_extend $dst, $src" %}
ins_encode %{
- __ vsetvli_helper(T_SHORT, Matcher::vector_length(this), Assembler::mf2);
+ BasicType bt = Matcher::vector_element_basic_type(this);
+ __ integer_extend_v(as_VectorRegister($dst$$reg), (bt == T_FLOAT ? T_INT : T_LONG),
+ Matcher::vector_length(this), as_VectorRegister($src$$reg), T_SHORT);
+ __ vsetvli_helper(bt, Matcher::vector_length(this));
__ csrwi(CSR_FRM, C2_MacroAssembler::rne);
- __ vfwcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg));
- if (Matcher::vector_element_basic_type(this) == T_DOUBLE) {
- __ vsetvli_helper(T_FLOAT, Matcher::vector_length(this), Assembler::mf2);
- __ vfwcvt_f_f_v(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg));
- }
+ __ vfcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg));
%}
ins_pipe(pipe_slow);
%}
@@ -3309,12 +3310,11 @@ instruct vcvtFtoX_narrow(vReg dst, vReg src, vRegMask_V0 v0) %{
effect(TEMP_DEF dst, TEMP v0);
format %{ "vcvtFtoX_narrow $dst, $src" %}
ins_encode %{
- __ vsetvli_helper(T_SHORT, Matcher::vector_length(this), Assembler::mf2);
- __ vfncvt_rtz_x_f_w_safe(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg));
- if (Matcher::vector_element_basic_type(this) == T_BYTE) {
- __ vsetvli_helper(T_BYTE, Matcher::vector_length(this), Assembler::mf2);
- __ vncvt_x_x_w(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg));
- }
+ __ vsetvli_helper(T_FLOAT, Matcher::vector_length(this));
+ __ vfcvt_rtz_x_f_v_safe(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg));
+ BasicType bt = Matcher::vector_element_basic_type(this);
+ __ integer_narrow_v(as_VectorRegister($dst$$reg), bt, Matcher::vector_length(this),
+ as_VectorRegister($dst$$reg), T_INT);
%}
ins_pipe(pipe_slow);
%}
@@ -3337,8 +3337,11 @@ instruct vcvtFtoL(vReg dst, vReg src, vRegMask_V0 v0) %{
effect(TEMP_DEF dst, TEMP v0);
format %{ "vcvtFtoL $dst, $src" %}
ins_encode %{
+ __ vsetvli_helper(T_LONG, Matcher::vector_length(this));
+ __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg));
__ vsetvli_helper(T_FLOAT, Matcher::vector_length(this), Assembler::mf2);
- __ vfwcvt_rtz_x_f_v_safe(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg));
+ __ vmfeq_vv(as_VectorRegister($v0$$reg), as_VectorRegister($src$$reg), as_VectorRegister($src$$reg));
+ __ vfwcvt_rtz_x_f_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), Assembler::v0_t);
%}
ins_pipe(pipe_slow);
%}
@@ -3366,8 +3369,11 @@ instruct vcvtDtoX_narrow(vReg dst, vReg src, vRegMask_V0 v0) %{
effect(TEMP_DEF dst, TEMP v0);
format %{ "vcvtDtoX_narrow $dst, $src" %}
ins_encode %{
+ __ vsetvli_helper(T_DOUBLE, Matcher::vector_length(this));
+ __ vmfeq_vv(as_VectorRegister($v0$$reg), as_VectorRegister($src$$reg), as_VectorRegister($src$$reg));
__ vsetvli_helper(T_INT, Matcher::vector_length(this), Assembler::mf2);
- __ vfncvt_rtz_x_f_w_safe(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg));
+ __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg));
+ __ vfncvt_rtz_x_f_w(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), Assembler::v0_t);
BasicType bt = Matcher::vector_element_basic_type(this);
if (bt == T_BYTE || bt == T_SHORT) {
__ integer_narrow_v(as_VectorRegister($dst$$reg), bt, Matcher::vector_length(this),
@@ -3551,18 +3557,16 @@ instruct extractD(fRegD dst, vReg src, immI idx, vReg tmp)
// ------------------------------ Compress/Expand Operations -------------------
-instruct mcompress(vRegMask dst, vRegMask src, iRegLNoSp tmp) %{
+instruct mcompress(vRegMask dst, vRegMask src, vReg tmp) %{
match(Set dst (CompressM src));
- effect(TEMP_DEF dst, TEMP tmp);
+ effect(TEMP tmp);
format %{ "mcompress $dst, $src\t# KILL $tmp" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
- Assembler::SEW sew = Assembler::elemtype_to_sew(bt);
__ vsetvli_helper(bt, Matcher::vector_length(this));
- __ vmclr_m(as_VectorRegister($dst$$reg));
- __ vcpop_m($tmp$$Register, as_VectorRegister($src$$reg));
- __ vsetvli(t0, $tmp$$Register, sew);
- __ vmset_m(as_VectorRegister($dst$$reg));
+ __ vid_v(as_VectorRegister($tmp$$reg));
+ __ vcpop_m(t0, as_VectorRegister($src$$reg));
+ __ vmsltu_vx(as_VectorRegister($dst$$reg), as_VectorRegister($tmp$$reg), t0);
%}
ins_pipe(pipe_slow);
%}
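For reference, a scalar sketch (not part of the patch) of the CompressM semantics implemented above; the helper name and types are illustrative:

    #include <cstddef>
    // The result mask has its first popcount(src) lanes active.
    static void mcompress_ref(bool* dst, const bool* src, size_t vlen) {
      size_t n = 0;
      for (size_t i = 0; i < vlen; i++) n += src[i] ? 1 : 0;   // vcpop.m
      for (size_t i = 0; i < vlen; i++) dst[i] = (i < n);      // vid.v + vmsltu.vx
    }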
diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index 0c671ae1203e0..691dfa1bd7040 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -1650,6 +1650,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
const Register obj_reg = x9; // Will contain the oop
const Register lock_reg = x30; // Address of compiler lock object (BasicLock)
const Register old_hdr = x30; // value of old header at unlock time
+ const Register lock_tmp = x31; // Temporary used by lightweight_lock/unlock
const Register tmp = ra;
Label slow_path_lock;
@@ -1701,7 +1702,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "");
__ ld(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- __ fast_lock(obj_reg, swap_reg, tmp, t0, slow_path_lock);
+ __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
}
__ bind(count);
@@ -1829,7 +1830,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ ld(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ test_bit(t0, old_hdr, exact_log2(markWord::monitor_value));
__ bnez(t0, slow_path_unlock);
- __ fast_unlock(obj_reg, old_hdr, swap_reg, t0, slow_path_unlock);
+ __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
__ decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
}
diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
index 60c1fc8c3d5a9..aab65019619f7 100644
--- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
@@ -2310,24 +2310,21 @@ class StubGenerator: public StubCodeGenerator {
}
// code for comparing 8 characters of strings with Latin1 and Utf16 encoding
- void compare_string_8_x_LU(Register tmpL, Register tmpU, Label &DIFF1,
- Label &DIFF2) {
- const Register strU = x12, curU = x7, strL = x29, tmp = x30;
- __ ld(tmpL, Address(strL));
- __ addi(strL, strL, 8);
+ void compare_string_8_x_LU(Register tmpL, Register tmpU, Register strL, Register strU, Label& DIFF) {
+ const Register tmp = x30, tmpLval = x12;
+ __ ld(tmpLval, Address(strL));
+ __ addi(strL, strL, wordSize);
__ ld(tmpU, Address(strU));
- __ addi(strU, strU, 8);
- __ inflate_lo32(tmp, tmpL);
- __ mv(t0, tmp);
- __ xorr(tmp, curU, t0);
- __ bnez(tmp, DIFF2);
-
- __ ld(curU, Address(strU));
- __ addi(strU, strU, 8);
- __ inflate_hi32(tmp, tmpL);
- __ mv(t0, tmp);
- __ xorr(tmp, tmpU, t0);
- __ bnez(tmp, DIFF1);
+ __ addi(strU, strU, wordSize);
+ __ inflate_lo32(tmpL, tmpLval);
+ __ xorr(tmp, tmpU, tmpL);
+ __ bnez(tmp, DIFF);
+
+ __ ld(tmpU, Address(strU));
+ __ addi(strU, strU, wordSize);
+ __ inflate_hi32(tmpL, tmpLval);
+ __ xorr(tmp, tmpU, tmpL);
+ __ bnez(tmp, DIFF);
}
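A hedged C++ sketch of one compare_string_8_x_LU iteration (not part of the patch; inflate4 and equal8 are illustrative helpers, and little-endian layout is assumed, as on riscv64):

    #include <cstdint>
    #include <cstring>
    // Widen 4 Latin-1 bytes into 4 UTF-16 code units packed in a 64-bit word.
    static uint64_t inflate4(const uint8_t* b) {
      uint64_t r = 0;
      for (int i = 0; i < 4; i++) r |= (uint64_t)b[i] << (16 * i);
      return r;
    }
    // One iteration compares 8 Latin-1 chars against the matching 16 bytes of UTF-16.
    static bool equal8(const uint8_t* latin, const uint8_t* utf16) {
      uint64_t u_lo, u_hi;
      std::memcpy(&u_lo, utf16, 8);        // first ld from strU
      std::memcpy(&u_hi, utf16 + 8, 8);    // second ld from strU
      return inflate4(latin) == u_lo       // inflate_lo32 + xorr/bnez
          && inflate4(latin + 4) == u_hi;  // inflate_hi32 + xorr/bnez
    }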
// x10 = result
@@ -2342,11 +2339,9 @@ class StubGenerator: public StubCodeGenerator {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", isLU ? "compare_long_string_different_encoding LU" : "compare_long_string_different_encoding UL");
address entry = __ pc();
- Label SMALL_LOOP, TAIL, TAIL_LOAD_16, LOAD_LAST, DIFF1, DIFF2,
- DONE, CALCULATE_DIFFERENCE;
- const Register result = x10, str1 = x11, cnt1 = x12, str2 = x13, cnt2 = x14,
- tmp1 = x28, tmp2 = x29, tmp3 = x30, tmp4 = x7, tmp5 = x31;
- RegSet spilled_regs = RegSet::of(tmp4, tmp5);
+ Label SMALL_LOOP, TAIL, LOAD_LAST, DONE, CALCULATE_DIFFERENCE;
+ const Register result = x10, str1 = x11, str2 = x13, cnt2 = x14,
+ tmp1 = x28, tmp2 = x29, tmp3 = x30, tmp4 = x12;
// cnt2 == amount of characters left to compare
// Check already loaded first 4 symbols
@@ -2354,77 +2349,81 @@ class StubGenerator: public StubCodeGenerator {
__ mv(isLU ? tmp1 : tmp2, tmp3);
__ addi(str1, str1, isLU ? wordSize / 2 : wordSize);
__ addi(str2, str2, isLU ? wordSize : wordSize / 2);
- __ sub(cnt2, cnt2, 8); // Already loaded 4 symbols. Last 4 is special case.
- __ push_reg(spilled_regs, sp);
+ __ sub(cnt2, cnt2, wordSize / 2); // Already loaded 4 symbols
- if (isLU) {
- __ add(str1, str1, cnt2);
- __ shadd(str2, cnt2, str2, t0, 1);
- } else {
- __ shadd(str1, cnt2, str1, t0, 1);
- __ add(str2, str2, cnt2);
- }
__ xorr(tmp3, tmp1, tmp2);
- __ mv(tmp5, tmp2);
__ bnez(tmp3, CALCULATE_DIFFERENCE);
Register strU = isLU ? str2 : str1,
strL = isLU ? str1 : str2,
- tmpU = isLU ? tmp5 : tmp1, // where to keep U for comparison
- tmpL = isLU ? tmp1 : tmp5; // where to keep L for comparison
+ tmpU = isLU ? tmp2 : tmp1, // where to keep U for comparison
+ tmpL = isLU ? tmp1 : tmp2; // where to keep L for comparison
- __ sub(tmp2, strL, cnt2); // strL pointer to load from
- __ slli(t0, cnt2, 1);
- __ sub(cnt1, strU, t0); // strU pointer to load from
+ // make sure main loop is 8 byte-aligned, we should load another 4 bytes from strL
+ // cnt2 is >= 68 here, no need to check it for >= 0
+ __ lwu(tmpL, Address(strL));
+ __ addi(strL, strL, wordSize / 2);
+ __ ld(tmpU, Address(strU));
+ __ addi(strU, strU, wordSize);
+ __ inflate_lo32(tmp3, tmpL);
+ __ mv(tmpL, tmp3);
+ __ xorr(tmp3, tmpU, tmpL);
+ __ bnez(tmp3, CALCULATE_DIFFERENCE);
+ __ addi(cnt2, cnt2, -wordSize / 2);
- __ ld(tmp4, Address(cnt1));
- __ addi(cnt1, cnt1, 8);
- __ beqz(cnt2, LOAD_LAST); // no characters left except last load
- __ sub(cnt2, cnt2, 16);
+ // we are now 8-bytes aligned on strL
+ __ sub(cnt2, cnt2, wordSize * 2);
__ bltz(cnt2, TAIL);
__ bind(SMALL_LOOP); // smaller loop
- __ sub(cnt2, cnt2, 16);
- compare_string_8_x_LU(tmpL, tmpU, DIFF1, DIFF2);
- compare_string_8_x_LU(tmpL, tmpU, DIFF1, DIFF2);
+ __ sub(cnt2, cnt2, wordSize * 2);
+ compare_string_8_x_LU(tmpL, tmpU, strL, strU, CALCULATE_DIFFERENCE);
+ compare_string_8_x_LU(tmpL, tmpU, strL, strU, CALCULATE_DIFFERENCE);
__ bgez(cnt2, SMALL_LOOP);
- __ addi(t0, cnt2, 16);
- __ beqz(t0, LOAD_LAST);
- __ bind(TAIL); // 1..15 characters left until last load (last 4 characters)
- // Address of 8 bytes before last 4 characters in UTF-16 string
- __ shadd(cnt1, cnt2, cnt1, t0, 1);
- // Address of 16 bytes before last 4 characters in Latin1 string
- __ add(tmp2, tmp2, cnt2);
- __ ld(tmp4, Address(cnt1, -8));
- // last 16 characters before last load
- compare_string_8_x_LU(tmpL, tmpU, DIFF1, DIFF2);
- compare_string_8_x_LU(tmpL, tmpU, DIFF1, DIFF2);
- __ j(LOAD_LAST);
- __ bind(DIFF2);
- __ mv(tmpU, tmp4);
- __ bind(DIFF1);
- __ mv(tmpL, t0);
- __ j(CALCULATE_DIFFERENCE);
- __ bind(LOAD_LAST);
- // Last 4 UTF-16 characters are already pre-loaded into tmp4 by compare_string_8_x_LU.
- // No need to load it again
- __ mv(tmpU, tmp4);
- __ ld(tmpL, Address(strL));
+ __ addi(t0, cnt2, wordSize * 2);
+ __ beqz(t0, DONE);
+ __ bind(TAIL); // 1..15 characters left
+ // Aligned access. Load bytes in portions - 4, 2, 1.
+
+ __ addi(t0, cnt2, wordSize);
+ __ addi(cnt2, cnt2, wordSize * 2); // amount of characters left to process
+ __ bltz(t0, LOAD_LAST);
+ // remaining characters are greater than or equal to 8, we can do one compare_string_8_x_LU
+ compare_string_8_x_LU(tmpL, tmpU, strL, strU, CALCULATE_DIFFERENCE);
+ __ addi(cnt2, cnt2, -wordSize);
+ __ beqz(cnt2, DONE); // no character left
+ __ bind(LOAD_LAST); // cnt2 = 1..7 characters left
+
+ __ addi(cnt2, cnt2, -wordSize); // cnt2 is now an offset in strL which points to last 8 bytes
+ __ slli(t0, cnt2, 1); // t0 is now an offset in strU which points to last 16 bytes
+ __ add(strL, strL, cnt2); // Address of last 8 bytes in Latin1 string
+ __ add(strU, strU, t0); // Address of last 16 bytes in UTF-16 string
+ __ load_int_misaligned(tmpL, Address(strL), t0, false);
+ __ load_long_misaligned(tmpU, Address(strU), t0, 2);
__ inflate_lo32(tmp3, tmpL);
__ mv(tmpL, tmp3);
__ xorr(tmp3, tmpU, tmpL);
- __ beqz(tmp3, DONE);
+ __ bnez(tmp3, CALCULATE_DIFFERENCE);
+
+ __ addi(strL, strL, wordSize / 2); // Address of last 4 bytes in Latin1 string
+ __ addi(strU, strU, wordSize); // Address of last 8 bytes in UTF-16 string
+ __ load_int_misaligned(tmpL, Address(strL), t0, false);
+ __ load_long_misaligned(tmpU, Address(strU), t0, 2);
+ __ inflate_lo32(tmp3, tmpL);
+ __ mv(tmpL, tmp3);
+ __ xorr(tmp3, tmpU, tmpL);
+ __ bnez(tmp3, CALCULATE_DIFFERENCE);
+ __ j(DONE); // no character left
// Find the first different characters in the longwords and
// compute their difference.
__ bind(CALCULATE_DIFFERENCE);
__ ctzc_bit(tmp4, tmp3);
__ srl(tmp1, tmp1, tmp4);
- __ srl(tmp5, tmp5, tmp4);
+ __ srl(tmp2, tmp2, tmp4);
__ andi(tmp1, tmp1, 0xFFFF);
- __ andi(tmp5, tmp5, 0xFFFF);
- __ sub(result, tmp1, tmp5);
+ __ andi(tmp2, tmp2, 0xFFFF);
+ __ sub(result, tmp1, tmp2);
__ bind(DONE);
- __ pop_reg(spilled_regs, sp);
__ ret();
return entry;
}
@@ -2537,9 +2536,9 @@ class StubGenerator: public StubCodeGenerator {
__ xorr(tmp4, tmp1, tmp2);
__ bnez(tmp4, DIFF);
__ add(str1, str1, cnt2);
- __ ld(tmp5, Address(str1));
+ __ load_long_misaligned(tmp5, Address(str1), tmp3, isLL ? 1 : 2);
__ add(str2, str2, cnt2);
- __ ld(cnt1, Address(str2));
+ __ load_long_misaligned(cnt1, Address(str2), tmp3, isLL ? 1 : 2);
__ xorr(tmp4, tmp5, cnt1);
__ beqz(tmp4, LENGTH_DIFF);
// Find the first different characters in the longwords and
@@ -3950,6 +3949,375 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
+ // Set of L registers that correspond to a contiguous memory area.
+ // Each 64-bit register typically corresponds to 2 32-bit integers.
+ template <int L>
+ class RegCache {
+ private:
+ MacroAssembler *_masm;
+ Register _regs[L];
+
+ public:
+ RegCache(MacroAssembler *masm, RegSet rs): _masm(masm) {
+ assert(rs.size() == L, "%u registers are used to cache %u 4-byte data", rs.size(), 2 * L);
+ auto it = rs.begin();
+ for (auto &r: _regs) {
+ r = *it;
+ ++it;
+ }
+ }
+
+ // generate load for the i'th register
+ void gen_load(uint i, Register base) {
+ assert(i < L, "invalid i: %u", i);
+ __ ld(_regs[i], Address(base, 8 * i));
+ }
+
+ // add i'th 32-bit integer to dest
+ void add_u32(const Register dest, uint i, const Register rtmp = t0) {
+ assert(i < 2 * L, "invalid i: %u", i);
+
+ if (is_even(i)) {
+ // Use the bottom 32 bits. No need to mask off the top 32 bits
+ // as addw will do the right thing.
+ __ addw(dest, dest, _regs[i / 2]);
+ } else {
+ // Use the top 32 bits by right-shifting them.
+ __ srli(rtmp, _regs[i / 2], 32);
+ __ addw(dest, dest, rtmp);
+ }
+ }
+ };
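How the i-th cached 32-bit word is recovered from the paired 64-bit loads, shown as a small sketch (not part of the patch; assumes little-endian layout):

    #include <cstdint>
    static uint32_t cached_u32(const uint64_t* regs, unsigned i) {
      uint64_t pair = regs[i / 2];                   // one ld covers two 32-bit words
      return (i % 2 == 0) ? (uint32_t)pair           // even index: low half (addw uses it directly)
                          : (uint32_t)(pair >> 32);  // odd index: high half (srli by 32)
    }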
+
+ typedef RegCache<8> BufRegCache;
+
+ // a += value + x + ac;
+ // a = Integer.rotateLeft(a, s) + b;
+ void m5_FF_GG_HH_II_epilogue(BufRegCache& reg_cache,
+ Register a, Register b, Register c, Register d,
+ int k, int s, int t,
+ Register value) {
+ // a += ac
+ __ addw(a, a, t, t1);
+
+ // a += x;
+ reg_cache.add_u32(a, k);
+ // a += value;
+ __ addw(a, a, value);
+
+ // a = Integer.rotateLeft(a, s) + b;
+ __ rolw_imm(a, a, s);
+ __ addw(a, a, b);
+ }
+
+ // a += ((b & c) | ((~b) & d)) + x + ac;
+ // a = Integer.rotateLeft(a, s) + b;
+ void md5_FF(BufRegCache& reg_cache,
+ Register a, Register b, Register c, Register d,
+ int k, int s, int t,
+ Register rtmp1, Register rtmp2) {
+ // rtmp1 = b & c
+ __ andr(rtmp1, b, c);
+
+ // rtmp2 = (~b) & d
+ __ andn(rtmp2, d, b);
+
+ // rtmp1 = (b & c) | ((~b) & d)
+ __ orr(rtmp1, rtmp1, rtmp2);
+
+ m5_FF_GG_HH_II_epilogue(reg_cache, a, b, c, d, k, s, t, rtmp1);
+ }
+
+ // a += ((b & d) | (c & (~d))) + x + ac;
+ // a = Integer.rotateLeft(a, s) + b;
+ void md5_GG(BufRegCache& reg_cache,
+ Register a, Register b, Register c, Register d,
+ int k, int s, int t,
+ Register rtmp1, Register rtmp2) {
+ // rtmp1 = b & d
+ __ andr(rtmp1, b, d);
+
+ // rtmp2 = c & (~d)
+ __ andn(rtmp2, c, d);
+
+ // rtmp1 = (b & d) | (c & (~d))
+ __ orr(rtmp1, rtmp1, rtmp2);
+
+ m5_FF_GG_HH_II_epilogue(reg_cache, a, b, c, d, k, s, t, rtmp1);
+ }
+
+ // a += ((b ^ c) ^ d) + x + ac;
+ // a = Integer.rotateLeft(a, s) + b;
+ void md5_HH(BufRegCache& reg_cache,
+ Register a, Register b, Register c, Register d,
+ int k, int s, int t,
+ Register rtmp1, Register rtmp2) {
+ // rtmp1 = (b ^ c) ^ d
+ __ xorr(rtmp2, b, c);
+ __ xorr(rtmp1, rtmp2, d);
+
+ m5_FF_GG_HH_II_epilogue(reg_cache, a, b, c, d, k, s, t, rtmp1);
+ }
+
+ // a += (c ^ (b | (~d))) + x + ac;
+ // a = Integer.rotateLeft(a, s) + b;
+ void md5_II(BufRegCache& reg_cache,
+ Register a, Register b, Register c, Register d,
+ int k, int s, int t,
+ Register rtmp1, Register rtmp2) {
+ // rtmp1 = c ^ (b | (~d))
+ __ orn(rtmp2, b, d);
+ __ xorr(rtmp1, c, rtmp2);
+
+ m5_FF_GG_HH_II_epilogue(reg_cache, a, b, c, d, k, s, t, rtmp1);
+ }
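For readers less familiar with MD5, a scalar reference for the four auxiliary functions and the common step emitted by the helpers above (a sketch, not part of the patch):

    #include <cstdint>
    static uint32_t F(uint32_t b, uint32_t c, uint32_t d) { return (b & c) | (~b & d); }
    static uint32_t G(uint32_t b, uint32_t c, uint32_t d) { return (b & d) | (c & ~d); }
    static uint32_t H(uint32_t b, uint32_t c, uint32_t d) { return b ^ c ^ d; }
    static uint32_t I(uint32_t b, uint32_t c, uint32_t d) { return c ^ (b | ~d); }
    static uint32_t rotl32(uint32_t x, int s) { return (x << s) | (x >> (32 - s)); }
    // Each md5_FF/GG/HH/II call emits: a = rotl(a + f(b,c,d) + x[k] + t, s) + b
    static uint32_t md5_step(uint32_t a, uint32_t b, uint32_t f, uint32_t xk, uint32_t t, int s) {
      return rotl32(a + f + xk + t, s) + b;
    }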
+
+ // Arguments:
+ //
+ // Inputs:
+ // c_rarg0 - byte[] source+offset
+ // c_rarg1 - int[] SHA.state
+ // c_rarg2 - int offset (multi_block == True)
+ // c_rarg3 - int limit (multi_block == True)
+ //
+ // Registers:
+ // x0 zero (zero)
+ // x1 ra (return address)
+ // x2 sp (stack pointer)
+ // x3 gp (global pointer)
+ // x4 tp (thread pointer)
+ // x5 t0 (tmp register)
+ // x6 t1 (tmp register)
+ // x7 t2 state0
+ // x8 f0/s0 (frame pointer)
+ // x9 s1
+ // x10 a0 rtmp1 / c_rarg0
+ // x11 a1 rtmp2 / c_rarg1
+ // x12 a2 a / c_rarg2
+ // x13 a3 b / c_rarg3
+ // x14 a4 c
+ // x15 a5 d
+ // x16 a6 buf
+ // x17 a7 state
+ // x18 s2 ofs [saved-reg] (multi_block == True)
+ // x19 s3 limit [saved-reg] (multi_block == True)
+ // x20 s4 state1 [saved-reg]
+ // x21 s5 state2 [saved-reg]
+ // x22 s6 state3 [saved-reg]
+ // x23 s7
+ // x24 s8 buf0 [saved-reg]
+ // x25 s9 buf1 [saved-reg]
+ // x26 s10 buf2 [saved-reg]
+ // x27 s11 buf3 [saved-reg]
+ // x28 t3 buf4
+ // x29 t4 buf5
+ // x30 t5 buf6
+ // x31 t6 buf7
+ address generate_md5_implCompress(bool multi_block, const char *name) {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", name);
+ address start = __ pc();
+
+ // rotation constants
+ const int S11 = 7;
+ const int S12 = 12;
+ const int S13 = 17;
+ const int S14 = 22;
+ const int S21 = 5;
+ const int S22 = 9;
+ const int S23 = 14;
+ const int S24 = 20;
+ const int S31 = 4;
+ const int S32 = 11;
+ const int S33 = 16;
+ const int S34 = 23;
+ const int S41 = 6;
+ const int S42 = 10;
+ const int S43 = 15;
+ const int S44 = 21;
+
+ const int64_t mask32 = 0xffffffff;
+
+ Register buf_arg = c_rarg0; // a0
+ Register state_arg = c_rarg1; // a1
+ Register ofs_arg = c_rarg2; // a2
+ Register limit_arg = c_rarg3; // a3
+
+ // we'll copy the args to these registers to free up a0-a3
+ // to use for other values manipulated by instructions
+ // that can be compressed
+ Register buf = x16; // a6
+ Register state = x17; // a7
+ Register ofs = x18; // s2
+ Register limit = x19; // s3
+
+ // using x12->15 to allow compressed instructions
+ Register a = x12; // a2
+ Register b = x13; // a3
+ Register c = x14; // a4
+ Register d = x15; // a5
+
+ Register state0 = x7; // t2
+ Register state1 = x20; // s4
+ Register state2 = x21; // s5
+ Register state3 = x22; // s6
+
+ // using x10->x11 to allow compressed instructions
+ Register rtmp1 = x10; // a0
+ Register rtmp2 = x11; // a1
+
+ RegSet reg_cache_saved_regs = RegSet::of(x24, x25, x26, x27); // s8, s9, s10, s11
+ RegSet reg_cache_regs;
+ reg_cache_regs += reg_cache_saved_regs;
+ reg_cache_regs += RegSet::of(x28, x29, x30, x31); // t3, t4, t5, t6
+ BufRegCache reg_cache(_masm, reg_cache_regs);
+
+ RegSet saved_regs;
+ if (multi_block) {
+ saved_regs += RegSet::of(ofs, limit);
+ }
+ saved_regs += RegSet::of(state1, state2, state3);
+ saved_regs += reg_cache_saved_regs;
+
+ __ push_reg(saved_regs, sp);
+
+ __ mv(buf, buf_arg);
+ __ mv(state, state_arg);
+ if (multi_block) {
+ __ mv(ofs, ofs_arg);
+ __ mv(limit, limit_arg);
+ }
+
+ // to minimize the number of memory operations:
+ // read the 4 state 4-byte values in pairs, with a single ld,
+ // and split them into 2 registers
+ __ mv(t0, mask32);
+ __ ld(state0, Address(state));
+ __ srli(state1, state0, 32);
+ __ andr(state0, state0, t0);
+ __ ld(state2, Address(state, 8));
+ __ srli(state3, state2, 32);
+ __ andr(state2, state2, t0);
+
+ Label md5_loop;
+ __ BIND(md5_loop);
+
+ __ mv(a, state0);
+ __ mv(b, state1);
+ __ mv(c, state2);
+ __ mv(d, state3);
+
+ // Round 1
+ reg_cache.gen_load(0, buf);
+ md5_FF(reg_cache, a, b, c, d, 0, S11, 0xd76aa478, rtmp1, rtmp2);
+ md5_FF(reg_cache, d, a, b, c, 1, S12, 0xe8c7b756, rtmp1, rtmp2);
+ reg_cache.gen_load(1, buf);
+ md5_FF(reg_cache, c, d, a, b, 2, S13, 0x242070db, rtmp1, rtmp2);
+ md5_FF(reg_cache, b, c, d, a, 3, S14, 0xc1bdceee, rtmp1, rtmp2);
+ reg_cache.gen_load(2, buf);
+ md5_FF(reg_cache, a, b, c, d, 4, S11, 0xf57c0faf, rtmp1, rtmp2);
+ md5_FF(reg_cache, d, a, b, c, 5, S12, 0x4787c62a, rtmp1, rtmp2);
+ reg_cache.gen_load(3, buf);
+ md5_FF(reg_cache, c, d, a, b, 6, S13, 0xa8304613, rtmp1, rtmp2);
+ md5_FF(reg_cache, b, c, d, a, 7, S14, 0xfd469501, rtmp1, rtmp2);
+ reg_cache.gen_load(4, buf);
+ md5_FF(reg_cache, a, b, c, d, 8, S11, 0x698098d8, rtmp1, rtmp2);
+ md5_FF(reg_cache, d, a, b, c, 9, S12, 0x8b44f7af, rtmp1, rtmp2);
+ reg_cache.gen_load(5, buf);
+ md5_FF(reg_cache, c, d, a, b, 10, S13, 0xffff5bb1, rtmp1, rtmp2);
+ md5_FF(reg_cache, b, c, d, a, 11, S14, 0x895cd7be, rtmp1, rtmp2);
+ reg_cache.gen_load(6, buf);
+ md5_FF(reg_cache, a, b, c, d, 12, S11, 0x6b901122, rtmp1, rtmp2);
+ md5_FF(reg_cache, d, a, b, c, 13, S12, 0xfd987193, rtmp1, rtmp2);
+ reg_cache.gen_load(7, buf);
+ md5_FF(reg_cache, c, d, a, b, 14, S13, 0xa679438e, rtmp1, rtmp2);
+ md5_FF(reg_cache, b, c, d, a, 15, S14, 0x49b40821, rtmp1, rtmp2);
+
+ // Round 2
+ md5_GG(reg_cache, a, b, c, d, 1, S21, 0xf61e2562, rtmp1, rtmp2);
+ md5_GG(reg_cache, d, a, b, c, 6, S22, 0xc040b340, rtmp1, rtmp2);
+ md5_GG(reg_cache, c, d, a, b, 11, S23, 0x265e5a51, rtmp1, rtmp2);
+ md5_GG(reg_cache, b, c, d, a, 0, S24, 0xe9b6c7aa, rtmp1, rtmp2);
+ md5_GG(reg_cache, a, b, c, d, 5, S21, 0xd62f105d, rtmp1, rtmp2);
+ md5_GG(reg_cache, d, a, b, c, 10, S22, 0x02441453, rtmp1, rtmp2);
+ md5_GG(reg_cache, c, d, a, b, 15, S23, 0xd8a1e681, rtmp1, rtmp2);
+ md5_GG(reg_cache, b, c, d, a, 4, S24, 0xe7d3fbc8, rtmp1, rtmp2);
+ md5_GG(reg_cache, a, b, c, d, 9, S21, 0x21e1cde6, rtmp1, rtmp2);
+ md5_GG(reg_cache, d, a, b, c, 14, S22, 0xc33707d6, rtmp1, rtmp2);
+ md5_GG(reg_cache, c, d, a, b, 3, S23, 0xf4d50d87, rtmp1, rtmp2);
+ md5_GG(reg_cache, b, c, d, a, 8, S24, 0x455a14ed, rtmp1, rtmp2);
+ md5_GG(reg_cache, a, b, c, d, 13, S21, 0xa9e3e905, rtmp1, rtmp2);
+ md5_GG(reg_cache, d, a, b, c, 2, S22, 0xfcefa3f8, rtmp1, rtmp2);
+ md5_GG(reg_cache, c, d, a, b, 7, S23, 0x676f02d9, rtmp1, rtmp2);
+ md5_GG(reg_cache, b, c, d, a, 12, S24, 0x8d2a4c8a, rtmp1, rtmp2);
+
+ // Round 3
+ md5_HH(reg_cache, a, b, c, d, 5, S31, 0xfffa3942, rtmp1, rtmp2);
+ md5_HH(reg_cache, d, a, b, c, 8, S32, 0x8771f681, rtmp1, rtmp2);
+ md5_HH(reg_cache, c, d, a, b, 11, S33, 0x6d9d6122, rtmp1, rtmp2);
+ md5_HH(reg_cache, b, c, d, a, 14, S34, 0xfde5380c, rtmp1, rtmp2);
+ md5_HH(reg_cache, a, b, c, d, 1, S31, 0xa4beea44, rtmp1, rtmp2);
+ md5_HH(reg_cache, d, a, b, c, 4, S32, 0x4bdecfa9, rtmp1, rtmp2);
+ md5_HH(reg_cache, c, d, a, b, 7, S33, 0xf6bb4b60, rtmp1, rtmp2);
+ md5_HH(reg_cache, b, c, d, a, 10, S34, 0xbebfbc70, rtmp1, rtmp2);
+ md5_HH(reg_cache, a, b, c, d, 13, S31, 0x289b7ec6, rtmp1, rtmp2);
+ md5_HH(reg_cache, d, a, b, c, 0, S32, 0xeaa127fa, rtmp1, rtmp2);
+ md5_HH(reg_cache, c, d, a, b, 3, S33, 0xd4ef3085, rtmp1, rtmp2);
+ md5_HH(reg_cache, b, c, d, a, 6, S34, 0x04881d05, rtmp1, rtmp2);
+ md5_HH(reg_cache, a, b, c, d, 9, S31, 0xd9d4d039, rtmp1, rtmp2);
+ md5_HH(reg_cache, d, a, b, c, 12, S32, 0xe6db99e5, rtmp1, rtmp2);
+ md5_HH(reg_cache, c, d, a, b, 15, S33, 0x1fa27cf8, rtmp1, rtmp2);
+ md5_HH(reg_cache, b, c, d, a, 2, S34, 0xc4ac5665, rtmp1, rtmp2);
+
+ // Round 4
+ md5_II(reg_cache, a, b, c, d, 0, S41, 0xf4292244, rtmp1, rtmp2);
+ md5_II(reg_cache, d, a, b, c, 7, S42, 0x432aff97, rtmp1, rtmp2);
+ md5_II(reg_cache, c, d, a, b, 14, S43, 0xab9423a7, rtmp1, rtmp2);
+ md5_II(reg_cache, b, c, d, a, 5, S44, 0xfc93a039, rtmp1, rtmp2);
+ md5_II(reg_cache, a, b, c, d, 12, S41, 0x655b59c3, rtmp1, rtmp2);
+ md5_II(reg_cache, d, a, b, c, 3, S42, 0x8f0ccc92, rtmp1, rtmp2);
+ md5_II(reg_cache, c, d, a, b, 10, S43, 0xffeff47d, rtmp1, rtmp2);
+ md5_II(reg_cache, b, c, d, a, 1, S44, 0x85845dd1, rtmp1, rtmp2);
+ md5_II(reg_cache, a, b, c, d, 8, S41, 0x6fa87e4f, rtmp1, rtmp2);
+ md5_II(reg_cache, d, a, b, c, 15, S42, 0xfe2ce6e0, rtmp1, rtmp2);
+ md5_II(reg_cache, c, d, a, b, 6, S43, 0xa3014314, rtmp1, rtmp2);
+ md5_II(reg_cache, b, c, d, a, 13, S44, 0x4e0811a1, rtmp1, rtmp2);
+ md5_II(reg_cache, a, b, c, d, 4, S41, 0xf7537e82, rtmp1, rtmp2);
+ md5_II(reg_cache, d, a, b, c, 11, S42, 0xbd3af235, rtmp1, rtmp2);
+ md5_II(reg_cache, c, d, a, b, 2, S43, 0x2ad7d2bb, rtmp1, rtmp2);
+ md5_II(reg_cache, b, c, d, a, 9, S44, 0xeb86d391, rtmp1, rtmp2);
+
+ __ addw(state0, state0, a);
+ __ addw(state1, state1, b);
+ __ addw(state2, state2, c);
+ __ addw(state3, state3, d);
+
+ if (multi_block) {
+ __ addi(buf, buf, 64);
+ __ addi(ofs, ofs, 64);
+ // if (ofs <= limit) goto md5_loop
+ __ bge(limit, ofs, md5_loop);
+ __ mv(c_rarg0, ofs); // return ofs
+ }
+
+ // to minimize the number of memory operations:
+ // write back the 4 state 4-byte values in pairs, with a single sd
+ __ mv(t0, mask32);
+ __ andr(state0, state0, t0);
+ __ slli(state1, state1, 32);
+ __ orr(state0, state0, state1);
+ __ sd(state0, Address(state));
+ __ andr(state2, state2, t0);
+ __ slli(state3, state3, 32);
+ __ orr(state2, state2, state3);
+ __ sd(state2, Address(state, 8));
+
+ __ pop_reg(saved_regs, sp);
+ __ ret();
+
+ return (address) start;
+ }
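The paired 64-bit state load at the top of the stub and the paired store at the bottom correspond to this little-endian packing (a sketch, not part of the patch):

    #include <cstdint>
    static void unpack_state(uint64_t w, uint32_t& lo, uint32_t& hi) {
      lo = (uint32_t)w;          // andr with mask32
      hi = (uint32_t)(w >> 32);  // srli by 32
    }
    static uint64_t pack_state(uint32_t lo, uint32_t hi) {
      return ((uint64_t)hi << 32) | (uint64_t)lo;   // slli by 32, then orr
    }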
+
#if INCLUDE_JFR
static void jfr_prologue(address the_pc, MacroAssembler* _masm, Register thread) {
@@ -4164,6 +4532,11 @@ class StubGenerator: public StubCodeGenerator {
generate_compare_long_strings();
generate_string_indexof_stubs();
+
+ if (UseMD5Intrinsics) {
+ StubRoutines::_md5_implCompress = generate_md5_implCompress(false, "md5_implCompress");
+ StubRoutines::_md5_implCompressMB = generate_md5_implCompress(true, "md5_implCompressMB");
+ }
#endif // COMPILER2_OR_JVMCI
}
diff --git a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
index 6c9174da53b16..1bcc761d06f1e 100644
--- a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
@@ -600,7 +600,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
// monitor entry size: see picture of stack set
// (generate_method_entry) and frame_amd64.hpp
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
// total overhead size: entry_size + (saved fp through expr stack
// bottom). be sure to change this if you add/subtract anything
@@ -673,7 +673,7 @@ void TemplateInterpreterGenerator::lock_method() {
// synchronize method
const Address access_flags(xmethod, Method::access_flags_offset());
const Address monitor_block_top(fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
#ifdef ASSERT
__ lwu(x10, access_flags);
diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
index b8d176d0d4503..22e2531a83ad0 100644
--- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
@@ -3739,7 +3739,7 @@ void TemplateTable::monitorenter() {
fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
const Address monitor_block_bot(
fp, frame::interpreter_frame_initial_sp_offset * wordSize);
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
Label allocated;
@@ -3837,7 +3837,7 @@ void TemplateTable::monitorexit() {
fp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
const Address monitor_block_bot(
fp, frame::interpreter_frame_initial_sp_offset * wordSize);
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
Label found;
diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.cpp b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
index ddddb6df3bcee..4ad0b16b6236d 100644
--- a/src/hotspot/cpu/riscv/vm_version_riscv.cpp
+++ b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
@@ -171,9 +171,13 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
}
- if (UseMD5Intrinsics) {
- warning("MD5 intrinsics are not available on this CPU.");
- FLAG_SET_DEFAULT(UseMD5Intrinsics, false);
+ if (UseVectorizedMismatchIntrinsic) {
+ warning("VectorizedMismatch intrinsic is not available on this CPU.");
+ FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
+ }
+
+ if (FLAG_IS_DEFAULT(UseMD5Intrinsics)) {
+ FLAG_SET_DEFAULT(UseMD5Intrinsics, true);
}
if (UseRVV) {
@@ -261,8 +265,8 @@ void VM_Version::c2_initialize() {
if (MaxVectorSize > _initial_vector_length) {
warning("Current system only supports max RVV vector length %d. Set MaxVectorSize to %d",
_initial_vector_length, _initial_vector_length);
+ MaxVectorSize = _initial_vector_length;
}
- MaxVectorSize = _initial_vector_length;
} else {
vm_exit_during_initialization(err_msg("Unsupported MaxVectorSize: %d", (int)MaxVectorSize));
}
diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.hpp b/src/hotspot/cpu/riscv/vm_version_riscv.hpp
index 6d9ea5c2ac33a..93ebd9e4e7dc3 100644
--- a/src/hotspot/cpu/riscv/vm_version_riscv.hpp
+++ b/src/hotspot/cpu/riscv/vm_version_riscv.hpp
@@ -201,6 +201,9 @@ class VM_Version : public Abstract_VM_Version {
constexpr static bool supports_stack_watermark_barrier() { return true; }
static bool supports_on_spin_wait() { return UseZihintpause; }
+
+ // RISCV64 supports fast class initialization checks
+ static bool supports_fast_class_init_checks() { return true; }
};
#endif // CPU_RISCV_VM_VERSION_RISCV_HPP
diff --git a/src/hotspot/cpu/s390/assembler_s390.hpp b/src/hotspot/cpu/s390/assembler_s390.hpp
index 5762161662dbc..0df4d10d47d32 100644
--- a/src/hotspot/cpu/s390/assembler_s390.hpp
+++ b/src/hotspot/cpu/s390/assembler_s390.hpp
@@ -140,7 +140,8 @@ class RelAddr {
if ((target == nullptr) || (target == pc)) {
return 0; // Yet unknown branch destination.
} else {
- guarantee(is_in_range_of_RelAddr(target, pc, shortForm), "target not within reach");
+ guarantee(is_in_range_of_RelAddr(target, pc, shortForm),
+ "target not within reach at " INTPTR_FORMAT ", distance = " INTX_FORMAT, p2i(pc), (target - pc) );
return (int)((target - pc)>>1);
}
}
diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
index 02b1e730c59e8..40edca6559aa4 100644
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -83,60 +83,63 @@ void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
if (breakAtEntry) z_illtrap(0xC1);
}
-void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
+void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
const int hdr_offset = oopDesc::mark_offset_in_bytes();
- assert_different_registers(hdr, obj, disp_hdr);
- verify_oop(obj, FILE_AND_LINE);
+ const Register tmp = Z_R1_scratch;
+
+ assert_different_registers(Rmark, Roop, Rbox, tmp);
+
+ verify_oop(Roop, FILE_AND_LINE);
// Load object header.
- z_lg(hdr, Address(obj, hdr_offset));
+ z_lg(Rmark, Address(Roop, hdr_offset));
// Save object being locked into the BasicObjectLock...
- z_stg(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
+ z_stg(Roop, Address(Rbox, BasicObjectLock::obj_offset()));
if (DiagnoseSyncOnValueBasedClasses != 0) {
- load_klass(Z_R1_scratch, obj);
- testbit(Address(Z_R1_scratch, Klass::access_flags_offset()), exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
- z_btrue(slow_case);
+ load_klass(tmp, Roop);
+ testbit(Address(tmp, Klass::access_flags_offset()), exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
+ branch_optimized(Assembler::bcondAllOne, slow_case);
}
assert(LockingMode != LM_MONITOR, "LM_MONITOR is already handled, by emit_lock()");
if (LockingMode == LM_LIGHTWEIGHT) {
- Unimplemented();
+ lightweight_lock(Roop, Rmark, tmp, slow_case);
} else if (LockingMode == LM_LEGACY) {
NearLabel done;
// and mark it as unlocked.
- z_oill(hdr, markWord::unlocked_value);
+ z_oill(Rmark, markWord::unlocked_value);
// Save unlocked object header into the displaced header location on the stack.
- z_stg(hdr, Address(disp_hdr, (intptr_t) 0));
+ z_stg(Rmark, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));
// Test if object header is still the same (i.e. unlocked), and if so, store the
// displaced header address in the object header. If it is not the same, get the
// object header instead.
- z_csg(hdr, disp_hdr, hdr_offset, obj);
+ z_csg(Rmark, Rbox, hdr_offset, Roop);
// If the object header was the same, we're done.
branch_optimized(Assembler::bcondEqual, done);
- // If the object header was not the same, it is now in the hdr register.
+ // If the object header was not the same, it is now in the Rmark register.
// => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
//
- // 1) (hdr & markWord::lock_mask_in_place) == 0
- // 2) rsp <= hdr
- // 3) hdr <= rsp + page_size
+ // 1) (Rmark & markWord::lock_mask_in_place) == 0
+ // 2) rsp <= Rmark
+ // 3) Rmark <= rsp + page_size
//
// These 3 tests can be done by evaluating the following expression:
//
- // (hdr - Z_SP) & (~(page_size-1) | markWord::lock_mask_in_place)
+ // (Rmark - Z_SP) & (~(page_size-1) | markWord::lock_mask_in_place)
//
// assuming both the stack pointer and page_size have their least
// significant 2 bits cleared and page_size is a power of 2
- z_sgr(hdr, Z_SP);
+ z_sgr(Rmark, Z_SP);
load_const_optimized(Z_R0_scratch, (~(os::vm_page_size() - 1) | markWord::lock_mask_in_place));
- z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
+ z_ngr(Rmark, Z_R0_scratch); // AND sets CC (result eq/ne 0).
// For recursive locking, the result is zero. => Save it in the displaced header
- // location (null in the displaced hdr location indicates recursive locking).
- z_stg(hdr, Address(disp_hdr, (intptr_t) 0));
+ // location (null in the displaced Rmark location indicates recursive locking).
+ z_stg(Rmark, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));
// Otherwise we don't care about the result and handle locking via runtime call.
branch_optimized(Assembler::bcondNotZero, slow_case);
// done
@@ -144,35 +147,41 @@ void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hd
}
}
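A compact sketch of the recursive stack-lock test described in the comment above (not part of the patch; the two-bit lock mask is an assumption matching markWord::lock_mask_in_place):

    #include <cstdint>
    static bool recursive_stack_lock(uintptr_t mark, uintptr_t sp, uintptr_t page_size) {
      const uintptr_t lock_mask = 0x3;   // assumed low two lock bits
      return ((mark - sp) & (~(page_size - 1) | lock_mask)) == 0;
    }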
-void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
- const int aligned_mask = BytesPerWord -1;
+void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
const int hdr_offset = oopDesc::mark_offset_in_bytes();
- assert_different_registers(hdr, obj, disp_hdr);
+
+ assert_different_registers(Rmark, Roop, Rbox);
+
NearLabel done;
if (LockingMode != LM_LIGHTWEIGHT) {
// Load displaced header.
- z_ltg(hdr, Address(disp_hdr, (intptr_t) 0));
- // If the loaded hdr is null we had recursive locking, and we are done.
+ z_ltg(Rmark, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));
+ // If the loaded Rmark is null we had recursive locking, and we are done.
z_bre(done);
}
// Load object.
- z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
- verify_oop(obj, FILE_AND_LINE);
+ z_lg(Roop, Address(Rbox, BasicObjectLock::obj_offset()));
+ verify_oop(Roop, FILE_AND_LINE);
if (LockingMode == LM_LIGHTWEIGHT) {
- Unimplemented();
- } else {
+ const Register tmp = Z_R1_scratch;
+ z_lg(Rmark, Address(Roop, hdr_offset));
+ z_lgr(tmp, Rmark);
+ z_nill(tmp, markWord::monitor_value);
+ branch_optimized(Assembler::bcondNotZero, slow_case);
+ lightweight_unlock(Roop, Rmark, tmp, slow_case);
+ } else if (LockingMode == LM_LEGACY) {
// Test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object. If the object header is not pointing to
// the displaced header, get the object header instead.
- z_csg(disp_hdr, hdr, hdr_offset, obj);
+ z_csg(Rbox, Rmark, hdr_offset, Roop);
// If the object header was not pointing to the displaced header,
// we do unlocking via runtime call.
branch_optimized(Assembler::bcondNotEqual, slow_case);
- // done
}
+ // done
bind(done);
}
diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp
index 1ff914b7b71d5..7a4f76af1546e 100644
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,17 +41,18 @@
void initialize_body(Register objectFields, Register len_in_bytes, Register Rzero);
// locking
- // hdr : Used to hold locked markWord to be CASed into obj, contents destroyed.
- // obj : Must point to the object to lock, contents preserved.
- // disp_hdr: Must point to the displaced header location, contents preserved.
- // Returns code offset at which to add null check debug information.
- void lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case);
+ // Rmark : Used to hold locked markWord to be CASed into obj, contents destroyed.
+ // Roop : Must point to the object to lock, contents preserved.
+ // Rbox : Must point to the displaced header location, contents preserved.
+ // Z_R1_scratch : Used as temp and will be killed
+ void lock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case);
// unlocking
- // hdr : Used to hold original markWord to be CASed back into obj, contents destroyed.
- // obj : Must point to the object to lock, contents preserved.
- // disp_hdr: Must point to the displaced header location, contents destroyed.
- void unlock_object(Register hdr, Register obj, Register lock, Label& slow_case);
+ // Rmark : Used to hold original markWord to be CASed back into obj, contents destroyed.
+ // Roop : Must point to the object to lock, contents preserved.
+ // Rbox : Must point to the displaced header location, contents destroyed.
+ // Z_R1_scratch : Used as temp and will be killed
+ void unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case);
void initialize_object(
Register obj, // result: Pointer to object after successful allocation.
diff --git a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
index 28acb398c1faa..257148827be4e 100644
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
@@ -233,6 +233,12 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// Other registers used in this stub.
const Register handler_addr = Z_R4;
+ if (AbortVMOnException) {
+ save_live_registers(sasm);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), Z_EXC_OOP);
+ restore_live_registers(sasm);
+ }
+
// Verify that only exception_oop, is valid at this time.
__ invalidate_registers(Z_EXC_OOP, Z_EXC_PC);
diff --git a/src/hotspot/cpu/s390/downcallLinker_s390.cpp b/src/hotspot/cpu/s390/downcallLinker_s390.cpp
index baee7d7a043d7..f831da9075599 100644
--- a/src/hotspot/cpu/s390/downcallLinker_s390.cpp
+++ b/src/hotspot/cpu/s390/downcallLinker_s390.cpp
@@ -23,8 +23,76 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "code/codeBlob.hpp"
+#include "code/codeCache.hpp"
+#include "code/vmreg.inline.hpp"
+#include "compiler/oopMap.hpp"
+#include "logging/logStream.hpp"
+#include "memory/resourceArea.hpp"
#include "prims/downcallLinker.hpp"
-#include "utilities/debug.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+
+#define __ _masm->
+
+class DowncallStubGenerator : public StubCodeGenerator {
+ BasicType* _signature;
+ int _num_args;
+ BasicType _ret_bt;
+ const ABIDescriptor& _abi;
+
+ const GrowableArray<VMStorage>& _input_registers;
+ const GrowableArray<VMStorage>& _output_registers;
+
+ bool _needs_return_buffer;
+ int _captured_state_mask;
+ bool _needs_transition;
+
+ int _frame_complete;
+ int _frame_size_slots;
+ OopMapSet* _oop_maps;
+ public:
+ DowncallStubGenerator(CodeBuffer* buffer,
+ BasicType* signature,
+ int num_args,
+ BasicType ret_bt,
+ const ABIDescriptor& abi,
+ const GrowableArray<VMStorage>& input_registers,
+ const GrowableArray<VMStorage>& output_registers,
+ bool needs_return_buffer,
+ int captured_state_mask,
+ bool needs_transition)
+ :StubCodeGenerator(buffer, PrintMethodHandleStubs),
+ _signature(signature),
+ _num_args(num_args),
+ _ret_bt(ret_bt),
+ _abi(abi),
+ _input_registers(input_registers),
+ _output_registers(output_registers),
+ _needs_return_buffer(needs_return_buffer),
+ _captured_state_mask(captured_state_mask),
+ _needs_transition(needs_transition),
+ _frame_complete(0),
+ _frame_size_slots(0),
+ _oop_maps(nullptr) {
+ }
+ void generate();
+ int frame_complete() const {
+ return _frame_complete;
+ }
+
+ int framesize() const {
+ return (_frame_size_slots >> (LogBytesPerWord - LogBytesPerInt));
+ }
+
+ OopMapSet* oop_maps() const {
+ return _oop_maps;
+ }
+};
+
+static const int native_invoker_code_base_size = 512;
+static const int native_invoker_size_per_args = 8;
RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
int num_args,
@@ -35,6 +103,197 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
bool needs_return_buffer,
int captured_state_mask,
bool needs_transition) {
- Unimplemented();
- return nullptr;
+
+ int code_size = native_invoker_code_base_size + (num_args * native_invoker_size_per_args);
+ int locs_size = 1; //must be non zero
+ CodeBuffer code("nep_invoker_blob", code_size, locs_size);
+
+ DowncallStubGenerator g(&code, signature, num_args, ret_bt, abi,
+ input_registers, output_registers,
+ needs_return_buffer, captured_state_mask,
+ needs_transition);
+ g.generate();
+ code.log_section_sizes("nep_invoker_blob");
+
+ RuntimeStub* stub =
+ RuntimeStub::new_runtime_stub("nep_invoker_blob",
+ &code,
+ g.frame_complete(),
+ g.framesize(),
+ g.oop_maps(), false);
+
+#ifndef PRODUCT
+ LogTarget(Trace, foreign, downcall) lt;
+ if (lt.is_enabled()) {
+ ResourceMark rm;
+ LogStream ls(lt);
+ stub->print_on(&ls);
+ }
+#endif
+
+ return stub;
+}
+
+void DowncallStubGenerator::generate() {
+ Register call_target_address = Z_R1_scratch,
+ tmp = Z_R0_scratch;
+
+ VMStorage shuffle_reg = _abi._scratch1;
+
+ JavaCallingConvention in_conv;
+ NativeCallingConvention out_conv(_input_registers);
+ ArgumentShuffle arg_shuffle(_signature, _num_args, _signature, _num_args, &in_conv, &out_conv, shuffle_reg);
+
+#ifndef PRODUCT
+ LogTarget(Trace, foreign, downcall) lt;
+ if (lt.is_enabled()) {
+ ResourceMark rm;
+ LogStream ls(lt);
+ arg_shuffle.print_on(&ls);
+ }
+#endif
+
+ assert(_abi._shadow_space_bytes == frame::z_abi_160_size, "expected space according to ABI");
+ int allocated_frame_size = _abi._shadow_space_bytes;
+ allocated_frame_size += arg_shuffle.out_arg_bytes();
+
+ assert(!_needs_return_buffer, "unexpected needs_return_buffer");
+ RegSpiller out_reg_spiller(_output_registers);
+ int spill_offset = allocated_frame_size;
+ allocated_frame_size += BytesPerWord;
+
+ StubLocations locs;
+ locs.set(StubLocations::TARGET_ADDRESS, _abi._scratch2);
+
+ if (_captured_state_mask != 0) {
+ __ block_comment("{ _captured_state_mask is set");
+ locs.set_frame_data(StubLocations::CAPTURED_STATE_BUFFER, allocated_frame_size);
+ allocated_frame_size += BytesPerWord;
+ __ block_comment("} _captured_state_mask is set");
+ }
+
+ allocated_frame_size = align_up(allocated_frame_size, StackAlignmentInBytes);
+ _frame_size_slots = allocated_frame_size >> LogBytesPerInt;
+
+ _oop_maps = _needs_transition ? new OopMapSet() : nullptr;
+ address start = __ pc();
+
+ __ save_return_pc();
+ __ push_frame(allocated_frame_size, Z_R11); // Create a new frame for the wrapper.
+
+ _frame_complete = __ pc() - start; // frame build complete.
+
+ if (_needs_transition) {
+ __ block_comment("{ thread java2native");
+ __ get_PC(Z_R1_scratch);
+ address the_pc = __ pc();
+ __ set_last_Java_frame(Z_SP, Z_R1_scratch);
+
+ OopMap* map = new OopMap(_frame_size_slots, 0);
+ _oop_maps->add_gc_map(the_pc - start, map);
+
+ // State transition
+ __ set_thread_state(_thread_in_native);
+ __ block_comment("} thread java2native");
+ }
+ __ block_comment("{ argument shuffle");
+ arg_shuffle.generate(_masm, shuffle_reg, frame::z_jit_out_preserve_size, _abi._shadow_space_bytes, locs);
+ __ block_comment("} argument shuffle");
+
+ __ call(as_Register(locs.get(StubLocations::TARGET_ADDRESS)));
+
+ //////////////////////////////////////////////////////////////////////////////
+
+ if (_captured_state_mask != 0) {
+ __ block_comment("{ save thread local");
+
+ out_reg_spiller.generate_spill(_masm, spill_offset);
+
+ __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state));
+ __ z_lg(Z_ARG1, Address(Z_SP, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER)));
+ __ load_const_optimized(Z_ARG2, _captured_state_mask);
+ __ call(call_target_address);
+
+ out_reg_spiller.generate_fill(_masm, spill_offset);
+
+ __ block_comment("} save thread local");
+ }
+
+ //////////////////////////////////////////////////////////////////////////////
+
+ Label L_after_safepoint_poll;
+ Label L_safepoint_poll_slow_path;
+ Label L_reguard;
+ Label L_after_reguard;
+
+ if (_needs_transition) {
+ __ block_comment("{ thread native2java");
+ __ set_thread_state(_thread_in_native_trans);
+
+ if (!UseSystemMemoryBarrier) {
+ __ z_fence(); // Order state change wrt. safepoint poll.
+ }
+
+ __ safepoint_poll(L_safepoint_poll_slow_path, tmp);
+
+ __ load_and_test_int(tmp, Address(Z_thread, JavaThread::suspend_flags_offset()));
+ __ z_brne(L_safepoint_poll_slow_path);
+
+ __ bind(L_after_safepoint_poll);
+
+ // change thread state
+ __ set_thread_state(_thread_in_Java);
+
+ __ block_comment("reguard stack check");
+ __ z_cli(Address(Z_thread, JavaThread::stack_guard_state_offset() + in_ByteSize(sizeof(StackOverflow::StackGuardState) - 1)),
+ StackOverflow::stack_guard_yellow_reserved_disabled);
+ __ z_bre(L_reguard);
+ __ bind(L_after_reguard);
+
+ __ reset_last_Java_frame();
+ __ block_comment("} thread native2java");
+ }
+
+ __ pop_frame();
+ __ restore_return_pc(); // This is the way back to the caller.
+ __ z_br(Z_R14);
+
+ //////////////////////////////////////////////////////////////////////////////
+
+ if (_needs_transition) {
+ __ block_comment("{ L_safepoint_poll_slow_path");
+ __ bind(L_safepoint_poll_slow_path);
+
+ // Need to save the native result registers around any runtime calls.
+ out_reg_spiller.generate_spill(_masm, spill_offset);
+
+ __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, JavaThread::check_special_condition_for_native_trans));
+ __ z_lgr(Z_ARG1, Z_thread);
+ __ call(call_target_address);
+
+ out_reg_spiller.generate_fill(_masm, spill_offset);
+
+ __ z_bru(L_after_safepoint_poll);
+ __ block_comment("} L_safepoint_poll_slow_path");
+
+ //////////////////////////////////////////////////////////////////////////////
+ __ block_comment("{ L_reguard");
+ __ bind(L_reguard);
+
+ // Need to save the native result registers around any runtime calls.
+ out_reg_spiller.generate_spill(_masm, spill_offset);
+
+ __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, SharedRuntime::reguard_yellow_pages));
+ __ call(call_target_address);
+
+ out_reg_spiller.generate_fill(_masm, spill_offset);
+
+ __ z_bru(L_after_reguard);
+
+ __ block_comment("} L_reguard");
+ }
+
+ //////////////////////////////////////////////////////////////////////////////
+
+ __ flush();
}
diff --git a/src/hotspot/cpu/s390/foreignGlobals_s390.cpp b/src/hotspot/cpu/s390/foreignGlobals_s390.cpp
index d3a318536bd7b..9796ab4ffe4d8 100644
--- a/src/hotspot/cpu/s390/foreignGlobals_s390.cpp
+++ b/src/hotspot/cpu/s390/foreignGlobals_s390.cpp
@@ -23,34 +23,209 @@
*/
#include "precompiled.hpp"
-#include "code/vmreg.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "code/vmreg.inline.hpp"
+#include "runtime/jniHandles.hpp"
+#include "runtime/jniHandles.inline.hpp"
+#include "oops/typeArrayOop.inline.hpp"
+#include "oops/oopCast.inline.hpp"
#include "prims/foreignGlobals.hpp"
-#include "utilities/debug.hpp"
+#include "prims/foreignGlobals.inline.hpp"
+#include "prims/vmstorage.hpp"
+#include "utilities/formatBuffer.hpp"
-class MacroAssembler;
+#define __ masm->
+
+bool ABIDescriptor::is_volatile_reg(Register reg) const {
+ return _integer_volatile_registers.contains(reg);
+}
+
+bool ABIDescriptor::is_volatile_reg(FloatRegister reg) const {
+ return _float_argument_registers.contains(reg)
+ || _float_additional_volatile_registers.contains(reg);
+}
bool ForeignGlobals::is_foreign_linker_supported() {
- return false;
+ return true;
}
const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
- Unimplemented();
- return {};
+ oop abi_oop = JNIHandles::resolve_non_null(jabi);
+ ABIDescriptor abi;
+
+ objArrayOop inputStorage = jdk_internal_foreign_abi_ABIDescriptor::inputStorage(abi_oop);
+ parse_register_array(inputStorage, StorageType::INTEGER, abi._integer_argument_registers, as_Register);
+ parse_register_array(inputStorage, StorageType::FLOAT, abi._float_argument_registers, as_FloatRegister);
+
+ objArrayOop outputStorage = jdk_internal_foreign_abi_ABIDescriptor::outputStorage(abi_oop);
+ parse_register_array(outputStorage, StorageType::INTEGER, abi._integer_return_registers, as_Register);
+ parse_register_array(outputStorage, StorageType::FLOAT, abi._float_return_registers, as_FloatRegister);
+
+ objArrayOop volatileStorage = jdk_internal_foreign_abi_ABIDescriptor::volatileStorage(abi_oop);
+ parse_register_array(volatileStorage, StorageType::INTEGER, abi._integer_volatile_registers, as_Register);
+ parse_register_array(volatileStorage, StorageType::FLOAT, abi._float_additional_volatile_registers, as_FloatRegister);
+
+ abi._stack_alignment_bytes = jdk_internal_foreign_abi_ABIDescriptor::stackAlignment(abi_oop);
+ abi._shadow_space_bytes = jdk_internal_foreign_abi_ABIDescriptor::shadowSpace(abi_oop);
+
+ abi._scratch1 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch1(abi_oop));
+ abi._scratch2 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch2(abi_oop));
+
+ return abi;
}
int RegSpiller::pd_reg_size(VMStorage reg) {
- Unimplemented();
- return -1;
+ if (reg.type() == StorageType::INTEGER || reg.type() == StorageType::FLOAT) {
+ return 8;
+ }
+ return 0; // stack and BAD
}
void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
- Unimplemented();
+ if (reg.type() == StorageType::INTEGER) {
+ __ reg2mem_opt(as_Register(reg), Address(Z_SP, offset), true);
+ } else if (reg.type() == StorageType::FLOAT) {
+ __ freg2mem_opt(as_FloatRegister(reg), Address(Z_SP, offset), true);
+ } else {
+ // stack and BAD
+ }
}
void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
- Unimplemented();
+ if (reg.type() == StorageType::INTEGER) {
+ __ mem2reg_opt(as_Register(reg), Address(Z_SP, offset), true);
+ } else if (reg.type() == StorageType::FLOAT) {
+ __ mem2freg_opt(as_FloatRegister(reg), Address(Z_SP, offset), true);
+ } else {
+ // stack and BAD
+ }
+}
+
+static int reg2offset(VMStorage vms, int stk_bias) {
+ assert(!vms.is_reg(), "wrong usage");
+ return vms.index_or_offset() + stk_bias;
+}
+
+static void move_reg(MacroAssembler* masm, int out_stk_bias,
+ VMStorage from_reg, VMStorage to_reg) {
+ int out_bias = 0;
+ switch (to_reg.type()) {
+ case StorageType::INTEGER:
+ if (to_reg.segment_mask() == REG64_MASK && from_reg.segment_mask() == REG32_MASK ) {
+ // see CCallingConventionRequiresIntsAsLongs
+ __ z_lgfr(as_Register(to_reg), as_Register(from_reg));
+ } else {
+ __ lgr_if_needed(as_Register(to_reg), as_Register(from_reg));
+ }
+ break;
+ case StorageType::STACK:
+ out_bias = out_stk_bias; //fallthrough
+ case StorageType::FRAME_DATA: {
+ // Integer types always get a 64 bit slot in C.
+ if (from_reg.segment_mask() == REG32_MASK) {
+ // see CCallingConventionRequiresIntsAsLongs
+ __ z_lgfr(as_Register(from_reg), as_Register(from_reg));
+ }
+ switch (to_reg.stack_size()) {
+ case 8: __ reg2mem_opt(as_Register(from_reg), Address(Z_SP, reg2offset(to_reg, out_bias)), true); break;
+ case 4: __ reg2mem_opt(as_Register(from_reg), Address(Z_SP, reg2offset(to_reg, out_bias)), false); break;
+ default: ShouldNotReachHere();
+ }
+ } break;
+ default: ShouldNotReachHere();
+ }
+}
+
+static void move_float(MacroAssembler* masm, int out_stk_bias,
+ VMStorage from_reg, VMStorage to_reg) {
+ switch (to_reg.type()) {
+ case StorageType::FLOAT:
+ if (from_reg.segment_mask() == REG64_MASK)
+ __ move_freg_if_needed(as_FloatRegister(to_reg), T_DOUBLE, as_FloatRegister(from_reg), T_DOUBLE);
+ else
+ __ move_freg_if_needed(as_FloatRegister(to_reg), T_FLOAT, as_FloatRegister(from_reg), T_FLOAT);
+ break;
+ case StorageType::STACK:
+ if (from_reg.segment_mask() == REG64_MASK) {
+ assert(to_reg.stack_size() == 8, "size should match");
+ __ freg2mem_opt(as_FloatRegister(from_reg), Address(Z_SP, reg2offset(to_reg, out_stk_bias)), true);
+ } else {
+ assert(to_reg.stack_size() == 4, "size should match");
+ __ freg2mem_opt(as_FloatRegister(from_reg), Address(Z_SP, reg2offset(to_reg, out_stk_bias)), false);
+ }
+ break;
+ default: ShouldNotReachHere();
+ }
+}
+
+static void move_stack(MacroAssembler* masm, Register tmp_reg, int in_stk_bias, int out_stk_bias,
+ VMStorage from_reg, VMStorage to_reg) {
+ int out_bias = 0;
+ Address from_addr(Z_R11, reg2offset(from_reg, in_stk_bias));
+ switch (to_reg.type()) {
+ case StorageType::INTEGER:
+ switch (from_reg.stack_size()) {
+ case 8: __ mem2reg_opt(as_Register(to_reg), from_addr, true);break;
+ case 4: __ mem2reg_opt(as_Register(to_reg), from_addr, false);break;
+ default: ShouldNotReachHere();
+ }
+ break;
+ case StorageType::FLOAT:
+ switch (from_reg.stack_size()) {
+ case 8: __ mem2freg_opt(as_FloatRegister(to_reg), from_addr, true);break;
+ case 4: __ mem2freg_opt(as_FloatRegister(to_reg), from_addr, false);break;
+ default: ShouldNotReachHere();
+ }
+ break;
+ case StorageType::STACK:
+ out_bias = out_stk_bias; // fallthrough
+ case StorageType::FRAME_DATA: {
+ switch (from_reg.stack_size()) {
+ case 8: __ mem2reg_opt(tmp_reg, from_addr, true); break;
+ case 4: if (to_reg.stack_size() == 8) {
+ __ mem2reg_signed_opt(tmp_reg, from_addr);
+ } else {
+ __ mem2reg_opt(tmp_reg, from_addr, false);
+ }
+ break;
+ default: ShouldNotReachHere();
+ }
+ switch (to_reg.stack_size()) {
+ case 8: __ reg2mem_opt(tmp_reg, Address (Z_SP, reg2offset(to_reg, out_bias)), true); break;
+ case 4: __ reg2mem_opt(tmp_reg, Address (Z_SP, reg2offset(to_reg, out_bias)), false); break;
+ default: ShouldNotReachHere();
+ }
+ } break;
+ default: ShouldNotReachHere();
+ }
}
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias, const StubLocations& locs) const {
- Unimplemented();
+ Register tmp_reg = as_Register(tmp);
+ for (int i = 0; i < _moves.length(); i++) {
+ Move move = _moves.at(i);
+ VMStorage from_reg = move.from;
+ VMStorage to_reg = move.to;
+
+ // replace any placeholders
+ if (from_reg.type() == StorageType::PLACEHOLDER) {
+ from_reg = locs.get(from_reg);
+ }
+ if (to_reg.type() == StorageType::PLACEHOLDER) {
+ to_reg = locs.get(to_reg);
+ }
+
+ switch (from_reg.type()) {
+ case StorageType::INTEGER:
+ move_reg(masm, out_stk_bias, from_reg, to_reg);
+ break;
+ case StorageType::FLOAT:
+ move_float(masm, out_stk_bias, from_reg, to_reg);
+ break;
+ case StorageType::STACK:
+ move_stack(masm, tmp_reg, in_stk_bias, out_stk_bias, from_reg, to_reg);
+ break;
+ default: ShouldNotReachHere();
+ }
+ }
}
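
The move helpers above widen 32-bit integer sources (REG32_MASK) with LGFR before handing them to native code, since the s390 C calling convention passes ints as sign-extended 64-bit values (the CCallingConventionRequiresIntsAsLongs references). A standalone sketch of what that widening does, not HotSpot code:

    #include <cstdint>
    #include <cstdio>

    // What z_lgfr achieves: sign-extend a 32-bit value into a full 64-bit register.
    int64_t widen_like_lgfr(int32_t v) {
      return static_cast<int64_t>(v);
    }

    int main() {
      // -1 stays -1 (0xffffffffffffffff) instead of becoming 0x00000000ffffffff.
      std::printf("%llx\n", (unsigned long long)widen_like_lgfr(-1));
      return 0;
    }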
diff --git a/src/hotspot/cpu/s390/foreignGlobals_s390.hpp b/src/hotspot/cpu/s390/foreignGlobals_s390.hpp
index 8b86a2b06a601..4ff3b3e40b4ff 100644
--- a/src/hotspot/cpu/s390/foreignGlobals_s390.hpp
+++ b/src/hotspot/cpu/s390/foreignGlobals_s390.hpp
@@ -24,6 +24,23 @@
#ifndef CPU_S390_VM_FOREIGN_GLOBALS_S390_HPP
#define CPU_S390_VM_FOREIGN_GLOBALS_S390_HPP
-class ABIDescriptor {};
+struct ABIDescriptor {
+ GrowableArray<VMStorage> _integer_argument_registers;
+ GrowableArray<VMStorage> _integer_return_registers;
+ GrowableArray<VMStorage> _float_argument_registers;
+ GrowableArray<VMStorage> _float_return_registers;
+
+ GrowableArray<VMStorage> _integer_volatile_registers;
+ GrowableArray<VMStorage> _float_additional_volatile_registers;
+
+ int32_t _stack_alignment_bytes;
+ int32_t _shadow_space_bytes;
+
+ VMStorage _scratch1;
+ VMStorage _scratch2;
+
+ bool is_volatile_reg(Register reg) const;
+ bool is_volatile_reg(FloatRegister reg) const;
+};
#endif // CPU_S390_VM_FOREIGN_GLOBALS_S390_HPP
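
The new ABIDescriptor mirrors the layout used by the other ports: register lists parsed from the Java-side ABI plus two scratch slots. The is_volatile_reg() queries are presumably membership checks against those lists; a minimal sketch under that assumption, with hypothetical stand-in types rather than the real GrowableArray/Register classes:

    #include <algorithm>
    #include <vector>

    struct RegSketch {
      int encoding;
      bool operator==(const RegSketch& other) const { return encoding == other.encoding; }
    };

    struct AbiSketch {
      std::vector<RegSketch> integer_volatile_registers;  // caller-saved per the native ABI

      // A register is volatile (i.e. not callee-saved) if the ABI lists it as such.
      bool is_volatile_reg(const RegSketch& reg) const {
        return std::find(integer_volatile_registers.begin(),
                         integer_volatile_registers.end(), reg)
               != integer_volatile_registers.end();
      }
    };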
diff --git a/src/hotspot/cpu/s390/frame_s390.cpp b/src/hotspot/cpu/s390/frame_s390.cpp
index 23547fa6617b4..dbaa243eb1cac 100644
--- a/src/hotspot/cpu/s390/frame_s390.cpp
+++ b/src/hotspot/cpu/s390/frame_s390.cpp
@@ -218,13 +218,32 @@ frame frame::sender_for_entry_frame(RegisterMap *map) const {
}
UpcallStub::FrameData* UpcallStub::frame_data_for_frame(const frame& frame) const {
- ShouldNotCallThis();
- return nullptr;
+ assert(frame.is_upcall_stub_frame(), "wrong frame");
+ // need unextended_sp here, since normal sp is wrong for interpreter callees
+ return reinterpret_cast<UpcallStub::FrameData*>(
+ reinterpret_cast<address>(frame.unextended_sp()) + in_bytes(_frame_data_offset));
}
bool frame::upcall_stub_frame_is_first() const {
- ShouldNotCallThis();
- return false;
+ assert(is_upcall_stub_frame(), "must be optimized entry frame");
+ UpcallStub* blob = _cb->as_upcall_stub();
+ JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
+ return jfa->last_Java_sp() == nullptr;
+}
+
+frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
+ assert(map != nullptr, "map must be set");
+ UpcallStub* blob = _cb->as_upcall_stub();
+ // Java frame called from C; skip all C frames and return top C
+ // frame of that chunk as the sender
+ JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
+ assert(!upcall_stub_frame_is_first(), "must have a frame anchor to go back to");
+ assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
+ map->clear();
+ assert(map->include_argument_oops(), "should be set by clear");
+ frame fr(jfa->last_Java_sp(), jfa->last_Java_pc());
+
+ return fr;
}
frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
@@ -653,7 +672,6 @@ intptr_t *frame::initial_deoptimization_info() {
return fp();
}
-// Pointer beyond the "oldest/deepest" BasicObjectLock on stack.
BasicObjectLock* frame::interpreter_frame_monitor_end() const {
return interpreter_frame_monitors();
}
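
frame_data_for_frame above finds the FrameData block at a fixed byte offset from the frame's unextended SP (the unextended SP matters because the plain SP is wrong for interpreter callees). A self-contained sketch of that pointer arithmetic with a placeholder FrameData type:

    #include <cstdint>

    struct FrameDataSketch { /* JavaFrameAnchor, saved registers, ... */ };

    FrameDataSketch* frame_data_at(intptr_t* unextended_sp, int frame_data_offset_in_bytes) {
      // Same shape as the reinterpret_cast chain above: a byte-wise offset from the SP.
      return reinterpret_cast<FrameDataSketch*>(
          reinterpret_cast<char*>(unextended_sp) + frame_data_offset_in_bytes);
    }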
diff --git a/src/hotspot/cpu/s390/frame_s390.hpp b/src/hotspot/cpu/s390/frame_s390.hpp
index 85ce9c023e3da..3f81cd254d080 100644
--- a/src/hotspot/cpu/s390/frame_s390.hpp
+++ b/src/hotspot/cpu/s390/frame_s390.hpp
@@ -488,11 +488,6 @@
template <typename RegisterMapT>
static void update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr);
- // Additional interface for interpreter frames:
- static int interpreter_frame_interpreterstate_size_in_bytes();
- static int interpreter_frame_monitor_size_in_bytes();
-
-
// template interpreter state
inline z_ijava_state* ijava_state_unchecked() const;
diff --git a/src/hotspot/cpu/s390/frame_s390.inline.hpp b/src/hotspot/cpu/s390/frame_s390.inline.hpp
index dfa68940bac5c..008c4b182f88f 100644
--- a/src/hotspot/cpu/s390/frame_s390.inline.hpp
+++ b/src/hotspot/cpu/s390/frame_s390.inline.hpp
@@ -254,15 +254,6 @@ inline int frame::interpreter_frame_monitor_size() {
WordsPerLong /* Number of stack slots for a Java long. */);
}
-inline int frame::interpreter_frame_monitor_size_in_bytes() {
- // Number of bytes for a monitor.
- return frame::interpreter_frame_monitor_size() * wordSize;
-}
-
-inline int frame::interpreter_frame_interpreterstate_size_in_bytes() {
- return z_ijava_state_size;
-}
-
inline Method** frame::interpreter_frame_method_addr() const {
return (Method**)&(ijava_state()->method);
}
@@ -352,12 +343,10 @@ inline frame frame::sender(RegisterMap* map) const {
// update it accordingly.
map->set_include_argument_oops(false);
- if (is_entry_frame()) {
- return sender_for_entry_frame(map);
- }
- if (is_interpreted_frame()) {
- return sender_for_interpreter_frame(map);
- }
+ if (is_entry_frame()) return sender_for_entry_frame(map);
+ if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
+ if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
+
assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
if (_cb != nullptr) return sender_for_compiled_frame(map);
diff --git a/src/hotspot/cpu/s390/globalDefinitions_s390.hpp b/src/hotspot/cpu/s390/globalDefinitions_s390.hpp
index 99906bb369e54..2232215a5875f 100644
--- a/src/hotspot/cpu/s390/globalDefinitions_s390.hpp
+++ b/src/hotspot/cpu/s390/globalDefinitions_s390.hpp
@@ -28,7 +28,7 @@
#define ShortenBranches true
-const int StackAlignmentInBytes = 16;
+const int StackAlignmentInBytes = 8;
#define SUPPORTS_NATIVE_CX8
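
Dropping StackAlignmentInBytes from 16 to 8 matches the 8-byte stack alignment the s390 ELF ABI actually requires, and it feeds straight into the align_up(frame_bottom_offset, StackAlignmentInBytes) rounding in the upcall stub further down. A quick arithmetic sketch with a made-up frame size:

    #include <cstdio>

    constexpr int align_up_sketch(int value, int alignment) {
      return (value + alignment - 1) & ~(alignment - 1);  // alignment must be a power of two
    }

    int main() {
      std::printf("8-byte aligned:  %d\n", align_up_sketch(180, 8));   // 184
      std::printf("16-byte aligned: %d\n", align_up_sketch(180, 16));  // 192
      return 0;
    }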
diff --git a/src/hotspot/cpu/s390/interp_masm_s390.cpp b/src/hotspot/cpu/s390/interp_masm_s390.cpp
index 8bfc8249ed289..35016b60fa41b 100644
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp
@@ -838,7 +838,7 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
// Check that all monitors are unlocked.
{
NearLabel loop, exception, entry, restart;
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
// We use Z_ARG2 so that if we go slow path it will be the correct
// register for unlock_object to pass to VM directly.
Register R_current_monitor = Z_ARG2;
@@ -951,6 +951,11 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
// Test if reserved zone needs to be enabled.
Label no_reserved_zone_enabling;
+ // check if already enabled - if so no re-enabling needed
+ assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
+ z_ly(Z_R0, Address(Z_thread, JavaThread::stack_guard_state_offset()));
+ compare32_and_branch(Z_R0, StackOverflow::stack_guard_enabled, bcondEqual, no_reserved_zone_enabling);
+
// Compare frame pointers. There is no good stack pointer, as with stack
// frame compression we can get different SPs when we do calls. A subsequent
// call could have a smaller SP, so that this compare succeeds for an
@@ -977,9 +982,10 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
// lock object
//
// Registers alive
-// monitor - Address of the BasicObjectLock to be used for locking,
+// monitor (Z_R10) - Address of the BasicObjectLock to be used for locking,
// which must be initialized with the object to lock.
-// object - Address of the object to be locked.
+// object (Z_R11, Z_R2) - Address of the object to be locked.
+// templateTable (monitorenter) uses Z_R2 for the object.
void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
if (LockingMode == LM_MONITOR) {
@@ -987,7 +993,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
return;
}
- // template code:
+ // template code: (for LM_LEGACY)
//
// markWord displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
@@ -1001,68 +1007,77 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// InterpreterRuntime::monitorenter(THREAD, monitor);
// }
- const Register displaced_header = Z_ARG5;
+ const int hdr_offset = oopDesc::mark_offset_in_bytes();
+
+ const Register header = Z_ARG5;
const Register object_mark_addr = Z_ARG4;
const Register current_header = Z_ARG5;
+ const Register tmp = Z_R1_scratch;
- NearLabel done;
- NearLabel slow_case;
+ NearLabel done, slow_case;
- // markWord displaced_header = obj->mark().set_unlocked();
+ // markWord header = obj->mark().set_unlocked();
- // Load markWord from object into displaced_header.
- z_lg(displaced_header, oopDesc::mark_offset_in_bytes(), object);
+ // Load markWord from object into header.
+ z_lg(header, hdr_offset, object);
if (DiagnoseSyncOnValueBasedClasses != 0) {
- load_klass(Z_R1_scratch, object);
- testbit(Address(Z_R1_scratch, Klass::access_flags_offset()), exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
+ load_klass(tmp, object);
+ testbit(Address(tmp, Klass::access_flags_offset()), exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
z_btrue(slow_case);
}
- // Set displaced_header to be (markWord of object | UNLOCK_VALUE).
- z_oill(displaced_header, markWord::unlocked_value);
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ lightweight_lock(object, /* mark word */ header, tmp, slow_case);
+ } else if (LockingMode == LM_LEGACY) {
- // monitor->lock()->set_displaced_header(displaced_header);
+ // Set header to be (markWord of object | UNLOCK_VALUE).
+ // This will not change anything if it was unlocked before.
+ z_oill(header, markWord::unlocked_value);
- // Initialize the box (Must happen before we update the object mark!).
- z_stg(displaced_header, in_bytes(BasicObjectLock::lock_offset()) +
- BasicLock::displaced_header_offset_in_bytes(), monitor);
+ // monitor->lock()->set_displaced_header(displaced_header);
+ const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
+ const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();
- // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
+ // Initialize the box (Must happen before we update the object mark!).
+ z_stg(header, mark_offset, monitor);
- // Store stack address of the BasicObjectLock (this is monitor) into object.
- add2reg(object_mark_addr, oopDesc::mark_offset_in_bytes(), object);
+ // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
- z_csg(displaced_header, monitor, 0, object_mark_addr);
- assert(current_header==displaced_header, "must be same register"); // Identified two registers from z/Architecture.
+ // not necessary, use offset in instruction directly.
+ // add2reg(object_mark_addr, hdr_offset, object);
- z_bre(done);
+ // Store stack address of the BasicObjectLock (this is monitor) into object.
+ z_csg(header, monitor, hdr_offset, object);
+ assert(current_header == header,
+ "must be same register"); // Identified two registers from z/Architecture.
- // } else if (THREAD->is_lock_owned((address)displaced_header))
- // // Simple recursive case.
- // monitor->lock()->set_displaced_header(nullptr);
+ z_bre(done);
- // We did not see an unlocked object so try the fast recursive case.
+ // } else if (THREAD->is_lock_owned((address)displaced_header))
+ // // Simple recursive case.
+ // monitor->lock()->set_displaced_header(nullptr);
- // Check if owner is self by comparing the value in the markWord of object
- // (current_header) with the stack pointer.
- z_sgr(current_header, Z_SP);
+ // We did not see an unlocked object so try the fast recursive case.
- assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
+ // Check if owner is self by comparing the value in the markWord of object
+ // (current_header) with the stack pointer.
+ z_sgr(current_header, Z_SP);
- // The prior sequence "LGR, NGR, LTGR" can be done better
- // (Z_R1 is temp and not used after here).
- load_const_optimized(Z_R0, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
- z_ngr(Z_R0, current_header); // AND sets CC (result eq/ne 0)
+ assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
- // If condition is true we are done and hence we can store 0 in the displaced
- // header indicating it is a recursive lock and be done.
- z_brne(slow_case);
- z_release(); // Membar unnecessary on zarch AND because the above csg does a sync before and after.
- z_stg(Z_R0/*==0!*/, in_bytes(BasicObjectLock::lock_offset()) +
- BasicLock::displaced_header_offset_in_bytes(), monitor);
- z_bru(done);
+ // The prior sequence "LGR, NGR, LTGR" can be done better
+ // (Z_R1 is temp and not used after here).
+ load_const_optimized(Z_R0, (~(os::vm_page_size() - 1) | markWord::lock_mask_in_place));
+ z_ngr(Z_R0, current_header); // AND sets CC (result eq/ne 0)
+ // If condition is true we are done and hence we can store 0 in the displaced
+ // header indicating it is a recursive lock and be done.
+ z_brne(slow_case);
+ z_release(); // Membar unnecessary on zarch AND because the above csg does a sync before and after.
+ z_stg(Z_R0/*==0!*/, mark_offset, monitor);
+ }
+ z_bru(done);
// } else {
// // Slow path.
// InterpreterRuntime::monitorenter(THREAD, monitor);
@@ -1070,8 +1085,16 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// None of the above fast optimizations worked so we have to get into the
// slow case of monitor enter.
bind(slow_case);
- call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
-
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ // for lightweight locking we need to use monitorenter_obj, see interpreterRuntime.cpp
+ call_VM(noreg,
+ CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj),
+ object);
+ } else {
+ call_VM(noreg,
+ CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
+ monitor);
+ }
// }
bind(done);
@@ -1092,7 +1115,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
}
// else {
- // template code:
+ // template code: (for LM_LEGACY):
//
// if ((displaced_header = monitor->displaced_header()) == nullptr) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to null.
@@ -1105,10 +1128,12 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
// InterpreterRuntime::monitorexit(monitor);
// }
- const Register displaced_header = Z_ARG4;
- const Register current_header = Z_R1;
+ const int hdr_offset = oopDesc::mark_offset_in_bytes();
+
+ const Register header = Z_ARG4;
+ const Register current_header = Z_R1_scratch;
Address obj_entry(monitor, BasicObjectLock::obj_offset());
- Label done;
+ Label done, slow_case;
if (object == noreg) {
// In the template interpreter, we must assure that the object
@@ -1118,35 +1143,63 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
z_lg(object, obj_entry);
}
- assert_different_registers(monitor, object, displaced_header, current_header);
+ assert_different_registers(monitor, object, header, current_header);
// if ((displaced_header = monitor->displaced_header()) == nullptr) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to null.
// monitor->set_obj(nullptr);
- clear_mem(obj_entry, sizeof(oop));
+ // monitor->lock()->set_displaced_header(displaced_header);
+ const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
+ const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();
- // Test first if we are in the fast recursive case.
- MacroAssembler::load_and_test_long(displaced_header,
- Address(monitor, in_bytes(BasicObjectLock::lock_offset()) +
- BasicLock::displaced_header_offset_in_bytes()));
- z_bre(done); // displaced_header == 0 -> goto done
+ clear_mem(obj_entry, sizeof(oop));
+ if (LockingMode != LM_LIGHTWEIGHT) {
+ // Test first if we are in the fast recursive case.
+ MacroAssembler::load_and_test_long(header, Address(monitor, mark_offset));
+ z_bre(done); // header == 0 -> goto done
+ }
// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(nullptr);
// If we still have a lightweight lock, unlock the object and be done.
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ // Check for non-symmetric locking. This is allowed by the spec and the interpreter
+ // must handle it.
+
+ Register tmp = current_header;
- // The markword is expected to be at offset 0.
- assert(oopDesc::mark_offset_in_bytes() == 0, "unlock_object: review code below");
+ // First check for lock-stack underflow.
+ z_lgf(tmp, Address(Z_thread, JavaThread::lock_stack_top_offset()));
+ compareU32_and_branch(tmp, (unsigned)LockStack::start_offset(), Assembler::bcondNotHigh, slow_case);
- // We have the displaced header in displaced_header. If the lock is still
- // lightweight, it will contain the monitor address and we'll store the
- // displaced header back into the object's mark word.
- z_lgr(current_header, monitor);
- z_csg(current_header, displaced_header, 0, object);
- z_bre(done);
+ // Then check if the top of the lock-stack matches the unlocked object.
+ z_aghi(tmp, -oopSize);
+ z_lg(tmp, Address(Z_thread, tmp));
+ compare64_and_branch(tmp, object, Assembler::bcondNotEqual, slow_case);
+
+ z_lg(header, Address(object, hdr_offset));
+ z_lgr(tmp, header);
+ z_nill(tmp, markWord::monitor_value);
+ z_brne(slow_case);
+
+ lightweight_unlock(object, header, tmp, slow_case);
+
+ z_bru(done);
+ } else {
+ // The markword is expected to be at offset 0.
+ // This is not required on s390, at least not here.
+ assert(hdr_offset == 0, "unlock_object: review code below");
+
+ // We have the displaced header in header. If the lock is still
+ // lightweight, it will contain the monitor address and we'll store the
+ // displaced header back into the object's mark word.
+ z_lgr(current_header, monitor);
+ z_csg(current_header, header, hdr_offset, object);
+ z_bre(done);
+ }
// } else {
// // Slow path.
@@ -1154,6 +1207,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
// The lock has been converted into a heavy lock and hence
// we need to get into the slow case.
+ bind(slow_case);
z_stg(object, obj_entry); // Restore object entry, has been cleared above.
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
@@ -2033,7 +2087,7 @@ void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty,
const Register Rcurr_slot = Rtemp1;
const Register Rlimit = Rtemp2;
- const jint delta = -frame::interpreter_frame_monitor_size() * wordSize;
+ const jint delta = -frame::interpreter_frame_monitor_size_in_bytes();
assert((delta & LongAlignmentMask) == 0,
"sizeof BasicObjectLock must be even number of doublewords");
@@ -2214,6 +2268,6 @@ void InterpreterMacroAssembler::pop_interpreter_frame(Register return_pc, Regist
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
if (VerifyFPU) {
- unimplemented("verfiyFPU");
+ unimplemented("verifyFPU");
}
}
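
The LM_LIGHTWEIGHT paths added to lock_object/unlock_object above maintain a per-thread lock-stack: locking pushes the object after an overflow check, unlocking first checks for underflow and that the top entry is the object being released, and anything else (including the non-symmetric locking the comment mentions) goes to the runtime. A conceptual sketch, not HotSpot code:

    #include <cstddef>
    #include <vector>

    struct LockStackSketch {
      std::vector<const void*> entries;

      bool try_push(const void* obj, std::size_t capacity) {
        if (entries.size() >= capacity) return false;  // would overflow -> slow path
        entries.push_back(obj);
        return true;
      }

      bool try_pop(const void* obj) {
        if (entries.empty() || entries.back() != obj) return false;  // underflow or mismatch -> slow path
        entries.pop_back();
        return true;
      }
    };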
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
index 1960e6d3e0764..8a56f3e4c2b30 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
@@ -3165,11 +3165,15 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
- z_lr(temp, displacedHeader);
+ z_lgr(temp, displacedHeader);
z_nill(temp, markWord::monitor_value);
z_brne(object_has_monitor);
- if (LockingMode != LM_MONITOR) {
+ if (LockingMode == LM_MONITOR) {
+ // Set NE to indicate 'failure' -> take slow-path
+ z_ltgr(oop, oop);
+ z_bru(done);
+ } else if (LockingMode == LM_LEGACY) {
// Set mark to markWord | markWord::unlocked_value.
z_oill(displacedHeader, markWord::unlocked_value);
@@ -3186,23 +3190,23 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
z_csg(displacedHeader, box, 0, oop);
assert(currentHeader == displacedHeader, "must be same register"); // Identified two registers from z/Architecture.
z_bre(done);
- } else {
- // Set NE to indicate 'failure' -> take slow-path
- z_ltgr(oop, oop);
- z_bru(done);
- }
- // We did not see an unlocked object so try the fast recursive case.
+ // We did not see an unlocked object so try the fast recursive case.
- z_sgr(currentHeader, Z_SP);
- load_const_optimized(temp, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
+ z_sgr(currentHeader, Z_SP);
+ load_const_optimized(temp, (~(os::vm_page_size() - 1) | markWord::lock_mask_in_place));
- z_ngr(currentHeader, temp);
- // z_brne(done);
- // z_release();
- z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
+ z_ngr(currentHeader, temp);
+ // z_brne(done);
+ // z_release();
+ z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
- z_bru(done);
+ z_bru(done);
+ } else {
+ assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+ lightweight_lock(oop, displacedHeader, temp, done);
+ z_bru(done);
+ }
Register zero = temp;
Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
@@ -3214,8 +3218,10 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
z_lghi(zero, 0);
// If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
- // Store a non-null value into the box.
- z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
+ if (LockingMode != LM_LIGHTWEIGHT) {
+ // Store a non-null value into the box.
+ z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
+ }
#ifdef ASSERT
z_brne(done);
// We've acquired the monitor, check some invariants.
@@ -3238,11 +3244,13 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg
Register temp = temp1;
Register monitor = temp2;
+ const int hdr_offset = oopDesc::mark_offset_in_bytes();
+
Label done, object_has_monitor;
BLOCK_COMMENT("compiler_fast_unlock_object {");
- if (LockingMode != LM_MONITOR) {
+ if (LockingMode == LM_LEGACY) {
// Find the lock address and load the displaced header from the stack.
// if the displaced header is zero, we have a recursive unlock.
load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
@@ -3251,27 +3259,41 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
- z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
+ z_lg(currentHeader, hdr_offset, oop);
guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ z_lgr(temp, currentHeader);
+ }
z_nill(currentHeader, markWord::monitor_value);
z_brne(object_has_monitor);
- if (LockingMode != LM_MONITOR) {
+ if (LockingMode == LM_MONITOR) {
+ // Set NE to indicate 'failure' -> take slow-path
+ z_ltgr(oop, oop);
+ z_bru(done);
+ } else if (LockingMode == LM_LEGACY) {
// Check if it is still a light weight lock, this is true if we see
// the stack address of the basicLock in the markWord of the object
// copy box to currentHeader such that csg does not kill it.
z_lgr(currentHeader, box);
z_csg(currentHeader, displacedHeader, 0, oop);
- z_bru(done); // Csg sets CR as desired.
+ z_bru(done); // csg sets CR as desired.
} else {
- // Set NE to indicate 'failure' -> take slow-path
- z_ltgr(oop, oop);
+ assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+
+ // Don't load currentHeader again from the stack top after the monitor check, as it is possible
+ // some other thread modified it.
+ // currentHeader is altered, but its contents are copied in temp as well.
+ lightweight_unlock(oop, temp, currentHeader, done);
z_bru(done);
}
+ // In case of LM_LIGHTWEIGHT, we may reach here with (temp & ObjectMonitor::ANONYMOUS_OWNER) != 0.
+ // This is handled like owner thread mismatches: We take the slow path.
+
// Handle existing monitor.
bind(object_has_monitor);
- z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop); // CurrentHeader is tagged with monitor_value set.
+ z_lg(currentHeader, hdr_offset, oop); // CurrentHeader is tagged with monitor_value set.
load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
z_brne(done);
load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
@@ -5621,3 +5643,103 @@ SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value
SkipIfEqual::~SkipIfEqual() {
_masm->bind(_label);
}
+
+// Implements lightweight-locking.
+// Branches to slow upon failure to lock the object.
+// Falls through upon success.
+//
+// - obj: the object to be locked, contents preserved.
+// - hdr: the header, already loaded from obj, contents destroyed.
+// Note: make sure Z_R1 is not manipulated here when C2 compiler is in play
+void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register temp, Label& slow_case) {
+
+ assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
+ assert_different_registers(obj, hdr, temp);
+
+ // First we need to check if the lock-stack has room for pushing the object reference.
+ z_lgf(temp, Address(Z_thread, JavaThread::lock_stack_top_offset()));
+
+ compareU32_and_branch(temp, (unsigned)LockStack::end_offset()-1, bcondHigh, slow_case);
+
+ // attempting a lightweight_lock
+ // Load (object->mark() | 1) into hdr
+ z_oill(hdr, markWord::unlocked_value);
+
+ z_lgr(temp, hdr);
+
+ // Clear lock-bits from hdr (locked state)
+ z_xilf(temp, markWord::unlocked_value);
+
+ z_csg(hdr, temp, oopDesc::mark_offset_in_bytes(), obj);
+ branch_optimized(Assembler::bcondNotEqual, slow_case);
+
+ // After successful lock, push object on lock-stack
+ z_lgf(temp, Address(Z_thread, JavaThread::lock_stack_top_offset()));
+ z_stg(obj, Address(Z_thread, temp));
+ z_ahi(temp, oopSize);
+ z_st(temp, Address(Z_thread, JavaThread::lock_stack_top_offset()));
+
+ // as locking was successful, set CC to EQ
+ z_cr(temp, temp);
+}
+
+// Implements lightweight-unlocking.
+// Branches to slow upon failure.
+// Falls through upon success.
+//
+// - obj: the object to be unlocked
+// - hdr: the (pre-loaded) header of the object, will be destroyed
+// - Z_R1_scratch: will be killed in case of Interpreter & C1 Compiler
+void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow) {
+
+ assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
+ assert_different_registers(obj, hdr, tmp);
+
+#ifdef ASSERT
+ {
+ // Check that hdr is lightweight-locked.
+ Label hdr_ok;
+ z_lgr(tmp, hdr);
+ z_nill(tmp, markWord::lock_mask_in_place);
+ z_bre(hdr_ok);
+ stop("Header is not lightweight-locked");
+ bind(hdr_ok);
+ }
+ {
+ // The following checks rely on the fact that LockStack is only ever modified by
+ // its owning thread, even if the lock got inflated concurrently; removal of LockStack
+ // entries after inflation is delayed in that case.
+
+ // Check for lock-stack underflow.
+ Label stack_ok;
+ z_lgf(tmp, Address(Z_thread, JavaThread::lock_stack_top_offset()));
+ compareU32_and_branch(tmp, (unsigned)LockStack::start_offset(), Assembler::bcondHigh, stack_ok);
+ stop("Lock-stack underflow");
+ bind(stack_ok);
+ }
+ {
+ // Check if the top of the lock-stack matches the unlocked object.
+ Label tos_ok;
+ z_aghi(tmp, -oopSize);
+ z_lg(tmp, Address(Z_thread, tmp));
+ compare64_and_branch(tmp, obj, Assembler::bcondEqual, tos_ok);
+ stop("Top of lock-stack does not match the unlocked object");
+ bind(tos_ok);
+ }
+#endif // ASSERT
+
+ z_lgr(tmp, hdr);
+ z_oill(tmp, markWord::unlocked_value);
+ z_csg(hdr, tmp, oopDesc::mark_offset_in_bytes(), obj);
+ branch_optimized(Assembler::bcondNotEqual, slow);
+
+ // After successful unlock, pop object from lock-stack
+#ifdef ASSERT
+ z_lgf(tmp, Address(Z_thread, JavaThread::lock_stack_top_offset()));
+ z_aghi(tmp, -oopSize);
+ z_agr(tmp, Z_thread);
+ z_xc(0, oopSize-1, tmp, 0, tmp); // wipe out lock-stack entry
+#endif
+ z_alsi(in_bytes(JavaThread::lock_stack_top_offset()), Z_thread, -oopSize); // pop object
+ z_cr(tmp, tmp); // set CC to EQ
+}
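
lightweight_lock/lightweight_unlock above drive the whole state change through one CSG on the object's mark word: lock expects the "unlocked" header (markWord::unlocked_value set) and installs the same header with that bit cleared, unlock does the reverse, and a failed compare-and-swap means slow path. A standalone illustration of that transition using std::atomic in place of CSG:

    #include <atomic>
    #include <cstdint>

    constexpr uint64_t unlocked_value = 1;  // mirrors markWord::unlocked_value

    // Lock: CAS from "mark | unlocked" to the same mark with the bit cleared.
    bool lightweight_lock_sketch(std::atomic<uint64_t>& mark) {
      uint64_t expected = mark.load() | unlocked_value;       // what z_oill builds in hdr
      uint64_t locked   = expected ^ unlocked_value;          // what z_xilf builds in temp
      return mark.compare_exchange_strong(expected, locked);  // the csg; failure -> slow path
    }

    // Unlock: CAS from the locked mark back to "mark | unlocked".
    bool lightweight_unlock_sketch(std::atomic<uint64_t>& mark) {
      uint64_t expected = mark.load();                        // the pre-loaded header (hdr)
      uint64_t unlocked = expected | unlocked_value;          // what z_oill builds in tmp
      return mark.compare_exchange_strong(expected, unlocked);
    }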
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.hpp b/src/hotspot/cpu/s390/macroAssembler_s390.hpp
index fad35cf08b215..bf14b42e2d1b3 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp
@@ -722,6 +722,8 @@ class MacroAssembler: public Assembler {
void compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2);
void compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2);
+ void lightweight_lock(Register obj, Register hdr, Register tmp, Label& slow);
+ void lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow);
void resolve_jobject(Register value, Register tmp1, Register tmp2);
diff --git a/src/hotspot/cpu/s390/methodHandles_s390.cpp b/src/hotspot/cpu/s390/methodHandles_s390.cpp
index aaccdbabb9e22..ef8722f2499c0 100644
--- a/src/hotspot/cpu/s390/methodHandles_s390.cpp
+++ b/src/hotspot/cpu/s390/methodHandles_s390.cpp
@@ -349,7 +349,16 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
void MethodHandles::jump_to_native_invoker(MacroAssembler* _masm, Register nep_reg, Register temp_target) {
BLOCK_COMMENT("jump_to_native_invoker {");
- __ should_not_reach_here();
+ assert(nep_reg != noreg, "required register");
+
+ // Load the invoker, as NEP -> .invoker
+ __ verify_oop(nep_reg);
+
+ __ z_lg(temp_target, Address(nep_reg,
+ NONZERO(jdk_internal_foreign_abi_NativeEntryPoint::downcall_stub_address_offset_in_bytes())));
+
+ __ z_br(temp_target);
+
BLOCK_COMMENT("} jump_to_native_invoker");
}
@@ -387,6 +396,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
} else if (iid == vmIntrinsics::_linkToNative) {
assert(for_compiler_entry, "only compiler entry is supported");
jump_to_native_invoker(_masm, member_reg, temp1);
+ return;
}
// The method is a member invoker used by direct method handles.
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index 26469e2fb3d58..05b607ec03c52 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -1716,7 +1716,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ add2reg(r_box, lock_offset, Z_SP);
// Try fastpath for locking.
- // Fast_lock kills r_temp_1, r_temp_2. (Don't use R1 as temp, won't work!)
+ // Fast_lock kills r_temp_1, r_temp_2.
+ // In case of DiagnoseSyncOnValueBasedClasses, the content of Z_R1_scratch
+ // will be destroyed, so avoid using Z_R1 as temp here.
__ compiler_fast_lock_object(r_oop, r_box, r_tmp1, r_tmp2);
__ z_bre(done);
@@ -1915,7 +1917,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ add2reg(r_box, lock_offset, Z_SP);
// Try fastpath for unlocking.
- __ compiler_fast_unlock_object(r_oop, r_box, r_tmp1, r_tmp2); // Don't use R1 as temp.
+ // Fast_unlock kills r_tmp1, r_tmp2.
+ __ compiler_fast_unlock_object(r_oop, r_box, r_tmp1, r_tmp2);
__ z_bre(done);
// Slow path for unlocking.
diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp
index 78765c1835062..689c760567593 100644
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp
@@ -4143,7 +4143,7 @@ void TemplateTable::monitorenter() {
// Check for null object.
__ null_check(Z_tos);
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
NearLabel allocated;
// Initialize entry pointer.
const Register Rfree_slot = Z_tmp_1;
@@ -4238,7 +4238,7 @@ void TemplateTable::monitorexit() {
// Find matching slot.
{
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
NearLabel entry, loop;
const Register Rbot = Z_ARG3; // Points to word under bottom of monitor block.
diff --git a/src/hotspot/cpu/s390/upcallLinker_s390.cpp b/src/hotspot/cpu/s390/upcallLinker_s390.cpp
index 3e1fb04218b51..b748ec547ccf1 100644
--- a/src/hotspot/cpu/s390/upcallLinker_s390.cpp
+++ b/src/hotspot/cpu/s390/upcallLinker_s390.cpp
@@ -22,15 +22,287 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "logging/logStream.hpp"
+#include "memory/resourceArea.hpp"
#include "prims/upcallLinker.hpp"
-#include "utilities/debug.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/signature.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "utilities/formatBuffer.hpp"
+#include "utilities/globalDefinitions.hpp"
+#define __ _masm->
+
+// for callee saved regs, according to the caller's ABI
+static int compute_reg_save_area_size(const ABIDescriptor& abi) {
+ int size = 0;
+ for (int i = 0; i < Register::number_of_registers; i++) {
+ Register reg = as_Register(i);
+ // Z_SP saved/restored by prologue/epilogue
+ if (reg == Z_SP) continue;
+ if (!abi.is_volatile_reg(reg)) {
+ size += 8; // bytes
+ }
+ }
+
+ for (int i = 0; i < FloatRegister::number_of_registers; i++) {
+ FloatRegister reg = as_FloatRegister(i);
+ if (!abi.is_volatile_reg(reg)) {
+ size += 8; // bytes
+ }
+ }
+
+ return size;
+}
+
+static void preserve_callee_saved_registers(MacroAssembler* _masm, const ABIDescriptor& abi, int reg_save_area_offset) {
+ // 1. iterate all registers in the architecture
+ // - check if they are volatile or not for the given abi
+ // - if NOT, we need to save it here
+
+ int offset = reg_save_area_offset;
+
+ __ block_comment("{ preserve_callee_saved_regs ");
+ for (int i = 0; i < Register::number_of_registers; i++) {
+ Register reg = as_Register(i);
+ // Z_SP saved/restored by prologue/epilogue
+ if (reg == Z_SP) continue;
+ if (!abi.is_volatile_reg(reg)) {
+ __ z_stg(reg, Address(Z_SP, offset));
+ offset += 8;
+ }
+ }
+
+ for (int i = 0; i < FloatRegister::number_of_registers; i++) {
+ FloatRegister reg = as_FloatRegister(i);
+ if (!abi.is_volatile_reg(reg)) {
+ __ z_std(reg, Address(Z_SP, offset));
+ offset += 8;
+ }
+ }
+
+ __ block_comment("} preserve_callee_saved_regs ");
+}
+
+static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescriptor& abi, int reg_save_area_offset) {
+ // 1. iterate all registers in the architecture
+ // - check if they are volatile or not for the given abi
+ // - if NOT, we need to restore it here
+
+ int offset = reg_save_area_offset;
+
+ __ block_comment("{ restore_callee_saved_regs ");
+ for (int i = 0; i < Register::number_of_registers; i++) {
+ Register reg = as_Register(i);
+ // Z_SP saved/restored by prologue/epilogue
+ if (reg == Z_SP) continue;
+ if (!abi.is_volatile_reg(reg)) {
+ __ z_lg(reg, Address(Z_SP, offset));
+ offset += 8;
+ }
+ }
+
+ for (int i = 0; i < FloatRegister::number_of_registers; i++) {
+ FloatRegister reg = as_FloatRegister(i);
+ if (!abi.is_volatile_reg(reg)) {
+ __ z_ld(reg, Address(Z_SP, offset));
+ offset += 8;
+ }
+ }
+
+ __ block_comment("} restore_callee_saved_regs ");
+}
+
+static const int upcall_stub_code_base_size = 1024; // depends on GC (resolve_jobject)
+static const int upcall_stub_size_per_arg = 16; // arg save & restore + move
address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
BasicType* in_sig_bt, int total_in_args,
BasicType* out_sig_bt, int total_out_args,
BasicType ret_type,
jobject jabi, jobject jconv,
bool needs_return_buffer, int ret_buf_size) {
- ShouldNotCallThis();
- return nullptr;
+ ResourceMark rm;
+ const ABIDescriptor abi = ForeignGlobals::parse_abi_descriptor(jabi);
+ const CallRegs call_regs = ForeignGlobals::parse_call_regs(jconv);
+ int code_size = upcall_stub_code_base_size + (total_in_args * upcall_stub_size_per_arg);
+ CodeBuffer buffer("upcall_stub", code_size, /* locs_size = */ 0);
+
+ Register call_target_address = Z_R1_scratch;
+
+ VMStorage shuffle_reg = abi._scratch1;
+ JavaCallingConvention out_conv;
+ NativeCallingConvention in_conv(call_regs._arg_regs);
+ ArgumentShuffle arg_shuffle(in_sig_bt, total_in_args, out_sig_bt, total_out_args, &in_conv, &out_conv, shuffle_reg);
+
+ // The Java call uses the JIT ABI, but we also call C.
+ int out_arg_area = MAX2(frame::z_jit_out_preserve_size + arg_shuffle.out_arg_bytes(), (int)frame::z_abi_160_size);
+
+#ifndef PRODUCT
+ LogTarget(Trace, foreign, upcall) lt;
+ if (lt.is_enabled()) {
+ ResourceMark rm;
+ LogStream ls(lt);
+ arg_shuffle.print_on(&ls);
+ }
+#endif
+
+ int reg_save_area_size = compute_reg_save_area_size(abi);
+ RegSpiller arg_spiller(call_regs._arg_regs);
+ RegSpiller result_spiller(call_regs._ret_regs);
+
+ int res_save_area_offset = out_arg_area;
+ int arg_save_area_offset = res_save_area_offset + result_spiller.spill_size_bytes();
+ int reg_save_area_offset = arg_save_area_offset + arg_spiller.spill_size_bytes();
+ int frame_data_offset = reg_save_area_offset + reg_save_area_size;
+ int frame_bottom_offset = frame_data_offset + sizeof(UpcallStub::FrameData);
+
+ int frame_size = align_up(frame_bottom_offset, StackAlignmentInBytes);
+ StubLocations locs;
+
+ // The space we have allocated will look like:
+ //
+ //
+ // FP-> | |
+ // |---------------------| = frame_bottom_offset = frame_size
+ // | |
+ // | FrameData |
+ // |---------------------| = frame_data_offset
+ // | |
+ // | reg_save_area |
+ // |---------------------| = reg_save_area_offset
+ // | |
+ // | arg_save_area |
+ // |---------------------| = arg_save_area_offset
+ // | |
+ // | res_save_area |
+ // |---------------------| = res_save_area_offset
+ // | |
+ // SP-> | out_arg_area | needs to be at end for shadow space
+ //
+ //
+
+ //////////////////////////////////////////////////////////////////////////////
+
+ MacroAssembler* _masm = new MacroAssembler(&buffer);
+ address start = __ pc();
+
+ __ save_return_pc();
+ assert((abi._stack_alignment_bytes % StackAlignmentInBytes) == 0, "must be 8 byte aligned");
+ // allocate frame (frame_size is also aligned, so stack is still aligned)
+ __ push_frame(frame_size);
+
+ // we have to always spill args since we need to do a call to get the thread
+ // (and maybe attach it).
+ arg_spiller.generate_spill(_masm, arg_save_area_offset);
+ // Java methods won't preserve them, so save them here:
+ preserve_callee_saved_registers(_masm, abi, reg_save_area_offset);
+
+ __ block_comment("{ on_entry");
+ __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, UpcallLinker::on_entry));
+ __ z_aghik(Z_ARG1, Z_SP, frame_data_offset);
+ __ call(call_target_address);
+ __ z_lgr(Z_thread, Z_RET);
+ __ block_comment("} on_entry");
+
+ arg_spiller.generate_fill(_masm, arg_save_area_offset);
+ __ block_comment("{ argument shuffle");
+ arg_shuffle.generate(_masm, shuffle_reg, abi._shadow_space_bytes, frame::z_jit_out_preserve_size, locs);
+ __ block_comment("} argument shuffle");
+
+ __ block_comment("{ receiver ");
+ __ load_const_optimized(Z_ARG1, (intptr_t)receiver);
+ __ resolve_jobject(Z_ARG1, Z_tmp_1, Z_tmp_2);
+ __ block_comment("} receiver ");
+
+ __ load_const_optimized(Z_method, (intptr_t)entry);
+ __ z_stg(Z_method, Address(Z_thread, in_bytes(JavaThread::callee_target_offset())));
+
+ __ z_lg(call_target_address, Address(Z_method, in_bytes(Method::from_compiled_offset())));
+ __ call(call_target_address);
+
+ // return value shuffle
+ assert(!needs_return_buffer, "unexpected needs_return_buffer");
+ // CallArranger can pick a return type that goes in the same reg for both CCs.
+ if (call_regs._ret_regs.length() > 0) { // 0 or 1
+ VMStorage ret_reg = call_regs._ret_regs.at(0);
+ // Check if the return reg is as expected.
+ switch (ret_type) {
+ case T_BOOLEAN:
+ case T_BYTE:
+ case T_SHORT:
+ case T_CHAR:
+ case T_INT:
+ __ z_lgfr(Z_RET, Z_RET); // Clear garbage in high half.
+ // fallthrough
+ case T_LONG:
+ assert(as_Register(ret_reg) == Z_RET, "unexpected result register");
+ break;
+ case T_FLOAT:
+ case T_DOUBLE:
+ assert(as_FloatRegister(ret_reg) == Z_FRET, "unexpected result register");
+ break;
+ default:
+ fatal("unexpected return type: %s", type2name(ret_type));
+ }
+ }
+
+ result_spiller.generate_spill(_masm, res_save_area_offset);
+
+ __ block_comment("{ on_exit");
+ __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, UpcallLinker::on_exit));
+ __ z_aghik(Z_ARG1, Z_SP, frame_data_offset);
+ __ call(call_target_address);
+ __ block_comment("} on_exit");
+
+ restore_callee_saved_registers(_masm, abi, reg_save_area_offset);
+
+ result_spiller.generate_fill(_masm, res_save_area_offset);
+
+ __ pop_frame();
+ __ restore_return_pc();
+ __ z_br(Z_R14);
+
+ //////////////////////////////////////////////////////////////////////////////
+
+ __ block_comment("{ exception handler");
+
+ intptr_t exception_handler_offset = __ pc() - start;
+
+ // Native caller has no idea how to handle exceptions,
+ // so we just crash here. Up to callee to catch exceptions.
+ __ verify_oop(Z_ARG1);
+ __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, UpcallLinker::handle_uncaught_exception));
+ __ call_c(call_target_address);
+ __ should_not_reach_here();
+
+ __ block_comment("} exception handler");
+
+ _masm->flush();
+
+#ifndef PRODUCT
+ stringStream ss;
+ ss.print("upcall_stub_%s", entry->signature()->as_C_string());
+ const char* name = _masm->code_string(ss.as_string());
+#else // PRODUCT
+ const char* name = "upcall_stub";
+#endif // PRODUCT
+
+ buffer.log_section_sizes(name);
+ UpcallStub* blob
+ = UpcallStub::create(name,
+ &buffer,
+ exception_handler_offset,
+ receiver,
+ in_ByteSize(frame_data_offset));
+#ifndef PRODUCT
+ if (lt.is_enabled()) {
+ ResourceMark rm;
+ LogStream ls(lt);
+ blob->print_on(&ls);
+ }
+#endif
+
+ return blob->code_begin();
}
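
make_upcall_stub above computes its frame layout by stacking the regions bottom-up from the out-arg area and rounding the total to StackAlignmentInBytes, exactly as the diagram in the code shows. An illustrative recomputation with made-up region sizes:

    #include <cstdio>

    int main() {
      const int stack_alignment  = 8;    // StackAlignmentInBytes on s390 after this change
      const int out_arg_area     = 160;  // e.g. frame::z_abi_160_size
      const int res_spill_bytes  = 16;   // result_spiller.spill_size_bytes(), made up
      const int arg_spill_bytes  = 64;   // arg_spiller.spill_size_bytes(), made up
      const int reg_save_bytes   = 96;   // compute_reg_save_area_size(abi), made up
      const int frame_data_bytes = 88;   // sizeof(UpcallStub::FrameData), made up

      int res_save_area_offset = out_arg_area;
      int arg_save_area_offset = res_save_area_offset + res_spill_bytes;
      int reg_save_area_offset = arg_save_area_offset + arg_spill_bytes;
      int frame_data_offset    = reg_save_area_offset + reg_save_bytes;
      int frame_bottom_offset  = frame_data_offset + frame_data_bytes;
      int frame_size = (frame_bottom_offset + stack_alignment - 1) & ~(stack_alignment - 1);

      std::printf("frame_data_offset=%d frame_size=%d\n", frame_data_offset, frame_size);
      return 0;
    }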
diff --git a/src/hotspot/cpu/s390/vmstorage_s390.hpp b/src/hotspot/cpu/s390/vmstorage_s390.hpp
index 192159adc4cc5..6a59567092078 100644
--- a/src/hotspot/cpu/s390/vmstorage_s390.hpp
+++ b/src/hotspot/cpu/s390/vmstorage_s390.hpp
@@ -29,24 +29,79 @@
#include "asm/register.hpp"
enum class StorageType : int8_t {
- STACK = 0,
- PLACEHOLDER = 1,
-// special locations used only by native code
- FRAME_DATA = PLACEHOLDER + 1,
+ INTEGER = 0,
+ FLOAT = 1,
+ STACK = 2,
+ PLACEHOLDER = 3,
+ // special locations used only by native code
+ FRAME_DATA = 4,
INVALID = -1
};
// need to define this before constructing VMStorage (below)
constexpr inline bool VMStorage::is_reg(StorageType type) {
- return false;
+ return type == StorageType::INTEGER || type == StorageType::FLOAT;
}
constexpr inline StorageType VMStorage::stack_type() { return StorageType::STACK; }
constexpr inline StorageType VMStorage::placeholder_type() { return StorageType::PLACEHOLDER; }
constexpr inline StorageType VMStorage::frame_data_type() { return StorageType::FRAME_DATA; }
+// Needs to be consistent with S390Architecture.java.
+constexpr uint16_t REG32_MASK = 0b0000000000000001;
+constexpr uint16_t REG64_MASK = 0b0000000000000011;
+
+inline Register as_Register(VMStorage vms) {
+ assert(vms.type() == StorageType::INTEGER, "not the right type");
+ return ::as_Register(vms.index());
+}
+
+inline FloatRegister as_FloatRegister(VMStorage vms) {
+ assert(vms.type() == StorageType::FLOAT, "not the right type");
+ return ::as_FloatRegister(vms.index());
+}
+
+inline VMStorage as_VMStorage(Register reg, uint16_t segment_mask = REG64_MASK) {
+ return VMStorage::reg_storage(StorageType::INTEGER, segment_mask, reg->encoding());
+}
+
+inline VMStorage as_VMStorage(FloatRegister reg, uint16_t segment_mask = REG64_MASK) {
+ return VMStorage::reg_storage(StorageType::FLOAT, segment_mask, reg->encoding());
+}
+
inline VMStorage as_VMStorage(VMReg reg, BasicType bt) {
+ if (reg->is_Register()) {
+ uint16_t segment_mask = 0;
+ switch (bt) {
+ case T_BOOLEAN:
+ case T_CHAR :
+ case T_BYTE :
+ case T_SHORT :
+ case T_INT : segment_mask = REG32_MASK; break;
+ default : segment_mask = REG64_MASK; break;
+ }
+ return as_VMStorage(reg->as_Register(), segment_mask);
+ } else if (reg->is_FloatRegister()) {
+ // FP regs always use double format. However, we need the correct format for loads/stores.
+ return as_VMStorage(reg->as_FloatRegister(), (bt == T_FLOAT) ? REG32_MASK : REG64_MASK);
+ } else if (reg->is_stack()) {
+ uint16_t size = 0;
+ switch (bt) {
+ case T_BOOLEAN:
+ case T_CHAR :
+ case T_BYTE :
+ case T_SHORT :
+ case T_INT :
+ case T_FLOAT : size = 4; break;
+ default : size = 8; break;
+ }
+ return VMStorage(StorageType::STACK, size,
+ checked_cast<uint16_t>(reg->reg2stack() * VMRegImpl::stack_slot_size));
+ } else if (!reg->is_valid()) {
+ return VMStorage::invalid();
+ }
+
ShouldNotReachHere();
return VMStorage::invalid();
}
-#endif // CPU_S390_VMSTORAGE_S390_INLINE_HPP
\ No newline at end of file
+#endif // CPU_S390_VMSTORAGE_S390_INLINE_HPP
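
The REG32/REG64 segment masks above have to stay consistent with S390Architecture.java; reading each bit as "this 32-bit segment of the register is occupied" is the interpretation the other ports use, so a 32-bit value fits a 64-bit slot but not the other way around. A small sketch under that assumption:

    #include <cstdint>

    constexpr uint16_t REG32 = 0b01;
    constexpr uint16_t REG64 = 0b11;

    constexpr bool fits_in(uint16_t value_mask, uint16_t slot_mask) {
      return (value_mask & ~slot_mask) == 0;
    }

    static_assert(fits_in(REG32, REG64), "a 32-bit value fits a 64-bit slot");
    static_assert(!fits_in(REG64, REG32), "a 64-bit value does not fit a 32-bit slot");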
diff --git a/src/hotspot/cpu/x86/assembler_x86.hpp b/src/hotspot/cpu/x86/assembler_x86.hpp
index a228bd28db7ce..ff0fee0f5deff 100644
--- a/src/hotspot/cpu/x86/assembler_x86.hpp
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp
@@ -315,7 +315,7 @@ class Address {
}
bool xmmindex_needs_rex() const {
- return _xmmindex->is_valid() && _xmmindex->encoding() >= 8;
+ return _xmmindex->is_valid() && ((_xmmindex->encoding() & 8) == 8);
}
relocInfo::relocType reloc() const { return _rspec.type(); }
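
The new predicate checks only bit 3 of the XMM index register's encoding instead of "encoding >= 8": with AVX-512 there are 32 XMM registers, and only bit 3 belongs in the REX/EVEX X extension bit (the higher encoding bit is carried elsewhere in the EVEX prefix), so registers such as xmm16 must not be reported here. A tiny standalone comparison of the two predicates:

    #include <cstdio>

    int main() {
      const int encodings[] = {7, 8, 15, 16, 24};
      for (int enc : encodings) {
        std::printf("xmm%-2d  old(>=8)=%d  new(&8)=%d\n", enc, enc >= 8, (enc & 8) == 8);
      }
      return 0;  // xmm16 is the interesting case: old says 1, new says 0
    }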
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
index cec75f210e9bf..57da386032402 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -1713,7 +1713,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
assert_different_registers(obj, k_RInfo, klass_RInfo);
- __ cmpptr(obj, NULL_WORD);
+ __ testptr(obj, obj);
if (op->should_profile()) {
Label not_null;
__ jccb(Assembler::notEqual, not_null);
@@ -1792,7 +1792,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
__ pop(klass_RInfo);
__ pop(klass_RInfo);
// result is a boolean
- __ cmpl(klass_RInfo, 0);
+ __ testl(klass_RInfo, klass_RInfo);
__ jcc(Assembler::equal, *failure_target);
// successful cast, fall through to profile or jump
}
@@ -1806,7 +1806,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
__ pop(klass_RInfo);
__ pop(k_RInfo);
// result is a boolean
- __ cmpl(k_RInfo, 0);
+ __ testl(k_RInfo, k_RInfo);
__ jcc(Assembler::equal, *failure_target);
// successful cast, fall through to profile or jump
}
@@ -1859,7 +1859,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
Label *success_target = op->should_profile() ? &profile_cast_success : &done;
Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
- __ cmpptr(value, NULL_WORD);
+ __ testptr(value, value);
if (op->should_profile()) {
Label not_null;
__ jccb(Assembler::notEqual, not_null);
@@ -1890,7 +1890,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ pop(klass_RInfo);
__ pop(k_RInfo);
// result is a boolean
- __ cmpl(k_RInfo, 0);
+ __ testl(k_RInfo, k_RInfo);
__ jcc(Assembler::equal, *failure_target);
// fall through to the success case
@@ -2047,7 +2047,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
} else {
Label skip;
- __ jcc (acond, skip);
+ __ jccb(acond, skip);
if (opr2->is_cpu_register()) {
reg2reg(opr2, result);
} else if (opr2->is_stack()) {
@@ -2664,13 +2664,18 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
// cpu register - constant
LIR_Const* c = opr2->as_constant_ptr();
if (c->type() == T_INT) {
- __ cmpl(reg1, c->as_jint());
+ jint i = c->as_jint();
+ if (i == 0) {
+ __ testl(reg1, reg1);
+ } else {
+ __ cmpl(reg1, i);
+ }
} else if (c->type() == T_METADATA) {
// All we need for now is a comparison with null for equality.
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
Metadata* m = c->as_metadata();
if (m == nullptr) {
- __ cmpptr(reg1, NULL_WORD);
+ __ testptr(reg1, reg1);
} else {
ShouldNotReachHere();
}
@@ -2678,7 +2683,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
// In 64bit oops are single register
jobject o = c->as_jobject();
if (o == nullptr) {
- __ cmpptr(reg1, NULL_WORD);
+ __ testptr(reg1, reg1);
} else {
__ cmpoop(reg1, o, rscratch1);
}
@@ -3146,7 +3151,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
#endif // _LP64
- __ cmpl(rax, 0);
+ __ testl(rax, rax);
__ jcc(Assembler::equal, *stub->continuation());
__ mov(tmp, rax);
@@ -3288,7 +3293,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ pop(dst);
__ pop(src);
- __ cmpl(src, 0);
+ __ testl(src, src);
__ jcc(Assembler::notEqual, cont);
__ bind(slow);
@@ -3635,13 +3640,33 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
__ verify_oop(obj);
- if (tmp != obj) {
- __ mov(tmp, obj);
+#ifdef ASSERT
+ if (obj == tmp) {
+#ifdef _LP64
+ assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
+#else
+ assert_different_registers(obj, mdo_addr.base(), mdo_addr.index());
+#endif
+ } else {
+#ifdef _LP64
+ assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index());
+#else
+ assert_different_registers(obj, tmp, mdo_addr.base(), mdo_addr.index());
+#endif
}
+#endif
if (do_null) {
- __ testptr(tmp, tmp);
+ __ testptr(obj, obj);
__ jccb(Assembler::notZero, update);
if (!TypeEntries::was_null_seen(current_klass)) {
+ __ testptr(mdo_addr, TypeEntries::null_seen);
+#ifndef ASSERT
+ __ jccb(Assembler::notZero, next); // already set
+#else
+ __ jcc(Assembler::notZero, next); // already set
+#endif
+ // atomic update to prevent overwriting Klass* with 0
+ __ lock();
__ orptr(mdo_addr, TypeEntries::null_seen);
}
if (do_update) {
@@ -3652,7 +3677,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
__ jmp(next);
}
} else {
- __ testptr(tmp, tmp);
+ __ testptr(obj, obj);
__ jcc(Assembler::notZero, update);
__ stop("unexpected null obj");
#endif
@@ -3664,7 +3689,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
#ifdef ASSERT
if (exact_klass != nullptr) {
Label ok;
- __ load_klass(tmp, tmp, tmp_load_klass);
+ __ load_klass(tmp, obj, tmp_load_klass);
__ push(tmp);
__ mov_metadata(tmp, exact_klass->constant_encoding());
__ cmpptr(tmp, Address(rsp, 0));
@@ -3679,9 +3704,11 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
if (exact_klass != nullptr) {
__ mov_metadata(tmp, exact_klass->constant_encoding());
} else {
- __ load_klass(tmp, tmp, tmp_load_klass);
+ __ load_klass(tmp, obj, tmp_load_klass);
}
-
+#ifdef _LP64
+ __ mov(rscratch1, tmp); // save original value before XOR
+#endif
__ xorptr(tmp, mdo_addr);
__ testptr(tmp, TypeEntries::type_klass_mask);
// klass seen before, nothing to do. The unknown bit may have been
@@ -3692,23 +3719,23 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
__ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
if (TypeEntries::is_type_none(current_klass)) {
- __ cmpptr(mdo_addr, 0);
- __ jccb(Assembler::equal, none);
- __ cmpptr(mdo_addr, TypeEntries::null_seen);
- __ jccb(Assembler::equal, none);
+ __ testptr(mdo_addr, TypeEntries::type_mask);
+ __ jccb(Assembler::zero, none);
+#ifdef _LP64
// There is a chance that the checks above (re-reading profiling
// data from memory) fail if another thread has just set the
// profiling to this obj's klass
+ __ mov(tmp, rscratch1); // get back original value before XOR
__ xorptr(tmp, mdo_addr);
__ testptr(tmp, TypeEntries::type_klass_mask);
__ jccb(Assembler::zero, next);
+#endif
}
} else {
assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
- __ movptr(tmp, mdo_addr);
- __ testptr(tmp, TypeEntries::type_unknown);
+ __ testptr(mdo_addr, TypeEntries::type_unknown);
__ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
}
@@ -3721,6 +3748,10 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
__ bind(none);
// first time here. Set profile type.
__ movptr(mdo_addr, tmp);
+#ifdef ASSERT
+ __ andptr(tmp, TypeEntries::type_klass_mask);
+ __ verify_klass_ptr(tmp);
+#endif
}
} else {
// There's a single possible klass at this profile point
@@ -3735,10 +3766,8 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
{
Label ok;
__ push(tmp);
- __ cmpptr(mdo_addr, 0);
- __ jcc(Assembler::equal, ok);
- __ cmpptr(mdo_addr, TypeEntries::null_seen);
- __ jcc(Assembler::equal, ok);
+ __ testptr(mdo_addr, TypeEntries::type_mask);
+ __ jcc(Assembler::zero, ok);
// may have been set by another thread
__ mov_metadata(tmp, exact_klass->constant_encoding());
__ xorptr(tmp, mdo_addr);
@@ -3754,20 +3783,22 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
#endif
// first time here. Set profile type.
__ movptr(mdo_addr, tmp);
+#ifdef ASSERT
+ __ andptr(tmp, TypeEntries::type_klass_mask);
+ __ verify_klass_ptr(tmp);
+#endif
} else {
assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
- __ movptr(tmp, mdo_addr);
- __ testptr(tmp, TypeEntries::type_unknown);
+ __ testptr(mdo_addr, TypeEntries::type_unknown);
__ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
__ orptr(mdo_addr, TypeEntries::type_unknown);
}
}
-
- __ bind(next);
}
+ __ bind(next);
}
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
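
The repeated cmp-with-zero to test rewrites in this file keep the branch behaviour identical: for a comparison against 0, TEST reg,reg sets ZF and SF exactly like CMP reg,0 but needs no immediate operand, so the encoding is shorter. A quick self-check of the flag equivalence in plain C++:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t samples[] = { INT64_MIN, -1, 0, 1, INT64_MAX };
      for (int64_t v : samples) {
        bool zf_cmp  = (v - 0) == 0;  // ZF after cmp v, 0
        bool zf_test = (v & v) == 0;  // ZF after test v, v
        assert(zf_cmp == zf_test);
      }
      return 0;
    }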
diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
index c760722187a38..ce9133986178c 100644
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
@@ -69,7 +69,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
const Register thread = disp_hdr;
get_thread(thread);
#endif
- fast_lock_impl(obj, hdr, thread, tmp, slow_case);
+ lightweight_lock(obj, hdr, thread, tmp, slow_case);
} else if (LockingMode == LM_LEGACY) {
Label done;
// and mark it as unlocked
@@ -135,7 +135,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
if (LockingMode == LM_LIGHTWEIGHT) {
movptr(disp_hdr, Address(obj, hdr_offset));
andptr(disp_hdr, ~(int32_t)markWord::lock_mask_in_place);
- fast_unlock_impl(obj, disp_hdr, hdr, slow_case);
+ lightweight_unlock(obj, disp_hdr, hdr, slow_case);
} else if (LockingMode == LM_LEGACY) {
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to
diff --git a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
index 7dd83bcc7a5bf..8b56f464f2739 100644
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
@@ -798,6 +798,14 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
const Register handler_addr = rbx;
const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
+ if (AbortVMOnException) {
+ __ enter();
+ save_live_registers(sasm, 2);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, check_abort_on_vm_exception), rax);
+ restore_live_registers(sasm);
+ __ leave();
+ }
+
// verify that only rax, is valid at this time
__ invalidate_registers(false, true, true, true, true, true);
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
index 0dc02d9f07d8b..84a1f18951803 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
@@ -621,7 +621,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
movptr(Address(boxReg, 0), tmpReg);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "");
- fast_lock_impl(objReg, tmpReg, thread, scrReg, NO_COUNT);
+ lightweight_lock(objReg, tmpReg, thread, scrReg, NO_COUNT);
jmp(COUNT);
}
jmp(DONE_LABEL);
@@ -925,7 +925,7 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
bind (Stacked);
if (LockingMode == LM_LIGHTWEIGHT) {
mov(boxReg, tmpReg);
- fast_unlock_impl(objReg, boxReg, tmpReg, NO_COUNT);
+ lightweight_unlock(objReg, boxReg, tmpReg, NO_COUNT);
jmp(COUNT);
} else if (LockingMode == LM_LEGACY) {
movptr(tmpReg, Address (boxReg, 0)); // re-fetch
diff --git a/src/hotspot/cpu/x86/frame_x86.cpp b/src/hotspot/cpu/x86/frame_x86.cpp
index b6e88c969f2ed..7e4b24e097d5e 100644
--- a/src/hotspot/cpu/x86/frame_x86.cpp
+++ b/src/hotspot/cpu/x86/frame_x86.cpp
@@ -496,7 +496,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// do some validation of frame elements
// first the method
- Method* m = *interpreter_frame_method_addr();
+ Method* m = safe_interpreter_frame_method();
// validate the method we'd find in this potential sender
if (!Method::is_valid_method(m)) return false;
diff --git a/src/hotspot/cpu/x86/interp_masm_x86.cpp b/src/hotspot/cpu/x86/interp_masm_x86.cpp
index fc5056d2d9383..0f1c808b27be8 100644
--- a/src/hotspot/cpu/x86/interp_masm_x86.cpp
+++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp
@@ -51,15 +51,28 @@ void InterpreterMacroAssembler::jump_to_entry(address entry) {
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
Label update, next, none;
+#ifdef _LP64
+ assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
+#else
+ assert_different_registers(obj, mdo_addr.base(), mdo_addr.index());
+#endif
+
interp_verify_oop(obj, atos);
testptr(obj, obj);
jccb(Assembler::notZero, update);
+ testptr(mdo_addr, TypeEntries::null_seen);
+ jccb(Assembler::notZero, next); // null already seen. Nothing to do anymore.
+ // atomic update to prevent overwriting Klass* with 0
+ lock();
orptr(mdo_addr, TypeEntries::null_seen);
jmpb(next);
bind(update);
load_klass(obj, obj, rscratch1);
+#ifdef _LP64
+ mov(rscratch1, obj);
+#endif
xorptr(obj, mdo_addr);
testptr(obj, TypeEntries::type_klass_mask);
@@ -74,12 +87,15 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
jccb(Assembler::equal, none);
cmpptr(mdo_addr, TypeEntries::null_seen);
jccb(Assembler::equal, none);
+#ifdef _LP64
// There is a chance that the checks above (re-reading profiling
// data from memory) fail if another thread has just set the
// profiling to this obj's klass
+ mov(obj, rscratch1);
xorptr(obj, mdo_addr);
testptr(obj, TypeEntries::type_klass_mask);
jccb(Assembler::zero, next);
+#endif
// different than before. Cannot keep accurate profile.
orptr(mdo_addr, TypeEntries::type_unknown);
@@ -88,6 +104,10 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
bind(none);
// first time here. Set profile type.
movptr(mdo_addr, obj);
+#ifdef ASSERT
+ andptr(obj, TypeEntries::type_klass_mask);
+ verify_klass_ptr(obj);
+#endif
bind(next);
}
@@ -1074,7 +1094,7 @@ void InterpreterMacroAssembler::remove_activation(
// Check that all monitors are unlocked
{
Label loop, exception, entry, restart;
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
const Address monitor_block_top(
rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
const Address monitor_block_bot(
@@ -1150,6 +1170,8 @@ void InterpreterMacroAssembler::remove_activation(
NOT_LP64(get_thread(rthread);)
+ // check if already enabled - if so no re-enabling needed
+ assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
jcc(Assembler::equal, no_reserved_zone_enabling);
@@ -1232,7 +1254,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
#endif
// Load object header, prepare for CAS from unlocked to locked.
movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- fast_lock_impl(obj_reg, swap_reg, thread, tmp_reg, slow_case);
+ lightweight_lock(obj_reg, swap_reg, thread, tmp_reg, slow_case);
} else if (LockingMode == LM_LEGACY) {
// Load immediate 1 into swap_reg %rax
movl(swap_reg, 1);
@@ -1362,7 +1384,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// Try to swing header from locked to unlocked.
movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
- fast_unlock_impl(obj_reg, swap_reg, header_reg, slow_case);
+ lightweight_unlock(obj_reg, swap_reg, header_reg, slow_case);
} else if (LockingMode == LM_LEGACY) {
// Load the old header from BasicLock structure
movptr(header_reg, Address(swap_reg,
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index da3bc94f30478..101d8eb0ace04 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -9801,7 +9801,7 @@ void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigne
bind(L_stack_ok);
}
-// Implements fast-locking.
+// Implements lightweight-locking.
// Branches to slow upon failure to lock the object, with ZF cleared.
// Falls through upon success with unspecified ZF.
//
@@ -9809,7 +9809,7 @@ void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigne
// hdr: the (pre-loaded) header of the object, must be rax
// thread: the thread which attempts to lock obj
// tmp: a temporary register
-void MacroAssembler::fast_lock_impl(Register obj, Register hdr, Register thread, Register tmp, Label& slow) {
+void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register thread, Register tmp, Label& slow) {
assert(hdr == rax, "header must be in rax for cmpxchg");
assert_different_registers(obj, hdr, thread, tmp);
@@ -9837,14 +9837,14 @@ void MacroAssembler::fast_lock_impl(Register obj, Register hdr, Register thread,
movl(Address(thread, JavaThread::lock_stack_top_offset()), tmp);
}
-// Implements fast-unlocking.
+// Implements lightweight-unlocking.
// Branches to slow upon failure, with ZF cleared.
// Falls through upon success, with unspecified ZF.
//
// obj: the object to be unlocked
// hdr: the (pre-loaded) header of the object, must be rax
// tmp: a temporary register
-void MacroAssembler::fast_unlock_impl(Register obj, Register hdr, Register tmp, Label& slow) {
+void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow) {
assert(hdr == rax, "header must be in rax for cmpxchg");
assert_different_registers(obj, hdr, tmp);
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
index 61db66ae00f39..7eb037e6fe26e 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
@@ -876,6 +876,7 @@ class MacroAssembler: public Assembler {
void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
+ void testptr(Address src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
void testptr(Register src1, Register src2);
void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
@@ -2022,8 +2023,8 @@ class MacroAssembler: public Assembler {
void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
- void fast_lock_impl(Register obj, Register hdr, Register thread, Register tmp, Label& slow);
- void fast_unlock_impl(Register obj, Register hdr, Register tmp, Label& slow);
+ void lightweight_lock(Register obj, Register hdr, Register thread, Register tmp, Label& slow);
+ void lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow);
};
/**
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
index 7995ed5b265bd..c391349cfa323 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
@@ -1717,7 +1717,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
// Load object header
__ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- __ fast_lock_impl(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
+ __ lightweight_lock(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
}
__ bind(count_mon);
__ inc_held_monitor_count();
@@ -1876,7 +1876,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
- __ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
+ __ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
__ dec_held_monitor_count();
}
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
index 20495135c642e..0712ba50c078a 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -2187,7 +2187,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
// Load object header
__ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- __ fast_lock_impl(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
+ __ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
}
__ bind(count_mon);
__ inc_held_monitor_count();
@@ -2331,7 +2331,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
- __ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
+ __ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
__ dec_held_monitor_count();
}
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
index 014737322d81b..2a52fa200c94e 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2324,7 +2324,7 @@ address StubGenerator::generate_base64_decodeBlock() {
const Register isURL = c_rarg5;// Base64 or URL character set
__ movl(isMIME, Address(rbp, 2 * wordSize));
#else
- const Address dp_mem(rbp, 6 * wordSize); // length is on stack on Win64
+ const Address dp_mem(rbp, 6 * wordSize); // length is on stack on Win64
const Address isURL_mem(rbp, 7 * wordSize);
const Register isURL = r10; // pick the volatile windows register
const Register dp = r12;
@@ -2546,10 +2546,12 @@ address StubGenerator::generate_base64_decodeBlock() {
// output_size in r13
// Strip pad characters, if any, and adjust length and mask
+ __ addq(length, start_offset);
__ cmpb(Address(source, length, Address::times_1, -1), '=');
__ jcc(Assembler::equal, L_padding);
__ BIND(L_donePadding);
+ __ subq(length, start_offset);
// Output size is (64 - output_size), output mask is (all 1s >> output_size).
__ kmovql(input_mask, rax);
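The add/sub of start_offset around the pad-character check fixes the addressing of the final input byte: it lives at source[start_offset + length - 1], not source[length - 1]. A minimal scalar sketch of the same index arithmetic (the stub's actual padding handling is vectorized and only partially shown here):

    #include <cstddef>

    // Count trailing '=' pad characters of a base64 block and return the number
    // of payload characters left to decode. 'start_offset' mirrors the stub's
    // register of the same name.
    static size_t payload_length(const unsigned char* source, size_t start_offset, size_t length) {
      size_t pads = 0;
      while (pads < 2 && pads < length &&
             source[start_offset + length - 1 - pads] == '=') {
        pads++;
      }
      return length - pads;
    }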
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp
index bb8e8f6b1eac8..84f5cc80b0d3a 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp
@@ -330,6 +330,7 @@ address StubGenerator::generate_updateBytesAdler32() {
__ movq(r13, xtmp4);
__ movq(r12, xtmp3);
+ __ vzeroupper();
__ leave();
__ ret(0);
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp
index f94297ce82d73..47354f4fc7cf7 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp
@@ -291,6 +291,9 @@ address StubGenerator::generate_chacha20Block_avx() {
// registers. That length should be returned through %rax.
__ mov64(rax, outlen);
+ if (outlen == 256) {
+ __ vzeroupper();
+ }
__ leave();
__ ret(0);
return start;
@@ -460,6 +463,7 @@ address StubGenerator::generate_chacha20Block_avx512() {
// and that length should be returned through %rax.
__ mov64(rax, 1024);
+ __ vzeroupper();
__ leave();
__ ret(0);
return start;
diff --git a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp
index 48f5b860faf4c..817baea61746f 100644
--- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp
+++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp
@@ -477,7 +477,7 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue)
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
// monitor entry size: see picture of stack in frame_x86.hpp
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
// total overhead size: entry_size + (saved rbp through expr stack
// bottom). be sure to change this if you add/subtract anything
@@ -566,7 +566,7 @@ void TemplateInterpreterGenerator::lock_method() {
const Address monitor_block_top(
rbp,
frame::interpreter_frame_monitor_block_top_offset * wordSize);
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
#ifdef ASSERT
{
diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp
index d56cc67bff9f7..62d667abe0d19 100644
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp
@@ -4319,7 +4319,7 @@ void TemplateTable::monitorenter() {
rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
const Address monitor_block_bot(
rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
Label allocated;
@@ -4416,7 +4416,7 @@ void TemplateTable::monitorexit() {
rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
const Address monitor_block_bot(
rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
- const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const int entry_size = frame::interpreter_frame_monitor_size_in_bytes();
Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
diff --git a/src/hotspot/cpu/x86/vm_version_x86.cpp b/src/hotspot/cpu/x86/vm_version_x86.cpp
index b4e9e721b5ad0..1f9238c60cba1 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp
@@ -1124,6 +1124,7 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
}
+#ifdef _LP64
// ChaCha20 Intrinsics
// As long as the system supports AVX as a baseline we can do a
// SIMD-enabled block function. StubGenerator makes the determination
@@ -1139,6 +1140,13 @@ void VM_Version::get_processor_features() {
}
FLAG_SET_DEFAULT(UseChaCha20Intrinsics, false);
}
+#else
+ // No support currently for ChaCha20 intrinsics on 32-bit platforms
+ if (UseChaCha20Intrinsics) {
+ warning("ChaCha20 intrinsics are not available on this CPU.");
+ FLAG_SET_DEFAULT(UseChaCha20Intrinsics, false);
+ }
+#endif // _LP64
// Base64 Intrinsics (Check the condition for which the intrinsic will be active)
if (UseAVX >= 2) {
diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index 55f465bf76b31..1a37e3124d573 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -8985,9 +8985,9 @@ instruct vmask_cmp_node(rRegI dst, vec src1, vec src2, kReg mask, kReg ktmp1, kR
%}
-instruct vmask_gen(kReg dst, rRegL len, rRegL temp) %{
+instruct vmask_gen(kReg dst, rRegL len, rRegL temp, rFlagsReg cr) %{
match(Set dst (VectorMaskGen len));
- effect(TEMP temp);
+ effect(TEMP temp, KILL cr);
format %{ "vector_mask_gen32 $dst, $len \t! vector mask generator" %}
ins_encode %{
__ genmask($dst$$KRegister, $len$$Register, $temp$$Register);
diff --git a/src/hotspot/cpu/zero/frame_zero.cpp b/src/hotspot/cpu/zero/frame_zero.cpp
index 923d3082b25e7..5ddd23a9d59ef 100644
--- a/src/hotspot/cpu/zero/frame_zero.cpp
+++ b/src/hotspot/cpu/zero/frame_zero.cpp
@@ -82,7 +82,6 @@ BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
return get_interpreterState()->monitor_base();
}
-// Pointer beyond the "oldest/deepest" BasicObjectLock on stack.
BasicObjectLock* frame::interpreter_frame_monitor_end() const {
return (BasicObjectLock*) get_interpreterState()->stack_base();
}
diff --git a/src/hotspot/cpu/zero/globalDefinitions_zero.hpp b/src/hotspot/cpu/zero/globalDefinitions_zero.hpp
index b8f01f4045486..ca11d106c2618 100644
--- a/src/hotspot/cpu/zero/globalDefinitions_zero.hpp
+++ b/src/hotspot/cpu/zero/globalDefinitions_zero.hpp
@@ -30,6 +30,8 @@
#define SUPPORTS_NATIVE_CX8
#endif
+#define DEFAULT_CACHE_LINE_SIZE 64
+
#define SUPPORT_MONITOR_COUNT
#ifdef __APPLE__
diff --git a/src/hotspot/cpu/zero/vm_version_zero.cpp b/src/hotspot/cpu/zero/vm_version_zero.cpp
index 3d17e159a619c..a99885a533df8 100644
--- a/src/hotspot/cpu/zero/vm_version_zero.cpp
+++ b/src/hotspot/cpu/zero/vm_version_zero.cpp
@@ -137,6 +137,12 @@ void VM_Version::initialize() {
#ifdef ASSERT
UNSUPPORTED_OPTION(CountCompiledCalls);
#endif
+
+ // Supports 8-byte cmpxchg with compiler built-ins.
+ // These built-ins are supposed to be implemented on
+ // all platforms (even if not natively), so we claim
+ // the support unconditionally.
+ _supports_cx8 = true;
}
void VM_Version::initialize_cpu_information(void) {
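The claim _supports_cx8 = true rests on the compiler's 8-byte atomic built-ins, which GCC and Clang provide on every target (falling back to a lock-based emulation where the hardware has no native 64-bit CAS). A minimal sketch of such a built-in call, assuming a GCC-compatible compiler:

    #include <cstdint>

    // 8-byte compare-and-swap via the __atomic built-in family; returns true if
    // *addr was equal to 'expected' and has been replaced by 'desired'.
    static bool cas64(volatile int64_t* addr, int64_t expected, int64_t desired) {
      return __atomic_compare_exchange_n(addr, &expected, desired,
                                         /* weak */ false,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }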
diff --git a/src/hotspot/os/aix/globals_aix.hpp b/src/hotspot/os/aix/globals_aix.hpp
index a047e79b695fa..fb353348a5364 100644
--- a/src/hotspot/os/aix/globals_aix.hpp
+++ b/src/hotspot/os/aix/globals_aix.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,11 +49,12 @@
"Allow VM to run with EXTSHM=ON.") \
\
/* Maximum expected size of the data segment. That correlates with the */ \
- /* to the maximum C Heap consumption we expect. */ \
- /* We need to know this because we need to leave "breathing space" for the */ \
- /* data segment when placing the java heap. If that space is too small, we */ \
- /* reduce our chance of getting a low heap address (needed for compressed */ \
- /* Oops). */ \
+ /* maximum C Heap consumption we expect. */ \
+ /* We need to leave "breathing space" for the data segment when */ \
+ /* placing the java heap. If the MaxExpectedDataSegmentSize setting */ \
+ /* is too small, we might run into resource issues creating many native */ \
+ /* threads, if it is too large, we reduce our chance of getting a low heap */ \
+ /* address (needed for compressed Oops). */ \
product(uintx, MaxExpectedDataSegmentSize, 8*G, \
"Maximum expected Data Segment Size.") \
\
diff --git a/src/hotspot/os/aix/libodm_aix.cpp b/src/hotspot/os/aix/libodm_aix.cpp
index db8e8a5d96009..9fe0fb7abd842 100644
--- a/src/hotspot/os/aix/libodm_aix.cpp
+++ b/src/hotspot/os/aix/libodm_aix.cpp
@@ -29,13 +29,16 @@
#include
#include
#include "runtime/arguments.hpp"
+#include "runtime/os.hpp"
dynamicOdm::dynamicOdm() {
- const char *libodmname = "/usr/lib/libodm.a(shr_64.o)";
- _libhandle = dlopen(libodmname, RTLD_MEMBER | RTLD_NOW);
+ const char* libodmname = "/usr/lib/libodm.a(shr_64.o)";
+ char ebuf[512];
+ _libhandle = os::dll_load(libodmname, ebuf, sizeof(ebuf));
+
if (!_libhandle) {
- trcVerbose("Couldn't open %s", libodmname);
+ trcVerbose("Cannot load %s (error %s)", libodmname, ebuf);
return;
}
_odm_initialize = (fun_odm_initialize )dlsym(_libhandle, "odm_initialize" );
@@ -45,14 +48,14 @@ dynamicOdm::dynamicOdm() {
_odm_terminate = (fun_odm_terminate )dlsym(_libhandle, "odm_terminate" );
if (!_odm_initialize || !_odm_set_path || !_odm_mount_class || !_odm_get_obj || !_odm_terminate) {
trcVerbose("Couldn't find all required odm symbols from %s", libodmname);
- dlclose(_libhandle);
+ os::dll_unload(_libhandle);
_libhandle = nullptr;
return;
}
}
dynamicOdm::~dynamicOdm() {
- if (_libhandle) { dlclose(_libhandle); }
+ if (_libhandle) { os::dll_unload(_libhandle); }
}
diff --git a/src/hotspot/os/aix/libperfstat_aix.cpp b/src/hotspot/os/aix/libperfstat_aix.cpp
index 79b8f09cc65ec..f547b4c78e77c 100644
--- a/src/hotspot/os/aix/libperfstat_aix.cpp
+++ b/src/hotspot/os/aix/libperfstat_aix.cpp
@@ -26,6 +26,7 @@
#include "libperfstat_aix.hpp"
#include "misc_aix.hpp"
+#include "runtime/os.hpp"
#include
@@ -71,11 +72,11 @@ static fun_perfstat_reset_t g_fun_perfstat_reset = nullptr;
static fun_wpar_getcid_t g_fun_wpar_getcid = nullptr;
bool libperfstat::init() {
-
- // Dynamically load the libperfstat porting library.
- g_libhandle = dlopen("/usr/lib/libperfstat.a(shr_64.o)", RTLD_MEMBER | RTLD_NOW);
+ const char* libperfstat = "/usr/lib/libperfstat.a(shr_64.o)";
+ char ebuf[512];
+ g_libhandle = os::dll_load(libperfstat, ebuf, sizeof(ebuf));
if (!g_libhandle) {
- trcVerbose("Cannot load libperfstat.a (dlerror: %s)", dlerror());
+ trcVerbose("Cannot load %s (error: %s)", libperfstat, ebuf);
return false;
}
@@ -113,7 +114,7 @@ bool libperfstat::init() {
void libperfstat::cleanup() {
if (g_libhandle) {
- dlclose(g_libhandle);
+ os::dll_unload(g_libhandle);
g_libhandle = nullptr;
}
diff --git a/src/hotspot/os/aix/loadlib_aix.cpp b/src/hotspot/os/aix/loadlib_aix.cpp
index 3a71a78e45cea..bc71ca2e3101b 100644
--- a/src/hotspot/os/aix/loadlib_aix.cpp
+++ b/src/hotspot/os/aix/loadlib_aix.cpp
@@ -225,6 +225,7 @@ static bool reload_table() {
lm->path = g_stringlist.add(ldi->ldinfo_filename);
if (!lm->path) {
trcVerbose("OOM.");
+ free(lm);
goto cleanup;
}
@@ -246,6 +247,7 @@ static bool reload_table() {
lm->member = g_stringlist.add(p_mbr_name);
if (!lm->member) {
trcVerbose("OOM.");
+ free(lm);
goto cleanup;
}
} else {
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index 91a9bbcff7137..3321c32a68791 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -828,7 +828,8 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
log_warning(os, thread)("Failed to start thread \"%s\" - pthread_create failed (%d=%s) for attributes: %s.",
thread->name(), ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
// Log some OS information which might explain why creating the thread failed.
- log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
+ log_warning(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
+ log_warning(os, thread)("Checking JVM parameter MaxExpectedDataSegmentSize (currently " SIZE_FORMAT "k) might be helpful", MaxExpectedDataSegmentSize/K);
LogStream st(Log(os, thread)::info());
os::Posix::print_rlimit_info(&st);
os::print_memory_info(&st);
@@ -1010,6 +1011,10 @@ int os::current_process_id() {
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
+void os::prepare_native_symbols() {
+ LoadedLibraries::reload();
+}
+
// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {
@@ -1097,8 +1102,6 @@ bool os::dll_address_to_library_name(address addr, char* buf,
return true;
}
-// Loads .dll/.so and in case of error it checks if .dll/.so was built
-// for the same architecture as Hotspot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
log_info(os)("attempting shared library load of %s", filename);
@@ -1109,12 +1112,25 @@ void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
}
if (!filename || strlen(filename) == 0) {
- ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
+ if (ebuf != nullptr && ebuflen > 0) {
+ ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
+ }
return nullptr;
}
- // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
- void * result= ::dlopen(filename, RTLD_LAZY);
+  // RTLD_LAZY currently has the same behavior as RTLD_NOW:
+  // the library is loaded immediately, together with all its dependents.
+ int dflags = RTLD_LAZY;
+  // A filename ending with ')' indicates that we want to load
+  // a MEMBER module, i.e. a member of an archive.
+ int flen = strlen(filename);
+ if (flen > 0 && filename[flen - 1] == ')') {
+ dflags |= RTLD_MEMBER;
+ }
+
+ void* result;
+ const char* error_report = nullptr;
+ result = Aix_dlopen(filename, dflags, &error_report);
if (result != nullptr) {
Events::log_dll_message(nullptr, "Loaded shared library %s", filename);
// Reload dll cache. Don't do this in signal handling.
@@ -1123,7 +1139,6 @@ void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
return result;
} else {
// error analysis when dlopen fails
- const char* error_report = ::dlerror();
if (error_report == nullptr) {
error_report = "dlerror returned no error description";
}
@@ -3008,3 +3023,4 @@ void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {}
void os::jfr_report_memory_info() {}
#endif // INCLUDE_JFR
+
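A condensed sketch of the member-module handling that the new dll_load path introduces: on AIX, a shared object inside an archive is addressed as "libfoo.a(shr_64.o)", so a filename ending in ')' is opened with RTLD_MEMBER in addition to RTLD_LAZY. The real code routes this through Aix_dlopen() and its handle table, added in porting_aix.cpp below; this is only the flag-selection idea:

    #include <dlfcn.h>     // RTLD_MEMBER is AIX-specific
    #include <cstring>

    static void* open_aix_library(const char* filename) {
      int dflags = RTLD_LAZY;
      size_t flen = ::strlen(filename);
      if (flen > 0 && filename[flen - 1] == ')') {
        dflags |= RTLD_MEMBER;   // load a member of an archive, e.g. "libfoo.a(shr_64.o)"
      }
      return ::dlopen(filename, dflags);
    }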
diff --git a/src/hotspot/os/aix/os_aix.hpp b/src/hotspot/os/aix/os_aix.hpp
index 9756734efd217..a1db2b2be3cc7 100644
--- a/src/hotspot/os/aix/os_aix.hpp
+++ b/src/hotspot/os/aix/os_aix.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2013, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -174,6 +174,7 @@ class os::Aix {
static bool platform_print_native_stack(outputStream* st, const void* context, char *buf, int buf_size, address& lastpc);
static void* resolve_function_descriptor(void* p);
+
};
#endif // OS_AIX_OS_AIX_HPP
diff --git a/src/hotspot/os/aix/os_aix.inline.hpp b/src/hotspot/os/aix/os_aix.inline.hpp
index 5f7415e4a5181..f7e7ee8abc65a 100644
--- a/src/hotspot/os/aix/os_aix.inline.hpp
+++ b/src/hotspot/os/aix/os_aix.inline.hpp
@@ -52,7 +52,7 @@ inline bool os::must_commit_stack_guard_pages() {
inline void os::map_stack_shadow_pages(address sp) {
}
-// stubbed-out trim-native support
+// Trim-native support, stubbed out for now, may be enabled later
inline bool os::can_trim_native_heap() { return false; }
inline bool os::trim_native_heap(os::size_change_t* rss_change) { return false; }
diff --git a/src/hotspot/os/aix/os_perf_aix.cpp b/src/hotspot/os/aix/os_perf_aix.cpp
index bdb70250a76a9..f4e13374fd7c4 100644
--- a/src/hotspot/os/aix/os_perf_aix.cpp
+++ b/src/hotspot/os/aix/os_perf_aix.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2022, IBM Corp.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, IBM Corp.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -87,6 +87,7 @@ static bool read_psinfo(const u_longlong_t& pid, psinfo_t& psinfo) {
}
len = fread(&psinfo, 1, sizeof(psinfo_t), fp);
+ fclose(fp);
return len == sizeof(psinfo_t);
}
diff --git a/src/hotspot/os/aix/porting_aix.cpp b/src/hotspot/os/aix/porting_aix.cpp
index ab84dc8102770..68233097b4957 100644
--- a/src/hotspot/os/aix/porting_aix.cpp
+++ b/src/hotspot/os/aix/porting_aix.cpp
@@ -21,6 +21,12 @@
* questions.
*
*/
+// Needs to be defined first, so that the implicitly loaded xcoff.h header defines
+// the right structures to analyze the loader header of 64-bit executable files.
+// This is needed for rtv_linkedin_libpath() to get the linked-in (burned-in) library
+// search path of an XCOFF executable.
+#define __XCOFF64__
+#include <xcoff.h>
#include "asm/assembler.hpp"
#include "compiler/disassembler.hpp"
@@ -891,3 +897,275 @@ bool AixMisc::query_stack_bounds_for_current_thread(stackbounds_t* out) {
return true;
}
+
+// Variables needed to emulate Linux behavior in os::dll_load() if a library is loaded twice
+static pthread_mutex_t g_handletable_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+struct TableLocker {
+ TableLocker() { pthread_mutex_lock(&g_handletable_mutex); }
+ ~TableLocker() { pthread_mutex_unlock(&g_handletable_mutex); }
+};
+struct handletableentry{
+ void* handle;
+ ino64_t inode;
+ dev64_t devid;
+ uint refcount;
+};
+constexpr unsigned init_num_handles = 128;
+static unsigned max_handletable = 0;
+static unsigned g_handletable_used = 0;
+// We start with an empty array. At first use we will dynamically allocate memory for 128 entries.
+// If this table is full, we reallocate a memory region of double the size, and so on.
+static struct handletableentry* p_handletable = nullptr;
+
+// get the library search path burned in to the executable file during linking
+// If the libpath cannot be retrieved return an empty path
+static const char* rtv_linkedin_libpath() {
+ constexpr int bufsize = 4096;
+ static char buffer[bufsize];
+ static const char* libpath = 0;
+
+  // We only try to retrieve the libpath once. After that attempt,
+  // libpath points to buffer, which then contains either a valid libpath
+  // or an empty string.
+ if (libpath != nullptr) {
+ return libpath;
+ }
+
+ // retrieve the path to the currently running executable binary
+ // to open it
+ snprintf(buffer, 100, "/proc/%ld/object/a.out", (long)getpid());
+ FILE* f = nullptr;
+ struct xcoffhdr the_xcoff;
+ struct scnhdr the_scn;
+ struct ldhdr the_ldr;
+ constexpr size_t xcoffsz = FILHSZ + _AOUTHSZ_EXEC;
+ STATIC_ASSERT(sizeof(the_xcoff) == xcoffsz);
+ STATIC_ASSERT(sizeof(the_scn) == SCNHSZ);
+ STATIC_ASSERT(sizeof(the_ldr) == LDHDRSZ);
+  // Read the generic XCOFF header and analyze the substructures
+  // to find the burned-in libpath. On any error, fall through to the assert.
+ if (nullptr == (f = fopen(buffer, "r")) ||
+ xcoffsz != fread(&the_xcoff, 1, xcoffsz, f) ||
+ the_xcoff.filehdr.f_magic != U64_TOCMAGIC ||
+ 0 != fseek(f, (FILHSZ + the_xcoff.filehdr.f_opthdr + (the_xcoff.aouthdr.o_snloader -1)*SCNHSZ), SEEK_SET) ||
+ SCNHSZ != fread(&the_scn, 1, SCNHSZ, f) ||
+ 0 != strcmp(the_scn.s_name, ".loader") ||
+ 0 != fseek(f, the_scn.s_scnptr, SEEK_SET) ||
+ LDHDRSZ != fread(&the_ldr, 1, LDHDRSZ, f) ||
+ 0 != fseek(f, the_scn.s_scnptr + the_ldr.l_impoff, SEEK_SET) ||
+ 0 == fread(buffer, 1, bufsize, f)) {
+ buffer[0] = 0;
+    assert(false, "could not retrieve burned-in library path from the executable's loader section");
+ }
+
+ if (f) {
+ fclose(f);
+ }
+ libpath = buffer;
+
+ return libpath;
+}
+
+// Simulate the library search algorithm of dlopen() (in os::dll_load)
+static bool search_file_in_LIBPATH(const char* path, struct stat64x* stat) {
+ if (path == nullptr)
+ return false;
+
+ char* path2 = os::strdup(path);
+  // if present, strip off a trailing (shr_64.o) or similar member name
+ char* substr;
+ if (path2[strlen(path2) - 1] == ')' && (substr = strrchr(path2, '('))) {
+ *substr = 0;
+ }
+
+ bool ret = false;
+ // If FilePath contains a slash character, FilePath is used directly,
+ // and no directories are searched.
+ // But if FilePath does not start with / or . we have to prepend it with ./
+ if (strchr(path2, '/')) {
+ stringStream combined;
+ if (*path2 == '/' || *path2 == '.') {
+ combined.print("%s", path2);
+ } else {
+ combined.print("./%s", path2);
+ }
+ ret = (0 == stat64x(combined.base(), stat));
+ os::free(path2);
+ return ret;
+ }
+
+ const char* env = getenv("LIBPATH");
+ if (env == nullptr) {
+ // no LIBPATH, try with LD_LIBRARY_PATH
+ env = getenv("LD_LIBRARY_PATH");
+ }
+
+ stringStream Libpath;
+ if (env == nullptr) {
+ // no LIBPATH or LD_LIBRARY_PATH given -> try only with burned in libpath
+ Libpath.print("%s", rtv_linkedin_libpath());
+ } else if (*env == 0) {
+ // LIBPATH or LD_LIBRARY_PATH given but empty -> try first with burned
+ // in libpath and with current working directory second
+ Libpath.print("%s:.", rtv_linkedin_libpath());
+ } else {
+ // LIBPATH or LD_LIBRARY_PATH given with content -> try first with
+ // LIBPATH or LD_LIBRARY_PATH and second with burned in libpath.
+ // No check against current working directory
+ Libpath.print("%s:%s", env, rtv_linkedin_libpath());
+ }
+
+ char* libpath = os::strdup(Libpath.base());
+
+ char *saveptr, *token;
+ for (token = strtok_r(libpath, ":", &saveptr); token != nullptr; token = strtok_r(nullptr, ":", &saveptr)) {
+ stringStream combined;
+ combined.print("%s/%s", token, path2);
+ if ((ret = (0 == stat64x(combined.base(), stat))))
+ break;
+ }
+
+ os::free(libpath);
+ os::free(path2);
+ return ret;
+}
+
+// AIX-specific versions of ::dlopen() and ::dlclose(), which maintain the g_handletable.
+// This way we mimic dl handle equality for a library
+// opened a second time, as it is implemented on other platforms.
+void* Aix_dlopen(const char* filename, int Flags, const char** error_report) {
+ assert(error_report != nullptr, "error_report is nullptr");
+ void* result;
+ struct stat64x libstat;
+
+ if (false == search_file_in_LIBPATH(filename, &libstat)) {
+ // file with filename does not exist
+ #ifdef ASSERT
+ result = ::dlopen(filename, Flags);
+ assert(result == nullptr, "dll_load: Could not stat() file %s, but dlopen() worked; Have to improve stat()", filename);
+ #endif
+ *error_report = "Could not load module .\nSystem error: No such file or directory";
+ return nullptr;
+ }
+ else {
+ unsigned i = 0;
+ TableLocker lock;
+ // check if library belonging to filename is already loaded.
+ // If yes use stored handle from previous ::dlopen() and increase refcount
+ for (i = 0; i < g_handletable_used; i++) {
+ if ((p_handletable + i)->handle &&
+ (p_handletable + i)->inode == libstat.st_ino &&
+ (p_handletable + i)->devid == libstat.st_dev) {
+ (p_handletable + i)->refcount++;
+ result = (p_handletable + i)->handle;
+ break;
+ }
+ }
+ if (i == g_handletable_used) {
+ // library not yet loaded. Check if there is space left in array
+ // to store new ::dlopen() handle
+ if (g_handletable_used == max_handletable) {
+        // No space left in the array; grow it.
+ unsigned new_max = MAX2(max_handletable * 2, init_num_handles);
+ struct handletableentry* new_tab = (struct handletableentry*)::realloc(p_handletable, new_max * sizeof(struct handletableentry));
+ assert(new_tab != nullptr, "no more memory for handletable");
+ if (new_tab == nullptr) {
+ *error_report = "dlopen: no more memory for handletable";
+ return nullptr;
+ }
+ max_handletable = new_max;
+ p_handletable = new_tab;
+ }
+ // Library not yet loaded; load it, then store its handle in handle table
+ result = ::dlopen(filename, Flags);
+ if (result != nullptr) {
+ g_handletable_used++;
+ (p_handletable + i)->handle = result;
+ (p_handletable + i)->inode = libstat.st_ino;
+ (p_handletable + i)->devid = libstat.st_dev;
+ (p_handletable + i)->refcount = 1;
+ }
+ else {
+ // error analysis when dlopen fails
+ *error_report = ::dlerror();
+ if (*error_report == nullptr) {
+ *error_report = "dlerror returned no error description";
+ }
+ }
+ }
+ }
+ return result;
+}
+
+bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) {
+ unsigned i = 0;
+ bool res = false;
+
+ if (ebuf && ebuflen > 0) {
+ ebuf[0] = '\0';
+ ebuf[ebuflen - 1] = '\0';
+ }
+
+ {
+ TableLocker lock;
+ // try to find handle in array, which means library was loaded by os::dll_load() call
+ for (i = 0; i < g_handletable_used; i++) {
+ if ((p_handletable + i)->handle == libhandle) {
+ // handle found, decrease refcount
+ assert((p_handletable + i)->refcount > 0, "Sanity");
+ (p_handletable + i)->refcount--;
+ if ((p_handletable + i)->refcount > 0) {
+ // if refcount is still >0 then we have to keep library and just return true
+ return true;
+ }
+ // refcount == 0, so we have to ::dlclose() the lib
+ // and delete the entry from the array.
+ break;
+ }
+ }
+
+ // If we reach this point either the libhandle was found with refcount == 0, or the libhandle
+ // was not found in the array at all. In both cases we have to ::dlclose the lib and perform
+ // the error handling. In the first case we then also have to delete the entry from the array
+ // while in the second case we simply have to nag.
+ res = (0 == ::dlclose(libhandle));
+ if (!res) {
+      // error analysis when dlclose fails
+ const char* error_report = ::dlerror();
+ if (error_report == nullptr) {
+ error_report = "dlerror returned no error description";
+ }
+ if (ebuf != nullptr && ebuflen > 0) {
+ snprintf(ebuf, ebuflen - 1, "%s", error_report);
+ }
+ assert(false, "os::pd_dll_unload() ::dlclose() failed");
+ }
+
+ if (i < g_handletable_used) {
+ if (res) {
+ // First case: libhandle was found (with refcount == 0) and ::dlclose successful,
+ // so delete entry from array
+ g_handletable_used--;
+ // If the entry was the last one of the array, the previous g_handletable_used--
+ // is sufficient to remove the entry from the array, otherwise we move the last
+ // entry of the array to the place of the entry we want to remove and overwrite it
+ if (i < g_handletable_used) {
+ *(p_handletable + i) = *(p_handletable + g_handletable_used);
+ (p_handletable + g_handletable_used)->handle = nullptr;
+ }
+ }
+ }
+ else {
+ // Second case: libhandle was not found (library was not loaded by os::dll_load())
+ // therefore nag
+ assert(false, "os::pd_dll_unload() library was not loaded by os::dll_load()");
+ }
+ }
+
+ // Update the dll cache
+ LoadedLibraries::reload();
+
+ return res;
+} // end: os::pd_dll_unload()
+
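Stripped of locking, growth, and error handling, the bookkeeping shared by Aix_dlopen() and os::pd_dll_unload() above boils down to reference counting per (device, inode) pair. The following is a simplified single-threaded model with hypothetical names, not the table layout used in the patch:

    #include <map>
    #include <utility>

    struct Entry { void* handle; unsigned refcount; };
    // Key is (device id, inode): the same file always maps to the same handle,
    // which is how Linux dlopen() handle equality is emulated on AIX.
    static std::map<std::pair<unsigned long, unsigned long>, Entry> g_table;

    static void* add_ref(std::pair<unsigned long, unsigned long> key, void* opened_handle) {
      auto it = g_table.find(key);
      if (it != g_table.end()) {
        it->second.refcount++;                 // already loaded: reuse the old handle
        return it->second.handle;
      }
      g_table[key] = Entry{opened_handle, 1};  // first load: remember the new handle
      return opened_handle;
    }

    // Returns true if the caller should actually ::dlclose() the handle now.
    static bool drop_ref(void* handle) {
      for (auto it = g_table.begin(); it != g_table.end(); ++it) {
        if (it->second.handle == handle) {
          if (--it->second.refcount > 0) return false;  // still referenced elsewhere
          g_table.erase(it);
          return true;
        }
      }
      return true;   // unknown handle: close it and let the caller complain
    }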
diff --git a/src/hotspot/os/aix/porting_aix.hpp b/src/hotspot/os/aix/porting_aix.hpp
index 2c4c0e002a8fa..109eceee3fca5 100644
--- a/src/hotspot/os/aix/porting_aix.hpp
+++ b/src/hotspot/os/aix/porting_aix.hpp
@@ -115,4 +115,6 @@ class AixMisc {
};
+void* Aix_dlopen(const char* filename, int Flags, const char** error_report);
+
#endif // OS_AIX_PORTING_AIX_HPP
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 79ea61c253641..4a2922cb7283d 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -891,6 +891,9 @@ bool os::address_is_in_vm(address addr) {
return false;
}
+void os::prepare_native_symbols() {
+}
+
bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset,
bool demangle) {
@@ -2485,3 +2488,25 @@ void os::jfr_report_memory_info() {
}
#endif // INCLUDE_JFR
+
+bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) {
+
+ if (ebuf && ebuflen > 0) {
+ ebuf[0] = '\0';
+ ebuf[ebuflen - 1] = '\0';
+ }
+
+ bool res = (0 == ::dlclose(libhandle));
+ if (!res) {
+    // error analysis when dlclose fails
+ const char* error_report = ::dlerror();
+ if (error_report == nullptr) {
+ error_report = "dlerror returned no error description";
+ }
+ if (ebuf != nullptr && ebuflen > 0) {
+ snprintf(ebuf, ebuflen - 1, "%s", error_report);
+ }
+ }
+
+ return res;
+} // end: os::pd_dll_unload()
diff --git a/src/hotspot/os/bsd/os_bsd.inline.hpp b/src/hotspot/os/bsd/os_bsd.inline.hpp
index f30ac61e463ff..2049b337118a3 100644
--- a/src/hotspot/os/bsd/os_bsd.inline.hpp
+++ b/src/hotspot/os/bsd/os_bsd.inline.hpp
@@ -55,7 +55,7 @@ inline bool os::must_commit_stack_guard_pages() {
inline void os::map_stack_shadow_pages(address sp) {
}
-// stubbed-out trim-native support
+// Trim-native support, stubbed out for now, may be enabled later
inline bool os::can_trim_native_heap() { return false; }
inline bool os::trim_native_heap(os::size_change_t* rss_change) { return false; }
diff --git a/src/hotspot/os/linux/attachListener_linux.cpp b/src/hotspot/os/linux/attachListener_linux.cpp
index 63c5bfef12594..715603d4bafc2 100644
--- a/src/hotspot/os/linux/attachListener_linux.cpp
+++ b/src/hotspot/os/linux/attachListener_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -183,6 +183,8 @@ int LinuxAttachListener::init() {
char initial_path[UNIX_PATH_MAX]; // socket file during setup
int listener; // listener socket (file descriptor)
+ static_assert(sizeof(off_t) == 8, "Expected Large File Support in this file");
+
// register function to cleanup
if (!_atexit_registered) {
_atexit_registered = true;
@@ -445,14 +447,14 @@ AttachOperation* AttachListener::dequeue() {
void AttachListener::vm_start() {
char fn[UNIX_PATH_MAX];
- struct stat64 st;
+ struct stat st;
int ret;
int n = snprintf(fn, UNIX_PATH_MAX, "%s/.java_pid%d",
os::get_temp_directory(), os::current_process_id());
assert(n < (int)UNIX_PATH_MAX, "java_pid file name buffer overflow");
- RESTARTABLE(::stat64(fn, &st), ret);
+ RESTARTABLE(::stat(fn, &st), ret);
if (ret == 0) {
ret = ::unlink(fn);
if (ret == -1) {
@@ -472,8 +474,8 @@ int AttachListener::pd_init() {
bool AttachListener::check_socket_file() {
int ret;
- struct stat64 st;
- ret = stat64(LinuxAttachListener::path(), &st);
+ struct stat st;
+ ret = stat(LinuxAttachListener::path(), &st);
if (ret == -1) { // need to restart attach listener.
log_debug(attach)("Socket file %s does not exist - Restart Attach Listener",
LinuxAttachListener::path());
@@ -512,14 +514,14 @@ bool AttachListener::is_init_trigger() {
}
char fn[PATH_MAX + 1];
int ret;
- struct stat64 st;
+ struct stat st;
os::snprintf_checked(fn, sizeof(fn), ".attach_pid%d", os::current_process_id());
- RESTARTABLE(::stat64(fn, &st), ret);
+ RESTARTABLE(::stat(fn, &st), ret);
if (ret == -1) {
log_trace(attach)("Failed to find attach file: %s, trying alternate", fn);
snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
os::get_temp_directory(), os::current_process_id());
- RESTARTABLE(::stat64(fn, &st), ret);
+ RESTARTABLE(::stat(fn, &st), ret);
if (ret == -1) {
log_debug(attach)("Failed to find attach file: %s", fn);
}
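The stat64-to-stat swap is safe because this file is now expected to be built with 64-bit file offsets, which the static_assert added above documents; under such a build, plain stat() and off_t are already the 64-bit variants. A small sketch of the same expectation, assuming a Large-File-Support build (e.g. -D_FILE_OFFSET_BITS=64):

    #include <sys/stat.h>
    #include <sys/types.h>

    static_assert(sizeof(off_t) == 8, "expects a Large-File-Support build");

    static bool file_exists(const char* path) {
      struct stat st;
      return ::stat(path, &st) == 0;   // 64-bit stat under LFS, no stat64 needed
    }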
diff --git a/src/hotspot/os/linux/globals_linux.hpp b/src/hotspot/os/linux/globals_linux.hpp
index 9dc070233fee9..f0cbc9780b230 100644
--- a/src/hotspot/os/linux/globals_linux.hpp
+++ b/src/hotspot/os/linux/globals_linux.hpp
@@ -77,7 +77,28 @@
"Use CPU_ALLOC code path in os::active_processor_count ") \
\
product(bool, DumpPerfMapAtExit, false, DIAGNOSTIC, \
- "Write map file for Linux perf tool at exit")
+ "Write map file for Linux perf tool at exit") \
+ \
+ product(intx, TimerSlack, -1, EXPERIMENTAL, \
+ "Overrides the timer slack value to the given number of " \
+ "nanoseconds. Lower value provides more accurate " \
+ "high-precision timers, at the expense of (possibly) worse " \
+ "power efficiency. In current Linux, 0 means using the " \
+ "system-wide default, which would disable the override, but " \
+ "VM would still print the current timer slack values. Use -1 "\
+          "to disable both the override and the printouts. "          \
+ "See prctl(PR_SET_TIMERSLACK) for more info.") \
+ \
+ product(bool, THPStackMitigation, true, DIAGNOSTIC, \
+ "If THPs are unconditionally enabled on the system (mode " \
+ "\"always\"), the JVM will prevent THP from forming in " \
+          "thread stacks. When disabled, the absence of this mitigation " \
+ "allows THPs to form in thread stacks.") \
+ \
+ develop(bool, DelayThreadStartALot, false, \
+ "Artificially delay thread starts randomly for testing.") \
+ \
+
// end of RUNTIME_OS_FLAGS
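For reference, the kernel interface behind the new TimerSlack flag is prctl(2): PR_SET_TIMERSLACK sets the per-thread slack in nanoseconds, and PR_GET_TIMERSLACK (which the thread-start logging added to os_linux.cpp queries) returns the current value. A minimal sketch of how such an override could be applied, reusing the flag's -1/0 conventions; this is an illustration, not the code path the patch adds:

    #include <sys/prctl.h>
    #include <cstdio>

    static void apply_timer_slack(long slack_ns) {
      if (slack_ns >= 0) {                       // -1 means: leave the kernel default alone
        if (prctl(PR_SET_TIMERSLACK, slack_ns) != 0) {
          std::perror("PR_SET_TIMERSLACK");
        }
      }
      long current = prctl(PR_GET_TIMERSLACK);   // read back what the thread actually uses
      if (current >= 0) {
        std::printf("timer slack: %ldns\n", current);
      }
    }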
diff --git a/src/hotspot/os/linux/hugepages.cpp b/src/hotspot/os/linux/hugepages.cpp
new file mode 100644
index 0000000000000..f9f9dd497c7b6
--- /dev/null
+++ b/src/hotspot/os/linux/hugepages.cpp
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2023, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "hugepages.hpp"
+
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
+#include "runtime/os.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+
+#include <dirent.h>
+
+StaticHugePageSupport::StaticHugePageSupport() :
+ _initialized(false), _pagesizes(), _default_hugepage_size(SIZE_MAX), _inconsistent(false) {}
+
+os::PageSizes StaticHugePageSupport::pagesizes() const {
+ assert(_initialized, "Not initialized");
+ return _pagesizes;
+}
+
+size_t StaticHugePageSupport::default_hugepage_size() const {
+ assert(_initialized, "Not initialized");
+ return _default_hugepage_size;
+}
+
+// Scan /proc/meminfo and return value of Hugepagesize
+static size_t scan_default_hugepagesize() {
+ size_t pagesize = 0;
+
+ // large_page_size on Linux is used to round up heap size. x86 uses either
+ // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
+ // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
+ // page as large as 1G.
+ //
+ // Here we try to figure out page size by parsing /proc/meminfo and looking
+ // for a line with the following format:
+ // Hugepagesize: 2048 kB
+ //
+ // If we can't determine the value (e.g. /proc is not mounted, or the text
+ // format has been changed), we'll set largest page size to 0
+
+ FILE *fp = os::fopen("/proc/meminfo", "r");
+ if (fp) {
+ while (!feof(fp)) {
+ int x = 0;
+ char buf[16];
+ if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
+ if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
+ pagesize = x * K;
+ break;
+ }
+ } else {
+ // skip to next line
+ for (;;) {
+ int ch = fgetc(fp);
+ if (ch == EOF || ch == (int)'\n') break;
+ }
+ }
+ }
+ fclose(fp);
+ }
+
+ return pagesize;
+}
+
+// Given a file that contains a single (integral) number, return that number in (*out) and true;
+// in case of an error, return false.
+static bool read_number_file(const char* file, size_t* out) {
+ FILE* f = ::fopen(file, "r");
+ bool rc = false;
+ if (f != nullptr) {
+ uint64_t i = 0;
+ if (::fscanf(f, SIZE_FORMAT, out) == 1) {
+ rc = true;
+ }
+ ::fclose(f);
+ }
+ return rc;
+}
+
+static const char* const sys_hugepages = "/sys/kernel/mm/hugepages";
+
+// Scan all directories in /sys/kernel/mm/hugepages/hugepages-xxxx
+// to discover the available page sizes
+static os::PageSizes scan_hugepages() {
+
+ os::PageSizes pagesizes;
+
+ DIR* dir = opendir(sys_hugepages);
+
+ if (dir != nullptr) {
+ struct dirent *entry;
+ size_t pagesize;
+ while ((entry = readdir(dir)) != nullptr) {
+ if (entry->d_type == DT_DIR &&
+ sscanf(entry->d_name, "hugepages-%zukB", &pagesize) == 1) {
+ // The kernel is using kB, hotspot uses bytes
+ // Add each found Large Page Size to page_sizes
+ pagesize *= K;
+ pagesizes.add(pagesize);
+ }
+ }
+ closedir(dir);
+ }
+
+ return pagesizes;
+}
+
+void StaticHugePageSupport::print_on(outputStream* os) {
+ if (_initialized) {
+ os->print_cr("Static hugepage support:");
+ for (size_t s = _pagesizes.smallest(); s != 0; s = _pagesizes.next_larger(s)) {
+ os->print_cr(" hugepage size: " EXACTFMT, EXACTFMTARGS(s));
+ }
+ os->print_cr(" default hugepage size: " EXACTFMT, EXACTFMTARGS(_default_hugepage_size));
+ } else {
+ os->print_cr(" unknown.");
+ }
+ if (_inconsistent) {
+ os->print_cr(" Support inconsistent. JVM will not use static hugepages.");
+ }
+}
+
+void StaticHugePageSupport::scan_os() {
+ _default_hugepage_size = scan_default_hugepagesize();
+ if (_default_hugepage_size > 0) {
+ _pagesizes = scan_hugepages();
+ // See https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt: /proc/meminfo should match
+ // /sys/kernel/mm/hugepages/hugepages-xxxx. However, we may run on a broken kernel (e.g. on WSL)
+ // that only exposes /proc/meminfo but not /sys/kernel/mm/hugepages. In that case, we are not
+ // sure about the state of hugepage support by the kernel, so we won't use static hugepages.
+ if (!_pagesizes.contains(_default_hugepage_size)) {
+ log_info(pagesize)("Unexpected configuration: default pagesize (" SIZE_FORMAT ") "
+      "has no associated directory in /sys/kernel/mm/hugepages.", _default_hugepage_size);
+ _inconsistent = true;
+ }
+ }
+ _initialized = true;
+ LogTarget(Info, pagesize) lt;
+ if (lt.is_enabled()) {
+ LogStream ls(lt);
+ print_on(&ls);
+ }
+}
+
+THPSupport::THPSupport() :
+ _initialized(false), _mode(THPMode::never), _pagesize(SIZE_MAX) {}
+
+
+THPMode THPSupport::mode() const {
+ assert(_initialized, "Not initialized");
+ return _mode;
+}
+
+size_t THPSupport::pagesize() const {
+ assert(_initialized, "Not initialized");
+ return _pagesize;
+}
+
+void THPSupport::scan_os() {
+ // Scan /sys/kernel/mm/transparent_hugepage/enabled
+ // see mm/huge_memory.c
+ _mode = THPMode::never;
+ const char* filename = "/sys/kernel/mm/transparent_hugepage/enabled";
+ FILE* f = ::fopen(filename, "r");
+ if (f != nullptr) {
+ char buf[64];
+ char* s = fgets(buf, sizeof(buf), f);
+ assert(s == buf, "Should have worked");
+ if (::strstr(buf, "[madvise]") != nullptr) {
+ _mode = THPMode::madvise;
+ } else if (::strstr(buf, "[always]") != nullptr) {
+ _mode = THPMode::always;
+ } else {
+ assert(::strstr(buf, "[never]") != nullptr, "Weird content of %s: %s", filename, buf);
+ }
+ fclose(f);
+ }
+
+ // Scan large page size for THP from hpage_pmd_size
+ _pagesize = 0;
+ if (read_number_file("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", &_pagesize)) {
+ assert(_pagesize > 0, "Expected");
+ }
+ _initialized = true;
+
+ LogTarget(Info, pagesize) lt;
+ if (lt.is_enabled()) {
+ LogStream ls(lt);
+ print_on(&ls);
+ }
+}
+
+void THPSupport::print_on(outputStream* os) {
+ if (_initialized) {
+ os->print_cr("Transparent hugepage (THP) support:");
+ os->print_cr(" THP mode: %s",
+ (_mode == THPMode::always ? "always" : (_mode == THPMode::never ? "never" : "madvise")));
+ os->print_cr(" THP pagesize: " EXACTFMT, EXACTFMTARGS(_pagesize));
+ } else {
+ os->print_cr(" unknown.");
+ }
+}
+
+StaticHugePageSupport HugePages::_static_hugepage_support;
+THPSupport HugePages::_thp_support;
+
+void HugePages::initialize() {
+ _static_hugepage_support.scan_os();
+ _thp_support.scan_os();
+}
+
+void HugePages::print_on(outputStream* os) {
+ _static_hugepage_support.print_on(os);
+ _thp_support.print_on(os);
+}
diff --git a/src/hotspot/os/linux/hugepages.hpp b/src/hotspot/os/linux/hugepages.hpp
new file mode 100644
index 0000000000000..cb7c992d78950
--- /dev/null
+++ b/src/hotspot/os/linux/hugepages.hpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2023, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_LINUX_HUGEPAGES_HPP
+#define OS_LINUX_HUGEPAGES_HPP
+
+#include "memory/allStatic.hpp"
+#include "runtime/os.hpp" // for os::PageSizes
+#include "utilities/globalDefinitions.hpp"
+
+class outputStream;
+
+// Header contains the interface that reads OS information about
+// available hugepage support:
+// - class StaticHugePageSupport - about static (non-THP) hugepages
+// - class THPSupport - about transparent huge pages
+// and:
+// - class HugePages - a static umbrella wrapper
+
+// Information about static (non-thp) hugepages
+class StaticHugePageSupport {
+ bool _initialized;
+
+ // All supported hugepage sizes (sizes for which entries exist
+ // in /sys/kernel/mm/hugepages/hugepage-xxx)
+ os::PageSizes _pagesizes;
+
+ // Contains the default hugepage. The "default hugepage size" is the one that
+ // - is marked in /proc/meminfo as "Hugepagesize"
+ // - is the size one gets when using mmap(MAP_HUGETLB) when omitting size specifiers like MAP_HUGE_SHIFT)
+ size_t _default_hugepage_size;
+
+ // If true, the kernel support for hugepages is inconsistent
+ bool _inconsistent;
+
+public:
+ StaticHugePageSupport();
+
+ void scan_os();
+
+ os::PageSizes pagesizes() const;
+ size_t default_hugepage_size() const;
+ void print_on(outputStream* os);
+
+ bool inconsistent() const { return _inconsistent; }
+};
+
+enum class THPMode { always, never, madvise };
+
+// 2) for transparent hugepages
+class THPSupport {
+ bool _initialized;
+
+ // See /sys/kernel/mm/transparent_hugepages/enabled
+ THPMode _mode;
+
+ // Contains the THP page size
+ size_t _pagesize;
+
+public:
+
+ THPSupport();
+
+ // Queries the OS, fills in object
+ void scan_os();
+
+ THPMode mode() const;
+ size_t pagesize() const;
+ void print_on(outputStream* os);
+};
+
+// Umbrella static interface
+class HugePages : public AllStatic {
+
+ static StaticHugePageSupport _static_hugepage_support;
+ static THPSupport _thp_support;
+
+public:
+
+ static const StaticHugePageSupport& static_info() { return _static_hugepage_support; }
+ static const THPSupport& thp_info() { return _thp_support; }
+
+ static size_t default_static_hugepage_size() { return _static_hugepage_support.default_hugepage_size(); }
+ static bool supports_static_hugepages() { return default_static_hugepage_size() > 0 && !_static_hugepage_support.inconsistent(); }
+ static THPMode thp_mode() { return _thp_support.mode(); }
+ static bool supports_thp() { return thp_mode() == THPMode::madvise || thp_mode() == THPMode::always; }
+ static size_t thp_pagesize() { return _thp_support.pagesize(); }
+
+ static void initialize();
+ static void print_on(outputStream* os);
+};
+
+#endif // OS_LINUX_HUGEPAGES_HPP
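A short usage sketch of the umbrella interface declared above, roughly as the rest of this patch consumes it: os::Linux initialization calls HugePages::initialize() once, and later code only reads the cached results. The logging macros are the HotSpot ones already used in hugepages.cpp; the function name here is hypothetical:

    #include "hugepages.hpp"
    #include "logging/log.hpp"
    #include "logging/logStream.hpp"

    static void report_hugepage_setup() {
      HugePages::initialize();                    // scans /proc/meminfo and /sys once
      if (HugePages::supports_thp()) {
        size_t thp = HugePages::thp_pagesize();   // e.g. what the thread-stack mitigation keys off
        (void)thp;
      }
      LogTarget(Info, pagesize) lt;
      if (lt.is_enabled()) {
        LogStream ls(lt);
        HugePages::print_on(&ls);
      }
    }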
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index a77476e218a5e..aa8be1d897d42 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2015, 2022 SAP SE. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
+#include "hugepages.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "jvmtifiles/jvmti.h"
@@ -113,6 +114,7 @@
# include
# include
# include
+# include <sys/prctl.h>
#ifdef __GLIBC__
# include
#endif
@@ -169,7 +171,6 @@ pthread_t os::Linux::_main_thread;
bool os::Linux::_supports_fast_thread_cpu_time = false;
const char * os::Linux::_libc_version = nullptr;
const char * os::Linux::_libpthread_version = nullptr;
-size_t os::Linux::_default_large_page_size = 0;
#ifdef __GLIBC__
// We want to be buildable and runnable on older and newer glibcs, so resolve both
@@ -410,7 +411,7 @@ pid_t os::Linux::gettid() {
julong os::Linux::host_swap() {
struct sysinfo si;
sysinfo(&si);
- return (julong)si.totalswap;
+ return (julong)(si.totalswap * si.mem_unit);
}
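The point of this fix: sysinfo(2) reports totalswap and freeswap in units of mem_unit bytes rather than in bytes, so the raw field understates swap wherever mem_unit > 1. A standalone sketch of the corrected computation (illustrative program, not HotSpot code):

```
// Scale sysinfo fields by mem_unit to get byte counts.
#include <cstdio>
#include <cstdint>
#include <sys/sysinfo.h>

int main() {
  struct sysinfo si;
  if (sysinfo(&si) != 0) return 1;
  uint64_t total_swap_bytes = (uint64_t)si.totalswap * si.mem_unit;
  uint64_t free_swap_bytes  = (uint64_t)si.freeswap  * si.mem_unit;
  printf("swap: %llu bytes total, %llu bytes free\n",
         (unsigned long long)total_swap_bytes,
         (unsigned long long)free_swap_bytes);
  return 0;
}
```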
// Most versions of linux have a bug where the number of processors are
@@ -774,6 +775,10 @@ static void *thread_native_entry(Thread *thread) {
assert(osthread->pthread_id() != 0, "pthread_id was not set as expected");
+ if (DelayThreadStartALot) {
+ os::naked_short_sleep(100);
+ }
+
// call one more level start routine
thread->call_run();
@@ -910,6 +915,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
// Calculate stack size if it's not specified by caller.
size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
size_t guard_size = os::Linux::default_guard_size(thr_type);
+
// Configure glibc guard page. Must happen before calling
// get_static_tls_area_size(), which uses the guard_size.
pthread_attr_setguardsize(&attr, guard_size);
@@ -930,13 +936,16 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
}
assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");
- // Add an additional page to the stack size to reduce its chances of getting large page aligned
- // so that the stack does not get backed by a transparent huge page.
- size_t default_large_page_size = os::Linux::default_large_page_size();
- if (default_large_page_size != 0 &&
- stack_size >= default_large_page_size &&
- is_aligned(stack_size, default_large_page_size)) {
- stack_size += os::vm_page_size();
+ if (THPStackMitigation) {
+ // In addition to the glibc guard page that prevents inter-thread-stack hugepage
+ // coalescing (see comment in os::Linux::default_guard_size()), we also make
+ // sure the stack size itself is not huge-page-size aligned; that makes it much
+ // more likely for thread stack boundaries to be unaligned as well and hence
+ // protects thread stacks from being targeted by khugepaged.
+ if (HugePages::thp_pagesize() > 0 &&
+ is_aligned(stack_size, HugePages::thp_pagesize())) {
+ stack_size += os::vm_page_size();
+ }
}
int status = pthread_attr_setstacksize(&attr, stack_size);
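To make the effect of the new branch concrete, here is a worked example of the adjustment using assumed sizes (2 MiB THP page, 4 KiB base page, 2 MiB requested stack); the helper and constants are illustrative, not HotSpot's:

```
// A THP-size-aligned stack request gets bumped by one base page.
#include <cstdio>
#include <cstddef>

static bool is_aligned(size_t value, size_t alignment) {
  return (value & (alignment - 1)) == 0;
}

int main() {
  const size_t thp_pagesize = 2 * 1024 * 1024;  // assumed THP size
  const size_t vm_page_size = 4 * 1024;         // assumed base page size
  size_t stack_size = 2 * 1024 * 1024;          // requested stack size

  if (thp_pagesize > 0 && is_aligned(stack_size, thp_pagesize)) {
    stack_size += vm_page_size;                 // 2 MiB -> 2 MiB + 4 KiB
  }
  printf("stack size used: %zu bytes\n", stack_size);
  return 0;
}
```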
@@ -967,6 +976,16 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
if (ret == 0) {
log_info(os, thread)("Thread \"%s\" started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
thread->name(), (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
+
+ // Print current timer slack if override is enabled and timer slack value is available.
+ // Avoid calling prctl otherwise for extra safety.
+ if (TimerSlack >= 0) {
+ int slack = prctl(PR_GET_TIMERSLACK);
+ if (slack >= 0) {
+ log_info(os, thread)("Thread \"%s\" (pthread id: " UINTX_FORMAT ") timer slack: %dns",
+ thread->name(), (uintx) tid, slack);
+ }
+ }
} else {
log_warning(os, thread)("Failed to start thread \"%s\" - pthread_create failed (%s) for attributes: %s.",
thread->name(), os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
@@ -1441,6 +1460,9 @@ bool os::address_is_in_vm(address addr) {
return false;
}
+void os::prepare_native_symbols() {
+}
+
bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset,
bool demangle) {
@@ -1708,11 +1730,11 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
static Elf32_Half running_arch_code=EM_SH;
#elif (defined RISCV)
static Elf32_Half running_arch_code=EM_RISCV;
-#elif (defined LOONGARCH)
+#elif (defined LOONGARCH64)
static Elf32_Half running_arch_code=EM_LOONGARCH;
#else
#error Method os::dll_load requires that one of following is defined:\
- AARCH64, ALPHA, ARM, AMD64, IA32, IA64, LOONGARCH, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc
+ AARCH64, ALPHA, ARM, AMD64, IA32, IA64, LOONGARCH64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc
#endif
// Identify compatibility class for VM's architecture and library's architecture
@@ -2016,7 +2038,6 @@ const char* distro_files[] = {
"/etc/mandrake-release",
"/etc/sun-release",
"/etc/redhat-release",
- "/etc/SuSE-release",
"/etc/lsb-release",
"/etc/turbolinux-release",
"/etc/gentoo-release",
@@ -2024,6 +2045,7 @@ const char* distro_files[] = {
"/etc/angstrom-version",
"/etc/system-release",
"/etc/os-release",
+ "/etc/SuSE-release", // Deprecated in favor of os-release since SuSE 12
nullptr };
void os::Linux::print_distro_info(outputStream* st) {
@@ -2135,6 +2157,8 @@ void os::Linux::print_system_memory_info(outputStream* st) {
// https://www.kernel.org/doc/Documentation/vm/transhuge.txt
_print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/enabled",
"/sys/kernel/mm/transparent_hugepage/enabled", st);
+ _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
+ "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", st);
_print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/defrag (defrag/compaction efforts parameter)",
"/sys/kernel/mm/transparent_hugepage/defrag", st);
}
@@ -2671,6 +2695,8 @@ void os::jvm_path(char *buf, jint buflen) {
void linux_wrap_code(char* base, size_t size) {
static volatile jint cnt = 0;
+ static_assert(sizeof(off_t) == 8, "Expected Large File Support in this file");
+
if (!UseOprofile) {
return;
}
@@ -3066,6 +3092,27 @@ bool os::Linux::libnuma_init() {
}
size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
+
+ if (THPStackMitigation) {
+ // If THPs are unconditionally enabled, the following scenario can lead to huge RSS
+ // - parent thread spawns, in quick succession, multiple child threads
+ // - child threads are slow to start
+ // - thread stacks of future child threads are adjacent and get merged into one large VMA
+ // by the kernel, and subsequently transformed into huge pages by khugepaged
+ // - child threads come up, place JVM guard pages, and thereby splinter the large VMA and
+ // the huge pages into many (still paged-in) small pages.
+ // The result of that sequence is thread stacks that are fully paged-in even though the
+ // threads did not even start yet.
+ // We prevent that by letting the glibc allocate a guard page, which causes a VMA with different
+ // permission bits to separate two adjacent thread stacks and therefore prevent merging stacks
+ // into one VMA.
+ //
+ // Yes, this means we have two guard sections - the glibc and the JVM one - per thread. But the
+ // cost for that one extra protected page is dwarfed by the large win in performance and memory
+ // that avoiding interference by khugepaged buys us.
+ return os::vm_page_size();
+ }
+
// Creating guard page is very expensive. Java thread has HotSpot
// guard pages, only enable glibc guard page for non-Java threads.
// (Remember: compiler thread is a Java thread, too!)
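Returning os::vm_page_size() here means the caller passes a one-page guard size to pthread_attr_setguardsize(), which makes glibc place a PROT_NONE area below the stack and thereby gives adjacent stacks VMAs with different permissions. A minimal standalone sketch of that pthread-level effect (illustrative only; build with -pthread):

```
// One glibc guard page per thread stack.
#include <pthread.h>
#include <unistd.h>
#include <cstdio>

static void* worker(void*) { return nullptr; }

int main() {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setguardsize(&attr, (size_t)sysconf(_SC_PAGESIZE)); // one guard page
  pthread_t tid;
  if (pthread_create(&tid, &attr, worker, nullptr) == 0) {
    pthread_join(tid, nullptr);
    puts("thread ran with a one-page glibc guard area");
  }
  pthread_attr_destroy(&attr);
  return 0;
}
```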
@@ -3545,7 +3592,7 @@ bool os::Linux::transparent_huge_pages_sanity_check(bool warn,
}
int os::Linux::hugetlbfs_page_size_flag(size_t page_size) {
- if (page_size != default_large_page_size()) {
+ if (page_size != HugePages::default_static_hugepage_size()) {
return (exact_log2(page_size) << MAP_HUGE_SHIFT);
}
return 0;
@@ -3653,79 +3700,6 @@ static void set_coredump_filter(CoredumpFilterBit bit) {
static size_t _large_page_size = 0;
-static size_t scan_default_large_page_size() {
- size_t default_large_page_size = 0;
-
- // large_page_size on Linux is used to round up heap size. x86 uses either
- // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
- // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
- // page as large as 1G.
- //
- // Here we try to figure out page size by parsing /proc/meminfo and looking
- // for a line with the following format:
- // Hugepagesize: 2048 kB
- //
- // If we can't determine the value (e.g. /proc is not mounted, or the text
- // format has been changed), we'll set largest page size to 0
-
- FILE *fp = os::fopen("/proc/meminfo", "r");
- if (fp) {
- while (!feof(fp)) {
- int x = 0;
- char buf[16];
- if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
- if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
- default_large_page_size = x * K;
- break;
- }
- } else {
- // skip to next line
- for (;;) {
- int ch = fgetc(fp);
- if (ch == EOF || ch == (int)'\n') break;
- }
- }
- }
- fclose(fp);
- }
-
- return default_large_page_size;
-}
-
-static os::PageSizes scan_multiple_page_support() {
- // Scan /sys/kernel/mm/hugepages
- // to discover the available page sizes
- const char* sys_hugepages = "/sys/kernel/mm/hugepages";
- os::PageSizes page_sizes;
-
- DIR *dir = opendir(sys_hugepages);
-
- struct dirent *entry;
- size_t page_size;
- while ((entry = readdir(dir)) != nullptr) {
- if (entry->d_type == DT_DIR &&
- sscanf(entry->d_name, "hugepages-%zukB", &page_size) == 1) {
- // The kernel is using kB, hotspot uses bytes
- // Add each found Large Page Size to page_sizes
- page_sizes.add(page_size * K);
- }
- }
- closedir(dir);
-
- LogTarget(Debug, pagesize) lt;
- if (lt.is_enabled()) {
- LogStream ls(lt);
- ls.print("Large Page sizes: ");
- page_sizes.print_on(&ls);
- }
-
- return page_sizes;
-}
-
-size_t os::Linux::default_large_page_size() {
- return _default_large_page_size;
-}
-
void warn_no_large_pages_configured() {
if (!FLAG_IS_DEFAULT(UseLargePages)) {
log_warning(pagesize)("UseLargePages disabled, no large pages configured and available on the system.");
@@ -3778,10 +3752,44 @@ bool os::Linux::setup_large_page_type(size_t page_size) {
return false;
}
+struct LargePageInitializationLoggerMark {
+ ~LargePageInitializationLoggerMark() {
+ LogTarget(Info, pagesize) lt;
+ if (lt.is_enabled()) {
+ LogStream ls(lt);
+ if (UseLargePages) {
+ ls.print_cr("UseLargePages=1, UseTransparentHugePages=%d, UseHugeTLBFS=%d, UseSHM=%d",
+ UseTransparentHugePages, UseHugeTLBFS, UseSHM);
+ ls.print("Large page support enabled. Usable page sizes: ");
+ os::page_sizes().print_on(&ls);
+ ls.print_cr(". Default large page size: " EXACTFMT ".", EXACTFMTARGS(os::large_page_size()));
+ } else {
+ ls.print("Large page support disabled.");
+ }
+ }
+ }
+};
+
void os::large_page_init() {
- // Always initialize the default large page size even if large pages are not being used.
- size_t default_large_page_size = scan_default_large_page_size();
- os::Linux::_default_large_page_size = default_large_page_size;
+ LargePageInitializationLoggerMark logger;
+
+ // Query OS information first.
+ HugePages::initialize();
+
+ // If THPs are unconditionally enabled (THP mode "always"), khugepaged may attempt to
+ // coalesce small pages in thread stacks to huge pages. That costs a lot of memory and
+ // is usually unwanted for thread stacks. Therefore we attempt to prevent THP formation in
+ // thread stacks unless the user explicitly allows THPs there by switching the mitigation
+ // off with -XX:-THPStackMitigation.
+ if (HugePages::thp_mode() == THPMode::always) {
+ if (THPStackMitigation) {
+ log_info(pagesize)("JVM will attempt to prevent THPs in thread stacks.");
+ } else {
+ log_info(pagesize)("JVM will *not* prevent THPs in thread stacks. This may cause high RSS.");
+ }
+ } else {
+ FLAG_SET_ERGO(THPStackMitigation, false); // Mitigation not needed
+ }
// 1) Handle the case where we do not want to use huge pages
if (!UseLargePages &&
@@ -3801,67 +3809,77 @@ void os::large_page_init() {
return;
}
- // 2) check if large pages are configured
- if (default_large_page_size == 0) {
- // No large pages configured, return.
+ // 2) Check whether the OS supports THPs or static hugepages, respectively.
+ if (UseTransparentHugePages && !HugePages::supports_thp()) {
+ if (!FLAG_IS_DEFAULT(UseTransparentHugePages)) {
+ log_warning(pagesize)("UseTransparentHugePages disabled, transparent huge pages are not supported by the operating system.");
+ }
+ UseLargePages = UseTransparentHugePages = UseHugeTLBFS = UseSHM = false;
+ return;
+ }
+ if (!UseTransparentHugePages && !HugePages::supports_static_hugepages()) {
warn_no_large_pages_configured();
- UseLargePages = false;
- UseTransparentHugePages = false;
- UseHugeTLBFS = false;
- UseSHM = false;
+ UseLargePages = UseTransparentHugePages = UseHugeTLBFS = UseSHM = false;
return;
}
- os::PageSizes all_large_pages = scan_multiple_page_support();
-
- // 3) Consistency check and post-processing
-
- // It is unclear if /sys/kernel/mm/hugepages/ and /proc/meminfo could disagree. Manually
- // re-add the default page size to the list of page sizes to be sure.
- all_large_pages.add(default_large_page_size);
-
- // Check LargePageSizeInBytes matches an available page size and if so set _large_page_size
- // using LargePageSizeInBytes as the maximum allowed large page size. If LargePageSizeInBytes
- // doesn't match an available page size set _large_page_size to default_large_page_size
- // and use it as the maximum.
- if (FLAG_IS_DEFAULT(LargePageSizeInBytes) ||
- LargePageSizeInBytes == 0 ||
- LargePageSizeInBytes == default_large_page_size) {
- _large_page_size = default_large_page_size;
- log_info(pagesize)("Using the default large page size: " SIZE_FORMAT "%s",
- byte_size_in_exact_unit(_large_page_size),
- exact_unit_for_byte_size(_large_page_size));
+
+ if (UseTransparentHugePages) {
+ // In THP mode:
+ // - os::large_page_size() is the *THP page size*
+ // - os::pagesizes() has two members, the THP page size and the system page size
+ assert(HugePages::supports_thp() && HugePages::thp_pagesize() > 0, "Missing OS info");
+ _large_page_size = HugePages::thp_pagesize();
+ _page_sizes.add(_large_page_size);
+ _page_sizes.add(os::vm_page_size());
+
} else {
- if (all_large_pages.contains(LargePageSizeInBytes)) {
- _large_page_size = LargePageSizeInBytes;
- log_info(pagesize)("Overriding default large page size (" SIZE_FORMAT "%s) "
- "using LargePageSizeInBytes: " SIZE_FORMAT "%s",
- byte_size_in_exact_unit(default_large_page_size),
- exact_unit_for_byte_size(default_large_page_size),
- byte_size_in_exact_unit(_large_page_size),
- exact_unit_for_byte_size(_large_page_size));
- } else {
+
+ // In static hugepage mode:
+ // - os::large_page_size() is the default static hugepage size (/proc/meminfo "Hugepagesize")
+ // - os::pagesizes() contains all hugepage sizes the kernel supports, regardless of whether there
+ // are pages configured in the pool or not (from /sys/kernel/hugepages/hugepage-xxxx ...)
+ os::PageSizes all_large_pages = HugePages::static_info().pagesizes();
+ const size_t default_large_page_size = HugePages::default_static_hugepage_size();
+
+ // 3) Consistency check and post-processing
+
+ // Check LargePageSizeInBytes matches an available page size and if so set _large_page_size
+ // using LargePageSizeInBytes as the maximum allowed large page size. If LargePageSizeInBytes
+ // doesn't match an available page size set _large_page_size to default_large_page_size
+ // and use it as the maximum.
+ if (FLAG_IS_DEFAULT(LargePageSizeInBytes) ||
+ LargePageSizeInBytes == 0 ||
+ LargePageSizeInBytes == default_large_page_size) {
_large_page_size = default_large_page_size;
- log_info(pagesize)("LargePageSizeInBytes is not a valid large page size (" SIZE_FORMAT "%s) "
- "using the default large page size: " SIZE_FORMAT "%s",
- byte_size_in_exact_unit(LargePageSizeInBytes),
- exact_unit_for_byte_size(LargePageSizeInBytes),
+ log_info(pagesize)("Using the default large page size: " SIZE_FORMAT "%s",
byte_size_in_exact_unit(_large_page_size),
exact_unit_for_byte_size(_large_page_size));
+ } else {
+ if (all_large_pages.contains(LargePageSizeInBytes)) {
+ _large_page_size = LargePageSizeInBytes;
+ log_info(pagesize)("Overriding default large page size (" SIZE_FORMAT "%s) "
+ "using LargePageSizeInBytes: " SIZE_FORMAT "%s",
+ byte_size_in_exact_unit(default_large_page_size),
+ exact_unit_for_byte_size(default_large_page_size),
+ byte_size_in_exact_unit(_large_page_size),
+ exact_unit_for_byte_size(_large_page_size));
+ } else {
+ _large_page_size = default_large_page_size;
+ log_info(pagesize)("LargePageSizeInBytes is not a valid large page size (" SIZE_FORMAT "%s) "
+ "using the default large page size: " SIZE_FORMAT "%s",
+ byte_size_in_exact_unit(LargePageSizeInBytes),
+ exact_unit_for_byte_size(LargePageSizeInBytes),
+ byte_size_in_exact_unit(_large_page_size),
+ exact_unit_for_byte_size(_large_page_size));
+ }
}
- }
- // Populate _page_sizes with large page sizes less than or equal to
- // _large_page_size.
- for (size_t page_size = _large_page_size; page_size != 0;
- page_size = all_large_pages.next_smaller(page_size)) {
- _page_sizes.add(page_size);
- }
-
- LogTarget(Info, pagesize) lt;
- if (lt.is_enabled()) {
- LogStream ls(lt);
- ls.print("Usable page sizes: ");
- _page_sizes.print_on(&ls);
+ // Populate _page_sizes with large page sizes less than or equal to
+ // _large_page_size.
+ for (size_t page_size = _large_page_size; page_size != 0;
+ page_size = all_large_pages.next_smaller(page_size)) {
+ _page_sizes.add(page_size);
+ }
}
// Now determine the type of large pages to use:
@@ -4701,6 +4719,15 @@ jint os::init_2(void) {
FLAG_SET_DEFAULT(UseCodeCacheFlushing, false);
}
+ // Override the timer slack value if needed. The adjustment for the main
+ // thread will establish the setting for child threads, which would be
+ // most threads in JDK/JVM.
+ if (TimerSlack >= 0) {
+ if (prctl(PR_SET_TIMERSLACK, TimerSlack) < 0) {
+ vm_exit_during_initialization("Setting timer slack failed: %s", os::strerror(errno));
+ }
+ }
+
return JNI_OK;
}
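The timer-slack override relies on two prctl operations: PR_SET_TIMERSLACK to set the current thread's slack in nanoseconds and PR_GET_TIMERSLACK to read it back; per the comment above, the value established for the main thread carries over to threads created afterwards. A standalone sketch of the calls, with an arbitrary example value (not a recommended setting):

```
// Set and read back the current thread's timer slack.
#include <cstdio>
#include <sys/prctl.h>

int main() {
  long requested = 50000; // 50 us, arbitrary example value
  if (prctl(PR_SET_TIMERSLACK, requested) < 0) {
    perror("PR_SET_TIMERSLACK");
    return 1;
  }
  int slack = prctl(PR_GET_TIMERSLACK);
  if (slack >= 0) {
    printf("current timer slack: %d ns\n", slack);
  }
  return 0;
}
```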
@@ -4988,14 +5015,14 @@ int os::open(const char *path, int oflag, int mode) {
oflag |= O_CLOEXEC;
#endif
- int fd = ::open64(path, oflag, mode);
+ int fd = ::open(path, oflag, mode);
if (fd == -1) return -1;
//If the open succeeded, the file might still be a directory
{
- struct stat64 buf64;
- int ret = ::fstat64(fd, &buf64);
- int st_mode = buf64.st_mode;
+ struct stat buf;
+ int ret = ::fstat(fd, &buf);
+ int st_mode = buf.st_mode;
if (ret != -1) {
if ((st_mode & S_IFMT) == S_IFDIR) {
@@ -5032,17 +5059,17 @@ int os::open(const char *path, int oflag, int mode) {
int os::create_binary_file(const char* path, bool rewrite_existing) {
int oflags = O_WRONLY | O_CREAT;
oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
- return ::open64(path, oflags, S_IREAD | S_IWRITE);
+ return ::open(path, oflags, S_IREAD | S_IWRITE);
}
// return current position of file pointer
jlong os::current_file_offset(int fd) {
- return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
+ return (jlong)::lseek(fd, (off_t)0, SEEK_CUR);
}
// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
- return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
+ return (jlong)::lseek(fd, (off_t)offset, SEEK_SET);
}
// Map a block of memory.
@@ -5543,3 +5570,25 @@ bool os::trim_native_heap(os::size_change_t* rss_change) {
return false; // musl
#endif
}
+
+bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) {
+
+ if (ebuf && ebuflen > 0) {
+ ebuf[0] = '\0';
+ ebuf[ebuflen - 1] = '\0';
+ }
+
+ bool res = (0 == ::dlclose(libhandle));
+ if (!res) {
+ // error analysis when dlclose fails
+ const char* error_report = ::dlerror();
+ if (error_report == nullptr) {
+ error_report = "dlerror returned no error description";
+ }
+ if (ebuf != nullptr && ebuflen > 0) {
+ snprintf(ebuf, ebuflen - 1, "%s", error_report);
+ }
+ }
+
+ return res;
+} // end: os::pd_dll_unload()
diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp
index a7cf69f3164c8..ace7e4ab2ddef 100644
--- a/src/hotspot/os/linux/os_linux.hpp
+++ b/src/hotspot/os/linux/os_linux.hpp
@@ -49,8 +49,6 @@ class os::Linux {
static GrowableArray<int>* _cpu_to_node;
static GrowableArray<int>* _nindex_to_node;
- static size_t _default_large_page_size;
-
static julong available_memory_in_container();
protected:
@@ -77,10 +75,6 @@ class os::Linux {
static GrowableArray<int>* cpu_to_node() { return _cpu_to_node; }
static GrowableArray<int>* nindex_to_node() { return _nindex_to_node; }
- static size_t default_large_page_size();
- static size_t scan_default_large_page_size();
- static os::PageSizes scan_multiple_page_support();
-
static bool setup_large_page_type(size_t page_size);
static bool transparent_huge_pages_sanity_check(bool warn, size_t pages_size);
static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
diff --git a/src/hotspot/os/linux/trimCHeapDCmd.cpp b/src/hotspot/os/linux/trimCHeapDCmd.cpp
index 33dd6f3a5bdc7..26c066ffe5b6e 100644
--- a/src/hotspot/os/linux/trimCHeapDCmd.cpp
+++ b/src/hotspot/os/linux/trimCHeapDCmd.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "logging/log.hpp"
#include "runtime/os.inline.hpp"
#include "trimCHeapDCmd.hpp"
#include "utilities/debug.hpp"
@@ -42,6 +43,9 @@ void TrimCLibcHeapDCmd::execute(DCmdSource source, TRAPS) {
const char sign = sc.after < sc.before ? '-' : '+';
_output->print_cr("RSS+Swap: " PROPERFMT "->" PROPERFMT " (%c" PROPERFMT ")",
PROPERFMTARGS(sc.before), PROPERFMTARGS(sc.after), sign, PROPERFMTARGS(delta));
+ // Also log if native trim log is active
+ log_info(trimnative)("Manual Trim: " PROPERFMT "->" PROPERFMT " (%c" PROPERFMT ")",
+ PROPERFMTARGS(sc.before), PROPERFMTARGS(sc.after), sign, PROPERFMTARGS(delta));
} else {
_output->print_cr("(no details available).");
}
diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp
index 893ca3a7b73f7..af7de184b1471 100644
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,6 +50,7 @@
#include "utilities/vmError.hpp"
#ifdef AIX
#include "loadlib_aix.hpp"
+#include "os_aix.hpp"
#endif
#ifdef LINUX
#include "os_linux.hpp"
@@ -293,6 +294,7 @@ static char* reserve_mmapped_memory(size_t bytes, char* requested_addr) {
}
static int util_posix_fallocate(int fd, off_t offset, off_t len) {
+ static_assert(sizeof(off_t) == 8, "Expected Large File Support in this file");
#ifdef __APPLE__
fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len };
// First we try to get a continuous chunk of disk space
@@ -733,34 +735,29 @@ void os::dll_unload(void *lib) {
if (l_path == nullptr) {
l_path = "";
}
- int res = ::dlclose(lib);
- if (res == 0) {
+ char ebuf[1024];
+ bool res = os::pd_dll_unload(lib, ebuf, sizeof(ebuf));
+
+ if (res) {
Events::log_dll_message(nullptr, "Unloaded shared library \"%s\" [" INTPTR_FORMAT "]",
l_path, p2i(lib));
log_info(os)("Unloaded shared library \"%s\" [" INTPTR_FORMAT "]", l_path, p2i(lib));
} else {
- const char* error_report = ::dlerror();
- if (error_report == nullptr) {
- error_report = "dlerror returned no error description";
- }
-
Events::log_dll_message(nullptr, "Attempt to unload shared library \"%s\" [" INTPTR_FORMAT "] failed, %s",
- l_path, p2i(lib), error_report);
+ l_path, p2i(lib), ebuf);
log_info(os)("Attempt to unload shared library \"%s\" [" INTPTR_FORMAT "] failed, %s",
- l_path, p2i(lib), error_report);
+ l_path, p2i(lib), ebuf);
}
- // Update the dll cache
- AIX_ONLY(LoadedLibraries::reload());
LINUX_ONLY(os::free(l_pathdup));
}
jlong os::lseek(int fd, jlong offset, int whence) {
- return (jlong) BSD_ONLY(::lseek) NOT_BSD(::lseek64)(fd, offset, whence);
+ return (jlong) AIX_ONLY(::lseek64) NOT_AIX(::lseek)(fd, offset, whence);
}
int os::ftruncate(int fd, jlong length) {
- return BSD_ONLY(::ftruncate) NOT_BSD(::ftruncate64)(fd, length);
+ return AIX_ONLY(::ftruncate64) NOT_AIX(::ftruncate)(fd, length);
}
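The plain lseek/ftruncate calls are sufficient here because the build either defines _FILE_OFFSET_BITS=64 (glibc) or runs on a platform where off_t is natively 64-bit, so the non-suffixed calls already take 64-bit offsets; the static_assert added earlier in this file checks exactly that. A tiny sketch of the same check outside HotSpot (file name and flags are illustrative):

```
// Compile e.g. with: g++ -D_FILE_OFFSET_BITS=64 lfs_check.cpp
#include <sys/types.h>
#include <unistd.h>

static_assert(sizeof(off_t) == 8, "Large File Support expected: off_t must be 64-bit");

int main() {
  // With 64-bit off_t, plain lseek takes and returns 64-bit offsets directly.
  off_t pos = lseek(0, 0, SEEK_CUR);
  (void)pos;
  return 0;
}
```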
const char* os::get_current_directory(char *buf, size_t buflen) {
diff --git a/src/hotspot/os/posix/os_posix.hpp b/src/hotspot/os/posix/os_posix.hpp
index 051b23d51bdb7..9e98f4316c4a5 100644
--- a/src/hotspot/os/posix/os_posix.hpp
+++ b/src/hotspot/os/posix/os_posix.hpp
@@ -31,7 +31,7 @@
// Note: the Posix API aims to capture functionality available on all Posix
// compliant platforms, but in practice the implementations may depend on
-// non-Posix functionality. For example, the use of lseek64 and ftruncate64.
+// non-Posix functionality.
// This use of non-Posix API's is made possible by compiling/linking in a mode
// that is not restricted to being fully Posix complaint, such as by declaring
// -D_GNU_SOURCE. But be aware that in doing so we may enable non-Posix
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index 69c957135c09a..a6aa82012e851 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -1395,6 +1395,9 @@ const char* os::get_current_directory(char *buf, size_t buflen) {
return _getcwd(buf, n);
}
+void os::prepare_native_symbols() {
+}
+
//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
diff --git a/src/hotspot/os/windows/os_windows.inline.hpp b/src/hotspot/os/windows/os_windows.inline.hpp
index d765966b7c665..ce5647e275b25 100644
--- a/src/hotspot/os/windows/os_windows.inline.hpp
+++ b/src/hotspot/os/windows/os_windows.inline.hpp
@@ -97,7 +97,7 @@ inline void PlatformMonitor::notify_all() {
WakeAllConditionVariable(&_cond);
}
-// stubbed-out trim-native support
+// Trim-native support, stubbed out for now, may be enabled later
inline bool os::can_trim_native_heap() { return false; }
inline bool os::trim_native_heap(os::size_change_t* rss_change) { return false; }
diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
index 3882ed6770389..2ade1c7153be9 100644
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
@@ -463,7 +463,7 @@ void os::print_tos_pc(outputStream *st, const void *context) {
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::Posix::ucontext_get_pc(uc);
- print_instructions(st, pc, /*instrsize=*/4);
+ print_instructions(st, pc);
st->cr();
// Try to decode the instructions.
diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
index ae5d249a5db08..feda935c6c1e6 100644
--- a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
+++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
@@ -177,7 +177,7 @@ frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
// JVM compiled with -fno-omit-frame-pointer, so RFP is saved on the stack.
frame os::get_sender_for_C_frame(frame* fr) {
- return frame(fr->link(), fr->link(), fr->sender_pc());
+ return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}
NOINLINE frame os::current_frame() {
@@ -477,7 +477,7 @@ void os::print_tos_pc(outputStream *st, const void *context) {
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::Posix::ucontext_get_pc(uc);
- print_instructions(st, pc, 4/*native instruction size*/);
+ print_instructions(st, pc);
st->cr();
}
diff --git a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp
index 77104194b0b78..9ba246f553d88 100644
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp
@@ -153,6 +153,14 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
return cmpxchg_using_helper(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}
+// No direct support for 8-byte xchg; emulate using cmpxchg.
+template<>
+struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {};
+
+// No direct support for 8-byte add; emulate using cmpxchg.
+template<>
+struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {};
+
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
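The new PlatformXchg<8>/PlatformAdd<8> specializations fall back to helper templates that emulate the operation with a compare-exchange retry loop, since 32-bit x86 has a native 8-byte cmpxchg (via the existing helper) but no native 8-byte exchange or add. A generic sketch of that emulation pattern, shown here with std::atomic rather than HotSpot's Atomic class (illustration only):

```
// Emulating exchange and add-then-fetch with a CAS retry loop.
#include <atomic>
#include <cstdint>
#include <cstdio>

int64_t xchg_via_cas(std::atomic<int64_t>& dest, int64_t new_value) {
  int64_t old_value = dest.load(std::memory_order_relaxed);
  while (!dest.compare_exchange_weak(old_value, new_value,
                                     std::memory_order_seq_cst,
                                     std::memory_order_relaxed)) {
    // old_value was refreshed by the failed CAS; retry.
  }
  return old_value;
}

int64_t add_via_cas(std::atomic<int64_t>& dest, int64_t add_value) {
  int64_t old_value = dest.load(std::memory_order_relaxed);
  while (!dest.compare_exchange_weak(old_value, old_value + add_value,
                                     std::memory_order_seq_cst,
                                     std::memory_order_relaxed)) {
    // desired value is recomputed from the refreshed old_value each retry.
  }
  return old_value + add_value;  // add_then_fetch semantics: return the new value
}

int main() {
  std::atomic<int64_t> v{41};
  printf("xchg returned %lld, add returned %lld\n",
         (long long)xchg_via_cas(v, 1), (long long)add_via_cas(v, 41));
  return 0;
}
```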
diff --git a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
index 961464fa38dea..0fb1b958339dd 100644
--- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
+++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
@@ -854,7 +854,7 @@ void os::print_tos_pc(outputStream *st, const void *context) {
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::Posix::ucontext_get_pc(uc);
- print_instructions(st, pc, sizeof(char));
+ print_instructions(st, pc);
st->cr();
}
diff --git a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp
index b39f03fd0d2a7..37cd93e765d97 100644
--- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp
+++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp
@@ -31,135 +31,6 @@
// Implementation of class atomic
-#ifdef M68K
-
-/*
- * __m68k_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Returns newval on success and oldval if no exchange happened.
- * This implementation is processor specific and works on
- * 68020 68030 68040 and 68060.
- *
- * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
- * instruction.
- * Using a kernelhelper would be better for arch complete implementation.
- *
- */
-
-static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
- int ret;
- __asm __volatile ("cas%.l %0,%2,%1"
- : "=d" (ret), "+m" (*(ptr))
- : "d" (newval), "0" (oldval));
- return ret;
-}
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
- is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
- `*PTR' before the operation.*/
-static inline int m68k_compare_and_swap(int newval,
- volatile int *ptr,
- int oldval) {
- for (;;) {
- int prev = *ptr;
- if (prev != oldval)
- return prev;
-
- if (__m68k_cmpxchg (prev, newval, ptr) == newval)
- // Success.
- return prev;
-
- // We failed even though prev == oldval. Try again.
- }
-}
-
-/* Atomically add an int to memory. */
-static inline int m68k_add_then_fetch(int add_value, volatile int *ptr) {
- for (;;) {
- // Loop until success.
-
- int prev = *ptr;
-
- if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
- return prev + add_value;
- }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
- contents of `*PTR'. */
-static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
- for (;;) {
- // Loop until success.
- int prev = *ptr;
-
- if (__m68k_cmpxchg (prev, newval, ptr) == prev)
- return prev;
- }
-}
-#endif // M68K
-
-#ifdef ARM
-
-/*
- * __kernel_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- */
-
-typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
-#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
-
-
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
- is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
- `*PTR' before the operation.*/
-static inline int arm_compare_and_swap(int newval,
- volatile int *ptr,
- int oldval) {
- for (;;) {
- int prev = *ptr;
- if (prev != oldval)
- return prev;
-
- if (__kernel_cmpxchg (prev, newval, ptr) == 0)
- // Success.
- return prev;
-
- // We failed even though prev == oldval. Try again.
- }
-}
-
-/* Atomically add an int to memory. */
-static inline int arm_add_then_fetch(int add_value, volatile int *ptr) {
- for (;;) {
- // Loop until a __kernel_cmpxchg succeeds.
-
- int prev = *ptr;
-
- if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
- return prev + add_value;
- }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
- contents of `*PTR'. */
-static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
- for (;;) {
- // Loop until a __kernel_cmpxchg succeeds.
- int prev = *ptr;
-
- if (__kernel_cmpxchg (prev, newval, ptr) == 0)
- return prev;
- }
-}
-#endif // ARM
-
template<size_t byte_size>
struct Atomic::PlatformAdd {
template<typename D, typename I>
@@ -178,17 +49,9 @@ inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
-#ifdef ARM
- return add_using_helper(arm_add_then_fetch, dest, add_value);
-#else
-#ifdef M68K
- return add_using_helper(m68k_add_then_fetch, dest, add_value);
-#else
D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
FULL_MEM_BARRIER;
return res;
-#endif // M68K
-#endif // ARM
}
template<>
@@ -209,26 +72,10 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
-#ifdef ARM
- return xchg_using_helper(arm_lock_test_and_set, dest, exchange_value);
-#else
-#ifdef M68K
- return xchg_using_helper(m68k_lock_test_and_set, dest, exchange_value);
-#else
- // __sync_lock_test_and_set is a bizarrely named atomic exchange
- // operation. Note that some platforms only support this with the
- // limitation that the only valid value to store is the immediate
- // constant 1. There is a test for this in JNI_CreateJavaVM().
- T result = __sync_lock_test_and_set (dest, exchange_value);
- // All atomic operations are expected to be full memory barriers
- // (see atomic.hpp). However, __sync_lock_test_and_set is not
- // a full memory barrier, but an acquire barrier. Hence, this added
- // barrier. Some platforms (notably ARM) have peculiarities with
- // their barrier implementations, delegate it to OrderAccess.
- OrderAccess::fence();
+ FULL_MEM_BARRIER;
+ T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
+ FULL_MEM_BARRIER;
return result;
-#endif // M68K
-#endif // ARM
}
template<>
@@ -237,8 +84,9 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
- T result = __sync_lock_test_and_set (dest, exchange_value);
- OrderAccess::fence();
+ FULL_MEM_BARRIER;
+ T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
+ FULL_MEM_BARRIER;
return result;
}
@@ -253,20 +101,12 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
-#ifdef ARM
- return cmpxchg_using_helper(arm_compare_and_swap, dest, compare_value, exchange_value);
-#else
-#ifdef M68K
- return cmpxchg_using_helper(m68k_compare_and_swap, dest, compare_value, exchange_value);
-#else
T value = compare_value;
FULL_MEM_BARRIER;
__atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
FULL_MEM_BARRIER;
return value;
-#endif // M68K
-#endif // ARM
}
template<>
@@ -286,31 +126,19 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
}
// Atomically copy 64 bits of data
-static void atomic_copy64(const volatile void *src, volatile void *dst) {
-#if defined(PPC32)
- double tmp;
- asm volatile ("lfd %0, 0(%1)\n"
- "stfd %0, 0(%2)\n"
- : "=f"(tmp)
- : "b"(src), "b"(dst));
-#elif defined(S390) && !defined(_LP64)
- double tmp;
- asm volatile ("ld %0, 0(%1)\n"
- "std %0, 0(%2)\n"
- : "=r"(tmp)
- : "a"(src), "a"(dst));
-#else
- *(jlong *) dst = *(const jlong *) src;
-#endif
+inline void atomic_copy64(const volatile void *src, volatile void *dst) {
+ int64_t tmp;
+ __atomic_load(reinterpret_cast<const volatile int64_t*>(src), &tmp, __ATOMIC_RELAXED);
+ __atomic_store(reinterpret_cast<volatile int64_t*>(dst), &tmp, __ATOMIC_RELAXED);
}
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
- volatile int64_t dest;
- atomic_copy64(reinterpret_cast(src), reinterpret_cast(&dest));
- return PrimitiveConversions::cast(dest);
+ T dest;
+ __atomic_load(const_cast<T*>(src), &dest, __ATOMIC_RELAXED);
+ return dest;
}
template<>
@@ -318,7 +146,7 @@ template
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
- atomic_copy64(reinterpret_cast(&store_value), reinterpret_cast(dest));
+ __atomic_store(dest, &store_value, __ATOMIC_RELAXED);
}
#endif // OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP
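The rewritten xchg paths bracket a relaxed __atomic_exchange_n with FULL_MEM_BARRIER on both sides, because HotSpot's Atomic contract treats every atomic read-modify-write as a conservatively ordered (two-way fence) operation, which the old acquire-only __sync_lock_test_and_set did not give by itself. A sketch of the fence-relaxed-fence pattern with the GCC/Clang builtins (illustration; on 32-bit targets the 8-byte builtin may require linking libatomic):

```
// seq_cst fences around a relaxed exchange, matching FULL_MEM_BARRIER usage above.
#include <cstdint>
#include <cstdio>

static int64_t exchange_conservative(volatile int64_t* dest, int64_t new_value) {
  __atomic_thread_fence(__ATOMIC_SEQ_CST);   // leading full barrier
  int64_t old = __atomic_exchange_n(dest, new_value, __ATOMIC_RELAXED);
  __atomic_thread_fence(__ATOMIC_SEQ_CST);   // trailing full barrier
  return old;
}

int main() {
  volatile int64_t value = 7;
  int64_t old = exchange_conservative(&value, 42);
  printf("old=%lld new=%lld\n", (long long)old, (long long)value);
  return 0;
}
```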
diff --git a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
index 55b5efd5f6efc..c1bcf841b0848 100644
--- a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
+++ b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
@@ -331,22 +331,6 @@ extern "C" {
}
};
-/////////////////////////////////////////////////////////////////////////////
-// Implementations of atomic operations not supported by processors.
-// -- http://gcc.gnu.org/onlinedocs/gcc-4.2.1/gcc/Atomic-Builtins.html
-
-#ifndef _LP64
-extern "C" {
- long long unsigned int __sync_val_compare_and_swap_8(
- volatile void *ptr,
- long long unsigned int oldval,
- long long unsigned int newval) {
- ShouldNotCallThis();
- return 0; // silence compiler warnings
- }
-};
-#endif // !_LP64
-
#ifndef PRODUCT
void os::verify_stack_alignment() {
}
diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
index 28e17385d4339..86721c94797df 100644
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
@@ -152,8 +152,23 @@ frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
// By default, gcc always saves frame pointer rfp on this stack. This
// may get turned off by -fomit-frame-pointer.
+// The "Procedure Call Standard for the Arm 64-bit Architecture" doesn't
+// specify a location for the frame record within a stack frame (6.4.6).
+// GCC currently chooses to save it at the top of the frame (lowest address).
+// This means that using fr->sender_sp() to set the caller's frame _unextended_sp,
+// as we do in x86, is wrong. Using fr->link() instead only makes sense for
+// native frames. Setting a correct value for _unextended_sp is important
+// if this value is later used to get that frame's caller. This will happen
+// if we end up calling frame::sender_for_compiled_frame(), which will be the
+// case if the _pc is associated with a CodeBlob that has a _frame_size > 0
+// (nmethod, runtime stub, safepoint stub, etc).
frame os::get_sender_for_C_frame(frame* fr) {
- return frame(fr->link(), fr->link(), fr->sender_pc());
+ address pc = fr->sender_pc();
+ CodeBlob* cb = CodeCache::find_blob(pc);
+ bool use_codeblob = cb != nullptr && cb->frame_size() > 0;
+ assert(!use_codeblob || !Interpreter::contains(pc), "should not be an interpreter frame");
+ intptr_t* sender_sp = use_codeblob ? (fr->link() + frame::metadata_words - cb->frame_size()) : fr->link();
+ return frame(sender_sp, sender_sp, fr->link(), pc, cb, true /* allow_cb_null */);
}
NOINLINE frame os::current_frame() {
@@ -355,7 +370,7 @@ void os::print_tos_pc(outputStream *st, const void *context) {
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::fetch_frame_from_context(uc).pc();
- print_instructions(st, pc, 4/*native instruction size*/);
+ print_instructions(st, pc);
st->cr();
}
diff --git a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp
index 814dbd9aab501..513217649e633 100644
--- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp
+++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp
@@ -128,6 +128,13 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
return xchg_using_helper(ARMAtomicFuncs::_xchg_func, dest, exchange_value);
}
+// No direct support for 8-byte xchg; emulate using cmpxchg.
+template<>
+struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {};
+
+// No direct support for 8-byte add; emulate using cmpxchg.
+template<>
+struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {};
// The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering
diff --git a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
index 3bbe93fe798e4..86e8ed25618c1 100644
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
@@ -483,7 +483,7 @@ void os::print_tos_pc(outputStream *st, const void *context) {
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::Posix::ucontext_get_pc(uc);
- print_instructions(st, pc, Assembler::InstructionSize);
+ print_instructions(st, pc);
st->cr();
}
diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
index e5837af0a73f5..2e603ac06909b 100644
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
@@ -477,7 +477,7 @@ void os::print_tos_pc(outputStream *st, const void *context) {
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::Posix::ucontext_get_pc(uc);
- print_instructions(st, pc, /*instrsize=*/4);
+ print_instructions(st, pc);
st->cr();
}
diff --git a/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp b/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp
index 393c245ec0278..fdb8b340ab9ed 100644
--- a/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp
+++ b/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp
@@ -33,10 +33,23 @@
// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/
+#if defined(__clang_major__)
+#define FULL_COMPILER_ATOMIC_SUPPORT
+#elif (__GNUC__ > 13) || ((__GNUC__ == 13) && (__GNUC_MINOR__ >= 2))
+#define FULL_COMPILER_ATOMIC_SUPPORT
+#endif
+
template<size_t byte_size>
struct Atomic::PlatformAdd {
template<typename D, typename I>
D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+
+#ifndef FULL_COMPILER_ATOMIC_SUPPORT
+ // If we ever add sub-word add-and-fetch while using an older compiler,
+ // it must be implemented here, since we do not link against libatomic.
+ STATIC_ASSERT(byte_size >= 4);
+#endif
+
if (order != memory_order_relaxed) {
FULL_MEM_BARRIER;
}
@@ -55,12 +68,65 @@ struct Atomic::PlatformAdd {
}
};
+#ifndef FULL_COMPILER_ATOMIC_SUPPORT
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest __attribute__((unused)),
+ T compare_value,
+ T exchange_value,
+ atomic_memory_order order) const {
+ STATIC_ASSERT(1 == sizeof(T));
+
+ if (order != memory_order_relaxed) {
+ FULL_MEM_BARRIER;
+ }
+
+ uint32_t volatile* aligned_dst = (uint32_t volatile*)(((uintptr_t)dest) & (~((uintptr_t)0x3)));
+ int shift = 8 * (((uintptr_t)dest) - ((uintptr_t)aligned_dst)); // 0, 8, 16, 24
+
+ uint64_t mask = 0xfful << shift; // 0x00000000..FF..
+ uint64_t remask = ~mask; // 0xFFFFFFFF..00..
+
+ uint64_t w_cv = ((uint64_t)(unsigned char)compare_value) << shift; // widen to 64-bit 0x00000000..CC..
+ uint64_t w_ev = ((uint64_t)(unsigned char)exchange_value) << shift; // widen to 64-bit 0x00000000..EE..
+
+ uint64_t old_value;
+ uint64_t rc_temp;
+
+ __asm__ __volatile__ (
+ "1: lr.w %0, %2 \n\t"
+ " and %1, %0, %5 \n\t" // ignore unrelated bytes and widen to 64-bit 0x00000000..XX..
+ " bne %1, %3, 2f \n\t" // compare 64-bit w_cv
+ " and %1, %0, %6 \n\t" // remove old byte
+ " or %1, %1, %4 \n\t" // add new byte
+ " sc.w %1, %1, %2 \n\t" // store new word
+ " bnez %1, 1b \n\t"
+ "2: \n\t"
+ : /*%0*/"=&r" (old_value), /*%1*/"=&r" (rc_temp), /*%2*/"+A" (*aligned_dst)
+ : /*%3*/"r" (w_cv), /*%4*/"r" (w_ev), /*%5*/"r" (mask), /*%6*/"r" (remask)
+ : "memory" );
+
+ if (order != memory_order_relaxed) {
+ FULL_MEM_BARRIER;
+ }
+
+ return (T)((old_value & mask) >> shift);
+}
+#endif
+
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
+#ifndef FULL_COMPILER_ATOMIC_SUPPORT
+ // If we ever add sub-word xchg while using an older compiler,
+ // it must be implemented here, since we do not link against libatomic.
+ STATIC_ASSERT(byte_size >= 4);
+#endif
+
STATIC_ASSERT(byte_size == sizeof(T));
+
if (order != memory_order_relaxed) {
FULL_MEM_BARRIER;
}
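The inline assembly above emulates a 1-byte compare-and-swap by operating on the aligned 32-bit word containing the byte: mask out the target byte, compare it, splice the new byte in, and retry the LR/SC pair if the store-conditional fails. The same mask/shift logic can be written portably on top of a 32-bit CAS; the sketch below does that with the GCC builtins purely as an illustration of the technique (it assumes little-endian byte order, as on RISC-V, and is not the patch's code):

```
// Byte-in-word CAS via a 32-bit compare-exchange loop (little-endian layout).
#include <cstdint>
#include <cstdio>

uint8_t cmpxchg_byte(uint8_t* dest, uint8_t compare_value, uint8_t exchange_value) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(dest);
  uint32_t* aligned = reinterpret_cast<uint32_t*>(addr & ~uintptr_t(3));
  int shift = 8 * int(addr & 3);                 // byte position: 0, 8, 16 or 24
  uint32_t mask = uint32_t(0xff) << shift;

  uint32_t old_word = __atomic_load_n(aligned, __ATOMIC_RELAXED);
  for (;;) {
    uint8_t old_byte = uint8_t((old_word & mask) >> shift);
    if (old_byte != compare_value) return old_byte;   // compare failed
    uint32_t new_word = (old_word & ~mask) | (uint32_t(exchange_value) << shift);
    if (__atomic_compare_exchange_n(aligned, &old_word, new_word, /*weak*/true,
                                    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)) {
      return old_byte;                               // exchanged
    }
    // old_word was refreshed by the failed CAS; another byte may have changed - retry.
  }
}

int main() {
  alignas(4) uint8_t bytes[4] = {1, 2, 3, 4};
  uint8_t prev = cmpxchg_byte(&bytes[2], 3, 9);
  printf("prev=%u now=%u\n", prev, bytes[2]);        // prev=3 now=9
  return 0;
}
```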
@@ -80,6 +146,11 @@ inline T Atomic::PlatformCmpxchg::operator()(T volatile* dest __attri
T compare_value,
T exchange_value,
atomic_memory_order order) const {
+
+#ifndef FULL_COMPILER_ATOMIC_SUPPORT
+ STATIC_ASSERT(byte_size >= 4);
+#endif
+
STATIC_ASSERT(byte_size == sizeof(T));
T value = compare_value;
if (order != memory_order_relaxed) {
@@ -148,4 +219,6 @@ struct Atomic::PlatformOrderedStore
void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
};
+#undef FULL_COMPILER_ATOMIC_SUPPORT
+
#endif // OS_CPU_LINUX_RISCV_ATOMIC_LINUX_RISCV_HPP
diff --git a/src/hotspot/os_cpu/linux_riscv/orderAccess_linux_riscv.hpp b/src/hotspot/os_cpu/linux_riscv/orderAccess_linux_riscv.hpp
index 2b500376f9b48..a7dc84770f84c 100644
--- a/src/hotspot/os_cpu/linux_riscv/orderAccess_linux_riscv.hpp
+++ b/src/hotspot/os_cpu/linux_riscv/orderAccess_linux_riscv.hpp
@@ -37,7 +37,7 @@ inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore() { acquire(); }
inline void OrderAccess::storeload() { fence(); }
-#define FULL_MEM_BARRIER __sync_synchronize()
+#define FULL_MEM_BARRIER __atomic_thread_fence(__ATOMIC_SEQ_CST);
#define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE);
#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
diff --git a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
index 814ae19d639fe..eb3bf02d66cb5 100644
--- a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
+++ b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
@@ -232,7 +232,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr;
bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
- address next_pc = pc + NativeCall::instruction_size;
+ address next_pc = Assembler::locate_next_instruction(pc);
if (is_unsafe_arraycopy) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
@@ -273,7 +273,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
thread->thread_state() == _thread_in_native) &&
sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access()) {
- address next_pc = pc + NativeCall::instruction_size;
+ address next_pc = Assembler::locate_next_instruction(pc);
if (UnsafeCopyMemory::contains_pc(pc)) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
@@ -367,7 +367,7 @@ void os::print_tos_pc(outputStream *st, const void *context) {
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::fetch_frame_from_context(uc).pc();
- print_instructions(st, pc, UseRVC ? sizeof(char) : (int)NativeInstruction::instruction_size);
+ print_instructions(st, pc);
st->cr();
}
diff --git a/src/hotspot/os_cpu/linux_riscv/riscv_hwprobe.cpp b/src/hotspot/os_cpu/linux_riscv/riscv_hwprobe.cpp
index f5b2f03ff1f89..243c4b850ee43 100644
--- a/src/hotspot/os_cpu/linux_riscv/riscv_hwprobe.cpp
+++ b/src/hotspot/os_cpu/linux_riscv/riscv_hwprobe.cpp
@@ -45,6 +45,10 @@
#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
#define RISCV_HWPROBE_IMA_FD (1 << 0)
#define RISCV_HWPROBE_IMA_C (1 << 1)
+#define RISCV_HWPROBE_IMA_V (1 << 2)
+#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
+#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
+#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
@@ -100,7 +104,7 @@ static bool is_valid(int64_t key) {
static bool is_set(int64_t key, uint64_t value_mask) {
if (is_valid(key)) {
- return query[key].value & value_mask != 0;
+ return (query[key].value & value_mask) != 0;
}
return false;
}
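The added parentheses matter because != binds tighter than &, so the original expression tested `value & (value_mask != 0)`, i.e. only bit 0 of value. A micro example of the two parses:

```
// Operator precedence: & vs !=
#include <cstdio>
int main() {
  unsigned value = 0x4;             // e.g. the RISCV_HWPROBE_IMA_V bit
  unsigned mask  = 0x4;
  bool wrong = value & mask != 0;   // parsed as value & (mask != 0) -> 0x4 & 1 -> false
  bool right = (value & mask) != 0; // true
  printf("wrong=%d right=%d\n", wrong, right);
  return 0;
}
```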
@@ -129,6 +133,18 @@ void RiscvHwprobe::add_features_from_query_result() {
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_IMA_C)) {
VM_Version::ext_C.enable_feature();
}
+ if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_IMA_V)) {
+ VM_Version::ext_V.enable_feature();
+ }
+ if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZBA)) {
+ VM_Version::ext_Zba.enable_feature();
+ }
+ if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZBB)) {
+ VM_Version::ext_Zbb.enable_feature();
+ }
+ if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZBS)) {
+ VM_Version::ext_Zbs.enable_feature();
+ }
if (is_valid(RISCV_HWPROBE_KEY_CPUPERF_0)) {
VM_Version::unaligned_access.enable_feature(
query[RISCV_HWPROBE_KEY_CPUPERF_0].value & RISCV_HWPROBE_MISALIGNED_MASK);
diff --git a/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp b/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp
index 54c8ae13bfa71..f890bfbdc028c 100644
--- a/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp
+++ b/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp
@@ -149,23 +149,32 @@ void VM_Version::setup_cpu_available_features() {
void VM_Version::os_aux_features() {
uint64_t auxv = getauxval(AT_HWCAP);
- int i = 0;
- while (_feature_list[i] != nullptr) {
+ for (int i = 0; _feature_list[i] != nullptr; i++) {
+ if (_feature_list[i]->feature_bit() == HWCAP_ISA_V) {
+ // Special case for V: some dev boards only support RVV version 0.7, while
+ // the OpenJDK only supports RVV version 1.0. These two versions are not
+ // compatible with each other. Given the V bit is set through HWCAP on
+ // some custom kernels, regardless of the version, it can lead to
+ // generating V instructions on boards that don't support RVV version 1.0
+ // (ex: Sipeed LicheePi), leading to a SIGILL.
+ // That is an acceptable workaround as only Linux Kernel v6.5+ supports V,
+ // and that version already supports hwprobe anyway.
+ continue;
+ }
if ((_feature_list[i]->feature_bit() & auxv) != 0) {
_feature_list[i]->enable_feature();
}
- i++;
}
}
VM_Version::VM_MODE VM_Version::parse_satp_mode(const char* vm_mode) {
- if (!strcmp(vm_mode, "sv39")) {
+ if (!strncmp(vm_mode, "sv39", sizeof "sv39" - 1)) {
return VM_SV39;
- } else if (!strcmp(vm_mode, "sv48")) {
+ } else if (!strncmp(vm_mode, "sv48", sizeof "sv48" - 1)) {
return VM_SV48;
- } else if (!strcmp(vm_mode, "sv57")) {
+ } else if (!strncmp(vm_mode, "sv57", sizeof "sv57" - 1)) {
return VM_SV57;
- } else if (!strcmp(vm_mode, "sv64")) {
+ } else if (!strncmp(vm_mode, "sv64", sizeof "sv64" - 1)) {
return VM_SV64;
} else {
return VM_MBARE;
@@ -187,7 +196,7 @@ char* VM_Version::os_uarch_additional_features() {
if ((p = strchr(buf, ':')) != nullptr) {
if (mode == VM_NOTSET) {
if (strncmp(buf, "mmu", sizeof "mmu" - 1) == 0) {
- mode = VM_Version::parse_satp_mode(p);
+ mode = VM_Version::parse_satp_mode(p + 2);
}
}
if (ret == nullptr) {
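The p + 2 offset and the switch to strncmp go together: p points at the ':' of the /proc/cpuinfo "mmu" line, so p + 2 lands on the "svNN" token, and the prefix comparison tolerates the trailing newline (and any suffix) that strcmp would reject. A small sketch on a representative line; the exact spacing of the example line is an assumption, which is precisely why prefix matching is used:

```
// Parsing an assumed "mmu : sv39" cpuinfo line.
#include <cstdio>
#include <cstring>

int main() {
  char buf[] = "mmu\t\t: sv39\n";       // assumed example line
  char* p = strchr(buf, ':');           // points at the colon
  if (p != nullptr && strncmp(buf, "mmu", sizeof "mmu" - 1) == 0) {
    const char* mode = p + 2;           // skip ": " -> "sv39\n"
    if (strncmp(mode, "sv39", sizeof "sv39" - 1) == 0) {
      printf("parsed satp mode: sv39\n");
    }
  }
  return 0;
}
```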
@@ -224,19 +233,11 @@ void VM_Version::vendor_features() {
void VM_Version::rivos_features() {
// Enable common features not dependent on marchid/mimpid.
- ext_I.enable_feature();
- ext_M.enable_feature();
- ext_A.enable_feature();
- ext_F.enable_feature();
- ext_D.enable_feature();
- ext_C.enable_feature();
- ext_H.enable_feature();
- ext_V.enable_feature();
-
ext_Zicbom.enable_feature();
ext_Zicboz.enable_feature();
ext_Zicbop.enable_feature();
+ // In case we are running on a pre-6.5 kernel.
ext_Zba.enable_feature();
ext_Zbb.enable_feature();
ext_Zbs.enable_feature();
diff --git a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
index 206573b078ae5..033ea14ead6a4 100644
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
@@ -456,7 +456,7 @@ void os::print_tos_pc(outputStream *st, const void *context) {
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::Posix::ucontext_get_pc(uc);
- print_instructions(st, pc, /*intrsize=*/4);
+ print_instructions(st, pc);
st->cr();
}
diff --git a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
index 2e472a020683a..0156546ba9b77 100644
--- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
+++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
@@ -153,6 +153,14 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
return cmpxchg_using_helper(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}
+// No direct support for 8-byte xchg; emulate using cmpxchg.
+template<>
+struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {};
+
+// No direct support for 8-byte add; emulate using cmpxchg.
+template<>
+struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {};
+
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
index 930cf3f2657d5..6cee67a867eee 100644
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
@@ -571,7 +571,7 @@ void os::print_tos_pc(outputStream *st, const void *context) {
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::fetch_frame_from_context(uc).pc();
- print_instructions(st, pc, sizeof(char));
+ print_instructions(st, pc);
st->cr();
}
diff --git a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
index eefa8d5d06201..6409942c07de9 100644
--- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
+++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
@@ -71,17 +71,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
- // __sync_lock_test_and_set is a bizarrely named atomic exchange
- // operation. Note that some platforms only support this with the
- // limitation that the only valid value to store is the immediate
- // constant 1. There is a test for this in JNI_CreateJavaVM().
- T result = __sync_lock_test_and_set (dest, exchange_value);
- // All atomic operations are expected to be full memory barriers
- // (see atomic.hpp). However, __sync_lock_test_and_set is not
- // a full memory barrier, but an acquire barrier. Hence, this added
- // barrier. Some platforms (notably ARM) have peculiarities with
- // their barrier implementations, delegate it to OrderAccess.
- OrderAccess::fence();
+ FULL_MEM_BARRIER;
+ T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
+ FULL_MEM_BARRIER;
return result;
}
@@ -91,8 +83,9 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
- T result = __sync_lock_test_and_set (dest, exchange_value);
- OrderAccess::fence();
+ FULL_MEM_BARRIER;
+ T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
+ FULL_MEM_BARRIER;
return result;
}
@@ -134,54 +127,18 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
// Atomically copy 64 bits of data
inline void atomic_copy64(const volatile void *src, volatile void *dst) {
-#if defined(PPC32) && !defined(__SPE__)
- double tmp;
- asm volatile ("lfd %0, %2\n"
- "stfd %0, %1\n"
- : "=&f"(tmp), "=Q"(*(volatile double*)dst)
- : "Q"(*(volatile double*)src));
-#elif defined(PPC32) && defined(__SPE__)
- long tmp;
- asm volatile ("evldd %0, %2\n"
- "evstdd %0, %1\n"
- : "=&r"(tmp), "=Q"(*(volatile long*)dst)
- : "Q"(*(volatile long*)src));
-#elif defined(S390) && !defined(_LP64)
- double tmp;
- asm volatile ("ld %0, %2\n"
- "std %0, %1\n"
- : "=&f"(tmp), "=Q"(*(volatile double*)dst)
- : "Q"(*(volatile double*)src));
-#elif defined(__ARM_ARCH_7A__)
- // The only way to perform the atomic 64-bit load/store
- // is to use ldrexd/strexd for both reads and writes.
- // For store, we need to have the matching (fake) load first.
- // Put clrex between exclusive ops on src and dst for clarity.
- uint64_t tmp_r, tmp_w;
- uint32_t flag_w;
- asm volatile ("ldrexd %[tmp_r], [%[src]]\n"
- "clrex\n"
- "1:\n"
- "ldrexd %[tmp_w], [%[dst]]\n"
- "strexd %[flag_w], %[tmp_r], [%[dst]]\n"
- "cmp %[flag_w], 0\n"
- "bne 1b\n"
- : [tmp_r] "=&r" (tmp_r), [tmp_w] "=&r" (tmp_w),
- [flag_w] "=&r" (flag_w)
- : [src] "r" (src), [dst] "r" (dst)
- : "cc", "memory");
-#else
- *(jlong *) dst = *(const jlong *) src;
-#endif
+ int64_t tmp;
+ __atomic_load(reinterpret_cast<const volatile int64_t*>(src), &tmp, __ATOMIC_RELAXED);
+ __atomic_store(reinterpret_cast<volatile int64_t*>(dst), &tmp, __ATOMIC_RELAXED);
}
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
- volatile int64_t dest;
- atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
- return PrimitiveConversions::cast<T>(dest);
+ T dest;
+ __atomic_load(const_cast<T volatile*>(src), &dest, __ATOMIC_RELAXED);
+ return dest;
}
template<>
@@ -189,7 +146,7 @@ template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
- atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
+ __atomic_store(dest, &store_value, __ATOMIC_RELAXED);
}
#endif // OS_CPU_LINUX_ZERO_ATOMIC_LINUX_ZERO_HPP
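
For readers less familiar with the GCC __atomic built-ins that replace the __sync routines and the hand-written assembly above, the following standalone sketch shows the same two patterns. It is illustrative only and not part of the patch; FULL_MEM_BARRIER is assumed here to behave like a sequentially consistent fence, with __atomic_thread_fence(__ATOMIC_SEQ_CST) used as a stand-in.

#include <stdint.h>

// Exchange with full fences on both sides, mirroring the PlatformXchg hunks:
// a relaxed exchange bracketed by two full barriers.
template <typename T>
inline T fenced_exchange(T volatile* dest, T exchange_value) {
  __atomic_thread_fence(__ATOMIC_SEQ_CST);   // stand-in for FULL_MEM_BARRIER
  T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
  __atomic_thread_fence(__ATOMIC_SEQ_CST);   // stand-in for FULL_MEM_BARRIER
  return result;
}

// 64-bit copy through a temporary, mirroring the new atomic_copy64():
// each of the load and the store is atomic, the pair as a whole is not.
inline void copy64(const volatile int64_t* src, volatile int64_t* dst) {
  int64_t tmp;
  __atomic_load(src, &tmp, __ATOMIC_RELAXED);
  __atomic_store(dst, &tmp, __ATOMIC_RELAXED);
}
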
diff --git a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
index 101dbdcb4d10b..73adff11eddff 100644
--- a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
+++ b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
@@ -409,7 +409,7 @@ void os::print_tos_pc(outputStream *st, const void* ucVoid) {
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::Posix::ucontext_get_pc(uc);
- print_instructions(st, pc, sizeof(char));
+ print_instructions(st, pc);
st->cr();
}
@@ -492,22 +492,6 @@ extern "C" {
}
};
-/////////////////////////////////////////////////////////////////////////////
-// Implementations of atomic operations not supported by processors.
-// -- http://gcc.gnu.org/onlinedocs/gcc-4.2.1/gcc/Atomic-Builtins.html
-
-#ifndef _LP64
-extern "C" {
- long long unsigned int __sync_val_compare_and_swap_8(
- volatile void *ptr,
- long long unsigned int oldval,
- long long unsigned int newval) {
- ShouldNotCallThis();
- return 0; // silence compiler warnings
- }
-};
-#endif // !_LP64
-
#ifndef PRODUCT
void os::verify_stack_alignment() {
}
diff --git a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
index 73b8a46126d29..abd1a22ea2d60 100644
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
@@ -464,7 +464,7 @@ void os::print_tos_pc(outputStream *st, const void *context) {
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::fetch_frame_from_context(uc).pc();
- print_instructions(st, pc, sizeof(char));
+ print_instructions(st, pc);
st->cr();
}
diff --git a/src/hotspot/share/adlc/output_c.cpp b/src/hotspot/share/adlc/output_c.cpp
index b0c4e7eff6b81..7c6ca6a38d428 100644
--- a/src/hotspot/share/adlc/output_c.cpp
+++ b/src/hotspot/share/adlc/output_c.cpp
@@ -3124,6 +3124,9 @@ static void define_fill_new_machnode(bool used, FILE *fp_cpp) {
fprintf(fp_cpp, " if( i != cisc_operand() ) \n");
fprintf(fp_cpp, " to[i] = _opnds[i]->clone();\n");
fprintf(fp_cpp, " }\n");
+ fprintf(fp_cpp, " // Do not increment node index counter, since node reuses my index\n");
+ fprintf(fp_cpp, " Compile* C = Compile::current();\n");
+ fprintf(fp_cpp, " C->set_unique(C->unique() - 1);\n");
fprintf(fp_cpp, "}\n");
}
fprintf(fp_cpp, "\n");
diff --git a/src/hotspot/share/c1/c1_GraphBuilder.cpp b/src/hotspot/share/c1/c1_GraphBuilder.cpp
index 37f1fcbe831b2..d40d7cfb119ba 100644
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp
@@ -2087,9 +2087,10 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
assert(singleton != declared_interface, "not a unique implementor");
cha_monomorphic_target = target->find_monomorphic_target(calling_klass, declared_interface, singleton);
if (cha_monomorphic_target != nullptr) {
- if (cha_monomorphic_target->holder() != compilation()->env()->Object_klass()) {
- ciInstanceKlass* holder = cha_monomorphic_target->holder();
- ciInstanceKlass* constraint = (holder->is_subtype_of(singleton) ? holder : singleton); // avoid upcasts
+ ciInstanceKlass* holder = cha_monomorphic_target->holder();
+ ciInstanceKlass* constraint = (holder->is_subtype_of(singleton) ? holder : singleton); // avoid upcasts
+ if (holder != compilation()->env()->Object_klass() &&
+ (!type_is_exact || receiver_klass->is_subtype_of(constraint))) {
actual_recv = declared_interface;
// insert a check it's really the expected class.
@@ -2102,7 +2103,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
dependency_recorder()->assert_unique_implementor(declared_interface, singleton);
} else {
- cha_monomorphic_target = nullptr; // subtype check against Object is useless
+ cha_monomorphic_target = nullptr;
}
}
}
diff --git a/src/hotspot/share/c1/c1_Instruction.hpp b/src/hotspot/share/c1/c1_Instruction.hpp
index 7308facda6f19..f4794297da8aa 100644
--- a/src/hotspot/share/c1/c1_Instruction.hpp
+++ b/src/hotspot/share/c1/c1_Instruction.hpp
@@ -954,7 +954,7 @@ LEAF(LoadIndexed, AccessIndexed)
ciType* declared_type() const;
// generic;
- HASHING3(LoadIndexed, true, type()->tag(), array()->subst(), index()->subst())
+ HASHING3(LoadIndexed, true, elt_type(), array()->subst(), index()->subst())
};
diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp
index 5013ad9c1b6ae..5dbb6d6574e2e 100644
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp
@@ -78,6 +78,7 @@ void PhiResolverState::reset() {
PhiResolver::PhiResolver(LIRGenerator* gen)
: _gen(gen)
, _state(gen->resolver_state())
+ , _loop(nullptr)
, _temp(LIR_OprFact::illegalOpr)
{
// reinitialize the shared state arrays
diff --git a/src/hotspot/share/c1/c1_LinearScan.cpp b/src/hotspot/share/c1/c1_LinearScan.cpp
index a2663adacf079..0634d970c26f7 100644
--- a/src/hotspot/share/c1/c1_LinearScan.cpp
+++ b/src/hotspot/share/c1/c1_LinearScan.cpp
@@ -1949,6 +1949,14 @@ void LinearScan::resolve_exception_edge(XHandler* handler, int throwing_op_id, i
// interval at the throwing instruction must be searched using the operands
// of the phi function
Value from_value = phi->operand_at(handler->phi_operand());
+ if (from_value == nullptr) {
+ // We have reached here in a Kotlin application running with JVMTI
+ // capability "can_access_local_variables".
+ // The illegal state is not yet propagated to this phi. Do it here.
+ phi->make_illegal();
+ // We can skip the illegal phi edge.
+ return;
+ }
// with phi functions it can happen that the same from_value is used in
// multiple mappings, so notify move-resolver that this is allowed
diff --git a/src/hotspot/share/c1/c1_RangeCheckElimination.cpp b/src/hotspot/share/c1/c1_RangeCheckElimination.cpp
index 22bf4bc2cffb9..b03e277cfa996 100644
--- a/src/hotspot/share/c1/c1_RangeCheckElimination.cpp
+++ b/src/hotspot/share/c1/c1_RangeCheckElimination.cpp
@@ -404,8 +404,11 @@ void RangeCheckEliminator::add_access_indexed_info(InstructionList &indices, int
aii->_max = idx;
aii->_list = new AccessIndexedList();
} else if (idx >= aii->_min && idx <= aii->_max) {
- remove_range_check(ai);
- return;
+ // Guard against underflow/overflow (see 'range_cond' check in RangeCheckEliminator::in_block_motion)
+ if (aii->_max < 0 || (aii->_max + min_jint) <= aii->_min) {
+ remove_range_check(ai);
+ return;
+ }
}
aii->_min = MIN2(aii->_min, idx);
aii->_max = MAX2(aii->_max, idx);
@@ -448,9 +451,9 @@ void RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList
}
}
} else {
- int last_integer = 0;
+ jint last_integer = 0;
Instruction *last_instruction = index;
- int base = 0;
+ jint base = 0;
ArithmeticOp *ao = index->as_ArithmeticOp();
while (ao != nullptr && (ao->x()->as_Constant() || ao->y()->as_Constant()) && (ao->op() == Bytecodes::_iadd || ao->op() == Bytecodes::_isub)) {
@@ -462,12 +465,12 @@ void RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList
}
if (c) {
- int value = c->type()->as_IntConstant()->value();
+ jint value = c->type()->as_IntConstant()->value();
if (value != min_jint) {
if (ao->op() == Bytecodes::_isub) {
value = -value;
}
- base += value;
+ base = java_add(base, value);
last_integer = base;
last_instruction = other;
}
@@ -489,12 +492,12 @@ void RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList
assert(info != nullptr, "Info must not be null");
// if idx < 0, max > 0, max + idx may fall between 0 and
- // length-1 and if min < 0, min + idx may overflow and be >=
+ // length-1 and if min < 0, min + idx may underflow/overflow and be >=
// 0. The predicate wouldn't trigger but some accesses could
// be with a negative index. This test guarantees that for the
// min and max value that are kept the predicate can't let
// some incorrect accesses happen.
- bool range_cond = (info->_max < 0 || info->_max + min_jint <= info->_min);
+ bool range_cond = (info->_max < 0 || (info->_max + min_jint) <= info->_min);
// Generate code only if more than 2 range checks can be eliminated because of that.
// 2 because at least 2 comparisons are done
@@ -843,7 +846,7 @@ void RangeCheckEliminator::process_access_indexed(BlockBegin *loop_header, Block
);
remove_range_check(ai);
- } else if (_optimistic && loop_header) {
+ } else if (false && _optimistic && loop_header) {
assert(ai->array(), "Array must not be null!");
assert(ai->index(), "Index must not be null!");
diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp
index 3183bb04e77d9..41a6b151b28a0 100644
--- a/src/hotspot/share/c1/c1_Runtime1.cpp
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp
@@ -1504,6 +1504,19 @@ JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))
JRT_END
+// Check exception if AbortVMOnException flag set
+JRT_LEAF(void, Runtime1::check_abort_on_vm_exception(oopDesc* ex))
+ ResourceMark rm;
+ const char* message = nullptr;
+ if (ex->is_a(vmClasses::Throwable_klass())) {
+ oop msg = java_lang_Throwable::message(ex);
+ if (msg != nullptr) {
+ message = java_lang_String::as_utf8_string(msg);
+ }
+ }
+ Exceptions::debug_check_abort(ex->klass()->external_name(), message);
+JRT_END
+
#ifndef PRODUCT
void Runtime1::print_statistics() {
tty->print_cr("C1 Runtime statistics:");
diff --git a/src/hotspot/share/c1/c1_Runtime1.hpp b/src/hotspot/share/c1/c1_Runtime1.hpp
index 3dcb27476a6f9..525cfd7f92ee5 100644
--- a/src/hotspot/share/c1/c1_Runtime1.hpp
+++ b/src/hotspot/share/c1/c1_Runtime1.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -191,6 +191,8 @@ class Runtime1: public AllStatic {
static void predicate_failed_trap(JavaThread* current);
+ static void check_abort_on_vm_exception(oopDesc* ex);
+
static void print_statistics() PRODUCT_RETURN;
};
diff --git a/src/hotspot/share/c1/c1_ValueMap.cpp b/src/hotspot/share/c1/c1_ValueMap.cpp
index 9fe4ed8df67e4..2e600ff0f10d3 100644
--- a/src/hotspot/share/c1/c1_ValueMap.cpp
+++ b/src/hotspot/share/c1/c1_ValueMap.cpp
@@ -359,6 +359,33 @@ LoopInvariantCodeMotion::LoopInvariantCodeMotion(ShortLoopOptimizer *slo, Global
}
}
+class CheckInsertionPoint : public ValueVisitor {
+ private:
+ Value _insert;
+ bool _valid = true;
+
+ void visit(Value* vp) {
+ assert(*vp != nullptr, "value should not be null");
+ if (_insert->dominator_depth() < (*vp)->dominator_depth()) {
+ _valid = false;
+ }
+ }
+
+ public:
+ bool is_valid() { return _valid; }
+ CheckInsertionPoint(Value insert)
+ : _insert(insert) {
+ assert(insert != nullptr, "insertion point should not be null");
+ }
+};
+
+// Check that insertion point has higher dom depth than all inputs to cur
+static bool is_dominated_by_inputs(Instruction* insertion_point, Instruction* cur) {
+ CheckInsertionPoint v(insertion_point);
+ cur->input_values_do(&v);
+ return v.is_valid();
+}
+
void LoopInvariantCodeMotion::process_block(BlockBegin* block) {
TRACE_VALUE_NUMBERING(tty->print_cr("processing block B%d", block->block_id()));
@@ -394,7 +421,7 @@ void LoopInvariantCodeMotion::process_block(BlockBegin* block) {
cur_invariant = is_invariant(cvt->value());
}
- if (cur_invariant) {
+ if (cur_invariant && is_dominated_by_inputs(_insertion_point, cur)) {
// perform value numbering and mark instruction as loop-invariant
_gvn->substitute(cur);
diff --git a/src/hotspot/share/cds/cdsProtectionDomain.cpp b/src/hotspot/share/cds/cdsProtectionDomain.cpp
index d1020b992e5bc..3ad2af670f047 100644
--- a/src/hotspot/share/cds/cdsProtectionDomain.cpp
+++ b/src/hotspot/share/cds/cdsProtectionDomain.cpp
@@ -241,7 +241,7 @@ Handle CDSProtectionDomain::get_shared_protection_domain(Handle class_loader,
TRAPS) {
Handle protection_domain;
if (shared_protection_domain(shared_path_index) == nullptr) {
- Handle pd = get_protection_domain_from_classloader(class_loader, url, THREAD);
+ Handle pd = get_protection_domain_from_classloader(class_loader, url, CHECK_NH);
atomic_set_shared_protection_domain(shared_path_index, pd());
}
diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp
index b8fe625d12adb..7d12e1acaf8e4 100644
--- a/src/hotspot/share/cds/filemap.cpp
+++ b/src/hotspot/share/cds/filemap.cpp
@@ -368,7 +368,7 @@ void SharedClassPathEntry::copy_from(SharedClassPathEntry* ent, ClassLoaderData*
_from_class_path_attr = ent->_from_class_path_attr;
set_name(ent->name(), CHECK);
- if (ent->is_jar() && !ent->is_signed() && ent->manifest() != nullptr) {
+ if (ent->is_jar() && ent->manifest() != nullptr) {
Array<u1>* buf = MetadataFactory::new_array<u1>(loader_data,
ent->manifest_size(),
CHECK);
@@ -622,29 +622,6 @@ class ManifestStream: public ResourceObj {
buf[len] = 0;
return buf;
}
-
- // The return value indicates if the JAR is signed or not
- bool check_is_signed() {
- u1* attr = _current;
- bool isSigned = false;
- while (_current < _buffer_end) {
- if (*_current == '\n') {
- *_current = '\0';
- u1* value = (u1*)strchr((char*)attr, ':');
- if (value != nullptr) {
- assert(*(value+1) == ' ', "Unrecognized format" );
- if (strstr((char*)attr, "-Digest") != nullptr) {
- isSigned = true;
- break;
- }
- }
- *_current = '\n'; // restore
- attr = _current + 1;
- }
- _current ++;
- }
- return isSigned;
- }
};
void FileMapInfo::update_jar_manifest(ClassPathEntry *cpe, SharedClassPathEntry* ent, TRAPS) {
@@ -657,18 +634,14 @@ void FileMapInfo::update_jar_manifest(ClassPathEntry *cpe, SharedClassPathEntry*
if (manifest != nullptr) {
ManifestStream* stream = new ManifestStream((u1*)manifest,
manifest_size);
- if (stream->check_is_signed()) {
- ent->set_is_signed();
- } else {
- // Copy the manifest into the shared archive
- manifest = ClassLoaderExt::read_raw_manifest(THREAD, cpe, &manifest_size);
- Array<u1>* buf = MetadataFactory::new_array<u1>(loader_data,
- manifest_size,
- CHECK);
- char* p = (char*)(buf->data());
- memcpy(p, manifest, manifest_size);
- ent->set_manifest(buf);
- }
+ // Copy the manifest into the shared archive
+ manifest = ClassLoaderExt::read_raw_manifest(THREAD, cpe, &manifest_size);
+ Array<u1>* buf = MetadataFactory::new_array<u1>(loader_data,
+ manifest_size,
+ CHECK);
+ char* p = (char*)(buf->data());
+ memcpy(p, manifest, manifest_size);
+ ent->set_manifest(buf);
}
}
diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp
index 160f598796dfc..9848e8f434a27 100644
--- a/src/hotspot/share/cds/filemap.hpp
+++ b/src/hotspot/share/cds/filemap.hpp
@@ -53,7 +53,6 @@ class SharedClassPathEntry : public MetaspaceObj {
enum {
modules_image_entry,
jar_entry,
- signed_jar_entry,
dir_entry,
non_existent_entry,
unknown_entry
@@ -90,10 +89,6 @@ class SharedClassPathEntry : public MetaspaceObj {
bool is_dir() const { return _type == dir_entry; }
bool is_modules_image() const { return _type == modules_image_entry; }
bool is_jar() const { return _type == jar_entry; }
- bool is_signed() const { return _type == signed_jar_entry; }
- void set_is_signed() {
- _type = signed_jar_entry;
- }
bool from_class_path_attr() { return _from_class_path_attr; }
time_t timestamp() const { return _timestamp; }
const char* name() const;
diff --git a/src/hotspot/share/cds/unregisteredClasses.cpp b/src/hotspot/share/cds/unregisteredClasses.cpp
index 53b9debcd6295..06d006ea1bb97 100644
--- a/src/hotspot/share/cds/unregisteredClasses.cpp
+++ b/src/hotspot/share/cds/unregisteredClasses.cpp
@@ -46,11 +46,9 @@ InstanceKlass* UnregisteredClasses::load_class(Symbol* name, const char* path, T
assert(name != nullptr, "invariant");
assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
- {
- PerfClassTraceTime vmtimer(ClassLoader::perf_sys_class_lookup_time(),
- THREAD->get_thread_stat()->perf_timers_addr(),
- PerfClassTraceTime::CLASS_LOAD);
- }
+ PerfClassTraceTime vmtimer(ClassLoader::perf_app_classload_time(),
+ THREAD->get_thread_stat()->perf_timers_addr(),
+ PerfClassTraceTime::CLASS_LOAD);
Symbol* path_symbol = SymbolTable::new_symbol(path);
Handle url_classloader = get_url_classloader(path_symbol, CHECK_NULL);
diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp
index 7212b0d90235f..74208152988bd 100644
--- a/src/hotspot/share/ci/ciEnv.cpp
+++ b/src/hotspot/share/ci/ciEnv.cpp
@@ -1715,6 +1715,7 @@ void ciEnv::dump_replay_data(int compile_id) {
tty->print_cr("# Compiler replay data is saved as: %s", buffer);
} else {
tty->print_cr("# Can't open file to dump replay data.");
+ close(fd);
}
}
}
@@ -1739,6 +1740,7 @@ void ciEnv::dump_inline_data(int compile_id) {
tty->print_cr("%s", buffer);
} else {
tty->print_cr("# Can't open file to dump inline data.");
+ close(fd);
}
}
}
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index 4e1078a1cd9cd..20de43ad835d0 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -4321,6 +4321,7 @@ void ClassFileParser::check_super_interface_access(const InstanceKlass* this_kla
(same_module) ? this_klass->joint_in_module_of_loader(k) : this_klass->class_in_module_of_loader(),
(same_module) ? "" : "; ",
(same_module) ? "" : k->class_in_module_of_loader());
+ return;
} else {
// Add additional message content.
Exceptions::fthrow(
@@ -4328,6 +4329,7 @@ void ClassFileParser::check_super_interface_access(const InstanceKlass* this_kla
vmSymbols::java_lang_IllegalAccessError(),
"superinterface check failed: %s",
msg);
+ return;
}
}
}
diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp
index ab29ec3524d40..5e89673a56063 100644
--- a/src/hotspot/share/classfile/classLoader.cpp
+++ b/src/hotspot/share/classfile/classLoader.cpp
@@ -122,7 +122,6 @@ PerfCounter* ClassLoader::_perf_class_verify_selftime = nullptr;
PerfCounter* ClassLoader::_perf_classes_linked = nullptr;
PerfCounter* ClassLoader::_perf_class_link_time = nullptr;
PerfCounter* ClassLoader::_perf_class_link_selftime = nullptr;
-PerfCounter* ClassLoader::_perf_sys_class_lookup_time = nullptr;
PerfCounter* ClassLoader::_perf_shared_classload_time = nullptr;
PerfCounter* ClassLoader::_perf_sys_classload_time = nullptr;
PerfCounter* ClassLoader::_perf_app_classload_time = nullptr;
@@ -521,7 +520,8 @@ void ClassLoader::setup_app_search_path(JavaThread* current, const char *class_p
while (cp_stream.has_next()) {
const char* path = cp_stream.get_next();
- update_class_path_entry_list(current, path, false, false, false);
+ update_class_path_entry_list(current, path, /* check_for_duplicates */ true,
+ /* is_boot_append */ false, /* from_class_path_attr */ false);
}
}
@@ -666,7 +666,8 @@ void ClassLoader::setup_bootstrap_search_path_impl(JavaThread* current, const ch
} else {
// Every entry on the boot class path after the initial base piece,
// which is set by os::set_boot_path(), is considered an appended entry.
- update_class_path_entry_list(current, path, false, true, false);
+ update_class_path_entry_list(current, path, /* check_for_duplicates */ false,
+ /* is_boot_append */ true, /* from_class_path_attr */ false);
}
}
}
@@ -801,7 +802,7 @@ void ClassLoader::add_to_boot_append_entries(ClassPathEntry *new_entry) {
// Note that at dump time, ClassLoader::_app_classpath_entries are NOT used for
// loading app classes. Instead, the app class are loaded by the
// jdk/internal/loader/ClassLoaders$AppClassLoader instance.
-void ClassLoader::add_to_app_classpath_entries(JavaThread* current,
+bool ClassLoader::add_to_app_classpath_entries(JavaThread* current,
ClassPathEntry* entry,
bool check_for_duplicates) {
#if INCLUDE_CDS
@@ -811,7 +812,7 @@ void ClassLoader::add_to_app_classpath_entries(JavaThread* current,
while (e != nullptr) {
if (strcmp(e->name(), entry->name()) == 0) {
// entry already exists
- return;
+ return false;
}
e = e->next();
}
@@ -830,6 +831,7 @@ void ClassLoader::add_to_app_classpath_entries(JavaThread* current,
ClassLoaderExt::process_jar_manifest(current, entry);
}
#endif
+ return true;
}
// Returns true IFF the file/dir exists and the entry was successfully created.
@@ -852,7 +854,10 @@ bool ClassLoader::update_class_path_entry_list(JavaThread* current,
if (is_boot_append) {
add_to_boot_append_entries(new_entry);
} else {
- add_to_app_classpath_entries(current, new_entry, check_for_duplicates);
+ if (!add_to_app_classpath_entries(current, new_entry, check_for_duplicates)) {
+ // new_entry is not saved, free it now
+ delete new_entry;
+ }
}
return true;
} else {
@@ -1368,7 +1373,6 @@ void ClassLoader::initialize(TRAPS) {
NEWPERFEVENTCOUNTER(_perf_classes_linked, SUN_CLS, "linkedClasses");
NEWPERFEVENTCOUNTER(_perf_classes_verified, SUN_CLS, "verifiedClasses");
- NEWPERFTICKCOUNTER(_perf_sys_class_lookup_time, SUN_CLS, "lookupSysClassTime");
NEWPERFTICKCOUNTER(_perf_shared_classload_time, SUN_CLS, "sharedClassLoadTime");
NEWPERFTICKCOUNTER(_perf_sys_classload_time, SUN_CLS, "sysClassLoadTime");
NEWPERFTICKCOUNTER(_perf_app_classload_time, SUN_CLS, "appClassLoadTime");
diff --git a/src/hotspot/share/classfile/classLoader.hpp b/src/hotspot/share/classfile/classLoader.hpp
index 0733a5a347c82..4cb1967194ab9 100644
--- a/src/hotspot/share/classfile/classLoader.hpp
+++ b/src/hotspot/share/classfile/classLoader.hpp
@@ -168,7 +168,6 @@ class ClassLoader: AllStatic {
static PerfCounter* _perf_classes_linked;
static PerfCounter* _perf_class_link_time;
static PerfCounter* _perf_class_link_selftime;
- static PerfCounter* _perf_sys_class_lookup_time;
static PerfCounter* _perf_shared_classload_time;
static PerfCounter* _perf_sys_classload_time;
static PerfCounter* _perf_app_classload_time;
@@ -222,7 +221,7 @@ class ClassLoader: AllStatic {
CDS_ONLY(static ClassPathEntry* _last_module_path_entry;)
CDS_ONLY(static void setup_app_search_path(JavaThread* current, const char* class_path);)
CDS_ONLY(static void setup_module_search_path(JavaThread* current, const char* path);)
- static void add_to_app_classpath_entries(JavaThread* current,
+ static bool add_to_app_classpath_entries(JavaThread* current,
ClassPathEntry* entry,
bool check_for_duplicates);
CDS_ONLY(static void add_to_module_path_entries(const char* path,
@@ -289,7 +288,6 @@ class ClassLoader: AllStatic {
static PerfCounter* perf_classes_linked() { return _perf_classes_linked; }
static PerfCounter* perf_class_link_time() { return _perf_class_link_time; }
static PerfCounter* perf_class_link_selftime() { return _perf_class_link_selftime; }
- static PerfCounter* perf_sys_class_lookup_time() { return _perf_sys_class_lookup_time; }
static PerfCounter* perf_shared_classload_time() { return _perf_shared_classload_time; }
static PerfCounter* perf_sys_classload_time() { return _perf_sys_classload_time; }
static PerfCounter* perf_app_classload_time() { return _perf_app_classload_time; }
diff --git a/src/hotspot/share/classfile/classLoaderData.cpp b/src/hotspot/share/classfile/classLoaderData.cpp
index aa1f6f531077b..282a025c31d3c 100644
--- a/src/hotspot/share/classfile/classLoaderData.cpp
+++ b/src/hotspot/share/classfile/classLoaderData.cpp
@@ -1009,7 +1009,11 @@ void ClassLoaderData::print_on(outputStream* out) const {
_holder.print_on(out);
out->print_cr("");
}
- out->print_cr(" - class loader " INTPTR_FORMAT, p2i(_class_loader.ptr_raw()));
+ if (!_unloading) {
+ out->print_cr(" - class loader " INTPTR_FORMAT, p2i(_class_loader.peek()));
+ } else {
+ out->print_cr(" - class loader ");
+ }
out->print_cr(" - metaspace " INTPTR_FORMAT, p2i(_metaspace));
out->print_cr(" - unloading %s", _unloading ? "true" : "false");
out->print_cr(" - class mirror holder %s", _has_class_mirror_holder ? "true" : "false");
diff --git a/src/hotspot/share/classfile/dictionary.cpp b/src/hotspot/share/classfile/dictionary.cpp
index f60d426f5fbce..f1f02e11d8133 100644
--- a/src/hotspot/share/classfile/dictionary.cpp
+++ b/src/hotspot/share/classfile/dictionary.cpp
@@ -229,11 +229,13 @@ class DictionaryLookup : StackObj {
uintx get_hash() const {
return _name->identity_hash();
}
- bool equals(DictionaryEntry** value, bool* is_dead) {
+ bool equals(DictionaryEntry** value) {
DictionaryEntry *entry = *value;
- *is_dead = false;
return (entry->instance_klass()->name() == _name);
}
+ bool is_dead(DictionaryEntry** value) {
+ return false;
+ }
};
// Add a loaded class to the dictionary.
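
This DictionaryLookup change, and the matching ones in stringTable.cpp, symbolTable.cpp and g1CardSet.cpp further down, all move from a combined equals(value, is_dead*) callback to two separate callbacks. A generic sketch of a lookup functor in that shape, using hypothetical types rather than the real ConcurrentHashTable contract:

#include <cstdint>
#include <cstring>

struct Entry {
  const char* name;
  bool dead;
};

// Lookup functor providing the split interface used at these call sites:
// get_hash() for bucket selection, equals() for matching, is_dead() so the
// table can clean stale entries it encounters during the scan.
class NameLookup {
  const char* _name;
 public:
  explicit NameLookup(const char* name) : _name(name) {}
  uintptr_t get_hash() const {
    uintptr_t h = 0;                        // toy hash, for illustration only
    for (const char* p = _name; *p != '\0'; ++p) {
      h = h * 31 + (unsigned char)*p;
    }
    return h;
  }
  bool equals(Entry** value)  { return std::strcmp((*value)->name, _name) == 0; }
  bool is_dead(Entry** value) { return (*value)->dead; }
};
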
diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp
index a7582e535e6e5..3e00a3fab4e1f 100644
--- a/src/hotspot/share/classfile/javaClasses.cpp
+++ b/src/hotspot/share/classfile/javaClasses.cpp
@@ -86,6 +86,7 @@
#include "runtime/vframe.inline.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/align.hpp"
+#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/utf8.hpp"
@@ -2761,15 +2762,19 @@ Handle java_lang_Throwable::create_initialization_error(JavaThread* current, Han
assert(throwable.not_null(), "shouldn't be");
// Now create the message from the original exception and thread name.
- Symbol* message = java_lang_Throwable::detail_message(throwable());
ResourceMark rm(current);
+ const char *message = nullptr;
+ oop detailed_message = java_lang_Throwable::message(throwable());
+ if (detailed_message != nullptr) {
+ message = java_lang_String::as_utf8_string(detailed_message);
+ }
stringStream st;
st.print("Exception %s%s ", throwable()->klass()->name()->as_klass_external_name(),
message == nullptr ? "" : ":");
if (message == nullptr) {
st.print("[in thread \"%s\"]", current->name());
} else {
- st.print("%s [in thread \"%s\"]", message->as_C_string(), current->name());
+ st.print("%s [in thread \"%s\"]", message, current->name());
}
Symbol* exception_name = vmSymbols::java_lang_ExceptionInInitializerError();
@@ -4721,7 +4726,7 @@ class UnsafeConstantsFixup : public FieldClosure {
UnsafeConstantsFixup() {
// round up values for all static final fields
_address_size = sizeof(void*);
- _page_size = (int)os::vm_page_size();
+ _page_size = AIX_ONLY(sysconf(_SC_PAGESIZE)) NOT_AIX((int)os::vm_page_size());
_big_endian = LITTLE_ENDIAN_ONLY(false) BIG_ENDIAN_ONLY(true);
_use_unaligned_access = UseUnalignedAccesses;
_data_cache_line_flush_size = (int)VM_Version::data_cache_line_flush_size();
diff --git a/src/hotspot/share/classfile/loaderConstraints.cpp b/src/hotspot/share/classfile/loaderConstraints.cpp
index 261ec96604a46..3a34800c05172 100644
--- a/src/hotspot/share/classfile/loaderConstraints.cpp
+++ b/src/hotspot/share/classfile/loaderConstraints.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -348,7 +348,7 @@ bool LoaderConstraintTable::add_entry(Symbol* class_name,
} else if (pp1 == nullptr) {
pp2->extend_loader_constraint(class_name, loader1, klass);
} else if (pp2 == nullptr) {
- pp1->extend_loader_constraint(class_name, loader1, klass);
+ pp1->extend_loader_constraint(class_name, loader2, klass);
} else {
merge_loader_constraints(class_name, pp1, pp2, klass);
}
diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp
index 89381b1a785f1..19ac5cc7709de 100644
--- a/src/hotspot/share/classfile/stringTable.cpp
+++ b/src/hotspot/share/classfile/stringTable.cpp
@@ -53,6 +53,7 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/timerTrace.hpp"
+#include "runtime/trimNativeHeap.hpp"
#include "services/diagnosticCommand.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
@@ -175,11 +176,9 @@ class StringTableLookupJchar : StackObj {
uintx get_hash() const {
return _hash;
}
- bool equals(WeakHandle* value, bool* is_dead) {
+ bool equals(WeakHandle* value) {
oop val_oop = value->peek();
if (val_oop == nullptr) {
- // dead oop, mark this hash dead for cleaning
- *is_dead = true;
return false;
}
bool equals = java_lang_String::equals(val_oop, _str, _len);
@@ -190,6 +189,10 @@ class StringTableLookupJchar : StackObj {
_found = Handle(_thread, value->resolve());
return true;
}
+ bool is_dead(WeakHandle* value) {
+ oop val_oop = value->peek();
+ return val_oop == nullptr;
+ }
};
class StringTableLookupOop : public StackObj {
@@ -207,11 +210,9 @@ class StringTableLookupOop : public StackObj {
return _hash;
}
- bool equals(WeakHandle* value, bool* is_dead) {
+ bool equals(WeakHandle* value) {
oop val_oop = value->peek();
if (val_oop == nullptr) {
- // dead oop, mark this hash dead for cleaning
- *is_dead = true;
return false;
}
bool equals = java_lang_String::equals(_find(), val_oop);
@@ -222,6 +223,11 @@ class StringTableLookupOop : public StackObj {
_found = Handle(_thread, value->resolve());
return true;
}
+
+ bool is_dead(WeakHandle* value) {
+ oop val_oop = value->peek();
+ return val_oop == nullptr;
+ }
};
void StringTable::create_table() {
@@ -456,6 +462,7 @@ void StringTable::clean_dead_entries(JavaThread* jt) {
StringTableDeleteCheck stdc;
StringTableDoDelete stdd;
+ NativeHeapTrimmer::SuspendMark sm("stringtable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, stringtable, perf));
while(bdt.do_task(jt, stdc, stdd)) {
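
NativeHeapTrimmer::SuspendMark is used here as a scope guard so that native heap trimming stays paused for the duration of the cleanup pass. A self-contained sketch of that RAII shape with a hypothetical trimmer (not the JDK class):

#include <atomic>

// Hypothetical trimmer with a suspension counter.
class Trimmer {
  static std::atomic<int> _suspend_count;
 public:
  static void suspend() { _suspend_count.fetch_add(1, std::memory_order_relaxed); }
  static void resume()  { _suspend_count.fetch_sub(1, std::memory_order_relaxed); }
  static bool suspended() { return _suspend_count.load(std::memory_order_relaxed) > 0; }
};
std::atomic<int> Trimmer::_suspend_count{0};

// RAII scope mirroring the SuspendMark usage above: trimming is paused on
// entry and resumed automatically when the scope is left, even on early return.
class SuspendScope {
 public:
  SuspendScope()  { Trimmer::suspend(); }
  ~SuspendScope() { Trimmer::resume(); }
};
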
diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp
index 61d5ba576b54a..91eacc923c250 100644
--- a/src/hotspot/share/classfile/symbolTable.cpp
+++ b/src/hotspot/share/classfile/symbolTable.cpp
@@ -37,6 +37,7 @@
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/timerTrace.hpp"
+#include "runtime/trimNativeHeap.hpp"
#include "services/diagnosticCommand.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
@@ -337,6 +338,7 @@ Symbol* SymbolTable::lookup_common(const char* name,
}
Symbol* SymbolTable::new_symbol(const char* name, int len) {
+ assert(len <= Symbol::max_length(), "sanity");
unsigned int hash = hash_symbol(name, len, _alt_hash);
Symbol* sym = lookup_common(name, len, hash);
if (sym == nullptr) {
@@ -352,6 +354,7 @@ Symbol* SymbolTable::new_symbol(const Symbol* sym, int begin, int end) {
assert(sym->refcount() != 0, "require a valid symbol");
const char* name = (const char*)sym->base() + begin;
int len = end - begin;
+ assert(len <= Symbol::max_length(), "sanity");
unsigned int hash = hash_symbol(name, len, _alt_hash);
Symbol* found = lookup_common(name, len, hash);
if (found == nullptr) {
@@ -371,7 +374,11 @@ class SymbolTableLookup : StackObj {
uintx get_hash() const {
return _hash;
}
- bool equals(Symbol* value, bool* is_dead) {
+ // Note: When equals() returns "true", the symbol's refcount is incremented. This is
+ // needed to ensure that the symbol is kept alive before equals() returns to the caller,
+ // so that another thread cannot clean the symbol up concurrently. The caller is
+ // responsible for decrementing the refcount, when the symbol is no longer needed.
+ bool equals(Symbol* value) {
assert(value != nullptr, "expected valid value");
Symbol *sym = value;
if (sym->equals(_str, _len)) {
@@ -380,14 +387,15 @@ class SymbolTableLookup : StackObj {
return true;
} else {
assert(sym->refcount() == 0, "expected dead symbol");
- *is_dead = true;
return false;
}
} else {
- *is_dead = (sym->refcount() == 0);
return false;
}
}
+ bool is_dead(Symbol* value) {
+ return value->refcount() == 0;
+ }
};
class SymbolTableGet : public StackObj {
@@ -737,6 +745,7 @@ void SymbolTable::clean_dead_entries(JavaThread* jt) {
SymbolTableDeleteCheck stdc;
SymbolTableDoDelete stdd;
+ NativeHeapTrimmer::SuspendMark sm("symboltable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, symboltable, perf));
while (bdt.do_task(jt, stdc, stdd)) {
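
The comment added to SymbolTableLookup::equals() describes a keep-alive protocol: a successful match takes a reference before returning, so a concurrent cleaner cannot free the symbol, and the caller releases it when done. A standalone sketch of the usual try-increment-unless-zero pattern (hypothetical type, not HotSpot's Symbol):

#include <atomic>

struct RefCounted {
  std::atomic<int> refcount{1};

  // Returns false if the object is already dead (refcount has dropped to zero);
  // otherwise takes an extra reference on behalf of the caller.
  bool try_retain() {
    int c = refcount.load(std::memory_order_relaxed);
    while (c != 0) {
      if (refcount.compare_exchange_weak(c, c + 1, std::memory_order_acq_rel)) {
        return true;
      }
    }
    return false;
  }

  // The caller drops its reference once the object is no longer needed.
  void release() { refcount.fetch_sub(1, std::memory_order_acq_rel); }
};
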
diff --git a/src/hotspot/share/classfile/verifier.cpp b/src/hotspot/share/classfile/verifier.cpp
index 13ee901371ac9..34f65a0e90743 100644
--- a/src/hotspot/share/classfile/verifier.cpp
+++ b/src/hotspot/share/classfile/verifier.cpp
@@ -2250,11 +2250,12 @@ void ClassVerifier::verify_switch(
"low must be less than or equal to high in tableswitch");
return;
}
- keys = high - low + 1;
- if (keys < 0) {
+ int64_t keys64 = ((int64_t)high - low) + 1;
+ if (keys64 > 65535) { // Max code length
verify_error(ErrorContext::bad_code(bci), "too many keys in tableswitch");
return;
}
+ keys = (int)keys64;
delta = 1;
} else {
keys = (int)Bytes::get_Java_u4(aligned_bcp + jintSize);
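
The new keys64 computation above avoids a 32-bit overflow: low and high come straight from the bytecode, so high - low + 1 can exceed INT_MAX. A small sketch of the same idea (hypothetical helper, slightly stricter than the verifier's check):

#include <cstdint>

// Computes the tableswitch key count in 64 bits, rejects out-of-range counts,
// and only then narrows back to 32 bits. 65535 is the maximum bytecode length.
inline bool tableswitch_key_count(int32_t low, int32_t high, int32_t* keys_out) {
  int64_t keys64 = (int64_t)high - (int64_t)low + 1;
  if (keys64 < 1 || keys64 > 65535) {
    return false;   // reject instead of silently wrapping
  }
  *keys_out = (int32_t)keys64;
  return true;
}
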
diff --git a/src/hotspot/share/code/codeBlob.cpp b/src/hotspot/share/code/codeBlob.cpp
index 18843ad962266..afb807065ab0b 100644
--- a/src/hotspot/share/code/codeBlob.cpp
+++ b/src/hotspot/share/code/codeBlob.cpp
@@ -204,7 +204,8 @@ void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const cha
if (PrintStubCode) {
ttyLocker ttyl;
tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
- tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
+ tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
+ stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
Disassembler::decode(stub->code_begin(), stub->code_end(), tty
NOT_PRODUCT(COMMA &stub->asm_remarks()));
if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
diff --git a/src/hotspot/share/code/codeBlob.hpp b/src/hotspot/share/code/codeBlob.hpp
index eb7f49b185af0..6c40fe992aea0 100644
--- a/src/hotspot/share/code/codeBlob.hpp
+++ b/src/hotspot/share/code/codeBlob.hpp
@@ -119,11 +119,6 @@ class CodeBlob {
#ifndef PRODUCT
AsmRemarks _asm_remarks;
DbgStrings _dbg_strings;
-
- ~CodeBlob() {
- _asm_remarks.clear();
- _dbg_strings.clear();
- }
#endif // not PRODUCT
CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset,
@@ -132,10 +127,17 @@ class CodeBlob {
CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset,
int frame_size, OopMapSet* oop_maps,
bool caller_must_gc_arguments, bool compiled = false);
+
+ void operator delete(void* p) { }
+
public:
// Only used by unit test.
CodeBlob() : _type(compiler_none) {}
+ virtual ~CodeBlob() {
+ assert(_oop_maps == nullptr, "Not flushed");
+ }
+
// Returns the space needed for CodeBlob
static unsigned int allocation_size(CodeBuffer* cb, int header_size);
static unsigned int align_code_offset(int offset);
@@ -404,10 +406,6 @@ class BufferBlob: public RuntimeBlob {
BufferBlob(const char* name, int size);
BufferBlob(const char* name, int size, CodeBuffer* cb);
- // This ordinary operator delete is needed even though not used, so the
- // below two-argument operator delete will be treated as a placement
- // delete rather than an ordinary sized delete; see C++14 3.7.4.2/p2.
- void operator delete(void* p);
void* operator new(size_t s, unsigned size) throw();
public:
@@ -492,10 +490,6 @@ class RuntimeStub: public RuntimeBlob {
bool caller_must_gc_arguments
);
- // This ordinary operator delete is needed even though not used, so the
- // below two-argument operator delete will be treated as a placement
- // delete rather than an ordinary sized delete; see C++14 3.7.4.2/p2.
- void operator delete(void* p);
void* operator new(size_t s, unsigned size) throw();
public:
@@ -532,10 +526,6 @@ class SingletonBlob: public RuntimeBlob {
friend class VMStructs;
protected:
- // This ordinary operator delete is needed even though not used, so the
- // below two-argument operator delete will be treated as a placement
- // delete rather than an ordinary sized delete; see C++14 3.7.4.2/p2.
- void operator delete(void* p);
void* operator new(size_t s, unsigned size) throw();
public:
@@ -750,10 +740,6 @@ class UpcallStub: public RuntimeBlob {
intptr_t exception_handler_offset,
jobject receiver, ByteSize frame_data_offset);
- // This ordinary operator delete is needed even though not used, so the
- // below two-argument operator delete will be treated as a placement
- // delete rather than an ordinary sized delete; see C++14 3.7.4.2/p2.
- void operator delete(void* p);
void* operator new(size_t s, unsigned size) throw();
struct FrameData {
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
index 2ea72a1fcbdaa..3bc1db70251b3 100644
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -310,9 +310,20 @@ void CodeCache::initialize_heaps() {
FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);
+ const size_t ps = page_size(false, 8);
+ // Print warning if using large pages but not able to use the size given
+ if (UseLargePages) {
+ const size_t lg_ps = page_size(false, 1);
+ if (ps < lg_ps) {
+ log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
+ "Reverting to smaller page size (" PROPERFMT ").",
+ PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
+ }
+ }
+
// If large page support is enabled, align code heaps according to large
// page size to make sure that code cache is covered by large pages.
- const size_t alignment = MAX2(page_size(false, 8), os::vm_allocation_granularity());
+ const size_t alignment = MAX2(ps, os::vm_allocation_granularity());
non_nmethod_size = align_up(non_nmethod_size, alignment);
profiled_size = align_down(profiled_size, alignment);
non_profiled_size = align_down(non_profiled_size, alignment);
@@ -324,7 +335,7 @@ void CodeCache::initialize_heaps() {
// Non-nmethods
// Profiled nmethods
// ---------- low ------------
- ReservedCodeSpace rs = reserve_heap_memory(cache_size);
+ ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
ReservedSpace profiled_space = rs.first_part(profiled_size);
ReservedSpace rest = rs.last_part(profiled_size);
ReservedSpace non_method_space = rest.first_part(non_nmethod_size);
@@ -354,9 +365,8 @@ size_t CodeCache::page_size(bool aligned, size_t min_pages) {
}
}
-ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
+ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
// Align and reserve space for code cache
- const size_t rs_ps = page_size();
const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
const size_t rs_size = align_up(size, rs_align);
ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
@@ -462,10 +472,10 @@ CodeHeap* CodeCache::get_code_heap_containing(void* start) {
return nullptr;
}
-CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
+CodeHeap* CodeCache::get_code_heap(const void* cb) {
assert(cb != nullptr, "CodeBlob is null");
FOR_ALL_HEAPS(heap) {
- if ((*heap)->contains_blob(cb)) {
+ if ((*heap)->contains(cb)) {
return *heap;
}
}
@@ -594,6 +604,7 @@ void CodeCache::free(CodeBlob* cb) {
heap->set_adapter_count(heap->adapter_count() - 1);
}
+ cb->~CodeBlob();
// Get heap for given CodeBlob and deallocate
get_code_heap(cb)->deallocate(cb);
@@ -1194,7 +1205,7 @@ void CodeCache::initialize() {
FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size());
FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
- ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
+ ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, 8));
// Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
add_heap(rs, "CodeCache", CodeBlobType::All);
diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp
index 6fb3a6bd981a8..8abc4043ae6dd 100644
--- a/src/hotspot/share/code/codeCache.hpp
+++ b/src/hotspot/share/code/codeCache.hpp
@@ -117,11 +117,11 @@ class CodeCache : AllStatic {
// Creates a new heap with the given name and size, containing CodeBlobs of the given type
static void add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type);
static CodeHeap* get_code_heap_containing(void* p); // Returns the CodeHeap containing the given pointer, or nullptr
- static CodeHeap* get_code_heap(const CodeBlob* cb); // Returns the CodeHeap for the given CodeBlob
+ static CodeHeap* get_code_heap(const void* cb); // Returns the CodeHeap for the given CodeBlob
static CodeHeap* get_code_heap(CodeBlobType code_blob_type); // Returns the CodeHeap for the given CodeBlobType
// Returns the name of the VM option to set the size of the corresponding CodeHeap
static const char* get_code_heap_flag_name(CodeBlobType code_blob_type);
- static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps
+ static ReservedCodeSpace reserve_heap_memory(size_t size, size_t rs_ps); // Reserves one continuous chunk of memory for the CodeHeaps
// Iteration
static CodeBlob* first_blob(CodeHeap* heap); // Returns the first CodeBlob on the given CodeHeap
@@ -397,10 +397,10 @@ template <class T, class Filter, bool is_relaxed = false> class CodeBlobIterator : publi
// If set to nullptr, initialized by first call to next()
_code_blob = nm;
if (nm != nullptr) {
- while(!(*_heap)->contains_blob(_code_blob)) {
+ while(!(*_heap)->contains(_code_blob)) {
++_heap;
}
- assert((*_heap)->contains_blob(_code_blob), "match not found");
+ assert((*_heap)->contains(_code_blob), "match not found");
}
}
diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp
index c19ee182d5186..c6294ff56279b 100644
--- a/src/hotspot/share/code/compiledIC.cpp
+++ b/src/hotspot/share/code/compiledIC.cpp
@@ -128,11 +128,13 @@ void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub
tty->cr();
}
+#ifdef ASSERT
{
CodeBlob* cb = CodeCache::find_blob(_call->instruction_address());
assert(cb != nullptr && cb->is_compiled(), "must be compiled");
- _call->set_destination_mt_safe(entry_point);
}
+#endif
+ _call->set_destination_mt_safe(entry_point);
if (is_optimized() || is_icstub) {
// Optimized call sites don't have a cache value and ICStub call
diff --git a/src/hotspot/share/code/icBuffer.cpp b/src/hotspot/share/code/icBuffer.cpp
index a43d3678ad31b..520a8c7525906 100644
--- a/src/hotspot/share/code/icBuffer.cpp
+++ b/src/hotspot/share/code/icBuffer.cpp
@@ -140,7 +140,7 @@ void ICStub::print() {
void InlineCacheBuffer::initialize() {
if (_buffer != nullptr) return; // already initialized
- _buffer = new StubQueue(new ICStubInterface, 10*K, InlineCacheBuffer_lock, "InlineCacheBuffer");
+ _buffer = new StubQueue(new ICStubInterface, checked_cast<int>(InlineCacheBufferSize), InlineCacheBuffer_lock, "InlineCacheBuffer");
assert (_buffer != nullptr, "cannot allocate InlineCacheBuffer");
}
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index ded61f989487f..fb2e5bd78b083 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -394,6 +394,7 @@ PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
}
void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
+ MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current());)
NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
// Update the LRU cache by shifting pc_desc forward.
for (int i = 0; i < cache_size; i++) {
@@ -2751,9 +2752,6 @@ void nmethod::decode2(outputStream* ost) const {
AbstractDisassembler::show_block_comment());
#endif
- // Decoding an nmethod can write to a PcDescCache (see PcDescCache::add_pc_desc)
- MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current());)
-
st->cr();
this->print(st);
st->cr();
diff --git a/src/hotspot/share/code/stubs.cpp b/src/hotspot/share/code/stubs.cpp
index 55e0207c87d6e..6037b683b12dc 100644
--- a/src/hotspot/share/code/stubs.cpp
+++ b/src/hotspot/share/code/stubs.cpp
@@ -217,8 +217,6 @@ void StubQueue::verify() {
guarantee(0 <= _queue_begin && _queue_begin < _buffer_limit, "_queue_begin out of bounds");
guarantee(0 <= _queue_end && _queue_end <= _buffer_limit, "_queue_end out of bounds");
// verify alignment
- guarantee(_buffer_size % stub_alignment() == 0, "_buffer_size not aligned");
- guarantee(_buffer_limit % stub_alignment() == 0, "_buffer_limit not aligned");
guarantee(_queue_begin % stub_alignment() == 0, "_queue_begin not aligned");
guarantee(_queue_end % stub_alignment() == 0, "_queue_end not aligned");
// verify buffer limit/size relationship
diff --git a/src/hotspot/share/code/vtableStubs.cpp b/src/hotspot/share/code/vtableStubs.cpp
index 42a8a63a38fc1..934f805eefcd6 100644
--- a/src/hotspot/share/code/vtableStubs.cpp
+++ b/src/hotspot/share/code/vtableStubs.cpp
@@ -230,8 +230,9 @@ address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
enter(is_vtable_stub, vtable_index, s);
if (PrintAdapterHandlers) {
- tty->print_cr("Decoding VtableStub %s[%d]@" INTX_FORMAT,
- is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()));
+ tty->print_cr("Decoding VtableStub %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (" SIZE_FORMAT " bytes)",
+ is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()),
+ p2i(s->code_begin()), p2i(s->code_end()), pointer_delta(s->code_end(), s->code_begin(), 1));
Disassembler::decode(s->code_begin(), s->code_end());
}
// Notify JVMTI about this stub. The event will be recorded by the enclosing
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
index de8cce86578ad..8a83a864f62d9 100644
--- a/src/hotspot/share/compiler/compileBroker.cpp
+++ b/src/hotspot/share/compiler/compileBroker.cpp
@@ -1772,17 +1772,22 @@ bool CompileBroker::init_compiler_runtime() {
return true;
}
+void CompileBroker::free_buffer_blob_if_allocated(CompilerThread* thread) {
+ BufferBlob* blob = thread->get_buffer_blob();
+ if (blob != nullptr) {
+ blob->flush();
+ MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ CodeCache::free(blob);
+ }
+}
+
/**
* If C1 and/or C2 initialization failed, we shut down all compilation.
* We do this to keep things simple. This can be changed if it ever turns
* out to be a problem.
*/
void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
- // Free buffer blob, if allocated
- if (thread->get_buffer_blob() != nullptr) {
- MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- CodeCache::free(thread->get_buffer_blob());
- }
+ free_buffer_blob_if_allocated(thread);
if (comp->should_perform_shutdown()) {
// There are two reasons for shutting down the compiler
@@ -1921,11 +1926,7 @@ void CompileBroker::compiler_thread_loop() {
// Notify compiler that the compiler thread is about to stop
thread->compiler()->stopping_compiler_thread(thread);
- // Free buffer blob, if allocated
- if (thread->get_buffer_blob() != nullptr) {
- MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- CodeCache::free(thread->get_buffer_blob());
- }
+ free_buffer_blob_if_allocated(thread);
return; // Stop this thread.
}
}
@@ -2651,8 +2652,8 @@ void CompileBroker::print_times(bool per_compiler, bool aggregate) {
int total_bailout_count = CompileBroker::_total_bailout_count;
int total_invalidated_count = CompileBroker::_total_invalidated_count;
- int nmethods_size = CompileBroker::_sum_nmethod_code_size;
- int nmethods_code_size = CompileBroker::_sum_nmethod_size;
+ int nmethods_code_size = CompileBroker::_sum_nmethod_code_size;
+ int nmethods_size = CompileBroker::_sum_nmethod_size;
tty->cr();
tty->print_cr("Accumulated compiler times");
diff --git a/src/hotspot/share/compiler/compileBroker.hpp b/src/hotspot/share/compiler/compileBroker.hpp
index e87c7f35555ba..b7f09259fa8e8 100644
--- a/src/hotspot/share/compiler/compileBroker.hpp
+++ b/src/hotspot/share/compiler/compileBroker.hpp
@@ -252,6 +252,8 @@ class CompileBroker: AllStatic {
static bool wait_for_jvmci_completion(JVMCICompiler* comp, CompileTask* task, JavaThread* thread);
#endif
+ static void free_buffer_blob_if_allocated(CompilerThread* thread);
+
static void invoke_compiler_on_method(CompileTask* task);
static void handle_compile_error(CompilerThread* thread, CompileTask* task, ciEnv* ci_env,
int compilable, const char* failure_reason);
diff --git a/src/hotspot/share/compiler/compilerDefinitions.cpp b/src/hotspot/share/compiler/compilerDefinitions.cpp
index 23af57f3910fd..54bd1cbc7fcdb 100644
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp
@@ -497,6 +497,11 @@ bool CompilerConfig::check_args_consistency(bool status) {
"Invalid NonNMethodCodeHeapSize=%dK. Must be at least %uK.\n", NonNMethodCodeHeapSize/K,
min_code_cache_size/K);
status = false;
+ } else if (InlineCacheBufferSize > NonNMethodCodeHeapSize / 2) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid InlineCacheBufferSize=" SIZE_FORMAT "K. Must be less than or equal to " SIZE_FORMAT "K.\n",
+ InlineCacheBufferSize/K, NonNMethodCodeHeapSize/2/K);
+ status = false;
}
#ifdef _LP64
@@ -614,6 +619,7 @@ void CompilerConfig::ergo_initialize() {
IncrementalInline = false;
IncrementalInlineMH = false;
IncrementalInlineVirtual = false;
+ StressIncrementalInlining = false;
}
#ifndef PRODUCT
if (!IncrementalInline) {
diff --git a/src/hotspot/share/compiler/compilerOracle.cpp b/src/hotspot/share/compiler/compilerOracle.cpp
index 739c7cc5b9dc1..125a56a113f23 100644
--- a/src/hotspot/share/compiler/compilerOracle.cpp
+++ b/src/hotspot/share/compiler/compilerOracle.cpp
@@ -1050,21 +1050,10 @@ void CompilerOracle::parse_compile_only(char* line) {
}
}
- if (*line == method_sep) {
- if (className == nullptr) {
- className = "";
- c_match = MethodMatcher::Any;
- }
- } else {
- // got foo or foo/bar
- if (className == nullptr) {
- ShouldNotReachHere();
- } else {
- // missing class name handled as "Any" class match
- if (className[0] == '\0') {
- c_match = MethodMatcher::Any;
- }
- }
+ if (className == nullptr || className[0] == '\0') {
+ // missing class name handled as "Any" class match
+ className = "";
+ c_match = MethodMatcher::Any;
}
// each directive is terminated by , or NUL or . followed by NUL
diff --git a/src/hotspot/share/gc/g1/g1CardSet.cpp b/src/hotspot/share/gc/g1/g1CardSet.cpp
index 4e3f08ddc9d21..f39e206673977 100644
--- a/src/hotspot/share/gc/g1/g1CardSet.cpp
+++ b/src/hotspot/share/gc/g1/g1CardSet.cpp
@@ -258,10 +258,13 @@ class G1CardSetHashTable : public CHeapObj<mtGCCardSet> {
uintx get_hash() const { return G1CardSetHashTable::get_hash(_region_idx); }
- bool equals(G1CardSetHashTableValue* value, bool* is_dead) {
- *is_dead = false;
+ bool equals(G1CardSetHashTableValue* value) {
return value->_region_idx == _region_idx;
}
+
+ bool is_dead(G1CardSetHashTableValue*) {
+ return false;
+ }
};
class G1CardSetHashTableFound : public StackObj {
diff --git a/src/hotspot/share/gc/parallel/psCardTable.cpp b/src/hotspot/share/gc/parallel/psCardTable.cpp
index d08ad1572612e..4d63cdb9a3f3a 100644
--- a/src/hotspot/share/gc/parallel/psCardTable.cpp
+++ b/src/hotspot/share/gc/parallel/psCardTable.cpp
@@ -33,6 +33,7 @@
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
+#include "utilities/spinYield.hpp"
#include "utilities/align.hpp"
// Checks an individual oop for missing precise marks. Mark
@@ -123,70 +124,184 @@ static void prefetch_write(void *p) {
}
}
-// postcondition: ret is a dirty card or end_card
-CardTable::CardValue* PSCardTable::find_first_dirty_card(CardValue* const start_card,
- CardValue* const end_card) {
- for (CardValue* i_card = start_card; i_card < end_card; ++i_card) {
- if (*i_card != PSCardTable::clean_card_val()) {
- return i_card;
- }
+void PSCardTable::scan_obj_with_limit(PSPromotionManager* pm,
+ oop obj,
+ HeapWord* start,
+ HeapWord* end) {
+ if (!obj->is_typeArray()) {
+ prefetch_write(start);
+ pm->push_contents_bounded(obj, start, end);
}
- return end_card;
}
-// postcondition: ret is a clean card or end_card
-// Note: if a part of an object is on a dirty card, all cards this object
-// resides on are considered dirty.
-CardTable::CardValue* PSCardTable::find_first_clean_card(ObjectStartArray* const start_array,
- CardValue* const start_card,
- CardValue* const end_card) {
- assert(start_card == end_card ||
- *start_card != PSCardTable::clean_card_val(), "precondition");
- // Skip the first dirty card.
- CardValue* i_card = start_card + 1;
- while (i_card < end_card) {
- if (*i_card != PSCardTable::clean_card_val()) {
- i_card++;
- continue;
- }
- assert(i_card - 1 >= start_card, "inv");
- assert(*(i_card - 1) != PSCardTable::clean_card_val(), "prev card must be dirty");
- // Find the final obj on the prev dirty card.
- HeapWord* obj_addr = start_array->object_start(addr_for(i_card)-1);
- HeapWord* obj_end_addr = obj_addr + cast_to_oop(obj_addr)->size();
- CardValue* final_card_by_obj = byte_for(obj_end_addr - 1);
- assert(final_card_by_obj < end_card, "inv");
- if (final_card_by_obj <= i_card) {
- return i_card;
+void PSCardTable::pre_scavenge(HeapWord* old_gen_bottom, uint active_workers) {
+ _preprocessing_active_workers = active_workers;
+}
+
+// The "shadow" table is a copy of the card table entries of the current stripe.
+// It is used to separate card reading, clearing and redirtying which reduces
+// complexity significantly.
+class PSStripeShadowCardTable {
+ typedef CardTable::CardValue CardValue;
+
+ const uint _card_shift;
+ const uint _card_size;
+ CardValue _table[PSCardTable::num_cards_in_stripe];
+ const CardValue* _table_base;
+
+public:
+ PSStripeShadowCardTable(PSCardTable* pst, HeapWord* const start, HeapWord* const end) :
+ _card_shift(CardTable::card_shift()),
+ _card_size(CardTable::card_size()),
+ _table_base(_table - (uintptr_t(start) >> _card_shift)) {
+ size_t stripe_byte_size = pointer_delta(end, start) * HeapWordSize;
+ size_t copy_length = align_up(stripe_byte_size, _card_size) >> _card_shift;
+ // The end of the last stripe may not be card aligned as it is equal to old
+ // gen top at scavenge start. We should not clear the card containing old gen
+ // top if not card aligned because there can be promoted objects on that
+ // same card. If it was marked dirty because of the promoted objects and we
+ // cleared it, we would lose a card mark.
+ size_t clear_length = align_down(stripe_byte_size, _card_size) >> _card_shift;
+ CardValue* stripe_start_card = pst->byte_for(start);
+ memcpy(_table, stripe_start_card, copy_length);
+ memset(stripe_start_card, CardTable::clean_card_val(), clear_length);
+ }
+
+ HeapWord* addr_for(const CardValue* const card) {
+ assert(card >= _table && card <= &_table[PSCardTable::num_cards_in_stripe], "out of bounds");
+ return (HeapWord*) ((card - _table_base) << _card_shift);
+ }
+
+ const CardValue* card_for(HeapWord* addr) {
+ return &_table_base[uintptr_t(addr) >> _card_shift];
+ }
+
+ bool is_dirty(const CardValue* const card) {
+ return !is_clean(card);
+ }
+
+ bool is_clean(const CardValue* const card) {
+ assert(card >= _table && card < &_table[PSCardTable::num_cards_in_stripe], "out of bounds");
+ return *card == PSCardTable::clean_card_val();
+ }
+
+ const CardValue* find_first_dirty_card(const CardValue* const start,
+ const CardValue* const end) {
+ for (const CardValue* i = start; i < end; ++i) {
+ if (is_dirty(i)) {
+ return i;
+ }
}
- // This final obj extends beyond i_card, check if this new card is dirty.
- if (*final_card_by_obj == PSCardTable::clean_card_val()) {
- return final_card_by_obj;
+ return end;
+ }
+
+ const CardValue* find_first_clean_card(const CardValue* const start,
+ const CardValue* const end) {
+ for (const CardValue* i = start; i < end; ++i) {
+ if (is_clean(i)) {
+ return i;
+ }
}
- // This new card is dirty, continuing the search...
- i_card = final_card_by_obj + 1;
+ return end;
}
- return end_card;
-}
+};
+
+template <typename Func>
+void PSCardTable::process_range(Func&& object_start,
+ PSPromotionManager* pm,
+ HeapWord* const start,
+ HeapWord* const end) {
+ assert(start < end, "precondition");
+ assert(is_card_aligned(start), "precondition");
+
+ PSStripeShadowCardTable sct(this, start, end);
+
+ // end might not be card-aligned.
+ const CardValue* end_card = sct.card_for(end - 1) + 1;
-void PSCardTable::clear_cards(CardValue* const start, CardValue* const end) {
- for (CardValue* i_card = start; i_card < end; ++i_card) {
- *i_card = clean_card;
+ for (HeapWord* i_addr = start; i_addr < end; /* empty */) {
+ const CardValue* dirty_l = sct.find_first_dirty_card(sct.card_for(i_addr), end_card);
+ const CardValue* dirty_r = sct.find_first_clean_card(dirty_l, end_card);
+
+ assert(dirty_l <= dirty_r, "inv");
+
+ if (dirty_l == dirty_r) {
+ assert(dirty_r == end_card, "inv");
+ break;
+ }
+
+ // Located a non-empty dirty chunk [dirty_l, dirty_r).
+ HeapWord* addr_l = sct.addr_for(dirty_l);
+ HeapWord* addr_r = MIN2(sct.addr_for(dirty_r), end);
+
+ // Scan objects overlapping [addr_l, addr_r) limited to [start, end).
+ HeapWord* obj_addr = object_start(addr_l);
+
+ while (true) {
+ assert(obj_addr < addr_r, "inv");
+
+ oop obj = cast_to_oop(obj_addr);
+ const bool is_obj_array = obj->is_objArray();
+ HeapWord* const obj_end_addr = obj_addr + obj->size();
+
+ if (is_obj_array) {
+ // Always scan obj arrays precisely (they are always marked precisely)
+ // to avoid unnecessary work.
+ scan_obj_with_limit(pm, obj, addr_l, addr_r);
+ } else {
+ if (obj_addr < i_addr && i_addr > start) {
+          // Already scanned this object; it spans multiple dirty chunks in this stripe.
+          // The second condition ensures that an object reaching into the stripe from a
+          // previous one is still scanned once.
+ } else {
+ scan_obj_with_limit(pm, obj, addr_l, end);
+ }
+ }
+
+ if (obj_end_addr >= addr_r) {
+ i_addr = is_obj_array ? addr_r : obj_end_addr;
+ break;
+ }
+
+ // Move to next obj inside this dirty chunk.
+ obj_addr = obj_end_addr;
+ }
+
+ // Finished a dirty chunk.
+ pm->drain_stacks_cond_depth();
}
}
-void PSCardTable::scan_objects_in_range(PSPromotionManager* pm,
- HeapWord* start,
- HeapWord* end) {
- HeapWord* obj_addr = start;
- while (obj_addr < end) {
- oop obj = cast_to_oop(obj_addr);
- assert(oopDesc::is_oop(obj), "inv");
- prefetch_write(obj_addr);
- pm->push_contents(obj);
- obj_addr += obj->size();
+template <typename Func>
+void PSCardTable::preprocess_card_table_parallel(Func&& object_start,
+ HeapWord* old_gen_bottom,
+ HeapWord* old_gen_top,
+ uint stripe_index,
+ uint n_stripes) {
+ const size_t num_cards_in_slice = num_cards_in_stripe * n_stripes;
+ CardValue* cur_card = byte_for(old_gen_bottom) + stripe_index * num_cards_in_stripe;
+ CardValue* const end_card = byte_for(old_gen_top - 1) + 1;
+
+ for (/* empty */; cur_card < end_card; cur_card += num_cards_in_slice) {
+ HeapWord* stripe_addr = addr_for(cur_card);
+ if (is_dirty(cur_card)) {
+ // The first card of this stripe is already dirty, no need to see if the
+ // reaching-in object is a potentially imprecisely marked non-array
+ // object.
+ continue;
+ }
+ HeapWord* first_obj_addr = object_start(stripe_addr);
+ if (first_obj_addr == stripe_addr) {
+ // No object reaching into this stripe.
+ continue;
+ }
+ oop first_obj = cast_to_oop(first_obj_addr);
+ if (!first_obj->is_array() && is_dirty(byte_for(first_obj_addr))) {
+ // Found a non-array object reaching into the stripe that has
+ // potentially been marked imprecisely. Mark first card of the stripe
+ // dirty so it will be processed later.
+ *cur_card = dirty_card_val();
+ }
}
- pm->drain_stacks_cond_depth();
}
// We get passed the space_top value to prevent us from traversing into
@@ -227,103 +342,61 @@ void PSCardTable::scan_objects_in_range(PSPromotionManager* pm,
// slice_size_in_words to the start of stripe 0 in slice 0 to get to the start
// of stripe 0 in slice 1.
+// Scavenging and accesses to the card table are strictly limited to the stripe.
+// In particular scavenging of an object crossing stripe boundaries is shared
+// among the threads assigned to the stripes it resides on. This reduces
+// complexity and enables shared scanning of large objects.
+// It does, however, require a preprocessing pass over the card table in which
+// imprecise card marks of objects crossing stripe boundaries are propagated to
+// the first card of each stripe covered by the object.
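+// With the default 512-byte card size a stripe of num_cards_in_stripe (128) cards
+// covers 64 KiB; each worker processes every n_stripes-th stripe of the old gen.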
+
void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
- MutableSpace* sp,
- HeapWord* space_top,
+ HeapWord* old_gen_bottom,
+ HeapWord* old_gen_top,
PSPromotionManager* pm,
uint stripe_index,
uint n_stripes) {
- const size_t num_cards_in_stripe = 128;
- const size_t stripe_size_in_words = num_cards_in_stripe * _card_size_in_words;
- const size_t slice_size_in_words = stripe_size_in_words * n_stripes;
-
- HeapWord* cur_stripe_addr = sp->bottom() + stripe_index * stripe_size_in_words;
-
- for (/* empty */; cur_stripe_addr < space_top; cur_stripe_addr += slice_size_in_words) {
- // exclusive
- HeapWord* const cur_stripe_end_addr = MIN2(cur_stripe_addr + stripe_size_in_words,
- space_top);
-
- // Process a stripe iff it contains any obj-start
- if (!start_array->object_starts_in_range(cur_stripe_addr, cur_stripe_end_addr)) {
- continue;
+ // ObjectStartArray queries can be expensive for large objects. We cache known objects.
+ struct {
+ HeapWord* start_addr;
+ HeapWord* end_addr;
+ } cached_obj {nullptr, old_gen_bottom};
+
+ // Queries must be monotonic because we don't check addr >= cached_obj.start_addr.
+ auto object_start = [&] (HeapWord* addr) {
+ if (addr < cached_obj.end_addr) {
+ assert(cached_obj.start_addr != nullptr, "inv");
+ return cached_obj.start_addr;
}
+ HeapWord* result = start_array->object_start(addr);
- // Constraints:
- // 1. range of cards checked for being dirty or clean: [iter_limit_l, iter_limit_r)
- // 2. range of cards can be cleared: [clear_limit_l, clear_limit_r)
- // 3. range of objs (obj-start) can be scanned: [first_obj_addr, cur_stripe_end_addr)
-
- CardValue* iter_limit_l;
- CardValue* iter_limit_r;
- CardValue* clear_limit_l;
- CardValue* clear_limit_r;
-
- // Identify left ends and the first obj-start inside this stripe.
- HeapWord* first_obj_addr = start_array->object_start(cur_stripe_addr);
- if (first_obj_addr < cur_stripe_addr) {
- // this obj belongs to previous stripe; can't clear any cards it occupies
- first_obj_addr += cast_to_oop(first_obj_addr)->size();
- clear_limit_l = byte_for(first_obj_addr - 1) + 1;
- iter_limit_l = byte_for(first_obj_addr);
- } else {
- assert(first_obj_addr == cur_stripe_addr, "inv");
- iter_limit_l = clear_limit_l = byte_for(cur_stripe_addr);
- }
+ cached_obj.start_addr = result;
+ cached_obj.end_addr = result + cast_to_oop(result)->size();
- assert(cur_stripe_addr <= first_obj_addr, "inside this stripe");
- assert(first_obj_addr <= cur_stripe_end_addr, "can be empty");
+ return result;
+ };
- {
- // Identify right ends.
- HeapWord* obj_addr = start_array->object_start(cur_stripe_end_addr - 1);
- HeapWord* obj_end_addr = obj_addr + cast_to_oop(obj_addr)->size();
- assert(obj_end_addr >= cur_stripe_end_addr, "inv");
- clear_limit_r = byte_for(obj_end_addr);
- iter_limit_r = byte_for(obj_end_addr - 1) + 1;
- }
-
- assert(iter_limit_l <= clear_limit_l &&
- clear_limit_r <= iter_limit_r, "clear cards only if we iterate over them");
-
- // Process dirty chunks, i.e. consecutive dirty cards [dirty_l, dirty_r),
- // chunk by chunk inside [iter_limit_l, iter_limit_r).
- CardValue* dirty_l;
- CardValue* dirty_r;
-
- for (CardValue* cur_card = iter_limit_l; cur_card < iter_limit_r; cur_card = dirty_r + 1) {
- dirty_l = find_first_dirty_card(cur_card, iter_limit_r);
- dirty_r = find_first_clean_card(start_array, dirty_l, iter_limit_r);
- assert(dirty_l <= dirty_r, "inv");
-
- // empty
- if (dirty_l == dirty_r) {
- assert(dirty_r == iter_limit_r, "no more dirty cards in this stripe");
- break;
- }
-
- assert(*dirty_l != clean_card, "inv");
- assert(*dirty_r == clean_card || dirty_r >= clear_limit_r,
- "clean card or belonging to next stripe");
+ // Prepare scavenge.
+ preprocess_card_table_parallel(object_start, old_gen_bottom, old_gen_top, stripe_index, n_stripes);
- // Process this non-empty dirty chunk in two steps:
- {
- // 1. Clear card in [dirty_l, dirty_r) subject to [clear_limit_l, clear_limit_r) constraint
- clear_cards(MAX2(dirty_l, clear_limit_l),
- MIN2(dirty_r, clear_limit_r));
- }
-
- {
- // 2. Scan objs in [dirty_l, dirty_r) subject to [first_obj_addr, cur_stripe_end_addr) constraint
- HeapWord* obj_l = MAX2(start_array->object_start(addr_for(dirty_l)),
- first_obj_addr);
+ // Sync with other workers.
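+  // No worker may start clearing cards of its stripes while another worker could
+  // still read cards of preceding stripes during preprocessing.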
+ Atomic::dec(&_preprocessing_active_workers);
+ SpinYield spin_yield;
+ while (Atomic::load_acquire(&_preprocessing_active_workers) > 0) {
+ spin_yield.wait();
+ }
- HeapWord* obj_r = MIN2(addr_for(dirty_r),
- cur_stripe_end_addr);
+ // Scavenge
+ cached_obj = {nullptr, old_gen_bottom};
+ const size_t stripe_size_in_words = num_cards_in_stripe * _card_size_in_words;
+ const size_t slice_size_in_words = stripe_size_in_words * n_stripes;
+ HeapWord* cur_addr = old_gen_bottom + stripe_index * stripe_size_in_words;
+ for (/* empty */; cur_addr < old_gen_top; cur_addr += slice_size_in_words) {
+ HeapWord* const stripe_l = cur_addr;
+ HeapWord* const stripe_r = MIN2(cur_addr + stripe_size_in_words,
+ old_gen_top);
- scan_objects_in_range(pm, obj_l, obj_r);
- }
- }
+ process_range(object_start, pm, stripe_l, stripe_r);
}
}
diff --git a/src/hotspot/share/gc/parallel/psCardTable.hpp b/src/hotspot/share/gc/parallel/psCardTable.hpp
index 6953c15d37c2f..b0634d5c0b084 100644
--- a/src/hotspot/share/gc/parallel/psCardTable.hpp
+++ b/src/hotspot/share/gc/parallel/psCardTable.hpp
@@ -33,7 +33,35 @@ class ObjectStartArray;
class PSPromotionManager;
class PSCardTable: public CardTable {
- private:
+ friend class PSStripeShadowCardTable;
+ static constexpr size_t num_cards_in_stripe = 128;
+ static_assert(num_cards_in_stripe >= 1, "progress");
+
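+  // Number of workers that have not yet finished card-table preprocessing; used to
+  // synchronize the start of the scavenging pass.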
+ volatile int _preprocessing_active_workers;
+
+ bool is_dirty(CardValue* card) {
+ return !is_clean(card);
+ }
+
+ bool is_clean(CardValue* card) {
+ return *card == clean_card_val();
+ }
+
+ // Iterate the stripes with the given index and copy imprecise card marks of
+ // objects reaching into a stripe to its first card.
+  template <typename Func>
+ void preprocess_card_table_parallel(Func&& object_start,
+ HeapWord* old_gen_bottom,
+ HeapWord* old_gen_top,
+ uint stripe_index,
+ uint n_stripes);
+
+ // Scavenge contents on dirty cards of the given stripe [start, end).
+  template <typename Func>
+ void process_range(Func&& object_start,
+ PSPromotionManager* pm,
+ HeapWord* const start,
+ HeapWord* const end);
void verify_all_young_refs_precise_helper(MemRegion mr);
@@ -42,29 +70,24 @@ class PSCardTable: public CardTable {
verify_card = CT_MR_BS_last_reserved + 5
};
- CardValue* find_first_dirty_card(CardValue* const start_card,
- CardValue* const end_card);
-
- CardValue* find_first_clean_card(ObjectStartArray* start_array,
- CardValue* const start_card,
- CardValue* const end_card);
-
- void clear_cards(CardValue* const start, CardValue* const end);
-
- void scan_objects_in_range(PSPromotionManager* pm,
- HeapWord* start,
- HeapWord* end);
+ void scan_obj_with_limit(PSPromotionManager* pm,
+ oop obj,
+ HeapWord* start,
+ HeapWord* end);
public:
- PSCardTable(MemRegion whole_heap) : CardTable(whole_heap) {}
+ PSCardTable(MemRegion whole_heap) : CardTable(whole_heap),
+ _preprocessing_active_workers(0) {}
static CardValue youngergen_card_val() { return youngergen_card; }
static CardValue verify_card_val() { return verify_card; }
// Scavenge support
+ void pre_scavenge(HeapWord* old_gen_bottom, uint active_workers);
+ // Scavenge contents of stripes with the given index.
void scavenge_contents_parallel(ObjectStartArray* start_array,
- MutableSpace* sp,
- HeapWord* space_top,
+ HeapWord* old_gen_bottom,
+ HeapWord* old_gen_top,
PSPromotionManager* pm,
uint stripe_index,
uint n_stripes);
diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.hpp
index a1d2b38db31fa..d053ffb6cc949 100644
--- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp
@@ -177,6 +177,7 @@ class PSPromotionManager {
TASKQUEUE_STATS_ONLY(inline void record_steal(ScannerTask task);)
void push_contents(oop obj);
+ void push_contents_bounded(oop obj, HeapWord* left, HeapWord* right);
};
#endif // SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_HPP
diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
index f702bc483481c..c1cbeb0f597bb 100644
--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
@@ -131,6 +131,11 @@ inline void PSPromotionManager::push_contents(oop obj) {
}
}
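+// Push only the oop fields of obj that lie within [left, right). This supports
+// bounded scanning of objects that span card-table stripes.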
+inline void PSPromotionManager::push_contents_bounded(oop obj, HeapWord* left, HeapWord* right) {
+ PSPushContentsClosure pcc(this);
+ obj->oop_iterate(&pcc, MemRegion(left, right));
+}
+
template<bool promote_immediately>
inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
assert(should_scavenge(&o), "Sanity");
diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp
index e148d0cebe86e..6c35ed6b593b8 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -87,7 +87,6 @@ static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_i
assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
- PSScavengeRootsClosure roots_closure(pm);
PSPromoteRootsClosure roots_to_old_closure(pm);
switch (root_type) {
@@ -301,6 +300,11 @@ class ScavengeRootsTask : public WorkerTask {
_is_old_gen_empty(old_gen->object_space()->is_empty()),
_terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
assert(_old_gen != nullptr, "Sanity");
+
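+    // Record the number of workers that will take part in card-table preprocessing.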
+ if (!_is_old_gen_empty) {
+ PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
+ card_table->pre_scavenge(_old_gen->object_space()->bottom(), active_workers);
+ }
}
virtual void work(uint worker_id) {
@@ -314,8 +318,9 @@ class ScavengeRootsTask : public WorkerTask {
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
+ // The top of the old gen changes during scavenge when objects are promoted.
card_table->scavenge_contents_parallel(_old_gen->start_array(),
- _old_gen->object_space(),
+ _old_gen->object_space()->bottom(),
_gen_top,
pm,
worker_id,
diff --git a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp
index 274744d5de256..adf0527681b5e 100644
--- a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp
+++ b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp
@@ -186,6 +186,14 @@ int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
// Called upon first entry after being armed
bool may_enter = bs_nm->nmethod_entry_barrier(nm);
+ // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
+ // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
+ // code, where the existence of new instructions is communicated via data (the guard value).
+ // This cross modify fence is only needed when the nmethod entry barrier modifies the
+ // instructions. Not all platforms currently do that, so if this check becomes expensive,
+ // it can be made conditional on the nmethod_patching_type.
+ OrderAccess::cross_modify_fence();
+
// Diagnostic option to force deoptimization 1 in 3 times. It is otherwise
// a very rare event.
if (DeoptimizeNMethodBarriersALot) {
@@ -214,6 +222,7 @@ bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
assert(nm->is_osr_method(), "Should not reach here");
log_trace(nmethod, barrier)("Running osr nmethod entry barrier: " PTR_FORMAT, p2i(nm));
+ bool result = nmethod_entry_barrier(nm);
OrderAccess::cross_modify_fence();
- return nmethod_entry_barrier(nm);
+ return result;
}
diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
index 90fae8c358841..41bd15ab0000f 100644
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
@@ -675,8 +675,15 @@ void BarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* si
Node* payload_size = size;
Node* offset = kit->MakeConX(base_off);
payload_size = kit->gvn().transform(new SubXNode(payload_size, offset));
+ if (is_array) {
+ // Ensure the array payload size is rounded up to the next BytesPerLong
+ // multiple when converting to double-words. This is necessary because array
+ // size does not include object alignment padding, so it might not be a
+ // multiple of BytesPerLong for sub-long element types.
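+    // For example, a 7-byte payload becomes (7 + 7) >> LogBytesPerLong = 1 double-word
+    // below instead of 0.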
+ payload_size = kit->gvn().transform(new AddXNode(payload_size, kit->MakeConX(BytesPerLong - 1)));
+ }
payload_size = kit->gvn().transform(new URShiftXNode(payload_size, kit->intcon(LogBytesPerLong)));
- ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, offset, dst_base, offset, payload_size, true, false);
+ ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, offset, dst_base, offset, payload_size, true, false);
if (is_array) {
ac->set_clone_array();
} else {
diff --git a/src/hotspot/share/gc/shared/memAllocator.cpp b/src/hotspot/share/gc/shared/memAllocator.cpp
index e5189abed5740..ed54128d34a1a 100644
--- a/src/hotspot/share/gc/shared/memAllocator.cpp
+++ b/src/hotspot/share/gc/shared/memAllocator.cpp
@@ -400,15 +400,6 @@ oop ObjAllocator::initialize(HeapWord* mem) const {
return finish(mem);
}
-MemRegion ObjArrayAllocator::obj_memory_range(oop obj) const {
- if (_do_zero) {
- return MemAllocator::obj_memory_range(obj);
- }
- ArrayKlass* array_klass = ArrayKlass::cast(_klass);
- const size_t hs = arrayOopDesc::header_size(array_klass->element_type());
-  return MemRegion(cast_from_oop<HeapWord*>(obj) + hs, _word_size - hs);
-}
-
oop ObjArrayAllocator::initialize(HeapWord* mem) const {
// Set array length before setting the _klass field because a
// non-null klass field indicates that the object is parsable by
diff --git a/src/hotspot/share/gc/shared/memAllocator.hpp b/src/hotspot/share/gc/shared/memAllocator.hpp
index e42399cdffdb4..48faded1337f9 100644
--- a/src/hotspot/share/gc/shared/memAllocator.hpp
+++ b/src/hotspot/share/gc/shared/memAllocator.hpp
@@ -78,10 +78,6 @@ class MemAllocator: StackObj {
// back to calling CollectedHeap::mem_allocate().
HeapWord* mem_allocate(Allocation& allocation) const;
- virtual MemRegion obj_memory_range(oop obj) const {
-    return MemRegion(cast_from_oop<HeapWord*>(obj), _word_size);
- }
-
public:
// Allocate and fully construct the object, and perform various instrumentation. Could safepoint.
oop allocate() const;
@@ -100,8 +96,6 @@ class ObjArrayAllocator: public MemAllocator {
const int _length;
const bool _do_zero;
- virtual MemRegion obj_memory_range(oop obj) const;
-
public:
ObjArrayAllocator(Klass* klass, size_t word_size, int length, bool do_zero,
Thread* thread = Thread::current())
diff --git a/src/hotspot/share/gc/shared/tlab_globals.hpp b/src/hotspot/share/gc/shared/tlab_globals.hpp
index 8ecc5283642ba..0b047b4b9e8ac 100644
--- a/src/hotspot/share/gc/shared/tlab_globals.hpp
+++ b/src/hotspot/share/gc/shared/tlab_globals.hpp
@@ -70,10 +70,13 @@
"Allocation averaging weight") \
range(0, 100) \
\
+ /* At GC all TLABs are retired, and each thread's active */ \
+ /* TLAB is assumed to be half full on average. The */ \
+ /* remaining space is waste, proportional to TLAB size. */ \
+ product(uintx, TLABWasteTargetPercent, 1, \
+ "Percentage of Eden that can be wasted (half-full TLABs at GC)") \
/* Limit the lower bound of this flag to 1 as it is used */ \
/* in a division expression. */ \
- product(uintx, TLABWasteTargetPercent, 1, \
- "Percentage of Eden that can be wasted") \
range(1, 100) \
\
product(uintx, TLABRefillWasteFraction, 64, \
diff --git a/src/hotspot/share/gc/shared/workerThread.cpp b/src/hotspot/share/gc/shared/workerThread.cpp
index b64c5050a2230..49e43c284fad9 100644
--- a/src/hotspot/share/gc/shared/workerThread.cpp
+++ b/src/hotspot/share/gc/shared/workerThread.cpp
@@ -31,6 +31,7 @@
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"
WorkerTaskDispatcher::WorkerTaskDispatcher() :
_task(nullptr),
@@ -141,40 +142,44 @@ void WorkerThreads::threads_do(ThreadClosure* tc) const {
}
}
-void WorkerThreads::set_indirectly_suspendible_threads() {
+template <typename Function>
+void WorkerThreads::threads_do_f(Function function) const {
+ for (uint i = 0; i < _created_workers; i++) {
+ function(_workers[i]);
+ }
+}
+
+void WorkerThreads::set_indirect_states() {
#ifdef ASSERT
- class SetIndirectlySuspendibleThreadClosure : public ThreadClosure {
- virtual void do_thread(Thread* thread) {
+ const bool is_suspendible = Thread::current()->is_suspendible_thread();
+ const bool is_safepointed = Thread::current()->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
+
+ threads_do_f([&](Thread* thread) {
+ assert(!thread->is_indirectly_suspendible_thread(), "Unexpected");
+ assert(!thread->is_indirectly_safepoint_thread(), "Unexpected");
+ if (is_suspendible) {
thread->set_indirectly_suspendible_thread();
}
- };
-
- if (Thread::current()->is_suspendible_thread()) {
- SetIndirectlySuspendibleThreadClosure cl;
- threads_do(&cl);
- }
+ if (is_safepointed) {
+ thread->set_indirectly_safepoint_thread();
+ }
+ });
#endif
}
-void WorkerThreads::clear_indirectly_suspendible_threads() {
+void WorkerThreads::clear_indirect_states() {
#ifdef ASSERT
- class ClearIndirectlySuspendibleThreadClosure : public ThreadClosure {
- virtual void do_thread(Thread* thread) {
- thread->clear_indirectly_suspendible_thread();
- }
- };
-
- if (Thread::current()->is_suspendible_thread()) {
- ClearIndirectlySuspendibleThreadClosure cl;
- threads_do(&cl);
- }
+ threads_do_f([&](Thread* thread) {
+ thread->clear_indirectly_suspendible_thread();
+ thread->clear_indirectly_safepoint_thread();
+ });
#endif
}
void WorkerThreads::run_task(WorkerTask* task) {
- set_indirectly_suspendible_threads();
+ set_indirect_states();
_dispatcher.coordinator_distribute_task(task, _active_workers);
- clear_indirectly_suspendible_threads();
+ clear_indirect_states();
}
void WorkerThreads::run_task(WorkerTask* task, uint num_workers) {
diff --git a/src/hotspot/share/gc/shared/workerThread.hpp b/src/hotspot/share/gc/shared/workerThread.hpp
index d3b246c0930b4..f22e48d7bbc90 100644
--- a/src/hotspot/share/gc/shared/workerThread.hpp
+++ b/src/hotspot/share/gc/shared/workerThread.hpp
@@ -93,8 +93,8 @@ class WorkerThreads : public CHeapObj<mtGC> {
WorkerThread* create_worker(uint name_suffix);
- void set_indirectly_suspendible_threads();
- void clear_indirectly_suspendible_threads();
+ void set_indirect_states();
+ void clear_indirect_states();
protected:
virtual void on_create_worker(WorkerThread* worker) {}
@@ -111,6 +111,8 @@ class WorkerThreads : public CHeapObj {
uint set_active_workers(uint num_workers);
void threads_do(ThreadClosure* tc) const;
+  template <typename Function>
+ void threads_do_f(Function function) const;
const char* name() const { return _name; }
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
index 56a91f234359b..ac3afa774e151 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
@@ -530,7 +530,7 @@ Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue&
Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
// 1: non-reference load, no additional barrier is needed
if (!access.is_oop()) {
- return BarrierSetC2::load_at_resolved(access, val_type);;
+ return BarrierSetC2::load_at_resolved(access, val_type);
}
Node* load = BarrierSetC2::load_at_resolved(access, val_type);
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
index 71068f76043c9..da75706ac4cfa 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
@@ -50,20 +50,18 @@ bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
state->load_reference_barriers_count()) > 0) {
assert(C->post_loop_opts_phase(), "no loop opts allowed");
C->reset_post_loop_opts_phase(); // ... but we know what we are doing
- bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
C->clear_major_progress();
PhaseIdealLoop::optimize(igvn, LoopOptsShenandoahExpand);
if (C->failing()) return false;
- PhaseIdealLoop::verify(igvn);
- if (attempt_more_loopopts) {
- C->set_major_progress();
- if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
- return false;
- }
- C->clear_major_progress();
- C->process_for_post_loop_opts_igvn(igvn);
+ C->set_major_progress();
+ if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
+ return false;
}
+ C->clear_major_progress();
+ C->process_for_post_loop_opts_igvn(igvn);
+ if (C->failing()) return false;
+
C->set_post_loop_opts_phase(); // now for real!
}
return true;
@@ -1385,11 +1383,9 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
Node* result_mem = nullptr;
Node* addr;
- if (ShenandoahSelfFixing) {
+ {
VectorSet visited;
addr = get_load_addr(phase, visited, lrb);
- } else {
- addr = phase->igvn().zerocon(T_OBJECT);
}
if (addr->Opcode() == Op_AddP) {
Node* orig_base = addr->in(AddPNode::Base);
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
index 819f1e8d74e24..a8f71c8c5dfc0 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
@@ -354,10 +354,6 @@ bool ShenandoahAllocationRate::is_spiking(double rate, double threshold) const {
return false;
}
-double ShenandoahAllocationRate::instantaneous_rate(size_t allocated) const {
- return instantaneous_rate(os::elapsedTime(), allocated);
-}
-
double ShenandoahAllocationRate::instantaneous_rate(double time, size_t allocated) const {
size_t last_value = _last_sample_value;
double last_time = _last_sample_time;
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
index a1a0e6321fafe..17214391383c2 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
@@ -36,7 +36,6 @@ class ShenandoahAllocationRate : public CHeapObj<mtGC> {
double sample(size_t allocated);
- double instantaneous_rate(size_t allocated) const;
double upper_bound(double sds) const;
bool is_spiking(double rate, double threshold) const;
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp
index 4ba3a0315b7c5..be758d14ed1be 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp
@@ -37,12 +37,6 @@ ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics() : ShenandoahHeu
// Aggressive evacuates everything, so it needs as much evac space as it can get
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahEvacReserveOverflow);
-
- // If class unloading is globally enabled, aggressive does unloading even with
- // concurrent cycles.
- if (ClassUnloading) {
- SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUnloadClassesFrequency, 1);
- }
}
void ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
@@ -62,7 +56,7 @@ bool ShenandoahAggressiveHeuristics::should_start_gc() {
}
bool ShenandoahAggressiveHeuristics::should_unload_classes() {
- if (!can_unload_classes_normal()) return false;
+ if (!can_unload_classes()) return false;
if (has_metaspace_oom()) return true;
// Randomly unload classes with 50% chance.
return (os::random() & 1) == 1;
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp
index e571d39f6b3a1..ad924f87b677e 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp
@@ -53,11 +53,6 @@ ShenandoahHeuristics::ShenandoahHeuristics() :
_gc_time_history(new TruncatedSeq(10, ShenandoahAdaptiveDecayFactor)),
_metaspace_oom()
{
- // No unloading during concurrent mark? Communicate that to heuristics
- if (!ClassUnloadingWithConcurrentMark) {
- FLAG_SET_DEFAULT(ShenandoahUnloadClassesFrequency, 0);
- }
-
size_t num_regions = ShenandoahHeap::heap()->num_regions();
assert(num_regions > 0, "Sanity");
@@ -262,23 +257,10 @@ bool ShenandoahHeuristics::can_unload_classes() {
return true;
}
-bool ShenandoahHeuristics::can_unload_classes_normal() {
- if (!can_unload_classes()) return false;
- if (has_metaspace_oom()) return true;
- if (!ClassUnloadingWithConcurrentMark) return false;
- if (ShenandoahUnloadClassesFrequency == 0) return false;
- return true;
-}
-
bool ShenandoahHeuristics::should_unload_classes() {
- if (!can_unload_classes_normal()) return false;
+ if (!can_unload_classes()) return false;
if (has_metaspace_oom()) return true;
- size_t cycle = ShenandoahHeap::heap()->shenandoah_policy()->cycle_counter();
- // Unload classes every Nth GC cycle.
- // This should not happen in the same cycle as process_references to amortize costs.
- // Offsetting by one is enough to break the rendezvous when periods are equal.
- // When periods are not equal, offsetting by one is just as good as any other guess.
- return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
+ return ClassUnloadingWithConcurrentMark;
}
void ShenandoahHeuristics::initialize() {
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
index 288d306d08962..8efe321692eac 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
@@ -121,7 +121,6 @@ class ShenandoahHeuristics : public CHeapObj<mtGC> {
virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);
virtual bool can_unload_classes();
- virtual bool can_unload_classes_normal();
virtual bool should_unload_classes();
virtual const char* name() = 0;
diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp
index d4fa5a06305d7..d94ade25977b6 100644
--- a/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp
+++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp
@@ -59,7 +59,6 @@ void ShenandoahIUMode::initialize_flags() const {
SHENANDOAH_CHECK_FLAG_SET(ShenandoahIUBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahNMethodBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahStackWatermarkBarrier);
}
diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp
index 36de17d2d79db..c22c88217e9e7 100644
--- a/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp
+++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp
@@ -49,7 +49,6 @@ void ShenandoahPassiveMode::initialize_flags() const {
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahIUBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCloneBarrier);
- SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahNMethodBarrier);
SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStackWatermarkBarrier);
// Final configuration checks
diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.cpp
index 5b4e1df49e0df..ff1ff5c2ed343 100644
--- a/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.cpp
+++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahSATBMode.cpp
@@ -47,7 +47,6 @@ void ShenandoahSATBMode::initialize_flags() const {
SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
- SHENANDOAH_CHECK_FLAG_SET(ShenandoahNMethodBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahStackWatermarkBarrier);
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
index 6d4dce4575cf8..d2857daccf6f3 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
@@ -45,7 +45,7 @@ ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
BarrierSet(make_barrier_set_assembler(),
make_barrier_set_c1(),
make_barrier_set_c2(),
- ShenandoahNMethodBarrier ? new ShenandoahBarrierSetNMethod(heap) : nullptr,
+ new ShenandoahBarrierSetNMethod(heap),
new ShenandoahBarrierSetStackChunk(),
BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
_heap(heap),
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
index 413dfe10faad9..b8da50dd6e109 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
@@ -123,7 +123,7 @@ inline oop ShenandoahBarrierSet::load_reference_barrier(DecoratorSet decorators,
}
oop fwd = load_reference_barrier(obj);
- if (ShenandoahSelfFixing && load_addr != nullptr && fwd != obj) {
+ if (load_addr != nullptr && fwd != obj) {
// Since we are here and we know the load address, update the reference.
ShenandoahHeap::atomic_update_oop(fwd, load_addr, obj);
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp
index 1c8daba3d24af..6674c40f76854 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp
@@ -220,4 +220,4 @@ void ShenandoahAssertNotForwardedClosure::do_oop(narrowOop* p) { do_oop_work(p);
void ShenandoahAssertNotForwardedClosure::do_oop(oop* p) { do_oop_work(p); }
#endif
-#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
index 932252f9ee18f..92d447258f2a8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
@@ -36,74 +36,24 @@
#include "runtime/atomic.hpp"
#include "utilities/powerOfTwo.hpp"
-ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) {
- _length = heaps->length();
- _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC);
- for (int h = 0; h < _length; h++) {
- _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h));
- }
-}
-
-ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() {
- FREE_C_HEAP_ARRAY(ParallelCodeHeapIterator, _iters);
-}
-
-void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) {
- for (int c = 0; c < _length; c++) {
- _iters[c].parallel_blobs_do(f);
- }
-}
-
-ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) :
- _heap(heap), _claimed_idx(0), _finished(false) {
-}
-
-void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
- assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
-
- /*
- * Parallel code heap walk.
- *
- * This code makes all threads scan all code heaps, but only one thread would execute the
- * closure on given blob. This is achieved by recording the "claimed" blocks: if a thread
- * had claimed the block, it can process all blobs in it. Others have to fast-forward to
- * next attempt without processing.
- *
- * Late threads would return immediately if iterator is finished.
- */
-
- if (_finished) {
- return;
- }
- int stride = 256; // educated guess
- int stride_mask = stride - 1;
- assert (is_power_of_2(stride), "sanity");
+ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table;
+int ShenandoahCodeRoots::_disarmed_value = 1;
- int count = 0;
- bool process_block = true;
+bool ShenandoahCodeRoots::use_nmethod_barriers_for_mark() {
+ // Continuations need nmethod barriers for scanning stack chunk nmethods.
+ if (Continuations::enabled()) return true;
- for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != nullptr; cb = CodeCache::next_blob(_heap, cb)) {
- int current = count++;
- if ((current & stride_mask) == 0) {
- process_block = (current >= _claimed_idx) &&
- (Atomic::cmpxchg(&_claimed_idx, current, current + stride, memory_order_relaxed) == current);
- }
- if (process_block) {
- f->do_code_blob(cb);
-#ifdef ASSERT
- if (cb->is_nmethod())
- Universe::heap()->verify_nmethod((nmethod*)cb);
-#endif
- }
- }
+ // Concurrent class unloading needs nmethod barriers.
+ // When a nmethod is about to be executed, we need to make sure that all its
+ // metadata are marked. The alternative is to remark thread roots at final mark
+ // pause, which would cause latency issues.
+ if (ShenandoahHeap::heap()->unload_classes()) return true;
- _finished = true;
+ // Otherwise, we can go without nmethod barriers.
+ return false;
}
-ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table;
-int ShenandoahCodeRoots::_disarmed_value = 1;
-
void ShenandoahCodeRoots::initialize() {
_nmethod_table = new ShenandoahNMethodTable();
}
@@ -118,8 +68,13 @@ void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
_nmethod_table->unregister_nmethod(nm);
}
-void ShenandoahCodeRoots::arm_nmethods() {
- assert(BarrierSet::barrier_set()->barrier_set_nmethod() != nullptr, "Sanity");
+void ShenandoahCodeRoots::arm_nmethods_for_mark() {
+ if (use_nmethod_barriers_for_mark()) {
+ BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods();
+ }
+}
+
+void ShenandoahCodeRoots::arm_nmethods_for_evac() {
BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods();
}
@@ -163,7 +118,7 @@ class ShenandoahDisarmNMethodsTask : public WorkerTask {
};
void ShenandoahCodeRoots::disarm_nmethods() {
- if (ShenandoahNMethodBarrier) {
+ if (use_nmethod_barriers_for_mark()) {
ShenandoahDisarmNMethodsTask task;
ShenandoahHeap::heap()->workers()->run_task(&task);
}
@@ -284,7 +239,6 @@ void ShenandoahCodeRoots::purge() {
}
ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
- _par_iterator(CodeCache::heaps()),
_table_snapshot(nullptr) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp
index 3493d118a9bc6..b8870e71ed06c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp
@@ -39,38 +39,9 @@ class ShenandoahNMethodTable;
class ShenandoahNMethodTableSnapshot;
class WorkerThreads;
-class ShenandoahParallelCodeHeapIterator {
- friend class CodeCache;
-private:
- CodeHeap* _heap;
- shenandoah_padding(0);
- volatile int _claimed_idx;
- volatile bool _finished;
- shenandoah_padding(1);
-public:
- ShenandoahParallelCodeHeapIterator(CodeHeap* heap);
- void parallel_blobs_do(CodeBlobClosure* f);
-};
-
-class ShenandoahParallelCodeCacheIterator {
- friend class CodeCache;
-private:
- ShenandoahParallelCodeHeapIterator* _iters;
- int _length;
-
- NONCOPYABLE(ShenandoahParallelCodeCacheIterator);
-
-public:
-  ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps);
- ~ShenandoahParallelCodeCacheIterator();
- void parallel_blobs_do(CodeBlobClosure* f);
-};
-
class ShenandoahCodeRootsIterator {
friend class ShenandoahCodeRoots;
protected:
- ShenandoahParallelCodeCacheIterator _par_iterator;
- ShenandoahSharedFlag _seq_claimed;
ShenandoahNMethodTableSnapshot* _table_snapshot;
public:
@@ -88,7 +59,6 @@ class ShenandoahCodeRoots : public AllStatic {
static void initialize();
static void register_nmethod(nmethod* nm);
static void unregister_nmethod(nmethod* nm);
- static void flush_nmethod(nmethod* nm);
static ShenandoahNMethodTable* table() {
return _nmethod_table;
@@ -97,11 +67,14 @@ class ShenandoahCodeRoots : public AllStatic {
// Concurrent nmethod unloading support
static void unlink(WorkerThreads* workers, bool unloading_occurred);
static void purge();
- static void arm_nmethods();
+ static void arm_nmethods_for_mark();
+ static void arm_nmethods_for_evac();
static void disarm_nmethods();
static int disarmed_value() { return _disarmed_value; }
static int* disarmed_value_address() { return &_disarmed_value; }
+ static bool use_nmethod_barriers_for_mark();
+
private:
static ShenandoahNMethodTable* _nmethod_table;
static int _disarmed_value;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp
index 6f30f88e57ef1..a6ea6e976ae4d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp
@@ -40,7 +40,8 @@ class ShenandoahCollectorPolicy : public CHeapObj<mtGC> {
private:
size_t _success_concurrent_gcs;
size_t _success_degenerated_gcs;
- size_t _success_full_gcs;
+ // Written by control thread, read by mutators
+ volatile size_t _success_full_gcs;
size_t _alloc_failure_degenerated;
size_t _alloc_failure_degenerated_upgrade_to_full;
size_t _alloc_failure_full;
@@ -82,6 +83,10 @@ class ShenandoahCollectorPolicy : public CHeapObj {
size_t cycle_counter() const;
void print_gc_stats(outputStream* out) const;
+
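+  // Read racily by mutators; only used to detect that at least one full GC has
+  // completed since an earlier read.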
+ size_t full_gc_count() const {
+ return _success_full_gcs + _alloc_failure_degenerated_upgrade_to_full;
+ }
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
index 375a797cdf897..7564af5f6b7c1 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -545,12 +545,9 @@ void ShenandoahConcurrentGC::op_init_mark() {
// Make above changes visible to worker threads
OrderAccess::fence();
- // Arm nmethods for concurrent marking. When a nmethod is about to be executed,
- // we need to make sure that all its metadata are marked. alternative is to remark
- // thread roots at final mark pause, but it can be potential latency killer.
- if (heap->unload_classes()) {
- ShenandoahCodeRoots::arm_nmethods();
- }
+
+ // Arm nmethods for concurrent mark
+ ShenandoahCodeRoots::arm_nmethods_for_mark();
ShenandoahStackWatermark::change_epoch_id();
if (ShenandoahPacing) {
@@ -603,7 +600,7 @@ void ShenandoahConcurrentGC::op_final_mark() {
}
// Arm nmethods/stack for concurrent processing
- ShenandoahCodeRoots::arm_nmethods();
+ ShenandoahCodeRoots::arm_nmethods_for_evac();
ShenandoahStackWatermark::change_epoch_id();
if (ShenandoahPacing) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
index 1fde1944cac62..956cf8cc908f4 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
@@ -57,8 +57,7 @@ class ShenandoahConcurrentMarkingTask : public WorkerTask {
void work(uint worker_id) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahConcurrentWorkerSession worker_session(worker_id);
- ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
- ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
+ ShenandoahSuspendibleThreadSetJoiner stsj;
ShenandoahReferenceProcessor* rp = heap->ref_processor();
assert(rp != nullptr, "need reference processor");
StringDedup::Requests requests;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
index 1fcf554429307..e7cf402a52785 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
@@ -131,6 +131,27 @@ void ShenandoahDegenGC::op_degenerated() {
// and we can do evacuation. Otherwise, it would be the shortcut cycle.
if (heap->is_evacuation_in_progress()) {
+ if (_degen_point == _degenerated_evac) {
+ // Degeneration under oom-evac protocol allows the mutator LRB to expose
+ // references to from-space objects. This is okay, in theory, because we
+ // will come to the safepoint here to complete the evacuations and update
+ // the references. However, if the from-space reference is written to a
+ // region that was EC during final mark or was recycled after final mark
+ // it will not have TAMS or UWM updated. Such a region is effectively
+ // skipped during update references which can lead to crashes and corruption
+ // if the from-space reference is accessed.
+ if (UseTLAB) {
+ heap->labs_make_parsable();
+ }
+
+ for (size_t i = 0; i < heap->num_regions(); i++) {
+ ShenandoahHeapRegion* r = heap->get_region(i);
+ if (r->is_active() && r->top() > r->get_update_watermark()) {
+ r->set_update_watermark_at_safepoint(r->top());
+ }
+ }
+ }
+
// Degeneration under oom-evac protocol might have left some objects in
// collection set un-evacuated. Restart evacuation from the beginning to
// capture all objects. For all the objects that are already evacuated,
@@ -181,11 +202,9 @@ void ShenandoahDegenGC::op_degenerated() {
assert(!heap->cancelled_gc(), "STW reference update can not OOM");
}
- if (ClassUnloading) {
- // Disarm nmethods that armed in concurrent cycle.
- // In above case, update roots should disarm them
- ShenandoahCodeRoots::disarm_nmethods();
- }
+ // Disarm nmethods that armed in concurrent cycle.
+ // In above case, update roots should disarm them
+ ShenandoahCodeRoots::disarm_nmethods();
op_cleanup_complete();
break;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
index 4589183831281..72a3f411ea09c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
@@ -154,7 +154,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
HeapWord* result = nullptr;
size_t size = req.size();
- if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
+ if (req.is_lab_alloc()) {
size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
if (size > free) {
size = free;
@@ -279,7 +279,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
}
end++;
- };
+ }
size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
index 18fd09ead0ade..4cef5378d30bc 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
@@ -910,6 +910,9 @@ class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
// Make empty regions that have been allocated into regular
if (r->is_empty() && live > 0) {
r->make_regular_bypass();
+ if (ZapUnusedHeapArea) {
+ SpaceMangler::mangle_region(MemRegion(r->top(), r->end()));
+ }
}
// Reclaim regular regions that became empty
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp
index fa1938802933b..922f54edf3c0c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp
@@ -66,7 +66,6 @@ class ShenandoahUpdateRootsTask : public WorkerTask {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
ShenandoahParallelWorkerSession worker_session(worker_id);
- ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahUpdateRefsClosure cl;
if (_check_alive) {
ShenandoahForwardedIsAliveClosure is_alive;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index a2159f71a5ae0..f1dcbf5a8bcfd 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -838,25 +838,14 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
// It might happen that one of the threads requesting allocation would unblock
// way later after GC happened, only to fail the second allocation, because
// other threads have already depleted the free storage. In this case, a better
- // strategy is to try again, as long as GC makes progress.
- //
- // Then, we need to make sure the allocation was retried after at least one
- // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
-
- size_t tries = 0;
-
- while (result == nullptr && _progress_last_gc.is_set()) {
- tries++;
- control_thread()->handle_alloc_failure(req);
- result = allocate_memory_under_lock(req, in_new_region);
- }
-
- while (result == nullptr && tries <= ShenandoahFullGCThreshold) {
- tries++;
+ // strategy is to try again, as long as GC makes progress (or until at least
+ // one full GC has completed).
+ size_t original_count = shenandoah_policy()->full_gc_count();
+ while (result == nullptr
+ && (_progress_last_gc.is_set() || original_count == shenandoah_policy()->full_gc_count())) {
control_thread()->handle_alloc_failure(req);
result = allocate_memory_under_lock(req, in_new_region);
}
-
} else {
assert(req.is_gc_alloc(), "Can only accept GC allocs here");
result = allocate_memory_under_lock(req, in_new_region);
@@ -974,7 +963,7 @@ class ShenandoahEvacuationTask : public WorkerTask {
void work(uint worker_id) {
if (_concurrent) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
- ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
+ ShenandoahSuspendibleThreadSetJoiner stsj;
ShenandoahEvacOOMScope oom_evac_scope;
do_work();
} else {
@@ -1138,13 +1127,9 @@ void ShenandoahHeap::gclabs_retire(bool resize) {
// Returns size in bytes
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
- if (ShenandoahElasticTLAB) {
- // With Elastic TLABs, return the max allowed size, and let the allocation path
- // figure out the safe size for current allocation.
- return ShenandoahHeapRegion::max_tlab_size_bytes();
- } else {
- return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
- }
+ // Return the max allowed size, and let the allocation path
+ // figure out the safe size for current allocation.
+ return ShenandoahHeapRegion::max_tlab_size_bytes();
}
size_t ShenandoahHeap::max_tlab_size() const {
@@ -1864,14 +1849,6 @@ address ShenandoahHeap::in_cset_fast_test_addr() {
return (address) heap->collection_set()->biased_map_address();
}
-address ShenandoahHeap::cancelled_gc_addr() {
- return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
-}
-
-address ShenandoahHeap::gc_state_addr() {
- return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
-}
-
size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
return Atomic::load(&_bytes_allocated_since_gc_start);
}
@@ -2012,7 +1989,7 @@ class ShenandoahUpdateHeapRefsTask : public WorkerTask {
void work(uint worker_id) {
if (CONCURRENT) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
- ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
+ ShenandoahSuspendibleThreadSetJoiner stsj;
do_work();
} else {
ShenandoahParallelWorkerSession worker_session(worker_id);
@@ -2194,15 +2171,11 @@ bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
}
void ShenandoahHeap::safepoint_synchronize_begin() {
- if (ShenandoahSuspendibleWorkers) {
- SuspendibleThreadSet::synchronize();
- }
+ SuspendibleThreadSet::synchronize();
}
void ShenandoahHeap::safepoint_synchronize_end() {
- if (ShenandoahSuspendibleWorkers) {
- SuspendibleThreadSet::desynchronize();
- }
+ SuspendibleThreadSet::desynchronize();
}
void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index ac1804237d0a9..642faef807e40 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -293,7 +293,6 @@ class ShenandoahHeap : public CollectedHeap {
public:
char gc_state() const;
- static address gc_state_addr();
void set_concurrent_mark_in_progress(bool in_progress);
void set_evacuation_in_progress(bool in_progress);
@@ -314,7 +313,7 @@ class ShenandoahHeap : public CollectedHeap {
inline bool is_full_gc_in_progress() const;
inline bool is_full_gc_move_in_progress() const;
inline bool has_forwarded_objects() const;
- inline bool is_gc_in_progress_mask(uint mask) const;
+
inline bool is_stw_gc_in_progress() const;
inline bool is_concurrent_strong_root_in_progress() const;
inline bool is_concurrent_weak_root_in_progress() const;
@@ -334,7 +333,6 @@ class ShenandoahHeap : public CollectedHeap {
bool try_cancel_gc();
public:
- static address cancelled_gc_addr();
inline bool cancelled_gc() const;
inline bool check_cancelled_gc_and_yield(bool sts_active = true);
@@ -354,7 +352,6 @@ class ShenandoahHeap : public CollectedHeap {
void prepare_gc();
void prepare_regions_and_collection_set(bool concurrent);
// Evacuation
- void prepare_evacuation(bool concurrent);
void evacuate_collection_set(bool concurrent);
// Concurrent root processing
void prepare_concurrent_roots();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
index 4158f4bee2293..226190822a158 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
@@ -244,7 +244,7 @@ inline bool ShenandoahHeap::cancelled_gc() const {
}
inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
- if (sts_active && ShenandoahSuspendibleWorkers && !cancelled_gc()) {
+ if (sts_active && !cancelled_gc()) {
if (SuspendibleThreadSet::should_yield()) {
SuspendibleThreadSet::yield();
}
@@ -383,10 +383,6 @@ inline bool ShenandoahHeap::is_evacuation_in_progress() const {
return _gc_state.is_set(EVACUATION);
}
-inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
- return _gc_state.is_set(mask);
-}
-
inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
return _degenerated_gc_in_progress.is_set();
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
index 6cac61f848a00..a46c7edc3482a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
@@ -607,26 +607,8 @@ size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");
- // The rationale for trimming the TLAB sizes has to do with the raciness in
- // TLAB allocation machinery. It may happen that TLAB sizing policy polls Shenandoah
- // about next free size, gets the answer for region #N, goes away for a while, then
- // tries to allocate in region #N, and fail because some other thread have claimed part
- // of the region #N, and then the freeset allocation code has to retire the region #N,
- // before moving the allocation to region #N+1.
- //
- // The worst case realizes when "answer" is "region size", which means it could
- // prematurely retire an entire region. Having smaller TLABs does not fix that
- // completely, but reduces the probability of too wasteful region retirement.
- // With current divisor, we will waste no more than 1/8 of region size in the worst
- // case. This also has a secondary effect on collection set selection: even under
- // the race, the regions would be at least 7/8 used, which allows relying on
- // "used" - "live" for cset selection. Otherwise, we can get the fragmented region
- // below the garbage threshold that would never be considered for collection.
- //
- // The whole thing is mitigated if Elastic TLABs are enabled.
- //
guarantee(MaxTLABSizeWords == 0, "we should only set it once");
- MaxTLABSizeWords = MIN2(ShenandoahElasticTLAB ? RegionSizeWords : (RegionSizeWords / 8), HumongousThresholdWords);
+ MaxTLABSizeWords = MIN2(RegionSizeWords, HumongousThresholdWords);
MaxTLABSizeWords = align_down(MaxTLABSizeWords, MinObjAlignment);
guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
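With the ShenandoahElasticTLAB flag gone, the TLAB ceiling is simply the region size capped by the humongous threshold. A minimal standalone sketch of the new arithmetic, using illustrative values that are not taken from the patch:

```
#include <algorithm>
#include <cstddef>
#include <cstdio>

// Stand-in for HotSpot's align_down(); all values below are assumptions for
// illustration, not the defaults computed in ShenandoahHeapRegion::setup_sizes().
static size_t align_down(size_t value, size_t alignment) {
  return value - (value % alignment);
}

int main() {
  const size_t region_size_words         = 4 * 1024 * 1024;   // e.g. 32 MB regions, 8-byte words
  const size_t humongous_threshold_words = region_size_words;  // threshold at 100% of a region
  const size_t min_obj_alignment         = 8;                  // assumed alignment granularity, in words

  // New rule: MIN2(RegionSizeWords, HumongousThresholdWords), aligned down.
  size_t max_tlab_size_words = std::min(region_size_words, humongous_threshold_words);
  max_tlab_size_words = align_down(max_tlab_size_words, min_obj_alignment);

  std::printf("MaxTLABSizeWords = %zu\n", max_tlab_size_words);
  return 0;
}
```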
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahJfrSupport.hpp b/src/hotspot/share/gc/shenandoah/shenandoahJfrSupport.hpp
index 2ce1cd0b95faf..1553787265ce1 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahJfrSupport.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahJfrSupport.hpp
@@ -38,4 +38,4 @@ class ShenandoahJFRSupport {
static void register_jfr_type_serializers();
};
-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHJFRSUPPORT_HPP
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHJFRSUPPORT_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp
index ffae4f068bc4b..4725b8c3dfae7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp
@@ -185,7 +185,7 @@ void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint w
if (work == 0) {
// No work encountered in current stride, try to terminate.
// Need to leave the STS here otherwise it might block safepoints.
- ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
+ ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE);
ShenandoahTerminatorTerminator tt(heap);
if (terminator->offer_termination(&tt)) return;
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp
index 9130207ba9932..078b89a4ce72c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp
@@ -30,14 +30,10 @@
#include "gc/shenandoah/shenandoahOopClosures.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
-class ShenandoahCMDrainMarkingStackClosure;
-
// Base class for mark
// Mark class does not maintain states. Instead, mark states are
// maintained by task queues, mark bitmap and SATB buffers (concurrent mark)
class ShenandoahMark: public StackObj {
- friend class ShenandoahCMDrainMarkingStackClosure;
-
protected:
ShenandoahObjToScanQueueSet* const _task_queues;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp
index 74aafeb3831b9..980050b8b00f8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp
@@ -52,39 +52,6 @@ ShenandoahNMethod::~ShenandoahNMethod() {
}
}
-class ShenandoahHasCSetOopClosure : public OopClosure {
-private:
- ShenandoahHeap* const _heap;
- bool _has_cset_oops;
-
-public:
- ShenandoahHasCSetOopClosure(ShenandoahHeap *heap) :
- _heap(heap),
- _has_cset_oops(false) {
- }
-
- bool has_cset_oops() const {
- return _has_cset_oops;
- }
-
- void do_oop(oop* p) {
- oop value = RawAccess<>::oop_load(p);
- if (!_has_cset_oops && _heap->in_collection_set(value)) {
- _has_cset_oops = true;
- }
- }
-
- void do_oop(narrowOop* p) {
- ShouldNotReachHere();
- }
-};
-
-bool ShenandoahNMethod::has_cset_oops(ShenandoahHeap *heap) {
- ShenandoahHasCSetOopClosure cl(heap);
- oops_do(&cl);
- return cl.has_cset_oops();
-}
-
void ShenandoahNMethod::update() {
ResourceMark rm;
bool non_immediate_oops = false;
@@ -209,10 +176,6 @@ class ShenandoahNMethodOopDetector : public OopClosure {
GrowableArray<oop*>* oops() {
return &_oops;
}
-
- bool has_oops() {
- return !_oops.is_empty();
- }
};
void ShenandoahNMethod::assert_same_oops(bool allow_dead) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.hpp b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.hpp
index 4fc90b03bedc9..6a856e684031b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.hpp
@@ -55,12 +55,6 @@ class ShenandoahNMethod : public CHeapObj<mtGC> {
// Update oops when the nmethod is re-registered
void update();
- bool has_cset_oops(ShenandoahHeap* heap);
-
- inline int oop_count() const;
- inline bool has_oops() const;
-
- inline void mark_unregistered();
inline bool is_unregistered() const;
static ShenandoahNMethod* for_nmethod(nmethod* nm);
@@ -77,7 +71,6 @@ class ShenandoahNMethod : public CHeapObj {
void assert_same_oops(bool allow_dead = false) NOT_DEBUG_RETURN;
private:
- bool has_non_immed_oops() const { return _has_non_immed_oops; }
static void detect_reloc_oops(nmethod* nm, GrowableArray<oop*>& oops, bool& _has_non_immed_oops);
};
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.inline.hpp
index eb9659219a24e..df721c0a695fc 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.inline.hpp
@@ -39,18 +39,6 @@ ShenandoahReentrantLock* ShenandoahNMethod::lock() {
return &_lock;
}
-int ShenandoahNMethod::oop_count() const {
- return _oops_count + static_cast<int>(nm()->oops_end() - nm()->oops_begin());
-}
-
-bool ShenandoahNMethod::has_oops() const {
- return oop_count() > 0;
-}
-
-void ShenandoahNMethod::mark_unregistered() {
- _unregistered = true;
-}
-
bool ShenandoahNMethod::is_unregistered() const {
return _unregistered;
}
@@ -80,9 +68,7 @@ void ShenandoahNMethod::heal_nmethod_metadata(ShenandoahNMethod* nmethod_data) {
void ShenandoahNMethod::disarm_nmethod(nmethod* nm) {
BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
- assert(bs != nullptr || !ShenandoahNMethodBarrier,
- "Must have nmethod barrier for concurrent GC");
- if (bs != nullptr && bs->is_armed(nm)) {
+ if (bs->is_armed(nm)) {
bs->disarm(nm);
}
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
index caa5416cd2295..0bd92da1b5d10 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
@@ -362,7 +362,7 @@ bool ShenandoahReferenceProcessor::discover_reference(oop reference, ReferenceTy
log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
uint worker_id = WorkerThread::worker_id();
- _ref_proc_thread_locals->inc_encountered(type);
+ _ref_proc_thread_locals[worker_id].inc_encountered(type);
if (UseCompressedOops) {
return discover(reference, type, worker_id);
@@ -402,7 +402,7 @@ T* ShenandoahReferenceProcessor::keep(oop reference, ReferenceType type, uint wo
}
template <typename T>
-void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id) {;
+void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id) {
log_trace(gc, ref)("Processing discovered list #%u : " PTR_FORMAT, worker_id, p2i(refproc_data.discovered_list_head()));
T* list = refproc_data.discovered_list_addr();
// The list head is basically a GC root, we need to resolve and update it,
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
index 586835d35ad02..639b35deca375 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
@@ -89,7 +89,6 @@ void ShenandoahCodeCacheRoots::code_blobs_do(CodeBlobClosure* blob_cl, uint work
ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahPhaseTimings::Phase phase) :
_heap(ShenandoahHeap::heap()),
- _phase(phase),
_worker_phase(phase) {
}
@@ -206,7 +205,7 @@ ShenandoahRootAdjuster::ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTi
void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
CodeBlobToOopClosure code_blob_cl(oops, CodeBlobToOopClosure::FixRelocations);
ShenandoahCodeBlobAndDisarmClosure blobs_and_disarm_Cl(oops);
- CodeBlobToOopClosure* adjust_code_closure = (ClassUnloading && ShenandoahNMethodBarrier) ?
+ CodeBlobToOopClosure* adjust_code_closure = ShenandoahCodeRoots::use_nmethod_barriers_for_mark() ?
static_cast<CodeBlobToOopClosure*>(&blobs_and_disarm_Cl) :
static_cast<CodeBlobToOopClosure*>(&code_blob_cl);
CLDToOopClosure adjust_cld_closure(oops, ClassLoaderData::_claim_strong);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
index 75a23aea04534..fcb28dfbce016 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
@@ -134,7 +134,6 @@ class ShenandoahClassLoaderDataRoots {
class ShenandoahRootProcessor : public StackObj {
private:
ShenandoahHeap* const _heap;
- const ShenandoahPhaseTimings::Phase _phase;
const ShenandoahGCWorkerPhase _worker_phase;
public:
ShenandoahRootProcessor(ShenandoahPhaseTimings::Phase phase);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp
index 8bbc742a556b4..3872a5a391c32 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp
@@ -172,7 +172,7 @@ template <typename IsAlive, typename KeepAlive>
void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAlive* keep_alive) {
CodeBlobToOopClosure update_blobs(keep_alive, CodeBlobToOopClosure::FixRelocations);
ShenandoahCodeBlobAndDisarmClosure blobs_and_disarm_Cl(keep_alive);
- CodeBlobToOopClosure* codes_cl = (ClassUnloading && ShenandoahNMethodBarrier) ?
+ CodeBlobToOopClosure* codes_cl = ShenandoahCodeRoots::use_nmethod_barriers_for_mark() ?
static_cast<CodeBlobToOopClosure*>(&blobs_and_disarm_Cl) :
static_cast<CodeBlobToOopClosure*>(&update_blobs);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp
index 7cc8af7361f2d..1462bc052dc5b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp
@@ -89,8 +89,13 @@ ShenandoahSTWMark::ShenandoahSTWMark(bool full_gc) :
}
void ShenandoahSTWMark::mark() {
- // Weak reference processing
ShenandoahHeap* const heap = ShenandoahHeap::heap();
+
+ // Arm all nmethods. Even though this is STW mark, some marking code
+ // piggybacks on nmethod barriers for special instances.
+ ShenandoahCodeRoots::arm_nmethods_for_mark();
+
+ // Weak reference processing
ShenandoahReferenceProcessor* rp = heap->ref_processor();
rp->reset_thread_locals();
rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
@@ -120,6 +125,9 @@ void ShenandoahSTWMark::mark() {
heap->mark_complete_marking_context();
end_mark();
+ // Mark is finished, can disarm the nmethods now.
+ ShenandoahCodeRoots::disarm_nmethods();
+
assert(task_queues()->is_empty(), "Should be empty");
TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
index d6be092055820..4a97e599f3e5b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
@@ -32,6 +32,7 @@
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
+#include "interpreter/oopMapCache.hpp"
#include "memory/universe.hpp"
bool VM_ShenandoahReferenceOperation::doit_prologue() {
@@ -40,6 +41,7 @@ bool VM_ShenandoahReferenceOperation::doit_prologue() {
}
void VM_ShenandoahReferenceOperation::doit_epilogue() {
+ OopMapCache::cleanup_old_entries();
if (Universe::has_reference_pending_list()) {
Heap_lock->notify_all();
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
index 88c460b5f19bc..14212d48b099f 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
@@ -76,11 +76,6 @@
" compact - run GC more frequently and with deeper targets to " \
"free up more memory.") \
\
- product(uintx, ShenandoahUnloadClassesFrequency, 1, EXPERIMENTAL, \
- "Unload the classes every Nth cycle. Normally affects concurrent "\
- "GC cycles, as degenerated and full GCs would try to unload " \
- "classes regardless. Set to zero to disable class unloading.") \
- \
product(uintx, ShenandoahGarbageThreshold, 25, EXPERIMENTAL, \
"How much garbage a region has to contain before it would be " \
"taken for collection. This a guideline only, as GC heuristics " \
@@ -220,9 +215,6 @@
" 3 = previous level, plus all reachable objects; " \
" 4 = previous level, plus all marked objects") \
\
- product(bool, ShenandoahElasticTLAB, true, DIAGNOSTIC, \
- "Use Elastic TLABs with Shenandoah") \
- \
product(uintx, ShenandoahEvacReserve, 5, EXPERIMENTAL, \
"How much of heap to reserve for evacuations. Larger values make "\
"GC evacuate more live objects on every cycle, while leaving " \
@@ -334,9 +326,6 @@
"How many times to maximum attempt to flush SATB buffers at the " \
"end of concurrent marking.") \
\
- product(bool, ShenandoahSuspendibleWorkers, true, EXPERIMENTAL, \
- "Suspend concurrent GC worker threads at safepoints") \
- \
product(bool, ShenandoahSATBBarrier, true, DIAGNOSTIC, \
"Turn on/off SATB barriers in Shenandoah") \
\
@@ -352,21 +341,12 @@
product(bool, ShenandoahLoadRefBarrier, true, DIAGNOSTIC, \
"Turn on/off load-reference barriers in Shenandoah") \
\
- product(bool, ShenandoahNMethodBarrier, true, DIAGNOSTIC, \
- "Turn on/off NMethod entry barriers in Shenandoah") \
- \
product(bool, ShenandoahStackWatermarkBarrier, true, DIAGNOSTIC, \
"Turn on/off stack watermark barriers in Shenandoah") \
\
- develop(bool, ShenandoahVerifyOptoBarriers, false, \
+ develop(bool, ShenandoahVerifyOptoBarriers, trueInDebug, \
"Verify no missing barriers in C2.") \
\
- product(bool, ShenandoahLoopOptsAfterExpansion, true, DIAGNOSTIC, \
- "Attempt more loop opts after barrier expansion.") \
- \
- product(bool, ShenandoahSelfFixing, true, DIAGNOSTIC, \
- "Fix references with load reference barrier. Disabling this " \
- "might degrade performance.")
// end of GC_SHENANDOAH_FLAGS
diff --git a/src/hotspot/share/gc/x/xArguments.cpp b/src/hotspot/share/gc/x/xArguments.cpp
index 8c02c80024773..60e78d2c756ed 100644
--- a/src/hotspot/share/gc/x/xArguments.cpp
+++ b/src/hotspot/share/gc/x/xArguments.cpp
@@ -37,6 +37,10 @@ void XArguments::initialize_alignments() {
HeapAlignment = SpaceAlignment;
}
+void XArguments::initialize_heap_flags_and_sizes() {
+ // Nothing extra to do
+}
+
void XArguments::initialize() {
// Check mark stack size
const size_t mark_stack_space_limit = XAddressSpaceLimit::mark_stack();
diff --git a/src/hotspot/share/gc/x/xArguments.hpp b/src/hotspot/share/gc/x/xArguments.hpp
index aaa586a2df2f1..196dd994cad9b 100644
--- a/src/hotspot/share/gc/x/xArguments.hpp
+++ b/src/hotspot/share/gc/x/xArguments.hpp
@@ -31,6 +31,7 @@ class CollectedHeap;
class XArguments : AllStatic {
public:
static void initialize_alignments();
+ static void initialize_heap_flags_and_sizes();
static void initialize();
static size_t heap_virtual_to_physical_ratio();
static CollectedHeap* create_heap();
diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
index e5fb50a26e4fb..70b9bd6eaa7de 100644
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
@@ -42,6 +42,7 @@
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
+#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
@@ -226,6 +227,7 @@ Label* ZBarrierStubC2::continuation() {
}
ZLoadBarrierStubC2* ZLoadBarrierStubC2::create(const MachNode* node, Address ref_addr, Register ref) {
+ AARCH64_ONLY(fatal("Should use ZLoadBarrierStubC2Aarch64::create"));
ZLoadBarrierStubC2* const stub = new (Compile::current()->comp_arena()) ZLoadBarrierStubC2(node, ref_addr, ref);
register_stub(stub);
@@ -275,6 +277,7 @@ void ZLoadBarrierStubC2::emit_code(MacroAssembler& masm) {
}
ZStoreBarrierStubC2* ZStoreBarrierStubC2::create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic) {
+ AARCH64_ONLY(fatal("Should use ZStoreBarrierStubC2Aarch64::create"));
ZStoreBarrierStubC2* const stub = new (Compile::current()->comp_arena()) ZStoreBarrierStubC2(node, ref_addr, new_zaddress, new_zpointer, is_native, is_atomic);
register_stub(stub);
diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp
index a0f29fbc51076..7af70c6409678 100644
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp
@@ -52,9 +52,9 @@ static void inc_trampoline_stubs_count();
static int trampoline_stubs_count();
static int stubs_start_offset();
-public:
ZBarrierStubC2(const MachNode* node);
+public:
RegMask& live() const;
Label* entry();
Label* continuation();
diff --git a/src/hotspot/share/gc/z/shared/zSharedArguments.cpp b/src/hotspot/share/gc/z/shared/zSharedArguments.cpp
index 8a00a851acb08..4d7e9827f18a0 100644
--- a/src/hotspot/share/gc/z/shared/zSharedArguments.cpp
+++ b/src/hotspot/share/gc/z/shared/zSharedArguments.cpp
@@ -38,6 +38,16 @@ void ZSharedArguments::initialize_alignments() {
}
}
+void ZSharedArguments::initialize_heap_flags_and_sizes() {
+ GCArguments::initialize_heap_flags_and_sizes();
+
+ if (ZGenerational) {
+ ZArguments::initialize_heap_flags_and_sizes();
+ } else {
+ XArguments::initialize_heap_flags_and_sizes();
+ }
+}
+
void ZSharedArguments::initialize() {
GCArguments::initialize();
diff --git a/src/hotspot/share/gc/z/shared/zSharedArguments.hpp b/src/hotspot/share/gc/z/shared/zSharedArguments.hpp
index 74659f581b918..c53f28ee0f97c 100644
--- a/src/hotspot/share/gc/z/shared/zSharedArguments.hpp
+++ b/src/hotspot/share/gc/z/shared/zSharedArguments.hpp
@@ -31,6 +31,7 @@ class CollectedHeap;
class ZSharedArguments : public GCArguments {
private:
virtual void initialize_alignments();
+ virtual void initialize_heap_flags_and_sizes();
virtual void initialize();
virtual size_t conservative_max_heap_alignment();
diff --git a/src/hotspot/share/gc/z/zArguments.cpp b/src/hotspot/share/gc/z/zArguments.cpp
index 01ecf8f3fc4b8..192cad86e67d3 100644
--- a/src/hotspot/share/gc/z/zArguments.cpp
+++ b/src/hotspot/share/gc/z/zArguments.cpp
@@ -37,6 +37,19 @@ void ZArguments::initialize_alignments() {
HeapAlignment = SpaceAlignment;
}
+void ZArguments::initialize_heap_flags_and_sizes() {
+ if (!FLAG_IS_CMDLINE(MaxHeapSize) &&
+ !FLAG_IS_CMDLINE(MaxRAMFraction) &&
+ !FLAG_IS_CMDLINE(MaxRAMPercentage) &&
+ !FLAG_IS_CMDLINE(SoftMaxHeapSize)) {
+ // We are really just guessing how much memory the program needs.
+ // When that is the case, we don't want the soft and hard limits to be the same
+ // as it can cause flakyness in the number of GC threads used, in order to keep
+ // to a random number we just pulled out of thin air.
+ FLAG_SET_ERGO(SoftMaxHeapSize, MaxHeapSize * 90 / 100);
+ }
+}
+
void ZArguments::select_max_gc_threads() {
// Select number of parallel threads
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
@@ -126,20 +139,13 @@ void ZArguments::initialize() {
FLAG_SET_ERGO_IF_DEFAULT(ZCollectionIntervalMajor, ZCollectionInterval);
}
- if (!FLAG_IS_CMDLINE(MaxHeapSize) &&
- !FLAG_IS_CMDLINE(MaxRAMFraction) &&
- !FLAG_IS_CMDLINE(MaxRAMPercentage)) {
- // We are really just guessing how much memory the program needs.
- // When that is the case, we don't want the soft and hard limits to be the same
- // as it can cause flakyness in the number of GC threads used, in order to keep
- // to a random number we just pulled out of thin air.
- FLAG_SET_ERGO_IF_DEFAULT(SoftMaxHeapSize, MaxHeapSize * 90 / 100);
- }
-
if (FLAG_IS_DEFAULT(ZFragmentationLimit)) {
FLAG_SET_DEFAULT(ZFragmentationLimit, 5.0);
}
+ // Set medium page size here because MaxTenuringThreshold may use it.
+ ZHeuristics::set_medium_page_size();
+
if (!FLAG_IS_DEFAULT(ZTenuringThreshold) && ZTenuringThreshold != -1) {
FLAG_SET_ERGO_IF_DEFAULT(MaxTenuringThreshold, ZTenuringThreshold);
if (MaxTenuringThreshold == 0) {
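The soft limit ergonomics now run during heap flag setup rather than in ZArguments::initialize(). A standalone sketch of the rule with made-up values (the real flags come from HotSpot's argument parsing): when none of the heap sizing flags were given on the command line, the soft limit is set to 90% of the hard limit so the two never coincide.

```
#include <cstdint>
#include <cstdio>

int main() {
  // Assumed example values, not HotSpot defaults.
  const uint64_t max_heap_size       = 16ull * 1024 * 1024 * 1024; // ergonomically chosen MaxHeapSize
  const bool     user_sized_the_heap = false; // MaxHeapSize/MaxRAMFraction/MaxRAMPercentage/SoftMaxHeapSize all absent

  uint64_t soft_max_heap_size = max_heap_size;
  if (!user_sized_the_heap) {
    // Mirrors FLAG_SET_ERGO(SoftMaxHeapSize, MaxHeapSize * 90 / 100).
    soft_max_heap_size = max_heap_size * 90 / 100;
  }

  std::printf("SoftMaxHeapSize = %llu bytes\n", (unsigned long long)soft_max_heap_size);
  return 0;
}
```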
diff --git a/src/hotspot/share/gc/z/zArguments.hpp b/src/hotspot/share/gc/z/zArguments.hpp
index ac1e613d4ccbb..7d1c00d30d1cc 100644
--- a/src/hotspot/share/gc/z/zArguments.hpp
+++ b/src/hotspot/share/gc/z/zArguments.hpp
@@ -34,6 +34,7 @@ class ZArguments : AllStatic {
public:
static void initialize_alignments();
+ static void initialize_heap_flags_and_sizes();
static void initialize();
static size_t heap_virtual_to_physical_ratio();
static CollectedHeap* create_heap();
diff --git a/src/hotspot/share/gc/z/zBarrier.inline.hpp b/src/hotspot/share/gc/z/zBarrier.inline.hpp
index e0d83619934da..2c81c14865b51 100644
--- a/src/hotspot/share/gc/z/zBarrier.inline.hpp
+++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp
@@ -26,14 +26,13 @@
#include "gc/z/zBarrier.hpp"
-#include "code/codeCache.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zResurrection.inline.hpp"
+#include "gc/z/zVerify.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/continuation.hpp"
// A self heal must always "upgrade" the address metadata bits in
// accordance with the metadata bits state machine. The following
@@ -320,17 +319,9 @@ inline zaddress ZBarrier::make_load_good_no_relocate(zpointer o) {
return remap(ZPointer::uncolor_unsafe(o), remap_generation(o));
}
-inline void z_assert_is_barrier_safe() {
- assert(!Thread::current()->is_ConcurrentGC_thread() || /* Need extra checks for ConcurrentGCThreads */
- Thread::current()->is_suspendible_thread() || /* Thread prevents safepoints */
- Thread::current()->is_indirectly_suspendible_thread() || /* Coordinator thread prevents safepoints */
- SafepointSynchronize::is_at_safepoint(), /* Is at safepoint */
- "Shouldn't perform load barrier");
-}
-
template
inline zaddress ZBarrier::barrier(ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path, ZBarrierColor color, volatile zpointer* p, zpointer o, bool allow_null) {
- z_assert_is_barrier_safe();
+ z_verify_safepoints_are_blocked();
// Fast path
if (fast_path(o)) {
diff --git a/src/hotspot/share/gc/z/zBarrierSet.cpp b/src/hotspot/share/gc/z/zBarrierSet.cpp
index 160673e8059bf..48228a3e1abc3 100644
--- a/src/hotspot/share/gc/z/zBarrierSet.cpp
+++ b/src/hotspot/share/gc/z/zBarrierSet.cpp
@@ -152,6 +152,20 @@ void ZBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
deoptimize_allocation(thread);
}
+void ZBarrierSet::clone_obj_array(objArrayOop src_obj, objArrayOop dst_obj) {
+ volatile zpointer* src = (volatile zpointer*)src_obj->base();
+ volatile zpointer* dst = (volatile zpointer*)dst_obj->base();
+ const int length = src_obj->length();
+
+ for (const volatile zpointer* const end = src + length; src < end; src++, dst++) {
+ zaddress elem = ZBarrier::load_barrier_on_oop_field(src);
+ // We avoid healing here because the store below colors the pointer store good,
+ // hence avoiding the cost of a CAS.
+ ZBarrier::store_barrier_on_heap_oop_field(dst, false /* heal */);
+ Atomic::store(dst, ZAddress::store_good(elem));
+ }
+}
+
void ZBarrierSet::print_on(outputStream* st) const {
st->print_cr("ZBarrierSet");
}
diff --git a/src/hotspot/share/gc/z/zBarrierSet.hpp b/src/hotspot/share/gc/z/zBarrierSet.hpp
index 213f85dcea8c7..bf233df683afb 100644
--- a/src/hotspot/share/gc/z/zBarrierSet.hpp
+++ b/src/hotspot/share/gc/z/zBarrierSet.hpp
@@ -39,6 +39,8 @@ class ZBarrierSet : public BarrierSet {
static ZBarrierSetAssembler* assembler();
static bool barrier_needed(DecoratorSet decorators, BasicType type);
+ static void clone_obj_array(objArrayOop src, objArrayOop dst);
+
virtual void on_thread_create(Thread* thread);
virtual void on_thread_destroy(Thread* thread);
virtual void on_thread_attach(Thread* thread);
diff --git a/src/hotspot/share/gc/z/zBarrierSet.inline.hpp b/src/hotspot/share/gc/z/zBarrierSet.inline.hpp
index bfbae74972d80..d53b69345dd98 100644
--- a/src/hotspot/share/gc/z/zBarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/z/zBarrierSet.inline.hpp
@@ -403,14 +403,13 @@ inline bool ZBarrierSet::AccessBarrier::oop_arraycopy_i
return oop_arraycopy_in_heap_no_check_cast(dst, src, length);
}
-class ZStoreBarrierOopClosure : public BasicOopIterateClosure {
+class ZColorStoreGoodOopClosure : public BasicOopIterateClosure {
public:
virtual void do_oop(oop* p_) {
volatile zpointer* const p = (volatile zpointer*)p_;
const zpointer ptr = ZBarrier::load_atomic(p);
const zaddress addr = ZPointer::uncolor(ptr);
- ZBarrier::store_barrier_on_heap_oop_field(p, false /* heal */);
- *p = ZAddress::store_good(addr);
+ Atomic::store(p, ZAddress::store_good(addr));
}
virtual void do_oop(narrowOop* p) {
@@ -433,6 +432,17 @@ template <DecoratorSet decorators, typename BarrierSetT>
inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) {
assert_is_valid(to_zaddress(src));
+ if (dst->is_objArray()) {
+ // Cloning an object array is similar to performing array copy.
+ // If an array is large enough to have its allocation segmented,
+ // this operation might require GC barriers. However, the intrinsics
+ // for cloning arrays transform the clone to an optimized allocation
+ // and arraycopy sequence, so the performance of this runtime call
+ // does not matter for object arrays.
+ clone_obj_array(objArrayOop(src), objArrayOop(dst));
+ return;
+ }
+
// Fix the oops
ZLoadBarrierOopClosure cl;
ZIterator::oop_iterate(src, &cl);
@@ -440,10 +450,10 @@ inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) {
// Clone the object
Raw::clone_in_heap(src, dst, size);
- assert(ZHeap::heap()->is_young(to_zaddress(dst)), "ZColorStoreGoodOopClosure is only valid for young objects");
+ assert(dst->is_typeArray() || ZHeap::heap()->is_young(to_zaddress(dst)), "ZColorStoreGoodOopClosure is only valid for young objects");
// Color store good before handing out
- ZStoreBarrierOopClosure cl_sg;
+ ZColorStoreGoodOopClosure cl_sg;
ZIterator::oop_iterate(dst, &cl_sg);
}
diff --git a/src/hotspot/share/gc/z/zGeneration.cpp b/src/hotspot/share/gc/z/zGeneration.cpp
index 0ee5ce7a6c89e..1b4afd4eefbe4 100644
--- a/src/hotspot/share/gc/z/zGeneration.cpp
+++ b/src/hotspot/share/gc/z/zGeneration.cpp
@@ -285,6 +285,10 @@ void ZGeneration::desynchronize_relocation() {
_relocate.desynchronize();
}
+bool ZGeneration::is_relocate_queue_active() const {
+ return _relocate.is_queue_active();
+}
+
void ZGeneration::reset_statistics() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
_freed = 0;
@@ -1492,7 +1496,7 @@ void ZGenerationOld::remap_young_roots() {
uint remap_nworkers = clamp(ZGeneration::young()->workers()->active_workers() + prev_nworkers, 1u, ZOldGCThreads);
_workers.set_active_workers(remap_nworkers);
- // TODO: The STS joiner is only needed to satisfy z_assert_is_barrier_safe that doesn't
+ // TODO: The STS joiner is only needed to satisfy z_verify_safepoints_are_blocked that doesn't
// understand the driver locker. Consider making the assert aware of the driver locker.
SuspendibleThreadSetJoiner sts_joiner;
diff --git a/src/hotspot/share/gc/z/zGeneration.hpp b/src/hotspot/share/gc/z/zGeneration.hpp
index 23736f45b7be7..32762a50b6278 100644
--- a/src/hotspot/share/gc/z/zGeneration.hpp
+++ b/src/hotspot/share/gc/z/zGeneration.hpp
@@ -166,6 +166,7 @@ class ZGeneration {
// Relocation
void synchronize_relocation();
void desynchronize_relocation();
+ bool is_relocate_queue_active() const;
zaddress relocate_or_remap_object(zaddress_unsafe addr);
zaddress remap_object(zaddress_unsafe addr);
diff --git a/src/hotspot/share/gc/z/zHeuristics.cpp b/src/hotspot/share/gc/z/zHeuristics.cpp
index bcd9dd844052b..36ca1177c326a 100644
--- a/src/hotspot/share/gc/z/zHeuristics.cpp
+++ b/src/hotspot/share/gc/z/zHeuristics.cpp
@@ -101,9 +101,9 @@ uint ZHeuristics::nconcurrent_workers() {
}
size_t ZHeuristics::significant_heap_overhead() {
- return MaxHeapSize * ZFragmentationLimit;
+ return MaxHeapSize * (ZFragmentationLimit / 100);
}
size_t ZHeuristics::significant_young_overhead() {
- return MaxHeapSize * ZYoungCompactionLimit;
+ return MaxHeapSize * (ZYoungCompactionLimit / 100);
}
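ZFragmentationLimit and ZYoungCompactionLimit are expressed in percent (the default fragmentation limit is 5.0, i.e. 5%), so the overhead must be scaled by 1/100. A small sketch of the difference, using assumed values:

```
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t max_heap_size       = 1024ull * 1024 * 1024; // assumed 1 GB heap
  const double   fragmentation_limit = 5.0;                   // percent, default ZFragmentationLimit

  // Old expression treated the flag as a ratio: 5x the heap size.
  const uint64_t old_overhead = (uint64_t)(max_heap_size * fragmentation_limit);
  // Fixed expression converts percent to a fraction first: 5% of the heap size.
  const uint64_t new_overhead = (uint64_t)(max_heap_size * (fragmentation_limit / 100));

  std::printf("old: %llu bytes, new: %llu bytes\n",
              (unsigned long long)old_overhead, (unsigned long long)new_overhead);
  return 0;
}
```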
diff --git a/src/hotspot/share/gc/z/zInitialize.cpp b/src/hotspot/share/gc/z/zInitialize.cpp
index 0c0dc6e87a6c4..bf8cb96a4cbd6 100644
--- a/src/hotspot/share/gc/z/zInitialize.cpp
+++ b/src/hotspot/share/gc/z/zInitialize.cpp
@@ -28,7 +28,6 @@
#include "gc/z/zDriver.hpp"
#include "gc/z/zGCIdPrinter.hpp"
#include "gc/z/zGlobals.hpp"
-#include "gc/z/zHeuristics.hpp"
#include "gc/z/zInitialize.hpp"
#include "gc/z/zJNICritical.hpp"
#include "gc/z/zLargePages.hpp"
@@ -54,7 +53,6 @@ ZInitialize::ZInitialize(ZBarrierSet* barrier_set) {
ZThreadLocalAllocBuffer::initialize();
ZTracer::initialize();
ZLargePages::initialize();
- ZHeuristics::set_medium_page_size();
ZBarrierSet::set_barrier_set(barrier_set);
ZJNICritical::initialize();
ZDriver::initialize();
diff --git a/src/hotspot/share/gc/z/zIterator.inline.hpp b/src/hotspot/share/gc/z/zIterator.inline.hpp
index 9ccacdc9a3ca1..af97a549b0de3 100644
--- a/src/hotspot/share/gc/z/zIterator.inline.hpp
+++ b/src/hotspot/share/gc/z/zIterator.inline.hpp
@@ -26,11 +26,21 @@
#include "gc/z/zIterator.hpp"
+#include "gc/z/zVerify.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
inline bool ZIterator::is_invisible_object(oop obj) {
+ // This is a good place to make sure that we can't concurrently iterate over
+ // objects while VMThread operations think they have exclusive access to the
+ // object graph.
+ //
+ // One example that has caused problems is the JFR Leak Profiler, which
+ // sets the mark word to a value that makes the object arrays look like
+ // invisible objects.
+ z_verify_safepoints_are_blocked();
+
return obj->mark_acquire().is_marked();
}
diff --git a/src/hotspot/share/gc/z/zReferenceProcessor.cpp b/src/hotspot/share/gc/z/zReferenceProcessor.cpp
index 1037093523dd6..df8cb2b0e959f 100644
--- a/src/hotspot/share/gc/z/zReferenceProcessor.cpp
+++ b/src/hotspot/share/gc/z/zReferenceProcessor.cpp
@@ -113,6 +113,7 @@ static void list_append(zaddress& head, zaddress& tail, zaddress reference) {
ZReferenceProcessor::ZReferenceProcessor(ZWorkers* workers)
: _workers(workers),
_soft_reference_policy(nullptr),
+ _clear_all_soft_refs(false),
_encountered_count(),
_discovered_count(),
_enqueued_count(),
@@ -124,8 +125,9 @@ void ZReferenceProcessor::set_soft_reference_policy(bool clear) {
static AlwaysClearPolicy always_clear_policy;
static LRUMaxHeapPolicy lru_max_heap_policy;
+ _clear_all_soft_refs = clear;
+
if (clear) {
- log_info(gc, ref)("Clearing All SoftReferences");
_soft_reference_policy = &always_clear_policy;
} else {
_soft_reference_policy = &lru_max_heap_policy;
@@ -438,6 +440,10 @@ class ZReferenceProcessorTask : public ZTask {
void ZReferenceProcessor::process_references() {
ZStatTimerOld timer(ZSubPhaseConcurrentReferencesProcess);
+ if (_clear_all_soft_refs) {
+ log_info(gc, ref)("Clearing All SoftReferences");
+ }
+
// Process discovered lists
ZReferenceProcessorTask task(this);
_workers->run(&task);
diff --git a/src/hotspot/share/gc/z/zReferenceProcessor.hpp b/src/hotspot/share/gc/z/zReferenceProcessor.hpp
index d39cc8634cd22..7a8900827da83 100644
--- a/src/hotspot/share/gc/z/zReferenceProcessor.hpp
+++ b/src/hotspot/share/gc/z/zReferenceProcessor.hpp
@@ -41,6 +41,7 @@ class ZReferenceProcessor : public ReferenceDiscoverer {
ZWorkers* const _workers;
ReferencePolicy* _soft_reference_policy;
+ bool _clear_all_soft_refs;
ZPerWorker<Counters> _encountered_count;
ZPerWorker<Counters> _discovered_count;
ZPerWorker<Counters> _enqueued_count;
diff --git a/src/hotspot/share/gc/z/zRelocate.cpp b/src/hotspot/share/gc/z/zRelocate.cpp
index d4e3e92b3aa0b..281553f7fdfbd 100644
--- a/src/hotspot/share/gc/z/zRelocate.cpp
+++ b/src/hotspot/share/gc/z/zRelocate.cpp
@@ -87,6 +87,7 @@ ZRelocateQueue::ZRelocateQueue()
_nworkers(0),
_nsynchronized(0),
_synchronize(false),
+ _is_active(false),
_needs_attention(0) {}
bool ZRelocateQueue::needs_attention() const {
@@ -103,6 +104,20 @@ void ZRelocateQueue::dec_needs_attention() {
assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
}
+void ZRelocateQueue::activate(uint nworkers) {
+ _is_active = true;
+ join(nworkers);
+}
+
+void ZRelocateQueue::deactivate() {
+ Atomic::store(&_is_active, false);
+ clear();
+}
+
+bool ZRelocateQueue::is_active() const {
+ return Atomic::load(&_is_active);
+}
+
void ZRelocateQueue::join(uint nworkers) {
assert(nworkers != 0, "Must request at least one worker");
assert(_nworkers == 0, "Invalid state");
@@ -327,7 +342,7 @@ ZWorkers* ZRelocate::workers() const {
}
void ZRelocate::start() {
- _queue.join(workers()->active_workers());
+ _queue.activate(workers()->active_workers());
}
void ZRelocate::add_remset(volatile zpointer* p) {
@@ -1088,6 +1103,9 @@ class ZRelocateTask : public ZRestartableTask {
~ZRelocateTask() {
_generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());
+
+ // Signal that we're not using the queue anymore. Used mostly for asserts.
+ _queue->deactivate();
}
virtual void work() {
@@ -1232,8 +1250,6 @@ void ZRelocate::relocate(ZRelocationSet* relocation_set) {
ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages());
workers()->run(&task);
}
-
- _queue.clear();
}
ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
@@ -1316,3 +1332,7 @@ void ZRelocate::desynchronize() {
ZRelocateQueue* ZRelocate::queue() {
return &_queue;
}
+
+bool ZRelocate::is_queue_active() const {
+ return _queue.is_active();
+}
diff --git a/src/hotspot/share/gc/z/zRelocate.hpp b/src/hotspot/share/gc/z/zRelocate.hpp
index ed54103d53c18..1b35abdf521fb 100644
--- a/src/hotspot/share/gc/z/zRelocate.hpp
+++ b/src/hotspot/share/gc/z/zRelocate.hpp
@@ -41,6 +41,7 @@ class ZRelocateQueue {
uint _nworkers;
uint _nsynchronized;
bool _synchronize;
+ volatile bool _is_active;
volatile int _needs_attention;
bool needs_attention() const;
@@ -53,6 +54,10 @@ class ZRelocateQueue {
public:
ZRelocateQueue();
+ void activate(uint nworkers);
+ void deactivate();
+ bool is_active() const;
+
void join(uint nworkers);
void resize_workers(uint nworkers);
void leave();
@@ -99,6 +104,8 @@ class ZRelocate {
void desynchronize();
ZRelocateQueue* queue();
+
+ bool is_queue_active() const;
};
#endif // SHARE_GC_Z_ZRELOCATE_HPP
diff --git a/src/hotspot/share/gc/z/zRelocationSet.cpp b/src/hotspot/share/gc/z/zRelocationSet.cpp
index 83bdf13b2bb02..92f245777b4e9 100644
--- a/src/hotspot/share/gc/z/zRelocationSet.cpp
+++ b/src/hotspot/share/gc/z/zRelocationSet.cpp
@@ -106,11 +106,16 @@ class ZRelocationSetInstallTask : public ZTask {
}
virtual void work() {
+ // Join the STS to block out VMThreads while running promote_barrier_on_young_oop_field
+ SuspendibleThreadSetJoiner sts_joiner;
+
// Allocate and install forwardings for small pages
for (size_t page_index; _small_iter.next_index(&page_index);) {
ZPage* page = _small->at(int(page_index));
ZForwarding* const forwarding = ZForwarding::alloc(_allocator, page, to_age(page));
install_small(forwarding, _medium->length() + page_index);
+
+ SuspendibleThreadSet::yield();
}
// Allocate and install forwardings for medium pages
@@ -118,6 +123,8 @@ class ZRelocationSetInstallTask : public ZTask {
ZPage* page = _medium->at(int(page_index));
ZForwarding* const forwarding = ZForwarding::alloc(_allocator, page, to_age(page));
install_medium(forwarding, page_index);
+
+ SuspendibleThreadSet::yield();
}
}
diff --git a/src/hotspot/share/gc/z/zRootsIterator.cpp b/src/hotspot/share/gc/z/zRootsIterator.cpp
index 130ab5e4627a8..086d90781dd63 100644
--- a/src/hotspot/share/gc/z/zRootsIterator.cpp
+++ b/src/hotspot/share/gc/z/zRootsIterator.cpp
@@ -101,7 +101,7 @@ void ZParallelApply<Iterator>::apply(ClosureType* cl) {
}
void ZOopStorageSetIteratorStrong::apply(OopClosure* cl) {
- ZRootStatTimer timer(ZSubPhaseConcurrentWeakRootsOopStorageSet, _generation);
+ ZRootStatTimer timer(ZSubPhaseConcurrentRootsOopStorageSet, _generation);
_iter.oops_do(cl);
}
diff --git a/src/hotspot/share/gc/z/zUncoloredRoot.inline.hpp b/src/hotspot/share/gc/z/zUncoloredRoot.inline.hpp
index 3eb28d3cafe26..a7d39e5ed9cc5 100644
--- a/src/hotspot/share/gc/z/zUncoloredRoot.inline.hpp
+++ b/src/hotspot/share/gc/z/zUncoloredRoot.inline.hpp
@@ -29,11 +29,12 @@
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zBarrier.hpp"
#include "oops/oop.hpp"
template <typename ObjectFunctionT>
inline void ZUncoloredRoot::barrier(ObjectFunctionT function, zaddress_unsafe* p, uintptr_t color) {
- z_assert_is_barrier_safe();
+ z_verify_safepoints_are_blocked();
const zaddress_unsafe addr = Atomic::load(p);
assert_is_valid(addr);
diff --git a/src/hotspot/share/gc/z/zVerify.cpp b/src/hotspot/share/gc/z/zVerify.cpp
index 6950f66915871..b168610db3af1 100644
--- a/src/hotspot/share/gc/z/zVerify.cpp
+++ b/src/hotspot/share/gc/z/zVerify.cpp
@@ -43,16 +43,67 @@
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.hpp"
-#include "runtime/javaThread.hpp"
+#include "runtime/javaThread.inline.hpp"
+#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/stackWatermark.inline.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
+#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/resourceHash.hpp"
+#ifdef ASSERT
+
+// Used to verify that safepoints operations can't be scheduled concurrently
+// with callers to this function. Typically used to verify that object oops
+// and headers are safe to access.
+void z_verify_safepoints_are_blocked() {
+ Thread* current = Thread::current();
+
+ if (current->is_ConcurrentGC_thread()) {
+ assert(current->is_suspendible_thread(), // Thread prevents safepoints
+ "Safepoints are not blocked by current thread");
+
+ } else if (current->is_Worker_thread()) {
+ assert(// Check if ...
+ // the thread prevents safepoints
+ current->is_suspendible_thread() ||
+ // the coordinator thread is the safepointing VMThread
+ current->is_indirectly_safepoint_thread() ||
+ // the coordinator thread prevents safepoints
+ current->is_indirectly_suspendible_thread() ||
+ // the RelocateQueue prevents safepoints
+ //
+ // RelocateQueue acts as a pseudo STS leaver/joiner and blocks
+ // safepoints. There's currently no infrastructure to check if the
+ // current thread is active or not, so check the global states instead.
+ ZGeneration::young()->is_relocate_queue_active() ||
+ ZGeneration::old()->is_relocate_queue_active(),
+ "Safepoints are not blocked by current thread");
+
+ } else if (current->is_Java_thread()) {
+ JavaThreadState state = JavaThread::cast(current)->thread_state();
+ assert(state == _thread_in_Java || state == _thread_in_vm || state == _thread_new,
+ "Safepoints are not blocked by current thread from state: %d", state);
+
+ } else if (current->is_JfrSampler_thread()) {
+ // The JFR sampler thread blocks out safepoints with this lock.
+ assert_lock_strong(Threads_lock);
+
+ } else if (current->is_VM_thread()) {
+ // The VM Thread doesn't schedule new safepoints while executing
+ // other safepoint or handshake operations.
+
+ } else {
+ fatal("Unexpected thread type");
+ }
+}
+
+#endif
+
#define BAD_OOP_ARG(o, p) "Bad oop " PTR_FORMAT " found at " PTR_FORMAT, untype(o), p2i(p)
static bool z_is_null_relaxed(zpointer o) {
diff --git a/src/hotspot/share/gc/z/zVerify.hpp b/src/hotspot/share/gc/z/zVerify.hpp
index e9ada2cefa9ca..447d38504a262 100644
--- a/src/hotspot/share/gc/z/zVerify.hpp
+++ b/src/hotspot/share/gc/z/zVerify.hpp
@@ -30,6 +30,8 @@ class frame;
class ZForwarding;
class ZPageAllocator;
+NOT_DEBUG(inline) void z_verify_safepoints_are_blocked() NOT_DEBUG_RETURN;
+
class ZVerify : public AllStatic {
private:
static void roots_strong(bool verify_after_old_mark);
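The `NOT_DEBUG(inline) ... NOT_DEBUG_RETURN` declaration makes the verification call disappear in product builds: in a debug build the function is declared here and defined in zVerify.cpp under `#ifdef ASSERT`, while in a product build it collapses to an empty inline stub. A rough standalone analogue of that idiom, with hypothetical macro and function names:

```
#include <cstdio>

// Hypothetical stand-in for HotSpot's ASSERT/NOT_DEBUG machinery.
#ifdef MY_DEBUG_BUILD
void verify_safepoints_are_blocked();          // real body lives in a .cpp file
#else
inline void verify_safepoints_are_blocked() {} // no-op in optimized builds
#endif

#ifdef MY_DEBUG_BUILD
void verify_safepoints_are_blocked() {
  std::puts("running the expensive thread-state checks");
}
#endif

int main() {
  verify_safepoints_are_blocked(); // free in product-style builds, checked in debug-style builds
  return 0;
}
```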
diff --git a/src/hotspot/share/interpreter/bytecodes.cpp b/src/hotspot/share/interpreter/bytecodes.cpp
index 5db3ab5fe6b0d..181024bffc6c6 100644
--- a/src/hotspot/share/interpreter/bytecodes.cpp
+++ b/src/hotspot/share/interpreter/bytecodes.cpp
@@ -385,12 +385,18 @@ int Bytecodes::special_length_at(Bytecodes::Code code, address bcp, address end)
if (end != nullptr && aligned_bcp + 3*jintSize >= end) {
return -1; // don't read past end of code buffer
}
+ // Promote calculation to signed 64 bits to do range checks, used by the verifier.
jlong lo = (jint)Bytes::get_Java_u4(aligned_bcp + 1*jintSize);
jlong hi = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
jlong len = (aligned_bcp - bcp) + (3 + hi - lo + 1)*jintSize;
- // only return len if it can be represented as a positive int;
- // return -1 otherwise
- return (len > 0 && len == (int)len) ? len : -1;
+ // Only return len if it can be represented as a positive int and lo <= hi.
+ // The caller checks for bytecode stream overflow.
+ if (lo <= hi && len == (int)len) {
+ assert(len > 0, "must be");
+ return (int)len;
+ } else {
+ return -1;
+ }
}
case _lookupswitch: // fall through
@@ -402,9 +408,13 @@ int Bytecodes::special_length_at(Bytecodes::Code code, address bcp, address end)
}
jlong npairs = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
jlong len = (aligned_bcp - bcp) + (2 + 2*npairs)*jintSize;
- // only return len if it can be represented as a positive int;
- // return -1 otherwise
- return (len > 0 && len == (int)len) ? len : -1;
+ // Only return len if it can be represented as a positive int and npairs >= 0.
+ if (npairs >= 0 && len == (int)len) {
+ assert(len > 0, "must be");
+ return (int)len;
+ } else {
+ return -1;
+ }
}
default:
// Note: Length functions must return <=0 for invalid bytecodes.
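A standalone sketch of the new range check, with illustrative operand values: the tableswitch length is computed in 64-bit arithmetic and only returned when the bounds are sane and the result round-trips through int.

```
#include <cstdint>
#include <cstdio>

// Simplified model of the tableswitch length computation; jintSize is 4.
// 'pad' stands for the 0..3 byte alignment gap (aligned_bcp - bcp).
static int table_switch_length(int64_t pad, int32_t lo, int32_t hi) {
  const int64_t jint_size = 4;
  const int64_t len = pad + (3 + (int64_t)hi - lo + 1) * jint_size;
  if (lo <= hi && len == (int32_t)len) {
    return (int32_t)len; // necessarily positive here
  }
  return -1; // malformed bytecode; the caller rejects it
}

int main() {
  std::printf("%d\n", table_switch_length(3, 0, 9));                  // well-formed: prints 55
  std::printf("%d\n", table_switch_length(3, INT32_MIN, INT32_MAX));  // would overflow an int: prints -1
  return 0;
}
```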
diff --git a/src/hotspot/share/jfr/metadata/metadata.xml b/src/hotspot/share/jfr/metadata/metadata.xml
index 458dfe5bc605b..ac73a0b36c7c4 100644
--- a/src/hotspot/share/jfr/metadata/metadata.xml
+++ b/src/hotspot/share/jfr/metadata/metadata.xml
@@ -932,10 +932,11 @@
-
+
-
+
+ Copyright (c) 2012-2014 Daniel J. Bernstein
+ To the extent possible under law, the author(s) have dedicated all copyright
+ and related and neighboring rights to this software to the public domain
+ worldwide. This software is distributed without any warranty.
+ You should have received a copy of the CC0 Public Domain Dedication along
+ with
+ this software. If not, see
+ <http://creativecommons.org/publicdomain/zero/1.0/>.
+```
+
+### Licenses
+The code is dual-licensed CC0 and MIT
+
+#### MIT License
+```
+Copyright 2012-2024 JP Aumasson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+```
+
+#### CC0 1.0 Universal
+```
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer
+exclusive Copyright and Related Rights (defined below) upon the creator and
+subsequent owner(s) (each and all, an "owner") of an original work of
+authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for the
+purpose of contributing to a commons of creative, cultural and scientific
+works ("Commons") that the public can reliably and without fear of later
+claims of infringement build upon, modify, incorporate in other works, reuse
+and redistribute as freely as possible in any form whatsoever and for any
+purposes, including without limitation commercial purposes. These owners may
+contribute to the Commons to promote the ideal of a free culture and the
+further production of creative, cultural and scientific works, or to gain
+reputation or greater distribution for their Work in part through the use and
+efforts of others.
+
+For these and/or other purposes and motivations, and without any expectation
+of additional consideration or compensation, the person associating CC0 with a
+Work (the "Affirmer"), to the extent that he or she is an owner of Copyright
+and Related Rights in the Work, voluntarily elects to apply CC0 to the Work
+and publicly distribute the Work under its terms, with knowledge of his or her
+Copyright and Related Rights in the Work and the meaning and intended legal
+effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be
+protected by copyright and related or neighboring rights ("Copyright and
+Related Rights"). Copyright and Related Rights include, but are not limited
+to, the following:
+
+ i. the right to reproduce, adapt, distribute, perform, display, communicate,
+ and translate a Work;
+
+ ii. moral rights retained by the original author(s) and/or performer(s);
+
+ iii. publicity and privacy rights pertaining to a person's image or likeness
+ depicted in a Work;
+
+ iv. rights protecting against unfair competition in regards to a Work,
+ subject to the limitations in paragraph 4(a), below;
+
+ v. rights protecting the extraction, dissemination, use and reuse of data in
+ a Work;
+
+ vi. database rights (such as those arising under Directive 96/9/EC of the
+ European Parliament and of the Council of 11 March 1996 on the legal
+ protection of databases, and under any national implementation thereof,
+ including any amended or successor version of such directive); and
+
+ vii. other similar, equivalent or corresponding rights throughout the world
+ based on applicable law or treaty, and any national implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention of,
+applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and
+unconditionally waives, abandons, and surrenders all of Affirmer's Copyright
+and Related Rights and associated claims and causes of action, whether now
+known or unknown (including existing as well as future claims and causes of
+action), in the Work (i) in all territories worldwide, (ii) for the maximum
+duration provided by applicable law or treaty (including future time
+extensions), (iii) in any current or future medium and for any number of
+copies, and (iv) for any purpose whatsoever, including without limitation
+commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes
+the Waiver for the benefit of each member of the public at large and to the
+detriment of Affirmer's heirs and successors, fully intending that such Waiver
+shall not be subject to revocation, rescission, cancellation, termination, or
+any other legal or equitable action to disrupt the quiet enjoyment of the Work
+by the public as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason be
+judged legally invalid or ineffective under applicable law, then the Waiver
+shall be preserved to the maximum extent permitted taking into account
+Affirmer's express Statement of Purpose. In addition, to the extent the Waiver
+is so judged Affirmer hereby grants to each affected person a royalty-free,
+non transferable, non sublicensable, non exclusive, irrevocable and
+unconditional license to exercise Affirmer's Copyright and Related Rights in
+the Work (i) in all territories worldwide, (ii) for the maximum duration
+provided by applicable law or treaty (including future time extensions), (iii)
+in any current or future medium and for any number of copies, and (iv) for any
+purpose whatsoever, including without limitation commercial, advertising or
+promotional purposes (the "License"). The License shall be deemed effective as
+of the date CC0 was applied by Affirmer to the Work. Should any part of the
+License for any reason be judged legally invalid or ineffective under
+applicable law, such partial invalidity or ineffectiveness shall not
+invalidate the remainder of the License, and in such case Affirmer hereby
+affirms that he or she will not (i) exercise any of his or her remaining
+Copyright and Related Rights in the Work or (ii) assert any associated claims
+and causes of action with respect to the Work, in either case contrary to
+Affirmer's express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ a. No trademark or patent rights held by Affirmer are waived, abandoned,
+ surrendered, licensed or otherwise affected by this document.
+
+ b. Affirmer offers the Work as-is and makes no representations or warranties
+ of any kind concerning the Work, express, implied, statutory or otherwise,
+ including without limitation warranties of title, merchantability, fitness
+ for a particular purpose, non infringement, or the absence of latent or
+ other defects, accuracy, or the present or absence of errors, whether or not
+ discoverable, all to the greatest extent permissible under applicable law.
+
+ c. Affirmer disclaims responsibility for clearing rights of other persons
+ that may apply to the Work or any use thereof, including without limitation
+ any person's Copyright and Related Rights in the Work. Further, Affirmer
+ disclaims responsibility for obtaining any necessary consents, permissions
+ or other rights required for any use of the Work.
+
+ d. Affirmer understands and acknowledges that Creative Commons is not a
+ party to this document and has no duty or obligation with respect to this
+ CC0 or use of the Work.
+
+For more information, please see
+<http://creativecommons.org/publicdomain/zero/1.0/>
+
+```
diff --git a/src/hotspot/share/logging/logTag.hpp b/src/hotspot/share/logging/logTag.hpp
index 8e04ae11046af..99adff4844741 100644
--- a/src/hotspot/share/logging/logTag.hpp
+++ b/src/hotspot/share/logging/logTag.hpp
@@ -196,6 +196,7 @@ class outputStream;
LOG_TAG(timer) \
LOG_TAG(tlab) \
LOG_TAG(tracking) \
+ LOG_TAG(trimnative) /* trim native heap */ \
LOG_TAG(unload) /* Trace unloading of classes */ \
LOG_TAG(unmap) \
LOG_TAG(unshareable) \
diff --git a/src/hotspot/share/memory/arena.cpp b/src/hotspot/share/memory/arena.cpp
index 53946e5cd4725..1c2dc593b9f4a 100644
--- a/src/hotspot/share/memory/arena.cpp
+++ b/src/hotspot/share/memory/arena.cpp
@@ -30,6 +30,7 @@
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
+#include "runtime/trimNativeHeap.hpp"
#include "services/memTracker.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
@@ -92,6 +93,7 @@ class ChunkPool {
}
static void clean() {
+ NativeHeapTrimmer::SuspendMark sm("chunk pool cleaner");
for (int i = 0; i < _num_pools; i++) {
_pools[i].prune();
}
diff --git a/src/hotspot/share/memory/heap.hpp b/src/hotspot/share/memory/heap.hpp
index 97ac11708c1c0..da02eecae4988 100644
--- a/src/hotspot/share/memory/heap.hpp
+++ b/src/hotspot/share/memory/heap.hpp
@@ -171,9 +171,6 @@ class CodeHeap : public CHeapObj<mtCode> {
// Containment means "contained in committed space".
bool contains(const void* p) const { return low() <= p && p < high(); }
- bool contains_blob(const CodeBlob* blob) const {
- return contains((void*)blob);
- }
void* find_start(void* p) const; // returns the block containing p or null
CodeBlob* find_blob(void* start) const;
diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp
index b71c213f4781d..f30080842540d 100644
--- a/src/hotspot/share/memory/metaspace.cpp
+++ b/src/hotspot/share/memory/metaspace.cpp
@@ -567,12 +567,6 @@ void Metaspace::initialize_class_space(ReservedSpace rs) {
"wrong alignment");
MetaspaceContext::initialize_class_space_context(rs);
-
- // This does currently not work because rs may be the result of a split
- // operation and NMT seems not to be able to handle splits.
- // Will be fixed with JDK-8243535.
- // MemTracker::record_virtual_memory_type((address)rs.base(), mtClass);
-
}
// Returns true if class space has been setup (initialize_class_space).
@@ -635,11 +629,13 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
// (the OS already assigned it for something else), go to the next position, wrapping
// around if necessary, until we exhaust all the items.
os::init_random((int)os::javaTimeNanos());
- r = os::random();
+ r = ABS(os::random()) % len;
+ assert(r >= 0, "must be");
log_info(metaspace)("Randomizing compressed class space: start from %d out of %d locations",
- r % len, len);
+ r, len);
}
for (int i = 0; i < len; i++) {
+ assert((i + r) >= 0, "should never underflow because len is small integer");
address a = list.at((i + r) % len);
ReservedSpace rs(size, Metaspace::reserve_alignment(),
os::vm_page_size(), (char*)a);
@@ -835,6 +831,9 @@ void Metaspace::global_initialize() {
CompressedClassSpaceSize));
}
+ // Mark class space as such
+ MemTracker::record_virtual_memory_type((address)rs.base(), mtClass);
+
// Initialize space
Metaspace::initialize_class_space(rs);
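A standalone sketch of the fixed index selection, assuming a small list of candidate attach points: taking the absolute value before the modulo keeps the start index non-negative even when the PRNG returns a negative value, so the wrap-around loop never produces a negative array index.

```
#include <cstdio>
#include <cstdlib>

int main() {
  const int len = 5;          // number of candidate attach points (assumed)
  const int raw = -123456789; // pretend os::random() returned a negative value

  // Old code applied % directly to the raw value, which can be negative in C++.
  const int r = std::abs(raw) % len; // fixed code: always in [0, len)

  for (int i = 0; i < len; i++) {
    const int idx = (i + r) % len;   // wrap around the candidate list
    std::printf("try candidate slot %d\n", idx);
  }
  return 0;
}
```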
diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp
index 58c1792633e13..de479d3e9e4cf 100644
--- a/src/hotspot/share/memory/universe.cpp
+++ b/src/hotspot/share/memory/universe.cpp
@@ -335,7 +335,7 @@ void Universe::genesis(TRAPS) {
// Initialization of the fillerArrayKlass must come before regular
// int-TypeArrayKlass so that the int-Array mirror points to the
// int-TypeArrayKlass.
- _fillerArrayKlassObj = TypeArrayKlass::create_klass(T_INT, "Ljdk/internal/vm/FillerArray;", CHECK);
+ _fillerArrayKlassObj = TypeArrayKlass::create_klass(T_INT, "[Ljdk/internal/vm/FillerElement;", CHECK);
for (int i = T_BOOLEAN; i < T_LONG+1; i++) {
_typeArrayKlassObjs[i] = TypeArrayKlass::create_klass((BasicType)i, CHECK);
}
diff --git a/src/hotspot/share/oops/constMethodFlags.cpp b/src/hotspot/share/oops/constMethodFlags.cpp
index 3664cb12e0181..69b017cb18055 100644
--- a/src/hotspot/share/oops/constMethodFlags.cpp
+++ b/src/hotspot/share/oops/constMethodFlags.cpp
@@ -29,7 +29,7 @@
void ConstMethodFlags::print_on(outputStream* st) const {
#define CM_PRINT(name, ignore) \
- if (name()) st->print(" " #name " ");
+ if (name()) st->print(#name " ");
CM_FLAGS_DO(CM_PRINT)
#undef CM_PRINT
}
diff --git a/src/hotspot/share/oops/fieldStreams.hpp b/src/hotspot/share/oops/fieldStreams.hpp
index 31f3fa6ca40eb..54619f4d472b4 100644
--- a/src/hotspot/share/oops/fieldStreams.hpp
+++ b/src/hotspot/share/oops/fieldStreams.hpp
@@ -37,6 +37,7 @@
// iterates over fields that have been injected by the JVM.
// AllFieldStream exposes all fields and should only be used in rare
// cases.
+// HierarchicalFieldStream also allows iterating over the fields of supertypes.
class FieldStreamBase : public StackObj {
protected:
const Array<u1>* _fieldinfo_stream;
@@ -135,7 +136,7 @@ class FieldStreamBase : public StackObj {
}
};
-// Iterate over only the internal fields
+// Iterate over only the Java fields
class JavaFieldStream : public FieldStreamBase {
public:
JavaFieldStream(const InstanceKlass* k): FieldStreamBase(k->fieldinfo_stream(), k->constants(), 0, k->java_fields_count()) {}
@@ -179,4 +180,104 @@ class AllFieldStream : public FieldStreamBase {
AllFieldStream(const InstanceKlass* k): FieldStreamBase(k->fieldinfo_stream(), k->constants()) {}
};
+// Iterate over fields including the ones declared in supertypes
+template <typename FieldStreamType>
+class HierarchicalFieldStream : public StackObj {
+ private:
+ const Array<InstanceKlass*>* _interfaces;
+ InstanceKlass* _next_klass; // null indicates no more type to visit
+ FieldStreamType _current_stream;
+ int _interface_index;
+
+ void prepare() {
+ _next_klass = next_klass_with_fields();
+ // special case: the initial klass has no fields. If any supertype has any fields, use that stream directly.
+ // If no such supertype exists, done() will return true right away.
+ next_stream_if_done();
+ }
+
+ InstanceKlass* next_klass_with_fields() {
+ assert(_next_klass != nullptr, "reached end of types already");
+ InstanceKlass* result = _next_klass;
+ do {
+ if (!result->is_interface() && result->super() != nullptr) {
+ result = result->java_super();
+ } else if (_interface_index > 0) {
+ result = _interfaces->at(--_interface_index);
+ } else {
+ return nullptr; // we did not find any more supertypes with fields
+ }
+ } while (FieldStreamType(result).done());
+ return result;
+ }
+
+ // sets _current_stream to the next if the current is done and any more is available
+ void next_stream_if_done() {
+ if (_next_klass != nullptr && _current_stream.done()) {
+ _current_stream = FieldStreamType(_next_klass);
+ assert(!_current_stream.done(), "created empty stream");
+ _next_klass = next_klass_with_fields();
+ }
+ }
+
+ public:
+ HierarchicalFieldStream(InstanceKlass* klass) :
+ _interfaces(klass->transitive_interfaces()),
+ _next_klass(klass),
+ _current_stream(FieldStreamType(klass)),
+ _interface_index(_interfaces->length()) {
+ prepare();
+ }
+
+ void next() {
+ _current_stream.next();
+ next_stream_if_done();
+ }
+
+ bool done() const { return _next_klass == nullptr && _current_stream.done(); }
+
+ // bridge functions from FieldStreamBase
+
+ AccessFlags access_flags() const {
+ return _current_stream.access_flags();
+ }
+
+ FieldInfo::FieldFlags field_flags() const {
+ return _current_stream.field_flags();
+ }
+
+ Symbol* name() const {
+ return _current_stream.name();
+ }
+
+ Symbol* signature() const {
+ return _current_stream.signature();
+ }
+
+ Symbol* generic_signature() const {
+ return _current_stream.generic_signature();
+ }
+
+ int offset() const {
+ return _current_stream.offset();
+ }
+
+ bool is_contended() const {
+ return _current_stream.is_contended();
+ }
+
+ int contended_group() const {
+ return _current_stream.contended_group();
+ }
+
+ FieldInfo to_FieldInfo() {
+ return _current_stream.to_FieldInfo();
+ }
+
+ fieldDescriptor& field_descriptor() const {
+ return _current_stream.field_descriptor();
+ }
+
+};
+
#endif // SHARE_OOPS_FIELDSTREAMS_HPP
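
HierarchicalFieldStream walks the stream of the starting class first and then switches to the streams of its supertypes (the java_super chain plus the transitive interfaces), skipping types that declare no fields. A rough standalone sketch of the same "current stream, then next klass with fields" walk, using ordinary containers instead of HotSpot's klass and stream types (all names below are illustrative and the interface part is omitted for brevity):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Klass {
      std::string name;
      std::vector<std::string> fields;
      const Klass* super;                  // single superclass chain only, for brevity
    };

    // Visit the fields of k first, then the fields of each supertype that actually
    // declares fields, mirroring next_klass_with_fields() / next_stream_if_done().
    void visit_hierarchy(const Klass* k) {
      for (const Klass* cur = k; cur != nullptr; cur = cur->super) {
        if (cur->fields.empty()) continue; // skip types without fields
        for (const std::string& f : cur->fields) {
          std::printf("%s.%s\n", cur->name.c_str(), f.c_str());
        }
      }
    }

    int main() {
      Klass object{"Object", {}, nullptr};
      Klass base{"Base", {"x"}, &object};
      Klass derived{"Derived", {"y", "z"}, &base};
      visit_hierarchy(&derived);           // prints Derived.y, Derived.z, Base.x
      return 0;
    }
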
diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp
index 565c136435424..6f0d521b9d4c3 100644
--- a/src/hotspot/share/oops/instanceKlass.cpp
+++ b/src/hotspot/share/oops/instanceKlass.cpp
@@ -768,20 +768,43 @@ void InstanceKlass::link_class(TRAPS) {
void InstanceKlass::check_link_state_and_wait(JavaThread* current) {
MonitorLocker ml(current, _init_monitor);
+ bool debug_logging_enabled = log_is_enabled(Debug, class, init);
+
// Another thread is linking this class, wait.
while (is_being_linked() && !is_init_thread(current)) {
+ if (debug_logging_enabled) {
+ ResourceMark rm(current);
+ log_debug(class, init)("Thread \"%s\" waiting for linking of %s by thread \"%s\"",
+ current->name(), external_name(), init_thread_name());
+ }
ml.wait();
}
// This thread is recursively linking this class, continue
if (is_being_linked() && is_init_thread(current)) {
+ if (debug_logging_enabled) {
+ ResourceMark rm(current);
+ log_debug(class, init)("Thread \"%s\" recursively linking %s",
+ current->name(), external_name());
+ }
return;
}
// If this class wasn't linked already, set state to being_linked
if (!is_linked()) {
+ if (debug_logging_enabled) {
+ ResourceMark rm(current);
+ log_debug(class, init)("Thread \"%s\" linking %s",
+ current->name(), external_name());
+ }
set_init_state(being_linked);
set_init_thread(current);
+ } else {
+ if (debug_logging_enabled) {
+ ResourceMark rm(current);
+ log_debug(class, init)("Thread \"%s\" found %s already linked",
+ current->name(), external_name());
+ }
}
}
@@ -1047,13 +1070,21 @@ void InstanceKlass::initialize_impl(TRAPS) {
JavaThread* jt = THREAD;
+ bool debug_logging_enabled = log_is_enabled(Debug, class, init);
+
// refer to the JVM book page 47 for description of steps
// Step 1
{
- MonitorLocker ml(THREAD, _init_monitor);
+ MonitorLocker ml(jt, _init_monitor);
// Step 2
while (is_being_initialized() && !is_init_thread(jt)) {
+ if (debug_logging_enabled) {
+ ResourceMark rm(jt);
+ log_debug(class, init)("Thread \"%s\" waiting for initialization of %s by thread \"%s\"",
+ jt->name(), external_name(), init_thread_name());
+ }
+
wait = true;
jt->set_class_to_be_initialized(this);
ml.wait();
@@ -1062,24 +1093,44 @@ void InstanceKlass::initialize_impl(TRAPS) {
// Step 3
if (is_being_initialized() && is_init_thread(jt)) {
+ if (debug_logging_enabled) {
+ ResourceMark rm(jt);
+ log_debug(class, init)("Thread \"%s\" recursively initializing %s",
+ jt->name(), external_name());
+ }
DTRACE_CLASSINIT_PROBE_WAIT(recursive, -1, wait);
return;
}
// Step 4
if (is_initialized()) {
+ if (debug_logging_enabled) {
+ ResourceMark rm(jt);
+ log_debug(class, init)("Thread \"%s\" found %s already initialized",
+ jt->name(), external_name());
+ }
DTRACE_CLASSINIT_PROBE_WAIT(concurrent, -1, wait);
return;
}
// Step 5
if (is_in_error_state()) {
+ if (debug_logging_enabled) {
+ ResourceMark rm(jt);
+ log_debug(class, init)("Thread \"%s\" found %s is in error state",
+ jt->name(), external_name());
+ }
throw_error = true;
} else {
// Step 6
set_init_state(being_initialized);
set_init_thread(jt);
+ if (debug_logging_enabled) {
+ ResourceMark rm(jt);
+ log_debug(class, init)("Thread \"%s\" is initializing %s",
+ jt->name(), external_name());
+ }
}
}
@@ -1553,7 +1604,9 @@ void InstanceKlass::call_class_initializer(TRAPS) {
LogStream ls(lt);
ls.print("%d Initializing ", call_class_initializer_counter++);
name()->print_value_on(&ls);
- ls.print_cr("%s (" PTR_FORMAT ")", h_method() == nullptr ? "(no method)" : "", p2i(this));
+ ls.print_cr("%s (" PTR_FORMAT ") by thread \"%s\"",
+ h_method() == nullptr ? "(no method)" : "", p2i(this),
+ THREAD->name());
}
if (h_method() != nullptr) {
JavaCallArguments args; // No arguments
@@ -4059,6 +4112,23 @@ bool InstanceKlass::should_clean_previous_versions_and_reset() {
return ret;
}
+// This nulls out jmethodIDs for all methods in 'klass'
+// It needs to be called explicitly for all previous versions of a class because these may not be cleaned up
+// during class unloading.
+// We cannot use the jmethodID cache associated with klass directly because the 'previous' versions
+// do not have the jmethodID cache filled in. Instead, we need to look up the jmethodID for each method, and this
+// is expensive - O(n) for one jmethodID lookup. For all contained methods it is O(n^2).
+// The lookup is expensive because there is no direct link between a method and its jmethodID.
+void InstanceKlass::clear_jmethod_ids(InstanceKlass* klass) {
+ Array<Method*>* method_refs = klass->methods();
+ for (int k = 0; k < method_refs->length(); k++) {
+ Method* method = method_refs->at(k);
+ if (method != nullptr && method->is_obsolete()) {
+ method->clear_jmethod_id();
+ }
+ }
+}
+
// Purge previous versions before adding new previous versions of the class and
// during class unloading.
void InstanceKlass::purge_previous_version_list() {
@@ -4102,6 +4172,7 @@ void InstanceKlass::purge_previous_version_list() {
// Unlink from previous version list.
assert(pv_node->class_loader_data() == loader_data, "wrong loader_data");
InstanceKlass* next = pv_node->previous_versions();
+ clear_jmethod_ids(pv_node); // jmethodID maintenance for the unloaded class
pv_node->link_previous_versions(nullptr); // point next to null
last->link_previous_versions(next);
// Delete this node directly. Nothing is referring to it and we don't
@@ -4321,4 +4392,3 @@ void ClassHierarchyIterator::next() {
_current = _current->next_sibling();
return; // visit next sibling subclass
}
-
diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp
index ae08a56686f44..3026960221150 100644
--- a/src/hotspot/share/oops/instanceKlass.hpp
+++ b/src/hotspot/share/oops/instanceKlass.hpp
@@ -491,6 +491,14 @@ class InstanceKlass: public Klass {
static void check_prohibited_package(Symbol* class_name,
ClassLoaderData* loader_data,
TRAPS);
+
+ JavaThread* init_thread() { return Atomic::load(&_init_thread); }
+ // We can safely access the name as long as we hold the _init_monitor.
+ const char* init_thread_name() {
+ assert(_init_monitor->owned_by_self(), "Must hold _init_monitor here");
+ return init_thread()->name_raw();
+ }
+
public:
// initialization state
bool is_loaded() const { return init_state() >= loaded; }
@@ -500,7 +508,7 @@ class InstanceKlass: public Klass {
bool is_not_initialized() const { return init_state() < being_initialized; }
bool is_being_initialized() const { return init_state() == being_initialized; }
bool is_in_error_state() const { return init_state() == initialization_error; }
- bool is_init_thread(JavaThread *thread) { return thread == Atomic::load(&_init_thread); }
+ bool is_init_thread(JavaThread *thread) { return thread == init_thread(); }
ClassState init_state() const { return Atomic::load(&_init_state); }
const char* init_state_name() const;
bool is_rewritten() const { return _misc_flags.rewritten(); }
@@ -1077,6 +1085,8 @@ class InstanceKlass: public Klass {
bool idnum_can_increment() const { return has_been_redefined(); }
inline jmethodID* methods_jmethod_ids_acquire() const;
inline void release_set_methods_jmethod_ids(jmethodID* jmeths);
+ // This nulls out jmethodIDs for all methods in 'klass'
+ static void clear_jmethod_ids(InstanceKlass* klass);
// Lock during initialization
public:
diff --git a/src/hotspot/share/oops/instanceKlassFlags.cpp b/src/hotspot/share/oops/instanceKlassFlags.cpp
index 7ed77eb13fc6b..864fe60af2ea3 100644
--- a/src/hotspot/share/oops/instanceKlassFlags.cpp
+++ b/src/hotspot/share/oops/instanceKlassFlags.cpp
@@ -32,11 +32,10 @@
void InstanceKlassFlags::print_on(outputStream* st) const {
#define IK_FLAGS_PRINT(name, ignore) \
- if (name()) st->print(" ##name ");
+ if (name()) st->print(#name " ");
IK_FLAGS_DO(IK_FLAGS_PRINT)
IK_STATUS_DO(IK_FLAGS_PRINT)
#undef IK_FLAGS_PRINT
- st->cr();
}
#if INCLUDE_CDS
diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp
index 8625d2537cc9e..c5693b02b0578 100644
--- a/src/hotspot/share/oops/method.cpp
+++ b/src/hotspot/share/oops/method.cpp
@@ -2262,6 +2262,20 @@ void Method::clear_jmethod_ids(ClassLoaderData* loader_data) {
loader_data->jmethod_ids()->clear_all_methods();
}
+void Method::clear_jmethod_id() {
+ // Being at a safepoint prevents racing against other class redefinitions
+ assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
+ // The jmethodID is not stored in the Method instance, we need to look it up first
+ jmethodID methodid = find_jmethod_id_or_null();
+ // We need to make sure that jmethodID actually resolves to this method
+ // - multiple redefined versions may share jmethodID slots and if a method
+ // has already been rewired to a newer version, we could be removing the reference
+ // to a still-existing method instance.
+ if (methodid != nullptr && *((Method**)methodid) == this) {
+ *((Method**)methodid) = nullptr;
+ }
+}
+
bool Method::has_method_vptr(const void* ptr) {
Method m;
// This assumes that the vtbl pointer is the first word of a C++ object.
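
A jmethodID is essentially a pointer to a slot that in turn points to the current Method, so several redefined versions of a method can end up resolving through the same slot. Clearing must therefore check that the slot still refers to the method being purged before nulling it, which is what Method::clear_jmethod_id above does. A toy model of that double indirection and guarded clear (plain C++; the types and names are illustrative, not the VM's):

    #include <cassert>
    #include <cstdio>

    struct Method { const char* name; };
    typedef Method** jmethodID_like;   // an ID is a pointer to a Method* slot

    // Null out the slot only if it still refers to 'm'; if the slot has already
    // been rewired to a newer version of the method, leave it alone.
    void clear_id_for(jmethodID_like id, Method* m) {
      if (id != nullptr && *id == m) {
        *id = nullptr;
      }
    }

    int main() {
      Method old_version{"foo (obsolete)"};
      Method new_version{"foo (current)"};
      Method* slot = &new_version;       // the shared slot already points at the new version
      jmethodID_like id = &slot;

      clear_id_for(id, &old_version);    // no-op: slot does not point at the obsolete method
      assert(*id == &new_version);

      clear_id_for(id, &new_version);    // clears the slot
      assert(*id == nullptr);
      std::printf("ok\n");
      return 0;
    }
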
diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp
index 06f0b8cf7d769..a4f8f3075efd3 100644
--- a/src/hotspot/share/oops/method.hpp
+++ b/src/hotspot/share/oops/method.hpp
@@ -773,6 +773,7 @@ class Method : public Metadata {
// Clear methods
static void clear_jmethod_ids(ClassLoaderData* loader_data);
+ void clear_jmethod_id();
static void print_jmethod_ids_count(const ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;
// Get this method's jmethodID -- allocate if it doesn't exist
diff --git a/src/hotspot/share/oops/methodFlags.cpp b/src/hotspot/share/oops/methodFlags.cpp
index 3c805a4a8129f..4945dcc4b8396 100644
--- a/src/hotspot/share/oops/methodFlags.cpp
+++ b/src/hotspot/share/oops/methodFlags.cpp
@@ -28,7 +28,7 @@
void MethodFlags::print_on(outputStream* st) const {
#define M_PRINT(name, ignore) \
- if (name()) st->print(" " #name " ");
+ if (name()) st->print(#name " ");
M_STATUS_DO(M_PRINT)
#undef M_PRINT
}
diff --git a/src/hotspot/share/oops/symbol.cpp b/src/hotspot/share/oops/symbol.cpp
index f77d69fe3d41a..cbb51e1bb90e5 100644
--- a/src/hotspot/share/oops/symbol.cpp
+++ b/src/hotspot/share/oops/symbol.cpp
@@ -22,7 +22,6 @@
*
*/
-
#include "precompiled.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/altHashing.hpp"
@@ -390,11 +389,9 @@ void Symbol::print() const { print_on(tty); }
// The print_value functions are present in all builds, to support the
// disassembler and error reporting.
void Symbol::print_value_on(outputStream* st) const {
- st->print("'");
- for (int i = 0; i < utf8_length(); i++) {
- st->print("%c", char_at(i));
- }
- st->print("'");
+ st->print_raw("'", 1);
+ st->print_raw((const char*)base(), utf8_length());
+ st->print_raw("'", 1);
}
void Symbol::print_value() const { print_value_on(tty); }
diff --git a/src/hotspot/share/opto/addnode.cpp b/src/hotspot/share/opto/addnode.cpp
index cf8f58d8e2307..73629a11bcdba 100644
--- a/src/hotspot/share/opto/addnode.cpp
+++ b/src/hotspot/share/opto/addnode.cpp
@@ -283,8 +283,26 @@ Node* AddNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) {
assert( in1->in(2) != this && in2->in(2) != this,
"dead loop in AddINode::Ideal" );
Node* sub = SubNode::make(nullptr, nullptr, bt);
- sub->init_req(1, phase->transform(AddNode::make(in1->in(1), in2->in(1), bt)));
- sub->init_req(2, phase->transform(AddNode::make(in1->in(2), in2->in(2), bt)));
+ Node* sub_in1;
+ PhaseIterGVN* igvn = phase->is_IterGVN();
+ // During IGVN, if both inputs of the new AddNode are a tree of SubNodes, this same transformation will be applied
+ // to every node of the tree. Calling transform() causes the transformation to be applied recursively, once per
+ // tree node, whether some subtrees are identical or not. Pushing to the IGVN worklist instead causes the transform
+ // to be applied once per unique subtree (because all uses of a subtree are updated with the result of the
+ // transformation). In case of a large tree, this can make a difference in compilation time.
+ if (igvn != nullptr) {
+ sub_in1 = igvn->register_new_node_with_optimizer(AddNode::make(in1->in(1), in2->in(1), bt));
+ } else {
+ sub_in1 = phase->transform(AddNode::make(in1->in(1), in2->in(1), bt));
+ }
+ Node* sub_in2;
+ if (igvn != nullptr) {
+ sub_in2 = igvn->register_new_node_with_optimizer(AddNode::make(in1->in(2), in2->in(2), bt));
+ } else {
+ sub_in2 = phase->transform(AddNode::make(in1->in(2), in2->in(2), bt));
+ }
+ sub->init_req(1, sub_in1);
+ sub->init_req(2, sub_in2);
return sub;
}
// Convert "(a-b)+(b+c)" into "(a+c)"
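
The comment above is an instance of a general point about shared subgraphs: recursing from every use re-processes a shared subtree once per path, while registering nodes and letting a worklist drive the rewrite processes each unique node once. A small standalone illustration of the difference, counting visits over a tiny DAG (the graph shape and names are made up for the example, not C2 data structures):

    #include <cstdio>
    #include <set>
    #include <vector>

    struct Node { std::vector<Node*> inputs; };

    // Naive recursion: a node shared by several users is visited once per path.
    int visit_recursive(Node* n) {
      int visits = 1;
      for (Node* in : n->inputs) visits += visit_recursive(in);
      return visits;
    }

    // Worklist style: each unique node is processed exactly once.
    int visit_worklist(Node* root) {
      std::set<Node*> seen;
      std::vector<Node*> worklist{root};
      int visits = 0;
      while (!worklist.empty()) {
        Node* n = worklist.back(); worklist.pop_back();
        if (!seen.insert(n).second) continue;
        visits++;
        for (Node* in : n->inputs) worklist.push_back(in);
      }
      return visits;
    }

    int main() {
      Node shared;                       // one subtree used by both inputs
      Node left{{&shared}}, right{{&shared}};
      Node root{{&left, &right}};
      std::printf("recursive: %d, worklist: %d\n",
                  visit_recursive(&root), visit_worklist(&root));  // 5 vs 4
      return 0;
    }
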
diff --git a/src/hotspot/share/opto/arraycopynode.cpp b/src/hotspot/share/opto/arraycopynode.cpp
index cfb959021edca..71b7a7e5024d9 100644
--- a/src/hotspot/share/opto/arraycopynode.cpp
+++ b/src/hotspot/share/opto/arraycopynode.cpp
@@ -134,12 +134,13 @@ int ArrayCopyNode::get_count(PhaseGVN *phase) const {
assert (ary_src != nullptr, "not an array or instance?");
// clone passes a length as a rounded number of longs. If we're
// cloning an array we'll do it element by element. If the
- // length input to ArrayCopyNode is constant, length of input
- // array must be too.
-
- assert((get_length_if_constant(phase) == -1) != ary_src->size()->is_con() ||
+ // length of the input array is constant, ArrayCopyNode::Length
+ // must be too. Note that the opposite does not need to hold,
+ // because different input array lengths (e.g. int arrays with
+ // 3 or 4 elements) might lead to the same length input
+ // (e.g. 2 double-words).
+ assert(!ary_src->size()->is_con() || (get_length_if_constant(phase) >= 0) ||
phase->is_IterGVN() || phase->C->inlining_incrementally() || StressReflectiveCode, "inconsistent");
-
if (ary_src->size()->is_con()) {
return ary_src->size()->get_con();
}
diff --git a/src/hotspot/share/opto/c2_globals.hpp b/src/hotspot/share/opto/c2_globals.hpp
index 10b8ac2028aef..29dd95322a035 100644
--- a/src/hotspot/share/opto/c2_globals.hpp
+++ b/src/hotspot/share/opto/c2_globals.hpp
@@ -53,6 +53,9 @@
product(bool, StressCCP, false, DIAGNOSTIC, \
"Randomize worklist traversal in CCP") \
\
+ product(bool, StressIncrementalInlining, false, DIAGNOSTIC, \
+ "Randomize the incremental inlining decision") \
+ \
product(uint, StressSeed, 0, DIAGNOSTIC, \
"Seed for randomized stress testing (if unset, a random one is " \
"generated). The seed is recorded in the compilation log, if " \
diff --git a/src/hotspot/share/opto/c2compiler.cpp b/src/hotspot/share/opto/c2compiler.cpp
index 4b911620cf9f9..9e27ff17d9e1d 100644
--- a/src/hotspot/share/opto/c2compiler.cpp
+++ b/src/hotspot/share/opto/c2compiler.cpp
@@ -113,6 +113,7 @@ void C2Compiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci, boo
bool do_locks_coarsening = EliminateLocks;
while (!env->failing()) {
+ ResourceMark rm;
// Attempt to compile while subsuming loads into machine instructions.
Options options(subsume_loads, do_escape_analysis, do_iterative_escape_analysis, eliminate_boxing, do_locks_coarsening, install_code);
Compile C(env, target, entry_bci, options, directive);
diff --git a/src/hotspot/share/opto/callGenerator.cpp b/src/hotspot/share/opto/callGenerator.cpp
index 50f9f4b85f34d..c88d5db5488ac 100644
--- a/src/hotspot/share/opto/callGenerator.cpp
+++ b/src/hotspot/share/opto/callGenerator.cpp
@@ -97,6 +97,8 @@ JVMState* ParseGenerator::generate(JVMState* jvms) {
}
Parse parser(jvms, method(), _expected_uses);
+ if (C->failing()) return nullptr;
+
// Grab signature for matching/allocation
GraphKit& exits = parser.exits();
@@ -430,7 +432,7 @@ bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms)
assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
if (cg != nullptr) {
- assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
+ assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
_inline_cg = cg;
C->dec_number_of_mh_late_inlines();
return true;
@@ -552,7 +554,7 @@ bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState*
true /*allow_intrinsics*/);
if (cg != nullptr) {
- assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
+ assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
_inline_cg = cg;
return true;
} else {
@@ -987,8 +989,9 @@ CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* c
bool input_not_const;
CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
Compile* C = Compile::current();
+ bool should_delay = C->should_delay_inlining();
if (cg != nullptr) {
- if (AlwaysIncrementalInline) {
+ if (should_delay) {
return CallGenerator::for_late_inline(callee, cg);
} else {
return cg;
@@ -999,7 +1002,7 @@ CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* c
int call_site_count = caller->scale_count(profile.count());
if (IncrementalInlineMH && call_site_count > 0 &&
- (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
+ (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
} else {
// Out-of-line call.
diff --git a/src/hotspot/share/opto/castnode.cpp b/src/hotspot/share/opto/castnode.cpp
index d0d05b5bb98f0..8a26d514b688b 100644
--- a/src/hotspot/share/opto/castnode.cpp
+++ b/src/hotspot/share/opto/castnode.cpp
@@ -36,14 +36,14 @@
//=============================================================================
// If input is already higher or equal to cast type, then this is an identity.
Node* ConstraintCastNode::Identity(PhaseGVN* phase) {
+ if (_dependency == UnconditionalDependency) {
+ return this;
+ }
Node* dom = dominating_cast(phase, phase);
if (dom != nullptr) {
return dom;
}
- if (_dependency != RegularDependency) {
- return this;
- }
- return phase->type(in(1))->higher_equal_speculative(_type) ? in(1) : this;
+ return higher_equal_types(phase, in(1)) ? in(1) : this;
}
//------------------------------Value------------------------------------------
@@ -100,47 +100,62 @@ Node *ConstraintCastNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return (in(0) && remove_dead_region(phase, can_reshape)) ? this : nullptr;
}
+uint ConstraintCastNode::hash() const {
+ return TypeNode::hash() + (int)_dependency + (_extra_types != nullptr ? _extra_types->hash() : 0);
+}
+
bool ConstraintCastNode::cmp(const Node &n) const {
- return TypeNode::cmp(n) && ((ConstraintCastNode&)n)._dependency == _dependency;
+ if (!TypeNode::cmp(n)) {
+ return false;
+ }
+ ConstraintCastNode& cast = (ConstraintCastNode&) n;
+ if (cast._dependency != _dependency) {
+ return false;
+ }
+ if (_extra_types == nullptr || cast._extra_types == nullptr) {
+ return _extra_types == cast._extra_types;
+ }
+ return _extra_types->eq(cast._extra_types);
}
uint ConstraintCastNode::size_of() const {
return sizeof(*this);
}
-Node* ConstraintCastNode::make_cast(int opcode, Node* c, Node *n, const Type *t, DependencyType dependency) {
+Node* ConstraintCastNode::make_cast(int opcode, Node* c, Node* n, const Type* t, DependencyType dependency,
+ const TypeTuple* extra_types) {
switch(opcode) {
case Op_CastII: {
- Node* cast = new CastIINode(n, t, dependency);
+ Node* cast = new CastIINode(n, t, dependency, false, extra_types);
cast->set_req(0, c);
return cast;
}
case Op_CastLL: {
- Node* cast = new CastLLNode(n, t, dependency);
+ Node* cast = new CastLLNode(n, t, dependency, extra_types);
cast->set_req(0, c);
return cast;
}
case Op_CastPP: {
- Node* cast = new CastPPNode(n, t, dependency);
+ Node* cast = new CastPPNode(n, t, dependency, extra_types);
cast->set_req(0, c);
return cast;
}
case Op_CastFF: {
- Node* cast = new CastFFNode(n, t, dependency);
+ Node* cast = new CastFFNode(n, t, dependency, extra_types);
cast->set_req(0, c);
return cast;
}
case Op_CastDD: {
- Node* cast = new CastDDNode(n, t, dependency);
+ Node* cast = new CastDDNode(n, t, dependency, extra_types);
cast->set_req(0, c);
return cast;
}
case Op_CastVV: {
- Node* cast = new CastVVNode(n, t, dependency);
+ Node* cast = new CastVVNode(n, t, dependency, extra_types);
cast->set_req(0, c);
return cast;
}
- case Op_CheckCastPP: return new CheckCastPPNode(c, n, t, dependency);
+ case Op_CheckCastPP: return new CheckCastPPNode(c, n, t, dependency, extra_types);
default:
fatal("Bad opcode %d", opcode);
}
@@ -150,10 +165,10 @@ Node* ConstraintCastNode::make_cast(int opcode, Node* c, Node *n, const Type *t,
Node* ConstraintCastNode::make(Node* c, Node *n, const Type *t, DependencyType dependency, BasicType bt) {
switch(bt) {
case T_INT: {
- return make_cast(Op_CastII, c, n, t, dependency);
+ return make_cast(Op_CastII, c, n, t, dependency, nullptr);
}
case T_LONG: {
- return make_cast(Op_CastLL, c, n, t, dependency);
+ return make_cast(Op_CastLL, c, n, t, dependency, nullptr);
}
default:
fatal("Bad basic type %s", type2name(bt));
@@ -186,7 +201,7 @@ TypeNode* ConstraintCastNode::dominating_cast(PhaseGVN* gvn, PhaseTransform* pt)
u->outcnt() > 0 &&
u->Opcode() == opc &&
u->in(0) != nullptr &&
- u->bottom_type()->higher_equal(type())) {
+ higher_equal_types(gvn, u)) {
if (pt->is_dominator(u->in(0), ctl)) {
return u->as_Type();
}
@@ -202,9 +217,28 @@ TypeNode* ConstraintCastNode::dominating_cast(PhaseGVN* gvn, PhaseTransform* pt)
return nullptr;
}
+bool ConstraintCastNode::higher_equal_types(PhaseGVN* phase, const Node* other) const {
+ const Type* t = phase->type(other);
+ if (!t->higher_equal_speculative(type())) {
+ return false;
+ }
+ if (_extra_types != nullptr) {
+ for (uint i = 0; i < _extra_types->cnt(); ++i) {
+ if (!t->higher_equal_speculative(_extra_types->field_at(i))) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
#ifndef PRODUCT
void ConstraintCastNode::dump_spec(outputStream *st) const {
TypeNode::dump_spec(st);
+ if (_extra_types != nullptr) {
+ st->print(" extra types: ");
+ _extra_types->dump_on(st);
+ }
if (_dependency != RegularDependency) {
st->print(" %s dependency", _dependency == StrongDependency ? "strong" : "unconditional");
}
@@ -228,79 +262,6 @@ const Type* CastIINode::Value(PhaseGVN* phase) const {
res = widen_type(phase, res, T_INT);
}
- // Try to improve the type of the CastII if we recognize a CmpI/If pattern.
- //
- // in1 in2
- // | |
- // +--- | --+
- // | | |
- // CmpINode |
- // | |
- // BoolNode |
- // | |
- // IfNode |
- // | |
- // IfProj |
- // | |
- // CastIINode
- //
- if (carry_dependency()) {
- if (in(0) != nullptr && in(0)->in(0) != nullptr && in(0)->in(0)->is_If()) {
- assert(in(0)->is_IfFalse() || in(0)->is_IfTrue(), "should be If proj");
- Node* proj = in(0);
- if (proj->in(0)->in(1)->is_Bool()) {
- Node* b = proj->in(0)->in(1);
- if (b->in(1)->Opcode() == Op_CmpI) {
- Node* cmp = b->in(1);
- if (cmp->in(1) == in(1) && phase->type(cmp->in(2))->isa_int()) {
- const TypeInt* in2_t = phase->type(cmp->in(2))->is_int();
- const Type* t = TypeInt::INT;
- BoolTest test = b->as_Bool()->_test;
- if (proj->is_IfFalse()) {
- test = test.negate();
- }
- BoolTest::mask m = test._test;
- jlong lo_long = min_jint;
- jlong hi_long = max_jint;
- if (m == BoolTest::le || m == BoolTest::lt) {
- hi_long = in2_t->_hi;
- if (m == BoolTest::lt) {
- hi_long -= 1;
- }
- } else if (m == BoolTest::ge || m == BoolTest::gt) {
- lo_long = in2_t->_lo;
- if (m == BoolTest::gt) {
- lo_long += 1;
- }
- } else if (m == BoolTest::eq) {
- lo_long = in2_t->_lo;
- hi_long = in2_t->_hi;
- } else if (m == BoolTest::ne) {
- // can't do any better
- } else {
- stringStream ss;
- test.dump_on(&ss);
- fatal("unexpected comparison %s", ss.freeze());
- }
- int lo_int = (int)lo_long;
- int hi_int = (int)hi_long;
-
- if (lo_long != (jlong)lo_int) {
- lo_int = min_jint;
- }
- if (hi_long != (jlong)hi_int) {
- hi_int = max_jint;
- }
-
- t = TypeInt::make(lo_int, hi_int, Type::WidenMax);
-
- res = res->filter_speculative(t);
- return res;
- }
- }
- }
- }
- }
return res;
}
@@ -523,20 +484,21 @@ Node* CastP2XNode::Identity(PhaseGVN* phase) {
return this;
}
-Node* ConstraintCastNode::make_cast_for_type(Node* c, Node* in, const Type* type, DependencyType dependency) {
+Node* ConstraintCastNode::make_cast_for_type(Node* c, Node* in, const Type* type, DependencyType dependency,
+ const TypeTuple* types) {
Node* cast= nullptr;
if (type->isa_int()) {
- cast = make_cast(Op_CastII, c, in, type, dependency);
+ cast = make_cast(Op_CastII, c, in, type, dependency, types);
} else if (type->isa_long()) {
- cast = make_cast(Op_CastLL, c, in, type, dependency);
+ cast = make_cast(Op_CastLL, c, in, type, dependency, types);
} else if (type->isa_float()) {
- cast = make_cast(Op_CastFF, c, in, type, dependency);
+ cast = make_cast(Op_CastFF, c, in, type, dependency, types);
} else if (type->isa_double()) {
- cast = make_cast(Op_CastDD, c, in, type, dependency);
+ cast = make_cast(Op_CastDD, c, in, type, dependency, types);
} else if (type->isa_vect()) {
- cast = make_cast(Op_CastVV, c, in, type, dependency);
+ cast = make_cast(Op_CastVV, c, in, type, dependency, types);
} else if (type->isa_ptr()) {
- cast = make_cast(Op_CastPP, c, in, type, dependency);
+ cast = make_cast(Op_CastPP, c, in, type, dependency, types);
}
return cast;
}
diff --git a/src/hotspot/share/opto/castnode.hpp b/src/hotspot/share/opto/castnode.hpp
index d4b2d61621696..cbb1c5fe521ee 100644
--- a/src/hotspot/share/opto/castnode.hpp
+++ b/src/hotspot/share/opto/castnode.hpp
@@ -43,11 +43,20 @@ class ConstraintCastNode: public TypeNode {
const DependencyType _dependency;
virtual bool cmp( const Node &n ) const;
virtual uint size_of() const;
+ virtual uint hash() const; // Check the type
const Type* widen_type(const PhaseGVN* phase, const Type* res, BasicType bt) const;
+ private:
+ // PhiNode::Ideal() transforms a Phi that merges a single uncasted value into a single cast pinned at the region.
+ // The types of cast nodes eliminated as a consequence of this transformation are collected and stored here so the
+ // type dependencies carried by the cast are known. The cast can then be eliminated if the type of its input is
+ // narrower than (or equal to) all the types it carries.
+ const TypeTuple* _extra_types;
+
public:
- ConstraintCastNode(Node *n, const Type *t, DependencyType dependency)
- : TypeNode(t,2), _dependency(dependency) {
+ ConstraintCastNode(Node* n, const Type* t, ConstraintCastNode::DependencyType dependency,
+ const TypeTuple* extra_types)
+ : TypeNode(t,2), _dependency(dependency), _extra_types(extra_types) {
init_class_id(Class_ConstraintCast);
init_req(1, n);
}
@@ -59,14 +68,15 @@ class ConstraintCastNode: public TypeNode {
virtual bool depends_only_on_test() const { return _dependency == RegularDependency; }
bool carry_dependency() const { return _dependency != RegularDependency; }
TypeNode* dominating_cast(PhaseGVN* gvn, PhaseTransform* pt) const;
- static Node* make_cast(int opcode, Node* c, Node *n, const Type *t, DependencyType dependency);
+ static Node* make_cast(int opcode, Node* c, Node* n, const Type* t, DependencyType dependency, const TypeTuple* extra_types);
static Node* make(Node* c, Node *n, const Type *t, DependencyType dependency, BasicType bt);
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
- static Node* make_cast_for_type(Node* c, Node* in, const Type* type, DependencyType dependency);
+ static Node* make_cast_for_type(Node* c, Node* in, const Type* type, DependencyType dependency,
+ const TypeTuple* types);
Node* optimize_integer_cast(PhaseGVN* phase, BasicType bt);
@@ -91,6 +101,16 @@ class ConstraintCastNode: public TypeNode {
}
}
}
+
+ bool higher_equal_types(PhaseGVN* phase, const Node* other) const;
+
+ int extra_types_count() const {
+ return _extra_types == nullptr ? 0 : _extra_types->cnt();
+ }
+
+ const Type* extra_type_at(int i) const {
+ return _extra_types->field_at(i);
+ }
};
//------------------------------CastIINode-------------------------------------
@@ -103,12 +123,12 @@ class CastIINode: public ConstraintCastNode {
virtual uint size_of() const;
public:
- CastIINode(Node* n, const Type* t, DependencyType dependency = RegularDependency, bool range_check_dependency = false)
- : ConstraintCastNode(n, t, dependency), _range_check_dependency(range_check_dependency) {
+ CastIINode(Node* n, const Type* t, DependencyType dependency = RegularDependency, bool range_check_dependency = false, const TypeTuple* types = nullptr)
+ : ConstraintCastNode(n, t, dependency, types), _range_check_dependency(range_check_dependency) {
init_class_id(Class_CastII);
}
CastIINode(Node* ctrl, Node* n, const Type* t, DependencyType dependency = RegularDependency, bool range_check_dependency = false)
- : ConstraintCastNode(n, t, dependency), _range_check_dependency(range_check_dependency) {
+ : ConstraintCastNode(n, t, dependency, nullptr), _range_check_dependency(range_check_dependency) {
init_class_id(Class_CastII);
init_req(0, ctrl);
}
@@ -134,12 +154,12 @@ class CastIINode: public ConstraintCastNode {
class CastLLNode: public ConstraintCastNode {
public:
CastLLNode(Node* ctrl, Node* n, const Type* t, DependencyType dependency = RegularDependency)
- : ConstraintCastNode(n, t, dependency) {
+ : ConstraintCastNode(n, t, dependency, nullptr) {
init_class_id(Class_CastLL);
init_req(0, ctrl);
}
- CastLLNode(Node* n, const Type* t, DependencyType dependency = RegularDependency)
- : ConstraintCastNode(n, t, dependency){
+ CastLLNode(Node* n, const Type* t, DependencyType dependency = RegularDependency, const TypeTuple* types = nullptr)
+ : ConstraintCastNode(n, t, dependency, types) {
init_class_id(Class_CastLL);
}
@@ -151,8 +171,8 @@ class CastLLNode: public ConstraintCastNode {
class CastFFNode: public ConstraintCastNode {
public:
- CastFFNode(Node* n, const Type* t, DependencyType dependency = RegularDependency)
- : ConstraintCastNode(n, t, dependency){
+ CastFFNode(Node* n, const Type* t, DependencyType dependency = RegularDependency, const TypeTuple* types = nullptr)
+ : ConstraintCastNode(n, t, dependency, types) {
init_class_id(Class_CastFF);
}
virtual int Opcode() const;
@@ -161,8 +181,8 @@ class CastFFNode: public ConstraintCastNode {
class CastDDNode: public ConstraintCastNode {
public:
- CastDDNode(Node* n, const Type* t, DependencyType dependency = RegularDependency)
- : ConstraintCastNode(n, t, dependency){
+ CastDDNode(Node* n, const Type* t, DependencyType dependency = RegularDependency, const TypeTuple* types = nullptr)
+ : ConstraintCastNode(n, t, dependency, types) {
init_class_id(Class_CastDD);
}
virtual int Opcode() const;
@@ -171,8 +191,8 @@ class CastDDNode: public ConstraintCastNode {
class CastVVNode: public ConstraintCastNode {
public:
- CastVVNode(Node* n, const Type* t, DependencyType dependency = RegularDependency)
- : ConstraintCastNode(n, t, dependency){
+ CastVVNode(Node* n, const Type* t, DependencyType dependency = RegularDependency, const TypeTuple* types = nullptr)
+ : ConstraintCastNode(n, t, dependency, types) {
init_class_id(Class_CastVV);
}
virtual int Opcode() const;
@@ -184,8 +204,8 @@ class CastVVNode: public ConstraintCastNode {
// cast pointer to pointer (different type)
class CastPPNode: public ConstraintCastNode {
public:
- CastPPNode (Node *n, const Type *t, DependencyType dependency = RegularDependency)
- : ConstraintCastNode(n, t, dependency) {
+ CastPPNode (Node *n, const Type *t, DependencyType dependency = RegularDependency, const TypeTuple* types = nullptr)
+ : ConstraintCastNode(n, t, dependency, types) {
}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegP; }
@@ -195,8 +215,8 @@ class CastPPNode: public ConstraintCastNode {
// for _checkcast, cast pointer to pointer (different type), without JOIN,
class CheckCastPPNode: public ConstraintCastNode {
public:
- CheckCastPPNode(Node *c, Node *n, const Type *t, DependencyType dependency = RegularDependency)
- : ConstraintCastNode(n, t, dependency) {
+ CheckCastPPNode(Node *c, Node *n, const Type *t, DependencyType dependency = RegularDependency, const TypeTuple* types = nullptr)
+ : ConstraintCastNode(n, t, dependency, types) {
init_class_id(Class_CheckCastPP);
init_req(0, c);
}
diff --git a/src/hotspot/share/opto/cfgnode.cpp b/src/hotspot/share/opto/cfgnode.cpp
index fb52b49a0e112..25bc1b011e550 100644
--- a/src/hotspot/share/opto/cfgnode.cpp
+++ b/src/hotspot/share/opto/cfgnode.cpp
@@ -2085,10 +2085,12 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Add casts to carry the control dependency of the Phi that is
// going away
Node* cast = nullptr;
+ const TypeTuple* extra_types = collect_types(phase);
if (phi_type->isa_ptr()) {
const Type* uin_type = phase->type(uin);
if (!phi_type->isa_oopptr() && !uin_type->isa_oopptr()) {
- cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, ConstraintCastNode::StrongDependency);
+ cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, ConstraintCastNode::StrongDependency,
+ extra_types);
} else {
// Use a CastPP for a cast to not null and a CheckCastPP for
// a cast to a new klass (and both if both null-ness and
@@ -2098,7 +2100,8 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// null, uin's type must be casted to not null
if (phi_type->join(TypePtr::NOTNULL) == phi_type->remove_speculative() &&
uin_type->join(TypePtr::NOTNULL) != uin_type->remove_speculative()) {
- cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, TypePtr::NOTNULL, ConstraintCastNode::StrongDependency);
+ cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, TypePtr::NOTNULL,
+ ConstraintCastNode::StrongDependency, extra_types);
}
// If the type of phi and uin, both casted to not null,
@@ -2110,14 +2113,16 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
cast = phase->transform(cast);
n = cast;
}
- cast = ConstraintCastNode::make_cast(Op_CheckCastPP, r, n, phi_type, ConstraintCastNode::StrongDependency);
+ cast = ConstraintCastNode::make_cast(Op_CheckCastPP, r, n, phi_type, ConstraintCastNode::StrongDependency,
+ extra_types);
}
if (cast == nullptr) {
- cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, ConstraintCastNode::StrongDependency);
+ cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, ConstraintCastNode::StrongDependency,
+ extra_types);
}
}
} else {
- cast = ConstraintCastNode::make_cast_for_type(r, uin, phi_type, ConstraintCastNode::StrongDependency);
+ cast = ConstraintCastNode::make_cast_for_type(r, uin, phi_type, ConstraintCastNode::StrongDependency, extra_types);
}
assert(cast != nullptr, "cast should be set");
cast = phase->transform(cast);
@@ -2512,6 +2517,52 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return progress; // Return any progress
}
+static int compare_types(const Type* const& e1, const Type* const& e2) {
+ return (intptr_t)e1 - (intptr_t)e2;
+}
+
+// Collect types at casts that are going to be eliminated at that Phi and store them in a TypeTuple.
+// Sort the types using an arbitrary order so a given list of types always hashes to the same TypeTuple (and TypeTuple
+// pointer comparison is enough to tell whether two lists of types are the same or not).
+const TypeTuple* PhiNode::collect_types(PhaseGVN* phase) const {
+ const Node* region = in(0);
+ const Type* phi_type = bottom_type();
+ ResourceMark rm;
+ GrowableArray<const Type*> types;
+ for (uint i = 1; i < req(); i++) {
+ if (region->in(i) == nullptr || phase->type(region->in(i)) == Type::TOP) {
+ continue;
+ }
+ Node* in = Node::in(i);
+ const Type* t = phase->type(in);
+ if (in == nullptr || in == this || t == Type::TOP) {
+ continue;
+ }
+ if (t != phi_type && t->higher_equal_speculative(phi_type)) {
+ types.insert_sorted(t);
+ }
+ while (in != nullptr && in->is_ConstraintCast()) {
+ Node* next = in->in(1);
+ if (phase->type(next)->isa_rawptr() && phase->type(in)->isa_oopptr()) {
+ break;
+ }
+ ConstraintCastNode* cast = in->as_ConstraintCast();
+ for (int j = 0; j < cast->extra_types_count(); ++j) {
+ const Type* extra_t = cast->extra_type_at(j);
+ if (extra_t != phi_type && extra_t->higher_equal_speculative(phi_type)) {
+ types.insert_sorted(extra_t);
+ }
+ }
+ in = next;
+ }
+ }
+ const Type **flds = (const Type **)(phase->C->type_arena()->AmallocWords(types.length()*sizeof(Type*)));
+ for (int i = 0; i < types.length(); ++i) {
+ flds[i] = types.at(i);
+ }
+ return TypeTuple::make(types.length(), flds);
+}
+
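
collect_types builds the TypeTuple from a sorted list so that the same set of types always produces the same tuple, and tuples can then be compared cheaply. A small sketch of that canonicalization idea with plain pointers standing in for interned Type instances (illustrative only; the dedup step here is part of the sketch, not a claim about GrowableArray::insert_sorted):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Canonicalize a collection of pointers: sort by address and drop duplicates,
    // so two collections holding the same elements in any order compare equal.
    template <typename T>
    std::vector<const T*> canonicalize(std::vector<const T*> v) {
      std::sort(v.begin(), v.end());
      v.erase(std::unique(v.begin(), v.end()), v.end());
      return v;
    }

    int main() {
      int a = 0, b = 0, c = 0;           // stand-ins for interned Type instances
      std::vector<const int*> first  = {&c, &a, &b};
      std::vector<const int*> second = {&a, &b, &c, &a};
      assert(canonicalize(first) == canonicalize(second));
      return 0;
    }
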
Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
Node_Stack stack(1);
VectorSet visited;
diff --git a/src/hotspot/share/opto/cfgnode.hpp b/src/hotspot/share/opto/cfgnode.hpp
index 48f484a346898..d71e7ff758a8e 100644
--- a/src/hotspot/share/opto/cfgnode.hpp
+++ b/src/hotspot/share/opto/cfgnode.hpp
@@ -266,6 +266,8 @@ class PhiNode : public TypeNode {
#else //ASSERT
void verify_adr_type(bool recursive = false) const {}
#endif //ASSERT
+
+ const TypeTuple* collect_types(PhaseGVN* phase) const;
};
//------------------------------GotoNode---------------------------------------
diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp
index 47e2ebc5b6df1..c9b82face2d25 100644
--- a/src/hotspot/share/opto/compile.cpp
+++ b/src/hotspot/share/opto/compile.cpp
@@ -551,28 +551,37 @@ void Compile::print_compile_messages() {
#ifndef PRODUCT
void Compile::print_ideal_ir(const char* phase_name) {
- ttyLocker ttyl;
// keep the following output all in one block
// This output goes directly to the tty, not the compiler log.
// To enable tools to match it up with the compilation activity,
// be sure to tag this tty output with the compile ID.
- if (xtty != nullptr) {
- xtty->head("ideal compile_id='%d'%s compile_phase='%s'",
- compile_id(),
- is_osr_compilation() ? " compile_kind='osr'" : "",
- phase_name);
- }
+
+ // Node dumping can cause a safepoint, which can break the tty lock.
+ // Buffer all node dumps, so that all safepoints happen before we lock.
+ ResourceMark rm;
+ stringStream ss;
+
if (_output == nullptr) {
- tty->print_cr("AFTER: %s", phase_name);
+ ss.print_cr("AFTER: %s", phase_name);
// Print out all nodes in ascending order of index.
- root()->dump_bfs(MaxNodeLimit, nullptr, "+S$");
+ root()->dump_bfs(MaxNodeLimit, nullptr, "+S$", &ss);
} else {
// Dump the node blockwise if we have a scheduling
- _output->print_scheduling();
+ _output->print_scheduling(&ss);
}
+ // Check that the lock is not broken by a safepoint.
+ NoSafepointVerifier nsv;
+ ttyLocker ttyl;
if (xtty != nullptr) {
+ xtty->head("ideal compile_id='%d'%s compile_phase='%s'",
+ compile_id(),
+ is_osr_compilation() ? " compile_kind='osr'" : "",
+ phase_name);
+ xtty->print("%s", ss.as_string()); // print to tty would use xml escape encoding
xtty->tail("ideal");
+ } else {
+ tty->print("%s", ss.as_string());
}
}
#endif
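
The restructuring of print_ideal_ir follows a common pattern: produce all output into a local buffer first (while safepoints can still happen), then take the lock and emit the buffered text in one go. A generic standalone sketch of that pattern using standard C++ threads and streams rather than the VM's tty/ttyLocker (names are illustrative):

    #include <cstdio>
    #include <mutex>
    #include <sstream>
    #include <string>

    std::mutex output_lock;

    // Format everything into a local buffer first, then take the lock only for the
    // final write, so nothing slow or blocking runs while the lock is held.
    void report(int compile_id, const std::string& phase_name) {
      std::ostringstream ss;
      ss << "AFTER: " << phase_name << " (compile_id=" << compile_id << ")\n";
      // ... potentially expensive dumping into ss would go here ...
      std::lock_guard<std::mutex> guard(output_lock);
      std::fputs(ss.str().c_str(), stdout);
    }

    int main() {
      report(42, "example phase");
      return 0;
    }
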
@@ -826,8 +835,8 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
// If any phase is randomized for stress testing, seed random number
// generation and log the seed for repeatability.
- if (StressLCM || StressGCM || StressIGVN || StressCCP) {
- if (FLAG_IS_DEFAULT(StressSeed) || (FLAG_IS_ERGO(StressSeed) && RepeatCompilation)) {
+ if (StressLCM || StressGCM || StressIGVN || StressCCP || StressIncrementalInlining) {
+ if (FLAG_IS_DEFAULT(StressSeed) || (FLAG_IS_ERGO(StressSeed) && directive->RepeatCompilationOption)) {
_stress_seed = static_cast<uint>(Ticks::now().nanoseconds());
FLAG_SET_ERGO(StressSeed, _stress_seed);
} else {
@@ -2230,6 +2239,8 @@ void Compile::Optimize() {
process_for_unstable_if_traps(igvn);
+ if (failing()) return;
+
inline_incrementally(igvn);
print_method(PHASE_INCREMENTAL_INLINE, 2);
@@ -2240,7 +2251,9 @@ void Compile::Optimize() {
// Inline valueOf() methods now.
inline_boxing_calls(igvn);
- if (AlwaysIncrementalInline) {
+ if (failing()) return;
+
+ if (AlwaysIncrementalInline || StressIncrementalInlining) {
inline_incrementally(igvn);
}
@@ -2255,16 +2268,20 @@ void Compile::Optimize() {
// CastPP nodes.
remove_speculative_types(igvn);
+ if (failing()) return;
+
// No more new expensive nodes will be added to the list from here
// so keep only the actual candidates for optimizations.
cleanup_expensive_nodes(igvn);
+ if (failing()) return;
+
assert(EnableVectorSupport || !has_vbox_nodes(), "sanity");
if (EnableVectorSupport && has_vbox_nodes()) {
TracePhase tp("", &timers[_t_vector]);
PhaseVector pv(igvn);
pv.optimize_vector_boxes();
-
+ if (failing()) return;
print_method(PHASE_ITER_GVN_AFTER_VECTOR, 2);
}
assert(!has_vbox_nodes(), "sanity");
@@ -2284,6 +2301,8 @@ void Compile::Optimize() {
// safepoints
remove_root_to_sfpts_edges(igvn);
+ if (failing()) return;
+
// Perform escape analysis
if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) {
if (has_loops()) {
@@ -2393,6 +2412,8 @@ void Compile::Optimize() {
process_for_post_loop_opts_igvn(igvn);
+ if (failing()) return;
+
#ifdef ASSERT
bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
#endif
@@ -2431,6 +2452,7 @@ void Compile::Optimize() {
// More opportunities to optimize virtual and MH calls.
// Though it's maybe too late to perform inlining, strength-reducing them to direct calls is still an option.
process_late_inline_calls_no_inline(igvn);
+ if (failing()) return;
}
} // (End scope of igvn; run destructor if necessary for asserts.)
@@ -4907,6 +4929,7 @@ void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
igvn.remove_speculative_types();
if (modified > 0) {
igvn.optimize();
+ if (failing()) return;
}
#ifdef ASSERT
// Verify that after the IGVN is over no speculative type has resurfaced
@@ -4977,8 +5000,8 @@ bool Compile::randomized_select(int count) {
CloneMap& Compile::clone_map() { return _clone_map; }
void Compile::set_clone_map(Dict* d) { _clone_map._dict = d; }
-void NodeCloneInfo::dump() const {
- tty->print(" {%d:%d} ", idx(), gen());
+void NodeCloneInfo::dump_on(outputStream* st) const {
+ st->print(" {%d:%d} ", idx(), gen());
}
void CloneMap::clone(Node* old, Node* nnn, int gen) {
@@ -5025,11 +5048,11 @@ int CloneMap::max_gen() const {
return g;
}
-void CloneMap::dump(node_idx_t key) const {
+void CloneMap::dump(node_idx_t key, outputStream* st) const {
uint64_t val = value(key);
if (val != 0) {
NodeCloneInfo ni(val);
- ni.dump();
+ ni.dump_on(st);
}
}
diff --git a/src/hotspot/share/opto/compile.hpp b/src/hotspot/share/opto/compile.hpp
index 52d1ba7e08b16..e5b881065ac7c 100644
--- a/src/hotspot/share/opto/compile.hpp
+++ b/src/hotspot/share/opto/compile.hpp
@@ -135,7 +135,7 @@ class NodeCloneInfo {
NodeCloneInfo(uint64_t idx_clone_orig) : _idx_clone_orig(idx_clone_orig) {}
NodeCloneInfo(node_idx_t x, int g) : _idx_clone_orig(0) { set(x, g); }
- void dump() const;
+ void dump_on(outputStream* st) const;
};
class CloneMap {
@@ -158,7 +158,7 @@ class CloneMap {
int max_gen() const;
void clone(Node* old, Node* nnn, int gen);
void verify_insert_and_clone(Node* old, Node* nnn, int gen);
- void dump(node_idx_t key) const;
+ void dump(node_idx_t key, outputStream* st) const;
int clone_idx() const { return _clone_idx; }
void set_clone_idx(int x) { _clone_idx = x; }
@@ -1032,6 +1032,7 @@ class Compile : public Phase {
bool inline_incrementally_one();
void inline_incrementally_cleanup(PhaseIterGVN& igvn);
void inline_incrementally(PhaseIterGVN& igvn);
+ bool should_delay_inlining() { return AlwaysIncrementalInline || (StressIncrementalInlining && (random() % 2) == 0); }
void inline_string_calls(bool parse_time);
void inline_boxing_calls(PhaseIterGVN& igvn);
bool optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode);
diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp
index 2b78d7b3b244c..f7fa5e9d8cc95 100644
--- a/src/hotspot/share/opto/doCall.cpp
+++ b/src/hotspot/share/opto/doCall.cpp
@@ -172,7 +172,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// Try inlining a bytecoded method:
if (!call_does_dispatch) {
InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
- bool should_delay = AlwaysIncrementalInline;
+ bool should_delay = C->should_delay_inlining();
if (ilt->ok_to_inline(callee, jvms, profile, should_delay)) {
CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
// For optimized virtual calls assert at runtime that receiver object
@@ -191,14 +191,14 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// Delay the inlining of this method to give us the
// opportunity to perform some high level optimizations
// first.
- if (should_delay_string_inlining(callee, jvms)) {
+ if (should_delay) {
+ return CallGenerator::for_late_inline(callee, cg);
+ } else if (should_delay_string_inlining(callee, jvms)) {
return CallGenerator::for_string_late_inline(callee, cg);
} else if (should_delay_boxing_inlining(callee, jvms)) {
return CallGenerator::for_boxing_late_inline(callee, cg);
} else if (should_delay_vector_reboxing_inlining(callee, jvms)) {
return CallGenerator::for_vector_reboxing_late_inline(callee, cg);
- } else if (should_delay) {
- return CallGenerator::for_late_inline(callee, cg);
} else {
return cg;
}
@@ -983,6 +983,8 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
if (PrintOpto && WizardMode) {
tty->print_cr(" Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
}
+ // If this is a backwards branch in the bytecodes, add safepoint
+ maybe_add_safepoint(handler_bci);
merge_exception(handler_bci); // jump to handler
return; // No more handling to be done here!
}
@@ -1014,6 +1016,8 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
klass->print_name();
tty->cr();
}
+ // If this is a backwards branch in the bytecodes, add safepoint
+ maybe_add_safepoint(handler_bci);
merge_exception(handler_bci);
}
set_control(not_subtype_ctrl);
diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp
index 30dcf22105989..1c60de9b9d870 100644
--- a/src/hotspot/share/opto/graphKit.cpp
+++ b/src/hotspot/share/opto/graphKit.cpp
@@ -3728,7 +3728,10 @@ Node* GraphKit::new_instance(Node* klass_node,
//-------------------------------new_array-------------------------------------
// helper for both newarray and anewarray
// The 'length' parameter is (obviously) the length of the array.
-// See comments on new_instance for the meaning of the other arguments.
+// The optional arguments are for specialized use by intrinsics:
+// - If 'return_size_val' is not null, report the non-padded array size (sum of header size
+// and array body) to the caller.
+// - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
Node* length, // number of array elements
int nargs, // number of arguments to push back for uncommon trap
@@ -3779,25 +3782,21 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
// The rounding mask is strength-reduced, if possible.
int round_mask = MinObjAlignmentInBytes - 1;
Node* header_size = nullptr;
- int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
// (T_BYTE has the weakest alignment and size restrictions...)
if (layout_is_con) {
int hsize = Klass::layout_helper_header_size(layout_con);
int eshift = Klass::layout_helper_log2_element_size(layout_con);
- BasicType etype = Klass::layout_helper_element_type(layout_con);
if ((round_mask & ~right_n_bits(eshift)) == 0)
round_mask = 0; // strength-reduce it if it goes away completely
assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
+ int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
assert(header_size_min <= hsize, "generic minimum is smallest");
- header_size_min = hsize;
- header_size = intcon(hsize + round_mask);
+ header_size = intcon(hsize);
} else {
Node* hss = intcon(Klass::_lh_header_size_shift);
Node* hsm = intcon(Klass::_lh_header_size_mask);
- Node* hsize = _gvn.transform( new URShiftINode(layout_val, hss) );
- hsize = _gvn.transform( new AndINode(hsize, hsm) );
- Node* mask = intcon(round_mask);
- header_size = _gvn.transform( new AddINode(hsize, mask) );
+ header_size = _gvn.transform(new URShiftINode(layout_val, hss));
+ header_size = _gvn.transform(new AndINode(header_size, hsm));
}
Node* elem_shift = nullptr;
@@ -3849,25 +3848,30 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
}
#endif
- // Combine header size (plus rounding) and body size. Then round down.
- // This computation cannot overflow, because it is used only in two
- // places, one where the length is sharply limited, and the other
- // after a successful allocation.
+ // Combine header size and body size for the array copy part, then align (if
+ // necessary) for the allocation part. This computation cannot overflow,
+ // because it is used only in two places, one where the length is sharply
+ // limited, and the other after a successful allocation.
Node* abody = lengthx;
- if (elem_shift != nullptr)
- abody = _gvn.transform( new LShiftXNode(lengthx, elem_shift) );
- Node* size = _gvn.transform( new AddXNode(headerx, abody) );
- if (round_mask != 0) {
- Node* mask = MakeConX(~round_mask);
- size = _gvn.transform( new AndXNode(size, mask) );
+ if (elem_shift != nullptr) {
+ abody = _gvn.transform(new LShiftXNode(lengthx, elem_shift));
}
- // else if round_mask == 0, the size computation is self-rounding
+ Node* non_rounded_size = _gvn.transform(new AddXNode(headerx, abody));
if (return_size_val != nullptr) {
// This is the size
- (*return_size_val) = size;
+ (*return_size_val) = non_rounded_size;
}
+ Node* size = non_rounded_size;
+ if (round_mask != 0) {
+ Node* mask1 = MakeConX(round_mask);
+ size = _gvn.transform(new AddXNode(size, mask1));
+ Node* mask2 = MakeConX(~round_mask);
+ size = _gvn.transform(new AndXNode(size, mask2));
+ }
+ // else if round_mask == 0, the size computation is self-rounding
+
// Now generate allocation code
// The entire memory state is needed for slow path of the allocation
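
The size computation above separates the exact (unpadded) size reported to callers from the allocation size, which is aligned up with the usual add-then-mask trick. A quick standalone check of that arithmetic (constants chosen purely for illustration):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Align 'size' up to a power-of-two 'alignment': add (alignment - 1), then clear
    // the low bits. Mirrors size = (non_rounded_size + round_mask) & ~round_mask.
    size_t align_up(size_t size, size_t alignment) {
      const size_t round_mask = alignment - 1;
      return (size + round_mask) & ~round_mask;
    }

    int main() {
      const size_t header = 16;                    // illustrative header size
      const size_t body   = 10;                    // ten 1-byte elements
      const size_t non_rounded_size = header + body;
      const size_t size = align_up(non_rounded_size, 8);
      std::printf("exact=%zu aligned=%zu\n", non_rounded_size, size);  // exact=26 aligned=32
      assert(size % 8 == 0 && size >= non_rounded_size);
      return 0;
    }
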
diff --git a/src/hotspot/share/opto/graphKit.hpp b/src/hotspot/share/opto/graphKit.hpp
index b711a5780cf92..92180371c067b 100644
--- a/src/hotspot/share/opto/graphKit.hpp
+++ b/src/hotspot/share/opto/graphKit.hpp
@@ -82,7 +82,8 @@ class GraphKit : public Phase {
#ifdef ASSERT
~GraphKit() {
- assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
+ assert(failing() || !has_exceptions(),
+ "unless compilation failed, user must call transfer_exceptions_into_jvms");
}
#endif
diff --git a/src/hotspot/share/opto/ifnode.cpp b/src/hotspot/share/opto/ifnode.cpp
index ef0c720835631..21b82a7b2c3ca 100644
--- a/src/hotspot/share/opto/ifnode.cpp
+++ b/src/hotspot/share/opto/ifnode.cpp
@@ -1833,6 +1833,46 @@ Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// then we are guaranteed to fail, so just start interpreting there.
// We 'expand' the top 3 range checks to include all post-dominating
// checks.
+ //
+ // Example:
+ // a[i+x] // (1) 1 < x < 6
+ // a[i+3] // (2)
+ // a[i+4] // (3)
+ // a[i+6] // max = max of all constants
+ // a[i+2]
+ // a[i+1] // min = min of all constants
+ //
+ // If x < 3:
+ // (1) a[i+x]: Leave unchanged
+ // (2) a[i+3]: Replace with a[i+max] = a[i+6]: i+x < i+3 <= i+6 -> (2) is covered
+ // (3) a[i+4]: Replace with a[i+min] = a[i+1]: i+1 < i+4 <= i+6 -> (3) and all following checks are covered
+ // Remove all other a[i+c] checks
+ //
+ // If x >= 3:
+ // (1) a[i+x]: Leave unchanged
+ // (2) a[i+3]: Replace with a[i+min] = a[i+1]: i+1 < i+3 <= i+x -> (2) is covered
+ // (3) a[i+4]: Replace with a[i+max] = a[i+6]: i+1 < i+4 <= i+6 -> (3) and all following checks are covered
+ // Remove all other a[i+c] checks
+ //
+ // We only need the top 2 range checks if x is the min or max of all constants.
+ //
+ // This, however, only works if the interval [i+min,i+max] is not larger than max_int (i.e. abs(max - min) < max_int):
+ // The theoretical max size of an array is max_int with:
+ // - Valid index space: [0,max_int-1]
+ // - Invalid index space: [max_int,-1] // max_int, min_int, min_int + 1, ..., -1
+ //
+ // The size of the consecutive valid index space is smaller than the size of the consecutive invalid index space.
+ // If we choose min and max in such a way that:
+ // - abs(max - min) < max_int
+ // - i+max and i+min are inside the valid index space
+ // then all indices [i+min,i+max] must be in the valid index space. Otherwise, the invalid index space must be
+ // smaller than the valid index space which is never the case for any array size.
+ //
+ // Choosing a smaller array size only makes the valid index space smaller and the invalid index space larger and
+ // the argument above still holds.
+ //
+ // Note that the same optimization with the same maximal accepted interval size can also be found in C1.
+ const jlong maximum_number_of_min_max_interval_indices = (jlong)max_jint;
// The top 3 range checks seen
const int NRC = 3;
@@ -1867,13 +1907,18 @@ Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
found_immediate_dominator = true;
break;
}
- // Gather expanded bounds
- off_lo = MIN2(off_lo,offset2);
- off_hi = MAX2(off_hi,offset2);
- // Record top NRC range checks
- prev_checks[nb_checks%NRC].ctl = prev_dom;
- prev_checks[nb_checks%NRC].off = offset2;
- nb_checks++;
+
+ // "x - y" -> must add one to the difference for number of elements in [x,y]
+ const jlong diff = (jlong)MIN2(offset2, off_lo) - (jlong)MAX2(offset2, off_hi);
+ if (ABS(diff) < maximum_number_of_min_max_interval_indices) {
+ // Gather expanded bounds
+ off_lo = MIN2(off_lo, offset2);
+ off_hi = MAX2(off_hi, offset2);
+ // Record top NRC range checks
+ prev_checks[nb_checks % NRC].ctl = prev_dom;
+ prev_checks[nb_checks % NRC].off = offset2;
+ nb_checks++;
+ }
}
}
prev_dom = dom;
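
The new guard measures the span between the smallest and largest constant offsets in a wider type before widening the covered interval, so the interval never reaches max_int. A small standalone check of that computation (values are made up; jint/jlong are spelled as fixed-width integers here, and the helper name is invented for the sketch):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Width of the covered offset interval, computed in 64 bits so extreme 32-bit
    // offsets cannot overflow. Mirrors ABS(MIN2(...) - MAX2(...)) in the hunk above.
    bool span_is_small_enough(int32_t offset2, int32_t off_lo, int32_t off_hi, int64_t limit) {
      const int64_t span = (int64_t)std::max(offset2, std::max(off_lo, off_hi)) -
                           (int64_t)std::min(offset2, std::min(off_lo, off_hi));
      return span < limit;
    }

    int main() {
      const int64_t limit = INT32_MAX;               // same bound as max_jint in the patch
      assert(span_is_small_enough(4, 1, 6, limit));  // a[i+1]..a[i+6]: tiny interval, accepted
      assert(!span_is_small_enough(2000000000, -2000000000, -2000000000, limit));  // ~4e9 wide: rejected
      return 0;
    }
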
diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp
index c995a1c6502da..af2ea60959578 100644
--- a/src/hotspot/share/opto/library_call.cpp
+++ b/src/hotspot/share/opto/library_call.cpp
@@ -1108,6 +1108,7 @@ bool LibraryCallKit::inline_countPositives() {
Node* ba_start = array_element_address(ba, offset, T_BYTE);
Node* result = new CountPositivesNode(control(), memory(TypeAryPtr::BYTES), ba_start, len);
set_result(_gvn.transform(result));
+ clear_upper_avx();
return true;
}
@@ -1362,6 +1363,7 @@ bool LibraryCallKit::inline_string_indexOfChar(StrIntrinsicNode::ArgEnc ae) {
set_control(_gvn.transform(region));
record_for_igvn(region);
set_result(_gvn.transform(phi));
+ clear_upper_avx();
return true;
}
@@ -2864,6 +2866,7 @@ bool LibraryCallKit::inline_native_notify_jvmti_funcs(address funcAddr, const ch
if (!DoJVMTIVirtualThreadTransitions) {
return true;
}
+ Node* vt_oop = _gvn.transform(must_be_not_null(argument(0), true)); // VirtualThread this argument
IdealKit ideal(this);
Node* ONE = ideal.ConI(1);
@@ -2872,16 +2875,13 @@ bool LibraryCallKit::inline_native_notify_jvmti_funcs(address funcAddr, const ch
Node* notify_jvmti_enabled = ideal.load(ideal.ctrl(), addr, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw);
ideal.if_then(notify_jvmti_enabled, BoolTest::eq, ONE); {
+ sync_kit(ideal);
// if notifyJvmti enabled then make a call to the given SharedRuntime function
const TypeFunc* tf = OptoRuntime::notify_jvmti_vthread_Type();
- Node* vt_oop = _gvn.transform(must_be_not_null(argument(0), true)); // VirtualThread this argument
-
- sync_kit(ideal);
make_runtime_call(RC_NO_LEAF, tf, funcAddr, funcName, TypePtr::BOTTOM, vt_oop, hide);
ideal.sync_kit(this);
} ideal.else_(); {
// set hide value to the VTMS transition bit in current JavaThread and VirtualThread object
- Node* vt_oop = _gvn.transform(argument(0)); // this argument - VirtualThread oop
Node* thread = ideal.thread();
Node* jt_addr = basic_plus_adr(thread, in_bytes(JavaThread::is_in_VTMS_transition_offset()));
Node* vt_addr = basic_plus_adr(vt_oop, java_lang_Thread::is_in_VTMS_transition_offset());
@@ -3586,12 +3586,19 @@ bool LibraryCallKit::inline_native_setCurrentThread() {
return true;
}
-Node* LibraryCallKit::scopedValueCache_helper() {
- ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
- const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
+const Type* LibraryCallKit::scopedValueCache_type() {
+ ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
+ const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
+ const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
+ // Because we create the scopedValue cache lazily we have to make the
+ // type of the result BotPTR.
bool xk = etype->klass_is_exact();
+ const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
+ return objects_type;
+}
+Node* LibraryCallKit::scopedValueCache_helper() {
Node* thread = _gvn.transform(new ThreadLocalNode());
Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
// We cannot use immutable_memory() because we might flip onto a
@@ -3604,15 +3611,8 @@ Node* LibraryCallKit::scopedValueCache_helper() {
//------------------------inline_native_scopedValueCache------------------
bool LibraryCallKit::inline_native_scopedValueCache() {
- ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
- const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
- const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
-
- // Because we create the scopedValue cache lazily we have to make the
- // type of the result BotPTR.
- bool xk = etype->klass_is_exact();
- const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
Node* cache_obj_handle = scopedValueCache_helper();
+ const Type* objects_type = scopedValueCache_type();
set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
return true;
@@ -3622,9 +3622,10 @@ bool LibraryCallKit::inline_native_scopedValueCache() {
bool LibraryCallKit::inline_native_setScopedValueCache() {
Node* arr = argument(0);
Node* cache_obj_handle = scopedValueCache_helper();
+ const Type* objects_type = scopedValueCache_type();
const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
- access_store_at(nullptr, cache_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
+ access_store_at(nullptr, cache_obj_handle, adr_type, arr, objects_type, T_OBJECT, IN_NATIVE | MO_UNORDERED);
return true;
}
@@ -4993,8 +4994,8 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
PreserveJVMState pjvms(this);
set_control(array_ctl);
Node* obj_length = load_array_length(obj);
- Node* obj_size = nullptr;
- Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
+ Node* array_size = nullptr; // Size of the array without object alignment padding.
+ Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
@@ -5027,7 +5028,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
// the object.)
if (!stopped()) {
- copy_to_clone(obj, alloc_obj, obj_size, true);
+ copy_to_clone(obj, alloc_obj, array_size, true);
// Present the results of the copy.
result_reg->init_req(_array_path, control());
@@ -5067,7 +5068,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
if (!stopped()) {
// It's an instance, and it passed the slow-path tests.
PreserveJVMState pjvms(this);
- Node* obj_size = nullptr;
+ Node* obj_size = nullptr; // Total object size, including object alignment padding.
// Need to deoptimize on exception from allocation since Object.clone intrinsic
// is reexecuted if deoptimization occurs and there could be problems when merging
// exception state between multiple Object.clone versions (reexecute=true vs reexecute=false).
diff --git a/src/hotspot/share/opto/library_call.hpp b/src/hotspot/share/opto/library_call.hpp
index d4d6e50b2a576..f714625a4df47 100644
--- a/src/hotspot/share/opto/library_call.hpp
+++ b/src/hotspot/share/opto/library_call.hpp
@@ -237,6 +237,7 @@ class LibraryCallKit : public GraphKit {
bool inline_native_setCurrentThread();
bool inline_native_scopedValueCache();
+ const Type* scopedValueCache_type();
Node* scopedValueCache_helper();
bool inline_native_setScopedValueCache();
diff --git a/src/hotspot/share/opto/loopPredicate.cpp b/src/hotspot/share/opto/loopPredicate.cpp
index ab0a7eb1089e3..9ae24cb205556 100644
--- a/src/hotspot/share/opto/loopPredicate.cpp
+++ b/src/hotspot/share/opto/loopPredicate.cpp
@@ -1035,9 +1035,10 @@ BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree* loop, Node* ctrl, int scal
// Check if (scale * max_idx_expr) may overflow
const TypeInt* scale_type = TypeInt::make(scale);
MulINode* mul = new MulINode(max_idx_expr, con_scale);
- idx_type = (TypeInt*)mul->mul_ring(idx_type, scale_type);
- if (overflow || TypeInt::INT->higher_equal(idx_type)) {
+
+ if (overflow || MulINode::does_overflow(idx_type, scale_type)) {
// May overflow
+ idx_type = TypeInt::INT;
mul->destruct(&_igvn);
if (!overflow) {
max_idx_expr = new ConvI2LNode(max_idx_expr);
@@ -1050,6 +1051,7 @@ BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree* loop, Node* ctrl, int scal
} else {
// No overflow possible
max_idx_expr = mul;
+ idx_type = (TypeInt*)mul->mul_ring(idx_type, scale_type);
}
register_new_node(max_idx_expr, ctrl);
}
diff --git a/src/hotspot/share/opto/loopTransform.cpp b/src/hotspot/share/opto/loopTransform.cpp
index 7c3f5841f3f51..bc1140d4673e6 100644
--- a/src/hotspot/share/opto/loopTransform.cpp
+++ b/src/hotspot/share/opto/loopTransform.cpp
@@ -1965,6 +1965,9 @@ Node *PhaseIdealLoop::insert_post_loop(IdealLoopTree* loop, Node_List& old_new,
post_head->set_normal_loop();
post_head->set_post_loop(main_head);
+ // clone_loop() above changes the exit projection
+ main_exit = outer_main_end->proj_out(false);
+
// Reduce the post-loop trip count.
CountedLoopEndNode* post_end = old_new[main_end->_idx]->as_CountedLoopEnd();
post_end->_prob = PROB_FAIR;
@@ -3003,6 +3006,8 @@ void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) {
continue; // Don't rce this check but continue looking for other candidates.
}
+ assert(is_dominator(compute_early_ctrl(limit, limit_c), pre_end), "node pinned on loop exit test?");
+
// Check for scaled induction variable plus an offset
Node *offset = nullptr;
@@ -3021,6 +3026,8 @@ void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) {
if (is_dominator(ctrl, offset_c)) {
continue; // Don't rce this check but continue looking for other candidates.
}
+
+ assert(is_dominator(compute_early_ctrl(offset, offset_c), pre_end), "node pinned on loop exit test?");
#ifdef ASSERT
if (TraceRangeLimitCheck) {
tty->print_cr("RC bool node%s", flip ? " flipped:" : ":");
@@ -3703,7 +3710,7 @@ void IdealLoopTree::enqueue_data_nodes(PhaseIdealLoop* phase, Unique_Node_List&
void IdealLoopTree::collect_loop_core_nodes(PhaseIdealLoop* phase, Unique_Node_List& wq) const {
uint before = wq.size();
wq.push(_head->in(LoopNode::LoopBackControl));
- for (uint i = 0; i < wq.size(); ++i) {
+ for (uint i = before; i < wq.size(); ++i) {
Node* n = wq.at(i);
for (uint j = 0; j < n->req(); ++j) {
Node* in = n->in(j);
diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp
index f84c4100d8eb3..e102d9ac9e9b7 100644
--- a/src/hotspot/share/opto/loopnode.cpp
+++ b/src/hotspot/share/opto/loopnode.cpp
@@ -493,19 +493,19 @@ PhiNode* PhaseIdealLoop::loop_iv_phi(Node* xphi, Node* phi_incr, Node* x, IdealL
return phi;
}
-static int check_stride_overflow(jlong stride_con, const TypeInteger* limit_t, BasicType bt) {
- if (stride_con > 0) {
- if (limit_t->lo_as_long() > (max_signed_integer(bt) - stride_con)) {
+static int check_stride_overflow(jlong final_correction, const TypeInteger* limit_t, BasicType bt) {
+ if (final_correction > 0) {
+ if (limit_t->lo_as_long() > (max_signed_integer(bt) - final_correction)) {
return -1;
}
- if (limit_t->hi_as_long() > (max_signed_integer(bt) - stride_con)) {
+ if (limit_t->hi_as_long() > (max_signed_integer(bt) - final_correction)) {
return 1;
}
} else {
- if (limit_t->hi_as_long() < (min_signed_integer(bt) - stride_con)) {
+ if (limit_t->hi_as_long() < (min_signed_integer(bt) - final_correction)) {
return -1;
}
- if (limit_t->lo_as_long() < (min_signed_integer(bt) - stride_con)) {
+ if (limit_t->lo_as_long() < (min_signed_integer(bt) - final_correction)) {
return 1;
}
}
@@ -767,7 +767,7 @@ SafePointNode* PhaseIdealLoop::find_safepoint(Node* back_control, Node* x, Ideal
// // inner_incr := AddI(inner_phi, intcon(stride))
// inner_incr = inner_phi + stride;
// if (inner_incr < inner_iters_actual) {
-// ... use phi=>(outer_phi+inner_phi) and incr=>(outer_phi+inner_incr) ...
+// ... use phi=>(outer_phi+inner_phi) ...
// continue;
// }
// else break;
@@ -977,10 +977,6 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
// loop iv phi
Node* iv_add = loop_nest_replace_iv(phi, inner_phi, outer_phi, head, bt);
- // Replace inner loop long iv incr with inner loop int incr + outer
- // loop iv phi
- loop_nest_replace_iv(incr, inner_incr, outer_phi, head, bt);
-
set_subtree_ctrl(inner_iters_actual_int, body_populated);
LoopNode* inner_head = create_inner_head(loop, head, exit_test);
@@ -1029,7 +1025,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
// back_control: fallthrough;
// else
// inner_exit_branch: break; //exit_branch->clone()
- // ... use phi=>(outer_phi+inner_phi) and incr=>(outer_phi+inner_incr) ...
+ // ... use phi=>(outer_phi+inner_phi) ...
// inner_phi = inner_phi + stride; // inner_incr
// }
// outer_exit_test: //exit_test->clone(), in(0):=inner_exit_branch
@@ -1749,50 +1745,205 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_
C->print_method(PHASE_BEFORE_CLOOPS, 3);
// ===================================================
- // Generate loop limit check to avoid integer overflow
- // in cases like next (cyclic loops):
+ // We can only convert this loop to a counted loop if we can guarantee that the iv phi will never overflow at runtime.
+ // This is an implicit assumption taken by some loop optimizations. We therefore must ensure this property at all cost.
+ // At this point, we've already excluded some trivial cases where an overflow could have been proven statically.
+ // But even though we cannot prove that an overflow will *not* happen, we still want to speculatively convert this loop
+ // to a counted loop. This can be achieved by adding additional iv phi overflow checks before the loop. If they fail,
+ // we trap and resume execution before the loop without having executed any iteration of the loop yet.
+ //
+ // These additional iv phi overflow checks can be inserted as Loop Limit Check Predicates above the Loop Limit Check
+ // Parse Predicate which captures a JVM state just before the entry of the loop. If there is no such Parse Predicate,
+ // we cannot generate a Loop Limit Check Predicate and thus cannot speculatively convert the loop to a counted loop.
+ //
+ // In the following, we only focus on int loops with stride > 0 to keep things simple. The argumentation and proof
+ // for stride < 0 are analogous. For long loops, we would replace max_int with max_long.
+ //
+ //
+ // The loop to be converted does not always need to have the often used shape:
+ //
+ //                                                 i = init
+ //     i = init                                loop:
+ //     do {                                        ...
+ //         // ...               equivalent         i+=stride
+ //         i+=stride               <==>            if (i < limit)
+ //     } while (i < limit);                          goto loop
+ //                                              exit:
+ //                                                  ...
+ //
+ // where the loop exit check uses the post-incremented iv phi and a '<'-operator.
+ //
+ // We could also have '<='-operator (or '>='-operator for negative strides) or use the pre-incremented iv phi value
+ // in the loop exit check:
+ //
+ // i = init
+ // loop:
+ // ...
+ // if (i <= limit)
+ // i+=stride
+ // goto loop
+ // exit:
+ // ...
+ //
+ // Let's define the following terms:
+ // - iv_pre_i: The pre-incremented iv phi before the i-th iteration.
+ // - iv_post_i: The post-incremented iv phi after the i-th iteration.
+ //
+ // The iv_pre_i and iv_post_i have the following relation:
+ // iv_pre_i + stride = iv_post_i
+ //
+ // When converting a loop to a counted loop, we want to have a canonicalized loop exit check of the form:
+ // iv_post_i < adjusted_limit
+ //
+ // If that is not the case, we need to canonicalize the loop exit check by using different values for adjusted_limit:
+ // (LE1) iv_post_i < limit: Already canonicalized. We can directly use limit as adjusted_limit.
+ // -> adjusted_limit = limit.
+ // (LE2) iv_post_i <= limit:
+ // iv_post_i < limit + 1
+ // -> adjusted_limit = limit + 1
+ // (LE3) iv_pre_i < limit:
+ // iv_pre_i + stride < limit + stride
+ // iv_post_i < limit + stride
+ // -> adjusted_limit = limit + stride
+ // (LE4) iv_pre_i <= limit:
+ // iv_pre_i < limit + 1
+ // iv_pre_i + stride < limit + stride + 1
+ // iv_post_i < limit + stride + 1
+ // -> adjusted_limit = limit + stride + 1
+ //
+ // Note that:
+ // (AL) limit <= adjusted_limit.
+ //
+ // The following loop invariant has to hold for counted loops with n iterations (i.e. loop exit check true after n-th
+ // loop iteration) and a canonicalized loop exit check to guarantee that no iv_post_i over- or underflows:
+ // (INV) For i = 1..n, min_int <= iv_post_i <= max_int
+ //
+ // To prove (INV), we require the following two conditions/assumptions:
+ // (i): adjusted_limit - 1 + stride <= max_int
+ // (ii): init < limit
+ //
+ // If we can prove (INV), we know that there can be no over- or underflow of any iv phi value. We prove (INV) by
+ // induction by assuming (i) and (ii).
+ //
+ // Proof by Induction
+ // ------------------
+ // > Base case (i = 1): We show that (INV) holds after the first iteration:
+ // min_int <= iv_post_1 = init + stride <= max_int
+ // Proof:
+ // First, we note that (ii) implies
+ // (iii) init <= limit - 1
+ // max_int >= adjusted_limit - 1 + stride [using (i)]
+ // >= limit - 1 + stride [using (AL)]
+ // >= init + stride [using (iii)]
+ // >= min_int [using stride > 0, no underflow]
+ // Thus, no overflow happens after the first iteration and (INV) holds for i = 1.
+ //
+ // Note that to prove the base case we need (i) and (ii).
+ //
+ // > Induction Hypothesis (i = j, j > 1): Assume that (INV) holds after the j-th iteration:
+ // min_int <= iv_post_j <= max_int
+ // > Step case (i = j + 1): We show that (INV) also holds after the j+1-th iteration:
+ // min_int <= iv_post_{j+1} = iv_post_j + stride <= max_int
+ // Proof:
+ // If iv_post_j >= adjusted_limit:
+ // We exit the loop after the j-th iteration, and we don't execute the j+1-th iteration anymore. Thus, there is
+ // also no iv_{j+1}. Since (INV) holds for iv_j, there is nothing left to prove.
+ // If iv_post_j < adjusted_limit:
+ // First, we note that:
+ // (iv) iv_post_j <= adjusted_limit - 1
+ // max_int >= adjusted_limit - 1 + stride [using (i)]
+ // >= iv_post_j + stride [using (iv)]
+ // >= min_int [using stride > 0, no underflow]
//
- // for (i=0; i <= max_jint; i++) {}
- // for (i=0; i < max_jint; i+=2) {}
+ // Note that to prove the step case we only need (i).
//
+ // Thus, by assuming (i) and (ii), we proved (INV).
//
- // Limit check predicate depends on the loop test:
//
- // for(;i != limit; i++) --> limit <= (max_jint)
- // for(;i < limit; i+=stride) --> limit <= (max_jint - stride + 1)
- // for(;i <= limit; i+=stride) --> limit <= (max_jint - stride )
+ // It is therefore enough to add the following two Loop Limit Check Predicates to check assumptions (i) and (ii):
//
+ // (1) Loop Limit Check Predicate for (i):
+ // Using (i): adjusted_limit - 1 + stride <= max_int
+ //
+ // This condition is now restated to use limit instead of adjusted_limit:
+ //
+ // To prevent an overflow of adjusted_limit - 1 + stride itself, we rewrite this check to
+ // max_int - stride + 1 >= adjusted_limit
+ // We can merge the two constants into
+ // canonicalized_correction = stride - 1
+ // which gives us
+ // max_int - canonicalized_correction >= adjusted_limit
+ //
+ // To directly use limit instead of adjusted_limit in the predicate condition, we split adjusted_limit into:
+ // adjusted_limit = limit + limit_correction
+ // Since stride > 0 and limit_correction <= stride + 1, we can restate this with no over- or underflow into:
+ // max_int - canonicalized_correction - limit_correction >= limit
+ // Since canonicalized_correction and limit_correction are both constants, we can replace them with a new constant:
+ // final_correction = canonicalized_correction + limit_correction
+ // which gives us:
+ //
+ // Final predicate condition:
+ // max_int - final_correction >= limit
+ //
+ // (2) Loop Limit Check Predicate for (ii):
+ // Using (ii): init < limit
+ //
+ // This Loop Limit Check Predicate is not required if we can prove at compile time that either:
+ // (2.1) type(init) < type(limit)
+ // In this case, we know:
+ // all possible values of init < all possible values of limit
+ // and we can skip the predicate.
+ //
+ // (2.2) init < limit is already checked before (i.e. found as a dominating check)
+ // In this case, we do not need to re-check the condition and can skip the predicate.
+ // This is often found for while- and for-loops which have the following shape:
+ //
+ // if (init < limit) { // Dominating test. Do not need the Loop Limit Check Predicate below.
+ // i = init;
+ // if (init >= limit) { trap(); } // Here we would insert the Loop Limit Check Predicate
+ // do {
+ // i += stride;
+ // } while (i < limit);
+ // }
+ //
+ // (2.3) init + stride <= max_int
+ // In this case, there is no overflow of the iv phi after the first loop iteration.
+ // In the proof of the base case above we showed that init + stride <= max_int by using assumption (ii):
+ // init < limit
+ // In the proof of the step case above, we did not need (ii) anymore. Therefore, if we already know at
+ // compile time that init + stride <= max_int then we have trivially proven the base case and that
+ // there is no overflow of the iv phi after the first iteration. In this case, we don't need to check (ii)
+ // again and can skip the predicate.
- // Check if limit is excluded to do more precise int overflow check.
- bool incl_limit = (bt == BoolTest::le || bt == BoolTest::ge);
- jlong stride_m = stride_con - (incl_limit ? 0 : (stride_con > 0 ? 1 : -1));
- // If compare points directly to the phi we need to adjust
- // the compare so that it points to the incr. Limit have
- // to be adjusted to keep trip count the same and the
- // adjusted limit should be checked for int overflow.
- Node* adjusted_limit = limit;
- if (phi_incr != nullptr) {
- stride_m += stride_con;
- }
+ // Accounting for (LE3) and (LE4) where we use pre-incremented phis in the loop exit check.
+ const jlong limit_correction_for_pre_iv_exit_check = (phi_incr != nullptr) ? stride_con : 0;
- Node *init_control = x->in(LoopNode::EntryControl);
+ // Accounting for (LE2) and (LE4) where we use <= or >= in the loop exit check.
+ const bool includes_limit = (bt == BoolTest::le || bt == BoolTest::ge);
+ const jlong limit_correction_for_le_ge_exit_check = (includes_limit ? (stride_con > 0 ? 1 : -1) : 0);
+
+ const jlong limit_correction = limit_correction_for_pre_iv_exit_check + limit_correction_for_le_ge_exit_check;
+ const jlong canonicalized_correction = stride_con + (stride_con > 0 ? -1 : 1);
+ const jlong final_correction = canonicalized_correction + limit_correction;
+
+ int sov = check_stride_overflow(final_correction, limit_t, iv_bt);
+ Node* init_control = x->in(LoopNode::EntryControl);
- int sov = check_stride_overflow(stride_m, limit_t, iv_bt);
// If sov==0, limit's type always satisfies the condition, for
// example, when it is an array length.
if (sov != 0) {
if (sov < 0) {
return false; // Bailout: integer overflow is certain.
}
+ // (1) Loop Limit Check Predicate is required because we could not statically prove that
+ // limit + final_correction = adjusted_limit - 1 + stride <= max_int
assert(!x->as_Loop()->is_loop_nest_inner_loop(), "loop was transformed");
- // Generate loop's limit check.
- // Loop limit check predicate should be near the loop.
if (!ParsePredicates::is_loop_limit_check_predicate_proj(init_control)) {
- // The limit check predicate is not generated if this method trapped here before.
+ // The Loop Limit Check Parse Predicate is not generated if this method trapped here before.
#ifdef ASSERT
if (TraceLoopLimitCheck) {
- tty->print("missing loop limit check:");
+ tty->print("Missing Loop Limit Check Parse Predicate:");
loop->dump_head();
x->dump(1);
}
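Illustration (not part of the patch): a small Java loop showing why assumption (i) must be checked at runtime. The class name and constants are made up. With a limit near max_int and a stride of 2, the iv wraps around instead of passing the limit, so the Java loop never terminates; a counted loop built without the Loop Limit Check Predicate for (i) would instead compute a finite trip count and change behavior.

    // LoopLimitOverflowExample.java -- illustrative only.
    public class LoopLimitOverflowExample {
        static long countEvenSteps(int limit) {
            long iterations = 0;
            // For limit = Integer.MAX_VALUE the iv 'i' reaches 2147483646 and then
            // wraps to -2147483648 instead of ever satisfying i >= limit, so this
            // loop never terminates under Java semantics. A counted loop that
            // assumed 'i' cannot overflow would compute a finite trip count and
            // behave differently, which is exactly what the predicate prevents.
            for (int i = 0; i < limit; i += 2) {
                iterations++;
            }
            return iterations;
        }

        public static void main(String[] args) {
            System.out.println(countEvenSteps(1_000_000));  // terminates: 500000
            // countEvenSteps(Integer.MAX_VALUE) would spin forever, see above.
        }
    }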
@@ -1811,65 +1962,79 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_
Node* bol;
if (stride_con > 0) {
- cmp_limit = CmpNode::make(limit, _igvn.integercon(max_signed_integer(iv_bt) - stride_m, iv_bt), iv_bt);
+ cmp_limit = CmpNode::make(limit, _igvn.integercon(max_signed_integer(iv_bt) - final_correction, iv_bt), iv_bt);
bol = new BoolNode(cmp_limit, BoolTest::le);
} else {
- cmp_limit = CmpNode::make(limit, _igvn.integercon(min_signed_integer(iv_bt) - stride_m, iv_bt), iv_bt);
+ cmp_limit = CmpNode::make(limit, _igvn.integercon(min_signed_integer(iv_bt) - final_correction, iv_bt), iv_bt);
bol = new BoolNode(cmp_limit, BoolTest::ge);
}
insert_loop_limit_check_predicate(loop_limit_check_parse_predicate_proj, cmp_limit, bol);
}
- // Now we need to canonicalize loop condition.
- if (bt == BoolTest::ne) {
- assert(stride_con == 1 || stride_con == -1, "simple increment only");
- if (stride_con > 0 && init_t->hi_as_long() < limit_t->lo_as_long()) {
- // 'ne' can be replaced with 'lt' only when init < limit.
- bt = BoolTest::lt;
- } else if (stride_con < 0 && init_t->lo_as_long() > limit_t->hi_as_long()) {
- // 'ne' can be replaced with 'gt' only when init > limit.
- bt = BoolTest::gt;
- } else {
- if (!ParsePredicates::is_loop_limit_check_predicate_proj(init_control)) {
- // The limit check predicate is not generated if this method trapped here before.
+ // (2.3)
+ const bool init_plus_stride_could_overflow =
+ (stride_con > 0 && init_t->hi_as_long() > max_signed_integer(iv_bt) - stride_con) ||
+ (stride_con < 0 && init_t->lo_as_long() < min_signed_integer(iv_bt) - stride_con);
+ // (2.1)
+ const bool init_gte_limit = (stride_con > 0 && init_t->hi_as_long() >= limit_t->lo_as_long()) ||
+ (stride_con < 0 && init_t->lo_as_long() <= limit_t->hi_as_long());
+
+ if (init_gte_limit && // (2.1)
+ ((bt == BoolTest::ne || init_plus_stride_could_overflow) && // (2.3)
+ !has_dominating_loop_limit_check(init_trip, limit, stride_con, iv_bt, init_control))) { // (2.2)
+ // (2) Iteration Loop Limit Check Predicate is required because neither (2.1), (2.2), nor (2.3) holds.
+ // We use the following condition:
+ // - stride > 0: init < limit
+ // - stride < 0: init > limit
+ //
+ // This predicate is always required if we have a non-equal-operator in the loop exit check (where stride = 1 is
+ // a requirement). We transform the loop exit check by using a less-than-operator. By doing so, we must always
+ // check that init < limit. Otherwise, we could have a different number of iterations at runtime.
+
+ if (!ParsePredicates::is_loop_limit_check_predicate_proj(init_control)) {
+ // The Loop Limit Check Parse Predicate is not generated if this method trapped here before.
#ifdef ASSERT
- if (TraceLoopLimitCheck) {
- tty->print("missing loop limit check:");
- loop->dump_head();
- x->dump(1);
- }
-#endif
- return false;
+ if (TraceLoopLimitCheck) {
+ tty->print("Missing Loop Limit Check Parse Predicate:");
+ loop->dump_head();
+ x->dump(1);
}
- ParsePredicateSuccessProj* loop_limit_check_parse_predicate_proj = init_control->as_IfTrue();
- ParsePredicateNode* parse_predicate = init_control->in(0)->as_ParsePredicate();
+#endif
+ return false;
+ }
+ ParsePredicateSuccessProj* loop_limit_check_parse_predicate_proj = init_control->as_IfTrue();
+ ParsePredicateNode* parse_predicate = init_control->in(0)->as_ParsePredicate();
- if (!is_dominator(get_ctrl(limit), parse_predicate->in(0)) ||
- !is_dominator(get_ctrl(init_trip), parse_predicate->in(0))) {
- return false;
- }
+ if (!is_dominator(get_ctrl(limit), parse_predicate->in(0)) ||
+ !is_dominator(get_ctrl(init_trip), parse_predicate->in(0))) {
+ return false;
+ }
- Node* cmp_limit;
- Node* bol;
+ Node* cmp_limit;
+ Node* bol;
- if (stride_con > 0) {
- cmp_limit = CmpNode::make(init_trip, limit, iv_bt);
- bol = new BoolNode(cmp_limit, BoolTest::lt);
- } else {
- cmp_limit = CmpNode::make(init_trip, limit, iv_bt);
- bol = new BoolNode(cmp_limit, BoolTest::gt);
- }
+ if (stride_con > 0) {
+ cmp_limit = CmpNode::make(init_trip, limit, iv_bt);
+ bol = new BoolNode(cmp_limit, BoolTest::lt);
+ } else {
+ cmp_limit = CmpNode::make(init_trip, limit, iv_bt);
+ bol = new BoolNode(cmp_limit, BoolTest::gt);
+ }
- insert_loop_limit_check_predicate(loop_limit_check_parse_predicate_proj, cmp_limit, bol);
+ insert_loop_limit_check_predicate(loop_limit_check_parse_predicate_proj, cmp_limit, bol);
+ }
- if (stride_con > 0) {
- // 'ne' can be replaced with 'lt' only when init < limit.
- bt = BoolTest::lt;
- } else if (stride_con < 0) {
- // 'ne' can be replaced with 'gt' only when init > limit.
- bt = BoolTest::gt;
- }
+ if (bt == BoolTest::ne) {
+ // Now we need to canonicalize the loop condition if it is 'ne'.
+ assert(stride_con == 1 || stride_con == -1, "simple increment only - checked before");
+ if (stride_con > 0) {
+ // 'ne' can be replaced with 'lt' only when init < limit. This is ensured by the inserted predicate above.
+ bt = BoolTest::lt;
+ } else {
+ assert(stride_con < 0, "must be");
+ // 'ne' can be replaced with 'gt' only when init > limit. This is ensured by the inserted predicate above.
+ bt = BoolTest::gt;
}
}
@@ -1914,6 +2079,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_
}
#endif
+ Node* adjusted_limit = limit;
if (phi_incr != nullptr) {
// If compare points directly to the phi we need to adjust
// the compare so that it points to the incr. Limit have
@@ -1927,7 +2093,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_
adjusted_limit = gvn->transform(AddNode::make(limit, stride, iv_bt));
}
- if (incl_limit) {
+ if (includes_limit) {
// The limit check guaranties that 'limit <= (max_jint - stride)' so
// we can convert 'i <= limit' to 'i < limit+1' since stride != 0.
//
@@ -2108,6 +2274,37 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_
return true;
}
+// Check if there is a dominating loop limit check of the form 'init < limit' starting at the loop entry.
+// If there is one, then we do not need to create an additional Loop Limit Check Predicate.
+bool PhaseIdealLoop::has_dominating_loop_limit_check(Node* init_trip, Node* limit, const jlong stride_con,
+ const BasicType iv_bt, Node* loop_entry) {
+ // Eagerly call transform() on the Cmp and Bool node to common them up if possible. This is required in order to
+ // successfully find a dominated test with the If node below.
+ Node* cmp_limit;
+ Node* bol;
+ if (stride_con > 0) {
+ cmp_limit = _igvn.transform(CmpNode::make(init_trip, limit, iv_bt));
+ bol = _igvn.transform(new BoolNode(cmp_limit, BoolTest::lt));
+ } else {
+ cmp_limit = _igvn.transform(CmpNode::make(init_trip, limit, iv_bt));
+ bol = _igvn.transform(new BoolNode(cmp_limit, BoolTest::gt));
+ }
+
+ // Check if there is already a dominating init < limit check. If so, we do not need a Loop Limit Check Predicate.
+ IfNode* iff = new IfNode(loop_entry, bol, PROB_MIN, COUNT_UNKNOWN);
+ // Also add fake IfProj nodes in order to call transform() on the newly created IfNode.
+ IfFalseNode* if_false = new IfFalseNode(iff);
+ IfTrueNode* if_true = new IfTrueNode(iff);
+ Node* dominated_iff = _igvn.transform(iff);
+ // ConI node? Found dominating test (IfNode::dominated_by() returns a ConI node).
+ const bool found_dominating_test = dominated_iff != nullptr && dominated_iff->is_ConI();
+
+ // Kill the If with its projections again in the next IGVN round by cutting it off from the graph.
+ _igvn.replace_input_of(iff, 0, C->top());
+ _igvn.replace_input_of(iff, 1, C->top());
+ return found_dominating_test;
+}
+
//----------------------exact_limit-------------------------------------------
Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) {
assert(loop->_head->is_CountedLoop(), "");
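Illustration (not part of the patch): a Java shape matching case (2.2) above, which the new has_dominating_loop_limit_check() is meant to recognize. Names are made up. The explicit 'init < limit' guard dominates the loop entry, so the speculative If built by the helper should fold to a constant under IGVN and no additional Loop Limit Check Predicate for (ii) is needed for the '!='-loop below.

    // DominatingLimitCheckExample.java -- illustrative only.
    public class DominatingLimitCheckExample {
        static long sum(int init, int limit) {
            long s = 0;
            if (init < limit) {                          // dominating 'init < limit' test (case 2.2)
                for (int i = init; i != limit; i++) {    // '!=' exit check, stride 1
                    s += i;
                }
            }
            return s;
        }

        public static void main(String[] args) {
            System.out.println(sum(3, 40));
        }
    }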
@@ -4448,6 +4645,7 @@ void PhaseIdealLoop::build_and_optimize() {
NOT_PRODUCT( C->verify_graph_edges(); )
worklist.push(C->top());
build_loop_late( visited, worklist, nstack );
+ if (C->failing()) { return; }
if (_verify_only) {
C->restore_major_progress(old_progress);
@@ -4702,6 +4900,7 @@ void PhaseIdealLoop::verify() const {
bool success = true;
PhaseIdealLoop phase_verify(_igvn, this);
+ if (C->failing()) return;
// Verify ctrl and idom of every node.
success &= verify_idom_and_nodes(C->root(), &phase_verify);
@@ -5954,6 +6153,7 @@ void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, N
} else {
// All of n's children have been processed, complete post-processing.
build_loop_late_post(n);
+ if (C->failing()) { return; }
if (nstack.is_empty()) {
// Finished all nodes on stack.
// Process next node on the worklist.
@@ -6100,13 +6300,15 @@ void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) {
Node *legal = LCA; // Walk 'legal' up the IDOM chain
Node *least = legal; // Best legal position so far
while( early != legal ) { // While not at earliest legal
-#ifdef ASSERT
if (legal->is_Start() && !early->is_Root()) {
+#ifdef ASSERT
// Bad graph. Print idom path and fail.
dump_bad_graph("Bad graph detected in build_loop_late", n, early, LCA);
assert(false, "Bad graph detected in build_loop_late");
- }
#endif
+ C->record_method_not_compilable("Bad graph detected in build_loop_late");
+ return;
+ }
// Find least loop nesting depth
legal = idom(legal); // Bump up the IDOM tree
// Check for lower nesting depth
diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp
index 70c403d30a5f8..6d44434d71ecc 100644
--- a/src/hotspot/share/opto/loopnode.hpp
+++ b/src/hotspot/share/opto/loopnode.hpp
@@ -1115,6 +1115,7 @@ class PhaseIdealLoop : public PhaseTransform {
// Compute the Ideal Node to Loop mapping
PhaseIdealLoop(PhaseIterGVN& igvn, LoopOptsMode mode) :
PhaseTransform(Ideal_Loop),
+ _loop_or_ctrl(igvn.C->comp_arena()),
_igvn(igvn),
_verify_me(nullptr),
_verify_only(false),
@@ -1129,6 +1130,7 @@ class PhaseIdealLoop : public PhaseTransform {
// or only verify that the graph is valid if verify_me is null.
PhaseIdealLoop(PhaseIterGVN& igvn, const PhaseIdealLoop* verify_me = nullptr) :
PhaseTransform(Ideal_Loop),
+ _loop_or_ctrl(igvn.C->comp_arena()),
_igvn(igvn),
_verify_me(verify_me),
_verify_only(verify_me == nullptr),
@@ -1206,7 +1208,7 @@ class PhaseIdealLoop : public PhaseTransform {
if (!C->failing()) {
// Cleanup any modified bits
igvn.optimize();
-
+ if (C->failing()) { return; }
v.log_loop_tree();
}
}
@@ -1361,6 +1363,8 @@ class PhaseIdealLoop : public PhaseTransform {
void rewire_cloned_nodes_to_ctrl(const ProjNode* old_ctrl, Node* new_ctrl, const Node_List& nodes_with_same_ctrl,
const Dict& old_new_mapping);
void rewire_inputs_of_clones_to_clones(Node* new_ctrl, Node* clone, const Dict& old_new_mapping, const Node* next);
+ bool has_dominating_loop_limit_check(Node* init_trip, Node* limit, jlong stride_con, BasicType iv_bt,
+ Node* loop_entry);
public:
void register_control(Node* n, IdealLoopTree *loop, Node* pred, bool update_body = true);
@@ -1549,7 +1553,7 @@ class PhaseIdealLoop : public PhaseTransform {
Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
bool split_up( Node *n, Node *blk1, Node *blk2 );
- void sink_use( Node *use, Node *post_loop );
+
Node* place_outside_loop(Node* useblock, IdealLoopTree* loop) const;
Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
void try_move_store_after_loop(Node* n);
@@ -1737,6 +1741,8 @@ class PhaseIdealLoop : public PhaseTransform {
bool clone_cmp_loadklass_down(Node* n, const Node* blk1, const Node* blk2);
bool at_relevant_ctrl(Node* n, const Node* blk1, const Node* blk2);
+
+ void update_addp_chain_base(Node* x, Node* old_base, Node* new_base);
};
diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp
index e9cbdb224571a..be2ec7dbc0727 100644
--- a/src/hotspot/share/opto/loopopts.cpp
+++ b/src/hotspot/share/opto/loopopts.cpp
@@ -237,7 +237,11 @@ bool PhaseIdealLoop::cannot_split_division(const Node* n, const Node* region) co
return false;
}
- assert(n->in(0) == nullptr, "divisions with zero check should already have bailed out earlier in split-if");
+ if (n->in(0) != nullptr) {
+ // Cannot split through phi if Div or Mod node has a control dependency to a zero check.
+ return true;
+ }
+
Node* divisor = n->in(2);
return is_divisor_counted_loop_phi(divisor, region) &&
loop_phi_backedge_type_contains_zero(divisor, zero);
@@ -1642,7 +1646,7 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
// Find control for 'x' next to use but not inside inner loops.
x_ctrl = place_outside_loop(x_ctrl, n_loop);
// Replace all uses
- if (u->is_ConstraintCast() && u->bottom_type()->higher_equal(_igvn.type(n)) && u->in(0) == x_ctrl) {
+ if (u->is_ConstraintCast() && _igvn.type(n)->higher_equal(u->bottom_type()) && u->in(0) == x_ctrl) {
// If we're sinking a chain of data nodes, we might have inserted a cast to pin the use which is not necessary
// anymore now that we're going to pin n as well
_igvn.replace_node(u, x);
@@ -1677,9 +1681,10 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
assert(!n_loop->is_member(get_loop(x_ctrl)), "should have moved out of loop");
register_new_node(x, x_ctrl);
- // Chain of AddP: (AddP base (AddP base )) must keep the same base after sinking so:
- // 1- We don't add a CastPP here when the first one is sunk so if the second one is not, their bases remain
- // the same.
+ // Chain of AddP nodes: (AddP base (AddP base (AddP base )))
+ // All AddP nodes must keep the same base after sinking so:
+ // 1- We don't add a CastPP here until the last one of the chain is sunk: if part of the chain is not sunk,
+ // their bases remain the same.
// (see 2- below)
assert(!x->is_AddP() || !x->in(AddPNode::Address)->is_AddP() ||
x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base) ||
@@ -1693,21 +1698,22 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
Node* in = x->in(k);
if (in != nullptr && n_loop->is_member(get_loop(get_ctrl(in)))) {
const Type* in_t = _igvn.type(in);
- cast = ConstraintCastNode::make_cast_for_type(x_ctrl, in, in_t, ConstraintCastNode::UnconditionalDependency);
+ cast = ConstraintCastNode::make_cast_for_type(x_ctrl, in, in_t,
+ ConstraintCastNode::UnconditionalDependency, nullptr);
}
if (cast != nullptr) {
- register_new_node(cast, x_ctrl);
+ Node* prev = _igvn.hash_find_insert(cast);
+ if (prev != nullptr && get_ctrl(prev) == x_ctrl) {
+ cast->destruct(&_igvn);
+ cast = prev;
+ } else {
+ register_new_node(cast, x_ctrl);
+ }
x->replace_edge(in, cast);
- // Chain of AddP:
- // 2- A CastPP of the base is only added now that both AddP nodes are sunk
+ // Chain of AddP nodes:
+ // 2- A CastPP of the base is only added now that all AddP nodes are sunk
if (x->is_AddP() && k == AddPNode::Base) {
- for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
- Node* u = x->fast_out(i);
- if (u->is_AddP() && u->in(AddPNode::Base) == n->in(AddPNode::Base)) {
- _igvn.replace_input_of(u, AddPNode::Base, cast);
- assert(u->find_out_with(Op_AddP) == nullptr, "more than 2 chained AddP nodes?");
- }
- }
+ update_addp_chain_base(x, n->in(AddPNode::Base), cast);
}
break;
}
@@ -1722,6 +1728,22 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
}
}
+void PhaseIdealLoop::update_addp_chain_base(Node* x, Node* old_base, Node* new_base) {
+ ResourceMark rm;
+ Node_List wq;
+ wq.push(x);
+ while (wq.size() != 0) {
+ Node* n = wq.pop();
+ for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+ Node* u = n->fast_out(i);
+ if (u->is_AddP() && u->in(AddPNode::Base) == old_base) {
+ _igvn.replace_input_of(u, AddPNode::Base, new_base);
+ wq.push(u);
+ }
+ }
+ }
+}
+
// Compute the early control of a node by following its inputs until we reach
// nodes that are pinned. Then compute the LCA of the control of all pinned nodes.
Node* PhaseIdealLoop::compute_early_ctrl(Node* n, Node* n_ctrl) {
@@ -1793,6 +1815,14 @@ bool PhaseIdealLoop::ctrl_of_use_out_of_loop(const Node* n, Node* n_ctrl, IdealL
if (n_loop->is_member(u_loop)) {
return false; // Found use in inner loop
}
+ // Sinking a node from a pre loop to its main loop pins the node between the pre and main loops. If that node is input
+ // to a check that's eliminated by range check elimination, it becomes input to an expression that feeds into the exit
+ // test of the pre loop above the point in the graph where it's pinned.
+ if (n_loop->_head->is_CountedLoop() && n_loop->_head->as_CountedLoop()->is_pre_loop() &&
+ u_loop->_head->is_CountedLoop() && u_loop->_head->as_CountedLoop()->is_main_loop() &&
+ n_loop->_next == get_loop(u_loop->_head->as_CountedLoop()->skip_strip_mined())) {
+ return false;
+ }
return true;
}
@@ -2005,17 +2035,6 @@ CmpNode*PhaseIdealLoop::clone_bool(PhiNode* phi) {
return (CmpNode*)cmp;
}
-//------------------------------sink_use---------------------------------------
-// If 'use' was in the loop-exit block, it now needs to be sunk
-// below the post-loop merge point.
-void PhaseIdealLoop::sink_use( Node *use, Node *post_loop ) {
- if (!use->is_CFG() && get_ctrl(use) == post_loop->in(2)) {
- set_ctrl(use, post_loop);
- for (DUIterator j = use->outs(); use->has_out(j); j++)
- sink_use(use->out(j), post_loop);
- }
-}
-
void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
IdealLoopTree* loop, IdealLoopTree* outer_loop,
Node_List*& split_if_set, Node_List*& split_bool_set,
@@ -2082,7 +2101,7 @@ void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
while( use->in(idx) != old ) idx++;
Node *prev = use->is_CFG() ? use : get_ctrl(use);
assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" );
- Node *cfg = prev->_idx >= new_counter
+ Node* cfg = (prev->_idx >= new_counter && prev->is_Region())
? prev->in(2)
: idom(prev);
if( use->is_Phi() ) // Phi use is in prior block
@@ -2106,7 +2125,7 @@ void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
while(!outer_loop->is_member(get_loop(cfg))) {
prev = cfg;
- cfg = cfg->_idx >= new_counter ? cfg->in(2) : idom(cfg);
+ cfg = (cfg->_idx >= new_counter && cfg->is_Region()) ? cfg->in(2) : idom(cfg);
}
// If the use occurs after merging several exits from the loop, then
// old value must have dominated all those exits. Since the same old
@@ -2164,10 +2183,6 @@ void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
if( hit ) // Go ahead and re-hash for hits.
_igvn.replace_node( use, hit );
}
-
- // If 'use' was in the loop-exit block, it now needs to be sunk
- // below the post-loop merge point.
- sink_use( use, prev );
}
}
}
@@ -2534,8 +2549,6 @@ void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* l
// We need a Region to merge the exit from the peeled body and the
// exit from the old loop body.
RegionNode *r = new RegionNode(3);
- // Map the old use to the new merge point
- old_new.map( use->_idx, r );
uint dd_r = MIN2(dom_depth(newuse), dom_depth(use));
assert(dd_r >= dom_depth(dom_lca(newuse, use)), "" );
@@ -2571,12 +2584,24 @@ void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* l
l -= uses_found; // we deleted 1 or more copies of this edge
}
+ assert(use->is_Proj(), "loop exit should be projection");
+ // lazy_replace() below moves all nodes that are:
+ // - control dependent on the loop exit or
+ // - have control set to the loop exit
+ // below the post-loop merge point. lazy_replace() takes a dead control as first input. To make it
+ // possible to use it, the loop exit projection is cloned and becomes the new exit projection. The initial one
+ // becomes dead and is "replaced" by the region.
+ Node* use_clone = use->clone();
+ register_control(use_clone, use_loop, idom(use), dom_depth(use));
// Now finish up 'r'
r->set_req(1, newuse);
- r->set_req(2, use);
+ r->set_req(2, use_clone);
_igvn.register_new_node_with_optimizer(r);
set_loop(r, use_loop);
set_idom(r, (side_by_side_idom == nullptr) ? newuse->in(0) : side_by_side_idom, dd_r);
+ lazy_replace(use, r);
+ // Map the (cloned) old use to the new merge point
+ old_new.map(use_clone->_idx, r);
} // End of if a loop-exit test
}
}
@@ -4216,6 +4241,12 @@ void PhaseIdealLoop::move_unordered_reduction_out_of_loop(IdealLoopTree* loop) {
break; // Chain traversal fails.
}
+ assert(current->vect_type() != nullptr, "must have vector type");
+ if (current->vect_type() != last_ur->vect_type()) {
+ // Reductions do not have the same vector type (length and element type).
+ break; // Chain traversal fails.
+ }
+
// Expect single use of UnorderedReduction, except for last_ur.
if (current == last_ur) {
// Expect all uses to be outside the loop, except phi.
diff --git a/src/hotspot/share/opto/matcher.cpp b/src/hotspot/share/opto/matcher.cpp
index ec0b465adf668..51fad220e53c8 100644
--- a/src/hotspot/share/opto/matcher.cpp
+++ b/src/hotspot/share/opto/matcher.cpp
@@ -356,7 +356,9 @@ void Matcher::match( ) {
// Recursively match trees from old space into new space.
// Correct leaves of new-space Nodes; they point to old-space.
_visited.clear();
- C->set_cached_top_node(xform( C->top(), live_nodes ));
+ Node* const n = xform(C->top(), live_nodes);
+ if (C->failing()) return;
+ C->set_cached_top_node(n);
if (!C->failing()) {
Node* xroot = xform( C->root(), 1 );
if (xroot == nullptr) {
diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp
index 286f901bd5dab..76ed95c4a789b 100644
--- a/src/hotspot/share/opto/memnode.cpp
+++ b/src/hotspot/share/opto/memnode.cpp
@@ -593,8 +593,13 @@ Node* LoadNode::find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node
Node* dest = ac->in(ArrayCopyNode::Dest);
if (dest == ld_base) {
- const TypeX *ld_offs_t = phase->type(ld_offs)->isa_intptr_t();
- if (ac->modifies(ld_offs_t->_lo, ld_offs_t->_hi, phase, can_see_stored_value)) {
+ const TypeX* ld_offs_t = phase->type(ld_offs)->isa_intptr_t();
+ assert(!ld_offs_t->empty(), "dead reference should be checked already");
+ // Take into account vector or unsafe access size
+ jlong ld_size_in_bytes = (jlong)memory_size();
+ jlong offset_hi = ld_offs_t->_hi + ld_size_in_bytes - 1;
+ offset_hi = MIN2(offset_hi, (jlong)(TypeX::MAX->_hi)); // Take care for overflow in 32-bit VM
+ if (ac->modifies(ld_offs_t->_lo, (intptr_t)offset_hi, phase, can_see_stored_value)) {
return ac;
}
if (!can_see_stored_value) {
diff --git a/src/hotspot/share/opto/mulnode.cpp b/src/hotspot/share/opto/mulnode.cpp
index 0636bcd31dc1f..f42d06a365000 100644
--- a/src/hotspot/share/opto/mulnode.cpp
+++ b/src/hotspot/share/opto/mulnode.cpp
@@ -281,45 +281,86 @@ Node *MulINode::Ideal(PhaseGVN *phase, bool can_reshape) {
return res; // Return final result
}
-// Classes to perform mul_ring() for MulI/MulLNode.
+// This template class performs type multiplication for MulI/MulLNode. NativeType is either jint or jlong.
+// In this class, the inputs of the MulNodes are named left and right with types [left_lo,left_hi] and [right_lo,right_hi].
//
-// This class checks if all cross products of the left and right input of a multiplication have the same "overflow value".
-// Without overflow/underflow:
-// Product is positive? High signed multiplication result: 0
-// Product is negative? High signed multiplication result: -1
+// In general, the multiplication of two x-bit values could produce a result that consumes up to 2x bits if there is
+// enough space to hold them all. We can therefore distinguish the following two cases for the product:
+// - no overflow (i.e. product fits into x bits)
+// - overflow (i.e. product does not fit into x bits)
//
-// We normalize these values (see normalize_overflow_value()) such that we get the same "overflow value" by adding 1 if
-// the product is negative. This allows us to compare all the cross product "overflow values". If one is different,
-// compared to the others, then we know that this multiplication has a different number of over- or underflows compared
-// to the others. In this case, we need to use bottom type and cannot guarantee a better type. Otherwise, we can take
-// the min und max of all computed cross products as type of this Mul node.
-template<typename IntegerType>
-class IntegerMulRing {
- using NativeType = std::conditional_t<std::is_same<IntegerType, TypeInt>::value, jint, jlong>;
+// When multiplying the two x-bit inputs 'left' and 'right' with their x-bit types [left_lo,left_hi] and [right_lo,right_hi]
+// we need to find the minimum and maximum of all possible products to define a new type. To do that, we compute the
+// cross product of [left_lo,left_hi] and [right_lo,right_hi] in 2x-bit space where no over- or underflow can happen.
+// The cross product consists of the following four multiplications with 2x-bit results:
+// (1) left_lo * right_lo
+// (2) left_lo * right_hi
+// (3) left_hi * right_lo
+// (4) left_hi * right_hi
+//
+// Let's define the following two functions:
+// - Lx(i): Returns the lower x bits of the 2x-bit number i.
+// - Ux(i): Returns the upper x bits of the 2x-bit number i.
+//
+// Let's first assume all products are positive where only overflows are possible but no underflows. If there is no
+// overflow for a product p, then the upper x bits of the 2x-bit result p are all zero:
+// Ux(p) = 0
+// Lx(p) = p
+//
+// If none of the multiplications (1)-(4) overflow, we can truncate the upper x bits and use the following result type
+// with x bits:
+// [result_lo,result_hi] = [MIN(Lx(1),Lx(2),Lx(3),Lx(4)),MAX(Lx(1),Lx(2),Lx(3),Lx(4))]
+//
+// If any of these multiplications overflows, we could pessimistically take the bottom type for the x bit result
+// (i.e. all values in the x-bit space could be possible):
+// [result_lo,result_hi] = [NativeType_min,NativeType_max]
+//
+// However, in case of any overflow, we can do better by analyzing the upper x bits of all multiplications (1)-(4) with
+// 2x-bit results. The upper x bits tell us something about how many times a multiplication has overflown the lower
+// x bits. If the upper x bits of (1)-(4) are all equal, then we know that all of these multiplications overflowed
+// the lower x bits the same number of times:
+// Ux((1)) = Ux((2)) = Ux((3)) = Ux((4))
+//
+// If all upper x bits are equal, we can conclude:
+ // Lx(MIN((1),(2),(3),(4))) = MIN(Lx(1),Lx(2),Lx(3),Lx(4))
+ // Lx(MAX((1),(2),(3),(4))) = MAX(Lx(1),Lx(2),Lx(3),Lx(4))
+//
+// Therefore, we can use the same precise x-bit result type as for the no-overflow case:
+ // [result_lo,result_hi] = [MIN(Lx(1),Lx(2),Lx(3),Lx(4)),MAX(Lx(1),Lx(2),Lx(3),Lx(4))]
+//
+//
+// Now let's assume that (1)-(4) are signed multiplications where over- and underflow could occur:
+ // Negative numbers are all sign extended with ones. Therefore, if a negative product does not underflow, then the
+// upper x bits of the 2x-bit result are all set to ones which is minus one in two's complement. If there is an underflow,
+// the upper x bits are decremented by the number of times an underflow occurred. The smallest possible negative product
+// is NativeType_min*NativeType_max, where the upper x bits are set to NativeType_min / 2 (b11...0). It is therefore
+// impossible to underflow the upper x bits. Thus, when having all ones (i.e. minus one) in the upper x bits, we know
+// that there is no underflow.
+//
+// To be able to compare the number of over-/underflows of positive and negative products, respectively, we normalize
+// the upper x bits of negative 2x-bit products by adding one. This way a product has no over- or underflow if the
+// normalized upper x bits are zero. Now we can use the same improved type as for strictly positive products because we
+// can compare the upper x bits in a unified way with N() being the normalization function:
+ // N(Ux((1))) = N(Ux((2))) = N(Ux((3))) = N(Ux((4)))
+template<typename NativeType>
+class IntegerTypeMultiplication {
NativeType _lo_left;
NativeType _lo_right;
NativeType _hi_left;
NativeType _hi_right;
- NativeType _lo_lo_product;
- NativeType _lo_hi_product;
- NativeType _hi_lo_product;
- NativeType _hi_hi_product;
short _widen_left;
short _widen_right;
static const Type* overflow_type();
- static NativeType multiply_high_signed_overflow_value(NativeType x, NativeType y);
+ static NativeType multiply_high(NativeType x, NativeType y);
+ const Type* create_type(NativeType lo, NativeType hi) const;
- // Pre-compute cross products which are used at several places
- void compute_cross_products() {
- _lo_lo_product = java_multiply(_lo_left, _lo_right);
- _lo_hi_product = java_multiply(_lo_left, _hi_right);
- _hi_lo_product = java_multiply(_hi_left, _lo_right);
- _hi_hi_product = java_multiply(_hi_left, _hi_right);
+ static NativeType multiply_high_signed_overflow_value(NativeType x, NativeType y) {
+ return normalize_overflow_value(x, y, multiply_high(x, y));
}
- bool cross_products_not_same_overflow() const {
+ bool cross_product_not_same_overflow_value() const {
const NativeType lo_lo_high_product = multiply_high_signed_overflow_value(_lo_left, _lo_right);
const NativeType lo_hi_high_product = multiply_high_signed_overflow_value(_lo_left, _hi_right);
const NativeType hi_lo_high_product = multiply_high_signed_overflow_value(_hi_left, _lo_right);
@@ -329,66 +370,95 @@ class IntegerMulRing {
hi_lo_high_product != hi_hi_high_product;
}
+ bool does_product_overflow(NativeType x, NativeType y) const {
+ return multiply_high_signed_overflow_value(x, y) != 0;
+ }
+
static NativeType normalize_overflow_value(const NativeType x, const NativeType y, NativeType result) {
return java_multiply(x, y) < 0 ? result + 1 : result;
}
public:
- IntegerMulRing(const IntegerType* left, const IntegerType* right) : _lo_left(left->_lo), _lo_right(right->_lo),
- _hi_left(left->_hi), _hi_right(right->_hi), _widen_left(left->_widen), _widen_right(right->_widen) {
- compute_cross_products();
- }
+ template<class IntegerType>
+ IntegerTypeMultiplication(const IntegerType* left, const IntegerType* right)
+ : _lo_left(left->_lo), _lo_right(right->_lo),
+ _hi_left(left->_hi), _hi_right(right->_hi),
+ _widen_left(left->_widen), _widen_right(right->_widen) {}
// Compute the product type by multiplying the two input type ranges. We take the minimum and maximum of all possible
// values (requires 4 multiplications of all possible combinations of the two range boundary values). If any of these
// multiplications overflows/underflows, we need to make sure that they all have the same number of overflows/underflows
// If that is not the case, we return the bottom type to cover all values due to the inconsistent overflows/underflows).
const Type* compute() const {
- if (cross_products_not_same_overflow()) {
+ if (cross_product_not_same_overflow_value()) {
return overflow_type();
}
- const NativeType min = MIN4(_lo_lo_product, _lo_hi_product, _hi_lo_product, _hi_hi_product);
- const NativeType max = MAX4(_lo_lo_product, _lo_hi_product, _hi_lo_product, _hi_hi_product);
- return IntegerType::make(min, max, MAX2(_widen_left, _widen_right));
+
+ NativeType lo_lo_product = java_multiply(_lo_left, _lo_right);
+ NativeType lo_hi_product = java_multiply(_lo_left, _hi_right);
+ NativeType hi_lo_product = java_multiply(_hi_left, _lo_right);
+ NativeType hi_hi_product = java_multiply(_hi_left, _hi_right);
+ const NativeType min = MIN4(lo_lo_product, lo_hi_product, hi_lo_product, hi_hi_product);
+ const NativeType max = MAX4(lo_lo_product, lo_hi_product, hi_lo_product, hi_hi_product);
+ return create_type(min, max);
}
-};
+ bool does_overflow() const {
+ return does_product_overflow(_lo_left, _lo_right) ||
+ does_product_overflow(_lo_left, _hi_right) ||
+ does_product_overflow(_hi_left, _lo_right) ||
+ does_product_overflow(_hi_left, _hi_right);
+ }
+};
template <>
-const Type* IntegerMulRing<TypeInt>::overflow_type() {
+const Type* IntegerTypeMultiplication<jint>::overflow_type() {
return TypeInt::INT;
}
template <>
-jint IntegerMulRing<TypeInt>::multiply_high_signed_overflow_value(const jint x, const jint y) {
+jint IntegerTypeMultiplication<jint>::multiply_high(const jint x, const jint y) {
const jlong x_64 = x;
const jlong y_64 = y;
const jlong product = x_64 * y_64;
- const jint result = (jint)((uint64_t)product >> 32u);
- return normalize_overflow_value(x, y, result);
+ return (jint)((uint64_t)product >> 32u);
+}
+
+template <>
+const Type* IntegerTypeMultiplication<jint>::create_type(jint lo, jint hi) const {
+ return TypeInt::make(lo, hi, MAX2(_widen_left, _widen_right));
}
template <>
-const Type* IntegerMulRing<TypeLong>::overflow_type() {
+const Type* IntegerTypeMultiplication<jlong>::overflow_type() {
return TypeLong::LONG;
}
template <>
-jlong IntegerMulRing<TypeLong>::multiply_high_signed_overflow_value(const jlong x, const jlong y) {
- const jlong result = multiply_high_signed(x, y);
- return normalize_overflow_value(x, y, result);
+jlong IntegerTypeMultiplication<jlong>::multiply_high(const jlong x, const jlong y) {
+ return multiply_high_signed(x, y);
+}
+
+template <>
+const Type* IntegerTypeMultiplication<jlong>::create_type(jlong lo, jlong hi) const {
+ return TypeLong::make(lo, hi, MAX2(_widen_left, _widen_right));
}
// Compute the product type of two integer ranges into this node.
const Type* MulINode::mul_ring(const Type* type_left, const Type* type_right) const {
- const IntegerMulRing<TypeInt> integer_mul_ring(type_left->is_int(), type_right->is_int());
- return integer_mul_ring.compute();
+ const IntegerTypeMultiplication<jint> integer_multiplication(type_left->is_int(), type_right->is_int());
+ return integer_multiplication.compute();
+}
+
+bool MulINode::does_overflow(const TypeInt* type_left, const TypeInt* type_right) {
+ const IntegerTypeMultiplication<jint> integer_multiplication(type_left, type_right);
+ return integer_multiplication.does_overflow();
}
// Compute the product type of two long ranges into this node.
const Type* MulLNode::mul_ring(const Type* type_left, const Type* type_right) const {
- const IntegerMulRing<TypeLong> integer_mul_ring(type_left->is_long(), type_right->is_long());
- return integer_mul_ring.compute();
+ const IntegerTypeMultiplication<jlong> integer_multiplication(type_left->is_long(), type_right->is_long());
+ return integer_multiplication.compute();
}
//=============================================================================
diff --git a/src/hotspot/share/opto/mulnode.hpp b/src/hotspot/share/opto/mulnode.hpp
index 84307fb00fb6c..01d418a5b1041 100644
--- a/src/hotspot/share/opto/mulnode.hpp
+++ b/src/hotspot/share/opto/mulnode.hpp
@@ -95,6 +95,7 @@ class MulINode : public MulNode {
virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *mul_ring( const Type *, const Type * ) const;
+ static bool does_overflow(const TypeInt* type_left, const TypeInt* type_right);
const Type *mul_id() const { return TypeInt::ONE; }
const Type *add_id() const { return TypeInt::ZERO; }
int add_opcode() const { return Op_AddI; }
diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp
index 62d6c4f690f1f..e09aaf2acb7fb 100644
--- a/src/hotspot/share/opto/node.cpp
+++ b/src/hotspot/share/opto/node.cpp
@@ -1759,8 +1759,8 @@ Node* Node::find(const int idx, bool only_ctrl) {
class PrintBFS {
public:
- PrintBFS(const Node* start, const int max_distance, const Node* target, const char* options)
- : _start(start), _max_distance(max_distance), _target(target), _options(options),
+ PrintBFS(const Node* start, const int max_distance, const Node* target, const char* options, outputStream* st)
+ : _start(start), _max_distance(max_distance), _target(target), _options(options), _output(st),
_dcc(this), _info_uid(cmpkey, hashkey) {}
void run();
@@ -1780,6 +1780,7 @@ class PrintBFS {
const int _max_distance;
const Node* _target;
const char* _options;
+ outputStream* _output;
// options
bool _traverse_inputs = false;
@@ -1819,7 +1820,7 @@ class PrintBFS {
bool _print_blocks = false;
bool _print_old = false;
bool _dump_only = false;
- static void print_options_help(bool print_examples);
+ void print_options_help(bool print_examples);
bool parse_options();
public:
@@ -1836,9 +1837,9 @@ class PrintBFS {
// node info
static Node* old_node(const Node* n); // mach node -> prior IR node
- static void print_node_idx(const Node* n); // to tty
- static void print_block_id(const Block* b); // to tty
- static void print_node_block(const Node* n); // to tty: _pre_order, head idx, _idom, _dom_depth
+ void print_node_idx(const Node* n);
+ void print_block_id(const Block* b);
+ void print_node_block(const Node* n); // _pre_order, head idx, _idom, _dom_depth
// traversal data structures
GrowableArray _worklist; // BFS queue
@@ -1903,7 +1904,7 @@ void PrintBFS::run() {
// set up configuration for BFS and print
bool PrintBFS::configure() {
if (_max_distance < 0) {
- tty->print("dump_bfs: max_distance must be non-negative!\n");
+ _output->print_cr("dump_bfs: max_distance must be non-negative!");
return false;
}
return parse_options();
@@ -1941,7 +1942,7 @@ void PrintBFS::select() {
select_all();
} else {
if (find_info(_target) == nullptr) {
- tty->print("Could not find target in BFS.\n");
+ _output->print_cr("Could not find target in BFS.");
return;
}
if (_all_paths) {
@@ -2037,96 +2038,96 @@ void PrintBFS::print() {
print_node(n);
}
} else {
- tty->print("No nodes to print.\n");
+ _output->print_cr("No nodes to print.");
}
}
void PrintBFS::print_options_help(bool print_examples) {
- tty->print("Usage: node->dump_bfs(int max_distance, Node* target, char* options)\n");
- tty->print("\n");
- tty->print("Use cases:\n");
- tty->print(" BFS traversal: no target required\n");
- tty->print(" shortest path: set target\n");
- tty->print(" all paths: set target and put 'A' in options\n");
- tty->print(" detect loop: subcase of all paths, have start==target\n");
- tty->print("\n");
- tty->print("Arguments:\n");
- tty->print(" this/start: staring point of BFS\n");
- tty->print(" target:\n");
- tty->print(" if null: simple BFS\n");
- tty->print(" else: shortest path or all paths between this/start and target\n");
- tty->print(" options:\n");
- tty->print(" if null: same as \"cdmox@B\"\n");
- tty->print(" else: use combination of following characters\n");
- tty->print(" h: display this help info\n");
- tty->print(" H: display this help info, with examples\n");
- tty->print(" +: traverse in-edges (on if neither + nor -)\n");
- tty->print(" -: traverse out-edges\n");
- tty->print(" c: visit control nodes\n");
- tty->print(" d: visit data nodes\n");
- tty->print(" m: visit memory nodes\n");
- tty->print(" o: visit other nodes\n");
- tty->print(" x: visit mixed nodes\n");
- tty->print(" C: boundary control nodes\n");
- tty->print(" D: boundary data nodes\n");
- tty->print(" M: boundary memory nodes\n");
- tty->print(" O: boundary other nodes\n");
- tty->print(" X: boundary mixed nodes\n");
- tty->print(" #: display node category in color (not supported in all terminals)\n");
- tty->print(" S: sort displayed nodes by node idx\n");
- tty->print(" A: all paths (not just shortest path to target)\n");
- tty->print(" @: print old nodes - before matching (if available)\n");
- tty->print(" B: print scheduling blocks (if available)\n");
- tty->print(" $: dump only, no header, no other columns\n");
- tty->print("\n");
- tty->print("recursively follow edges to nodes with permitted visit types,\n");
- tty->print("on the boundary additionally display nodes allowed in boundary types\n");
- tty->print("Note: the categories can be overlapping. For example a mixed node\n");
- tty->print(" can contain control and memory output. Some from the other\n");
- tty->print(" category are also control (Halt, Return, etc).\n");
- tty->print("\n");
- tty->print("output columns:\n");
- tty->print(" dist: BFS distance to this/start\n");
- tty->print(" apd: all paths distance (d_start + d_target)\n");
- tty->print(" block: block identifier, based on _pre_order\n");
- tty->print(" head: first node in block\n");
- tty->print(" idom: head node of idom block\n");
- tty->print(" depth: depth of block (_dom_depth)\n");
- tty->print(" old: old IR node - before matching\n");
- tty->print(" dump: node->dump()\n");
- tty->print("\n");
- tty->print("Note: if none of the \"cmdxo\" characters are in the options string\n");
- tty->print(" then we set all of them.\n");
- tty->print(" This allows for short strings like \"#\" for colored input traversal\n");
- tty->print(" or \"-#\" for colored output traversal.\n");
+ _output->print_cr("Usage: node->dump_bfs(int max_distance, Node* target, char* options)");
+ _output->print_cr("");
+ _output->print_cr("Use cases:");
+ _output->print_cr(" BFS traversal: no target required");
+ _output->print_cr(" shortest path: set target");
+ _output->print_cr(" all paths: set target and put 'A' in options");
+ _output->print_cr(" detect loop: subcase of all paths, have start==target");
+ _output->print_cr("");
+ _output->print_cr("Arguments:");
+ _output->print_cr(" this/start: staring point of BFS");
+ _output->print_cr(" target:");
+ _output->print_cr(" if null: simple BFS");
+ _output->print_cr(" else: shortest path or all paths between this/start and target");
+ _output->print_cr(" options:");
+ _output->print_cr(" if null: same as \"cdmox@B\"");
+ _output->print_cr(" else: use combination of following characters");
+ _output->print_cr(" h: display this help info");
+ _output->print_cr(" H: display this help info, with examples");
+ _output->print_cr(" +: traverse in-edges (on if neither + nor -)");
+ _output->print_cr(" -: traverse out-edges");
+ _output->print_cr(" c: visit control nodes");
+ _output->print_cr(" d: visit data nodes");
+ _output->print_cr(" m: visit memory nodes");
+ _output->print_cr(" o: visit other nodes");
+ _output->print_cr(" x: visit mixed nodes");
+ _output->print_cr(" C: boundary control nodes");
+ _output->print_cr(" D: boundary data nodes");
+ _output->print_cr(" M: boundary memory nodes");
+ _output->print_cr(" O: boundary other nodes");
+ _output->print_cr(" X: boundary mixed nodes");
+ _output->print_cr(" #: display node category in color (not supported in all terminals)");
+ _output->print_cr(" S: sort displayed nodes by node idx");
+ _output->print_cr(" A: all paths (not just shortest path to target)");
+ _output->print_cr(" @: print old nodes - before matching (if available)");
+ _output->print_cr(" B: print scheduling blocks (if available)");
+ _output->print_cr(" $: dump only, no header, no other columns");
+ _output->print_cr("");
+ _output->print_cr("recursively follow edges to nodes with permitted visit types,");
+ _output->print_cr("on the boundary additionally display nodes allowed in boundary types");
+ _output->print_cr("Note: the categories can be overlapping. For example a mixed node");
+ _output->print_cr(" can contain control and memory output. Some from the other");
+ _output->print_cr(" category are also control (Halt, Return, etc).");
+ _output->print_cr("");
+ _output->print_cr("output columns:");
+ _output->print_cr(" dist: BFS distance to this/start");
+ _output->print_cr(" apd: all paths distance (d_outputart + d_target)");
+ _output->print_cr(" block: block identifier, based on _pre_order");
+ _output->print_cr(" head: first node in block");
+ _output->print_cr(" idom: head node of idom block");
+ _output->print_cr(" depth: depth of block (_dom_depth)");
+ _output->print_cr(" old: old IR node - before matching");
+ _output->print_cr(" dump: node->dump()");
+ _output->print_cr("");
+ _output->print_cr("Note: if none of the \"cmdxo\" characters are in the options string");
+ _output->print_cr(" then we set all of them.");
+ _output->print_cr(" This allows for short strings like \"#\" for colored input traversal");
+ _output->print_cr(" or \"-#\" for colored output traversal.");
if (print_examples) {
- tty->print("\n");
- tty->print("Examples:\n");
- tty->print(" if->dump_bfs(10, 0, \"+cxo\")\n");
- tty->print(" starting at some if node, traverse inputs recursively\n");
- tty->print(" only along control (mixed and other can also be control)\n");
- tty->print(" phi->dump_bfs(5, 0, \"-dxo\")\n");
- tty->print(" starting at phi node, traverse outputs recursively\n");
- tty->print(" only along data (mixed and other can also have data flow)\n");
- tty->print(" find_node(385)->dump_bfs(3, 0, \"cdmox+#@B\")\n");
- tty->print(" find inputs of node 385, up to 3 nodes up (+)\n");
- tty->print(" traverse all nodes (cdmox), use colors (#)\n");
- tty->print(" display old nodes and blocks, if they exist\n");
- tty->print(" useful call to start with\n");
- tty->print(" find_node(102)->dump_bfs(10, 0, \"dCDMOX-\")\n");
- tty->print(" find non-data dependencies of a data node\n");
- tty->print(" follow data node outputs until we find another category\n");
- tty->print(" node as the boundary\n");
- tty->print(" x->dump_bfs(10, y, 0)\n");
- tty->print(" find shortest path from x to y, along any edge or node\n");
- tty->print(" will not find a path if it is longer than 10\n");
- tty->print(" useful to find how x and y are related\n");
- tty->print(" find_node(741)->dump_bfs(20, find_node(746), \"c+\")\n");
- tty->print(" find shortest control path between two nodes\n");
- tty->print(" find_node(741)->dump_bfs(8, find_node(746), \"cdmox+A\")\n");
- tty->print(" find all paths (A) between two nodes of length at most 8\n");
- tty->print(" find_node(741)->dump_bfs(7, find_node(741), \"c+A\")\n");
- tty->print(" find all control loops for this node\n");
+ _output->print_cr("");
+ _output->print_cr("Examples:");
+ _output->print_cr(" if->dump_bfs(10, 0, \"+cxo\")");
+ _output->print_cr(" starting at some if node, traverse inputs recursively");
+ _output->print_cr(" only along control (mixed and other can also be control)");
+ _output->print_cr(" phi->dump_bfs(5, 0, \"-dxo\")");
+ _output->print_cr(" starting at phi node, traverse outputs recursively");
+ _output->print_cr(" only along data (mixed and other can also have data flow)");
+ _output->print_cr(" find_node(385)->dump_bfs(3, 0, \"cdmox+#@B\")");
+ _output->print_cr(" find inputs of node 385, up to 3 nodes up (+)");
+ _output->print_cr(" traverse all nodes (cdmox), use colors (#)");
+ _output->print_cr(" display old nodes and blocks, if they exist");
+ _output->print_cr(" useful call to start with");
+ _output->print_cr(" find_node(102)->dump_bfs(10, 0, \"dCDMOX-\")");
+ _output->print_cr(" find non-data dependencies of a data node");
+ _output->print_cr(" follow data node outputs until we find another category");
+ _output->print_cr(" node as the boundary");
+ _output->print_cr(" x->dump_bfs(10, y, 0)");
+ _output->print_cr(" find shortest path from x to y, along any edge or node");
+ _output->print_cr(" will not find a path if it is longer than 10");
+ _output->print_cr(" useful to find how x and y are related");
+ _output->print_cr(" find_node(741)->dump_bfs(20, find_node(746), \"c+\")");
+ _output->print_cr(" find shortest control path between two nodes");
+ _output->print_cr(" find_node(741)->dump_bfs(8, find_node(746), \"cdmox+A\")");
+ _output->print_cr(" find all paths (A) between two nodes of length at most 8");
+ _output->print_cr(" find_node(741)->dump_bfs(7, find_node(741), \"c+A\")");
+ _output->print_cr(" find all control loops for this node");
}
}
@@ -2198,8 +2199,8 @@ bool PrintBFS::parse_options() {
print_options_help(true);
return false;
default:
- tty->print_cr("dump_bfs: Unrecognized option \'%c\'", _options[i]);
- tty->print_cr("for help, run: find_node(0)->dump_bfs(0,0,\"H\")");
+ _output->print_cr("dump_bfs: Unrecognized option \'%c\'", _options[i]);
+ _output->print_cr("for help, run: find_node(0)->dump_bfs(0,0,\"H\")");
return false;
}
}
@@ -2278,14 +2279,14 @@ void PrintBFS::print_node_idx(const Node* n) {
} else {
os::snprintf_checked(buf, sizeof(buf), "o%d", n->_idx); // old node
}
- tty->print("%6s", buf);
+ _output->print("%6s", buf);
}
void PrintBFS::print_block_id(const Block* b) {
Compile* C = Compile::current();
char buf[30];
os::snprintf_checked(buf, sizeof(buf), "B%d", b->_pre_order);
- tty->print("%7s", buf);
+ _output->print("%7s", buf);
}
void PrintBFS::print_node_block(const Node* n) {
@@ -2294,19 +2295,19 @@ void PrintBFS::print_node_block(const Node* n) {
? C->cfg()->get_block_for_node(n)
: nullptr; // guard against old nodes
if (b == nullptr) {
- tty->print(" _"); // Block
- tty->print(" _"); // head
- tty->print(" _"); // idom
- tty->print(" _"); // depth
+ _output->print(" _"); // Block
+ _output->print(" _"); // head
+ _output->print(" _"); // idom
+ _output->print(" _"); // depth
} else {
print_block_id(b);
print_node_idx(b->head());
if (b->_idom) {
print_node_idx(b->_idom->head());
} else {
- tty->print(" _"); // idom
+ _output->print(" _"); // idom
}
- tty->print("%6d ", b->_dom_depth);
+ _output->print("%6d ", b->_dom_depth);
}
}
@@ -2336,39 +2337,39 @@ void PrintBFS::print_header() const {
if (_dump_only) {
return; // no header in dump only mode
}
- tty->print("dist"); // distance
+ _output->print("dist"); // distance
if (_all_paths) {
- tty->print(" apd"); // all paths distance
+ _output->print(" apd"); // all paths distance
}
if (_print_blocks) {
- tty->print(" [block head idom depth]"); // block
+ _output->print(" [block head idom depth]"); // block
}
if (_print_old) {
- tty->print(" old"); // old node
+ _output->print(" old"); // old node
}
- tty->print(" dump\n"); // node dump
- tty->print("---------------------------------------------\n");
+ _output->print(" dump\n"); // node dump
+ _output->print_cr("---------------------------------------------");
}
void PrintBFS::print_node(const Node* n) {
if (_dump_only) {
- n->dump("\n", false, tty, &_dcc);
+ n->dump("\n", false, _output, &_dcc);
return;
}
- tty->print("%4d", find_info(n)->distance());// distance
+ _output->print("%4d", find_info(n)->distance());// distance
if (_all_paths) {
Info* info = find_info(n);
int apd = info->distance() + info->distance_from_target();
- tty->print("%4d", apd); // all paths distance
+ _output->print("%4d", apd); // all paths distance
}
if (_print_blocks) {
- print_node_block(n); // block
+ print_node_block(n); // block
}
if (_print_old) {
- print_node_idx(old_node(n)); // old node
+ print_node_idx(old_node(n)); // old node
}
- tty->print(" ");
- n->dump("\n", false, tty, &_dcc); // node dump
+ _output->print(" ");
+ n->dump("\n", false, _output, &_dcc); // node dump
}
//------------------------------dump_bfs--------------------------------------
@@ -2378,7 +2379,12 @@ void PrintBFS::print_node(const Node* n) {
// To find all options, run:
// find_node(0)->dump_bfs(0,0,"H")
void Node::dump_bfs(const int max_distance, Node* target, const char* options) const {
- PrintBFS bfs(this, max_distance, target, options);
+ dump_bfs(max_distance, target, options, tty);
+}
+
+// Used to dump to stream.
+void Node::dump_bfs(const int max_distance, Node* target, const char* options, outputStream* st) const {
+ PrintBFS bfs(this, max_distance, target, options, st);
bfs.run();
}
@@ -2522,7 +2528,7 @@ void Node::dump(const char* suffix, bool mark, outputStream* st, DumpConfig* dc)
}
if (C->clone_map().value(_idx) != 0) {
- C->clone_map().dump(_idx);
+ C->clone_map().dump(_idx, st);
}
// Dump node-specific info
dump_spec(st);
diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp
index dbe18b672239e..ca03ecd070a92 100644
--- a/src/hotspot/share/opto/node.hpp
+++ b/src/hotspot/share/opto/node.hpp
@@ -1215,7 +1215,8 @@ class Node {
public:
Node* find(int idx, bool only_ctrl = false); // Search the graph for the given idx.
Node* find_ctrl(int idx); // Search control ancestors for the given idx.
- void dump_bfs(const int max_distance, Node* target, const char* options) const; // Print BFS traversal
+ void dump_bfs(const int max_distance, Node* target, const char* options, outputStream* st) const;
+ void dump_bfs(const int max_distance, Node* target, const char* options) const; // directly to tty
void dump_bfs(const int max_distance) const; // dump_bfs(max_distance, nullptr, nullptr)
class DumpConfig {
public:
diff --git a/src/hotspot/share/opto/output.cpp b/src/hotspot/share/opto/output.cpp
index 524e38fcf72dd..998be5e122589 100644
--- a/src/hotspot/share/opto/output.cpp
+++ b/src/hotspot/share/opto/output.cpp
@@ -2068,8 +2068,12 @@ void PhaseOutput::ScheduleAndBundle() {
#ifndef PRODUCT
if (C->trace_opto_output()) {
- tty->print("\n---- After ScheduleAndBundle ----\n");
- print_scheduling();
+ // Buffer and print all at once
+ ResourceMark rm;
+ stringStream ss;
+ ss.print("\n---- After ScheduleAndBundle ----\n");
+ print_scheduling(&ss);
+ tty->print("%s", ss.as_string());
}
#endif
}
@@ -2077,14 +2081,18 @@ void PhaseOutput::ScheduleAndBundle() {
#ifndef PRODUCT
// Separated out so that it can be called directly from debugger
void PhaseOutput::print_scheduling() {
+ print_scheduling(tty);
+}
+
+void PhaseOutput::print_scheduling(outputStream* output_stream) {
for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
- tty->print("\nBB#%03d:\n", i);
+ output_stream->print("\nBB#%03d:\n", i);
Block* block = C->cfg()->get_block(i);
for (uint j = 0; j < block->number_of_nodes(); j++) {
Node* n = block->get_node(j);
OptoReg::Name reg = C->regalloc()->get_reg_first(n);
- tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
- n->dump();
+ output_stream->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
+ n->dump("\n", false, output_stream);
}
}
}
diff --git a/src/hotspot/share/opto/output.hpp b/src/hotspot/share/opto/output.hpp
index 77c49b33bfeae..d74751ddbf601 100644
--- a/src/hotspot/share/opto/output.hpp
+++ b/src/hotspot/share/opto/output.hpp
@@ -222,7 +222,8 @@ class PhaseOutput : public Phase {
void BuildOopMaps();
#ifndef PRODUCT
- void print_scheduling();
+ void print_scheduling(outputStream* output_stream);
+ void print_scheduling(); // to tty for debugging
static void print_statistics();
#endif
};
diff --git a/src/hotspot/share/opto/parse1.cpp b/src/hotspot/share/opto/parse1.cpp
index f179d3ba88df2..e214268112e41 100644
--- a/src/hotspot/share/opto/parse1.cpp
+++ b/src/hotspot/share/opto/parse1.cpp
@@ -1575,6 +1575,7 @@ void Parse::do_one_block() {
#endif //ASSERT
do_one_bytecode();
+ if (failing()) return;
assert(!have_se || stopped() || failing() || (sp() - pre_bc_sp) == depth,
"incorrect depth prediction: sp=%d, pre_bc_sp=%d, depth=%d", sp(), pre_bc_sp, depth);
diff --git a/src/hotspot/share/opto/subnode.cpp b/src/hotspot/share/opto/subnode.cpp
index 30f71ea4e9715..6f43a3769f76c 100644
--- a/src/hotspot/share/opto/subnode.cpp
+++ b/src/hotspot/share/opto/subnode.cpp
@@ -152,6 +152,16 @@ static bool ok_to_convert(Node* inc, Node* var) {
return !(is_cloop_increment(inc) || var->is_cloop_ind_var());
}
+static bool is_cloop_condition(BoolNode* bol) {
+ for (DUIterator_Fast imax, i = bol->fast_outs(imax); i < imax; i++) {
+ Node* out = bol->fast_out(i);
+ if (out->is_BaseCountedLoopEnd()) {
+ return true;
+ }
+ }
+ return false;
+}
+
//------------------------------Ideal------------------------------------------
Node *SubINode::Ideal(PhaseGVN *phase, bool can_reshape){
Node *in1 = in(1);
@@ -1556,13 +1566,15 @@ Node *BoolNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// and "cmp (add X min_jint) c" into "cmpu X (c + min_jint)"
if (cop == Op_CmpI &&
cmp1_op == Op_AddI &&
- phase->type(cmp1->in(2)) == TypeInt::MIN) {
+ phase->type(cmp1->in(2)) == TypeInt::MIN &&
+ !is_cloop_condition(this)) {
if (cmp2_op == Op_ConI) {
Node* ncmp2 = phase->intcon(java_add(cmp2->get_int(), min_jint));
Node* ncmp = phase->transform(new CmpUNode(cmp1->in(1), ncmp2));
return new BoolNode(ncmp, _test._test);
} else if (cmp2_op == Op_AddI &&
- phase->type(cmp2->in(2)) == TypeInt::MIN) {
+ phase->type(cmp2->in(2)) == TypeInt::MIN &&
+ !is_cloop_condition(this)) {
Node* ncmp = phase->transform(new CmpUNode(cmp1->in(1), cmp2->in(1)));
return new BoolNode(ncmp, _test._test);
}
@@ -1572,13 +1584,15 @@ Node *BoolNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// and "cmp (add X min_jlong) c" into "cmpu X (c + min_jlong)"
if (cop == Op_CmpL &&
cmp1_op == Op_AddL &&
- phase->type(cmp1->in(2)) == TypeLong::MIN) {
+ phase->type(cmp1->in(2)) == TypeLong::MIN &&
+ !is_cloop_condition(this)) {
if (cmp2_op == Op_ConL) {
Node* ncmp2 = phase->longcon(java_add(cmp2->get_long(), min_jlong));
Node* ncmp = phase->transform(new CmpULNode(cmp1->in(1), ncmp2));
return new BoolNode(ncmp, _test._test);
} else if (cmp2_op == Op_AddL &&
- phase->type(cmp2->in(2)) == TypeLong::MIN) {
+ phase->type(cmp2->in(2)) == TypeLong::MIN &&
+ !is_cloop_condition(this)) {
Node* ncmp = phase->transform(new CmpULNode(cmp1->in(1), cmp2->in(1)));
return new BoolNode(ncmp, _test._test);
}
diff --git a/src/hotspot/share/opto/superword.cpp b/src/hotspot/share/opto/superword.cpp
index 6dcea46b4be85..6a641a44b3296 100644
--- a/src/hotspot/share/opto/superword.cpp
+++ b/src/hotspot/share/opto/superword.cpp
@@ -1403,6 +1403,7 @@ bool SuperWord::isomorphic(Node* s1, Node* s2) {
if (s1->Opcode() != s2->Opcode()) return false;
if (s1->req() != s2->req()) return false;
if (!same_velt_type(s1, s2)) return false;
+ if (s1->is_Bool() && s1->as_Bool()->_test._test != s2->as_Bool()->_test._test) return false;
Node* s1_ctrl = s1->in(0);
Node* s2_ctrl = s2->in(0);
// If the control nodes are equivalent, no further checks are required to test for isomorphism.
@@ -2701,18 +2702,15 @@ bool SuperWord::output() {
if (n->is_Load()) {
Node* ctl = n->in(MemNode::Control);
Node* mem = first->in(MemNode::Memory);
- SWPointer p1(n->as_Mem(), this, nullptr, false);
- // Identify the memory dependency for the new loadVector node by
- // walking up through memory chain.
- // This is done to give flexibility to the new loadVector node so that
- // it can move above independent storeVector nodes.
+ // Set the memory dependency of the LoadVector as early as possible.
+ // Walk up the memory chain, and ignore any StoreVector that provably
+ // does not have any memory dependency.
while (mem->is_StoreVector()) {
- SWPointer p2(mem->as_Mem(), this, nullptr, false);
- int cmp = p1.cmp(p2);
- if (SWPointer::not_equal(cmp) || !SWPointer::comparable(cmp)) {
- mem = mem->in(MemNode::Memory);
+ SWPointer p_store(mem->as_Mem(), this, nullptr, false);
+ if (p_store.overlap_possible_with_any_in(p)) {
+ break;
} else {
- break; // dependent memory
+ mem = mem->in(MemNode::Memory);
}
}
Node* adr = first->in(MemNode::Address);
@@ -2799,6 +2797,14 @@ bool SuperWord::output() {
Node_List* p_bol = my_pack(bol);
assert(p_bol != nullptr, "CMove must have matching Bool pack");
+#ifdef ASSERT
+ for (uint j = 0; j < p_bol->size(); j++) {
+ Node* m = p_bol->at(j);
+ assert(m->as_Bool()->_test._test == bol_test,
+ "all bool nodes must have same test");
+ }
+#endif
+
CmpNode* cmp = bol->in(1)->as_Cmp();
assert(cmp != nullptr, "must have cmp above CMove");
Node_List* p_cmp = my_pack(cmp);
diff --git a/src/hotspot/share/opto/superword.hpp b/src/hotspot/share/opto/superword.hpp
index 6e2689b19ad8e..eca3836845ad1 100644
--- a/src/hotspot/share/opto/superword.hpp
+++ b/src/hotspot/share/opto/superword.hpp
@@ -722,6 +722,20 @@ class SWPointer : public ArenaObj {
}
}
+ bool overlap_possible_with_any_in(Node_List* p) {
+ for (uint k = 0; k < p->size(); k++) {
+ MemNode* mem = p->at(k)->as_Mem();
+ SWPointer p_mem(mem, _slp, nullptr, false);
+ // Only if we know that we have Less or Greater can we
+ // be sure that there can never be an overlap between
+ // the two memory regions.
+ if (!not_equal(p_mem)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
bool not_equal(SWPointer& q) { return not_equal(cmp(q)); }
bool equal(SWPointer& q) { return equal(cmp(q)); }
bool comparable(SWPointer& q) { return comparable(cmp(q)); }
diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp
index ba63134f01e5e..4e0bd43be5cb1 100644
--- a/src/hotspot/share/opto/type.cpp
+++ b/src/hotspot/share/opto/type.cpp
@@ -61,6 +61,7 @@ const Type::TypeInfo Type::_type_info[Type::lastype] = {
{ Bad, T_NARROWKLASS,"narrowklass:", false, Op_RegN, relocInfo::none }, // NarrowKlass
{ Bad, T_ILLEGAL, "tuple:", false, Node::NotAMachineReg, relocInfo::none }, // Tuple
{ Bad, T_ARRAY, "array:", false, Node::NotAMachineReg, relocInfo::none }, // Array
+ { Bad, T_ARRAY, "interfaces:", false, Node::NotAMachineReg, relocInfo::none }, // Interfaces
#if defined(PPC64)
{ Bad, T_ILLEGAL, "vectormask:", false, Op_RegVectMask, relocInfo::none }, // VectorMask.
@@ -120,8 +121,8 @@ const Type* Type:: _zero_type[T_CONFLICT+1];
// Map basic types to array-body alias types.
const TypeAryPtr* TypeAryPtr::_array_body_type[T_CONFLICT+1];
-const TypePtr::InterfaceSet* TypeAryPtr::_array_interfaces = nullptr;
-const TypePtr::InterfaceSet* TypeAryKlassPtr::_array_interfaces = nullptr;
+const TypeInterfaces* TypeAryPtr::_array_interfaces = nullptr;
+const TypeInterfaces* TypeAryKlassPtr::_array_interfaces = nullptr;
//=============================================================================
// Convenience common pre-built types.
@@ -571,7 +572,7 @@ void Type::Initialize_shared(Compile* current) {
GrowableArray<ciInstanceKlass*> array_interfaces;
array_interfaces.push(current->env()->Cloneable_klass());
array_interfaces.push(current->env()->Serializable_klass());
- TypeAryPtr::_array_interfaces = new TypePtr::InterfaceSet(&array_interfaces);
+ TypeAryPtr::_array_interfaces = TypeInterfaces::make(&array_interfaces);
TypeAryKlassPtr::_array_interfaces = TypeAryPtr::_array_interfaces;
TypeAryPtr::RANGE = TypeAryPtr::make( TypePtr::BotPTR, TypeAry::make(Type::BOTTOM,TypeInt::POS), nullptr /* current->env()->Object_klass() */, false, arrayOopDesc::length_offset_in_bytes());
@@ -3252,14 +3253,14 @@ void TypeRawPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
// Convenience common pre-built type.
const TypeOopPtr *TypeOopPtr::BOTTOM;
-TypePtr::InterfaceSet::InterfaceSet()
- : _list(Compile::current()->type_arena(), 0, 0, nullptr),
+TypeInterfaces::TypeInterfaces()
+ : Type(Interfaces), _list(Compile::current()->type_arena(), 0, 0, nullptr),
_hash(0), _exact_klass(nullptr) {
DEBUG_ONLY(_initialized = true);
}
-TypePtr::InterfaceSet::InterfaceSet(GrowableArray* interfaces)
- : _list(Compile::current()->type_arena(), interfaces->length(), 0, nullptr),
+TypeInterfaces::TypeInterfaces(GrowableArray* interfaces)
+ : Type(Interfaces), _list(Compile::current()->type_arena(), interfaces->length(), 0, nullptr),
_hash(0), _exact_klass(nullptr) {
for (int i = 0; i < interfaces->length(); i++) {
add(interfaces->at(i));
@@ -3267,13 +3268,18 @@ TypePtr::InterfaceSet::InterfaceSet(GrowableArray* interfaces)
initialize();
}
-void TypePtr::InterfaceSet::initialize() {
+const TypeInterfaces* TypeInterfaces::make(GrowableArray<ciInstanceKlass*>* interfaces) {
+ TypeInterfaces* result = (interfaces == nullptr) ? new TypeInterfaces() : new TypeInterfaces(interfaces);
+ return (const TypeInterfaces*)result->hashcons();
+}
+
+void TypeInterfaces::initialize() {
compute_hash();
compute_exact_klass();
DEBUG_ONLY(_initialized = true;)
}
-int TypePtr::InterfaceSet::compare(ciKlass* const& k1, ciKlass* const& k2) {
+int TypeInterfaces::compare(ciInstanceKlass* const& k1, ciInstanceKlass* const& k2) {
if ((intptr_t)k1 < (intptr_t)k2) {
return -1;
} else if ((intptr_t)k1 > (intptr_t)k2) {
@@ -3282,24 +3288,20 @@ int TypePtr::InterfaceSet::compare(ciKlass* const& k1, ciKlass* const& k2) {
return 0;
}
-void TypePtr::InterfaceSet::add(ciKlass* interface) {
+void TypeInterfaces::add(ciInstanceKlass* interface) {
assert(interface->is_interface(), "for interfaces only");
_list.insert_sorted(interface);
verify();
}
-void TypePtr::InterfaceSet::raw_add(ciKlass* interface) {
- assert(interface->is_interface(), "for interfaces only");
- _list.push(interface);
-}
-
-bool TypePtr::InterfaceSet::eq(const InterfaceSet& other) const {
- if (_list.length() != other._list.length()) {
+bool TypeInterfaces::eq(const Type* t) const {
+ const TypeInterfaces* other = (const TypeInterfaces*)t;
+ if (_list.length() != other->_list.length()) {
return false;
}
for (int i = 0; i < _list.length(); i++) {
ciKlass* k1 = _list.at(i);
- ciKlass* k2 = other._list.at(i);
+ ciKlass* k2 = other->_list.at(i);
if (!k1->equals(k2)) {
return false;
}
@@ -3307,15 +3309,15 @@ bool TypePtr::InterfaceSet::eq(const InterfaceSet& other) const {
return true;
}
-bool TypePtr::InterfaceSet::eq(ciInstanceKlass* k) const {
+bool TypeInterfaces::eq(ciInstanceKlass* k) const {
assert(k->is_loaded(), "should be loaded");
- GrowableArray<ciInstanceKlass*>* interfaces = k->as_instance_klass()->transitive_interfaces();
+ GrowableArray<ciInstanceKlass*>* interfaces = k->transitive_interfaces();
if (_list.length() != interfaces->length()) {
return false;
}
for (int i = 0; i < interfaces->length(); i++) {
bool found = false;
- _list.find_sorted(interfaces->at(i), found);
+ _list.find_sorted(interfaces->at(i), found);
if (!found) {
return false;
}
@@ -3324,12 +3326,16 @@ bool TypePtr::InterfaceSet::eq(ciInstanceKlass* k) const {
}
-uint TypePtr::InterfaceSet::hash() const {
+uint TypeInterfaces::hash() const {
assert(_initialized, "must be");
return _hash;
}
-void TypePtr::InterfaceSet::compute_hash() {
+const Type* TypeInterfaces::xdual() const {
+ return this;
+}
+
+void TypeInterfaces::compute_hash() {
uint hash = 0;
for (int i = 0; i < _list.length(); i++) {
ciKlass* k = _list.at(i);
@@ -3338,17 +3344,17 @@ void TypePtr::InterfaceSet::compute_hash() {
_hash = hash;
}
-static int compare_interfaces(ciKlass** k1, ciKlass** k2) {
+static int compare_interfaces(ciInstanceKlass** k1, ciInstanceKlass** k2) {
return (int)((*k1)->ident() - (*k2)->ident());
}
-void TypePtr::InterfaceSet::dump(outputStream* st) const {
+void TypeInterfaces::dump(outputStream* st) const {
if (_list.length() == 0) {
return;
}
ResourceMark rm;
st->print(" (");
- GrowableArray<ciKlass*> interfaces;
+ GrowableArray<ciInstanceKlass*> interfaces;
interfaces.appendAll(&_list);
// Sort the interfaces so they are listed in the same order from one run to the other of the same compilation
interfaces.sort(compare_interfaces);
@@ -3363,110 +3369,110 @@ void TypePtr::InterfaceSet::dump(outputStream* st) const {
}
#ifdef ASSERT
-void TypePtr::InterfaceSet::verify() const {
+void TypeInterfaces::verify() const {
for (int i = 1; i < _list.length(); i++) {
- ciKlass* k1 = _list.at(i-1);
- ciKlass* k2 = _list.at(i);
+ ciInstanceKlass* k1 = _list.at(i-1);
+ ciInstanceKlass* k2 = _list.at(i);
assert(compare(k2, k1) > 0, "should be ordered");
assert(k1 != k2, "no duplicate");
}
}
#endif
-TypePtr::InterfaceSet TypeOopPtr::InterfaceSet::union_with(const InterfaceSet& other) const {
- InterfaceSet result;
+const TypeInterfaces* TypeInterfaces::union_with(const TypeInterfaces* other) const {
+ GrowableArray<ciInstanceKlass*> result_list;
int i = 0;
int j = 0;
- while (i < _list.length() || j < other._list.length()) {
+ while (i < _list.length() || j < other->_list.length()) {
while (i < _list.length() &&
- (j >= other._list.length() ||
- compare(_list.at(i), other._list.at(j)) < 0)) {
- result.raw_add(_list.at(i));
+ (j >= other->_list.length() ||
+ compare(_list.at(i), other->_list.at(j)) < 0)) {
+ result_list.push(_list.at(i));
i++;
}
- while (j < other._list.length() &&
+ while (j < other->_list.length() &&
(i >= _list.length() ||
- compare(other._list.at(j), _list.at(i)) < 0)) {
- result.raw_add(other._list.at(j));
+ compare(other->_list.at(j), _list.at(i)) < 0)) {
+ result_list.push(other->_list.at(j));
j++;
}
if (i < _list.length() &&
- j < other._list.length() &&
- _list.at(i) == other._list.at(j)) {
- result.raw_add(_list.at(i));
+ j < other->_list.length() &&
+ _list.at(i) == other->_list.at(j)) {
+ result_list.push(_list.at(i));
i++;
j++;
}
}
- result.initialize();
+ const TypeInterfaces* result = TypeInterfaces::make(&result_list);
#ifdef ASSERT
- result.verify();
+ result->verify();
for (int i = 0; i < _list.length(); i++) {
- assert(result._list.contains(_list.at(i)), "missing");
+ assert(result->_list.contains(_list.at(i)), "missing");
}
- for (int i = 0; i < other._list.length(); i++) {
- assert(result._list.contains(other._list.at(i)), "missing");
+ for (int i = 0; i < other->_list.length(); i++) {
+ assert(result->_list.contains(other->_list.at(i)), "missing");
}
- for (int i = 0; i < result._list.length(); i++) {
- assert(_list.contains(result._list.at(i)) || other._list.contains(result._list.at(i)), "missing");
+ for (int i = 0; i < result->_list.length(); i++) {
+ assert(_list.contains(result->_list.at(i)) || other->_list.contains(result->_list.at(i)), "missing");
}
#endif
return result;
}
-TypePtr::InterfaceSet TypeOopPtr::InterfaceSet::intersection_with(const InterfaceSet& other) const {
- InterfaceSet result;
+const TypeInterfaces* TypeInterfaces::intersection_with(const TypeInterfaces* other) const {
+ GrowableArray<ciInstanceKlass*> result_list;
int i = 0;
int j = 0;
- while (i < _list.length() || j < other._list.length()) {
+ while (i < _list.length() || j < other->_list.length()) {
while (i < _list.length() &&
- (j >= other._list.length() ||
- compare(_list.at(i), other._list.at(j)) < 0)) {
+ (j >= other->_list.length() ||
+ compare(_list.at(i), other->_list.at(j)) < 0)) {
i++;
}
- while (j < other._list.length() &&
+ while (j < other->_list.length() &&
(i >= _list.length() ||
- compare(other._list.at(j), _list.at(i)) < 0)) {
+ compare(other->_list.at(j), _list.at(i)) < 0)) {
j++;
}
if (i < _list.length() &&
- j < other._list.length() &&
- _list.at(i) == other._list.at(j)) {
- result.raw_add(_list.at(i));
+ j < other->_list.length() &&
+ _list.at(i) == other->_list.at(j)) {
+ result_list.push(_list.at(i));
i++;
j++;
}
}
- result.initialize();
+ const TypeInterfaces* result = TypeInterfaces::make(&result_list);
#ifdef ASSERT
- result.verify();
+ result->verify();
for (int i = 0; i < _list.length(); i++) {
- assert(!other._list.contains(_list.at(i)) || result._list.contains(_list.at(i)), "missing");
+ assert(!other->_list.contains(_list.at(i)) || result->_list.contains(_list.at(i)), "missing");
}
- for (int i = 0; i < other._list.length(); i++) {
- assert(!_list.contains(other._list.at(i)) || result._list.contains(other._list.at(i)), "missing");
+ for (int i = 0; i < other->_list.length(); i++) {
+ assert(!_list.contains(other->_list.at(i)) || result->_list.contains(other->_list.at(i)), "missing");
}
- for (int i = 0; i < result._list.length(); i++) {
- assert(_list.contains(result._list.at(i)) && other._list.contains(result._list.at(i)), "missing");
+ for (int i = 0; i < result->_list.length(); i++) {
+ assert(_list.contains(result->_list.at(i)) && other->_list.contains(result->_list.at(i)), "missing");
}
#endif
return result;
}
// Is there a single ciKlass* that can represent the interface set?
-ciKlass* TypePtr::InterfaceSet::exact_klass() const {
+ciInstanceKlass* TypeInterfaces::exact_klass() const {
assert(_initialized, "must be");
return _exact_klass;
}
-void TypePtr::InterfaceSet::compute_exact_klass() {
+void TypeInterfaces::compute_exact_klass() {
if (_list.length() == 0) {
_exact_klass = nullptr;
return;
}
- ciKlass* res = nullptr;
+ ciInstanceKlass* res = nullptr;
for (int i = 0; i < _list.length(); i++) {
- ciInstanceKlass* interface = _list.at(i)->as_instance_klass();
+ ciInstanceKlass* interface = _list.at(i);
if (eq(interface)) {
assert(res == nullptr, "");
res = interface;
@@ -3476,7 +3482,7 @@ void TypePtr::InterfaceSet::compute_exact_klass() {
}
#ifdef ASSERT
-void TypePtr::InterfaceSet::verify_is_loaded() const {
+void TypeInterfaces::verify_is_loaded() const {
for (int i = 0; i < _list.length(); i++) {
ciKlass* interface = _list.at(i);
assert(interface->is_loaded(), "Interface not loaded");
@@ -3484,8 +3490,19 @@ void TypePtr::InterfaceSet::verify_is_loaded() const {
}
#endif
+// Can't be implemented because there's no way to know if the type is above or below the center line.
+const Type* TypeInterfaces::xmeet(const Type* t) const {
+ ShouldNotReachHere();
+ return Type::xmeet(t);
+}
+
+bool TypeInterfaces::singleton(void) const {
+ ShouldNotReachHere();
+ return Type::singleton();
+}
+
//------------------------------TypeOopPtr-------------------------------------
-TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, const InterfaceSet& interfaces, bool xk, ciObject* o, int offset,
+TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, const TypeInterfaces* interfaces, bool xk, ciObject* o, int offset,
int instance_id, const TypePtr* speculative, int inline_depth)
: TypePtr(t, ptr, offset, speculative, inline_depth),
_const_oop(o), _klass(k),
@@ -3497,7 +3514,7 @@ TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, const InterfaceSet& interfa
_instance_id(instance_id) {
#ifdef ASSERT
if (klass() != nullptr && klass()->is_loaded()) {
- interfaces.verify_is_loaded();
+ interfaces->verify_is_loaded();
}
#endif
if (Compile::current()->eliminate_boxing() && (t == InstPtr) &&
@@ -3574,7 +3591,8 @@ const TypeOopPtr *TypeOopPtr::make(PTR ptr, int offset, int instance_id,
ciKlass* k = Compile::current()->env()->Object_klass();
bool xk = false;
ciObject* o = nullptr;
- return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, InterfaceSet(), xk, o, offset, instance_id, speculative, inline_depth))->hashcons();
+ const TypeInterfaces* interfaces = TypeInterfaces::make();
+ return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, interfaces, xk, o, offset, instance_id, speculative, inline_depth))->hashcons();
}
@@ -3719,7 +3737,7 @@ const TypeOopPtr* TypeOopPtr::make_from_klass_common(ciKlass* klass, bool klass_
klass_is_exact = true;
}
}
- const TypePtr::InterfaceSet interfaces = TypePtr::interfaces(klass, true, true, false, interface_handling);
+ const TypeInterfaces* interfaces = TypePtr::interfaces(klass, true, true, false, interface_handling);
return TypeInstPtr::make(TypePtr::BotPTR, klass, interfaces, klass_is_exact, nullptr, 0);
} else if (klass->is_obj_array_klass()) {
// Element is an object array. Recursively call ourself.
@@ -3952,15 +3970,15 @@ int TypeOopPtr::dual_instance_id( ) const {
}
-TypePtr::InterfaceSet TypeOopPtr::meet_interfaces(const TypeOopPtr* other) const {
+const TypeInterfaces* TypeOopPtr::meet_interfaces(const TypeOopPtr* other) const {
if (above_centerline(_ptr) && above_centerline(other->_ptr)) {
- return _interfaces.union_with(other->_interfaces);
+ return _interfaces->union_with(other->_interfaces);
} else if (above_centerline(_ptr) && !above_centerline(other->_ptr)) {
return other->_interfaces;
} else if (above_centerline(other->_ptr) && !above_centerline(_ptr)) {
return _interfaces;
}
- return _interfaces.intersection_with(other->_interfaces);
+ return _interfaces->intersection_with(other->_interfaces);
}
/**
@@ -3989,20 +4007,20 @@ const TypeInstPtr *TypeInstPtr::KLASS;
// Is there a single ciKlass* that can represent that type?
ciKlass* TypeInstPtr::exact_klass_helper() const {
- if (_interfaces.empty()) {
+ if (_interfaces->empty()) {
return _klass;
}
if (_klass != ciEnv::current()->Object_klass()) {
- if (_interfaces.eq(_klass->as_instance_klass())) {
+ if (_interfaces->eq(_klass->as_instance_klass())) {
return _klass;
}
return nullptr;
}
- return _interfaces.exact_klass();
+ return _interfaces->exact_klass();
}
//------------------------------TypeInstPtr-------------------------------------
-TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, const InterfaceSet& interfaces, bool xk, ciObject* o, int off,
+TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, const TypeInterfaces* interfaces, bool xk, ciObject* o, int off,
int instance_id, const TypePtr* speculative, int inline_depth)
: TypeOopPtr(InstPtr, ptr, k, interfaces, xk, o, off, instance_id, speculative, inline_depth) {
assert(k == nullptr || !k->is_loaded() || !k->is_interface(), "no interface here");
@@ -4014,7 +4032,7 @@ TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, const InterfaceSet& interfaces, bo
//------------------------------make-------------------------------------------
const TypeInstPtr *TypeInstPtr::make(PTR ptr,
ciKlass* k,
- const InterfaceSet& interfaces,
+ const TypeInterfaces* interfaces,
bool xk,
ciObject* o,
int offset,
@@ -4046,17 +4064,17 @@ const TypeInstPtr *TypeInstPtr::make(PTR ptr,
return result;
}
-TypePtr::InterfaceSet TypePtr::interfaces(ciKlass*& k, bool klass, bool interface, bool array, InterfaceHandling interface_handling) {
+const TypeInterfaces* TypePtr::interfaces(ciKlass*& k, bool klass, bool interface, bool array, InterfaceHandling interface_handling) {
if (k->is_instance_klass()) {
if (k->is_loaded()) {
if (k->is_interface() && interface_handling == ignore_interfaces) {
assert(interface, "no interface expected");
k = ciEnv::current()->Object_klass();
- InterfaceSet interfaces;
+ const TypeInterfaces* interfaces = TypeInterfaces::make();
return interfaces;
}
GrowableArray<ciInstanceKlass*>* k_interfaces = k->as_instance_klass()->transitive_interfaces();
- InterfaceSet interfaces(k_interfaces);
+ const TypeInterfaces* interfaces = TypeInterfaces::make(k_interfaces);
if (k->is_interface()) {
assert(interface, "no interface expected");
k = ciEnv::current()->Object_klass();
@@ -4065,7 +4083,7 @@ TypePtr::InterfaceSet TypePtr::interfaces(ciKlass*& k, bool klass, bool interfac
}
return interfaces;
}
- InterfaceSet interfaces;
+ const TypeInterfaces* interfaces = TypeInterfaces::make();
return interfaces;
}
assert(array, "no array expected");
@@ -4076,7 +4094,7 @@ TypePtr::InterfaceSet TypePtr::interfaces(ciKlass*& k, bool klass, bool interfac
k = ciObjArrayKlass::make(ciEnv::current()->Object_klass(), k->as_array_klass()->dimension());
}
}
- return *TypeAryPtr::_array_interfaces;
+ return TypeAryPtr::_array_interfaces;
}
/**
@@ -4130,7 +4148,7 @@ const TypeInstPtr* TypeInstPtr::cast_to_instance_id(int instance_id) const {
//------------------------------xmeet_unloaded---------------------------------
// Compute the MEET of two InstPtrs when at least one is unloaded.
// Assume classes are different since called after check for same name/class-loader
-const TypeInstPtr *TypeInstPtr::xmeet_unloaded(const TypeInstPtr *tinst, const InterfaceSet& interfaces) const {
+const TypeInstPtr *TypeInstPtr::xmeet_unloaded(const TypeInstPtr *tinst, const TypeInterfaces* interfaces) const {
int off = meet_offset(tinst->offset());
PTR ptr = meet_ptr(tinst->ptr());
int instance_id = meet_instance_id(tinst->instance_id());
@@ -4287,7 +4305,7 @@ const Type *TypeInstPtr::xmeet_helper(const Type *t) const {
int instance_id = meet_instance_id(tinst->instance_id());
const TypePtr* speculative = xmeet_speculative(tinst);
int depth = meet_inline_depth(tinst->inline_depth());
- InterfaceSet interfaces = meet_interfaces(tinst);
+ const TypeInterfaces* interfaces = meet_interfaces(tinst);
ciKlass* tinst_klass = tinst->klass();
ciKlass* this_klass = klass();
@@ -4347,16 +4365,16 @@ const Type *TypeInstPtr::xmeet_helper(const Type *t) const {
return this; // Return the double constant
}
-template<class T> TypePtr::MeetResult TypePtr::meet_instptr(PTR& ptr, InterfaceSet& interfaces, const T* this_type, const T* other_type,
- ciKlass*& res_klass, bool& res_xk) {
+template<class T> TypePtr::MeetResult TypePtr::meet_instptr(PTR& ptr, const TypeInterfaces*& interfaces, const T* this_type, const T* other_type,
+ ciKlass*& res_klass, bool& res_xk) {
ciKlass* this_klass = this_type->klass();
ciKlass* other_klass = other_type->klass();
bool this_xk = this_type->klass_is_exact();
bool other_xk = other_type->klass_is_exact();
PTR this_ptr = this_type->ptr();
PTR other_ptr = other_type->ptr();
- InterfaceSet this_interfaces = this_type->interfaces();
- InterfaceSet other_interfaces = other_type->interfaces();
+ const TypeInterfaces* this_interfaces = this_type->interfaces();
+ const TypeInterfaces* other_interfaces = other_type->interfaces();
// Check for easy case; klasses are equal (and perhaps not loaded!)
// If we have constants, then we created oops so classes are loaded
// and we can handle the constants further down. This case handles
@@ -4441,7 +4459,7 @@ template TypePtr::MeetResult TypePtr::meet_instptr(PTR& ptr, InterfaceS
ptr = NotNull;
}
- interfaces = this_interfaces.intersection_with(other_interfaces);
+ interfaces = this_interfaces->intersection_with(other_interfaces);
// Now we find the LCA of Java classes
ciKlass* k = this_klass->least_common_ancestor(other_klass);
@@ -4477,14 +4495,14 @@ bool TypeInstPtr::eq( const Type *t ) const {
const TypeInstPtr *p = t->is_instptr();
return
klass()->equals(p->klass()) &&
- _interfaces.eq(p->_interfaces) &&
+ _interfaces->eq(p->_interfaces) &&
TypeOopPtr::eq(p); // Check sub-type stuff
}
//------------------------------hash-------------------------------------------
// Type-specific hashing function.
uint TypeInstPtr::hash(void) const {
- return klass()->hash() + TypeOopPtr::hash() + _interfaces.hash();
+ return klass()->hash() + TypeOopPtr::hash() + _interfaces->hash();
}
bool TypeInstPtr::is_java_subtype_of_helper(const TypeOopPtr* other, bool this_exact, bool other_exact) const {
@@ -4507,7 +4525,7 @@ bool TypeInstPtr::maybe_java_subtype_of_helper(const TypeOopPtr* other, bool thi
void TypeInstPtr::dump2(Dict &d, uint depth, outputStream* st) const {
// Print the name of the klass.
klass()->print_name_on(st);
- _interfaces.dump(st);
+ _interfaces->dump(st);
switch( _ptr ) {
case Constant:
@@ -4591,7 +4609,7 @@ const TypeKlassPtr* TypeInstPtr::as_klass_type(bool try_for_exact) const {
bool xk = klass_is_exact();
ciInstanceKlass* ik = klass()->as_instance_klass();
if (try_for_exact && !xk && !ik->has_subklass() && !ik->is_final()) {
- if (_interfaces.eq(ik)) {
+ if (_interfaces->eq(ik)) {
Compile* C = Compile::current();
Dependencies* deps = C->dependencies();
deps->assert_leaf_type(ik);
@@ -4608,12 +4626,12 @@ template bool TypePtr::is_meet_subtype_of_helper_for_instan
return false;
}
- if (other->klass() == ciEnv::current()->Object_klass() && other->_interfaces.empty()) {
+ if (other->klass() == ciEnv::current()->Object_klass() && other->_interfaces->empty()) {
return true;
}
return this_one->klass()->is_subtype_of(other->klass()) &&
- (!this_xk || this_one->_interfaces.contains(other->_interfaces));
+ (!this_xk || this_one->_interfaces->contains(other->_interfaces));
}
@@ -4623,12 +4641,12 @@ bool TypeInstPtr::is_meet_subtype_of_helper(const TypeOopPtr *other, bool this_x
template <class T1, class T2> bool TypePtr::is_meet_subtype_of_helper_for_array(const T1* this_one, const T2* other, bool this_xk, bool other_xk) {
static_assert(std::is_base_of<T2, T1>::value, "");
- if (other->klass() == ciEnv::current()->Object_klass() && other->_interfaces.empty()) {
+ if (other->klass() == ciEnv::current()->Object_klass() && other->_interfaces->empty()) {
return true;
}
if (this_one->is_instance_type(other)) {
- return other->klass() == ciEnv::current()->Object_klass() && this_one->_interfaces.contains(other->_interfaces);
+ return other->klass() == ciEnv::current()->Object_klass() && this_one->_interfaces->contains(other->_interfaces);
}
int dummy;
@@ -4645,7 +4663,7 @@ template bool TypePtr::is_meet_subtype_of_helper_for_array
}
if (other_elem == nullptr && this_elem == nullptr) {
- return this_one->_klass->is_subtype_of(other->_klass);
+ return this_one->klass()->is_subtype_of(other->klass());
}
return false;
@@ -4749,7 +4767,7 @@ const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size) const {
jint hi = size->_hi;
jint lo = size->_lo;
jint min_lo = 0;
- jint max_hi = max_array_length(elem()->basic_type());
+ jint max_hi = max_array_length(elem()->array_element_basic_type());
//if (index_not_size) --max_hi; // type of a valid array index, FTR
bool chg = false;
if (lo < min_lo) {
@@ -4971,9 +4989,9 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const {
int instance_id = meet_instance_id(tp->instance_id());
const TypePtr* speculative = xmeet_speculative(tp);
int depth = meet_inline_depth(tp->inline_depth());
- InterfaceSet interfaces = meet_interfaces(tp);
- InterfaceSet tp_interfaces = tp->_interfaces;
- InterfaceSet this_interfaces = _interfaces;
+ const TypeInterfaces* interfaces = meet_interfaces(tp);
+ const TypeInterfaces* tp_interfaces = tp->_interfaces;
+ const TypeInterfaces* this_interfaces = _interfaces;
switch (ptr) {
case TopPTR:
@@ -4981,13 +4999,13 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const {
// For instances when a subclass meets a superclass we fall
// below the centerline when the superclass is exact. We need to
// do the same here.
- if (tp->klass()->equals(ciEnv::current()->Object_klass()) && this_interfaces.contains(tp_interfaces) && !tp->klass_is_exact()) {
+ if (tp->klass()->equals(ciEnv::current()->Object_klass()) && this_interfaces->contains(tp_interfaces) && !tp->klass_is_exact()) {
return TypeAryPtr::make(ptr, _ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth);
} else {
// cannot subclass, so the meet has to fall badly below the centerline
ptr = NotNull;
instance_id = InstanceBot;
- interfaces = this_interfaces.intersection_with(tp_interfaces);
+ interfaces = this_interfaces->intersection_with(tp_interfaces);
return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), interfaces, false, nullptr,offset, instance_id, speculative, depth);
}
case Constant:
@@ -5000,7 +5018,7 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const {
// For instances when a subclass meets a superclass we fall
// below the centerline when the superclass is exact. We need
// to do the same here.
- if (tp->klass()->equals(ciEnv::current()->Object_klass()) && this_interfaces.contains(tp_interfaces) && !tp->klass_is_exact()) {
+ if (tp->klass()->equals(ciEnv::current()->Object_klass()) && this_interfaces->contains(tp_interfaces) && !tp->klass_is_exact()) {
// that is, my array type is a subtype of 'tp' klass
return make(ptr, (ptr == Constant ? const_oop() : nullptr),
_ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth);
@@ -5014,7 +5032,7 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const {
if (instance_id > 0) {
instance_id = InstanceBot;
}
- interfaces = this_interfaces.intersection_with(tp_interfaces);
+ interfaces = this_interfaces->intersection_with(tp_interfaces);
return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), interfaces, false, nullptr, offset, instance_id, speculative, depth);
default: typerr(t);
}
@@ -5128,7 +5146,7 @@ const Type *TypeAryPtr::xdual() const {
#ifndef PRODUCT
void TypeAryPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
_ary->dump2(d,depth,st);
- _interfaces.dump(st);
+ _interfaces->dump(st);
switch( _ptr ) {
case Constant:
@@ -5578,7 +5596,7 @@ const TypeKlassPtr* TypeKlassPtr::make(ciKlass *klass, InterfaceHandling interfa
const TypeKlassPtr* TypeKlassPtr::make(PTR ptr, ciKlass* klass, int offset, InterfaceHandling interface_handling) {
if (klass->is_instance_klass()) {
- const InterfaceSet interfaces = TypePtr::interfaces(klass, true, true, false, interface_handling);
+ const TypeInterfaces* interfaces = TypePtr::interfaces(klass, true, true, false, interface_handling);
return TypeInstKlassPtr::make(ptr, klass, interfaces, offset);
}
return TypeAryKlassPtr::make(ptr, klass, offset, interface_handling);
@@ -5586,7 +5604,7 @@ const TypeKlassPtr* TypeKlassPtr::make(PTR ptr, ciKlass* klass, int offset, Inte
//------------------------------TypeKlassPtr-----------------------------------
-TypeKlassPtr::TypeKlassPtr(TYPES t, PTR ptr, ciKlass* klass, const InterfaceSet& interfaces, int offset)
+TypeKlassPtr::TypeKlassPtr(TYPES t, PTR ptr, ciKlass* klass, const TypeInterfaces* interfaces, int offset)
: TypePtr(t, ptr, offset), _klass(klass), _interfaces(interfaces) {
assert(klass == nullptr || !klass->is_loaded() || (klass->is_instance_klass() && !klass->is_interface()) ||
klass->is_type_array_klass() || !klass->as_obj_array_klass()->base_element_klass()->is_interface(), "no interface here");
@@ -5595,16 +5613,16 @@ TypeKlassPtr::TypeKlassPtr(TYPES t, PTR ptr, ciKlass* klass, const InterfaceSet&
// Is there a single ciKlass* that can represent that type?
ciKlass* TypeKlassPtr::exact_klass_helper() const {
assert(_klass->is_instance_klass() && !_klass->is_interface(), "No interface");
- if (_interfaces.empty()) {
+ if (_interfaces->empty()) {
return _klass;
}
if (_klass != ciEnv::current()->Object_klass()) {
- if (_interfaces.eq(_klass->as_instance_klass())) {
+ if (_interfaces->eq(_klass->as_instance_klass())) {
return _klass;
}
return nullptr;
}
- return _interfaces.exact_klass();
+ return _interfaces->exact_klass();
}
//------------------------------eq---------------------------------------------
@@ -5612,14 +5630,14 @@ ciKlass* TypeKlassPtr::exact_klass_helper() const {
bool TypeKlassPtr::eq(const Type *t) const {
const TypeKlassPtr *p = t->is_klassptr();
return
- _interfaces.eq(p->_interfaces) &&
+ _interfaces->eq(p->_interfaces) &&
TypePtr::eq(p);
}
//------------------------------hash-------------------------------------------
// Type-specific hashing function.
uint TypeKlassPtr::hash(void) const {
- return TypePtr::hash() + _interfaces.hash();
+ return TypePtr::hash() + _interfaces->hash();
}
//------------------------------singleton--------------------------------------
@@ -5646,15 +5664,15 @@ const Type *TypeKlassPtr::filter_helper(const Type *kills, bool include_speculat
return ft;
}
-TypePtr::InterfaceSet TypeKlassPtr::meet_interfaces(const TypeKlassPtr* other) const {
+const TypeInterfaces* TypeKlassPtr::meet_interfaces(const TypeKlassPtr* other) const {
if (above_centerline(_ptr) && above_centerline(other->_ptr)) {
- return _interfaces.union_with(other->_interfaces);
+ return _interfaces->union_with(other->_interfaces);
} else if (above_centerline(_ptr) && !above_centerline(other->_ptr)) {
return other->_interfaces;
} else if (above_centerline(other->_ptr) && !above_centerline(_ptr)) {
return _interfaces;
}
- return _interfaces.intersection_with(other->_interfaces);
+ return _interfaces->intersection_with(other->_interfaces);
}
//------------------------------get_con----------------------------------------
@@ -5694,7 +5712,7 @@ void TypeKlassPtr::dump2(Dict & d, uint depth, outputStream *st) const {
} else {
ShouldNotReachHere();
}
- _interfaces.dump(st);
+ _interfaces->dump(st);
}
case BotPTR:
if (!WizardMode && !Verbose && _ptr != Constant) break;
@@ -5735,7 +5753,7 @@ uint TypeInstKlassPtr::hash(void) const {
return klass()->hash() + TypeKlassPtr::hash();
}
-const TypeInstKlassPtr *TypeInstKlassPtr::make(PTR ptr, ciKlass* k, const InterfaceSet& interfaces, int offset) {
+const TypeInstKlassPtr *TypeInstKlassPtr::make(PTR ptr, ciKlass* k, const TypeInterfaces* interfaces, int offset) {
TypeInstKlassPtr *r =
(TypeInstKlassPtr*)(new TypeInstKlassPtr(ptr, k, interfaces, offset))->hashcons();
@@ -5787,7 +5805,7 @@ const TypeOopPtr* TypeInstKlassPtr::as_instance_type(bool klass_change) const {
assert((deps != nullptr) == (C->method() != nullptr && C->method()->code_size() > 0), "sanity");
// Element is an instance
bool klass_is_exact = false;
- TypePtr::InterfaceSet interfaces = _interfaces;
+ const TypeInterfaces* interfaces = _interfaces;
if (k->is_loaded()) {
// Try to set klass_is_exact.
ciInstanceKlass* ik = k->as_instance_klass();
@@ -5796,7 +5814,7 @@ const TypeOopPtr* TypeInstKlassPtr::as_instance_type(bool klass_change) const {
&& deps != nullptr && UseUniqueSubclasses) {
ciInstanceKlass* sub = ik->unique_concrete_subklass();
if (sub != nullptr) {
- if (_interfaces.eq(sub)) {
+ if (_interfaces->eq(sub)) {
deps->assert_abstract_with_unique_concrete_subtype(ik, sub);
k = ik = sub;
xk = sub->is_final();
@@ -5880,7 +5898,7 @@ const Type *TypeInstKlassPtr::xmeet( const Type *t ) const {
const TypeInstKlassPtr *tkls = t->is_instklassptr();
int off = meet_offset(tkls->offset());
PTR ptr = meet_ptr(tkls->ptr());
- InterfaceSet interfaces = meet_interfaces(tkls);
+ const TypeInterfaces* interfaces = meet_interfaces(tkls);
ciKlass* res_klass = nullptr;
bool res_xk = false;
@@ -5903,9 +5921,9 @@ const Type *TypeInstKlassPtr::xmeet( const Type *t ) const {
const TypeAryKlassPtr *tp = t->is_aryklassptr();
int offset = meet_offset(tp->offset());
PTR ptr = meet_ptr(tp->ptr());
- InterfaceSet interfaces = meet_interfaces(tp);
- InterfaceSet tp_interfaces = tp->_interfaces;
- InterfaceSet this_interfaces = _interfaces;
+ const TypeInterfaces* interfaces = meet_interfaces(tp);
+ const TypeInterfaces* tp_interfaces = tp->_interfaces;
+ const TypeInterfaces* this_interfaces = _interfaces;
switch (ptr) {
case TopPTR:
@@ -5913,12 +5931,12 @@ const Type *TypeInstKlassPtr::xmeet( const Type *t ) const {
// For instances when a subclass meets a superclass we fall
// below the centerline when the superclass is exact. We need to
// do the same here.
- if (klass()->equals(ciEnv::current()->Object_klass()) && tp_interfaces.contains(this_interfaces) && !klass_is_exact()) {
+ if (klass()->equals(ciEnv::current()->Object_klass()) && tp_interfaces->contains(this_interfaces) && !klass_is_exact()) {
return TypeAryKlassPtr::make(ptr, tp->elem(), tp->klass(), offset);
} else {
// cannot subclass, so the meet has to fall badly below the centerline
ptr = NotNull;
- interfaces = _interfaces.intersection_with(tp->_interfaces);
+ interfaces = _interfaces->intersection_with(tp->_interfaces);
return make(ptr, ciEnv::current()->Object_klass(), interfaces, offset);
}
case Constant:
@@ -5931,7 +5949,7 @@ const Type *TypeInstKlassPtr::xmeet( const Type *t ) const {
// For instances when a subclass meets a superclass we fall
// below the centerline when the superclass is exact. We need
// to do the same here.
- if (klass()->equals(ciEnv::current()->Object_klass()) && tp_interfaces.contains(this_interfaces) && !klass_is_exact()) {
+ if (klass()->equals(ciEnv::current()->Object_klass()) && tp_interfaces->contains(this_interfaces) && !klass_is_exact()) {
// that is, tp's array type is a subtype of my klass
return TypeAryKlassPtr::make(ptr,
tp->elem(), tp->klass(), offset);
@@ -5941,7 +5959,7 @@ const Type *TypeInstKlassPtr::xmeet( const Type *t ) const {
// The meet falls down to Object class below centerline.
if( ptr == Constant )
ptr = NotNull;
- interfaces = this_interfaces.intersection_with(tp_interfaces);
+ interfaces = this_interfaces->intersection_with(tp_interfaces);
return make(ptr, ciEnv::current()->Object_klass(), interfaces, offset);
default: typerr(t);
}
@@ -5970,11 +5988,11 @@ template bool TypePtr::is_java_subtype_of_helper_for_instan
return false;
}
- if (other->klass()->equals(ciEnv::current()->Object_klass()) && other->_interfaces.empty()) {
+ if (other->klass()->equals(ciEnv::current()->Object_klass()) && other->_interfaces->empty()) {
return true;
}
- return this_one->_klass->is_subtype_of(other->_klass) && this_one->_interfaces.contains(other->_interfaces);
+ return this_one->klass()->is_subtype_of(other->klass()) && this_one->_interfaces->contains(other->_interfaces);
}
bool TypeInstKlassPtr::is_java_subtype_of_helper(const TypeKlassPtr* other, bool this_exact, bool other_exact) const {
@@ -5989,7 +6007,7 @@ template bool TypePtr::is_same_java_type_as_helper_for_inst
if (!this_one->is_instance_type(other)) {
return false;
}
- return this_one->_klass->equals(other->_klass) && this_one->_interfaces.eq(other->_interfaces);
+ return this_one->klass()->equals(other->klass()) && this_one->_interfaces->eq(other->_interfaces);
}
bool TypeInstKlassPtr::is_same_java_type_as_helper(const TypeKlassPtr* other) const {
@@ -6003,7 +6021,7 @@ template bool TypePtr::maybe_java_subtype_of_helper_for_ins
}
if (this_one->is_array_type(other)) {
- return !this_exact && this_one->_klass->equals(ciEnv::current()->Object_klass()) && other->_interfaces.contains(this_one->_interfaces);
+ return !this_exact && this_one->klass()->equals(ciEnv::current()->Object_klass()) && other->_interfaces->contains(this_one->_interfaces);
}
assert(this_one->is_instance_type(other), "unsupported");
@@ -6012,12 +6030,12 @@ template bool TypePtr::maybe_java_subtype_of_helper_for_ins
return this_one->is_java_subtype_of(other);
}
- if (!this_one->_klass->is_subtype_of(other->_klass) && !other->_klass->is_subtype_of(this_one->_klass)) {
+ if (!this_one->klass()->is_subtype_of(other->klass()) && !other->klass()->is_subtype_of(this_one->klass())) {
return false;
}
if (this_exact) {
- return this_one->_klass->is_subtype_of(other->_klass) && this_one->_interfaces.contains(other->_interfaces);
+ return this_one->klass()->is_subtype_of(other->klass()) && this_one->_interfaces->contains(other->_interfaces);
}
return true;
@@ -6035,7 +6053,7 @@ const TypeKlassPtr* TypeInstKlassPtr::try_improve() const {
Compile* C = Compile::current();
Dependencies* deps = C->dependencies();
assert((deps != nullptr) == (C->method() != nullptr && C->method()->code_size() > 0), "sanity");
- TypePtr::InterfaceSet interfaces = _interfaces;
+ const TypeInterfaces* interfaces = _interfaces;
if (k->is_loaded()) {
ciInstanceKlass* ik = k->as_instance_klass();
bool klass_is_exact = ik->is_final();
@@ -6043,7 +6061,7 @@ const TypeKlassPtr* TypeInstKlassPtr::try_improve() const {
deps != nullptr) {
ciInstanceKlass* sub = ik->unique_concrete_subklass();
if (sub != nullptr) {
- if (_interfaces.eq(sub)) {
+ if (_interfaces->eq(sub)) {
deps->assert_abstract_with_unique_concrete_subtype(ik, sub);
k = ik = sub;
klass_is_exact = sub->is_final();
@@ -6097,7 +6115,7 @@ uint TypeAryKlassPtr::hash(void) const {
//----------------------compute_klass------------------------------------------
// Compute the defining klass for this class
-ciKlass* TypeAryPtr::compute_klass(DEBUG_ONLY(bool verify)) const {
+ciKlass* TypeAryPtr::compute_klass() const {
// Compute _klass based on element type.
ciKlass* k_ary = nullptr;
const TypeInstPtr *tinst;
@@ -6118,28 +6136,7 @@ ciKlass* TypeAryPtr::compute_klass(DEBUG_ONLY(bool verify)) const {
// and object; Top occurs when doing join on Bottom.
// Leave k_ary at null.
} else {
- // Cannot compute array klass directly from basic type,
- // since subtypes of TypeInt all have basic type T_INT.
-#ifdef ASSERT
- if (verify && el->isa_int()) {
- // Check simple cases when verifying klass.
- BasicType bt = T_ILLEGAL;
- if (el == TypeInt::BYTE) {
- bt = T_BYTE;
- } else if (el == TypeInt::SHORT) {
- bt = T_SHORT;
- } else if (el == TypeInt::CHAR) {
- bt = T_CHAR;
- } else if (el == TypeInt::INT) {
- bt = T_INT;
- } else {
- return _klass; // just return specified klass
- }
- return ciTypeArrayKlass::make(bt);
- }
-#endif
- assert(!el->isa_int(),
- "integral arrays must be pre-equipped with a class");
+ assert(!el->isa_int(), "integral arrays must be pre-equipped with a class");
// Compute array klass directly from basic type
k_ary = ciTypeArrayKlass::make(el->basic_type());
}
@@ -6337,9 +6334,9 @@ const Type *TypeAryKlassPtr::xmeet( const Type *t ) const {
const TypeInstKlassPtr *tp = t->is_instklassptr();
int offset = meet_offset(tp->offset());
PTR ptr = meet_ptr(tp->ptr());
- InterfaceSet interfaces = meet_interfaces(tp);
- InterfaceSet tp_interfaces = tp->_interfaces;
- InterfaceSet this_interfaces = _interfaces;
+ const TypeInterfaces* interfaces = meet_interfaces(tp);
+ const TypeInterfaces* tp_interfaces = tp->_interfaces;
+ const TypeInterfaces* this_interfaces = _interfaces;
switch (ptr) {
case TopPTR:
@@ -6347,12 +6344,12 @@ const Type *TypeAryKlassPtr::xmeet( const Type *t ) const {
// For instances when a subclass meets a superclass we fall
// below the centerline when the superclass is exact. We need to
// do the same here.
- if (tp->klass()->equals(ciEnv::current()->Object_klass()) && this_interfaces.intersection_with(tp_interfaces).eq(tp_interfaces) && !tp->klass_is_exact()) {
+ if (tp->klass()->equals(ciEnv::current()->Object_klass()) && this_interfaces->intersection_with(tp_interfaces)->eq(tp_interfaces) && !tp->klass_is_exact()) {
return TypeAryKlassPtr::make(ptr, _elem, _klass, offset);
} else {
// cannot subclass, so the meet has to fall badly below the centerline
ptr = NotNull;
- interfaces = this_interfaces.intersection_with(tp->_interfaces);
+ interfaces = this_interfaces->intersection_with(tp->_interfaces);
return TypeInstKlassPtr::make(ptr, ciEnv::current()->Object_klass(), interfaces, offset);
}
case Constant:
@@ -6365,7 +6362,7 @@ const Type *TypeAryKlassPtr::xmeet( const Type *t ) const {
// For instances when a subclass meets a superclass we fall
// below the centerline when the superclass is exact. We need
// to do the same here.
- if (tp->klass()->equals(ciEnv::current()->Object_klass()) && this_interfaces.intersection_with(tp_interfaces).eq(tp_interfaces) && !tp->klass_is_exact()) {
+ if (tp->klass()->equals(ciEnv::current()->Object_klass()) && this_interfaces->intersection_with(tp_interfaces)->eq(tp_interfaces) && !tp->klass_is_exact()) {
// that is, my array type is a subtype of 'tp' klass
return make(ptr, _elem, _klass, offset);
}
@@ -6374,7 +6371,7 @@ const Type *TypeAryKlassPtr::xmeet( const Type *t ) const {
// The meet falls down to Object class below centerline.
if (ptr == Constant)
ptr = NotNull;
- interfaces = this_interfaces.intersection_with(tp_interfaces);
+ interfaces = this_interfaces->intersection_with(tp_interfaces);
return TypeInstKlassPtr::make(ptr, ciEnv::current()->Object_klass(), interfaces, offset);
default: typerr(t);
}
@@ -6387,7 +6384,7 @@ const Type *TypeAryKlassPtr::xmeet( const Type *t ) const {
template <class T1, class T2> bool TypePtr::is_java_subtype_of_helper_for_array(const T1* this_one, const T2* other, bool this_exact, bool other_exact) {
static_assert(std::is_base_of<T2, T1>::value, "");
- if (other->klass() == ciEnv::current()->Object_klass() && other->_interfaces.empty() && other_exact) {
+ if (other->klass() == ciEnv::current()->Object_klass() && other->_interfaces->empty() && other_exact) {
return true;
}
@@ -6399,7 +6396,7 @@ template bool TypePtr::is_java_subtype_of_helper_for_array(
}
if (this_one->is_instance_type(other)) {
- return other->klass() == ciEnv::current()->Object_klass() && other->_interfaces.intersection_with(this_one->_interfaces).eq(other->_interfaces) && other_exact;
+ return other->klass() == ciEnv::current()->Object_klass() && other->_interfaces->intersection_with(this_one->_interfaces)->eq(other->_interfaces) && other_exact;
}
assert(this_one->is_array_type(other), "");
@@ -6415,7 +6412,7 @@ template bool TypePtr::is_java_subtype_of_helper_for_array(
return this_one->is_reference_type(this_elem)->is_java_subtype_of_helper(this_one->is_reference_type(other_elem), this_exact, other_exact);
}
if (this_elem == nullptr && other_elem == nullptr) {
- return this_one->_klass->is_subtype_of(other->_klass);
+ return this_one->klass()->is_subtype_of(other->klass());
}
return false;
}
@@ -6447,8 +6444,7 @@ template bool TypePtr::is_same_java_type_as_helper_for_arra
return this_one->is_reference_type(this_elem)->is_same_java_type_as(this_one->is_reference_type(other_elem));
}
if (other_elem == nullptr && this_elem == nullptr) {
- assert(this_one->_klass != nullptr && other->_klass != nullptr, "");
- return this_one->_klass->equals(other->_klass);
+ return this_one->klass()->equals(other->klass());
}
return false;
}
@@ -6459,7 +6455,7 @@ bool TypeAryKlassPtr::is_same_java_type_as_helper(const TypeKlassPtr* other) con
template <class T1, class T2> bool TypePtr::maybe_java_subtype_of_helper_for_array(const T1* this_one, const T2* other, bool this_exact, bool other_exact) {
static_assert(std::is_base_of<T2, T1>::value, "");
- if (other->klass() == ciEnv::current()->Object_klass() && other->_interfaces.empty() && other_exact) {
+ if (other->klass() == ciEnv::current()->Object_klass() && other->_interfaces->empty() && other_exact) {
return true;
}
int dummy;
@@ -6468,7 +6464,7 @@ template bool TypePtr::maybe_java_subtype_of_helper_for_arr
return true;
}
if (this_one->is_instance_type(other)) {
- return other->_klass->equals(ciEnv::current()->Object_klass()) && other->_interfaces.intersection_with(this_one->_interfaces).eq(other->_interfaces);
+ return other->klass()->equals(ciEnv::current()->Object_klass()) && other->_interfaces->intersection_with(this_one->_interfaces)->eq(other->_interfaces);
}
assert(this_one->is_array_type(other), "");
@@ -6487,7 +6483,7 @@ template bool TypePtr::maybe_java_subtype_of_helper_for_arr
return this_one->is_reference_type(this_elem)->maybe_java_subtype_of_helper(this_one->is_reference_type(other_elem), this_exact, other_exact);
}
if (other_elem == nullptr && this_elem == nullptr) {
- return this_one->_klass->is_subtype_of(other->_klass);
+ return this_one->klass()->is_subtype_of(other->klass());
}
return false;
}
@@ -6543,7 +6539,7 @@ void TypeAryKlassPtr::dump2( Dict & d, uint depth, outputStream *st ) const {
{
st->print("[");
_elem->dump2(d, depth, st);
- _interfaces.dump(st);
+ _interfaces->dump(st);
st->print(": ");
}
case BotPTR:
diff --git a/src/hotspot/share/opto/type.hpp b/src/hotspot/share/opto/type.hpp
index 6cb74c6752193..62f9e27c8f88f 100644
--- a/src/hotspot/share/opto/type.hpp
+++ b/src/hotspot/share/opto/type.hpp
@@ -94,6 +94,8 @@ class Type {
Tuple, // Method signature or object layout
Array, // Array types
+ Interfaces, // Set of implemented interfaces for oop types
+
VectorMask, // Vector predicate/mask type
VectorA, // (Scalable) Vector types for vector length agnostic
VectorS, // 32bit Vector types
@@ -872,6 +874,48 @@ class TypeVectMask : public TypeVect {
static const TypeVectMask* make(const Type* elem, uint length);
};
+// Set of implemented interfaces. Referenced from TypeOopPtr and TypeKlassPtr.
+class TypeInterfaces : public Type {
+private:
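+ // Interfaces in this set, plus a cached hash and cached exact klass.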
+ GrowableArray<ciInstanceKlass*> _list;
+ uint _hash;
+ ciInstanceKlass* _exact_klass;
+ DEBUG_ONLY(bool _initialized;)
+
+ void initialize();
+
+ void add(ciInstanceKlass* interface);
+ void verify() const NOT_DEBUG_RETURN;
+ void compute_hash();
+ void compute_exact_klass();
+ TypeInterfaces();
+ TypeInterfaces(GrowableArray<ciInstanceKlass*>* interfaces);
+
+ NONCOPYABLE(TypeInterfaces);
+public:
+ static const TypeInterfaces* make(GrowableArray<ciInstanceKlass*>* interfaces = nullptr);
+ bool eq(const Type* other) const;
+ bool eq(ciInstanceKlass* k) const;
+ uint hash() const;
+ const Type *xdual() const;
+ void dump(outputStream* st) const;
+ const TypeInterfaces* union_with(const TypeInterfaces* other) const;
+ const TypeInterfaces* intersection_with(const TypeInterfaces* other) const;
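+ // This set contains 'other' if intersecting with 'other' yields 'other' again.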
+ bool contains(const TypeInterfaces* other) const {
+ return intersection_with(other)->eq(other);
+ }
+ bool empty() const { return _list.length() == 0; }
+
+ ciInstanceKlass* exact_klass() const;
+ void verify_is_loaded() const NOT_DEBUG_RETURN;
+
+ static int compare(ciInstanceKlass* const& k1, ciInstanceKlass* const& k2);
+
+ const Type* xmeet(const Type* t) const;
+
+ bool singleton(void) const;
+};
+
//------------------------------TypePtr----------------------------------------
// Class of machine Pointer Types: raw data, instances or arrays.
// If the _base enum is AnyPtr, then this refers to all of the above.
@@ -881,47 +925,7 @@ class TypePtr : public Type {
friend class TypeNarrowPtr;
friend class Type;
protected:
- class InterfaceSet {
- private:
- GrowableArray<ciKlass*> _list;
- uint _hash;
- ciKlass* _exact_klass;
- DEBUG_ONLY(bool _initialized;)
-
- void initialize();
- void raw_add(ciKlass* interface);
- void add(ciKlass* interface);
- void verify() const NOT_DEBUG_RETURN;
- void compute_hash();
- void compute_exact_klass();
- public:
- InterfaceSet();
- InterfaceSet(GrowableArray<ciKlass*>* interfaces);
- bool eq(const InterfaceSet& other) const;
- bool eq(ciInstanceKlass* k) const;
- uint hash() const;
- void dump(outputStream* st) const;
- InterfaceSet union_with(const InterfaceSet& other) const;
- InterfaceSet intersection_with(const InterfaceSet& other) const;
- bool contains(const InterfaceSet& other) const {
- return intersection_with(other).eq(other);
- }
- bool empty() const { return _list.length() == 0; }
-
- inline void* operator new(size_t x) throw() {
- Compile* compile = Compile::current();
- return compile->type_arena()->AmallocWords(x);
- }
- inline void operator delete(void* ptr) {
- ShouldNotReachHere();
- }
- ciKlass* exact_klass() const;
- void verify_is_loaded() const NOT_DEBUG_RETURN;
-
- static int compare(ciKlass* const& k1, ciKlass* const& k2);
- };
-
- static InterfaceSet interfaces(ciKlass*& k, bool klass, bool interface, bool array, InterfaceHandling interface_handling);
+ static const TypeInterfaces* interfaces(ciKlass*& k, bool klass, bool interface, bool array, InterfaceHandling interface_handling);
public:
enum PTR { TopPTR, AnyNull, Constant, Null, NotNull, BotPTR, lastPTR };
@@ -981,7 +985,7 @@ class TypePtr : public Type {
NOT_SUBTYPE,
LCA
};
- template<class T> static TypePtr::MeetResult meet_instptr(PTR& ptr, InterfaceSet& interfaces, const T* this_type,
+ template<class T> static TypePtr::MeetResult meet_instptr(PTR& ptr, const TypeInterfaces*& interfaces, const T* this_type,
const T* other_type, ciKlass*& res_klass, bool& res_xk);
template<class T> static MeetResult meet_aryptr(PTR& ptr, const Type*& elem, const T* this_ary, const T* other_ary,
@@ -1103,8 +1107,8 @@ class TypeOopPtr : public TypePtr {
friend class TypeInstPtr;
friend class TypeAryPtr;
protected:
- TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, const InterfaceSet& interfaces, bool xk, ciObject* o, int offset, int instance_id,
- const TypePtr* speculative, int inline_depth);
+ TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, const TypeInterfaces* interfaces, bool xk, ciObject* o, int offset, int instance_id,
+ const TypePtr* speculative, int inline_depth);
public:
virtual bool eq( const Type *t ) const;
virtual uint hash() const; // Type specific hashing
@@ -1120,7 +1124,7 @@ class TypeOopPtr : public TypePtr {
// If _klass is null, then so is _sig. This is an unloaded klass.
ciKlass* _klass; // Klass object
- const InterfaceSet _interfaces;
+ const TypeInterfaces* _interfaces;
// Does the type exclude subclasses of the klass? (Inexact == polymorphic.)
bool _klass_is_exact;
@@ -1138,7 +1142,7 @@ class TypeOopPtr : public TypePtr {
int dual_instance_id() const;
int meet_instance_id(int uid) const;
- InterfaceSet meet_interfaces(const TypeOopPtr* other) const;
+ const TypeInterfaces* meet_interfaces(const TypeOopPtr* other) const;
// Do not allow interface-vs.-noninterface joins to collapse to top.
virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
@@ -1252,7 +1256,7 @@ class TypeOopPtr : public TypePtr {
ShouldNotReachHere(); return false;
}
- virtual const InterfaceSet interfaces() const {
+ virtual const TypeInterfaces* interfaces() const {
return _interfaces;
};
@@ -1273,7 +1277,7 @@ class TypeOopPtr : public TypePtr {
// Class of Java object pointers, pointing either to non-array Java instances
// or to a Klass* (including array klasses).
class TypeInstPtr : public TypeOopPtr {
- TypeInstPtr(PTR ptr, ciKlass* k, const InterfaceSet& interfaces, bool xk, ciObject* o, int offset, int instance_id,
+ TypeInstPtr(PTR ptr, ciKlass* k, const TypeInterfaces* interfaces, bool xk, ciObject* o, int off, int instance_id,
const TypePtr* speculative, int inline_depth);
virtual bool eq( const Type *t ) const;
virtual uint hash() const; // Type specific hashing
@@ -1295,41 +1299,41 @@ class TypeInstPtr : public TypeOopPtr {
// Make a pointer to a constant oop.
static const TypeInstPtr *make(ciObject* o) {
ciKlass* k = o->klass();
- const TypePtr::InterfaceSet interfaces = TypePtr::interfaces(k, true, false, false, ignore_interfaces);
+ const TypeInterfaces* interfaces = TypePtr::interfaces(k, true, false, false, ignore_interfaces);
return make(TypePtr::Constant, k, interfaces, true, o, 0, InstanceBot);
}
// Make a pointer to a constant oop with offset.
static const TypeInstPtr *make(ciObject* o, int offset) {
ciKlass* k = o->klass();
- const TypePtr::InterfaceSet interfaces = TypePtr::interfaces(k, true, false, false, ignore_interfaces);
+ const TypeInterfaces* interfaces = TypePtr::interfaces(k, true, false, false, ignore_interfaces);
return make(TypePtr::Constant, k, interfaces, true, o, offset, InstanceBot);
}
// Make a pointer to some value of type klass.
static const TypeInstPtr *make(PTR ptr, ciKlass* klass, InterfaceHandling interface_handling = ignore_interfaces) {
- const TypePtr::InterfaceSet interfaces = TypePtr::interfaces(klass, true, true, false, interface_handling);
+ const TypeInterfaces* interfaces = TypePtr::interfaces(klass, true, true, false, interface_handling);
return make(ptr, klass, interfaces, false, nullptr, 0, InstanceBot);
}
// Make a pointer to some non-polymorphic value of exactly type klass.
static const TypeInstPtr *make_exact(PTR ptr, ciKlass* klass) {
- const TypePtr::InterfaceSet interfaces = TypePtr::interfaces(klass, true, false, false, ignore_interfaces);
+ const TypeInterfaces* interfaces = TypePtr::interfaces(klass, true, false, false, ignore_interfaces);
return make(ptr, klass, interfaces, true, nullptr, 0, InstanceBot);
}
// Make a pointer to some value of type klass with offset.
static const TypeInstPtr *make(PTR ptr, ciKlass* klass, int offset) {
- const TypePtr::InterfaceSet interfaces = TypePtr::interfaces(klass, true, false, false, ignore_interfaces);
+ const TypeInterfaces* interfaces = TypePtr::interfaces(klass, true, false, false, ignore_interfaces);
return make(ptr, klass, interfaces, false, nullptr, offset, InstanceBot);
}
- static const TypeInstPtr *make(PTR ptr, ciKlass* k, const InterfaceSet& interfaces, bool xk, ciObject* o, int offset,
+ static const TypeInstPtr *make(PTR ptr, ciKlass* k, const TypeInterfaces* interfaces, bool xk, ciObject* o, int offset,
int instance_id = InstanceBot,
const TypePtr* speculative = nullptr,
int inline_depth = InlineDepthBottom);
static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot) {
- const TypePtr::InterfaceSet interfaces = TypePtr::interfaces(k, true, false, false, ignore_interfaces);
+ const TypeInterfaces* interfaces = TypePtr::interfaces(k, true, false, false, ignore_interfaces);
return make(ptr, k, interfaces, xk, o, offset, instance_id);
}
@@ -1357,7 +1361,7 @@ class TypeInstPtr : public TypeOopPtr {
// the core of the computation of the meet of 2 types
virtual const Type *xmeet_helper(const Type *t) const;
- virtual const TypeInstPtr *xmeet_unloaded(const TypeInstPtr *t, const InterfaceSet& interfaces) const;
+ virtual const TypeInstPtr *xmeet_unloaded(const TypeInstPtr *tinst, const TypeInterfaces* interfaces) const;
virtual const Type *xdual() const; // Compute dual right now.
const TypeKlassPtr* as_klass_type(bool try_for_exact = false) const;
@@ -1376,7 +1380,7 @@ class TypeInstPtr : public TypeOopPtr {
virtual bool is_meet_subtype_of_helper(const TypeOopPtr* other, bool this_xk, bool other_xk) const;
virtual bool is_meet_same_type_as(const TypePtr* other) const {
- return _klass->equals(other->is_instptr()->_klass) && _interfaces.eq(other->is_instptr()->_interfaces);
+ return _klass->equals(other->is_instptr()->_klass) && _interfaces->eq(other->is_instptr()->_interfaces);
}
};
@@ -1390,7 +1394,7 @@ class TypeAryPtr : public TypeOopPtr {
TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk,
int offset, int instance_id, bool is_autobox_cache,
const TypePtr* speculative, int inline_depth)
- : TypeOopPtr(AryPtr,ptr,k,*_array_interfaces,xk,o,offset, instance_id, speculative, inline_depth),
+ : TypeOopPtr(AryPtr,ptr,k,_array_interfaces,xk,o,offset, instance_id, speculative, inline_depth),
_ary(ary),
_is_autobox_cache(is_autobox_cache)
{
@@ -1409,11 +1413,11 @@ class TypeAryPtr : public TypeOopPtr {
const TypeAry *_ary; // Array we point into
const bool _is_autobox_cache;
- ciKlass* compute_klass(DEBUG_ONLY(bool verify = false)) const;
+ ciKlass* compute_klass() const;
// A pointer to delay allocation to Type::Initialize_shared()
- static const InterfaceSet* _array_interfaces;
+ static const TypeInterfaces* _array_interfaces;
ciKlass* exact_klass_helper() const;
// Only guaranteed non null for array of basic types
ciKlass* klass() const;
@@ -1551,7 +1555,7 @@ class TypeKlassPtr : public TypePtr {
friend class TypeAryKlassPtr;
friend class TypePtr;
protected:
- TypeKlassPtr(TYPES t, PTR ptr, ciKlass* klass, const InterfaceSet& interfaces, int offset);
+ TypeKlassPtr(TYPES t, PTR ptr, ciKlass* klass, const TypeInterfaces* interfaces, int offset);
virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
@@ -1563,8 +1567,8 @@ class TypeKlassPtr : public TypePtr {
protected:
ciKlass* _klass;
- const InterfaceSet _interfaces;
- InterfaceSet meet_interfaces(const TypeKlassPtr* other) const;
+ const TypeInterfaces* _interfaces;
+ const TypeInterfaces* meet_interfaces(const TypeKlassPtr* other) const;
virtual bool must_be_exact() const { ShouldNotReachHere(); return false; }
virtual ciKlass* exact_klass_helper() const;
virtual ciKlass* klass() const { return _klass; }
@@ -1623,7 +1627,7 @@ class TypeKlassPtr : public TypePtr {
ShouldNotReachHere(); return false;
}
- virtual const InterfaceSet interfaces() const {
+ virtual const TypeInterfaces* interfaces() const {
return _interfaces;
};
@@ -1643,7 +1647,7 @@ class TypeKlassPtr : public TypePtr {
// Instance klass pointer, mirrors TypeInstPtr
class TypeInstKlassPtr : public TypeKlassPtr {
- TypeInstKlassPtr(PTR ptr, ciKlass* klass, const InterfaceSet& interfaces, int offset)
+ TypeInstKlassPtr(PTR ptr, ciKlass* klass, const TypeInterfaces* interfaces, int offset)
: TypeKlassPtr(InstKlassPtr, ptr, klass, interfaces, offset) {
assert(klass->is_instance_klass() && (!klass->is_loaded() || !klass->is_interface()), "");
}
@@ -1662,13 +1666,13 @@ class TypeInstKlassPtr : public TypeKlassPtr {
bool maybe_java_subtype_of_helper(const TypeKlassPtr* other, bool this_exact, bool other_exact) const;
static const TypeInstKlassPtr *make(ciKlass* k, InterfaceHandling interface_handling) {
- InterfaceSet interfaces = TypePtr::interfaces(k, true, true, false, interface_handling);
+ const TypeInterfaces* interfaces = TypePtr::interfaces(k, true, true, false, interface_handling);
return make(TypePtr::Constant, k, interfaces, 0);
}
- static const TypeInstKlassPtr* make(PTR ptr, ciKlass* k, const InterfaceSet& interfaces, int offset);
+ static const TypeInstKlassPtr* make(PTR ptr, ciKlass* k, const TypeInterfaces* interfaces, int offset);
static const TypeInstKlassPtr* make(PTR ptr, ciKlass* k, int offset) {
- const TypePtr::InterfaceSet interfaces = TypePtr::interfaces(k, true, false, false, ignore_interfaces);
+ const TypeInterfaces* interfaces = TypePtr::interfaces(k, true, false, false, ignore_interfaces);
return make(ptr, k, interfaces, offset);
}
@@ -1703,9 +1707,9 @@ class TypeAryKlassPtr : public TypeKlassPtr {
const Type *_elem;
- static const InterfaceSet* _array_interfaces;
+ static const TypeInterfaces* _array_interfaces;
TypeAryKlassPtr(PTR ptr, const Type *elem, ciKlass* klass, int offset)
- : TypeKlassPtr(AryKlassPtr, ptr, klass, *_array_interfaces, offset), _elem(elem) {
+ : TypeKlassPtr(AryKlassPtr, ptr, klass, _array_interfaces, offset), _elem(elem) {
assert(klass == nullptr || klass->is_type_array_klass() || !klass->as_obj_array_klass()->base_element_klass()->is_interface(), "");
}
diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp
index 9b8c97249b1e8..0d477d67eab88 100644
--- a/src/hotspot/share/prims/jni.cpp
+++ b/src/hotspot/share/prims/jni.cpp
@@ -930,6 +930,11 @@ static void jni_invoke_nonstatic(JNIEnv *env, JavaValue* result, jobject receive
}
}
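+ // The selected method may be abstract; throw AbstractMethodError rather than attempting to invoke it.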
+ if (selected_method->is_abstract()) {
+ ResourceMark rm(THREAD);
+ THROW_MSG(vmSymbols::java_lang_AbstractMethodError(), selected_method->name()->as_C_string());
+ }
+
methodHandle method(THREAD, selected_method);
// Create object to hold arguments for the JavaCall, and associate it with
@@ -2876,7 +2881,7 @@ JNI_ENTRY(jweak, jni_NewWeakGlobalRef(JNIEnv *env, jobject ref))
HOTSPOT_JNI_NEWWEAKGLOBALREF_ENTRY(env, ref);
Handle ref_handle(thread, JNIHandles::resolve(ref));
jweak ret = JNIHandles::make_weak_global(ref_handle, AllocFailStrategy::RETURN_NULL);
- if (ret == nullptr) {
+ if (ret == nullptr && ref_handle.not_null()) {
THROW_OOP_(Universe::out_of_memory_error_c_heap(), nullptr);
}
HOTSPOT_JNI_NEWWEAKGLOBALREF_RETURN(ret);
diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp
index 9f5327eb31376..7852f37d4be25 100644
--- a/src/hotspot/share/prims/jvm.cpp
+++ b/src/hotspot/share/prims/jvm.cpp
@@ -1385,9 +1385,8 @@ JVM_ENTRY(jobject, JVM_FindScopedValueBindings(JNIEnv *env, jclass cls))
InstanceKlass* holder = method->method_holder();
if (name == vmSymbols::runWith_method_name()) {
- if ((holder == resolver.Carrier_klass
- || holder == vmClasses::VirtualThread_klass()
- || holder == vmClasses::Thread_klass())) {
+ if (holder == vmClasses::Thread_klass()
+ || holder == resolver.Carrier_klass) {
loc = 1;
}
}
diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp
index e82e4de0f6678..15472787f645d 100644
--- a/src/hotspot/share/prims/jvmtiEnv.cpp
+++ b/src/hotspot/share/prims/jvmtiEnv.cpp
@@ -1009,7 +1009,7 @@ JvmtiEnv::SuspendThreadList(jint request_count, const jthread* request_list, jvm
jvmtiError
JvmtiEnv::SuspendAllVirtualThreads(jint except_count, const jthread* except_list) {
- if (!JvmtiExport::can_support_virtual_threads()) {
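+ // Check this environment's own capability: the process-wide JvmtiExport flag stays set while any agent holds can_support_virtual_threads.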
+ if (get_capabilities()->can_support_virtual_threads == 0) {
return JVMTI_ERROR_MUST_POSSESS_CAPABILITY;
}
JavaThread* current = JavaThread::current();
@@ -1127,7 +1127,7 @@ JvmtiEnv::ResumeThreadList(jint request_count, const jthread* request_list, jvmt
jvmtiError
JvmtiEnv::ResumeAllVirtualThreads(jint except_count, const jthread* except_list) {
- if (!JvmtiExport::can_support_virtual_threads()) {
+ if (get_capabilities()->can_support_virtual_threads == 0) {
return JVMTI_ERROR_MUST_POSSESS_CAPABILITY;
}
jvmtiError err = JvmtiEnvBase::check_thread_list(except_count, except_list);
diff --git a/src/hotspot/share/prims/jvmtiEnvBase.cpp b/src/hotspot/share/prims/jvmtiEnvBase.cpp
index 9e12e9d51e006..105902b964263 100644
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp
@@ -213,7 +213,8 @@ JvmtiEnvBase::JvmtiEnvBase(jint version) : _env_event_enable() {
_is_retransformable = true;
// all callbacks initially null
- memset(&_event_callbacks,0,sizeof(jvmtiEventCallbacks));
+ memset(&_event_callbacks, 0, sizeof(jvmtiEventCallbacks));
+ memset(&_ext_event_callbacks, 0, sizeof(jvmtiExtEventCallbacks));
// all capabilities initially off
memset(&_current_capabilities, 0, sizeof(_current_capabilities));
diff --git a/src/hotspot/share/prims/jvmtiEventController.cpp b/src/hotspot/share/prims/jvmtiEventController.cpp
index 033365071229d..0a01c3263e136 100644
--- a/src/hotspot/share/prims/jvmtiEventController.cpp
+++ b/src/hotspot/share/prims/jvmtiEventController.cpp
@@ -309,6 +309,8 @@ class JvmtiEventControllerPrivate : public AllStatic {
static void clear_to_frame_pop(JvmtiEnvThreadState *env_thread, JvmtiFramePop fpop);
static void change_field_watch(jvmtiEvent event_type, bool added);
+ static bool is_any_thread_filtered_event_enabled_globally();
+ static void recompute_thread_filtered(JvmtiThreadState *state);
static void thread_started(JavaThread *thread);
static void thread_ended(JavaThread *thread);
@@ -729,6 +731,20 @@ JvmtiEventControllerPrivate::recompute_enabled() {
EC_TRACE(("[-] # recompute enabled - after " JULONG_FORMAT_X, any_env_thread_enabled));
}
+bool
+JvmtiEventControllerPrivate::is_any_thread_filtered_event_enabled_globally() {
+ julong global_thread_events = JvmtiEventController::_universal_global_event_enabled.get_bits() & THREAD_FILTERED_EVENT_BITS;
+ return global_thread_events != 0L;
+}
+
+void
+JvmtiEventControllerPrivate::recompute_thread_filtered(JvmtiThreadState *state) {
+ assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
+
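+ // Per-thread event bits only matter when some thread-filtered event is enabled globally.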
+ if (is_any_thread_filtered_event_enabled_globally()) {
+ JvmtiEventControllerPrivate::recompute_thread_enabled(state);
+ }
+}
void
JvmtiEventControllerPrivate::thread_started(JavaThread *thread) {
@@ -738,17 +754,11 @@ JvmtiEventControllerPrivate::thread_started(JavaThread *thread) {
EC_TRACE(("[%s] # thread started", JvmtiTrace::safe_get_thread_name(thread)));
// if we have any thread filtered events globally enabled, create/update the thread state
- if ((JvmtiEventController::_universal_global_event_enabled.get_bits() & THREAD_FILTERED_EVENT_BITS) != 0) {
- MutexLocker mu(JvmtiThreadState_lock);
- // create the thread state if missing
- JvmtiThreadState *state = JvmtiThreadState::state_for_while_locked(thread);
- if (state != nullptr) { // skip threads with no JVMTI thread state
- recompute_thread_enabled(state);
- }
+ if (is_any_thread_filtered_event_enabled_globally()) { // intentionally racy
+ JvmtiThreadState::state_for(thread);
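+ // state_for() also recomputes the thread-filtered event bits for the newly created state.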
}
}
-
void
JvmtiEventControllerPrivate::thread_ended(JavaThread *thread) {
// Removes the JvmtiThreadState associated with the specified thread.
@@ -1114,6 +1124,11 @@ JvmtiEventController::change_field_watch(jvmtiEvent event_type, bool added) {
JvmtiEventControllerPrivate::change_field_watch(event_type, added);
}
+void
+JvmtiEventController::recompute_thread_filtered(JvmtiThreadState *state) {
+ JvmtiEventControllerPrivate::recompute_thread_filtered(state);
+}
+
void
JvmtiEventController::thread_started(JavaThread *thread) {
// operates only on the current thread
diff --git a/src/hotspot/share/prims/jvmtiEventController.hpp b/src/hotspot/share/prims/jvmtiEventController.hpp
index 9b236b29204fb..84070a3098c2c 100644
--- a/src/hotspot/share/prims/jvmtiEventController.hpp
+++ b/src/hotspot/share/prims/jvmtiEventController.hpp
@@ -234,6 +234,7 @@ class JvmtiEventController : AllStatic {
static void change_field_watch(jvmtiEvent event_type, bool added);
+ static void recompute_thread_filtered(JvmtiThreadState *state);
static void thread_started(JavaThread *thread);
static void thread_ended(JavaThread *thread);
diff --git a/src/hotspot/share/prims/jvmtiExport.cpp b/src/hotspot/share/prims/jvmtiExport.cpp
index 5585eb124054d..0466bf8096035 100644
--- a/src/hotspot/share/prims/jvmtiExport.cpp
+++ b/src/hotspot/share/prims/jvmtiExport.cpp
@@ -417,6 +417,15 @@ JvmtiExport::get_jvmti_interface(JavaVM *jvm, void **penv, jint version) {
}
}
+JvmtiThreadState*
+JvmtiExport::get_jvmti_thread_state(JavaThread *thread) {
+ assert(thread == JavaThread::current(), "must be current thread");
+ if (thread->is_vthread_mounted() && thread->jvmti_thread_state() == nullptr) {
+ JvmtiEventController::thread_started(thread);
+ }
+ return thread->jvmti_thread_state();
+}
+
void
JvmtiExport::add_default_read_edges(Handle h_module, TRAPS) {
if (!Universe::is_module_initialized()) {
@@ -920,7 +929,7 @@ class JvmtiClassFileLoadHookPoster : public StackObj {
_has_been_modified = false;
assert(!_thread->is_in_any_VTMS_transition(), "CFLH events are not allowed in any VTMS transition");
- _state = _thread->jvmti_thread_state();
+ _state = JvmtiExport::get_jvmti_thread_state(_thread);
if (_state != nullptr) {
_class_being_redefined = _state->get_class_being_redefined();
_load_kind = _state->get_class_load_kind();
@@ -1209,7 +1218,7 @@ void JvmtiExport::post_raw_breakpoint(JavaThread *thread, Method* method, addres
HandleMark hm(thread);
methodHandle mh(thread, method);
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -1307,7 +1316,7 @@ void JvmtiExport::at_single_stepping_point(JavaThread *thread, Method* method, a
methodHandle mh(thread, method);
// update information about current location and post a step event
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -1326,7 +1335,7 @@ void JvmtiExport::at_single_stepping_point(JavaThread *thread, Method* method, a
void JvmtiExport::expose_single_stepping(JavaThread *thread) {
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state != nullptr) {
state->clear_hide_single_stepping();
}
@@ -1334,7 +1343,7 @@ void JvmtiExport::expose_single_stepping(JavaThread *thread) {
bool JvmtiExport::hide_single_stepping(JavaThread *thread) {
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state != nullptr && state->is_enabled(JVMTI_EVENT_SINGLE_STEP)) {
state->set_hide_single_stepping();
return true;
@@ -1349,7 +1358,7 @@ void JvmtiExport::post_class_load(JavaThread *thread, Klass* klass) {
}
HandleMark hm(thread);
- JvmtiThreadState* state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -1387,7 +1396,7 @@ void JvmtiExport::post_class_prepare(JavaThread *thread, Klass* klass) {
}
HandleMark hm(thread);
- JvmtiThreadState* state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -1516,7 +1525,7 @@ void JvmtiExport::post_thread_end(JavaThread *thread) {
EVT_TRIG_TRACE(JVMTI_EVENT_THREAD_END, ("[%s] Trg Thread End event triggered",
JvmtiTrace::safe_get_thread_name(thread)));
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -1564,7 +1573,7 @@ void JvmtiExport::post_vthread_start(jobject vthread) {
EVT_TRIG_TRACE(JVMTI_EVENT_VIRTUAL_THREAD_START, ("[%p] Trg Virtual Thread Start event triggered", vthread));
JavaThread *cur_thread = JavaThread::current();
- JvmtiThreadState *state = cur_thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(cur_thread);
if (state == nullptr) {
return;
}
@@ -1598,7 +1607,7 @@ void JvmtiExport::post_vthread_end(jobject vthread) {
EVT_TRIG_TRACE(JVMTI_EVENT_VIRTUAL_THREAD_END, ("[%p] Trg Virtual Thread End event triggered", vthread));
JavaThread *cur_thread = JavaThread::current();
- JvmtiThreadState *state = cur_thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(cur_thread);
if (state == nullptr) {
return;
}
@@ -1633,7 +1642,7 @@ void JvmtiExport::post_vthread_mount(jobject vthread) {
HandleMark hm(thread);
EVT_TRIG_TRACE(EXT_EVENT_VIRTUAL_THREAD_MOUNT, ("[%p] Trg Virtual Thread Mount event triggered", vthread));
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -1668,7 +1677,7 @@ void JvmtiExport::post_vthread_unmount(jobject vthread) {
HandleMark hm(thread);
EVT_TRIG_TRACE(EXT_EVENT_VIRTUAL_THREAD_UNMOUNT, ("[%p] Trg Virtual Thread Unmount event triggered", vthread));
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -1701,7 +1710,7 @@ void JvmtiExport::continuation_yield_cleanup(JavaThread* thread, jint continuati
}
assert(thread == JavaThread::current(), "must be");
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -1795,7 +1804,7 @@ void JvmtiExport::post_method_entry(JavaThread *thread, Method* method, frame cu
HandleMark hm(thread);
methodHandle mh(thread, method);
- JvmtiThreadState* state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr || !state->is_interp_only_mode()) {
// for any thread that actually wants method entry, interp_only_mode is set
return;
@@ -1835,7 +1844,7 @@ void JvmtiExport::post_method_exit(JavaThread* thread, Method* method, frame cur
HandleMark hm(thread);
methodHandle mh(thread, method);
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr || !state->is_interp_only_mode()) {
// for any thread that actually wants method exit, interp_only_mode is set
@@ -1956,7 +1965,7 @@ void JvmtiExport::post_single_step(JavaThread *thread, Method* method, address l
HandleMark hm(thread);
methodHandle mh(thread, method);
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -1998,7 +2007,7 @@ void JvmtiExport::post_exception_throw(JavaThread *thread, Method* method, addre
// ensure the stack is sufficiently processed.
KeepStackGCProcessedMark ksgcpm(thread);
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -2086,7 +2095,7 @@ void JvmtiExport::notice_unwind_due_to_exception(JavaThread *thread, Method* met
methodHandle mh(thread, method);
Handle exception_handle(thread, exception);
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -2202,7 +2211,7 @@ void JvmtiExport::post_field_access(JavaThread *thread, Method* method,
HandleMark hm(thread);
methodHandle mh(thread, method);
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -2358,7 +2367,7 @@ void JvmtiExport::post_field_modification(JavaThread *thread, Method* method,
HandleMark hm(thread);
methodHandle mh(thread, method);
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -2600,7 +2609,7 @@ void JvmtiExport::post_dynamic_code_generated_while_holding_locks(const char* na
// jvmti thread state.
// The collector and/or state might be null if JvmtiDynamicCodeEventCollector
// has been initialized while JVMTI_EVENT_DYNAMIC_CODE_GENERATED was disabled.
- JvmtiThreadState* state = thread->jvmti_thread_state();
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state != nullptr) {
JvmtiDynamicCodeEventCollector *collector = state->get_dynamic_code_event_collector();
if (collector != nullptr) {
@@ -2719,7 +2728,10 @@ void JvmtiExport::post_data_dump() {
void JvmtiExport::post_monitor_contended_enter(JavaThread *thread, ObjectMonitor *obj_mntr) {
oop object = obj_mntr->object();
- JvmtiThreadState *state = thread->jvmti_thread_state();
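+ // Handlize the object before looking up the thread state: get_jvmti_thread_state() may create a state and can safepoint.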
+ HandleMark hm(thread);
+ Handle h(thread, object);
+
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -2727,9 +2739,6 @@ void JvmtiExport::post_monitor_contended_enter(JavaThread *thread, ObjectMonitor
return; // no events should be posted if thread is in any VTMS transition
}
- HandleMark hm(thread);
- Handle h(thread, object);
-
EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_CONTENDED_ENTER,
("[%s] monitor contended enter event triggered",
JvmtiTrace::safe_get_thread_name(thread)));
@@ -2752,7 +2761,10 @@ void JvmtiExport::post_monitor_contended_enter(JavaThread *thread, ObjectMonitor
void JvmtiExport::post_monitor_contended_entered(JavaThread *thread, ObjectMonitor *obj_mntr) {
oop object = obj_mntr->object();
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ HandleMark hm(thread);
+ Handle h(thread, object);
+
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -2760,9 +2772,6 @@ void JvmtiExport::post_monitor_contended_entered(JavaThread *thread, ObjectMonit
return; // no events should be posted if thread is in any VTMS transition
}
- HandleMark hm(thread);
- Handle h(thread, object);
-
EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED,
("[%s] monitor contended entered event triggered",
JvmtiTrace::safe_get_thread_name(thread)));
@@ -2786,7 +2795,10 @@ void JvmtiExport::post_monitor_contended_entered(JavaThread *thread, ObjectMonit
void JvmtiExport::post_monitor_wait(JavaThread *thread, oop object,
jlong timeout) {
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ HandleMark hm(thread);
+ Handle h(thread, object);
+
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -2794,9 +2806,6 @@ void JvmtiExport::post_monitor_wait(JavaThread *thread, oop object,
return; // no events should be posted if thread is in any VTMS transition
}
- HandleMark hm(thread);
- Handle h(thread, object);
-
EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_WAIT,
("[%s] monitor wait event triggered",
JvmtiTrace::safe_get_thread_name(thread)));
@@ -2820,7 +2829,10 @@ void JvmtiExport::post_monitor_wait(JavaThread *thread, oop object,
void JvmtiExport::post_monitor_waited(JavaThread *thread, ObjectMonitor *obj_mntr, jboolean timed_out) {
oop object = obj_mntr->object();
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ HandleMark hm(thread);
+ Handle h(thread, object);
+
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -2828,9 +2840,6 @@ void JvmtiExport::post_monitor_waited(JavaThread *thread, ObjectMonitor *obj_mnt
return; // no events should be posted if thread is in any VTMS transition
}
- HandleMark hm(thread);
- Handle h(thread, object);
-
EVT_TRIG_TRACE(JVMTI_EVENT_MONITOR_WAITED,
("[%s] monitor waited event triggered",
JvmtiTrace::safe_get_thread_name(thread)));
@@ -2883,7 +2892,10 @@ void JvmtiExport::post_vm_object_alloc(JavaThread *thread, oop object) {
}
void JvmtiExport::post_sampled_object_alloc(JavaThread *thread, oop object) {
- JvmtiThreadState *state = thread->jvmti_thread_state();
+ HandleMark hm(thread);
+ Handle h(thread, object);
+
+ JvmtiThreadState *state = get_jvmti_thread_state(thread);
if (state == nullptr) {
return;
}
@@ -2893,8 +2905,6 @@ void JvmtiExport::post_sampled_object_alloc(JavaThread *thread, oop object) {
if (thread->is_in_any_VTMS_transition()) {
return; // no events should be posted if thread is in any VTMS transition
}
- HandleMark hm(thread);
- Handle h(thread, object);
EVT_TRIG_TRACE(JVMTI_EVENT_SAMPLED_OBJECT_ALLOC,
("[%s] Trg sampled object alloc triggered",
diff --git a/src/hotspot/share/prims/jvmtiExport.hpp b/src/hotspot/share/prims/jvmtiExport.hpp
index 4abd8f6b1a8ec..805c8c090dacc 100644
--- a/src/hotspot/share/prims/jvmtiExport.hpp
+++ b/src/hotspot/share/prims/jvmtiExport.hpp
@@ -298,6 +298,11 @@ class JvmtiExport : public AllStatic {
static void decode_version_values(jint version, int * major, int * minor,
int * micro) NOT_JVMTI_RETURN;
+ // If the jvmti_thread_state is absent and any thread-filtered event
+ // is enabled globally, the state is created.
+ // Otherwise, thread->jvmti_thread_state() is returned.
+ static JvmtiThreadState* get_jvmti_thread_state(JavaThread *thread);
+
// single stepping management methods
static void at_single_stepping_point(JavaThread *thread, Method* method, address location) NOT_JVMTI_RETURN;
static void expose_single_stepping(JavaThread *thread) NOT_JVMTI_RETURN;
diff --git a/src/hotspot/share/prims/jvmtiManageCapabilities.cpp b/src/hotspot/share/prims/jvmtiManageCapabilities.cpp
index a3b256af84a8f..5bb354b704c04 100644
--- a/src/hotspot/share/prims/jvmtiManageCapabilities.cpp
+++ b/src/hotspot/share/prims/jvmtiManageCapabilities.cpp
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "logging/log.hpp"
+#include "runtime/mutexLocker.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiManageCapabilities.hpp"
@@ -55,7 +56,12 @@ jvmtiCapabilities JvmtiManageCapabilities::onload_solo_remaining_capabilities;
// all capabilities ever acquired
jvmtiCapabilities JvmtiManageCapabilities::acquired_capabilities;
+int JvmtiManageCapabilities::_can_support_virtual_threads_count = 0;
+
+Mutex* JvmtiManageCapabilities::_capabilities_lock = nullptr;
+
void JvmtiManageCapabilities::initialize() {
+ _capabilities_lock = new Mutex(Mutex::nosafepoint, "Capabilities_lock");
always_capabilities = init_always_capabilities();
onload_capabilities = init_onload_capabilities();
always_solo_capabilities = init_always_solo_capabilities();
@@ -211,8 +217,14 @@ void JvmtiManageCapabilities::copy_capabilities(const jvmtiCapabilities *from, j
}
}
+Mutex* JvmtiManageCapabilities::lock() {
+ if (Thread::current_or_null() == nullptr) {
+ return nullptr; // Detached thread, can be a call from Agent_OnLoad.
+ }
+ return _capabilities_lock;
+}
-void JvmtiManageCapabilities::get_potential_capabilities(const jvmtiCapabilities *current,
+void JvmtiManageCapabilities::get_potential_capabilities_nolock(const jvmtiCapabilities *current,
const jvmtiCapabilities *prohibited,
jvmtiCapabilities *result) {
// exclude prohibited capabilities, must be before adding current
@@ -231,13 +243,22 @@ void JvmtiManageCapabilities::get_potential_capabilities(const jvmtiCapabilities
}
}
+void JvmtiManageCapabilities::get_potential_capabilities(const jvmtiCapabilities* current,
+ const jvmtiCapabilities* prohibited,
+ jvmtiCapabilities* result) {
+ MutexLocker ml(lock(), Mutex::_no_safepoint_check_flag);
+ get_potential_capabilities_nolock(current, prohibited, result);
+}
+
jvmtiError JvmtiManageCapabilities::add_capabilities(const jvmtiCapabilities *current,
const jvmtiCapabilities *prohibited,
const jvmtiCapabilities *desired,
jvmtiCapabilities *result) {
+ MutexLocker ml(lock(), Mutex::_no_safepoint_check_flag);
+
// check that the capabilities being added are potential capabilities
jvmtiCapabilities temp;
- get_potential_capabilities(current, prohibited, &temp);
+ get_potential_capabilities_nolock(current, prohibited, &temp);
if (has_some(exclude(desired, &temp, &temp))) {
return JVMTI_ERROR_NOT_AVAILABLE;
}
@@ -259,6 +280,10 @@ jvmtiError JvmtiManageCapabilities::add_capabilities(const jvmtiCapabilities *cu
exclude(&always_solo_remaining_capabilities, desired, &always_solo_remaining_capabilities);
exclude(&onload_solo_remaining_capabilities, desired, &onload_solo_remaining_capabilities);
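+ // Count environments that newly acquire can_support_virtual_threads; update() keeps the exported flag set while this count is non-zero.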
+ if (desired->can_support_virtual_threads != 0 && current->can_support_virtual_threads == 0) {
+ _can_support_virtual_threads_count++;
+ }
+
// return the result
either(current, desired, result);
@@ -271,6 +296,8 @@ jvmtiError JvmtiManageCapabilities::add_capabilities(const jvmtiCapabilities *cu
void JvmtiManageCapabilities::relinquish_capabilities(const jvmtiCapabilities *current,
const jvmtiCapabilities *unwanted,
jvmtiCapabilities *result) {
+ MutexLocker ml(lock(), Mutex::_no_safepoint_check_flag);
+
jvmtiCapabilities to_trash;
jvmtiCapabilities temp;
@@ -283,6 +310,12 @@ void JvmtiManageCapabilities::relinquish_capabilities(const jvmtiCapabilities *c
either(&onload_solo_remaining_capabilities, both(&onload_solo_capabilities, &to_trash, &temp),
&onload_solo_remaining_capabilities);
+ if (to_trash.can_support_virtual_threads != 0) {
+ assert(current->can_support_virtual_threads != 0, "sanity check");
+ assert(_can_support_virtual_threads_count > 0, "sanity check");
+ _can_support_virtual_threads_count--;
+ }
+
update();
// return the result
@@ -366,7 +399,7 @@ void JvmtiManageCapabilities::update() {
JvmtiExport::set_can_post_frame_pop(avail.can_generate_frame_pop_events);
JvmtiExport::set_can_pop_frame(avail.can_pop_frame);
JvmtiExport::set_can_force_early_return(avail.can_force_early_return);
- JvmtiExport::set_can_support_virtual_threads(avail.can_support_virtual_threads);
+ JvmtiExport::set_can_support_virtual_threads(_can_support_virtual_threads_count != 0);
JvmtiExport::set_should_clean_up_heap_objects(avail.can_generate_breakpoint_events);
JvmtiExport::set_can_get_owned_monitor_info(avail.can_get_owned_monitor_info ||
avail.can_get_owned_monitor_stack_depth_info);
diff --git a/src/hotspot/share/prims/jvmtiManageCapabilities.hpp b/src/hotspot/share/prims/jvmtiManageCapabilities.hpp
index e588be4fa1579..545909e3c4eac 100644
--- a/src/hotspot/share/prims/jvmtiManageCapabilities.hpp
+++ b/src/hotspot/share/prims/jvmtiManageCapabilities.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,12 @@ class JvmtiManageCapabilities : public AllStatic {
// all capabilities ever acquired
static jvmtiCapabilities acquired_capabilities;
+ // counter for the agents possessing the can_support_virtual_threads capability
+ static int _can_support_virtual_threads_count;
+
+ // lock to access the class data
+ static Mutex* _capabilities_lock;
+
// basic intenal operations
static jvmtiCapabilities *either(const jvmtiCapabilities *a, const jvmtiCapabilities *b, jvmtiCapabilities *result);
static jvmtiCapabilities *both(const jvmtiCapabilities *a, const jvmtiCapabilities *b, jvmtiCapabilities *result);
@@ -61,6 +67,14 @@ class JvmtiManageCapabilities : public AllStatic {
static jvmtiCapabilities init_always_solo_capabilities();
static jvmtiCapabilities init_onload_solo_capabilities();
+ // returns nullptr when called from a detached thread (e.g. during the onload phase)
+ static Mutex* lock();
+
+ // get_potential_capabilities without lock
+ static void get_potential_capabilities_nolock(const jvmtiCapabilities* current,
+ const jvmtiCapabilities* prohibited,
+ jvmtiCapabilities* result);
+
public:
static void initialize();
diff --git a/src/hotspot/share/prims/jvmtiTagMap.cpp b/src/hotspot/share/prims/jvmtiTagMap.cpp
index 6908e1994ba7d..07709b9e0179a 100644
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp
@@ -2320,7 +2320,10 @@ bool StackRefCollector::do_frame(vframe* vf) {
// Follow oops from compiled nmethod.
if (jvf->cb() != nullptr && jvf->cb()->is_nmethod()) {
_blk->set_context(_thread_tag, _tid, _depth, method);
- jvf->cb()->as_nmethod()->oops_do(_blk);
+ // Need to apply load barriers for unmounted vthreads.
+ nmethod* nm = jvf->cb()->as_nmethod();
+ nm->run_nmethod_entry_barrier();
+ nm->oops_do(_blk);
if (_blk->stopped()) {
return false;
}
diff --git a/src/hotspot/share/prims/jvmtiThreadState.hpp b/src/hotspot/share/prims/jvmtiThreadState.hpp
index 8340a44d1427f..4dc24487058df 100644
--- a/src/hotspot/share/prims/jvmtiThreadState.hpp
+++ b/src/hotspot/share/prims/jvmtiThreadState.hpp
@@ -465,9 +465,12 @@ class JvmtiThreadState : public CHeapObj {
// already holding JvmtiThreadState_lock - retrieve or create JvmtiThreadState
// Can return null if JavaThread is exiting.
+ // Callers are responsible for calling recompute_thread_filtered() to update event bits
+ // if thread-filtered events are enabled globally.
static JvmtiThreadState *state_for_while_locked(JavaThread *thread, oop thread_oop = nullptr);
// retrieve or create JvmtiThreadState
// Can return null if JavaThread is exiting.
+ // Calls recompute_thread_filtered() to update event bits if thread-filtered events are enabled globally.
static JvmtiThreadState *state_for(JavaThread *thread, Handle thread_handle = Handle());
// JVMTI ForceEarlyReturn support
diff --git a/src/hotspot/share/prims/jvmtiThreadState.inline.hpp b/src/hotspot/share/prims/jvmtiThreadState.inline.hpp
index bbcbe14e56e99..1737bfd6a9f4c 100644
--- a/src/hotspot/share/prims/jvmtiThreadState.inline.hpp
+++ b/src/hotspot/share/prims/jvmtiThreadState.inline.hpp
@@ -109,6 +109,7 @@ inline JvmtiThreadState* JvmtiThreadState::state_for(JavaThread *thread, Handle
MutexLocker mu(JvmtiThreadState_lock);
// check again with the lock held
state = state_for_while_locked(thread, thread_handle());
+ JvmtiEventController::recompute_thread_filtered(state);
} else {
// Check possible safepoint even if state is non-null.
// (Note: the thread argument isn't the current thread)
diff --git a/src/hotspot/share/prims/resolvedMethodTable.cpp b/src/hotspot/share/prims/resolvedMethodTable.cpp
index fbcbd2b15907e..c10fe1f66010c 100644
--- a/src/hotspot/share/prims/resolvedMethodTable.cpp
+++ b/src/hotspot/share/prims/resolvedMethodTable.cpp
@@ -126,11 +126,9 @@ class ResolvedMethodTableLookup : StackObj {
uintx get_hash() const {
return _hash;
}
- bool equals(WeakHandle* value, bool* is_dead) {
+ bool equals(WeakHandle* value) {
oop val_oop = value->peek();
if (val_oop == nullptr) {
- // dead oop, mark this hash dead for cleaning
- *is_dead = true;
return false;
}
bool equals = _method == java_lang_invoke_ResolvedMethodName::vmtarget(val_oop);
@@ -141,6 +139,10 @@ class ResolvedMethodTableLookup : StackObj {
_found = Handle(_thread, value->resolve());
return true;
}
+ bool is_dead(WeakHandle* value) {
+ oop val_oop = value->peek();
+ return val_oop == nullptr;
+ }
};
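
The lookup object changed above (and the matching ones in finalizerService.cpp and threadIdTable.cpp further down) moves from a combined equals(value, is_dead) callback to separate equals(value) and is_dead(value) callbacks. A minimal standalone sketch of that functor shape, using an invented WeakSlot type and a plain string key instead of HotSpot's ConcurrentHashTable and oops:

    #include <cstdint>
    #include <cstring>

    // Illustrative stand-in for a weakly-referencing table slot: the referent
    // may have been collected, in which case peek() returns nullptr.
    struct WeakSlot {
      const char* referent;
      const char* peek() const { return referent; }
    };

    // Lookup functor in the shape used above: hashing, matching and liveness
    // are three independent callbacks instead of equals() also reporting deadness.
    struct NameLookup {
      const char* _name;
      uintptr_t get_hash() const {
        uintptr_t h = 1469598103934665603ull;            // FNV-1a, purely illustrative
        for (const char* p = _name; *p != '\0'; p++) {
          h = (h ^ (unsigned char)*p) * 1099511628211ull;
        }
        return h;
      }
      bool equals(const WeakSlot* slot) const {
        const char* v = slot->peek();
        return v != nullptr && std::strcmp(v, _name) == 0;  // dead slots never match
      }
      bool is_dead(const WeakSlot* slot) const {
        return slot->peek() == nullptr;                     // cleaned independently of matching
      }
    };
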
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
index b0a497c4e63d2..f68147fea4f2f 100644
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -1865,6 +1865,12 @@ WB_ENTRY(jint, WB_GetConstantPoolCacheLength(JNIEnv* env, jobject wb, jclass kla
return cp->cache()->length();
WB_END
+WB_ENTRY(jobjectArray, WB_GetResolvedReferences(JNIEnv* env, jobject wb, jclass klass))
+ InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass)));
+  objArrayOop resolved_refs = ik->constants()->resolved_references();
+ return (jobjectArray)JNIHandles::make_local(THREAD, resolved_refs);
+WB_END
+
WB_ENTRY(jint, WB_ConstantPoolRemapInstructionOperandFromCache(JNIEnv* env, jobject wb, jclass klass, jint index))
InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass)));
ConstantPool* cp = ik->constants();
@@ -2550,6 +2556,18 @@ WB_ENTRY(jboolean, WB_SetVirtualThreadsNotifyJvmtiMode(JNIEnv* env, jobject wb,
return result;
WB_END
+WB_ENTRY(void, WB_PreTouchMemory(JNIEnv* env, jobject wb, jlong addr, jlong size))
+ void* const from = (void*)addr;
+ void* const to = (void*)(addr + size);
+  if (from < to) {
+ os::pretouch_memory(from, to, os::vm_page_size());
+ }
+WB_END
+
+WB_ENTRY(void, WB_CleanMetaspaces(JNIEnv* env, jobject target))
+ ClassLoaderDataGraph::safepoint_and_clean_metaspaces();
+WB_END
+
#define CC (char*)
static JNINativeMethod methods[] = {
@@ -2737,6 +2755,7 @@ static JNINativeMethod methods[] = {
{CC"getConstantPool0", CC"(Ljava/lang/Class;)J", (void*)&WB_GetConstantPool },
{CC"getConstantPoolCacheIndexTag0", CC"()I", (void*)&WB_GetConstantPoolCacheIndexTag},
{CC"getConstantPoolCacheLength0", CC"(Ljava/lang/Class;)I", (void*)&WB_GetConstantPoolCacheLength},
+ {CC"getResolvedReferences0", CC"(Ljava/lang/Class;)[Ljava/lang/Object;", (void*)&WB_GetResolvedReferences},
{CC"remapInstructionOperandFromCPCache0",
CC"(Ljava/lang/Class;I)I", (void*)&WB_ConstantPoolRemapInstructionOperandFromCache},
{CC"encodeConstantPoolIndyIndex0",
@@ -2829,6 +2848,8 @@ static JNINativeMethod methods[] = {
{CC"lockCritical", CC"()V", (void*)&WB_LockCritical},
{CC"unlockCritical", CC"()V", (void*)&WB_UnlockCritical},
{CC"setVirtualThreadsNotifyJvmtiMode", CC"(Z)Z", (void*)&WB_SetVirtualThreadsNotifyJvmtiMode},
+ {CC"preTouchMemory", CC"(JJ)V", (void*)&WB_PreTouchMemory},
+ {CC"cleanMetaspaces", CC"()V", (void*)&WB_CleanMetaspaces},
};
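
WB_PreTouchMemory above forwards to os::pretouch_memory, whose job is to fault in every page of a range before the memory is actually used, so later accesses do not pay the page-fault cost. A rough standalone sketch of the idea (this is not HotSpot's implementation; the real routine is more careful about concurrent use and touch width):

    #include <cstddef>

    // Touch one location per page in [from, to) so the OS backs the range with
    // real pages up front. page_size is assumed to be a power of two.
    static void pretouch_range(void* from, void* to, size_t page_size) {
      if (from >= to) {
        return;                                   // empty or inverted range: nothing to touch
      }
      volatile char* p   = static_cast<volatile char*>(from);
      volatile char* end = static_cast<volatile char*>(to);
      for (; p < end; p += page_size) {
        (void)*p;                                 // a read is enough to fault the page in
      }
    }
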
diff --git a/src/hotspot/share/runtime/abstract_vm_version.cpp b/src/hotspot/share/runtime/abstract_vm_version.cpp
index 2fdb820982a93..fee840aba9ad3 100644
--- a/src/hotspot/share/runtime/abstract_vm_version.cpp
+++ b/src/hotspot/share/runtime/abstract_vm_version.cpp
@@ -240,6 +240,16 @@ const char* Abstract_VM_Version::internal_vm_info_string() {
#define HOTSPOT_BUILD_COMPILER "MS VC++ 17.2 (VS2022)"
#elif _MSC_VER == 1933
#define HOTSPOT_BUILD_COMPILER "MS VC++ 17.3 (VS2022)"
+ #elif _MSC_VER == 1934
+ #define HOTSPOT_BUILD_COMPILER "MS VC++ 17.4 (VS2022)"
+ #elif _MSC_VER == 1935
+ #define HOTSPOT_BUILD_COMPILER "MS VC++ 17.5 (VS2022)"
+ #elif _MSC_VER == 1936
+ #define HOTSPOT_BUILD_COMPILER "MS VC++ 17.6 (VS2022)"
+ #elif _MSC_VER == 1937
+ #define HOTSPOT_BUILD_COMPILER "MS VC++ 17.7 (VS2022)"
+ #elif _MSC_VER == 1938
+ #define HOTSPOT_BUILD_COMPILER "MS VC++ 17.8 (VS2022)"
#else
#define HOTSPOT_BUILD_COMPILER "unknown MS VC++:" XSTR(_MSC_VER)
#endif
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index b0bfae08995f4..720a0e9ba29b5 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -1904,23 +1904,13 @@ bool Arguments::check_vm_args_consistency() {
}
#endif
-
-#if !defined(X86) && !defined(AARCH64) && !defined(RISCV64) && !defined(ARM) && !defined(PPC64)
+#if !defined(X86) && !defined(AARCH64) && !defined(RISCV64) && !defined(ARM) && !defined(PPC64) && !defined(S390)
if (LockingMode == LM_LIGHTWEIGHT) {
FLAG_SET_CMDLINE(LockingMode, LM_LEGACY);
warning("New lightweight locking not supported on this platform");
}
#endif
- if (UseHeavyMonitors) {
- if (FLAG_IS_CMDLINE(LockingMode) && LockingMode != LM_MONITOR) {
- jio_fprintf(defaultStream::error_stream(),
- "Conflicting -XX:+UseHeavyMonitors and -XX:LockingMode=%d flags", LockingMode);
- return false;
- }
- FLAG_SET_CMDLINE(LockingMode, LM_MONITOR);
- }
-
#if !defined(X86) && !defined(AARCH64) && !defined(PPC64) && !defined(RISCV64) && !defined(S390)
if (LockingMode == LM_MONITOR) {
jio_fprintf(defaultStream::error_stream(),
diff --git a/src/hotspot/share/runtime/atomic.hpp b/src/hotspot/share/runtime/atomic.hpp
index c85bf9055ab39..ac0ce49d26e56 100644
--- a/src/hotspot/share/runtime/atomic.hpp
+++ b/src/hotspot/share/runtime/atomic.hpp
@@ -398,11 +398,15 @@ class Atomic : AllStatic {
T compare_value,
T exchange_value);
- // Support platforms that do not provide Read-Modify-Write
- // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
- // this class.
+ // Support platforms that do not provide Read-Modify-Write atomic
+ // accesses for 1-byte and 8-byte widths. To use, derive PlatformCmpxchg<1>,
+ // PlatformAdd, PlatformXchg from these classes.
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
struct CmpxchgByteUsingInt;
+  template<size_t byte_size>
+  struct XchgUsingCmpxchg;
+  template<size_t byte_size>
+  class AddUsingCmpxchg;
private:
// Dispatch handler for xchg. Provides type-based validity
@@ -677,6 +681,47 @@ struct Atomic::CmpxchgByteUsingInt {
atomic_memory_order order) const;
};
+// Define the class before including platform file, which may use this
+// as a base class, requiring it be complete. The definition is later
+// in this file, near the other definitions related to xchg.
+template<size_t byte_size>
+struct Atomic::XchgUsingCmpxchg {
+  template<typename T>
+ T operator()(T volatile* dest,
+ T exchange_value,
+ atomic_memory_order order) const;
+};
+
+// Define the class before including platform file, which may use this
+// as a base class, requiring it be complete.
+template<size_t byte_size>
+class Atomic::AddUsingCmpxchg {
+public:
+  template<typename D, typename I>
+ static inline D add_then_fetch(D volatile* dest,
+ I add_value,
+ atomic_memory_order order) {
+    D addend = add_value;
+    return fetch_then_add(dest, add_value, order) + addend;
+ }
+
+  template<typename D, typename I>
+ static inline D fetch_then_add(D volatile* dest,
+ I add_value,
+ atomic_memory_order order) {
+ STATIC_ASSERT(byte_size == sizeof(I));
+ STATIC_ASSERT(byte_size == sizeof(D));
+
+ D old_value;
+ D new_value;
+ do {
+ old_value = Atomic::load(dest);
+ new_value = old_value + add_value;
+ } while (old_value != Atomic::cmpxchg(dest, old_value, new_value, order));
+ return old_value;
+ }
+};
+
// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template are provided, nor are there any generic
@@ -1170,4 +1215,18 @@ inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order or
   return XchgImpl<T, D>()(dest, exchange_value, order);
}
+template<size_t byte_size>
+template<typename T>
+inline T Atomic::XchgUsingCmpxchg<byte_size>::operator()(T volatile* dest,
+ T exchange_value,
+ atomic_memory_order order) const {
+ STATIC_ASSERT(byte_size == sizeof(T));
+
+ T old_value;
+ do {
+ old_value = Atomic::load(dest);
+ } while (old_value != Atomic::cmpxchg(dest, old_value, exchange_value, order));
+ return old_value;
+}
+
#endif // SHARE_RUNTIME_ATOMIC_HPP
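
The XchgUsingCmpxchg and AddUsingCmpxchg helpers in the atomic.hpp hunk above implement exchange and add on top of compare-and-swap: load the current value, attempt a CAS from it, and retry until the CAS succeeds, at which point the observed value is the operation's old value. The same retry loop expressed with std::atomic (a standalone sketch, not HotSpot's Atomic API):

    #include <atomic>

    // Atomic exchange built from compare-and-swap: retry until the CAS from the
    // last observed value succeeds; that observed value is the old value.
    template <typename T>
    T xchg_using_cas(std::atomic<T>& dest, T exchange_value) {
      T old_value = dest.load(std::memory_order_relaxed);
      while (!dest.compare_exchange_weak(old_value, exchange_value,
                                         std::memory_order_seq_cst,
                                         std::memory_order_relaxed)) {
        // compare_exchange_weak refreshed old_value; retry.
      }
      return old_value;
    }

    // Fetch-then-add built the same way: recompute old + add until the CAS wins.
    template <typename T>
    T fetch_then_add_using_cas(std::atomic<T>& dest, T add_value) {
      T old_value = dest.load(std::memory_order_relaxed);
      while (!dest.compare_exchange_weak(old_value, static_cast<T>(old_value + add_value),
                                         std::memory_order_seq_cst,
                                         std::memory_order_relaxed)) {
        // retry with the refreshed old_value
      }
      return old_value;
    }
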
diff --git a/src/hotspot/share/runtime/continuationWrapper.cpp b/src/hotspot/share/runtime/continuationWrapper.cpp
index 3b967a075545e..9ef02bed67033 100644
--- a/src/hotspot/share/runtime/continuationWrapper.cpp
+++ b/src/hotspot/share/runtime/continuationWrapper.cpp
@@ -38,16 +38,12 @@
#include "runtime/stackChunkFrameStream.inline.hpp"
ContinuationWrapper::ContinuationWrapper(const RegisterMap* map)
- : _thread(map->thread()),
- _entry(Continuation::get_continuation_entry_for_continuation(_thread, map->stack_chunk()->cont())),
- _continuation(map->stack_chunk()->cont())
- {
- assert(oopDesc::is_oop(_continuation),"Invalid cont: " INTPTR_FORMAT, p2i((void*)_continuation));
+ : ContinuationWrapper(map->thread(),
+ Continuation::get_continuation_entry_for_continuation(map->thread(), map->stack_chunk()->cont()),
+ map->stack_chunk()->cont()) {
assert(_entry == nullptr || _continuation == _entry->cont_oop(map->thread()),
"cont: " INTPTR_FORMAT " entry: " INTPTR_FORMAT " entry_sp: " INTPTR_FORMAT,
p2i( (oopDesc*)_continuation), p2i((oopDesc*)_entry->cont_oop(map->thread())), p2i(entrySP()));
- disallow_safepoint();
- read();
}
const frame ContinuationWrapper::last_frame() {
@@ -96,4 +92,3 @@ bool ContinuationWrapper::chunk_invariant() const {
return true;
}
#endif // ASSERT
-
diff --git a/src/hotspot/share/runtime/continuationWrapper.inline.hpp b/src/hotspot/share/runtime/continuationWrapper.inline.hpp
index 03b2c726a0e59..0215f765c5dad 100644
--- a/src/hotspot/share/runtime/continuationWrapper.inline.hpp
+++ b/src/hotspot/share/runtime/continuationWrapper.inline.hpp
@@ -49,6 +49,7 @@ class ContinuationWrapper : public StackObj {
// These oops are managed by SafepointOp
oop _continuation; // jdk.internal.vm.Continuation instance
stackChunkOop _tail;
+ bool _done;
ContinuationWrapper(const ContinuationWrapper& cont); // no copy constructor
@@ -58,6 +59,7 @@ class ContinuationWrapper : public StackObj {
void disallow_safepoint() {
#ifdef ASSERT
+ assert(!_done, "");
assert(_continuation != nullptr, "");
_current_thread = Thread::current();
if (_current_thread->is_Java_thread()) {
@@ -69,16 +71,19 @@ class ContinuationWrapper : public StackObj {
void allow_safepoint() {
#ifdef ASSERT
// we could have already allowed safepoints in done
- if (_continuation != nullptr && _current_thread->is_Java_thread()) {
+ if (!_done && _current_thread->is_Java_thread()) {
JavaThread::cast(_current_thread)->dec_no_safepoint_count();
}
#endif
}
+ ContinuationWrapper(JavaThread* thread, ContinuationEntry* entry, oop continuation);
+
public:
void done() {
allow_safepoint(); // must be done first
- _continuation = nullptr;
+ _done = true;
+    *reinterpret_cast<intptr_t*>(&_continuation) = badHeapOopVal;
     *reinterpret_cast<intptr_t*>(&_tail) = badHeapOopVal;
}
@@ -140,23 +145,19 @@ class ContinuationWrapper : public StackObj {
#endif
};
-inline ContinuationWrapper::ContinuationWrapper(JavaThread* thread, oop continuation)
- : _thread(thread), _entry(thread->last_continuation()), _continuation(continuation)
- {
+inline ContinuationWrapper::ContinuationWrapper(JavaThread* thread, ContinuationEntry* entry, oop continuation)
+ : _thread(thread), _entry(entry), _continuation(continuation), _done(false) {
assert(oopDesc::is_oop(_continuation),
"Invalid continuation object: " INTPTR_FORMAT, p2i((void*)_continuation));
disallow_safepoint();
read();
}
+inline ContinuationWrapper::ContinuationWrapper(JavaThread* thread, oop continuation)
+ : ContinuationWrapper(thread, thread->last_continuation(), continuation) {}
+
inline ContinuationWrapper::ContinuationWrapper(oop continuation)
- : _thread(nullptr), _entry(nullptr), _continuation(continuation)
- {
- assert(oopDesc::is_oop(_continuation),
- "Invalid continuation object: " INTPTR_FORMAT, p2i((void*)_continuation));
- disallow_safepoint();
- read();
-}
+ : ContinuationWrapper(nullptr, nullptr, continuation) {}
inline bool ContinuationWrapper::is_preempted() {
return jdk_internal_vm_Continuation::is_preempted(_continuation);
diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp
index d62100c4cf84a..44baccc3bc7c3 100644
--- a/src/hotspot/share/runtime/deoptimization.cpp
+++ b/src/hotspot/share/runtime/deoptimization.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -82,6 +82,7 @@
#include "runtime/stackValue.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/vframe.hpp"
@@ -1614,6 +1615,10 @@ void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableAr
reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
}
}
+ // These objects may escape when we return to Interpreter after deoptimization.
+  // We need a barrier so that stores that initialize these objects can't be reordered
+ // with subsequent stores that make these objects accessible by other threads.
+ OrderAccess::storestore();
}
@@ -1648,9 +1653,19 @@ bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
-          BasicLock* lock = mon_info->lock();
- ObjectSynchronizer::enter(obj, lock, deoptee_thread);
- assert(mon_info->owner()->is_locked(), "object must be locked now");
+ if (LockingMode == LM_LIGHTWEIGHT && exec_mode == Unpack_none) {
+ // We have lost information about the correct state of the lock stack.
+ // Inflate the locks instead. Enter then inflate to avoid races with
+ // deflation.
+ ObjectSynchronizer::enter(obj, nullptr, deoptee_thread);
+ assert(mon_info->owner()->is_locked(), "object must be locked now");
+ ObjectMonitor* mon = ObjectSynchronizer::inflate(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
+ assert(mon->owner() == deoptee_thread, "must be");
+ } else {
+ BasicLock* lock = mon_info->lock();
+ ObjectSynchronizer::enter(obj, lock, deoptee_thread);
+ assert(mon_info->owner()->is_locked(), "object must be locked now");
+ }
}
}
}
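
The OrderAccess::storestore() added in reassign_fields above is the usual safe-publication rule: every store that initializes a reallocated object must be ordered before the store that makes the object reachable from other threads. The same pattern with standard C++ atomics (a sketch with illustrative names, not HotSpot code):

    #include <atomic>

    struct Node { int a; int b; };

    std::atomic<Node*> g_published{nullptr};

    void publish(Node* n) {
      n->a = 1;                                            // initializing stores
      n->b = 2;
      std::atomic_thread_fence(std::memory_order_release); // store-store ordering before publication
      g_published.store(n, std::memory_order_relaxed);     // only now can other threads find the node
    }
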
diff --git a/src/hotspot/share/runtime/frame.cpp b/src/hotspot/share/runtime/frame.cpp
index d4e7c26f18b6e..6266bd004ea25 100644
--- a/src/hotspot/share/runtime/frame.cpp
+++ b/src/hotspot/share/runtime/frame.cpp
@@ -52,6 +52,7 @@
#include "runtime/monitorChunk.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
+#include "runtime/safefetch.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackValue.hpp"
#include "runtime/stubCodeGenerator.hpp"
@@ -301,6 +302,14 @@ bool frame::is_entry_frame_valid(JavaThread* thread) const {
return (jfa->last_Java_sp() > sp());
}
+Method* frame::safe_interpreter_frame_method() const {
+ Method** m_addr = interpreter_frame_method_addr();
+ if (m_addr == nullptr) {
+ return nullptr;
+ }
+ return (Method*) SafeFetchN((intptr_t*) m_addr, 0);
+}
+
bool frame::should_be_deoptimized() const {
if (_deopt_state == is_deoptimized ||
!is_compiled_frame() ) return false;
diff --git a/src/hotspot/share/runtime/frame.hpp b/src/hotspot/share/runtime/frame.hpp
index 2b62b4e1c730f..a66b9dee291f0 100644
--- a/src/hotspot/share/runtime/frame.hpp
+++ b/src/hotspot/share/runtime/frame.hpp
@@ -236,6 +236,8 @@ class frame {
bool is_entry_frame_valid(JavaThread* thread) const;
+ Method* safe_interpreter_frame_method() const;
+
// All frames:
// A low-level interface for vframes:
@@ -370,6 +372,7 @@ class frame {
BasicObjectLock* next_monitor_in_interpreter_frame(BasicObjectLock* current) const;
BasicObjectLock* previous_monitor_in_interpreter_frame(BasicObjectLock* current) const;
static int interpreter_frame_monitor_size();
+ static int interpreter_frame_monitor_size_in_bytes();
void interpreter_frame_verify_monitor(BasicObjectLock* value) const;
diff --git a/src/hotspot/share/runtime/frame.inline.hpp b/src/hotspot/share/runtime/frame.inline.hpp
index b6116a0341d7c..2cfaba170540d 100644
--- a/src/hotspot/share/runtime/frame.inline.hpp
+++ b/src/hotspot/share/runtime/frame.inline.hpp
@@ -104,4 +104,9 @@ inline CodeBlob* frame::get_cb() const {
return _cb;
}
+inline int frame::interpreter_frame_monitor_size_in_bytes() {
+ // Number of bytes for a monitor.
+ return frame::interpreter_frame_monitor_size() * wordSize;
+}
+
#endif // SHARE_RUNTIME_FRAME_INLINE_HPP
diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp
index 5b609f858b7e3..7137270fd1749 100644
--- a/src/hotspot/share/runtime/globals.hpp
+++ b/src/hotspot/share/runtime/globals.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -295,6 +295,9 @@ const int ObjectAlignmentInBytes = 8;
product(bool, UseInlineCaches, true, \
"Use Inline Caches for virtual calls ") \
\
+ product(size_t, InlineCacheBufferSize, 10*K, EXPERIMENTAL, \
+ "InlineCacheBuffer size") \
+ \
product(bool, InlineArrayCopy, true, DIAGNOSTIC, \
"Inline arraycopy native that is known to be part of " \
"base library DLL") \
@@ -1050,13 +1053,9 @@ const int ObjectAlignmentInBytes = 8;
product(bool, ErrorFileToStdout, false, \
"If true, error data is printed to stdout instead of a file") \
\
- develop(bool, UseHeavyMonitors, false, \
- "(Deprecated) Use heavyweight instead of lightweight Java " \
- "monitors") \
- \
develop(bool, VerifyHeavyMonitors, false, \
"Checks that no stack locking happens when using " \
- "+UseHeavyMonitors") \
+ "-XX:LockingMode=0 (LM_MONITOR)") \
\
product(bool, PrintStringTableStatistics, false, \
"print statistics about the StringTable and SymbolTable") \
@@ -1976,6 +1975,13 @@ const int ObjectAlignmentInBytes = 8;
"1: monitors & legacy stack-locking (LM_LEGACY, default), " \
"2: monitors & new lightweight locking (LM_LIGHTWEIGHT)") \
range(0, 2) \
+ \
+ product(uint, TrimNativeHeapInterval, 0, \
+ "Interval, in ms, at which the JVM will trim the native heap if " \
+ "the platform supports that. Lower values will reclaim memory " \
+ "more eagerly at the cost of higher overhead. A value of 0 " \
+ "(default) disables native heap trimming.") \
+ range(0, UINT_MAX) \
// end of RUNTIME_FLAGS
diff --git a/src/hotspot/share/runtime/handshake.cpp b/src/hotspot/share/runtime/handshake.cpp
index b4534837cad37..e3ecc2be6fd06 100644
--- a/src/hotspot/share/runtime/handshake.cpp
+++ b/src/hotspot/share/runtime/handshake.cpp
@@ -497,8 +497,17 @@ HandshakeOperation* HandshakeState::get_op_for_self(bool allow_suspend, bool che
}
bool HandshakeState::has_operation(bool allow_suspend, bool check_async_exception) {
- MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
- return get_op_for_self(allow_suspend, check_async_exception) != nullptr;
+ // We must not block here as that could lead to deadlocks if we already hold an
+ // "external" mutex. If the try_lock fails then we assume that there is an operation
+ // and force the caller to check more carefully in a safer context. If we can't get
+ // the lock it means another thread is trying to handshake with us, so it can't
+ // happen during thread termination and destruction.
+ bool ret = true;
+ if (_lock.try_lock()) {
+ ret = get_op_for_self(allow_suspend, check_async_exception) != nullptr;
+ _lock.unlock();
+ }
+ return ret;
}
bool HandshakeState::has_async_exception_operation() {
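
HandshakeState::has_operation above is changed to use try_lock so a thread that already holds some unrelated mutex cannot deadlock while merely asking whether work is pending; when the lock is contended it conservatively answers true and lets the caller re-check later from a context where blocking is safe. The same shape with std::mutex (illustrative names, not the HotSpot classes):

    #include <mutex>

    class PendingWork {
      std::mutex _lock;
      int _queued = 0;

    public:
      void add() {
        std::lock_guard<std::mutex> g(_lock);
        ++_queued;
      }

      // Non-blocking query: if the lock cannot be taken right now, report
      // "maybe" rather than block, and let the caller check again later.
      bool maybe_has_work() {
        if (_lock.try_lock()) {
          bool has = (_queued > 0);
          _lock.unlock();
          return has;
        }
        return true;  // conservative answer under contention
      }
    };
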
diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp
index 8746efebc21d9..48709aa5c4390 100644
--- a/src/hotspot/share/runtime/java.cpp
+++ b/src/hotspot/share/runtime/java.cpp
@@ -70,6 +70,7 @@
#include "runtime/task.hpp"
#include "runtime/threads.hpp"
#include "runtime/timer.hpp"
+#include "runtime/trimNativeHeap.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_version.hpp"
@@ -477,6 +478,8 @@ void before_exit(JavaThread* thread, bool halt) {
StatSampler::disengage();
StatSampler::destroy();
+ NativeHeapTrimmer::cleanup();
+
// Stop concurrent GC threads
Universe::heap()->stop();
diff --git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp
index b469c1a679010..ff8c842daaafb 100644
--- a/src/hotspot/share/runtime/javaThread.cpp
+++ b/src/hotspot/share/runtime/javaThread.cpp
@@ -1587,6 +1587,13 @@ const char* JavaThread::name() const {
return Thread::name();
}
+// Like name() but doesn't include the protection check. This must only be
+// called when it is known to be safe, even though the protection check can't tell
+// that e.g. when this thread is the init_thread() - see instanceKlass.cpp.
+const char* JavaThread::name_raw() const {
+ return get_thread_name_string();
+}
+
// Returns a non-null representation of this thread's name, or a suitable
// descriptive string if there is no set name.
const char* JavaThread::get_thread_name_string(char* buf, int buflen) const {
diff --git a/src/hotspot/share/runtime/javaThread.hpp b/src/hotspot/share/runtime/javaThread.hpp
index f2cb56646d4cc..e8e6ccbfe6a57 100644
--- a/src/hotspot/share/runtime/javaThread.hpp
+++ b/src/hotspot/share/runtime/javaThread.hpp
@@ -904,6 +904,7 @@ class JavaThread: public Thread {
// Misc. operations
const char* name() const;
+ const char* name_raw() const;
const char* type_name() const { return "JavaThread"; }
static const char* name_for(oop thread_obj);
diff --git a/src/hotspot/share/runtime/jniHandles.cpp b/src/hotspot/share/runtime/jniHandles.cpp
index feceec366a597..25c8aa8b10b6e 100644
--- a/src/hotspot/share/runtime/jniHandles.cpp
+++ b/src/hotspot/share/runtime/jniHandles.cpp
@@ -199,13 +199,9 @@ jobjectRefType JNIHandles::handle_type(JavaThread* thread, jobject handle) {
default:
ShouldNotReachHere();
}
- } else {
+ } else if (is_local_handle(thread, handle) || is_frame_handle(thread, handle)) {
// Not in global storage. Might be a local handle.
- if (is_local_handle(thread, handle) || is_frame_handle(thread, handle)) {
- result = JNILocalRefType;
- } else {
- ShouldNotReachHere();
- }
+ result = JNILocalRefType;
}
return result;
}
diff --git a/src/hotspot/share/runtime/lockStack.cpp b/src/hotspot/share/runtime/lockStack.cpp
index 5fd5297fd5c02..b4a3bf1e8e6c0 100644
--- a/src/hotspot/share/runtime/lockStack.cpp
+++ b/src/hotspot/share/runtime/lockStack.cpp
@@ -77,3 +77,15 @@ void LockStack::verify(const char* msg) const {
}
}
#endif
+
+void LockStack::print_on(outputStream* st) {
+ for (int i = to_index(_top); (--i) >= 0;) {
+ st->print("LockStack[%d]: ", i);
+ oop o = _base[i];
+ if (oopDesc::is_oop(o)) {
+ o->print_on(st);
+ } else {
+ st->print_cr("not an oop: " PTR_FORMAT, p2i(o));
+ }
+ }
+}
diff --git a/src/hotspot/share/runtime/lockStack.hpp b/src/hotspot/share/runtime/lockStack.hpp
index ce6a96bcfe624..25ab7a8de052a 100644
--- a/src/hotspot/share/runtime/lockStack.hpp
+++ b/src/hotspot/share/runtime/lockStack.hpp
@@ -30,8 +30,9 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/sizes.hpp"
-class Thread;
+class JavaThread;
class OopClosure;
+class outputStream;
class LockStack {
friend class VMStructs;
@@ -91,6 +92,8 @@ class LockStack {
// GC support
inline void oops_do(OopClosure* cl);
+ // Printing
+ void print_on(outputStream* st);
};
#endif // SHARE_RUNTIME_LOCKSTACK_HPP
diff --git a/src/hotspot/share/runtime/lockStack.inline.hpp b/src/hotspot/share/runtime/lockStack.inline.hpp
index 186c7169fae11..b36be2f72de0e 100644
--- a/src/hotspot/share/runtime/lockStack.inline.hpp
+++ b/src/hotspot/share/runtime/lockStack.inline.hpp
@@ -47,10 +47,14 @@ inline bool LockStack::can_push() const {
}
inline bool LockStack::is_owning_thread() const {
- JavaThread* thread = JavaThread::current();
- bool is_owning = &thread->lock_stack() == this;
- assert(is_owning == (get_thread() == thread), "is_owning sanity");
- return is_owning;
+ Thread* current = Thread::current();
+ if (current->is_Java_thread()) {
+ JavaThread* thread = JavaThread::cast(current);
+ bool is_owning = &thread->lock_stack() == this;
+ assert(is_owning == (get_thread() == thread), "is_owning sanity");
+ return is_owning;
+ }
+ return false;
}
inline void LockStack::push(oop o) {
@@ -100,16 +104,10 @@ inline void LockStack::remove(oop o) {
inline bool LockStack::contains(oop o) const {
verify("pre-contains");
- if (!SafepointSynchronize::is_at_safepoint() && !is_owning_thread()) {
- // When a foreign thread inspects this thread's lock-stack, it may see
- // bad references here when a concurrent collector has not gotten
- // to processing the lock-stack, yet. Call StackWaterMark::start_processing()
- // to ensure that all references are valid.
- StackWatermark* watermark = StackWatermarkSet::get(get_thread(), StackWatermarkKind::gc);
- if (watermark != nullptr) {
- watermark->start_processing();
- }
- }
+
+ // Can't poke around in thread oops without having started stack watermark processing.
+ assert(StackWatermarkSet::processing_started(get_thread()), "Processing must have started!");
+
int end = to_index(_top);
for (int i = end - 1; i >= 0; i--) {
if (_base[i] == o) {
diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp
index a94061c5d8c57..ea46300d14521 100644
--- a/src/hotspot/share/runtime/mutexLocker.cpp
+++ b/src/hotspot/share/runtime/mutexLocker.cpp
@@ -224,9 +224,9 @@ void mutex_init() {
MUTEX_DEFN(MarkStackFreeList_lock , PaddedMutex , nosafepoint);
MUTEX_DEFN(MarkStackChunkList_lock , PaddedMutex , nosafepoint);
-
- MUTEX_DEFN(MonitoringSupport_lock , PaddedMutex , service-1); // used for serviceability monitoring support
}
+ MUTEX_DEFN(MonitoringSupport_lock , PaddedMutex , service-1); // used for serviceability monitoring support
+
MUTEX_DEFN(StringDedup_lock , PaddedMonitor, nosafepoint);
MUTEX_DEFN(StringDedupIntern_lock , PaddedMutex , nosafepoint);
MUTEX_DEFN(RawMonitor_lock , PaddedMutex , nosafepoint-1);
diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp
index 12a0e953771b1..c8952cf8af369 100644
--- a/src/hotspot/share/runtime/objectMonitor.cpp
+++ b/src/hotspot/share/runtime/objectMonitor.cpp
@@ -276,24 +276,15 @@ ObjectMonitor::ObjectMonitor(oop object) :
{ }
ObjectMonitor::~ObjectMonitor() {
- if (!_object.is_null()) {
- // Release object's oop storage if it hasn't already been done.
- release_object();
- }
+ _object.release(_oop_storage);
}
oop ObjectMonitor::object() const {
check_object_context();
- if (_object.is_null()) {
- return nullptr;
- }
return _object.resolve();
}
oop ObjectMonitor::object_peek() const {
- if (_object.is_null()) {
- return nullptr;
- }
return _object.peek();
}
@@ -598,9 +589,6 @@ bool ObjectMonitor::deflate_monitor() {
install_displaced_markword_in_object(obj);
}
- // Release object's oop storage since the ObjectMonitor has been deflated:
- release_object();
-
// We leave owner == DEFLATER_MARKER and contentions < 0
// to force any racing threads to retry.
return true; // Success, ObjectMonitor has been deflated.
diff --git a/src/hotspot/share/runtime/objectMonitor.hpp b/src/hotspot/share/runtime/objectMonitor.hpp
index 92fb58dadb5d5..d6c0e31f7a18c 100644
--- a/src/hotspot/share/runtime/objectMonitor.hpp
+++ b/src/hotspot/share/runtime/objectMonitor.hpp
@@ -363,7 +363,6 @@ class ObjectMonitor : public CHeapObj<mtObjectMonitor> {
// Deflation support
bool deflate_monitor();
void install_displaced_markword_in_object(const oop obj);
- void release_object() { _object.release(_oop_storage); _object.set_null(); }
};
#endif // SHARE_RUNTIME_OBJECTMONITOR_HPP
diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp
index 997bf3a968fb1..a5ea07d8c6459 100644
--- a/src/hotspot/share/runtime/os.cpp
+++ b/src/hotspot/share/runtime/os.cpp
@@ -1391,7 +1391,7 @@ bool os::write(int fd, const void *buf, size_t nBytes) {
if (res == OS_ERR) {
return false;
}
- buf = (void *)((char *)buf + nBytes);
+ buf = (void *)((char *)buf + res);
nBytes -= res;
}
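
The os::write fix above advances the buffer by res, the number of bytes the OS actually wrote, instead of by nBytes; with the old code a short write would leave the pointer behind while still shrinking the remaining count. The standard partial-write loop over POSIX write(), as a standalone sketch:

    #include <unistd.h>
    #include <cerrno>
    #include <cstddef>

    // Write all nbytes from buf to fd, handling short writes and EINTR.
    bool write_fully(int fd, const void* buf, size_t nbytes) {
      const char* p = static_cast<const char*>(buf);
      while (nbytes > 0) {
        ssize_t res = ::write(fd, p, nbytes);
        if (res < 0) {
          if (errno == EINTR) continue;       // interrupted before writing anything: retry
          return false;                       // real error
        }
        p      += res;                        // advance by what was actually written
        nbytes -= static_cast<size_t>(res);
      }
      return true;
    }
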
diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp
index 49c56b3375ed7..f445669ab23d1 100644
--- a/src/hotspot/share/runtime/os.hpp
+++ b/src/hotspot/share/runtime/os.hpp
@@ -663,6 +663,8 @@ class os: AllStatic {
static const char* get_temp_directory();
static const char* get_current_directory(char *buf, size_t buflen);
+ static void prepare_native_symbols();
+
// Builds the platform-specific name of a library.
// Returns false if the buffer is too small.
static bool dll_build_name(char* buffer, size_t size,
@@ -773,7 +775,7 @@ class os: AllStatic {
static void print_context(outputStream* st, const void* context);
static void print_tos_pc(outputStream* st, const void* context);
static void print_tos(outputStream* st, address sp);
- static void print_instructions(outputStream* st, address pc, int unitsize);
+ static void print_instructions(outputStream* st, address pc, int unitsize = 1);
static void print_register_info(outputStream* st, const void* context, int& continuation);
static void print_register_info(outputStream* st, const void* context);
static bool signal_sent_by_kill(const void* siginfo);
@@ -1053,6 +1055,7 @@ class os: AllStatic {
char pathSep);
static bool set_boot_path(char fileSep, char pathSep);
+ static bool pd_dll_unload(void* libhandle, char* ebuf, int ebuflen);
};
// Note that "PAUSE" is almost always used with synchronization
diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp
index fb3f00f38ad7e..edbdad4f24bc8 100644
--- a/src/hotspot/share/runtime/sharedRuntime.cpp
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp
@@ -1872,7 +1872,8 @@ methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
// nmethod could be deoptimized by the time we get here
// so no update to the caller is needed.
- if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
+ if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
+ (caller.is_native_frame() && ((CompiledMethod*)caller.cb())->method()->is_continuation_enter_intrinsic())) {
address pc = caller.pc();
diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp
index 09ed8d1a7f811..6e6f2115e1efd 100644
--- a/src/hotspot/share/runtime/synchronizer.cpp
+++ b/src/hotspot/share/runtime/synchronizer.cpp
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
-#include "gc/shared/suspendibleThreadSet.hpp"
+#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
@@ -54,6 +54,7 @@
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "runtime/timer.hpp"
+#include "runtime/trimNativeHeap.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
@@ -1640,18 +1641,25 @@ class VM_RendezvousGCThreads : public VM_Operation {
bool evaluate_at_safepoint() const override { return false; }
VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
void doit() override {
- SuspendibleThreadSet::synchronize();
- SuspendibleThreadSet::desynchronize();
+ Universe::heap()->safepoint_synchronize_begin();
+ Universe::heap()->safepoint_synchronize_end();
};
};
-static size_t delete_monitors(GrowableArray<ObjectMonitor*>* delete_list) {
- size_t count = 0;
+static size_t delete_monitors(Thread* current, GrowableArray<ObjectMonitor*>* delete_list,
+ LogStream* ls, elapsedTimer* timer_p) {
+ NativeHeapTrimmer::SuspendMark sm("monitor deletion");
+ size_t deleted_count = 0;
for (ObjectMonitor* monitor: *delete_list) {
delete monitor;
- count++;
+ deleted_count++;
+ if (current->is_Java_thread()) {
+ // A JavaThread must check for a safepoint/handshake and honor it.
+ ObjectSynchronizer::chk_for_block_req(JavaThread::cast(current), "deletion", "deleted_count",
+ deleted_count, ls, timer_p);
+ }
}
- return count;
+ return deleted_count;
}
// This function is called by the MonitorDeflationThread to deflate
@@ -1729,30 +1737,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table)
// After the handshake, safely free the ObjectMonitors that were
// deflated and unlinked in this cycle.
- if (current->is_Java_thread()) {
- if (ls != NULL) {
- timer.stop();
- ls->print_cr("before setting blocked: unlinked_count=" SIZE_FORMAT
- ", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
- SIZE_FORMAT ", max=" SIZE_FORMAT,
- unlinked_count, in_use_list_ceiling(),
- _in_use_list.count(), _in_use_list.max());
- }
- // Mark the calling JavaThread blocked (safepoint safe) while we free
- // the ObjectMonitors so we don't delay safepoints whilst doing that.
- ThreadBlockInVM tbivm(JavaThread::cast(current));
- if (ls != NULL) {
- ls->print_cr("after setting blocked: in_use_list stats: ceiling="
- SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
- in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
- timer.start();
- }
- deleted_count = delete_monitors(&delete_list);
- // ThreadBlockInVM is destroyed here
- } else {
- // A non-JavaThread can just free the ObjectMonitors:
- deleted_count = delete_monitors(&delete_list);
- }
+ deleted_count = delete_monitors(current, &delete_list, ls, &timer);
assert(unlinked_count == deleted_count, "must be");
}
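
delete_monitors above now takes the current thread plus logging/timer state so that a JavaThread deleting a long list of monitors can periodically honor pending safepoint or handshake requests instead of stalling them for the whole loop. A generic sketch of that "do a chunk, then offer a yield point" shape (the callback stands in for HotSpot's block-request check; none of these names come from the source):

    #include <cstddef>
    #include <functional>
    #include <vector>

    // Delete a large batch of heap objects, invoking a yield callback every few
    // items so the caller can service stop-the-world style requests in between.
    template <typename T>
    size_t delete_in_batches(std::vector<T*>& items,
                             const std::function<void()>& yield_point,
                             size_t batch = 128) {
      size_t deleted = 0;
      for (T* item : items) {
        delete item;
        if (++deleted % batch == 0) {
          yield_point();                      // e.g. check for a pending safepoint/handshake
        }
      }
      items.clear();
      return deleted;
    }
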
diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp
index fd213086ccb1f..51a9e8471501e 100644
--- a/src/hotspot/share/runtime/thread.cpp
+++ b/src/hotspot/share/runtime/thread.cpp
@@ -73,6 +73,7 @@ Thread::Thread() {
set_lgrp_id(-1);
DEBUG_ONLY(clear_suspendible_thread();)
DEBUG_ONLY(clear_indirectly_suspendible_thread();)
+ DEBUG_ONLY(clear_indirectly_safepoint_thread();)
// allocated data structures
set_osthread(nullptr);
diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp
index 80114ddf7eda3..9bd839dc4dba6 100644
--- a/src/hotspot/share/runtime/thread.hpp
+++ b/src/hotspot/share/runtime/thread.hpp
@@ -206,6 +206,7 @@ class Thread: public ThreadShadow {
private:
DEBUG_ONLY(bool _suspendible_thread;)
DEBUG_ONLY(bool _indirectly_suspendible_thread;)
+ DEBUG_ONLY(bool _indirectly_safepoint_thread;)
public:
// Determines if a heap allocation failure will be retried
@@ -224,6 +225,10 @@ class Thread: public ThreadShadow {
void set_indirectly_suspendible_thread() { _indirectly_suspendible_thread = true; }
void clear_indirectly_suspendible_thread() { _indirectly_suspendible_thread = false; }
bool is_indirectly_suspendible_thread() { return _indirectly_suspendible_thread; }
+
+ void set_indirectly_safepoint_thread() { _indirectly_safepoint_thread = true; }
+ void clear_indirectly_safepoint_thread() { _indirectly_safepoint_thread = false; }
+ bool is_indirectly_safepoint_thread() { return _indirectly_safepoint_thread; }
#endif
private:
diff --git a/src/hotspot/share/runtime/threads.cpp b/src/hotspot/share/runtime/threads.cpp
index 302c3672ce063..3e121e3bf741a 100644
--- a/src/hotspot/share/runtime/threads.cpp
+++ b/src/hotspot/share/runtime/threads.cpp
@@ -80,6 +80,7 @@
#include "runtime/safepointVerifiers.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/sharedRuntime.hpp"
+#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/thread.inline.hpp"
@@ -87,6 +88,7 @@
#include "runtime/threadSMR.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/timerTrace.hpp"
+#include "runtime/trimNativeHeap.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
@@ -759,6 +761,10 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
}
#endif
+ if (NativeHeapTrimmer::enabled()) {
+ NativeHeapTrimmer::initialize();
+ }
+
// Always call even when there are not JVMTI environments yet, since environments
// may be attached late and JVMTI must track phases of VM execution
JvmtiExport::enter_live_phase();
@@ -1224,6 +1230,12 @@ JavaThread *Threads::owning_thread_from_monitor_owner(ThreadsList * t_list,
JavaThread* Threads::owning_thread_from_object(ThreadsList * t_list, oop obj) {
assert(LockingMode == LM_LIGHTWEIGHT, "Only with new lightweight locking");
for (JavaThread* q : *t_list) {
+ // Need to start processing before accessing oops in the thread.
+ StackWatermark* watermark = StackWatermarkSet::get(q, StackWatermarkKind::gc);
+ if (watermark != nullptr) {
+ watermark->start_processing();
+ }
+
if (q->lock_stack().contains(obj)) {
return q;
}
diff --git a/src/hotspot/share/runtime/trimNativeHeap.cpp b/src/hotspot/share/runtime/trimNativeHeap.cpp
new file mode 100644
index 0000000000000..7700c8e5109fe
--- /dev/null
+++ b/src/hotspot/share/runtime/trimNativeHeap.cpp
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2023 SAP SE. All rights reserved.
+ * Copyright (c) 2023 Red Hat Inc. All rights reserved.
+ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/nonJavaThread.hpp"
+#include "runtime/os.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/trimNativeHeap.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+#include "utilities/vmError.hpp"
+
+class NativeHeapTrimmerThread : public NamedThread {
+
+ // Upper limit for the backoff during pending/in-progress safepoint.
+ // Chosen as reasonable value to balance the overheads of waking up
+ // during the safepoint, which might have undesired effects on latencies,
+ // and the accuracy in tracking the trimming interval.
+ static constexpr int64_t safepoint_poll_ms = 250;
+
+ Monitor* const _lock;
+ bool _stop;
+ uint16_t _suspend_count;
+
+ // Statistics
+ uint64_t _num_trims_performed;
+
+ bool is_suspended() const {
+ assert(_lock->is_locked(), "Must be");
+ return _suspend_count > 0;
+ }
+
+ uint16_t inc_suspend_count() {
+ assert(_lock->is_locked(), "Must be");
+ assert(_suspend_count < UINT16_MAX, "Sanity");
+ return ++_suspend_count;
+ }
+
+ uint16_t dec_suspend_count() {
+ assert(_lock->is_locked(), "Must be");
+ assert(_suspend_count != 0, "Sanity");
+ return --_suspend_count;
+ }
+
+ bool is_stopped() const {
+ assert(_lock->is_locked(), "Must be");
+ return _stop;
+ }
+
+ bool at_or_nearing_safepoint() const {
+ return SafepointSynchronize::is_at_safepoint() ||
+ SafepointSynchronize::is_synchronizing();
+ }
+
+ // in seconds
+ static double now() { return os::elapsedTime(); }
+ static double to_ms(double seconds) { return seconds * 1000.0; }
+
+ struct LogStartStopMark {
+ void log(const char* s) { log_info(trimnative)("Native heap trimmer %s", s); }
+ LogStartStopMark() { log("start"); }
+ ~LogStartStopMark() { log("stop"); }
+ };
+
+ void run() override {
+ assert(NativeHeapTrimmer::enabled(), "Only call if enabled");
+
+ LogStartStopMark lssm;
+
+ const double interval_secs = (double)TrimNativeHeapInterval / 1000;
+
+ while (true) {
+ double tnow = now();
+ double next_trim_time = tnow + interval_secs;
+
+ unsigned times_suspended = 0;
+ unsigned times_waited = 0;
+ unsigned times_safepoint = 0;
+
+ {
+ MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
+ if (_stop) return;
+
+ while (at_or_nearing_safepoint() || is_suspended() || next_trim_time > tnow) {
+ if (is_suspended()) {
+ times_suspended ++;
+ ml.wait(0); // infinite
+ } else if (next_trim_time > tnow) {
+ times_waited ++;
+ const int64_t wait_ms = MAX2(1.0, to_ms(next_trim_time - tnow));
+ ml.wait(wait_ms);
+ } else if (at_or_nearing_safepoint()) {
+ times_safepoint ++;
+          const int64_t wait_ms = MIN2<int64_t>(TrimNativeHeapInterval, safepoint_poll_ms);
+ ml.wait(wait_ms);
+ }
+
+ if (_stop) return;
+
+ tnow = now();
+ }
+ }
+
+ log_trace(trimnative)("Times: %u suspended, %u timed, %u safepoint",
+ times_suspended, times_waited, times_safepoint);
+
+ execute_trim_and_log(tnow);
+ }
+ }
+
+ // Execute the native trim, log results.
+ void execute_trim_and_log(double t1) {
+ assert(os::can_trim_native_heap(), "Unexpected");
+
+ os::size_change_t sc = { 0, 0 };
+ LogTarget(Info, trimnative) lt;
+ const bool logging_enabled = lt.is_enabled();
+
+ // We only collect size change information if we are logging; save the access to procfs otherwise.
+ if (os::trim_native_heap(logging_enabled ? &sc : nullptr)) {
+ _num_trims_performed++;
+ if (logging_enabled) {
+ double t2 = now();
+ if (sc.after != SIZE_MAX) {
+ const size_t delta = sc.after < sc.before ? (sc.before - sc.after) : (sc.after - sc.before);
+ const char sign = sc.after < sc.before ? '-' : '+';
+ log_info(trimnative)("Periodic Trim (" UINT64_FORMAT "): " PROPERFMT "->" PROPERFMT " (%c" PROPERFMT ") %.3fms",
+ _num_trims_performed,
+ PROPERFMTARGS(sc.before), PROPERFMTARGS(sc.after), sign, PROPERFMTARGS(delta),
+ to_ms(t2 - t1));
+ } else {
+ log_info(trimnative)("Periodic Trim (" UINT64_FORMAT "): complete (no details) %.3fms",
+ _num_trims_performed,
+ to_ms(t2 - t1));
+ }
+ }
+ }
+ }
+
+public:
+
+ NativeHeapTrimmerThread() :
+ _lock(new (std::nothrow) PaddedMonitor(Mutex::nosafepoint, "NativeHeapTrimmer_lock")),
+ _stop(false),
+ _suspend_count(0),
+ _num_trims_performed(0)
+ {
+ set_name("Native Heap Trimmer");
+ if (os::create_thread(this, os::vm_thread)) {
+ os::start_thread(this);
+ }
+ }
+
+ void suspend(const char* reason) {
+ assert(NativeHeapTrimmer::enabled(), "Only call if enabled");
+ uint16_t n = 0;
+ {
+ MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
+ n = inc_suspend_count();
+ // No need to wakeup trimmer
+ }
+ log_debug(trimnative)("Trim suspended for %s (%u suspend requests)", reason, n);
+ }
+
+ void resume(const char* reason) {
+ assert(NativeHeapTrimmer::enabled(), "Only call if enabled");
+ uint16_t n = 0;
+ {
+ MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
+ n = dec_suspend_count();
+ if (n == 0) {
+ ml.notify_all(); // pause end
+ }
+ }
+ if (n == 0) {
+ log_debug(trimnative)("Trim resumed after %s", reason);
+ } else {
+ log_debug(trimnative)("Trim still suspended after %s (%u suspend requests)", reason, n);
+ }
+ }
+
+ void stop() {
+ MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
+ _stop = true;
+ ml.notify_all();
+ }
+
+ void print_state(outputStream* st) const {
+ // Don't pull lock during error reporting
+ Mutex* const lock = VMError::is_error_reported() ? nullptr : _lock;
+ int64_t num_trims = 0;
+ bool stopped = false;
+ uint16_t suspenders = 0;
+ {
+ MutexLocker ml(lock, Mutex::_no_safepoint_check_flag);
+ num_trims = _num_trims_performed;
+ stopped = _stop;
+ suspenders = _suspend_count;
+ }
+ st->print_cr("Trims performed: " UINT64_FORMAT ", current suspend count: %d, stopped: %d",
+ num_trims, suspenders, stopped);
+ }
+
+}; // NativeHeapTrimmer
+
+static NativeHeapTrimmerThread* g_trimmer_thread = nullptr;
+
+void NativeHeapTrimmer::initialize() {
+ assert(g_trimmer_thread == nullptr, "Only once");
+ if (TrimNativeHeapInterval > 0) {
+ if (!os::can_trim_native_heap()) {
+ FLAG_SET_ERGO(TrimNativeHeapInterval, 0);
+ log_warning(trimnative)("Native heap trim is not supported on this platform");
+ return;
+ }
+ g_trimmer_thread = new NativeHeapTrimmerThread();
+ log_info(trimnative)("Periodic native trim enabled (interval: %u ms)", TrimNativeHeapInterval);
+ }
+}
+
+void NativeHeapTrimmer::cleanup() {
+ if (g_trimmer_thread != nullptr) {
+ g_trimmer_thread->stop();
+ }
+}
+
+void NativeHeapTrimmer::suspend_periodic_trim(const char* reason) {
+ if (g_trimmer_thread != nullptr) {
+ g_trimmer_thread->suspend(reason);
+ }
+}
+
+void NativeHeapTrimmer::resume_periodic_trim(const char* reason) {
+ if (g_trimmer_thread != nullptr) {
+ g_trimmer_thread->resume(reason);
+ }
+}
+
+void NativeHeapTrimmer::print_state(outputStream* st) {
+ if (g_trimmer_thread != nullptr) {
+ st->print_cr("Periodic native trim enabled (interval: %u ms)", TrimNativeHeapInterval);
+ g_trimmer_thread->print_state(st);
+ } else {
+ st->print_cr("Periodic native trim disabled");
+ }
+}
diff --git a/src/hotspot/share/runtime/trimNativeHeap.hpp b/src/hotspot/share/runtime/trimNativeHeap.hpp
new file mode 100644
index 0000000000000..06dc88ebb0899
--- /dev/null
+++ b/src/hotspot/share/runtime/trimNativeHeap.hpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2023 SAP SE. All rights reserved.
+ * Copyright (c) 2023 Red Hat Inc. All rights reserved.
+ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_RUNTIME_TRIMNATIVEHEAP_HPP
+#define SHARE_RUNTIME_TRIMNATIVEHEAP_HPP
+
+#include "memory/allStatic.hpp"
+#include "runtime/globals.hpp"
+
+class outputStream;
+
+class NativeHeapTrimmer : public AllStatic {
+
+ // Pause periodic trim (if enabled).
+ static void suspend_periodic_trim(const char* reason);
+
+ // Unpause periodic trim (if enabled).
+ static void resume_periodic_trim(const char* reason);
+
+public:
+
+ static void initialize();
+ static void cleanup();
+
+ static inline bool enabled() { return TrimNativeHeapInterval > 0; }
+
+ static void print_state(outputStream* st);
+
+ // Pause periodic trimming while in scope; when leaving scope,
+ // resume periodic trimming.
+ struct SuspendMark {
+ const char* const _reason;
+ SuspendMark(const char* reason = "unknown") : _reason(reason) {
+ if (NativeHeapTrimmer::enabled()) {
+ suspend_periodic_trim(_reason);
+ }
+ }
+ ~SuspendMark() {
+ if (NativeHeapTrimmer::enabled()) {
+ resume_periodic_trim(_reason);
+ }
+ }
+ };
+};
+
+#endif // SHARE_RUNTIME_TRIMNATIVEHEAP_HPP
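
NativeHeapTrimmer::SuspendMark above is an RAII guard: its constructor pauses periodic native-heap trimming and its destructor resumes it, so every exit path out of the guarded scope (including early returns) restores the trimmer, as used around bulk monitor deletion in synchronizer.cpp. A standalone sketch of the same guard shape with an invented Trimmer stand-in:

    #include <cstdio>

    // Illustrative stand-in for a component with global pause/resume entry points.
    struct Trimmer {
      static void suspend(const char* why) { std::printf("trim suspended: %s\n", why); }
      static void resume(const char* why)  { std::printf("trim resumed: %s\n", why); }
    };

    // RAII guard: pause in the constructor, resume in the destructor.
    class TrimSuspendMark {
      const char* const _reason;
    public:
      explicit TrimSuspendMark(const char* reason) : _reason(reason) {
        Trimmer::suspend(_reason);
      }
      ~TrimSuspendMark() {
        Trimmer::resume(_reason);
      }
    };

    void delete_lots_of_native_objects() {
      TrimSuspendMark sm("bulk deletion");    // trimming paused for this scope
      // ... free many native allocations; a concurrent trim here would mostly be wasted work ...
    }                                         // destructor resumes trimming
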
diff --git a/src/hotspot/share/services/finalizerService.cpp b/src/hotspot/share/services/finalizerService.cpp
index 202a1af08011a..ecd9168cd65d3 100644
--- a/src/hotspot/share/services/finalizerService.cpp
+++ b/src/hotspot/share/services/finalizerService.cpp
@@ -137,11 +137,14 @@ class FinalizerEntryLookup : StackObj {
public:
FinalizerEntryLookup(const InstanceKlass* ik) : _ik(ik) {}
uintx get_hash() const { return hash_function(_ik); }
- bool equals(FinalizerEntry** value, bool* is_dead) {
+ bool equals(FinalizerEntry** value) {
assert(value != nullptr, "invariant");
assert(*value != nullptr, "invariant");
return (*value)->klass() == _ik;
}
+ bool is_dead(FinalizerEntry** value) {
+ return false;
+ }
};
class FinalizerTableConfig : public AllStatic {
diff --git a/src/hotspot/share/services/heapDumper.cpp b/src/hotspot/share/services/heapDumper.cpp
index 1d947cff1d010..74786534069c0 100644
--- a/src/hotspot/share/services/heapDumper.cpp
+++ b/src/hotspot/share/services/heapDumper.cpp
@@ -37,6 +37,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
+#include "oops/fieldStreams.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
@@ -48,7 +49,6 @@
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
-#include "runtime/reflectionUtils.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
@@ -1096,7 +1096,7 @@ u4 DumperSupport::instance_size(Klass* k) {
InstanceKlass* ik = InstanceKlass::cast(k);
u4 size = 0;
- for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
+  for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
if (!fld.access_flags().is_static()) {
size += sig2size(fld.signature());
}
@@ -1108,7 +1108,7 @@ u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
field_count = 0;
u4 size = 0;
- for (FieldStream fldc(ik, true, true); !fldc.eos(); fldc.next()) {
+ for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
if (fldc.access_flags().is_static()) {
field_count++;
size += sig2size(fldc.signature());
@@ -1142,7 +1142,7 @@ void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
InstanceKlass* ik = InstanceKlass::cast(k);
// dump the field descriptors and raw values
- for (FieldStream fld(ik, true, true); !fld.eos(); fld.next()) {
+ for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
if (fld.access_flags().is_static()) {
Symbol* sig = fld.signature();
@@ -1176,7 +1176,7 @@ void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o) {
InstanceKlass* ik = InstanceKlass::cast(o->klass());
- for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
+  for (HierarchicalFieldStream<JavaFieldStream> fld(ik); !fld.done(); fld.next()) {
if (!fld.access_flags().is_static()) {
Symbol* sig = fld.signature();
dump_field_value(writer, sig->char_at(0), o, fld.offset());
@@ -1188,7 +1188,7 @@ void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o) {
u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
u2 field_count = 0;
- for (FieldStream fldc(ik, true, true); !fldc.eos(); fldc.next()) {
+ for (JavaFieldStream fldc(ik); !fldc.done(); fldc.next()) {
if (!fldc.access_flags().is_static()) field_count++;
}
@@ -1200,7 +1200,7 @@ void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer,
InstanceKlass* ik = InstanceKlass::cast(k);
// dump the field descriptors
- for (FieldStream fld(ik, true, true); !fld.eos(); fld.next()) {
+ for (JavaFieldStream fld(ik); !fld.done(); fld.next()) {
if (!fld.access_flags().is_static()) {
Symbol* sig = fld.signature();
diff --git a/src/hotspot/share/services/mallocTracker.cpp b/src/hotspot/share/services/mallocTracker.cpp
index bddfcfffc0820..5dab383770795 100644
--- a/src/hotspot/share/services/mallocTracker.cpp
+++ b/src/hotspot/share/services/mallocTracker.cpp
@@ -45,7 +45,6 @@
size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
-#ifdef ASSERT
void MemoryCounter::update_peak(size_t size, size_t cnt) {
size_t peak_sz = peak_size();
while (peak_sz < size) {
@@ -59,7 +58,6 @@ void MemoryCounter::update_peak(size_t size, size_t cnt) {
}
}
}
-#endif // ASSERT
// Total malloc'd memory used by arenas
size_t MallocMemorySnapshot::total_arena() const {
@@ -213,7 +211,8 @@ bool MallocTracker::print_pointer_information(const void* p, outputStream* st) {
const uint8_t* here = align_down(addr, smallest_possible_alignment);
const uint8_t* const end = here - (0x1000 + sizeof(MallocHeader)); // stop searching after 4k
for (; here >= end; here -= smallest_possible_alignment) {
- if (!os::is_readable_pointer(here)) {
+ // JDK-8306561: cast to a MallocHeader needs to guarantee it can reside in readable memory
+ if (!os::is_readable_range(here, here + sizeof(MallocHeader))) {
// Probably OOB, give up
break;
}
diff --git a/src/hotspot/share/services/mallocTracker.hpp b/src/hotspot/share/services/mallocTracker.hpp
index f4f824bb07c49..ed66f643c74b3 100644
--- a/src/hotspot/share/services/mallocTracker.hpp
+++ b/src/hotspot/share/services/mallocTracker.hpp
@@ -46,25 +46,20 @@ class MemoryCounter {
volatile size_t _count;
volatile size_t _size;
-#ifdef ASSERT
// Peak size and count. Note: Peak count is the count at the point
// peak size was reached, not the absolute highest peak count.
volatile size_t _peak_count;
volatile size_t _peak_size;
void update_peak(size_t size, size_t cnt);
-#endif // ASSERT
public:
- MemoryCounter() : _count(0), _size(0) {
- DEBUG_ONLY(_peak_count = 0;)
- DEBUG_ONLY(_peak_size = 0;)
- }
+ MemoryCounter() : _count(0), _size(0), _peak_count(0), _peak_size(0) {}
inline void allocate(size_t sz) {
size_t cnt = Atomic::add(&_count, size_t(1), memory_order_relaxed);
if (sz > 0) {
size_t sum = Atomic::add(&_size, sz, memory_order_relaxed);
- DEBUG_ONLY(update_peak(sum, cnt);)
+ update_peak(sum, cnt);
}
}
@@ -81,7 +76,7 @@ class MemoryCounter {
if (sz != 0) {
assert(sz >= 0 || size() >= size_t(-sz), "Must be");
size_t sum = Atomic::add(&_size, size_t(sz), memory_order_relaxed);
- DEBUG_ONLY(update_peak(sum, _count);)
+ update_peak(sum, _count);
}
}
@@ -89,11 +84,11 @@ class MemoryCounter {
inline size_t size() const { return Atomic::load(&_size); }
inline size_t peak_count() const {
- return DEBUG_ONLY(Atomic::load(&_peak_count)) NOT_DEBUG(0);
+ return Atomic::load(&_peak_count);
}
inline size_t peak_size() const {
- return DEBUG_ONLY(Atomic::load(&_peak_size)) NOT_DEBUG(0);
+ return Atomic::load(&_peak_size);
}
};
@@ -181,11 +176,6 @@ class MallocMemorySnapshot : public ResourceObj {
// Total malloc'd memory used by arenas
size_t total_arena() const;
- inline size_t thread_count() const {
-    MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
- return s->by_type(mtThreadStack)->malloc_count();
- }
-
void copy_to(MallocMemorySnapshot* s) {
// Need to make sure that mtChunks don't get deallocated while the
// copy is going on, because their size is adjusted using this
diff --git a/src/hotspot/share/services/management.cpp b/src/hotspot/share/services/management.cpp
index b8674d1ca45fb..a9c50a8bf06df 100644
--- a/src/hotspot/share/services/management.cpp
+++ b/src/hotspot/share/services/management.cpp
@@ -2116,6 +2116,7 @@ JVM_ENTRY(jlong, jmm_GetTotalThreadAllocatedMemory(JNIEnv *env))
}
{
+ assert(MonitoringSupport_lock != nullptr, "Must be");
MutexLocker ml(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
if (result < high_water_result) {
// Encountered (2) above, or result wrapped to a negative value. In
diff --git a/src/hotspot/share/services/memBaseline.cpp b/src/hotspot/share/services/memBaseline.cpp
index 4167e43f6b15e..c0522016e1eca 100644
--- a/src/hotspot/share/services/memBaseline.cpp
+++ b/src/hotspot/share/services/memBaseline.cpp
@@ -110,22 +110,25 @@ class MallocAllocationSiteWalker : public MallocSiteWalker {
}
};
-// Compare virtual memory region's base address
-int compare_virtual_memory_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
- return r1.compare(r2);
-}
-
// Walk all virtual memory regions for baselining
class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
private:
-  SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base>
- _virtual_memory_regions;
- size_t _count;
-
+  typedef LinkedListImpl<ReservedMemoryRegion> EntryList;
+ EntryList _virtual_memory_regions;
+ size_t _count;
+ DEBUG_ONLY(address _last_base;)
public:
- VirtualMemoryAllocationWalker() : _count(0) { }
+ VirtualMemoryAllocationWalker() :
+ _count(0)
+#ifdef ASSERT
+ , _last_base(nullptr)
+#endif
+ {}
bool do_allocation_site(const ReservedMemoryRegion* rgn) {
+ assert(rgn->base() >= _last_base, "region unordered?");
+ DEBUG_ONLY(_last_base = rgn->base());
if (rgn->size() > 0) {
if (_virtual_memory_regions.add(*rgn) != nullptr) {
_count ++;
@@ -190,6 +193,7 @@ void MemBaseline::baseline(bool summaryOnly) {
_instance_class_count = ClassLoaderDataGraph::num_instance_classes();
_array_class_count = ClassLoaderDataGraph::num_array_classes();
+ _thread_count = ThreadStackTracker::thread_count();
baseline_summary();
_baseline_type = Summary_baselined;
diff --git a/src/hotspot/share/services/memBaseline.hpp b/src/hotspot/share/services/memBaseline.hpp
index 5f2442d371061..fa44a45470461 100644
--- a/src/hotspot/share/services/memBaseline.hpp
+++ b/src/hotspot/share/services/memBaseline.hpp
@@ -64,6 +64,7 @@ class MemBaseline {
size_t _instance_class_count;
size_t _array_class_count;
+ size_t _thread_count;
// Allocation sites information
// Malloc allocation sites
@@ -84,7 +85,7 @@ class MemBaseline {
public:
// create a memory baseline
MemBaseline():
- _instance_class_count(0), _array_class_count(0),
+ _instance_class_count(0), _array_class_count(0), _thread_count(0),
_baseline_type(Not_baselined) {
}
@@ -171,7 +172,7 @@ class MemBaseline {
size_t thread_count() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
- return _malloc_memory_snapshot.thread_count();
+ return _thread_count;
}
// reset the baseline for reuse
@@ -180,6 +181,7 @@ class MemBaseline {
// _malloc_memory_snapshot and _virtual_memory_snapshot are copied over.
_instance_class_count = 0;
_array_class_count = 0;
+ _thread_count = 0;
_malloc_sites.clear();
_virtual_memory_sites.clear();
diff --git a/src/hotspot/share/services/memReporter.cpp b/src/hotspot/share/services/memReporter.cpp
index 7c5f37c69924d..717699d84ea6a 100644
--- a/src/hotspot/share/services/memReporter.cpp
+++ b/src/hotspot/share/services/memReporter.cpp
@@ -233,7 +233,6 @@ void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
MallocMemory* thread_stack_memory = _malloc_snapshot->by_type(mtThreadStack);
const char* scale = current_scale();
// report thread count
- assert(ThreadStackTracker::thread_count() == 0, "Not used");
out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", thread_stack_memory->malloc_count());
out->print("%27s (Stack: " SIZE_FORMAT "%s", " ",
amount_in_current_scale(thread_stack_memory->malloc_size()), scale);
@@ -243,7 +242,7 @@ void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
// report malloc'd memory
if (amount_in_current_scale(malloc_memory->malloc_size()) > 0
- DEBUG_ONLY(|| amount_in_current_scale(malloc_memory->malloc_peak_size()) > 0)) {
+ || amount_in_current_scale(malloc_memory->malloc_peak_size()) > 0) {
print_malloc_line(malloc_memory->malloc_counter());
}
diff --git a/src/hotspot/share/services/nmtDCmd.cpp b/src/hotspot/share/services/nmtDCmd.cpp
index f64c65c2dc89d..d6f80613b37c3 100644
--- a/src/hotspot/share/services/nmtDCmd.cpp
+++ b/src/hotspot/share/services/nmtDCmd.cpp
@@ -77,7 +77,7 @@ void NMTDCmd::execute(DCmdSource source, TRAPS) {
return;
}
- const char* scale_value = _scale.value();
+ const char* scale_value = _scale.value() != nullptr ? _scale.value() : "(null)";
size_t scale_unit = get_scale(scale_value);
if (scale_unit == 0) {
output()->print_cr("Incorrect scale value: %s", scale_value);
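
The guard only changes the error path: when _scale.value() is null, the message prints "(null)" instead of passing a null pointer to %s. The same null-coalescing pattern in isolation, with hypothetical names:

    const char* raw_scale = nullptr;                                // e.g. option not set
    const char* scale_value = raw_scale != nullptr ? raw_scale : "(null)";
    // scale_value is now always safe to hand to a printf-style "%s"
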
diff --git a/src/hotspot/share/services/threadIdTable.cpp b/src/hotspot/share/services/threadIdTable.cpp
index ba0e6bdd4fdba..168b2e085adf2 100644
--- a/src/hotspot/share/services/threadIdTable.cpp
+++ b/src/hotspot/share/services/threadIdTable.cpp
@@ -187,13 +187,16 @@ class ThreadIdTableLookup : public StackObj {
uintx get_hash() const {
return _hash;
}
- bool equals(ThreadIdTableEntry** value, bool* is_dead) {
+ bool equals(ThreadIdTableEntry** value) {
bool equals = primitive_equals(_tid, (*value)->tid());
if (!equals) {
return false;
}
return true;
}
+ bool is_dead(ThreadIdTableEntry** value) {
+ return false;
+ }
};
class ThreadGet : public StackObj {
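
After this split, a ConcurrentHashTable lookup functor supplies three things: a hash, an equality test, and a liveness test used when the table prunes dead entries. A minimal sketch of that shape, with a hypothetical key type (not the ThreadIdTable code):

    #include <cstdint>

    struct LookupSketch {
      uint64_t _key;
      explicit LookupSketch(uint64_t key) : _key(key) {}

      uintptr_t get_hash() const { return (uintptr_t)(_key * 0x9E3779B97F4A7C15ull); }
      bool equals(const uint64_t* value) const { return *value == _key; }
      // Entries in this sketch are never considered dead, mirroring
      // ThreadIdTableLookup::is_dead() returning false above.
      bool is_dead(const uint64_t* /*value*/) const { return false; }
    };
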
diff --git a/src/hotspot/share/services/threadStackTracker.cpp b/src/hotspot/share/services/threadStackTracker.cpp
index afed620bb8890..a6b14efc65700 100644
--- a/src/hotspot/share/services/threadStackTracker.cpp
+++ b/src/hotspot/share/services/threadStackTracker.cpp
@@ -49,40 +49,38 @@ int ThreadStackTracker::compare_thread_stack_base(const SimpleThreadStackSite& s
void ThreadStackTracker::new_thread_stack(void* base, size_t size, const NativeCallStack& stack) {
assert(MemTracker::tracking_level() >= NMT_summary, "Must be");
assert(base != nullptr, "Should have been filtered");
+ ThreadCritical tc;
if (track_as_vm()) {
- ThreadCritical tc;
VirtualMemoryTracker::add_reserved_region((address)base, size, stack, mtThreadStack);
- _thread_count ++;
} else {
// Use a slot in mallocMemorySummary for thread stack bookkeeping
MallocMemorySummary::record_malloc(size, mtThreadStack);
if (MemTracker::tracking_level() == NMT_detail) {
- ThreadCritical tc;
assert(_simple_thread_stacks != nullptr, "Must be initialized");
SimpleThreadStackSite site((address)base, size, stack);
_simple_thread_stacks->add(site);
}
}
+ _thread_count++;
}
void ThreadStackTracker::delete_thread_stack(void* base, size_t size) {
assert(MemTracker::tracking_level() >= NMT_summary, "Must be");
assert(base != nullptr, "Should have been filtered");
+ ThreadCritical tc;
if(track_as_vm()) {
- ThreadCritical tc;
VirtualMemoryTracker::remove_released_region((address)base, size);
- _thread_count--;
} else {
// Use a slot in mallocMemorySummary for thread stack bookkeeping
MallocMemorySummary::record_free(size, mtThreadStack);
if (MemTracker::tracking_level() == NMT_detail) {
- ThreadCritical tc;
assert(_simple_thread_stacks != nullptr, "Must be initialized");
SimpleThreadStackSite site((address)base, size, NativeCallStack::empty_stack()); // Fake object just to serve as compare target for delete
bool removed = _simple_thread_stacks->remove(site);
assert(removed, "Must exist");
}
}
+ _thread_count--;
}
bool ThreadStackTracker::walk_simple_thread_stack_site(MallocSiteWalker* walker) {
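
The two hunks above hoist ThreadCritical to the start of new_thread_stack and delete_thread_stack, so the per-type bookkeeping and _thread_count are updated inside one critical section. A standalone sketch of the consolidated pattern, with std::mutex standing in for ThreadCritical and made-up helper names:

    #include <cstddef>
    #include <mutex>

    static std::mutex g_tracker_lock;
    static size_t g_thread_count = 0;

    static void record_stack(void*, size_t) { /* bookkeeping elided */ }

    void new_thread_stack_sketch(void* base, size_t size) {
      std::lock_guard<std::mutex> guard(g_tracker_lock);  // one scope for both updates
      record_stack(base, size);
      ++g_thread_count;    // a concurrent reader never sees these out of sync
    }
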
diff --git a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
index 0d62a9f162e29..b222d379b722b 100644
--- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
+++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
@@ -455,9 +455,8 @@ inline bool ConcurrentHashTable<CONFIG, F>::
assert(bucket->is_locked(), "Must be locked.");
Node* const volatile * rem_n_prev = bucket->first_ptr();
Node* rem_n = bucket->first();
- bool have_dead = false;
while (rem_n != nullptr) {
- if (lookup_f.equals(rem_n->value(), &have_dead)) {
+ if (lookup_f.equals(rem_n->value())) {
bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
break;
} else {
@@ -546,9 +545,7 @@ inline void ConcurrentHashTable<CONFIG, F>::
Node* const volatile * rem_n_prev = bucket->first_ptr();
Node* rem_n = bucket->first();
while (rem_n != nullptr) {
- bool is_dead = false;
- lookup_f.equals(rem_n->value(), &is_dead);
- if (is_dead) {
+ if (lookup_f.is_dead(rem_n->value())) {
ndel[dels++] = rem_n;
Node* next_node = rem_n->next();
bucket->release_assign_node_ptr(rem_n_prev, next_node);
@@ -626,12 +623,11 @@ ConcurrentHashTable<CONFIG, F>::
size_t loop_count = 0;
Node* node = bucket->first();
while (node != nullptr) {
- bool is_dead = false;
++loop_count;
- if (lookup_f.equals(node->value(), &is_dead)) {
+ if (lookup_f.equals(node->value())) {
break;
}
- if (is_dead && !(*have_dead)) {
+ if (!(*have_dead) && lookup_f.is_dead(node->value())) {
*have_dead = true;
}
node = node->next();
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
index becaf7957990f..7e6ebf9fdde13 100644
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -411,6 +411,9 @@ inline size_t byte_size_in_exact_unit(size_t s) {
return s;
}
+#define EXACTFMT SIZE_FORMAT "%s"
+#define EXACTFMTARGS(s) byte_size_in_exact_unit(s), exact_unit_for_byte_size(s)
+
// Memory size transition formatting.
#define HEAP_CHANGE_FORMAT "%s: " SIZE_FORMAT "K(" SIZE_FORMAT "K)->" SIZE_FORMAT "K(" SIZE_FORMAT "K)"
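
EXACTFMT and EXACTFMTARGS are meant to be used as a pair so that a byte count prints in the largest unit that divides it exactly. A hedged usage sketch, with a hypothetical outputStream* and variable name:

    // Assuming 'st' is an outputStream* and 'committed' is a size_t:
    //   st->print_cr("committed: " EXACTFMT, EXACTFMTARGS(committed));
    // For committed == 2 * M this should expand to roughly
    //   st->print_cr("committed: " SIZE_FORMAT "%s", (size_t)2, "M");
    // and print "committed: 2M", since byte_size_in_exact_unit() scales the
    // value down and exact_unit_for_byte_size() supplies the matching suffix.
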
@@ -638,7 +641,7 @@ const bool support_IRIW_for_not_multiple_copy_atomic_cpu = PPC64_ONLY(true) NOT_
// The expected size in bytes of a cache line, used to pad data structures.
#ifndef DEFAULT_CACHE_LINE_SIZE
- #define DEFAULT_CACHE_LINE_SIZE 64
+#error "Platform should define DEFAULT_CACHE_LINE_SIZE"
#endif
diff --git a/src/hotspot/share/utilities/growableArray.hpp b/src/hotspot/share/utilities/growableArray.hpp
index 13de5dec6c1b1..d9c47e8360fdb 100644
--- a/src/hotspot/share/utilities/growableArray.hpp
+++ b/src/hotspot/share/utilities/growableArray.hpp
@@ -142,17 +142,17 @@ class GrowableArrayView : public GrowableArrayBase {
}
E& at(int i) {
- assert(0 <= i && i < _len, "illegal index");
+ assert(0 <= i && i < _len, "illegal index %d for length %d", i, _len);
return _data[i];
}
E const& at(int i) const {
- assert(0 <= i && i < _len, "illegal index");
+ assert(0 <= i && i < _len, "illegal index %d for length %d", i, _len);
return _data[i];
}
E* adr_at(int i) const {
- assert(0 <= i && i < _len, "illegal index");
+ assert(0 <= i && i < _len, "illegal index %d for length %d", i, _len);
return &_data[i];
}
@@ -184,7 +184,7 @@ class GrowableArrayView : public GrowableArrayBase {
}
void at_put(int i, const E& elem) {
- assert(0 <= i && i < _len, "illegal index");
+ assert(0 <= i && i < _len, "illegal index %d for length %d", i, _len);
_data[i] = elem;
}
@@ -245,7 +245,7 @@ class GrowableArrayView : public GrowableArrayBase {
}
void remove_at(int index) {
- assert(0 <= index && index < _len, "illegal index");
+ assert(0 <= index && index < _len, "illegal index %d for length %d", index, _len);
for (int j = index + 1; j < _len; j++) {
_data[j-1] = _data[j];
}
@@ -259,8 +259,8 @@ class GrowableArrayView : public GrowableArrayBase {
// Remove all elements in the range [start - end). The order is preserved.
void remove_range(int start, int end) {
- assert(0 <= start, "illegal index");
- assert(start < end && end <= _len, "erase called with invalid range");
+ assert(0 <= start, "illegal start index %d", start);
+ assert(start < end && end <= _len, "erase called with invalid range (%d, %d) for length %d", start, end, _len);
for (int i = start, j = end; j < length(); i++, j++) {
at_put(i, at(j));
@@ -270,7 +270,7 @@ class GrowableArrayView : public GrowableArrayBase {
// The order is changed.
void delete_at(int index) {
- assert(0 <= index && index < _len, "illegal index");
+ assert(0 <= index && index < _len, "illegal index %d for length %d", index, _len);
if (index < --_len) {
// Replace removed element with last one.
_data[index] = _data[_len];
@@ -403,7 +403,7 @@ class GrowableArrayWithAllocator : public GrowableArrayView {
void push(const E& elem) { append(elem); }
E at_grow(int i, const E& fill = E()) {
- assert(0 <= i, "negative index");
+ assert(0 <= i, "negative index %d", i);
if (i >= this->_len) {
if (i >= this->_capacity) grow(i);
for (int j = this->_len; j <= i; j++)
@@ -414,7 +414,7 @@ class GrowableArrayWithAllocator : public GrowableArrayView {
}
void at_put_grow(int i, const E& elem, const E& fill = E()) {
- assert(0 <= i, "negative index");
+ assert(0 <= i, "negative index %d", i);
if (i >= this->_len) {
if (i >= this->_capacity) grow(i);
for (int j = this->_len; j < i; j++)
@@ -426,7 +426,7 @@ class GrowableArrayWithAllocator : public GrowableArrayView {
// inserts the given element before the element at index i
void insert_before(const int idx, const E& elem) {
- assert(0 <= idx && idx <= this->_len, "illegal index");
+ assert(0 <= idx && idx <= this->_len, "illegal index %d for length %d", idx, this->_len);
if (this->_len == this->_capacity) grow(this->_len);
for (int j = this->_len - 1; j >= idx; j--) {
this->_data[j + 1] = this->_data[j];
@@ -436,7 +436,7 @@ class GrowableArrayWithAllocator : public GrowableArrayView {
}
void insert_before(const int idx, const GrowableArrayView* array) {
- assert(0 <= idx && idx <= this->_len, "illegal index");
+ assert(0 <= idx && idx <= this->_len, "illegal index %d for length %d", idx, this->_len);
int array_len = array->length();
int new_len = this->_len + array_len;
if (new_len >= this->_capacity) grow(new_len);
diff --git a/src/hotspot/share/utilities/vmError.cpp b/src/hotspot/share/utilities/vmError.cpp
index 2bcc1ddc2215f..5c0b4440e2cd5 100644
--- a/src/hotspot/share/utilities/vmError.cpp
+++ b/src/hotspot/share/utilities/vmError.cpp
@@ -54,6 +54,7 @@
#include "runtime/stackOverflow.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
+#include "runtime/trimNativeHeap.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vm_version.hpp"
@@ -106,6 +107,8 @@ static const char* env_list[] = {
"JAVA_HOME", "JAVA_TOOL_OPTIONS", "_JAVA_OPTIONS", "CLASSPATH",
"PATH", "USERNAME",
+ "XDG_CACHE_HOME", "XDG_CONFIG_HOME", "FC_LANG", "FONTCONFIG_USE_MMAP",
+
// Env variables that are defined on Linux/BSD
"LD_LIBRARY_PATH", "LD_PRELOAD", "SHELL", "DISPLAY",
"HOSTTYPE", "OSTYPE", "ARCH", "MACHTYPE",
@@ -423,7 +426,7 @@ static frame next_frame(frame fr, Thread* t) {
if (!t->is_in_full_stack((address)(fr.real_fp() + 1))) {
return invalid;
}
- if (fr.is_java_frame() || fr.is_native_frame() || fr.is_runtime_frame()) {
+ if (fr.is_interpreted_frame() || (fr.cb() != nullptr && fr.cb()->frame_size() > 0)) {
RegisterMap map(JavaThread::cast(t),
RegisterMap::UpdateMap::skip,
RegisterMap::ProcessFrames::include,
@@ -479,7 +482,7 @@ static void print_oom_reasons(outputStream* st) {
st->print_cr("# Possible reasons:");
st->print_cr("# The system is out of physical RAM or swap space");
if (UseCompressedOops) {
- st->print_cr("# The process is running with CompressedOops enabled, and the Java Heap may be blocking the growth of the native heap");
+ st->print_cr("# This process is running with CompressedOops enabled, and the Java Heap may be blocking the growth of the native heap");
}
if (LogBytesPerWord == 2) {
st->print_cr("# In 32 bit mode, the process size limit was hit");
@@ -711,6 +714,11 @@ void VMError::report(outputStream* st, bool _verbose) {
"Runtime Environment to continue.");
}
+ // avoid the cache update for malloc/mmap errors
+ if (should_report_bug(_id)) {
+ os::prepare_native_symbols();
+ }
+
#ifdef ASSERT
// Error handler self tests
// Meaning of codes passed through in the tests.
@@ -817,9 +825,9 @@ void VMError::report(outputStream* st, bool _verbose) {
"(mprotect) failed to protect ");
jio_snprintf(buf, sizeof(buf), SIZE_FORMAT, _size);
st->print("%s", buf);
- st->print(" bytes");
+ st->print(" bytes.");
if (strlen(_detail_msg) > 0) {
- st->print(" for ");
+ st->print(" Error detail: ");
st->print("%s", _detail_msg);
}
st->cr();
@@ -1091,6 +1099,11 @@ void VMError::report(outputStream* st, bool _verbose) {
print_stack_location(st, _context, continuation);
st->cr();
+ STEP_IF("printing lock stack", _verbose && _thread != nullptr && _thread->is_Java_thread() && LockingMode == LM_LIGHTWEIGHT);
+ st->print_cr("Lock stack of current Java thread (top to bottom):");
+ JavaThread::cast(_thread)->lock_stack().print_on(st);
+ st->cr();
+
STEP_IF("printing code blobs if possible", _verbose)
const int printed_capacity = max_error_log_print_code;
address printed[printed_capacity];
@@ -1284,9 +1297,13 @@ void VMError::report(outputStream* st, bool _verbose) {
STEP_IF("Native Memory Tracking", _verbose)
MemTracker::error_report(st);
+ st->cr();
- STEP_IF("printing system", _verbose)
+ STEP_IF("printing periodic trim state", _verbose)
+ NativeHeapTrimmer::print_state(st);
st->cr();
+
+ STEP_IF("printing system", _verbose)
st->print_cr("--------------- S Y S T E M ---------------");
st->cr();
@@ -1325,6 +1342,8 @@ void VMError::report(outputStream* st, bool _verbose) {
void VMError::print_vm_info(outputStream* st) {
char buf[O_BUFLEN];
+ os::prepare_native_symbols();
+
report_vm_version(st, buf, sizeof(buf));
// STEP("printing summary")
@@ -1453,10 +1472,14 @@ void VMError::print_vm_info(outputStream* st) {
// STEP("Native Memory Tracking")
MemTracker::error_report(st);
+ st->cr();
- // STEP("printing system")
-
+ // STEP("printing periodic trim state")
+ NativeHeapTrimmer::print_state(st);
st->cr();
+
+
+ // STEP("printing system")
st->print_cr("--------------- S Y S T E M ---------------");
st->cr();
@@ -1827,6 +1850,7 @@ void VMError::report_and_die(int id, const char* message, const char* detail_fmt
int e = errno;
out.print_raw("#\n# Can't open file to dump replay data. Error: ");
out.print_raw_cr(os::strerror(e));
+ close(fd);
}
}
}
diff --git a/src/java.base/aix/native/libjava/ProcessHandleImpl_aix.c b/src/java.base/aix/native/libjava/ProcessHandleImpl_aix.c
index 93347bd8c2071..3568af24ce438 100644
--- a/src/java.base/aix/native/libjava/ProcessHandleImpl_aix.c
+++ b/src/java.base/aix/native/libjava/ProcessHandleImpl_aix.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,10 +24,12 @@
*/
#include "jni.h"
+#include "jni_util.h"
#include "ProcessHandleImpl_unix.h"
#include
+#include
/*
* Implementation of native ProcessHandleImpl functions for AIX.
@@ -36,9 +38,127 @@
void os_initNative(JNIEnv *env, jclass clazz) {}
+/*
+ * Return pids of active processes, and optionally parent pids and
+ * start times for each process.
+ * For a specific non-zero pid, only the direct children are returned.
+ * If the pid is zero, all active processes are returned.
+ * Use getprocs64 to accumulate any process following the rules above.
+ * The resulting pids are stored into an array of longs named jarray.
+ * The number of pids is returned if they all fit.
+ * If the parentArray is non-null, store also the parent pid.
+ * In this case the parentArray must have the same length as the result pid array.
+ * When a non-zero pid is given, every entry in the parentArray contains that
+ * pid, so the array is only useful when the given pid is zero.
+ * If the jstimesArray is non-null, store also the start time of the pid.
+ * In this case the jstimesArray must have the same length as the result pid array.
+ * If the arrays are too short, excess pids are not stored and the required
+ * length is returned.
+ */
jint os_getChildren(JNIEnv *env, jlong jpid, jlongArray jarray,
jlongArray jparentArray, jlongArray jstimesArray) {
- return unix_getChildren(env, jpid, jarray, jparentArray, jstimesArray);
+ pid_t pid = (pid_t) jpid;
+ jlong* pids = NULL;
+ jlong* ppids = NULL;
+ jlong* stimes = NULL;
+ jsize parentArraySize = 0;
+ jsize arraySize = 0;
+ jsize stimesSize = 0;
+ jsize count = 0;
+
+ arraySize = (*env)->GetArrayLength(env, jarray);
+ JNU_CHECK_EXCEPTION_RETURN(env, -1);
+ if (jparentArray != NULL) {
+ parentArraySize = (*env)->GetArrayLength(env, jparentArray);
+ JNU_CHECK_EXCEPTION_RETURN(env, -1);
+
+ if (arraySize != parentArraySize) {
+ JNU_ThrowIllegalArgumentException(env, "array sizes not equal");
+ return 0;
+ }
+ }
+ if (jstimesArray != NULL) {
+ stimesSize = (*env)->GetArrayLength(env, jstimesArray);
+ JNU_CHECK_EXCEPTION_RETURN(env, -1);
+
+ if (arraySize != stimesSize) {
+ JNU_ThrowIllegalArgumentException(env, "array sizes not equal");
+ return 0;
+ }
+ }
+
+ const int chunk = 100;
+ struct procentry64 ProcessBuffer[chunk];
+ pid_t idxptr = 0;
+ int i, num = 0;
+
+ do { // Block to break out of on Exception
+ pids = (*env)->GetLongArrayElements(env, jarray, NULL);
+ if (pids == NULL) {
+ break;
+ }
+ if (jparentArray != NULL) {
+ ppids = (*env)->GetLongArrayElements(env, jparentArray, NULL);
+ if (ppids == NULL) {
+ break;
+ }
+ }
+ if (jstimesArray != NULL) {
+ stimes = (*env)->GetLongArrayElements(env, jstimesArray, NULL);
+ if (stimes == NULL) {
+ break;
+ }
+ }
+
+ while ((num = getprocs64(ProcessBuffer, sizeof(struct procentry64), NULL,
+ sizeof(struct fdsinfo64), &idxptr, chunk)) != -1) {
+ for (i = 0; i < num; i++) {
+ pid_t childpid = (pid_t) ProcessBuffer[i].pi_pid;
+ pid_t ppid = (pid_t) ProcessBuffer[i].pi_ppid;
+
+ // Get the parent pid, and start time
+ if (pid == 0 || ppid == pid) {
+ if (count < arraySize) {
+ // Only store if it fits
+ pids[count] = (jlong) childpid;
+
+ if (ppids != NULL) {
+ // Store the parentPid
+ ppids[count] = (jlong) ppid;
+ }
+ if (stimes != NULL) {
+ // Store the process start time
+ stimes[count] = ((jlong) ProcessBuffer[i].pi_start) * 1000;
+ }
+ }
+ count++; // Count to tabulate size needed
+ }
+ }
+ if (num < chunk) {
+ break;
+ }
+ }
+ } while (0);
+
+ if (pids != NULL) {
+ (*env)->ReleaseLongArrayElements(env, jarray, pids, 0);
+ }
+ if (ppids != NULL) {
+ (*env)->ReleaseLongArrayElements(env, jparentArray, ppids, 0);
+ }
+ if (stimes != NULL) {
+ (*env)->ReleaseLongArrayElements(env, jstimesArray, stimes, 0);
+ }
+
+ if (num == -1) {
+ JNU_ThrowByNameWithLastError(env,
+ "java/lang/RuntimeException", "Unable to retrieve Process info");
+ return -1;
+ }
+
+ // If there were more pids than the arrays could hold, count will be greater than the array size
+ return count;
}
pid_t os_getParentPidAndTimings(JNIEnv *env, pid_t pid, jlong *total, jlong *start) {
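
The sizing rule described in the comment above (excess pids are dropped and the required length is returned) implies the usual two-call pattern: probe once, grow the arrays, call again. A standalone sketch of that calling convention, with a stub in place of the native call:

    #include <cstddef>
    #include <vector>

    // Stub standing in for a getChildren-style native call: writes up to
    // 'capacity' ids and returns how many exist, even if they did not all fit.
    static size_t list_children(long* out, size_t capacity) {
      const long kids[] = {101, 102, 103};
      const size_t n = sizeof(kids) / sizeof(kids[0]);
      for (size_t i = 0; i < n && i < capacity; i++) { out[i] = kids[i]; }
      return n;
    }

    std::vector<long> collect_children() {
      std::vector<long> ids(2);                        // deliberately too small
      size_t needed = list_children(ids.data(), ids.size());
      if (needed > ids.size()) {                       // arrays too short: grow and retry
        ids.resize(needed);
        needed = list_children(ids.data(), ids.size());
      }
      ids.resize(needed);
      return ids;
    }
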
diff --git a/src/java.base/aix/native/libnio/MappedMemoryUtils.c b/src/java.base/aix/native/libnio/MappedMemoryUtils.c
new file mode 100644
index 0000000000000..5d0216cc25102
--- /dev/null
+++ b/src/java.base/aix/native/libnio/MappedMemoryUtils.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "jni.h"
+#include "jni_util.h"
+#include "jvm.h"
+#include "jlong.h"
+#include "java_nio_MappedMemoryUtils.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+typedef char mincore_vec_t;
+
+static long calculate_number_of_pages_in_range(void* address, size_t len, size_t pagesize) {
+ uintptr_t address_unaligned = (uintptr_t) address;
+ uintptr_t address_aligned = address_unaligned & (~(pagesize - 1));
+ size_t len2 = len + (address_unaligned - address_aligned);
+ long numPages = (len2 + pagesize - 1) / pagesize;
+ return numPages;
+}
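
The helper above rounds a byte range out to whole pages. A quick worked example, assuming 4 KiB pages: a mapping of 8192 bytes that starts 100 bytes into a page covers three pages, not two:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const size_t    pagesize          = 4096;                   // assume 4 KiB pages
      const uintptr_t address_unaligned = 0x10000 + 100;          // 100 bytes into a page
      const uintptr_t address_aligned   = address_unaligned & ~(uintptr_t)(pagesize - 1);
      const size_t    len               = 8192;                   // exactly two pages of data
      const size_t    len2              = len + (address_unaligned - address_aligned); // 8292
      const long      numPages          = (long)((len2 + pagesize - 1) / pagesize);    // 3
      std::printf("pages touched: %ld\n", numPages);
      return 0;
    }
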
+
+JNIEXPORT jboolean JNICALL
+Java_java_nio_MappedMemoryUtils_isLoaded0(JNIEnv *env, jobject obj, jlong address,
+ jlong len, jlong numPages)
+{
+ jboolean loaded = JNI_TRUE;
+ int result = 0;
+ long i = 0;
+ void *a = (void *) jlong_to_ptr(address);
+ mincore_vec_t* vec = NULL;
+
+ /* See JDK-8186665 */
+ size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);
+ if ((long)pagesize == -1) {
+ return JNI_FALSE;
+ }
+ numPages = (jlong) calculate_number_of_pages_in_range(a, len, pagesize);
+
+ /* Include space for one sentinel byte at the end of the buffer
+ * to catch overflows. */
+ vec = (mincore_vec_t*) malloc(numPages + 1);
+
+ if (vec == NULL) {
+ JNU_ThrowOutOfMemoryError(env, NULL);
+ return JNI_FALSE;
+ }
+
+ vec[numPages] = '\x7f'; /* Write sentinel. */
+ result = mincore(a, (size_t)len, vec);
+ assert(vec[numPages] == '\x7f'); /* Check sentinel. */
+
+ if (result == -1) {
+ JNU_ThrowIOExceptionWithLastError(env, "mincore failed");
+ free(vec);
+ return JNI_FALSE;
+ }
+
+ for (i=0; i