diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml index 95ef72b31eb..6a56df295ba 100644 --- a/.github/workflows/build-windows.yml +++ b/.github/workflows/build-windows.yml @@ -98,12 +98,26 @@ jobs: id: gtest uses: ./.github/actions/get-gtest + - name: 'Check toolchain installed' + id: toolchain-check + run: | + set +e + '/c/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise/vc/auxiliary/build/vcvars64.bat' -vcvars_ver=${{ inputs.msvc-toolset-version }} + if [ $? -eq 0 ]; then + echo "Toolchain is already installed" + echo "toolchain-installed=true" >> $GITHUB_OUTPUT + else + echo "Toolchain is not yet installed" + echo "toolchain-installed=false" >> $GITHUB_OUTPUT + fi + - name: 'Install toolchain and dependencies' run: | # Run Visual Studio Installer '/c/Program Files (x86)/Microsoft Visual Studio/Installer/vs_installer.exe' \ - modify --quiet --installPath 'C:/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise' \ + modify --quiet --installPath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' \ --add Microsoft.VisualStudio.Component.VC.${{ inputs.msvc-toolset-version }}.${{ inputs.msvc-toolset-architecture }} + if: steps.toolchain-check.outputs.toolchain-installed != 'true' - name: 'Configure' run: > diff --git a/doc/testing.html b/doc/testing.html index 37d4df604f5..0f81647ecae 100644 --- a/doc/testing.html +++ b/doc/testing.html @@ -426,6 +426,14 @@ GB/2.

Sets the argument -timeoutHandlerTimeout for JTReg. The default value is 0. This is only valid if the failure handler is built.

+

JTREG_TEST_THREAD_FACTORY

+

Sets the -testThreadFactory for JTReg. It should be the +fully qualified classname of a class which implements +java.util.concurrent.ThreadFactory. One such implementation +class, named Virtual, is currently part of the JDK build in the +test/jtreg_test_thread_factory/ directory. This class gets +compiled during the test image build. The implementation of the Virtual +class creates a new virtual thread for executing each test class.

TEST_MODE

The test mode (agentvm or othervm).

Defaults to agentvm.

diff --git a/doc/testing.md b/doc/testing.md index 3de0c26c391..764fec15c8d 100644 --- a/doc/testing.md +++ b/doc/testing.md @@ -378,6 +378,15 @@ Defaults to 4. Sets the argument `-timeoutHandlerTimeout` for JTReg. The default value is 0. This is only valid if the failure handler is built. +#### JTREG_TEST_THREAD_FACTORY + +Sets the `-testThreadFactory` for JTReg. It should be the fully qualified classname +of a class which implements `java.util.concurrent.ThreadFactory`. +One such implementation class, named Virtual, is currently part of the JDK build +in the `test/jtreg_test_thread_factory/` directory. This class gets compiled during +the test image build. The implementation of the Virtual class creates a new virtual +thread for executing each test class. + #### TEST_MODE The test mode (`agentvm` or `othervm`). diff --git a/make/CompileInterimLangtools.gmk b/make/CompileInterimLangtools.gmk index 0be8aa46ded..51263fde3bd 100644 --- a/make/CompileInterimLangtools.gmk +++ b/make/CompileInterimLangtools.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -109,7 +109,9 @@ define SetupInterimModule $$(INTERIM_LANGTOOLS_ADD_EXPORTS) \ --patch-module java.base=$(BUILDTOOLS_OUTPUTDIR)/gensrc/java.base.interim \ --add-exports java.base/jdk.internal.javac=java.compiler.interim \ - --add-exports java.base/jdk.internal.javac=jdk.compiler.interim, \ + --add-exports java.base/jdk.internal.javac=jdk.compiler.interim \ + --add-exports jdk.internal.opt/jdk.internal.opt=jdk.compiler.interim \ + --add-exports jdk.internal.opt/jdk.internal.opt=jdk.javadoc.interim, \ )) $1_DEPS_INTERIM := $$(addsuffix .interim, $$(filter \ diff --git a/make/CompileToolsJdk.gmk b/make/CompileToolsJdk.gmk index dceca481451..13101c7cccf 100644 --- a/make/CompileToolsJdk.gmk +++ b/make/CompileToolsJdk.gmk @@ -57,7 +57,9 @@ $(eval $(call SetupJavaCompilation, BUILD_TOOLS_JDK, \ JAVAC_FLAGS := \ --add-exports java.desktop/sun.awt=ALL-UNNAMED \ --add-exports java.base/sun.text=ALL-UNNAMED \ - --add-exports java.base/sun.security.util=ALL-UNNAMED, \ + --add-exports java.base/sun.security.util=ALL-UNNAMED \ + --add-exports jdk.internal.opt/jdk.internal.opt=jdk.compiler.interim \ + --add-exports jdk.internal.opt/jdk.internal.opt=jdk.javadoc.interim, \ )) TARGETS += $(BUILD_TOOLS_JDK) @@ -90,7 +92,9 @@ $(eval $(call SetupJavaCompilation, COMPILE_DEPEND, \ --add-exports jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED \ --add-exports jdk.compiler/com.sun.tools.javac.main=ALL-UNNAMED \ --add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED \ - --add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED, \ + --add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED \ + --add-exports jdk.internal.opt/jdk.internal.opt=jdk.compiler.interim \ + --add-exports jdk.internal.opt/jdk.internal.opt=jdk.javadoc.interim, \ )) DEPEND_SERVICE_PROVIDER := $(BUILDTOOLS_OUTPUTDIR)/depend/META-INF/services/com.sun.source.util.Plugin diff --git a/make/Main.gmk b/make/Main.gmk index 
3c7c30caba7..5f647c963b0 100644 --- a/make/Main.gmk +++ b/make/Main.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -747,6 +747,22 @@ ifeq ($(BUILD_FAILURE_HANDLER), true) )) endif +ifeq ($(BUILD_JTREG_TEST_THREAD_FACTORY), true) + # Builds the test thread factory jtreg extension + $(eval $(call SetupTarget, build-test-test-thread-factory, \ + MAKEFILE := test/BuildJtregTestThreadFactory, \ + TARGET := build, \ + DEPS := interim-langtools exploded-image, \ + )) + + # Copies the jtreg test thread factory into the test image + $(eval $(call SetupTarget, test-image-test-thread-factory, \ + MAKEFILE := test/BuildJtregTestThreadFactory, \ + TARGET := images, \ + DEPS := build-test-test-thread-factory, \ + )) +endif + $(eval $(call SetupTarget, build-microbenchmark, \ MAKEFILE := test/BuildMicrobenchmark, \ DEPS := interim-langtools exploded-image, \ @@ -1227,6 +1243,10 @@ ifeq ($(BUILD_FAILURE_HANDLER), true) test-image: test-image-failure-handler endif +ifeq ($(BUILD_JTREG_TEST_THREAD_FACTORY), true) + test-image: test-image-test-thread-factory +endif + ifneq ($(JMH_CORE_JAR), ) test-image: build-microbenchmark endif diff --git a/make/RunTests.gmk b/make/RunTests.gmk index 50342077645..aba7b3a78f6 100644 --- a/make/RunTests.gmk +++ b/make/RunTests.gmk @@ -93,6 +93,9 @@ endif JTREG_FAILURE_HANDLER_DIR := $(TEST_IMAGE_DIR)/failure_handler JTREG_FAILURE_HANDLER := $(JTREG_FAILURE_HANDLER_DIR)/jtregFailureHandler.jar +JTREG_TEST_THREAD_FACTORY_DIR := $(TEST_IMAGE_DIR)/jtreg_test_thread_factory +JTREG_TEST_THREAD_FACTORY_JAR := $(JTREG_TEST_THREAD_FACTORY_DIR)/jtregTestThreadFactory.jar + JTREG_FAILURE_HANDLER_TIMEOUT ?= 0 ifneq ($(wildcard $(JTREG_FAILURE_HANDLER)), ) @@ -200,7 +203,7 @@ $(eval $(call 
SetTestOpt,REPORT,JTREG)) $(eval $(call ParseKeywordVariable, JTREG, \ SINGLE_KEYWORDS := JOBS TIMEOUT_FACTOR FAILURE_HANDLER_TIMEOUT \ - TEST_MODE ASSERT VERBOSE RETAIN MAX_MEM RUN_PROBLEM_LISTS \ + TEST_MODE ASSERT VERBOSE RETAIN TEST_THREAD_FACTORY MAX_MEM RUN_PROBLEM_LISTS \ RETRY_COUNT REPEAT_COUNT MAX_OUTPUT REPORT $(CUSTOM_JTREG_SINGLE_KEYWORDS), \ STRING_KEYWORDS := OPTIONS JAVA_OPTIONS VM_OPTIONS KEYWORDS \ EXTRA_PROBLEM_LISTS LAUNCHER_OPTIONS \ @@ -752,6 +755,7 @@ define SetupRunJtregTestBody JTREG_VERBOSE ?= fail,error,summary JTREG_RETAIN ?= fail,error + JTREG_TEST_THREAD_FACTORY ?= JTREG_RUN_PROBLEM_LISTS ?= false JTREG_RETRY_COUNT ?= 0 JTREG_REPEAT_COUNT ?= 0 @@ -765,6 +769,14 @@ define SetupRunJtregTestBody endif endif + ifneq ($$(JTREG_TEST_THREAD_FACTORY), ) + $1_JTREG_BASIC_OPTIONS += -testThreadFactoryPath:$$(JTREG_TEST_THREAD_FACTORY_JAR) + $1_JTREG_BASIC_OPTIONS += -testThreadFactory:$$(JTREG_TEST_THREAD_FACTORY) + $1_JTREG_BASIC_OPTIONS += $$(addprefix $$(JTREG_PROBLEM_LIST_PREFIX), $$(wildcard \ + $$(addprefix $$($1_TEST_ROOT)/, ProblemList-$$(JTREG_TEST_THREAD_FACTORY).txt) \ + )) + endif + ifneq ($$(JTREG_LAUNCHER_OPTIONS), ) $1_JTREG_LAUNCHER_OPTIONS += $$(JTREG_LAUNCHER_OPTIONS) endif diff --git a/make/autoconf/basic.m4 b/make/autoconf/basic.m4 index 2ec792f78b4..f02c0e33707 100644 --- a/make/autoconf/basic.m4 +++ b/make/autoconf/basic.m4 @@ -60,6 +60,7 @@ AC_DEFUN([BASIC_CHECK_LEFTOVER_OVERRIDDEN], ############################################################################### # Setup basic configuration paths, and platform-specific stuff related to PATHs. +# Make sure to only use tools set up in BASIC_SETUP_FUNDAMENTAL_TOOLS. 
AC_DEFUN_ONCE([BASIC_SETUP_PATHS], [ # Save the current directory this script was started from diff --git a/make/autoconf/basic_tools.m4 b/make/autoconf/basic_tools.m4 index c21ca52e477..92a4582ecdd 100644 --- a/make/autoconf/basic_tools.m4 +++ b/make/autoconf/basic_tools.m4 @@ -29,8 +29,8 @@ RECOMMENDED_PANDOC_VERSION=2.19.2 ############################################################################### -# Setup the most fundamental tools that relies on not much else to set up, -# but is used by much of the early bootstrap code. +# Setup the most fundamental tools, used for setting up build platform and +# path handling. AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS], [ # Bootstrapping: These tools are needed by UTIL_LOOKUP_PROGS @@ -42,7 +42,28 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS], UTIL_CHECK_NONEMPTY(FILE) AC_PATH_PROGS(LDD, ldd) - # First are all the fundamental required tools. + # Required tools + UTIL_REQUIRE_PROGS(ECHO, echo) + UTIL_REQUIRE_PROGS(TR, tr) + UTIL_REQUIRE_PROGS(UNAME, uname) + UTIL_REQUIRE_PROGS(WC, wc) + + # Required tools with some special treatment + UTIL_REQUIRE_SPECIAL(GREP, [AC_PROG_GREP]) + UTIL_REQUIRE_SPECIAL(EGREP, [AC_PROG_EGREP]) + UTIL_REQUIRE_SPECIAL(SED, [AC_PROG_SED]) + + # Tools only needed on some platforms + UTIL_LOOKUP_PROGS(PATHTOOL, cygpath wslpath) + UTIL_LOOKUP_PROGS(CMD, cmd.exe, $PATH:/cygdrive/c/windows/system32:/mnt/c/windows/system32:/c/windows/system32) +]) + +############################################################################### +# Setup further tools that should be resolved early but after setting up +# build platform and path handling. 
+AC_DEFUN_ONCE([BASIC_SETUP_TOOLS], +[ + # Required tools UTIL_REQUIRE_PROGS(BASH, bash) UTIL_REQUIRE_PROGS(CAT, cat) UTIL_REQUIRE_PROGS(CHMOD, chmod) @@ -50,7 +71,6 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS], UTIL_REQUIRE_PROGS(CUT, cut) UTIL_REQUIRE_PROGS(DATE, date) UTIL_REQUIRE_PROGS(DIFF, gdiff diff) - UTIL_REQUIRE_PROGS(ECHO, echo) UTIL_REQUIRE_PROGS(EXPR, expr) UTIL_REQUIRE_PROGS(FIND, find) UTIL_REQUIRE_PROGS(GUNZIP, gunzip) @@ -72,16 +92,10 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS], UTIL_REQUIRE_PROGS(TAR, gtar tar) UTIL_REQUIRE_PROGS(TEE, tee) UTIL_REQUIRE_PROGS(TOUCH, touch) - UTIL_REQUIRE_PROGS(TR, tr) - UTIL_REQUIRE_PROGS(UNAME, uname) - UTIL_REQUIRE_PROGS(WC, wc) UTIL_REQUIRE_PROGS(XARGS, xargs) - # Then required tools that require some special treatment. - UTIL_REQUIRE_SPECIAL(GREP, [AC_PROG_GREP]) - UTIL_REQUIRE_SPECIAL(EGREP, [AC_PROG_EGREP]) + # Required tools with some special treatment UTIL_REQUIRE_SPECIAL(FGREP, [AC_PROG_FGREP]) - UTIL_REQUIRE_SPECIAL(SED, [AC_PROG_SED]) # Optional tools, we can do without them UTIL_LOOKUP_PROGS(DF, df) @@ -90,10 +104,8 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS], UTIL_LOOKUP_PROGS(READLINK, greadlink readlink) UTIL_LOOKUP_PROGS(WHOAMI, whoami) - # These are only needed on some platforms - UTIL_LOOKUP_PROGS(PATHTOOL, cygpath wslpath) + # Tools only needed on some platforms UTIL_LOOKUP_PROGS(LSB_RELEASE, lsb_release) - UTIL_LOOKUP_PROGS(CMD, cmd.exe, $PATH:/cygdrive/c/windows/system32:/mnt/c/windows/system32:/c/windows/system32) # For compare.sh only UTIL_LOOKUP_PROGS(CMP, cmp) diff --git a/make/autoconf/buildjdk-spec.gmk.in b/make/autoconf/buildjdk-spec.gmk.in index 524f35f417c..3e7c4a39f60 100644 --- a/make/autoconf/buildjdk-spec.gmk.in +++ b/make/autoconf/buildjdk-spec.gmk.in @@ -103,3 +103,7 @@ JVM_FEATURES_server := cds compiler1 compiler2 g1gc serialgc override EXTRA_CFLAGS := override EXTRA_CXXFLAGS := override EXTRA_LDFLAGS := + +# hsdis is not needed +HSDIS_BACKEND := none 
+ENABLE_HSDIS_BUNDLING := false diff --git a/make/autoconf/configure.ac b/make/autoconf/configure.ac index 5d48bd9aadd..6afa36ac18d 100644 --- a/make/autoconf/configure.ac +++ b/make/autoconf/configure.ac @@ -86,6 +86,7 @@ PLATFORM_SETUP_OPENJDK_BUILD_AND_TARGET # Continue setting up basic stuff. Most remaining code require fundamental tools. BASIC_SETUP_PATHS +BASIC_SETUP_TOOLS BASIC_SETUP_BUILD_ENV # Check if it's a pure open build or if custom sources are to be used. @@ -222,6 +223,10 @@ JDKOPT_SETUP_UNDEFINED_BEHAVIOR_SANITIZER # LeakSanitizer JDKOPT_SETUP_LEAK_SANITIZER +# Fallback linker +# This needs to go before 'LIB_DETERMINE_DEPENDENCIES' +JDKOPT_SETUP_FALLBACK_LINKER + ############################################################################### # # Check dependencies for external and internal libraries. @@ -249,6 +254,7 @@ HOTSPOT_SETUP_MISC ############################################################################### LIB_TESTS_ENABLE_DISABLE_FAILURE_HANDLER +LIB_TESTS_ENABLE_DISABLE_JTREG_TEST_THREAD_FACTORY JDKOPT_ENABLE_DISABLE_GENERATE_CLASSLIST JDKOPT_EXCLUDE_TRANSLATIONS diff --git a/make/autoconf/flags-cflags.m4 b/make/autoconf/flags-cflags.m4 index c40f3c02106..e9959196acb 100644 --- a/make/autoconf/flags-cflags.m4 +++ b/make/autoconf/flags-cflags.m4 @@ -494,7 +494,7 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER], fi if test "x$TOOLCHAIN_TYPE" = xgcc; then - TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -fcheck-new -fstack-protector" + TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -fstack-protector" TOOLCHAIN_CFLAGS_JDK="-pipe -fstack-protector" # reduce lib size on linux in link step, this needs also special compile flags # do this on s390x also for libjvm (where serviceability agent is not supported) diff --git a/make/autoconf/jdk-options.m4 b/make/autoconf/jdk-options.m4 index a76fdab5ae5..f08cc6ddd41 100644 --- a/make/autoconf/jdk-options.m4 +++ b/make/autoconf/jdk-options.m4 @@ -903,3 +903,22 @@ AC_DEFUN([JDKOPT_SETUP_MACOSX_SIGNING], 
AC_SUBST(MACOSX_CODESIGN_MODE) fi ]) + +################################################################################ +# +# fallback linker +# +AC_DEFUN_ONCE([JDKOPT_SETUP_FALLBACK_LINKER], +[ + FALLBACK_LINKER_DEFAULT=false + + if HOTSPOT_CHECK_JVM_VARIANT(zero); then + FALLBACK_LINKER_DEFAULT=true + fi + + UTIL_ARG_ENABLE(NAME: fallback-linker, DEFAULT: $FALLBACK_LINKER_DEFAULT, + RESULT: ENABLE_FALLBACK_LINKER, + DESC: [enable libffi-based fallback implementation of java.lang.foreign.Linker], + CHECKING_MSG: [if fallback linker enabled]) + AC_SUBST(ENABLE_FALLBACK_LINKER) +]) diff --git a/make/autoconf/lib-tests.m4 b/make/autoconf/lib-tests.m4 index 4d771f7e5f2..aa02ac4ef97 100644 --- a/make/autoconf/lib-tests.m4 +++ b/make/autoconf/lib-tests.m4 @@ -28,7 +28,7 @@ ################################################################################ # Minimum supported versions -JTREG_MINIMUM_VERSION=7.1.1 +JTREG_MINIMUM_VERSION=7.2 GTEST_MINIMUM_VERSION=1.13.0 ############################################################################### @@ -301,3 +301,22 @@ AC_DEFUN_ONCE([LIB_TESTS_ENABLE_DISABLE_FAILURE_HANDLER], ]) AC_SUBST(BUILD_FAILURE_HANDLER) ]) + +AC_DEFUN_ONCE([LIB_TESTS_ENABLE_DISABLE_JTREG_TEST_THREAD_FACTORY], +[ + UTIL_ARG_ENABLE(NAME: jtreg-test-thread-factory, DEFAULT: auto, + RESULT: BUILD_JTREG_TEST_THREAD_FACTORY, + DESC: [enable building of the jtreg test thread factory], + DEFAULT_DESC: [enabled if jtreg is present], + CHECKING_MSG: [if the jtreg test thread factory should be built], + CHECK_AVAILABLE: [ + AC_MSG_CHECKING([if the jtreg test thread factory is available]) + if test "x$JT_HOME" != "x"; then + AC_MSG_RESULT([yes]) + else + AVAILABLE=false + AC_MSG_RESULT([no (jtreg not present)]) + fi + ]) + AC_SUBST(BUILD_JTREG_TEST_THREAD_FACTORY) +]) diff --git a/make/autoconf/libraries.m4 b/make/autoconf/libraries.m4 index 9e746b470c9..a1fc81564b1 100644 --- a/make/autoconf/libraries.m4 +++ b/make/autoconf/libraries.m4 @@ -82,7 +82,7 @@ 
AC_DEFUN_ONCE([LIB_DETERMINE_DEPENDENCIES], fi # Check if ffi is needed - if HOTSPOT_CHECK_JVM_VARIANT(zero); then + if HOTSPOT_CHECK_JVM_VARIANT(zero) || test "x$ENABLE_FALLBACK_LINKER" = "xtrue"; then NEEDS_LIB_FFI=true else NEEDS_LIB_FFI=false diff --git a/make/autoconf/platform.m4 b/make/autoconf/platform.m4 index babb24a9b0c..4a13dad24b5 100644 --- a/make/autoconf/platform.m4 +++ b/make/autoconf/platform.m4 @@ -640,6 +640,7 @@ AC_DEFUN([PLATFORM_SET_MODULE_TARGET_OS_VALUES], ]) #%%% Build and target systems %%% +# Make sure to only use tools set up in BASIC_SETUP_FUNDAMENTAL_TOOLS. AC_DEFUN_ONCE([PLATFORM_SETUP_OPENJDK_BUILD_AND_TARGET], [ # Figure out the build and target systems. # Note that in autoconf terminology, "build" is obvious, but "target" @@ -723,7 +724,7 @@ AC_DEFUN_ONCE([PLATFORM_SETUP_OPENJDK_TARGET_ENDIANNESS], [ ############################################################################### # - # Is the target little of big endian? + # Is the target little or big endian? # AC_C_BIGENDIAN([ENDIAN="big"],[ENDIAN="little"],[ENDIAN="unknown"],[ENDIAN="universal_endianness"]) diff --git a/make/autoconf/spec.gmk.in b/make/autoconf/spec.gmk.in index ff073c78c92..0f85917814e 100644 --- a/make/autoconf/spec.gmk.in +++ b/make/autoconf/spec.gmk.in @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -358,6 +358,8 @@ BUILDJDK_OUTPUTDIR=$(OUTPUTDIR)/buildjdk BUILD_FAILURE_HANDLER := @BUILD_FAILURE_HANDLER@ +BUILD_JTREG_TEST_THREAD_FACTORY := @BUILD_JTREG_TEST_THREAD_FACTORY@ + ENABLE_GENERATE_CLASSLIST := @ENABLE_GENERATE_CLASSLIST@ EXCLUDE_TRANSLATIONS := @EXCLUDE_TRANSLATIONS@ @@ -407,6 +409,9 @@ TEST_JOBS?=@TEST_JOBS@ DEFAULT_MAKE_TARGET:=@DEFAULT_MAKE_TARGET@ DEFAULT_LOG:=@DEFAULT_LOG@ +# Fallback linker +ENABLE_FALLBACK_LINKER:=@ENABLE_FALLBACK_LINKER@ + FREETYPE_TO_USE:=@FREETYPE_TO_USE@ FREETYPE_LIBS:=@FREETYPE_LIBS@ FREETYPE_CFLAGS:=@FREETYPE_CFLAGS@ @@ -690,6 +695,8 @@ INTERIM_LANGTOOLS_ADD_EXPORTS := \ --add-exports java.base/sun.invoke.util=jdk.compiler.interim \ --add-exports java.base/jdk.internal.javac=java.compiler.interim \ --add-exports java.base/jdk.internal.javac=jdk.compiler.interim \ + --add-exports jdk.internal.opt/jdk.internal.opt=jdk.compiler.interim \ + --add-exports jdk.internal.opt/jdk.internal.opt=jdk.javadoc.interim \ # INTERIM_LANGTOOLS_MODULES_COMMA := $(strip $(subst $(SPACE),$(COMMA),$(strip \ $(INTERIM_LANGTOOLS_MODULES)))) diff --git a/make/conf/github-actions.conf b/make/conf/github-actions.conf index d99f76ae40c..35d26baaccb 100644 --- a/make/conf/github-actions.conf +++ b/make/conf/github-actions.conf @@ -26,16 +26,16 @@ # Versions and download locations for dependencies used by GitHub Actions (GHA) GTEST_VERSION=1.13.0 -JTREG_VERSION=7.1.1+1 +JTREG_VERSION=7.2+1 LINUX_X64_BOOT_JDK_EXT=tar.gz -LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk19/877d6127e982470ba2a7faa31cc93d04/36/GPL/openjdk-19_linux-x64_bin.tar.gz -LINUX_X64_BOOT_JDK_SHA256=f47aba585cfc9ecff1ed8e023524e8309f4315ed8b80100b40c7dcc232c12f96 +LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_linux-x64_bin.tar.gz +LINUX_X64_BOOT_JDK_SHA256=bb863b2d542976d1ae4b7b81af3e78b1e4247a64644350b552d298d8dc5980dc 
MACOS_X64_BOOT_JDK_EXT=tar.gz -MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk19/877d6127e982470ba2a7faa31cc93d04/36/GPL/openjdk-19_macos-x64_bin.tar.gz -MACOS_X64_BOOT_JDK_SHA256=bfd33f5b2590fd552ae2d9231340c6b4704a872f927dce1c52860b78c49a5a11 +MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_macos-x64_bin.tar.gz +MACOS_X64_BOOT_JDK_SHA256=47cf960d9bb89dbe987535a389f7e26c42de7c984ef5108612d77c81aa8cc6a4 WINDOWS_X64_BOOT_JDK_EXT=zip -WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk19/877d6127e982470ba2a7faa31cc93d04/36/GPL/openjdk-19_windows-x64_bin.zip -WINDOWS_X64_BOOT_JDK_SHA256=8fabcee7c4e8d3b53486777ecd27bb906d67d7c1efd1bf22a8290cf659afa487 +WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_windows-x64_bin.zip +WINDOWS_X64_BOOT_JDK_SHA256=c92fae5e42b9aecf444a66c8ec563c652f60b1e231dfdd33a4f5a3e3603058fb diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js index 1b866e21207..117b5aaf157 100644 --- a/make/conf/jib-profiles.js +++ b/make/conf/jib-profiles.js @@ -390,7 +390,7 @@ var getJibProfilesCommon = function (input, data) { }; }; - common.boot_jdk_version = "19"; + common.boot_jdk_version = "20"; common.boot_jdk_build_number = "36"; common.boot_jdk_home = input.get("boot_jdk", "install_path") + "/jdk-" + common.boot_jdk_version @@ -587,21 +587,23 @@ var getJibProfilesProfiles = function (input, common, data) { "linux-x64-zero": { target_os: "linux", target_cpu: "x64", - dependencies: ["devkit", "gtest"], + dependencies: ["devkit", "gtest", "libffi"], configure_args: concat(common.configure_args_64bit, [ "--with-zlib=system", "--with-jvm-variants=zero", - "--enable-libffi-bundling" + "--with-libffi=" + input.get("libffi", "home_path"), + "--enable-libffi-bundling", ]) }, "linux-aarch64-zero": { target_os: "linux", target_cpu: "aarch64", - dependencies: ["devkit", "gtest"], + dependencies: 
["devkit", "gtest", "libffi"], configure_args: concat(common.configure_args_64bit, [ "--with-zlib=system", "--with-jvm-variants=zero", + "--with-libffi=" + input.get("libffi", "home_path"), "--enable-libffi-bundling" ]) }, @@ -610,10 +612,11 @@ var getJibProfilesProfiles = function (input, common, data) { target_os: "linux", target_cpu: "x86", build_cpu: "x64", - dependencies: ["devkit", "gtest"], + dependencies: ["devkit", "gtest", "libffi"], configure_args: concat(common.configure_args_32bit, [ "--with-zlib=system", "--with-jvm-variants=zero", + "--with-libffi=" + input.get("libffi", "home_path"), "--enable-libffi-bundling" ]) } @@ -744,6 +747,40 @@ var getJibProfilesProfiles = function (input, common, data) { common.debug_profile_artifacts(artifactData[name])); }); + // Define artifact just for linux-x64-zero, which is the only one we test on + ["linux-x64"].forEach(function (name) { + var o = artifactData[name] + var pf = o.platform + var jdk_subdir = (o.jdk_subdir != null ? o.jdk_subdir : "jdk-" + data.version); + var jdk_suffix = (o.jdk_suffix != null ? o.jdk_suffix : "tar.gz"); + var zeroName = name + "-zero"; + profiles[zeroName].artifacts = { + jdk: { + local: "bundles/\\(jdk.*bin." + jdk_suffix + "\\)", + remote: [ + "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin-zero." 
+ jdk_suffix, + ], + subdir: jdk_subdir, + exploded: "images/jdk", + }, + test: { + local: "bundles/\\(jdk.*bin-tests.tar.gz\\)", + remote: [ + "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin-zero-tests.tar.gz", + ], + exploded: "images/test" + }, + jdk_symbols: { + local: "bundles/\\(jdk.*bin-symbols.tar.gz\\)", + remote: [ + "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin-zero-symbols.tar.gz", + ], + subdir: jdk_subdir, + exploded: "images/jdk" + }, + }; + }); + buildJdkDep = input.build_os + "-" + input.build_cpu + ".jdk"; docsProfiles = { "docs": { @@ -1150,9 +1187,9 @@ var getJibProfilesDependencies = function (input, common) { jtreg: { server: "jpg", product: "jtreg", - version: "7.1.1", + version: "7.2", build_number: "1", - file: "bundles/jtreg-7.1.1+1.zip", + file: "bundles/jtreg-7.2+1.zip", environment_name: "JT_HOME", environment_path: input.get("jtreg", "home_path") + "/bin", configure_args: "--with-jtreg=" + input.get("jtreg", "home_path"), @@ -1234,6 +1271,13 @@ var getJibProfilesDependencies = function (input, common) { ext: "tar.gz", revision: "1.13.0+1.0" }, + + libffi: { + organization: common.organization, + module: "libffi-" + input.target_platform, + ext: "tar.gz", + revision: "3.4.2+1.0" + }, }; return dependencies; diff --git a/make/conf/version-numbers.conf b/make/conf/version-numbers.conf index 4a3e5133567..6729593b769 100644 --- a/make/conf/version-numbers.conf +++ b/make/conf/version-numbers.conf @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,6 @@ DEFAULT_VERSION_DATE=2023-09-19 DEFAULT_VERSION_CLASSFILE_MAJOR=65 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`" DEFAULT_VERSION_CLASSFILE_MINOR=0 DEFAULT_VERSION_DOCS_API_SINCE=11 -DEFAULT_ACCEPTABLE_BOOT_VERSIONS="19 20 21" +DEFAULT_ACCEPTABLE_BOOT_VERSIONS="20 21" DEFAULT_JDK_SOURCE_TARGET_VERSION=21 DEFAULT_PROMOTED_VERSION_PRE=ea diff --git a/make/data/cldr/unicode-license.txt b/make/data/cldr/LICENSE.txt similarity index 100% rename from make/data/cldr/unicode-license.txt rename to make/data/cldr/LICENSE.txt diff --git a/make/data/cldr/README b/make/data/cldr/README index b75492f15a4..cca0921da83 100644 --- a/make/data/cldr/README +++ b/make/data/cldr/README @@ -1,4 +1,4 @@ CLDR - Unicode Common Locale Data Repository http://cldr.unicode.org -CLDR version installed: 42 +CLDR version installed: 43 diff --git a/make/data/cldr/common/bcp47/currency.xml b/make/data/cldr/common/bcp47/currency.xml index 49fddfc7e6b..f357a38dd2f 100644 --- a/make/data/cldr/common/bcp47/currency.xml +++ b/make/data/cldr/common/bcp47/currency.xml @@ -67,9 +67,9 @@ For terms of use, see http://www.unicode.org/copyright.html - + - + @@ -112,7 +112,7 @@ For terms of use, see http://www.unicode.org/copyright.html - + @@ -136,8 +136,8 @@ For terms of use, see http://www.unicode.org/copyright.html - - + + @@ -181,7 +181,7 @@ For terms of use, see http://www.unicode.org/copyright.html - + @@ -189,7 +189,7 @@ For terms of use, see http://www.unicode.org/copyright.html - + @@ -210,7 +210,7 @@ For terms of use, see http://www.unicode.org/copyright.html - + @@ -219,7 +219,7 @@ For terms of use, see http://www.unicode.org/copyright.html - + @@ -235,11 +235,11 @@ For terms of use, see http://www.unicode.org/copyright.html - + - - + + @@ -274,28 +274,28 @@ For terms of use, see http://www.unicode.org/copyright.html - + + - - + - - + + - + diff --git a/make/data/cldr/common/bcp47/segmentation.xml 
b/make/data/cldr/common/bcp47/segmentation.xml index 3f54cd8567a..427a3adcbd1 100644 --- a/make/data/cldr/common/bcp47/segmentation.xml +++ b/make/data/cldr/common/bcp47/segmentation.xml @@ -10,25 +10,25 @@ For terms of use, see http://www.unicode.org/copyright.html - + - - - + + + - - - - + + + + - - + + diff --git a/make/data/cldr/common/bcp47/timezone.xml b/make/data/cldr/common/bcp47/timezone.xml index 57d81ed48ec..20ec0c1f9da 100644 --- a/make/data/cldr/common/bcp47/timezone.xml +++ b/make/data/cldr/common/bcp47/timezone.xml @@ -96,20 +96,20 @@ For terms of use, see http://www.unicode.org/copyright.html - + - + - + @@ -177,7 +177,7 @@ For terms of use, see http://www.unicode.org/copyright.html - + @@ -190,7 +190,7 @@ For terms of use, see http://www.unicode.org/copyright.html - + @@ -280,7 +280,7 @@ For terms of use, see http://www.unicode.org/copyright.html - + @@ -390,7 +390,7 @@ For terms of use, see http://www.unicode.org/copyright.html - + diff --git a/make/data/cldr/common/bcp47/variant.xml b/make/data/cldr/common/bcp47/variant.xml index 9940379315c..0517a9b923c 100644 --- a/make/data/cldr/common/bcp47/variant.xml +++ b/make/data/cldr/common/bcp47/variant.xml @@ -7,18 +7,18 @@ - - - + + + - + + description="Valid unicode_subdivision_subtag for the region subtag as specified in LDML, based on subdivisionContainment data in supplementalData, prefixed by the associated unicode_region_subtag" since="28"/> diff --git a/make/data/cldr/common/dtd/ldml.dtd b/make/data/cldr/common/dtd/ldml.dtd index 76c0176780b..ade0de9e581 100644 --- a/make/data/cldr/common/dtd/ldml.dtd +++ b/make/data/cldr/common/dtd/ldml.dtd @@ -1,5 +1,5 @@ - + @@ -68,6 +68,7 @@ CLDR data files are interpreted according to the LDML specification (http://unic + @@ -465,6 +466,7 @@ CLDR data files are interpreted according to the LDML specification (http://unic + @@ -2825,10 +2827,11 @@ CLDR data files are interpreted according to the LDML specification (http://unic - + + @@ -3123,7 +3126,7 @@ 
CLDR data files are interpreted according to the LDML specification (http://unic - + @@ -3159,10 +3162,8 @@ CLDR data files are interpreted according to the LDML specification (http://unic - - @@ -3172,7 +3173,6 @@ CLDR data files are interpreted according to the LDML specification (http://unic - @@ -3183,7 +3183,6 @@ CLDR data files are interpreted according to the LDML specification (http://unic - @@ -3193,18 +3192,16 @@ CLDR data files are interpreted according to the LDML specification (http://unic - - + - + - + - + - @@ -3212,14 +3209,12 @@ CLDR data files are interpreted according to the LDML specification (http://unic - - + - - + diff --git a/make/data/cldr/common/dtd/ldmlBCP47.dtd b/make/data/cldr/common/dtd/ldmlBCP47.dtd index b82183c1825..ce302dc6990 100644 --- a/make/data/cldr/common/dtd/ldmlBCP47.dtd +++ b/make/data/cldr/common/dtd/ldmlBCP47.dtd @@ -1,5 +1,5 @@ - + diff --git a/make/data/cldr/common/dtd/ldmlSupplemental.dtd b/make/data/cldr/common/dtd/ldmlSupplemental.dtd index 9545872ddbc..9855a1ce7a6 100644 --- a/make/data/cldr/common/dtd/ldmlSupplemental.dtd +++ b/make/data/cldr/common/dtd/ldmlSupplemental.dtd @@ -1,18 +1,18 @@ - + - + @@ -216,7 +216,7 @@ CLDR data files are interpreted according to the LDML specification (http://unic - + @@ -237,6 +237,11 @@ CLDR data files are interpreted according to the LDML specification (http://unic + + + + + @@ -250,6 +255,12 @@ CLDR data files are interpreted according to the LDML specification (http://unic + + + + + + @@ -439,7 +450,6 @@ CLDR data files are interpreted according to the LDML specification (http://unic - @@ -900,6 +910,8 @@ CLDR data files are interpreted according to the LDML specification (http://unic + + @@ -923,6 +935,9 @@ CLDR data files are interpreted according to the LDML specification (http://unic + + + diff --git a/make/data/cldr/common/main/aa.xml b/make/data/cldr/common/main/aa.xml new file mode 100644 index 00000000000..0c36f7e0435 --- /dev/null +++ 
b/make/data/cldr/common/main/aa.xml @@ -0,0 +1,231 @@ + + + + + + + + + + + Qafar + + + Yabuuti + Eretria + Otobbia + + + + [a b t s e c k x i d q r f g o l m n u w h y] + [j p v z] + [A B T S E C K X I D Q R F G O L M N U W H Y] + + + + + + + + EEEE, MMMM dd, y G + GyMMMMEEEEdd + + + + + dd MMMM y G + GyMMMMdd + + + + + dd-MMM-y G + GyMMMdd + + + + + dd/MM/yy GGGGG + GGGGGyyMMdd + + + + + + + + + Qun + Nah + Cig + Agd + Cax + Qas + Qad + Leq + Way + Dit + Xim + Kax + + + Qunxa Garablu + Kudo + Ciggilta Kudo + Agda Baxis + Caxah Alsa + Qasa Dirri + Qado Dirri + Liiqen + Waysu + Diteli + Ximoli + Kaxxa Garablu + + + + + Q + N + C + A + C + Q + Q + L + W + D + X + K + + + + + + + Aca + Etl + Tal + Arb + Kam + Gum + Sab + + + Acaada + Etleeni + Talaata + Arbaqa + Kamiisi + Gumqata + Sabti + + + + + A + E + T + A + K + G + S + + + + + + + saaku + carra + + + saaku + carra + + + + + + Yaasuusuk Duma + Yaasuusuk Wadir + + + YD + YW + + + + + + EEEE, MMMM dd, y + yMMMMEEEEdd + + + + + dd MMMM y + yMMMMdd + + + + + dd-MMM-y + yMMMdd + + + + + dd/MM/yy + yyMMdd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + + + + ¤#,##0.00 + + + + + + Br + + + + diff --git a/make/data/cldr/common/main/aa_DJ.xml b/make/data/cldr/common/main/aa_DJ.xml new file mode 100644 index 00000000000..f8f3194a14f --- /dev/null +++ b/make/data/cldr/common/main/aa_DJ.xml @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + Qunxa Garablu + Kudo + Ciggilta Kudo + Agda Baxis + Caxah Alsa + Qasa Dirri + Qado Dirri + Leqeeni + Waysu + Diteli + Ximoli + Kaxxa Garablu + + + + + + + + + + Fdj + + + + diff --git a/make/data/cldr/common/main/aa_ER.xml b/make/data/cldr/common/main/aa_ER.xml new file mode 100644 index 00000000000..ea1daef97dc --- /dev/null +++ b/make/data/cldr/common/main/aa_ER.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + + + Nfk + + + + diff --git a/make/data/cldr/common/main/aa_ET.xml 
b/make/data/cldr/common/main/aa_ET.xml new file mode 100644 index 00000000000..6ccfb6dad6b --- /dev/null +++ b/make/data/cldr/common/main/aa_ET.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/ab.xml b/make/data/cldr/common/main/ab.xml new file mode 100644 index 00000000000..78f7728b0a7 --- /dev/null +++ b/make/data/cldr/common/main/ab.xml @@ -0,0 +1,4066 @@ + + + + + + + + + + + {0} ({1}) + {0}, {1} + {0}: {1} + + + Аԥсшәа + ачех + адангме + аедыгь + африкаанс + агем + аин + акан + алеут + аладалтаи + амхар + арагон + ангика + араб + амапуче + арапахо + анедџитә араб + ассам + асу + астури + авар + авадхи + аимара + азербаиџьан + абашкир + абали + абаса + абелорус + абемба + абена + аболгар + абхоџпури + абислама + абини + асиксика + абамбара + абенгал + атибет + абретон + абодо + абосни + абуги + абилин + акаталан + акаиуга + ачакма + ачечен + асебуано + акига + ачаморро + ачукот + амари + ачоктав + ачипевиан + ачероки + ашаиен + агәҭантәи ақәырд + ақәырд (асорани) + акорсикан + ачеш + ауахәама-славиан + ачуваш + авалли + адат + адакота + адаргин + атаита + агерман + австриа агерман + алитературатә ашвеицар агерман + адогриб + аџерма + адогри + ҵаҟатәи лужик + адуала + амалдив + адиола-фони + аӡонг-ка + адаза + аембу + аеве + аефик + аекаџук + абырзен + англыз + австралиатәи англыз + канадатәи англыз + абритан англыз + америкатәи англыз + аесперанто + аиспан + алаҭын-америкатәи аиспан + европатәи аиспан + мексикатәи аиспан + аестон + абаск + аевондо + аџьам + адари + афулах + афин + афилиппин + афиџи + афарер + афон + афранцыз + канадатәи афранцыз + швеицариатәи афранцыз + каџунтәи афранцыз + африул + мраҭашәаратәи африз + аирланд + ага + агел + агеез + агилберт + агалиси + агуарани + агоронтало + швеицариатәи агерман + агуџарати + агуси + амен + агвичин + ахауса + ахаида + агаваи + аиврит + ахинди + ахынглыз + ахилигаинон + ахмонг + ахорват + хыхьтәи-алужик + агаитиан + авенгер + ахупа + аерман + агереро + аинтерлингва + аибан + 
аибибио + аиндонез + аигбо + аносу + аилоко + аингәыш + аидо + аисланд + аиталиа + аинуктитут + аиапон + аложбан + ангомба + амачаме + аиаван + ақырҭ + акабил + акачин + акаџи + акамба + аҟабарда + атиап + амаконде + акабувердиану + акоро + акхаси + акоира чиини + акикәиу + акәнама + аҟазах + акако + агренланд + акаленџин + акхмер + акимбунду + аканнада + акореи + аконкани + акпелле + аканури + аҟарач-абалҟар + акарел + акурух + акашмири + ашамбала + абафиа + акиолн + ақәырд + акәымык + акоми + акорн + акиргиз + алаҭын + аладино + аланго + алиуксембург + алезгьын + аганда + алимбург + алакота + алингала + алаос + алуизиантәи акреол + алози + аҩадалур + алитов + алуба-акатанга + алуба-алулуа + алунда + алуо + амизо + алухьиа + алатыш + амадур + амагахи + амаитхили + амакассар + амасаи + амокшан + аменде + амеру + амаврикитә креол + амалагаси + амакуа-амеетто + амета + амаршал + амаори + амикмак + аминангкабау + амакедон + амалаиалам + амонгол + аманипур + амохаук + амоси + амаратхи + амалаи + амалти + амунданг + еиуеиԥшым аҭаацәаратә бызшәақәа + акрик + амиранд + абирман + аерзиан + амазандеран + анауру + анеаполитан + анама + анорвегтәи абукмол + нхыҵ андебеле + ҵаҟатәи агерман + ҵаҟатәи асаксон + анепал + аневар + андонга + аниас + аниуе + анидерланд + афламанд + аквасио + аниунорск + ангиембунд + анорвег + аногаи + анко + аладатәи андебеле + аҩадатәи асото + ануер + анавахо + анианџа + анианколе + аокситан + аоромо + аориа + ауаԥс + апанџаби + апангасинан + апампанга + апапиаменто + апалау + анигери-креол + апол + апрус + апушту + апортугал + бразилиатәи апортугал + европатәи апортугал + акечуа + арапануи + араротонга + арохинџа + ароманш + арунди + арумын + молдав + аромбо + аурыс + аромун + акиниаруанда + аруанда + асанскрит + асандаве + асаха + асамбуру + асантали + ангамбаи + асангу + асардин + асицили + ашотланд + асиндхи + аҩадасаам + асена + акоираборо асенни + асанго + аташелхит + ашан + асингал + асловак + асловен + асамоан + аинари-асаам + аколтта-асаам 
+ ашона + асонинке + асомали + арнауҭ + асерб + асранан-атонго + асвази + аладатәи асото + асундан + асукәыма + ашвед + асуахили + конголезтәи асуахили + акомор + ашьам + атамил + ателугу + атемне + атесо + атетум + аҭаџьык + атаи + атигриниа + атигре + атуркмен + аклингон + атлингит + атсвана + атонган + аток-аписин + аҭырқә + аседек + атсонга + аҭаҭар + атумбука + атувалу + атасавак + атаитиан + атувин + абжьара-атластәи атамазигхт + аудмурт + ауигур + аукраин + аумбунду + еилкаам абызшәа + аурду + аузбек + аваи + авенда + авиетнам + аволапиук + авунџо + аваллон + аваллис + аволамо + авараи + аволоф + аву + аҟалмыҟ + акоса + асога + аиангбен + аиемба + аидиш + аиоруба + акантон + атамазигхт + акитаи + аҩадакитаи + акитаи, имариоу аҩыра + аҩадакитаи, имариоу аҩыра + акитаи, атрадициатә ҩыра + аҩадакитаи, атрадициатә ҩыра + азулу + азуни + абызшәатә хыҵхырҭақәа ыҟаӡам + азаза + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + адунеи зегьы + Африка + Аҩадатәи Америка + Аладатәи Америка + Океаниа + Мраҭашәаратәи Африка + Агәҭантәи Америка + Мрагыларатәи Африка + Аҩадатәи Африка + Агәҭантәи Африка + Аладатәи Африка + Америка + Аҩада-Америкатәи арегион + Кариб + Мрагыларатәи Азиа + Аладатәи Азиа + Алада-Мрагыларатәи Азиа + Аладатәи Европа + Австралазиа + Меланезиа + Микронезиа + Полинезиа + Азиа + Агәҭантәи Азиа + Мраҭашәаратәи Азиа + Европа + Мрагыларатәи Европа + Аҩадатәи Европа + Мраҭашәаратәи Европа + Атроптә Африка + Алаҭынтәи Америка + ад-ха Вознесениа + Андорра + ЕАЕ + Афганистан + Антигуеи Барбудеи + Ангилиа + Албаниа + Ермантәыла + Ангола + Антарктида + Аргентина + Америкатәи Самоа + Австриа + Австралиа + Аруба + Аландтәи ад-хақәа + Азербаиџьан + Босниеи Герцеговинеи + Барбадос + Бангладеш + Бельгиа + Буркина-Фасо + Болгариа + 
Бахреин + Бурунди + Бенин + Сен-Бартелеми + Бермудтәи ад-хақәа + Брунеи-Даруссалам + Боливиа + Бонеир, Синт-Естатиуси Сабеи + Бразилиа + Баӷама + Бутан + ад-ха Буве + Ботсвана + Беларус + Белиз + Канада + Кокостәи ад-қәа + Конго-Киншаса + Конго (АРК) + Агәҭантәи-Африкатәи Ареспублика + Конго-Браззавиль + Ареспублика Конго + Швеицариа + Кот-д’Ивуар + Кук идгьылбжьахақәа + Чили + Камерун + Китаи + Колумбиа + ад-ха Клиппертон + Коста-Рика + Куба + Кабо-Верде + Киурасао + ад-ха Қьырса + Кипр + Чехиа + Чештәи ареспублика + Германиа + Диего-Гарсиа + Џибути + Даниа + Доминика + Доминиканатәи Ареспублика + Алжир + Сеутеи Мелилиеи + Еквадор + Естониа + Египет + Мраҭашәаратәи Сахара + Еритреиа + Испаниа + Ефиопиа + Европатәи аидгыла + евроцәаҳәа + Финлиандиа + Фиџи + Фолклендтәи ад-хақәа + Фолклендтәи (Мальвинтәи) ад-хақәа + Еилоу Микронезиатәи Аштатқәа + Фарертәи ад-хақәа + Франциа + Габон + Британиа ду + Британиа + Гренада + Қырҭтәыла + Францызтәи Гвиана + Гернси + Гана + Гибралтар + Гренландиа + Гамбиа + Гвинеиа + Гваделупа + Екваториалтәи Гвинеиа + Бырзентәыла + Аладатәи Георгиеи Аладатәи Сандвичқәеи ад-қәа + Гватемала + Гуам + Гвинеиа-Бисау + Гаиана + Гонконг (ҶАР) + Гонконг + ад-хақәа Херди Макдональди + Гондурас + Хорватиа + Гаити + Венгриа + Канартәи ад-хақәа + Индонезиа + Ирландиа + Израиль + ад-ха Мен + Индиа + Британиатәи аҵакырадгьыл Индиатәи аокеан аҟны + Ирак + Иран + Исландиа + Италиа + Џерси + Иамаика + Иорданиа + Иапониа + Кениа + Киргизиа + Камбоџа + Кирибати + Комор + Сент-Китси Невиси + КЖӘДР + Кореиа Ареспублика + Кувеит + Адгьылбжьахақәа Каиман + Ҟазахсҭан + Лаос + Ливан + Сент-Люсиа + Лихтенштеин + Шри-Ланка + Либериа + Лесото + Литва + Лиуксембург + Латвиа + Ливиа + Марокко + Монако + Молдова + Ашьхеиқәаҵәа + Сен-Мартен + Мадагаскар + Маршаллтәи Адгьылбжьахақәа + Аҩадатәи Македониа + Мали + Мианма (Бирма) + Монголиа + Макао (ҶАР) + Макао + Аҩадатәи Мариантәи ад-хақәа + Мартиника + Мавританиа + Монтсеррат + Мальта + Маврики + Мальдив + Малави + Мексика 
+ Малаизиа + Мозамбик + Намибиа + Каледониа ҿыц + Нигер + Ад-ха Норфолк + Нигериа + Никарагуа + Нидерланд + Норвегиа + Непал + Науру + Ниуе + Зеландиа ҿыц + Аотеароа (Зеландиа ҿыц) + Оман + Панама + Перу + Францызтәи Полинезиа + Папуа — Гвинеиа ҿыц + Филиппин + Пакистан + Польша + Сен-Пиери Микелони + Питкерн ад-хақәа + Пуерто-Рико + Палестинатәи аҵакырадгьылқәа + Палестина + Португалиа + Палау + Парагваи + Катар + Нҭыҵтәи Океаниа + Реиунон + Румыниа + Сербиа + Урыстәыла + Руанда + Саудтәи Арабсҭан + Соломонтәи адгьылбжьахақәа + Сеишелтәи адгьылбжьахақәа + Судан + Швециа + Сингапур + Иԥшьоу Елена ладгьылбжьаха + Словениа + Шпицбергени Иан-Маиени + Словакиа + Сиерра-Леоне + Сан-Марино + Сенегал + Сомали + Суринам + Аладатәи Судан + Сан-Томеи Принсипиеи + Сальвадор + Синт-Мартен + Шьамтәыла + Есватини + Свазиленд + Тристан-да-Куниа + ад-хақәа Тиоркси Каикоси + Чад + Францызтәи Аладатәи аҵакырадгьылқәа + Того + Таиланд + Таџьықсҭан + Токелау + Мрагыларатәи Тимор + Тимор-Лесте + Ҭурқменисҭан + Тунис + Тонга + Ҭырқәтәыла + Тринидади Тобагои + Тувалу + Таиван + Танзаниа + Украина + Уганда + Нҭыҵтәи малыетәи ад-қәа (ЕАШ) + Еиду Амилаҭқәа Реиҿкаара + Еиду Аштатқәа + ЕАШ + Уругваи + Узбеқьисҭан + Ватикан + Сент-Винсенти Гренадини + Венесуела + Виргинтәи ад-хақәа (Британиаду) + Виргинтәи ад-хақәа (ЕАШ) + Виетнам + Вануату + Уоллиси Футунеи + Самоа + амццәажәашьақәа + амц-Bidi + Косово + Иемен + Маиотта + Алада-Африкатәи ареспублика + Замбиа + Зимбабве + еилкаам арегион + + + агригориантә мзар + амзар ISO-8601 + астандартә сортла аилыхра + иахьатәи араб ацифра + + + Аметрикатә + Англиатәи + Америкатә + + + Абызшәа: {0} + Аҩыра: {0} + Арегион: {0} + + + + [а ә б в г {гә} {гь} ӷ {ӷә} {ӷь} д {дә} е ж {жә} {жь} з ӡ {ӡә} и к {кә} {кь} қ {қә} {қь} ҟ {ҟә} {ҟь} л м н о п ԥ р с т {тә} ҭ {ҭә} у ф х {хә} {хь} ҳ {ҳә} ц {цә} ҵ {ҵә} ч ҷ ҽ ҿ џ {џь} ш {шә} {шь} ы ь ҩ] + [{а\u0301} ҕ {ҕә} {ҕь} {е\u0301} {и\u0301} {о\u0301} ҧ {у\u0301} {ы\u0301}] + [А Б В Г {ГӘ} {ГЬ} Ӷ {ӶӘ} {ӶЬ} Д {ДӘ} Е Ж 
{ЖӘ} {ЖЬ} З Ӡ {ӠӘ} И К {КӘ} {КЬ} Қ {ҚӘ} {ҚЬ} Ҟ {ҞӘ} {ҞЬ} Л М Н О П Ԥ Р С Т {ТӘ} Ҭ {ҬӘ} У Ф Х {ХӘ} {ХЬ} Ҳ {ҲӘ} Ц {ЦӘ} Ҵ {ҴӘ} Ч Ҷ Ҽ Ҿ Џ {ЏЬ} Ш {ШӘ} {ШЬ} Ы Ҩ] + [\- ‑ , % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‐ ‑ – — , ; \: ! ? . … ' ‘ ‚ " “ „ « » ( ) \[ \] \{ \} § @ * / \& #] + {0}… + …{0} + {0}…{1} + {0} … + … {0} + {0} … {1} + ? + + [\--/] + [\:∶] + + + [.․。︒﹒.。] + ['ʼ՚᾽᾿’'] + [%٪﹪%] + [؉‰] + [\$﹩$$] + [£₤] + [¥¥] + [₩₩] + [₨₹{Rp}{Rs}] + + + [\-‒⁻₋−➖﹣-] + [,،٫、︐︑﹐﹑,、] + [+⁺₊➕﬩﹢+] + + + [,٫︐﹐,] + [.․﹒.。] + + + + « + » + + + + + + + + + + EEEE, d MMMM y 'ш'. G + + + + + d MMMM y 'ш'. G + + + + + d MMM y 'ш'. G + + + + + dd.MM.y G + + + + + + + {1}, {0} + + + {1}, {0} 'аҿы' + + + + + {1}, {0} + + + {1}, {0} 'аҿы' + + + + + {1}, {0} + + + {1}, {0} + + + + + {1}, {0} + + + {1}, {0} + + + + E, d + ccc, h:mm a + ccc HH:mm + ccc, h:mm:ss a + ccc HH:mm:ss + y 'ш'. G + dd.MM.y G + LLL y 'ш'. G + d MMM y 'ш'. G + E, d MMM y 'ш'. G + dd.MM + E, dd.MM + d MMM + ccc, d MMM + d MMMM + y 'ш'. G + y 'ш'. G + MM.y G + dd.MM.y G + E, dd.MM.y G + LLL y 'ш'. G + d MMM y 'ш'. G + E, d MMM y 'ш'. G + LLLL y 'ш'. G + QQQ y 'ш'. G + QQQQ y 'ш'. G + + + + y 'ш'. G – y 'ш'. G + y–y 'шш'. G + + + MM.y G – MM.y G + MM.y – MM.y G + MM.y – MM.y G + + + dd.MM.y – dd.MM.y G + dd.MM.y G – dd.MM.y G + dd.MM.y – dd.MM.y G + dd.MM.y – dd.MM.y G + + + ccc, dd.MM.y – ccc, dd.MM.y G + ccc, dd.MM.y G – ccc, dd.MM.y G + ccc, dd.MM.y – ccc, dd.MM.y G + ccc, dd.MM.y – ccc, dd.MM.y G + + + LLL y 'ш'. G – LLL y 'ш'. G + LLL – LLL y 'ш'. G + LLL y – LLL y 'шш'. G + + + d–d MMM y 'ш'. G + d MMM y 'ш'. G – d MMM y 'ш'. G + d MMM – d MMM y 'ш'. G + d MMM y – d MMM y 'шш'. G + + + ccc, d MMM – ccc, d MMM y 'ш'. G + ccc, d MMM y 'ш'. G – ccc, d MMM y 'ш'. G + ccc, d MMM – ccc, d MMM y 'ш'. G + ccc, d MMM y – ccc, d MMM y 'шш'. G + + + M–M + + + E, dd.MM – E, dd.MM + E, dd.MM – E, dd.MM + + + LLL – LLL + + + d–d MMM + d MMM – d MMM + + + ccc, d MMM – ccc, d MMM + ccc, d MMM – ccc, d MMM + + + y–y 'шш'. 
G + + + MM.y – MM.y G + MM.y – MM.y G + + + dd.MM.y – dd.MM.y G + dd.MM.y – dd.MM.y G + dd.MM.y – dd.MM.y G + + + ccc, dd.MM.y – ccc, dd.MM.y G + ccc, dd.MM.y – ccc, dd.MM.y G + ccc, dd.MM.y – ccc, dd.MM.y G + + + LLL – LLL y 'ш'. G + LLL y 'ш'. – LLL y 'ш'. G + + + d–d MMM y 'ш'. G + d MMM – d MMM y 'ш'. G + d MMM y 'ш'. – d MMM y 'ш'. G + + + ccc, d MMM – ccc, d MMM y 'ш'. G + ccc, d MMM – ccc, d MMM y 'ш'. G + ccc, d MMM y 'ш'. – ccc, d MMM y 'ш'. G + + + LLLL – LLLL y 'ш'. G + LLLL y 'ш'. – LLLL y 'ш'. G + + + + + + + + + Ажь + Жəаб + Хəажә + Мш + Лаҵ + Рашә + Ԥхынгә + Нанҳә + Цəыб + Жьҭ + Абҵ + Ԥхынҷ + + + Жь + Жə + Хə + М + Л + Р + Гә + Н + Цə + Ҭ + Б + Ҷ + + + Ажьырныҳəа + Жəабран + Хəажəкыра + Мшаԥы + Лаҵара + Рашəара + Ԥхынгəы + Нанҳəа + Цəыббра + Жьҭаара + Абҵара + Ԥхынҷкәын + + + + + Ажь + Жəаб + Хəажә + Мш + Лаҵ + Рашә + Ԥхынгә + Нанҳә + Цəыб + Жьҭ + Абҵ + Ԥхынҷ + + + Жь + Жə + Хə + М + Л + Р + Гә + Н + Цә + Ҭ + Б + Ҷ + + + Ажьырныҳəа + Жəабран + Хəажəкыра + Мшаԥы + Лаҵара + Рашəара + Ԥхынгəы + Нанҳəа + Цəыббра + Жьҭаара + Абҵара + Ԥхынҷкәын + + + + + + + Ам + Ашә + Аҩ + Ах + Аԥ + Ахә + Ас + + + М + Шә + Ҩ + Х + Ԥ + Хә + С + + + Ам + Ашә + Аҩ + Ах + Аԥ + Ахә + Ас + + + Амҽыша + Ашәахьа + Аҩаша + Ахаша + Аԥшьаша + Ахәаша + Асабша + + + + + Ам + Ашә + Аҩ + Ах + Аԥ + Ахә + Ас + + + М + Шә + Ҩ + Х + Ԥ + Хә + С + + + Ам + Ашә + Аҩ + Ах + Аԥ + Ахә + Ас + + + Амҽыша + Ашәахьа + Аҩаша + Ахаша + Аԥшьаша + Ахәаша + Асабша + + + + + + + 1-тәи акв. + 2-тәи акв. + 3-тәи акв. + 4-тәи акв. + + + 1 + 2 + 3 + 4 + + + 1-тәи аквартал + 2-тәи аквартал + 3-тәи аквартал + 4-тәи аквартал + + + + + 1-тәи акв. + 2-тәи акв. + 3-тәи акв. + 4-тәи акв. + + + 1-тәи аквартал + 2-тәи аквартал + 3-тәи аквартал + 4-тәи аквартал + + + + + + + AM + PM + + + AM + PM + + + + + AM + PM + + + AM + PM + + + AM + PM + + + + + + + EEEE, d MMMM y 'ш'. + + + + + d MMMM y 'ш'. + + + + + d MMM y 'ш'. 
+ + + + + dd.MM.y + + + + + + + HH:mm:ss zzzz + + + + + HH:mm:ss z + + + + + HH:mm:ss + + + + + HH:mm + + + + + + + {1}, {0} + + + {1}, {0} 'аҿы' + + + + + {1}, {0} + + + {1}, {0} 'аҿы' + + + + + {1}, {0} + + + {1}, {0} + + + + + {1}, {0} + + + {1}, {0} + + + + ccc, h:mm B + ccc, h:mm:ss B + ccc, d + y 'ш'. G + dd.MM.y GGGGG + LLL y 'ш'. G + d MMM y 'ш'. G + E, d MMM y 'ш'. G + h:mm a + HH:mm + h:mm:ss a + HH:mm:ss + h:mm:ss a v + HH:mm:ss v + dd.MM + E, dd.MM + d MMM + ccc, d MMM + d MMMM + W-'тәи' 'амчыбжь' MMMM + MM.y + dd.MM.y + ccc, dd.MM.y 'ш'. + LLL y 'ш'. + d MMM y 'ш'. + E, d MMM y 'ш'. + LLLL y 'ш'. + QQQ y 'ш'. + QQQQ y 'ш'. + w-'тәи' 'амчыбжь' Y 'ш'. + + + {0} – {1} + + d–d + + + y 'ш'. G – y 'ш'. G + y–y 'шш'. G + + + MM.y G – MM.y G + MM.y – MM.y G + MM.y – MM.y G + + + dd.MM.y – dd.MM.y G + dd.MM.y G – dd.MM.y G + dd.MM.y – dd.MM.y G + dd.MM.y – dd.MM.y G + + + ccc, dd.MM.y – ccc, dd.MM.y G + ccc, dd.MM.y G – ccc, dd.MM.y G + ccc, dd.MM.y – ccc, dd.MM.y G + ccc, dd.MM.y – ccc, dd.MM.y G + + + LLL y 'ш'. G – LLL y 'ш'. G + LLL – LLL y 'ш'. G + LLL y – LLL y 'шш'. G + + + d–d MMM y 'ш'. G + d MMM y 'ш'. G – d MMM y 'ш'. G + d MMM – d MMM y 'ш'. G + d MMM y – d MMM y 'шш'. G + + + ccc, d MMM – ccc, d MMM y 'ш'. G + ccc, d MMM y 'ш'. G – ccc, d MMM y 'ш'. G + ccc, d MMM – ccc, d MMM y 'ш'. G + ccc, d MMM y – ccc, d MMM y 'шш'. G + + + h a – h a + h–h a + + + HH–HH + + + h:mm a – h:mm a + h:mm–h:mm a + h:mm–h:mm a + + + h:mm a – h:mm a v + h:mm–h:mm a v + h:mm–h:mm a v + + + h a – h a v + h–h a v + + + M–M + + + dd.MM – dd.MM + dd.MM – dd.MM + + + E, dd.MM – E, dd.MM + E, dd.MM – E, dd.MM + + + LLL – LLL + + + d–d MMM + d MMM – d MMM + + + E, d MMM – E, d MMM + E, d MMM – E, d MMM + + + y–y + + + MM.y – MM.y + MM.y – MM.y + + + dd.MM.y – dd.MM.y + dd.MM.y – dd.MM.y + dd.MM.y – dd.MM.y + + + ccc, dd.MM.y – ccc, dd.MM.y + ccc, dd.MM.y – ccc, dd.MM.y + ccc, dd.MM.y – ccc, dd.MM.y + + + LLL – LLL y 'ш'. + LLL y 'ш'. – LLL y 'ш'. + + + d–d MMM y 'ш'. 
+ d MMM – d MMM y 'ш'. + d MMM y 'ш'. – d MMM y 'ш'. + + + ccc, d – ccc, d MMM y 'ш'. + ccc, d MMM – ccc, d MMM y 'ш'. + ccc, d MMM y 'ш'. – ccc, d MMM y 'ш'. + + + LLLL – LLLL y 'ш'. + LLLL y 'ш'. – LLLL y 'ш'. + + + + + + + + аера + + + ашықәс + + + ш. + + + ш + + + аквартал + + + акв. + + + акв + + + амза + + + амз. + + + амз + + + амчыбжь + + + амч. + + + амч + + + амш + иацы + иахьа + уаҵәы + + + амш + иацы + иахьа + уаҵәы + + + амш + иацы + иахьа + уаҵәы + + + + {0} Аамҭа + {0} Аԥхынтәи Аамҭа + {0} Астандартә Аамҭа + + Еилкаам ақалақь + + + Андорра + + + Дубаи + + + Кабул + + + Антигуа + + + Ангилиа + + + Тирана + + + Ереван + + + Луанда + + + Ротера + + + Палмер + + + Тролл + + + Сиова + + + Моусон + + + Деивис + + + Амрагылара + + + Кеиси + + + Диумон-д’Иурвил + + + Мак-Мердо + + + Рио-Галегос + + + Мендоса + + + Сан-Хуан + + + Ушуаиа + + + Ла-Риоха + + + Сан-Луис + + + Катамарка + + + Салта + + + Жужуи + + + Тукуман + + + Кордова + + + Буенос-Аирес + + + Паго-Паго + + + Вена + + + Перт + + + Иукла + + + Дарвин + + + Аделаида + + + Брокен-Хилл + + + Мелбурн + + + Хобарт + + + Линдеман + + + Сиднеи + + + Брисбен + + + Маккуори + + + Лорд-Хау + + + Аруба + + + Мариехамн + + + Баку + + + Сараево + + + Барбадос + + + Дакка + + + Бриуссел + + + Уагадугу + + + Софиа + + + Бахреин + + + Бужумбура + + + Порто-Ново + + + Сен-Бартелеми + + + Бермудтәи ад-хақәа + + + Брунеи + + + Ла-Пас + + + Кралендеик + + + Еирунепе + + + Риу-Бранку + + + Порту-Велиу + + + Боа-Виста + + + Манаус + + + Куиаба + + + Сантарен + + + Кампу-Гранди + + + Белен + + + Арагуаина + + + Сан-Паулу + + + Баиа + + + Форталеза + + + Масеио + + + Ресифи + + + Норониа + + + Нассау + + + Тхимпху + + + Габороне + + + Минск + + + Белиз + + + Доусон + + + Уаитхорс + + + Инувик + + + Ванкувер + + + Форт Нелсон + + + Доусон-Крик + + + Крестон + + + Иеллоунаиф + + + Едмонтон + + + Свифт-Керрент + + + Кеимбриџ-Беи + + + Реџаина + + + Виннипег + + + Резолиут + + + Реини-Ривер + + + Ранкин-Инлет + + + 
Корал-Харбор + + + Тандер-Беи + + + Нипигон + + + Торонто + + + Икалуит + + + Пангниртанг + + + Монктон + + + Ҳалифакс + + + Гус-Беи + + + Глеис-Беи + + + Бланк-Саблон + + + Сент-Џонс + + + Кокостәи ад-хақәа + + + Киншаса + + + Лубумбаши + + + Банги + + + Браззавил + + + Циурих + + + Абиџан + + + Раротонга + + + ад-ха Пасхи + + + Пунта-Аренас + + + Сантиаго + + + Дуала + + + Урумчи + + + Шанхаи + + + Богота + + + Коста-Рика + + + Гавана + + + Кабо-Верде + + + Киурасао + + + ад-ха Қьырса + + + Никосиа + + + Фамагуста + + + Прага + + + Биузинген-Хыхьтәи-Реине + + + Берлин + + + Џибути + + + Копенҳаген + + + Доминика + + + Санто-Доминго + + + Алжир + + + Галапагостәи ад-хақәа + + + Гуаиакил + + + Таллин + + + Каир + + + Ел-Аиун + + + Асмера + + + Канартәи ад-хақәа + + + Сеута + + + Мадрид + + + Аддис-Абеба + + + Хелсинки + + + Фиџи + + + Стенли + + + Трук + + + Понпеи + + + Косрае + + + Фарертәи ад-хақәа + + + Париж + + + Либревил + + + + Англиа Аԥхынтәи Аамҭа + + Лондон + + + Гренада + + + Қарҭ + + + Каиенна + + + Гернси + + + Аккра + + + Гибралтар + + + Туле + + + Нуук + + + Скорсбисунн + + + Денмарксхавн + + + Банжул + + + Конакри + + + Гваделупа + + + Малабо + + + Афин + + + Аладатәи Георгиа + + + Гватемала + + + Гуам + + + Бисау + + + Гаиана + + + Ҳонконг + + + Тегусигалпа + + + Загреб + + + Порт-о-Пренс + + + Будапешт + + + Џакарта + + + Понтианак + + + Макасар + + + Џаиапура + + + + Аирланд Астандартә Аамҭа + + Дублин + + + Иерусалим + + + ад-ха Мен + + + Калкутта + + + Чагос + + + Багдад + + + Тегеран + + + Реикиавик + + + Рим + + + Џерси + + + Иамаика + + + Амман + + + Токио + + + Наироби + + + Бишкек + + + Пномпен + + + Кантон + + + Киритимати + + + Тарава + + + Комор + + + Сент-Китс + + + Пхениан + + + Сеул + + + Кувеит + + + Адгьылбжьахақәа Каиман + + + Актау + + + Уралск + + + Атырау + + + Актобе + + + Костанаи + + + Кызылорда + + + Алматы + + + Вентиан + + + Беирут + + + Сент-Лиусиа + + + Вадуц + + + Коломбо + + + Монровиа + + + Масеру + + + Вилниус + + 
+ Лиуксембург + + + Рига + + + Триполи + + + Касабланка + + + Монако + + + Кишинев + + + Подгорица + + + Мариго + + + Антананариву + + + Кваџалеин + + + Маџуро + + + Скопе + + + Бамако + + + Иангон + + + Ховд + + + Улан-Батор + + + Чоибалсан + + + Макао + + + Саипан + + + Мартиника + + + Нуакшот + + + Монтсеррат + + + Малта + + + Маврики + + + Малдив + + + Блантаир + + + Тихуана + + + Ермосило + + + Масатлан + + + Чиуауа + + + Баиа-де-Бандерас + + + Охинага + + + Монтерреи + + + Мехико + + + Матаморос + + + Мерида + + + Канкун + + + Куала-Лумпур + + + Кучинг + + + Мапуту + + + Виндхук + + + Нумеа + + + Ниамеи + + + Норфолк + + + Лагос + + + Манагуа + + + Амстердам + + + Осло + + + Катманду + + + Науру + + + Ниуе + + + Чатем + + + Окленд + + + Маскат + + + Панама + + + Лима + + + Таити + + + Маркизтәи ад-хақәа + + + ад-хақәа Гамбе + + + Порт-Морсби + + + Бугенвил + + + Манила + + + Карачи + + + Варшава + + + Микелон + + + Питкерн + + + Пуерто-Рико + + + Газа + + + Хеврон + + + Азортәи ад-хақәа + + + Мадеира + + + Лиссабон + + + Палау + + + Асунсон + + + Катар + + + Реиунон + + + Бухарест + + + Белград + + + Калининград + + + Москва + + + Волгоград + + + Саратов + + + Астрахан + + + Улиановск + + + Киров + + + Самара + + + Екатеринбург + + + Омск + + + Новосибирск + + + Барнаул + + + Томск + + + Новокузнецк + + + Красноиарск + + + Иркутск + + + Чита + + + Иакутск + + + Владивосток + + + Хандыга + + + ад-ха Сахалин + + + Уст-Нера + + + Магадан + + + Среднеколымск + + + Петропавловск-Камчаткатәи + + + Анадыр + + + Кигали + + + Ер-Риад + + + Гуадалканал + + + Мае + + + Хартум + + + Стокҳолм + + + Сингапур + + + Иԥшьоу Елена ладгьылбжьаха + + + Лиублиана + + + Лонгир + + + Братислава + + + Фритаун + + + Сан-Марино + + + Дакар + + + Могадишо + + + Парамарибо + + + Џуба + + + Сан-Томе + + + Салвадор + + + Лоуер-Принс-Куотер + + + Дамаск + + + Мбабане + + + Гранд-Терк + + + Нџамена + + + Кергелен + + + Ломе + + + Бангкок + + + Душанбе + + + Факаофо + + + Дили + + + Ашхабад 
+ + + Тунис + + + Тонгатапу + + + Стамбул + + + Порт-оф-Спеин + + + Фунафути + + + Таибеи + + + Дар-ес-Салам + + + Ужҳород + + + Киев + + + Симферопол + + + Запороже + + + Кампала + + + ад-хақәа Мидуеи + + + Уеик + + + Адак + + + Ном + + + Џонстон + + + Анкориџ + + + Иакутат + + + Ситка + + + Џуно + + + Метлакатла + + + Лос-Анџелес + + + Боисе + + + Финикс + + + Денвер + + + Боила, Аҩадатәи Дакота + + + Ниу-Сеилем, Аҩадатәи Дакота + + + Агәҭа, Аҩадатәи Дакота + + + Чикаго + + + Меномини + + + Винсеннес + + + Питерсберг, Индиана + + + Телл-Сити + + + Нокс, Индиана + + + Уинамак + + + Маренго, Индиана + + + Индианаполис + + + Луисвилл + + + Вевеи, Индиана + + + Монтиселло, Кентукки + + + Детроит + + + Ниу-Иорк + + + Монтевидео + + + Самарканд + + + Ташкент + + + Ватикан + + + Сент-Винсент + + + Каракас + + + Тортола + + + Сент-Томас + + + Хошимин + + + Ефате + + + Уоллис + + + Апиа + + + Аден + + + Маиотта + + + Иоҳаннесбург + + + Лусака + + + Ҳараре + + + + Акри аамҭа + Акри астандартә аамҭа + Акри аԥхынтәи аамҭа + + + + + Афганистан + + + + + Агәҭантәи Африка + + + + + Мрагыларатәи Африка + + + + + Аладатәи Африка + + + + + Мраҭашәаратәи Африка + Мраҭашәаратәи Африка, астандартә аамҭа + Мраҭашәаратәи Африка, аԥхынтәи аамҭа + + + + + Алиаска + Алиаска, астандартә аамҭа + Алиаска, аԥхынтәи аамҭа + + + + + Алма-Ата аамҭа + Алма-Ата астандартә аамҭа + Алма-Ата аԥхынтәи аамҭа + + + + + Амазонка + Амазонка, астандартә аамҭа + Амазонка, аԥхынтәи аамҭа + + + + + Агәҭантәи Америка + Агәҭантәи Америка, астандартә аамҭа + Агәҭантәи Америка, аԥхынтәи аамҭа + + + + + Мрагыларатәи Америка + Мрагыларатәи Америка, астандартә аамҭа + Мрагыларатәи Америка, аԥхынтәи аамҭа + + + + + Ашьхатә аамҭа (Аҩадатәи Америка) + Астандартә ашьхатә аамҭа (Аҩадатәи Америка) + Аԥхынтәи ашьхатә аамҭа (Аҩадатәи Америка) + + + + + Аокеанҭынчтәи аамҭа + Аокеанҭынчтәи астандартә аамҭа + Аокеанҭынчтәи аԥхынтәи аамҭа + + + + + аамҭа Анадыр аҿы + Анадыр астандартә аамҭа + Анадыр аԥхынтәи аамҭа + + + + + 
Апиа + Апиа, астандартә аамҭа + Апиа, аԥхынтәи аамҭа + + + + + Актау аамҭа + Актау, астандартә аамҭа + Актау аԥхынтәи аамҭа + + + + + Актобе аамҭа + Актобе астандартә аамҭа + Актобе аԥхынтәи аамҭа + + + + + Саудтәи Арабсҭан + Саудтәи Арабсҭан, астандартә аамҭа + Саудтәи Арабсҭан, аԥхынтәи аамҭа + + + + + Аргентина + Аргентина, астандартә аамҭа + Аргентина, аԥхынтәи аамҭа + + + + + Мраҭашәаратәи Аргентина + Мраҭашәаратәи Аргентина, астандартә аамҭа + Мраҭашәаратәи Аргентина, аԥхынтәи аамҭа + + + + + Ермантәыла + Ермантәыла, астандартә аамҭа + Ермантәыла, аԥхынтәи аамҭа + + + + + Атлантикатәи аамҭа + Атлантикатәи астандартә аамҭа + Атлантикатәи аԥхынтәи аамҭа + + + + + Агәҭантәи Австралиа + Агәҭантәи Австралиа, астандартә аамҭа + Агәҭантәи Австралиа, аԥхынтәи аамҭа + + + + + Агәҭантәи Австралиа, мраҭашәаратәи аамҭа + Агәҭантәи Австралиа, мраҭашәаратәи астандартә аамҭа + Агәҭантәи Австралиа, мраҭашәаратәи аԥхынтәи аамҭа + + + + + Мрагыларатәи Австралиа + Мрагыларатәи Австралиа, астандартә аамҭа + Мрагыларатәи Австралиа, аԥхынтәи аамҭа + + + + + Мраҭашәаратәи Австралиа + Мраҭашәаратәи Австралиа, астандартә аамҭа + Мраҭашәаратәи Австралиа, аԥхынтәи аамҭа + + + + + Азербаиџьан + Азербаиџьан, астандартә аамҭа + Азербаиџьан, аԥхынтәи аамҭа + + + + + Азортәи ад-хақәа + Азортәи ад-хақәа, астандартә аамҭа + Азортәи ад-хақәа, аԥхынтәи аамҭа + + + + + Бангладеш + Бангладеш, астандартә аамҭа + Бангладеш, аԥхынтәи аамҭа + + + + + Бутан + + + + + Боливиа + + + + + Бразилиа + Бразилиа, астандартә аамҭа + Бразилиа, аԥхынтәи аамҭа + + + + + Брунеи-Даруссалам + + + + + Кабо-Верде + Кабо-Верде, астандартә аамҭа + Кабо-Верде, аԥхынтәи аамҭа + + + + + Кеиси + + + + + Чаморро + + + + + Чатем + Чатем, астандартә аамҭа + Чатем, аԥхынтәи аамҭа + + + + + Чили + Чили, астандартә аамҭа + Чили, аԥхынтәи аамҭа + + + + + Китаи + Китаи, астандартә аамҭа + Китаи, аԥхынтәи аамҭа + + + + + Чоибалсан + Чоибалсан, астандартә аамҭа + Чоибалсан, аԥхынтәи аамҭа + + + + + ад-ха Қьырса + + + + + Кокостәи 
ад-хақәа + + + + + Колумбиа + Колумбиа, астандартә аамҭа + Колумбиа, аԥхынтәи аамҭа + + + + + Кук идгьылбжьахақәа + Кук идгьылбжьахақәа, астандартә аамҭа + Кук идгьылбжьахақәа, полуаԥхынтәи аамҭа + + + + + Куба + Куба, астандартә аамҭа + Куба, аԥхынтәи аамҭа + + + + + Деивис + + + + + Диумон-д’Иурвил + + + + + Мрагыларатәи Тимор + + + + + ад-ха Пасхи + ад-ха Пасхи, астандартә аамҭа + ад-ха Пасхи, аԥхынтәи аамҭа + + + + + Еквадор + + + + + Агәҭантәи Европа + Агәҭантәи Европа, астандартә аамҭа + Агәҭантәи Европа, аԥхынтәи аамҭа + + + + + Мрагыларатәи Европа + Мрагыларатәи Европа, астандартә аамҭа + Мрагыларатәи Европа, аԥхынтәи аамҭа + + + + + Московатәи аамҭа + + + + + Мраҭашәаратәи Европа + Мраҭашәаратәи Европа, астандартә аамҭа + Мраҭашәаратәи Европа, аԥхынтәи аамҭа + + + + + Фолклендтәи ад-хақәа + Фолклендтәи ад-хақәа, астандартә аамҭа + Фолклендтәи ад-хақәа, аԥхынтәи аамҭа + + + + + Фиџи + Фиџи, астандартә аамҭа + Фиџи, аԥхынтәи аамҭа + + + + + Францызтәи Гвиана + + + + + Францызтәи Аладатәеи Антарктикатәи рҵакырадгьылқәа + + + + + Галапагостәи ад-хақәа + + + + + Гамбе + + + + + Қырҭтәыла + Қырҭтәыла, астандартә аамҭа + Қырҭтәыла, аԥхынтәи аамҭа + + + + + ад-хақәа Гилберта + + + + + Агринвич Ибжьаратәу Аамҭа + + + + + Мрагыларатәи Гренландиа + Мрагыларатәи Гренландиа, стандарное времиа + Мрагыларатәи Гренландиа, аԥхынтәи аамҭа + + + + + Мраҭашәаратәи Гренландиа + Мраҭашәаратәи Гренландиа, астандартә аамҭа + Мраҭашәаратәи Гренландиа, аԥхынтәи аамҭа + + + + + Гуам + + + + + Аџьамтә аӡыбжьахала + + + + + Гаиана + + + + + Ҳаваи-алеуттәи аамҭа + Ҳаваи-алеуттәи астандартә аамҭа + Ҳаваи-алеуттәи аԥхынтәи аамҭа + + + + + Ҳонконг + Ҳонконг, астандартә аамҭа + Ҳонконг, аԥхынтәи аамҭа + + + + + Ховд + Ховд, астандартә аамҭа + Ховд, аԥхынтәи аамҭа + + + + + Индиа + + + + + Индиатәи аокеан + + + + + Индокитаи + + + + + Агәҭантәи Индонезиа + + + + + Мрагыларатәи Индонезиа + + + + + Мраҭашәаратәи Индонезиа + + + + + Иран + Иран, астандартә аамҭа + Иран, аԥхынтәи аамҭа + + + + 
+ Иркутск + Иркутск, астандартә аамҭа + Иркутск, аԥхынтәи аамҭа + + + + + Израиль + Израиль, астандартә аамҭа + Израиль, аԥхынтәи аамҭа + + + + + Иапониа + Иапониа, астандартә аамҭа + Иапониа, аԥхынтәи аамҭа + + + + + Петропавловск-Камчаткатәи + Петропавловск-Камчаткатәи, астандартә аамҭа + Петропавловск-Камчаткатәи, аԥхынтәи аамҭа + + + + + Мрагыларатәи Ҟазахсҭан + + + + + Мраҭашәаратәи Ҟазахсҭан + + + + + Кореиа + Кореиа, астандартә аамҭа + Кореиа, аԥхынтәи аамҭа + + + + + Косрае + + + + + Красноиарск + Красноиарск, астандартә аамҭа + Красноиарск, аԥхынтәи аамҭа + + + + + Киргизиа + + + + + Шри-Ланка + + + + + ад-хақәа Лаин + + + + + Лорд-Ҳау + Лорд-Ҳау, астандартә аамҭа + Лорд-Ҳау, аԥхынтәи аамҭа + + + + + Макао + Макао, астандартә аамҭа + Макао, аԥхынтәи аамҭа + + + + + Маккуори + + + + + Магадан + Магадан, астандартә аамҭа + Магадан, аԥхынтәи аамҭа + + + + + Малаизиа + + + + + Мальдив + + + + + Маркизтәи ад-хақәа + + + + + Маршаллтәи Адгьылбжьахақәа + + + + + Маврики + Маврики, астандартә аамҭа + Маврики, аԥхынтәи аамҭа + + + + + Моусон + + + + + Аҩадамра-Ҭашәаратәи амексикатә аамҭа + Аҩадамра-Ҭашәаратәи амексикатә астандартә аамҭа + Аҩадамра-Ҭашәаратәи амексикатә аԥхынтәи аамҭа + + + + + Аокеанҭынчтәи амексикатә аамҭа + Аокеанҭынчтәи амексикатә астандартә аамҭа + Аокеанҭынчтәи амексикатә аԥхынтәи аамҭа + + + + + Улан-Батор + Улан-Батор, астандартә аамҭа + Улан-Батор, аԥхынтәи аамҭа + + + + + Москва + Москва, астандартә аамҭа + Москва, аԥхынтәи аамҭа + + + + + Мианма + + + + + Науру + + + + + Непал + + + + + Каледониа ҿыц + Каледониа ҿыц, астандартә аамҭа + Каледониа ҿыц, аԥхынтәи аамҭа + + + + + Зеландиа ҿыц + Зеландиа ҿыц, астандартә аамҭа + Зеландиа ҿыц, аԥхынтәи аамҭа + + + + + Ниуфаундленд + Ниуфаундленд, астандартә аамҭа + Ниуфаундленд, аԥхынтәи аамҭа + + + + + Ниуе + + + + + Норфолк + Норфолк, астандартә аамҭа + Норфолк, аԥхынтәи аамҭа + + + + + Фернанду-ди-Норониа + Фернанду-ди-Норониа, астандартә аамҭа + Фернанду-ди-Норониа, аԥхынтәи аамҭа + + + + + 
Аҩадатәи Мариантәи ад-хақәа + + + + + Новосибирск + Новосибирск, астандартә аамҭа + Новосибирск, аԥхынтәи аамҭа + + + + + Омск + Омск, астандартә аамҭа + Омск, аԥхынтәи аамҭа + + + + + Пакистан + Пакистан, астандартә аамҭа + Пакистан, аԥхынтәи аамҭа + + + + + Палау + + + + + Папуа — Гвинеиа ҿыц + + + + + Парагваи + Парагваи, астандартә аамҭа + Парагваи, аԥхынтәи аамҭа + + + + + Перу + Перу, астандартә аамҭа + Перу, аԥхынтәи аамҭа + + + + + Филиппин + Филиппин, астандартә аамҭа + Филиппин, аԥхынтәи аамҭа + + + + + ад-хақәа Феникс + + + + + Сен-Пиери Микелони + Сен-Пиери Микелони, астандартә аамҭа + Сен-Пиери Микелони, аԥхынтәи аамҭа + + + + + Питкерн + + + + + Понпеи + + + + + Пхениан + + + + + Кызылорда* + Кызылорда, астандартә аамҭа* + Кызылорда, аԥхынтәи аамҭа* + + + + + Реиунон + + + + + Ротера + + + + + Сахалин + Сахалин, астандартә аамҭа + Сахалин, аԥхынтәи аамҭа + + + + + аамҭа Самараҿы + Самартәи астандартә аамҭа + Самартәи аԥхынтәи аамҭа + + + + + Самоа + Самоа, астандартә аамҭа + Самоа, аԥхынтәи аамҭа + + + + + Сеишелтәи адгьылбжьахақәа + + + + + Сингапур + + + + + Соломонтәи адгьылбжьахақәа + + + + + Аладатәи Георгиа + + + + + Суринам + + + + + Сиова + + + + + Таити + + + + + Таиван + Таиван, астандартә аамҭа + Таиван, аԥхынтәи аамҭа + + + + + Таџьықсҭан + + + + + Токелау + + + + + Тонга + Тонга, астандартә аамҭа + Тонга, аԥхынтәи аамҭа + + + + + Трук + + + + + Туркменистан + Туркменистан, астандартә аамҭа + Туркменистан, аԥхынтәи аамҭа + + + + + Тувалу + + + + + Уругваи + Уругваи, астандартә аамҭа + Уругваи, аԥхынтәи аамҭа + + + + + Узбеқьисҭан + Узбеқьисҭан, астандартә аамҭа + Узбеқьисҭан, аԥхынтәи аамҭа + + + + + Вануату + Вануату, астандартә аамҭа + Вануату, аԥхынтәи аамҭа + + + + + Венесуела + + + + + Владивосток + Владивосток, астандартә аамҭа + Владивосток, аԥхынтәи аамҭа + + + + + Волгоград + Волгоград, астандартә аамҭа + Волгоград, аԥхынтәи аамҭа + + + + + Амрагылара + + + + + Уеик + + + + + Уоллиси Футунеи + + + + + Иакутск + Иакутск, астандартә 
аамҭа + Иакутск, аԥхынтәи аамҭа + + + + + Екатеринбург + Екатеринбург, астандартә аамҭа + Екатеринбург, аԥхынтәи аамҭа + + + + + Иукон + + + + + + latn + + latn + + + , +   + + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0 % + + + + + + + #,##0.00 ¤ + + + #,##0.00 ¤ + #,##0.00 + + + + + + бермудтәи адоллар + бермудтәи адоллар + + + евро + евро + + + + + + ¥ + + + PHP + + + L + + + + + + леоне + леоне + + + ฿ + + + NT$ + + + + + + $ + + + Еилкаам Авалиута + (еилкаам авалиута) + + + + ≈{0} + + + {0} мшы + + + + + + {0}/{1} + + + {0}⋅{1} + + + + + {0}/{1} + + + {0}⋅{1} + + + + + + {0}-и {1}-и + {0}-и {1}-и + + + {0} ма {1} + {0} ма {1} + + + {0}, {1} + {0}, {1} + {0} ма {1} + {0} ма {1} + + + {0}, {1} + {0}, {1} + {0} ма {1} + {0} ма {1} + + + {0}, {1} + {0}, {1} + {0}, {1} + {0}, {1} + + + {0}, {1} + {0}, {1} + {0}-и {1}-и + {0}-и {1}-и + + + {0} {1} + {0} {1} + {0} {1} + {0} {1} + + + {0} {1} + {0} {1} + {0} {1} + {0} {1} + + + {0} {1} + {0} {1} + {0} {1} + {0} {1} + + + + + ааи:а + мап:м + + + diff --git a/make/data/cldr/common/main/ab_GE.xml b/make/data/cldr/common/main/ab_GE.xml new file mode 100644 index 00000000000..60180381674 --- /dev/null +++ b/make/data/cldr/common/main/ab_GE.xml @@ -0,0 +1,18 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/af.xml b/make/data/cldr/common/main/af.xml index 5cd23719d22..b0b8a1ef44a 100644 --- a/make/data/cldr/common/main/af.xml +++ b/make/data/cldr/common/main/af.xml @@ -1,6 +1,6 @@ - + + + + + + + + aragonés + arabe + arabe standard moderno + bengalí + alemán + alemán austriaco + alemán standard suizo + anglés + anglés australiano + anglés canadiense + anglés britanico + anglés (RU) + anglés americano + anglés (EUA) + espanyol + espanyol latino-americano + espanyol europeu + espanyol mexicano + francés + francés canadiense + francés suizo + hindi + indonesio + italiano + chaponés + coreano + neerlandés + flamenco + polaco + portugués + portugués brasilenyo + portugués europeu + ruso + tai + turco + 
idioma desconoixiu + chino + chino mandarín + chino simplificau + chino mandarín (simplificau) + chino tradicional + chino mandarín (tradicional) + + + + + + + + + + + + + + + + Mundo + Africa + America d’o Norte + Sudamerica + Oceanía + Africa occidental + America Central + Africa oriental + Africa septentrional + Africa central + Africa meridional + America + Norteamerica + Caribe + Asia oriental + Asia meridional + Asia sudoriental + Europa meridional + Australasia + Melanesia + Rechión d’a Micronesia + Polinesia + Asia + Asia central + Asia occidental + Europa + Europa oriental + Europa septentrional + Europa occidental + Africa subsahariana + Latino-america + Isla Ascensión + Andorra + Emiratos Arabes Unius + Afganistán + Antigua y Barbuda + Anguilla + Albania + Armenia + Angola + Antartida + Archentina + Samoa Americana + Austria + Australia + Aruba + Islas Åland + Azerbaichán + Bosnia y Herzegovina + Barbados + Bangladesh + Belchica + Burkina Faso + Bulgaria + Bahrain + Burundi + Benín + St. 
Barthélemy + Bermuda + Brunei + Bolivia + Caribe neerlandés + Brasil + Bahamas + Bhután + Isla Bouvet + Botswana + Belarrusia + Belize + Canadá + Islas Cocos + Republica Democratica d’o Congo + Congo Kinshasa + Republica Centro-africana + Republica d’o Congo + Congo Brazzaville + Suiza + Côte d’Ivoire + Costa de Vori + Islas Cook + Chile + Camerún + ¨China + Colombia + Isla Clipperton + Costa Rica + Cuba + Cabo Verde + Curaçao + Isla Chirstmas + Chipre + Chequia + Republica checa + Alemanya + Diego García + Chibuti + Dinamarca + Dominica + Republica Dominicana + Alcheria + Ceuta y Melilla + Ecuador + Estonia + Echipto + Sahara occidental + Eritrea + Espanya + Ethiopia + Unión Europea + Eurozona + Finlandia + Fichi + Islas Malvinas + Islas Malvinas (Islas Falkland) + Micronesia + Islas Feroe + Francia + Gabón + Reino Uniu + RU + Grenada + Cheorchia + Guayana francesa + Guernsey + Ghana + Chibraltar + Gronlandia + Gambia + Guinea + Guadeloupe + Guinea equatorial + Grecia + Islas Cheorchia d’o Sud y Sandwich d’o Sud + Guatemala + Guam + Guinea-Bissau + Guyana + Hong Kong, RAS China + Hong Kong + Islas Heard y McDonald + Honduras + Croacia + Haití + Hongría + Islas Canarias + Indonesia + Irlanda + Israel + Isla de Man + India + Territorio Britanico de l’Oceano Indico + Iraq + Irán + Islandia + Italia + Jersey + Chamaica + Chordania + Chapón + Kenya + Kirguistán + Cambocha + Kiribati + Comoros + Sant Cristofo y Nieus + Corea d’o Norte + Corea d’o Sud + Kuwait + Islas Caimán + Cazaquistán + Laos + Libano + Santa Lucía + Liechtenstein + Sri Lanka + Liberia + Lesotho + Lituania + Luxemburgo + Letonia + Libia + Marruecos + Monaco + Moldavia + Montenegro + Sant Martín + Madagascar + Islas Marshall + Macedonia d’o norte + Mali + Myanmar (Burma) + Mongolia + Macau, RAS China + Macau + Islas Marianas d’o Norte + Martinica + Mauritania + Montserrat + Malta + Mauricio + Maldivas + Malawi + Mexico + Malasia + Mozambique + Namibia + Nueva Caledonia + Nicher + Isla Norfolk + 
Nicheria + Nicaragua + Países Baixos + Noruega + Nepal + Nauru + Niue + Nueva Zelanda + Omán + Panamá + Perú + Polinesa Francesa + Papúa Nueva Guinea + Filipinas + Paquistán + Polonia + Saint-Pierre y Miquelon + Islas Pitcairn + Puerto Rico + Territorios Palestinos + Palestina + Portugal + Palau + Paraguay + Qatar + Territorios aleixaus d’Oceanía + Isla d’a Reunión + Rumanía + Serbia + Rusia + Ruanda + Arabia Saudí + Islas Salomón + Seychelles + Sudán + Suecia + Singapur + Santa Helena + Eslovenia + Svalbard y Jan Mayen + Eslovaquia + Sierra Leona + San Marino + Senegal + Somalia + Surinam + Sudán d’o Sud + Sant Tomé y Principe + El Salvador + Sint Maarten + Siria + Eswatini + Swazilandia + Tristán da Cunha + Islas Turcas y Caicos + Chad + Territorios australs franceses + Togo + Tailandia + Tayikistán + Tokelau + Timor-Leste + Timor Oriental + Turkmenistán + Tunicia + Tonga + Turquía + Trinidad y Tobago + Tuvalu + Taiwán + Tanzania + Ucrainia + Uganda + Islas perifericas d’os EUA + Nacions Unidas + Estaus Unius + EUA + Uruguay + Uzbequistán + Ciudat d’o Vaticano + Sant Vicent y las Granadinas + Venezuela + Islas Virchens Britanicas + Islas Virchens Norte-americanas + Vietnam + Vanuatu + Wallis y Fortuna + Samoa + Pseudoaccentos + Pseudobidi + Kosovo + Yemen + Mayotte + Republica de Sudafrica + Zambia + Zimbabue + Rechión desconoixida + + + calendario gregoriano + calendario ISO-8601 + ordenación standard + dichitos occidentals + + + metrico + RU + EUA + + + Idioma: {0} + Escritura: {0} + Rechión: {0} + + + + [a á b c d e é f g h i í j k l m n o ó p q r s t u ú ü v w x y z] + [· à â ä ç è ê ë ì î ï ñ ò ô ö ù û] + [A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + [\- ‑ , . % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‐ ‑ – — , ; \: ! ¡ ? ¿ . 
… ' ‘ ’ " “ ” « » ( ) \[ \] § @ * / \& # † ‡ ′ ″] + + + « + » + + + + + + + + + + EEEE, d MMMM 'de' y G + GyMMMMEEEEd + + + + + d MMMM 'de' y G + GyMMMMd + + + + + d MMM 'de' y G + GyMMMd + + + + + dd-MM-y GGGGG + GGGGGyMMdd + + + + + + + {1} 'a' 'las' {0} + + + + + {1} 'a' 'las' {0} + + + + + {1}, {0} + + + + + {1} {0} + + + + E, d + y G + MMM y G + d MMM y G + E, d MMM y G + d/M + E, d/M + d MMM + E, d MMM + d 'de' MMMM + y G + y G + M/y GGGGG + d/M/y GGGGG + E, d/M/y GGGGG + MMM y G + d MMM y G + E, d MMM y G + MMMM y G + QQQ y G + QQQQ y G + + + + y G – y G + y – y G + + + M/y GGGGG – M/y GGGGG + M/y – M/y GGGGG + M/y – M/y GGGGG + + + d/M/y – d/M/y GGGGG + d/M/y GGGGG – d/M/y GGGGG + d/M/y – d/M/y GGGGG + d/M/y – d/M/y GGGGG + + + E, d/M/y – E, d/M/y GGGGG + E, d/M/y GGGGG – E, d/M/y GGGGG + E, d/M/y – E, d/M/y GGGGG + E, d/M/y – E, d/M/y GGGGG + + + LLL y G – LLL y G + LLL–LLL y G + LLL y – LLL y G + + + d–d MMM y G + d MMM y G – d MMM y G + d MMM – d MMM y G + d MMM y – d MMM y G + + + E, d MMM – E, d MMM y G + E, d MMM y G – E, d MMM y G + E, d MMM – E, d MMM y G + E, d MMM y – E, d MMM y G + + + M–M + + + d/M – d/M + d/M – d/M + + + E, d/M – E, d/M + E, d/M – E, d/M + + + LLL–LLL + + + d–d MMM + d MMM – d MMM + + + E, d MMM – E, d MMM + E, d MMM – E, d MMM + + + y–y G + + + M/y – M/y GGGGG + M/y – M/y GGGGG + + + d/M/y – d/M/y GGGGG + d/M/y – d/M/y GGGGG + d/M/y – d/M/y GGGGG + + + E, d/M/y – E, d/M/y GGGGG + E, d/M/y – E, d/M/y GGGGG + E, d/M/y – E, d/M/y GGGGG + + + LLL–LLL y G + LLL y – LLL y G + + + d–d MMM y G + d MMM – d MMM y G + d MMM y – d MMM y G + + + E, d MMM – E, d MMM y G + E, d MMM y – E, d MMM y G + E, d MMM y – E, d MMM y G + + + LLLL – LLLL y G + LLLL y – LLLL y G + + + + + + + + + chi. + feb. + mar. + abr. + may. + chn. + chl. + ago. + set. + oct. + nov. + avi. 
+ + + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + + + de chinero + de febrero + de marzo + d’abril + de mayo + de chunyo + de chuliol + d’agosto + de setiembre + d’octubre + de noviembre + d’aviento + + + + + chi. + feb. + mar. + abr. + may. + chn. + chl. + ago. + set. + oct. + nov. + avi. + + + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + + + chinero + febrero + marzo + abril + mayo + chunyo + chuliol + agosto + setiembre + octubre + noviembre + aviento + + + + + + + dom + lun + mar + mie + chu + vie + sab + + + D + L + Ma + Mi + Ch + V + S + + + dom + lun + mar + mie + chu + vie + sab + + + dominche + luns + martz + miercres + chueves + viernes + sabado + + + + + dom + lun + mar + mie + chu + vie + sab + + + D + L + Ma + Mi + Ch + V + S + + + dom + lun + mar + mie + chu + vie + sab + + + dominche + luns + martz + miercres + chueves + viernes + sabado + + + + + + + 1T + 2T + 3T + 4T + + + 1 + 2 + 3 + 4 + + + 1r trimestre + 2o trimestre + 3r trimestre + 4o trimestre + + + + + 1T + 2T + 3T + 4T + + + 1r trimestre + 2o trimestre + 3r trimestre + 4o trimestre + + + + + + + a.m. + p.m. + + + a.m. + p.m. + + + a.m. + p.m. + + + + + a.m. + p.m. + + + a.m. + p.m. + + + a.m. + p.m. + + + + + + a.C. + AEC + d.C. + EC + + + a.C. + AEC + d.C. 
+ EC + + + + + + EEEE, d MMMM 'de' y + yMMMMEEEEd + + + + + d MMMM 'de' y + yMMMMd + + + + + d MMM y + yMMMd + + + + + d/M/yy + yyMd + + + + + + + H:mm:ss zzzz + Hmmsszzzz + + + + + H:mm:ss z + Hmmssz + + + + + H:mm:ss + Hmmss + + + + + H:mm + Hmm + + + + + + + {1} 'a' 'las' {0} + + + + + {1} 'a' 'las' {0} + + + + + {1}, {0} + + + + + {1} {0} + + + + d + ccc + E d + y G + MMM y G + d MMM y G + E, d MMM y G + d/M + E, d/M + d MMM + E, d MMM + d MMMM + 'semana' W 'de' MMMM + 'semana' W 'de' MMMM + y + M/y + d/M/y + E, d/M/y + y MMM + d MMM y + E, d MMM y + MMMM y + QQQ 'de' y + QQQQ y + 'semana' w 'de' Y + 'semana' w 'de' Y + + + + y G – y G + y–y G + + + M/y GGGGG – M/y GGGGG + M/y – M/y GGGGG + M/y – M/y GGGGG + + + d/M/y – d/M/y GGGGG + d/M/y GGGGG – d/M/y GGGGG + d/M/y – d/M/y GGGGG + M/d/y – M/d/y GGGGG + + + E, d/M/y – E, d/M/y GGGGG + E, dd/MM/y GGGGG – E, dd/MM/y GGGGG + E, dd/MM/y GGGGG – E, dd/MM/y GGGGG + E, dd/MM/y GGGGG – E, dd/MM/y GGGGG + + + LLL y G – LLL y G + LLL – LLL y G + LLL y – LLL y G + + + d–d LLL y G + d MMM y G – d MMM y G + d MMM – d MMM y G + d MMM y – d MMM y G + + + E, d MMM – E, d MMM y G + E, d MMM y G – E, d MMM y G + E, d MMM – E, d MMM y G + E, d MMM y – E, d MMM y G + + + h a – h a + h – h a + + + HH – HH + + + h:mm a – h:mm a + h:mm – h:mm a + h:mm – h:mm a + + + HH:mm – HH:mm + HH:mm – HH:mm + + + h:mm a – h:mm a v + h:mm – h:mm a v + h:mm – h:mm a v + + + HH:mm – HH:mm v + HH:mm – HH:mm v + + + h a – h a v + h – h a v + + + HH – HH v + + + M–M + + + d/M – d/M + d/M – d/M + + + E, d/M – E, d/M + E, d/M – E, d/M + + + LLL–LLL + + + d–d MMM + d MMM – d MMM + + + E, d MMM – E, d MMM + E, d MMM – E, d MMM + + + y–y + + + M/y – M/y + M/y – M/y + + + d/M/y – d/M/y + d/M/y – d/M/y + d/M/y – d/M/y + + + E, d/M/y – E, d/M/y + E, d/M/y + E, d/M/Y – E, d/M/Y + + + LLL–LLL y + LLL y – LLL y + + + d–d MMM y + d MMM – d MMM y + d MMM y – d MMM y + + + E, d MMM y – E, d MMM y + E, d MMM – E, d MMM y + E, d MMM y – E, d MMM y + + + LLLL–LLLL y 
+ LLLL y – LLLL y + + + + + + + + era + + + anyo + l’anyo pasau + estianyo + l’anyo que viene + + + a. + l’anyo pasau + estianyo + l’anyo que viene + + + a. + l’anyo pasau + estianyo + l’anyo que viene + + + trimestre + + + trim. + + + trim. + + + mes + lo mes pasau + este mes + lo mes que viene + + + m. + lo mes pasau + este mes + lo mes que viene + + + m. + lo mes pasau + este mes + lo mes que viene + + + semana + la semana pasada + esta semana + la semana que viene + la semana de {0} + + + s. + la semana pasada + esta semana + la semana que viene + la semana de {0} + + + s. + la semana pasada + esta semana + la semana que viene + la semana de ´{0} + + + día + ahiere + hue + manyana + + + día + ahiere + hue + manyana + + + día + ahiere + hue + manyana + + + día d’a semana + + + a.m./p.m. + + + hora + + + h. + + + h. + + + minuto + + + min. + + + min. + + + segundo + + + s. + + + s. + + + zona horaria + + + + GMT{0} + Hora de {0} + Hora de verano de {0} + Hora standard de {0} + + + tiempo universal coordenado + + + + Ciudat desconoixida + + + Macau + + + + hora central d’America d’o Norte + hora standard central d’America d’o Norte + hora de verano central d’America d’o Norte + + + + + hora oriental d’America d’o Norte + hora standard oriental d’America d’o Norte + hora de verano oriental d’America d’o Norte + + + + + hora de montanya d’America d’o Norte + hora standard de montanya d’America d’o Norte + hora de verano de montanya d’America d’o Norte + + + + + hora d’o Pacifico d’America d’o Norte + hora standard d’o Pacifico d’America d’o Norte + hora de verano d’o Pacifico d’America d’o Norte + + + + + hora de l’Atlantico + hora standard de l’Atlantico + hora de verano de l’Atlantico + + + + + hora d’o centro d’Europa + hora standard d’o centro d’Europa + hora de verano d’o centro d’Europa + + + + + hora de l’este d’Europa + hora standard de l’este d’Europa + hora de verano de l’este d’Europa + + + + + hora de l’ueste d’Europa + hora standard de l’ueste d’Europa 
+ hora de verano de l’ueste d’Europa + + + + + hora d’o meridiano de Greenwich + + + + + + + , + . + % + + + - + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + #,##0.00 ¤ + + + ¤#,##0.00;(¤#,##0.00) + + + {0} {1} + {0} {1} + + + + real brasilenyo + real brasilenyo + reals brasilenyos + R$ + R$ + + + yuan chino + yuan chino + yuans chinos + CN¥ + ¥ + + + euro + euro + euros + + r + + + libra britanica + libra britanica + libras britanicas + £ + £ + + + rupia india + rupia india + rupias indias + + + + + yen chaponés + yen chaponés + yens chaponeses + ¥ + ¥ + + + ringgit de Malasia + ringgit de Malasia + ringgit de Malasia + + + piso filipino + piso filipino + pisos filipinos + + + rublo ruso + rublo ruso + rublos rusos + RUB + + + + dólar de Singapur + dolar de Singapur + dolars de Singapur + + + baht tailandés + baht tailandés + baht tailandés + + + dolar d’os Estaus Unius + dolar d’os Estaus Unius + dolars d’os Estaus Unius + US$ + $ + + + moneda desconoixida + (moneda desconoixida) + (moneda desconoixida) + + + + {0} día + {0} días + Pilla lo {0}o a la dreita + + + + + + {0} per {1} + + + {0}⋅{1} + + + revolución + {0} revolución + {0} revolucions + + + radians + {0} radián + {0} radians + + + graus + {0} grau + {0} graus + + + minutos d’arco + {0} minutos d’arco + {0} minutos d’arco + + + segundos d’arco + {0} segundo d’arco + {0} segundos d’arco + + + quiratz + {0} quirat + {0} quiratz + + + per cient + {0} per cient + {0} per cient + + + per mil + {0} per mil + {0} per mil + + + per miriada + {0} per miriada + {0} per miriada + + + joules + {0} joule + {0} joules + + + centimetros + {0} centimetro + {0} centimetros + {0} per centimetro + + + tonas metricas + {0} tona metrica + {0} tonas metricas + + + kilogramos + {0} kilogramo + {0} kilogramos + {0} per kilogramo + + + gramos + {0} gramo + {0} gramos + {0} per gramo + + + tonas + {0} tona + {0} tonas + + + quiratz + {0} quirat + {0} quiratz + + + masas d’a Tierra + {0} masa d’a Tierra 
+ {0} masas d’a Tierra + + + masas solars + {0} masa solar + {0} masas solars + + + watts + {0} watt + {0} watts + + + caballos de vapor + {0} caballo de vapor + {0} caballos de vapor + + + graus Celsius + {0} grau Celsius + {0} graus Celsius + + + graus Farenheit + {0} grau Farenheit + {0} graus Farenheit + + + punto cardinal + {0} este + {0} norte + {0} sud + {0} ueste + + + + + {0} rev + {0} rev + + + {0} rad + {0} rad + + + º + {0}° + {0}° + + + {0} arcmin + {0} arcmin + + + {0} arcsec + {0} arcsec + + + {0} kt + {0} kt + + + {0}% + {0}% + + + {0}‰ + {0}‰ + + + {0}‱ + {0}‱ + + + J + {0} J + {0} J + + + cm + {0} cm + {0} cm + {0}/cm + + + tm + {0} tm + {0} tm + + + {0} kg + {0} kg + + + g + {0} g + {0} g + + + {0} tn + {0} tn + + + {0} M⊕ + {0} M⊕ + + + {0} M☉ + {0} M☉ + + + W + {0} W + {0} W + + + {0} hp + {0} hp + + + {0}°C + {0}°C + + + punto + {0}U + + + + + {0}/{1} + + + {0}⋅{1} + + + % + {0}% + {0}% + + + cm + {0} cm + {0} cm + + + kg + {0} kg + {0} kg + + + g + {0} g + {0} g + + + °C + {0}°C + {0}°C + + + punto + {0}N + {0}S + {0}U + + + + h:mm + + + h:mm:ss + + + m:ss + + + + + {0}, {1} + {0}, {1} + {0} y {1} + {0} y {1} + + + {0}, {1} + {0}, {1} + {0} u {1} + {0} u {1} + + + {0}, {1} + {0}, {1} + {0} u {1} + {0} u {1} + + + {0}, {1} + {0}, {1} + {0} u {1} + {0} u {1} + + + {0}, {1} + {0}, {1} + {0} y {1} + {0} y {1} + + + {0}, {1} + {0}, {1} + {0} y {1} + {0} y {1} + + + {0}, {1} + {0}, {1} + {0} y {1} + {0} y {1} + + + {0} {1} + {0} {1} + {0} {1} + {0} {1} + + + {0}, {1} + {0}, {1} + {0} y {1} + {0} y {1} + + + + + sí:s + no:n + + + diff --git a/make/data/cldr/common/main/an_ES.xml b/make/data/cldr/common/main/an_ES.xml new file mode 100644 index 00000000000..03e10f9bda6 --- /dev/null +++ b/make/data/cldr/common/main/an_ES.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/ann.xml b/make/data/cldr/common/main/ann.xml index 67b4f4f9828..8fd5510aea7 100644 --- a/make/data/cldr/common/main/ann.xml +++ 
b/make/data/cldr/common/main/ann.xml @@ -1,6 +1,6 @@ - + + + + + + + + العامية + + + + + right-to-left + + + + [\u064B \u064C \u064D \u064E \u064F \u0650 \u0651 \u0652 \u0670 ء آ أ ؤ إ ئ ا ب ة ت ث ج ح خ د ذ ر ز س ش ص ض ط ظ ع غ ف ق ك ل م ن ه و ى ي] + [ـ\u200C\u200D\u200E\u200F پ چ ژ ڜ ڢ ڤ ڥ ٯ ڧ ڨ ک گ ی] + [ا ب ت ث ج ح خ د ذ ر ز س ش ص ض ط ظ ع غ ف ق ك ل م ن ه و ي] + [\u061C\u200E \- ‑ , ٫ ٬ . % ٪ ‰ ؉ + 0٠ 1١ 2٢ 3٣ 4٤ 5٥ 6٦ 7٧ 8٨ 9٩] + [\- ‐ ‑ – — ، ؛ \: ! ؟ . … ' " « » ( ) \[ \] \&] + + + + + + + + كانون التاني + شباط + أذار + نيسان + أيار + حزيران + تموز + آب + أيلول + تشرين الأول + تشرين التاني + كانون الأول + + + + + + + الأحد + التنين + التلاتا + الأربعا + الخميس + الجمعة + السبت + + + + + + + صباحا + مساء + + + + + + + + + + د.أ.‏ + + + + diff --git a/make/data/cldr/common/main/apc_SY.xml b/make/data/cldr/common/main/apc_SY.xml new file mode 100644 index 00000000000..b6113b39be2 --- /dev/null +++ b/make/data/cldr/common/main/apc_SY.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/ar.xml b/make/data/cldr/common/main/ar.xml index e4be099c4de..c151869ea47 100644 --- a/make/data/cldr/common/main/ar.xml +++ b/make/data/cldr/common/main/ar.xml @@ -1,6 +1,6 @@ - {given-monogram-allCaps}.{given2-monogram-allCaps}.{surname-monogram-allCaps} {given-informal-monogram-allCaps}.{surname-monogram-allCaps} - {prefix} {given} {given2-initial} {surname} + {title} {given} {given2-initial} {surname} {given-informal} {surname} - {prefix} {surname} + {title} {surname} {given-informal} @@ -13221,17 +14411,20 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ {given-monogram-allCaps}.{surname-monogram-allCaps} - {prefix} {given-initial} {surname} + {title} {given-initial} {surname} {given-informal-initial}. 
{surname} - {prefix} {surname} + {title} {surname} {given-informal} + + {surname-monogram-allCaps} + {given-monogram-allCaps}.{surname-monogram-allCaps} @@ -13242,7 +14435,7 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ {surname} {given-informal} - {prefix} {surname} + {title} {surname} {given-informal} @@ -13260,34 +14453,34 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ {surname} {given-informal} - {prefix} {surname} + {title} {surname} {given-informal} - {surname-monogram-allCaps}. + {surname-monogram-allCaps} - {given-informal-monogram-allCaps}. + {given-informal-monogram-allCaps} - {surname}، {given-initial} + {surname}، {given-initial} {given2-initial} {surname} {given-initial} - {prefix} {surname} + {title} {surname} {given-informal} - {surname-monogram-allCaps}. + {surname-monogram-allCaps} - {given-informal-monogram-allCaps}. + {given-informal-monogram-allCaps} {surname-prefix} {surname-core}، {given} {given2} @@ -13307,27 +14500,51 @@ Warnings: All cp values have U+FE0F characters removed. 
See /annotationsDerived/ {surname}، {given-informal} - + منير - + سميرة النجار - + كمال مجدي العامر - - الدكتور - علاء الدين - ربيع - رامي كامل + + السيد + أحمد رامي + وسام + نجيب محفوظ أبو - شقرا + الأغا علم الدين - ماجستير، دكتوراه + الابن + النائب + + + سندباد + + + كاثرين + مولر + + + زيزينيا + هاميش + ستوبر + + + الدكتور + عايدة كورنيليا + نيللي + سيزار مارتن + فون + برول + غونزاليس دومينغو + الإبن + طبيب وجراح diff --git a/make/data/cldr/common/main/ar_001.xml b/make/data/cldr/common/main/ar_001.xml index ace8e36d5d3..c388fbdd243 100644 --- a/make/data/cldr/common/main/ar_001.xml +++ b/make/data/cldr/common/main/ar_001.xml @@ -1,6 +1,6 @@ - + + + + + + + + Mapudungun + + + + + left-to-right + top-to-bottom + + + + [a {ch} d e f g i k l ḻ {ll} m n ñ ṉ {ng} o p r s {sh} t ṯ {tr} u ü w y] + [b c h j q x z] + [A {CH} D E F G I K L Ḻ {LL} M N Ñ Ṉ {NG} O P R S {SH} T Ṯ {TR} U Ü W Y] + + diff --git a/make/data/cldr/common/main/arn_CL.xml b/make/data/cldr/common/main/arn_CL.xml new file mode 100644 index 00000000000..105e0609de8 --- /dev/null +++ b/make/data/cldr/common/main/arn_CL.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/as.xml b/make/data/cldr/common/main/as.xml index f3ac4afd749..1158c7fb2e3 100644 --- a/make/data/cldr/common/main/as.xml +++ b/make/data/cldr/common/main/as.xml @@ -1,6 +1,6 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + دۆنیا + + + دیل: {0} + یازی: {0} + بؤلگه: {0} + + + + + right-to-left + top-to-bottom + + + + [آ ؤ ا ب پ ت ث ج چ ح خ د ذ ر ز ژ س ش ص ض ط ظ ع غ ف ق ک گ ل م ن ه و ۆ ۇ ی ؽ] + [\u200C\u200D\u200E\u200F \u064E \u064F \u0650 \u0652 إ ك ڭ ى ي] + [آ ا ب پ ت ث ج چ ح خ د ذ ر ز ژ س ش ص ض ط ظ ع غ ف ق ک گ ل م ن ه و ی] + [\- ‐ ‑ ، ٫ ٬ ؛ \: ! ؟ . 
… ‹ › « » ( ) \[ \] * / \\] + {0}… + …{0} + {0}…{1} + {0} … + … {0} + {0} … {1} + ؟ + + + « + » + + + + + arabext + + arabext + + + : + + + diff --git a/make/data/cldr/common/main/az_Arab_IQ.xml b/make/data/cldr/common/main/az_Arab_IQ.xml new file mode 100644 index 00000000000..27d806cf2a0 --- /dev/null +++ b/make/data/cldr/common/main/az_Arab_IQ.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + + + برازیل + چین + جرمنی + پرانس + برتانیا + هندستان + ایتالیا + جاپان + پاکستان + روس + امریکی هئوارێن ملک + نگیشّتگێن دمگ + + + بُدّایی سالدر + چینی سالدر + کپتی سالدر + دانگی سالدر + ایتیوپیایی سالدر + ایتیوپیایی آمیت آلم سالدر + گرێگۆری سالدر + ابرانی سالدر + هندی کئومی سالدر + اسلامی سالدر + اسلامی شهری سالدر + اسلامی سئوودی اربی سالدر + اسلامی نجومی سالدر + اسلامی ام الکراهی سالدر + سالدر ISO-8601 + جاپانی سالدر + پارسی سالدر + مینگۆ چینی سالدر + زرّئے گیشّتگێن کالب + گیشّتگێن ترتیب + اربی-هندی نمبر + روسی نمبر + دێوناگری نمبر + مگربی نمبر + + + میتَری + برتانی + امریکی + + + زبان: {0} + سیاهگ: {0} + دمگ: {0} + + + + + right-to-left + top-to-bottom + + + + [\u064E \u064F \u0650 \u0651 \u0652 آ ا ئ ب پ ت ٹ ج چ د ڈ ر ڑ ز ژ س ش ک گ ل م ن و ۆ ه ی ێ ے] + [\u200C\u200D \u064B \u0654 ء أ ؤ إ ة ث ح خ ذ ص ض ط ظ ع غ ف ق ں ھ ہ] + [آ ا {ای} {اێ} {ائی} ب پ ت ٹ ج چ د ڈ ر ڑ ز ژ س ش ف ک گ ل م ن و ه ی] + [، ؛ \: ! ؟ . 
' ‹ › " « »] + + + + + + + + جن + پر + مار + اپر + مئیی + جون + جۆل + اگست + ستم + اکت + نئوم + دسم + + + جنوری + پروری + مارچ + اپرێل + مئیی + جون + جۆلایی + اگست + ستمبر + اکتوبر + نئومبر + دسمبر + + + + + + + یک + دو + سئے + چار + پنچ + جمه + شم + + + یکشمبه + دوشمبه + سئیشمبه + چارشمبه + پنچشمبه + جمه + شمبه + + + + + + + 1/4 + 2/4 + 3/4 + 4/4 + + + 1 + 2 + 3 + 4 + + + ائوَلی چارِک + دومی چارِک + سئیمی چارِک + چارُمی چارِک + + + + + 1/4 + 2/4 + 3/4 + 4/4 + + + 1 + 2 + 3 + 4 + + + ائوَلی چارِک + دومی چارِک + سئیمی چارِک + چارُمی چارِک + + + + + + + EEEE, d MMMM, y + + + + + d MMMM, y + + + + + d MMM, y + + + + + d/M/yy + + + + + + + hh:mm:ss a zzzz + + + + + hh:mm:ss a zzz + + + + + hh:mm:ss a + + + + + hh:mm a + + + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + + + + + اَلاسکائے ساهت + اَلاسکائے گیشّتگێن ساهت + اَلاسکائے گرماگی ساهت + + + + + امازۆنئے ساهت + امازۆنئے گیشّتگێن ساهت + امازۆنئے گرماگی ساهت + + + + + نیامی امریکائے ساهت + نیامی امریکائے گیشّتگێن ساهت + نیامی امریکائے گرماگی ساهت + + + + + رۆدراتکی امریکائے ساهت + رۆدراتکی امریکائے گیشّتگێن ساهت + رۆدراتکی امریکائے گرماگی ساهت + + + + + کۆهستگێن امریکائے ساهت + کۆهستگێن امریکائے گیشّتگێن ساهت + کۆهستگێن امریکائے گرماگی ساهت + + + + + آرامزِری امریکائے ساهت + آرامزِری امریکائے گیشّتگێن ساهت + آرامزِری امریکائے گرماگی ساهت + + + + + ارجنتینائے ساهت + ارجنتینائے گیشّتگێن ساهت + ارجنتینائے گرماگی ساهت + + + + + رۆنندی ارجنتینائے ساهت + رۆنندی ارجنتینائے گیشّتگێن ساهت + رۆنندی ارجنتینائے گرماگی ساهت + + + + + نیامی اُسترالیائے ساهت + نیامی اُسترالیائے گیشّتگێن ساهت + نیامی اُسترالیائے گرماگی ساهت + + + + + نیام‌رۆنندی اُسترالیائے ساهت + نیام‌رۆنندی اُسترالیائے گیشّتگێن ساهت + نیام‌رۆنندی اُسترالیائے گرماگی ساهت + + + + + رۆدراتکی اُسترالیائے ساهت + رۆدراتکی اُسترالیائے گیشّتگێن ساهت + رۆدراتکی اُسترالیائے گرماگی ساهت + + + + + رۆنندی اُسترالیائے ساهت + رۆنندی اُسترالیائے گیشّتگێن ساهت + رۆنندی اُسترالیائے گرماگی ساهت + + + + + برازیلئے ساهت + برازیلئے گیشّتگێن ساهت + 
برازیلئے گرماگی ساهت + + + + + نیامی یورپئے ساهت + نیامی یورپئے گیشّتگێن ساهت + نیامی یورپئے گرماگی ساهت + + + + + رۆدراتکی یورپئے ساهت + رۆدراتکی یورپئے گیشّتگێن ساهت + رۆدراتکی یورپئے گرماگی ساهت + + + + + دێمتری رۆدراتکی یورپئے گیشّتگێن ساهت + + + + + رۆنندی یورپئے ساهت + رۆنندی یورپئے گیشّتگێن ساهت + رۆنندی یورپئے گرماگی ساهت + + + + + هئواییئے ساهت + هئواییئے گیشّتگێن ساهت + هئواییئے گرماگی ساهت + + + + + نیامی اندۆنیزیائے گیشّتگێن ساهت + + + + + رۆدراتکی اندۆنیزیائے گیشّتگێن ساهت + + + + + رۆنندی اندۆنیزیائے گیشّتگێن ساهت + + + + + ایرکوتسکئے ساهت + ایرکوتسکئے گیشّتگێن ساهت + ایرکوتسکئے گرماگی ساهت + + + + + رۆدراتکی کازکستانئے گیشّتگێن ساهت + + + + + رۆنندی کازکستانئے گیشّتگێن ساهت + + + + + کرانسنُیارسکئے ساهت + کرانسنُیارسکئے گیشّتگێن ساهت + کرانسنُیارسکئے گرماگی ساهت + + + + + لۆرڈ هئو اُسترالیائے ساهت + لۆرڈ هئو اُسترالیائے گیشّتگێن ساهت + لۆرڈ هئو اُسترالیائے گرماگی ساهت + + + + + ماکواریئے گیشّتگێن ساهت + + + + + مَگَدَنئے ساهت + مَگَدَنئے گیشّتگێن ساهت + مَگَدَنئے گرماگی ساهت + + + + + شمالی مِکسیکۆئے ساهت + شمالی مِکسیکۆئے گیشّتگێن ساهت + شمالی مِکسیکۆئے گرماگی ساهت + + + + + آرامزِری مِکسیکۆئے ساهت + آرامزِری مِکسیکۆئے گیشّتگێن ساهت + آرامزِری مِکسیکۆئے گرماگی ساهت + + + + + ماسکۆئے ساهت + ماسکۆئے گیشّتگێن ساهت + ماسکۆئے گرماگی ساهت + + + + + نیوفاوونڈلئینڈئے ساهت + نیوفاوونڈلئینڈئے گیشّتگێن ساهت + نیوفاوونڈلئینڈئے گرماگی ساهت + + + + + نُرُنهائے ساهت + نُرُنهائے گیشّتگێن ساهت + نُرُنهائے گرماگی ساهت + + + + + نۆوۆسیبیرسکئے ساهت + نۆوۆسیبیرسکئے گیشّتگێن ساهت + نۆوۆسیبیرسکئے گرماگی ساهت + + + + + اۆمسکئے ساهت + اۆمسکئے گیشّتگێن ساهت + اۆمسکئے گرماگی ساهت + + + + + ولادیوُستُکئے ساهت + ولادیوُستُکئے گیشّتگێن ساهت + ولادیوُستُکئے گرماگی ساهت + + + + + یاکوتسکئے ساهت + یاکوتسکئے گیشّتگێن ساهت + یاکوتسکئے گرماگی ساهت + + + + + یێکاترینبورگئے ساهت + یێکاترینبورگئے گیشّتگێن ساهت + یێکاترینبورگئے گرماگی ساهت + + + + + + latn + + . 
+ , + % + + + - + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + ¤ #,##0.00 + + + + + + برازیلی ریال + R$ + R$ + + + یورۆ + + + + + برتانی پئوند + £ + £ + + + هندُستانی روپّئیی + + + + + اێرانی ریال + ریال + + + جاپانی یَن + ¥ + ¥ + + + پاکستانی روپی + Rs + Rs + + + روسی روبل + + + + + امریکی دالر + $ + $ + + + نگیشّتگێن زَرّ + XXX + + + + + + هئو:ه + نه:ن + + + diff --git a/make/data/cldr/common/main/bal_Arab.xml b/make/data/cldr/common/main/bal_Arab.xml new file mode 100644 index 00000000000..a6fb072f7ef --- /dev/null +++ b/make/data/cldr/common/main/bal_Arab.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + + + + Brázil + Chin + Jarmani + Paráns + Bartániá + Hendostán + Itáliá + Jápán + Pákestán + Rus + Amrikáay Tepákén Están + Nagisshetagén damag + + + Buddái sáldar + Chini sáldar + Kobti sáldar + Dángi sáldar + Etupiái sáldar + Etupiái Ámet Álem sáldar + Miládi sáldar + Ebráni sáldar + Hendi Kawmi sáldar + Eslámi sáldar + Eslámi shahri sáldar + Eslámi Saudi-Arabi sáldar + Eslámi Nojumi sáldar + Eslámi Omm al-Korrahi sáldar + ISO-8601 sáldar + Jápáni sáldar + Pársi sáldar + Mingu-Chini sáldar + Zarray anjárén káleb + Gisshetagén red o band + Arabi-Hendi mórdán + Rusi mórdán + Dénágari mórdán + Rónendi mórdán + + + mitari + Bartáni + Amriki + + + zobán: {0} + syáhag: {0} + damag: {0} + + + + + left-to-right + top-to-bottom + + + + [á a b c d é e g h i j k l m n ó o p r s t u v w y z] + [f ń q x] + [Á A B {Ch} D {Dh} É E F G H I J K L M N Ó O P R {Rh} S {Sh} T {Th} U V W Y Z {Zh}] + [, ; \: ? . 
‘ ’ “ ”] + + + + + + + + Jan + Par + Már + Apr + Mai + Jun + Jól + Aga + Sat + Akt + Naw + Das + + + Janwari + Parwari + Márch + Aprél + Mai + Jun + Jólái + Agast + Satambar + Aktubar + Nawambar + Dasambar + + + + + + + Yak + Do + Say + Chá + Pan + Jom + Sha + + + Yakshambeh + Doshambeh + Sayshambeh + Chárshambeh + Panchshambeh + Jomah + Shambeh + + + + + + + 1/4 + 2/4 + 3/4 + 4/4 + + + 1 + 2 + 3 + 4 + + + awali chárek + domi chárek + sayomi chárek + cháromi chárek + + + + + 1/4 + 2/4 + 3/4 + 4/4 + + + 1 + 2 + 3 + 4 + + + awali chárek + domi chárek + sayomi chárek + cháromi chárek + + + + + + + EEEE, d MMMM, y + + + + + d MMMM, y + + + + + d MMM, y + + + + + d/M/yy + + + + + + + hh:mm:ss a zzzz + + + + + hh:mm:ss a zzz + + + + + hh:mm:ss a + + + + + hh:mm a + + + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + + + + + Aláskáay wahd + Aláskáay anjári wahd + Aláskáay garmági wahd + + + + + Amázónay wahd + Amázónay anjári wahd + Amázónay garmági wahd + + + + + Delgáhi Amrikáay wahd + Delgáhi Amrikáay anjári wahd + Delgáhi Amrikáay garmági wahd + + + + + Ródarátki Amrikáay wahd + Ródarátki Amrikáay anjári wahd + Ródarátki Amrikáay garmági wahd + + + + + Kóhestagi Amrikáay wahd + Kóhestagi Amrikáay anjári wahd + Kóhestagi Amrikáay garmági wahd + + + + + Árámzeri Amrikáay wahd + Árámzeri Amrikáay anjári wahd + Árámzeri Amrikáay garmági wahd + + + + + Arjentináay wahd + Arjentináay anjári wahd + Arjentináay garmági wahd + + + + + Rónendi Arjentináay wahd + Rónendi Arjentináay anjári wahd + Rónendi Arjentináay gramági wahd + + + + + Delgáhi Ástréliáay wahd + Delgáhi Ástréliáay anjári wahd + Delgáhi Ástréliáay garmági wahd + + + + + Delgáhirónendi Ástréliáay wahd + Delgáhirónendi Ástréliáay anjári wahd + Delgáhirónendi Ástréliáay garmági wahd + + + + + Ródarátki Ástréliáay wahd + Ródarátki Ástréliáay anjári wahd + Ródarátki Ástréliáay garmági wahd + + + + + Rónendi Ástréliáay wahd + Rónendi Ástréliáay anjári wahd + Rónendi Ástréliáay garmági 
wahd + + + + + Brázilay wahd + Brázilay anjári wahd + Brázilay garmági wahd + + + + + Delgáhi Yuropay wahd + Delgáhi Yuropay anjári wahd + Delgáhi Yuropay garmági wahd + + + + + Ródarátki Yuropay wahd + Ródarátki Yuropay anjári wahd + Ródarátki Yuropay garmági wahd + + + + + Démterén Ródarátki Yuropay anjári wahd + + + + + Rónendi Yuropay wahd + Rónendi Yuropay anjári wahd + Rónendi Yuropay garmági wahd + + + + + Hawái/Alushiay wahd + Hawái/Alushiay anjári wahd + Hawái/Alushiay garmági wahd + + + + + Delgáhi Endhonishiáay anjári wahd + + + + + Ródarátki Endhonishiáay anjári wahd + + + + + Rónendi Endhonishiáay anjári wahd + + + + + Erkuskay wahd + Erkuskay anjári wahd + Erkuskay garmági wahd + + + + + Ródarátki Kázekestánay anjári wahd + + + + + Rónendi Kázekestánay anjári wahd + + + + + Krasnóyáskay wahd + Krasnóyáskay anjári wahd + Krasnóyáskay garmági wahd + + + + + Ástréliáay, Ládhaway wahd + Ástréliáay, Ládhaway anjári wahd + Ástréliáay, Ládhaway garmági wahd + + + + + Makwáriay anjári wahd + + + + + Mágadánay wahd + Mágadánay anjári wahd + Mágadánay garmági wahd + + + + + Shemálrónendi Meksikóay wahd + Górichánrónendi Meksikóay anjári wahd + Shemálrónendi Meksikóay garmági wahd + + + + + Árámzeri Meksikóay wahd + Árámzeri Meksikóay anjári wahd + Árámzeri Meksikóay garmági wahd + + + + + Máskóay wahd + Máskóay anjári wahd + Máskóay garmági wahd + + + + + Nipándlaynday wahd + Nipándlaynday anjári wahd + Nipándlaynday garmági wahd + + + + + Noronáay wahd + Noronáay anjári wahd + Noronáay garmági wahd + + + + + Nawásibiskay wahd + Nawásibiskay anjári wahd + Nawásibiskay garmági wahd + + + + + Ómskay wahd + Ómskay anjári wahd + Ómskay garmági wahd + + + + + Waládiwástókay wahd + Waládiwástókay anjári wahd + Waládiwástókay garmági wahd + + + + + Yákuskay wahd + Yákuskay anjári wahd + Yákuskay garmági wahd + + + + + Yakátrinborgay wahd + Yakátrinborgay anjári wahd + Yakátrinborgay garmági wahd + + + + + + latn + + . 
+ , + % + + + - + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + ¤ #,##0.00 + + + + + + Brázili riál + R$ + R$ + + + yuró + + + + + Bartáni pawndh + £ + £ + + + Hendostáni rupi + + + + + Éráni ryál + ریال + + + Jápáni yen + ¥ + ¥ + + + Pákestáni rupi + Rs + Rs + + + Rusi rubel + + + + + Amriki dhálar + $ + $ + + + Nazántagén zarr + XXX + + + + + + haw:h + na:n + + + diff --git a/make/data/cldr/common/main/bal_Latn_PK.xml b/make/data/cldr/common/main/bal_Latn_PK.xml new file mode 100644 index 00000000000..9bfffa1f977 --- /dev/null +++ b/make/data/cldr/common/main/bal_Latn_PK.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + دونیا/جهان + افریقا + اوقیانوسیه + امریکای براعظم + استرالیا + اسیا + اورورپا + متحدین عربین امارات + انگویلا + انگولا + ارجنٹاین + اسٹرالیا + آزربایجان + بیلجیم + بولغاریه + بحرین + بروندی + بیرمودا + برونی + بولیویه + برازیل + بهاماس + بوتان + کاناڈا + چیلی + کامیرون + چین + کولومبیا + کوبا + قبرس + جرمنی + جیبوتی + الجزایر + اکوادور + مصر + روچ‌کپتین سحرا + اریتره + هسپانیه + ایتوپیه + اورورپایی یکویی + فیجی + فرانسه + گابون + گرجستان + گانا + گرینلاند + گامبیا + گوینیا + یونان + گویانا + هانگ کانگ + هنگری + ایندونیزیا + اسرائیل + عراق + ایتالیه + اردن + کینیا + قیرغیزستان + کمبودیا + کومورس + کویٹ + قزاقستان + لاوس + لیبنان + لیبیا + مراکو + مالداویا + ماداگاسکار + مالی + مالته + موریتانیا + مکسیکو + مالیزیا + نیجیر + نایجیریا + نیوزلنڈ + ئومان + پانامه + پیرو + فلیپین + پورتگال + پاراگوی + قطر + رومانیه + سیربستان + روندا + سیشیل + سوڈان + سینگاپور + سینیگال + سومالیا + سورینامی + سوریه + چاد + ٹایلنڈ + تاجیکستان + تورکمنستان + ٹونیس + تانزانیا + اوگاندا + متحدین ایالات + اوراگوی + اوزبکیستان + وینزوویلا + ویتنام + یمن + زامبیا + زیمبابوی + نازانتین سیمسر + + + میٹریک + بریتانوی + امریکایی + + + + + right-to-left + top-to-bottom + + + + [آ ئ ا ب پ ت ث ٹ ج چ ح خ د ڈ ر ز ڑ ژ س ش ص ض ط ظ غ ف ق ک گ ل م ن ھ و ۆ ی ێ] + [\u200C ؤ] + [آ ئ ا ب پ ت ث ٹ ج چ ح خ د ڈ ر ز ڑ ژ س ش ص ض ط ظ غ ف ق ک گ 
ل م ن ھ و ۆ ی ێ] + [‐ – — ، ؛ \: ! ؟ . … ‘ ’ " “ ” « » ( ) \[ \] § @ * \\ \& # † ‡ ′ ″] + + + + + سال + + + سال + + + سال + + + ماه + + + ماه + + + ماه + + + هفته‌گ + + + هفته‌گ + + + هفته‌گ + + + روچ + زي + مروچی + باندا + + +{0} روچ + + + + روچ + + + روچ + + + هفته‌گئ روچان + + + ساعت + + + سائت + + + ساعت + + + + {0} + + نازانتین شاران + + + دوبی + + + کابل + + + باکو + + + ڈاکا + + + بحرین + + + ٹیمپو + + + اورمچی + + + تیبلیسی + + + یورشلیم + + + کلکته + + + بغداد + + + تهران + + + امان + + + توکیو + + + بیشکیک + + + سیول + + + کویٹ + + + اقتاو + + + اورال + + + اقتوبه + + + قیزیلوردا + + + الماته + + + کولومبو + + + اولان‌باتور + + + مالدیف + + + کٹمنڈو + + + مسقط + + + کراچی + + + غزه + + + قطر + + + کیروف + + + سامارا + + + تومسک + + + ریاض + + + دمشق + + + دوشنبه + + + عشق آبات + + + استانبول + + + سمرقند + + + تاشکینت + + + عدن + + + + اوگانستانی وخت + + + + + روچ‌دراتین قزاقستانی وخت + + + + + روچ‌کپتین قزاقستانی وخت + + + + + قیرغیزستانی وخت + + + + + تاجیکستانی وخت + + + + + + arabext + + arabext + + + : + + + ٫ + ، + ٪ + + + - + × + + ناعدد + : + + + + + ¤ #,##0.00 + + + + + 0 هزار ¤ + ¤00هزار + ¤ 000هزار + ¤ 0میلیون + ¤ 00میلیون + ¤000میلیون + ¤0میلیارد + ¤00میلیارد + + + {0} {1} + + + + اوگانستانئ اوگانی + اوگانستانئ اوگانی + ؋ + + + بنگلادیشئ ٹاکه + بنگلادیشئ ٹاکه + BDT + + + + بوتانئ انگولٹروم + بوتانئ انگولٹروم + BTN + + + هندوستانئ روپی + هندوستانئ روپی + + + + + ایرانئ ریال + ایرانئ ریال + ریال + + + سریلانکایی روپی + سریلانکایی روپی + LKR + Rs + + + مالدیوی روپی + مالدیوی روپی + MVR + + + نیپالین روپی + نیپالین روپی + NPR + Rs + + + پاکستانئ روپی + پاکستانئ روپی + PKR + Rs + + + روسین روبل + + + + ≥{0} + {0}–{1} + + + + + + سده‌گ + {0} سده‌گ + + + سال + {0} سال + {0} سالئ تا + + + ماه + {0} ماه + {0} به ماه‌ای تا + + + هفته‌گ + {0} هفته‌گ + {0} هفته‌گئ تا + + + روچ + {0} روچ + {0} روچئ تا + + + ساعت + {0} ساعت + {0} ساعتئ تا + + + دقیقه + {0} دقیقه + {0} دقیقه‌ای تا + + + ثانیه + {0} ثانیه + {0} ثانیه‌ای تا + + + میلی‌ثانیه + {0} میلی‌ثانیه + 
+ + مایکروثانیه + {0} مایکروثانیه + + + نانوثانیه + {0} نانوثانیه + + + کیلومیتر + {0} کیلومیتر + {0} کیلومیترئ تا + + + میتر + {0} میتر + {0} میترئ تا + + + روچ‌دراتین {0} + بُرزسرین {0} + + + + + سده‌گ + {0} سده‌گ + + + سال + {0} سال + {0}/سال + + + ماه + {0} ماه + {0}/ماه + + + هفته‌گ + {0} هفته‌گ + {0}/هفته‌گ + + + روچ + {0} روچ + {0}/روچ + + + ساعت + {0} ساعت + {0}/ساعت + + + دقیقه + {0} دقیقه + {0}/دقیقه + + + ثانیه + {0} ثانیه + {0}/ث + + + میلی‌ثانیه + {0} میلی‌ثانیه + + + مایکروثانیه + {0} مایکروثانیه + + + نانوثانیه + {0} نانوثانیه + + + کیلومیتر + {0} کیلومیتر + {0}/کیلومیتر + + + میتر + {0} میتر + {0}/میتر + + + روچ‌دراتین {0} + بُرزسرین {0} + + + + + سال + {0} سال + + + ماه + {0}ماه + + + هفته‌گ + {0}هفته‌گ + + + روچ + {0}روچ + + + ساعت + {0}ساعت + + + دقیقه + {0}دقیقه + + + ثانیه + {0}ث + + + میلی‌ثانیه + {0}میلی‌ثانیه + + + کیلومیتر + {0}کیلومیتر + + + میتر + {0}میتر + + + روچ‌دراتین {0} + بُرزسرین {0} + + + + h:mm + + + h:mm:ss + + + m:ss + + + + + {0}، {1} + {0}، {1} + {0}، و {1} + {0} و {1} + + + {0}، {1} + {0}، {1} + {0}، و {1} + {0} و {1} + + + {0}، {1} + {0}، {1} + {0}، {1} + {0}، {1} + + + {0} {1} + {0} {1} + {0} {1} + {0} {1} + + + {0}، {1} + {0}، {1} + {0}، {1} + {0}، {1} + + + + + هان:هاو + نه:ن + + + + {0} — موچ + {0}: {1} + {0} — تاریخی + {0} ضربه + + diff --git a/make/data/cldr/common/main/bgn_AE.xml b/make/data/cldr/common/main/bgn_AE.xml new file mode 100644 index 00000000000..773bfd72b56 --- /dev/null +++ b/make/data/cldr/common/main/bgn_AE.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/bgn_AF.xml b/make/data/cldr/common/main/bgn_AF.xml new file mode 100644 index 00000000000..53cc222ffaa --- /dev/null +++ b/make/data/cldr/common/main/bgn_AF.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/bgn_IR.xml b/make/data/cldr/common/main/bgn_IR.xml new file mode 100644 index 00000000000..e4eea64176d --- /dev/null +++ b/make/data/cldr/common/main/bgn_IR.xml @@ -0,0 +1,14 @@ + + + 
+ + + + + + + diff --git a/make/data/cldr/common/main/bgn_OM.xml b/make/data/cldr/common/main/bgn_OM.xml new file mode 100644 index 00000000000..0155f7d6fb1 --- /dev/null +++ b/make/data/cldr/common/main/bgn_OM.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/bgn_PK.xml b/make/data/cldr/common/main/bgn_PK.xml new file mode 100644 index 00000000000..456d8df2f1f --- /dev/null +++ b/make/data/cldr/common/main/bgn_PK.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/bho.xml b/make/data/cldr/common/main/bho.xml index 9b916393fd6..724cb84915f 100644 --- a/make/data/cldr/common/main/bho.xml +++ b/make/data/cldr/common/main/bho.xml @@ -1,6 +1,6 @@ - + + + + + + + + ꪼꪕꪒꪾ + + + + [\uAABF \uAAC1 ꫝ ꪀ ꪁ ꪄ ꪅ ꪆ ꪇ ꪈ ꪉ ꪊ ꪋ ꪎ ꪏ ꪐ ꪑ ꪒ ꪓ ꪔ ꪕ ꪖ ꪗ ꪘ ꪙ ꪚ ꪛ ꪜ ꪝ ꪠ ꪡ ꪢ ꪣ ꪤ ꪥ ꪦ ꪧ ꪨ ꪩ ꪪ ꪫ ꪬ ꪭ ꪮ ꪯ \uAAB0 ꪱ \uAAB2 \uAAB3 \uAAB4 ꪵ ꪶ \uAAB7 \uAAB8 ꪹ ꪺ ꪻ ꪼ ꪽ \uAABE ꫀ ꫂ ꫛ ꫜ] + [. ꫞ ꫟] + + diff --git a/make/data/cldr/common/main/blt_VN.xml b/make/data/cldr/common/main/blt_VN.xml new file mode 100644 index 00000000000..1746f69fb20 --- /dev/null +++ b/make/data/cldr/common/main/blt_VN.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/bm.xml b/make/data/cldr/common/main/bm.xml index c91898bfa33..4b9ccee4e7a 100644 --- a/make/data/cldr/common/main/bm.xml +++ b/make/data/cldr/common/main/bm.xml @@ -1,6 +1,6 @@ - + + + + + + + + Kamerûn + + + mɔné + + + Metric + + + + [a á â ǎ ā b c d e é ê ě ē ə {ə\u0301} {ə\u0302} {ə\u030C} {ə\u0304} ɛ {ɛ\u0301} {ɛ\u0302} {ɛ\u030C} {ɛ\u0304} g h i í î ǐ ī j k l m ḿ n ń ŋ o ó ô ǒ ō ɔ {ɔ\u0301} {ɔ\u0302} {ɔ\u030C} {ɔ\u0304} p s t u ú û ǔ ū w y z ʼ] + [f q r v x] + [A B C D E Ə Ɛ G H I J K L M N Ŋ O Ɔ P S T U W Y Z] + [, ; \: ! ? . ' "] + {0}… + …{0} + {0}…{1} + + + + + + + + + + + mwɛ̌ + + + ngɔn + + + sɔ̂ndé + + + mbwɛ + súúbɛ + chǎn ábe éʼtómé + chii + chǎn ábe éʼhúɛʼ + + + háwa + + + menúte + + + + + GMT+1 + + + + + + . 
+ , + % + + + - + + + + + + + #,##0.### + + + + + + + #,##0% + + + + + + + ¤ #,##0.00 + + + + + + Frânke CFA + FCFA + + + + + + {0}, {1} + {0}, {1} + {0} {1} + {0}, {1} + + + diff --git a/make/data/cldr/common/main/bss_CM.xml b/make/data/cldr/common/main/bss_CM.xml new file mode 100644 index 00000000000..be5edffb7d1 --- /dev/null +++ b/make/data/cldr/common/main/bss_CM.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/byn.xml b/make/data/cldr/common/main/byn.xml new file mode 100644 index 00000000000..eae47fb6c2b --- /dev/null +++ b/make/data/cldr/common/main/byn.xml @@ -0,0 +1,527 @@ + + + + + + + + + + + አፋርኛ + አብሐዚኛ + አፍሪቃንስኛ + አማርኛ + ዐርቢኛ + አሳሜዛዊ + አያማርኛ + አዜርባይጃንኛ + ባስኪርኛ + ቤላራሻኛ + ቡልጋሪኛ + ቢስላምኛ + በንጋሊኛ + ትበትንኛ + ብሬቶንኛ + ብሊን + ካታላንኛ + ኮርሲካኛ + ቼክኛ + ወልሽ + ዴኒሽ + ጀርመን + ድዞንግኻኛ + ግሪክኛ + እንግሊዝኛ + ኤስፐራንቶ + ስፓኒሽ + ኤስቶኒአን + ባስክኛ + ፐርሲያኛ + ፊኒሽ + ፊጂኛ + ፋሮኛ + ፈረንሳይኛ + ፍሪስኛ + አይሪሽ + እስኮትስ ጌልክኛ + ግዕዝኛ + ጋለጋኛ + ጓራኒኛ + ጉጃርቲኛ + ሃውሳኛ + ዕብራስጥ + ሐንድኛ + ክሮሽያንኛ + ሀንጋሪኛ + አርመናዊ + ኢንቴርሊንጓ + እንዶኒሲኛ + እንተርሊንግወ + እኑፒያቅኛ + አይስላንድኛ + ጣሊያንኛ + እኑክቲቱትኛ + ጃፓንኛ + ጃቫንኛ + ጊዮርጊያን + ካዛክኛ + ካላሊሱትኛ + ክመርኛ + ካናዳኛ + ኮሪያኛ + ካሽሚርኛ + ኩርድሽኛ + ኪርጊዝኛ + ላቲንኛ + ሊንጋላኛ + ላውስኛ + ሊቱአኒያን + ላትቪያን + ማላጋስኛ + ማዮሪኛ + ማከዶኒኛ + ማላያላምኛ + ሞንጎላዊኛ + ማራዚኛ + ማላይኛ + ማልቲስኛ + ቡርማኛ + ናኡሩ + ኔፓሊኛ + ደች + ኖርዌጂያን + ኦኪታንኛ + ኦሮምኛ + ኦሪያኛ + ፓንጃቢኛ + ፖሊሽ + ፑሽቶኛ + ፖርቱጋሊኛ + ኵቿኛ + ሮማንስ + ሩንዲኛ + ሮማኒያን + ሞልዳቫዊና + ራሽኛ + ኪንያርዋንድኛ + ሳንስክሪትኛ + ሲንድሂኛ + ሳንጎኛ + ስንሃልኛ + ሲዳምኛ + ስሎቫክኛ + ስሎቪኛ + ሳሞአኛ + ሾናኛ + ሱማልኛ + ልቤኒኛ + ሰርቢኛ + ስዋቲኛ + ሶዞኛ + ሱዳንኛ + ስዊድንኛ + ስዋሂሊኛ + ታሚልኛ + ተሉጉኛ + ታጂኪኛ + ታይኛ + ትግርኛ + ትግረ + ቱርክመንኛ + ታጋሎገኛ + ጽዋናዊኛ + ቶንጋ + ቱርክኛ + ጾንጋኛ + ታታርኛ + ትዊኛ + ኡዊግሁርኛ + ዩክረኒኛ + ኡርዱኛ + ኡዝበክኛ + ቪትናምኛ + ቮላፑክኛ + ዎሎፍኛ + ዞሳኛ + ይዲሻዊኛ + ዮሩባዊኛ + ዡዋንግኛ + ቻይንኛ + ዙሉኛ + + + + + + አንዶራ + የተባበሩት አረብ ኤምሬትስ + አልባኒያ + አርሜኒያ + አርጀንቲና + ኦስትሪያ + አውስትሬሊያ + አዘርባጃን + ቦስኒያ እና ሄርዞጎቪኒያ + ባርቤዶስ + ቤልጄም + ቡልጌሪያ + ባህሬን + ቤርሙዳ + ቦሊቪያ + ብራዚል + ቡህታን + ቤላሩስ + ቤሊዘ + ኮንጎ + የመካከለኛው አፍሪካ ሪፐብሊክ + ስዊዘርላንድ + ቺሊ + ካሜሩን + ቻይና + ኮሎምቢያ + ኬፕ ቬርዴ + ሳይፕረስ + ቼክ ሪፑብሊክ + ጀርመን + 
ዴንማርክ + ዶሚኒካ + ዶሚኒክ ሪፑብሊክ + አልጄሪያ + ኢኳዶር + ኤስቶኒያ + ግብጽ + ምዕራባዊ ሳህራ + ኤርትራ + ስፔን + ኢትዮጵያ + ፊንላንድ + ፊጂ + ሚክሮኔዢያ + ፈረንሳይ + እንግሊዝ + ጆርጂያ + የፈረንሳይ ጉዊአና + ጋምቢያ + ጊኒ + ኢኳቶሪያል ጊኒ + ግሪክ + ቢሳዎ + ጉያና + ሆንግ ኮንግ + ክሮኤሽያ + ሀይቲ + ሀንጋሪ + ኢንዶኔዢያ + አየርላንድ + እስራኤል + ህንድ + ኢራቅ + አይስላንድ + ጣሊያን + ጃማይካ + ጆርዳን + ጃፓን + ካምቦዲያ + ኮሞሮስ + ሰሜን ኮሪያ + ደቡብ ኮሪያ + ክዌት + ሊባኖስ + ሊቱዌኒያ + ላትቪያ + ሊቢያ + ሞሮኮ + ሞልዶቫ + ማከዶኒያ + ሞንጎሊያ + ማካዎ + ሞሪቴኒያ + ማልታ + ማሩሸስ + ሜክሲኮ + ማሌዢያ + ናሚቢያ + ኒው ካሌዶኒያ + ናይጄሪያ + ኔዘርላንድ + ኖርዌ + ኔፓል + ኒው ዚላንድ + ፔሩ + የፈረንሳይ ፖሊኔዢያ + ፓፑዋ ኒው ጊኒ + ፖላንድ + ፖርታ ሪኮ + ሮሜኒያ + ራሺያ + ሳውድአረቢያ + ሱዳን + ስዊድን + ሲንጋፖር + ስሎቬኒያ + ስሎቫኪያ + ሴኔጋል + ሱማሌ + ሲሪያ + ቻድ + የፈረንሳይ ደቡባዊ ግዛቶች + ታይላንድ + ታጃኪስታን + ምስራቅ ቲሞር + ቱኒዚያ + ቱርክ + ትሪኒዳድ እና ቶባጎ + ታንዛኒያ + ዩጋንዳ + አሜሪካ + ዩዝበኪስታን + ቬንዙዌላ + የእንግሊዝ ድንግል ደሴቶች + የአሜሪካ ቨርጂን ደሴቶች + የመን + ደቡብ አፍሪካ + ዛምቢያ + + + + [\u135F ሀ ሁ ሂ ሃ ሄ ህ ሆ ለ ሉ ሊ ላ ሌ ል ሎ ሏ ሐ ሑ ሒ ሓ ሔ ሕ ሖ ሗ መ ሙ ሚ ማ ሜ ም ሞ ሟ ረ ሩ ሪ ራ ሬ ር ሮ ሯ ሰ ሱ ሲ ሳ ሴ ስ ሶ ሷ ሸ ሹ ሺ ሻ ሼ ሽ ሾ ሿ ቀ ቁ ቂ ቃ ቄ ቅ ቆ ቈ ቊ ቋ ቌ ቍ ቐ ቑ ቒ ቓ ቔ ቕ ቖ ቘ ቚ ቛ ቜ ቝ በ ቡ ቢ ባ ቤ ብ ቦ ቧ ቨ ቩ ቪ ቫ ቬ ቭ ቮ ቯ ተ ቱ ቲ ታ ቴ ት ቶ ቷ ቸ ቹ ቺ ቻ ቼ ች ቾ ቿ ኀ ኁ ኂ ኃ ኄ ኅ ኆ ኈ ኊ ኋ ኌ ኍ ነ ኑ ኒ ና ኔ ን ኖ ኗ ኘ ኙ ኚ ኛ ኜ ኝ ኞ ኟ አ ኡ ኢ ኣ ኤ እ ኦ ኧ ከ ኩ ኪ ካ ኬ ክ ኮ ኰ ኲ ኳ ኴ ኵ ኸ ኹ ኺ ኻ ኼ ኽ ኾ ዀ ዂ ዃ ዄ ዅ ወ ዉ ዊ ዋ ዌ ው ዎ ዐ ዑ ዒ ዓ ዔ ዕ ዖ ዘ ዙ ዚ ዛ ዜ ዝ ዞ ዟ ዠ ዡ ዢ ዣ ዤ ዥ ዦ ዧ የ ዩ ዪ ያ ዬ ይ ዮ ደ ዱ ዲ ዳ ዴ ድ ዶ ዷ ጀ ጁ ጂ ጃ ጄ ጅ ጆ ጇ ገ ጉ ጊ ጋ ጌ ግ ጎ ጐ ጒ ጓ ጔ ጕ ጘ ጙ ጚ ጛ ጜ ጝ ጞ ጟ ⶓ ⶔ ⶕ ⶖ ጠ ጡ ጢ ጣ ጤ ጥ ጦ ጧ ጨ ጩ ጪ ጫ ጬ ጭ ጮ ጯ ጸ ጹ ጺ ጻ ጼ ጽ ጾ ጿ ፈ ፉ ፊ ፋ ፌ ፍ ፎ ፏ ፐ ፑ ፒ ፓ ፔ ፕ ፖ ፗ] + [᎐ ᎑ ᎒ ᎓ ᎔ ᎕ ᎖ ᎗ ᎘ ᎙ ሇ ⶀ ᎀ ᎁ ᎂ ᎃ ⶁ ሠ ሡ ሢ ሣ ሤ ሥ ሦ ሧ ⶂ ⶃ ⶄ ቇ ᎄ ᎅ ᎆ ᎇ ⶅ ⶆ ⶇ ኇ ⶈ ⶉ ⶊ ኯ ዏ ⶋ ዯ ⶌ ዸ ዹ ዺ ዻ ዼ ዽ ዾ ዿ ⶍ ⶎ ጏ ⶏ ⶐ ⶑ ፇ ᎈ ᎉ ᎊ ᎋ ᎌ ᎍ ᎎ ᎏ ⶒ ፘ ፙ ፚ ⶠ ⶡ ⶢ ⶣ ⶤ ⶥ ⶦ ⶨ ⶩ ⶪ ⶫ ⶬ ⶭ ⶮ ⶰ ⶱ ⶲ ⶳ ⶴ ⶵ ⶶ ⶸ ⶹ ⶺ ⶻ ⶼ ⶽ ⶾ ⷀ ⷁ ⷂ ⷃ ⷄ ⷅ ⷆ ⷈ ⷉ ⷊ ⷋ ⷌ ⷍ ⷎ ⷐ ⷑ ⷒ ⷓ ⷔ ⷕ ⷖ ⷘ ⷙ ⷚ ⷛ ⷜ ⷝ ⷞ] + [ሀ ለ ሐ መ ረ ሰ ሸ ቀ ቈ ቐ ቘ በ ቨ ተ ቸ ኀ ኈ ነ ኘ አ ከ ኰ ኸ ዀ ወ ዐ ዘ ዠ የ ደ ጀ ገ ጐ ጘ ⶓ ⶔ ⶕ ⶖ ጠ ጨ ጸ ፈ ፐ] + + + + + + + + EEEE፡ dd MMMM ግርጋ y G + GyMMMMEEEEdd + + + + + dd MMMM y G + GyMMMMdd + + + + + dd-MMM-y G + GyMMMdd + + + + + dd/MM/yy GGGGG + GGGGGyyMMdd + + + + + + + + + ልደት + ካብኽ + ክብላ + ፋጅኺ + ክቢቅ + ም/ት + ኰር + ማርያ + ያኸኒ + መተሉ + ም/ም + ተሕሳ + + + ልደትሪ + 
ካብኽብቲ + ክብላ + ፋጅኺሪ + ክቢቅሪ + ምኪኤል ትጟኒሪ + ኰርኩ + ማርያም ትሪ + ያኸኒ መሳቅለሪ + መተሉ + ምኪኤል መሽወሪ + ተሕሳስሪ + + + + + + + + + + + + + + + + + + + + + + + ሰ/ቅ + ሰኑ + ሰሊጝ + ለጓ + ኣምድ + ኣርብ + ሰ/ሽ + + + ሰንበር ቅዳዅ + ሰኑ + ሰሊጝ + ለጓ ወሪ ለብዋ + ኣምድ + ኣርብ + ሰንበር ሽጓዅ + + + + + + + + + + + + + + + + + + ፋዱስ ጃብ + ፋዱስ ደምቢ + + + ፋዱስ ጃብ + ፋዱስ ደምቢ + + + + + + ይጅ + ኣድ + + + + + + EEEE፡ dd MMMM ግርጋ y G + GyMMMMEEEEdd + + + + + dd MMMM y + yMMMMdd + + + + + dd-MMM-y + yMMMdd + + + + + dd/MM/yy + yyMMdd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + latn + + latn + ethi + + + + + ¤#,##0.00 + + + + + + የብራዚል ሪል + + + የቻይና ዩአን ረንሚንቢ + + + Nfk + + + የኢትዮጵያ ብር + + + አውሮ + + + የእንግሊዝ ፓውንድ ስተርሊንግ + + + የሕንድ ሩፒ + + + የጃፓን የን + + + የራሻ ሩብል + + + የአሜሪካን ዶላር + + + + diff --git a/make/data/cldr/common/main/byn_ER.xml b/make/data/cldr/common/main/byn_ER.xml new file mode 100644 index 00000000000..508caef4825 --- /dev/null +++ b/make/data/cldr/common/main/byn_ER.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/ca.xml b/make/data/cldr/common/main/ca.xml index dc3dbe1b39d..3168fcdcb9e 100644 --- a/make/data/cldr/common/main/ca.xml +++ b/make/data/cldr/common/main/ca.xml @@ -1,6 +1,6 @@ - + + + + + + + + Caddo + + + United States + + + US + + + + [a á à {aː} {áː} {àː} b {ch} {chʼ} d e é è {eː} {éː} {èː} h i í ì {iː} {íː} {ìː} k {kʼ} m n o ó ò {oː} {óː} {òː} p s t {tsʼ} {tʼ} u ú ù {uː} {úː} {ùː} w y ˀ ʼ] + [c f g j l q r v x z] + [A B {CH} D E H I K M N O P S {SH} T {TS} U W Y] + [\- ‑ , . % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‐ ‑ – — , ; \: ! ? . 
… ' ‘ ’ " “ ” ( ) \[ \] § @ * / \& # † ‡ ′ ″] + + + + + + + + EEEE, MMMM d, y G + GyMMMMEEEEd + + + + + MMMM d, y G + GyMMMMd + + + + + MMM d, y G + GyMMMd + + + + + M/d/y GGGGG + GGGGGyMd + + + + + + + + + Cháykáhday Haˀimay + Tsahkápbiˀ + Wánit + Háshnihtiˀtiˀ + Háshnih Haˀimay + Háshnihtsiˀ + Násˀahˀatsus + Dahósikah nish + Híisikah nish + Nípbaatiˀtiˀ + Nípbaa Haˀimay + Cháykáhdaytiˀtiˀ + + + + + + + Inikuˀ + Wísts’i hayashuh + Bít hayashuh + Dahó hayashuh + Hiwí hayashuh + Dissik’an hayashuh + Inikuˀtiˀtiˀ + + + + + + + EEEE, MMMM d, y + yMMMMEEEEd + + + + + MMMM d, y + yMMMMd + + + + + MMM d, y + yMMMd + + + + + M/d/yy + yyMd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + + . + , + ; + % + + + - + E + + + NaN + + + + + ¤ 0K + ¤ 00K + ¤ 000K + ¤ 0M + ¤ 00M + ¤ 000M + ¤ 0G + ¤ 00G + ¤ 000G + ¤ 0T + ¤ 00T + ¤ 000T + + + + + + $ + + + + + + + Nish + Nish {0} + + + Wísts’iˀ inikuˀ + Wísts’iˀ inikuˀ {0} + + + Kaˀisch’áyˀah + {0} Kaˀisch’áyˀah + + + + diff --git a/make/data/cldr/common/main/cad_US.xml b/make/data/cldr/common/main/cad_US.xml new file mode 100644 index 00000000000..1ca1eff9b5a --- /dev/null +++ b/make/data/cldr/common/main/cad_US.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/cch.xml b/make/data/cldr/common/main/cch.xml new file mode 100644 index 00000000000..353e649ae35 --- /dev/null +++ b/make/data/cldr/common/main/cch.xml @@ -0,0 +1,180 @@ + + + + + + + + + + + Atsam + + + + [a {a\u0331} b c {ch} d {dy} e f g {g\u0331} {gb} {gw} {gy} h {hy} i j k ḵ {kp} {kw} l {ly} m n ṉ {ny} o p {ph} {py} r {ry} s {sh} t u v w {wh} y {y\u0331} z ʼ] + [A B C D E F G H I J K L M N O P Q R S T U V W X Y Z ʼ] + + + + + + + + EEEE, G y MMMM dd + GyMMMMEEEEdd + + + + + G y MMMM d + GyMMMMd + + + + + G y MMM d + GyMMMd + + + + + GGGGG yy/MM/dd + GGGGGyyMMdd + + + + + + + + + Dyon + Baa + Atat + Anas + Atyo + Achi + Atar + Awur + Shad + Shak + 
Naba + Nata + + + Pen Dyon + Pen Baʼa + Pen Atat + Pen Anas + Pen Atyon + Pen Achirim + Pen Atariba + Pen Awurr + Pen Shadon + Pen Shakur + Pen Kur Naba + Pen Kur Natat + + + + + + + Yok + Tung + Gitung + Tsan + Nas + Nat + Chir + + + Wai Yoka Bawai + Wai Tunga + Toki Gitung + Tsam Kasuwa + Wai Na Nas + Wai Na Tiyon + Wai Na Chirim + + + + + + Gabanin Miladi + Miladi + + + GM + M + + + + + + EEEE, y MMMM dd + yMMMMEEEEdd + + + + + y MMMM d + yMMMMd + + + + + y MMM d + yMMMd + + + + + yy/MM/dd + yyMMdd + + + + + + + HH:mm:ss zzzz + HHmmsszzzz + + + + + HH:mm:ss z + HHmmssz + + + + + HH:mm:ss + HHmmss + + + + + HH:mm + HHmm + + + + + + + + + + Aman + + + + + diff --git a/make/data/cldr/common/main/cch_NG.xml b/make/data/cldr/common/main/cch_NG.xml new file mode 100644 index 00000000000..9f11c337973 --- /dev/null +++ b/make/data/cldr/common/main/cch_NG.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/ccp.xml b/make/data/cldr/common/main/ccp.xml index dc39fe4d46d..29abe237696 100644 --- a/make/data/cldr/common/main/ccp.xml +++ b/make/data/cldr/common/main/ccp.xml @@ -1,6 +1,6 @@ - + + + + + + + + Chahta + + + + + + United States + + + + [a {a\u0331} b {ch} e f h {hl} i {i\u0331} k l m n o {o\u0331} p s {sh} t u v ʋ w y] + [c č d g j q r š x z] + [A {A\u0331} B {CH} E F H {HL} I {I\u0331} K L M N O {O\u0331} P S {SH} T U V Ʋ W Z] + [\- ‑ , . % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‐ ‑ – — , ; \: ! ? . 
… ' ‘ ’ " “ ” ( ) \[ \] § @ * / \& # † ‡ ′ ″] + + diff --git a/make/data/cldr/common/main/cho_US.xml b/make/data/cldr/common/main/cho_US.xml new file mode 100644 index 00000000000..7d860d2bed7 --- /dev/null +++ b/make/data/cldr/common/main/cho_US.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/chr.xml b/make/data/cldr/common/main/chr.xml index 159a91cd495..1d6274a3c4f 100644 --- a/make/data/cldr/common/main/chr.xml +++ b/make/data/cldr/common/main/chr.xml @@ -1,6 +1,6 @@ - + + + + + + + + Chikashshanompaʼ + + + United States + + + US + + + + [a {a\u0331} b {ch} f h i {i\u0331} k l {lh} m n {ng} o {o\u0331} p s {sh} t w y ʼ] + [á {á\u0331} c d e g í {í\u0331} j ó {ó\u0331} q r u v x z] + [A {A\u0331} B {CH} D E F H I {I\u0331} K L {LH} M N O {O\u0331} P S {SH} T V W Y] + [\- ‑ , . % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‐ ‑ – — , ; \: ! ? . … ' ‘ ’ " “ ” ( ) \[ \] § @ * / \& # † ‡ ′ ″] + + + + + + + + EEEE, MMMM d, y G + GyMMMMEEEEd + + + + + MMMM d, y G + GyMMMMd + + + + + MMM d, y G + GyMMMd + + + + + M/d/y GGGGG + GGGGGyMd + + + + + + + + + Hashiʼ Ammoʼnaʼ + Hashiʼ Atokloʼ + Hashiʼ Atochchíʼnaʼ + Iiplal + Mih + Choon + Choola + Akaas + Siptimpaʼ + Aaktopaʼ + Nofimpaʼ + Tiisimpaʼ + + + + + + + Nittak Holloʼ + Mantiʼ + Chostiʼ + Winstiʼ + Soistiʼ + Nannalhchifaʼ Nittak + Nittak Holloʼ Nakfish + + + + + + + EEEE, MMMM d, y + yMMMMEEEEd + + + + + MMMM d, y + yMMMMd + + + + + MMM d, y + yMMMd + + + + + M/d/yy + yyMd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + + . 
+ , + ; + % + + + - + E + + + NaN + + + + + ¤ 0K + ¤ 00K + ¤ 000K + ¤ 0M + ¤ 00M + ¤ 000M + ¤ 0G + ¤ 00G + ¤ 000G + ¤ 0T + ¤ 00T + ¤ 000T + + + + + + $ + + + + + + + Afammi + Afammi {0} + + + Hashiʼ alhpisaʼ + Hashiʼ alhpisaʼ {0} + + + Nittak hollo ittataklaʼ + Nittak hollo ittataklaʼ {0} + + + Nittak + {0} Nittak + + + Hashiʼ kanalli chaffaʼ + Hashiʼ kanalli chaffaʼ {0} + + + Hashiʼ kanallloshiʼ + Hashiʼ kanallloshiʼ {0} + + + + diff --git a/make/data/cldr/common/main/cic_US.xml b/make/data/cldr/common/main/cic_US.xml new file mode 100644 index 00000000000..3eaf1b33792 --- /dev/null +++ b/make/data/cldr/common/main/cic_US.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/ckb.xml b/make/data/cldr/common/main/ckb.xml index 87f70f1b56e..59c0ecee42f 100644 --- a/make/data/cldr/common/main/ckb.xml +++ b/make/data/cldr/common/main/ckb.xml @@ -1,6 +1,6 @@ - + + + + + + + + {0} : {1} + + + arabu + arabu mudernu + corsu + tedescu + tedescu austriacu + tedescu sguizzeru + inglese + inglese australianu + inglese canadianu + inglese americanu + inglese (S.U.) + spagnolu + francese + francese canadianu + francese sguizzeru + talianu + giappunese + cureanu + neerlandese + fiammingu + pulunese + purtughese + purtughese brasilianu + purtughese europeanu + russiu + tailandese + turcu + lingua scunnisciuta + chinese + chinese mandarinu + chinese simplificatu + mandarinu simplificatu + chinese tradiziunale + mandarinu tradiziunale + + + + + + + + + + + Mondu + Africa + Oceania + Americhe + Asia + Europa + America latina + Antarticu + Austria + Australia + Belgica + Canada + Svizzera + China + Costa Rica + Cuba + Republica cecca + Alemagna + Danimarca + Republica Duminicana + Spagna + Unione europea + Finlandia + Francia + Reame Unitu + R.U. 
+ Grecia + Guatemala + Ungheria + Irlanda + Israele + India + Iran + Islanda + Italia + Giappone + Libanu + Santa Lucia + San Martinu + Mungulia + Martinica + Messicu + Malesia + Nicaragua + Nederlanda + Nurvegia + Nova Zelanda + Panama + Perù + Filippine + Palestina + Portugallu + Serbia + Russia + Sluvacchia + Siria + Turchia + Nazioni Unite + Stati Uniti + S.U. + regione scunnisciuta + + + calendariu gregurianu + calendariu ISO 8601 + ordine di classificazione standardizatu + cifri occidentale + + + metricu + imperiale + americanu + + + lingua : {0} + scrittura : {0} + regione : {0} + + + + + left-to-right + top-to-bottom + + + + [a à b c {chj} d e è f g {ghj} h i ì ï j l m n o ò p q r s {sc} {sg} t u ù ü v z] + [â æ ç é ê ë î k ñ ô œ ú û w x y ÿ] + [A B C {CHJ} D E F G {GHJ} H I J L M N O P Q R S {SC} {SG} T U V Z] + [\- ‐ ‑ – — , ; \: ! ? . … ' ‘ ’ " “ ” ( ) \[ \] § @ * / \& # † ‡ ′ ″] + {0}… + + + « + » + « + » + + + + + + + + EEEE d MMMM 'di' 'u' y G + GyMMMMEEEEd + + + + + d MMMM 'di' 'u' y G + GyMMMMd + + + + + d MMM y G + GyMMMd + + + + + dd/MM/y GGGGG + GGGGGyMMdd + + + + + + + {1} 'à' {0} + + + + + {1} 'à' {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + + + + + ghj. + fer. + mar. + apr. + mag. + ghju. + lug. + aos. + sit. + ott. + nuv. + dic. + + + G + F + M + A + M + G + L + A + S + O + N + D + + + di ghjennaghju + di ferraghju + di marzu + d’aprile + di maghju + di ghjugnu + di lugliu + d’aostu + di sittembre + d’ottobre + di nuvembre + di dicembre + + + + + ghj. + fer. + mar. + apr. + mag. + ghju. + lug. + aos. + sit. + ott. + nuv. + dic. + + + G + F + M + A + M + G + L + A + S + O + N + D + + + ghjennaghju + ferraghju + marzu + aprile + maghju + ghjugnu + lugliu + aostu + sittembre + ottobre + nuvembre + dicembre + + + + + + + dum. + lun. + mar. + mer. + ghj. + ven. + sab. + + + D + L + M + M + G + V + S + + + du + lu + ma + me + gh + ve + sa + + + dumenica + luni + marti + mercuri + ghjovi + venneri + sabbatu + + + + + dum. + lun. + mar. + mer. 
+ ghj. + ven. + sab. + + + D + L + M + M + G + V + S + + + du + lu + ma + me + gh + ve + sa + + + dumenica + luni + marti + mercuri + ghjovi + venneri + sabbatu + + + + + + + T1 + T2 + T3 + T4 + + + 1 + 2 + 3 + 4 + + + 1u trimestru + 2u trimestru + 3u trimestru + 4u trimestru + + + + + T1 + T2 + T3 + T4 + + + 1u trimestru + 2u trimestru + 3u trimestru + 4u trimestru + + + + + + + AM + PM + + + AM + PM + + + AM + PM + + + + + AM + PM + + + AM + PM + + + AM + PM + + + + + + nanzu à Cristu + nanzu l’era cumuna + dopu à Cristu + di l’era cumuna + + + nz à C. + NEC + dp à C. + EC + + + nz à C. + NEC + dp à C. + EC + + + + + + EEEE d MMMM 'di' 'u' y + yMMMMEEEEd + + + + + d MMMM 'di' 'u' y + yMMMMd + + + + + d MMM y + yMMMd + + + + + dd/MM/y + yMMdd + + + + + + + HH:mm:ss zzzz + HHmmsszzzz + + + + + HH:mm:ss z + HHmmssz + + + + + HH:mm:ss + HHmmss + + + + + HH:mm + HHmm + + + + + + + {1}, {0} + + + {1}, {0} + + + + + {1}, {0} + + + {1}, {0} + + + + + {1} 'à' {0} + + + {1} 'à' {0} + + + + + {1} {0} + + + {1} {0} + + + + h B + h:mm B + h:mm:ss B + d + E + E h:mm B + E h:mm:ss B + E d + E h:mm a + E HH:mm + E h:mm:ss a + E HH:mm:ss + y G + dd/MM/y G + MMM y G + d MMM y G + E d MMM y G + h a + HH + h:mm a + HH:mm + h:mm:ss a + HH:mm:ss + h:mm:ss a v + HH:mm:ss v + h:mm a v + HH:mm v + L + dd/MM + E dd/MM + LLL + d MMM + E d MMM + d MMMM + 'settimana' W MMMM + mm:ss + y + MM/y + dd/MM/y + E dd/MM/y + MMM y + d MMM y + E d MMM y + LLLL 'di' 'u' y + QQQ y + QQQQ 'di' 'u' y + 'settimana' w 'di' 'u' Y + + + {0} {1} + + + {0} – {1} + + h a – h a + h – h a + + + HH – HH + + + h:mm a – h:mm a + h:mm – h:mm a + h:mm – h:mm a + + + HH:mm – HH:mm + HH:mm – HH:mm + + + h:mm a – h:mm a v + h:mm – h:mm a v + h:mm – h:mm a v + + + HH:mm – HH:mm v + HH:mm – HH:mm v + + + h a – h a v + h – h a v + + + HH – HH v + + + MM–MM + + + dd/MM – dd/MM + dd/MM – dd/MM + + + E dd/MM – dd/MM + E dd/MM – dd/MM + + + MMM – MMM + + + y–y + + + MMM–MMM y + + + d – d MMM y + d MMM y – d MMM y + + + LLLL–LLLL 
y + LLLL y – LLLL y + + + + + + + + era + + + annu + l’annu scorsu + quist’annu + l’annu chì vene + + + an. + l’annu scorsu + quist’annu + l’annu chì vene + + + a. + l’annu scorsu + quist’annu + l’annu chì vene + + + trimestru + + + trim. + + + tr. + + + mese + u mese scorsu + stu mese + u mese chì vene + + + m. + u mese scorsu + stu mese + u mese chì vene + + + m. + u mese scorsu + stu mese + u mese chì vene + + + settimana + a settimana scorsa + sta settimana + a settimana chì vene + a settimana di u {0} + + + sett. + a settimana scorsa + sta settimana + a settimana chì vene + a settimana di u {0} + + + st. + a settimana scorsa + sta settimana + a settimana chì vene + a settimana di u {0} + + + ghjornu + eri + oghje + dumane + + + ghj. + eri + oghje + dumane + + + g + eri + oghje + dumane + + + ghjornu di a settimana + + + ora + + + ora + + + o. + + + minutu + + + min. + + + m. + + + seconda + + + sec. + + + s. + + + fusu orariu + + + + +HH:mm;-HH:mm + UTC{0} + UTC + ora : {0} + {0} (ora d’estate) + {0} (ora usuale) + + + Tempu universale cuurdinatu + + + + + ora mediana di Greenwich + + + + + + latn + + latn + + 1 + + , +   + % + + + - + + E + × + + + NaN + + + + + #,##0.### + + + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #E0 + + + + + + + #,##0 % + + + + + + + #,##0 % + + + + + + + #,##0.00 ¤ + #,##0.00 + + + #,##0.00 ¤;(#,##0.00) ¤ + #,##0.00;(#,##0.00) + + + + + + + #,##0.00 ¤ + + + #,##0.00 ¤;(#,##0.00) ¤ + #,##0.00;(#,##0.00) + + + {0} {1} + + + + yuan chinese + + + euro + EUR + + + libra sterlina + libre sterline + £GB + + + rupia indiana + rupie indiane + INR + + + yen giappunese + + + rublu russiu + rubli russii + + + $US + + + muneta scunnisciuta + munete scunnisciute + + + + ≈{0} + ≥{0} + ≤{0} + {0}–{1} + + + + + {0} è {1} + {0} è {1} + + + + + iè:i + innò:n + + + diff --git a/make/data/cldr/common/main/co_FR.xml b/make/data/cldr/common/main/co_FR.xml new file mode 100644 index 00000000000..b70e6839dce --- /dev/null +++ 
b/make/data/cldr/common/main/co_FR.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/cs.xml b/make/data/cldr/common/main/cs.xml index 9b57f54591b..5355745f299 100644 --- a/make/data/cldr/common/main/cs.xml +++ b/make/data/cldr/common/main/cs.xml @@ -1,6 +1,6 @@ - + + + + + + + + а҆бха́зскїй + а҆ра́вскїй + а҆зербайджа́нскїй + бѣлорꙋ́сскїй + бо́лгарскїй + церковнослове́нскїй + нѣме́цкїй + а҆ѵстрі́йскїй нѣме́цкїй + є҆лветі́йскїй нѣме́цкїй + є҆́ллинскїй + а҆нглі́йскїй + а҆ѵстралі́йскїй а҆нглі́йскїй + кана́дскїй а҆нглі́йскїй + брїта́нскїй а҆нглі́йскїй + а҆нглі́йскїй (вели́каѧ брїта́нїа) + а҆мерїка́нскїй а҆нглі́йскїй + а҆нглі́йскїй (асд) + і҆спа́нскїй + латїноамерїка́нскїй і҆спа́нскїй + є҆ѵрѡпе́йскїй і҆спа́нскїй + і҆спанскїй (ме́ѯїка) + є҆сто́нскїй + фі́нскїй + францꙋ́зскїй + кана́дскїй францꙋ́зскїй + є҆лветі́йскїй францꙋ́зскїй + є҆вре́йскїй + а҆рме́нскїй + і҆талїа́нскїй + ꙗ҆пѡ́нскїй + і҆́верскїй + каза́хскїй + латі́нскїй + лїто́вскїй + латві́йскїй + портога́льскїй + бразі́льскїй портога́льскїй + є҆ѵрѡпе́йскїй портога́льскїй + дакорꙋмы́нскїй + молда́вскїй + рꙋ́сскїй + се́рбскїй + ᲂу҆краи́нскїй + невѣ́домый ѧ҆зы́къ + хи́нскїй + ᲂу҆проще́нный хи́нскїй + традїцїо́нный хи́нскїй + + + + + + + + + + + + + А҆ѵстралі́ѧ + бразі́лїа + бѣ́лаѧ рꙋ́сь + Кана́да + хи́нскаѧ страна̀ + герма́нїа + Дані́ѧ + га́ллїа + Вели́каѧ брїта́нїа + і҆́ндїа + і҆та́лїа + ꙗ҆пѡ́нїа + кирги́зїа + казахста́нъ + Ме́ѯїко + рѡссі́а + ᲂу҆краи́на + а҆мерїка̑нскїѧ соединє́нныѧ держа̑вы + невѣ́домаѧ страна̀ + + + григорїа́нскїй мѣсѧцесло́въ + канѡни́ческое ᲂу҆порѧ́доченїе + а҆раві́йстїи числові́и зна́цы + + + метрі́ческаѧ + а҆нглі́йскаѧ + а҆мерїка́нскаѧ + + + + + left-to-right + top-to-bottom + + + + [\u0487\uA67D \u0483 ҂ а б \u2DE0 в \u2DE1 г \u2DE2 д \u2DE3 е є ж \u2DE4 \u2DE5 ѕ з ꙁ и й і ї к \u2DE6 л \u2DE7 м \u2DE8 н \u2DE9 ѻ о \u2DEA п р \u2DEC с \u2DED т у ꙋ ф х \u2DEF ѡ ѿ ꙍ ѽ ц ч \u2DF1 ш щ ⸯ ꙿ ъ ы ь ѣ ю ѫ ꙗ ѧ ѯ ѱ ѳ ѵ ѷ \u2DF4] + 
[\u0488\u0489\u200C\u200D\uA670\uA671\uA672\uFE2F \u0484 \uFE2E \uA66F \u2DF6 ꙣ \u2DF7 \uA674 ꙃ ꙅ \uA675 \uA676 ꙇ ꙉ \u2DF8 ꙥ ꙧ ҥ ꙩꙫꙭꙮꚙꚛ \u2DEB ҁ \u2DF5 \u2DEE \uA677 \u2DF9 \uA69E \uA67B \u2DF0 ꙡ џ \u2DF2 \u2DF3 ꙏ \uA678 ꙑ \uA679 \uA67A \u2DFA ꙓ \u2DFB ꙕ \u2DFC ѥ \uA69F \u2DFD ꙙ \u2DFE ꙛ ѩ ꙝ ѭ \u2DFF ꙟ] + [А Б В Г Д Є Ж Ѕ З И І К Л М Н Ѻ О П Р С Т Ꙋ Ф Х Ѡ Ѿ Ц Ч Ш Щ Ъ Ы Ь Ѣ Ю Ѫ Ꙗ Ѧ Ѯ Ѱ Ѳ Ѵ] + [  \- ‑ , % ‰ + 0 1 2 3 4 5 6 7 8 9] + [_ \- ‐ ‑ – — ⹃ , ; \: ! ? . ( ) ꙳ / ꙾] + + + « + » + + + + + + + + + + G y MMMM d, EEEE + GyMMMMEEEEd + + + + + G y MMMM d + GyMMMMd + + + + + G y MMM d + GyMMMd + + + + + GGGGG y-MM-dd + GGGGGyMMdd + + + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + {0} – {1} + + + + + + + + і҆аⷩ҇ + феⷡ҇ + маⷬ҇ + а҆пⷬ҇ + маꙵ + і҆ꙋⷩ҇ + і҆ꙋⷧ҇ + а҆́ѵⷢ҇ + сеⷫ҇ + ѻ҆кⷮ + ноеⷨ + деⷦ҇ + + + І҆ + Ф + М + А҆ + М + І҆ + І҆ + А҆ + С + Ѻ҆ + Н + Д + + + і҆аннꙋа́рїа + феврꙋа́рїа + ма́рта + а҆прі́ллїа + ма́їа + і҆ꙋ́нїа + і҆ꙋ́лїа + а҆́ѵгꙋста + септе́мврїа + ѻ҆ктѡ́врїа + ное́мврїа + деке́мврїа + + + + + і҆аⷩ҇ + феⷡ҇ + маⷬ҇ + а҆пⷬ҇ + маꙵ + і҆ꙋⷩ҇ + і҆ꙋⷧ҇ + а҆́ѵⷢ҇ + сеⷫ҇ + ѻ҆кⷮ + ноеⷨ + деⷦ҇ + + + І҆ + Ф + М + А҆ + М + І҆ + І҆ + А҆ + С + Ѻ҆ + Н + Д + + + і҆аннꙋа́рїй + феврꙋа́рїй + ма́ртъ + а҆прі́ллїй + ма́їй + і҆ꙋ́нїй + і҆ꙋ́лїй + а҆́ѵгꙋстъ + септе́мврїй + ѻ҆ктѡ́врїй + ное́мврїй + деке́мврїй + + + + + + + ндⷧ҇ѧ + пнⷣе + втоⷬ҇ + срⷣе + чеⷦ҇ + пѧⷦ҇ + сꙋⷠ҇ + + + Н + П + В + С + Ч + П + С + + + ндⷧ҇ѧ + пнⷣе + втоⷬ҇ + срⷣе + чеⷦ҇ + пѧⷦ҇ + сꙋⷠ҇ + + + недѣ́лѧ + понедѣ́льникъ + вто́рникъ + среда̀ + четверто́къ + пѧто́къ + сꙋббѡ́та + + + + + ндⷧ҇ѧ + пнⷣе + втоⷬ҇ + срⷣе + чеⷦ҇ + пѧⷦ҇ + сꙋⷠ҇ + + + Н + П + В + С + Ч + П + С + + + ндⷧ҇ѧ + пнⷣе + втоⷬ҇ + срⷣе + чеⷦ҇ + пѧⷦ҇ + сꙋⷠ҇ + + + недѣ́лѧ + понедѣ́льникъ + вто́рникъ + среда̀ + четверто́къ + пѧто́къ + сꙋббѡ́та + + + + + + + а҃_ѧ че́тверть + в҃_ѧ че́тверть + г҃_ѧ че́тверть + д҃_ѧ че́тверть + + + а҃ + в҃ + г҃ + д҃ + + + а҃_ѧ че́тверть + в҃_ѧ че́тверть + г҃_ѧ че́тверть + д҃_ѧ че́тверть + + + + + а҃ + в҃ + 
г҃ + д҃ + + + а҃ + в҃ + г҃ + д҃ + + + а҃_ѧ че́тверть + в҃_ѧ че́тверть + г҃_ѧ че́тверть + д҃_ѧ че́тверть + + + + + + + ДП + ПП + + + ДП + ПП + + + + + ДП + ПП + + + ДП + ПП + + + ДП + ПП + + + + + + пре́дъ р. х. + по р. х. + + + пре́дъ р. х. + пре́дъ р. х. + ѿ р. х. + ѿ р. х. + + + + + + EEEE, d MMMM 'л'. y. + yMMMMEEEEd + + + + + y MMMM d + yMMMMd + + + + + y MMM d + yMMMd + + + + + y.MM.dd + yMMdd + + + + + + + HH:mm:ss zzzz + HHmmsszzzz + + + + + HH:mm:ss z + HHmmssz + + + + + HH:mm:ss + HHmmss + + + + + HH:mm + HHmm + + + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + {0} – {1} + + + + + + + вѣ́къ + + + лѣ́то + + + л. + + + л. + + + че́тверть + + + чеⷡ҇ + + + чеⷡ҇ + + + мѣ́сѧцъ + + + мцⷭ҇ъ + + + мцⷭ҇ъ + + + седми́ца + + + сеⷣ + + + сеⷣ + + + де́нь + вчера̀ + дне́сь + наꙋ́трїе + + + деⷩ҇ + + + деⷩ҇ + + + де́нь седми́цы + + + ДП/ПП + + + ча́съ + + + чаⷭ҇ + + + чаⷭ҇ + + + минꙋ́та + + + миⷩ҇ + + + миⷩ҇ + + + секꙋ́нда + + + сеⷦ҇ + + + сеⷦ҇ + + + по́ѧсъ часовѡ́мъ + + + + +HH:mm;-HH:mm + GMT{0} + GMT + {0} (вре́мѧ) + {0} (лѣ́тнее вре́мѧ) + {0} (зи́мнее вре́мѧ) + {1} ({0}) + + + всемі́рное сѷгхронїзи́рованное вре́мѧ + + + + невѣ́домый гра́дъ + + + ми́нскъ + + + бишке́къ + + + а҆кта́ꙋ + + + ᲂу҆ра́льскъ + + + а҆ктю́бинскъ + + + кызылѻрда̀ + + + а҆лматы̀ + + + калинингра́дъ + + + москва̀ + + + волгогра́дъ + + + сама́ра + + + є҆катерїнбꙋ́ргъ + + + ѻ҆́мскъ + + + новосиби́рскъ + + + новокꙋзне́цкъ + + + красноѧ́рскъ + + + и҆ркꙋ́тскъ + + + чита̀ + + + ꙗ҆кꙋ́тскъ + + + владивосто́къ + + + ха́ндыга + + + сахали́нъ + + + ᲂу҆́сть_не́ра + + + магада́нъ + + + среднеколы́мскъ + + + петропа́ѵловскъ_камча́тскїй + + + а҆на́дырь + + + ᲂу҆́жградъ + + + кі́евъ + + + сѷмферꙋ́поль + + + запра́жїе + + + + среднеамерїка́нское вре́мѧ + среднеамерїка́нское зи́мнее вре́мѧ + среднеамерїка́нское лѣ́тнее вре́мѧ + + + + + восточноамерїка́нское вре́мѧ + восточноамерїка́нское зи́мнее вре́мѧ + восточноамерїка́нское лѣ́тнее вре́мѧ + + + + + а҆мерїка́нское наго́рнее вре́мѧ + 
а҆мерїка́нское наго́рнее зи́мнее вре́мѧ + а҆мерїка́нское наго́рнее лѣ́тнее вре́мѧ + + + + + тихоѻкеа́нское вре́мѧ + тихоѻкеа́нское зи́мнее вре́мѧ + тихоѻкеа́нское лѣ́тнее вре́мѧ + + + + + а҆тланті́ческое вре́мѧ + а҆тланті́ческое зи́мнее вре́мѧ + а҆тланті́ческое лѣ́тнее вре́мѧ + + + + + среднеєѵрѡпе́йское вре́мѧ + среднеєѵрѡпе́йское зи́мнее вре́мѧ + среднеєѵрѡпе́йское лѣ́тнее вре́мѧ + + + + + восточноєѵрѡпе́йское вре́мѧ + восточноєѵрѡпе́йское зи́мнее вре́мѧ + восточноєѵрѡпе́йское лѣ́тнее вре́мѧ + + + + + вре́мѧ въ калинингра́дѣ и҆ ми́нскѣ + + + + + западноєѵрѡпе́йское вре́мѧ + западноєѵрѡпе́йское зи́мнее вре́мѧ + западноєѵрѡпе́йское лѣ́тнее вре́мѧ + + + + + сре́днее вре́мѧ по грі́нꙋичꙋ + + + + + и҆ркꙋ́тское вре́мѧ + и҆ркꙋ́тское зи́мнее вре́мѧ + и҆ркꙋ́тское лѣ́тнее вре́мѧ + + + + + восто́чный казахста́нъ + + + + + за́падный казахста́нъ + + + + + красноѧ́рское вре́мѧ + красноѧ́рское зи́мнее вре́мѧ + красноѧ́рское лѣ́тнее вре́мѧ + + + + + кирги́зїа + + + + + магада́нское вре́мѧ + магада́нское зи́мнее вре́мѧ + магада́нское лѣ́тнее вре́мѧ + + + + + моско́вское вре́мѧ + моско́вское зи́мнее вре́мѧ + моско́вское лѣ́тнее вре́мѧ + + + + + новосиби́рское вре́мѧ + новосиби́рское зи́мнее вре́мѧ + новосиби́рское лѣ́тнее вре́мѧ + + + + + ѻ҆́мское вре́мѧ + ѻ҆́мское зи́мнее вре́мѧ + ѻ҆́мское лѣ́тнее вре́мѧ + + + + + вре́мѧ на сахали́нѣ + зи́мнее вре́мѧ на сахали́нѣ + лѣ́тнее вре́мѧ на сахали́нѣ + + + + + владивосто́цкое вре́мѧ + владивосто́цкое зи́мнее вре́мѧ + владивосто́цкое лѣ́тнее вре́мѧ + + + + + волгогра́дское вре́мѧ + волгогра́дское зи́мнее вре́мѧ + волгогра́дское лѣ́тнее вре́мѧ + + + + + ꙗ҆кꙋ́тское вре́мѧ + ꙗ҆кꙋ́тское зи́мнее вре́мѧ + ꙗ҆кꙋ́тское лѣ́тнее вре́мѧ + + + + + є҆катерїнбꙋ́ржское вре́мѧ + є҆катерїнбꙋ́ржское зи́мнее вре́мѧ + є҆катерїнбꙋ́ржское лѣ́тнее вре́мѧ + + + + + + latn + + cyrl + + + , +   + % + + + - + + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0 % + + + + + + + #,##0.00 ¤ + + + {0} {1} + + + + бразі́льскїй реа́лъ + бразі́льскагѡ реа́ла + R$ + R$ 
+ + + бѣлорꙋ́сскїй рꙋ́бль + бѣлорꙋ́сскагѡ рꙋблѧ̀ + BYN + р. + + + бѣлорꙋ́сскїй рꙋ́бль (2000–2016) + бѣлорꙋ́сскагѡ рꙋблѧ̀ (2000–2016) + BYR + + + хи́нскїй ю҆а́нь + хи́нскагѡ ю҆а́нѧ + CN¥ + ¥ + + + є҆́ѵрѡ + є҆́ѵра + + + + + а҆нглі́йскїй фꙋ́нтъ сте́рлингѡвъ + а҆нглі́йскагѡ фꙋ́нта сте́рлингѡвъ + £ + £ + + + і҆нді́йскаѧ рꙋ́пїѧ + і҆нді́йскїѧ рꙋ́пїи + + + + + ꙗ҆пѡ́нскаѧ і҆е́на + ꙗ҆пѡ́нскїѧ і҆е́ны + JP¥ + ¥ + + + кирги́зскїй сꙋ́мъ + кирги́зскагѡ сꙋ́ма + KGS + + + каза́хскаѧ деньга̀ + каза́хскїѧ деньгѝ + + + + + рѡссі́йскїй рꙋ́бль + рѡссі́йскагѡ рꙋблѧ̀ + + + + + ᲂу҆краи́нскаѧ гри́вна + ᲂу҆краи́нскїѧ гри́вны + + + + + а҆мерїка́нскїй до́лларъ + а҆мерїка́нскагѡ до́ллара + $ + $ + + + невѣ́домое пла́тное сре́дство + невѣ́домагѡ пла́тнагѡ сре́дства + + + + + + h:mm + + + h:mm:ss + + + m:ss + + + + + {0}, {1} + {0}, {1} + {0} и҆ {1} + {0} и҆ {1} + + + {0}, {1} + {0}, {1} + {0}, {1} + {0}, {1} + + + {0}, {1} + {0}, {1} + {0}, {1} + {0}, {1} + + + {0}, {1} + {0}, {1} + {0}, {1} + {0}, {1} + + + + + є҆́й:є + нѝ:н + + + diff --git a/make/data/cldr/common/main/cu_RU.xml b/make/data/cldr/common/main/cu_RU.xml new file mode 100644 index 00000000000..120c8710823 --- /dev/null +++ b/make/data/cldr/common/main/cu_RU.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/cv.xml b/make/data/cldr/common/main/cv.xml index 07a49b6c516..a6422ca0f3d 100644 --- a/make/data/cldr/common/main/cv.xml +++ b/make/data/cldr/common/main/cv.xml @@ -1,6 +1,6 @@ - + + + + + + + + ދިވެހިބަސް + + + ދިވެހި ރާއްޖެ + + + + + right-to-left + + + + [ހ ށ ނ ރ ބ ޅ ކ އ ވ މ ފ ދ ތ ލ ގ ޏ ސ ޑ ޒ ޓ ޔ ޕ ޖ ޗ \u07A6 \u07A7 \u07A8 \u07A9 \u07AA \u07AB \u07AC \u07AD \u07AE \u07AF \u07B0] + [\u200C\u200D ޙ ޚ ޜ ޢ ޣ ޥ ޛ ޘ ޠ ޡ ޤ ޝ ޞ ޟ ޱ] + [ހ ށ ނ ރ ބ ޅ ކ އ ވ މ ފ ދ ތ ލ ގ ޏ ސ ޑ ޒ ޓ ޔ ޕ ޖ ޗ] + + + + + + + + EEEE d MMMM y G + GyMMMMEEEEd + + + + + d MMMM y G + GyMMMMd + + + + + dd-MM-y G + GyMMdd + + + + + d-M-yy GGGGG + GGGGGyyMd + + + + + + + + + EEEE d MMMM y + yMMMMEEEEd + + + + + d MMMM y + yMMMMd + + + + + 
dd-MM-y + yMMdd + + + + + d-M-yy + yyMd + + + + + + + HH:mm:ss zzzz + HHmmsszzzz + + + + + HH:mm:ss z + HHmmssz + + + + + HH:mm:ss + HHmmss + + + + + HH:mm + HHmm + + + + + + + + + arab + + + ، + + + , + + + + + #,##,##0.### + + + + + + + #,##,##0% + + + + + + + ¤ #,##,##0.00 + + + + + + ރ. + + + + diff --git a/make/data/cldr/common/main/dv_MV.xml b/make/data/cldr/common/main/dv_MV.xml new file mode 100644 index 00000000000..7d07ed90b72 --- /dev/null +++ b/make/data/cldr/common/main/dv_MV.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/dyo.xml b/make/data/cldr/common/main/dyo.xml index 46cc8d4d63e..4d83bbf6400 100644 --- a/make/data/cldr/common/main/dyo.xml +++ b/make/data/cldr/common/main/dyo.xml @@ -1,6 +1,6 @@ - + + + + + + + + + Ἀραβικά + Ἀραμαϊκά + Οὐαλικά + Αἰγυπτιακὰ (ἀρχαῖα) + Ἑλληνικά + Ἀγγλικά + Ἱσπανικά + Ἐσθονικά + Ἰρλανδικά + Σκωτικὰ κελτικά + Ἀρχαῖα Ἑλληνικά + Ἑβραϊκά + Οὑγγρικά + Ἀρμενικά + Ἰνδονησιακά + Ἰσλανδικά + Ἰταλικά + Ἰαπωνικά + Πολλαπλές γλῶσσες + Ὁλλανδικά + Τουρκικά, ὀθωμανικὰ + Ἀρχαῖα περσικὰ + Ἀλβανικά + Οὐκρανικά + Ἰουδαϊκά + + + + + + + + + + Ἀνδόρα + Ἠνωμένα Ἀραβικὰ Ἐμιράτα + Ἀφγανιστάν + Ἀντίγκουα καὶ Μπαρμπούντα + Ἀνγκουίλα + Ἀλβανία + Ἀρμενία + Ἀνγκόλα + Ἀνταρκτική + Ἀργεντινή + Ἀμερικανικὴ Σαμόα + Αὐστρία + Αὐστραλία + Ἀρούμπα + Ἀζερμπαϊτζάν + Βοσνία - Ἐρζεγοβίνη + Βερμοῦδες + Νῆσος Μπουβέ + Νῆσοι Κόκος (Κήλινγκ) + Κονγκό, Λαϊκὴ Δημοκρατία τοῦ + Κεντροαφρικανικὴ Δημοκρατία + Ἑλβετία + Ἀκτὴ Ἐλεφαντοστού + Ακτή Ελεφαντοστού + Νῆσοι Κούκ + Πράσινο Ἀκρωτήριο + Νῆσος Χριστουγέννων + Δομινικανὴ Δημοκρατία + Ἀλγερία + Ἰσημερινός + Ἐσθονία + Αἴγυπτος + Δυτικὴ Σαχάρα + Ἐρυθραία + Ἱσπανία + Αἰθιοπία + Εὐρωπαϊκὴ ᾿Ένωση + Μικρονησία, Ὁμόσπονδες Πολιτεῖες τῆς + Νῆσοι Φερόες + Ἡνωμένο Βασίλειο + Γαλλικὴ Γουιάνα + Ἰσημερινὴ Γουινέα + Ἑλλάδα + Νότια Γεωργία καὶ Νότιες Νήσοι Σάντουιτς + Χὸνγκ Κόνγκ, Εἰδικὴ Διοικητικὴ Περιφέρεια τῆς Κίνας + Νῆσοι Χὲρντ καὶ Μακντόναλντ + Ὁνδούρα + Ἁϊτή + Οὑγγαρία + Ἰνδονησία + 
Ἰρλανδία + Ἰσραήλ + Ἰνδία + Βρετανικὰ Ἐδάφη Ἰνδικοῦ Ὠκεανοῦ + Ἰράκ + Ἰράν, Ἰσλαμικὴ Δημοκρατία τοῦ + Ἰσλανδία + Ἰταλία + Ἰορδανία + Ἰαπωνία + Σαὶντ Κὶτς καὶ Νέβις + Νῆσοι Κέιμαν + Λατινικὴ Ἀμερική + Ἁγία Λουκία + Σρὶ Λάνκα + Λουξεμβοῦργο + Μολδαβία, Δημοκρατία τῆς + Νῆσοι Μάρσαλ + Μαλί + Μακάο, Εἰδικὴ Διοικητικὴ Περιφέρεια τῆς Κίνας + Νῆσοι Βόρειες Μαριάνες + Νῆσος Νόρφολκ + Ὁλλανδία + Ὀμάν + Γαλλικὴ Πολυνησία + Σαὶντ Πιὲρ καὶ Μικελόν + Παλαιστινιακὰ Ἐδάφη + Σαουδικὴ Ἀραβία + Νῆσοι Σολομῶντος + Ἁγία Ἑλένη + Νῆσοι Σβάλμπαρ καὶ Γιὰν Μαγιέν + Ἅγιος Μαρίνος + Σάο Τομὲ καὶ Πρίνσιπε + Ἒλ Σαλβαδόρ + Συρία, Ἀραβικὴ Δημοκρατία τῆς + Νῆσοι Τὲρκς καὶ Κάικος + Τσάντ + Γαλλικὰ Νότια Ἐδάφη + Ἀνατολικὸ Τιμόρ + Τρινιδὰδ καὶ Τομπάγκο + Οὐκρανία + Οὐγκάντα + Ἀπομακρυσμένες Νησίδες τῶν Ἡνωμένων Πολιτειῶν + Ἡνωμένες Πολιτεῖες + Οὐρουγουάη + Οὐζμπεκιστάν + Ἁγία Ἕδρα (Βατικανό) + Ἅγιος Βικέντιος καὶ Γρεναδίνες + Βρετανικὲς Παρθένοι Νῆσοι + Ἀμερικανικὲς Παρθένοι Νῆσοι + Νῆσοι Οὐάλλις καὶ Φουτουνά + Ὑεμένη + Νότια Ἀφρική + + + Ἡμερολόγιο + + + Βουδιστικὸ ἡμερολόγιο + Κινεζικὸ ἡμερολόγιο + Γρηγοριανὸ ἡμερολόγιο + Ἑβραϊκὸ ἡμερολόγιο + Ἰσλαμικὸ ἡμερολόγιο + Ἰσλαμικὸ ἀστικὸ ἡμερολόγιο + Ἰαπωνικὸ ἡμερολόγιο + Σειρὰ τηλεφωνικοῦ καταλόγου + Σειρὰ Πίνγιν + Σειρὰ Stroke + + + + [α ἀ ἄ ἂ ἆ ἁ ἅ ἃ ἇ ά ὰ ᾶ β γ δ ε ἐ ἔ ἒ ἑ ἕ ἓ έ ὲ ζ η ἠ ἤ ἢ ἦ ἡ ἥ ἣ ἧ ή ὴ ῆ θ ι ἰ ἴ ἲ ἶ ἱ ἵ ἳ ἷ ί ὶ ῖ ϊ ΐ ῒ ῗ κ λ μ ν ξ ο ὄ ὂ ὃ ό ὸ π ρ σ ς τ υ ὐ ὔ ὒ ὖ ὑ ὕ ὓ ὗ ύ ὺ ῦ ϋ ΰ ῢ ῧ φ χ ψ ω ὤ ὢ ὦ ὥ ὣ ὧ ώ ὼ ῶ] + [] + + + + + + + + + + + + Ιαν + Φεβ + Μαρ + Απρ + Μαΐ + Ιουν + Ιουλ + Αὐγ + Σεπ + Ὀκτ + Νοε + Δεκ + + + Ιανουαρίου + Φεβρουαρίου + Μαρτίου + Απριλίου + Μαΐου + Ιουνίου + Ιουλίου + Αὐγούστου + Σεπτεμβρίου + Ὀκτωβρίου + Νοεμβρίου + Δεκεμβρίου + + + + + Ιανουάριος + Φεβρουάριος + Μάρτιος + Απρίλιος + Μάιος + Ιούνιος + Ιούλιος + Αὔγουστος + Σεπτέμβριος + Ὀκτώβριος + Νοέμβριος + Δεκέμβριος + + + + + + + + + + Πεσέτα Ἀνδόρας + + + Ντιρὰμ Ἡνωμένων Ἀραβικῶν Ἐμιράτων + + + Λὲκ Ἀλβανίας + + + Dram Ἀρμενίας + + + Γκίλντα Ὁλλανδικῶν 
Ἀντιλλῶν + + + Kwanza Ἀνγκόλας + + + Kwanza Ἀνγκόλας (1977–1990) + + + Νέα Kwanza Ἀνγκόλας (1990–2000) + + + Kwanza Reajustado Ἀνγκόλας (1995–1999) + + + Austral Ἀργεντινῆς + + + Πέσο Ἀργεντινῆς (1983–1985) + + + Πέσο Ἀργεντινῆς + + + Σελίνι Αὐστρίας + + + Δολάριο Αὐστραλίας + + + Γκίλντα Ἀρούμπα + + + Μανὰτ Ἀζερμπαϊτζάν + + + Δηνάριο Βοσνίας-Ἑρζεγοβίνης + + + Μάρκο Βοσνίας-Ἑρζεγοβίνης + + + Φράγκο Βελγίου (οἰκονομικό) + + + Μεταλλικὸ Λὲβ Βουλγαρίας + + + Νέο Λὲβ Βουλγαρίας + + + Δολάριο Καναδᾶ + + + Φράγκο Ἑλβετίας + + + Unidades de Fomento Χιλῆς + + + Πέσο Χιλῆς + + + Σκληρὴ Κορόνα Τσεχοσλοβακίας + + + Ἐσκούδο Πράσινου Ἀκρωτηρίου + + + Ostmark Ἀνατολικῆς Γερμανίας + + + Δηνάριο Ἀλγερίας + + + Sucre Ἰσημερινοῦ + + + Unidad de Valor Constante (UVC) Ἰσημερινοῦ + + + Κορόνα Ἐστονίας + + + Λίρα Αἰγύπτου + + + Nakfa Ἐρυθραίας + + + Πεσέτα Ἱσπανίας + + + Birr Αἰθιοπίας + + + Εὐρώ + + + Λίρα Νήσων Φώλκλαντ + + + Dalasi Γκάμπιας + + + Ekwele Guineana Ἰσημερινῆς Γουινέας + + + Quetzal Γουατεμάλας + + + Γκινέα Ἐσκούδο Πορτογαλίας + + + Δολάριο Χὸνγκ Κόνγκ + + + Gourde Ἁϊτῆς + + + Φιορίνι Οὑγγαρίας + + + Ρούπια Ἰνδονησίας + + + Λίρα Ἰρλανδίας + + + Λίρα Ἰσραήλ + + + Νέο Sheqel Ἰσραήλ + + + Ρούπια Ἰνδίας + + + Δηνάριο Ἰράκ + + + Rial Ἰράκ + + + Κορόνα Ἰσλανδίας + + + Λιρέτα Ἰταλίας + + + Δηνάριο Ἰορδανίας + + + Γιὲν Ἰαπωνίας + + + Ρούπια Σρὶ Λάνκας + + + Pataca Μακάου + + + Πέσο Μεξικοῦ + + + Ἀσημένιο Πέσο Μεξικοῦ (1861–1992) + + + Unidad de Inversion (UDI) Μεξικοῦ + + + Ἐσκούδο Μοζαμβίκης + + + Χρυσὴ Κόρδοβα Νικαράγουας + + + Γκίλντα Ὁλλανδίας + + + Μπαλμπόα Παναμᾶ + + + Kina Παπούα Νέα Γουινέας + + + Ἐσκούδο Πορτογαλίας + + + Γκουαρανὶ Παραγουάης + + + Δολάριο Νήσων Σολομῶντος + + + Ρούπια Σεϋχελῶν + + + Λίρα Ἀγίας Ἑλένης + + + Σοβιετικὸ Ρούβλι + + + Colon Ἒλ Σαλβαδόρ + + + Lilangeni Ζουαζιλάνδης + + + Μπὰτ Ταϊλάνδης + + + Μανὰτ Τουρκμενιστάν + + + Ἐσκούδο Τιμόρ + + + Δολάριο Τρινιδὰδ καὶ Τομπάγκο + + + Hryvnia Οὐκρανίας + + + Karbovanetz Οὐκρανίας + + + Σελίνι Οὐγκάντας 
(1966–1987) + + + Σελίνι Οὐγκάντας + + + Δολάριο ΗΠΑ (Ἑπόμενη ἡμέρα) + + + Δολάριο ΗΠΑ (Ἴδια ἡμέρα) + + + Πέσο Οὐρουγουάης (1975–1993) + + + Πέσο Uruguayo Οὐρουγουάης + + + Sum Οὐζμπεκιστάν + + + Μπολιβὰλ Βενεζουέλας + + + Tala Δυτικῆς Σαμόας + + + Εὐρωπαϊκὴ Σύνθετη Μονάδα + + + Εὐρωπαϊκὴ Νομισματικὴ Μονάδα + + + Εὐρωπαϊκὴ Μονάδα Λογαριασμοῦ (XBC) + + + Εὐρωπαϊκὴ Μονάδα Λογαριασμοῦ (XBD) + + + Δολάριο Ἀνατολικῆς Καραϊβικῆς + + + Εἰδικὰ Δικαιώματα Ἀνάληψης + + + Εὐρωπαϊκὴ Συναλλαγματικὴ Μονάδα + + + Χρυσὸ Φράγκο Γαλλίας + + + Δηνάριο Ὑεμένης + + + Rial Ὑεμένης + + + Μεταλλικὸ Δηνάριο Γιουγκοσλαβίας + + + Ραντ Νότιας Ἀφρικῆς (οἰκονομικό) + + + Ρὰντ Νότιας Ἀφρικῆς + + + + + + Ναί + Ὄχι + + + diff --git a/make/data/cldr/common/main/en.xml b/make/data/cldr/common/main/en.xml index a6c7c0e7ae5..32abf37721f 100644 --- a/make/data/cldr/common/main/en.xml +++ b/make/data/cldr/common/main/en.xml @@ -1,6 +1,6 @@ - - - Sinbad + + Zendaya - + Irene Adler - - John + + Mary Sue Hamish Watson - - Prof. Dr. + + Mr. + Bertram Wilberforce + Bertie + Henry Robert + ∅∅∅ + Wooster + ∅∅∅ + Jr + MP + + + Sinbad + + + Käthe + Müller + + + Zäzilia + Hamish + Stöber + + + Prof. Dr. Ada Cornelia Neele - Eva Sophia - van den - Wolf - Becker Schmidt - M.D. Ph.D. 
+ César Martín + von + Brühl + González Domingo + Jr + MD DDS diff --git a/make/data/cldr/common/main/en_001.xml b/make/data/cldr/common/main/en_001.xml index 62ca51a8325..d3b2346c27a 100644 --- a/make/data/cldr/common/main/en_001.xml +++ b/make/data/cldr/common/main/en_001.xml @@ -1,6 +1,6 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 𐐎𐐲𐑉𐑊𐐼 + 𐐈𐑁𐑉𐐲𐐿𐐲 + 𐐤𐐱𐑉𐑃 𐐊𐑋𐐯𐑉𐐲𐐿𐐲 + 𐐝𐐵𐑃 𐐊𐑋𐐯𐑉𐐲𐐿𐐲 + 𐐄𐑇𐐨𐐰𐑌𐐨𐐲 + 𐐎𐐯𐑅𐐻𐐲𐑉𐑌 𐐈𐑁𐑉𐐲𐐿𐐲 + 𐐝𐐯𐑌𐐻𐑉𐐲𐑊 𐐊𐑋𐐯𐑉𐐲𐐿𐐲 + 𐐀𐑅𐐻𐐲𐑉𐑌 𐐈𐑁𐑉𐐲𐐿𐐲 + 𐐤𐐱𐑉𐑄𐐲𐑉𐑌 𐐈𐑁𐑉𐐲𐐿𐐲 + 𐐣𐐮𐐼𐑊 𐐈𐑁𐑉𐐮𐐿𐐲 + 𐐝𐐲𐑄𐐲𐑉𐑌 𐐈𐑁𐑉𐐲𐐿𐐲 + 𐐊𐑋𐐯𐑉𐐲𐐿𐐲𐑆 + 𐐤𐐱𐑉𐑄𐐲𐑉𐑌 𐐊𐑋𐐯𐑉𐐲𐐿𐐲 + 𐐗𐐯𐑉𐐲𐐺𐐨𐐲𐑌 + 𐐀𐑅𐐻𐐲𐑉𐑌 𐐁𐑈𐐲 + 𐐝𐐲𐑄𐐲𐑉𐑌 𐐁𐑈𐐲 + 𐐝𐐵𐑃-𐐀𐑅𐐻𐐲𐑉𐑌 𐐁𐑈𐐲 + 𐐝𐐲𐑄𐐲𐑉𐑌 𐐏𐐲𐑉𐐲𐐹 + 𐐉𐑅𐐻𐑉𐐩𐑊𐐨𐐲 𐐰𐑌𐐼 𐐤𐐭 𐐞𐐨𐑊𐐲𐑌𐐼 + 𐐣𐐯𐑊𐐲𐑌𐐨𐑈𐐲 + 𐐣𐐴𐐿𐑉𐐲𐑌𐐨𐑈𐐲𐑌 𐐡𐐨𐐾𐐲𐑌 + 𐐑𐐪𐑊𐐲𐑌𐐨𐑈𐐲 + 𐐁𐑈𐐲 + 𐐝𐐯𐑌𐐻𐑉𐐲𐑊 𐐁𐑈𐐲 + 𐐎𐐯𐑅𐐻𐐲𐑉𐑌 𐐁𐑈𐐲 + 𐐏𐐲𐑉𐐲𐐹 + 𐐀𐑅𐐻𐐲𐑉𐑌 𐐏𐐲𐑉𐐲𐐹 + 𐐤𐐱𐑉𐑄𐐲𐑉𐑌 𐐏𐐲𐑉𐐲𐐹 + 𐐎𐐯𐑅𐐻𐐲𐑉𐑌 𐐏𐐲𐑉𐐲𐐹 + 𐐢𐐰𐐻𐑌 𐐊𐑋𐐯𐑉𐐲𐐿𐐲 𐐰𐑌𐐼 𐑄 𐐗𐐯𐑉𐐲𐐺𐐨𐐲𐑌 + 𐐈𐑌𐐼𐐱𐑉𐐲 + 𐐏𐐭𐑌𐐴𐐼𐐮𐐼 𐐇𐑉𐐲𐐺 𐐇𐑋𐐲𐑉𐐩𐐻𐑅 + 𐐈𐑁𐑀𐐰𐑌𐐲𐑅𐐻𐐰𐑌 + 𐐈𐑌𐐻𐐨𐑀𐐶𐐲 𐐰𐑌𐐼 𐐒𐐪𐑉𐐺𐐷𐐭𐐼𐐲 + 𐐈𐑍𐑀𐐶𐐮𐑊𐐲 + 𐐈𐑊𐐺𐐩𐑌𐐨𐐲 + 𐐂𐑉𐑋𐐨𐑌𐐨𐐲 + 𐐈𐑌𐑀𐐬𐑊𐐲 + 𐐈𐑌𐐻𐐪𐑉𐐿𐐻𐐮𐐿𐐲 + 𐐂𐑉𐐾𐐲𐑌𐐻𐐨𐑌𐐲 + 𐐊𐑋𐐯𐑉𐐲𐐿𐐲𐑌 𐐝𐐲𐑋𐐬𐐲 + 𐐉𐑅𐐻𐑉𐐨𐐲 + 𐐉𐑅𐐻𐑉𐐩𐑊𐐨𐐲 + 𐐊𐑉𐐭𐐺𐐲 + 𐐈𐑊𐐰𐑌𐐼 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐈𐑆𐐲𐑉𐐺𐐴𐑈𐐪𐑌 + 𐐒𐐱𐑆𐑌𐐨𐐲 𐐰𐑌𐐼 𐐐𐐲𐑉𐐻𐑅𐐲𐑀𐐬𐑂𐐨𐑌𐐲 + 𐐒𐐪𐑉𐐺𐐩𐐼𐐬𐑅 + 𐐒𐐪𐑍𐑀𐑊𐐲𐐼𐐯𐑇 + 𐐒𐐯𐑊𐐾𐐲𐑋 + 𐐒𐐲𐑉𐐿𐐩𐑌𐐲 𐐙𐐰𐑅𐐬 + 𐐒𐐲𐑊𐑀𐐯𐑉𐐨𐐲 + 𐐒𐐪𐑉𐐩𐑌 + 𐐒𐐲𐑉𐐳𐑌𐐼𐐨 + 𐐒𐐲𐑌𐐨𐑌 + 𐐝𐐩𐑌𐐻 𐐒𐐪𐑉𐐻𐐩𐑊𐐲𐑋𐐨 + 𐐒𐐲𐑉𐑋𐐷𐐭𐐼𐐲 + 𐐒𐑉𐐭𐑌𐐴 + 𐐒𐐬𐑊𐐮𐑂𐐨𐐲 + 𐐒𐑉𐐲𐑆𐐮𐑊 + 𐐒𐐲𐐸𐐪𐑋𐐲𐑅 + 𐐒𐐭𐐻𐐪𐑌 + 𐐒𐐭𐑂𐐩 𐐌𐑊𐐲𐑌𐐼 + 𐐒𐐪𐐻𐑅𐐶𐐪𐑌𐐲 + 𐐒𐐯𐑊𐐲𐑉𐐭𐑅 + 𐐒𐐲𐑊𐐨𐑆 + 𐐗𐐰𐑌𐐲𐐼𐐲 + 𐐗𐐬𐐿𐐬𐑆 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐗𐐪𐑍𐑀𐐬 - 𐐗𐐲𐑌𐑇𐐪𐑅𐐲 + 𐐝𐐯𐑌𐐻𐑉𐐲𐑊 𐐈𐑁𐑉𐐲𐐿𐐲𐑌 𐐡𐐨𐐹𐐲𐐺𐑊𐐮𐐿 + 𐐗𐐪𐑍𐑀𐐬 - 𐐒𐑉𐐪𐑆𐐲𐑂𐐮𐑊 + 𐐝𐐶𐐮𐐻𐑅𐐲𐑉𐑊𐐲𐑌𐐼 + 𐐌𐑂𐑉𐐨 𐐗𐐬𐑅𐐻 + 𐐗𐐳𐐿 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐕𐐨𐑊𐐩 + 𐐗𐐰𐑋𐐲𐑉𐐭𐑌 + 𐐕𐐴𐑌𐐲 + 𐐗𐐲𐑊𐐲𐑋𐐺𐐨𐐲 + 𐐗𐐱𐑅𐐻𐐲 𐐡𐐨𐐿𐐲 + 𐐗𐐷𐐭𐐺𐐲 + 𐐗𐐩𐐹 𐐚𐐯𐑉𐐼𐐨 + 𐐗𐑉𐐮𐑅𐑋𐐲𐑅 𐐌𐑊𐐲𐑌𐐼 + 𐐝𐐴𐐹𐑉𐐲𐑅 + 𐐕𐐯𐐿 𐐡𐐨𐐹𐐲𐐺𐑊𐐮𐐿 + 𐐖𐐲𐑉𐑋𐐲𐑌𐐨 + 𐐖𐐲𐐺𐐭𐐼𐐨 + 𐐔𐐯𐑌𐑋𐐪𐑉𐐿 + 𐐔𐐪𐑋𐐲𐑌𐐨𐐿𐐲 + 𐐔𐐲𐑋𐐮𐑌𐐲𐐿𐐲𐑌 𐐡𐐨𐐹𐐲𐐺𐑊𐐮𐐿 + 𐐈𐑊𐐾𐐮𐑉𐐨𐐲 + 𐐇𐐿𐐶𐐲𐐼𐐱𐑉 + 𐐇𐑅𐐻𐐬𐑌𐐨𐐲 + 𐐀𐐾𐐲𐐹𐐻 + 𐐎𐐯𐑅𐐻𐐲𐑉𐑌 𐐝𐐲𐐸𐐱𐑉𐐲 + 𐐇𐑉𐐮𐐻𐑉𐐨𐐲 + 𐐝𐐹𐐩𐑌 + 𐐀𐑃𐐨𐐬𐐹𐐨𐐲 + 𐐏𐐲𐑉𐐲𐐹𐐨𐐲𐑌 𐐏𐐭𐑌𐐷𐐲𐑌 + 𐐙𐐮𐑌𐑊𐐲𐑌𐐼 + 𐐙𐐨𐐾𐐨 + 𐐙𐐪𐑊𐐿𐑊𐐲𐑌𐐼 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐣𐐴𐐿𐑉𐐲𐑌𐐨𐑈𐐲 + 𐐙𐐯𐑉𐐬 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐙𐑉𐐰𐑌𐑅 + 𐐘𐐲𐐺𐐪𐑌 + 𐐏𐐭𐑌𐐴𐐻𐐲𐐼 𐐗𐐨𐑍𐐼𐐲𐑋 + 𐐘𐑉𐐲𐑌𐐩𐐼𐐲 + 𐐖𐐱𐑉𐐾𐐲 + 𐐙𐑉𐐯𐑌𐐽 𐐘𐐨𐐪𐑌𐐲 + 𐐘𐐲𐑉𐑌𐑆𐐨 + 𐐘𐐪𐑌𐐲 + 𐐖𐐲𐐺𐑉𐐱𐑊𐐻𐐲𐑉 + 𐐘𐑉𐐨𐑌𐑊𐐲𐑌𐐼 + 
𐐘𐐰𐑋𐐺𐐨𐐲 + 𐐘𐐮𐑌𐐨 + 𐐘𐐶𐐪𐐼𐐲𐑊𐐭𐐹 + 𐐇𐐿𐐶𐐲𐐻𐐱𐑉𐐨𐐲𐑊 𐐘𐐮𐑌𐐨 + 𐐘𐑉𐐨𐑅 + 𐐝𐐵𐑃 𐐖𐐱𐑉𐐾𐐲 𐐰𐑌𐐼 𐑄 𐐝𐐵𐑃 𐐝𐐰𐑌𐐼𐐶𐐮𐐽 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐘𐐶𐐪𐐼𐐲𐑋𐐪𐑊𐐲 + 𐐘𐐶𐐪𐑋 + 𐐘𐐮𐑌𐐨-𐐒𐐮𐑅𐐵 + 𐐘𐐴𐐰𐑌𐐲 + 𐐐𐐬𐑍 𐐗𐐬𐑍 𐐝𐐈𐐡 𐐕𐐴𐑌𐐲 + 𐐐𐐬𐑍 𐐗𐐬𐑍 + 𐐐𐐲𐑉𐐼 𐐌𐑊𐐲𐑌𐐼 𐐰𐑌𐐼 𐐣𐐿𐐔𐐱𐑌𐐲𐑊𐐼 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐐𐐪𐑌𐐼𐐭𐑉𐐲𐑅 + 𐐗𐑉𐐬𐐩𐑇𐐲 + 𐐐𐐩𐐻𐐨 + 𐐐𐐲𐑍𐑀𐐲𐑉𐐨 + 𐐆𐑌𐐼𐐲𐑌𐐨𐑈𐐲 + 𐐌𐑉𐑊𐐲𐑌𐐼 + 𐐆𐑆𐑉𐐨𐐲𐑊 + 𐐌𐐲𐑊 𐐲𐑁 𐐣𐐰𐑌 + 𐐆𐑌𐐼𐐨𐐲 + 𐐒𐑉𐐮𐐼𐐮𐑇 𐐆𐑌𐐼𐐨𐐲𐑌 𐐄𐑇𐐲𐑌 𐐓𐐯𐑉𐐲𐐻𐐱𐑉𐐨 + 𐐆𐑉𐐰𐐿 + 𐐆𐑉𐐪𐑌 + 𐐌𐑅𐑊𐐲𐑌𐐼 + 𐐆𐐻𐐲𐑊𐐨 + 𐐖𐐲𐑉𐑆𐐨 + 𐐖𐐲𐑋𐐩𐐿𐐲 + 𐐖𐐱𐑉𐐼𐐲𐑌 + 𐐖𐐲𐐹𐐰𐑌 + 𐐗𐐯𐑌𐐷𐐲 + 𐐗𐐮𐑉𐑀𐐲𐑅𐐻𐐰𐑌 + 𐐗𐐰𐑋𐐺𐐬𐐼𐐨𐐲 + 𐐗𐐮𐑉𐐲𐐺𐐪𐐻𐐨 + 𐐗𐐪𐑋𐐲𐑉𐐬𐑆 + 𐐝𐐩𐑌𐐻 𐐗𐐮𐐻𐑅 𐐰𐑌𐐼 𐐤𐐨𐑂𐐮𐑅 + 𐐤𐐱𐑉𐑃 𐐗𐐲𐑉𐐨𐐲 + 𐐝𐐵𐑃 𐐗𐐲𐑉𐐨𐐲 + 𐐗𐐲𐐶𐐩𐐻 + 𐐗𐐩𐑋𐐲𐑌 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐗𐐲𐑆𐐪𐐿𐑅𐐻𐐪𐑌 + 𐐢𐐪𐐬𐑅 + 𐐢𐐯𐐺𐐲𐑌𐐪𐑌 + 𐐢𐐮𐐿𐐻𐐲𐑌𐑅𐐻𐐴𐑌 + 𐐟𐑉𐐨 𐐢𐐰𐑍𐐿𐐲 + 𐐢𐐴𐐺𐐮𐑉𐐨𐐲 + 𐐢𐐲𐑅𐐬𐑃𐐬 + 𐐢𐐮𐑃𐐲𐐶𐐩𐑌𐐨𐐲 + 𐐢𐐲𐐿𐑅𐐲𐑋𐐺𐐲𐑉𐑀 + 𐐢𐐰𐐻𐑂𐐨𐐲 + 𐐢𐐮𐐺𐐨𐐲 + 𐐣𐐲𐑉𐐪𐐿𐐬 + 𐐣𐐪𐑌𐐲𐐿𐐬 + 𐐣𐐱𐑊𐐼𐐬𐑂𐐲 + 𐐣𐐪𐑌𐐲𐑌𐐨𐑀𐑉𐐬 + 𐐝𐐩𐑌𐐻 𐐣𐐪𐑉𐐻𐑌 + 𐐣𐐰𐐼𐐲𐑀𐐰𐑅𐐿𐐲𐑉 + 𐐣𐐪𐑉𐑇𐐲𐑊 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐣𐐰𐑅𐐲𐐼𐐬𐑌𐐨𐐲 + 𐐣𐐪𐑊𐐨 + 𐐣𐐨𐐲𐑌𐑋𐐪𐑉 + 𐐣𐐪𐑍𐑀𐐬𐑊𐐨𐐲 + 𐐣𐐲𐐿𐐵 𐐝𐐈𐐡 𐐕𐐴𐑌𐐲 + 𐐣𐐲𐐿𐐵 + 𐐤𐐱𐑉𐑄𐐲𐑉𐑌 𐐣𐐰𐑉𐐨𐐱𐑌𐐲 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐣𐐪𐑉𐐻𐑌𐐨𐐿 + 𐐣𐐱𐑉𐐲𐐻𐐩𐑌𐐨𐐲 + 𐐣𐐪𐑌𐐻𐑅𐐲𐑉𐐪𐐻 + 𐐣𐐱𐑊𐐻𐐲 + 𐐣𐐱𐑉𐐮𐑇𐐲𐑅 + 𐐣𐐪𐑊𐐼𐐨𐑂𐑆 + 𐐣𐐲𐑊𐐪𐐶𐐨 + 𐐣𐐯𐐿𐑅𐐲𐐿𐐬 + 𐐣𐐲𐑊𐐩𐑈𐐲 + 𐐣𐐬𐑆𐐰𐑋𐐺𐐨𐐿 + 𐐤𐐲𐑋𐐮𐐺𐐨𐐲 + 𐐤𐐭 𐐗𐐰𐑊𐐲𐐼𐐬𐑌𐐷𐐲 + 𐐤𐐴𐐾𐐲𐑉 + 𐐤𐐱𐑉𐑁𐐲𐐿 𐐌𐑊𐐲𐑌𐐼 + 𐐤𐐴𐐾𐐮𐑉𐐨𐐲 + 𐐤𐐮𐐿𐐲𐑉𐐪𐑀𐐶𐐲 + 𐐤𐐯𐑄𐐲𐑉𐑊𐐲𐑌𐐼𐑆 + 𐐤𐐱𐑉𐐶𐐩 + 𐐤𐐩𐐹𐐪𐑊 + 𐐤𐐪𐐭𐑉𐐭 + 𐐤𐐷𐐭𐐩 + 𐐤𐐭 𐐞𐐨𐑊𐐲𐑌𐐼 + 𐐄𐑋𐐲𐑌 + 𐐑𐐰𐑌𐐲𐑋𐐪 + 𐐑𐐲𐑉𐐭 + 𐐙𐑉𐐯𐑌𐐽 𐐑𐐪𐑊𐐲𐑌𐐨𐑈𐐲 + 𐐑𐐰𐐹𐐷𐐳𐐲 𐐤𐐭 𐐘𐐮𐑌𐐨 + 𐐙𐐮𐑊𐐲𐐹𐐨𐑌𐑆 + 𐐑𐐰𐐿𐐲𐑅𐐻𐐰𐑌 + 𐐑𐐬𐑊𐐲𐑌𐐼 + 𐐝𐐩𐑌𐐻 𐐑𐐨𐐯𐑉 𐐰𐑌𐐼 𐐣𐐨𐐿𐐲𐑊𐐪𐑌 + 𐐑𐐮𐐻𐐿𐐯𐑉𐑌 + 𐐑𐐶𐐯𐑉𐐻𐐬 𐐡𐐨𐐿𐐬 + 𐐑𐐰𐑊𐐲𐑅𐐻𐐮𐑌𐐨𐐲𐑌 𐐓𐐯𐑉𐐲𐐻𐐱𐑉𐐨 + 𐐑𐐱𐑉𐐽𐐲𐑀𐐲𐑊 + 𐐑𐐲𐑊𐐵 + 𐐑𐐯𐑉𐐲𐑀𐐶𐐴 + 𐐗𐐲𐐻𐐪𐑉 + 𐐍𐐻𐑊𐐴𐐮𐑍 𐐄𐑇𐐨𐐰𐑌𐐨𐐲 + 𐐡𐐨𐐷𐐭𐑌𐐷𐐲𐑌 + 𐐡𐐬𐑋𐐩𐑌𐐨𐐲 + 𐐝𐐲𐑉𐐺𐐨𐐲 + 𐐡𐐲𐑇𐐲 + 𐐡𐐲𐐶𐐪𐑌𐐼𐐲 + 𐐝𐐵𐐼𐐨 𐐊𐑉𐐩𐐺𐐨𐐲 + 𐐝𐐪𐑊𐐲𐑋𐐲𐑌 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐝𐐩𐑇𐐯𐑊𐑆 + 𐐝𐐭𐐼𐐰𐑌 + 𐐝𐐶𐐨𐐼𐑌 + 𐐝𐐮𐑍𐐲𐐹𐐱𐑉 + 𐐝𐐩𐑌𐐻 𐐐𐐯𐑊𐐲𐑌𐐲 + 𐐝𐑊𐐬𐑂𐐨𐑌𐐨𐐲 + 𐐝𐑂𐐪𐑊𐐺𐐪𐑉𐐼 𐐰𐑌𐐼 𐐖𐐰𐑌 𐐣𐐴𐐲𐑌 + 𐐝𐑊𐐬𐑂𐐪𐐿𐐨𐐲 + 𐐝𐐨𐐯𐑉𐐲 𐐢𐐨𐐬𐑌 + 𐐝𐐪𐑌 𐐣𐐲𐑉𐐨𐑌𐐬 + 𐐝𐐯𐑌𐐲𐑀𐐱𐑊 + 𐐝𐐲𐑋𐐪𐑊𐐨𐐲 + 𐐝𐐭𐑉𐐲𐑌𐐪𐑋 + 𐐝𐐵 𐐓𐐬𐑋 𐐰𐑌𐐼 𐐑𐑉𐐮𐑌𐐽𐐮𐐹𐐩 + 𐐇𐑊 𐐝𐐰𐑊𐑂𐐲𐐼𐐱𐑉 + 𐐝𐐮𐑉𐐨𐐲 + 𐐝𐐶𐐪𐑆𐐨𐑊𐐰𐑌𐐼 + 𐐓𐐲𐑉𐐿𐑅 𐐰𐑌𐐼 𐐗𐐴𐐿𐐬𐑆 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐕𐐰𐐼 + 𐐙𐑉𐐯𐑌𐐽 𐐝𐐲𐑄𐐲𐑉𐑌 𐐓𐐯𐑉𐐲𐐻𐐱𐑉𐐨𐑆 + 𐐓𐐬𐑀𐐬 + 𐐓𐐴𐑊𐐰𐑌𐐼 + 𐐓𐐲𐐾𐐨𐐿𐐲𐑅𐐻𐐰𐑌 + 𐐓𐐬𐐿𐐯𐑊𐐵 + 𐐀𐑅𐐻 𐐓𐐨𐑋𐐱𐑉 + 𐐓𐐲𐑉𐐿𐑋𐐯𐑌𐐲𐑅𐐻𐐰𐑌 + 𐐓𐐪𐑍𐑀𐐲 + 𐐓𐐲𐑉𐐿𐐨 + 𐐓𐑉𐐮𐑌𐐮𐐼𐐰𐐼 𐐰𐑌𐐼 𐐓𐐲𐐺𐐩𐑀𐐬 + 𐐓𐐲𐑂𐐪𐑊𐐭 + 𐐓𐐴𐐶𐐪𐑌 + 𐐓𐐰𐑌𐑆𐐲𐑌𐐨𐐲 + 𐐏𐐭𐑀𐐰𐑌𐐼𐐲 + 𐐏𐐭𐑌𐐰𐐮𐐻𐐲𐐼 𐐝𐐻𐐩𐐻𐑅 𐐣𐐴𐑌𐐬𐑉 𐐍𐐻𐑊𐐴𐐨𐑍 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐏𐐭𐑌𐐴𐐻𐐲𐐼 𐐝𐐻𐐩𐐻𐑅 + 𐐏𐐳𐑉𐐲𐑀𐐶𐐴 + 𐐅𐑆𐐺𐐯𐐿𐐲𐑅𐐻𐐰𐑌 + 𐐚𐐰𐐼𐐲𐐿𐐲𐑌 + 𐐝𐐩𐑌𐐻 𐐚𐐮𐑌𐑅𐐲𐑌𐐻 𐐰𐑌𐐼 𐑄 𐐘𐑉𐐯𐑌𐐲𐐼𐐨𐑌𐑆 + 𐐒𐑉𐐮𐐼𐐮𐑇 𐐚𐐲𐑉𐐾𐐲𐑌 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐏.𐐝. 
𐐚𐐲𐑉𐐾𐐲𐑌 𐐌𐑊𐐲𐑌𐐼𐑆 + 𐐚𐐨𐐯𐐻𐑌𐐪𐑋 + 𐐚𐐪𐑌𐐳𐐪𐐼𐐭 + 𐐎𐐪𐑊𐐮𐑅 𐐰𐑌𐐼 𐐙𐐭𐐻𐐭𐑌𐐲 + 𐐝𐐲𐑋𐐬𐐲 + 𐐏𐐯𐑋𐐲𐑌 + 𐐣𐐪𐐷𐐱𐐻 + 𐐝𐐵𐑃 𐐈𐑁𐑉𐐲𐐿𐐲 + 𐐞𐐰𐑋𐐺𐐨𐐲 + 𐐞𐐮𐑋𐐺𐐪𐐺𐐶𐐩 + 𐐊𐑌𐐬𐑌 𐐬𐑉 𐐆𐑌𐑂𐐰𐑊𐐮𐐼 𐐡𐐨𐐾𐐲𐑌 + + + 𐐓𐑉𐐲𐐼𐐮𐑇𐐲𐑌𐑊 𐐖𐐲𐑉𐑋𐐲𐑌 𐐱𐑉𐑃𐐪𐑀𐑉𐐲𐑁𐐨 + 𐐖𐐲𐑉𐑋𐐲𐑌 𐐱𐑉𐑃𐐪𐑀𐑉𐐲𐑁𐐨 𐐲𐑂 1996 + 𐐢𐐩𐐻 𐐣𐐮𐐼𐑊 𐐙𐑉𐐯𐑌𐐽 𐐻𐐭 1606 + 𐐊𐑉𐑊𐐨 𐐣𐐪𐐼𐐲𐑉𐑌 𐐙𐑉𐐯𐑌𐐽 + 𐐀𐑅𐐻𐐲𐑉𐑌 𐐂𐑉𐑋𐐨𐑌𐐨𐐲𐑌 + 𐐎𐐯𐑅𐐻𐐲𐑉𐑌 𐐂𐑉𐑋𐐨𐑌𐐨𐐲𐑌 + 𐐏𐐭𐑌𐐲𐑁𐐴𐐼 𐐓𐐲𐑉𐐿𐐮𐐿 𐐢𐐰𐐻𐑌 𐐈𐑊𐑁𐐲𐐺𐐲𐐻 + 𐐆𐐙𐐈 𐐙𐐬𐑌𐐯𐐻𐐮𐐿𐑅 + 𐐣𐐪𐑌𐐲𐐻𐐪𐑌𐐮𐐿 + 𐐑𐐱𐑊𐐨𐐻𐐱𐑌𐐮𐐿 + 𐐗𐐲𐑋𐐹𐐷𐐭𐐻𐐯𐑉 + 𐐡𐐲𐑂𐐴𐑆𐐼 𐐉𐑉𐑃𐐪𐑀𐑉𐐲𐑁𐐨 + 𐐝𐐿𐐪𐐼𐐮𐑇 𐐝𐐻𐐰𐑌𐐼𐐲𐑉𐐼 𐐆𐑍𐑊𐐮𐑇 + + + 𐑋𐐯𐐻𐑉𐐮𐐿 + 𐐏𐐝 + + + + [𐐨 𐐩 𐐪 𐐫 𐐬 𐐭 𐐮 𐐯 𐐰 𐐱 𐐲 𐐳 𐐴 𐐵 𐐶 𐐷 𐐸 𐐹 𐐺 𐐻 𐐼 𐐽 𐐾 𐐿 𐑀 𐑁 𐑂 𐑃 𐑄 𐑅 𐑆 𐑇 𐑈 𐑉 𐑊 𐑋 𐑌 𐑍 𐑎 𐑏] + [] + [𐐀 𐐁 𐐂 𐐃 𐐄 𐐅 𐐆 𐐇 𐐈 𐐉 𐐊 𐐋 𐐌 𐐍 𐐎 𐐏 𐐐 𐐑 𐐒 𐐓 𐐔 𐐕 𐐖 𐐗 𐐘 𐐙 𐐚 𐐛 𐐜 𐐝 𐐞 𐐟 𐐠 𐐡 𐐢 𐐣 𐐤 𐐥 𐐦 𐐧] + + + + + + + + 𐐖𐐰𐑌 + 𐐙𐐯𐐺 + 𐐣𐐪𐑉 + 𐐁𐐹𐑉 + 𐐣𐐩 + 𐐖𐐭𐑌 + 𐐖𐐭𐑊 + 𐐂𐑀 + 𐐝𐐯𐐹 + 𐐉𐐿𐐻 + 𐐤𐐬𐑂 + 𐐔𐐨𐑅 + + + 𐐖 + 𐐙 + 𐐣 + 𐐁 + 𐐣 + 𐐖 + 𐐖 + 𐐂 + 𐐝 + 𐐉 + 𐐤 + 𐐔 + + + 𐐖𐐰𐑌𐐷𐐭𐐯𐑉𐐨 + 𐐙𐐯𐐺𐑉𐐭𐐯𐑉𐐨 + 𐐣𐐪𐑉𐐽 + 𐐁𐐹𐑉𐐮𐑊 + 𐐣𐐩 + 𐐖𐐭𐑌 + 𐐖𐐭𐑊𐐴 + 𐐂𐑀𐐲𐑅𐐻 + 𐐝𐐯𐐹𐐻𐐯𐑋𐐺𐐲𐑉 + 𐐉𐐿𐐻𐐬𐐺𐐲𐑉 + 𐐤𐐬𐑂𐐯𐑋𐐺𐐲𐑉 + 𐐔𐐨𐑅𐐯𐑋𐐺𐐲𐑉 + + + + + 𐐖𐐰𐑌 + 𐐙𐐯𐐺 + 𐐣𐐪𐑉 + 𐐁𐐹𐑉 + 𐐣𐐩 + 𐐖𐐭𐑌 + 𐐖𐐭𐑊 + 𐐂𐑀 + 𐐝𐐯𐐹 + 𐐉𐐿𐐻 + 𐐤𐐬𐑂 + 𐐔𐐨𐑅 + + + 𐐖 + 𐐙 + 𐐣 + 𐐁 + 𐐣 + 𐐖 + 𐐖 + 𐐂 + 𐐝 + 𐐉 + 𐐤 + 𐐔 + + + 𐐖𐐰𐑌𐐷𐐭𐐯𐑉𐐨 + 𐐙𐐯𐐺𐑉𐐭𐐯𐑉𐐨 + 𐐣𐐪𐑉𐐽 + 𐐁𐐹𐑉𐐮𐑊 + 𐐣𐐩 + 𐐖𐐭𐑌 + 𐐖𐐭𐑊𐐴 + 𐐂𐑀𐐲𐑅𐐻 + 𐐝𐐯𐐹𐐻𐐯𐑋𐐺𐐲𐑉 + 𐐉𐐿𐐻𐐬𐐺𐐲𐑉 + 𐐤𐐬𐑂𐐯𐑋𐐺𐐲𐑉 + 𐐔𐐨𐑅𐐯𐑋𐐺𐐲𐑉 + + + + + + + 𐐝𐐲𐑌 + 𐐣𐐲𐑌 + 𐐓𐐭𐑆 + 𐐎𐐯𐑌 + 𐐛𐐲𐑉 + 𐐙𐑉𐐴 + 𐐝𐐰𐐻 + + + 𐐝𐐲𐑌𐐼𐐩 + 𐐣𐐲𐑌𐐼𐐩 + 𐐓𐐭𐑆𐐼𐐩 + 𐐎𐐯𐑌𐑆𐐼𐐩 + 𐐛𐐲𐑉𐑆𐐼𐐩 + 𐐙𐑉𐐴𐐼𐐩 + 𐐝𐐰𐐻𐐲𐑉𐐼𐐩 + + + + + 𐐝 + 𐐣 + 𐐓 + 𐐎 + 𐐛 + 𐐙 + 𐐝 + + + + + + + 𐐗1 + 𐐗2 + 𐐗3 + 𐐗4 + + + 1𐑅𐐻 𐐿𐐶𐐪𐑉𐐻𐐲𐑉 + 2𐑌𐐼 𐐿𐐶𐐪𐑉𐐻𐐲𐑉 + 3𐑉𐐼 𐐿𐐶𐐪𐑉𐐻𐐲𐑉 + 4𐑉𐑃 𐐿𐐶𐐪𐑉𐐻𐐲𐑉 + + + + + + + 𐐈𐐣 + 𐐰𐑋 + 𐐑𐐣 + 𐐹𐑋 + + + 𐐈𐐣 + 𐐰𐑋 + 𐐑𐐣 + 𐐹𐑋 + + + + + + 𐐒𐐲𐑁𐐬𐑉 𐐗𐑉𐐴𐑅𐐻 + 𐐈𐑌𐐬 𐐔𐐱𐑋𐐮𐑌𐐨 + + + 𐐒𐐗 + 𐐈𐐔 + + + 𐐒 + 𐐈 + + + + + + + 𐐇𐑉𐐲 + + + 𐐏𐐨𐑉 + + + 𐐣𐐲𐑌𐑃 + + + 𐐎𐐨𐐿 + + + 𐐔𐐩 + 𐐜 𐐼𐐩 𐐺𐐲𐑁𐐬𐑉 𐐷𐐯𐑅𐐻𐐲𐑉𐐼𐐩 + 𐐏𐐯𐑅𐐻𐐲𐑉𐐼𐐩 + 𐐓𐐲𐐼𐐩 + 𐐓𐐲𐑋𐐱𐑉𐐬 + 𐐜 𐐼𐐩 𐐰𐑁𐐻𐐲𐑉 𐐻𐐲𐑋𐐱𐑉𐐬 + + + 𐐔𐐩 𐐲𐑂 𐑄 𐐎𐐨𐐿 + + + 𐐈𐐣/𐐑𐐣 + + + 𐐍𐑉 + + + 𐐣𐐮𐑌𐐲𐐻 + + + 𐐝𐐯𐐿𐐲𐑌𐐼 + + + 𐐞𐐬𐑌 + + + + 𐐘𐐣𐐓 {0} + 𐐘𐐣𐐓 + {0} 𐐓𐐴𐑋 + + 𐐊𐑌𐑌𐐬𐑌 + + + 𐐣𐐮𐐼𐐶𐐩 + + + 𐐎𐐩𐐿 + + + 𐐈𐐼𐐰𐐿 + + + 𐐤𐐬𐑋 + + + 𐐐𐐪𐑌𐐲𐑊𐐭𐑊𐐭 + + + 𐐖𐐪𐑌𐑅𐐻𐐲𐑌 + + + 𐐁𐑍𐐿𐐲𐑉𐐮𐐾 + + + 𐐏𐐰𐐿𐐭𐐻𐐰𐐻 + + + 𐐖𐐭𐑌𐐬 + + + 𐐢𐐱𐑅 𐐈𐑌𐐾𐐲𐑊𐑅 + + + 𐐒𐐱𐐮𐑆𐐨 + + + 𐐙𐐨𐑌𐐮𐐿𐑅 + + + 𐐔𐐯𐑌𐑂𐐲𐑉 + + + 𐐤𐐭 𐐝𐐩𐑊𐐲𐑋, 𐐤𐐱𐑉𐑃 𐐔𐐲𐐿𐐬𐐼𐐲 + + + 𐐝𐐯𐑌𐐻𐐲𐑉, 𐐤𐐱𐑉𐑃 𐐔𐐲𐐿𐐬𐐼𐐲 + + + 𐐟𐐮𐐿𐐪𐑀𐐬 + + + 𐐣𐐲𐑌𐐪𐑋𐐲𐑌𐐨 + + + 𐐚𐐮𐑌𐑅𐐯𐑌𐑆, 𐐆𐑌𐐼𐐨𐐰𐑌𐐲 + + + 𐐑𐐨𐐻𐐲𐑉𐑆𐐺𐐲𐑉𐑀, 𐐆𐑌𐐼𐐨𐐰𐑌𐐲 + + + 𐐓𐐯𐑊 
𐐝𐐮𐐼𐐨, 𐐆𐑌𐐼𐐨𐐰𐑌𐐲 + + + 𐐤𐐪𐐿𐑅, 𐐆𐑌𐐼𐐨𐐰𐑌𐐲 + + + 𐐎𐐮𐑌𐐲𐑋𐐰𐐿, 𐐆𐑌𐐼𐐨𐐰𐑌𐐲 + + + 𐐣𐐲𐑉𐐯𐑍𐑀𐐬, 𐐆𐑌𐐼𐐨𐐰𐑌𐐲 + + + 𐐆𐑌𐐼𐐨𐐲𐑌𐐰𐐹𐐬𐑊𐐲𐑅 + + + 𐐢𐐭𐐶𐐨𐑂𐐮𐑊 + + + 𐐚𐐯𐑂𐐩, 𐐆𐑌𐐼𐐨𐐰𐑌𐐲 + + + 𐐣𐐪𐑌𐐻𐐲𐑅𐐯𐑊𐐬, 𐐗𐐲𐑌𐐻𐐲𐐿𐐨 + + + 𐐔𐐲𐐻𐑉𐐱𐐮𐐻 + + + 𐐤𐐭 𐐏𐐱𐑉𐐿 + + + + 𐐊𐑊𐐰𐑅𐐿𐐲 𐐓𐐴𐑋 + 𐐊𐑊𐐰𐑅𐐿𐐲 𐐝𐐻𐐰𐑌𐐼𐐲𐑉𐐼 𐐓𐐴𐑋 + 𐐊𐑊𐐰𐑅𐐿𐐲 𐐔𐐩𐑊𐐴𐐻 𐐓𐐴𐑋 + + + + + 𐐝𐐯𐑌𐐻𐑉𐐲𐑊 𐐓𐐴𐑋 + 𐐝𐐯𐑌𐐻𐑉𐐲𐑊 𐐝𐐻𐐰𐑌𐐼𐐲𐑉𐐼 𐐓𐐴𐑋 + 𐐝𐐯𐑌𐐻𐑉𐐲𐑊 𐐔𐐩𐑊𐐴𐐻 𐐓𐐴𐑋 + + + + + 𐐀𐑅𐐻𐐲𐑉𐑌 𐐓𐐴𐑋 + 𐐀𐑅𐐻𐐲𐑉𐑌 𐐝𐐻𐐰𐑌𐐼𐐲𐑉𐐼 𐐓𐐴𐑋 + 𐐀𐑅𐐻𐐲𐑉𐑌 𐐔𐐩𐑊𐐴𐐻 𐐓𐐴𐑋 + + + + + 𐐣𐐵𐑌𐐻𐐲𐑌 𐐓𐐴𐑋 + 𐐣𐐵𐑌𐐻𐐲𐑌 𐐝𐐻𐐰𐑌𐐼𐐲𐑉𐐼 𐐓𐐴𐑋 + 𐐣𐐵𐑌𐐻𐐲𐑌 𐐔𐐩𐑊𐐴𐐻 𐐓𐐴𐑋 + + + + + 𐐑𐐲𐑅𐐮𐑁𐐮𐐿 𐐓𐐴𐑋 + 𐐑𐐲𐑅𐐮𐑁𐐮𐐿 𐐝𐐻𐐰𐑌𐐼𐐲𐑉𐐼 𐐓𐐴𐑋 + 𐐑𐐲𐑅𐐮𐑁𐐮𐐿 𐐔𐐩𐑊𐐴𐐻 𐐓𐐴𐑋 + + + + + 𐐈𐐻𐑊𐐰𐑌𐐻𐐮𐐿 𐐓𐐴𐑋 + 𐐈𐐻𐑊𐐰𐑌𐐻𐐮𐐿 𐐝𐐻𐐰𐑌𐐼𐐲𐑉𐐼 𐐓𐐴𐑋 + 𐐈𐐻𐑊𐐰𐑌𐐻𐐮𐐿 𐐔𐐩𐑊𐐴𐐻 𐐓𐐴𐑋 + + + + + 𐐐𐐱𐑍 𐐗𐐱𐑍 𐐓𐐴𐑋 + 𐐐𐐱𐑍 𐐗𐐱𐑍 𐐝𐐻𐐰𐑌𐐼𐐲𐑉𐐼 𐐓𐐴𐑋 + 𐐐𐐱𐑍 𐐗𐐱𐑍 𐐔𐐩𐑊𐐴𐐻 𐐓𐐴𐑋 + + + + + 𐐤𐐭𐑁𐐲𐑌𐐼𐑊𐐲𐑌𐐼 𐐓𐐴𐑋 + 𐐤𐐭𐑁𐐲𐑌𐐼𐑊𐐲𐑌𐐼 𐐝𐐻𐐰𐑌𐐼𐐲𐑉𐐼 𐐓𐐴𐑋 + 𐐤𐐭𐑁𐐲𐑌𐐼𐑊𐐲𐑌𐐼 𐐔𐐩𐑊𐐴𐐻 𐐓𐐴𐑋 + + + + + + + + $ + + + + + + + {0} 𐐷𐐮𐑉 + {0} 𐐷𐐮𐑉𐑆 + + + {0} 𐑋𐐲𐑌𐑃𐑅 + {0} 𐑋𐐲𐑌𐑃 + + + {0} 𐐶𐐨𐐿 + {0} 𐐶𐐨𐐿𐑅 + + + {0} 𐐼𐐩 + {0} 𐐼𐐩𐑆 + + + {0} 𐐵𐑉 + {0} 𐐵𐑉𐑆 + + + {0} 𐑋𐐮𐑌𐐲𐐻 + {0} 𐑋𐐮𐑌𐐲𐐻𐑅 + + + {0} 𐑅𐐯𐐿𐐲𐑌𐐼 + {0} 𐑅𐐯𐐿𐐲𐑌𐐼𐑆 + + + + + + 𐐷𐐯𐑅:𐐷 + 𐑌𐐬:𐑌 + + + diff --git a/make/data/cldr/common/main/en_Dsrt_US.xml b/make/data/cldr/common/main/en_Dsrt_US.xml new file mode 100644 index 00000000000..f5d45057f16 --- /dev/null +++ b/make/data/cldr/common/main/en_Dsrt_US.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + + + + + + + Jeŋ Fɛɛ + Afrika + Kooyigbɛ Amerika + Wuoyigbɛ Amerika + Ŋshɔkpɔi + Afrika Anaigbɛ + Teŋgbɛ Amerika + Afrika Bokagbɛ + Afrika Kooyigbɛ + Afrika Teŋgbɛ + Afrika Wuoyigbɛ + Amerika Niiaŋ + Kooyigbɛ Shɔŋŋ Amerika + Karibean + Asia Bokagbɛ + Asia Wuoyigbɛ + Asia Wuoyi-Bokagbɛ + Yuropa Wuoyigbɛ + Australasia + Melanesia + Ŋshɔkpɔi Bibii + Ŋshɔkpɔi Bibii Pii + Asia + Asia Teŋgbɛ + Asia Anaigbɛ + Yuropa + Yuropa Bokagbɛ + Yuropa Kooyigbɛ + Yuropa Anaigbɛ + Afrika Fã Ni Yɔɔ Sahara Lɛ Shishi + Romanse Amerika + Antigua Kɛ Barbuda + Anguilla + Angola + Argentina + Aruba + Barbados + Burkina Faso + Burundi + Benin + St. 
Barthélemy + Bermuda + Bolivia + Netherlands Ni Yɔɔ Karibean + Brazil + Bahamas + Bouvet Ŋshɔkpɔ + Botswana + Belize + Kanada + Kongo - Kinshasa + Kongo (DR) + Teŋgbɛ Afrika Jeŋmaŋ + Kongo - Brazzaville + Kongo (Jeŋmaŋ) + Ko Divua + Tsili + Kameroon + Tsaina + Kolombia + Kosta Rika + Kuba + Kape Verde + Kurasao + Djibouti + Dominika + Dominika Republik + Algeria + Keuta Kɛ Melilla + Ekuador + Ejipt + Sahara Wuoyigbɛ + Eritrea + Etiopia + Yuropa Maji Ekomefeemɔ + Yuropaniiaŋ + Falkland Ŋshɔkpɔi + Falkland Ŋshɔkpɔi Lɛ + Gabon + Grenada + Frentsibii Guiana + Ghana + Greenland + Gambia + Guinea + Guadeloupe + Ekuatorial Guinea + Georgia Wuoyi Kɛ Sandwitsi Ŋshɔkpɔi Ni Yɔɔ Wuoyi + Guatemala + Guinea-Bissau + Guyana + Honduras + Haiti + Kanary Ŋshɔkpɔi + India + Britain Shikpɔji Ni Yɔɔ Indian Ŋshɔ Lɛ Mli + Jamaika + Japan + Kenya + Komoros + St. Kitts Kɛ Nevis + Kayman Ŋshɔkpɔi + St. Lusia + Liberia + Lesotho + Libia + Moroko + St. Martin + Madagaskar + Mali + Makao SAR Tsaina + Makao + Martinik + Mauritania + Montserrat + Mauritius + Malawi + Meziko + Mozambik + Namibia + Niger + Anago + Nikaragua + Panama + Peru + St. Pierre Kɛ Mikelon + Puerto Riko + Paraguay + Ŋshɔkpɔi Ni Yɔɔ Shɔŋŋ + Réunion + Rwanda + Seyshelles + Sudan + St. Helena + Sierra Leone + Senegal + Somalia + Suriname + Sudan Wuoyi + São Tomé Kɛ Prínsipe + El Salvador + Sint Maarten + Eswatini + Swaziland + Turks Kɛ Kaikos Ŋshɔkpɔi + Tsad + Frentsibii Ashikpɔji Ni Yɔɔ Wuoyi + Togo + Tunisia + Trinidad Kɛ Tobago + Tanzania + Uganda + Jeŋmaji Ekomefeemɔ + United States + US + Uruguay + St. 
Vinsent Kɛ Grenadines + Venezuela + Britain Ŋshɔkpɔi Ni Atarako Amɛhe + US Ŋshɔkpɔi Ni Atarako Amɛhe + Eyaa Ŋwɛi Kɛ Shikpɔŋ Fɛɛ + Eyaa Biɛ Kɛ Biɛ Fɛɛ + Mayotte + South Afrika + Zambia + Zimbabwe + He Ko Ni Gbɛ́i Bɛ Mli + + + Gregory Kalanda + ISO-8601 Kalanda + Bɔ Ni Atoɔ Naa Daa + Blɔfomɛi Anɔmbai + + + Susumɔnii + + + Language: {0} + Script: {0} + Region: {0} + + + + [a b d e ɛ f g h i j k l m n ŋ o ɔ p q r s t u v w y z] + [á ã é í ĩ ó ũ] + [A B D E Ɛ F G H I J K L M N Ŋ O Ɔ P Q R S T U V W Y Z] + [\- ‑ , . % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‐ ‑ – — , ; \: ! ? . … ' ‘ ’ " “ ” ( ) \[ \] § @ * / \& # † ‡ ′ ″] + {0}… + …{0} + {0}…{1} + {0} … + … {0} + {0} … {1} + ? + + [\--/] + [\:∶︓﹕:] + + + [.․。︒﹒.。] + ['ʼ՚᾽᾿’'] + [%٪﹪%] + [؉‰] + [\$﹩$] + [£₤£] + [¥¥] + [₩₩] + [₨₹{Rp}{Rs}] + + + [\-‐‒–⁻₋−➖﹣-] + [,،٫⹁、︐︑﹐﹑,、] + [+⁺₊➕﬩﹢+] + + + [,٫⹁︐﹐,] + [.․﹒.。] + + + + + + + + + + + + + + + EEEE, MMMM d, y G + GyMMMMEEEEd + + + + + MMMM d, y G + GyMMMMd + + + + + MMM d, y G + GyMMMd + + + + + M/d/y GGGGG + GGGGGyMd + + + + + + + {1} 'be' 'ni' 'atswa' {0} + + + + + {1} 'be' 'ni' 'atswa' {0} + + + + + {1}, {0} + + + + + {1}, {0} + + + + E d + y G + MMM y G + MMM d, y G + E, MMM d, y G + M/d + E, M/d + E, MMM d + y G + y G + M/y GGGGG + M/d/y GGGGG + E, M/d/y GGGGG + MMM y G + MMM d, y G + E, MMM d, y G + MMMM y G + QQQ y G + QQQQ y G + + + + y G – y G + y – y G + + + M/y GGGGG – M/y GGGGG + M/y – M/y GGGGG + M/y – M/y GGGGG + + + M/d/y – M/d/y GGGGG + M/d/y GGGGG – M/d/y GGGGG + M/d/y – M/d/y GGGGG + M/d/y – M/d/y GGGGG + + + E, M/d/y – E, M/d/y GGGGG + E, M/d/y GGGGG – E, M/d/y GGGGG + E, M/d/y – E, M/d/y GGGGG + E, M/d/y – E, M/d/y GGGGG + + + MMM y G – MMM y G + MMM – MMM y G + MMM y – MMM y G + + + MMM d – d, y G + MMM d, y G – MMM d, y G + MMM d – MMM d, y G + MMM d, y – MMM d, y G + + + E, MMM d – E, MMM d, y G + E, MMM d, y G – E, MMM d, y G + E, MMM d – E, MMM d, y G + E, MMM d, y – E, MMM d, y G + + + M – M + + + M/d – M/d + M/d – M/d + + + E, M/d – E, M/d + E, M/d – E, M/d 
+ + + MMM – MMM + + + E, MMM d – E, MMM d + E, MMM d – E, MMM d + + + y – y G + + + M/y – M/y GGGGG + M/y – M/y GGGGG + + + M/d/y – M/d/y GGGGG + M/d/y – M/d/y GGGGG + M/d/y – M/d/y GGGGG + + + E, M/d/y – E, M/d/y GGGGG + E, M/d/y – E, M/d/y GGGGG + E, M/d/y – E, M/d/y GGGGG + + + MMM – MMM y G + MMM y – MMM y G + + + MMM d – d, y G + MMM d – MMM d, y G + MMM d, y – MMM d, y G + + + E, MMM d – E, MMM d, y G + E, MMM d – E, MMM d, y G + E, MMM d, y – E, MMM d, y G + + + MMMM – MMMM y G + MMMM y – MMMM y G + + + + + + + + + Aha + Ofl + Ots + Abe + Agb + Otu + Maa + Man + Gbo + Ant + Ale + Afu + + + A + O + O + A + A + O + M + M + G + A + A + A + + + Aharabata + Oflɔ + Otsokrikri + Abeibe + Agbiɛnaa + Otukwajaŋ + Maawɛ + Manyawale + Gbo + Antɔŋ + Alemle + Afuabe + + + + + Aha + Ofl + Ots + Abe + Agb + Otu + Maa + Man + Gbo + Ant + Ale + Afu + + + A + O + O + A + A + O + M + M + G + A + A + A + + + Aharabata + Oflɔ + Otsokrikri + Abeibe + Agbiɛnaa + Otukwajan + Maawɛ + Manyawale + Gbo + Antɔŋ + Alemle + Afuabe + + + + + + + Hɔg + Ju + Juf + Shɔ + Soo + Soh + Hɔɔ + + + H + J + J + S + S + S + H + + + Hɔg + Ju + Juf + Shɔ + Soo + Soh + Hɔɔ + + + Hɔgbaa + Ju + Jufɔ + Shɔ + Soo + Sohaa + Hɔɔ + + + + + Hɔg + Ju + Juf + Shɔ + Soo + Soh + Hɔɔ + + + H + J + J + S + S + S + H + + + Hɔg + Ju + Juf + Shɔ + Soo + Soh + Hɔɔ + + + Hɔgbaa + Ju + Jufɔ + Shɔ + Soo + Sohaa + Hɔɔ + + + + + + + N1 + N2 + N3 + N4 + + + 1 + 2 + 3 + 4 + + + nyɔji etɛ 1 + nyɔji etɛ 2 + nyɔji etɛ 3 + nyɔji etɛ 4 + + + + + N1 + N2 + N3 + N4 + + + nyɔji etɛ 1 + nyɔji etɛ 2 + nyɔji etɛ 3 + nyɔji etɛ 4 + + + + + + + LB + SN + + + LB + SN + + + LEEBI + SHWANE + + + + + LB + SN + + + LB + SN + + + LEEBI + SHWANE + + + + + + Dani Yesu + Dani Ŋmɛnɛŋmɛnɛ Beiaŋ + Yesu Gbele Sɛɛ + Ŋmɛnɛŋmɛnɛ Beiaŋ + + + DY + DŊB + YGS + ŊB + + + + + + EEEE, MMMM d, y + yMMMMEEEEd + + + + + MMMM d, y + yMMMMd + + + + + MMM d, y + yMMMd + + + + + M/d/yy + yyMd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + 
+ + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + {1} 'be' 'ni' 'atswa' {0} + + + + + {1} 'be' 'ni' 'atswa' {0} + + + + + {1}, {0} + + + + + {1}, {0} + + + + E d + y G + MMM y G + MMM d, y G + E, MMM d, y G + M/d + E, M/d + E, MMM d + MMMM 'otsi' W + M/y + M/d/y + E, M/d/y + MMM y + MMM d, y + E, MMM d, y + MMMM y + QQQ y + QQQQ y + Y 'otsi' w + + + + y G – y G + y – y G + + + M/y GGGGG – M/y GGGGG + M/y – M/y GGGGG + M/y – M/y GGGGG + + + M/d/y – M/d/y GGGGG + M/d/y GGGGG – M/d/y GGGGG + M/d/y – M/d/y GGGGG + M/d/y – M/d/y GGGGG + + + E, M/d/y – E, M/d/y GGGGG + E, M/d/y GGGGG – E, M/d/y GGGGG + E, M/d/y – E, M/d/y GGGGG + E, M/d/y – E, M/d/y GGGGG + + + MMM y G – MMM y G + MMM – MMM y G + MMM y – MMM y G + + + MMM d – d, y G + MMM d, y G – MMM d, y G + MMM d – MMM d, y G + MMM d, y – MMM d, y G + + + E, MMM d – E, MMM d, y G + E, MMM d, y G – E, MMM d, y G + E, MMM d – E, MMM d, y G + E, MMM d, y – E, MMM d, y G + + + M – M + + + M/d – M/d + M/d – M/d + + + E, M/d – E, M/d + E, M/d – E, M/d + + + MMM – MMM + + + E, MMM d – E, MMM d + E, MMM d – E, MMM d + + + M/y – M/y + M/y – M/y + + + M/d/y – M/d/y + M/d/y – M/d/y + M/d/y – M/d/y + + + E, M/d/y – E, M/d/y + E, M/d/y – E, M/d/y + E, M/d/y – E, M/d/y + + + MMM – MMM y + MMM y – MMM y + + + MMM d – d, y + MMM d – MMM d, y + MMM d, y – MMM d, y + + + E, MMM d – E, MMM d, y + E, MMM d – E, MMM d, y + E, MMM d, y – E, MMM d, y + + + MMMM – MMMM y + MMMM y – MMMM y + + + + + + + + yinɔ + + + afi + nyɛsɛɛ afi + afi nɛɛ + wɔsɛɛ afi + + + afi + nyɛsɛɛ afi + afi nɛɛ + wɔsɛɛ afi + + + afi + nyɛsɛɛ afi + afi nɛɛ + wɔsɛɛ afi + + + nyɔji etɛ + + + ny. etɛ + + + ny. etɛ + + + nyɔɔŋ + nyɔɔŋ ni ho lɛ + nyɔɔŋ nɛɛ + nyɔɔŋ ni baa lɛ + + + ny. + ny. ni ho lɛ + ny. nɛɛ + ny. ni baa lɛ + + + ny. + ny. ni ho lɛ + ny. nɛɛ + ny. ni baa lɛ + + + otsi + nyɛsɛɛ otsi + otsi nɛɛ + wɔsɛɛ otsi + {0} otsi lɛ mli + + + ot. + nyɛsɛɛ ot. + ot. nɛɛ + wɔsɛɛ ot. + {0} otsi lɛ mli + + + ot. + nyɛsɛɛ ot. + ot. nɛɛ + wɔsɛɛ ot. 
+ {0} otsi lɛ mli + + + gbi + nyɛ + ŋmɛnɛ + wɔ́ + + + gbi + nyɛ + ŋmɛnɛ + wɔ́ + + + gbi + nyɛ + ŋmɛnɛ + wɔ́ + + + otsi lɛ mli gbi + + + LEEBI/SHWANE + + + ŋmɛlɛtswaa + + + ŋm. + + + ŋm. + + + miniti + + + min. + + + min. + + + sɛkɛnsi + + + sɛk. + + + sɛk. + + + maji ni akɛ amɛbe buɔ akɔntaa + + + + {0} Be + {0} Be Yɛ Latsa Beiaŋ + {0} Be Yɛ Fɛi Beiaŋ + + + Be Ni Maji Ni Yɔɔ Jeŋ Fɛɛ Kɛtsuɔ Nii + + + + Maŋtiase Ko Ni Gbɛ́i Bɛ Mli + + + Kasey + + + MakMurdo + + + Yukla + + + Ŋmeŋme Ni Ekumɔ + + + Kurrie + + + Makwarie + + + Nuŋtsɔ Howe + + + St. Barthélemy + + + Okpɔŋɔ Yɛŋ + + + Vankouver + + + Nelson Mɔɔ + + + Dawson Kpaakpo Bibioo + + + Kreston + + + Kakla Wuɔfɔ + + + Swift Karɛnt + + + Kambridge Ŋshɔnine Bibioo + + + Faa Ni Nɛɔ + + + Rankin Ŋshɔnine + + + Sarawa Ŋshɔnine Bibioo + + + Ikaluit + + + Monkton + + + Halifas + + + Goose Ŋshɔnine Bibioo + + + Glase Ŋshɔnine Bibioo + + + Blank-Sablon + + + Kokos + + + Urumki + + + Kosta Rika + + + Kape Verde + + + Kurasao + + + Krismas + + + Nikosia + + + Dominika + + + Kairo + + + Tsuuk + + + + Be Ni Britainbii Kɛtsuɔ Nii Yɛ Latsa Beiaŋ + + + + Ga + + + Ittokortoormiit + + + Konakry + + + Tegusigalpa + + + Abladei Alɛjiadaamɔhe + + + + Be Ni Irelandbii Kɛtsuɔ Nii + + + + Yerusalem + + + Tsagos + + + Jamaika + + + Komoro + + + Kayman + + + Aktau + + + Aktobe + + + Kostanay + + + Kyzylorda + + + St. Lusia + + + Kolombo + + + Kasablanka + + + Tsoibalsan + + + Makao + + + Martinik + + + Nouakshott + + + Tsihuahua + + + Meziko Maŋ + + + Kankun + + + Kutsing + + + Tsatham + + + Ɔkland + + + Muskat + + + Markwesas + + + Karatsi + + + Mikelon + + + Pitkairn + + + Puerto Riko + + + Kata + + + Guadalkanal + + + Ablade Shĩa Ni Yɔɔ Jɔɔ Mli + + + Damasko + + + Turke Wulu + + + Spain Lɛjiadaamɔhe + + + Ankorage + + + New Salem, Dakota Kooyigbɛ + + + Maŋteŋ, Dakota Kooyigbɛ + + + Tsikago + + + Vinsennes, Indiana + + + Osheku Maŋ, Indiana + + + Knos, Indiana + + + Winamak, Indiana + + + Montisello, Kentuky + + + St. 
Vinsent + + + Ho Tsi Minh Maŋtiase + + + + Afrika Teŋgbɛ Be + + + + + Afrika Bokagbɛ Be + + + + + South Afrika Be + + + + + Afrika Anaigbɛ Be + Afrika Anaigbɛ Be Yɛ Fɛi Beiaŋ + Afrika Anaigbɛ Be Yɛ Latsa Beiaŋ + + + + + Alaska Be + Alaska Be Yɛ Fɛi Beiaŋ + Alaska Be Yɛ Latsa Beiaŋ + + + + + Amerika Teŋgbɛbii Abe + Amerika Teŋgbɛbii Abe Yɛ Fɛi Beiaŋ + Amerika Teŋgbɛbii Abe Yɛ Latsa Beiaŋ + + + + + Amerika Bokãgbɛbii Abe + Amerika Bokãgbɛbii Abe Yɛ Fɛi Beiaŋ + Amerika Bokãgbɛbii Abe Yɛ Latsa Beiaŋ + + + + + Amerika Gɔjianɔbii Abe + Amerika Gɔjianɔbii Abe Yɛ Fɛi Beiaŋ + Amerika Gɔjianɔbii Abe Yɛ Latsa Beiaŋ + + + + + Pasifik Be + Pasifik Be Yɛ Fɛi Beiaŋ + Pasifik Be Yɛ Latsa Beiaŋ + + + + + Atlantik Be + Atlantik Be Yɛ Fɛi Beiaŋ + Atlantik Be Yɛ Latsa Beiaŋ + + + + + Azores Be + Azores Be Yɛ Fɛi Beiaŋ + Azores Be Yɛ Latsa Beiaŋ + + + + + Kape Verde Be + Kape Verde Be Yɛ Fɛi Beiaŋ + Kape Verde Be Yɛ Latsa Beiaŋ + + + + + Kuba Be + Kuba Be Yɛ Fɛi Beiaŋ + Kuba Be Yɛ Latsa Beiaŋ + + + + + Antarktik Kɛ Wuoyigbɛbii Ni Wieɔ Frɛntsi Be + + + + + Greenland Bokãgbɛ Be + Greenland Bokãgbɛ Be Yɛ Fɛi Beiaŋ + Greenland Bokãgbɛ Be Yɛ Latsa Beiaŋ + + + + + Greenland Anaigbɛ Be + Greenland Anaigbɛ Be Yɛ Fɛi Beiaŋ + Greenland Anaigbɛ Be Yɛ Latsa Beiaŋ + + + + + Hawaii-Aleutia Be + Hawaii-Aleutia Be Yɛ Fɛi Beiaŋ + Hawaii-Aleutia Be Yɛ Latsa Beiaŋ + + + + + Indian Ŋshɔ Lɛ Be + + + + + Mauritius Be + Mauritius Be Yɛ Fɛi Beiaŋ + Mauritius Be Yɛ Latsa Beiaŋ + + + + + Meziko Kooyi-Anaigbɛ Be + Meziko Kooyi-Anaigbɛ Be Yɛ Fɛi Beiaŋ + Meziko Kooyi-Anaigbɛ Be Yɛ Latsa Beiaŋ + + + + + Meziko Pasifik Be + Meziko Pasifik Be Yɛ Fɛi Beiaŋ + Meziko Pasifik Be Yɛ Latsa Beiaŋ + + + + + Newfoundland Be + Newfoundland Be Yɛ Fɛi Beiaŋ + Newfoundland Be Yɛ Latsa Beiaŋ + + + + + St. Pierre Kɛ Mikelon Be + St. Pierre Kɛ Mikelon Be Yɛ Fɛi Beiaŋ + St. 
Pierre Kɛ Mikelon Be Yɛ Latsa Beiaŋ + + + + + Réunion Be + + + + + Seyshelles Be + + + + + + latn + + latn + + + + + #,##0% + + + + + + + ¤#,##0.00;(¤#,##0.00) + + + + + + Albania Leki + Albania lekii + + + Netherlands Antillea Guilda + Netherlands Antillea guildai + + + Angola Kwanza + Angola kwanzai + + + Argentina Peso + Argentina pesoi + + + Aruba Florin + Aruba florinii + + + Bosnia-Herzegovina Marki Ni Hiɔ Tsakemɔ + Bosnia-Herzegovina markii ni hiɔ tsakemɔ + + + Barbados Dɔla + Barbados dɔlai + + + Bulgaria Levi + Bulgaria levii + + + Burundi Franki + Burundi frankii + + + Bermuda Dɔla + Bermuda dɔlai + + + Bolivia Boliviano + Bolivia bolivianoi + + + Brazil Real + Brazil realii + + + Bahamas Dɔla + Bahamas dɔlai + + + Botswana Pula + Botswana pulai + + + Belarus Rubol + Belarus rubolii + BYN + + + Belize Dɔla + Belize dɔlai + + + Kanada Dɔla + Kanada dɔlai + KA$ + + + Kongo Franki + Kongo frankii + KDF + + + Switzerland Frank + Switzerland frankii + SZF + + + Tsili Peso + Tsili pesoi + + + Kolombia Peso + Kolombia pesoi + KOP + + + Kosta Rika Kolón + Kosta Rika kolónii + KRK + + + Kuba Peso Ni Hiɔ Tsakemɔ + Kuba pesoi ni hiɔ tsakemɔ + KUK + + + Kuba Peso + Kuba pesoi + KUP + + + Tsek Koruna + Tsek korunai + TSK + + + Djibouti Franki + Djibouti frankii + + + Denmark Krone + Denmark kronei + + + Dominika Peso + Dominika pesoi + + + Algeria Dinar + Algeria dinarii + + + Ejipt Pound + Ejipt pounds + + + Eritrea Nakfa + Eritrea nakfai + + + Ethiopia Birr + Ethiopia birri + + + Yuro + yuro + + + Falkland Ŋshɔkpɔi Pound + Falkland Ŋshɔkpɔi pounds + + + Britain Pound + Britain pounds + + + Sidi + + + Ghana Sidi + Ghana sidii + + + Gibraltar Pound + Gibraltar pounds + + + Gambia Dalasi + Gambia dalasii + + + Guinea Franki + Guinea frankii + GNF + + + Guatemala Kuetzal + Guatemala kuetzalii + GTK + K + + + Guyana Dɔla + Guyan dɔlai + + + Hondura Lempira + Hondura lempirai + + + Kroatia Kuna + Kroatia kunai + + + Haiti Gourde + Haiti gourdei + + + Hungary Forinti + 
Hungary forintii + + + Aisland Króna + Aisland krónai + + + Jamaika Dɔla + Jamaika dɔlai + + + Kenya Sheleŋ + Kenya sheleŋ + + + Komoros Franki + Komoros frankii + KF + + + Kayman Ŋshɔkpɔi Dɔla + Kayman Ŋshɔkpɔi dɔlai + + + Liberia Dɔla + Liberia dɔlai + LRD + + + Libia Dinar + Libia dinarii + + + Moroko Dirham + Moroko dirhamii + + + Moldova Leu + Moldova leuii + + + Madagaska Ariari + Madagaska ariarii + + + Makedonia Denari + Makedonia denarii + + + Mauritania Ouguiya + Mauritania ouguiyai + + + Mauritius Rupi + Mauritius rupii + + + Malawi Kwatsa + Malawi kwatsai + + + Meziko Peso + Meziko pesoi + MZ$ + + + Mozambik Metikal + Mozambik metikalii + + + Namibia Dɔla + Namibia dɔlai + + + Anago Naira + Anago nairai + + + Nikaragua Kórdoba + Nikaragua kórdobai + K$ + + + Norway Krone + Norway kronei + + + Panama Balboa + Panama balboai + + + Peru Sol + Peru solii + + + Poland Zloti + Poland zlotii + + + Paraguay Guarani + Paraguay guaranii + + + Romania Leu + Romania leuii + + + Serbia Dinari + Serbia dinarii + + + Russia Rubol + Russia rubolii + + + Rwanda Franki + Rwanda frankii + + + Seyshɛl Rupi + Seyshɛl rupii + SSR + + + Sudan Pound + Sudan pounds + + + Sweden Krona + Sweden kronai + + + St. Helena Pound + St. 
Helena pounds + + + Sierra Leone Leone + Sierra Leone leonei + + + Somali Sheleŋ + Somali sheleŋ + + + Surinam Dɔla + Surinam dɔlai + + + Sudan Anai Pound + Sudan Anai pounds + + + Swazi Lilangeni + Swazi lilangenii + + + Tunisia Dinar + Tunisia dinarii + + + Trinidad Kɛ Tobago Dɔla + Trinidad Kɛ Tobago dɔlai + + + Tanzania Sheleŋ + Tanzania sheleŋ + + + Ukrainia Hryvnia + Ukrainia hryvniai + + + Uganda Sjeleŋ + Uganda sheleŋ + UGS + + + US Dɔla + US dɔlai + $ + + + Uruguay Peso + Uruguay pesoi + + + Venezuela Bolívar + Venezuela bolívarii + + + Karibbean Bokã Dɔla + Karibbean Bokã dɔlai + KB$ + + + Afrika Anai Sefa Franki + Afrika Anai Sefa Frankii + SFA + + + Shika Ko Ni Gbɛ́i Bɛ Mli + (shika ko ni gbɛ́i bɛ mli) + + + South Afrika Randi + South Afrika randii + + + Zambia Kwatsa + Zambia kwatsai + ZMW + + + + gbii {0} + ni ji {0} + + + + + + deg + {0}° + + + % + {0}% + + + afii ohai + afii ohai {0} + + + afii nyɔŋma + afi nyɔŋmai {0} + + + afii + afii {0} + {0} daa afi + + + nyɔji + nyɔji {0} + {0} daa nyɔɔŋ + + + otsii + otsii {0} + {0} daa otsi + + + gbii + gbii {0} + {0} daa gbi + + + ŋmɛlɛtswai + ŋmɛlɛtswai {0} + {0} ŋmɛlɛtswaa fɛɛ ŋmɛlɛtswaa + + + minitii + minitii {0} + {0} miniti fɛɛ miniti + + + sɛkɛnsi + sɛkɛnsii {0} + {0} sɛkɛnsi fɛɛ sɛkɛnsi + + + sɛkɛnsi mlijaa 100 + sɛkɛnsi {0} mlijaa 100 + + + sɛkɛnsi mlijaa 1000 + sɛkɛnsi {0} mlijaa 1000 + + + sɛkɛnsi frim + sɛkɛnsi frim {0} + + + kilomita + kilomitai {0} + {0} kilomita fɛɛ kilomita + + + mitai + mitai {0} + {0} mita fɛɛ mita + + + dɛsimita + dɛsimitai {0} + + + sɛntimitai + sɛntimitai {0} + {0} sɛntimita fɛɛ sɛntimita + + + milimitai + milimitai {0} + + + jeŋ koji ejwɛ + {0} bokã + {0} kooyi + {0} wuoyi + {0} anai + + + + + ao + {0}ao + + + an + {0}an + + + afii + {0} afii + {0}/afi + + + nyɔji + nyɔji {0} + {0}/nyɔɔŋ + + + otsii + {0} otsii + {0}/otsi + + + gbii + gbii {0} + {0}/gbi + + + ŋmɛlɛtswai + ŋm {0} + {0}/ŋm + + + min + min {0} + {0}/min + + + sɛk + sɛk {0} + {0}/s + + + sɛk mlij + sm {0} 
+ + + μsɛk + {0}μs + + + sɛkɛnsifrim + {0}sf + + + km + {0} km + {0}/km + + + m + + + sɛm + {0} sɛm + {0}/sɛm + + + koji + {0} B + {0} K + {0} W + {0} A + + + + + % + {0}% + + + afi + a{0} + + + nyɔɔŋ + {0}n + + + otsi + {0}o + + + gbi + {0}g + + + ŋmɛlɛtswaa + {0}ŋm + + + min + {0}m + + + sɛk + {0}s + + + sɛkmlij + {0}sm + + + km + {0}km + + + m + {0} m + + + mm + {0}mm + + + koji + {0}B + {0}K + {0}W + {0}A + + + + + + {0}, kɛ {1} + {0} kɛ {1} + + + + + hɛɛ:h + dabi:d + + + + und gaa + + + {0}. + {0} {1} + + {given} {given2} {surname} {credentials} + + + {given-informal} {surname} + + + {title} {surname} + + + {given-informal} + + + {given-monogram-allCaps}{given2-monogram-allCaps}{surname-monogram-allCaps} + + + {given-informal-monogram-allCaps}{surname-monogram-allCaps} + + + {given} {given2-initial} {surname} {credentials} + + + {given-informal} {surname} + + + {title} {surname} + + + {given-informal} + + + {surname-monogram-allCaps} + + + {given-informal-monogram-allCaps} + + + {given-initial} {given2-initial} {surname} + + + {given-informal} {surname-initial} + + + {title} {surname} + + + {given-informal} + + + {surname-monogram-allCaps} + + + {given-informal-monogram-allCaps} + + + {surname} {given} {given2} {credentials} + + + {surname} {given-informal} + + + {title} {surname} + + + {given-informal} + + + {surname-monogram-allCaps}{given-monogram-allCaps}{given2-monogram-allCaps} + + + {surname-monogram-allCaps}{given-informal-monogram-allCaps} + + + {surname} {given} {given2-initial} {credentials} + + + {surname} {given-informal} + + + {title} {surname} + + + {given-informal} + + + {surname-monogram-allCaps} + + + {given-informal-monogram-allCaps} + + + {surname} {given-initial} {given2-initial} + + + {surname} {given-initial} + + + {title} {surname} + + + {given-informal} + + + {surname-monogram-allCaps} + + + {given-informal-monogram-allCaps} + + + {surname-core}, {given} {given2} {surname-prefix} + + + {surname}, {given-informal} + + + {surname-core}, 
{given} {given2-initial} {surname-prefix} + + + {surname}, {given-informal} + + + {surname-core}, {given-initial} {given2-initial} {surname-prefix} + + + {surname}, {given-informal} + + + Prof. Dr. + Ada Kornelia + Neele + Eva Sofia + van den + Wolf + Beker Shmidt + M.D. Ph.D. + + + diff --git a/make/data/cldr/common/main/gaa_GH.xml b/make/data/cldr/common/main/gaa_GH.xml new file mode 100644 index 00000000000..94f9d0dd6d5 --- /dev/null +++ b/make/data/cldr/common/main/gaa_GH.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/gd.xml b/make/data/cldr/common/main/gd.xml index bc2f488a7b5..f57731275a3 100644 --- a/make/data/cldr/common/main/gd.xml +++ b/make/data/cldr/common/main/gd.xml @@ -1,6 +1,6 @@ - + + + + + + + + አፋርኛ + አብሐዚኛ + አፍሪቃንስኛ + አምሐረኛ + ዐርቢኛ + አሳሜዛዊ + አያማርኛ + አዜርባይጃንኛ + ባስኪርኛ + ቤላራሻኛ + ቡልጋሪኛ + ቢስላምኛ + በንጋሊኛ + ትበትንኛ + ብሬቶንኛ + ብሊን + ካታላንኛ + ኮርሲካኛ + ቼክኛ + ወልሽ + ዴኒሽ + ጀርመን + ድዞንግኻኛ + ግሪክኛ + እንግሊዝኛ + ኤስፐራንቶ + ስፓኒሽ + ኤስቶኒአን + ባስክኛ + ፐርሲያኛ + ፊኒሽ + ፊጂኛ + ፋሮኛ + ፈረንሳይኛ + ፍሪስኛ + አይሪሽ + እስኮትስ፡ጌልክኛ + ግዕዝኛ + ጋለጋኛ + ጓራኒኛ + ጉጃርቲኛ + ሃውሳኛ + ዕብራስጥ + ሐንድኛ + ክሮሽያንኛ + ሀንጋሪኛ + አርመናዊ + ኢንቴርሊንጓ + እንዶኒሲኛ + እንተርሊንግወ + እኑፒያቅኛ + አይስላንድኛ + ጣሊያንኛ + እኑክቲቱትኛ + ጃፓንኛ + ጃቫንኛ + ጊዮርጊያን + ካዛክኛ + ካላሊሱትኛ + ክመርኛ + ካናዳኛ + ኮሪያኛ + ካሽሚርኛ + ኩርድሽኛ + ኪርጊዝኛ + ላቲንኛ + ሊንጋላኛ + ላውስኛ + ሊቱአኒያን + ላትቪያን + ማላጋስኛ + ማዮሪኛ + ማከዶኒኛ + ማላያላምኛ + ሞንጎላዊኛ + ማራዚኛ + ማላይኛ + ማልቲስኛ + ቡርማኛ + ናኡሩ + ኔፓሊኛ + ደች + ኖርዌጂያን + ኦኪታንኛ + ኦሮምኛ + ኦሪያኛ + ፓንጃቢኛ + ፖሊሽ + ፑሽቶኛ + ፖርቱጋሊኛ + ኵቿኛ + ሮማንስ + ሩንዲኛ + ሮማኒያን + ሞልዳቫዊና + ራሽኛ + ኪንያርዋንድኛ + ሳንስክሪትኛ + ሲንድሂኛ + ሳንጎኛ + ስንሃልኛ + ሲዳምኛ + ስሎቫክኛ + ስሎቪኛ + ሳሞአኛ + ሾናኛ + ሱማልኛ + ልቤኒኛ + ሰርቢኛ + ስዋቲኛ + ሶዞኛ + ሱዳንኛ + ስዊድንኛ + ስዋሂሊኛ + ታሚልኛ + ተሉጉኛ + ታጂኪኛ + ታይኛ + ትግርኛ + ትግረ + ቱርክመንኛ + ታጋሎገኛ + ጽዋናዊኛ + ቶንጋ + ቱርክኛ + ጾንጋኛ + ታታርኛ + ትዊኛ + ኡዊግሁርኛ + ዩክረኒኛ + ኡርዱኛ + ኡዝበክኛ + ቪትናምኛ + ቮላፑክኛ + ዎሎፍኛ + ዞሳኛ + ይዲሻዊኛ + ዮሩባዊኛ + ዡዋንግኛ + ቻይንኛ + ዙሉኛ + + + + + + አንዶራ + የተባበሩት፡አረብ፡ኤምሬትስ + አልባኒያ + አርሜኒያ + አርጀንቲና + ኦስትሪያ + አውስትሬሊያ + አዘርባጃን + ቦስኒያ፡እና፡ሄርዞጎቪኒያ + ባርቤዶስ + ቤልጄም + ቡልጌሪያ + ባህሬን + ቤርሙዳ + ቦሊቪያ + ብራዚል + ቡህታን + 
ቤላሩስ + ቤሊዘ + የመካከለኛው፡አፍሪካ፡ሪፐብሊክ + ስዊዘርላንድ + ቺሊ + ካሜሩን + ቻይና + ኮሎምቢያ + ኬፕ፡ቬርዴ + ሳይፕረስ + ቼክ፡ሪፑብሊክ + ጀርመን + ዴንማርክ + ዶሚኒካ + ዶሚኒክ፡ሪፑብሊክ + አልጄሪያ + ኢኳዶር + ኤስቶኒያ + ግብጽ + ምዕራባዊ፡ሳህራ + ኤርትራ + ስፔን + ኢትዮጵያ + ፊንላንድ + ፊጂ + ሚክሮኔዢያ + ፈረንሳይ + እንግሊዝ + ጆርጂያ + የፈረንሳይ፡ጉዊአና + ጋምቢያ + ጊኒ + ኢኳቶሪያል፡ጊኒ + ግሪክ + ቢሳዎ + ጉያና + ሆንግ፡ኮንግ + ክሮኤሽያ + ሀይቲ + ሀንጋሪ + ኢንዶኔዢያ + አየርላንድ + እስራኤል + ህንድ + ኢራቅ + አይስላንድ + ጣሊያን + ጃማይካ + ጆርዳን + ጃፓን + ካምቦዲያ + ኮሞሮስ + ደቡብ፡ኮሪያ + ሰሜን፡ኮሪያ + ክዌት + ሊባኖስ + ሊቱዌኒያ + ላትቪያ + ሊቢያ + ሞሮኮ + ሞልዶቫ + ማከዶኒያ + ሞንጎሊያ + ማካዎ + ሞሪቴኒያ + ማልታ + ማሩሸስ + ሜክሲኮ + ማሌዢያ + ናሚቢያ + ኒው፡ካሌዶኒያ + ናይጄሪያ + ኔዘርላንድ + ኖርዌ + ኔፓል + ኒው፡ዚላንድ + ፔሩ + የፈረንሳይ፡ፖሊኔዢያ + ፓፑዋ፡ኒው፡ጊኒ + ፖላንድ + ፖርታ፡ሪኮ + ሮሜኒያ + ሰርቢያ + ራሺያ + ሳውድአረቢያ + ሱዳን + ስዊድን + ሲንጋፖር + ስሎቬኒያ + ስሎቫኪያ + ሴኔጋል + ሱማሌ + ሲሪያ + ቻድ + የፈረንሳይ፡ደቡባዊ፡ግዛቶች + ታይላንድ + ታጃኪስታን + ምስራቅ፡ቲሞር + ቱኒዚያ + ቱርክ + ትሪኒዳድ፡እና፡ቶባጎ + ታንዛኒያ + ዩጋንዳ + አሜሪካ + ዩዝበኪስታን + ቬንዙዌላ + የእንግሊዝ፡ድንግል፡ደሴቶች + የአሜሪካ፡ቨርጂን፡ደሴቶች + የመን + ደቡብ፡አፍሪካ + ዛምቢያ + + + + [\u135F ᎐ ᎑ ᎒ ᎓ ᎔ ᎕ ᎖ ᎗ ᎘ ᎙ ሀ ሁ ሂ ሃ ሄ ህ ሆ ለ ሉ ሊ ላ ሌ ል ሎ ሐ ሑ ሒ ሓ ሔ ሕ ሖ መ ሙ ሚ ማ ሜ ም ሞ ሠ ሡ ሢ ሣ ሤ ሥ ሦ ረ ሩ ሪ ራ ሬ ር ሮ ሰ ሱ ሲ ሳ ሴ ስ ሶ ቀ ቁ ቂ ቃ ቄ ቅ ቆ ቈ ቊ ቋ ቌ ቍ በ ቡ ቢ ባ ቤ ብ ቦ ተ ቱ ቲ ታ ቴ ት ቶ ኀ ኁ ኂ ኃ ኄ ኅ ኆ ኈ ኊ ኋ ኌ ኍ ነ ኑ ኒ ና ኔ ን ኖ አ ኡ ኢ ኣ ኤ እ ኦ ከ ኩ ኪ ካ ኬ ክ ኮ ኰ ኲ ኳ ኴ ኵ ወ ዉ ዊ ዋ ዌ ው ዎ ዐ ዑ ዒ ዓ ዔ ዕ ዖ ዘ ዙ ዚ ዛ ዜ ዝ ዞ የ ዩ ዪ ያ ዬ ይ ዮ ደ ዱ ዲ ዳ ዴ ድ ዶ ገ ጉ ጊ ጋ ጌ ግ ጎ ጐ ጒ ጓ ጔ ጕ ጠ ጡ ጢ ጣ ጤ ጥ ጦ ጰ ጱ ጲ ጳ ጴ ጵ ጶ ጸ ጹ ጺ ጻ ጼ ጽ ጾ ፀ ፁ ፂ ፃ ፄ ፅ ፆ ፈ ፉ ፊ ፋ ፌ ፍ ፎ ፐ ፑ ፒ ፓ ፔ ፕ ፖ] + [ሇ ሏ ⶀ ሗ ሟ ᎀ ᎁ ᎂ ᎃ ⶁ ሧ ሯ ⶂ ሷ ⶃ ሸ ሹ ሺ ሻ ሼ ሽ ሾ ሿ ⶄ ቇ ቐ ቑ ቒ ቓ ቔ ቕ ቖ ቘ ቚ ቛ ቜ ቝ ቧ ᎄ ᎅ ᎆ ᎇ ⶅ ቮ ቯ ቷ ⶆ ቿ ⶇ ኇ ኗ ⶈ ኛ ኟ ⶉ ኧ ⶊ ኯ ኸ ኹ ኺ ኻ ኼ ኽ ኾ ዀ ዂ ዃ ዄ ዅ ዏ ዟ ⶋ ዠ ዡ ዢ ዣ ዤ ዥ ዦ ዧ ዷ ⶌ ዸ ዹ ዺ ዻ ዼ ዽ ዾ ዿ ⶍ ጀ ጁ ጂ ጃ ጄ ጅ ጆ ጇ ⶎ ጏ ጘ ጙ ጚ ጛ ጜ ጝ ጞ ጟ ⶓ ⶔ ⶕ ⶖ ጧ ⶏ ጨ ጩ ጪ ጫ ጬ ጭ ጮ ጯ ⶐ ጷ ⶑ ጿ ፇ ፏ ᎈ ᎉ ᎊ ᎋ ፗ ᎌ ᎍ ᎎ ᎏ ⶒ ፘ ፙ ፚ ⶠ ⶡ ⶢ ⶣ ⶤ ⶥ ⶦ ⶨ ⶩ ⶪ ⶫ ⶬ ⶭ ⶮ ⶰ ⶱ ⶲ ⶳ ⶴ ⶵ ⶶ ⶸ ⶹ ⶺ ⶻ ⶼ ⶽ ⶾ ⷀ ⷁ ⷂ ⷃ ⷄ ⷅ ⷆ ⷈ ⷉ ⷊ ⷋ ⷌ ⷍ ⷎ ⷐ ⷑ ⷒ ⷓ ⷔ ⷕ ⷖ ⷘ ⷙ ⷚ ⷛ ⷜ ⷝ ⷞ] + [ሀ ለ ሐ መ ሠ ረ ሰ ቀ ቈ በ ተ ኀ ኈ ነ አ ከ ኰ ወ ዐ ዘ የ ደ ገ ጐ ጠ ጰ ጸ ፀ ፈ ፐ] + + + + + + + + EEEE፥ dd MMMM መዓልት y G + GyMMMMEEEEdd + + + + + dd MMMM y G + GyMMMMdd + + + + + dd-MMM-y G + GyMMMdd + + + + + dd/MM/yy GGGGG + GGGGGyyMMdd + + + + + + + h:mm:ss a 
zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + + ጠሐረ + ከተተ + መገበ + አኀዘ + ግንባት + ሠንየ + ሐመለ + ነሐሰ + ከረመ + ጠቀመ + ኀደረ + ኀሠሠ + + + + + + + + + + + + + + + + + + + + + + + እኁድ + ሰኑይ + ሠሉስ + ራብዕ + ሐሙስ + ዓርበ + ቀዳሚት + + + + + + + + + + + + + + + + + + ጽባሕ + ምሴት + + + ጽባሕ + ምሴት + + + + + + ዓ/ዓ + ዓ/ም + + + + + + EEEE፥ dd MMMM መዓልት y G + GyMMMMEEEEdd + + + + + dd MMMM y + yMMMMdd + + + + + dd-MMM-y + yMMMdd + + + + + dd/MM/yy + yyMMdd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + + + + + + + ¤#,##0.00 + + + + + + የብራዚል ሪል + + + የቻይና ዩአን ረንሚንቢ + + + የኢትዮጵያ ብር + + + አውሮ + + + የእንግሊዝ ፓውንድ ስተርሊንግ + + + የሕንድ ሩፒ + + + የጃፓን የን + + + የራሻ ሩብል + + + የአሜሪካን ዶላር + + + + diff --git a/make/data/cldr/common/main/gez_ER.xml b/make/data/cldr/common/main/gez_ER.xml new file mode 100644 index 00000000000..c8df591a28e --- /dev/null +++ b/make/data/cldr/common/main/gez_ER.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + + + Nfk + + + + diff --git a/make/data/cldr/common/main/gez_ET.xml b/make/data/cldr/common/main/gez_ET.xml new file mode 100644 index 00000000000..84c9083021c --- /dev/null +++ b/make/data/cldr/common/main/gez_ET.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/gl.xml b/make/data/cldr/common/main/gl.xml index e1ef5ec1a20..d2593fc251c 100644 --- a/make/data/cldr/common/main/gl.xml +++ b/make/data/cldr/common/main/gl.xml @@ -1,6 +1,6 @@ - + + + + + + + + avañe’ẽ + + + yvóra + África + América del Norte + América del Sur + Oceanía + América Central + América + Norteamérica + Caribe + Ásia + Európa + América Latina + Argentína + Bolívia + Brasil + Chíle + Colómbia + Ecuador + Union Européa + Eurozóna + Guyána Francésa + Groenlandia + Guyana + México + Peru + Paraguai + Surinam + Naciónes Unídas + Estados Unidos + EE. UU. 
+ Uruguay + Venezuéla + + + + + left-to-right + top-to-bottom + + + + [a ã {ch} e ẽ g {g\u0303} h i ĩ j k l m {mb} n ñ {nd} {ng} {nt} o õ p r {rr} s t u ũ v y ỹ ʼ] + [b c d f q w x z] + [A à {CH} E Ẽ G {G\u0303} H I Ĩ J K L M {MB} N Ñ {ND} {NG} {NT} O Õ P R {RR} S T U Ũ V Y Ỹ ʼ] + + + + + + + + Jasyteĩ + Jasykõi + Jasyapy + Jasyrundy + Jasypo + Jasypoteĩ + Jasypokõi + Jasypoapy + Jasyporundy + Jasypa + Jasypateĩ + Jasypakõi + + + + + + + Arateĩ + Arakõi + Araapy + Ararundy + Arapo + Arapoteĩ + Arapokõi + + + + + + + + ary + + + jasy + + + ára + + + kuehe + ko ára + koʼẽrõ + + + kuehe + ko ára + koʼẽrõ + + + arapokõindy + + + aravo’i + + + aravo’ive + + + + + + Bolivia óra + + + + + Ecuador óra + + + + + Galápagos óra + + + + + Venezuela óra + + + + + + + , + . + + + + + + + + diff --git a/make/data/cldr/common/main/gn_PY.xml b/make/data/cldr/common/main/gn_PY.xml new file mode 100644 index 00000000000..03fbf31e5f8 --- /dev/null +++ b/make/data/cldr/common/main/gn_PY.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/gsw.xml b/make/data/cldr/common/main/gsw.xml index 0004c8c0b62..f9187c89ca7 100644 --- a/make/data/cldr/common/main/gsw.xml +++ b/make/data/cldr/common/main/gsw.xml @@ -1,6 +1,6 @@ - + + + + + + + + 𞄒𞄫𞄱𞄔𞄩𞄴 + + + + + left-to-right + top-to-bottom + + + + [\U0001E131 \U0001E136 \U0001E132 \U0001E133 \U0001E130 \U0001E134 \U0001E135 𞅏 𞄼 𞄽 𞄀 𞄁 𞄂 𞄃 𞄄 𞄅 𞄆 𞄇 𞄈 𞄉 𞄊 𞄋 𞄌 𞄍 𞄎 𞄏 𞄐 𞄑 𞄒 𞄓 𞄔 𞄕 𞄖 𞄗 𞄘 𞄙 𞄚 𞄛 𞄜 𞄝 𞄞 𞄟 𞄠 𞄡 𞄢 𞄣 𞄤 𞄥 𞄦 𞄧 𞄨 𞄩 𞄪 𞄫 𞄬 𞅎] + + [𞄀 𞄁 𞄂 𞄃 𞄄 𞄅 𞄆 𞄇 𞄈 𞄉 𞄊 𞄋 𞄌 𞄍 𞄎 𞄏 𞄐 𞄑 𞄒 𞄓 𞄔 𞄕 𞄖 𞄗 𞄘 𞄙 𞄚 𞄛 𞄜 𞄝 𞄞 𞄟 𞄠 𞄡 𞄢 𞄣 𞄤 𞄥 𞄦 𞄧 𞄨 𞄩 𞄪 𞄫 𞄬] + [\- ‑ , . % + 𞅀 𞅁 𞅂 𞅃 𞅄 𞅅 𞅆 𞅇 𞅈 𞅉] + [\- ‐ ‑ – — , ; \: ! ? . 
… ' ‘ ’ " “ ” ( ) \[ \] § @ * / \& # † ‡ ′ ″] + + + + + + + + 𞄆𞄬 + 𞄛𞄨𞄱𞄄𞄤𞄲𞄨 + 𞄒𞄫𞄰𞄒𞄪𞄱 + 𞄤𞄨𞄱 + 𞄀𞄪𞄴 + 𞄛𞄤𞄱𞄞𞄤𞄦 + 𞄔𞄩𞄴𞄆𞄨𞄰 + 𞄕𞄩𞄲𞄔𞄄𞄰𞄤 + 𞄛𞄤𞄱𞄒𞄤𞄰 + 𞄪𞄱𞄀𞄤𞄴 + 𞄚𞄦𞄲𞄤𞄚𞄄𞄰𞄫 + 𞄒𞄩𞄱𞄔𞄬𞄴 + + + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + + + 𞄆𞄬 + 𞄛𞄨𞄱𞄄𞄤𞄲𞄨 + 𞄒𞄫𞄰𞄒𞄪𞄱 + 𞄤𞄨𞄱 + 𞄀𞄪𞄴 + 𞄛𞄤𞄱𞄞𞄤𞄦 + 𞄔𞄩𞄴𞄆𞄨𞄰 + 𞄕𞄩𞄲𞄔𞄄𞄰𞄤 + 𞄛𞄤𞄱𞄒𞄤𞄰 + 𞄪𞄱𞄀𞄤𞄴 + 𞄚𞄦𞄲𞄤𞄚𞄄𞄰𞄫 + 𞄒𞄩𞄱𞄔𞄬𞄴 + + + + + 𞄆𞄬 + 𞄛𞄨𞄱𞄄𞄤𞄲𞄨 + 𞄒𞄫𞄰𞄒𞄪𞄱 + 𞄤𞄨𞄱 + 𞄀𞄪𞄴 + 𞄛𞄤𞄱𞄞𞄤𞄦 + 𞄔𞄩𞄴𞄆𞄨𞄰 + 𞄕𞄩𞄲𞄔𞄄𞄰𞄤 + 𞄛𞄤𞄱𞄒𞄤𞄰 + 𞄪𞄱𞄀𞄤𞄴 + 𞄚𞄦𞄲𞄤𞄚𞄄𞄰𞄫 + 𞄒𞄩𞄱𞄔𞄬𞄴 + + + 𞄆𞄬 + 𞄛𞄨𞄱𞄄𞄤𞄲𞄨 + 𞄒𞄫𞄰𞄒𞄪𞄱 + 𞄤𞄨𞄱 + 𞄀𞄪𞄴 + 𞄛𞄤𞄱𞄞𞄤𞄦 + 𞄔𞄩𞄴𞄆𞄨𞄰 + 𞄕𞄩𞄲𞄔𞄄𞄰𞄤 + 𞄛𞄤𞄱𞄒𞄤𞄰 + 𞄪𞄱𞄀𞄤𞄴 + 𞄚𞄦𞄲𞄤𞄚𞄄𞄰𞄫 + 𞄒𞄩𞄱𞄔𞄬𞄴 + + + + + + + 𞄎𞄤𞄲 + 𞄈𞄦 + 𞄆𞄨𞄰 + 𞄗𞄄𞄤𞄰𞄦 + 𞄙𞄤𞄱𞄨 + 𞄑𞄤𞄱𞄨 + 𞄊𞄧𞄳 + + + + + + + 𞅁 + 𞅂 + 𞅃 + 𞅄 + + + + + 𞅁 + 𞅂 + 𞅃 + 𞅄 + + + + + + 𞄜𞄆𞄪 + + + 𞄜𞄆𞄪 + + + + + + + 𞄛𞄩 + + + 𞄛𞄩 + + + 𞄛𞄩 + + + + + hmnp + latn + + + 𞅎 + + + + diff --git a/make/data/cldr/common/main/hnj_Hmnp.xml b/make/data/cldr/common/main/hnj_Hmnp.xml new file mode 100644 index 00000000000..0572f3ad943 --- /dev/null +++ b/make/data/cldr/common/main/hnj_Hmnp.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + + + + + + + + Mundus + Africa + America Septentrionalis + America Australis + Oceania + Africa Occidentalis + America Centralis + Africa Orientalis + Africa Septentrionalis + Africa Australis (regio) + America + Caribaeum + Asia Orientalis + Asia Meridiorientalis + Europa Centralis + Australasia + Melanesia + Micronesia (regio) + Polynesia + Asia + Media Asia + Europa + Europa Orientalis + Europa Septentrionalis + Europa Occidentalis + Andorra + Phylarchiarum Arabicarum Confoederatio + Afgania + Antiqua et Barbuda + Anguilla + Albania + Armenia + Angolia + Argentina + Samoa Americana + Austria + Australia + Aruba + Alandia + Atropatene + Bosnia et Herzegovina + Barbata + Bangladesha + Belgica + Burkina Faso + Bulgaria + Baharina + Burundia + Beninum + Insula Sancti Bartholomaei + Bermuda + Bruneium + Bolivia + Brasilia + Insulae Bahamenses + Butania + Birmania + Insula Bouvet + Botswana + Ruthenia Alba + Beliza + Canada + Insulae Cocos seu Keeling + Res publica Democratica Congensis + Res 
publica Africae Mediae + Res publica Congoliae + Helvetia + Litus Eburneum + Insulae Cook + Chilia + Cameronia + Res publica popularis Sinarum + Columbia + Costarica + Cuba + Res publica Capitis Viridis + Insula Curacensis + Insula Christi Natalis + Cyprus + Cechia + Res publica Democratica Germanica + Germania + Gibutum + Dania + Dominica + Res publica Dominicana + Algerium + Aequatoria + Estonia + Aegyptus + Sahara Occidentalis + Erythraea + Hispania + Aethiopia + Finnia + Viti + Insulae Malvinae + Micronesia + Faeroae insulae + Francia + Gabonia + Britanniarum Regnum + Granata + Georgia + Guiana Francica + Lisia + Gana + Gibraltar + Groenlandia + Gambia + Guinea + Guadalupa + Guinea Aequatorensis + Graecia + Guatimalia + Guama + Guinea Bissaviensis + Guiana + Hongcongum + Honduria + Croatia + Haitia + Hungaria + Indonesia + Hibernia + Israël + Monapia + India + Iracum + Irania + Islandia + Italia + Caesarea Insula + Iamaica + Iordania + Iaponia + Kenia + Chirgisia + Cambosia + Kiribati + Insulae Comorianae + Sanctus Christophorus et Nevis + Res publica Popularis Democratica Coreana + Res publica Coreana + Cuvaitum + Insulae Caimanenses + Kazachstania + Laotia + Libanus + Sancta Lucia + Lichtenstenum + Taprobane + Liberia + Lesothum + Lituania + Luxemburgum + Lettonia + Libya + Marocum + Monoecus + Res publica Moldavica + Mons Niger + Madagascaria + Insulae Marsalienses + Res publica Macedonica + Malium + Mongolia + Macaum + Insulae Marianae Septentrionales + Martinica + Mauritania + Montserrat + Melita + Mauritia + Insulae Maldivae + Malavium + Mexicum + Malaesia + Mozambicum + Namibia + Nova Caledonia + Res publica Nigritana + Insula Norfolcia + Nigeria + Nicaragua + Nederlandia + Regnum Nederlandiae + Norvegia + Nepalia + Nauru + Niue + Nova Zelandia + Omania + Panama + Peruvia + Polynesia Francica + Papua Nova Guinea + Philippinae + Pakistania + Polonia + Insulae Sancti Petri et Miquelonensis + Insulae Pitcairn + Portus Dives + Territoria Palaestinensia + 
Portugallia + Belavia + Paraquaria + Quataria + Reunio + Romania + Serbia + Russia + Ruanda + Arabia Saudiana + Insulae Salomonis + Insulae Seisellenses + Sudania + Suecia + Singapura + Sancta Helena, Ascensio et Tristan da Cunha + Slovenia + Slovacia + Mons Leoninus + Res publica Sancti Marini + Senegalia + Somalia + Surinamia + Sudania Australis + Insulae Sancti Thomae et Principis + Salvatoria + Syria + Swazia + Insulae Turcenses et Caicenses + Tzadia + Togum + Thailandia + Tadzikistania + Tokelau + Timoria Orientalis + Turcomannia + Tunesia + Tonga + Turcia + Trinitas et Tabacum + Tuvalu + Res publica Sinarum + Tanzania + Ucraina + Uganda + Civitates Foederatae Americae + Uraquaria + Uzbecia + Civitas Vaticana + Sanctus Vincentius et Granatinae + Venetiola + Virginis Insulae Britannicae + Virginis Insulae Americanae + Vietnamia + Vanuatu + Vallis et Futuna + Samoa + Kosovia + Iemenia + Maiotta + Iugoslavia + Africa Australis + Zambia + Zimbabua + + + + [a b c d e f g h i j k l m n o p q r s t u v w x y z] + [A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + [\- ‑ , . % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‐ ‑ – — , ; \: ! ? . 
… ' ‘ ’ " “ ” ( ) \[ \] § @ * / \& # † ‡ ′ ″] + + + + + + + + + + + + + + Ian + Feb + Mar + Apr + Mai + Iun + Iul + Aug + Sep + Oct + Nov + Dec + + + Ianuarii + Februarii + Martii + Aprilis + Maii + Iunii + Iulii + Augusti + Septembris + Octobris + Novembris + Decembris + + + + + Ian + Feb + Mar + Apr + Mai + Iun + Iul + Aug + Sep + Oct + Nov + Dec + + + Ianuarius + Februarius + Martius + Aprilis + Maius + Iunius + Iulius + Augustus + September + October + November + December + + + + + + + Sol + Dom + Lun + Mar + Mer + Iov + Ven + Sat + Sab + + + dies Solis + Dominica + dies Lunae + dies Martis + dies Mercurii + dies Iovis + dies Veneris + dies Saturni + dies Sabbati + + + + + Sol + Dom + Lun + Mar + Mer + Iov + Ven + Sat + Sab + + + dies Solis + Dominica + dies Lunae + dies Martis + dies Mercurii + dies Iovis + dies Veneris + dies Saturni + Sabbatum + + + + + + + Q1 + Q2 + Q3 + Q4 + + + prima quarta + secunda quarta + tertia quarta + quarta quarta + + + + + Q1 + Q2 + Q3 + Q4 + + + prima quarta + secunda quarta + tertia quarta + quarta quarta + + + + + + + a.m. + p.m. + + + a.m. + p.m. + + + + + + ante Christum natum + post Christum natum + + + a.C.n. + p.C.n. + + + + + + EEEE, 'die' d MMMM y G + GyMMMMEEEEd + + + + + 'die' d MMMM y G + GyMMMMd + + + + + 'die' d MMM y G + GyMMMd + + + + + d M y G + GyMd + + + + + + + HH:mm:ss zzzz + HHmmsszzzz + + + + + HH:mm:ss z + HHmmssz + + + + + HH:mm:ss + HHmmss + + + + + HH:mm + HHmm + + + + + + + {1} 'de' {0} + + + + + {1} 'de' {0} + + + + + {1}, {0} + + + + + {1}, {0} + + + + + + + + eare + + + anno + priore anno + hoc anno + postero anno + + in {0} anno + in {0} annis + + + abhinc {0} annum + abhinc {0} annos + + + + an. + priore an. + hoc an. + postero an. + + in {0} an. + in {0} an. + + + abhinc {0} an. + abhinc {0} an. + + + + quarter + priore quarta + hac quarta + postera quarta + + in {0} quarta + in {0} quartis + + + abhinc {0} quartam + abhinc {0} quartas + + + + qrt. + priore qrt. + hac qrt. + postera qrt. 
+ + in {0} qrt. + in {0} qrt. + + + abhinc {0} qrt. + abhinc {0} qrt. + + + + mensis + priore mense + hac mense + postera mense + + in {0} mense + in {0} mensibus + + + abhinc {0} mensem + abhinc {0} menses + + + + men. + priore men. + hac men. + postera men. + + in {0} men. + in {0} men. + + + abhinc {0} men. + abhinc {0} men. + + + + hebdomas + priore hebdomade + hac hebdomade + postera hebdomade + + in {0} hebdomade + in {0} hebdomadibus + + + abhinc {0} hebdomadem + abhinc {0} hebdomades + + the week of {0} + + + hebd. + priore hebd. + hac hebd. + postera hebd + + in {0} hebd. + in {0} hebd. + + + abhinc {0} hebd. + abhinc {0} hebd. + + + + dies + pridie + hodie + cras + + in {0} die + in {0} diebus + + + abhinc {0} diem + abhinc {0} dies + + + + dies + + in {0} die + in {0} diebus + + + abhinc {0} diem + abhinc {0} dies + + + + day of the week + + + priore die Solis + hoc die Solis + postero die Solis + + in {0} die Solis + in {0} diebus Solis + + + abhinc {0} diem Solis + abhinc {0} dies Solis + + + + prior. die Sol. + hoc die Sol. + post. die Sol. + + in {0} die Sol. + in {0} dieb. Sol. + + + abhinc {0} diem Sol. + abhinc {0} dies Sol. + + + + priore die Lunae + hoc die Lunae + postero die Lunae + + in {0} die Lunae + in {0} diebus Lunae + + + abhinc {0} diem Lunae + abhinc {0} dies Lunae + + + + priore die Lun. + hoc die Lun. + postero die Lun. + + in {0} die Lun. + in {0} diebus Lun. + + + abhinc {0} diem Lun. + abhinc {0} dies Lun. + + + + priore die Martis + hoc die Martis + postero die Martis + + in {0} die Martis + in {0} diebus Martis + + + abhinc {0} diem Martis + abhinc {0} dies Martis + + + + priore die Mar. + hoc die Mar. + postero die Mar. + + in {0} die Mar. + in {0} diebus Mar. + + + abhinc {0} diem Mar. + abhinc {0} dies Mar. + + + + priore die Mercurii + hoc die Mercurii + postero die Mercurii + + in {0} die Mercurii + in {0} diebus Mercurii + + + abhinc {0} diem Mercurii + abhinc {0} dies Mercurii + + + + priore die Merc. + hoc die Merc. 
+ postero die Merc. + + in {0} die Merc. + in {0} diebus Merc. + + + abhinc {0} diem Merc. + abhinc {0} dies Merc. + + + + priore die Iovis + hoc die Iovis + postero die Iovis + + in {0} die Iovis + in {0} diebus Iovis + + + abhinc {0} diem Iovis + abhinc {0} dies Iovis + + + + priore die Iov. + hoc die Iov. + postero die Iov. + + in {0} die Iov. + in {0} diebus Iov. + + + abhinc {0} diem Iov. + abhinc {0} dies Iov. + + + + priore die Veneris + hoc die Veneris + postero die Veneris + + in {0} die Veneris + in {0} diebus Veneris + + + abhinc {0} diem Veneris + abhinc {0} dies Veneris + + + + priore die Ven. + hoc die Ven. + postero die Ven. + + in {0} die Ven. + in {0} diebus Ven. + + + abhinc {0} diem Ven. + abhinc {0} dies Ven. + + + + priore die Saturni + hoc die Saturni + postero die Saturni + + in {0} die Saturni + in {0} diebus Saturni + + + abhinc {0} diem Saturni + abhinc {0} dies Saturni + + + + priore die Sat. + hoc die Sat. + postero die Sat. + + in {0} die Sat. + in {0} diebus Sat. + + + abhinc {0} diem Sat. + abhinc {0} dies Sat. + + + + hora + hac hora + + in {0} hora + in {0} horis + + + abhinc {0} horam + abhinc {0} horas + + + + hr. + + in {0} hr. + in {0} hr. + + + abhinc {0} hr. + abhinc {0} hr. + + + + minuta + hac minuta + + in {0} minuta + in {0} minutis + + + abhinc {0} minutam + abhinc {0} minutas + + + + min. + + in {0} min. + in {0} min. + + + abhinc {0} min. + abhinc {0} min. + + + + secunda + nunc + + in {0} secunda + in {0} secundis + + + abhinc {0} secundam + abhinc {0} secundas + + + + sec. + nunc + + in {0} sec. + in {0} sec. + + + abhinc {0} sec. + abhinc {0} sec. 
+ + + + time zone + + + + + latn + + latn + + 1 + + , +   + ; + % + + + - + + + : + + + + + #,##0.### + + + + + 0 mille + 0 millia + 00 millia + 00 millia + 000 millia + 000 millia + 0 milio + 0 miliones + 00 miliones + 00 miliones + 000 miliones + 000 miliones + 0 miliardum + 0 miliarda + 00 miliarda + 00 miliarda + 000 miliarda + 000 miliarda + 0 milies miliardum + 0 milies miliarda + 00 milies miliarda + 00 milies miliarda + 000 milies miliarda + 000 milies miliarda + + + + + 0M + 0M + 00M + 00M + 000M + 000M + 0 Mn + 0 Mn + 00 Mn + 00 Mn + 000 Mn + 000 Mn + 0 Md + 0 Md + 00 Md + 00 Md + 000 Md + 000 Md + 0 mil Md + 0 mil Md + 00 mil Md + 00 mil Md + 000 mil Md + 000 mil Md + + + + + + Francus Helveticus + + + Euro + + + Marca Finniae + + + Libra sterlingorum + + + Drachma + + + Dollarium Hongkongense + + + Lira Italiana + + + Yen + + + Pensum Mexicanum + + + Corona Norvegiae + + + Nuevo Sol + + + Rubelus Russicus + + + Dollarium Civitatum Foederatarum + + + Argentum + + + Aurum + + + Palladium + + + Platinum + + + + + + {0}, {1} + {0}, {1} + {0} et {1} + {0} et {1} + + + {0}, {1} + {0}, {1} + {0} et {1} + {0} et {1} + + + {0}, {1} + {0}, {1} + {0} et {1} + {0}, {1} + + + {0}, {1} + {0}, {1} + {0} et {1} + {0}, {1} + + + + + ita:i + non:n + + + diff --git a/make/data/cldr/common/main/la_VA.xml b/make/data/cldr/common/main/la_VA.xml new file mode 100644 index 00000000000..c14b6ed224e --- /dev/null +++ b/make/data/cldr/common/main/la_VA.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/lag.xml b/make/data/cldr/common/main/lag.xml index b58b92937ef..38f473a3cc1 100644 --- a/make/data/cldr/common/main/lag.xml +++ b/make/data/cldr/common/main/lag.xml @@ -1,6 +1,6 @@ - + + + + + + + + {0} ({1}) + {0}, {1} + {0}: {1} + + + abcaso + aceh + adangme + adyghe + afrikaans + aghem + ainu + akan + aléuto + altai do meridion + amarico + aragoneise + obolo + angika + arabo + arabo moderno standard + mapuche + arpaho + arabo najd + assameise + asu 
+ asturian + atikamekw + avaro + awadhi + aymara + azerbaigian + azero + baschiro + balineise + basaa + bielloruscio + bemba + bena + burgao + bhojpuri + bislama + bini + siksika + bambara + bengaleise + tibetan + breton + bòddo + bosniaco + bugineise + blin + catalan + cayuga + chakma + cecen + cebuano + chiga + chamorro + chuukeise + mari + choctaw + chipewyan + cherokee + cheyenne + curdo sorani + curdo do mezo + chilcotin + còrso + métchif + cree do sud-levante + cree de ciañe + cree do nòrd-levante + cree moose + algonchin da Carolina + ceco + cree de smeugge + chuvash + galleise + daneise + dakota + dargwa + taita + tedesco + tedesco de l’Austria + tedesco standard da Svissera + dogrib + zarma + dogri + basso sorabo + duala + divehi + jola-fonyi + dzongkha + dazaga + embu + ewe + efik + ekajuk + grego + ingleise + ingleise d’Australia + ingleise do Canadà + ingleise britannico + ingleise (RU) + ingleise d’America + ingleise (SUA) + esperanto + spagnòllo + spagnòllo d’America + spagnòllo da Spagna + spagnòllo do Mescico + estone + basco + ewondo + perscian + dari + fulah + finlandeise + filipin + fijian + faroeise + fòn + franseise + franseise do Canadà + franseise da Svissera + franseise cajun + frison do settentrion + furlan + frisian de ponente + irlandeise + ga + gaelico scoçeise + geez + gilberteise + galiçian + guarani + gorontalo + tedesco da Svissera + gujarati + gusii + manneise + gwichʼin + hausa + haida + hawaiian + haida do meridion + ebreo + hindi + hinglish + hiligaynon + hmong + croato + erto sorabo + creolo de Haiti + ongareise + hupa + halkomelem + ermeno + herero + interlingua + iban + ibibio + indonesian + igbo + yi do settentrion + inuktitut canadeise de ponente + ilocan + ingush + ido + islandeise + italian + inuktitut + giapponeise + lojban + ngomba + machame + giavaneise + georgian + cabilo + kachin + jju + kamba + cabardin + tyap + makonde + cappoverdian + koro + kaingang + khasi + koyra chiini + kikuyu + kuanyama + kazakh + kako + 
groenlandeise + kalenjin + khmer + kimbundu + kannada + corean + konkani + kpelle + kanuri + karchay-balkar + carelian + kurukh + kashmiri + shambala + bafia + colonieise + curdo + kumyk + komi + còrnico + kwakʼwala + kirghiso + latin + giudeo-spagnòllo + langi + luxemburgheise + lesgo + ganda + limburgheise + ligure + lillooet + lakota + lingala + lao + creolo da Louisiana + lozi + luri do settentrion + samia + lituan + luba-katanga + luba-lulua + lunda + luo + lushai + luyia + letton + madureise + magahi + maithili + makasar + masai + moksha + mende + meru + creolo mauriçian + malagascio + makhuwa-meetto + meta’ + marshalleise + maori + mi'kmaq + minangkabau + maçedone + malayalam + mongolo + manipuri + innu-aimun + mohawk + mossi + marathi + maleise + malteise + mudang + moltilengua + muscogee + mirandeise + birman + erzya + mazanderani + nauru + napolitan + nama + norvegin bokmål + ndebele do settentrion + basso tedesco + nepaleise + newari + ndonga + nias + niue + olandeise + sciammengo + kwasio + norvegin nynorsk + ngiemboon + norvegin + nogai + n’ko + ndebele do meridion + sotho do settentrion + nuer + navajo + nyanja + nyankole + oçitan + ojibwa do nòrd-ponente + ojibwa do mezo + oji-cree + ojibwa de ponente + okangan + oromo + ödia + oscetico + punjabi + pangasinan + pampanga + papiamento + palau + pidgin nigerian + pijin + polacco + malecite-passamaquoddy + pashto + portogheise + portogheise do Braxî + portogheise d’Euröpa + quechua + rapanui + rarotonga + rohingya + romancio + rundi + romen + rombo + ruscio + aromen + kinyarwanda + rwa + sanscrito + sandawe + sakha + samburu + santali + ngambay + sangu + sardo + siçilian + scoçeise + sindhi + sami do settentrion + sena + koyraboro senni + sango + tashelhit + shan + sinhala + slovacco + sloven + lushootseed do meridion + samoan + sami de Inari + sami skolt + shona + soninke + sòmalo + arbaneise + serbo + sranan tongo + swati + sotho do meridion + salish di streiti + sundaneise + sukuma + svedeise + 
swahili + comörian + sciriaco + tamil + tutchone do meridion + telugu + timne + teso + tetum + tagico + tagish + thai + tahltan + tigrinya + tigre + turcomanno + klingon + tlingit + tswana + tongan + toki pona + tok pisin + turco + taroko + tsonga + tataro + tutchone do settentrion + tumbuka + tuvalu + tasawaq + tahitian + tuvinian + tamazight de l’Atlante do mezo + udmurt + uiguro + ucrain + umbundu + lengua desconosciua + urdu + uzbeco + vai + venda + vietnamita + vunjo + vallon + walser + wolaytta + waray + wolof + cineise wu + kalmyk + xhosa + soga + yangben + yemba + yiddish + yoruba + nheengatu + cantoneise + cineise cantoneise + tamazight standard do Maròcco + cineise + cineise mandarin + cineise semplificou + cineise mandarin semplificou + cineise tradiçionale + cineise mandarin tradiçionale + zulu + zuni + sensa contegnuo linguistico + zaza + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Mondo + Africa + America do settentrion + America do meridion + Oçeania + Africa de ponente + America do mezo + Africa de levante + Africa do settentrion + Africa do mezo + Africa do meridion + Americhe + America do nòrd + Caraibi + Asia de levante + Asia do meridion + Asia do sud-levante + Euröpa do meridion + Australasia + Melanesia + region da Micronesia + Polinesia + Asia + Asia do mezo + Asia de ponente + Euröpa + Euröpa de levante + Euröpa do settentrion + Euröpa de ponente + Africa subsahariaña + America latiña + Isoa de l’Ascension + Andòrra + Emirati arabi unii + Afghanistan + Antigua e Barbuda + Anguilla + Arbania + Ermenia + Angola + Antartide + Argentiña + Samoa americaña + Austria + Australia + Aruba + Isoe Åland + Azerbaigian + Bòsnia e Herzegòvina + Barbados + Bangladesh + Belgio + Burkina Faso + Borgaia + Bahrein + Burundi + Benin + San Bertomê + Bermuda + Brunei + Bolivia + Caraibi olandeixi + Braxî + Bahamas + Bhutan + Isoa Bouvet + Botswana + Bieloruscia + Belize + Canadà + 
Isoe Cocos (Keeling) + Congo-Kinshasa + Congo (RDC) + Repubrica çentrafricaña + Congo-Brazzaville + Congo (Repubrica) + Svissera + Còsta d’Avöio + Côte d’Ivoire + Isoe Cook + Cile + Cameron + Ciña + Colombia + Isoa de Clipperton + Còsta Rica + Cubba + Cappo Verde + Curaçao + Isoa Christmas + Çipri + Cechia + Repubrica ceca + Germania + Diego Garcia + Djibouti + Danimarca + Dominica + Repubrica dominicaña + Algeria + Çéuta e Melilla + Ecuador + Estònia + Egitto + Sahara de ponente + Eritrea + Spagna + Etiòpia + Union europea + zöna euro + Finlandia + Figi + Isoe Malviñe + Isoe Malviñe (Isoe Falkland) + Micronesia + Isoe Fær Øer + Fransa + Gabon + Regno Unio + RU + Granada + Geòrgia + Guyana franseise + Guernsey + Ghana + Gibertâ + Groenlandia + Gambia + Guinea + Guadaluppa + Guinea equatoiäle + Greçia + Geòrgia do Sud e Isoe Sandwich do Sud + Guatemala + Guam + Guinea Bissau + Guyana + RAS de Hong Kong (Ciña) + Hong Kong + Isoe Heard e McDonald + Honduras + Croaçia + Haiti + Ongaia + Isoe Canäie + Indonesia + Irlanda + Israele + Isoa de Man + India + Tære britanniche de l’oçeano Indian + Iraq + Iran + Islanda + Italia + Jersey + Giamaica + Giordania + Giappon + Kenya + Kirghizistan + Cambòggia + Kiribati + Comöre + San Cristòffa e Nevis + Corea do Nòrd + Corea do Sud + Kuwait + Isoe Cayman + Kazakistan + Laos + Libano + Santa Luçia + Liechtenstein + Sri Lanka + Liberia + Lesotho + Lituania + Luxemburgo + Lettònia + Libia + Maròcco + Monego + Moldavia + Monteneigro + San Martin + Madagascar + Isoe Marshall + Maçedònia do Nòrd + Mali + Myanmar (Birmania) + Mongòlia + RAS de Macao + Macao + Isoe Mariañe de settentrion + Martinica + Mauritania + Montserrat + Malta + Mauritius + Maldive + Malawi + Mescico + Malaysia + Mozambico + Namibia + Neuva Caledònia + Niger + Isoa Norfolk + Nigeria + Nicaragua + Paixi Basci + Norveggia + Nepal + Nauru + Niue + Neuva Zelanda + Aoteratoa Neuva Zelanda + Òman + Panama + Perù + Polinesia fraseise + Papua Neuva Guinea + Filipiñe + 
Pakistan + Polònia + San Pê e Miquelon + Isoe Pitcairn + Puerto Rico + Tære palestineixi + Palestiña + Portugâ + Palau + Paraguay + Qatar + regioin lontañe de l’Oçeania + Réunion + Romania + Serbia + Ruscia + Rwanda + Arabia saudia + Isoe Salomon + Seychelles + Sudan + Sveçia + Scingapô + Sant’Elena + Slovenia + Svalbard e Jan Mayen + Slovacchia + Sierra Leone + San Marin + Senegal + Somalia + Suriname + Sudan do Sud + Sao Tomé e Prinçipe + El Salvador + Sint Maarten + Sciria + Eswatini + Swaziland + Tristan da Cunha + Isoe Turks e Caicos + Chad + Tære australe franseixi + Tögo + Tailandia + Tagikistan + Tokelau + Timor Est + Timor-Leste + Turkmenistan + Tunexia + Tonga + Turchia + Trinidad e Tobago + Tuvalu + Taiwan + Tanzania + Ucraiña + Uganda + Isoe lontañe di SUA + Naçioin Unie + Stati Unii + SUA + Uruguay + Uzbekistan + Çittæ do Vatican + San Viçenso e e Granadiñe + Venessuela + Isoe Vergine britanniche + Isoe Vergine di Stati Unii + Vietnam + Vanuatu + Wallis e Futuna + Samoa + pseudo-açenti + pseudo-bidi + Kòsovo + Yemen + Maiòtta + Sudafrica + Zambia + Zimbabwe + region desconosciua + + + lunäio + formato de monæa + ordine + monæa + scistema oräio (12 ò 24 oe) + stilo de interruçion de linia + scistema de mesuaçion + numeri + + + lunäio buddista + lunäio cineise + lunäio còpto + lunäio dangi + lunäio etiope + lunäio etiope Amete Alem + lunäio gregorian + lunäio ebraico + lunäio islamico + lunäio çivile islamico + lunäio islamico (Umm al-Qura) + lunäio ISO-8601 + lunäio giapponeise + lunäio perscian + lunäio repubrican cineise + formato de monæa contabile + formato de monæa standard + ordine predefinio de Unicode + reçerca generica + ordine standard + scistema oräio à 12 oe (0–11) + scistema oräio à 12 oe (1–12) + scistema oräio à 24 oe (0–23) + scistema oräio à 24 oe (1–24) + stilo de interruçion de linia flescibile + stilo de interruçion de linia standard + stilo de interruçion de linia sforsou + scistema metrico + scistema de mesuaçion imperiale + 
scistema de mesuaçion american + giffre indo-arabe + giffre indo-arabe esteise + numeri ermeni + numeri ermeni piccin + giffre bengaleixi + giffre chakma + giffre devanagari + numeri etiopi + giffre à ampiessa intrega + numeri georgien + numeri greghi + numeri greghi piccin + giffre gujarati + giffre gurmuki + numeri deçimali cineixi + numeri cineixi semplificæ + numeri finansiäi in cineise semplificou + numeri cineixi tradiçionali + numeri finansiäi in cineise tradiçionale + numeri ebraichi + giffre giavaneixi + numeri giapponeixi + numeri finansiäi giapponeixi + giffre khmer + giffre kannada + giffre lao + giffre occidentale + giffre malayalam + giffre meetei mayek + giffre birmañe + giffre ol chiki + giffre ödia + numeri romoen + numeri romoen piccin + giffre tamil tradiçionale + giffre tamil + giffre telugu + giffre tailandeixi + giffre tibetañe + giffre vaii + + + metrico + imperiale + american + + + Lengua: {0} + Scrittua: {0} + Region: {0} + + + + [a à â ä æ b c ç d e é è ê ë f g h i ì î ï j k l m n ñ o ó ò ô ö p q r s t u ù û ü v w x y z] + [õ ō œ ř] + [A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + [\- ‑ , . ' % ‰ + − 0 1 2 3 4 5 6 7 8 9 ª º] + [\- ‐ ‑ – — , ; \: ! ? . 
… ’ " “ ” « » ( ) \[ \] § @ * / \& # † ‡] + + + « + » + + + + + + + + + + EEEE d 'de' MMMM 'do' y G + + + + + d 'de' MMMM 'do' y G + + + + + d/M/y G + + + + + d/M/yy GGGGG + + + + + + + {1}, {0} + + + {1} 'à' {0} + + + + + {1}, {0} + + + {1} 'à' {0} + + + + + {1}, {0} + + + {1}, {0} + + + + + {1}, {0} + + + {1}, {0} + + + + E d + y G + d/M/y GGGGG + LLL y G + d MMM y G + E d MMM y G + H + d/M + E d/M + d MMM + E d MMM + d MMMM + y G + y G + M/y G + d/M/y G + E d/M/y GGGGG + LLL y G + d MMM y G + E d MMM y G + LLLL 'do' y G + QQQ y G + QQQQ y G + + + + y G – y G + y–y G + + + M/y GGGGG – M/y GGGGG + M/y – M/y GGGGG + M/y – M/y GGGGG + + + d/M/y – d/M/y GGGGG + d/M/y GGGGG – d/M/y GGGGG + d/M/y – d/M/y GGGGG + d/M/y – d/M/y GGGGG + + + E d/M/y – E d/M/y GGGGG + E d/M/y GGGGG – E d/M/y GGGGG + E d/M/y – E d/M/y GGGGG + E d/M/y – E d/M/y GGGGG + + + MMM y G – MMM y G + MMM–MMM y G + MMM y – MMM y G + + + d–d MMM, y G + d MMM y G – d MMM y G + d MMM y G – d MMM y G + d MMM y G – d MMM y G + + + E d MMM – E d MMM y G + E d MMM y G – E d MMM y G + E d MMM y – E d MMM y G + E d MMM y – E d MMM y G + + + M–M + + + d/M – d/M + d/M – d/M + + + E d/M – E d/M + E d/M – E d/M + + + MMM–MMM + + + d–d MMM + d MMM – d MMM + + + E d – E d MMM + E d MMM – E d MMM + + + y–y G + + + M/y – M/y GGGGG + M/y – M/y GGGGG + + + d/M/y – d/M/y GGGGG + d/M/y – d/M/y GGGGG + d/M/y – d/M/y GGGGG + + + E d/M/y – E d/M/y GGGGG + E d/M/y – E d/M/y GGGGG + E d/M/y – E d/M/y GGGGG + + + MMM–MMM y G + MMM 'do' y – MMM 'do' y G + + + d–d MMM 'do' y G + d MMM – d MMM 'do' y G + d MMM 'do' y – d MMM 'do' y G + + + E d MMM – E d MMM 'do' y G + E d MMM – E d MMM 'do' y G + E d MMM 'do' y – E d MMM 'do' y G + + + MMMM–MMMM 'do' y G + MMMM 'do' y – MMMM 'do' y G + + + + + + + + + de zen. + de fre. + de mar. + d’arv. + de maz. + de zug. + de lug. + d’ago. + de set. + d’ott. + de nov. + de dex. 
+ + + ZN + FR + MR + AR + MZ + ZG + LG + AG + ST + OT + NV + DX + + + de zenâ + de frevâ + de marso + d’arvî + de mazzo + de zugno + de luggio + d’agosto + de settembre + d’ottobre + de novembre + de dexembre + + + + + zen. + fre. + mar. + arv. + maz. + zug. + lug. + ago. + set. + ott. + nov. + dex. + + + ZN + FR + MR + AR + MZ + ZG + LG + AG + ST + OT + NV + DX + + + zenâ + frevâ + marso + arvî + mazzo + zugno + luggio + agosto + settembre + ottobre + novembre + dexembre + + + + + + + dom. + lun. + mät. + mäc. + zeu. + ven. + sab. + + + D + L + M + M + Z + V + S + + + dom. + lun. + mät. + mäc. + zeu. + ven. + sab. + + + domenega + lunesdì + mätesdì + mäcordì + zeuggia + venardì + sabbo + + + + + dom. + lun. + mät. + mäc. + zeu. + ven. + sab. + + + D + L + M + M + Z + V + S + + + dom. + lun. + mät. + mäc. + zeu. + ven. + sab. + + + domenega + lunesdì + mätesdì + mäcordì + zeuggia + venardì + sabbo + + + + + + + T1 + T2 + T3 + T4 + + + 1 + 2 + 3 + 4 + + + 1º trimestre + 2º trimestre + 3º trimestre + 4º trimestre + + + + + T1 + T2 + T3 + T4 + + + 1º trimestre + 2º trimestre + 3º trimestre + 4º trimestre + + + + + + + mëzaneutte + mëzogiorno + da mattin + do poidisnâ + da seia + da neutte + + + mëzaneutte + m. + mëzogiorno + p. + da mattin + do poidisnâ + da seia + da neutte + + + mëzaneutte + AM + mëzogiorno + PM + da mattin + do poidisnâ + da seia + da neutte + + + + + mëzaneutte + AM + mëzogiorno + PM + mattin + poidisnâ + seia + neutte + + + mëzaneutte + m. + mëzogiorno + p. 
+ mattin + poidisnâ + seia + neutte + + + mëzaneutte + AM + mëzogiorno + PM + mattin + poidisnâ + seia + neutte + + + + + + avanti de Cristo + avanti de l’era commun + dòppo de Cristo + de l’era commun + + + aC + AEC + dC + EC + + + aC + AEC + dC + EC + + + + + + EEEE d MMMM 'do' y + + + + + d MMMM 'do' y + + + + + d MMM 'do' y + + + + + dd/MM/yy + + + + + + + HH:mm:ss zzzz + + + + + HH:mm:ss z + + + + + HH:mm:ss + + + + + HH:mm + + + + + + + {1}, {0} + + + {1} 'à' {0} + + + + + {1}, {0} + + + {1} 'à' {0} + + + + + {1}, {0} + + + {1}, {0} + + + + + {1} {0} + + + {1}, {0} + + + + E d + y G + d/M/y G + MMM y G + d MMM 'do' y G + E d MMM 'do' y G + h:mm:ss a v + HH:mm:ss v + h:mm a v + HH:mm v + d/M + E d/M + d MMM + E d MMM + d MMMM + W'ª' 'settemaña' MMMM + W'ª' 'settemaña' MMMM + M/y + d/M/y + E d/M/y + LLL 'do' y + d MMM 'do' y + E d MMM 'do' y + LLLL 'do' y + QQQ y + QQQQ 'do' y + w'ª' 'settemaña' 'do' Y + w'ª' 'settemaña' 'do' Y + + + + h B – h B + h–h B + + + h:mm B – h:mm B + h:mm–h:mm B + h:mm–h:mm B + + + d–d + + + y G – y G + y–y G + + + M/y G – M/y G + M/y – M/y G + M/y – M/y G + + + d/M/y – d/M/y G + d/M/y G – d/M/y G + d/M/y – d/M/y G + d/M/y – d/M/y G + + + E d/M/y – E d/M/y G + E d/M/y – E d/M/y G + E d/M/y – E d/M/y G + E d/M/y – E d/M/y G + + + LLL 'do' y G – LLL 'do' y G + LLL – LLL 'do' y G + LLL 'do' y – LLL 'do' y G + + + d–d MMM 'do' y G + d MMM 'do' y G – d MMM 'do' y G + d MMM – d MMM 'do' y G + d MMM 'do' y – d MMM 'do' y G + + + E d MMM – E d MMM 'do' y G + E, d MMM 'do' y G – E d MMM 'do' y G + E d MMM – E d MMM 'do' y G + E d MMM 'do' y – E d MMM 'do' y G + + + h:mm a – h:mm a v + h:mm–h:mm a v + h:mm–h:mm a v + + + HH:mm–HH:mm v + HH:mm–HH:mm v + + + h a – h a v + h–h a v + + + HH–HH v + + + M–M + + + d/M – d/M + d/M – d/M + + + E d/M – E d/M + E d/M – E d/M + + + d–d MMM + d MMM – d MMM + + + E d – E d MMM + E d MMM – E d MMM + + + M/y – M/y + M/y – M/y + + + d/M/y – d/M/y + d/M/y – d/M/y + d/M/y – d/M/y + + + E d/M/y – E d/M/y + E d/M/y 
– E d/M/y + E d/M/y – E d/M/y + + + LLL–LLL 'do' y + LLL 'do' y – LLL 'do' y + + + d–d MMM 'do' y + d MMM – d MMM 'do' y + d MMM 'do' y – d MMM 'do' y + + + E d – E d MMM 'do' y + E d MMM – E d MMM 'do' y + E d MMM 'do' y – E d MMM 'do' y + + + LLLL–LLLL 'do' y + LLLL 'do' y – LLLL 'do' y + + + + + + + + era + + + era + + + era + + + anno + l’anno passou + st’anno + l’anno ch’o vëgne + + de chì à {0} anno + de chì à {0} anni + + + l’é {0} anno + l’é {0} anni + + + + anno + l’anno passou + st’anno + l’anno ch’o ven + + de chì à {0} anno + de chì à {0} anni + + + l’é {0} anno + l’é {0} anni + + + + a + anno passou + st’anno + anno ch’o ven + + +{0}a + +{0}a + + + -{0}a + -{0}a + + + + trimestre + o trimestre passou + sto trimestre + o trimestre ch’o vëgne + + de chì à {0} trimestre + de chì à {0} trimestri + + + l’é {0} trimestre + l’é {0} trimestri + + + + trim. + o trim. passou + sto trim. + o trim. ch’o ven + + de chì à {0} trim. + de chì à {0} trim. + + + l’é {0} trim. + l’é {0} trim. + + + + tr + tr. passou + sto tr. + tr. ch’o ven + + +{0}tr + +{0}tr + + + -{0}tr + -{0}tr + + + + meise + o meise passou + sto meise + o meise ch’o vëgne + + de chì à {0} meise + de chì à {0} meixi + + + l’é {0} meise + l’é {0} meixi + + + + meise + o meise passou + sto meise + o meise ch’o ven + + de chì à {0} meise + de chì à {0} meixi + + + l’é {0} meise + l’é {0} meixi + + + + meise + meise passou + sto meise + meise ch’o ven + + +{0}meise + +{0}meixi + + + -{0}meise + -{0}meixi + + + + settemaña + a settemaña passâ + sta settemaña + a settemaña ch’a vëgne + + de chì à {0} settemaña + de chì à {0} settemañe + + + l’é {0} settemaña + l’é {0} settemañe + + a settemaña de {0} + + + sett. + a sett. passâ + sta sett. + a sett. ch’a ven + + de chì à {0} sett. + de chì à {0} sett. + + + l’é {0} sett. + l’é {0} sett. + + a sett. 
de {0} + + + sett + sett passâ + sta sett + sett ch’a ven + + +{0}sett + +{0}sett + + + -{0}sett + -{0}sett + + sett de {0} + + + settemaña do meise + + + sett. do meise + + + sett do meise + + + giorno + vëi + ancheu + doman + + de chì à {0} giorno + de chì à {0} giorni + + + l’é {0} giorno + l’é {0} giorni + + + + giorno + vëi + ancheu + doman + + de chì à {0} giorno + de chì à {0} giorni + + + l’é {0} giorno + l’é {0} giorni + + + + g + vëi + ancheu + doman + + +{0}g + +{0}g + + + -{0}g + -{0}g + + + + giorno de l’anno + + + giorno de l’anno + + + g de l’anno + + + giorno da settemaña + + + giorno da sett. + + + g da sett + + + giorno do meise + + + giorno do meise + + + g do meise + + + domenega passâ + sta domenega + domenega ch’a vëgne + + de chì à {0} domenega + de chì à {0} domeneghe + + + l’é {0} domenega + l’é {0} domeneghe + + + + dom. passâ + sta dom. + dom. ch’a ven + + de chì à {0} dom. + de chì à {0} dom. + + + l’é {0} dom. + l’é {0} dom. + + + + dom. passâ + sta dom. + dom. ch’a ven + + de chì à {0} dom. + de chì à {0} dom. + + + l’é {0} dom. + l’é {0} dom. + + + + lunesdì passou + sto lunesdì + lunesdì ch’o vëgne + + de chì à {0} lunesdì + de chì à {0} lunesdì + + + l’é {0} lunesdì + l’é {0} lunesdì + + + + lun. passou + sto lun. + lun. ch’o ven + + de chì à {0} lun. + de chì à {0} lun. + + + l’é {0} lun. + l’é {0} lun. + + + + lun. passou + sto lun. + lun. ch’o ven + + de chì à {0} lun. + de chì à {0} lun. + + + l’é {0} lun. + l’é {0} lun. + + + + mätesdì passou + sto mätesdì + mätesdì ch’o vëgne + + de chì à {0} mätesdì + de chì à {0} mätesdì + + + l’é {0} mätesdì + l’é {0} mätesdì + + + + mät. passou + sto mät. + mät. ch’o ven + + de chì à {0} mät. + de chì à {0} mät. + + + l’é {0} mät. + l’é {0} mät. + + + + mät. passou + sto mät. + mät. ch’o ven + + de chì à {0} mät. + de chì à {0} mät. + + + l’é {0} mät. + l’é {0} mät. 
+ + + + mäcordì passou + sto mäcordì + mäcordì ch’o vëgne + + de chì à {0} mäcordì + de chì à {0} mäcordì + + + l’é {0} mäcordì + l’é {0} mäcordì + + + + mäc. passou + sto mäc. + mäc. ch’o ven + + de chì à {0} mäc. + de chì à {0} mäc. + + + l’é {0} mäc. + l’é {0} mäc. + + + + mäc. passou + sto mäc. + mäc. ch’o ven + + de chì à {0} mäc. + de chì à {0} mäc. + + + l’é {0} mäc. + l’é {0} mäc. + + + + zeuggia passâ + sta zeuggia + zeuggia ch’a vëgne + + de chì à {0} zeuggia + de chì à {0} zeugge + + + l’é {0} zeuggia + l’é {0} zeugge + + + + zeu. passâ + sta zeu. + zeu. ch’a ven + + de chì à {0} zeu. + de chì à {0} zeu. + + + l’é {0} zeu. + l’é {0} zeu. + + + + zeu. passâ + sta zeu. + zeu. ch’a ven + + de chì à {0} zeu. + de chì à {0} zeu. + + + l’é {0} zeu. + l’é {0} zeu. + + + + venardì passou + sto venardì + venardì ch’o vëgne + + de chì à {0} venardì + de chì à {0} venardì + + + l’é {0} venardì + l’é {0} venardì + + + + ven. passou + sto ven. + ven. ch’o ven + + de chì à {0} ven. + de chì à {0} ven. + + + l’é {0} ven. + l’é {0} ven. + + + + ven. passou + sto ven. + ven. ch’o ven + + de chì à {0} ven. + de chì à {0} ven. + + + l’é {0} ven. + l’é {0} ven. + + + + sabbo passou + sto sabbo + sabbo ch’o vëgne + + de chì à {0} sabbo + de chì à {0} sabbi + + + l’é {0} sabbo + l’é {0} sabbi + + + + sab. passou + sto sab. + sab. ch’o ven + + de chì à {0} sab. + de chì à {0} sab. + + + l’é {0} sab. + l’é {0} sab. + + + + sab. passou + sto sab. + sab. ch’o ven + + de chì à {0} sab. + de chì à {0} sab. + + + l’é {0} sab. + l’é {0} sab. + + + + AM/PM + + + AM/PM + + + AM/PM + + + oa + st’oa chì + + de chì à {0} oa + de chì à {0} oe + + + l’é {0} oa + l’é {0} oe + + + + oa + st’oa chì + + de chì à {0} oa + de chì à {0} oe + + + l’é {0} oa + l’é {0} oe + + + + h + st’oa chì + + +{0}h + +{0}h + + + -{0}h + -{0}h + + + + menuto + sto menuto chì + + de chì à {0} menuto + de chì à {0} menuti + + + l’é {0} menuto + l’é {0} menuti + + + + men + sto men. 
chì + + de chì à {0} men + de chì à {0} men + + + l’é {0} men + l’é {0} men + + + + men + sto men. chì + + +{0}men + +{0}men + + + -{0}men + -{0}men + + + + segondo + oua + + de chì à {0} segondo + de chì à {0} segondi + + + l’é {0} segondo + l’é {0} segondi + + + + s + oua + + de chì à {0} s + de chì à {0} s + + + l’é {0} s + l’é {0} s + + + + s + oua + + +{0}s + +{0}s + + + -{0}s + -{0}s + + + + fuso oräio + + + fuso + + + fuso + + + + +HH:mm;−HH:mm + UTC{0} + UTC + oa: {0} + oa de stæ: {0} + oa standard: {0} + {1} ({0}) + + + tempo universale coordinou + + + + çittæ desconosciua + + + Andòrra + + + Tiraña + + + Vostok + + + Còrdova + + + Vienna + + + Bruxelles + + + Bahrein + + + San Bertomê + + + San Poulo + + + Zurigo + + + Pasqua + + + Bogotà + + + Còsta Rica + + + Cappo Verde + + + Curaçao + + + Praga + + + Argê + + + Canäie + + + Çéuta + + + Addis Abeba + + + Figi + + + Fær Øer + + + Pariggi + + + + oa de stæ britannica + + Londra + + + Granada + + + Caieña + + + Gibertâ + + + Guadaluppa + + + Atene + + + Geòrgia do sud + + + Zagabria + + + Giacarta + + + + oa de stæ d’Irlanda + + Doblin + + + Gerusalemme + + + Isoa de Man + + + Calcutta + + + Romma + + + Nairöbi + + + Comöre + + + San Cristoffa + + + Santa Luçia + + + Luxemburgo + + + Monego + + + Ulan Bator + + + Martinica + + + Maldive + + + Çittæ do Mescico + + + Nouméa + + + Òslo + + + Marcheixi + + + Varsavia + + + Azore + + + Lisboña + + + Réunion + + + Belgraddo + + + Mosca + + + Ekaterinburg + + + Cita + + + Stoccolma + + + Scingapô + + + Sant’Elena + + + Lubiaña + + + San Marin + + + Mogadiscio + + + Sao Tomé + + + Damasco + + + N’Djamena + + + Lomé + + + Tunexi + + + Samarcanda + + + Vatican + + + San Viçenso + + + Maiòtta + + + + oa de l’Afghanistan + + + + + oa de l’Africa do mezo + + + + + oa de l’Africa de levante + + + + + oa de l’Africa do meridion + + + + + oa de l’Africa de ponente + oa standard de l’Africa de ponente + oa de stæ de l’Africa de ponente + + + + + oa de l’Alaska + oa 
standard de l’Alaska + oa de stæ de l’Alaska + + + + + oa de l’Amassònia + oa standard de l’Amassònia + oa de stæ de l’Amassònia + + + + + oa do mezo nordamericaña + oa standard do mezo nordamericaña + oa de stæ do mezo nordamericaña + + + + + oa do levante nordamericaña + oa standard do levante nordamericaña + oa de stæ do levante nordamericaña + + + + + oa de Montagne Alliggiæ + oa standard de Montagne Alliggiæ + oa de stæ de Montagne Alliggiæ + + + + + oa do Paçifico nordamericaña + oa standard do Paçifico nordamericaña + oa de stæ do Paçifico nordamericaña + + + + + oa de Apia + oa standard de Apia + oa de stæ de Apia + + + + + oa de l’Arabia + oa standard de l’Arabia + oa de stæ de l’Arabia + + + + + oa de l’Argentiña + oa standard de l’Argentiña + oa de stæ de l’Argentiña + + + + + oa de l’Argentiña de ponente + oa standard de l’Argentiña de ponente + oa de stæ de l’Argentiña de ponente + + + + + oa de l’Ermenia + oa standard de l’Ermenia + oa de stæ de l’Ermenia + + + + + oa de l’Atlantico nordamericaña + oa standard de l’Atlantico nordamericaña + oa de stæ de l’Atlantico nordamericaña + + + + + oa de l’Australia de mezo + oa standard de l’Australia de mezo + oa de stæ de l’Australia de mezo + + + + + oa de l’Australia do mezo-ponente + oa standard de l’Australia do mezo-ponente + oa de stæ de l’Australia do mezo-ponente + + + + + oa de l’Australia de levante + oa standard de l’Australia de levante + oa de stæ de l’Australia de levante + + + + + oa de l’Australia de ponente + oa standard de l’Australia de ponente + oa de stæ de l’Australia de ponente + + + + + oa de l’Azerbaigian + oa standard de l’Azerbaigian + oa de stæ de l’Azerbaigian + + + + + oa de Azore + oa standard de Azore + oa de stæ de Azore + + + + + oa do Bangladesh + oa standard do Bangladesh + oa de stæ do Bangladesh + + + + + oa do Bhutan + + + + + oa da Bolivia + + + + + oa de Brasilia + oa standard de Brasilia + oa de stæ de Brasilia + + + + + oa do Brunei Darussalam + + + + + oa de Cappo 
Verde + oa standard de Cappo Verde + oa de stæ de Cappo Verde + + + + + oa de Chamorro + + + + + oa de Chatham + oa standard de Chatham + oa de stæ de Chatham + + + + + oa do Cile + oa standard do Cile + oa de stæ do Cile + + + + + oa da Ciña + oa standard da Ciña + oa de stæ da Ciña + + + + + oa de Choibalsan + oa standard de Choibalsan + oa de stæ de Choibalsan + + + + + oa de l’isoa Christmas + + + + + oa de isoe Cocos + + + + + oa da Colombia + oa standard da Colombia + oa de stæ da Colombia + + + + + oa de isoe Cook + oa standard de isoe Cook + oa de stæ de isoe Cook + + + + + oa de Cubba + oa standard de Cubba + oa de stæ de Cubba + + + + + oa de Davis + + + + + oa de Dumont-d’Urville + + + + + oa de Timor Est + + + + + oa de l’isoa de Pasqua + oa standard de l’isoa de Pasqua + oa de stæ de l’isoa de Pasqua + + + + + oa de l’Ecuador + + + + + oa de l’Euröpa do mezo + oa standard de l’Euröpa do mezo + oa de stæ de l’Euröpa do mezo + + + + + oa de l’Euröpa de levante + oa standard de l’Euröpa de levante + oa de stæ de l’Euröpa de levante + + + + + oa de Kaliningrad + + + + + oa de l’Euröpa de ponente + oa standard de l’Euröpa de ponente + oa de stæ de l’Euröpa de ponente + + + + + oa de isoe Malviñe + oa standard de isoe Malviñe + oa de stæ de isoe Malviñe + + + + + oa de Figi + oa standard de Figi + oa de stæ de Figi + + + + + oa da Guyana franseise + + + + + oa de Tære australe e antartiche franseixi + + + + + oa de Galapagos + + + + + oa de Gambier + + + + + oa da Geòrgia + oa standard da Geòrgia + oa de stæ da Geòrgia + + + + + oa de isoe Gilbert + + + + + oa do meridian de Greenwich + + + + + oa da Groenlandia de levante + oa standard da Groenlandia de levante + oa de stæ da Groenlandia de levante + + + + + oa da Groenlandia de ponente + oa standard da Groenlandia de ponente + oa de stæ da Groenlandia de ponente + + + + + oa standard do Gorfo + + + + + oa da Guyana + + + + + oa de Hawaii-Aleutiñe + oa standard de Hawaii-Aleutiñe + oa de stæ de 
Hawaii-Aleutiñe + + + + + oa de Hong Kong + oa standard de Hong Kong + oa de stæ de Hong Kong + + + + + oa de Hovd + oa standard de Hovd + oa de stæ de Hovd + + + + + oa de l’India + + + + + oa de l’Oçeano Indian + + + + + oa de l’Indociña + + + + + oa de l’Indonesia de mezo + + + + + oa de l’Indonesia de levante + + + + + oa de l’Indonesia de ponente + + + + + oa de l’Iran + oa standard de l’Iran + oa de stæ de l’Iran + + + + + oa de Irkutsk + oa standard de Irkutsk + oa de stæ de Irkutsk + + + + + oa d’Israele + oa standard d’Israele + oa de stæ d’Israele + + + + + oa do Giappon + oa standard do Giappon + oa de stæ do Giappon + + + + + oa do Kazakistan de levante + + + + + oa do Kazakistan de ponente + + + + + oa da Corea + oa standard da Corea + oa de stæ da Corea + + + + + oa do Kosrae + + + + + oa de Krasnoyarsk + oa standard de Krasnoyarsk + oa de stæ de Krasnoyarsk + + + + + oa do Kirghizistan + + + + + oa de isoe da Linia + + + + + oa de Lord Howe + oa standard de Lord Howe + oa de stæ de Lord Howe + + + + + oa de l’isoa Macquarie + + + + + oa de Magadan + oa standard de Magadan + oa de stæ de Magadan + + + + + oa da Malesia + + + + + oa de Maldive + + + + + oa de Marcheixi + + + + + oa de isoe Marshall + + + + + oa de Mauritius + oa standard de Mauritius + oa de stæ de Mauritius + + + + + oa de Mawson + + + + + oa do Mescico do nòrd-ponente + oa standard do Mescico do nòrd-ponente + oa de stæ do Mescico do nòrd-ponente + + + + + oa do Paçifico mescicaña + oa standard do Paçifico mescicaña + oa de stæ do Paçifico mescicaña + + + + + oa d’Ulan Bator + oa standard d’Ulan Bator + oa de stæ d’Ulan Bator + + + + + oa de Mosca + oa standard de Mosca + oa de stæ de Mosca + + + + + oa da Birmania + + + + + oa de Nauru + + + + + oa do Nepal + + + + + oa da Neuva Caledònia + oa standard da Neuva Caledònia + oa de stæ da Neuva Caledònia + + + + + oa da Neuva Zelanda + oa standard da Neuva Zelanda + oa de stæ da Neuva Zelanda + + + + + oa de Tæraneuva + oa standard de 
Tæraneuva + oa de stæ de Tæraneuva + + + + + oa de Niue + + + + + oa de l’isoa Norfolk + oa standard de l’isoa Norfolk + oa de stæ de l’isoa Norfolk + + + + + oa de Fernando de Noronha + oa standard de Fernando de Noronha + oa de stæ de Fernando de Noronha + + + + + oa de Novosibirsk + oa standard de Novosibirsk + oa de stæ de Novosibirsk + + + + + oa de Òmsk + oa standard de Òmsk + oa de stæ de Òmsk + + + + + oa do Pakistan + oa standard do Pakistan + oa de stæ do Pakistan + + + + + oa de Palau + + + + + oa da Papua Neuva Guinea + + + + + oa do Paraguay + oa standard do Paraguay + oa de stæ do Paraguay + + + + + oa do Perù + oa standard do Perù + oa de stæ do Perù + + + + + oa de Filipiñe + oa standard de Filipiñe + oa de stæ de Filipiñe + + + + + oa de isoe Phoenix + + + + + oa de San Pê e Miquelon + oa standard de San Pê e Miquelon + oa de stæ de San Pê e Miquelon + + + + + oa de Pitcairn + + + + + oa de Pohnpei + + + + + oa de Pyongyang + + + + + oa da Réunion + + + + + oa de Rothera + + + + + oa de Sakhalin + oa standard de Sakhalin + oa de stæ de Sakhalin + + + + + oa de Samoa + oa standard de Samoa + oa de stæ de Samoa + + + + + oa de Seychelles + + + + + oa de Scingapô + + + + + oa de isoe Solomon + + + + + oa da Geòrgia do sud + + + + + oa do Suriname + + + + + oa de Syowa + + + + + oa de Tahiti + + + + + oa de Taipei + oa standard de Taipei + oa de stæ de Taipei + + + + + oa do Tagikistan + + + + + oa de Tokelau + + + + + oa de Tonga + oa standard de Tonga + oa de stæ de Tonga + + + + + oa do Chuuk + + + + + oa do Turkmenistan + oa standard do Turkmenistan + oa de stæ do Turkmenistan + + + + + oa de Tuvalu + + + + + oa de l’Uruguay + oa standard de l’Uruguay + oa de stæ de l’Uruguay + + + + + oa de l’Uzbekistan + oa standard de l’Uzbekistan + oa de stæ de l’Uzbekistan + + + + + oa de Vanuatu + oa standard de Vanuatu + oa de stæ de Vanuatu + + + + + oa do Venessuela + + + + + oa de Vladivostok + oa standard de Vladivostok + oa de stæ de Vladivostok + + + + 
+ oa de Volgograd + oa standard de Volgograd + oa de stæ de Volgograd + + + + + oa de Vostok + + + + + oa de l’isoa de Wake + + + + + oa de Wallis e Futuna + + + + + oa de Yakutsk + oa standard de Yakutsk + oa de stæ de Yakutsk + + + + + oa d’Ekaterinburg + oa standard d’Ekaterinburg + oa de stæ d’Ekaterinburg + + + + + oa do Yukon + + + + + + latn + + latn + + 1 + + , + . + % + + + - + E + × + + + NaN + : + + + + + mille + 0 mia + 00 mia + 00 mia + 000 mia + 000 mia + 0 mion + 0 mioin + 00 mioin + 00 mioin + 000 mioin + 000 mioin + 0 miliardo + 0 miliardi + 00 miliardi + 00 miliardi + 000 miliardi + 000 miliardi + mille miliardi + 0 mia miliardi + 00 mia miliardi + 00 mia miliardi + 000 mia miliardi + 000 mia miliardi + + + + + 0 k + 0 k + 00 k + 00 k + 000 k + 000 k + 0 Mio + 0 Mio + 00 Mio + 00 Mio + 000 Mio + 000 Mio + 0 Mld + 0 Mld + 00 Mld + 00 Mld + 000 Mld + 000 Mld + 0 Bio + 0 Bio + 00 Bio + 00 Bio + 000 Bio + 000 Bio + + + + + + + #,##0% + + + + + + + #,##0.00 ¤ + + + #,##0.00 ¤;(#,##0.00 ¤) + #,##0.00;(#,##0.00) + + + + + 0 k ¤ + 0 k ¤ + 00 k ¤ + 00 k ¤ + 000 k ¤ + 000 k ¤ + 0 Mio ¤ + 0 Mio ¤ + 00 Mio ¤ + 00 Mio ¤ + 000 Mio ¤ + 000 Mio ¤ + 0 Mld ¤ + 0 Mld ¤ + 00 Mld ¤ + 00 Mld ¤ + 000 Mld ¤ + 000 Mld ¤ + 0 Bio ¤ + 0 Bio ¤ + 00 Bio ¤ + 00 Bio ¤ + 000 Bio ¤ + 000 Bio ¤ + + + {0} {1} + {0} {1} + + + + dirham di Emirati Arabi Unii + dirham di EAU + dirham di EAU + + + afghani + afghani + afghani + + + lek arbaneise + lek arbaneise + lekë arbaneixi + + + dram ermeno + dram ermeno + dram ermeni + + + fiorin de Antille olandeixi + fiorin de Antille olandeixi + fiorin de Antille olandeixi + + + kwanza angolan + kwanza angolan + kwanza angolen + + + peso argentin + peso argentin + pesos argentin + + + dòllao australian + dòllao australian + dòllai australien + AUD + + + fiorin d’Aruba + fiorin d’Aruba + fiorin d’Aruba + + + manat azero + manat azero + manat azeri + + + marco convertibile da Bòsnia-Herzegòvina + marco convertibile da Bòsnia-Herzegòvina + marchi 
convertibili da Bòsnia-Herzegòvina + + + dòllao de Barbados + dòllao de Barbados + dòllai de Barbados + + + taka bengaleise + taka bengaleise + taka bengaleixi + + + lev burgao + lev burgao + leva burgai + + + dinar do Bahrein + dinar do Bahrein + dinar do Bahrein + + + franco do Burundi + franco do Burundi + franchi do Burundi + + + dòllao de Bermuda + dòllao de Bermuda + dòllai de Bermuda + + + dòllao do Brunei + dòllao do Brunei + dòllai do Brunei + + + bolivian + bolivian + bolivien + + + real brasilian + real brasilian + reais brasilien + BRL + + + dòllao de Bahamas + dòllao de Bahamas + dòllai de Bahamas + + + ngultrum bhutaneise + ngultrum bhutaneise + ngultrum bhutaneixi + + + pula do Botswana + pula do Botswana + pula do Botswana + + + rubo belaruscio + rubo belaruscio + rubi belarusci + + + dòllao do Belize + dòllao do Belize + dòllai do Belize + + + dòllao canadeise + dòllao canadeise + dòllai canadeixi + CAD + + + franco congoleise + franco congoleise + franchi congoleixi + + + franco svissero + franco svissero + franchi svisseri + + + peso cileno + peso cileno + pesos cileni + + + renmimbi cineise (offshore) + renmimbi cineise (offshore) + renmimbi cineixi (offshore) + + + renmimbi cineise + renmimbi cineise + renmimbi cineixi + CNY + + + peso colombian + peso colombian + pesos colombien + + + colón costarican + colón costarican + colones costarichen + + + peso cuban convertibile + peso cuban convertibile + pesos cuben convertibili + + + peso cuban + peso cuban + pesos cuben + + + escudo capoverdian + escudo capoverdian + escudos capoverdien + + + coroña ceca + coroña ceca + coroñe ceche + + + franco do Djibouti + franco do Djibouti + franchi do Djibouti + + + coroña daneise + coroña daneise + coroñe daneixi + + + peso dominican + peso dominican + pesos dominichen + + + dinar algerian + dinar algerian + dinar algerien + + + sterliña egiçiaña + sterliña egiçiaña + sterliñe egiçiañe + + + nafka eritreo + nafka eritreo + nafka eritrëi + + + birr etiope + 
birr etiope + birr etiopi + + + euro + euro + euro + + + dòllao de Figi + dòllao de Figi + dòllai de Figi + + + sterliña de Malviñe + sterliña de Malviñe + sterliñe de Malviñe + + + sterliña britannica + sterliña britannica + sterliñe britanniche + GBP + + + lari georgian + lari georgian + lari georgien + + + cedi ghaneise + cedi ghaneise + cedi ghaneixi + + + sterliña de Gibertâ + sterliña de Gibertâ + sterliñe de Gibertâ + + + dalasi gambian + dalasi gambian + dalasi gambien + + + franco da Guinea + franco da Guinea + franchi da Guinea + + + quetzal guatemalteco + quetzal guatemalteco + quetzal guatemaltechi + + + dòllao da Guyana + dòllao da Guyana + dòllai da Guyana + + + dòllao de Hong Kong + dòllao de Hong Kong + dòllai de Hong Kong + HKD + + + lempira honduregna + lempira honduregna + lempire honduregne + + + kuna croata + kuna croata + kune croate + + + gourde haitian + gourde haitian + gourde haitien + + + fiorin ongareise + fiorin ongareise + fiorin ongareixi + + + rupia indonesiaña + rupia indonesiaña + rupie indonesiañe + + + neuvo sciclo israelian + neuvo sciclo israelian + neuvi scicli israelian + + + rupia indiaña + rupia indiaña + rupie indiañe + INR + + + dinar irachen + dinar irachen + dinar irachen + + + rial iranian + rial iranian + rial iranien + + + coroña islandeise + coroña islandeise + coroñe islandeixi + + + dòllao giamaican + dòllao giamaican + dòllai giamaichen + + + dinar giordan + dinar giordan + dinar giorden + + + yie giapponeise + yien giapponeise + yien giapponeixi + JPY + + + scellin Kenyan + scellin Kenyan + scellin Kenyen + + + som kirghiso + som kirghiso + som kirghixi + + + riel cambogian + riel cambogian + riel cambogien + + + franco comorian + franco comorian + franchi comorien + + + won nordcorean + won nordcorean + won nordcoreen + + + won sudcorean + won sudcorean + won sudcoreen + KRW + + + dinar do Kuwait + dinar do Kuwait + dinar do Kuwait + + + dòllao de isoe Cayman + dòllao de isoe Cayman + dòllai de isoe Cayman + + 
+ tenge kazako + tenge kazako + tenge kazaki + + + kip laotian + kip laotian + kip laotien + + + sterliña libaneise + sterliña libaneise + sterliña libaneixi + + + rupia do Sri Lanka + rupia do Sri Lanka + rupie do Sri Lanka + + + dòllao liberian + dòllao liberian + dòllai liberian + + + loti do Lesotho + loti do Lesotho + maloti do Lesotho + + + dinar libico + dinar libico + dinar libichi + + + dirham marocchin + dirham marocchin + dirham marocchin + + + leu moldavo + leu moldavo + lei moldavi + + + ariary malgascio + ariary malgascio + ariary malgasci + + + dinao maçedone + dinao maçedone + dinai maçedoni + + + kyat do Myanmar + kyat do Myanmar + kyat do Myanmar + + + tugrik mongolo + tugrik mongolo + tugrik mongoli + + + pataca de Macao + pataca de Macao + patacas de Macao + + + ouguiya da Mauritania + ouguiya da Mauritania + ouguiya da Mauritania + + + rupia mauriçiaña + rupia mauriçiaña + rupie mauriçiañe + + + rufiyaa de Maldive + rufiyaa de Maldive + rufiyaa de Maldive + + + kwacha malawian + kwacha malawian + kwacha malawien + + + peso mescican + peso mescican + pesos mescichen + MXN + + + ringgit maleise + ringgit maleise + ringgit maleixi + + + metical mozambican + metical mozambican + meticales mozambichen + + + dòllao namibian + dòllao namibian + dòllai namibien + + + naira nigeriaña + naira nigeriaña + naire nigeriañe + + + córdoba do Nicaragua + córdoba do Nicaragua + córdobas do Nicaragua + + + coroña norvegiña + coroña norvegiña + coroñe norvegiñe + + + rupia nepaleise + rupia nepaleise + rupie nepaleixi + + + dòllao neozelandeise + dòllao neozelandeise + dòllai neozelandeixi + NZD + + + rial de l’Oman + rial de l’Oman + rial de l’Oman + + + balboa de Panama + balboa de Panama + balboas de Panama + + + sol peruvian + sol peruvian + soles peruvien + + + kina papuaña + kina papuaña + kina papuañe + + + peso filippin + peso filippin + pesos filippin + PHP + + + rupia pakistaña + rupia pakistaña + rupie pakistañe + + + złoty polacco + złoty polacco + 
złoty polacchi + + + guaraní paraguayan + guaraní paraguayan + guaraníes paraguayen + + + rial do Qatar + rial do Qatar + rial do Qatar + + + leu romen + leu romen + lei romen + + + dinao serbo + dinao serbo + dinai serbi + + + rublo ruscio + rublo ruscio + rubli rusci + + + franco do Rwanda + franco do Rwanda + franchi do Rwanda + + + rial saudita + rial saudita + rial sauditi + + + dòllao de Isoe Salomon + dòllao de Isoe Salomon + dòllai de Isoe Salomon + + + rupia de Seychelles + rupia de Seychelles + rupie de Seychelles + + + sterliña sudaneise + sterliña sudaneise + sterliñe sudaneixi + + + coroña svedeise + coroña svedeise + coroñe svedeixi + + + dòllao de Scingapô + dòllao de Scingapô + dòllai de Scingapô + + + sterliña de Sant’Elena + sterliña de Sant’Elena + sterliñe de Sant’Elena + + + lion da Sierra Leone + lion da Sierra Leone + lion da Sierra Leone + + + scellin da Somalia + scellin da Somalia + scellin da Somalia + + + dòllao do Suriname + dòllao do Suriname + dòllai do Suriname + + + sterliña sud-sudaneise + sterliña sud-sudaneise + sterliñe sud-sudaneixi + + + dobra de Sao Tomé e Prinçipe + dobra de Sao Tomé e Prinçipe + dobras de Sao Tomé e Prinçipe + + + sterliña sciriaña + sterliña sciriaña + sterliñe sciriañe + + + lilangeni do Swaziland + lilangeni do Swaziland + emalangeni do Swaziland + + + baht tailandeise + baht tailandeise + baht tailandeixi + + + somoni tagiko + somoni tagiko + somoni tagiki + + + manat turkmeno + manat turkmeno + manat turkmeni + + + dinar tunexian + dinar tunexian + dinar tunexien + + + paʻanga tongan + paʻanga tongan + paʻanga tonghen + + + lia turca + lia turca + lie turche + + + dòllao de Trinidad e Tobago + dòllao de Trinidad e Tobago + dòllai de Trinidad e Tobago + + + neuvo dòllao taiwaneise + neuvo dòllao taiwaneise + neuvi dòllai taiwaneixi + TWD + + + scellin da Tanzania + scellin da Tanzania + scellin da Tanzania + + + grivnia ucraiña + grivnia ucraiña + grivnie ucraiñe + + + scellin de l’Uganda + scellin de 
l’Uganda + scellin de l’Uganda + + + dòllao di Stati Unii + dòllao di Stati Unii + dòllai di Stati Unii + USD + + + peso uruguayan + peso uruguayan + pesos uruguayen + + + som uzbeco + som uzbeco + som uzbechi + + + bolívar venessuelan + bolívar venessuelan + bolívares venessuelen + + + dong vietnamita + dong vietnamita + dong vietnamiti + VND + + + vatu de Vanuatu + vatu de Vanuatu + vatu de Vanuatu + + + tala samoan + tala samoan + tala samoen + + + franco CFA BEAC + franco CFA BEAC + franchi CFA BEAC + + + dòllao di Caraibi de levante + dòllao di Caraibi de levante + dòllai di Caraibi de levante + XCD + + + franco CFA BCEAO + franco CFA BCEAO + franchi CFA BCEAO + + + franco CFP + franco CFP + franchi CFP + + + monæa desconosciua + (monæa desconosciua) + (monæe desconosciue) + + + rial do Yemen + rial do Yemen + rial do Yemen + + + rand sudafrican + rand sudafrican + rand sudafrichen + + + kwacha zambian + kwacha zambian + kwacha zambien + + + + ~{0} + ≥{0} + ≤{0} + {0}-{1} + + + {0} giorno + {0} giorni + Piggia l’{0}ª in sciâ drita. + Piggia a {0}ª in sciâ drita. 
+ + + + + + dexi{0} + + + çenti{0} + + + milli{0} + + + micro{0} + + + nano{0} + + + pico{0} + + + femto{0} + + + atto{0} + + + zepto{0} + + + yocto{0} + + + deca{0} + + + etto{0} + + + chillo{0} + + + mega{0} + + + giga{0} + + + tera{0} + + + peta{0} + + + exa{0} + + + zetta{0} + + + yotta{0} + + + kibi{0} + + + mebi{0} + + + gibi{0} + + + tebi{0} + + + pebi{0} + + + exbi{0} + + + zebi{0} + + + yobi{0} + + + {0} pe {1} + + + {0} quaddro + {0} quaddri + + + {0} cubbo + {0} cubbi + + + {0}-{1} + + + fòrsa g + {0} fòrsa g + {0} fòrse g + + + metri a-o segondo quaddro + {0} metro a-o segondo quaddro + {0} metri a-o segondo quaddro + + + revoluçioin + {0} revoluçion + {0} revoluçioin + + + radianti + {0} radiante + {0} radianti + + + graddi + {0} graddo + {0} graddi + + + primmi d’erco + {0} primmo d’erco + {0} primmi d’erco + + + segondi d’erco + {0} segondo d’erco + {0} segondi d’erco + + + chillòmetri quaddri + {0} chillòmetro quaddro + {0} chillòmetri quaddri + {0} pe chillòmetro quaddro + + + ettari + {0} ettaro + {0} ha + + + metri quaddri + {0} metro quaddro + {0} metri quaddri + {0} pe metro quaddro + + + çentimetri quaddri + {0} çentimetro quaddro + {0} çentimetri quaddri + {0} pe çentimetro quaddro + + + miggia quaddre + {0} miggio quaddro + {0} miggia quaddre + {0} pe miggio quaddro + + + acri + {0} acro + {0} acri + + + iarde quaddre + {0} iarda quaddra + {0} yd² + + + pê quaddri + {0} pê quaddro + {0} pê quaddri + + + pòlliçi quaddri + {0} pòlliçe quaddro + {0} pòlliçi quaddri + {0} pe pòlliçe quaddro + + + dunam + {0} dunam + {0} dunam + + + caratti + {0} caratto + {0} caratti + + + milligrammi pe deçilitro + {0} milligrammo pe deçilitro + {0} milligrammi pe deçilitro + + + millimöle pe litro + {0} millimöle pe litro + {0} millimöle pe litro + + + elemento + {0} elemento + {0} elementi + + + parte pe mion + {0} parte pe mion + {0} parte pe mion + + + pe çento + {0} pe çento + {0} pe çento + + + pe mille + {0} pe mille + {0} pe mille + + + pe dexemia + {0} 
pe dexemia + {0} pe dexemia + + + möle + {0} möle + {0} möle + + + litri pe chillòmetri + {0} litro pe chillòmetri + {0} litri pe chillòmetri + + + litri pe 100 chillòmetri + {0} litro pe 100 chillòmetri + {0} litri pe 100 chillòmetri + + + miggia pe gallon + {0} miggio pe gallon + {0} miggia pe gallon + + + miggia pe gallon imperiale + {0} miggio pe gallon imperiale + {0} miggia pe gallon imperiale + + + petabyte + {0} petabyte + {0} petabyte + + + terabyte + {0} terabyte + {0} terabyte + + + terabit + {0} terabit + {0} terabit + + + gigabyte + {0} gigabyte + {0} gigabyte + + + gigabit + {0} gigabit + {0} gigabit + + + megabyte + {0} megabyte + {0} megabyte + + + megabit + {0} megabit + {0} megabit + + + chillobyte + {0} chillobyte + {0} chillobyte + + + chillobit + {0} chillobit + {0} chillobit + + + byte + {0} byte + {0} byte + + + bit + {0} bit + {0} bit + + + secoli + {0} secolo + {0} secoli + + + dëxennio + {0} dëxennio + {0} dëxenni + + + anni + {0} anno + {0} anni + {0} à l’anno + + + trimestri + {0} trimestre + {0} trimestri + {0} pe trimestre + + + meixi + {0} meise + {0} meixi + {0} a-o meise + + + settemañe + {0} settemaña + {0} settemañe + {0} a-a settemaña + + + giorni + {0} giorno + {0} giorni + {0} a-o giorno + + + oe + {0} oa + {0} oe + {0} à l’oa + + + menuti + {0} menuto + {0} menuti + {0} a-o menuto + + + segondi + {0} segondo + {0} segondi + {0} a-o segondo + + + millisegondi + {0} millisegondo + {0} millisegondi + + + microsegondi + {0} microsegondo + {0} microsegondi + + + nanosegondi + {0} nanosegondo + {0} nanosegondi + + + ampère + {0} ampère + {0} ampère + + + milliampère + {0} milliampère + {0} milliampère + + + ohm + {0} ohm + {0} ohm + + + vòlt + {0} vòlt + {0} vòlt + + + chillocalorie + {0} chillocaloria + {0} chillocalorie + + + calorie + {0} caloria + {0} calorie + + + chillojoule + {0} chillojoule + {0} chillojoule + + + J + {0} joule + {0} joule + + + chillowatt-oe + {0} chillowatt-oa + {0} chillowatt-oe + + + elettronvòlt + {0} 
elettronvòlt + {0} elettronvòlt + + + unitæ termiche britanniche + {0} unitæ termica britannica + {0} unitæ termiche britanniche + + + therm US + {0} therm US + {0} therm US + + + lie-fòrsa + {0} lia-fòrsa + {0} lie-fòrsa + + + newton + {0} newton + {0} newton + + + chillowatt-oe pe 100 chillòmetri + {0} chillowatt-oa pe 100 chillòmetri + {0} chillowatt-oe pe 100 chillòmetri + + + gigahertz + {0} gigahertz + {0} gigahertz + + + megahertz + {0} megahertz + {0} megahertz + + + chillohertz + {0} chillohertz + {0} chillohertz + + + hertz + {0} hertz + {0} hertz + + + emme tipografica + {0} em + {0} em + + + pixel + {0} px + {0} px + + + megapixel + {0} megapixel + {0} megapixel + + + pixel per çentimetro + {0} pixel per çentimetro + {0} pixel per çentimetro + + + pixel pe pòlliçe + {0} pixel pe pòlliçe + {0} pixel pe pòlliçe + + + raggi da Tæra + {0} raggio da Tæra + {0} raggi da Tæra + + + chillòmetri + {0} chillòmetro + {0} chillòmetri + {0} pe chillòmetro + + + metri + {0} metro + {0} metri + {0} pe metro + + + deximetri + {0} deximetro + {0} deximetri + + + çentimetri + {0} çentimetro + {0} çentimetri + {0} pe çentimetro + + + millimetri + {0} millimetro + {0} millimetri + + + micrometri + {0} micrometro + {0} micrometri + + + nanometri + {0} nanometro + {0} nanometri + + + picometri + {0} picometro + {0} picometri + + + miggia + {0} miggio + {0} miggia + + + iarde + {0} iarda + {0} iarde + + + + {0} pê + {0} pê + {0} pe pê + + + pòlliçi + {0} pòlliçe + {0} pòlliçi + {0} pe pòlliçe + + + parsec + {0} parsec + {0} parsec + + + anni luxe + {0} anno luxe + {0} anni luxe + + + unitæ astronòmiche + {0} unitæ astronòmica + {0} unitæ astronòmiche + + + furlong + {0} furlong + {0} furlong + + + brasse + {0} brasso + {0} brasse + + + miggia de navegaçion + {0} miggio de navegaçion + {0} miggia de navegaçion + + + miggia scandinave + {0} miggio scandinavo + {0} miggia scandinave + + + ponti tipografichi + {0} ponto tipografico + {0} ponti tipografichi + + + raggi do Sô + {0} 
raggio do Sô + {0} raggi do Sô + + + lux + {0} lux + {0} lux + + + candeie + {0} candeia + {0} candeie + + + lumen + {0} lumen + {0} lumen + + + luminoxitæ do Sô + {0} luminoxitæ do Sô + {0} luminoxitæ do Sô + + + tonnëi metrichi + {0} tonneo metrico + {0} tonnëi metrichi + + + chillogrammi + {0} chillogrammo + {0} chillogrammi + {0} pe chillogrammo + + + grammi + {0} grammo + {0} grammi + {0} pe grammo + + + milligrammi + {0} milligrammo + {0} milligrammi + + + microgrammi + {0} microgrammo + {0} microgrammi + + + tonnëi curti + {0} tonneo curto + {0} tonnëi curti + + + stone + {0} stone + {0} stone + + + lie + {0} lia + {0} lie + {0} pe lia + + + onse + {0} onsa + {0} onse + {0} pe onsa + + + onse troy + {0} onsa troy + {0} onse troy + + + caratti + {0} caratto + {0} caratti + + + dalton + {0} dalton + {0} dalton + + + masse da Tæra + {0} massa da Tæra + {0} masse da Tæra + + + masse do Sô + {0} massa do Sô + {0} masse do Sô + + + grañe + {0} graña + {0} grañe + + + gigawatt + {0} gigawatt + {0} gigawatt + + + megawatt + {0} megawatt + {0} megawatt + + + chillowatt + {0} chillowatt + {0} chillowatt + + + watt + {0} watt + {0} watt + + + milliwatt + {0} milliwatt + {0} milliwatt + + + cavalli vapô + {0} cavallo vapô + {0} cavalli vapô + + + millimetri de mercuio + {0} millimetro de mercuio + {0} millimetri de mercuio + + + lie-fòrsa pe pòlliçe quaddro + {0} lia-fòrsa pe pòlliçe quaddro + {0} lie-fòrsa pe pòlliçe quaddro + + + pòlliçi de mercuio + {0} pòlliçe de mercuio + {0} pòlliçi de mercuio + + + bar + {0} bar + {0} bar + + + millibar + {0} millibar + {0} millibar + + + atmosfere + {0} atmosfera + {0} atmosfere + + + pascal + {0} pascal + {0} pascal + + + ettopascal + {0} ettopascal + {0} ettopascal + + + chillopascal + {0} chillopascal + {0} chillopascal + + + megapascal + {0} megapascal + {0} megapascal + + + chillòmetri à l’oa + {0} chillòmetro à l’oa + {0} chillòmetri à l’oa + + + metri a-o segondo + {0} metro a-o segondo + {0} metri a-o segondo + + + 
miggia à l’oa + {0} miggio à l’oa + {0} miggia à l’oa + + + nödi + {0} nödo + {0} nödi + + + graddi + {0} graddo + {0} graddi + + + graddi Celsius + {0} graddo Celsius + {0} graddi Celsius + + + graddi Fahrenheit + {0} graddo Fahrenheit + {0} graddi Fahrenheit + + + kelvin + {0} kelvin + {0} kelvin + + + lie-fòrsa-pê + {0} lia-fòrsa-pê + {0} lie-fòrsa-pê + + + newton-metri + {0} newton-metro + {0} newton-metri + + + chillòmetri cubbi + {0} chillòmetro cubbo + {0} chillòmetri cubbi + + + metri cubbi + {0} metro cubbo + {0} metri cubbi + {0} pe metro cubbo + + + çentimetri cubbi + {0} çentimetro cubbo + {0} çentimetri cubbi + {0} pe çentimetro cubbo + + + miggia cubbe + {0} miggio cubbo + {0} miggia cubbe + + + iarde cubbe + {0} iarda cubba + {0} iarde cubbe + + + pê cubbi + {0} pê cubbo + {0} pê cubbi + + + pòlliçi cubbi + {0} pòlliçe cubbo + {0} pòlliçi cubbi + + + megalitri + {0} megalitro + {0} megalitri + + + ettòlitri + {0} ettòlitro + {0} ettòlitri + + + litri + {0} litro + {0} litri + {0} pe litro + + + dexilitri + {0} dexilitro + {0} dexilitri + + + çentilitri + {0} çentilitro + {0} çentilitri + + + millilitri + {0} millilitro + {0} millilitri + + + pinte metriche + {0} pinta metrica + {0} pinte metriche + + + tasse metriche + {0} tassa metrica + {0} tasse metriche + + + acri-pê + {0} acro-pê + {0} acri-pê + + + stæ + {0} stâ + {0} stæ + + + galloin + {0} gallon + {0} galloin + {0} pe gallon + + + galloin imperiali + {0} gallon imperiale + {0} galloin imperiali + {0} pe gallon imperiale + + + quarti + {0} quarto + {0} quarti + + + pinte + {0} pinta + {0} pinte + + + tassa + {0} tassa + {0} tasse + + + onse liquide + {0} onsa liquida + {0} onse liquide + + + onse liquide imperiale + {0} onsa liquida imperiale + {0} onse liquide imperiale + + + cuggiæ + {0} cuggiâ + {0} cuggiæ + + + cuggiæn + {0} cuggiæn + {0} cuggiæn + + + barî + {0} barî + {0} barî + + + cuggiæn da cafè + {0} cuggiæn da cafè + {0} cuggiæn da cafè + + + cuggiæn da cafè imperiali + {0} cuggiæn 
da cafè imperiale + {0} cuggiæn da cafè imperiali + + + stisse + {0} stissa + {0} stisse + + + dramme liquide + {0} dramma liquida + {0} dramme liquide + + + jigger + {0} jigger + {0} jigger + + + spellinsegæ + {0} spellinsegâ + {0} spellinsegæ + + + quarto imperiale + {0} quarto imperiale + {0} quarti imperiali + + + ponto cardinâ + {0} est + {0} nòrd + {0} sud + {0} òvest + + + + + {0}/{1} + + + {0}² + {0}² + + + {0}³ + {0}³ + + + {0}⋅{1} + + + fòrsa g + {0} G + {0} G + + + {0} m/s² + {0} m/s² + + + {0} rev + {0} rev + + + {0} rad + {0} rad + + + ° + {0}° + {0}° + + + + {0}′ + {0}′ + + + + {0}″ + {0}″ + + + {0} km² + {0} km² + {0}/km² + + + ha + {0} ha + {0} ha + + + {0} m² + {0} m² + + + {0} cm² + {0} cm² + + + {0} mi² + {0} mi² + + + ac + {0} ac + {0} ac + + + {0} yd² + {0} yd² + + + {0} ft² + {0} ft² + + + {0} in² + {0} in² + + + {0} dunam + {0} dunam + + + {0} kt + {0} kt + + + mg/dl + {0} mg/dl + {0} mg/dl + + + mmol/l + {0} mmol/l + {0} mmol/l + + + elem. + {0} elem. + {0} elem. + + + {0} ppm + {0} ppm + + + {0}% + {0}% + + + {0}‰ + {0}‰ + + + {0}‱ + {0}‱ + + + {0} mol + {0} mol + + + l/km + {0} l/km + {0} l/km + + + l/100km + {0} l/100km + {0} l/100km + + + mpg + {0} mpg + {0} mpg + + + mpg imp. + {0} mpg imp. + {0} mpg imp. + + + {0} PB + {0} PB + + + {0} TB + {0} TB + + + {0} Tb + {0} Tb + + + {0} GB + {0} GB + + + {0} Gb + {0} Gb + + + {0} MB + {0} MB + + + {0} Mb + {0} Mb + + + {0} kB + {0} kB + + + {0} kb + {0} kb + + + B + {0} B + {0} B + + + {0} bit + {0} bit + + + sec. + {0} sec. + {0} sec. + + + dëx. + {0} dëx. + {0} dëx. + + + anni + {0} anno + {0} anni + {0}/anno + + + trim. + {0} trim. + {0} trim. + {0}/trim. + + + meixi + {0} meise + {0} meixi + {0}/meise + + + sett. + {0} sett. + {0} sett. + {0}/sett. + + + g + {0} g + {0} g + {0}/g + + + oe + {0} oa + {0} oe + {0}/h + + + men + {0} men. + {0} men. 
+ {0}/men + + + s + {0} s + {0} s + {0}/s + + + ms + {0} ms + {0} ms + + + μs + {0} μs + {0} μs + + + ns + {0} ns + {0} ns + + + A + {0} A + {0} A + + + {0} mA + {0} mA + + + Ω + {0} Ω + {0} Ω + + + V + {0} V + {0} V + + + {0} kcal + {0} kcal + + + {0} cal + {0} cal + + + {0} kJ + {0} kJ + + + J + {0} J + {0} J + + + {0} kWh + {0} kWh + + + {0} eV + {0} eV + + + BTU + {0} BTU + {0} BTU + + + thm US + {0} thm US + {0} thm US + + + {0} lbf + {0} lbf + + + {0} N + {0} N + + + {0} kWh/100km + {0} kWh/100km + + + {0} GHz + {0} GHz + + + {0} MHz + {0} MHz + + + {0} kHz + {0} kHz + + + {0} Hz + {0} Hz + + + {0} em + {0} em + + + pixel + {0} px + {0} px + + + megapixel + {0} Mpx + {0} Mpx + + + px/cm + {0} px/cm + {0} px/cm + + + px/in + {0} px/in + {0} px/in + + + {0} R⊕ + {0} R⊕ + + + {0} km + {0} km + + + m + {0} m + {0} m + + + {0} dm + {0} dm + + + {0} cm + {0} cm + + + {0} mm + {0} mm + + + {0} μm + {0} μm + + + {0} nm + {0} nm + + + {0} pm + {0} pm + + + {0} mi + {0} mi + + + {0} yd + {0} yd + + + {0} ft + {0} ft + + + {0} in + {0} in + + + {0} pc + {0} pc + + + {0} ly + {0} ly + + + {0} au + {0} au + + + {0} fur + {0} fur + + + {0} fth + {0} fth + + + {0} nmi + {0} nmi + + + {0} smi + {0} smi + + + {0} pt + {0} pt + + + {0} R☉ + {0} R☉ + + + {0} lx + {0} lx + + + {0} cd + {0} cd + + + {0} lm + {0} lm + + + {0} L☉ + {0} L☉ + + + {0} t + {0} t + + + {0} kg + {0} kg + + + g + {0} g + {0} g + + + {0} mg + {0} mg + + + {0} μg + {0} μg + + + {0} tn + {0} tn + + + {0} st + {0} st + + + {0} lb + {0} lb + + + {0} oz + {0} oz + + + {0} oz t + {0} oz t + + + ct + {0} ct + {0} ct + + + {0} Da + {0} Da + + + {0} M⊕ + {0} M⊕ + + + {0} M☉ + {0} M☉ + + + grañe + {0} graña + {0} grañe + + + {0} GW + {0} GW + + + {0} MW + {0} MW + + + {0} kW + {0} kW + + + W + {0} W + {0} W + + + {0} mW + {0} mW + + + {0} hp + {0} hp + + + mmHg + {0} mmHg + {0} mmHg + + + {0} psi + {0} psi + + + {0} inHg + {0} inHg + + + {0} bar + {0} bar + + + {0} mbar + {0} mbar + + + {0} atm + {0} atm + + + {0} 
Pa + {0} Pa + + + {0} hPa + {0} hPa + + + {0} kPa + {0} kPa + + + {0} MPa + {0} MPa + + + {0} km/h + {0} km/h + + + {0} m/s + {0} m/s + + + {0} mi/h + {0} mi/h + + + {0} kn + {0} kn + + + {0}° + {0}° + + + {0}°C + {0}°C + + + {0}°F + {0}°F + + + {0} K + {0} K + + + {0} lbf⋅ft + {0} lbf⋅ft + + + {0} N⋅m + {0} N⋅m + + + {0} km³ + {0} km³ + + + {0} m³ + {0} m³ + + + {0} cm³ + {0} cm³ + + + {0} mi³ + {0} mi³ + + + {0} yd³ + {0} yd³ + + + {0} ft³ + {0} ft³ + + + {0} in³ + {0} in³ + + + Ml + {0} Ml + {0} Ml + + + hl + {0} hL + {0} hL + + + l + {0} l + {0} l + {0}/l + + + dl + {0} dl + {0} dl + + + cl + {0} cl + {0} cl + + + ml + {0} ml + {0} ml + + + {0} mpt + {0} mpt + + + mc + {0} mc + {0} mc + + + {0} ac ft + {0} ac ft + + + {0} bu + {0} bu + + + gal + {0} gal + {0} gal + {0}/gal + + + gal imp. + {0} gal imp. + {0} gal imp. + {0}/gal imp. + + + qt + {0} qt + {0} qt + + + pt + {0} pt + {0} pt + + + c + {0} c + {0} c + + + fl oz + {0} fl oz + {0} fl oz + + + fl oz imp. + {0} fl oz imp. + {0} fl oz imp. + + + {0} tbsp + {0} tbsp + + + {0} tsp + {0} tsp + + + {0} bbl + {0} bbl + + + {0} dstspn + {0} dstspn + + + dstspn imp. + {0} dstspn imp. + {0} dstspn imp. + + + stisse + {0} stissa + {0} stisse + + + dramme liq. + {0} dramma liq. + {0} dramme liq. + + + {0} jigger + {0} jigger + + + spellinsegæ + {0} spellinsegâ + {0} spellinsegæ + + + qt imp. + {0} qt imp. + {0} qt imp. 
+ + + ponto + {0} E + {0} N + {0} S + {0} Ò + + + + + d{0} + + + c{0} + + + m{0} + + + μ{0} + + + n{0} + + + p{0} + + + f{0} + + + a{0} + + + z{0} + + + y{0} + + + da{0} + + + h{0} + + + k{0} + + + M{0} + + + G{0} + + + T{0} + + + P{0} + + + E{0} + + + Z{0} + + + Y{0} + + + Ki{0} + + + Mi{0} + + + Gi{0} + + + Ti{0} + + + Pi{0} + + + Ei{0} + + + Zi{0} + + + Yi{0} + + + {0}/{1} + + + {0}² + {0}² + + + {0}³ + {0}³ + + + {0}⋅{1} + + + fòrsa g + {0} G + {0}G + + + m/s² + {0}m/s² + {0}m/s² + + + rev + {0}rev + {0}rev + + + rad + {0}rad + {0}rad + + + ° + {0}° + {0}° + + + + {0}′ + {0}′ + + + + {0}″ + {0}″ + + + km² + {0}km² + {0}km² + {0}/km² + + + ha + {0}ha + {0}ha + + + + {0}m² + {0}m² + {0}/m² + + + cm² + {0}cm² + {0}cm² + {0}/cm² + + + mi² + {0}mi² + {0}mi² + {0}/mi² + + + ac + {0}ac + {0}ac + + + yd² + {0}yd² + {0}yd² + + + ft² + {0}ft² + {0}ft² + + + in² + {0}in² + {0}in² + {0}/in² + + + dunam + {0}dunam + {0}dunam + + + kt + {0}kt + {0}kt + + + mg/dl + {0} mg/dl + {0} mg/dl + + + mmol/l + {0} mmol/l + {0} mmol/l + + + elem + {0}elem + {0}elem + + + ppm + {0}ppm + {0}ppm + + + % + {0}% + {0}% + + + + {0}‰ + {0}‰ + + + + {0}‱ + {0}‱ + + + mol + {0}mol + {0}mol + + + l/km + {0}l/km + {0}l/km + + + l/100km + {0}l/100km + {0}l/100km + + + mpg + {0}mpg + {0}mpg + + + mpg im + {0}mpg im + {0}mpg im + + + PB + {0}PB + {0}PB + + + TB + {0}TB + {0}TB + + + Tb + {0}Tb + {0}Tb + + + GB + {0}GB + {0}GB + + + Gb + {0}Gb + {0}Gb + + + MB + {0}MB + {0}MB + + + Mb + {0}Mb + {0}Mb + + + kB + {0}kB + {0}kB + + + kb + {0}kb + {0}kb + + + B + {0}B + {0}B + + + bit + {0}bit + {0}bit + + + sec + {0}sec + {0}sec + + + dëx + {0}dëx + {0}dëx + + + a + {0}a + {0}a + {0}/a + + + tr + {0}tr + {0}tr + {0}/tr + + + meixi + {0}meise + {0}meixi + {0}/meise + + + sett + {0}sett + {0}sett + {0}/sett + + + g + {0}g + {0}g + {0}/g + + + h + {0}h + {0}h + {0}/h + + + men + {0}men + {0}men + {0}/men + + + s + {0}s + {0}s + {0}/s + + + ms + {0}ms + {0}ms + + + μs + {0}μs + {0}μs + + + ns + {0}ns + 
{0}ns + + + A + {0}A + {0}A + + + mA + {0}mA + {0}mA + + + Ω + {0}Ω + {0}Ω + + + V + {0}V + {0}V + + + kcal + {0}kcal + {0}kcal + + + cal + {0}cal + {0}cal + + + kJ + {0}kJ + {0}kJ + + + J + {0}J + {0}J + + + kWh + {0}kWh + {0}kWh + + + eV + {0}eV + {0}eV + + + BTU + {0}BTU + {0}BTU + + + thm US + {0}thm US + {0}thm US + + + lbf + {0}lbf + {0}lbf + + + N + {0}N + {0}N + + + kWh/100km + {0}kWh/100km + {0}kWh/100km + + + GHz + {0}GHz + {0}GHz + + + MHz + {0}MHz + {0}MHz + + + kHz + {0}kHz + {0}kHz + + + Hz + {0}Hz + {0}Hz + + + em + {0}em + {0}em + + + px + {0}px + {0}px + + + MP + {0}Mpx + {0}Mpx + + + px/cm + {0}px/cm + {0}px/cm + + + px/in + {0} px/in + {0} px/in + + + R⊕ + {0}R⊕ + {0}R⊕ + + + km + {0}km + {0}km + {0}/km + + + m + {0}m + {0}m + {0}/m + + + dm + {0}dm + {0}dm + + + cm + {0}cm + {0}cm + {0}/cm + + + mm + {0}mm + {0}mm + + + μm + {0}μm + {0}μm + + + nm + {0}nm + {0}nm + + + pm + {0}pm + {0}pm + + + mi + {0}mi + {0}mi + + + yd + {0}yd + {0}yd + + + ft + {0}ft + {0}ft + {0}/ft + + + in + {0}in + {0}in + {0}/in + + + pc + {0}pc + {0}pc + + + ly + {0}ly + {0}ly + + + au + {0}au + {0}au + + + fur + {0}fur + {0}fur + + + fm + {0}fth + {0}fth + + + nmi + {0}nmi + {0}nmi + + + smi + {0}smi + {0}smi + + + pt + {0}pt + {0}pt + + + R☉ + {0}R☉ + {0}R☉ + + + lx + {0}lx + {0}lx + + + cd + {0}cd + {0}cd + + + lm + {0}lm + {0}lm + + + L☉ + {0}L☉ + {0}L☉ + + + t + {0}t + {0}t + + + kg + {0}kg + {0}kg + {0}/kg + + + g + {0}g + {0}g + {0}/g + + + mg + {0}mg + {0}mg + + + μg + {0}μg + {0}μg + + + tn + {0}tn + {0}tn + + + st + {0}st + {0}st + + + lb + {0}lb + {0}lb + {0}/lb + + + oz + {0}oz + {0}oz + {0}/oz + + + oz t + {0}oz t + {0}oz t + + + ct + {0}ct + {0}ct + + + Da + {0}Da + {0}Da + + + M⊕ + {0}M⊕ + {0}M⊕ + + + M☉ + {0}M☉ + {0}M☉ + + + grañe + {0}graña + {0}grañe + + + GW + {0}GW + {0}GW + + + MW + {0}MW + {0}MW + + + kW + {0}kW + {0}kW + + + W + {0}W + {0}W + + + mW + {0}mW + {0}mW + + + hp + {0}hp + {0}hp + + + mmHg + {0}mmHg + {0}mmHg + + + psi + {0}psi + {0}psi 
+ + + inHg + {0}inHg + {0}inHg + + + bar + {0}bar + {0}bar + + + mbar + {0}mbar + {0}mbar + + + atm + {0}atm + {0}atm + + + Pa + {0}Pa + {0}Pa + + + hPa + {0}hPa + {0}hPa + + + kPa + {0}kPa + {0}kPa + + + MPa + {0}MPa + {0}MPa + + + km/h + {0}km/h + {0}km/h + + + m/s + {0}m/s + {0}m/s + + + mi/h + {0}mi/h + {0}mi/h + + + kn + {0}kn + {0}kn + + + ° + {0}° + {0}° + + + °C + {0}°C + {0}°C + + + °F + {0}°F + {0}°F + + + K + {0}K + {0}K + + + lbf⋅ft + {0}lbf⋅ft + {0}lbf⋅ft + + + N⋅m + {0}N⋅m + {0}N⋅m + + + km³ + {0}km³ + {0}km³ + + + + {0}m³ + {0}m³ + {0}/m³ + + + cm³ + {0}cm³ + {0}cm³ + {0}/cm³ + + + mi³ + {0}mi³ + {0}mi³ + + + yd³ + {0}yd³ + {0}yd³ + + + ft³ + {0}ft³ + {0}ft³ + + + in³ + {0}in³ + {0}in³ + + + Ml + {0}Ml + {0}Ml + + + hl + {0}hl + {0}hl + + + l + {0}l + {0}l + {0}/l + + + dl + {0}dl + {0}dl + + + cl + {0}cl + {0}cl + + + ml + {0}ml + {0}ml + + + mpt + {0}mpt + {0}mpt + + + mc + {0}mc + {0}mc + + + acft + {0}ac ft + {0}ac ft + + + bu + {0}bu + {0}bu + + + gal + {0}gal + {0}gal + {0}/gal + + + galim + {0}galim + {0}galim + {0}/galim + + + qt + {0}qt + {0}qt + + + pt + {0}pt + {0}pt + + + c + {0}c + {0}c + + + fl oz + {0}fl oz + {0}fl oz + + + fl oz im + {0}fl oz im + {0}fl oz im + + + tbsp + {0}tbsp + {0}tbsp + + + tsp + {0}tsp + {0}tsp + + + bbl + {0}bbl + {0}bbl + + + dsp + {0}dsp + {0}dsp + + + dsp im + {0}dsp im + {0}dsp im + + + st + {0}st + {0}st + + + dr liq + {0}dr liq + {0}dr liq + + + jigger + {0}jigger + {0}jigger + + + sp + {0}sp + {0}sp + + + qt im + {0}qt im + {0}qt im + + + ponto + {0}E + {0}N + {0}S + {0}Ò + + + + h:mm + + + h:mm:ss + + + m:ss + + + + + {0} e {1} + {0} e {1} + + + {0} ò {1} + {0} ò {1} + + + {0}, {1} + {0}, {1} + {0} ò {1} + {0} ò {1} + + + {0}, {1} + {0}, {1} + {0} ò {1} + {0} ò {1} + + + {0}, {1} + {0}, {1} + {0} e {1} + {0} e {1} + + + {0}, {1} + {0}, {1} + {0} e {1} + {0} e {1} + + + {0}, {1} + {0}, {1} + {0} e {1} + {0} e {1} + + + {0} {1} + {0} {1} + {0} {1} + {0} {1} + + + {0}, {1} + {0}, {1} + {0} e {1} + {0} e 
{1} + + + + {0} — tutto + {0} — compatibilitæ + {0} — çerciou + {0} — esteiso + {0} — stòrico + {0} — vario + {0} — atro + scistemi de scrittua — {0} + {0} træto + {0} træti + {0} a-o pê + {0} in testa + attivitæ + scrittue africañe + scrittue americañe + bestie + bestie e natua + frecce + còrpo + caratteri riga + scrittua Braille + casamenti + ballin e stelle + jamo consonantichi + scimboli de monæa + trætin ò connettoî + giffre + pittogrammi + scimboli de divinaçion + frecce in zu + frecce in sciù e in zu + scrittue de l’Asia de levante + emoji + scrittue europee + femenin + bandea + bandee + mangiâ e beive + formato + formato e spaçiatua + variante à larghessa piña + forme geometriche + variante à meza larghessa + caratteri han + radicali han + hanja + caratteri cineixi semplificæ + caratteri cineixi tradiçionali + cheu + scrittue stòriche + caratteri ideografichi de descriçion + kana giapponeixi + kanbun + kanji + tasto + frecce a-a manciña + frecce a-a manciña e a-a drita + scimboli alfabetichi + feua de deuvia + mascolin + scimboli matematichi + scrittue do Levante + varri + scrittue moderne + modificatoî + scimboli muxicali + nauta + sensa spaçiatua + numeri + oggetti + atro + in cobbia + persoñe + alfabeto fonetico + pittogrammi + pòsti + ciante + pontezzatua + frecce a-a drita + segni e scimboli + variante picciñe + morin + morin e persoñe + scrittue de l’Asia do meridion + scrittue de l’Asia do sud-levante + con spaçio + spòrt + scimboli + scimboli tecnichi + açenti tonali + viægi + viægi e pòsti + frecce in sciù + variante + jamo vocalichi + tempo + scrittue de l’Asia de ponente + spaçiatua + + + corscivo + dimenscion òttica + inclinaçion + larghessa + peiso + corscivo + didascalia + testo + titolo + intestaçion + manifesto + corscivo inversou + drito + cegou + extracegou + ultrastreito + ultrastreito + ultrastreito + extrastreito + extrastreito + extrastreito + streito + streito + streito + semistreito + semistreito + semistreito + normale + semilargo + 
semilargo + semilargo + largo + largo + largo + extralargo + extralargo + extralargo + ultralargo + ultralargo + ultralargo + ultrafin + extrafin + extrafin + fin + semifin + libbro + regolâ + medio + semidruo + semidruo + druo + extradruo + scuo + scuo + extrascuo + extrascuo + extrascuo + fraçioin verticale + spaçiatua de lettie gròsse + ligatue facoltative + fraçioin diagonale + giffre alliniæ + giffre vegio stilo + ordinale + giffre à spaçiatua proporçionale + lettie gròsse picciñe + giffre à spaçiatua regolâ + zero barrou + + + und lij + + {title} {given} {given2} {surname} + + + {given-informal} {surname} + + + {title} {surname} + + + {given-informal} + + + {given-monogram-allCaps}{given2-monogram-allCaps}{surname-monogram-allCaps} + + + {given-informal-monogram-allCaps}{surname-monogram-allCaps} + + + {given} {given2-initial} {surname} + + + {given-informal} {surname} + + + {title} {surname} + + + {given-informal} + + + {given-monogram-allCaps}{given2-monogram-allCaps}{surname-monogram-allCaps} + + + {given-informal-monogram-allCaps}{surname-monogram-allCaps} + + + {given-initial} {given2-initial} {surname} + + + {given-informal} {surname-initial} + + + {title} {surname} + + + {given-informal} + + + {surname-monogram-allCaps} + + + {given-informal-monogram-allCaps} + + + {surname} {given} {given2} + + + {surname} {given-informal} + + + {title} {surname} + + + {given-informal} + + + {surname-monogram-allCaps}{given-monogram-allCaps}{given2-monogram-allCaps} + + + {surname-monogram-allCaps}{given-informal-monogram-allCaps} + + + {surname} {given} {given2-initial} + + + {surname} {given-informal} + + + {title} {surname} + + + {given-informal} + + + {surname-monogram-allCaps}{given-monogram-allCaps} + + + {surname-monogram-allCaps}{given-informal-monogram-allCaps} + + + {surname} {given-initial} {given2-initial} + + + {surname} {given-informal-initial} + + + {title} {surname} + + + {given-informal} + + + {surname-monogram-allCaps} + + + 
{given-informal-monogram-allCaps} + + + {surname-core}, {title} {given} {given2} {surname-prefix} + + + {surname-core}, {given-informal} {surname-prefix} + + + {surname-core}, {given} {given2-initial} {surname-prefix} + + + {surname-core}, {given-informal} {surname-prefix} + + + {surname-core}, {given-initial} {given2-initial} {surname-prefix} + + + {surname-core}, {given-informal-initial} {surname-prefix} + + + Zane + + + Luçia + Cangiaxo + + + Françesco + Maria + Parödi + + + Dott.a Prof.a + Maria Giöxeppiña + Maiòllo + Reusa Texa + De + Franchi + Carcagno Baçigalô + OMRI + + + diff --git a/make/data/cldr/common/main/lij_IT.xml b/make/data/cldr/common/main/lij_IT.xml new file mode 100644 index 00000000000..0c7b1575aa9 --- /dev/null +++ b/make/data/cldr/common/main/lij_IT.xml @@ -0,0 +1,18 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/lkt.xml b/make/data/cldr/common/main/lkt.xml index 860cd504f1c..1faeb9e2729 100644 --- a/make/data/cldr/common/main/lkt.xml +++ b/make/data/cldr/common/main/lkt.xml @@ -1,6 +1,6 @@ - + + + + + + + + ingles + Lombard + + + + + + Italia + + + metregh + + + + [a b c d e f g h i j k l m n o p q r s t u v w x y z] + [á à ă â å ä ã ā æ ç é è ĕ ê ë ē í ì ĭ î ï ī ñ ó ò ŏ ô ö ø ō œ ú ù ŭ û ü ū ÿ] + [\- ‑ , . % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‑ — , ; \: ! ? . 
… ' ’ " “ ” « » ( ) \[ \] \{ \} @ /] + + + + + + + + sginer + fevrer + marz + avril + masg + sgiugn + luj + avost + setember + otover + november + dicember + + + + + + + domenega + lundì + mardì + mercoldì + sgiovedì + venerdì + sabet + + + + + + + del matin + de sira + + + + + + d MMM y + + + + + + + + Temp Medi de Greenwich + + + + + + + , + + + + + + Elisa + + + Pinina + + + Matilda Bossa + + + March Brambilla + + + Peder + + + Caterina + + + Luisa + + + Luisa Verda + + + diff --git a/make/data/cldr/common/main/lmo_IT.xml b/make/data/cldr/common/main/lmo_IT.xml new file mode 100644 index 00000000000..ecc417479a4 --- /dev/null +++ b/make/data/cldr/common/main/lmo_IT.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/ln.xml b/make/data/cldr/common/main/ln.xml index 44a306fd68b..e648d6cbb3c 100644 --- a/make/data/cldr/common/main/ln.xml +++ b/make/data/cldr/common/main/ln.xml @@ -1,6 +1,6 @@ - + + + + + + + + + + + + + + + + + ᠪᠷᠠᠽᠢᠯ + ᠬᠢᠳᠠᠳ + ᠭᠧᠷᠮᠠᠨ + ᠫᠷᠠᠨ᠋᠋ᠼᠠ + ᠶᠡᠺᠡ ᠪᠷᠢᠲ᠋ᠠᠨᠢ + ᠡᠨᠡᠳᠬᠡᠭ᠌ + ᠢᠲ᠋ᠠᠯᠢ + ᠶᠠᠫᠣᠨ + ᠮᠣᠩᠭᠣᠯ + ᠣᠷᠣᠰ + ᠠᠮᠸᠷᠢᠻᠠ ᠎ᠢᠢᠨ ᠨᠢᠭᠡᠳᠥᠭᠰᠡᠠ ᠡᠣᠯᠣᠰ + ᠳᠣᠳᠣᠷᠬᠠᠢ ᠥᠭᠡᠢ ᠪᠥᠰᠠ + + + ᠭᠸᠷᠸᠭᠣᠷᠢ ᠢᠨ ᠬᠣᠸᠠᠩᠯᠢ + ᠰᠲ᠋ᠠᠨ᠋ᠳᠠᠷᠳ᠋ ᠡᠷᠡᠮᠪᠡᠯᠡᠬᠥ ᠳᠠᠷᠠᠭᠠᠯᠠᠯ + ᠠᠷᠠᠪ ᠲᠣᠭ᠎ᠠ + ᠮᠣᠩᠭᠣᠯ ᠲᠣᠭ᠎ᠠ + + + ᠮᠧᠲ᠋ᠷ ᠦᠨ + ᠢ᠂ ᠪ + ᠠ᠂ ᠨ᠂ ᠣ + + + ᠺᠡᠯᠠ᠄ {0} + ᠪᠢᠴᠢᠭ᠌: {0} + ᠮᠣᠵᠢ᠄ {0} + + + + + + + + + y ᠤᠨ ᠣ MM ᠰᠠᠷ ᠠ ᠢᠢᠨ dd + yMMdd + + + + + y ᠣᠨ ᠎ᠤ MM ᠰᠠᠷ᠎ᠠ ᠎ᠢᠢᠨ dd + yMMdd + + + + + y MM d + yMMd + + + + + y-MM-dd + yMMdd + + + + + + {0} - {1} + + + + + + + + 1 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 2 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 3᠊ᠷ ᠰᠠᠷ᠎ᠠ + 4 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 5 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 6 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 7 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 8᠊ᠷ ᠰᠠᠷ᠎ᠠ + 9 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 10 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 11 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 12 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + + + I + II + III + IV + V + VI + VII + VIII + IX + X + XI + XII + + + ᠨᠢᠭᠡᠳᠥᠭᠡᠷ ᠰᠠᠷ᠎ᠠ + ᠬᠣᠶᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ ᠠ + ᠭᠣᠷᠪᠡᠳᠣᠭᠠᠷ ᠰᠠᠷ ᠠ + ᠳᠥᠷᠪᠡᠳᠥᠭᠡᠷ ᠰᠠᠷ᠎ᠠ + ᠲᠠᠪᠣᠳᠣᠭᠠᠷ ᠰᠠᠷ ᠠ + ᠵᠢᠷᠭᠣᠭᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ᠎ᠠ + ᠲᠣᠯᠣᠭᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ᠎ᠠ + ᠨᠠᠢᠮᠠᠳᠥᠭᠠᠷ ᠰᠠᠷ᠎ᠠ + ᠶᠢᠰᠥᠳᠥᠭᠡᠷ ᠰᠠᠷ᠎ᠠ + ᠠᠷᠪᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ᠎ᠠ + ᠠᠷᠪᠠᠨ ᠨᠢᠭᠡᠳᠥᠭᠡᠷ ᠰᠠᠷ᠎ᠠ + ᠠᠷᠪᠠᠨ ᠬᠣᠶᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ᠎ᠠ + + + + + 1 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 2 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 3᠊ᠷ ᠰᠠᠷ᠎ᠠ + 4 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 5 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 6 
᠊ᠷ ᠰᠠᠷ᠎ᠠ + 7 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 8 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 9 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 10 ᠊ᠷ ᠰᠠᠷ᠎ᠠ + 11᠊ᠷ ᠰᠠᠷ᠎ᠠ + 12᠊ᠷ ᠰᠠᠷ᠎ᠠ + + + I + II + III + IV + V + VI + VII + VIII + IX + X + XI + XII + + + ᠳᠥᠷᠪᠡᠳᠥᠭᠡᠷ ᠰᠠᠷ᠎ᠠ + + + + + + + ᠨᠢ + ᠲᠠ + ᠮᠢᠭ + ᡀᠠ + ᠫᠥᠷ + ᠪᠠ + ᠪᠢᠮ + + + ᠨᠢ + ᠳᠠ + ᠮᠢᠭ + ᡀᠠ + ᠫᠥᠷ + ᠪᠠ + ᠪᠢ + + + ᠨᠢᠮ᠎ᠠ + ᠳᠠᠸᠠ + ᠮᠢᠠᠠᠮᠠᠷ + ᡀᠠᠭᠪᠠ + ᠫᠦᠷᠪᠦ + ᠪᠠᠰᠠᠩ + ᠪᠢᠮᠪᠠ + + + + + ᠨᠢ + ᠳᠠ + ᠮᠢᠭ + ᡀᠠ + ᠫᠦᠷ + ᠪᠠ + ᠪᠢᠮ + + + ᠨᠢ + ᠳᠠ + ᠮᠢᠭ + ᡀᠠ + ᠫᠥᠷ + ᠪᠠ + ᠪᠢᠮ + + + ᠨᠢᠮ᠎ᠠ + ᠳᠠᠸᠠ + ᠮᠢᠠᠠᠮᠠᠷ + ᡀᠠᠭᠪᠠ + ᠫᠦᠷᠪᠦ + ᠪᠠᠰᠠᠩ + ᠪᠢᠮᠪᠠ + + + + + + + 1 ᠣᠯᠠᠷᠢᠯ + 2 ᠣᠯᠠᠷᠢᠯ + 3 ᠣᠯᠠᠷᠢᠯ + 4 ᠣᠯᠠᠷᠢᠯ + + + I + II + III + IV + + + 1 ᠊ᠷ ᠣᠯᠠᠷᠢᠯ + 2 ᠊ᠷ ᠣᠯᠠᠷᠢᠯ + 3 ᠊ᠷ ᠣᠯᠠᠷᠢᠯ + 4 ᠊ᠷ ᠣᠯᠠᠷᠢᠯ + + + + + I + II + III + IV + + + I + II + III + IV + + + 1 ᠊ᠷ ᠣᠯᠠᠷᠢᠯ + 2 ᠊ᠷ ᠣᠯᠠᠷᠢᠯ + 3 ᠊ᠷ ᠣᠯᠠᠷᠢᠯ + 4 ᠊ᠷ ᠣᠯᠠᠷᠢᠯ + + + + + + + ᠦ᠂ ᠥ + ᠦ᠂ ᠬᠣ + + + + + + ᠮ᠂ ᠡᠡ᠂ ᠦ + ᠨ᠂ ᠲ᠂ ᠥ + ᠮ᠂ ᠡ + ᠨ᠂ ᠲ + + + + + + y ᠣᠨ ᠎᠎᠎ᠤ MMMM᠎᠎ᠢᠢᠨd. EEEE ᠋ᠭᠠᠷᠠᠭ + yMMMMEEEEd + + + + + y ᠋ᠣᠨ ᠤMMMM᠎᠎ ᠤᠩ d + yMMMMd + + + + + y.MM.dd + yMMdd + + + + + y.MM.dd + yMMdd + + + + + + + HH:mm:ss (zzzz) + HHmmsszzzz + + + + + HH:mm:ss (z) + HHmmssz + + + + + + + + ᠡᠷᠢᠨ + + + ᠵᠢᠯ + + + ᠵᠢᠯ + + + ᠵᠢᠯ + + + ᠣᠯᠠᠷᠢᠯ + + + ᠣᠯᠠᠷᠢᠯ + + + ᠣᠯᠠᠷᠢᠯ + + + ᠰᠠᠷ ᠠ + + + ᠰᠠᠷ ᠠ + + + ᠰᠡᠷ ᠠ + + + ᠳᠣᠯᠣᠭ᠎ᠠ ᠬᠣᠨᠣᠭ + + + ᠳᠣᠯᠣᠭ᠎ᠠ ᠬᠣᠨᠣᠭ + + + 7 ᠬᠣᠨᠣᠭ + + + ᠡᠳᠥᠷ + ᠥᠴᠥᠬᠡᠳᠥᠷ + ᠥᠨᠥᠳᠥᠷ + ᠮᠠᠷᠭᠠᠰᠢ + + + ᠡᠳᠥᠷ + + + ᠡᠳᠥᠷ + + + ᠭᠠᠷᠠᠭ + + + ᠥᠳᠡ ᠡᠴᠠ ᠡᠮᠥᠨ᠎ᠠ / ᠥᠳᠡ ᠡᠴᠡ ᠬᠣᠢᠢᠰᠢ + + + ᠴᠠᠭ + + + + + + + + + ᠮᠢᠨᠥ᠋ᠲ᠋ᠠ + + + ᠮᠢᠨ + + + ᠮᠢᠨ + + + ᠰᠸᠻᠥ᠋ᠨ᠋ᠳᠡ + + + ᠰᠧᠻ + + + ᠰᠧᠻ + + + ᠴᠠᠭ ᠎ᠤᠨ ᠪᠥᠰᠡ + + + + GMT {0} + {0} ᠴᠠᠭ + {0} ᠵᠣᠨ ᠎᠎᠎ᠤ ᠴᠠᠭ + {0} ᠰᠲ᠋ᠠᠨ᠋ᠳᠠᠷᠳ᠋ ᠴᠠᠭ + + + ᠣᠯᠠᠨ ᠣᠯᠣᠰ ᠤᠨ ᠵᠣᠬᠢᠴᠡᠭᠣᠯᠣᠯᠳᠠᠳᠠᠢ ᠴᠠᠭ + + + + ᠥᠯᠥ ᠮᠡᠳᠡᠭᠳᠡᠬᠥ ᠬᠣᠳᠠ + + + ᠬᠣᠪᠳᠣ + + + ᠣᠯᠠᠭᠠᠨᠪᠠᠭᠠᠳᠣᠷ + + + ᠴᠥᠢᠪᠠᠯᠰᠨᠩ + + + + ᠲᠥᠪ ᠴᠠᠭ + ᠳᠥᠪ ᠰᠲ᠋ᠠᠨ᠋ᠳᠠᠷᠳ᠋ ᠴᠠᠭ + ᠲᠥᠪ ᠵᠣᠨ ᠎᠎᠎ᠤ ᠴᠠᠭ + + + + + ᠵᠡᠭᠥᠨ ᠡᠷᠭᠡ ᠎ᠢᠢᠨ ᠴᠠᠭ + ᠵᠡᠭᠥᠨ ᠡᠷᠭᠡ ᠎ᠢᠢᠨ ᠰᠲ᠋ᠠᠨ᠋ᠳᠠᠷᠳ᠋ ᠴᠠᠭ + ᠵᠡᠭᠥᠨ ᠡᠷᠭᠡ ᠎ᠢᠢᠨ ᠵᠣᠨ ᠎᠎᠎ᠤ ᠴᠠᠭ + + + + + ᠠᠭᠣᠯᠠ ᠎᠎᠎᠎ᠢᠢᠨ ᠴᠠᠭ + ᠠᠭᠣᠯᠠ ᠎᠎᠎᠎ᠢᠢᠨ ᠰᠲ᠋ᠠᠨ᠋ᠳᠠᠷᠳ᠋ ᠴᠠᠭ + ᠠᠭᠣᠯᠠ ᠎ᠢᠢᠨ ᠵᠣᠨ ᠎᠎ᠤ ᠴᠠᠭ + + + + + ᠨᠣᠮᠣᠬᠠᠨ ᠳᠠᠯᠠᠢ ᠎ᠢᠢᠨ ᠴᠠᠭ + ᠨᠣᠮᠣᠬᠠᠨ ᠳᠠᠯᠠᠢ ᠎᠎ᠢᠢᠨ ᠰᠲ᠋ᠠᠨ᠋ᠳᠠᠷᠳ᠋ ᠴᠠᠭ + ᠨᠣᠮᠣᠬᠠᠨ ᠳᠠᠯᠠᠢ ᠎ᠢᠢᠨ ᠵᠣᠨ ᠎᠎᠎ᠪ ᠴᠠᠭ + + + + + ᠠᠲ᠋ᠯᠠᠨ᠋ᠲ᠋ ᠎ᠤᠨ ᠴᠠᠭ + ᠠᠲ᠋ᠯᠠᠨ᠋ᠲ ᠎ᠤᠨ 
ᠰᠲ᠋ᠠᠨ᠋ᠳᠠᠷᠳ᠋ ᠴᠠᠭ + ᠠᠲ᠋ᠯᠠᠨ᠋ᠲ ᠎ᠤᠨ ᠵᠣᠨ ᠎ᠪ ᠴᠠᠭ + + + + + ᠲᠥᠪ ᠡᠸᠣᠢᠷᠤᠫᠠ ᠢᠢᠨ ᠴᠠᠭ + ᠲᠥᠪ ᠡᠸᠣᠢᠷᠤᠫᠠ ᠢᠢᠨ ᠰᠲ᠋ᠠᠨ᠋ᠳᠠᠷᠳ᠋ ᠴᠠᠭ + ᠲᠥᠪ ᠡᠸᠣᠢᠷᠤᠫᠠ ᠢᠢᠨ ᠵᠣᠨ ᠎᠎ᠤ ᠴᠠᠭ + + + + + ᠵᠡᠭᠦᠨ ᠡᠸᠣᠢᠷᠤᠫᠠ ᠢᠢᠨ ᠴᠠᠭ + ᠵᠡᠭᠦᠨ ᠡᠸᠣᠢᠷᠤᠫᠠ ᠢᠢᠨ ᠰᠲ᠋ᠠᠨ᠋ᠳᠠᠷᠳ᠋ ᠴᠠᠭ + ᠵᠡᠭᠦᠨ ᠡᠸᠣᠢᠷᠤᠫᠠ ᠢᠢᠨ ᠵᠣᠨ ᠎᠎ᠤ ᠴᠠᠭ + + + + + ᠪᠠᠷᠠᠭᠣᠨ ᠡᠸᠣᠢᠷᠤᠫᠠ ᠢᠢᠨ ᠴᠠᠭ + ᠪᠠᠷᠠᠭᠣᠨ ᠡᠸᠣᠢᠷᠤᠫᠠ ᠢᠢᠨ ᠰᠲ᠋ᠠᠨ᠋ᠳᠠᠷᠳ᠋ ᠴᠠᠭ + ᠪᠠᠷᠠᠭᠣᠨ ᠡᠸᠣᠢᠷᠤᠫᠠ ᠢᠢᠨ ᠵᠣᠨ ᠎᠎ᠤ ᠴᠠᠭ + + + + + ᠭᠷᠢᠨ᠋ᠸᠢᠴᠢ ᠢᠢᠨ ᠴᠠᠭ + + + + + + + , + + + + + ¤#,##0.00 + + + + + + ᠪᠷᠠᠽᠢᠯ ᠤᠨ ᠷᠧᠠᠯ + ᠪᠷᠠᠽᠢᠯ ᠤᠨ ᠷᠧᠠᠯ + ᠪᠷᠠᠽᠢᠯ ᠤᠨ ᠷᠧᠠᠯ + + + ᠬᠢᠲᠠᠳ ᠶᠤᠸᠠᠨ + ᠬᠢᠲᠠᠳ ᠶᠤᠸᠠᠨ + ᠬᠢᠲᠠᠳ ᠶᠤᠸᠠᠨ + + + ᠶᠧᠸᠷᠣ + ᠶᠧᠸᠷᠣ + ᠶᠧᠸᠷᠣ + + + ᠪᠷᠢᠲ᠋ᠠᠨᠢ ᠢᠢᠨ ᠫᠤᠢᠨᠳ᠋ + ᠪᠷᠢᠲ᠋ᠠᠨᠢ ᠢᠢᠨ ᠫᠤᠢᠨᠳ᠋ + ᠪᠷᠢᠲ᠋ᠠᠨᠢ ᠢᠢᠨ ᠫᠤᠢᠨᠳ᠋ + + + ᠡᠨᠡᠳᠬᠡᠭ᠌ ᠷᠦᠫᠢ + ᠡᠨᠡᠳᠬᠡᠭ᠌ ᠷᠦᠫᠢ + ᠡᠨᠡᠳᠬᠡᠭ᠌ ᠷᠦᠫᠢ + + + ᠶᠠᠫᠣᠨ ᠧᠨ + ᠶᠠᠫᠣᠨ ᠧᠨ + ᠶᠠᠫᠣᠨ ᠧᠨ + + + ᠳᠥᠬᠥᠷᠢᠭ᠌ + ᠳᠥᠬᠥᠷᠢᠭ᠌ + ᠳᠥᠬᠥᠷᠢᠭ᠌ + + + + ᠣᠷᠥᠰ ᠷᠥᠪᠯᠢ + ᠣᠷᠥᠰ ᠷᠥᠪᠯᠢ + ᠣᠷᠥᠰ ᠷᠥᠪᠯᠢ + + + ᠠᠮᠸᠷᠢᠻᠠ ᠳ᠋ᠣᠯᠯᠠᠷ + ᠠᠮᠸᠷᠢᠻᠠ ᠳ᠋ᠣᠯᠯᠠᠷ + ᠠᠮᠸᠷᠢᠻᠠ ᠳ᠋ᠣᠯᠯᠠᠷ + + + ᠲᠣᠳᠣᠷᠬᠠᠢ ᠥᠭᠡᠢ ᠮᠥᠩᠭᠥᠨ ᠲᠡᠮᠳᠡᠭᠳᠥ + ᠲᠣᠳᠣᠷᠬᠠᠢ ᠥᠭᠡᠢ ᠮᠥᠩᠭᠥᠨ ᠲᠡᠮᠳᠡᠭᠳᠥ ᠢᠢᠨ ᠨᠢᠭᠡᠴᠡ + (ᠲᠣᠳᠣᠷᠬᠠᠢ ᠥᠭᠡᠢ ᠮᠥᠩᠭᠥᠨ ᠲᠡᠮᠳᠡᠭᠳᠥ) + + + + + + hh:mm + + + hh:mm:ss + + + + + ᠲᠡᠢᠢᠮᠣ᠄ ᠲ + ᠥᠬᠡᠢ᠄ ᠥ + + + diff --git a/make/data/cldr/common/main/mni.xml b/make/data/cldr/common/main/mni.xml index 8555dae9549..548762807a4 100644 --- a/make/data/cldr/common/main/mni.xml +++ b/make/data/cldr/common/main/mni.xml @@ -1,6 +1,6 @@ - + + + + + + + + + + [꯬ ꯀ ꯁ ꯂ ꯃ ꯄ ꯅ ꯆ ꯇ ꯈ ꯉ ꯊ ꯋ ꯌ ꯍ ꯎ ꯏ ꯐ ꯑ ꯒ ꯓ ꯔ ꯕ ꯖ ꯗ ꯘ ꯙ ꯚ ꯣ ꯤ \uABE5 ꯦ ꯧ \uABE8 ꯩ ꯪ ꯛ ꯜ ꯝ ꯞ ꯟ ꯠ ꯡ ꯢ \uABED] + [\- ‑ , . % + 0꯰ 1꯱ 2꯲ 3꯳ 4꯴ 5꯵ 6꯶ 7꯷ 8꯸ 9꯹] + [\- ‑ , ; \: ! ? . … ꯫ ' " ( ) \[ \] @ * / \& #] + + + + + + + + EEEE, d MMMM, y + yMMMMEEEEd + + + + + d MMMM, y + yMMMMd + + + + + dd-MM-y + yMMdd + + + + + d-M-y + yMd + + + + + + + h.mm.ss a zzzz + ahmmsszzzz + + + + + h.mm.ss a z + ahmmssz + + + + + h.mm.ss a + ahmmss + + + + + h.mm. 
a + ahmm + + + + + + + + mtei + + diff --git a/make/data/cldr/common/main/mni_Mtei_IN.xml b/make/data/cldr/common/main/mni_Mtei_IN.xml new file mode 100644 index 00000000000..40c5a7b54c6 --- /dev/null +++ b/make/data/cldr/common/main/mni_Mtei_IN.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + + + + + + + + + + + دنيا + اسيا تيمور + اسيا سلاتن + اسيا تڠݢارا + اسيا + اسيا تڠه + اسيا بارات + اميريک لاتين + بروني + البرازيل + چينا + جرمان + ڤرنچيس + إندونيسيا + اينديا + إيطاليا + جڤون + مليسيا + عرب سعودي + سيڠاڤورا + تايلان + تايوان + اميريک شريکت + ولايه تيدق دکتاهوءي + + + کومبڠ + ماتواڠ + نو + زون وقتو + + + کومبڠ بودا + کومبڠ چينا + کومبڠ کبڠساٴن اينديا + کومبڠ اسلام + کومبڠ سيۏيل اسلام + کومبڠ جڤون + کومبڠ ڤرسي + اتورن ايسيه قاموس + اتورن ايسيه بوکو تيليفون + اوروتن ايسيه فونيتيک + اتورن ايسيه ڤمبهاروان + چارين توجوان عموم + اتورن ايسيه تراديسيونل + اتورن ايسيه چوريتن راديکل + اڠک کأواڠن + اڠک ڤرڤولوهن چينا + اڠک چينا ريڠکس + اڠک کأواڠن چينا ريڠکس + اڠک چينا تراديسيونل + اڠک کأواڠن چينا تراديسيونل + اڠک جڤون + اڠک کأواڠن جڤون + ديݢيت بارات + ديݢيت مالايالم + ديݢيت اصل + اڠک تاميل + ديݢيت تاميل + اڠک تراديسيونل + + + ميتريک + + + + + right-to-left + + + + [ء آ أ ؤ إ ئ ا ب ة ت ث ج چ ح خ د ذ ر ز س ش ص ض ط ظ ع غ ڠ ف ڤ ق ك ک ݢ ل م ن ڽ ه و ۏ ى ي] + [ڬ ۑ] + {0}… + …{0} + {0}…{1} + ؟ + + + + + + + + + + + + + + EEEE، d MMMM y G + GyMMMMEEEEd + + + + + d MMMM y G + GyMMMMd + + + + + dd/MM/y G + GyMMdd + + + + + d/MM/y G + GyMMd + + + + + + E, d + d/M + E، d/M + d MMM + E، d MMM + d MMMM + M/y G + E، d/M/y G + MMM y G + d MMM y G + E، d MMM y G + + + + + + + + EEEE، U MMMM dd + UMMMMEEEEdd + + + + + U MMMM d + UMMMMd + + + + + U MMM d + UMMMd + + + + + y-M-d + yMd + + + + + + + + + EEEE، d MMMM y G + GyMMMMEEEEd + + + + + d MMMM y G + GyMMMMd + + + + + dd/MM/y G + GyMMdd + + + + + d/MM/yy GGGGG + GGGGGyyMMd + + + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + d + d E + h a + HH + h:mm a + HH:mm + H:mm + h:mm:ss a + HH:mm:ss + L + d-M + E، 
d-M + dd/MM + LLL + d MMM + E، d MMM + d MMMM + mm:ss + y + M-y + d/M/y + E، d/M/y + MMM y + d MMM y + E، d MMM y + QQQ y + QQQQ y + + + {0} – {1} + + d–d + + + h a – h a + h–h a + + + HH–HH + + + h:mm a – h:mm a + h:mm–h:mm a + h:mm–h:mm a + + + HH:mm–HH:mm + HH:mm–HH:mm + + + h:mm a – h:mm a v + h:mm–h:mm a v + h:mm–h:mm a v + + + HH:mm–HH:mm v + HH:mm–HH:mm v + + + h a – h a v + h–h a v + + + HH–HH v + + + M–M + + + d/M – d/M + d/M – d/M + + + E، d/M – E، d/M + E، d/M – E، d/M + + + MMM–MMM + + + d–d MMM + d MMM – d MMM + + + E، d MMM – E، d MMM + E، d MMM – E، d MMM + + + y–y + + + M/y – M/y + M/y – M/y + + + d/M/y – d/M/y + d/M/y – d/M/y + d/M/y – d/M/y + + + E، d/M/y – E، d/M/y + E، d/M/y – E، d/M/y + E، d/M/y – E، d/M/y + + + MMM–MMM y + MMM y – MMM y + + + d–d MMM y + d MMM – d MMM، y + d MMM y – d MMM y + + + E، d MMM – E، d MMM، y + E، d MMM – E، d MMM، y + E، d MMM y – E، d MMM y + + + MMMM–MMMM y + MMMM y – MMMM y + + + + + + + + + جانواري + فيبواري + مچ + اڤريل + مي + جون + جولاي + ݢوس + سيڤتيمبر + اوکتوبر + نوۏيمبر + ديسيمبر + + + + + + + احد + اثنين + ثلاث + رابو + خميس + جمعة + سبتو + + + + + + + سوکو 1 + سوکو ک-2 + سوکو ک-3 + سوکو ک-4 + + + سوکو ڤرتام + سوکو ک-2 + سوکو ک-3 + سوکو ک-4 + + + + + سوکو 1 + سوکو ک-2 + سوکو ک-3 + سوکو ک-4 + + + سوکو ڤرتام + سوکو ک-2 + سوکو ک-3 + سوکو ک-4 + + + + + + + EEEE، d MMMM y + yMMMMEEEEd + + + + + d MMMM y + yMMMMd + + + + + dd/MM/y + yMMdd + + + + + d/MM/yy + yyMMd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + d + d E + h a + HH + h:mm a + HH:mm + H:mm + h:mm:ss a + HH:mm:ss + L + d-M + E، d-M + dd/MM + LLL + d MMM + E، d MMM + d MMMM + mm:ss + y + M-y + d/M/y + E، d/M/y + MMM y + d MMM y + E، d MMM y + QQQ y + QQQQ y + + + {0} – {1} + + d–d + + + h a – h a + h–h a + + + HH–HH + + + h:mm a – h:mm a + h:mm–h:mm a + h:mm–h:mm a + + + HH:mm–HH:mm + 
HH:mm–HH:mm + + + h:mm a – h:mm a v + h:mm–h:mm a v + h:mm–h:mm a v + + + HH:mm–HH:mm v + HH:mm–HH:mm v + + + h a – h a v + h–h a v + + + HH–HH v + + + M–M + + + d/M – d/M + d/M – d/M + + + E، d/M – E، d/M + E، d/M – E، d/M + + + MMM–MMM + + + d–d MMM + d MMM – d MMM + + + E، d MMM – E، d MMM + E، d MMM – E، d MMM + + + y–y + + + M/y – M/y + M/y – M/y + + + d/M/y – d/M/y + d/M/y – d/M/y + d/M/y – d/M/y + + + E، d/M/y – E، d/M/y + E، d/M/y – E، d/M/y + E، d/M/y – E، d/M/y + + + MMM–MMM y + MMM y – MMM y + + + d–d MMM y + d MMM – d MMM، y + d MMM y – d MMM y + + + E، d MMM – E، d MMM، y + E، d MMM – E، d MMM، y + E، d MMM y – E، d MMM y + + + MMMM–MMMM y + MMMM y – MMMM y + + + + + + + + + EEEE، d MMMM y G + GyMMMMEEEEd + + + + + d MMMM y G + GyMMMMd + + + + + dd/MM/y G + GyMMdd + + + + + d/MM/y G + GyMMd + + + + + + E، d + d/M + E، d/M + d MMM + E، d MMM + d MMMM + M/y G + E، d/M/y G + MMM y G + d MMM y G + E، d MMM y G + + + + + + + + EEEE، d MMMM y G + GyMMMMEEEEd + + + + + d MMMM y G + GyMMMMd + + + + + dd/MM/y G + GyMMdd + + + + + d/MM/y G + GyMMd + + + + + + E، d + d/M + E، d/M + d MMM + E، d MMM + d MMMM + M/y G + E، d/M/y G + MMM y G + d MMM y G + E، d MMM y G + + + + + + + ايرا + + + تاهون + تاهون لڤس + تاهون ني + تاهون هدڤن + + دالم {0} تاهون + + + {0} تاهون لالو + + + + بولن + بولن لالو + بولن ني + بولن ستروسڽ + + دالم {0} بولن + + + {0} بولن لالو + + + + ميڠݢو + ميڠݢو لڤس + ميڠݢو ني + ميڠݢو ستروسڽ + + دالم {0} ميڠݢو + + + {0} ميڠݢو لالو + + + + هاري + هاري سبلوم سمالم + سمالم + هاري ني + ايسوق + هاري سلڤس ايسوق + + دالم {0} هاري + + + {0} هاري لالو + + + + هاري دالم ميڠݢو + + + جم + + دالم {0} جم + + + {0} جم لالو + + + + مينيت + + دالم {0} مينيت + + + {0} مينيت لالو + + + + کدوا + + دالم {0} ساعت + + + {0} ساعت لالو + + + + زون وقتو + + + + +HH:mm;-HH:mm + وقتو {0} + {1} ({0}) + + باندر تيدق دکتاهوءي + + + سيڠاڤورا + + + + وقتو بروني دارالسلام + + + + + وقتو ڤياواي اينديا + + + + + وقتو لاٴوتن هيندي + + + + + وقتو إندونيسيا تڠه + + + + + وقتو إندونيسيا 
تيمور + + + + + وقتو إندونيسيا بارات + + + + + وقتو مليسيا + + + + + + + . + , + ; + % + + + - + E + + + NaN + + + + + #,##0.### + + + + + 0 ريبو + 00 ريبو + 000 ريبو + 0 جوتا + 00 جوتا + 000 جوتا + 0 بيليون + 00 بيليون + 000 بيليون + 0 تريليون + 00 تريليون + 000 تريليون + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + ¤#,##0.00 + + + ¤#,##0.00;(¤#,##0.00) + + + {0} {1} + + + + دولر بروني + + + ڤاٴون ستيرليڠ بريتيش + £ + + + روڤياە إندونيسيا + + + ريڠݢيت مليسيا + RM + + + دولر سيڠاڤورا + + + مات واڠ تيدق دکتاهوءي + + + + + + + {0} تاهون + + + {0} بولن + + + {0} ميڠݢو + + + {0} هاري + + + {0} جم + + + {0} مينيت + + + {0} ساعت + + + + + {0} thn + + + {0} بولن + + + {0} ميڠݢو + + + {0} هاري + + + {0} جم + + + {0} min + + + {0} ساعت + + + + diff --git a/make/data/cldr/common/main/ms_Arab_BN.xml b/make/data/cldr/common/main/ms_Arab_BN.xml new file mode 100644 index 00000000000..cfac2af9bcd --- /dev/null +++ b/make/data/cldr/common/main/ms_Arab_BN.xml @@ -0,0 +1,57 @@ + + + + + + + + + + + + + + + Масторланго + Африка + Африкань чивалгома ёнкс + Африкань чилисема ёнкс + Африкань пелеве ёнкс + Африкань куншка + Америкат + Азиянь чилисема ёнкс + Азиянь чинеле ёнкс + Азиянь чинеле-чилисема ёнкс + Европань чипеле ёнкс + Азия + Азиянь куншка + Азиянь чивалгома ёнкс + Европа + Европань чилисема ёнкс + Европань пелеве ёнкс + Европань чивалгома ёнкс + Андорра + Афганистан + Албания + Арменэнь мастор + Ангола + Антарктида + Аргентина + Американь Самоа + Австрия + Австралия + Аландонь усият + Барбадос + Бангладеш + Белгия + Болгария + Бурунди + Бенин + Бермуда + Боливия + Бразил + Ботсвана + Беларусия + Канада + Швейцария + Кук усият + Чили + Китай + Колумбия + Куба + Чехия + Чех Раськемастор + Германия + Дания + Алгерия + Эстэнь мастор + Эритрея + Испания + Финнэнь мастор + Фиджи + Фарерэнь усият + Франция + Габон + Гренада + Гамбия + Грекень мастор + Гватемала + Гуам + Хорватия + Гаити + Венгрия + Ирландия + Ман усия + Индия + Иран + Исландия + Италия + Япононь мастор + 
Лихтенштейн + Литва + Люксембург + Латвия + Монако + Молдова + Монтенегро + Мали + Монголонь мастор + Малта + Малави + Намибия + Од Каледония + Нигер + Нигерия + Нидерланд + Норвегия + Непал + Науру + Од Зеландия + Аотеароа Од Зеландия + Панама + Перу + Пакистан + Польша + Португалонь мастор + Парагвай + Румыния + Сербень мастор + Рузонь мастор + Соломон усият + Судан + Шведэнь мастор + Словения + Словакия + Сенегал + Сомалия + Чад + Того + Таймастор + Тонга + Тувалу + Тайван + Танзания + Украина + Уганда + Вейсэндязь Раськетнень Организация + Американь Вейсэндявкс Штаттнэ + АВШ + Уругвай + Ватикан ош + Вануату + Самоа + Косово + Замбия + Зимбабве + Асодавикс Ёнкс + + + {0} + {0} + {0} + + + + + left-to-right + top-to-bottom + + + + [а б в г д е ё ж з и й к л м н о п р с т у ф х ц ч ш щ ъ ы ь э ю я] + [ӓ ә є җ ѕ і ҥ ў ѡ џ ѣ ѳ ѵ ѷ] + [А Б В Г Д Е Ё Ж З И Й К Л М Н О П Р С Т У Ф Х Ц Ч Ш Щ Ъ Ы Ь Э Ю Я] + [\- ‐ ‑ – , ; \: ! ? . … ’ ” » ( ) \[ \] § @ * / \& #] + + + + + + + + якшамков + даволков + эйзюрков + чадыков + панжиков + аштемков + медьков + умарьков + таштамков + ожоков + сундерьков + ацамков + + + + + якш + дав + эйз + чад + пан + ашт + мед + ума + таш + ожо + сун + аца + + + + + + + тар + атя + вас + кун + кал + сюк + шля + + + тар + атя + вас + кун + кал + сюк + шля + + + таргочистэ + атяньчистэ + вастаньчистэ + куншкачистэ + калоньчистэ + сюконьчистэ + шлямочистэ + + + + + тар + атя + вас + кун + кал + сюк + шля + + + тар + атя + вас + кун + кал + сюк + шля + + + таргочи + атяньчи + вастаньчи + куншкачи + калоньчи + сюконьчи + шлямочи + + + + + + + обедтэ икеле + обедтэ мейле + + + + + обедтэ икеле + обедтэ мейле + + + + + + Христосонь чачомадо икеле + Минек эрадо икеле + Христосонь чачомадо мейле + Минек эрасто + + + + + + + пинге + + + ие + мелят + тедиде + сы иестэ + + + ие + мелят + тедиде + сы иестэ + + + ие + + + нилеце пелькс + + + нилеце пелькс + + + нилеце пелькс + + + ков + ютазь ковсто + те ковсто + сы ковсто + + + ков + + + ков + + + тарго + 
ютазь таргосто + те таргосто + сы таргосто + {0} таргостонть + + + тарго + + + тарго + + + чи + исяк + течи + ванды + + + чи + + + чи + + + таргоютконь чи + + + час + + + минута + + + секунда + + + + + Асодавикс Ош + + + Андорра + + + Тирана + + + Вена + + + Баку + + + Сараево + + + Брюссель + + + София + + + Бермуда + + + Минской + + + Цюрих + + + Шанхай + + + Прага + + + Берлин + + + Копенгаген + + + Таллин + + + Мадрид + + + Хельсинки + + + Париж + + + Лондон + + + Гренада + + + Афины + + + Гонконг + + + Загреб + + + Будапешт + + + Дублин + + + Багдад + + + Рейкьявик + + + Рим + + + Токио + + + Сеул + + + Вадуц + + + Вильнюс + + + Люксембург + + + Рига + + + Монако + + + Подгорица + + + Скопье + + + Улан-батор + + + Мальта + + + Амстердам + + + Осло + + + Панама + + + Варшава + + + Мадейра + + + Лиссабон + + + Бухарест + + + Белград + + + Москов + + + Саратов + + + Киров + + + Самара + + + Омской + + + Томской + + + Сахалин + + + Стокгольм + + + Любляна + + + Лонгйирбюен + + + Братислава + + + Сан-Марино + + + Тайпей + + + Ужгород + + + Киев + + + Симферополь + + + Запорожье + + + Бойси + + + Денвер + + + Шикаго + + + Самарканд + + + Ташкент + + + Ватикан + + + + Афганистанонь шка + + + + + Аляскань шка + Аляскань свалонь шка + Аляскань кизэнь шка + + + + + Амазононь шка + Амазононь свалонь шка + Амазононь кизэнь шка + + + + + Бангладешень шка + Бангладешень свалонь шка + Бангладешень кизэнь шка + + + + + Базилиянь шка + Базилиянь свалонь шка + Базилиянь кизэнь шка + + + + + Чилинь шка + Чилинь свалонь шка + Чилинь кизэнь шка + + + + + Китаень шка + Китаень свалонь шка + Китаень кизэнь шка + + + + + Кубань шка + Кубань свалонь шка + Кубань кизэнь шка + + + + + Индиянь свалонь шка + + + + + Иранонь шка + Иранонь свалонь шка + Иранонь кизэнь шка + + + + + Япониянь шка + Япониянь свалонь шка + Япониянь кизэнь шка + + + + + Казахстанонь чилисемань шка + + + + + Казахстанонь чивалгомань шка + + + + + Кореань шка + Кореань свалонь шка + Кореань кизэнь шка + + + + + 
Московонь шка + Московонь свалонь шка + Московонь кизэнь шка + + + + + Од Зеландиянь шка + Од Зеландиянь свалонь шка + Од Зеландиянь кизэнь шка + + + + + Омскоень шка + Омскоень свалонь шка + Омскоень кизэнь шка + + + + + Парагваень шка + Парагваень свалонь шка + Парагваень кизэнь шка + + + + + Сахалинэнь шка + Сахалинэнь свалонь шка + Сахалинэнь кизэнь шка + + + + + Уругваень шка + Уругваень свалонь шка + Уругваень кизэнь шка + + + + + Узбекистанонь шка + Узбекистанонь свалонь шка + Узбекистанонь кизэнь шка + + + + + + latn + + latn + + + + + истя:и + арась:а + + + diff --git a/make/data/cldr/common/main/myv_RU.xml b/make/data/cldr/common/main/myv_RU.xml new file mode 100644 index 00000000000..da3bfc9a168 --- /dev/null +++ b/make/data/cldr/common/main/myv_RU.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/mzn.xml b/make/data/cldr/common/main/mzn.xml index 87301122ec6..bfa844bbcd0 100644 --- a/make/data/cldr/common/main/mzn.xml +++ b/make/data/cldr/common/main/mzn.xml @@ -1,6 +1,6 @@ - - {given} {given2} {surname} {suffix} + {title} {given} {given2} {surname} {generation} {credentials} {given-informal} {surname} - {prefix} {surname} + {title} {surname} {given-informal} @@ -17924,13 +19105,13 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ {given-informal-monogram-allCaps}{surname-prefix-monogram}{surname-core-monogram-allCaps} - {given} {given2-initial} {surname} {suffix} + {given} {given2-initial} {surname} {generation} {credentials} {given-informal} {surname} - {prefix} {surname} + {title} {surname} {given-informal} @@ -17948,7 +19129,7 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ {given-informal} {surname-initial} - {prefix} {surname} + {title} {surname} {given-informal} @@ -17960,31 +19141,31 @@ Warnings: All cp values have U+FE0F characters removed. 
See /annotationsDerived/ {given-informal-monogram-allCaps} - {surname} {given} {given2} {suffix} + {title} {surname} {given} {given2} {generation} {credentials} {surname} {given-informal} - {prefix} {surname} + {title} {surname} {given-informal} - {surname-monogram-allCaps}{given-monogram-allCaps}{given2-monogram-allCaps} + {surname-prefix-monogram}{surname-core-monogram-allCaps}{given-monogram-allCaps}{given2-monogram-allCaps} - {surname-monogram-allCaps}{given-informal-monogram-allCaps} + {surname-prefix-monogram}{surname-core-monogram-allCaps}{given-informal-monogram-allCaps} - {surname} {given} {given2-initial} {suffix} + {surname} {given} {given2-initial} {generation} {credentials} {surname} {given-informal} - {prefix} {surname} + {title} {surname} {given-informal} @@ -17996,62 +19177,86 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ {given-informal-monogram-allCaps} - {surname} {given-initial} {given2-initial} + {surname} {given-initial}{given2-initial} {surname} {given-initial} - {prefix} {surname} + {title} {surname} {given-informal} - {surname-prefix-monogram}{surname-core-monogram-allCaps} + {surname-monogram-allCaps} {given-informal-monogram-allCaps} - {surname}, {given} {given2} {suffix} + {surname-core}, {given} {given2} {surname-prefix} {surname}, {given-informal} - {surname}, {given} {given2-initial} {suffix} + {surname}, {given} {given2-initial} {credentials} {surname}, {given-informal} - {surname}, {given-initial} {given2-initial} + {surname}, {given-initial}{given2-initial} {surname}, {given-informal} - - Sinbad + + Fatima - + Irene Bakker - - Peter + + Heidi Johannes Willemse - - mevrouw - Ingrid - Ingy - Francina Zoë - van den - Berg - Wolff Metternich - PhD + + dhr. + Bertus Wevers + Bert + Harry Robert + de + Jong + ∅∅∅ + jr. + mp + + + Sinbad + + + Käthe + Müller + + + Zäzilia + Hamish + Stöber + + + mevrouw + Ada Cornelia + Neele + César Martín + von + Brühl + González Domingo + jr. 
+ PhD diff --git a/make/data/cldr/common/main/nl_AW.xml b/make/data/cldr/common/main/nl_AW.xml index b7627f6bb33..8c50225ecaf 100644 --- a/make/data/cldr/common/main/nl_AW.xml +++ b/make/data/cldr/common/main/nl_AW.xml @@ -1,6 +1,6 @@ - - {prefix} {given-initial-allCaps}{given2-initial-allCaps} {surname} + {title} {given-initial-allCaps}{given2-initial-allCaps} {surname} - {prefix} {surname-core-initialCap} + {title} {surname-core-initialCap} {given-monogram-allCaps}{given2-monogram-allCaps}{surname-monogram-allCaps} @@ -131,7 +131,7 @@ CLDR data files are interpreted according to the LDML specification (http://unic {given-informal-monogram-allCaps}{surname-monogram-allCaps} - {prefix} {surname-initialCap} + {title} {surname-initialCap} {given-monogram-allCaps}{surname-monogram-allCaps} @@ -140,34 +140,31 @@ CLDR data files are interpreted according to the LDML specification (http://unic {given-informal} - {prefix} {surname-initialCap} + {title} {surname-initialCap} {surname-monogram-allCaps} - {surname}, {prefix} {given} {given2}, {suffix} + {surname}, {title} {given} {given2}, {credentials} - - {surname}, {given-initial}{given2-initial} - - + Liam - + Liam Van den Berg - + Liam Hugo Mees Van den Berg - + Juliette Juli - Van den Berg - Van den + Van den + Berg diff --git a/make/data/cldr/common/main/nl_BQ.xml b/make/data/cldr/common/main/nl_BQ.xml index 4abd9d8f817..f6a7ff1ed45 100644 --- a/make/data/cldr/common/main/nl_BQ.xml +++ b/make/data/cldr/common/main/nl_BQ.xml @@ -1,6 +1,6 @@ - + + + + + + + + {0}، {1} + + + ߊߝߙߌߞߊ߲߯ߛߑߞߊ߲ + ߊߜ߭ߍߡߑߞߊ߲ + ߊߞߊ߲ߞߊ + ߊߡߑߤߊߙߌ + ߊߙߓߎߞߊ߲ + ߊߙߊߓߎߞߊ߲ ߘߐߞߣߍߣߍ߲ + ߊߛߊߡߍߞߊ߲ + ߊߖ߭ߎߞߊ߲ + ߊߛߑߕߎߙߌߞߊ߲ + ߊߖߍߙߑߓߊߦߌߖߊߞߊ߲ + ߊߖߋߙߌߞߊ߲ + ߓߛߊߞߊ߲ + ߓߌߟߏߙߎ߳ߛߌߞߊ߲ + ߓߋ߲ߓߊߞߊ߲ + ߓߋߣߊߞߊ߲ + ߓߌߟߑߜ߭ߊߙߌߞߊ߲ + ߒߞߏ ߡߊߟߌ + ߓߍ߲ߜ߭ߊߟߌߞߊ߲ + ߕߌߓߋߕߌߞߊ߲ + ߓߙߋߕߐ߲ߞߊ߲ + ߓߏߘߏߞߊ߲ + ߓߐߛߑߣߌߞߊ߲ + ߞߕߊߟߊ߲ߞߊ߲ + ߞߏߘߊ߫ ߗߊߜ߭ߑߡߊߞߊ߲ + ߗߋߗߋߣߌߞߊ߲ + ߛߋߓߎߥߊߞߊ߲ + ߞߌߜ߭ߊߞߊ߲ + ߗߋߙߏߞߌߞߊ߲ + ߛߏߙߊߣߌߞߊ߲ + ߛߏߙߊߣߌ߫ ߞߎߙߑߘߎߞߊ߲ + ߞߐߙߑߛߌߞߊ߲ + ߗߍߞߌߞߊ߲ + ߛߌߟߊߝ߭ߐ߲ߞߊ߲ ߓߊ߬ߕߏ߬ߓߏ߲߬ߞߊ߲ + ߜ߭ߟߏߥߊߞߊ߲ + ߘߊߣߏߥߊߞߊ߲ + 
ߕߊߦߌߕߊߞߊ߲ + ߊߟߑߡߊ߲ߞߊ߲ + ߏߕߑߙߌߛߌ߬ ߊߟߑߡߊߞߊ߲ + ߛߥߌߛ ߊߟߑߡߊ߲ߞߊ߲ + ߖ߭ߍߙߑߡߊߞߊ߲ + ߘߏߜ߭ߙߌߞߊ߲ + ߛߏߙߊߓ-ߓߊߛߑߞߊ߲ + ߘߎߥߟߊߞߊ߲ + ߝߐߢߌ߫ ߖߏ߬ߟߊ߬ߞߊ߲ + ߖ߭ߏ߲ߜ߭ߊߞߊ߲ + ߋ߲ߓߎߞߊ߲ + ߋߥߋߞߊ߲ + ߜ߭ߙߍ߬ߞߌ߬ߞߊ߲ + ߊ߲߬ߜߑߟߋ߬ߞߊ߲ + ߐߛߑߕߙߊߟߌ߫ ߊ߲߬ߜ߭ߑߟߋ߬ߞߊ߲ + ߞߣߊߘߊ߫ ߊ߲߬ߜ߭ߑߟߋ߬ߞߊ߲ + ߓߙߌߕߊ߲ߓߊ߫ ߊ߲߬ߜ߭ߑߟߋ߬ߞߊ߲ + ߡ.ߟ. ߊ߲߬ߜ߭ߑߟߋ߲߬ߞߊ߲ + ߊߡߋߙߌߞߌ߬ ߊ߲߬ߜ߭ߑߟߋ߬ߞߊ߲ + ߞ.ߘ. ߊ߲߬ߜ߭ߑߟߋ߬ߞߊ߲ + ߍߛߑߔߋߙߊ߲ߕߏߞߊ߲ + ߊߛߌߔߞߊ߲ + ߊߡߋߙߌߞߌ߬ ߊߛߌߔߊ߲ߞߊ߲ ߟߊ߬ߕߍ߬ߡߊ + ߊߛߌߔߊ߲߫ ߊߛߌߔߊ߲ߞߊ߲ + ߡߍߞߑߛߌߞ ߊߛߌߔߊ߲ߞߊ߲ + ߍߛߑߕߏߣߌߞߊ߲ + ߓߊߛߑߞߌߞߊ߲ + ߋߥߏ߲ߘߏߞߊ߲ + ߝߊ߯ߙߛߌߞߊ߲ + ߘߊߙߌߞߊ߲ + ߝߎߟߊߞߊ߲ + ߝߍߣߏߥߊߞߊ߲ + ߝߟߌߔߌ߲ߞߊ߲ + ߝߋߙߏߞߊ + ߝߊ߬ߙߊ߲߬ߛߌ߬ߞߊ߲ + ߞߣߊߘߊ߫ ߝߊ߬ߙߊ߲߬ߛߌ߬ߞߊ߲ + ߛߥߌߛ ߝߙߊ߬ߛߌ߬ߞߊ߲ + ߞߊߘߌ߫ ߝߊ߬ߙߊ߲߬ߛߌ߬ߞߊ߲ + ߝߙߌߥߎߟߊߞߊ߲ + ߕߟߋ߬ߓߋ ߝߙߌߛߐ߲ߞߊ߲ + ߌߙߑߟߊ߲ߘߌߞߊ߲ + ߋߞߐߛߌ߬ ߖ߭ߋߏߟߌߞߊ߲ + ߜ߭ߊߟߌߛߌߞߊ߲ + ߛߥߌߛߌ߬ ߊߟߑߡߊ߲ߞߊ߲ + ߜ߭ߎߖߙߊߞߊ߲ + ߜߎ߭ߛߌߞߊ߲ + ߡߊߣߏߥߊߞߊ߲ + ߤߊߥߎߛߊߞߊ߲ + ߤߥߊߦߌߞߊ߲ + ߋߓߙߋߞߊ߲ + ߍ߲ߘߎߞߊ߲ + ߡߐ߲ߜ߭ߑߞߊ߲ + ߞߙߏߥߊߛߌߞߊ߲ + ߛߏߙߊߓߎ߫ ߛߊ߲ߘߐ߫ ߞߊ߲ + ߤߊߦߌߕߌ߫ ߕߊ߬ߓߎ߰ߛߌ߬ߞߊ߲ + ߤߐ߲ߜ߭ߙߌߞߊ߲ + ߊߙߊߡߋߣߌߞߊ߲ + ߍ߲ߕߍߙߑߟߌ߲ߜ߭ߏߥߊߞߊ߲ + ߍ߲ߘߣߏߛߌߞߊ߲ + ߊߜߏߞߊ߲ + ߛߌߛߎߥߊ߲߫ ߦߌߞߊ߲ + ߌߛߑߟߊ߲ߘߌߞߊ߲ + ߌߕߊߟߌߞߊ߲ + ߖ߭ߊߔߐ߲ߞߊ߲ + ߒߜ߭ߏ߲ߓߊߞߊ߲ + ߡߊߗߊߡߋߞߊ߲ + ߖ߭ߝ߭ߊߣߊߞߊ߲ + ߖ߭ߋߐߙߑߖ߭ߌߞߊ߲ + ߞߊߓߟߌߞߊ߲ + ߞߊ߲ߓߊߞߊ߲ + ߡߊߞߐ߲ߘߋߞߊ߲ + ߜߙߋߞߎ߲ߝߙߌߛߌߞߊ߲ + ߞߍ߲ߜ߭ߊ߲ߞߊ߲ + ߞߏߙߌߦߊߗߣߌߞߊ߲ + ߞߌߞߌߦߎߞߊ߲ + ߞߖ߭ߊߞߌߞߊ߲ + ߞߊߞߏߞߊ߲ + ߜ߭ߙߏߟߊ߲ߘߌߞߊ߲ + ߞߊߟߊ߲ߖߌߞߊ߲ + ߞߑߡߍߙߑߞߊ߲ + ߞߊ߲ߣߊߘߊߞߊ߲ + ߞߏߙߋߞߊ߲ + ߞߐ߲ߞߊߣߌߞߊ߲ + ߞߊߛߑߡߙߌߞߊ߲ + ߛߊ߲ߓߟߊߞߊ߲ + ߓߊߝߌߞߊ߲ + ߞߐߟߑߗߌߞߊ߲ + ߞߎߙߑߘߎߞߊ߲ + ߞߐߙߑߣߌߞߌߞߊ߲ + ߞߌߙߑߞߌߖ߭ߑߞߊ߲ + ߟߊ߬ߕߍ߲߬ߞߊ߲ + ߟߊ߲ߖߌߞߊ߲ + ߟߎߞߑߛߊ߲ߓߎ߯ߙߎߞߊ߲ + ߜ߭ߊ߲ߘߊߞߊ߲ + ߟߌ߭ߎ߳ߙߌߞߊ߲ + ߟߊߞߏߕߊߞߊ߲ + ߟߌ߲ߜ߭ߟߊߞߊ߲ + ߟߊߏߞߊ߲ + ߟߌߖ߭ߌߦߊߣߌ߫ ߕߊ߬ߓߎ߰ߛߌ߬ߞߊ߲ + ߕߟߋ߬ߓߐ ߟߏߙߌߞߊ߲ + ߟߌߕߎ߳ߦߊߣߌߞߊ߲ + ߞߊߕߊ߲ߜ߭ߊ߫-ߗߌ߬ߟߎ߬ߓߊ߬ߞߊ߲ + ߟߎ߳ߏߞߊ߲ + ߟߎ߳ߦߌߞߊ߲ + ߟߋߕߐ߲ߞߊ߲ + ߡߊߗߟߌߞߊ߲ + ߡߊ߯ߛߊߞߊ߲ + ߡߋߙߎߞߊ߲ + ߡߏߙߌߛ ߕߊ߬ߓߎ߰ߛߌ߬ߞߊ߲ + ߡߊߟߑߜ߭ߊߛߌߞߊ߲ + ߡߊߞߎߞߊ߲ + ߡߋߕߊߞߊ߲ + ߡߊߏߙߌߞߊ߲ + ߡߊߛߋߘߏߣߌߞߊ߲ + ߡߟߊߦߟߊߡߑߞߊ߲ + ߡߐ߲ߜ߭ߐߟߌߞߊ߲ + ߡߊߣߌߔߎߙߌߞߊ߲ + ߡߙߊߕߌߞߊ߲ + ߡߊߟߍߞߊ߲ + ߡߊߟߑߕߍߞߊ߲ + ߡߎ߲ߘߊ߲ߞߊ߲ + ߞߊ߲ߥߙߍߞߊ߲ + ߓߙߌߡߊ߲ߞߊ߲ + ߡߊߖ߭ߊ߲ߘߋߙߊߣߞߊ߲ + ߣߡߊߞߊ߲ + ߣߐߙߑߝ߭ߍߖ߭ ߓߏߞߑߡߊߟߑߞߊ߲ + ߕߟߋ߬ߓߐ ߒߘߓߋߟߋߞߊ߲ + ߊߟߑߡߊ߲ߘߎ߯-ߓߊߛߑߞߊ߲ + ߤߏߟߊ߲ߘߌ߬ ߓߊߛߊߞߑߛߐ߲ߞߊ߲ + ߣߋߔߌߟߌߞߊ߲ + ߣߍ߯ߙߑߟߊ߲ߘߌߞߊ߲ + ߝߌߟߊߡߊ߲ߞߊ߲ + ߒߜ߭ߎ߲ߓߊߞߊ߲ + ߣߐߙߑߝ߭ߍߖ߭ ߢߙߐߛߌߞߊ߲ + ߒߖߋ߲ߓߎ߲ߞߊ߲ + ߣߐߙߑߝ߭ߍߖ߭ߌߞߊ߲ + ߒߞߏ + ߣߎߦߋߞߊ߲ + ߣߝ߭ߊߖߏߞߊ߲ + ߛߋߥߞߊ߲ + 
ߢߊ߲ߞߏߟߋߞߊ߲ + ߏߙߏߡߏߞߊ߲ + ߏߖߊߞߊ߲ + ߏߛߍߕߌߞߊ߲ + ߔߍ߲ߖߊߓߌߞߊ߲ + ߖߋ߬ߟߌ߬ߓߊߟߊ߫ ߔߌߘߑߜ߭ߍ߲ߞߊ߲ + ߔߟߏߣߍߞߊ߲ + ߔߎߙߛߌߞߊ߲ + ߔߊߛߑߕߏߞߊ߲ + ߔߕߏ߬ߞߌ߬ߞߊ߲ + ߓߙߋߖ߭ߌߟ ߔߕߏ߬ߞߌ߬ߞߊ߲ + ߋߙߐߔߎ߬ ߔߕߏ߬ߞߌ߬ߞߊ߲ + ߞߋߛߎߥߊߞߊ߲ + ߙߏߤߌ߲ߜ߭ߊ + ߙߏߡߊ߲ߛߌߞߊ߲ + ߙߎ߲ߘߌߞߊ߲ + ߙߏߡߍߞߊ߲ + ߡߐߟߑߘߊߝ߭ߌߞߊ߲ + ߙߏ߲ߓߏߞߊ߲ + ߌ߬ߙߛߌ߬ߞߊ߲ + ߞߌ߲ߦߊߙߎߥߊ߲ߘߊߞߊ߲ + ߙߥߊߞߊ߲ + ߛߊߛߑߞߙߌߞߊ߲ + ߌߦߊߞߎߕߌߞߊ߲ + ߛߊ߲ߓߙߎߞߊ߲ + ߛߊ߲ߕߊߟߌߞߊ߲ + ߌߛߊ߲ߜ߭ߎߞߊ߲ + ߛߌ߲ߘߌߞߊ߲ + ߛߋߡߌ߫ ߕߟߋ߬ߓߐ߬ߞߊ߲ + ߛߌߛߋߣߊߞߊ߲ + ߞߏߦߌߙߊߓߙߏ߫ ߛߋߣߌߞߊ߲ + ߛߊ߲ߜߵߏߞߊ߲ + ߗߑߟߋߥߎߞߊ߲ + ߛߌ߲ߜ߭ߟߊߞߊ߲ + ߛߑߟߏߝ߭ߊߞߌߞߊ߲ + ߛߑߟߏߝ߭ߋߣߌߞߊ߲ + ߛߊߡߏߥߊߞߊ߲ + ߌߣߊߙߌ߫ ߛߊߡߌߞߊ߲ + ߛߏߣߊߞߊ߲ + ߛߏߡߊߟߌߞߊ߲ + ߊߟߑߓߊߣߌߞߊ߲ + ߛߍߙߑߓߌߞߊ߲ + ߛߕߏ߫ ߥߙߏ߬ߘߎ߰ߞߊ߲ + ߛߎ߲ߘߣߊߞߊ߲ + ߛߎߥߍߘߌߞߊ߲ + ߛߎߥߊߤߟߌߞߊ߲ + ߕߊߡߎߟߌߞߊ߲ + ߕߋߟߎߜ߭ߎߞߊ߲ + ߕߋߛߏߞߊ߲ + ߕߊߖߞߌߞߊ߲ + ߕߊߦߌߞߊ߲ + ߕߜ߭ߌߙߌߢߊߞߊ߲ + ߕߎߙߞߌߡߍߣߌߞߊ߲ + ߕߏ߲ߖ߭ߌߞߊ߲ + ߕߙߎߞߌߞߊ߲ + ߕߊߕߊߙߌߞߊ߲ + ߕߛߊߥߊߜ߭ߌߞߊ߲ + ߊߕߌߟߊ߲ߕߊ߫ ߕߊ߲ߓߊ߲ ߊߡߊ߲ߖ߭ߌ߲ߞߊ߲ + ߥߌߜ߭ߎ߯ߙߎߞߊ߲ + ߎߞߌߙߍߣߌߞߊ߲ + ߞߊ߲߫ ߘߊ߲߬ߠߊ߬ߕߍ߰ߓߊߟߌ + ߎߙߘߎߞߊ߲ + ߎߖ߭ߑߓߋߞߌߞߊ߲ + ߒߝ߭ߊߦߌ߲ߞߊ߲ + ߝ߭ߌߦߋߕߌߣߊߡߌߞߊ߲ + ߝ߭ߏߟߊߔߎߞߊ߲ + ߝ߭ߎߖߏߞߊ߲ + ߥߊߟߑߛߍߙߌߞߊ߲ + ߥߟߐߝߐߞߊ߲ + ߛߏߖ߭ߊߞߊ߲ + ߛߏߜ߭ߊߞߊ߲ + ߦߊ߲ߜߌߞߊ߲ + ߦߘߌߛߌߞߊ߲ + ߙߦߏߓߊߞߊ߲ + ߞߊ߲ߕߏߣߊߞߊ߲ + ߞߊ߲ߕߏߣߊ ߛߣߌߥߊߞߊ߲ + ߡߊ߬ߙߐߞߎ߬ ߢߊߓߘߍߡߊ + ߛߣߌߥߊߞߊ߲ + ߛߣߌߥߊ ߡߊ߲ߘߊߙߍ߲ߞߊ߲ + ߛߣߌߥߊߞߊ߲ ߘߐߞߣߍߣߍ߲ + ߡߊ߲ߘߊߙߍ߲ ߘߐߞߣߍߣߍ߲ + ߛߣߌߥߊߞߊ߲ ߦߋ߲ߢߐ߲߯ߠߊ + ߡߊ߲ߘߊߙߍ߲ߞߊ߲ ߢߋ߲ߢߐ߲߯ߠߊ + ߖ߭ߟߎߞߊ߲ + ߞߊ߲߫ ߘߐߞߏߟߏ߲ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ߞߌߢߍ߲߫ + ߝߘߊ߬ߝߌ߲߬ߠߊ߫ + ߊߡߋߙߌߞߌ߬ ߞߐ߬ߘߎ߮ + ߊߡߋߙߌߞߌ߬ ߥߙߏ߬ߘߎ߮ + ߟߌ߲ߓߊ߲ߘߎ߯ + ߝߘߊ߬ߝߌ߲߬ߠߊ߫ ߕߟߋ߬ߓߋ + ߊߡߋߙߌߞߌ߬ ߕߊ߲ߓߊ߲ + ߝߘߊ߬ߝߌ߲߬ߠߊ߫ ߕߟߋ߬ߓߐ + ߝߘߊ߬ߝߌ߲߬ߠߊ߫ ߞߐ߬ߘߎ߮ + ߝߘߊ߬ߝߌ߲߬ߠߊ߫ ߕߊ߲ߓߊ߲ + ߝߘߊ߬ߝߌ߲߬ߠߊ߫ ߥߙߏ߬ߘߎ߮ + ߊߡߋߙߌߞߌ߬ + ߞߐ߰ߘߎ߮ ߊߡߋߙߌߞߌ߬ + ߞߙߊߦߌߓ + ߊߖ߭ߌ߫ ߕߟߋ߬ߓߐ + ߊߖ߭ߌ߫ ߥߙߏ߬ߘߎ߮ + ߊߖ߭ߌ߫ ߥߙߏ߬ߘߎ߮-ߕߟߋ߬ߓߐ + ߋߙߐߔߎ߬ ߥߙߏ߬ߘߎ߮ + ߐߛߑߕߙߊߟߊߖ߭ߌ߫ + ߡߋߟߊߣߋߖ߭ߌ߫ + ߡߌߞߙߏߣߋߖ߭ߌ߫ ߕߌ߲߬ߞߎߘߎ߲ + ߔߏߟߣߋߖ߭ߌ߫ + ߊߖ߭ߌ߫ + ߊߖ߭ߌ߫ ߕߊ߲ߓߊ߲ + ߊߖ߭ߌ߫ ߕߟߋ߬ߓߋ + ߋߙߐߔߎ߬ + ߋߙߐߔߎ߬ ߕߟߋ߬ߓߐ + ߋߙߐߔߎ߬ ߞߐ߬ߘߎ߮ + ߋߙߐߔߎ߬ 
ߕߟߋ߬ߓߋ + ߝߘߊ߬ߝߌ߲߬ߠߊ߫ ߞߌ߬ߢߍ߬ߞߏ߲ߞߏ߫ ߘߎ߰ߟߊ߬-ߖߡߊߣߊ + ߊߡߋߙߞߌ߬ ߟߊ߬ߕߍ߬ߡߊ߬ ߦߙߐ + ߊߛߊ߲ߛߌߦߐ߲߫ ߕߌ߲ + ߊ߲ߘߐߙ + ߋߡߌߙߊߕ ߊߙߊߓߎ߫ ߘߍ߬ߣߍ߲ + ߊߝߎߜ߭ߊߣߌߛߑߕߊ߲߫ + ߊ߲ߕߌߞߎߥߊ߫ ߣߌ߫ ߓߊߙߑߓߎߘߊ߫ + ߊ߲ߞߎ߳ߟߊ߫ + ߊߟߑߓߊߣߌ߫ + ߊߙߑߡߋߣߌ߫ + ߊ߲ߜߏߟߊ߫ + ߊ߲ߕߊߙߑߛߕߌߞ + ߊߙߑߖ߭ߊ߲ߕߌߣ + ߛߊߡߏߥߊ߫ ߊߡߋߙߞߌߞߊ + ߏߕߑߙߌߛ + ߐߛߑߕߙߊߟߌ߫ + ߊߙߎߓߊ߫ + ߊߟߊ߲ߘ ߕߌ߲ + ߊߖߊߙߑߓߊߦߌߖߊ߲ + ߓߐߛߑߣߌ߫-ߍߙߑߖ߭ߋߜ߭ߏߝ߭ߌߣ + ߓߊߙߑߓߊߘ + ߓߊ߲ߜ߭ߑߟߊߘߍߛ + ߓߍߟߑߖ߭ߌߞ + ߓߙߎߞߌߣߊ߫ ߝߊ߬ߛߏ߫ + ߓߌߟߑߜ߭ߊ߯ߙߌ߫ + ߓߤߊ߬ߙߊߦߌ߬ߣ + ߓߎߙߎ߲ߘߌ߫ + ߓߋߣߍ߲߫ + ߛߍ߲ߕ-ߓߌߙߑߕߟߋߡߌ߫ + ߓߍߙߑߓߎߘ + ߓߙߎߣߋ߫ + ߓߏߟߝ߭ߌ߫ + ߤߏߟߊ߲ߘ ߞߊߙߌߓߋ߫ + ߓߙߋߖ߭ߌߟ + ߓߤߊߡߊߛ + ߓߎߕߊ߲߫ + ߓߎߝ߭ߋ߫ ߕߌ߲ + ߓߐߛߎߥߣߊ߫ + ߓߌߟߏߙߌߛ + ߓߋߟߌߖ߭ + ߞߣߊߘߊ߫ + ߞߏߞߏ߫ ߕߌ߲ + ߞߏ߲߬ߜ߭ߏ߫-ߞߌ߲ߛߊߛߊ߫ + ߞߏ߲߬ߜ߭ߏ߫ ߓߍ߯ߦߊ߫ ߞߊ߲ߓߍ߲ + ߕߊ߲ߓߊ߲-ߝߘߊ߬ߝߌ߲߬ߠߊ߫ ߞߊ߲ߓߍ߲ + ߞߏ߲߬ߜ߭ߏ߫-ߓߙߊߖ߭ߊ߫ + ߞߏ߲߬ߜ߭ߏ߫ ߞߊ߲ߓߍ߫ + ߛߎߥߌߛ + ߜߋ߲-ߞߐ߰ߖߌ߬ߘߊ + ߜߋ߲-ߞߐ߰ߖߌ߬ߘߊ ߞߊ߲ߓߍ߲ + ߞߎߞ ߕߌ߲ + ߛ߭ߟߌ߫ + ߞߊߡߋߙߎ߲ + ߛߌߣ + ߞߏߟߐ߲ߓߌ߫ + ߞߟߌߔߍߙߑߕߐ߲߫ ߕߌ߲ + ߞߐߛߑߕߊ߫ ߙߌߞߊ߫ + ߞߎ߳ߓߊ߫ + ߜߙߋߞߎ߲߫-ߝߙߌߛߌ߫ + ߞߎߙߛߊߏ߫ + ߞߙߌߛߑߕߌߡߊ߫ ߕߌ߲ + ߛߌߔߑߙߎ߫ + ߗߍߞ + ߗߍߞ ߞߊ߲ߓߍ߲ + ߊߟߑߡߊ߲ߘߎ߯ + ߖߋߜ߭ߏ߫-ߜ߭ߊߙߑߛߌߦߊ߫ + ߖߌߓߎߕߌ߫ + ߘߊߣߌߡߊߙߑߞ + ߘߏߡߣߌߞ + ߘߏߡߣߌߞ ߞߊ߲ߓߍ߲ + ߊߟߑߖ߭ߋߙߌ߫ + ߛߋߎߕߊ߫ ߣߌ߫ ߡߋߟߌߣߊ߫ + ߕߍߡߊߓߊ߲߮ + ߋߛߑߕߏߣߌ߫ + ߋߖ߭ߌߔߑߕ + ߞߌ߲߬ߢߍ߬ߞߏ߲ߞߏ߫ ߕߟߋ߬ߓߋ + ߋߙߕߌߙߋ߫ + ߊߛߌߔߊ߲߫ + ߋߗߏߔߌ߫ + ߋߙߐߔߎ߬ ߘߍ߭ + ߋߙߐߔߎ߬ ߞߣߍ + ߝߍ߲ߟߊ߲ߘ + ߝߖߌ߫ + ߡߊߟߎ߲ߌ߲߫ ߕߌ߲ + ߡߊߟߎ߲ߌ߲߫ ߕߌ߲ ( ߝߊߟߑߞߑߟߊ߲ ߕߌ߲ ) + ߡߌߞߙߏߣߋߖ߭ߌ߫ + ߝߋߙߏߦߋ߫ ߕߌ߲ + ߝߊ߬ߙߊ߲߬ߛߌ߫ + ߜ߭ߊߓߐ߲߫ + ߡߊ߲߬ߛߊ߬ߟߊ߫ ߟߊߘߍ߬ߣߍ߲ + ߡ.ߟ. 
+ ߜ߭ߙߋߣߊߘ + ߖ߭ߋߐߙߑߖ߭ߌ߫ + ߝߊ߲߬ߙߊ߲߬ߛߌ߫ ߜ߭ߎ߳ߦߊߣ + ߜ߭ߋߙߑߣߋߖ߭ߌ߫ + ߜ߭ߊ߯ߣߊ߫ + ߜ߭ߌߓߙߊߟߑߕߊߙ + ߜ߭ߎߙߎ߲ߟߊ߲ߘ + ߜ߭ߊ߲ߓߌ߫ + ߖߌ߬ߣߍ߫ + ߜ߭ߎߥߊߘߋߟߎߔ + ߕߍߡߊߓߊ߲߮-ߖߌ߬ߣߍ߫ + ߜ߭ߙߍ߬ߞߌ߬ + ߖ߭ߐߙߑߖ߭ߌ߫ ߥߙߏ߬ߘߎ߮ ߣߌ߫ ߛߊ߲ߘߎߥߌߛ ߕߌ߲ + ߜ߭ߎߥߊߕߋߡߟߊ߫ + ߜ߭ߎߥߊߡ + ߖߌ߬ߣߍ߫ ߓߌߛߊߥߏ߫ + ߜ߭ߎߦߊߣ + ߛߌߣ ߕߌ߲߬ߞߎߘߎ߲߫ ߡߊߡߙߊ߬ߣߍ߲ ߤߐ߲ߞߐ߲߫ + ߤߐ߲ߞߐ߲߫ + ߡߊߞߑߘߏߣߊߟߑߘ ߕߌ߲ + ߤߎ߲ߘߎߙߊ߫ + ߞߙߏߥߊߛߌ߫ + ߤߊߦߕߌ߫ + ߤߐ߲ߜ߭ߙߌ߫ + ߞߣߊߙߌ߫ ߕߌ߲ + ߍ߲ߘߣߏߖ߭ߌ߫ + ߌߙߑߟߊ߲ߘ + ߌߛߑߙߊߍߟ + ߡߊ߲߯ ߕߌ߲ + ߌߘߎ߬ + ߓߙߌߕߊ߲ߓߊ߫ ߟߊ߫ ߌ߲ߘߎ߫ ߟߌ߲ߓߊ߲ ߞߣߍ + ߌߙߊߞߌ߬ + ߌߙߊ߲߫ + ߌߛߑߟߊߘ + ߌߕߊߟߌ߫ + ߖߋߙߑߖ߭ߌ߫ + ߖ߭ߡߊߦߌߞ + ߖߐߙߑߘߊߣߌ߫ + ߖ߭ߊߔߐ߲߫ + ߞߋߣߌߦߊ߫ + ߞߌߙߑߜ߭ߌߛߑߕߊ߲߫ + ߞߊ߲ߓߐߘߑߖ + ߞߙߌߓߊߕߌ߫ + ߞߡߐߙ + ߛߍ߲ߕ-ߞߙߌߛߑߕߐߝ ߣߌ߫ ߢߝ߭ߋ߫ + ߞߐ߬ߘߎ߮ ߞߏ߯ߙߋ߫ + ߕߟߋ߬ߓߋ ߞߏ߯ߙߋ߫ + ߞߎ߬ߥߊ߬ߕ + ߓߊ߲߬ߓߊ߬-ߕߌ߲ + ߞߖ߭ߊߞߌߛߑߕߊ߲߫ + ߟߊߐߛ + ߟߌߓߊ߲߫ + ߛߍ߲ߕ-ߟߎ߳ߛߌ߫ + ߟߎߛߑߕߊ߲ߛߑߕߍ߲߫ + ߛߙߌߟߊ߲ߞߊ߫ + ߟߌߓߋߙߌߦߊ߫ + ߟߋߛߕߏ߫ + ߟߎߕߎ߳ߦߊߣߌ߫ + ߟߎߜ߭ߑߛߊ߲ߓߎ߯ߙ + ߟߋߕߏߣߌ߫ + ߟߓߌ߫ + ߡߊ߬ߙߐߞߎ߬ + ߡߏߣߊߞߏ߫ + ߡߐߟߑߘߊߝ߭ߌ߫ + ߡߐ߲ߕߣߋߜ߭ߙߏ߫ + ߛߍ߲ߕ-ߡߊߙߑߕߍ߲߫ + ߡߘߊߜ߭ߛߑߞߊ߯ߙ + ߡߊߙߑߛߊߟ ߕߌ߲ + ߞߐ߬ߘߎ߮ ߡߊߛߋߘߏߣߌ߫ + ߡߊ߬ߟߌ߬ + ߡߌߦߊߡߊ߯ߙ ( ߓߙߌߡߊߣߌ߫ ) + ߡߐ߲ߜ߭ߐߟߌ߫ + ߛߌߣ ߕߌ߲߬ߞߎߘߎ߲߫ ߡߊߡߙߊ߬ߣߍ߲ ߡߞߊߥߏ߫ + ߡߞߊߥߏ߫ + ߡߊߙߌߦߊߣ ߞߐ߬ߘߎ߮ ߕߌ߲ + ߡߊߙߑߕߣߌߞ + ߡߏߙߌߕߊߣߌ߫ + ߡߐ߲ߗߋߙߊ߫ + ߡߊߟߑߕ + ߡߏߙߌߛ + ߡߊߟߑߘߌߝ߭ + ߡߟߊߥߌ߫ + ߡߍߞߑߛߌߞ + ߡߊߟߍߘߎ߯ + ߡߏߖ߭ߊ߲ߓߌߞ + ߣߊߡߌ߲ߓߌ߫ + ߞߊߟߋߘߏߣߌ߫-ߞߎߘߊ߫ + ߖߋ߬ߟߌ߬ߓߊߘߎ߯ + ߣߐߙߑߝߐߟߑߞ + ߖߋ߬ߟߌ߬ߓߊ߬ߟߊ߫ + ߣߌߞߙߊߜ߭ߎߥߊ߫ + ߤߏߟߊ߲ߘ + ߣߐߙߑߝ߭ߍߖ + ߣߋߔߊߟ + ߣߏ߯ߙߎ߫ + ߣߎ߳ߋ߫ + ߖ߭ߋߟߊ߲ߘߌ߫-ߞߎߘߊ߫ + ߏߡߊ߲߫ + ߔߣߊߡߊ߫ + ߔߋߙߎ߫ + ߝߊ߬ߙߊ߲߬ߛߌ߫ ߔߏߟߌߣߋߖ߭ߌ߫ + ߡߊߡߎߥߊߖ߭ߌ߫ ߖߌ߬ߣߍ߬-ߞߎߘߊ߫ + ߝߟߌߔߌ߲ߣ + ߔߊߞߌߛߑߕߊ߲߫ + ߔߏߟߐߢ + ߛߍ߲ߕ-ߔߍ߯ߙ ߣߌ߫ ߡߌ߲ߞߋߟߐ߲߫ + ߔߌߕߑߞߍ߲ ߕߌ߲ + ߔߐߙߑߕߏ߫-ߙߌߞߏ߫ + ߔߊߟߍߛߑߕߌߣ ߞߣߍ + ߔߊߟߍߛߑߕߌߣ + ߔߐߙߑߕߎߜ߭ߊߟ + ߔߟߊߐߛ + ߔߙߊߜ߭ߏߦߋ߫ + ߞߊߕߊ߯ߙ + ߟߌ߲ߓߊ߲ߘߎ߯ ߕߌ߲߬ߞߎߘߎ߲ ߦߙߐ߫ ߡߊߕߊ߯ߣߍ߲ + ߟߊ߬ߘߍ + ߙߎߡߊߣߌ߫ + ߛߍߙߑߓߌ߫ + ߌߙߌ߬ߛߌ߫ + ߙߎߥߊ߲ߘߊ߫ + ߛߎ߰ߘߎ߬ߟߊ߫-ߡߊ߲߬ߛߊ߬ߟߊ + ߛߊߟߏߡߐ߲߫ ߕߌ߲ + ߛߋߦߌߛߍߟ + ߛߎߘߊ߲߫ + ߛߎߥߍߘ + ߛߌ߲ߜ߭ߊߔߎߙ + ߛߍ߲ߕ-ߋߟߍߣ + ߛߑߟߏߝ߭ߋߣߌ߫ + ߛߊߟߑߓߊߙ ߣߌ߫ ߖ߭ߊ߲ ߡߊߦߍ߲߫ + ߛߑߟߏߝ߭ߊߞߌ߫ + ߛߙߊ߬ߟߏ߲߫ + ߛߍߕ-ߡߊߙߍ߲߫ + ߛߣߍ߬ߜߊ߯ߟߌ߫ + ߛߏߡߊߟߌ߫ + ߛߎߙߑߣߊߡ + ߥߙߏ߬ߘߎ߮ ߛߎ߬ߘߊ߲߫ + ߛߊߥߕߏߡߋ߫ ߣߌ߫ ߔߑߙߍ߲ߛߌߔ + ߛߊߟߑߝ߭ߊߘߐߙ + ߛߍ߲ߕ-ߡߊߙߑߕߍ߲߫ ( ߤߏߟߊ߲ߘ ߝߊ߲߭ߝߍ߬ ) + ߛߙߌ߫ + ߒߛߎߥߊߕߣߌ߫ + ߛߑߥߊߖ߭ߌߟߊ߲ߘ + ߞߎ߲ߓߊ߫ ߕߑߙߌߛߑߕߊ߫ + ߕߎߙߑߞߌ߫ ߣߌ߫ ߞߊߦߌߞ + ߗߊߘ + ߝߊ߬ߙߊ߲߬ߛߌ߫ ߘߎ߰ߞߟߏ ߓߊߙߌ ߘߐ߫ + ߕߜ߭ߏ߫ + ߕߊߦߌߘߎ߯ + 
ߕߊߖߞߌߛߑߕߊ߲߫ + ߕߏߞߋߟߊߏ߫ + ߕߌߡߐߙ ߕߟߋ߬ߓߐ߬ߝߊ߲ + ߕߌߡߐߙ ߕߟߋ߬ߓߐ + ߕߎߙߑߞߌߡߋߣߌߛߑߕߊ߲߫ + ߕߎߣߖ߭ߌ߫ + ߕߏ߲ߜ߭ߊ߫ + ߕߎߙߑߞߌ߫ + ߕߙߌߣߌߕߋ߫ ߣߌ߫ ߕߏߓߊߜ߭ߏ߫ + ߕߎߝ߭ߊߟߎ߫ + ߕߊߦߌߥߊ߲߫ + ߕߊ߲ߖ߭ߊߣߌ߫ + ߎ߳ߞߑߙߍߣ + ߎߜ߭ߊ߲ߘߊ߫ + ߞߊ߬ߝߏ߫ ߘߍ߬ߣߍ߲ ߘߌߣߍ߲߫ ߕߌ߲߫ ߡߊߕߊ߯ߣߍ߲ + ߡߊ߲߬ߕߏ߲߫ ߠߊߘߍ߬ߣߍ߲ ߛߌ߬ߝߏ߲߬ߧߊ߬ߟߌ + ߞߊ߬ߝߏ߫ ߘߍ߬ߣߍ߲ + ߞ.ߘ. + ߎ߳ߙߑߜ߭ߋߦߌ߫ + ߎߖ߭ߑߓߋߞߌߛߑߕߊ߲߫ + ߝ߭ߊߕߌߞߊ߲߫ ߞߊ߬ߝߏ + ߛߍ߲ߕ-ߝ߭ߍߛߊ߲ ߜ߭ߙߋߣߊߘߌ߫ + ߝ߭ߣߋߖ߭ߎߦߋߟߊ߫ + ߓߙߌߕߊ߲ߓߊ߫ ߕߌ߲߫ ߞߊߓߊ߲ + ߞߊ߬ߝߏ߫ ߘߍ߬ߣߍ߲ ߕߌ߲߫ ߞߊߓߊ߲߫ + ߝ߭ߌߦߍߕߑߣߊߡ + ߝ߭ߊߣߎߦߊߕߎ߫ + ߥߊߟߌߛߌ߫ ߣߌ߫ ߝߕߎߣߊ߫ + ߛߊߡߏߥߊ߫ + ߔߛߏߘߏ߫ ߊߞߑߛߊ߲ + ߔߛߔߘߏ߫-ߓߘߌ߫ + ߞߛߏߝ߭ߏ߫ + ߦߡߊߣߌ߲߫ + ߡߊߦߐߕ + ߥߙߏ߬ߘߎ߮ ߝߘߊ߬ߝߌ߲߬ߠߊ߫ + ߖ߭ߊ߲ߓߌ߫ + ߖ߭ߌ߲ߓߊߓߏߦߋ߫ + ߕߌ߲߬ߞߎߘߎ߲߫ ߕߊ߲߬ߠߊߕߍ߰ߓߊߟߌ + + + ߜߟߊ߬ߜߟߊ߬ߡߊ + ߓߙߌߕߊ߲ߓߊ߫ ߡߊ߲߬ߛߊ߬ߟߊ (ߓ.ߡ.) + ߞߊ߬ߝߏ߫ ߘߍ߬ߣߍ߲ (ߞ.ߘ.) + + + ߞߊ߲: {0} + ߛߓߍߟߌ: {0} + ߕߌ߲߬ߞߎߘߎ߲: {0} + + + + + right-to-left + top-to-bottom + + + + [\u07EB \u07EC \u07ED \u07EE \u07EF \u07F0 \u07F1 \u07F2 \u07F3 ߊ ߋ ߌ ߍ ߎ ߏ ߐ ߑ ߒ ߓ ߔ ߕ ߖ ߗ ߘ ߙ ߚ ߛ ߜ ߝ ߞ ߟ ߠ ߡ ߢ ߣ ߤ ߥ ߦ ߧ ߴ ߵ] + [ߨ ߩ ߪ] + [\- ‑ ، . % ‰ + ߀ ߁ ߂ ߃ ߄ ߅ ߆ ߇ ߈ ߉] + [߸ ߹ ߷] + + + + + + + + ߓߌ߲ߠ + ߞߏ߲ߞ + ߕߙߊ + ߞߏ߲ߘ + ߘߓߊ߬ߕ + ߥߊ߬ߛ + ߞߊ߬ߙ + ߘߓߊ߬ߓ + ߕߎߟߊߝߌ߲ + ߞߏ߲ߓ + ߣߍߣ + ߞߏߟ + + + ߓ + ߞ + ߕ + ߞ + ߘ + ߥ + ߞ + ߘ + ߕ + ߞ + ߣ + ߞ + + + ߓߌ߲ߠߊߥߎߟߋ߲ + ߞߏ߲ߞߏߜߍ + ߕߙߊߓߊ + ߞߏ߲ߞߏߘߌ߬ߓߌ + ߘߓߊ߬ߕߊ + ߥߊ߬ߛߌ߬ߥߙߊ + ߞߊ߬ߙߌߝߐ߭ + ߘߓߊ߬ߓߌߟߊ + ߕߎߟߊߝߌ߲ + ߞߏ߲ߓߌߕߌ߮ + ߣߍߣߍߓߊ + ߞߏߟߌ߲ߞߏߟߌ߲ + + + + + ߓߌ߲ߠ + ߞߏ߲ߞ + ߕߙߊ + ߞߏ߲ߘ + ߘߓߊ߬ߕ + ߥߊ߬ߛ + ߞߊ߬ߙ + ߘߓߊ߬ߓ + ߕߎߟߊߝߌ߲ + ߞߏ߲ߓ + ߣߍߣ + ߞߏߟ + + + ߓ + ߞ + ߕ + ߞ + ߘ + ߥ + ߞ + ߘ + ߕ + ߞ + ߣ + ߞ + + + ߓߌ߲ߠߊߥߎߟߋ߲ + ߞߏ߲ߞߏߜߍ + ߕߙߊߓߊ + ߞߏ߲ߞߏߘߌ߬ߓߌ + ߘߓߊ߬ߕߊ + ߥߊ߬ߛߌ߬ߥߙߊ + ߞߊ߬ߙߌߝߐ߭ + ߘߓߊ߬ߓߌߟߊ + ߕߎߟߊߝߌ߲ + ߞߏ߲ߓߌߕߌ߮ + ߣߍߣߍߓߊ + ߞߏߟߌ߲ߞߏߟߌ߲ + + + + + + + ߞߊ߯ߙ + ߞߐ߬ߓ + ߞߐ߬ߟߏ߲ + ߞߎߣ + ߓߌߟ + ߛߌ߬ߣ + ߞߍ߲ߘ + + + ߞ + ߞ + ߞ + ߞ + ߓ + ߛ + ߞ + + + ߞߊ߯ + ߞߐ߬ + ߞߐ߬ߟߏ߲ + ߞߎ + ߓߌ + ߛߌ߬ + ߞߍ߲ + + + ߞߊ߯ߙߌߟߏ߲ + ߞߐ߬ߓߊ߬ߟߏ߲ + ߞߐ߬ߟߏ߲ + ߞߎߣߎ߲ߟߏ߲ + ߓߌߟߏ߲ + ߛߌ߬ߣߌ߲߬ߟߏ߲ + ߞߍ߲ߘߍߟߏ߲ + + + + + ߞߊ߯ߙ + ߞߐ߬ߓ + ߞߐ߬ߟ + ߞߎߣ + ߓߌߟ + ߛߌ߬ߣ + ߞߍ߲ߘ + + + ߞ + ߞ + ߞ + ߞ + ߓ + ߛ + ߞ + + + ߞߊ߯ + ߞߐ߬ߓ + ߞߐ߬ߟ + ߞߎ + ߓߌ + ߛߌ߬ + ߞߍ߲ + + + ߞߊ߯ߙߌߟߏ߲ + ߞߐ߬ߓߊ߬ߟߏ߲ + ߞߐ߬ߟߏ߲ + ߞߎߣߎ߲ߟߏ߲ + ߓߌߟߏ߲ + ߛߌ߬ߣߌ߲߬ߟߏ߲ + ߞߍ߲ߘߍߟߏ߲ + + + + + + + ߞߛ߁ + ߞߛ߂ + ߞߛ߃ + ߞߛ߄ + + + ߁ + ߂ 
+ ߃ + ߄ + + + ߞߊߙߏߛߓߊ߫ ߁߭ + ߞߊߙߏߛߓߊ߫ ߂߲ + ߞߊߙߏߛߓߊ߫ ߃߲ + ߞߊߙߏߛߓߊ߫ ߄߲ + + + + + ߞߛ߁ + ߞߛ߂ + ߞߛ߃ + ߞߛ߄ + + + ߁ + ߂ + ߃ + ߄ + + + ߞߊߙߏߛߓߊ߫ ߁߭ + ߞߊߙߏߛߓߊ߫ ߂߲ + ߞߊߙߏߛߓߊ߫ ߃߲ + ߞߊߙߏߛߓߊ߫ ߄߲ + + + + + + + ߛ + ߥ + + + ߛ + ߥ + + + ߛ + ߥ + + + + + ߛ + ߥ + + + ߛ + ߥ + + + ߛ + ߥ + + + + + + ߌߛߊ߫ ߡߏߦߌ ߢߍ߫ + ߞߊ߬ߝߏ߬ߙߋ߲ ߥߎ߬ߛߎ ߢߍ߫ + ߌߛߊ߫ ߡߏߦߌ ߞߐ߫ + ߞߊ߬ߝߏ߬ߙߋ߲ ߠߊ߫ ߥߎ߬ߛߎ + + + ߌߛ. ߡ. ߢߍ߫ + ߌߛ. ߡ. ߞߐ߫ + + + ߌߛ. ߢߍ߫ + ߌߛ. ߞߐ߫ + + + + + y / dd / MM + y / dd MMM + + + + + + ߜ߭ߕߖ{0} + ߜ߭ߕߖ + {0} ߕߎ߬ߡߊ + {0} ߕߎ߬ߡߊ߬-ߦߟߍߡߊ߲ + {0} ߕߎ߬ߡߊ߬-ߦߟߍߡߊ߲ߓߊߟߌ + + ߟߎߥߊ߲ߘߊ߫ + + + ߥߜ߭ߊ߬ߘߜ߭ߎ߫ + + + ߓߎߖ߭ߎ߲ߓߎߙߊ߫ + + + ߔߐߙߑߕߏ߫-ߣߝ߭ߏ߫ + + + ߜ߭ߊߓߏߙߐߣ + + + ߞߌ߲ߛߊߛߊ߫ + + + ߟߎߓߎ߲ߓߊߛߌ߫ + + + ߓߊ߲ߜ߭ߌ߫ + + + ߓߙߖ߭ߊߝ߭ߌߟ + + + ߊߓߌߖߊ߲߬ + + + ߘߎߥߟߊ߫ + + + ߜߦߋߞߎ߲߫-ߝߙߌߛߌ߫ + + + ߖߌߓߎߕߌ߫ + + + ߊߟߑߖ߭ߋ + + + ߞߍ߯ߙ + + + ߟߊ߯ߦߎߣ + + + ߊߛߑߡߙߊ߫ + + + ߊ߬ߘߌ߫ ߛߊ߬ߓߋߓߊ߫ + + + ߟߌߓߙߋߝ߭ߌߟ + + + ߊߞߙߊ߫ + + + ߓߊ߲ߖߎߟ + + + ߞߐߣߊߞߙߌ߫ + + + ߡߟߊߓߏ߫ + + + ߓߌߛߊߥߏ߫ + + + ߛߊߜ߭ߐߛ + + + ߣߊߦߙߏߓߌ߫ + + + ߞߐߡߐ߯ߙ + + + ߡߏ߬ߙߏߝ߭ߌߦߊ߫ + + + ߡߊߛߋߙߎ߫ + + + ߕߙߌߔߟߌ߫ ( ߟߓߌ߫ ) + + + ߞߛߊߓߎߟߊ߲ߞߊ߫ + + + ߊ߲ߕߣߊߣߊߙߌߝ߭ߏ߫ + + + ߓߡߊ߬ߞߐ߫ + + + ߣߎߥߊߞߑߛߐߕ + + + ߡߏߙߌߛ + + + ߓߑߟߊ߲ߕߌ߯ߙ + + + ߡߊߔߎߕߏ߫ + + + ߥߌ߲ߘߐߞ + + + ߢߊߡߋ߫ + + + ߟߋߜ߭ߐߛ + + + ߙߋߣߌߦߐ߲߫ + + + ߞߌߜ߭ߊߟߌ߫ + + + ߡߊߤߋ߫ + + + ߞߊߙߑߕߎߡ + + + ߛߍ߲ߕ ߤߋߟߍߣ + + + ߝߙߌߕߐ߲߬ + + + ߘߊ߬ߞߊ߯ߙߎ߫ + + + ߡߏߜ߭ߊߘߌߛߏ߫ + + + ߖߎߓߊ߫ + + + ߛߊߏ-ߕߏߡߋ߫ + + + ߒߓߊߓߊߣ + + + ߒߖߊߡߋߣߊ߫ + + + ߞߍߙߑߜ߭ߋߟߍ߲߫ + + + ߟߏߡߋ߫ + + + ߕߎߣߌߛ + + + ߘߊ߯ߙ-ߛߊ߬ߟߊ߯ߡ + + + ߞߊ߲ߔߟߊ߫ + + + ߡߊߦߐߕ + + + ߖ߭ߎߥߊߣߍߛߑߓߎ߯ߙ + + + ߟߎߛߞߊ߫ + + + ߤߙߊߙߋ߫ + + + + ߝߘߊ߬ߝߌ߲߬ߠߊ߫ ߕߊ߲ߓߊ߲ ߕߎ߬ߡߊ߬ߙߋ߲ ߢߊߓߘߍ + + + + + ߝߘߊ߬ߌ߲߬ߠߊ߫ ߓߟߋ߬ߓߐ ߕߎ߬ߡߊ߬ߙߋ߲ ߢߊߓߘߍ + + + + + ߝߘߌ߬ߝߌ߲߬ߠߊ߫ ߥߙߏ߬ߘߎ߮ ߕߎ߬ߡߊ߬ߙߋ߲ ߢߊߓߘߍ + + + + + ߝߘߊ߬ߝߌ߲߬ߠߊ߫ ߕߟߋ߬ߓߋ ߕߎ߬ߡߊ߬ߙߋ߲ + ߝߘߊ߬ߝߌ߲߬ߠߊ߫ ߕߟߋ߬ߓߋ ߕߎ߬ߡߊ߬ߙߋ߲ ߢߊߓߘߍ + ߝߘߊ߬ߝߌ߲߬ߠߊ߫ ߕߟߋ߬ߓߋ ߕߟߋ߬ߡߊ߬ ߕߎߡߊߙߋ߲ + + + + + ߜߙߋߞߎ߲߫-ߝߙߌߛߌ߫ ߕߎ߬ߡߊ߬ߙߋ߲ + ߜߙߋߞߎ߲߫-ߝߙߌߛߌ߫ ߕߎ߬ߡߊ߬ߙߋ߲ ߢߊߓߘߍ + ߜߙߋߞߎ߲߫-ߝߙߌߛߌ߫ ߕߟߋ߬ߡߊ߬ ߕߎߡߊߙߋ߲ + + + + + ߐߛߑߕߙߊߟߌ߫ ߘߎ߰ߞߟߏ ߣߌ߫ ߝߊ߬ߙߊ߲߬ߛߌ߫ ߊ߲ߕߊߙߑߕߌߞ ߕߎ߬ߡߊ߬ߙߋ + + + + + ߜ߭ߙߋߣߍߕ ߕߎ߬ߡߊ߬ߘߊ + + + + + ߍ߲ߘߎ߫ ߟߌ߲ߓߊ߲ߘߎ߯ ߕߎ߬ߡߊ߬ߙߋ߲ + + + + + ߡߏߙߌߛ ߕߎ߬ߡߊ߬ߙߋ߲ + ߡߏߙߌߛ ߕߎ߬ߡߊ߬ߙߋ߲ ߢߊߓߘߍ + ߡߏߙߌߛ ߕߟߋ߬ߡߊ߬ ߕߎߡߊߙߋ߲ + + + + + 
ߙߋߣߌߦߐ߲߫ ߕߎ߬ߡߊ߬ߙߋ߲ + + + + + ߛߋߦߌߛߌߟ ߕߎ߬ߡߊ߬ߙߋ߲ + + + + + + nkoo + + nkoo + + + ، + + + + ߊ߲ߜ߭ߏߟߞߊ ߟߎ߬ ߟߊ߫ ߞߎߥߊ߲ߖ߭ߊ + ߊ߲ߜ߭ߏߟߞߊ ߟߎ߬ ߟߊ߫ ߞߎߥߊ߲ߖ߭ߊ + ߊ߲ߜ߭ߎ + ߞߖ߭ + + + ߊ߲ߜ߭ߏߟߊ߫ ߞߎߥߊ߲ߖ߭ߊ ( ߁߉߇߇–߁߉߉߀ ) + ߊ߲ߜ߭ߏߟߊ߫ ߞߎߥߊ߲ߖ߭ߊ ( ߁߉߇߇–߁߉߉߀ ) + + + ߊ߲ߜ߭ߏߟߊ߫ ߞߎߥߊߖ߭ߊ߫ ߞߎߘߊ ( ߁߉߉߀–߂߀߀߀ ) + ߊ߲ߜ߭ߏߟߊ߫ ߞߎߥߊߖ߭ߊ߫ ߞߎߘߊ ( ߁߉߉߀–߂߀߀߀ ) + ߊߜ߭ߞ + + + ߊ߲ߜ߭ߏߟߊ߫ ߞߎߥߊߖ߭ߊ ߝߊ߲߬ߞߊߘߏ߲߬ߣߍ߲ ( ߁߉߉߅–߁߉߉߉ ) + ߊ߲ߜ߭ߏߟߊ߫ ߞߎߥߊߖ߭ߊ ߝߊ߲߬ߞߊߘߏ߲߬ߣߍ߲ ( ߁߉߉߅–߁߉߉߉ ) + ߊߜ߭ߝ + + + ߓߎߙߎ߲ߘߌߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߓߎߙߎ߲ߘߌߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߓߙߝ + + + ߓߐߛߎߥߣߊߞߊ ߟߎ߬ ߟߊ߫ ߔߎߟߊ + ߓߐߛߎߥߣߊߞߊ ߟߎ߬ ߟߊ߫ ߔߎߟߊ + ߓߥߔ + ߔ + + + ߞߏ߲߬ߜ߭ߏ߬ߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߞߏ߲߬ߜ߭ߏ߬ߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߞߝ + + + ߜߙߋߞߎ߲߫ ߝߙߌߛߌߞߊ ߟߎ߬ ߍߛߑߞߎߘߐߛ + ߜߙߋߞߎ߲߫ ߝߙߌߛߌߞߊ ߟߎ߬ ߍߛߑߞߎߘߐߛ + ߍߛߞ + + + ߖߌߓߎߕߌߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߖߌߓߎߕߌߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߖߓߝ + + + ߊߟߌߖ߭ߋߙߌߞߊ ߟߎ߬ ߟߊ߫ ߘߌ߬ߣߊ߯ߙߌ + ߊߟߌߖ߭ߋߙߌߞߊ ߟߎ߬ ߟߊ߫ ߘߌ߬ߣߊ߯ߙߌ + ߊߟߘ + + + ߡߌߛߌߙߊ߲ߞߊ ߟߎ߬ ߟߊ߫ ߔߐߣߌ߬ + ߡߌߛߌߙߊ߲ߞߊ ߟߎ߬ ߟߊ߫ ߔߐߣߌ߬ + ߡߛߔ + + + ߋߙߌߕߙߋߞߊ ߟߎ߬ ߟߊ߫ ߣߊߝߑߞߊ + ߋߙߌߕߙߋߞߊ ߟߎ߬ ߟߊ߫ ߣߊߝߑߞߊ + ߋߙߝ + + + ߋߗߏߔߌߞߊ ߟߎ߬ ߟߊ߫ ߓߌߙߑߛ + ߋߗߏߔߌߞߊ ߟߎ߬ ߟߊ߫ ߓߌߙߑߛ + ߋߗߓ + + + ߛߘߌ + ߜ߭ߊ߯ߣߊ߫ ߛߘߌ ( ߁߉߆߇–߂߀߀߇ ) + + + ߜ߭ߊ߯ߣߊ߫ ߛߘߌ + ߜ߭ߊ߯ߣߞߊ ߟߎ߬ ߛߘߌ + ߜ߭ߛߘ + + + ߜ߭ߊ߲ߓߌߞߊ ߟߎ߬ ߟߊ߫ ߘߟߊߛߌ߫ + ߜ߭ߊ߲ߓߌߞߊ ߟߎ߬ ߟߊ߫ ߘߟߊߛߌ߫ + ߜ߭ߓߘ + + + ߖߌ߬ߣߍ߬ߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߖߌ߬ߣߍ߬ߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߿ + ߿ + + + ߖߌ߬ߣߍ߬ߞߊ ߟߎ߬ ߟߊ߫ ߛߟߌ + ߖߌ߬ߣߍ߬ߞߊ ߟߎ߬ ߟߊ߫ ߛߟߌ + ߖߛ߾ + + + ߕߍߡߊߓߊ߲߮ ߖߌ߬ߣߍ߬ߞߊ ߟߎ߬ ߟߊ߫ ߋߞߥߋߟߋ + ߕߍߡߊߓߊ߲߮ ߖߌ߬ߣߍ߬ߞߊ ߟߎ߬ ߟߊ߫ ߋߞߥߋߟߋ + ߕߖߋ + + + ߖߌߣߍ߫ ߓߌߛߊߥߏߞߊ ߟߎ߬ ߟߊ߫ ߍߛߑߞߎߘߐߛ + ߖߌߣߍ߫ ߓߌߛߊߥߏߞߊ ߟߎ߬ ߟߊ߫ ߍߛߑߞߎߘߐߛ + ߖߓߍ + + + ߖߌߣߍ߫ ߓߌߛߊߥߏߞߊ ߟߎ߬ ߟߊ߫ ߔߋߖ߭ߏ + ߖߌߣߍ߫ ߓߌߛߊߥߏߞߊ ߟߎ߬ ߟߊ߫ ߔߋߖ߭ߏ + ߖߓߔ + + + ߝߋߣߌߦߞߊ ߟߎ߬ ߟߊ߫ ߛߌߟߌ߲ߜ߭ + ߝߋߣߌߦߞߊ ߟߎ߬ ߟߊ߫ ߛߌߟߌ߲ߜ߭ + ߞߋߛ + + + ߞߐߡ߲߯ߙߌߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߞߐߡ߲߯ߙߌߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߞߡߝ + ߝߛ + + + ߟߌߓߋߙߌߦߞߊ ߟߎ߬ ߟߊ߫ ߘߏߟߊߙ + ߟߌߓߋߙߌߦߞߊ ߟߎ߬ ߟߊ߫ ߘߏߟߊߙ + ߟ߾ + + + ߟߋߛߕߏߞߊ ߟߎ߬ ߟߊ߫ ߟߏߕߌ + ߟߋߛߕߏߞߊ ߟߎ߬ ߟߊ߫ ߟߏߕߌ + ߟߛߟ + + + ߟߓߌ߫ ߘߌ߬ߣߊ߯ߙ + ߟߓߌߞߊ ߟߎ߬ ߟߊ߫ ߘߌ߬ߣߊ߯ߙ + ߟߓߘ + + + ߡߊ߬ߙߐߞߎ߬ ߘߌ߬ߙߑߤߊߡ + ߡߊ߬ߙߐ߬ߞߎ߬ߞߊ ߟߎ߬ ߟߊ߫ ߘߌ߬ߙߑߤߊߡ + ߡߘߤ + + + ߡߊ߬ߙߐ߬ߞߎߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߡߊ߬ߙߐ߬ߞߎߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߡߙߝ + + + ߡߘߊߜ߭ߊߛߑߞߊ߯ߙߌߞߊ ߟߎ߬ ߟߊ߫ ߊߙߌߦߊߙߌ + ߡߘߊߜ߭ߊߛߑߞߊ߯ߙߌߞߊ ߟߎ߬ ߟߊ߫ ߊߙߌߦߊߙߌ + ߡߘߙ + ߊߙ + + + 
ߡߘߊߜ߭ߊߑߞߊ߯ߙߌߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߡߘߊߜ߭ߊߑߞߊ߯ߙߌߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߡߘߝ + + + ߡߊߟߌߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߡߊߟߌߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߡߝ + + + ߡߏߙߌߕߊߣߌߞߊ ߟߎ߬ ߟߊ߫ ߎ߬ߜ߭ߌߦߊ ( ߁߉߇߃–߂߀߁߇ ) + ߡߏߙߌߕߊߣߌߞߊ ߟߎ߬ ߟߊ߫ ߎ߬ߜ߭ߌߦߊ ( ߁߉߇߃–߂߀߁߇ ) + ߡߙߏ + + + ߡߏߙߌߕߊߣߌߞߊ ߟߎ߬ ߟߊ߫ ߎ߬ߜ߭ߌߦߊ + ߡߏߙߌߕߊߣߌߞߊ ߟߎ߬ ߟߊ߫ ߎ߬ߜ߭ߌߦߊ + ߡߎߜ߭ + + + ߡߏߙߛߌߞߊ ߟߎ߬ ߟߊ߫ ߙߔߎ + ߡߏߙߛߌߞߊ ߟߎ߬ ߟߊ߫ ߙߔߎ + ߡߙߔ + ߙߛ + + + ߡߟߊߥߌߞߊ ߟߎ߬ ߟߊ߫ ߞߎߥߛߊ + ߡߟߊߥߌߞߊ ߟߎ߬ ߟߊ߫ ߞߎߥߛߊ + ߡߟߞ + + + ߡߏߖ߭ߊ߲ߓߞߌߞߊ ߟߎ߬ ߟߊ߫ ߍߛߑߞߎߘߏߛ + ߡߏߖ߭ߊ߲ߓߞߌߞߊ ߟߎ߬ ߟߊ߫ ߍߛߑߞߎߘߏߛ + ߡߖ߭ߋ + + + ߡߏߖ߭ߊ߲ߓߞߌߞߊ ߟߎ߬ ߟߊ߫ ߡߋߕߌߞ ( ߁߉߈߀–߂߀߀߆ ) + ߡߏߖ߭ߊ߲ߓߞߌߞߊ ߟߎ߬ ߟߊ߫ ߡߋߕߌߞ ( ߁߉߈߀–߂߀߀߆ ) + ߡߖߡ + + + ߡߏߖ߭ߊ߲ߓߞߌߞߊ ߟߎ߬ ߟߊ߫ ߡߋߕߌߞߊߟ + ߡߏߖ߭ߊ߲ߓߞߌߞߊ ߟߎ߬ ߟߊ߫ ߡߋߕߌߞߊߟ + ߡߖ߭ߡ + + + ߣߊߡߌ߲ߓߌߞߊ ߟߎ߬ ߟߊ߫ ߘߏߟߊ߯ߙ + ߣߊߡߌ߲ߓߌߞߊ ߟߎ߬ ߟߊ߫ ߘߏߟߊ߯ߙ + ߣߡߘ + ߛ + + + ߖߋ߬ߟߌ߬ߓߊ߬ߞߊ ߟߎ߬ ߟߊ߫ ߣߍߙߊ + ߖߋ߬ߟߌ߬ߓߊ߬ߞߊ ߟߎ߬ ߟߊ߫ ߣߍߙߊ + ߖߣ + + + ߖ߭ߌ߲ߓߊߓߏߦߋߞߊ ߟߎ߬ ߟߊ߫ ߘߏߟߊ߯ߙ + ߖ߭ߌ߲ߓߊߓߏߦߋߞߊ ߟߎ߬ ߟߊ߫ ߘߏߟߊ߯ߙ + ߖ߭ߓߘ + + + ߙߎߥߊ߲ߘߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߙߎߥߊ߲ߘߞߊ ߟߎ߬ ߟߊ߫ ߝߊߙߊ߲ + ߙߥߝ + ߝߙ + + + ߛߋߦߌߛߍߟߌߞߊ ߟߎ߬ ߟߊ߫ ߙߎߔߌ + ߛߋߦߌߛߍߟߌߞߊ ߟߎ߬ ߟߊ߫ ߙߎߔߌ + ߛߛߥ + + + ߛߎ߬ߘߊ߲߬ߞߊ ߟߎ߬ ߘߌ߬ߣߊ߯ߙ ( ߁߉߉߂–߂߀߀߇ ) + ߛߎ߬ߘߊ߲߬ߞߊ ߟߎ߬ ߘߌ߬ߣߊ߯ߙ ( ߁߉߉߂–߂߀߀߇ ) + ߛߘߘ + + + ߛߎߘߊ߲ߞߊ ߟߎ߬ ߟߊ߫ ߔߐߣߌ߬ + ߛߎߘߊ߲ߞߊ ߟߎ߬ ߟߊ߫ ߔߐߣߌ߬ + ߛߘߜ߭ + + + ߛߎ߬ߘ߲ߊ߬ߞߊ ߟߊ߫ ߔߐߣߌ߬ ( ߁߉߅߆–߂߀߀߇ ) + ߛߎ߬ߘ߲ߊ߬ߞߊ ߟߊ߫ ߔߐߣߌ߬ ( ߁߉߅߆–߂߀߀߇ ) + ߛߘߔ + + + ߛߍ߲ߕ ߤߌߟߋߣߞߊ ߟߎ߬ ߟߊ߫ ߔߐߣߌ߬ + ߛߍ߲ߕ ߤߌߟߋߣߞߊ ߟߎ߬ ߟߊ߫ ߔߐߣߌ߬ + ߛߤߔ + + + ߛߙߊ߬ߟߏ߲߬ߞߊ ߟߎ߬ ߟߊ߫ ߔߐߣߌ߬ + ߛߙߊ߬ߟߏ߲߬ߞߊ ߟߎ߬ ߟߊ߫ ߔߐߣߌ߬ + ߛߙߔ + + + ߛߏߡߊߟߌߞߊ ߟߎ߬ ߟߊ߫ ߛߌߟߌ߲ߜ߭ + ߛߏߡߊߟߌߞߊ ߟߎ߬ ߟߊ߫ ߛߌߟߌ߲ߜ߭ + ߛߡߛ + + + ߛߎ߬ߘߊ߲߬ߞߊ ߟߎ߬ ߟߊ߫ ߔߐߣߌ߬ + ߛߎ߬ߘߊ߲߬ߞߊ ߟߎ߬ ߟߊ߫ ߔߐߣߌ߬ + ߛߛߔ + + + ߛߊߏߕߏߡߋߞߊ ߟߎ߬ ߟߊ߫ ߘߏߓߙߊߛ ( ߁߉߇߇– ߂߀߁߇ ) + ߛߊߏߕߏߡߋߞߊ ߟߎ߬ ߟߊ߫ ߘߏߓߙߊߛ ( ߁߉߇߇– ߂߀߁߇ ) + ߛߕߘ + + + ߛߊߏߕߏߡߋߞߊ ߟߎ߬ ߟߊ߫ ߘߏߓߙߊߛ + ߛߊߏߕߏߡߋߞߊ ߟߎ߬ ߟߊ߫ ߘߏߓߙߊߛ + ߛߔߘ + ߛߓ + + + ߛߑߥߊߕߣߞߊ ߟߎ߬ ߟߌߟߊ߲ߖ߭ߋߣߌߛ + ߛߑߥߊߕߣߞߊ ߟߎ߬ ߟߌߟߊ߲ߖ߭ߋߣߌߛ + ߛߖ߭ߟ + + + ߕߎߣߛߌߞߊ ߟߎ߬ ߟߊ߫ ߘߌ߬ߣߊ߯ߙ + ߕߎߣߛߌߞߊ ߟߎ߬ ߟߊ߫ ߘߌ߬ߣߊ߯ߙ + ߕߣߘ + + + ߕߊ߲ߖ߭ߊ߯ߣߌߞߊ ߟߎ߬ ߟߊ߫ ߛߌߟߌ߲ߜ߭ + ߕߊ߲ߖ߭ߊ߯ߣߌߞߊ ߟߎ߬ ߟߊ߫ ߛߌߟߌ߲ߜ߭ + ߕߖ߭ߛ + + + ߎߜ߭ߊ߲ߘߞߊ ߟߎ߬ ߟߊ߫ ߛߌߟߌ߲ߜ߭ ( ߁߉߆߆–߁߉߈߇ ) + ߎߜ߭ߊ߲ߘߞߊ ߟߎ߬ ߟߊ߫ ߛߌߟߌ߲ߜ߭ ( ߁߉߆߆–߁߉߈߇ ) + ߎߜ߭ߥ + + + ߎߜ߭ߊ߲ߘߞߊ ߟߎ߬ ߟߊ߫ ߛߌߟߌ߲ߜ߭ + ߎߜ߭ߊ߲ߘߞߊ ߟߎ߬ ߟߊ߫ ߛߌߟߌ߲ߜ߭ + ߎߜ߭ߛ + + + ߝߘߊ߬ߝߌ߲߬ߠߊ߫ ߕߊ߲ߓߊ߲ 
ߠߎ߬ ߝߊߙߊ߲߫ ߛߍߝߊ + ߝߘߊ߬ߝߌ߲߬ߠߊ߫ ߕߊ߲ߓߊ߲ ߠߎ߬ ߝߊߙߊ߲߫ ߛߍߝߊ + ߝߛߝ + + + ߝߊߙߊ߲߫ ߛߍߝߊ + ߝߊߙߊ߲߫ ߛߍߝߊ + ߾ + + + ߥߙߏ߬ߘߎ߮ ߝߘߊ߬ߝߌ߲߬ߠߞߊ ߟߎ߬ ߟߊ߫ ߙߊ߲ߘ + ߥߙߏ߬ߘߎ߮ ߝߘߊ߬ߝߌ߲߬ߠߞߊ ߟߎ߬ ߟߊ߫ ߙߊ߲ߘ + ߥߝߙ + + + ߖ߭ߊ߲ߓߌߞߊ ߟߎ߬ ߟߊ߫ ߞߎߥߛߊ ( ߁߉߆߈–߂߀߁߂ ) + ߖ߭ߊ߲ߓߌߞߊ ߟߎ߬ ߟߊ߫ ߞߎߥߛߊ ( ߁߉߆߈–߂߀߁߂ ) + ߖ߭ߓߞ + + + ߖ߭ߊ߲ߓߌߞߊ ߟߎ߬ ߟߊ߫ ߞߎߥߛߊ + ߖ߭ߊ߲ߓߌߞߊ ߟߎ߬ ߟߊ߫ ߞߎߥߛߊ + ߖ߭ߓߥ + ߖ߭ߞ + + + ߖ߭ߊ߬ߦߌ߬ߞߊ ߟߎ߬ ߟߊ߫ ߖ߭ߊ߬ߦߌߙ ( ߁߉߉߃–߁߉߉߈ ) + ߖ߭ߊ߬ߦߌ߬ߞߊ ߟߎ߬ ߟߊ߫ ߖ߭ߊ߬ߦߌߙ ( ߁߉߉߃–߁߉߉߈ ) + ߖ߭ߙ + + + ߖ߭ߊߦߙߌߞߊ ߟߎ߫ ߟߊ߫ ߖ߭ߊ߬ߦߌߙ ( ߁߉߇߁–߁߉߉߃ ) + ߖ߭ߊߦߙߌߞߊ ߟߎ߫ ߟߊ߫ ߖ߭ߊ߬ߦߌߙ ( ߁߉߇߁–߁߉߉߃ ) + ߖ߭ߙߖ߭ + + + ߖߌ߲ߓߊߓߏߦߋߞߊ ߟߎ߬ ߟߊ߫ ߘߏߟߊ߯ߙ (߁߉߈߀–߂߀߀߈ ) + ߖߌ߲ߓߊߓߏߦߋߞߊ ߟߎ߬ ߟߊ߫ ߘߏߟߊ߯ߙ (߁߉߈߀–߂߀߀߈ ) + ߖ߭ߥߘ + + + ߖ߭ߌ߲ߓߊߓߏߦߋߞߊ ߟߎ߬ ߟߊ߫ ߘߏߟߊ߯ߙ ( ߂߀߀߉ ) + ߖ߭ߌ߲ߓߊߓߏߦߋߞߊ ߟߎ߬ ߟߊ߫ ߘߏߟߊ߯ߙ ( ߂߀߀߉ ) + ߖ߭ߥߟ + + + ߖ߭ߌ߲ߓߊߓߏߦߋߞߊ ߟߎ߬ ߟߊ߫ ߘߏߟߊ߯ߙ ( ߂߀߀߈ ) + ߖ߭ߌ߲ߓߊߓߏߦߋߞߊ ߟߎ߬ ߟߊ߫ ߘߏߟߊ߯ߙ ( ߂߀߀߈ ) + ߖ߭ߥߙ + + + + + + + ߝߊ߯ߘߐߞߍ + ߝߊ߯ߘߐߞߍ {0} + + + ߦߟߍ߬ߘߐ߬ߞߍ + ߦߟߍ߬ߘߐ߬ߞߍ {0} + + + ߛߊ߲߭ + ߛߊ߲߭ {0} + ߛߊ߲߭ ߠߊ߫ {0} + + + ߞߊߙߏ + ߞߊߙߏ {0} + ߞߊߙߏ ߟߊ߫ {0} + + + ߞߎ߲߬ߢߐ߮ + ߞߎ߲߬ߢߐ߮ {0} + ߞߎ߲߬ߢߐ߮ ߟߊ߫ {0} + + + ߟߏ߲ + ߟߏ߲ ߜߍ {0} + ߟߏ߲ ߠߊ߫ {0} + + + ߕߎ߬ߡߊ߬ߙߋ߲ + ߕߎ߬ߡߊ߬ߙߋ߲ {0} + ߕߎ߬ߡߊ߬ߙߋ߲ ߞߘߐ߫ {0} + + + ߡߌ߬ߛߍ߲ + ߡߌ߬ߛߍ߲ {0} + ߡߌ߬ߛߍ߲ ߠߊ߫ {0} + + + ߝߌ߬ߟߊ߲ + ߝߌ߬ߟߊ߲ {0} + + + ߝߌ߬ߟߊ߲߬ߥߊ߰ߘߋ߲ + ߝߌ߬ߟߊ߲߬ߥߊ߰ߘߋ߲ {0} + + + ߝߌ߬ߟߊ߲߬ߢߊ߲߯ߕߊ + ߝߌ߬ߟߊ߲߬ߢߊ߲߯ߕߊ {0} + + + ߝߌ߬ߟߊ߲߬ߞߏߦߋ + ߝߌ߬ߟߊ߲߬ߞߏߦߋ {0} + + + + + ߝߊ߯ߘߐߞߍ + ߝߊ߯ߘߐߞߍ {0} + + + ߦߟߍ߬ߘ + + + ߛߊ߲߭ + ߛߊ߲߭ {0} + {0}/ߛߊ߲߭ + + + ߞ. + ߞ. {0} + {0}/ߞ. + + + ߞߎ߲߬ߢ + ߞߎ߲߬ߢ{0} + {0}/ߞߎ߲߬ߢ + + + ߟ + ߟ {0} + {0}/ߟ + + + ߕߎ߬ߡߊ߬ߙߋ߲ + ߕ {0} + {0}/ߕ + + + ߡߌ߬ߛ + ߡߌ߬ߛ {0} + {0}/ߡߌ߬ߛ + + + ߝ + ߝ {0} + {0}/ߝ + + + ߝߥ + ߝߥ {0} + + + ߝߢ + ߝߢ {0} + + + ߝߌ߬ߟߞ + ߝߞ {0} + + + + + ߝߊ߯ߘߐߞߍ + ߝߊ߯ߘߞ{0} + + + ߦߟߍ߬ߘߐ߬ߞߍ + ߦߟߍ߬ߘߞ{0} + + + ߛߊ߲߭ + ߛߊ߲߭{0} + {0}/ߛߊ߲߭ + + + ߞߊߙߏ + ߞ.{0} + {0}/ߞ. 
+ + + ߞߎ߲߬ߢ + ߞߎ߲߬ߢ{0} + {0}/ߞߎ߲߬ߢ + + + ߟߏ߲ + ߟ{0} + {0}/ߟ + + + ߕߎ߬ߡߊ߬ߙߋ߲ + ߕ{0} + {0}/ߕ + + + ߡߌ߬ߛ + ߡߌ߬ߛ{0} + {0}/ߡߌ߬ߛ + + + ߝ + ߝ{0} + {0}/ߝ + + + ߝߥ + ߝߥ{0} + + + ߝߢ + ߝߢ{0} + + + ߝߞ + ߝߞ{0} + + + + diff --git a/make/data/cldr/common/main/nqo_GN.xml b/make/data/cldr/common/main/nqo_GN.xml new file mode 100644 index 00000000000..3bde714a333 --- /dev/null +++ b/make/data/cldr/common/main/nqo_GN.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/nr.xml b/make/data/cldr/common/main/nr.xml new file mode 100644 index 00000000000..1c3d63f8f2a --- /dev/null +++ b/make/data/cldr/common/main/nr.xml @@ -0,0 +1,134 @@ + + + + + + + + + + + isiNdebele + + + + [a b c d e f g h i j k l m n o p q s t u v w x y z] + [r] + [A B C D E F G H I J K L M N O P Q S T U V W X Y Z] + + + + + + + + + + + + + + Jan + Feb + Mat + Apr + Mey + Jun + Jul + Arh + Sep + Okt + Usi + Dis + + + Janabari + uFeberbari + uMatjhi + u-Apreli + Meyi + Juni + Julayi + Arhostosi + Septemba + Oktoba + Usinyikhaba + Disemba + + + + + + + Son + Mvu + Bil + Tha + Ne + Hla + Gqi + + + uSonto + uMvulo + uLesibili + Lesithathu + uLesine + ngoLesihlanu + umGqibelo + + + + + + BC + AD + + + + + + + + , +   + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + ¤#,##0.00 + + + + + + R + + + + diff --git a/make/data/cldr/common/main/nr_ZA.xml b/make/data/cldr/common/main/nr_ZA.xml new file mode 100644 index 00000000000..b009779e64c --- /dev/null +++ b/make/data/cldr/common/main/nr_ZA.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/nso.xml b/make/data/cldr/common/main/nso.xml new file mode 100644 index 00000000000..d08f3724536 --- /dev/null +++ b/make/data/cldr/common/main/nso.xml @@ -0,0 +1,561 @@ + + + + + + + + + + + Sesotho sa Leboa + + + Afrika Borwa + + + + [a b d e ê f g h i j k l m n o ô p r s š t u w x y] + [c q v z] + [A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + + + + + + + + + + + + + + Phere + Dibo + Hlak + Mora 
+ Mopi + Phupu + Mose + Phato + Lewe + Dipha + Diba + Manth + + + P + D + H + M + M + P + M + P + L + D + D + M + + + Pherekgong + Dibokwane + Hlakola + Moranang + Mopitlo + Phupu + Mosegemanye + Phato + Lewedi + Diphalane + Dibatsela + Manthole + + + + + Phere + Dibo + Hlak + Mora + Mopi + Phupu + Mose + Phato + Lewe + Dipha + Diba + Manth + + + P + D + H + M + M + P + M + P + L + D + D + M + + + Pherekgong + Dibokwane + Hlakola + Moranang + Mopitlo + Phupu + Mosegemanye + Phato + Lewedi + Diphalane + Dibatsela + Manthole + + + + + + + Lam + Mos + Bed + Rar + Ne + Hla + Mok + + + L + M + B + R + N + H + M + + + Lam + Mos + Bed + Rar + Ne + Hla + Mok + + + Lamorena + Musopologo + Labobedi + Laboraro + Labone + Labohlano + Mokibelo + + + + + Lam + Mos + Bed + Rar + Ne + Hla + Mok + + + L + M + B + R + N + H + M + + + Lam + Mos + Bed + Rar + Ne + Hla + Mok + + + Lamorena + Musopologo + Labobedi + Laboraro + Labone + Labohlano + Mokibelo + + + + + + + 1st Kotara + 2nd Kotara + 3rd Kotara + 4th Kotara + + + 1 + 2 + 3 + 4 + + + 1st Kotara + 2nd Kotara + 3rd Kotara + 4th Kotara + + + + + 1st Kotara + 2nd Kotara + 3rd Kotara + 4th Kotara + + + 1 + 2 + 3 + 4 + + + 1st Kotara + 2nd Kotara + 3rd Kotara + 4th Kotara + + + + + + + AM + PM + + + a + p + + + AM + PM + + + + + AM + PM + + + AM + PM + + + AM + PM + + + + + + Before Christ + Before Common Era + Anno Domini + Common Era + + + BC + BCE + AD + CE + + + + + + y MMMM d, EEEE + yMMMMEEEEd + + + + + y MMMM d + yMMMMd + + + + + y MMM d + yMMMd + + + + + y-MM-dd + yMMdd + + + + + + + HH:mm:ss zzzz + HHmmsszzzz + + + + + HH:mm:ss + HHmmss + + + + + HH:mm + HHmm + + + + + + + {1} 'ka' {0} + + + + + {1} 'ka' {0} + + + + + {1}, {0} + + + + + {1}, {0} + + + + d + ccc + d, E + E h:mm a + E HH:mm + E h:mm:ss a + E HH:mm:ss + G y + G y MMM + G y MMM d + G y MMM d, E + h a + HH + h:mm a + HH:mm + h:mm:ss a + HH:mm:ss + h:mm:ss a v + HH:mm:ss v + h:mm a v + HH:mm v + L + MM-dd + MM-dd, E + LLL + MMM d + MMM d, E + MMMM d + 'beke' W 
'ya' MMM + 'beke' 'ya' 'bo' W 'ya' MMM + mm:ss + y + y-MM + y-MM-dd + y-MM-dd, E + y MMM + y MMM d + y MMM d, E + y MMMM + y QQQ + y QQQQ + 'beke' 'ya' 'bo' w 'ya' Y + 'beke' 'ya' 'bo' w 'ya' Y + + + {0} {1} + + + {0} – {1} + + d–d + + + h a – h a + h–h a + + + HH–HH + + + h:mm a – h:mm a + h:mm–h:mm a + h:mm–h:mm a + + + HH:mm–HH:mm + HH:mm–HH:mm + + + h:mm a – h:mm a v + h:mm–h:mm a v + h:mm–h:mm a v + + + HH:mm–HH:mm v + HH:mm–HH:mm v + + + h a – h a v + h–h a v + + + HH–HH v + + + MM–MM + + + MM-dd – MM-dd + MM-dd – MM-dd + + + MM-dd, E – MM-dd, E + MM-dd, E – MM-dd, E + + + LLL–LLL + + + MMM d–d + MMM d – MMM d + + + MMM d, E – MMM d, E + MMM d, E – MMM d, E + + + y–y + + + y-MM – y-MM + y-MM – y-MM + + + y-MM-dd – y-MM-dd + y-MM-dd – y-MM-dd + y-MM-dd – y-MM-dd + + + y-MM-dd, E – y-MM-dd, E + y-MM-dd, E – y-MM-dd, E + y-MM-dd, E – y-MM-dd, E + + + y MMM–MMM + y MMM – y MMM + + + y MMM d–d + y MMM d – MMM d + y MMM d – y MMM d + + + y MMM d, E – MMM d, E + y MMM d, E – MMM d, E + y MMM d, E – y MMM d, E + + + y MMMM–MMMM + y MMMM – y MMMM + + + + + + + + 1 + + . 
+   + % + + + - + E + × + + + NaN + : + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + ¤ #,##0.00 + + + ¤#,##0.00 + + + {0} {1} + {0} {1} + + + + R + + + + ≥{0} + {0}–{1} + + + diff --git a/make/data/cldr/common/main/nso_ZA.xml b/make/data/cldr/common/main/nso_ZA.xml new file mode 100644 index 00000000000..ae90284ca6b --- /dev/null +++ b/make/data/cldr/common/main/nso_ZA.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/nus.xml b/make/data/cldr/common/main/nus.xml index 36e6654cfb5..4164ad41d5f 100644 --- a/make/data/cldr/common/main/nus.xml +++ b/make/data/cldr/common/main/nus.xml @@ -1,6 +1,6 @@ - + + + + + + + + Diné Bizaad + + + + + left-to-right + top-to-bottom + + + + [a á ą {ą\u0301} b {ch} {ch’} d {dl} {dz} e é ę {ę\u0301} g {gh} h {hw} i í į {į\u0301} j k {k’} {kw} l ł m n o ó ǫ {ǫ\u0301} s {sh} t {t’} {tł} {tł’} {ts} {ts’} w x y z {zh}] + [\- ‑ , . % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‐ ‑ – — , ; \: ! ? . … ' ‘ ’ " “ ” ( ) \[ \] § @ * / \& # † ‡ ′ ″] + + diff --git a/make/data/cldr/common/main/nv_US.xml b/make/data/cldr/common/main/nv_US.xml new file mode 100644 index 00000000000..fb4fee38263 --- /dev/null +++ b/make/data/cldr/common/main/nv_US.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/ny.xml b/make/data/cldr/common/main/ny.xml new file mode 100644 index 00000000000..31261bb7b63 --- /dev/null +++ b/make/data/cldr/common/main/ny.xml @@ -0,0 +1,116 @@ + + + + + + + + + + + Nyanja + + + + [a b c d e f g h i j k l m n o p r s t u w ŵ y z] + [q v x] + [A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + + + + + + + + Jan + Feb + Mal + Epu + Mei + Jun + Jul + Oga + Sep + Oku + Nov + Dis + + + Januwale + Febuluwale + Malichi + Epulo + Mei + Juni + Julai + Ogasiti + Seputemba + Okutoba + Novemba + Disemba + + + + + + + Mul + Lem + Wir + Tat + Nai + San + Wer + + + Lamulungu + Lolemba + Lachiwiri + Lachitatu + Lachinayi + Lachisanu + Loweruka + + + + + + + h:mm:ss a zzzz + 
ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + + + Malawian Kwacha + + + + diff --git a/make/data/cldr/common/main/ny_MW.xml b/make/data/cldr/common/main/ny_MW.xml new file mode 100644 index 00000000000..acd7b467f2e --- /dev/null +++ b/make/data/cldr/common/main/ny_MW.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/nyn.xml b/make/data/cldr/common/main/nyn.xml index 8cee15975b9..ae6e6a12cba 100644 --- a/make/data/cldr/common/main/nyn.xml +++ b/make/data/cldr/common/main/nyn.xml @@ -1,6 +1,6 @@ - + + + + + + + + 𐓏𐓘𐓻𐓘𐓻𐓟 + + + United States + + + 𐓷𐓘𐓵𐓘𐓷𐓘 𐓨𐓣𐓡𐓣𐓵𐓟 𐓣͘𐓤𐓯𐓟 + + + US + + + + [𐓘 {𐓘\u0301} {𐓘\u0301\u0358} {𐓘\u030B} {𐓘\u030B\u0358} {𐓘\u0304} {𐓘\u0304\u0358} {𐓘\u0358} 𐓙 {𐓙\u0301} {𐓙\u030B} {𐓙\u0304} 𐓚 {𐓚\u0301} {𐓚\u030B} {𐓚\u0304} 𐓛 {𐓛\u0358} 𐓜 𐓝 𐓞 𐓟 {𐓟\u0301} {𐓟\u030B} {𐓟\u0304} 𐓠 {𐓠\u0301} {𐓠\u030B} {𐓠\u0304} 𐓡 𐓢 𐓣 {𐓣\u0301} {𐓣\u0301\u0358} {𐓣\u030B} {𐓣\u030B\u0358} {𐓣\u0304} {𐓣\u0304\u0358} {𐓣\u0358} 𐓤 𐓥 𐓦 𐓧 𐓨 𐓩 𐓪 {𐓪\u0301} {𐓪\u0301\u0358} {𐓪\u030B} {𐓪\u030B\u0358} {𐓪\u0304} {𐓪\u0304\u0358} {𐓪\u0358} 𐓫 {𐓫\u0301} {𐓫\u030B} {𐓫\u0304} 𐓬 𐓭 𐓮 𐓯 𐓰 𐓱 𐓲 𐓳 𐓴 𐓵 𐓶 {𐓶\u0301} {𐓶\u030B} {𐓶\u0304} 𐓷 𐓸 𐓹 𐓺 𐓻] + [] + [𐒰 {𐒰\u0358} 𐒱 𐒲 𐒳 𐒴 𐒵 𐒶 𐒷 𐒸 𐒹 𐒺 𐒻 {𐒻\u0358} 𐒼 𐒽 𐒾 𐒿 𐓀 𐓁 𐓂 {𐓂\u0358} 𐓃 𐓄 𐓅 𐓆 𐓇 𐓈 𐓉 𐓊 𐓋 𐓌 𐓍 𐓎 𐓏 𐓐 𐓑 𐓒 𐓓] + [\- ‑ , . % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‐ ‑ – — , ; \: ! ? . 
… ' ‘ ’ " “ ” ( ) \[ \] § @ * / \& # † ‡ ′ ″] + + + + + + + + EEEE, MMMM d, y G + GyMMMMEEEEd + + + + + MMMM d, y G + GyMMMMd + + + + + MMM d, y G + GyMMMd + + + + + M/d/y GGGGG + GGGGGyMd + + + + + + + + + 𐓄𐓘𐓡𐓛͘𐓧𐓟 + 𐓵𐓪͘𐓬𐓘 + 𐓵𐓘𐓜𐓣 + 𐓰𐓪𐓬𐓘 + 𐓮𐓘𐓰𐓘 + 𐓯𐓘𐓬𐓟 + 𐓄𐓟𐓵𐓪͘𐓬𐓘 + 𐒼𐓣𐓟𐓰𐓪𐓬𐓘 + 𐒿𐓟𐓜𐓛𐓲𐓟𐓷𐓣͘𐓤𐓟 + 𐒿𐓟𐓜𐓛 + 𐒰𐓧𐓣 𐓏𐓣͘𐓸𐓲𐓣 + 𐒰𐓧𐓣 𐓍𐓪͘𐓬𐓘 + + + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓄𐓘𐓡𐓛͘𐓧𐓟 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓵𐓪͘𐓬𐓘 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓵𐓘𐓜𐓣 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓰𐓪𐓬𐓘 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓮𐓘𐓰𐓘 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓯𐓘𐓬𐓟 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓄𐓟𐓵𐓪͘𐓬𐓘 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐒼𐓣𐓟𐓰𐓪𐓬𐓘 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐒿𐓟𐓜𐓛𐓲𐓟𐓷𐓣͘𐓤𐓟 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐒿𐓟𐓜𐓛 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐒰𐓧𐓣 𐓏𐓣͘𐓸𐓲𐓣 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐒰𐓧𐓣 𐓍𐓪͘𐓬𐓘 + + + + + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓄𐓘𐓡𐓛͘𐓧𐓟 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓵𐓪͘𐓬𐓘 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓵𐓘𐓜𐓣 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓰𐓪𐓬𐓘 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓮𐓘𐓰𐓘 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓯𐓘𐓬𐓟 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐓄𐓟𐓵𐓪͘𐓬𐓘 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐒼𐓣𐓟𐓰𐓪𐓬𐓘 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐒿𐓟𐓜𐓛𐓲𐓟𐓷𐓣͘𐓤𐓟 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐒿𐓟𐓜𐓛 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐒰𐓧𐓣 𐓏𐓣͘𐓸𐓲𐓣 + 𐓀𐓣͘𐓪͘𐓬𐓘 𐒰𐓧𐓣 𐓍𐓪͘𐓬𐓘 + + + + + + + 𐓏 + 𐓄 + 𐓍 + 𐒴 + 𐓈 + 𐓊 + 𐓸 + + + 𐒹𐓘͘𐓬𐓘 𐓏𐓘𐓤𐓘͘𐓰𐓘𐓤𐓣 + 𐒹𐓘͘𐓬𐓘 𐓄𐓘𐓡𐓛͘𐓧𐓣 + 𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓵𐓪͘𐓬𐓘 + 𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓵𐓘𐓜𐓣 + 𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓰𐓪𐓬𐓘 + 𐒹𐓘͘𐓬𐓘 𐓈𐓘 𐓵𐓘𐓲𐓘 𐓻𐓣͘ + 𐒹𐓘͘𐓬𐓘 𐓂𐓤𐓘𐓸𐓟 𐓣͘𐓤𐓟 + + + + + + + EEEE, MMMM d, y + yMMMMEEEEd + + + + + MMMM d, y + yMMMMd + + + + + MMM d, y + yMMMd + + + + + M/d/yy + yyMd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + 𐓂𐓨𐓚𐓤𐓘 + + + 𐓀𐓣͘𐓪͘𐓬𐓘 + + + 𐒹𐓘͘𐓬𐓘 𐓷𐓘𐓤𐓘͘𐓰𐓘𐓤𐓣 + + + 𐒹𐓘͘𐓬𐓘 + + + 𐓨𐓣𐓪𐓵𐓘𐓤𐓟 𐓪𐓰𐓘𐓩𐓘͘ + + + 𐓰𐓘𐓲𐓟 𐓤𐓯𐓣𐓵𐓟 + + + 𐓰𐓘𐓲𐓟 𐓤𐓯𐓣𐓵𐓟 𐓻𐓣͘ + + + + + + . 
+ , + ; + % + + + - + E + + + NaN + + + + + ¤ 0K + ¤ 00K + ¤ 000K + ¤ 0M + ¤ 00M + ¤ 000M + ¤ 0G + ¤ 00G + ¤ 000G + ¤ 0T + ¤ 00T + ¤ 000T + + + + + + $ + + + + + + + 𐓂𐓨𐓚𐓤𐓘 + 𐓂𐓨𐓚𐓤𐓘 {0} + + + 𐓀𐓣͘𐓪͘𐓬𐓘 + 𐓀𐓣͘𐓪͘𐓬𐓘 {0} + + + 𐒹𐓘͘𐓬𐓘𐓷𐓘𐓤𐓘͘𐓰𐓛𐓤𐓣 + 𐒹𐓘͘𐓬𐓘𐓷𐓘𐓤𐓘͘𐓰𐓛𐓤𐓣 {0} + + + 𐒹𐓘͘𐓬𐓘 + {0} 𐒹𐓘͘𐓬𐓘 + + + 𐓨𐓣𐓪𐓵𐓘𐓤𐓟 𐓪𐓰𐓘𐓩𐓘͘ + {0} 𐓨𐓣𐓪𐓵𐓘𐓤𐓟 𐓪𐓰𐓘𐓩𐓘͘ + + + 𐓰𐓘𐓲𐓟 𐓤𐓯𐓣𐓵𐓟 + {0} 𐓰𐓘𐓲𐓟 𐓤𐓯𐓣𐓵𐓟 + + + 𐓰𐓘𐓲𐓟 𐓤𐓯𐓣𐓵𐓟 𐓻𐓣͘ + {0} 𐓰𐓘𐓲𐓟 𐓤𐓯𐓣𐓵𐓟 𐓻𐓣͘ + + + + diff --git a/make/data/cldr/common/main/osa_US.xml b/make/data/cldr/common/main/osa_US.xml new file mode 100644 index 00000000000..9056a39064d --- /dev/null +++ b/make/data/cldr/common/main/osa_US.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/pa.xml b/make/data/cldr/common/main/pa.xml index 21427343646..f324ac5314a 100644 --- a/make/data/cldr/common/main/pa.xml +++ b/make/data/cldr/common/main/pa.xml @@ -1,6 +1,6 @@ - + + + + + + + + ingles + Papiamentu + + + + + + Aruba + Kòrsou + Turkia + + + meter + britániko + merikano + + + Idioma: {0} + Manera di skirbi: {0} + Region: {0} + + + + [a b c d e è f g h i j k l m n ñ o ò p q r s t u ù ü v w x y z] + [á é í ó ú] + [\- ‑ , . % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‑ , ; \: ! ? . … ' ‘ ’ " “ ” ( ) \[ \] § @ / \& # % ′ ″] + + + + + + + + Yanüari + Febrüari + Mart + Aprel + Mei + Yüni + Yüli + Ougùstùs + Sèptèmber + Òktober + Novèmber + Desèmber + + + + + + + djadumingu + djaluna + djamars + djarason + djaweps + djabièrnè + djasabra + + + + + + + AM + PM + + + + + + dd-MM-y + d MMM y + + + + + + ora di {0} + + + Greenwich Mean Time + + + + + + + . 
+ , + % + - + + + + + {title} {given} {given2} {surname} {surname2} + + + {title} {surname} {surname2} {given} {given2} + + + Zendaya + + + Irene + + + Mari-Sue + + + Jackson Martina + + + Ada Kornelia + + + diff --git a/make/data/cldr/common/main/pap_AW.xml b/make/data/cldr/common/main/pap_AW.xml new file mode 100644 index 00000000000..0b4b9f459e8 --- /dev/null +++ b/make/data/cldr/common/main/pap_AW.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/pap_CW.xml b/make/data/cldr/common/main/pap_CW.xml new file mode 100644 index 00000000000..d3e92b3856d --- /dev/null +++ b/make/data/cldr/common/main/pap_CW.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/pcm.xml b/make/data/cldr/common/main/pcm.xml index 25f9a4bf787..c94fd2b62fe 100644 --- a/make/data/cldr/common/main/pcm.xml +++ b/make/data/cldr/common/main/pcm.xml @@ -1,6 +1,6 @@ - + + + + + + + + {0} ({1}) + {0}, {1} + {0}: {1} + + + arābiskan + dāniskan + miksiskan + Āustrarīkis miksiskan + Šwēicis aūktamiksiskan + grēkiskan + ēngliskan + Austrālijas ēngliskan + Kanādas ēngliskan + brītiskan ēngliskan + amērikaniskan ēngliskan + APW ēngliskan + špāniskan + Lātiniskas Amērikas špāniskan + eurōpiskan špāniskan + Meksikus špāniskan + èstiskan + sōmiskan + prancōziskan + Kanādas prancōziskan + Šwēicis prancōziskan + wālkiskan + japāniskan + laītawiskan + lattawiskan + ullandiskan + pōliskan + prūsiskan + pōrtugaliskan + Brazīlijas pōrtugaliskan + eurōpiskan pōrtugaliskan + maskōwitiskan + šwēdiskan + turkiskan + niwaistā bilā + kīniskan + prastintan kīniskan + tradiciōnalin kīniskan + + + + + + + + + + + + + + + swītai + Afrika + Zēimanamērika + Pussideinanamērika + Amērika + Āzija + Eurōpa + Andōra + Antīgwa be Barbūda + Albānija + Argentīnija + Āustrarīki + Austrālija + Bōsnija be Ercegōwina + Barbādas + Belgija + Bulgārija + Bōliwija + Brazīlija + Bahāmai + Krēiwa + Belīzi + Kānada + Šwēici + Čīli + Kīna + Kōlumbija + Costa Rica + Kūba + Čekkija + 
Mikskātauta + Dānanmarki + Dōminika + Dōminikas Republīki + Ekwadōrs + Estantauta + Špānija + Sōmija + Farēirai + Prankrīki + Debabritānija + DB + Grenāda + Prancōziska Gujāna + Gibrāltars + Grēnlandan + Grēkantauta + Gwatemāla + Gujāna + Hōnduras + Kruātija + Haīti + Ungrai + Indōnezija + Īndija + Īslandan + Wālkija + Jamāika + Japānija + Pussideinankōreja + Līchtenšteinan + Laītawa + Luksemburgan + Lattawa + Mōnakō + Mōldawija + Mōntenegran + Mālta + Meksiku + Nikarāgwa + Nōrwigai + Nawazēlandan + Panāma + Perū + Pōli + Pōrtugalin + Palau + Paragwājs + Rumānija + Serbija + Russi + Saūdi Arābija + Šwēdija + Slōwenija + Slōwakei + San Marinō + Surināms + El Salvadōrs + Tāilandan + Turkāja + Trinidāds be Tobagō + Taiwāns + Ukrāini + Peraīnintas Wālstis + PW + Urugwājs + Venezuēla + Kōsawa + Pussideinanafrika + niwaistā regiōni + + + Gregōriskas kalāndars + sēisnas rikā + lātiniskas cipperis + + + mētriskan + brītiskan + amērikaniskan + + + Bilā: {0} + Skriptan: {0} + Regiōni: {0} + + + + [a ā b c d ḑ e ē f g ģ h i ī j k ķ l m n ņ o ō p q r ŗ s š t ț u ū v w x y z ž] + [] + [A Ā B C D Ḑ E Ē F G Ģ H I Ī J K Ķ L M N Ņ O Ō P Q R Ŗ S Š T Ț U Ū V W X Y Z Ž] + [  \- ‑ , % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‐ ‑ – — , ; \: ! ? . … “ „ ( ) \[ \] \{ \}] + + + + + + + + + + + + + + EEEE, y 'mettas' d. MMMM G + GyMMMMEEEEd + + + + + y 'mettas' d. MMMM G + GyMMMMd + + + + + dd.MM 'st'. 
y G + GyMMdd + + + + + dd.MM.y GGGGG + GGGGGyMMdd + + + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + {0} – {1} + + + + + + + + rag + was + pūl + sak + zal + sīm + līp + dag + sil + spa + lap + sal + + + R + W + P + S + Z + S + L + D + S + S + L + S + + + rags + wassarins + pūlis + sakkis + zallaws + sīmenis + līpa + daggis + sillins + spallins + lapkrūtis + sallaws + + + + + rag + was + pūl + sak + zal + sīm + līp + dag + sil + spa + lap + sal + + + R + W + P + S + Z + S + L + D + S + S + L + S + + + rags + wassarins + pūlis + sakkis + zallaws + sīmenis + līpa + daggis + sillins + spallins + lapkrūtis + sallaws + + + + + + + nad + pan + wis + pus + ket + pēn + sab + + + N + P + W + P + K + P + S + + + nadīli + panadīli + wisasīdis + pussisawaiti + ketwirtiks + pēntniks + sabattika + + + + + nad + pan + wis + pus + ket + pēn + sab + + + N + P + W + P + K + P + S + + + nadīli + panadīli + wisasīdis + pussisawaiti + ketwirtiks + pēntniks + sabattika + + + + + + + 1. k. + 2. k. + 3. k. + 4. k. + + + 1 + 2 + 3 + 4 + + + 1. ketwirts + 2. ketwirts + 3. ketwirts + 4. ketwirts + + + + + 1. ketw. + 2. ketw. + 3. ketw. + 4. ketw. + + + 1 + 2 + 3 + 4 + + + 1. ketwirts + 2. ketwirts + 3. ketwirts + 4. ketwirts + + + + + + + AM + PM + + + ankstāinan + pa pussideinan + + + + + + BC + AD + + + + + + EEEE, y 'mettas' d. MMMM + yMMMMEEEEd + + + + + y 'mettas' d. MMMM + yMMMMd + + + + + dd.MM 'st'. y + yMMdd + + + + + dd.MM.yy + yyMMdd + + + + + + + HH:mm:ss zzzz + HHmmsszzzz + + + + + HH:mm:ss z + HHmmssz + + + + + HH:mm:ss + HHmmss + + + + + HH:mm + HHmm + + + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + d. + ccc + E, d. + E, h:mm a + E, HH:mm + E, h:mm:ss a + E, HH:mm:ss + y 'm'. G + y 'm'. MMM G + dd.MM 'st'. y G + E, dd.MM 'st'. y G + h a + HH + h:mm a + HH:mm + h:mm:ss a + HH:mm:ss + h:mm:ss a; v + HH:mm:ss; v + h:mm a; v + HH:mm; v + L. + d.M + E, d.M + LLL + d. MMM + E, d. MMM + mm:ss + y 'm'. 
+ M.y + d.M.y + E, d.M.y + y 'm'. MMM + dd.MM 'st'. y + E, dd.MM 'st'. y + y 'm'. QQQ + y 'm'. QQQQ + + + {0} {1} + + + {0} – {1} + + d.–d. + + + h a – h a + h–h a + + + HH–HH + + + h:mm a – h:mm a + h:mm–h:mm a + h:mm–h:mm a + + + HH:mm–HH:mm + HH:mm–HH:mm + + + h:mm a – h:mm a v + h:mm–h:mm a v + h:mm–h:mm a v + + + HH:mm–HH:mm v + HH:mm–HH:mm v + + + h a – h a v + h–h a v + + + HH–HH v + + + M.–M. + + + dd.MM–dd.MM + dd.MM–dd.MM + + + E, dd.MM – E, dd.MM + E, dd.MM – E, dd.MM + + + MMM–MMM + + + d.–d. MMM + d. MMM – d. MMM + + + E, d. – E, d. MMM + E, d. MMM – E, d. MMM + + + y–y + + + MM.y–MM.y + MM.y–MM.y + + + dd.MM.y–dd.MM.y + dd.MM.y–dd.MM.y + dd.MM.y–dd.MM.y + + + E, dd.MM.y – E, dd.MM.y + E, dd.MM.y – E, dd.MM.y + E, dd.MM.y – E, dd.MM.y + + + y 'm'. MMM–MMM + y 'm'. MMM – y 'm'. MMM + + + dd.–dd.MM 'st'. y + dd.MM–dd.MM 'st'. y + dd.MM 'st'. y – dd.MM 'st'. y + + + E, dd. – E, dd.MM 'st'. y + E, dd.MM – E, dd.MM 'st'. y + E, dd.MM 'st'. y – E, dd.MM 'st'. y + + + y 'mettas' MMMM–MMMM + y 'mettas' MMMM – y 'mettas' MMMM + + + + + + + + ēra + + + mettan + panzdauman mettan + this year + next year + + + m. + + + m. + + + ketwirts + + + ketw. + + + ketw. + + + mīnss + + + mī. + + + mī. + + + sawaīti + + + saw. + + + saw. + + + deinā + bītan + šandēinan + ankstāinan + + + d. + + + d. + + + sawaītis deinā + + + ankstāinan / pa pussideinan + + + stūndi + + + st. + + + st. + + + minūti + + + min. + + + min. + + + sekūndi + + + sek. + + + sek. 
+ + + kerdaszōni + + + + +HH:mm;-HH:mm + GMT{0} + GMT + Kerdā: {0} + Daggas kerdā: {0} + Zēimas kerdā: {0} + {1} ({0}) + + + Centrālas Amērikas kerdā + Centrālas Amērikas zēimas kerdā + Centrālas Amērikas daggas kerdā + + + + + Dēiniskas Amērikas kerdā + Dēiniskas Amērikas zēimas kerdā + Dēiniskas Amērikas daggas kerdā + + + + + Amērikas gārban kerdā + Amērikas gārban zēimas kerdā + Amērikas gārban daggas kerdā + + + + + Pacīfiskas Amērikas kerdā + Pacīfiskas Amērikas zēimas kerdā + Pacīfiskas Amērikas daggas kerdā + + + + + Atlāntiska kerdā + Atlāntiska zēimas kerdā + Atlāntiska daggas kerdā + + + + + Centrālas Eurōpas kerdā + Centrālas Eurōpas zēimas kerdā + Centrālas Eurōpas daggas kerdā + + + + + Dēiniskas Eurōpas kerdā + Dēiniskas Eurōpas zēimas kerdā + Dēiniskas Eurōpas daggas kerdā + + + + + Wakkariskas Eurōpas kerdā + Wakkariskas Eurōpas zēimas kerdā + Wakkariskas Eurōpas daggas kerdā + + + + + Greenwich kerdā + + + + + + latn + + latn + + + , +   + % + + + - + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + #,##0.00 ¤ + + + {0} {1} + {0} {1} + {0} {1} + + + + Brazīlijas reals + Brazīlijas realin + Brazīlijas reals + Brazīlijas realai + + + Kīnas juāns + Kīnas juānan + Kīnas juāns + Kīnas juānai + + + eurō + eurō + eurō + eurō + + + punds sterlings + pundan sterlingan + punds sterlings + pundai sterlingai + + + Īndijas rūpija + Īndijas rūpijan + Īndijas rūpija + Īndijas rūpijas + + + Japānijas jāns + Japānijas jānan + Japānijas jāns + Japānijas jānai + + + Russis rūbels + Russis rūblin + Russis rūbels + Russis rūblai + + + APW dālars + APW dālaran + APW dālars + APW dālarai + + + niwaistā walūta + (niwaistā walūta) + (niwaistā walūtas aīnibi) + (niwaistā walūta) + + + + + + h:mm + + + h:mm:ss + + + m:ss + + + + + {0}, {1} + {0}, {1} + {0} be {1} + {0} be {1} + + + + + jā:j + ni:n + + + diff --git a/make/data/cldr/common/main/prg_001.xml b/make/data/cldr/common/main/prg_001.xml new file mode 100644 index 00000000000..c0be9e6d854 --- 
/dev/null +++ b/make/data/cldr/common/main/prg_001.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/ps.xml b/make/data/cldr/common/main/ps.xml index 79d0f77ddfd..91101430729 100644 --- a/make/data/cldr/common/main/ps.xml +++ b/make/data/cldr/common/main/ps.xml @@ -1,6 +1,6 @@ - + + + + + + + + Kʼicheʼ + + + Macedonia del Norte + + + + + left-to-right + top-to-bottom + + + + [a ä {aʼ} {bʼ} {ch} {chʼ} e {eʼ} i {iʼ} j k {kʼ} l m n o p q {qʼ} r s t {tz} {tzʼ} {tʼ} u {uʼ} v w x y] + [c d f g h ñ z] + [A Ä {Bʼ} {CH} {CHʼ} E I J K {Kʼ} L M N O P Q {Qʼ} R S T {TZ} {TZʼ} {Tʼ} U V W X Y] + + + + + Q + + + + diff --git a/make/data/cldr/common/main/quc_GT.xml b/make/data/cldr/common/main/quc_GT.xml new file mode 100644 index 00000000000..2c64b467b7d --- /dev/null +++ b/make/data/cldr/common/main/quc_GT.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/raj.xml b/make/data/cldr/common/main/raj.xml index ee969948643..6b4bb042787 100644 --- a/make/data/cldr/common/main/raj.xml +++ b/make/data/cldr/common/main/raj.xml @@ -1,6 +1,6 @@ - + + + + + + + + 𐴌𐴗𐴥𐴝𐴙𐴚𐴒𐴙𐴝 + + + + + right-to-left + + + + [\U00010D24\U00010D25\U00010D26\U00010D27 𐴀 𐴝 𐴞 𐴟 𐴠 𐴡 𐴁 𐴂 𐴃 𐴄 𐴅 𐴆 𐴇 𐴈 𐴉 𐴊 𐴋 𐴌 𐴍 𐴎 𐴏 𐴐 𐴑 𐴒 𐴓 𐴔 𐴕 𐴖 𐴗 𐴘 𐴙 𐴚 𐴛 𐴢 𐴣] + [ـ] + [\- ‑ , . % + 0𐴰 1𐴱 2𐴲 3𐴳 4𐴴 5𐴵 6𐴶 7𐴷 8𐴸 9𐴹] + [\- ‐ ‑ – — ، ؛ \: ! ؟ . 
… ' " ( ) \[ \]] + + + + + + + + HH:mm:ss zzzz + HHmmsszzzz + + + + + HH:mm:ss z + HHmmssz + + + + + HH:mm:ss + HHmmss + + + + + H:mm + Hmm + + + + + + + + 𐴎𐴡𐴁𐴝𐴕𐴝 + + + 𐴎𐴡𐴁𐴝𐴕𐴝 + + + 𐴎𐴡𐴁𐴝𐴕𐴝 + + + 𐴁𐴡𐴏𐴥𐴡𐴌 + 𐴒𐴠𐴓𐴊𐴠 𐴁𐴡𐴏𐴥𐴡𐴌 + 𐴀𐴠 𐴁𐴡𐴏𐴥𐴡𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴁𐴡𐴏𐴥𐴡𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴁𐴡𐴏𐴥𐴡𐴌 + + + {0} 𐴁𐴡𐴏𐴥𐴡𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴁𐴡𐴏𐴥𐴡𐴌 + 𐴒𐴠𐴓𐴊𐴠 𐴁𐴡𐴏𐴥𐴡𐴌 + 𐴀𐴠 𐴁𐴡𐴏𐴥𐴡𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴁𐴡𐴏𐴥𐴡𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴁𐴡𐴏𐴥𐴡𐴌 + + + {0} 𐴁𐴡𐴏𐴥𐴡𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴁𐴡𐴏𐴥𐴡𐴌 + 𐴒𐴠𐴓𐴊𐴠 𐴁𐴡𐴏𐴥𐴡𐴌 + 𐴀𐴠 𐴁𐴡𐴏𐴥𐴡𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴁𐴡𐴏𐴥𐴡𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴁𐴡𐴏𐴥𐴡𐴌 + + + {0} 𐴁𐴡𐴏𐴥𐴡𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴉𐴝𐴁𐴝 + 𐴀𐴝𐴈𐴥𐴠𐴌𐴞 𐴉𐴝𐴁𐴝 + 𐴀𐴠 𐴉𐴝𐴁𐴝 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴉𐴝𐴁𐴝 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴉𐴝𐴁𐴝 + + + {0} 𐴉𐴝𐴁𐴝 𐴀𐴝𐴒𐴠 + + + + 𐴉𐴝𐴁𐴝 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴉𐴝𐴁𐴝 + + + {0} 𐴉𐴝𐴁𐴝 𐴀𐴝𐴒𐴠 + + + + 𐴉𐴝𐴁𐴝 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴉𐴝𐴁𐴝 + + + {0} 𐴉𐴝𐴁𐴝 𐴀𐴝𐴒𐴠 + + + + 𐴔𐴝𐴐𐴢 + 𐴒𐴠𐴓𐴊𐴠 𐴔𐴥𐴝𐴐𐴢 + 𐴀𐴠 𐴔𐴥𐴝𐴐𐴢 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴔𐴥𐴝𐴐𐴢 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴔𐴥𐴝𐴐𐴢 + + + {0} 𐴔𐴝𐴐𐴢 𐴀𐴝𐴒𐴠 + + + + 𐴔𐴝𐴐𐴢 + 𐴒𐴠𐴓𐴊𐴠 𐴔𐴥𐴝𐴐𐴢 + 𐴀𐴠 𐴔𐴥𐴝𐴐𐴢 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴔𐴥𐴝𐴐𐴢 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴔𐴥𐴝𐴐𐴢 + + + {0} 𐴔𐴝𐴐𐴢 𐴀𐴝𐴒𐴠 + + + + 𐴔𐴝𐴐𐴢 + 𐴒𐴠𐴓𐴊𐴠 𐴔𐴥𐴝𐴐𐴢 + 𐴀𐴠 𐴔𐴥𐴝𐴐𐴢 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴔𐴥𐴝𐴐𐴢 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴔𐴥𐴝𐴐𐴢 + + + {0} 𐴔𐴝𐴐𐴢 𐴀𐴝𐴒𐴠 + + + + 𐴇𐴥𐴝𐴉𐴃𐴝 + 𐴒𐴠𐴓𐴊𐴠 𐴇𐴥𐴝𐴉𐴃𐴝 + 𐴀𐴠 𐴇𐴥𐴝𐴉𐴃𐴝 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴇𐴥𐴝𐴉𐴃𐴝 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴇𐴥𐴝𐴉𐴃𐴝 + + + {0} 𐴇𐴥𐴝𐴉𐴃𐴝 𐴀𐴝𐴒𐴠 + + 𐴇𐴥𐴉𐴃𐴤𐴝𐴌 {0} + + + 𐴇𐴥𐴝𐴉𐴃𐴝 + 𐴒𐴠𐴓𐴊𐴠 𐴇𐴥𐴝𐴉𐴃𐴝 + 𐴀𐴠 𐴇𐴥𐴝𐴉𐴃𐴝 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴇𐴥𐴝𐴉𐴃𐴝 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴇𐴥𐴝𐴉𐴃𐴝 + + + {0} 𐴇𐴥𐴝𐴉𐴃𐴝 𐴀𐴝𐴒𐴠 + + 𐴇𐴥𐴉𐴃𐴤𐴝𐴌 {0} + + + 𐴇𐴥𐴝𐴉𐴃𐴝 + 𐴒𐴠𐴓𐴊𐴠 𐴇𐴥𐴝𐴉𐴃𐴝 + 𐴀𐴠 𐴇𐴥𐴝𐴉𐴃𐴝 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴇𐴥𐴝𐴉𐴃𐴝 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴇𐴥𐴝𐴉𐴃𐴝 + + + {0} 𐴇𐴥𐴝𐴉𐴃𐴝 𐴀𐴝𐴒𐴠 + + 𐴇𐴥𐴉𐴃𐴤𐴝𐴌 {0} + + + 𐴔𐴝𐴐𐴤𐴡𐴌 𐴇𐴥𐴝𐴉𐴃𐴝 + + + 𐴔𐴝𐴐𐴤𐴡𐴌 𐴇𐴥𐴝𐴉𐴃𐴝 + + + 𐴔𐴝𐴐𐴤𐴡𐴌 𐴇𐴥𐴝𐴉𐴃𐴝 + + + 𐴊𐴞𐴕 + 𐴒𐴠𐴓𐴊𐴠 𐴈𐴝𐴙𐴓𐴧𐴝 + 𐴀𐴝𐴙𐴅𐴧𐴙𐴝 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴈𐴝𐴙𐴓𐴧𐴝 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴊𐴞𐴕 + + + {0} 𐴊𐴞𐴕 𐴀𐴝𐴒𐴠 + + + + 𐴊𐴞𐴕 + 𐴒𐴠𐴓𐴊𐴠 𐴈𐴝𐴙𐴓𐴧𐴝 + 𐴀𐴝𐴙𐴅𐴧𐴙𐴝 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴈𐴝𐴙𐴓𐴧𐴝 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴊𐴞𐴕 + + + {0} 𐴊𐴞𐴕 𐴀𐴝𐴒𐴠 + + + + 𐴊𐴞𐴕 + 𐴒𐴠𐴓𐴊𐴠 𐴈𐴝𐴙𐴓𐴧𐴝 + 𐴀𐴝𐴙𐴅𐴧𐴙𐴝 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴈𐴝𐴙𐴓𐴧𐴝 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴊𐴞𐴕 + + + {0} 𐴊𐴞𐴕 𐴀𐴝𐴒𐴠 + + + + 𐴁𐴡𐴏𐴥𐴡𐴌𐴡𐴌 𐴊𐴞𐴕 + + + 𐴁𐴡𐴏𐴥𐴡𐴌𐴡𐴌 𐴊𐴞𐴕 + + + 𐴁𐴡𐴏𐴥𐴡𐴌𐴡𐴌 𐴊𐴞𐴕 + + + 𐴇𐴥𐴝𐴉𐴃𐴝𐴌 𐴊𐴞𐴕 + + + 𐴇𐴥𐴝𐴉𐴃𐴝𐴌 𐴊𐴞𐴕 + + + 𐴇𐴥𐴝𐴉𐴃𐴝𐴌 𐴊𐴞𐴕 + + + 𐴔𐴝𐴐𐴤𐴡𐴌 𐴇𐴥𐴝𐴉𐴃𐴝𐴌 𐴊𐴞𐴕 + + + 𐴔𐴝𐴐𐴤𐴡𐴌 𐴇𐴥𐴝𐴉𐴃𐴝𐴌 𐴊𐴞𐴕 + + + 𐴔𐴝𐴐𐴤𐴡𐴌 𐴇𐴥𐴝𐴉𐴃𐴝𐴌 𐴊𐴞𐴕 + + + 𐴒𐴠𐴓𐴊𐴠 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 + 𐴀𐴠 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 + 
𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 + + + {0} 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 + 𐴀𐴠 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 + + + {0} 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 + 𐴀𐴠 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 + + + {0} 𐴌𐴦𐴡𐴘 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 + 𐴀𐴠 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 + + + {0} 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 + 𐴀𐴠 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 + + + {0} 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 + 𐴀𐴠 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 + + + {0} 𐴐𐴤𐴡𐴔 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 + 𐴀𐴠 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴝 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 + + + {0} 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 + 𐴀𐴠 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴝 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 + + + {0} 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 + 𐴀𐴠 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴝 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 + + + {0} 𐴔𐴡𐴚𐴒𐴡𐴓 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 + 𐴀𐴠 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 + + + {0} 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 + 𐴀𐴠 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 + + + {0} 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 + 𐴀𐴠 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 + + + {0} 𐴁𐴟𐴙𐴃 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 + 𐴀𐴠 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 + + + {0} 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 + 𐴀𐴠 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 + + + {0} 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 + 𐴀𐴠 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 + + + {0} 𐴁𐴞𐴐𐴤𐴞𐴃 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 + 𐴀𐴠 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴠 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 + + + {0} 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 + 𐴀𐴠 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴠 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 + + + {0} 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 + 𐴀𐴠 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴠 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 
𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 + + + {0} 𐴐𐴤𐴟𐴑𐴧𐴟𐴌 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 + 𐴀𐴠 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 + + + {0} 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 + 𐴀𐴠 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 + + + {0} 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴠𐴓𐴊𐴠 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 + 𐴀𐴠 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 + + + {0} 𐴐𐴤𐴡𐴕𐴞 𐴁𐴝𐴌 𐴀𐴝𐴒𐴠 + + + + 𐴊𐴞𐴕 / 𐴌𐴝𐴙𐴃𐴢 + + + 𐴊𐴞𐴕 / 𐴌𐴝𐴙𐴃𐴢 + + + 𐴊𐴞𐴕 / 𐴌𐴝𐴙𐴃𐴢 + + + 𐴒𐴤𐴡𐴕𐴄𐴤𐴝 + 𐴀𐴠 𐴒𐴤𐴡𐴕𐴄𐴤𐴝 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴒𐴤𐴡𐴕𐴄𐴤𐴝 + + + {0} 𐴒𐴤𐴡𐴕𐴄𐴤𐴝 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴤𐴡𐴕𐴄𐴤𐴝 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴒𐴤𐴡𐴕𐴄𐴤𐴝 + + + {0} 𐴒𐴤𐴡𐴕𐴄𐴤𐴝 𐴀𐴝𐴒𐴠 + + + + 𐴒𐴤𐴡𐴕𐴄𐴤𐴝 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴒𐴤𐴡𐴕𐴄𐴤𐴝 + + + {0} 𐴒𐴤𐴡𐴕𐴄𐴤𐴝 𐴀𐴝𐴒𐴠 + + + + 𐴔𐴞𐴕𐴥𐴡𐴄𐴢 + 𐴀𐴠 𐴔𐴞𐴕𐴥𐴡𐴄 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴔𐴞𐴕𐴥𐴡𐴄 + + + {0} 𐴔𐴞𐴕𐴥𐴡𐴄 𐴀𐴝𐴒𐴠 + + + + 𐴔𐴞𐴕𐴥𐴡𐴄𐴢 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴔𐴞𐴕𐴥𐴡𐴄 + + + {0} 𐴔𐴞𐴕𐴥𐴡𐴄 𐴀𐴝𐴒𐴠 + + + + 𐴔𐴞𐴕𐴥𐴡𐴄𐴢 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴔𐴞𐴕𐴥𐴡𐴄 + + + {0} 𐴔𐴞𐴕𐴥𐴡𐴄 𐴀𐴝𐴒𐴠 + + + + 𐴏𐴠𐴑𐴤𐴠𐴕 + 𐴀𐴠𐴈𐴥𐴡𐴕 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴏𐴠𐴑𐴤𐴠𐴕 + + + {0} 𐴏𐴠𐴑𐴤𐴠𐴕 𐴀𐴝𐴒𐴠 + + + + 𐴏𐴠𐴑𐴤𐴠𐴕 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴏𐴠𐴑𐴤𐴠𐴕 + + + {0} 𐴏𐴠𐴑𐴤𐴠𐴕 𐴀𐴝𐴒𐴠 + + + + 𐴏𐴠𐴑𐴤𐴠𐴕 + + 𐴀𐴝𐴘𐴧𐴥𐴠𐴌𐴊𐴧𐴠 {0} 𐴏𐴠𐴑𐴤𐴠𐴕 + + + {0} 𐴏𐴠𐴑𐴤𐴠𐴕 𐴀𐴝𐴒𐴠 + + + + 𐴀𐴠𐴓𐴝𐴑𐴝𐴀𐴞 𐴄𐴝𐴙𐴔 + + + 𐴀𐴠𐴓𐴝𐴑𐴝𐴀𐴞 𐴄𐴝𐴙𐴔 + + + 𐴀𐴠𐴓𐴝𐴑𐴝𐴀𐴞 𐴄𐴝𐴙𐴔 + + + + + und rhg + + diff --git a/make/data/cldr/common/main/rhg_Rohg.xml b/make/data/cldr/common/main/rhg_Rohg.xml new file mode 100644 index 00000000000..351938d4f6d --- /dev/null +++ b/make/data/cldr/common/main/rhg_Rohg.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + Lmuɣrib + Ṭurkya + + + amitrik + aglinziy + amirikaniy + + + tutlayt: {0} + tira: {0} + jjihet: {0} + + + + [a b c d ḍ e ɛ f g ɣ h ḥ i j k l m n p q r s ṣ t ṭ u w x y z ẓ] + [á à â ä ç é è ê ë î ï ñ o ó ô ö œ ß ú ù û ü v ʷ ÿ] + [A B C D Ḍ E Ɛ F G Ɣ H Ḥ I J K L M N P Q R S Ṣ T Ṭ U W X Y Z Ẓ] + [\- ‑ , . % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‐ ‑ – — , ; \: ! ? . 
… ' ‘ ’ " “ ” « » ( ) \[ \] \{ \} § @ * / \& # `] + + + + + + + + yennayer + febrayer + mars + yebril + mayyu + yunyu + yulyuz + ɣucct + cutenber + kṭuber + nuwember + dujember + + + + + + + lḥed + letnayen + ttlat + larbeɛ + lexmis + jjemɛa + ssebt + + + + + + + AM + PM + + + + + + h:mm a + dd/MM/y + d MMM y + + + + + + akud n {0} + akud unebdu n {0} + akud anaway n {0} + + + GMT + + + + + + + , +   + + + + + #,##0.00 ¤ + + + + + + + Yamina + + + Faḍma + Awraɣ + + + Ɛellal + + + Muḥemmed Amin + + + Sinbad + + + Käthe + + + Zäzilia + + + Ada Cornelia + + + diff --git a/make/data/cldr/common/main/rif_MA.xml b/make/data/cldr/common/main/rif_MA.xml new file mode 100644 index 00000000000..b099abe9ca4 --- /dev/null +++ b/make/data/cldr/common/main/rif_MA.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/rm.xml b/make/data/cldr/common/main/rm.xml index 64b205ffe8d..2918029f628 100644 --- a/make/data/cldr/common/main/rm.xml +++ b/make/data/cldr/common/main/rm.xml @@ -1,6 +1,6 @@ - @@ -2940,6 +2940,18 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ Macao + + Ciudad Juárez + + + Bahía de Banderas + + + Mérida + + + Cancún + Kathmandu @@ -2994,18 +3006,6 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ Ho Chi Minh - - Bahía de Banderas - - - Cancún - - - Ciudad Juárez - - - Mérida - @@ -3020,6 +3020,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + ٫ ٬ @@ -3056,6 +3059,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -3068,6 +3074,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -3086,6 +3095,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -3095,6 +3107,9 @@ Warnings: All cp values have U+FE0F characters removed. 
See /annotationsDerived/ + + + @@ -3131,12 +3146,33 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + + + + + + + + + + + + + @@ -3146,6 +3182,15 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -3164,9 +3209,18 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -3191,15 +3245,30 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + + + + + + + @@ -3212,6 +3281,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -3224,6 +3296,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -3242,12 +3317,21 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -3295,12 +3379,33 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + + + + + + + + + + + + + @@ -3310,6 +3415,15 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -3328,9 +3442,18 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -3355,15 +3478,30 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + + + + + + + @@ -3376,6 +3514,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -3388,6 +3529,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -3406,12 +3550,21 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -3440,12 +3593,33 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + + + + + + + + + + + + + @@ -3455,6 +3629,15 @@ Warnings: All cp values have U+FE0F characters removed. 
See /annotationsDerived/ + + + + + + + + + @@ -3473,9 +3656,18 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -3500,15 +3692,30 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + + + + + + + @@ -3525,6 +3732,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -3537,6 +3747,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -3555,12 +3768,21 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -3589,12 +3811,33 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + + + + + + + + + + + + + @@ -3604,6 +3847,15 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -3622,9 +3874,18 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -3649,15 +3910,30 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + + + + + + + @@ -3681,6 +3957,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -3693,6 +3972,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -3711,12 +3993,21 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -3779,12 +4070,33 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + + + + + + + + + + + + + @@ -3794,6 +4106,15 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -3812,9 +4133,18 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -3839,9 +4169,21 @@ Warnings: All cp values have U+FE0F characters removed. 
See /annotationsDerived/ + + + + + + + + + + + + ؋ @@ -4178,6 +4520,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -4190,6 +4535,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -4202,6 +4550,9 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + @@ -4220,12 +4571,21 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -4253,12 +4613,33 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + + + + + + + + + + + + + @@ -4268,6 +4649,15 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -4286,9 +4676,18 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + @@ -4313,9 +4712,21 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ + + + + + + + + + + + + {0}? {0}? @@ -5430,7 +5841,7 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ {0}. {0} {1} - {prefix} {given} {given2} {surname} {surname2} {suffix} + {title} {given} {given2} {surname} {surname2} {credentials} @@ -5484,7 +5895,7 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ - {surname} {surname2} {prefix} {given} {given2} {suffix} + {surname} {surname2} {title} {given} {given2} {credentials} @@ -5538,7 +5949,7 @@ Warnings: All cp values have U+FE0F characters removed. See /annotationsDerived/ - {surname} {surname2}, {prefix} {given} {given2} {suffix} + {surname} {surname2}, {title} {given} {given2} {credentials} @@ -5555,27 +5966,5 @@ Warnings: All cp values have U+FE0F characters removed. 
See /annotationsDerived/ - - ∅∅∅ - - - ∅∅∅ - ∅∅∅ - - - ∅∅∅ - ∅∅∅ - ∅∅∅ - - - ∅∅∅ - ∅∅∅ - ∅∅∅ - ∅∅∅ - ∅∅∅ - ∅∅∅ - ∅∅∅ - ∅∅∅ - diff --git a/make/data/cldr/common/main/ru.xml b/make/data/cldr/common/main/ru.xml index 2d59a1bfd96..bcb30f54805 100644 --- a/make/data/cldr/common/main/ru.xml +++ b/make/data/cldr/common/main/ru.xml @@ -1,6 +1,6 @@ - + + + + + + + + + + [\u093C \u0902 अ आ ई उ ए ओ क ग च ज ञ ट ड ण त द न प ब म य र ल व स ह ा ी \u0941 \u0947 ो] + [\- ‑ , . % ‰ + 0० 1१ 2२ 3३ 4४ 5५ 6६ 7७ 8८ 9९] + [\- ‑ , ; \: ! ? . ‘ ’ “ ” ( ) \[ \] \{ \} ॰] + + + + + + + + EEEE, d MMMM y + yMMMMEEEEd + + + + + d MMMM y + yMMMMd + + + + + d MMM y + yMMMd + + + + + d/M/yy + yyMd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + deva + + diff --git a/make/data/cldr/common/main/sat_Deva_IN.xml b/make/data/cldr/common/main/sat_Deva_IN.xml new file mode 100644 index 00000000000..9b343ae6dcf --- /dev/null +++ b/make/data/cldr/common/main/sat_Deva_IN.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Munnu + Àfrica + Uciania + Àfrica uccidintali + Mèrica cintrali + Àfrica urintali + Àfrica di menzu + Mèrichi + Asia urintali + Asia + Asia cintrali + Asia uccidintali + Europa + Europa urintali + Europa uccidintali + Mèrica latina + Antigua e Barbuda + Anguilla + Arbanìa + Antàrtidi + Argintina + Austria + Australia + Aruba + Barbados + Bergiu + Burkina Faso + Burgarìa + Benin + Bulivia + Brasili + Bahamas + Belize + Sbìzzira + Cili + Cina + Culommia + Cubba + Capu Virdi + Cechia + Ripùbblica Ceca + Girmania + Danimarca + Ripùbblica Duminicana + Ècuador + Estonia + Egittu + Sahara uccidintali + Spagna + Uniuni Eurupea + Zuna Euru + Francia + Regnu Unitu + RU + Guiana Francisi + Ghana + Grecia + Guiana + Honduras + Cruazzia + Haiti + Ungarìa + Ìsuli Canari + Irlanna + Ìnnia + Islanna + Italia + Giamàica + Giurdania + Giappuni + Camboggia + Kuwait + Lìbbanu + Liechtenstein + 
Libberia + Lituania + Lussimmurgu + Littonia + Libbia + Maroccu + Mònacu + Murdova + Mali + Mauritania + Marta + Mardivi + Mèssicu + Niger + Niggeria + Nicaragua + Paisi Vasci + Nurveggia + Pànama + Pirù + Pulonia + Purtugallu + Paraguay + Rumanìa + Serbia + Russia + Sbezzia + Sluvenia + Sluvacchia + San Marinu + Sènigal + El Salvador + Togu + Tunisìa + Turchìa + Nazziuna Uniti + Stati Uniti + SUM + Uruguay + Città dû Vaticanu + Accenti fausi + Bidirizziunali fausu + Kossovo + Riggiuni scanusciuta + + + Calannariu + Sistema di misura + Nùmmari + + + Calannariu buddista + Calannariu cinisi + Calannariu grigurianu + Calannariu ebbràicu + Calannariu ISO-8601 + Calannariu giappunisi + Sistema mètricu + Sistema mpiriali + Sistema miricanu + + + + [a à â b c d ḍ e è ê f g h i í î j l m n o ò ô p q r s t u ú û v z] + [ç đ é ë ə ḥ ì k š ù w x y] + [A B C D E F G H I J L M N O P Q R S T U V Z] + [\- ‐ ‑ – — , ; \: ! ? . … ' ‘ ’ " “ ” ( ) \[ \] § @ * / \& # † ‡ ′ ″] + + + + + + + + jin + fri + mar + apr + maj + giu + gnt + agu + sit + utt + nuv + dic + + + J + F + M + A + M + G + G + A + S + U + N + D + + + jinnaru + frivaru + marzu + aprili + maju + giugnu + giugnettu + agustu + sittèmmiru + uttòviru + nuvèmmiru + dicèmmiru + + + + + jin + fri + mar + apr + maj + giu + gnt + agu + sit + utt + nuv + dic + + + J + F + M + A + M + G + G + A + S + U + N + D + + + jinnaru + frivaru + marzu + aprili + maju + giugnu + giugnettu + agustu + sittèmmiru + uttòviru + nuvèmmiru + dicèmmiru + + + + + + + dumìnica + lunnidìa + martidìa + mercuridìa + jovidìa + vennidìa + sàbbatu + + + + + + + + annu + l’annu passatu + st’annu + l’annu vinturu + + nna n’annu + nna {0} anni + + + n’annu nnarrè + {0} anni nnarrè + + + + a. + + + a. + + + misi + + + m. + + + m. + + + simana + + + smn. + + + smn. + + + simana dû misi + + + smn. dû m. + + + smn. dû m. + + + jornu + ajeri + stjornu + dumani + + + j. + ajeri + oji + dumani + + + j. + ajeri + oji + dumani + + + jornu di l’annu + + + j. di l’a. 
+ + + j. di l’a. + + + jornu dâ simana + + + j. dâ smn. + + + j. dâ smn. + + + ura + + + u. + + + u. + + + minutu + + + min. + + + min. + + + sicunnu + ora + + + sic. + ora + + + sic. + ora + + + fusu urariu + + + fusu + + + fusu + + + + + + , + ; + % + + + - + ~ + E + × + + + NaN + : + + + , + . + ; + % + + + - + ~ + E + × + + + NaN + : + + + , + . + ; + % + + + - + ~ + E + × + + + NaN + : + + + , + . + ; + % + + + - + ~ + E + × + + + NaN + : + + + , + . + ; + % + + + - + ~ + E + × + + + NaN + : + + + , + . + ; + % + + + - + ~ + E + × + + + NaN + : + + + , + . + ; + % + + + - + ~ + E + × + + + NaN + : + + + : + + + : + + + : + + + : + + + : + + + , + . + ; + % + + + - + ~ + E + × + + + NaN + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + , + . + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + + + se:s + + + diff --git a/make/data/cldr/common/main/scn_IT.xml b/make/data/cldr/common/main/scn_IT.xml new file mode 100644 index 00000000000..fac9de008ce --- /dev/null +++ b/make/data/cldr/common/main/scn_IT.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/sd.xml b/make/data/cldr/common/main/sd.xml index c6b206780bb..f7f9b1b4658 100644 --- a/make/data/cldr/common/main/sd.xml +++ b/make/data/cldr/common/main/sd.xml @@ -1,6 +1,6 @@ - + + + + + + + + ئسپرانتو + کوردی خوارگ + + + + + right-to-left + top-to-bottom + + + + [ئ ا ب پ ت ج چ ح خ د ر ز ڕ ژ س ش ع غ ف ڤ ق ک گ ل ڵ م ن ھ ە و ۆ ۊ ی ێ] + [\u200C\u200D\u200E\u200F \u064B \u064C \u064E \u064F \u0650 \u0651 \u0652 \u0654 \u0670 ء آ أ ؤ إ ة ث ذ ص ض ط ظ ك ه ى ي] + [ئ ا ب پ ت ج چ ح خ د ر ز ڕ ژ س ش ع غ ف ڤ ق ک گ ل ڵ م ن ھ ە و ۆ ۊ ی ێ] + [\- ‐ ‑ ، ٫ ٬ ؛ \: ! ؟ . 
… ‹ › « » ( ) \[ \] * / \\] + {0}… + …{0} + {0}…{1} + {0} … + … {0} + {0} … {1} + ؟ + + + « + » + + + + + arab + + arab + + + diff --git a/make/data/cldr/common/main/sdh_IQ.xml b/make/data/cldr/common/main/sdh_IQ.xml new file mode 100644 index 00000000000..3cc33628c03 --- /dev/null +++ b/make/data/cldr/common/main/sdh_IQ.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/sdh_IR.xml b/make/data/cldr/common/main/sdh_IR.xml new file mode 100644 index 00000000000..bd375ccdb19 --- /dev/null +++ b/make/data/cldr/common/main/sdh_IR.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/se.xml b/make/data/cldr/common/main/se.xml index 5945cd6517f..98b959774c5 100644 --- a/make/data/cldr/common/main/se.xml +++ b/make/data/cldr/common/main/se.xml @@ -1,6 +1,6 @@ - + + + + + + + + တႆး + + + + + + မျၢၼ်ႇမႃႇ (မိူင်းမၢၼ်ႈ) + မိူင်းထႆး + + + + [\u200B း ႞ ႟ ၵ ၶ ၷ င ၸ ၺ ꧣ တ ထ ၻ ၼ ပ ၽ ၾ ၿ ꧤ မ ယ ျ ရ ြ လ ဝ \u103D \u1082 ႀ သ ႁ ဢ ႃ \u102D \u102E \u102F \u1030 ေ ႄ \uA9E5 \u103A ႇ ႈ ႉ ႊ] + [ꩡ ꩦ ꩧ ꩨ ꩩ ꩮ] + [ၵ ၶ ၷ ꧠ င ၸ ꩡ ꧡ ꧢ ၺ ꩦ ꩧ ꩨ ꩩ တ ထ ၻ ၼ ပ ၽ ၾ ၿ ꧤ မ ယ ရ လ ဝ ႀ သ ႁ ꩮ ဢ] + [႐ ႑ ႒ ႓ ႔ ႕ ႖ ႗ ႘ ႙] + [၊ ။ ‘ ’ “ ”] + + diff --git a/make/data/cldr/common/main/shn_MM.xml b/make/data/cldr/common/main/shn_MM.xml new file mode 100644 index 00000000000..3433e268995 --- /dev/null +++ b/make/data/cldr/common/main/shn_MM.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/shn_TH.xml b/make/data/cldr/common/main/shn_TH.xml new file mode 100644 index 00000000000..9a6819b9544 --- /dev/null +++ b/make/data/cldr/common/main/shn_TH.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/si.xml b/make/data/cldr/common/main/si.xml index 2bdaaa250c6..b1d7c38e9b4 100644 --- a/make/data/cldr/common/main/si.xml +++ b/make/data/cldr/common/main/si.xml @@ -1,6 +1,6 @@ - + + + + + + + + Sidaamu Afo + + + + + + Itiyoophiya + + + + [a b c d e f g h i j k l m n o p q r s t u v w x y z] + [A B C D E F G H I J K 
L M N O P Q R S T U V W X Y Z] + + + + + + + + EEEE, MMMM dd, y G + GyMMMMEEEEdd + + + + + dd MMMM y G + GyMMMMdd + + + + + dd-MMM-y G + GyMMMdd + + + + + dd/MM/yy GGGGG + GGGGGyyMMdd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + + Sam + San + Mak + Row + Ham + Arb + Qid + + + Sambata + Sanyo + Maakisanyo + Roowe + Hamuse + Arbe + Qidaame + + + + + S + S + M + R + H + A + Q + + + + + + + soodo + hawwaro + + + soodo + hawwaro + + + + + + YIA + YIG + + + + + + EEEE, MMMM dd, y + yMMMMEEEEdd + + + + + dd MMMM y + yMMMMdd + + + + + dd-MMM-y + yMMMdd + + + + + dd/MM/yy + yyMMdd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + + + + ¤#,##0.00 + + + + + + Br + + + + diff --git a/make/data/cldr/common/main/sid_ET.xml b/make/data/cldr/common/main/sid_ET.xml new file mode 100644 index 00000000000..f628f2fb25a --- /dev/null +++ b/make/data/cldr/common/main/sid_ET.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/sk.xml b/make/data/cldr/common/main/sk.xml index 36122aae93f..064b8c364ff 100644 --- a/make/data/cldr/common/main/sk.xml +++ b/make/data/cldr/common/main/sk.xml @@ -1,6 +1,6 @@ - + + + + + + + + Åarjelsaemien gïele + + + + + left-to-right + top-to-bottom + + + + [a å ä b d e f g h i j k l m n o ö p r s t u v y] + [c ï q w x z] + [A Å Ä B D E F G H I J K L M N O Ö P R S T U V Y] + + diff --git a/make/data/cldr/common/main/sma_NO.xml b/make/data/cldr/common/main/sma_NO.xml new file mode 100644 index 00000000000..30302dbe2e1 --- /dev/null +++ b/make/data/cldr/common/main/sma_NO.xml @@ -0,0 +1,18 @@ + + + + + + + + + + + [a å æ b d e f g h i j k l m n o ø p r s t u v y] + [A Å Æ B D E F G H I J K L M N O Ø P R S T U V Y] + + diff --git a/make/data/cldr/common/main/sma_SE.xml b/make/data/cldr/common/main/sma_SE.xml new file mode 100644 index 
00000000000..183903b78aa --- /dev/null +++ b/make/data/cldr/common/main/sma_SE.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/smj.xml b/make/data/cldr/common/main/smj.xml new file mode 100644 index 00000000000..2d7fc942467 --- /dev/null +++ b/make/data/cldr/common/main/smj.xml @@ -0,0 +1,29 @@ + + + + + + + + + + + julevsámegiella + + + + + left-to-right + top-to-bottom + + + + [a á å ä b d e f g h i j k l m n ń o p r s t u v] + [c ñ ö q w x y z] + [A Á Å Ä B D E F G H I J K L M N Ń O P R S T U V] + + diff --git a/make/data/cldr/common/main/smj_NO.xml b/make/data/cldr/common/main/smj_NO.xml new file mode 100644 index 00000000000..a2dc043987b --- /dev/null +++ b/make/data/cldr/common/main/smj_NO.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/smj_SE.xml b/make/data/cldr/common/main/smj_SE.xml new file mode 100644 index 00000000000..a4187d0be90 --- /dev/null +++ b/make/data/cldr/common/main/smj_SE.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/smn.xml b/make/data/cldr/common/main/smn.xml index 46c39a7a2ff..a60ec691edd 100644 --- a/make/data/cldr/common/main/smn.xml +++ b/make/data/cldr/common/main/smn.xml @@ -1,6 +1,6 @@ - + + + + + + + + siSwati + + + eSwatini + + + + [a b c d e f g h i j k l m n o p q r s t u v w x y z] + [A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + + + + + + + + + + + + + + Bhi + Van + Vol + Mab + Nkh + Nhl + Kho + Ngc + Nyo + Mph + Lwe + Ngo + + + Bhimbidvwane + iNdlovana + iNdlovu-lenkhulu + Mabasa + iNkhwekhweti + iNhlaba + Kholwane + iNgci + iNyoni + iMphala + Lweti + iNgongoni + + + + + + + Son + Mso + Bil + Tsa + Ne + Hla + Mgc + + + Lisontfo + uMsombuluko + Lesibili + Lesitsatfu + Lesine + Lesihlanu + uMgcibelo + + + + + + + + + , +   + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + ¤#,##0.00 + + + + + + E + + + R + + + + diff --git a/make/data/cldr/common/main/ss_SZ.xml 
b/make/data/cldr/common/main/ss_SZ.xml new file mode 100644 index 00000000000..fa930615ead --- /dev/null +++ b/make/data/cldr/common/main/ss_SZ.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + diff --git a/make/data/cldr/common/main/ss_ZA.xml b/make/data/cldr/common/main/ss_ZA.xml new file mode 100644 index 00000000000..9e512c4d8b1 --- /dev/null +++ b/make/data/cldr/common/main/ss_ZA.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/ssy.xml b/make/data/cldr/common/main/ssy.xml new file mode 100644 index 00000000000..9d816ee2f4f --- /dev/null +++ b/make/data/cldr/common/main/ssy.xml @@ -0,0 +1,235 @@ + + + + + + + + + + + Qafar + Saho + + + + + + Yabuuti + Eretria + Otobbia + + + + [a b t s e c k x i d q r f g o l m n u w h y] + [j p v z] + [A B T S E C K X I D Q R F G O L M N U W H Y] + + + + + + + + EEEE, MMMM dd, y G + GyMMMMEEEEdd + + + + + dd MMMM y G + GyMMMMdd + + + + + dd-MMM-y G + GyMMMdd + + + + + dd/MM/yy GGGGG + GGGGGyyMMdd + + + + + + + + + Qun + Nah + Cig + Agd + Cax + Qas + Qad + Leq + Way + Dit + Xim + Kax + + + Qunxa Garablu + Kudo + Ciggilta Kudo + Agda Baxis + Caxah Alsa + Qasa Dirri + Qado Dirri + Liiqen + Waysu + Diteli + Ximoli + Kaxxa Garablu + + + + + Q + N + C + A + C + Q + Q + L + W + D + X + K + + + + + + + Nab + San + Sal + Rab + Cam + Jum + Qun + + + Naba Sambat + Sani + Salus + Rabuq + Camus + Jumqata + Qunxa Sambat + + + + + N + S + S + R + C + J + Q + + + + + + + saaku + carra + + + saaku + carra + + + + + + Yaasuusuk Duma + Yaasuusuk Wadir + + + YD + YW + + + + + + EEEE, MMMM dd, y + yMMMMEEEEdd + + + + + dd MMMM y + yMMMMdd + + + + + dd-MMM-y + yMMMdd + + + + + dd/MM/yy + yyMMdd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + + + + ¤#,##0.00 + + + + + + Nfk + + + + diff --git 
a/make/data/cldr/common/main/ssy_ER.xml b/make/data/cldr/common/main/ssy_ER.xml new file mode 100644 index 00000000000..33924045617 --- /dev/null +++ b/make/data/cldr/common/main/ssy_ER.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/st.xml b/make/data/cldr/common/main/st.xml new file mode 100644 index 00000000000..944fb014792 --- /dev/null +++ b/make/data/cldr/common/main/st.xml @@ -0,0 +1,650 @@ + + + + + + + + + + + Seburu + Se-amhari + Se-arab + Se-azerbaijani + Se-belarusia + Se-bulgaria + Se-bengali + Breton + Se-bosnia + Se-catalia + Se-czech + Se-welsh + Se-dutch + Se-jeremane + Se-greek + Senyesemane + Se-esperanto + Sespain + Se-estonia + Se-basque + Se-persia + Se-finnish + Se-tagalog + Se-foroese + Se-french + Se-frisia + Se-irish + Se-scots gaelic + Se-galicia + Guarani + Se-gujarati + Se-hebrew + Se-hindi + Se-croatia + Se-hungaria + Se-interlingua + Se-indonesia + Se-iceland + Se-tariana + Se-japane + Se-javane + Se-geogia + Se-kannada + Se-korea + Kurdish + Kyrgyz + Se-latino + Se-Lithuano + Se-masedonia + Se-malayalam + Se-marathi + Se-malay + Se-maltese + Se-nepali + Dutch + Se-norway (Nynorsk) + Se-norway + Se-occitan + Oriya + Se-punjabi + Se-polish + Pashto + Se-portugal + Seputukesi (sa Brazil) + Se-portugal (Portugal) + Se-romania + Se-rushia + Serbo-Croatian + Se-sinhali + Se-slovak + Se-slovania + Se-albanian + Se-serbian + Sesotho + Se-sundanese + Se-sweden + Se-swahili + Se-tamil + Se-telegu + Se-thai + Se-tigrinya + Turkmen + Se-klingon + Se-theki + Twi + Se-ukrania + Se-urdu + Se-uzbek + Se-vietnam + se Xhosa + Yiddish + se Zulu + + + + [a b d e f g h i j k l m n o p q r s t u w y] + [c v x z] + [A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + + + + + + + + + + + + + + Phe + Kol + Ube + Mme + Mot + Jan + Upu + Pha + Leo + Mph + Pun + Tsh + + + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + + + Phesekgong + Hlakola + Hlakubele + Mmese + Motsheanong + Phupjane + Phupu + Phata + Leotshe + 
Mphalane + Pundungwane + Tshitwe + + + + + Phe + Kol + Ube + Mme + Mot + Jan + Upu + Pha + Leo + Mph + Pun + Tsh + + + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + + + Phesekgong + Hlakola + Hlakubele + Mmese + Motsheanong + Phupjane + Phupu + Phata + Leotshe + Mphalane + Pundungwane + Tshitwe + + + + + + + Son + Mma + Bed + Rar + Ne + Hla + Moq + + + S + M + T + W + T + F + S + + + Son + Mma + Bed + Rar + Ne + Hla + Moq + + + Sontaha + Mmantaha + Labobedi + Laboraru + Labone + Labohlane + Moqebelo + + + + + Son + Mma + Bed + Rar + Ne + Hla + Moq + + + S + M + T + W + T + F + S + + + Son + Mma + Bed + Rar + Ne + Hla + Moq + + + Sontaha + Mmantaha + Labobedi + Laboraru + Labone + Labohlane + Moqebelo + + + + + + + Q1 + Q2 + Q3 + Q4 + + + 1 + 2 + 3 + 4 + + + Q1 + Q2 + Q3 + Q4 + + + + + Q1 + Q2 + Q3 + Q4 + + + 1 + 2 + 3 + 4 + + + Q1 + Q2 + Q3 + Q4 + + + + + + + AM + PM + + + AM + PM + + + AM + PM + + + + + AM + PM + + + AM + PM + + + AM + PM + + + + + + BCE + CE + + + BCE + CE + + + + + + y MMMM d, EEEE + yMMMMEEEEd + + + + + y MMMM d + yMMMMd + + + + + y MMM d + yMMMd + + + + + y-MM-dd + yMMdd + + + + + + + HH:mm:ss zzzz + HHmmsszzzz + + + + + HH:mm:ss z + HHmmssz + + + + + HH:mm:ss + HHmmss + + + + + HH:mm + HHmm + + + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + d + ccc + d, E + E h:mm a + E HH:mm + E h:mm:ss a + E HH:mm:ss + G y + G y MMM + G y MMM d + G y MMM d, E + h a + HH + h:mm a + HH:mm + h:mm:ss a + HH:mm:ss + h:mm:ss a v + HH:mm:ss v + h:mm a v + HH:mm v + L + MM-dd + MM-dd, E + LLL + MMM d + MMM d, E + MMMM d + 'week' W 'of' MMM + 'week' W 'of' MMM + mm:ss + y + y-MM + y-MM-dd + y-MM-dd, E + y MMM + y MMM d + y MMM d, E + y MMMM + y QQQ + y QQQQ + 'week' w 'of' Y + 'week' w 'of' Y + + + {0} {1} + + + {0} – {1} + + d–d + + + h a – h a + h–h a + + + HH–HH + + + h:mm a – h:mm a + h:mm–h:mm a + h:mm–h:mm a + + + HH:mm–HH:mm + HH:mm–HH:mm + + + h:mm a – h:mm a v + h:mm–h:mm a v + h:mm–h:mm a v + + + HH:mm–HH:mm v + 
HH:mm–HH:mm v + + + h a – h a v + h–h a v + + + HH–HH v + + + MM–MM + + + MM-dd – MM-dd + MM-dd – MM-dd + + + MM-dd, E – MM-dd, E + MM-dd, E – MM-dd, E + + + LLL–LLL + + + MMM d–d + MMM d – MMM d + + + MMM d, E – MMM d, E + MMM d, E – MMM d, E + + + y–y + + + y-MM – y-MM + y-MM – y-MM + + + y-MM-dd – y-MM-dd + y-MM-dd – y-MM-dd + y-MM-dd – y-MM-dd + + + y-MM-dd, E – y-MM-dd, E + y-MM-dd, E – y-MM-dd, E + y-MM-dd, E – y-MM-dd, E + + + y MMM–MMM + y MMM – y MMM + + + y MMM d–d + y MMM d – MMM d + y MMM d – y MMM d + + + y MMM d, E – MMM d, E + y MMM d, E – MMM d, E + y MMM d, E – y MMM d, E + + + y MMMM–MMMM + y MMMM – y MMMM + + + + + + + + 1 + + , +   + % + + + - + E + × + + + NaN + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + ¤#,##0.00 + + + ¤#,##0.00 + + + {0} {1} + {0} {1} + + + + R + + + + ≥{0} + {0}–{1} + + + diff --git a/make/data/cldr/common/main/st_LS.xml b/make/data/cldr/common/main/st_LS.xml new file mode 100644 index 00000000000..5b5f213072e --- /dev/null +++ b/make/data/cldr/common/main/st_LS.xml @@ -0,0 +1,53 @@ + + + + + + + + + + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + + + M + + + + diff --git a/make/data/cldr/common/main/st_ZA.xml b/make/data/cldr/common/main/st_ZA.xml new file mode 100644 index 00000000000..8457779ae89 --- /dev/null +++ b/make/data/cldr/common/main/st_ZA.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/su.xml b/make/data/cldr/common/main/su.xml index 15dbcc9a319..59e2115b59b 100644 --- a/make/data/cldr/common/main/su.xml +++ b/make/data/cldr/common/main/su.xml @@ -1,6 +1,6 @@ - + + + + + + + + {0}، {1} + + + ܐܒܟܐܙܝܬ + ܐܡܪܢܝܬ + ܐܪܐܓܘܢܝܬ + ܥܪܒܝܬ + ܥܪܒܝܬ ܪܘܫܡܝܐ ܚܕܬܐ + ܐܪܐܦܗܝܬ + ܐܙܪܒܝܓܐܢܝܬ + ܐܙܪܝ + ܒܢܓܐܠܝܐ + ܩܪܕܝܬ ܩܢܛܪܘܢܝܐ + ܩܘܪܕܝܬ ܡܨܥܝܐ + ܩܘܪܕܝܬ ܣܘܪܢܝ + ܐܠܡܢܝܐ + ܝܘܢܐܝܬ + ܐܢܓܠܝܣ + ܐܢܓܠܝܣ (ܐܡܝܪܟܐ) + ܣܦܢܝܝܐ + ܦܪܣܝܬ + ܦܘܠܐܗܝܬ + ܦܝܢܠܢܕܝܬ + ܦܝܠܝܦܝܢܝܬ + ܦܘܢܝܬ + ܦܪܢܣܝܬ + ܓܐܝܬ + ܓܠܝܩܝܬ + 
ܓܘܓܐܪܝܬ + ܥܒܪܐܝܬ + ܗܢܕܝܐ + ܐܪܡܢܝܬ + ܐܝܛܠܝܬ + ܓܘܪܓܝܐܝܬ + ܩܘܪܕܝܬ + ܠܬܝܢܝܬ + ܓܢܕܝܬ + ܠܫ̈ܢܐ ܦܖ̈ܝܫܐ + ܐܠܡܢܝܐ ܠܐܠܬܚܬ + ܗܘܠܢܕܐ ܠܐܠܬܚܬ + ܗܘܠܢܕܝܬ + ܢܘܪܒܝܓܐܝܬ + ܐܘܪܘܡܘܐܝܬ + ܦܝܓܝܢܝܬ + ܦܘܠܢܕܐܝܬ + ܦܘܪܛܘܓܠܐܝܬ + ܪܘܡܢܐܝܬ + ܐܘܪܘܣܢܝܬ + ܣܟܘܬܠܢܕܐܝܬ + ܐܠܒܢܝܬ + ܣܘܝܕܐܝܬ + ܣܘܐܗܝܠܐܝܬ + ܣܘܪܝܝܐ + ܬܘܪܟܝܬ + ܐܘܟܪܐܝܢܐܝܬ + ܠܫܢܐ ܠܐ ܝܕܝܥܐ + ܐܘܪܕܘܝܬ + ܒܝܬܢܐܡܐܝܬ + ܝܕܝܬܝܬ + ܨܝܢܝܬ + ܨܝܢܝܬ (ܡܐܢܕܘܪܝܐ) + ܨܝܢܝܬ (ܦܫܝܛܐ) + ܨܝܢܝܬ (ܡܐܢܕܘܪܝܐ ܦܫܝܛܐ) + ܨܝܢܐܝܬ + + + + + + + + + + + + + + + + + ܬܐܒܝܠ + ܐܦܪܝܩܐ + ܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + ܐܡܪܝܟܐ ܬܝܡܢܝܬܐ + ܐܘܩܝܢܘܣܝܐ + ܐܦܪܝܩܐ ܡܥܪܒܝܬܐ + ܐܡܪܝܟܐ ܡܨܥܝܬܐ + ܐܦܪܝܩܐ ܡܕܢܚܝܬܐ + ܐܦܪܝܩܐ ܓܪܒܝܝܬܐ + ܐܦܪܝܩܐ ܡܨܥܝܬܐ + ܐܦܪܝܩܐ ܬܝܡܢܝܬܐ + ܐܡܪ̈ܝܟܐ + ܓܪܒܝܐ ܐܡܪܝܟܐ + ܟܐܪܝܒܝܢ + ܐܣܝܐ ܡܕܢܚܝܬܐ + ܐܣܝܐ ܬܝܡܢܝܬܐ + ܬܝܡܢ ܡܕܢܚ ܐܣܝܐ + ܐܘܪܘܦܐ ܬܝܡܢܝܬܐ + ܐܘܣܛܪܐܠܐܣܝܐ + ܡܝܠܐܢܝܣܝܐ + ܡܝܟܪܘܢܝܙܝܐ + ܦܘܠܢܝܣܝܐ + ܐܣܝܐ + ܐܣܝܐ ܡܨܥܝܬܐ + ܐܣܝܐ ܡܥܪܒܝܬܐ + ܐܘܪܘܦܐ + ܐܘܪܘܦܐ ܡܕܢܚܝܬܐ + ܐܘܪܘܦܐ ܓܪܒܝܝܬܐ + ܐܘܪܘܦܐ ܡܥܪܒ݂ܝܬܐ + ܐܦܪܝܩܐ ܨܚܪܐ ܬܝܡܢܝܬܐ + ܐܡܪܝܟܐ ܠܬܝܢܝܬܐ + ܓܙܪܬܐ ܕܐܣܝܢܫܘܢ + ܐܢܕܘܪܐ + ܐܡܝܪ̈ܘܬܐ ܡܚܝܕ̈ܬܐ ܥܪ̈ܒܝܐ + ܐܦܓܐܢܣܬܐܢ + ܐܢܬܝܓܘܐ ܘܒܐܪܒܘܕܐ + ܐܢܓܘܝܠܐ + ܐܠܒܢܝܐ + ܐܪܡܢܝܐ + ܐܢܓܘܠܐ + ܐܢܬܪܬܝܟܐ + ܐܪܓܢܬܝܢܐ + ܣܡܘܐ ܐܡܝܖ̈ܟܝܐ + ܐܘܣܛܪܝܐ + ܐܘܣܬܪܠܝܐ + ܐܪܘܒܐ + ܓܙܝܖ̈ܐ ܕܐܠܐܢܕ + ܐܙܪܒܝܓܐܢ + ܒܘܣܢܐ ܘܗܪܬܣܓܘܒܝܢܐ + ܒܪܒܐܕܘܣ + ܒܢܓܠܐܕܝܫ + ܒܠܓܝܩܐ + ܒܘܪܩܝܢܐ ܦܐܣܘ + ܒܘܠܓܐܪܝܐ + ܒܚܪܝܢ + ܒܘܪܘܢܕܝ + ܒܢܝܢ + ܡܪܬܝ ܒܪ ܬܘܠܡܝ + ܒܪܡܘܕܐ + ܒܪܘܢܐܝ + ܒܘܠܝܒܝܐ + ܟܐܪܝܒܝܢ ܕܢܝܬܝܪܠܐܢܕܣ + ܒܪܐܙܝܠ + ܒܗܐܡܣ + ܒܘܬܐܢ + ܓܙܪܬܐ ܕܒܘܒܝܬ + ܒܘܛܣܘܐܢܐ + ܒܠܐܪܘܣ + ܒܠܝܙ + ܟܢܕܐ + ܓܙܝܖ̈ܐ ܕܟܘܟܘܣ + ܟܘܢܓܘ - ܟܝܢܫܐܣܐ + ܩܘܛܢܝܘܬܐ ܕܝܡܘܩܪܛܝܬܐ ܕܟܘܢܓܘ + ܩܘܛܢܝܘܬܐ ܕܐܦܪܝܩܐ ܡܨܥܝܬܐ + ܟܘܢܓܘ - ܒܪܐܙܐܒܝܠ + ܩܘܛܢܝܘܬܐ ܕܟܘܢܓܘ + ܣܘܝܣܪܐ + ܩܘܛ ܕܝܒܘܐܪ + ܣܘܦܐ ܕܓܪܡܦܝܠܐ + ܓܙܪܬܐ ܟܘܟ + ܬܫܝܠܝ + ܟܐܡܪܘܢ + ܨܝܢ + ܟܘܠܘܡܒܝܐ + ܓܙܪܬܐ ܕܟܠܝܦܝܪܬܘܢ + ܟܘܣܬܐ ܪܝܩܐ + ܟܘܒܐ + ܟܐܦ ܒܝܪܕܝ (ܪܝܫܐ ܝܘܪܩܐ) + ܟܘܪܐܟܘ + ܓܙܪܬܐ ܕܟܪܝܣܬܡܣ + ܩܘܦܪܘܣ + ܬܫܝܟܝܐ + ܬܫܝܟ + ܐܠܡܢܝܐ + ܕܐܝܓܘ ܓܪܣܝܐ + ܓܝܒܘܛܝ + ܕܐܢܡܐܪܩ + ܕܘܡܝܢܝܩܐ + ܩܘܛܢܝܘܬܐ ܕܘܡܝܢܝܩܐܢܝܬܐ + ܓܙܐܪ + ܟܘܝܛܐ ܘܡܝܠܝܐ + ܐܩܘܐܕܘܪ + ܐܣܛܘܢܝܐ + ܡܨܪܝܢ + ܨܚܪܐ ܡܥܪܒܝܬܐ + ܐܪܬܪܝܐ + ܐܣܦܢܝܐ + ܟܘܫ + ܚܘܝܕܐ ܐܘܪܘܦܝܐ + ܩܠܝܡܐ ܕܐܘܪܘ + ܦܝܢܠܢܕ + ܦܝܓܝ + ܓܙܪܬܐ ܕܦܠܟܠܢܕ + ܓܙܪܬܐ ܕܡܠܒܢܐܣ + ܐܬܪܘܬܐ ܦܕܪܠܝܐ ܕܡܝܩܪܘܢܝܣܝܐ + ܓܙܝܖ̈ܐ ܕܦܪܘ + ܦܪܢܣܐ + ܓܒܘܢ + ܡܠܟܘܬܐ ܡܚܝܕܬܐ + 
ܓܪܝܢܐܕܐ + ܓܘܪܓܝܐ + ܓܘܝܐܢܐ ܦܪܢܣܝܬܐ + ܓܘܪܢܙܝ + ܓܐܢܐ + ܓܒܪܠܛܪ + ܓܪܝܢܠܢܕ + ܓܡܒܝܐ + ܓܝܢܝܐ + ܓܘܐܕܘܠܘܦܐܝ + ܓܝܢܝܐ ܫܘܝܬܐ + ܝܘܢ + ܓܙܝܖ̈ܐ ܕܓܘܪܓܝܐ ܘܣܐܢܕܘܝܟ ܬܝܡܢܝ̈ܐ + ܓܘܐܬܝܡܐܠܐ + ܓܘܐܡ + ܓܝܢܝܐ ܒܝܣܐܘ + ܓܘܝܐܢܐ + ܗܘܢܓ ܟܘܢܓ + ܓܙܝܪ̈ܐ ܕܗܪܕ ܘܡܟܕܘܢܠܕ + ܗܘܢܕܘܪܣ + ܩܪܘܐܛܝܐ + ܗܐܝܬܝ + ܡܓܪ + ܓܙܝܖ̈ܐ ܕܟܐܢܪܝ + ܐܝܢܕܘܢܝܣܝܐ + ܐܝܪܠܢܕ + ܐܝܣܪܐܝܠ + ܓܙܪܬܐ ܕܡܐܢ + ܗܢܕܘ + ܩܠܝܡܐ ܕܒܪܝܛܢܝܐ ܓܘ ܐܘܩܝܢܘܣ ܗܢܕܘܝܐ + ܥܝܪܩ + ܐܝܪܐܢ + ܐܝܣܠܢܕ + ܐܝܛܠܝܐ + ܓܝܪܙܝ + ܓܡܝܟܐ + ܐܘܪܕܘܢ + ܝܦܢ + ܩܝܢܝܐ + ܩܝܪܓܝܙܣܬܐܢ + ܟܡܒܘܕܝܐ + ܟܝܪܝܒܬܝ + ܓܙܪܬܐ ܕܩܡܪ + ܣܐܢܬ ܟܝܬܣ ܘܢܝܒܝܣ + ܟܘܪܝܐ ܕܓܪܒܝܐ + ܟܘܪܝܐ ܕܬܝܡܢܝܐ + ܟܘܝܬ + ܓܙܝܖ̈ܐ ܕܟܐܝܡܐܢ + ܟܙܩܣܬܐܢ + ܠܐܘܣ + ܠܒܢܢ + ܡܪܬܝ ܠܘܫܐ + ܠܝܟܛܢܫܛܝܢ + ܫܪܝ ܠܐܢܟܐ + ܠܝܒܝܪܝܐ + ܠܣܘܛܘ + ܠܬܘܢܝܐ + ܠܘܟܣܡܒܘܪܓ + ܠܐܛܒܝܐ + ܠܘܒܐ + ܡܓܪܒ + ܡܘܢܐܩܘ + ܡܘܠܕܘܒܐ + ܡܘܢܛܝܢܝܓܪܘ + ܣܐܢܬ ܡܐܪܬܝܢ + ܡܕܓܣܩܪ + ܓܙܪܬܐ ܡܐܪܫܐܠ + ܓܪܒܝ ܡܩܕܘܢܝܐ + ܡܐܠܝ + ܡܝܐܢܡܐܪ (ܒܘܪܡܐ) + ܡܘܢܓܘܠܝܐ + ܓܙܝܖ̈ܐ ܕܡܪܝܢܐ ܓܪܒܝܐ + ܡܐܪܬܝܢܝܩ + ܡܘܪܝܛܢܝܐ + ܡܘܢܣܝܪܐܬ + ܡܝܠܛܐ + ܡܘܪܝܛܝܘܣ + ܓܙܪܬܐ ܡܐܠܕܝܒܝܬܐ + ܡܠܐܘܝ + ܡܟܣܝܟܘ + ܡܠܝܙܝܐ + ܡܘܙܡܒܝܩ + ܢܡܝܒܝܐ + ܢܝܘ ܟܠܝܕܘܢܝܐ + ܢܝܓܪ + ܓܙܪܬܐ ܕܢܘܪܦܠܟ + ܢܝܓܝܪܝܐ + ܢܝܟܪܐܓܘܐ + ܗܘܠܢܕܐ + ܢܘܪܒܝܓ + ܢܝܦܐܠ + ܢܐܘܪܘ + ܢܘܥ + ܢܝܘ ܙܝܠܢܕ + ܐܬܝܐܐܪܐܘ ܢܝܘ ܙܝܠܢܕ + ܥܘܡܐܢ + ܦܢܡܐ + ܦܝܪܘ + ܦܘܠܝܢܝܣܝܐ ܦܪܢܣܝܐ + ܦܐܦܘܐ ܓܝܢܝܐ ܚܕܬܐ + ܦܝܠܝܦܝܢܝܐ + ܦܐܟܣܬܐܢ + ܦܘܠܢܕ + ܣܐܢܬ ܦܝܥܪ ܘܡܩܘܠܘܢ + ܓܙܝܪ̈ܐ ܕܦܝܬܟܐܝܪܢ + ܦܘܐܪܛܘ ܪܝܩܘ + ܐܬܖ̈ܘܬܐ ܕܦܠܣܛܝܢ + ܦܠܣܛܝܢ + ܦܘܪܛܘܓܠ + ܦܠܐܘ + ܦܪܓܘܐܝ + ܩܛܪ + ܐܘܩܝܢܘܣܝܐ ܒܪܝܬܐ + ܪܝܘܢܝܘܢ + ܪܘܡܢܝܐ + ܣܪܒܝܐ + ܪܘܣܝܐ + ܪܘܐܢܕܐ + ܣܥܘܕܝܐ + ܓܙܪܬܐ ܕܫܠܝܡܘܢ + ܣܐܝܫܝܠ + ܣܘܕܐܢ + ܣܘܝܕ + ܣܝܢܓܐܦܘܪ + ܡܪܬܝ ܗܝܠܝܢܐ + ܣܠܘܒܢܝܐ + ܣܒܠܕܒܪܕ ܘܓܐܢ ܡܐܝܝܢ + ܣܠܘܒܩܝܐ + ܣܝܝܪܐ ܠܝܐܘܢܝ + ܣܢ ܡܪܝܢܘ + ܣܢܓܐܠ + ܨܘܡܐܠ + ܣܘܪܝܢܐܡ + ܬܝܡܢ ܣܘܕܐܢ + ܣܐܘ ܛܘܡܝ ܘܦܪܝܢܣܝܦܝ + ܐܠ ܣܠܒܐܕܘܪ + ܣܢܬ ܡܐܪܬܝܢ + ܣܘܪܝܐ + ܐܣܘܐܛܝܢܝ + ܣܘܐܙܝܠܢܕ + ܬܪܝܣܬܢ ܕܟܘܢܗܐ + ܓܙܝܖ̈ܐ ܕܬܘܪܟܣ ܘܟܐܝܟܘܣ + ܬܫܐܕ + ܩܠܝܡ̈ܐ ܕܦܪܢܣܐ ܬܝܡܢܝܬܐ + ܬܘܓܘ + ܬܐܝܠܢܕ + ܬܐܓܝܟܣܬܐܢ + ܬܘܟܝܠܐܘ + ܬܝܡܘܪ-ܠܣܬܝ + ܬܝܡܘܪ ܡܕܢܚܐ + ܬܘܪܟܡܢܣܬܐܢ + ܬܘܢܣ + ܬܘܢܓܐ + ܬܘܪܟܝܐ + ܬܪܝܢܝܕܐܕ ܘܬܘܒܐܓܘ + ܬܘܒܐܠܘ + ܬܐܝܘܐܢ + ܛܢܙܢܝܐ + ܐܘܩܪܐܝܢܐ + ܐܘܓܢܕܐ + ܓܙܝܪ̈ܐ ܪ̈ܚܝܩܐ ܕܐܘܚܕ̈ܢܐ ܡܚܝܕ̈ܐ + ܐܡ̈ܘܬܐ ܡܚܝ̈ܕܬܐ + ܐܘܚܕ̈ܢܐ ܡܚܝܕ̈ܐ + ܐܘܪܘܓܘܐܝ + ܐܘܙܒܟܣܬܐܢ + ܡܕܝܢܬܐ ܕܘܛܝܩܢ + ܣܐܢܬ ܒܝܢܣܝܢܬ ܘܓܪܝܢܐܕܝܢܐܣ + ܒܢܙܘܝܠܐ + ܓܙܖ̈ܝܐ ܒܬܘ̈ܠܐ ܕܒܪܝܛܢܝܐ + 
ܓܙܖ̈ܝܐ ܒܬܘ̈ܠܐ ܕܐܡܝܪܟܐ + ܒܝܬܢܐܡ + ܒܐܢܘܐܛܘ + ܘܝܠܝܣ ܘܦܘܬܘܢܐ + ܣܡܘܐ + ܩܘܣܘܒܘ + ܝܡܢ + ܡܐܝܘܛ + ܬܝܡܢ ܐܦܪܝܩܐ + ܙܐܡܒܝܐ + ܙܝܡܒܐܒܘܝ + + + ܣܘܪܓܕܐ + ܛܘܦܣܐ ܕܙܘ̈ܙܐ + ܛܟܣܐ ܕܦܘܪܫܢܝܐ + ܙܘ̈ܙܐ + ܛܟ݂ܣܐ ܥܕܢܘܬܐ (12 ܠܘܩܒܠ 24) + ܛܟܣܐ ܕܟܝܠܬܐ + ܡܢܝ̈ܢܐ + + + ܣܘܪܓܕܐ ܒܘܕܗܝܝܐ + ܣܘܪܓܕܐ ܨܝܢܝܐ + ܣܘܪܓܕܐ ܐܓܒܛܝܐ + ܣܘܪܓܕܐ ܕܢܓܝ + ܣܘܪܓܕܐ ܟܘܫܝܐ + ܣܘܪܓܕܐ ܓܪܝܓܘܪܝܐ + ܣܘܪܓܕܐ ܝܗܘܕܝܐ + ܣܘܪܓܕܐ ܐܘܡܬܢܝܐ ܗܢܕܘܝܐ + ܣܘܪܓܕܐ ܡܫܠܡܢܝܐ + ܣܘܪܓܕܐ ܡܫܠܡܢܝܐ ܡܕܝܢܝܐ + ܣܘܪܓܕܐ ISO-8601 + ܣܘܪܓܕܐ ܝܦܢܝܐ + ܣܘܪܓܕܐ ܦܪܣܝܐ + ܣܘܪܓܕܐ ܡܝܢܓܘ + ܛܘܦܣܐ ܕܙܘ̈ܙܐ ܡܚܫܒܢܘܬܝܐ + ܛܘܦܣܐ ܕܙܘ̈ܙܐ ܫܪܫܝܐ + ܛܟ݂ܣܐ ܦܘܪܫܢܝܐ ܕܠܟܣܝܩܘܢ + ܟܬܒܐ ܕܡܢܝ̈ܢܐ ܕܬܝܠܝܦܘܢ + ܛܟ݂ܣܐ ܦܘܪܫܢܝܐ ܫܪܫܝܐ + ܛܟ݂ܣܐ ܦܘܪܫܢܝܐ ܥܝܕ݂ܝܐ + ܛܟ݂ܣܐ 12 ܫܥܬ݂ܐ (0–11) + ܛܟ݂ܣܐ 12 ܫܥܬ݂ܐ (1–12) + ܛܟ݂ܣܐ 24 ܫܥܬ݂ܐ (0–23) + ܛܟ݂ܣܐ 24 ܫܥܬ݂ܐ (0–23) + ܛܟܣܐ ܡܝܬܪܝܐ + ܛܟܣܐ ܕܟܝܠܬܐ ܒܪܝܛܢܝܝܐ + ܛܟܣܐ ܕܟܝܠܬܐ ܐܡܪܝܟܝܐ + ܡܢܝ̈ܢܐ ܕܥܖ̈ܒܝܐ ܗܢܕܘܝܐ + ܡܢܝ̈ܢܐ ܕܐܖ̈ܡܢܝܐ + ܡܢܝ̈ܢܐ ܟܘܫܝܐ + ܡܢܝ̈ܢܐ ܓܘܪܓܝܐ + ܡܢܝ̈ܢܐ ܕܝܘܢܝ̈ܐ + ܡܢܝ̈ܢܐ ܕܝܗܘܕܝ̈ܐ + ܡܢܝ̈ܢܐ ܕܝܦܢܝ̈ܐ + ܡܢܝ̈ܢܐ ܡܥܪܒܝܐ + ܡܢܝ̈ܢܐ ܕܡܘܢܓܘܠܢܝ̈ܐ + ܡܢܝ̈ܢܐ ܪܗܘܡܝܐ + + + ܛܟܣܐ ܡܝܬܪܝܐ + ܛܟܣܐ ܒܪܝܛܢܝܐ + ܛܟܣܐ ܐܡܝܪܟܐ + + + ܠܫܢܐ:‌ {0} + ܛܟܣܐ ܕܟܬܝܒܬܐ: {0} + ܩܠܝܡܐ: {0} + + + + + right-to-left + + + + [\u0740\u0743\u0744\u0747\u0748\u0749\u074A \u0741\u0745 \u0742\u0746 \u0711 \u0730 \u0731 \u0732 \u0733 \u0734 \u0735 \u0736 \u0737 \u0738 \u0739 \u073A \u073B \u073C \u073D \u073E \u073F ܃ ܄ ܅ ܆ ܇ ܈ ܉ ܁ ܂ ܀ ܊ ܋ ܌ ܍ ܐ ܒ ܓ ܔ ܖ ܕ ܗ ܘ ܙ ܚ ܛ ܜ ܝ ܞ ܟ ܠ ܡ ܢ ܣ ܤ ܥ ܦ ܧ ܨ ܩ ܪ ܫ ܬ] + [܏\u200C\u200D ܭ ܮ ܯ ݍ ݎ ݏ] + [ܐ ܒ ܓ ܖ ܕ ܗ ܘ ܙ ܚ ܛ ܝ ܟ ܠ ܡ ܢ ܣ ܥ ܦ ܨ ܩ ܪ ܫ ܬ] + [\u061C\u200E \- ‑ , ٫ ٬ . % ٪ ‰ ؉ + 0٠ 1١ 2٢ 3٣ 4٤ 5٥ 6٦ 7٧ 8٨ 9٩] + [\- ‐ ‑ – — ، ؛ \: ܃ ܄ ܅ ܆ ܇ ܈ ! ؟ ܉ . 
… ܁ ܂ ܀ ' ‘ ’ " “ ” « » ( ) \[ \] ܊ ܋ ܌ ܍] + ؟ + + [\:∶] + + + [\$﹩$$] + [£₤] + + + [\-‒⁻₋−➖﹣-] + + + + + + + + + + + + + + + EEEE، d MMMM y G + GyMMMMdd + + + + + d MMMM y G + GyMMMMdd + + + + + G y MMM d + GyMMdd + + + + + GGGGG y-MM-dd + GGGGGyMMdd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + {1} {0} + + + {1} {0} + + + + + {1} {0} + + + {1} {0} + + + + + {1} {0} + + + {1} {0} + + + + + {1} {0} + + + {1} {0} + + + + E، d + y G + d‏/M‏/y G + d MMM y G + E، d MMM y G + d/‏M + E، d/‏M + d MMM + E، d MMM + d MMMM + y G + y G + M‏/y G + d‏/M‏/y G + E، d/‏M/‏y G + MMM y G + d MMM y G + E، d MMM y G + MMMM y G + QQQ y G + QQQQ y G + + + + y G – y G + y–y G + + + MM-y GGGG – MM-y GGGG + MM-y – MM-y GGGG + MM-y – MM-y GGGG + + + dd-MM-y – dd-MM-y GGGG + dd-MM-y GGGG – dd-MM-y GGGG + dd-MM-y – dd-MM-y GGGG + dd-MM-y – dd-MM-y GGGG + + + E, dd-MM-y – E, dd-MM-y GGGG + E, dd-MM-y GGGG – E, dd-MM-y GGGG + E, dd-MM-y – E, dd-MM-y GGGG + E, dd-MM-y – E,dd-MM-y GGGG + + + MMM y G – MMM y G + MMM – MMM y G + MMM y – MMM y G + + + d–d MMM y G + d MMM y G – d MMM y G + d MMM – d MMM y G + d MMM y – d MMM y G + + + E, d MMM – E, d MMM y G + E, d MMM y G – E, d MMM y G + E, d MMM – E, d MMM y G + E, d MMM y – E, d MMM y G + + + M–M + + + d-M – d-M + d-M – d-M + + + E، d/‏M –‏ E، d/‏M + E، d/‏M – E، d/‏M + + + MMM–MMM + + + d–d MMM + d MMM – d MMM + + + E، d – E، d MMM + E، d MMM – E، d MMM + + + y–y G + + + M‏/y – M‏/y G + M‏/y – M‏/y G + + + d‏/M‏/y – d‏/M‏/y G + d‏/M‏/y – d‏/M‏/y G + d‏/M‏/y – d‏/M‏/y G + + + E، dd‏/MM‏/y – E، dd‏/MM‏/y G + E، d‏/M‏/y – E، d‏/M‏/y G + E، d‏/M‏/y – E، d‏/M‏/y G + + + MMM – MMM y G + MMM، y – MMM y G + + + d–d MMM y G + d MMM – d MMM y G + d MMM y – d MMM y G + + + E، d – E، d MMM y G + E، d MMM – E، d MMM y G + E، d MMM y – E، d MMM y G + + + MMMM – MMMM y G + MMMM y – MMMM y G + + + + + + + + + ܟܢ܊ ܒ + ܫܒܛ + ܐܕܪ + ܢܝܣܢ + ܐܝܪ + ܚܙܝܪܢ + ܬܡܘܙ + ܐܒ + ܐܝܠܘܠ 
+ ܬܫ܊ ܐ + ܬܫ܊ ܒ + ܟܢ܊ ܐ + + + ܐ + ܒ + ܓ + ܕ + ܗ + ܘ + ܙ + ܚ + ܛ + ܝ + ܝܐ + ܝܒ + + + ܟܢܘܢ ܐܚܪܝܐ + ܫܒܛ + ܐܕܪ + ܢܝܣܢ + ܐܝܪ + ܚܙܝܪܢ + ܬܡܘܙ + ܐܒ + ܐܝܠܘܠ + ܬܫܪܝܢ ܩܕܡܝܐ + ܬܫܪܝܢ ܐܚܪܝܐ + ܟܢܘܢ ܩܕܡܝܐ + + + + + ܟܢ܊ ܒ + ܫܒܛ + ܐܕܪ + ܢܝܣܢ + ܐܝܪ + ܚܙܝܪܢ + ܬܡܘܙ + ܐܒ + ܐܝܠܘܠ + ܬܫ܊ ܐ + ܬܫ܊ ܒ + ܟܢ܊ ܐ + + + ܐ + ܒ + ܓ + ܕ + ܗ + ܘ + ܙ + ܚ + ܛ + ܝ + ܝܐ + ܝܒ + + + ܟܢܘܢ ܐܚܪܝܐ + ܫܒܛ + ܐܕܪ + ܢܝܣܢ + ܐܝܪ + ܚܙܝܪܢ + ܬܡܘܙ + ܐܒ + ܐܝܠܘܠ + ܬܫܪܝܢ ܩܕܡܝܐ + ܬܫܪܝܼܢ ܐܚܪܝܐ + ܟܢܘܢ ܩܕܡܝܐ + + + + + + + ܚܕܒܫܒܐ + ܬܪܝܢܒܫܒܐ + ܬܠܬܒܫܒܐ + ܐܪܒܥܒܫܒܐ + ܚܡܫܒܫܒܐ + ܥܪܘܒܬܐ + ܫܒܬܐ + + + ܐ + ܒ + ܓ + ܕ + ܗ + ܥ + ܫ + + + ܚܕ + ܬܪܝܢ + ܬܠܬ + ܐܪܒܥ + ܚܡܫ + ܥܪܘ + ܫܒܬܐ + + + ܚܕܒܫܒܐ + ܬܪܝܢܒܫܒܐ + ܬܠܬܒܫܒܐ + ܐܪܒܥܒܫܒܐ + ܚܡܫܒܫܒܐ + ܥܪܘܒܬܐ + ܫܒܬܐ + + + + + ܚܕ + ܬܪܝܢ + ܬܠܬ + ܐܪܒܥ + ܚܡܫ + ܥܪܘ + ܫܒܬܐ + + + ܐ + ܒ + ܓ + ܕ + ܗ + ܥ + ܫ + + + ܚܕ + ܬܪܝܢ + ܬܠܬ + ܐܪܒܥ + ܚܡܫ + ܥܪܘ + ܫܒܬܐ + + + ܚܕܒܫܒܐ + ܬܪܝܢܒܫܒܐ + ܬܠܬܒܫܒܐ + ܐܪܒܥܒܫܒܐ + ܚܡܫܒܫܒܐ + ܥܪܘܒܬܐ + ܫܒܬܐ + + + + + + + ܪܘܒܥܐ ܐ + ܪܘܒܥܐ ܒ + ܪܘܒܥܐ ܓ + ܪܘܒܥܐ ܕ + + + ܐ + ܒ + ܓ + ܕ + + + ܪܘܒܥܐ ܩܕܡܝܐ + ܪܘܒܥܐ ܬܪܝܢܐ + ܪܘܒܥܐ ܬܠܝܬܝܐ + ܪܘܒܥܐ ܪܒܝܥܝܐ + + + + + ܪܘܒܥܐ ܐ + ܪܘܒܥܐ ܒ + ܪܘܒܥܐ ܓ + ܪܘܒܥܐ ܕ + + + ܐ + ܒ + ܓ + ܕ + + + ܪܘܒܥܐ ܩܕܡܝܐ + ܪܘܒܥܐ ܬܪܝܢܐ + ܪܘܒܥܐ ܬܠܝܬܝܐ + ܪܘܒܥܐ ܪܒܝܥܝܐ + + + + + + + ܩܛ + ܒܛ + + + ܩ + ܒ + + + ܩܛ + ܒܛ + + + + + ܩܛ + ܒܛ + + + ܩܛ + ܒܛ + + + ܩܛ + ܒܛ + + + + + + ܩܕܡ ܡܫܝܚܐ + ܩܕܡ ܕܪܐ ܚܕܬܐ + ܫܢܬܐ ܡܪܢܝܬܐ + ܕܪܐ ܚܕܬܐ + + + ܏ܩܡ + ܏ܩܕܚ + ܏ܫܡ + ܏ܕܚ + + + ܏ܩܡ + ܏ܩܕܚ + ܏ܫܡ + ܏ܕܚ + + + + + + EEEE، d MMMM y + yMMMMdd + + + + + y MMMM d + yMMMMdd + + + + + y MMM d + yMMdd + + + + + y-MM-dd + yMMdd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + {1}, {0} + + + {1}, {0} + + + + + {1}, {0} + + + {1}, {0} + + + + + {1}, {0} + + + {1}, {0} + + + + + {1}, {0} + + + {1}, {0} + + + + d + cccc + E، d + y G + MMM y G + d MMM y G + EEEE، d MMM y G + dd-MM + EEEE، dd-MM + d MMM + EEEE، d MMM + d MMMM + ܫܒܘܥܐ W ܒMMMM + ܫܒܘܥܐ W ܒMMMM + MM-y + d‏/M‏/y + E، d/‏M/‏y + MMM y + d MMM y + E، d MMM y + MMMM y + QQQ y + QQQQ y + ܫܒܘܥܐ w 
ܕܫܢܬܐ Y + ܫܒܘܥܐ w ܕܫܢܬܐ Y + + + + y G – y G + y – y G + + + MM-y GGGG – MM-y GGGG + MM-y – MM-y GGGG + MM-y – MM-y GGGG + + + d MMM y G – d MMM y G + d MMM – d MMM y G + d MMM y – d MMM y G + + + E, d MMM – E, d MMM y G + E, d MMM y G – E, d MMM y G + E, d MMM – E, d MMM y G + E, d MMM y – E, d MMM y G + + + M–M + + + d-M – d-M + M/d – M/d + + + E، d/‏M –‏ E، d/‏M + E، d/‏M – E، d/‏M + + + MMM–MMM + + + d–d MMM + d MMM – d MMM + + + E، d – E، d MMM + E، d MMM – E، d MMM + + + M‏/y – M‏/y + + + d‏/M‏/y – d‏/M‏/y + d‏/M‏/y – d‏/M‏/y + d‏/M‏/y – d‏/M‏/y + + + E، dd‏/MM‏/y – E، dd‏/MM‏/y + E، d‏/M‏/y – E، d‏/M‏/y + E، d‏/M‏/y – E، d‏/M‏/y + + + MMM – MMM، y + MMM، y – MMM، y + + + d–d MMM، y + d MMM – d MMM، y + d MMM، y – d MMM، y + + + E، d – E، d MMM، y + E، d MMM – E، d MMM، y + E، d MMM، y – E، d MMM، y + + + MMMM–MMMM y + MMMM y – MMMM y + + + + + + + + ܕܪܐ + + + ܕܪܐ + + + ܕܪܐ + + + ܫܢܬܐ + ܐܫܬܩܕܝ + ܗܕܐ ܫܢܬܐ + ܫܢܬܐ ܐܚܪܬܐ + + ܒ{0} ܫܢܝ̈ܐ + ܒ{0} ܫܢܝ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܫܢܝ̈ܐ + ܡ̣ܢ ܩܕܡ {0} ܫܢܝ̈ܐ + + + + ܫܢ܊ + ܐܫܬܩܕܝ + ܗܕܐ ܫܢܬܐ + ܫܢܬܐ ܐܚܪܬܐ + + ܒ{0} ܫܢܝ̈ܐ + ܒ{0} ܫܢܝ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܫܢܝ̈ܐ + ܡ̣ܢ ܩܕܡ {0} ܫܢܝ̈ܐ + + + + ܫܢ܊ + ܐܫܬܩܕܝ + ܗܕܐ ܫܢܬܐ + ܫܢܬܐ ܐܚܪܬܐ + + ܒ{0} ܫܢܝ̈ܐ + ܒ{0} ܫܢܝ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܫܢܝ̈ܐ + ܡ̣ܢ ܩܕܡ {0} ܫܢܝ̈ܐ + + + + ܪܘܒܥܐ ܕܫܢܬܐ + ܪܘܒܥܐ ܕܥܒܪ + ܗܢܐ ܪܘܒܥܐ + ܪܘܒܥܐ ܕܐܬܐ + + ܒ{0} ܪܘܒܥܐ + ܒ{0} ܪ̈ܘܒܥܐ + + + ܡ̣ܢ ܩܕܡ {0} ܪܘܒܥܐ + ܡ̣ܢ ܩܕܡ {0} ܪ̈ܘܒܥܐ + + + + ܪܘܒܥܐ ܕܫܢܬܐ + ܪܘܒܥܐ ܕܥܒܪ + ܗܢܐ ܪܘܒܥܐ + ܪܘܒܥܐ ܕܐܬܐ + + ܒ{0} ܪܘܒܥܐ + ܒ{0} ܪ̈ܘܒܥܐ + + + ܡ̣ܢ ܩܕܡ {0} ܪܘܒܥܐ + ܡ̣ܢ ܩܕܡ {0} ܪ̈ܘܒܥܐ + + + + ܪܘܒܥܐ ܕܫܢܬܐ + ܪܘܒܥܐ ܕܥܒܪ + ܗܢܐ ܪܘܒܥܐ + ܪܘܒܥܐ ܕܐܬܐ + + ܒ{0} ܪܘܒܥܐ + ܒ{0} ܪ̈ܘܒܥܐ + + + ܡ̣ܢ ܩܕܡ {0} ܪܘܒܥܐ + ܡ̣ܢ ܩܕܡ {0} ܪ̈ܘܒܥܐ + + + + ܝܪܚܐ + ܝܪܚܐ ܕܕܥܒܪ + ܗܢܐ ܝܪܚܐ + ܝܪܚܐ ܕܐܬܐ + + ܒ{0} ܝܪܚܐ + ܒ{0} ܝܪ̈ܚܐ + + + ܡ̣ܢ ܩܕܡ {0} ܝܪܚܐ + ܡ̣ܢ ܩܕܡ {0} ܝܪ̈ܚܐ + + + + ܝܪܚܐ + ܝܪܚܐ ܕܕܥܒܪ + ܗܢܐ ܝܪܚܐ + ܝܪܚܐ ܕܐܬܐ + + ܒ{0} ܝܪܚܐ + ܒ{0} ܝܪ̈ܚܐ + + + ܡ̣ܢ ܩܕܡ {0} ܝܪܚܐ + ܡ̣ܢ ܩܕܡ {0} ܝܖ̈ܚܐ + + + + ܝܪܚܐ + ܝܪܚܐ ܕܕܥܒܪ + ܗܢܐ ܝܪܚܐ + ܝܪܚܐ ܕܐܬܐ + + ܒ{0} ܝܪܚܐ + ܒ{0} ܝܪ̈ܚܐ + + + ܡ̣ܢ ܩܕܡ {0} ܝܪܚܐ + ܡ̣ܢ ܩܕܡ {0} ܝܖ̈ܚܐ + + 
+ + ܫܒܘܥܐ + ܫܒܘܥܐ ܕܕܥܒܪ + ܗܕܐ ܫܒܘܥܐ + ܫܒܘܥܐ ܕܐܬܐ + + ܒ{0} ܫܒܘܥܐ + ܒ{0} ܫܒ̈ܘܥܐ + + + ܡ̣ܢ ܩܕܡ {0} ܫܒܘܥܐ + ܡ̣ܢ ܩܕܡ {0} ܫܒ̈ܘܥܐ + + ܫܒܘܥܐ ܕ{0} + + + ܫܒ܊ + ܫܒܘܥܐ ܕܕܥܒܪ + ܗܕܐ ܫܒܘܥܐ + ܫܒܘܥܐ ܕܐܬܐ + + ܒ{0} ܫܒܘܥܐ + ܒ{0} ܫܒ̈ܘܥܐ + + + ܡ̣ܢ ܩܕܡ {0} ܫܒܘܥܐ + ܡ̣ܢ ܩܕܡ {0} ܫܒ̈ܘܥܐ + + ܫܒܘܥܐ ܕ{0} + + + ܫܒ܊ + ܫܒܘܥܐ ܕܕܥܒܪ + ܗܕܐ ܫܒܘܥܐ + ܫܒܘܥܐ ܕܐܬܐ + + ܒ{0} ܫܒܘܥܐ + ܒ{0} ܫܒ̈ܘܥܐ + + + ܡ̣ܢ ܩܕܡ {0} ܫܒܘܥܐ + ܡ̣ܢ ܩܕܡ {0} ܫܒ̈ܘܥܐ + + ܫܒܘܥܐ ܕ{0} + + + ܫܒܘܥܐ ܕܝܪܚܐ + + + ܫܒܘܥܐ ܕܝܪܚܐ + + + ܫܒܘܥܐ ܕܝܪܚܐ + + + ܝܘܡܐ + ܐܬܡܠܝ + ܐܕܝܘܡ + ܝܘܡܐ ܕܐܬܐ + + ܒ{0} ܝܘܡܐ + ܒ{0} ܝܘܡܢ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܝܘܡܐ + ܡ̣ܢ ܩܕܡ {0} ܝܘܡܢ̈ܐ + + + + ܝܘܡܐ + ܐܬܡܠܝ + ܐܕܝܘܡ + ܝܘܡܐ ܕܐܬܐ + + ܒ{0} ܝܘܡܐ + ܒ{0} ܝܘܡܢ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܝܘܡܐ + ܡ̣ܢ ܩܕܡ {0} ܝܘܡܢ̈ܐ + + + + ܝܘܡܐ + ܐܬܡܠܝ + ܐܕܝܘܡ + ܝܘܡܐ ܕܐܬܐ + + ܒ{0} ܝܘܡܐ + ܒ{0} ܝܘܡܢ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܝܘܡܐ + ܡ̣ܢ ܩܕܡ {0} ܝܘܡܢ̈ܐ + + + + ܝܘܡܐ ܕܫܢܬܐ + + + ܝܘܡܐ ܕܫܢܬܐ + + + ܝܘܡܐ ܕܫܢܬܐ + + + ܝܘܡܐ ܕܫܒܘܥܐ + + + ܝܘܡܐ ܕܫܒܘܥܐ + + + ܝܘܡܐ ܕܫܒܘܥܐ + + + ܝܘܡܐ ܦܘܠܚܢܐ ܕܫܒܘܥܐ + + + ܝܘܡܐ ܦܘܠܚܢܐ ܕܫܒܘܥܐ + + + ܝܘܡܐ ܦܘܠܚܢܐ ܕܫܒܘܥܐ + + + ܚܕܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܚܕܒܫܒܐ + ܚܕܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܚܕܒܫܒܐ + ܒ{0} ܚܕܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܚܕܒܫܒܐ + ܡ̣ܢ ܩܕܡ {0} ܚܕܒܫܒ̈ܐ + + + + ܚܕܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܚܕܒܫܒܐ + ܚܕܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܚܕܒܫܒܐ + ܒ{0} ܚܕܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܚܕܒܫܒܐ + ܡ̣ܢ ܩܕܡ {0} ܚܕܒܫܒ̈ܐ + + + + ܚܕܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܚܕܒܫܒܐ + ܚܕܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܚܕܒܫܒܐ + ܒ{0} ܚܕܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܚܕܒܫܒܐ + ܡ̣ܢ ܩܕܡ {0} ܚܕܒܫܒ̈ܐ + + + + ܬܪܝܢܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܬܪܝܢܒܫܒܐ + ܬܪܝܢܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܬܪܝܢܒܫܒܐ + ܒ{0} ܬܪܝܢܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܬܪܝܢܒܫܒܐ + ܡ̣ܢ ܩܕܡ {0} ܬܪܝܢܒܫܒ̈ܐ + + + + ܬܪܝܢܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܬܪܝܢܒܫܒܐ + ܬܪܝܢܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܬܪܝܢܒܫܒܐ + ܒ{0} ܬܪܝܢܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܬܪܝܢܒܫܒܐ + ܡ̣ܢ ܩܕܡ {0} ܬܪܝܢܒܫܒ̈ܐ + + + + ܬܪܝܢܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܬܪܝܢܒܫܒܐ + ܬܪܝܢܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܬܪܝܢܒܫܒܐ + ܒ{0} ܬܪܝܢܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܬܪܝܢܒܫܒܐ + ܡ̣ܢ ܩܕܡ {0} ܬܪܝܢܒܫܒ̈ܐ + + + + ܬܠܬܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܬܠܬܒܫܒܐ + ܬܠܬܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܬܠܬܒܫܒܐ + ܒ{0} ܬܠܬܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܬܠܬܒܫܒܐ + ܡ̣ܢ ܩܕܡ {0} ܬܠܬܒܫܒ̈ܐ + + + + ܬܠܬܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܬܠܬܒܫܒܐ + ܬܠܬܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܬܠܬܒܫܒܐ + ܒ{0} ܬܠܬܒܫܒ̈ܐ 
+ + + ܡ̣ܢ ܩܕܡ {0} ܬܠܬܒܫܒܐ + ܡ̣ܢ ܩܕܡ {0} ܬܠܬܒܫܒ̈ܐ + + + + ܬܠܬܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܬܠܬܒܫܒܐ + ܬܠܬܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܬܠܬܒܫܒܐ + ܒ{0} ܬܠܬܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܬܠܬܒܫܒܐ + ܡ̣ܢ ܩܕܡ {0} ܬܠܬܒܫܒ̈ܐ + + + + ܐܪܒܥܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܐܪܒܥܒܫܒܐ + ܐܪܒܥܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܐܪܒܥܒܫܒܐ + ܒ{0} ܐܪܒܥܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܐܪܒܥܒܫܒܐ + ܡ̣ܢ ܩܕܡ {0} ܐܪܒܥܒܫܒ̈ܐ + + + + ܐܪܒܥܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܐܪܒܥܒܫܒܐ + ܐܪܒܥܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܐܪܒܥܒܫܒܐ + ܒ{0} ܐܪܒܥܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܐܪܒܥܒܫܒ̈ܐ + ܡ̣ܢ ܩܕܡ {0} ܐܪܒܥܒܫܒ̈ܐ + + + + ܐܪܒܥܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܐܪܒܥܒܫܒܐ + ܐܪܒܥܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܐܪܒܥܒܫܒܐ + ܒ{0} ܐܪܒܥܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܐܪܒܥܒܫܒ̈ܐ + ܡ̣ܢ ܩܕܡ {0} ܐܪܒܥܒܫܒ̈ܐ + + + + ܚܡܫܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܚܡܫܒܫܒܐ + ܚܡܫܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܚܡܫܒܫܒܐ + ܒ{0} ܚܡܫܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܚܡܫܒܫܒܐ + ܡ̣ܢ ܩܕܡ {0} ܚܡܫܒܫܒ̈ܐ + + + + ܚܡܫܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܚܡܫܒܫܒܐ + ܚܡܫܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܚܡܫܒܫܒܐ + ܒ{0} ܚܡܫܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܚܡܫܒܫܒܐ + ܡ̣ܢ ܩܕܡ {0} ܚܡܫܒܫܒ̈ܐ + + + + ܚܡܫܒܫܒܐ ܕܕܥܒܪ + ܗܕܐ ܚܡܫܒܫܒܐ + ܚܡܫܒܫܒܐ ܕܐܬܐ + + ܒ{0} ܚܡܫܒܫܒܐ + ܒ{0} ܚܡܫܒܫܒ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܚܡܫܒܫܒܐ + ܡ̣ܢ ܩܕܡ {0} ܚܡܫܒܫܒ̈ܐ + + + + ܥܪܘܒܬܐ ܕܕܥܒܪ + ܗܕܐ ܥܪܘܒܬܐ + ܥܪܘܒܬܐ ܕܐܬܐ + + ܒ{0} ܥܪܘܒܬܐ + ܒ{0} ܥܪ̈ܘܒܬܐ + + + ܡ̣ܢ ܩܕܡ {0} ܥܪܘܒܬܐ + ܡ̣ܢ ܩܕܡ {0} ܥܪ̈ܘܒܬܐ + + + + ܥܪܘܒܬܐ ܕܕܥܒܪ + ܗܕܐ ܥܪܘܒܬܐ + ܥܪܘܒܬܐ ܕܐܬܐ + + ܒ{0} ܥܪܘܒܬܐ + ܒ{0} ܥܪ̈ܘܒܬܐ + + + ܡ̣ܢ ܩܕܡ {0} ܥܪܘܒܬܐ + ܡ̣ܢ ܩܕܡ {0} ܥܪ̈ܘܒܬܐ + + + + ܥܪܘܒܬܐ ܕܕܥܒܪ + ܗܕܐ ܥܪܘܒܬܐ + ܥܪܘܒܬܐ ܕܐܬܐ + + ܒ{0} ܥܪܘܒܬܐ + ܒ{0} ܥܪ̈ܘܒܬܐ + + + ܡ̣ܢ ܩܕܡ {0} ܥܪܘܒܬܐ + ܡ̣ܢ ܩܕܡ {0} ܥܪ̈ܘܒܬܐ + + + + ܫܒܬܐ ܕܕܥܒܪ + ܗܕܐ ܫܒܬܐ + ܫܒܬܐ ܕܐܬܐ + + ܒ{0} ܫܒܬܐ + ܒ{0} ܫܒ̈ܬܐ + + + ܡ̣ܢ ܩܕܡ {0} ܫܒܬܐ + ܡ̣ܢ ܩܕܡ {0} ܫܒ̈ܬܐ + + + + ܫܒܬܐ ܕܕܥܒܪ + ܗܕܐ ܫܒܬܐ + ܫܒܬܐ ܕܐܬܐ + + ܒ{0} ܫܒܬܐ + ܒ{0} ܫܒ̈ܬܐ + + + ܡ̣ܢ ܩܕܡ {0} ܫܒܬܐ + ܡ̣ܢ ܩܕܡ {0} ܫܒ̈ܬܐ + + + + ܫܒܬܐ ܕܕܥܒܪ + ܗܕܐ ܫܒܬܐ + ܫܒܬܐ ܕܐܬܐ + + ܒ{0} ܫܒܬܐ + ܒ{0} ܫܒ̈ܬܐ + + + ܡ̣ܢ ܩܕܡ {0} ܫܒܬܐ + ܡ̣ܢ ܩܕܡ {0} ܫܒ̈ܬܐ + + + + ܩܛ/ܒܛ + + + ܩܛ/ܒܛ + + + ܩܛ/ܒܛ + + + ܫܥܬܐ + ܗܕܐ ܫܥܬܐ + + ܒ{0} ܫܥܬܐ + ܒ{0} ܫܥ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܫܥܬܐ + ܡ̣ܢ ܩܕܡ {0} ܫܥ̈ܐ + + + + ܫܥ܊ + ܗܕܐ ܫܥܬܐ + + ܒ{0} ܫܥܬܐ + ܒ{0} ܫܥ̈ܐ + + + ܡ̣ܢ ܩܕܡ {0} ܫܥܬܐ + ܡ̣ܢ ܩܕܡ {0} ܫܥ̈ܐ + + + + ܫܥ܊ + ܗܕܐ ܫܥܬܐ + + ܒ{0} ܫܥܬܐ + ܒ{0} ܫܥ̈ܐ + + + ܡ̣ܢ ܩܕܡ 
{0} ܫܥܬܐ + ܡ̣ܢ ܩܕܡ {0} ܫܥ̈ܐ + + + + ܩܛܝܢܬܐ + ܗܢܐ ܩܛܝܢܐ + + ܒ{0} ܩܛܝܢܬܐ + ܒ{0} ܩܛܝ̈ܢܬܐ + + + ܡ̣ܢ ܩܕܡ {0} ܩܛܝܢܐ + ܡ̣ܢ ܩܕܡ {0} ܩܛܝ̈ܢܐ + + + + ܩܛܝܢܬܐ + ܗܢܐ ܩܛܝܢܐ + + ܒ{0} ܩܛܝܢܬܐ + ܒ{0} ܩܛܝ̈ܢܬܐ + + + ܡ̣ܢ ܩܕܡ {0} ܩܛܝܢܐ + ܡ̣ܢ ܩܕܡ {0} ܩܛܝ̈ܢܐ + + + + ܩܛܝܢܬܐ + ܗܢܐ ܩܛܝܢܐ + + ܒ{0} ܩܛܝܢܬܐ + ܒ{0} ܩܛܝ̈ܢܬܐ + + + ܡ̣ܢ ܩܕܡ {0} ܩܛܝܢܐ + ܡ̣ܢ ܩܕܡ {0} ܩܛܝ̈ܢܐ + + + + ܪܦܦܐ + ܗܫܐ + + ܒ{0} ܪܦܦܐ + ܒ{0} ܪ̈ܦܦܐ + + + ܡ̣ܢ ܩܕܡ {0} ܪܦܦܐ + ܡ̣ܢ ܩܕܡ {0} ܖ̈ܦܦܐ + + + + ܪܦܦܐ + ܗܫܐ + + ܒ{0} ܪܦܦܐ + ܒ{0} ܪ̈ܦܦܐ + + + ܡ̣ܢ ܩܕܡ {0} ܪܦܦܐ + ܡ̣ܢ ܩܕܡ {0} ܖ̈ܦܦܐ + + + + ܪܦܦܐ + ܗܫܐ + + ܒ{0} ܪܦܦܐ + ܒ{0} ܪ̈ܦܦܐ + + + ܡ̣ܢ ܩܕܡ {0} ܪܦܦܐ + ܡ̣ܢ ܩܕܡ {0} ܖ̈ܦܦܐ + + + + ܦܢܝܬܐ ܕܙܒܢܐ + + + ܦܢܝܬܐ ܕܙܒܢܐ + + + ܦܢܝܬܐ ܕܙܒܢܐ + + + + ܥܕܢܐ ܓܪܝܢܟ {0} + ܥܕܢܐ ܕܓܪܝܢܟ + ܥܕܢܐ {0} + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ {0} + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ {0} + + ܡܕܝܢܬܐ ܠܐ ܝܕܥܝܬܐ + + + ܐܢܕܘܪܐ + + + ܕܘܒܐܝ + + + ܟܐܒܘܠ + + + ܐܢܬܝܓܘܐ + + + ܐܢܓܘܝܐ + + + ܬܝܪܐܢ + + + ܝܪܒܐܢ + + + ܠܘܐܢܕܐ + + + ܪܘܬܝܪܐ + + + ܦܐܠܡܝܪ + + + ܬܪܘܠ + + + ܣܝܘܐ + + + ܡܐܘܣܘܢ + + + ܕܒܝܣ + + + ܒܘܣܬܘܟ + + + ܟܐܝܣܝ + + + ܕܘܡܘܢܬ ܕܐܘܪܒܝܠ + + + ܡܟܡܘܪܕܘ + + + ܪܝܘ ܓܝܓܘܣ + + + ܡܢܕܘܙܐ + + + ܣܐܢ ܘܐܢ + + + ܐܘܫܘܐܝܐ + + + ܠܐ ܪܝܘܗܐ + + + ܣܐܢ ܠܘܝܣ + + + ܟܐܬܐܡܪܟܐ + + + ܣܠܬܐ + + + ܓܘܓܘܝ + + + ܬܘܟܘܡܐܢ + + + ܟܘܪܕܘܒܐ + + + ܒܘܐܝܢܘܣ ܥܝܪܣ + + + ܦܐܓܘ ܦܐܓܘ + + + ܒܝܝܢܐ + + + ܦܝܪܬ + + + ܐܘܟܠܐ + + + ܕܪܘܝܢ + + + ܐܕܝܠܝܕ + + + ܒܪܘܟܝܢ ܗܝܠ + + + ܡܝܠܒܘܪܢ + + + ܗܘܒܪܬ + + + ܠܝܢܕܡܐܢ + + + ܣܝܕܢܝ + + + ܒܪܝܣܒܐܢ + + + ܡܐܟܐܘܪܝ + + + ܠܘܪܕ ܗܐܘ + + + ܐܪܘܒܐ + + + ܡܐܪܝܗܐܡ + + + ܒܐܟܘ + + + ܣܪܐܝܝܒܘ + + + ܒܐܪܒܕܘܣ + + + ܕܟܐ + + + ܒܪܘܟܣܠ + + + ܐܘܐܓܐܕܐܘܓܐܘ + + + ܣܘܦܝܐ + + + ܒܚܪܝܢ + + + ܒܘܓܘܡܒܘܪܐ + + + ܦܘܪܬܘ-ܢܘܒܘ + + + ܡܪ ܒܪ ܬܘܠܡܝ + + + ܒܝܪܡܝܘܕܐ + + + ܒܪܘܢܐܝ + + + ܠܐ ܦܐܙ + + + ܟܪܠܝܢܓܩ + + + ܐܝܪܘܢܝܦܝ + + + ܪܝܘ ܒܪܢܟܘ + + + ܦܘܪܬܘ ܒܝܠܗܘ + + + ܒܘܥ ܒܝܣܬܐ + + + ܡܐܢܐܘܣ + + + ܟܘܝܐܒܐ + + + ܣܐܢܬܐܪܡ + + + ܟܐܡܦܘ ܓܪܢܕܝ + + + ܒܝܠܝܡ + + + ܐܪܐܓܐܘܝܢܐ + + + ܣܐܘ ܦܐܘܠܘ + + + ܒܐܗܝܐ + + + ܦܘܪܬܐܠܝܙܐ + + + ܡܐܣܝܐܘ + + + ܪܝܣܝܦܝ + + + ܢܘܪܘܢܗܐ + + + ܢܐܣܐܘ + + + ܬܝܡܦܘ + + + ܓܒܘܪܘܢ + + + ܡܝܢܣܟ + + + ܒܝܠܝܙ + + + ܕܐܣܘܢ + + + ܣܘܣܬܐ ܚܘܪܬܐ + + + ܐܢܘܒܝܟ + + + ܒܢܟܘܒܝܪ + + + ܦܘܪܬ ܢܝܠܣܘܢ + + + ܕܐܣܘܢ ܟܪܝܟ + + + 
ܟܪܝܣܬܘܢ + + + ܝܠܘܢܝܦ + + + ܐܕܡܘܢܬܘܢ + + + ܢܕܘܪܬܐ + + + ܟܡܒܪܓ ܒܐܝ + + + ܪܝܓܝܢܐ + + + ܘܝܢܝܦܓ + + + ܪܝܣܘܠܘܬ + + + ܢܗܪܐ ܕܪܥܢܝ + + + ܪܐܢܟܢ ܐܢܠܝܬ + + + ܐܬܝܟܘܟܐܢ + + + ܬܐܢܕܐܪ ܒܐܝ + + + ܢܝܦܝܓܘܢ + + + ܬܘܪܘܢܬܘ + + + ܐܝܩܠܘܝܬ + + + ܦܐܢܓܢܝܪܬܘܢܓ + + + ܡܘܢܟܬܘܢ + + + ܗܠܝܦܐܟܣ + + + ܓܘܣ ܒܐܝ + + + ܓܠܝܣ ܒܐܝ + + + ܒܠܐܢܟ-ܣܐܒܠܘܢ + + + ܡܪ ܝܘܚܢܢ + + + ܟܘܟܘܣ + + + ܟܝܢܫܐܣܐ + + + ܠܘܒܘܡܒܫܝ + + + ܒܐܢܓܐܘܝ + + + ܒܪܐܙܐܒܝܠ + + + ܙܝܘܪܚ + + + ܐܒܕܓܢ + + + ܪܐܪܘܬܘܢܓܐ + + + ܦܨܚܐ + + + ܦܘܢܬܐ ܥܪܝܢܣ + + + ܣܐܢܬܝܐܓܘ + + + ܕܘܐܠܐ + + + ܐܘܪܘܡܟܝ + + + ܫܢܓܗܐܝܝ + + + ܒܘܓܘܬܐ + + + ܟܘܣܬܐ ܪܝܟܐ + + + ܗܐܒܐܢܐ + + + ܟܐܦ ܒܝܪܕܝ + + + ܟܘܪܐܟܐܘ + + + ܟܪܝܣܬܡܣ + + + ܢܝܩܘܣܝܐ + + + ܦܐܡܐܓܘܣܬܐ + + + ܦܪܐܓ + + + ܒܘܣܝܢܓܢ + + + ܒܪܠܝܢ + + + ܓܝܒܘܬܝ + + + ܟܘܦܢܗܐܓܢ + + + ܕܘܡܝܢܝܟܐ + + + ܣܢܬܘ ܕܘܡܝܢܓܘ + + + ܓܙܐܐܪ + + + ܓܐܠܐܦܓܘܣ + + + ܓܘܐܝܐܩܘܝܠ + + + ܬܐܠܝܢ + + + ܩܐܗܖ̈ܗ + + + ܐܠ ܥܝܘܢ + + + ܐܣܡܐܪܐ + + + ܟܐܢܪܝ + + + ܣܒܬܐ + + + ܡܕܪܝܕ + + + ܐܕܝܣ ܐܒܒܐ + + + ܗܠܣܢܟܝ + + + ܦܝܓܝ + + + ܣܬܐܢܠܝ + + + ܬܫܘܟ + + + ܦܘܗܢܦܐܝ + + + ܟܘܣܪܐܝ + + + ܦܐܪܘ + + + ܦܐܪܝܣ + + + ܠܝܒܪܝܒܝܠ + + + + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܒܪܝܛܢܝܐ + + ܠܘܢܕܘܢ + + + ܓܪܝܢܕܐ + + + ܬܦܠܝܣ + + + ܟܐܝܐܢ + + + ܓܘܪܢܙܝ + + + ܐܟܪܐ + + + ܓܒܪܠܛܪ + + + ܬܘܠ + + + ܢܘܟ + + + ܐܝܛܘܩܘܪܡܝܬ + + + ܕܐܢܡܪܟܫܒܝܢ + + + ܒܐܢܓܘܠ + + + ܟܘܢܐܟܪܝ + + + ܓܘܐܕܐܠܘܦܝ + + + ܡܐܠܐܒܘ + + + ܐܬܢܘܣ + + + ܬܡܝܢ ܓܘܪܓܝܐ + + + ܓܘܐܬܡܐܠܐ + + + ܓܘܐܡ + + + ܒܝܣܐܘ + + + ܓܘܝܐܢܐ + + + ܗܘܢܓ ܟܘܢܓ + + + ܬܝܓܘܣܝܓܐܠܦܐ + + + ܙܐܓܪܒ + + + ܦܘܪܬ ܐܘ ܦܪܝܢܣ + + + ܒܘܕܦܫܛ + + + ܓܐܟܐܪܬܐ + + + ܦܘܢܬܝܐܢܐܟ + + + ܡܐܟܐܣܐܪ + + + ܓܐܝܦܘܪܐ + + + + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܝܪܠܢܕ + + ܕܒܠܢ + + + ܐܘܪܫܠܡ + + + ܓܙܪܬܐ ܕܡܐܢ + + + ܟܘܠܟܬܐ + + + ܬܫܓܘܣ + + + ܒܓܕܕ + + + ܬܗܪܢ + + + ܪܐܝܟܒܝܟ + + + ܪܘܡܝ + + + ܓܝܪܙܝ + + + ܓܡܐܝܟܐ + + + ܥܡܐܢ + + + ܛܘܟܝܘ + + + ܢܝܪܘܒܝ + + + ܒܝܫܟܝܟ + + + ܦܢܘܡ ܦܢ + + + ܟܐܢܬܘܢ + + + ܟܝܪܝܡܐܬܝ + + + ܬܐܪܐܘܐ + + + ܟܘܡܘܪܘ + + + ܣܐܢܬ ܟܬܣ + + + ܦܝܘܢܓܝܢܓ + + + ܣܐܘܠ + + + ܟܘܝܬ + + + ܟܐܝܡܝܢ + + + ܐܟܬܐܘ + + + ܐܘܪܐܠ + + + ܐܬܝܪܘ + + + ܐܟܬܘܒ + + + ܟܘܣܬܐܢܐܝ + + + ܟܝܙܝܠܘܪܕܐ + + + ܐܠܡܐܬܝ + + + ܒܝܐܢܬܝܐܢ + + + ܒܝܪܘܬ + + + ܡܪܬܝ ܠܘܫܐ + + + ܒܕܘܙ + + + ܟܘܠܘܡܒܘ + + + ܡܘܢܪܘܒܝܐ + + + ܡܐܣܝܪܘ + + + ܒܠܢܘܣ + + + 
ܠܘܟܣܡܒܘܪܓ + + + ܪܝܓܐ + + + ܛܪܝܦܘܠܝܣ + + + ܟܐܣܐܒܠܢܟܐ + + + ܡܘܢܐܟܘ + + + ܟܝܣܝܢܐܘ + + + ܦܘܕܓܘܪܝܟܐ + + + ܡܪܝܓܘܬ + + + ܐܢܬܐܢܐܢܪܝܒܘ + + + ܟܘܐܓܐܠܝܢ + + + ܡܐܓܘܪܘ + + + ܣܩܘܦܝܐ + + + ܒܐܡܐܟܘ + + + ܝܢܓܘܢ + + + ܗܘܒܕ + + + ܐܘܠܐܢܒܐܬܘܪ + + + ܟܘܝܒܠܣܢ + + + ܡܐܟܐܘ + + + ܣܐܝܦܐܢ + + + ܡܐܪܬܝܢܝܩ + + + ܢܘܐܟܫܘܬ + + + ܡܘܢܬܣܝܪܐܬ + + + ܡܝܠܛܐ + + + ܡܘܪܝܫܘܣ + + + ܓܙܪܬܐ ܡܐܠܕܝܒܝܬܐ + + + ܒܠܢܬܝܪ + + + ܬܝܐܘܐܢܐ + + + ܗܝܪܡܘܣܝܐ + + + ܡܙܛܠܐܢ + + + ܟܝܘܐܘܐ + + + ܒܐܗܝܐ ܒܐܢܝܪܣ + + + ܘܓܝܢܐܓܐ + + + ܡܘܢܛܪܐܝ + + + ܡܕܝܢܬܐ ܕܡܟܣܝܟܘ + + + ܡܐܬܐܡܘܪܘܣ + + + ܡܪܝܕܐ + + + ܟܐܢܟܘܢ + + + ܟܘܐܠܐ ܠܘܡܦܘܪ + + + ܟܘܫܝܢܓ + + + ܡܐܦܘܬܘ + + + ܘܝܢܕܗܘܟ + + + ܢܘܡܝܐ + + + ܢܝܐܡܝ + + + ܢܘܪܦܠܟ + + + ܠܐܓܘܣ + + + ܡܐܢܐܓܘܐ + + + ܐܡܣܬܪܕܡ + + + ܐܘܣܠܘ + + + ܟܐܬܡܐܢܕܘ + + + ܢܐܘܪܘ + + + ܢܝܘܝ + + + ܬܫܐܬܡ + + + ܐܟܠܐܢܕ + + + ܡܣܩܛ + + + ܦܢܡܐ + + + ܠܝܡܐ + + + ܬܐܗܝܬܝ + + + ܡܐܪܟܐܘܣܐܣ + + + ܓܡܒܝܪ + + + ܦܘܪܬ ܡܘܪܝܣܒܐܝ + + + ܒܘܓܐܝܢܒܝܠ + + + ܡܐܢܝܠܐ + + + ܟܪܐܟܝ + + + ܘܐܪܣܘ + + + ܡܩܘܠܘܢ + + + ܦܝܬܟܐܝܪܢ + + + ܦܘܪܬܘ ܪܝܟܘ + + + ܥܙܐ + + + ܚܒܪܘܢ + + + ܓܙܪܬܐ ܕܐܙܘܪ + + + ܡܕܐܝܪܐ + + + ܠܫܒܘܢܐ + + + ܦܠܐܘ + + + ܐܣܘܢܟܣܝܘܢ + + + ܩܛܪ + + + ܪܝܘܢܝܘܢ + + + ܒܘܩܘܪܫܛ + + + ܒܠܓܪܕ + + + ܟܐܠܝܢܝܢܓܪܐܕ + + + ܡܘܣܟܘ + + + ܒܘܠܓܘܓܪܐܕ + + + ܣܪܐܬܘܒ + + + ܐܣܬܪܐܚܢ + + + ܐܘܠܝܢܘܒܣܟ + + + ܟܝܪܘܒ + + + ܣܡܐܪܐ + + + ܝܟܐܬܝܪܢܒܝܪܓ + + + ܐܘܡܣܟ + + + ܢܘܒܘܣܝܒܪܣܟ + + + ܒܐܪܢܐܘܠ + + + ܬܘܡܣܟ + + + ܢܘܒܘܟܘܙܢܝܬܣܟ + + + ܟܪܐܣܢܘܝܪܣܟ + + + ܐܝܪܟܘܬܣܟ + + + ܬܫܝܬܐ + + + ܝܐܟܘܬܣܟ + + + ܒܠܐܕܝܒܘܣܬܘܟ + + + ܚܐܢܕܝܓܐ + + + ܣܐܚܐܠܝܢ + + + ܐܘܣܬ-ܢܝܪܐ + + + ܡܐܓܐܕܐܢ + + + ܣܪܝܕܢܝܟܘܠܝܡܣܟ + + + ܟܐܡܬܫܐܬܟܐ + + + ܐܢܐܕܝܪ + + + ܟܝܓܐܠܝ + + + ܪܝܐܕ + + + ܓܘܐܕܐܠܟܐܢܐܠ + + + ܡܐܗܝ + + + ܚܪܛܘܡ + + + ܣܬܘܟܗܘܠܡ + + + ܣܝܢܓܐܦܘܪ + + + ܡܪܬܝ ܗܝܠܝܢܐ + + + ܠܝܘܒܠܝܐܢܐ + + + ܠܘܢܓܝܥܪܒܝܝܢ + + + ܒܪܬܝܣܠܒܐ‏ + + + ܦܪܝܬܐܘܢ + + + ܣܢ ܡܪܝܢܘ + + + ܕܐܟܐܪ + + + ܡܘܩܕܝܫܘ + + + ܦܐܪܐܡܐܪܝܒܘ + + + ܓܘܒܐ + + + ܣܐܘ ܬܘܡܝ + + + ܐܠ ܣܠܒܐܕܘܪ + + + ܪܘܒ݂ܥܐ ܕܫܠܝܛܐ ܬܚܬܝܐ + + + ܕܪܡܣܘܩ + + + ܡܒܐܒܐܢܝ + + + ܓܪܐܢܕ ܬܘܪܟ + + + ܢܓܡܝܢܐ + + + ܟܝܪܓܘܠܝܢ + + + ܠܘܡܝ + + + ܒܐܢܟܘܟ + + + ܕܘܫܐܢܒܝ + + + ܦܐܟܐܘܦܘ + + + ܕܝܠܝ + + + ܥܫܩܐܒܐܕ + + + ܬܘܢܣ + + + ܬܘܢܓܐܬܐܦܘ + + + ܐܣܛܢܒܘܠ + + + ܦܘܪܬ ܕܐܣܦܢܝܐ + + + ܦܘܢܐܦܘܬܝ + + + 
ܬܐܝܦܐܝ + + + ܕܐܪ ܫܠܡܐ + + + ܐܘܙܓܘܪܘܕ + + + ܟܝܝܒ + + + ܣܡܦܪܘܦܠ + + + ܙܐܦܘܪܝܓܝ + + + ܟܐܡܦܐܠܐ + + + ܡܝܕܘܐܝ + + + ܘܐܝܟ + + + ܐܕܐܟ + + + ܢܘܡ + + + ܓܘܢܣܬܘܢ + + + ܐܢܟܘܪܓ + + + ܝܩܘܬܐܬ + + + ܣܝܛܟܐ + + + ܓܘܢܘ + + + ܡܛܠܟܐܬܠܐ + + + ܠܘܣ ܐܢܓܠܘܣ + + + ܒܘܝܙܝ + + + ܦܝܢܝܟܣ + + + ܕܢܒܪ + + + ܒܝܘܠܐ، ܕܐܟܘܬܐ ܓܪܒܝܝܬܐ + + + ܢܝܘ ܣܐܠܝܡ،‌ ܕܐܟܘܬܐ ܓܪܒܝܝܬܐ + + + ܣܝܢܬܪ، ܕܐܟܘܬܐ ܓܪܒܝܝܬܐ + + + ܫܟܓܘ + + + ܡܢܘܡܝܢܝ + + + ܒܝܢܣܝܢܝܣ، ܐܢܕܝܐܢܐ + + + ܦܝܬܝܪܣܒܝܪܓ، ܐܢܕܝܐܢܐ + + + ܡܢܕܝܬܐ ܕܬܝܠ، ܐܢܕܝܐܢܐ + + + ܢܘܟܣ، ܐܢܕܝܐܢܐ + + + ܘܝܢܐܡܐܟ، ܐܢܕܝܐܢܐ + + + ܡܪܝܢܓܘ، ܐܢܕܝܐܢܐ + + + ܐܢܕܝܐܢܐܦܘܠܝܣ + + + ܠܘܝܣܒܝܠ + + + ܒܝܒܐܝ، ܐܢܕܝܐܢܐ + + + ܡܘܢܬܐܟܝܠܘ، ܟܝܢܬܐܟܝ + + + ܕܝܬܪܘܝܬ + + + ܢܝܘ ܝܘܪܟ + + + ܡܘܢܬܝܒܝܕܝܘ + + + ܣܡܪܟܢܕ + + + ܬܫܟܝܢܬ + + + ܘܐܬܝܩܐܢ + + + ܡܪ ܒܢܣܢܬ + + + ܟܐܪܐܟܣ + + + ܬܘܪܬܘܠܐ + + + ܡܪ ܬܐܘܡܐ + + + ܡܕܝܢܬܐ ܕܗܘ ܟܝ ܡܝܢ + + + ܝܦܐܬ + + + ܘܝܠܝܣ + + + ܐܦܝܐ + + + ܥܕܢ + + + ܡܐܝܘܬ + + + ܝܘܗܢܝܣܒܘܪܓ + + + ܠܘܣܐܟܐ + + + ܗܪܐܪܝ + + + + ܥܕܢܘܬܐ ܕܐܝܟܝܪ + ܥܕܢܘܬܐ ܫܪܫܝܬܐ ܥܕܢܘܬܐ ܫܪܫܝܬܐ ܕܐܝܟܝܪ + ܥܕܢܘܬܐ ܩܝܬܝܬܐ ܕܐܝܟܝܪ + + + + + ܥܕܢܐ ܕܐܦܓܐܢܣܬܐܢ + + + + + ܥܕܢܐ ܕܡܨܥܝܬܐ ܐܦܪܝܩܐ + + + + + ܥܕܢܐ ܕܡܕܢܚ ܐܦܪܝܩܐ + + + + + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܬܝܡܢ ܐܦܪܝܩܐ + + + + + ܥܕܢܐ ܕܡܥܪܒ ܐܦܪܝܩܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܡܥܪܒ ܐܦܪܝܩܐ + ܥܕܢܐ ܩܝܬܝܬܐ ܕܡܥܪܒ ܐܦܪܝܩܐ + + + + + ܥܕܢܐ ܕܐܠܐܣܟܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܠܐܣܟܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܠܐܣܟܐ + + + + + ܥܕܢܐ ܕܐܠܡܐܬܝ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܠܡܐܬܝ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܠܡܐܬܝ + + + + + ܥܕܢܐ ܕܐܡܙܢ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܡܙܢ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܡܙܢ + + + + + ܥܕܢܐ ܡܨܥܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܡܨܥܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܡܨܥܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + + + + + ܥܕܢܐ ܡܕܢܚܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܡܕܢܚܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܡܕܢܚܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + + + + + ܥܕܢܐ ܛܘܪܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܛܘܪܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܛܘܪܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + + + + + ܥܕܢܐ ܫܝܢܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܫܝܢܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܫܝܢܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + + + + + ܥܕܢܐ ܕܐܢܐܕܝܪ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܢܐܕܝܪ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܢܐܕܝܪ + + + + + ܥܕܢܐ ܕܐܦܝܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܦܝܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܦܝܐ + + + + + ܥܕܢܐ 
ܐܟܬܐܘ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܟܬܐܘ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܟܬܐܘ + + + + + ܥܕܢܐ ܕܐܟܬܘܒ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܟܬܘܒ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܟܬܘܒ + + + + + ܥܕܢܐ ܕܐܪܒܝܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܪܒܝܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܪܒܝܐ + + + + + ܥܕܢܐ ܕܐܪܓܢܬܝܢܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܪܓܢܬܝܢܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܪܓܢܬܝܢܐ + + + + + ܥܕܢܐ ܕܐܪܓܢܬܝܢܐ ܡܥܪܒܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܪܓܢܬܝܢܐ ܡܥܪܒܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܪܓܢܬܝܢܐ ܡܥܪܒܝܬܐ + + + + + ܥܕܢܐ ܕܐܪܡܢܝܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܪܡܢܝܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܪܡܢܝܐ + + + + + ܥܕܢܐ ܐܛܠܢܛܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܐܛܠܢܛܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܐܛܠܢܛܝܬܐ ܕܐܡܪܝܟܐ ܓܪܒܝܝܬܐ + + + + + ܥܕܢܐ ܕܐܘܣܬܪܠܝܐ ܡܨܥܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܘܣܬܪܠܝܐ ܡܨܥܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܘܣܬܪܠܝܐ ܡܨܥܝܬܐ + + + + + ܥܕܢܐ ܡܥܪܒܝܬܐ ܡܨܥܝܬܐ ܕܐܘܣܬܪܠܝܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܡܥܪܒܝܬܐ ܡܨܥܝܬܐ ܕܐܘܣܬܪܠܝܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܡܥܪܒܝܬܐ ܡܨܥܝܬܐ ܕܐܘܣܬܪܠܝܐ + + + + + ܥܕܢܐ ܕܐܘܣܬܪܠܝܐ ܡܕܢܚܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܘܣܬܪܠܝܐ ܡܕܢܚܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܘܣܬܪܠܝܐ ܡܕܢܚܝܬܐ + + + + + ܥܕܢܐ ܕܐܘܣܬܪܠܝܐ ܡܥܪܒܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܘܣܬܪܠܝܐ ܡܥܪܒܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܘܣܬܪܠܝܐ ܡܥܪܒܝܬܐ + + + + + ܥܕܢܐ ܕܐܙܪܒܝܓܐܢ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܙܪܒܝܓܐܢ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܙܪܒܝܓܐܢ + + + + + ܥܕܢܐ ܕܐܙܘܪ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܙܘܪ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܙܘܪ + + + + + ܥܕܢܐ ܕܒܢܓܠܐܕܝܫ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܒܢܓܠܐܕܝܫ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܒܢܓܠܐܕܝܫ + + + + + ܥܕܢܐ ܕܒܘܬܐܢ + + + + + ܥܕܢܐ ܕܒܘܠܝܒܝܐ + + + + + ܥܕܢܐ ܕܒܪܐܣܝܠܝܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܒܪܐܣܝܠܝܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܒܪܐܣܝܠܝܐ + + + + + ܥܕܢܐ ܕܒܪܘܢܐܝ ܕܐܪܘܣܐܠܡ + + + + + ܥܕܢܐ ܕܟܐܦ ܒܝܪܕܝ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܟܐܦ ܒܝܪܕܝ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܟܐܦ ܒܝܪܕܝ + + + + + ܥܕܢܐ ܕܟܐܝܣܝ + + + + + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܬܫܐܡܘܪܘ + + + + + ܥܕܢܐ ܕܬܫܐܬܡ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܬܫܐܬܡ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܬܫܐܬܡ + + + + + ܥܕܢܐ ܕܬܫܝܠܝ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܬܫܝܠܝ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܬܫܝܠܝ + + + + + ܥܕܢܐ ܕܨܝܢ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܨܝܢ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܨܝܢ + + + + + ܥܕܢܐ ܕܟܘܝܒܠܣܢ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܟܘܝܒܠܣܢ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܟܘܝܒܠܣܢ + + + + + ܥܕܢܐ ܕܓܙܪܬܐ ܕܟܪܝܣܬܡܣ + + + + + ܥܕܢܐ ܕܓܙܝܖ̈ܐ ܕܟܘܟܘܣ + + + + + ܥܕܢܐ ܕܟܘܠܘܡܒܝܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܟܘܠܘܡܒܝܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܟܘܠܘܡܒܝܐ + + + + + ܥܕܢܐ ܓܙܝܪ̈ܐ ܕܟܘܟ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܓܙܝܪ̈ܐ ܕܟܘܟ 
+ ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܓܙܝܪ̈ܐ ܕܟܘܟ + + + + + ܥܕܢܐ ܕܟܘܒܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܟܘܒܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܟܘܒܐ + + + + + ܥܕܢܐ ܕܕܒܝܣ + + + + + ܥܕܢܐ ܕܕܘܡܘܢܬ ܕܐܘܪܒܝܠ + + + + + ܥܕܢܐ ܕܡܕܢܚ ܬܝܡܘܪ + + + + + ܥܕܢܐ ܕܓܙܪܬܐ ܦܨܚܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܓܙܪܬܐ ܦܨܚܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܓܙܪܬܐ ܦܨܚܐ + + + + + ܥܕܢܐ ܕܐܩܘܐܕܘܪ + + + + + ܥܕܢܐ ܕܐܘܪܘܦܐ ܡܨܥܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܘܪܘܦܐ ܡܨܥܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܘܪܘܦܐ ܡܨܥܝܬܐ + + + + + ܥܕܢܐ ܕܐܘܪܘܦܐ ܡܕܢܚܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܘܪܘܦܐ ܡܕܢܚܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܘܪܘܦܐ ܡܕܢܚܝܬܐ + + + + + ܥܕܢܐ ܕܐܘܪܘܦܐ (ܗܡ ܡܕܢܚܐ) + + + + + ܥܕܢܐ ܕܐܘܪܘܦܐ ܡܥܪܒܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܘܪܘܦܐ ܡܥܪܒܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܘܪܘܦܐ ܡܥܪܒܝܬܐ + + + + + ܥܕܢܐ ܕܓܙܝܪ̈ܐ ܕܦܠܟܠܢܕ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܓܙܝܪ̈ܐ ܕܦܠܟܠܢܕ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܓܙܝܪ̈ܐ ܕܦܠܟܠܢܕ + + + + + ܥܕܢܐ ܕܦܝܓܝ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܦܝܓܝ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܦܝܓܝ + + + + + ܥܕܢܐ ܕܓܘܝܐܢܐ ܦܪܢܣܝܬܐ + + + + + ܥܕܢܐ ܕܦܪܢܣܐ ܬܝܡܢܝܬܐ ܘܐܢܬܪܬܝܟܐ + + + + + ܥܕܢܐ ܕܓܐܠܦܐܓܘܣ + + + + + ܥܕܢܐ ܕܓܡܒܝܪ + + + + + ܥܕܢܐ ܕܓܘܪܓܝܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܓܘܪܓܝܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܓܘܪܓܝܐ + + + + + ܥܕܢܐ ܕܓܙܝܪ̈ܐ ܕܓܝܠܒܪܬ + + + + + ܥܕܢܐ ܕܓܪܝܢܟ + + + + + ܥܕܢܐ ܕܡܕܢܚ ܓܪܝܢܠܢܕ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܡܕܢܚ ܓܪܝܢܠܢܕ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܡܕܢܚ ܓܪܝܢܠܢܕ + + + + + ܥܕܢܐ ܕܓܪܝܢܠܢܕ ܕܡܥܪܒܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܓܪܝܢܠܢܕ ܕܡܥܪܒܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܓܪܝܢܠܢܕ ܕܡܥܪܒܝܬܐ + + + + + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܓܘܐܡ + + + + + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܡܥܠܢܐ + + + + + ܥܕܢܐ ܕܓܘܝܐܢܐ + + + + + ܥܕܢܐ ܕܗܐܘܐܝܝ ܐܠܘܫܝܢ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܗܐܘܐܝܝ ܐܠܘܫܝܢ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܗܐܘܐܝܝ ܐܠܘܫܝܢ + + + + + ܥܕܢܐ ܕܗܘܢܓ ܟܘܢܓ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܗܘܢܓ ܟܘܢܓ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܗܘܢܓ ܟܘܢܓ + + + + + ܕܗܘܒܕ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܗܘܒܕ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܗܘܒܕ + + + + + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܗܢܕܘ + + + + + ܥܕܢܐ ܕܐܘܩܝܢܘܣ ܗܢܕܘܝܐ + + + + + ܥܕܢܐ ܕܗܢܕܘܨܝܢ + + + + + ܥܕܢܐ ܕܗܢܕܘܨܝܢ ܡܨܥܝܬܐ + + + + + ܥܕܢܐ ܕܗܢܕܘܨܝܢ ܡܕܢܚܝܬܐ + + + + + ܥܕܢܐ ܕܗܢܕܘܨܝܢ ܡܥܪܒܝܬܐ + + + + + ܥܕܢܐ ܕܐܝܪܐܢ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܝܪܐܢ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܝܪܐܢ + + + + + ܥܕܢܐ ܕܐܝܪܟܘܬܣܟ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܝܪܟܘܬܣܟ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܝܪܟܘܬܣܟ + + + + + ܥܕܢܐ ܕܐܝܣܪܐܝܠ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܝܣܪܐܝܠ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܝܣܪܐܝܠ + + + + + ܥܕܢܐ ܕܝܦܢ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܝܦܢ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ 
ܕܝܦܢ + + + + + ܥܕܢܐ ܕܦܝܬܪܘܦܒܠܒܣܟܝ-ܟܐܡܟܬܣܟܝ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܦܝܬܪܘܦܒܠܒܣܟܝ-ܟܐܡܟܬܣܟܝ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܦܝܬܪܘܦܒܠܒܣܟܝ-ܟܐܡܟܬܣܟܝ + + + + + ܥܕܢܐ ܕܡܕܢܚ ܟܙܩܣܬܐܢ + + + + + ܥܕܢܐ ܕܡܥܪܒ ܟܙܩܣܬܐܢ + + + + + ܥܕܢܐ ܕܟܘܪܝܝܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܟܘܪܝܝܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܟܘܪܝܝܐ + + + + + ܥܕܢܐ ܟܘܣܪܐܝ + + + + + ܥܕܢܐ ܕܟܪܐܣܢܘܝܪܣܟ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܟܪܐܣܢܘܝܪܣܟ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܟܪܐܣܢܘܝܪܣܟ + + + + + ܥܕܢܐ ܕܩܝܪܓܝܙܣܬܐܢ + + + + + ܥܕܢܐ ܕܠܐܢܟܐ + + + + + ܥܕܢܐ ܕܓܙܝܪ̈ܐ ܕܠܐܝܢ + + + + + ܥܕܢܐ ܕܠܘܪܕ ܗܐܘ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܠܘܪܕ ܗܐܘ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܠܘܪܕ ܗܐܘ + + + + + ܥܕܢܐ ܕܡܐܟܐܘ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܡܐܟܐܘ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܡܐܟܐܘ + + + + + ܥܕܢܐ ܕܓܙܪܬܐ ܡܐܟܐܘܪܝ + + + + + ܥܕܢܐ ܕܡܐܓܐܕܐܢ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܡܐܓܕܐܢ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܡܐܓܕܐܢ + + + + + ܥܕܢܐ ܕܡܠܝܙܝܐ + + + + + ܥܕܢܐ ܕܓܙܪܬܐ ܡܐܠܕܝܒܝܬܐ + + + + + ܥܕܢܐ ܕܡܐܪܟܐܘܣܐܣ + + + + + ܥܕܢܐ ܕܓܙܝܪ̈ܐ ܕܡܐܪܫܐܠ + + + + + ܥܕܢܐ ܕܡܘܪܝܛܝܘܣ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܡܘܪܝܛܝܘܣ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܡܘܪܝܛܝܘܣ + + + + + ܥܕܢܐ ܕܡܐܘܣܘܢ + + + + + ܥܕܢܐ ܕܓܪܒܝ ܡܥܪܒܝܬܐ ܡܟܣܝܩܘ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܓܪܒܝ ܡܥܪܒܝܬܐ ܡܟܣܝܩܘ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܓܪܒܝ ܡܥܪܒܝܬܐ ܡܟܣܝܩܘ + + + + + ܥܕܢܐ ܕܡܟܣܝܩܘ ܫܝܢܝܬܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܡܟܣܝܩܘ ܫܝܢܝܬܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܡܟܣܝܩܘ ܫܝܢܝܬܐ + + + + + ܥܕܢܐ ܕܐܘܠܐܢܒܐܬܘܪ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܘܠܐܢܒܐܬܘܪ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܘܠܐܢܒܐܬܘܪ + + + + + ܥܕܢܐ ܕܡܘܣܟܘ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܡܘܣܟܘ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܡܘܣܟܘ + + + + + ܥܕܢܐ ܕܡܝܐܢܡܐܪ + + + + + ܥܕܢܐ ܕܢܐܘܪܘ + + + + + ܥܕܢܐ ܕܢܝܦܐܠ + + + + + ܥܕܢܐ ܕܢܝܘ ܟܠܝܕܘܢܝܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܢܝܘ ܟܠܝܕܘܢܝܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܢܝܘ ܟܠܝܕܘܢܝܐ + + + + + ܥܕܢܐ ܕܢܝܘ ܙܝܠܢܕ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܢܝܘ ܙܝܠܢܕ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܢܝܘ ܙܝܠܢܕ + + + + + ܥܕܢܐ ܕܢܝܘܦܐܘܢܠܢܕ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܢܝܘܦܐܘܢܠܢܕ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܢܝܘܦܐܘܢܠܢܕ + + + + + ܥܕܢܐ ܕܢܝܘܝ + + + + + ܥܕܢܐ ܕܓܙܪܬܐ ܕܢܘܪܦܠܟ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܓܙܪܬܐ ܕܢܘܪܦܠܟ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܓܙܪܬܐ ܕܢܘܪܦܠܟ + + + + + ܥܕܢܐ ܕܦܪܢܢܕܘ ܕܢܘܪܘܢܗܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܦܪܢܢܕܘ ܕܢܘܪܘܢܗܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܦܪܢܢܕܘ ܕܢܘܪܘܢܗܐ + + + + + ܥܕܢܐ ܕܓܙܝܖ̈ܐ ܕܡܪܝܐܢܐ ܓܪܒܝܝܬܐ + + + + + ܥܕܢܐ ܕܢܘܒܘܣܝܒܪܣܟ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܢܘܒܘܣܝܒܪܣܟ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܢܘܒܘܣܝܒܪܣܟ + + + + + ܥܕܢܐ ܕܘܡܣܟ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܘܡܣܟ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܘܡܣܟ 
+ + + + + ܥܕܢܐ ܕܦܐܟܣܬܐܢ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܦܐܟܣܬܐܢ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܦܐܟܣܬܐܢ + + + + + ܥܕܢܐ ܕܦܠܐܘ + + + + + ܥܕܢܐ ܕܦܐܦܘܐ ܓܝܢܝܐ ܚܕܬܐ + + + + + ܥܕܢܐ ܕܦܪܓܘܐܝ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܦܪܓܘܐܝ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܦܪܓܘܐܝ + + + + + ܥܕܢܐ ܕܦܝܪܘ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܦܝܪܘ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܦܝܪܘ + + + + + ܥܕܢܐ ܕܦܝܠܝܦܝܢܝܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܦܝܠܝܦܝܢܝܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܦܝܠܝܦܝܢܝܐ + + + + + ܥܕܢܐ ܕܓܙܝܪ̈ܐ ܕܦܝܢܝܟܣ + + + + + ܥܕܢܐ ܕܣܐܢܬ ܦܝܥܪ ܘܡܩܘܠܘܢ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܣܐܢܬ ܦܝܥܪ ܘܡܩܘܠܘܢ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܣܐܢܬ ܦܝܥܪ ܘܡܩܘܠܘܢ + + + + + ܥܕܢܐ ܕܦܝܬܟܐܝܪܢ + + + + + ܥܕܢܐ ܕܦܘܢܐܦܝ + + + + + ܥܕܢܐ ܕܦܝܘܢܓܝܢܓ + + + + + ܥܕܢܐ ܕܟܝܙܝܠܘܪܕܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܟܝܙܝܠܘܪܕܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܟܝܙܝܠܘܪܕܐ + + + + + ܥܕܢܐ ܕܪܝܘܢܝܘܢ + + + + + ܥܕܢܐ ܕܪܘܬܝܪܐ + + + + + ܥܕܢܐ ܕܣܐܚܐܠܝܢ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܣܐܚܐܠܝܢ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܣܐܚܐܠܝܢ + + + + + ܥܕܢܐ ܕܣܡܐܪܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܣܡܐܪܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܣܡܐܪܐ + + + + + ܥܕܢܐ ܕܣܡܘܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܣܡܘܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܣܡܘܐ + + + + + ܥܕܢܐ ܕܣܐܝܫܝܠ + + + + + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܣܝܢܓܐܦܘܪ + + + + + ܥܕܢܐ ܕܓܙܝܪ̈ܐ ܕܫܠܝܡܘܢ + + + + + ܥܕܢܐ ܕܓܝܘܪܓܝܐ ܬܝܡܢܝܬܐ + + + + + ܥܕܢܐ ܕܣܘܪܝܢܐܡ + + + + + ܥܕܢܐ ܕܣܝܘܐ + + + + + ܥܕܢܐ ܕܬܗܝܬܝ + + + + + ܥܕܢܐ ܕܬܐܝܦܐܝ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܬܐܝܦܐܝ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܬܐܝܦܐܝ + + + + + ܥܕܢܐ ܕܬܐܓܝܟܣܬܐܢ + + + + + ܥܕܢܐ ܕܬܘܟܝܠܐܘ + + + + + ܥܕܢܐ ܬܘܢܓܐ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܬܘܢܓܐ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܬܘܢܓܐ + + + + + ܥܕܢܐ ܕܬܫܘܟ + + + + + ܥܕܢܐ ܕܬܘܪܟܡܢܣܬܐܢ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܬܘܪܟܡܢܣܬܐܢ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܬܘܪܟܡܢܣܬܐܢ + + + + + ܥܕܢܐ ܕܬܘܒܐܠܘ + + + + + ܥܕܢܐ ܕܐܘܪܘܓܘܐܝ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܘܪܘܓܘܐܝ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܘܪܘܓܘܐܝ + + + + + ܥܕܢܐ ܕܐܘܙܒܟܣܬܐܢ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܐܘܙܒܟܣܬܐܢ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܐܘܙܒܟܣܬܐܢ + + + + + ܥܕܢܐ ܕܒܐܢܘܐܛܘ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܒܐܢܘܐܛܘ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܒܐܢܘܐܛܘ + + + + + ܥܕܢܐ ܕܒܢܙܘܝܠܐ + + + + + ܥܕܢܐ ܕܒܠܐܕܝܒܘܣܬܘܟ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܒܠܐܕܝܒܘܣܬܘܟ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܒܠܐܕܝܒܘܣܬܘܟ + + + + + ܥܕܢܐ ܕܒܘܠܓܘܓܪܐܕ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܒܘܠܓܘܓܪܐܕ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܒܘܠܓܘܓܪܐܕ + + + + + ܥܕܢܐ ܕܒܘܣܬܘܟ + + + + + ܥܕܢܐ ܕܓܙܝܪ̈ܐ ܕܘܐܝܟ + + + + + ܥܕܢܐ ܕܘܝܠܝܣ ܘܦܘܬܘܢܐ + + + + + ܥܕܢܐ ܕܝܐܟܘܬܣܟ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܝܐܟܘܬܣܟ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܝܐܟܘܬܣܟ 
+ + + + + ܥܕܢܐ ܕܝܟܐܬܝܪܢܒܝܪܓ + ܥܕܢܐ ܡܫܘܚܬܢܝܬܐ ܕܝܟܐܬܝܪܢܒܝܪܓ + ܥܕܢܐ ܕܒܗܪ ܝܘܡܐ ܕܝܟܐܬܝܪܢܒܝܪܓ + + + + + ܥܕܢܐ ܕܝܘܩܘܢ + + + + + + + : + + + ܠܝܬ ܡܢܝܢܐ + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + ܠܝܬ ܡܢܝܢܐ + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + : + + + + + #,##0.###;#,##0.###- + + + + + + + ¤ #,##0.00;¤ #,##0.00- + + + + + + + ¤ #,##0.00 + + + {0} {1} + {0} {1} + + + + ل.س.‏ + + + + ܝܠܐ ܝܠܗ ܥܬܝܕܐ + ܝܠ̈ܐ ܝܢܐ ܥܬܝܕ̈ܐ <RLM>{0} + ܫܩܘܠ ܦܬܠܐ ܕ{0} ܝܡܝܢܐ + + + + + + ܕܝܣܝ{0} + + + ܣܢܬܝ{0} + + + ܡܝܠܝ{0} + + + ܡܝܟܪܘ{0} + + + ܢܐܢܘ{0} + + + ܦܝܟܘ{0} + + + ܦ̮ܝܡܬܘ{0} + + + ܐܬܘ{0} + + + ܙܝܦܬܘ{0} + + + ܝܟܬܘ{0} + + + ܕܝܟܐ{0} + + + ܗܟܬܘ{0} + + + ܟܝܠܘ{0} + + + ܡܝܓܐ{0} + + + ܓܝܓܐ{0} + + + ܬܝܪܐ{0} + + + ܦܝܬܐ{0} + + + ܐܟܣܐ{0} + + + ܙܝܬܐ{0} + + + ܝܘܬܐ{0} + + + ܟܝܒܝ{0} + + + ܡܝܒܝ{0} + + + ܓܝܒܝ{0} + + + ܬܝܒܝ{0} + + + ܦܝܒܝ{0} + + + ܐܟܣܒܝ{0} + + + ܙܝܒܝ{0} + + + ܝܘܒܝ{0} + + + {0}/{1} + + + {0} ܡܪܒܥܐ + {0} ܡܪܒܥܐ + + + {0} ܡܩܦܣܐ + {0} ܡܩܦܣܐ + + + {0}⋅{1} + + + ܚܝܠܐ ܢܬܘܦܘܬܐ + {0} ܚܝܠܐ ܢܬܘܦܘܬܐ + {0} ܚܝܠܐ ܢܬܘܦܘܬܐ + + + ܡܝܬܪ̈ܐ ܒܪܦܦܐ ܡܪܒܥܐ + {0} ܡܝܬܪܐ ܒܪܦܦܐ ܡܪܒܥܐ + {0} ܡܝܬܪ̈ܐ ܒܪܦܦܐ ܡܪܒܥܐ + + + ܚܘܕܖ̈ܐ + {0} ܚܘܕܪܐ + {0} ܚܘܕܖ̈ܐ + + + ܪܐܕܝܐܢ + {0} ܪܐܕܝܐܢ + {0} ܪܐܕܝܐܢ + + + ܕܪ̈ܓ݂ܐ + ܕܪܓ݂ܐ + {0} ܕܪ̈ܓ݂ܐ + + + ܩܛܝ̈ܢܬܐ ܩܫܬܢܝܬܐ + ܩܛܝܢܐ ܩܫܬܢܝܐ + {0} ܩܛܝ̈ܢܬܐ ܩܫܬܢܝܬܐ + + + ܪ̈ܦܦܐ ܩܫܬܢܝܐ + {0} ܪ̈ܦܦܐ ܩܫܬܢܝܐ + {0} ܪ̈ܦܦܐ ܩܫܬܢ̈ܝܐ + + + ܟܝܠܘܡܝܬܪ̈ܐ ܡܪܒܥܐ + {0} ܟܝܠܘܡܝܬܪܐ ܡܪܒܥܐ + {0} ܟܝܠܘܡܝܬܪ̈ܐ ܡܪܒܥܐ + {0} ܒܟܝܠܘܡܝܬܪܐ ܡܪܒܥܐ + + + ܗܟܬܪ + {0} ܗܟܬܪ + {0} ܗܟܬܪ + + + ܡܝܬܪ̈ܐ ܡܪܒܥܐ + {0} ܡܝܬܪܐ ܡܪܒܥܐ + {0} ܡܝܬܪ̈ܐ ܡܪܒܥܐ + {0} ܒܡܝܬܪܐ ܡܪܒܥܐ + + + ܣܢܬܝܡܝܬܪ̈ܐ ܡܪܒܥܐ + {0} ܣܢܬܝܡܝܬܪܐ ܡܪܒܥܐ + {0} ܣܢܬܝܡܝܬܪ̈ܐ ܡܪܒܥܐ + {0} ܒܣܢܬܝܡܝܬܪܐ ܡܪܒܥܐ + + + ܡܝܠ̈ܐ ܡܪܒܥܐ + {0} ܡܝܠ̈ܐ ܡܪܒܥܐ + {0} ܡܝܠ̈ܐ ܡܪܒܥܐ + {0} ܒܡܝܠ̈ܐ ܡܪܒܥܐ + + + ܦ̈ܕܢܐ + {0} ܦܕܢܐ + {0} ܦ̈ܕܢܐ + + + ܝܪ̈ܕܐ ܡܪܒܥܐ + {0} ܝܪܕܐ ܡܪܒܥܐ + {0} ܝܪ̈ܕܐ ܡܪܒܥܐ + 
+ + ܐܩܠ̈ܐ ܡܪܒܥܐ + {0} ܐܩܠܐ ܡܪܒܥܐ + {0} ܐܩܠ̈ܐ ܡܪܒܥܐ + + + ܐܢܟ̈ܐ ܡܪܒܥܐ + {0} ܐܢܟ ܡܪܒܥܐ + {0} ܐܢܟ̈ܐ ܡܪܒܥܐ + {0} ܒܐܢܟ ܡܪܒܥܐ + + + ܩܪ̈ܛܐ + ܩܪܛܐ + {0} ܩܪ̈ܛܐ + + + ܡܝܠܝܓܪ̈ܡܐ ܒܕܝܣܝܠܝܬܪܐ + {0} ܡܝܠܝܓܪܡܐ ܒܕܝܣܝܠܝܬܪ + {0} ܡܝܠܝܓܪ̈ܡܐ ܒܕܝܣܝܠܝܬܪܐ + + + ܡܝܠܝܡܘܠ ܒܠܝܬܪܐ + {0} ܡܝܠܝܡܘܠ ܒܠܝܬܪܐ + {0} ܡܝܠܝܡܘܠ ܒܠܝܬܪܐ + + + ܡܠܘܐ̈ܐ + ܚܕ ܡܠܘܐܐ + {0} ܡܠܘܐ̈ܐ + + + ܡܢܘ̈ܬܐ ܒܡܠܝܘܢ + {0} ܡܢܬܐ ܒܡܠܝܘܢ + {0} ܡܢܘ̈ܬܐ ܒܡܠܝܘܢ + + + ܒܡܐܐ + {0} ܒܡܐܐ + {0} ܒܡܐܐ + + + ܒܐܠܦܐ + {0} ܒܐܠܦܐ + {0} ܒܐܠܦܐ + + + + {0}‱ + {0}‱ + + + ܡܘܠ + {0} ܡܘܠ + {0} ܡܘܠ + + + ܠܝܬܪ̈ܐ ܒܟܝܠܘܡܝܬܪܐ + {0} ܠܝܬܪܐ ܒܟܝܠܘܡܝܬܪܐ + {0} ܠܝܬܪ̈ܐ ܒܟܝܠܘܡܝܬܪܐ + + + ܠܝܬܪ̈ܐ ܒ 100 ܟܝܠܘܡܝܬܪ̈ܐ + {0} ܠܝܬܪܐ ܒ 100 ܟܝܠܘܡܝܬܪ̈ܐ + {0} ܠܝܬܪ̈ܐ ܒ 100 ܟܝܠܘܡܝܬܪ̈ܐ + + + ܡܝܠ̈ܐ ܒܓܠܘܢܐ + {0} ܡܝܠܐ ܒܓܠܘܢܐ + {0} ܡܝܠ̈ܐ ܒܓܠܘܢܐ + + + ܡܝܠ̈ܐ ܒܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + {0} ܡܝܠܐ ܒܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + {0} ܡܝܠ̈ܐ ܒܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + + + ܦܝܬܐܒܐܝܬ + {0} ܦܝܬܐܒܐܝܬ + {0} ܦܝܬܐܒܐܝܬ + + + ܬܝܪܐܒܐܝܬ + {0} ܬܝܪܐܒܐܝܬ + {0} ܬܝܪܐܒܐܝܬ + + + ܬܝܪܐܒܬ + {0} ܬܝܪܐܒܬ + {0} ܬܝܪܐܒܬ + + + ܓܝܓܐܒܐܝܬ + {0} ܓܝܓܐܒܐܝܬ + {0} ܓܝܓܐܒܐܝܬ + + + ܓܝܓܐܒܬ + {0} ܓܝܓܐܒܬ + {0} ܓܝܓܐܒܬ + + + ܡܝܓܐܒܐܝܬ + {0} ܡܝܓܐܒܐܝܬ + {0} ܡܝܓܐܒܐܝܬ + + + ܡܝܓܐܒܬ + {0} ܡܝܓܐܒܬ + {0} ܡܝܓܐܒܬ + + + ܟܝܠܘܒܐܝܬ + {0} ܟܝܠܘܒܐܝܬ + {0} ܟܝܠܘܒܐܝܬ + + + ܟܝܠܘܒܬ + {0} ܟܝܠܘܒܬ + {0} ܟܝܠܘܒܬ + + + ܒܐܝܬ + {0} ܒܐܝܬ + {0} ܒܐܝܬ + + + ܒܬ + {0} ܒܬ + {0} ܒܬ + + + ܕܪ̈ܐ + ܕܪܐ + {0} ܕܪ̈ܐ + + + ܥܣܝܪ̈ܘܬܐ + ܥܣܝܪܘܬܐ + {0} ܥܣܝܪ̈ܘܬܐ + + + ܫ̈ܢܐ + ܫܢܬܐ + {0} ܫ̈ܢܐ + {0} ܒܫܢܬܐ + + + ܪ̈ܘܒܥܐ + ܪܘܒܥܐ + {0} ܪ̈ܘܒܥܐ + {0}/ܪܘܒܥܐ + + + ܝܪ̈ܚܐ + ܝܪܚܐ + {0} ܝܪ̈ܚܐ + {0} ܒܝܪܚܐ + + + ܫܒ̈ܘܥܐ + ܫܒ݂ܘܥܐ + {0} ܫܒ݂̈ܘܥܐ + {0} ܒܫܒ݂ܘܥܐ + + + ܝܘ̈ܡܬܐ + ܝܘܡܐ + {0} ܝܘ̈ܡܬܐ + {0} ܒܝܘܡܐ + + + ܫ̈ܥܐ + ܫܥܬܐ + {0} ܫ̈ܥܐ + {0} ܒܫܥܬܐ + + + ܩܛܝܢ̈ܬܐ + ܩܛܝܢܐ + {0} ܩܛܝܢ̈ܬܐ + {0} ܒܩܛܝܢܐ + + + ܪ̈ܦܦܐ + ܪܦܦܐ + {0} ܪ̈ܦܦܐ + {0} ܒܪܦܦܐ + + + ܡܝܠܝܪ̈ܦܦܐ + {0} ܡܝܠܝܪܦܦܐ + {0} ܡܝܠܝܪ̈ܦܦܐ + + + ܡܝܟܪܘܪ̈ܦܦܐ + {0} ܡܝܟܪܘܪܦܦܐ + {0} ܡܝܟܪܘܪ̈ܦܦܐ + + + ܢܐܢܘܪ̈ܦܦܐ + {0} ܢܐܢܘܪܦܦܐ + {0} ܢܐܢܘܪ̈ܦܦܐ + + + ܐܡܦܝܪ + {0} ܐܡܦܝܪ + {0} ܐܡܦܝܪ + + + ܡܝܠܝܐܡܦܝܪ + {0} ܡ ܐܡܦܝܪ + {0} ܡ ܐܡܦܝܪ + + + ܘܗܡ + {0} ܘܗܡ + {0} ܘܗܡ + + + ܒܘܠܬ + {0} ܒܘܠܬ + {0} ܒܘܠܬ + + + ܟ ܟܐܠܘܪܝ + {0} ܟ ܟܐܠܘܪܝ + {0} 
ܟܝܠܘܟܐܠܘܪܝ + + + ܟܐܠ + {0} ܟܐܠ + {0} ܟܐܠ + + + ܟܝܠܘܓܝܐܘܠ + {0} ܟܝܠܘܓܝܐܘܠ + {0} ܟܝܠܘܓܝܐܘܠ + + + ܓܝܐܘܠ + {0} ܓܝܐܘܠ + {0} ܓܝܐܘܠ + + + ܟܝܠܘܘܐܬ-ܫܥ̈ܐ + {0} ܟܝܠܘܘܐܬ-ܫܥܬܐ + {0} ܟܝܠܘܘܐܬ-ܫܥ̈ܐ + + + ܐܠܝܟܬܪܘܢ ܒܘܠܬ + {0} ܐܠܝܟܬܪܘܢ ܒܘܠܬ + {0} ܐܠܝܟܬܪܘܢ ܒܘܠܬ + + + ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܒܪܝܛܢܝܐ + {0} ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܒܪܝܛܢܝܐ + {0} ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܒܪܝܛܢܝܐ + + + ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܐܡܪܝܟܐ + {0} ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܐܡܪܝܟܐ + {0} ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܐܡܪܝܟܐ + + + ܢܝܘܬܢ + {0} ܢܝܘܬܢ + {0} ܢܝܘܬܢ + + + ܟܝܠܘܘܐܬ-ܫܥ̈ܐ ܒ 100 ܟܝܠܘܡܝܬܪ̈ܐ + {0} ܟܝܠܘܘܐܬ-ܫܥܬܐ ܒ 100 ܟܝܠܘܡܝܬܪ̈ܐ + {0} ܟܝܠܘܘܐܬ-ܫܥ̈ܐ ܒ 100 ܟܝܠܘܡܝܬܪ̈ܐ + + + ܓܝܓܐܗܪܬܙ + {0} ܓܝܓܐܗܪܬܙ + {0} ܓܝܓܐܗܪܬܙ + + + ܡܝܓܐܗܪܬܙ + {0} ܡܝܓܐܗܪܬܙ + {0} ܡܝܓܐܗܪܬܙ + + + ܟܝܠܘܗܪܬܙ + {0} ܟܝܠܘܗܪܬܙ + {0} ܟܝܠܘܗܪܬܙ + + + ܗܪܬܙ + {0} ܗܪܬܙ + {0} ܗܪܬܙ + + + ܦܝ̈ܟܣܠܐ + {0} ܦܝܟܣܠܐ + {0} ܦܝ̈ܟܣܠܐ + + + ܡܝܓܐܦܝ̈ܟܣܠܐ + {0} ܡܝܓܐܦܝܟܣܠܐ + {0} ܡܝܓܐܦܝ̈ܟܣܠܐ + + + ܦܝ̈ܟܣܠܐ ܒܣܢܬܝܡܝܬܪܐ + {0} ܦܝܟܣܠܐ ܒܣܢܬܝܡܝܬܪܐ + {0} ܦܝ̈ܟܣܠܐ ܒܣܢܬܝܡܝܬܪܐ + + + ܦܝ̈ܟܣܠܐ ܒܐܢܟ + {0} ܦܝܟܣܠܐ ܒܐܢܟ + {0} ܦܝ̈ܟܣܠܐ ܒܐܢܟ + + + ܥܘܒܐ ܦܫܝܬܐ ܐܪܥܝܐ + {0} ܥܘܒܐ ܦܫܝܬܐ ܐܪܥܝܐ + {0} ܥܘܒܐ ܦܫܝܬܐ ܐܪܥܝܐ + + + ܟܝܠܘܡܝܬܪ̈ܐ + {0} ܟܝܠܘܡܝܬܪܐ + {0} ܟܝܠܘܡܝܬܪ̈ܐ + {0} ܒܟܝܠܘܡܝܬܪܐ + + + ܡܝܬܪ̈ܐ + ܡܝܬܪܐ + {0} ܡܝܬܪ̈ܐ + {0} ܒܡܝܬܪܐ + + + ܕܝܣܝܡܝܬܪ̈ܐ + {0} ܕܝܣܝܡܝܬܪܐ + {0} ܕܝܣܝܡܝܬܪ̈ܐ + + + ܣܢܬܝܡܝܬܪ̈ܐ + {0} ܣܢܬܝܡܝܬܪܐ + {0} ܣܢܬܝܡܝܬܪ̈ܐ + {0} ܒܣܢܬܝܡܝܬܪܐ + + + ܡܝܠܝܡܝܬܪ̈ܐ + {0} ܡܝܠܝܡܝܬܪܐ + {0} ܡܝܠܝܡܝܬܪ̈ܐ + + + ܡܝܟܪܘܡܝܬܪ̈ܐ + {0} ܡܟܡ + {0}ܡܟܡ + + + ܢܐܢܘܡܝܬܪ̈ܐ + {0} ܢܐܢܘܡܝܬܪܐ + {0} ܢܐܢܘܡܝܬܪ̈ܐ + + + ܦܝܟܘܡܝܬܪ̈ܐ + {0} ܦܝܟܘܡܝܬܪܐ + {0} ܦܝܟܘܡܝܬܪ̈ܐ + + + ܡܝܠ̈ܐ + {0} ܡܝܠܐ + {0} ܡܝܠ̈ܐ + + + ܝܪܕ̈ܐ + {0} ܝܪܕ̈ܐ + {0} ܝܪܕ̈ܐ + + + ܐܩܠ̈ܐ + {0} ܐܩܠܐ + {0} ܐܩܠ̈ܐ + {0}/ܐܩܠܐ + + + ܐܢܟ̈ܐ + ܚܕܐ ܐܢܟ + {0} ܐܢܟ̈ܐ + {0}/ܐܢܟ + + + ܦܪ̈ܣܚܐ + {0} ܦܪܣܚܐ + {0} ܦܪ̈ܣܚܐ + + + ܫ̈ܢܐ ܕܢܘܗܪܐ + {0} ܫܢܬܐ ܕܢܘܗܪܐ + {0} ܫ̈ܢܐ ܕܢܘܗܪܐ + + + ܡܫܘܚܬܐ ܪܩܝܥܝܬܐ + {0} ܡܫܘܚܬܐ ܪܩܝܥܝܬܐ + {0} ܡܫܘܚܬܐ ܪܩܝܥܝܬܐ + + + ܦܘܪ̈ܠܢܓ + {0} ܦܘܪܠܢܓ + {0} ܦܘܪ̈ܠܢܓ + + + ܡܝ̈ܠܐ ܝܡܝ̈ܐ + {0} ܡܝܠܐ ܝܡܝܐ + {0} ܡܝ̈ܠܐ ܝܡܝ̈ܐ + + + ܡܝ̈ܠܐ-ܐܣܟܢܕܝܢܒܝܝܢ + {0} ܡܝ̈ܠܐ-ܐܣܟܢܕܝܢܒܝܝܢ + {0} ܡܝ̈ܠܐ-ܐܣܟܢܕܝܢܒܝܝܢ + + + ܢܘܩܙ̈ܐ + {0} ܢܘܩܙܐ + {0} ܢܘܩܙ̈ܐ + + + ܥܘܒܐ ܦܫܝܬܐ ܫܡܫܝܐ + {0} 
ܥܘܒܐ ܦܫܝܬܐ ܫܡܫܝܐ + {0} ܥܘܒܐ ܦܫܝܬܐ ܫܡܫܝܐ + + + ܠܘܩܣ + {0} ܠܘܩܣ + {0} ܠܘܩܣ + + + ܢܗܝܪܐ + {0} ܢܗܝܪܐ + {0} ܢܗܝܪܐ + + + ܠܘܡܝܢ + {0} ܠܘܡܝܢ + {0} ܠܘܡܝܢ + + + ܬܘܢ ܡܝܬܪܝܐ + {0} ܬܘܢ ܡܝܬܪܝܐ + {0} ܬܘܢ̈ ܡܝܬܪ̈ܝܐ + + + ܟܝܠܘܓܪ̈ܡܐ + {0} ܟܝܠܘܓܪܡܐ + {0} ܟܝܠܘܓܪ̈ܡܐ + {0} ܒܟܝܠܘܓܪܡܐ + + + ܓܪ̈ܡܐ + ܓܪܡܐ + {0} ܓܪ̈ܡܐ + {0} ܒܓܪܡܐ + + + ܡܝܠܝܓܪ̈ܡܐ + {0} ܡܝܠܝܓܪܡܐ + {0} ܡܝܠܝܓܪ̈ܡܐ + + + ܡܝܟܪܘܓܪ̈ܡܐ + {0} ܡܝܟܪܘܓܪܡܐ + {0} ܡܝܟܪܘܓܪ̈ܡܐ + + + ܬܘܢ̈ܐ + {0} ܬܘܢܐ + {0} ܬܘܢ̈ܐ + + + ܣܛܘܢ + {0} ܣܛܘܢ + {0} ܣܛܘܢ + + + ܡܢܝ̈ܐ + {0} ܡܢܝܐ + {0} ܡܢܝ̈ܐ + {0} ܒܡܢܝܐ + + + ܐܘܢܩ̈ܝܐ + {0} ܐܘܢܩܝܐ + {0} ܐܘܢܩ̈ܝܐ + {0} ܒܐܘܢܩܝܐ + + + ܩܪ̈ܛܐ + {0} ܩܪܛܐ + {0} ܩܪ̈ܛܐ + + + ܕܐܠܬܘܢ + {0} ܕܐܠܬܘܢ + {0} ܕܐܠܬܘܢ + + + ܥܘܫܢܐ ܐܪܥܝܐ + {0} ܥܘܫܢܐ ܐܪܥܝܐ + {0} ܥܘܫܢܐ ܐܪܥܝܐ + + + ܥܘܫܢܐ ܫܡܫܝܐ + {0} ܥܘܫܢܐ ܫܡܫܝܐ + {0} ܥܘܫܢܐ ܫܡܫܝܐ + + + ܦܪ̈ܕܐ + {0} ܦܪܕܐ + {0} ܦܪ̈ܕܐ + + + ܓ ܘܐܬ + {0} ܓܝܓܐܘܐܬ + {0} ܓܝܓܐܘܐܬ + + + ܡ ܘܐܬ + {0} ܡܝܓܐܘܐܬ + {0} ܡܝܓܐܘܐܬ + + + ܟ ܘܐܬ + {0} ܟܝܠܘܘܐܬ + {0} ܟܝܠܘܘܐܬ + + + ܘܐܬ + {0} ܘܐܬ + {0} ܘܐܬ + + + ܡܝܠܝܘܐܬ + {0} ܡܝܠܝܘܐܬ + {0} ܡܝܠܝܘܐܬ + + + ܚܝܠܐ ܕܣܘܣܝܐ + {0} ܣܘܣܝܐ + {0} ܣܘܣܝܐ + + + ܦܣܟ̈ܠܐ + {0} ܦܣܟ̈ܠܐ + {0} ܦܣܟ̈ܠܐ + + + ܗܟܬܘܦܣܟ̈ܠܐ + {0} ܗܟܬܘܦܣܟ̈ܠܐ + {0} ܗܟܬܘܦܣܟ̈ܠܐ + + + ܟܝܠܘܦܣܟܠ + {0} ܟܝܠܘܦܣܟܠ + {0} ܟܝܠܘܦܣܟܠ + + + ܡܝܓܐܦܣܟܠ + {0} ܡܝܓܐܦܣܟܠ + {0} ܡܝܓܐܦܣܟܠ + + + ܟܝܠܘܡܝܬܪ̈ܐ ܒܫܥܬܐ + {0} ܟܝܠܘܡܝܬܪܐ ܒܫܥܬܐ + {0} ܟܝܠܘܡܝܬܪ̈ܐ ܒܫܥܬܐ + + + ܡܝܬܪ̈ܐ ܒܪܦܦܐ + {0} ܡܝܬܪܐ ܒܪܦܦܐ + {0} ܡܝܬܪ̈ܐ ܒܪܦܦܐ + + + ܡܝܠ̈ܐ ܒܫܥܬܐ + {0} ܡܝܠܐ ܒܫܥܬܐ + {0} ܡܝܠ̈ܐ ܒܫܥܬܐ + + + ܩܛܪ̈ܐ + {0} ܩܛܪܐ + {0} ܩܛܪ̈ܐ + + + ° + {0}° + {0}° + + + ܕܪ̈ܓܐ ܡܐܢܝܐ + {0} ܕܪܓܐ ܡܐܢܝܐ + {0} ܕܪ̈ܓܐ ܡܐܢܝܐ + + + ܕܪ̈ܓܐ ܦܐܗܪܢܗܥܝܬ + {0} ܕܪܓܐ ܦܐܗܪܢܗܥܝܬ + {0} ܕܪ̈ܓܐ ܦܐܗܪܢܗܥܝܬ + + + ܕܪ̈ܓܐ ܟܠܒܝܢ + {0} ܕܪܓܐ ܟܠܒܝܢ + {0} ܕܪ̈ܓܐ ܟܠܒܝܢ + + + ܢܝܘܬܢ-ܡܝܬܪ̈ܐ + {0} ܢܝܘܬܢ-ܡܝܬܪܐ + {0} ܢܝܘܬܢ-ܡܝܬܪ̈ܐ + + + ܟܝܠܘܡܝܬܪ̈ܐ ܡܩܦܣܐ + {0} ܟܝܠܘܡܝܬܪܐ ܡܩܦܣܐ + {0} ܟܝܠܘܡܝܬܪ̈ܐ ܡܩܦܣܐ + + + ܡܝܬܪ̈ܐ ܡܩܦܣܐ + {0} ܡܝܬܪܐ ܡܩܦܣܐ + {0} ܡܝܬܪ̈ܐ ܡܩܦܣܐ + {0}/ܡܝܬܪܐ ܡܩܦܣܐ + + + ܣܢܬܝܡܝܬܪ̈ܐ ܡܩܦܣܐ + {0} ܣܢܬܝܡܝܬܪܐ ܡܩܦܣܐ + {0} ܣܢܬܝܡܝܬܪ̈ܐ ܡܩܦܣܐ + {0}/ܣܢܬܝܡܝܬܪܐ ܡܩܦܣܐ + + + ܡܝܠ̈ܐ ܡܩܦܣܐ + {0} ܡܝܠܐ ܡܩܦܣܐ + {0} ܡܝܠ̈ܐ ܡܩܦܣܐ + + + ܝܪ̈ܕܐ ܡܩܦܣܐ + {0} ܝܪܕܐ ܡܩܦܣܐ + {0} ܝܪ̈ܕܐ 
ܡܩܦܣܐ + + + ܐܩܠ̈ܐ ܡܩܦܣܐ + {0} ܐܩܠܐ ܡܩܦܣܐ + {0} ܐܩܠ̈ܐ ܡܩܦܣܐ + + + ܐܢܟ̈ܐ ܡܩܦܣܐ + ܚܕܐ ܐܢܟ ܡܩܦܣܐ + {0} ܐܢܟ̈ܐ ܡܩܦܣܐ + + + ܡܝܓܐܠܝܬܪ̈ܐ + {0} ܡܝܓܐܠܝܬܪܐ + {0} ܡܝܓܐܠܝܬܪ̈ܐ + + + ܗܟܬܘܠܝܬܪ̈ܐ + {0} ܗܟܬܘܠܝܬܪܐ + {0} ܗܟܬܘܠܝܬܪ̈ܐ + + + ܠܝܬܪ̈ܐ + {0} ܠܝܬܪܐ + {0} ܠܝܬܪ̈ܐ + {0}/ܠܝܬܪܐ + + + ܕܝܣܝܠܝܬܪ̈ܐ + {0} ܕܝܣܝܠܝܬܪܐ + {0} ܕܝܣܝܠܝܬܪ̈ܐ + + + ܣܢܬܝܠܝܬܪ̈ܐ + {0} ܣܢܬܝܠܝܬܪܐ + {0} ܣܢܬܝܠܝܬܪ̈ܐ + + + ܡܝܠܝܠܝܬܪ̈ܐ + {0} ܡܝܠܝܠܝܬܪܐ + {0} ܡܝܠܝܠܝܬܪ̈ܐ + + + mpt + {0} mpt + {0} mpt + + + ܐܩܠ̈ܐ-ܦܕܢܐ + {0} ܐܩܠܐ-ܦܕܢܐ + {0} ܐܩܠ̈ܐ-ܦܕܢܐ + + + ܓܠܘܢ̈ܐ + {0} ܓܠܘܢܐ + {0} ܓܠܘܢ̈ܐ + {0}/ܓܠܘܢܐ + + + ܓܠܘܢ̈ܐ ܐܡܦܪܬܘܪܝܐ + {0} ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + {0} ܓܠܘܢ̈ܐ ܐܡܦܪܬܘܪܝܐ + {0}/ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + + + ܪ̈ܘܒܥܐ ܓܠܘܢ̈ܐ + {0} ܪܘܒܥܐ ܓܠܘܢܐ + {0} ܪ̈ܘܒܥܐ ܓܠܘܢ̈ܐ + + + ܐܘܢܩ̈ܝܐ ܪ̈ܕܘܝܐ + {0} ܐܘܢܩܝܐ ܪܕܘܝܐ + {0} ܐܘܢܩ̈ܝܐ ܪ̈ܕܘܝܐ + + + ܐܘܢܩ̈ܝܐ ܪ̈ܕܘܝܐ ܐܡܦܪܬܘܪܝܐ + {0} ܐܘܢܩܝܐ ܪܕܘܝܐ ܐܡܦܪܬܘܪܝܐ + {0} ܐܘܢܩ̈ܝܐ ܪ̈ܕܘܝܐ ܐܡܦܪܬܘܪܝܐ + + + ܬܪ̈ܘܕܐ ܪ̈ܒܐ + {0} ܬܪܘܕܐ ܪܒܐ + {0} ܬܪ̈ܘܕܐ ܪ̈ܒܐ + + + ܬܪ̈ܘܕܐ ܙܥܘܪ̈ܐ + {0} ܬܪܘܕܐ ܙܥܘܪܐ + {0} ܬܪ̈ܘܕܐ ܙܥܘܪ̈ܐ + + + ܬܪ̈ܘܕܐ ܚܠܝ̈ܐ + {0} ܬܪܘܕܐ ܚܠܝܐ + {0} ܬܪ̈ܘܕܐ ܚܠܝ̈ܐ + + + ܬܪ̈ܘܕܐ ܚܠܝ̈ܐ ܐܡܦܪܬܘܪܝܐ + {0} ܬܪܘܕܐ ܚܠܝܐ ܐܡܦܪܬܘܪܝܐ + {0} ܬܪ̈ܘܕܐ ܚܠܝ̈ܐ ܐܡܦܪܬܘܪܝܐ + + + ܛܘܦ̈ܐ + {0} ܛܘܦܬܐ + {0} ܛܘܦ̈ܐ + + + ܕܪ̈ܟܡܐ ܪ̈ܕܘܝܐ + {0} ܕܪܟܡܐ ܪܕܘܝܐ + {0} ܕܪ̈ܟܡܐ ܪ̈ܕܘܝܐ + + + ܪ̈ܘܒܥܐ ܓܠܘܢ̈ܐ ܐܡܦܪܬܘܪܝܐ + {0} ܪܘܒܥܐ ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + {0} ܪ̈ܘܒܥܐ ܓܠܘܢ̈ܐ ܐܡܦܪܬܘܪܝܐ + + + ܦܢܝܬܐ ܫܪܫܢܝܬܐ + {0} ܡܕܢܚܐ + {0} ܓܪܒܝܐ + {0} ܬܝܡܢܐ + {0} ܡܥܪܒ݂ܐ + + + + + ܕܝܣܝ{0} + + + ܣܢܬܝ{0} + + + ܡ{0} + + + ܡܟ{0} + + + ܢ{0} + + + ܦ{0} + + + ܦ̮{0} + + + ܐ{0} + + + ܙܝܦ{0} + + + ܝܟ{0} + + + ܕܐ{0} + + + ܗ{0} + + + ܟ{0} + + + ܡܝܓ{0} + + + ܓܝ{0} + + + ܬ{0} + + + ܦܝܬ{0} + + + ܐܟ{0} + + + ܙܬ{0} + + + ܝܘ{0} + + + ܟܝܒܝ{0} + + + ܡܝܒܝ{0} + + + ܓܝܒܝ{0} + + + ܬܝܒܝ{0} + + + ܦܝܒܝ{0} + + + ܐܟܣܒܝ{0} + + + ܙܝܒܝ{0} + + + ܝܘܒܝ{0} + + + {0}² + {0}² + + + {0}³ + {0}³ + + + ܚܝܠܐ ܢܬܘܦܘܬܐ + {0} ܚܝܠܐ ܢܬܘܦܘܬܐ + {0} ܚܝܠܐ ܢܬܘܦܘܬܐ + + + ܡ/ܪ² + {0} ܡ/ܪ² + {0} ܡ/ܪ² + + + ܚܘܕ + {0} ܚܘܕ + {0} ܚܘܕ + + + ܪܐܕܝܐܢ + {0} ܪܐܕܝܐܢ + {0} ܪܐܕܝܐܢ + + + ܕܪ̈ܓ݂ܐ + ܕܪܓ݂ܐ + {0} ܕܪ̈ܓ݂ܐ + + + ܩܛܝ̈ܢܬܐ ܩܫܬܢܝܬܐ + ܩܛܝܢܐ ܩܫܬܢܝܐ + {0} ܩܛܝ̈ܢܬܐ ܩܫܬܢܝܬܐ + + + ܪ̈ܦܦܐ 
ܩܫܬܢܝܐ + ܪܦܦܐ ܩܫܬܢܝܐ + {0} ܪ̈ܦܦܐ ܩܫܬܢ̈ܝܐ + + + ܟܡ² + {0} ܟܡ² + {0} ܟܡ² + {0}/ܟܡ² + + + ܗܟܬܪ + {0} ܗܟܬܪ + {0} ܗܟܬܪ + + + ܡ² + {0} ܡ² + {0} ܡ² + {0}/ܡ² + + + ܣܡ² + {0} ܣܡ² + {0} ܣܡ² + {0}/ܣܡ² + + + ܡܝܠ̈ܐ² + {0} ܡܝܠ̈ܐ² + {0} ܡܝܠ̈ܐ² + {0}/ܡܝܠ̈ܐ² + + + ܦ̈ܕܢܐ + {0} ܦܕܢܐ + {0} ܦ̈ܕܢܐ + + + ܝܪ̈ܕܐ² + {0} ܝܪܕܐ² + {0} ܝܪ̈ܕܐ² + + + ܐܩܠ̈ܐ² + {0} ܐܩܠܐ² + {0} ܐܩܠ̈ܐ² + + + ܐܢܟ̈ܐ² + {0}/ܐܢ² + {0}/ܐܢ² + {0}/ܐܢ² + + + ܩܪ̈ܛܐ + ܩܪܛܐ + {0} ܩܪ̈ܛܐ + + + ܡܓܡ/ܕܝܣܝܠܝܬܪܐ + {0} ܡܓܡ/ܕܝܣܝܠܝܬܪܐ + {0} ܡܓܡ/ܕܝܣܝܠܝܬܪܐ + + + ܡܡܘܠ܊/ܠ + {0} ܡܡܘܠ܊/ܠ + {0} ܡܡܘܠ܊/ܠ + + + ܡܠܘܐܐ + ܡܠܘܐܐ + {0} ܡܠܘܐ̈ܐ + + + ܡܢܘ̈ܬܐ/ܡܠܝܘܢ + {0} ܡܢܘܬܐ/ܡܠ܊ + {0} ܡܢܘ̈ܬܐ/ܡܠ܊ + + + ܒܡܐܐ + {0}% + {0}% + + + ܒܐܠܦܐ + {0}‰ + {0}‰ + + + {0}‱ + {0}‱ + + + ܡܘܠ + {0} ܡܘܠ + {0} ܡܘܠ + + + ܠܝܬܪ̈ܐ/ܟܡ + {0} ܠܝܬܪܐ/ܟܡ + {0} ܠܝܬܪ̈ܐ/ܟܡ + + + ܠܝܬܪ̈ܐ/100 ܟܡ + {0} ܠܝܬܪܐ/100 ܟܡ + {0} ܠܝܬܪ̈ܐ/100 ܟܡ + + + ܡܝܠ̈ܐ/ܓܠܘܢܐ + {0} ܡܝܠܐ ܒܓܠܘܢܐ + {0} ܡܝܠ̈ܐ ܒܓܠܘܢܐ + + + ܡܝܠ̈ܐ/ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + {0} ܡܝܠܐ/ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + {0} ܡܝܠܐ/ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + + + ܦܝܬܐܒܐܝܬ + {0} ܦܝܬܐܒܐܝܬ + {0} ܦܝܬܐܒܐܝܬ + + + ܬܝܪܐܒܐܝܬ + {0} ܬܝܪܐܒܐܝܬ + {0} ܬܝܪܐܒܐܝܬ + + + ܬܝܪܐܒܬ + {0} ܬܝܪܐܒܬ + {0} ܬܝܪܐܒܬ + + + ܓܝܓܐܒܐܝܬ + {0} ܓܝܓܐܒܐܝܬ + {0} ܓܝܓܐܒܐܝܬ + + + ܓܝܓܐܒܬ + {0} ܓܝܓܐܒܬ + {0} ܓܝܓܐܒܬ + + + ܡܝܓܐܒܐܝܬ + {0} ܡܝܓܐܒܐܝܬ + {0} ܡܝܓܐܒܐܝܬ + + + ܡܝܓܐܒܬ + {0} ܡܝܓܐܒܬ + {0} ܡܝܓܐܒܬ + + + ܟܝܠܘܒܐܝܬ + {0} ܟܝܠܘܒܐܝܬ + {0} ܟܝܠܘܒܐܝܬ + + + ܟܝܠܘܒܬ + {0} ܟܝܠܘܒܬ + {0} ܟܝܠܘܒܬ + + + ܒܐܝܬ + {0} ܒܐܝܬ + {0} ܒܐܝܬ + + + ܒܬ + {0} ܒܬ + {0} ܒܬ + + + ܕܪܐ + ܕܪܐ + {0} ܕܪ̈ܐ + + + ܥܣܝܪܘܬܐ + ܥܣܝܪܘܬܐ + {0} ܥܣܝܪ̈ܘܬܐ + + + ܫ̈ܢܐ + ܫܢܬܐ + {0} ܫ̈ܢܐ + {0}/ܫܢܬܐ + + + ܪܘܒܥܐ + ܪܘܒܥܐ + {0} ܪ̈ܘܒܥܐ + {0}/ܪܘܒܥܐ + + + ܝܪ̈ܚܐ + ܝܪܚܐ + {0} ܝܪ̈ܚܐ + {0}/ܝܪܚܐ + + + ܫܒ̈ܘܥܐ + ܫܒ݂ܘܥܐ + {0} ܫܒ݂̈ܘܥܐ + {0}/ܫ + + + ܝܘ̈ܡܬܐ + ܝܘܡܐ + {0} ܝܘ̈ܡܬܐ + {0}/ܝܘܡܐ + + + ܫ̈ܥܐ + {0} ܫܥ + {0} ܫܥ + {0}/ܫܥ + + + ܩ + {0} ܩ + {0} ܩ + {0}/ܩ + + + ܪ̈ܦܦܐ + {0} ܪ + {0} ܪ + {0}/ܪ + + + ܡܝܠܝܪ̈ܦܦܐ + {0} ܡܝܠܝ ܪ + {0} ܡܝܠܝ ܪ + + + ܡܝܟܪܘ ܪ + {0} ܡܝܟܪܘ ܪ + {0} ܡܝܟܪܘ ܪ + + + ܢܐܢܘ ܪ + {0} ܢܐܢܘ ܪ + {0} ܢܐܢܘ ܪ + + + ܐܡܦܝܪ + {0} ܐܡܦܝܪ + {0} ܐܡܦܝܪ + + + ܡܝܠܝܐܡܦܝܪ + {0} ܡ ܐܡܦܝܪ + {0} ܡ ܐܡܦܝܪ + + + ܘܗܡ + 
{0} ܘܗܡ + {0} ܘܗܡ + + + ܒܘܠܬ + {0} ܒܘܠܬ + {0} ܒܘܠܬ + + + ܟ ܟܐܠܘܪܝ + {0} ܟ ܟܐܠܘܪܝ + {0} ܟ ܟܐܠܘܪܝ + + + ܟܐܠ + {0} ܟܐܠ + {0} ܟܐܠ + + + ܟ ܓܝܐܘܠ + {0} ܟ ܓܝܐܘܠ + {0} ܟ ܓܝܐܘܠ + + + ܓܝܐܘܠ + {0} ܓܝܐܘܠ + {0} ܓܝܐܘܠ + + + ܟܝܠܘܘܐܬ-ܫ + {0} ܟܘܫ܏-ܫ + {0} ܟܘܫ܏-ܫ + + + ܐܠܝܟܬܪܘܢ ܒܘܠܬ + {0} ܐܠܝܟܬܪܘܢ ܒܘܠܬ + {0} ܐܠܝܟܬܪܘܢ ܒܘܠܬ + + + ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܒܪܝܛܢܝܐ + {0} ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܒܪܝܛܢܝܐ + {0} ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܒܪܝܛܢܝܐ + + + ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܐܡܪܝܟܐ + {0} ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܐܡܪܝܟܐ + {0} ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܐܡܪܝܟܐ + + + ܢܝܘܬܢ + {0} ܢܝܘܬܢ + {0} ܢܝܘܬܢ + + + ܟܘܫ܊ ܒ 100 ܟܡ + {0} ܟܘܫ܊ ܒ 100 ܟܡ + {0} ܟܘܫ܊ ܒ 100 ܟܡ + + + ܓܝܓܐܗܪܬܙ + {0} ܓܝܓܐܗܪܬܙ + {0} ܓܝܓܐܗܪܬܙ + + + ܡܝܓܐܗܪܬܙ + {0} ܡܝܓܐܗܪܬܙ + {0} ܡܝܓܐܗܪܬܙ + + + ܟܝܠܘܗܪܬܙ + {0} ܟܝܠܘܗܪܬܙ + {0} ܟܝܠܘܗܪܬܙ + + + ܗܪܬܙ + {0} ܗܪܬܙ + {0} ܗܪܬܙ + + + ܦܝ̈ܟܣܠܐ + {0} ܦܝܟܣܠܐ + {0} ܦܝܟܣܠܐ + + + ܡܝܓܐܦܝ̈ܟܣܠܐ + {0} ܡ.ܦܝܟܣܠܐ + {0} ܡ.ܦܝܟܣܠܐ + + + ܦܝ̈ܟܣܠܐ ܒܣܢܬܝܡܝܬܪܐ + {0} ܦܝ̈ܟܣܠܐ/ܣܡ + {0} ܦܝ̈ܟܣܠܐ/ܣܡ + + + ܦܝ̈ܟܣܠܐ ܒܐܢܟ + {0} ܦܝ̈ܟܣܠܐ/ܐܢ + {0} ܦܝ̈ܟܣܠܐ/ܐܢ + + + ܥܘܒܐ ܐܪܥܝܐ + {0} ܥܘܒܐ ܐܪܥܝܐ + {0} ܥܘܒܐ ܐܪܥܝܐ + + + ܟܡ + {0} ܟܡ + {0} ܟܡ + {0}/ܟܡ + + + ܡܝܬܪܐ + ܡܝܬܪܐ + {0} ܡ + {0}/ܡ + + + ܕܣܡ + {0} ܕܣܡ + {0} ܕܣܡ + + + ܣܡ + {0} ܣܡ + {0} ܣܡ + {0}/ܣܡ + + + ܡܡ + {0} ܡܡ + {0} ܡܡ + + + ܡܝܟܪܘܡܝܬܪ̈ܐ + {0} ܡܟܡ + {0}ܡܟܡ + + + ܢܡ + {0} ܢܡ + {0} ܢܡ + + + ܦܝܟܘܡܝܬܪܐ + {0} ܦܝܟܘܡܝܬܪܐ + {0} ܦܝܟܘܡܝܬܪܐ + + + ܡܝܠ̈ܐ + ܡܝܠܐ + {0} ܡܝܠ̈ܐ + + + ܝܪܕ̈ܐ + {0} ܝܪܕ̈ܐ + {0} ܝܪܕ̈ܐ + + + ܐܩܠ̈ܐ + {0} ܐܩܠ̈ܐ + {0} ܐܩܠ̈ܐ + {0}/ܐܩܠܐ + + + ܐܢܟ̈ܐ + ܚܕܐ ܐܢ܊ + {0} ܐܢ܊ + {0}/ܐܢܟ + + + ܦܪ̈ܣܚܐ + {0} ܦܪܣܚܐ + {0} ܦܪ̈ܣܚܐ + + + ܫ̈ܢܐ ܕܢܘܗܪܐ + {0} ܫܢܬܐ ܕܢܘܗܪܐ + {0} ܫ̈ܢܐ ܕܢܘܗܪܐ + + + ܡ.ܪ. + {0} ܡ.ܪ. + {0} ܡ.ܪ. 
+ + + ܦܘܪ̈ܠܢܓ + {0} ܦܘܪܠܢܓ + {0} ܦܘܪ̈ܠܢܓ + + + ܡܝ̈ܠܐ ܝܡܝ̈ܐ + {0} ܡܝܠܐ ܝܡܝܐ + {0} ܡܝ̈ܠܐ ܝܡܝ̈ܐ + + + ܡܝ̈ܠܐ-ܐܣܟܢܕܝܢܒܝܝܢ + {0} ܡܝ̈ܠܐ-ܐܣܟܢܕܝܢܒܝܝܢ + {0} ܡܝ̈ܠܐ-ܐܣܟܢܕܝܢܒܝܝܢ + + + ܢܘܩܙ̈ܐ + {0} ܢܘܩܙܐ + {0} ܢܘܩܙ̈ܐ + + + ܥܘܒܐ ܦܫܝܬܐ ܫܡܫܝܐ + {0} ܥܘܒܐ ܫܡܫܝܐ + {0} ܥܘܒܐ ܫܡܫܝܐ + + + ܠܘܩܣ + {0} ܠܘܩܣ + {0} ܠܘܩܣ + + + ܢܗܝܪܐ + {0} ܢܗܝܪܐ + {0} ܢܗܝܪܐ + + + ܠܘܡܝܢ + {0} ܠܘܡܝܢ + {0} ܠܘܡܝܢ + + + ܬ.ܡ + {0} ܬ.ܡ + {0} ܬ.ܡ + + + ܟܓܡ + {0} ܟܓܡ + {0} ܟܓܡ + {0}/ܟܓܡ + + + ܓܪ̈ܡܐ + ܓܪܡܐ + {0} ܓܪ̈ܡܐ + {0}/ܓܪܡܐ + + + ܡܓܡ + {0} ܡܓܡ + {0} ܡܓܡ + + + ܡܟܓܡ + {0} ܡܟܓܡ + {0} ܡܟܓܡ + + + ܬܘܢ̈ܐ + {0} ܬܘܢܐ + {0} ܬܘܢ̈ܐ + + + ܣܬܘܢ + {0} ܣܛܘܢ + {0} ܣܛܘܢ + + + ܡܢܝ̈ܐ + {0} ܡܢܝܐ + {0} ܡܢܝ̈ܐ + {0}/ܡܢܝܐ + + + ܐܘܢܩ̈ܝܐ + {0} ܐܘܢܩܝܐ + {0} ܐܘܢܩ̈ܝܐ + {0}/ܐܘܢܩܝܐ + + + ܩܪ̈ܛܐ + {0} ܩܪܛܐ + {0} ܩܪ̈ܛܐ + + + ܕܐܠܬܘܢ + {0} ܕܐܠܬܘܢ + {0} ܕܐܠܬܘܢ + + + ܥܘܫܢܐ ܐܪܥܝܐ + {0} ܥܘܫܢܐ ܐܪܥܝܐ + {0} ܥܘܫܢܐ ܐܪܥܝܐ + + + ܥܘܫܢܐ ܫܡܫܝܐ + {0} ܥܘܫܢܐ ܫܡܫܝܐ + {0} ܥܘܫܢܐ ܫܡܫܝܐ + + + ܦܪܕܐ + {0} ܦܪܕܐ + {0} ܦܪ̈ܕܐ + + + ܓ ܘܐܬ + {0} ܓ ܘܐܬ + {0} ܓ ܘܐܬ + + + ܡ ܘܐܬ + {0} ܡ ܘܐܬ + {0} ܡ ܘܐܬ + + + ܟ ܘܐܬ + {0} ܟ ܘܐܬ + {0} ܟ ܘܐܬ + + + ܘܐܬ + {0} ܘܐܬ + {0} ܘܐܬ + + + ܡܝܠܝܘܐܬ + {0} ܡܝܠܝܘܐܬ + {0} ܡܝܠܝܘܐܬ + + + ܚܝܠܐ ܕܣܘܣܝܐ + {0} ܣܘܣܝܐ + {0} ܣܘܣܝܐ + + + ܦܣܟܠ + {0} ܦܣܟ̈ܠܐ + {0} ܦܣܟ̈ܠܐ + + + ܗܟܬܘܦܣܟܠ + {0} ܗܟܬܘܦܣܟ̈ܠܐ + {0} ܗܟܬܘܦܣܟ̈ܠܐ + + + ܟܝܠܘܦܣܟܠ + {0} ܟܝܠܘܦܣܟܠ + {0} ܟܝܠܘܦܣܟܠ + + + ܡܝܓܐܦܣܟܠ + {0} ܡܝܓܐܦܣܟܠ + {0} ܡܝܓܐܦܣܟܠ + + + ܟܡ/ܫ + {0} ܟܡ/ܫ + {0} ܟܡ/ܫ + + + ܡ/ܪ + {0} ܡ/ܪ + {0} ܡ/ܪ + + + ܡܝܠ̈ܐ/ܫ + {0} ܡܝܠܐ/ܫ + {0} ܡܝܠ̈ܐ/ܫ + + + ܩܛܪ̈ܐ + {0} ܩܛܪܐ + {0} ܩܛܪ̈ܐ + + + {0}° + {0}° + + + ܕܪܓܐ ܡܐܢܝܐ + {0}°ܡ + {0}°ܡ + + + ܕܪܓܐ ܦܐܗܪܢܗܥܝܬ + {0}°ܦ + {0}°ܦ + + + ܕ ܟܠܒܝܢ + {0} ܕ ܟܠܒܝܢ + {0} ܕ ܟܠܒܝܢ + + + ܢܝܘܬܢ-ܡܝܬܪ̈ܐ + {0} ܢܝܘܬܢ-ܡܝܬܪ̈ܐ + {0} ܢܝܘܬܢ-ܡܝܬܪ̈ܐ + + + ܟܡ³ + {0} ܟܡ³ + {0} ܟܡ³ + + + ܡܝܬܪ̈ܐ ܡܩܦܣܐ + {0} ܡ³ + {0} ܡ³ + {0}/ܡ³ + + + ܣܡ³ + {0} ܣܡ³ + {0} ܣܡ³ + {0}/ܣܡ³ + + + ܡܝܠ̈ܐ³ + {0} ܡܝܠܐ³ + {0} ܡܝܠ̈ܐ³ + + + ܝܪ̈ܕܐ³ + {0} ܝܪܕܐ³ + {0} ܝܪ̈ܕܐ³ + + + ܐܩܠ̈ܐ³ + {0} ܐܩܠܐ³ + {0} ܐܩܠ̈ܐ³ + + + ܐܢܟ̈ܐ³ + {0} ܐܢ³ + {0} ܐܢ³ + + + ܡܝܓܐܠܝܬܪ̈ܐ + {0} ܡܝܓܐܠܝܬܪܐ + {0} ܡܝܓܐܠܝܬܪ̈ܐ + + + ܗܟܬܘܠܝܬܪ̈ܐ + {0} ܗܟܬܘܠܝܬܪܐ + {0} 
ܗܟܬܘܠܝܬܪ̈ܐ + + + ܠܝܬܪ̈ܐ + {0} ܠܝܬܪܐ + {0} ܠܝܬܪ̈ܐ + {0}/ܠ + + + ܕܝܣܝܠܝܬܪ̈ܐ + {0} ܕܝܣܝܠܝܬܪܐ + {0} ܕܝܣܝܠܝܬܪ̈ܐ + + + ܣܢܬܝܠܝܬܪ̈ܐ + {0} ܣܢܬܝܠܝܬܪܐ + {0} ܣܢܬܝܠܝܬܪ̈ܐ + + + ܡܠܬܪ + {0} ܡܠܬܪ܊ + {0} ܡܠܬܪ̈܊ + + + {0} mpt + {0} mpt + + + ܐܩܠ̈ܐ-ܦܕܢܐ + {0} ܐܩܠ̈ܐ-ܦܕܢܐ + {0} ܐܩܠ̈ܐ-ܦܕܢܐ + + + ܓܠܘܢ̈ܐ + {0} ܓܠܘܢܐ + {0} ܓܠܘܢ̈ܐ + {0}/ܓܠܘܢ̈ܐ + + + ܓܠܘܢ̈ܐ ܐܡܦܪܬܘܪܝܐ + {0} ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + {0} ܓܠܘܢ̈ܐ ܐܡܦܪܬܘܪܝܐ + {0}/ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + + + ܪ̈ܘܒܥܐ ܓܠܘܢ̈ܐ + {0} ܪܘܒܥܐ ܓܠܘܢܐ + {0} ܪ̈ܘܒܥܐ ܓܠܘܢ̈ܐ + + + ܐܘܢܩ̈ܝܐ ܪ̈ܕܘܝܐ + {0} ܐܘܢܩܝܐ ܪܕܘܝܐ + {0} ܐܘܢܩ̈ܝܐ ܪ̈ܕܘܝܐ + + + ܐܘܢܩ̈ܝܐ ܪ̈ܕܘܝܐ ܐܡܦܪܬܘܪܝܐ + {0} ܐܘܢܩܝܐ ܪܕܘܝܐ ܐܡܦܪܬܘܪܝܐ + {0} ܐܘܢܩ̈ܝܐ ܪ̈ܕܘܝܐ ܐܡܦܪܬܘܪܝܐ + + + ܬܪ̈ܘܕܐ ܪ̈ܒܐ + {0} ܬܪܘܕܐ ܪܒܐ + {0} ܬܪ̈ܘܕܐ ܪ̈ܒܐ + + + ܬܪ̈ܘܕܐ ܙܥܘܪ̈ܐ + {0} ܬܪܘܕܐ ܙܥܘܪܐ + {0} ܬܪ̈ܘܕܐ ܙܥܘܪ̈ܐ + + + ܬܪ̈ܘܕܐ ܚܠܝ̈ܐ + {0} ܬܪܘܕܐ ܚܠܝܐ + {0} ܬܪ̈ܘܕܐ ܚܠܝ̈ܐ + + + ܬܪ̈ܘܕܐ ܚܠܝ̈ܐ ܐܡܦܪܬܘܪܝܐ + {0} ܬܪܘܕܐ ܚܠܝܐ ܐܡܦܪܬܘܪܝܐ + {0} ܬܪ̈ܘܕܐ ܚܠܝ̈ܐ ܐܡܦܪܬܘܪܝܐ + + + ܛܘܦܬܐ + {0} ܛܘܦܬܐ + {0} ܛܘܦ̈ܐ + + + ܕܪܟܡܐ ܪܕܘܝܐ + {0} ܕܪܟܡܐ ܪܕܘܝܐ + {0} ܕܪ̈ܟܡܐ ܪ̈ܕܘܝܐ + + + ܪ̈ܘܒܥܐ ܓܠܘܢ̈ܐ ܐܡܦܪܬܘܪܝܐ + {0} ܪܘܒܥܐ ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + {0} ܪ̈ܘܒܥܐ ܓܠܘܢ̈ܐ ܐܡܦܪܬܘܪܝܐ + + + ܦܢܝܬܐ ܫܪܫܢܝܬܐ + {0} ܡܕܢܚܐ + {0} ܓܪܒܝܐ + {0} ܬܝܡܢܐ + {0} ܡܥܪܒ݂ܐ + + + + + ܕܝܣܝ{0} + + + ܣܢܬܝ{0} + + + ܡ{0} + + + ܡܟ{0} + + + ܢ{0} + + + ܦ{0} + + + ܦ̮{0} + + + ܐ{0} + + + ܙܝܦ{0} + + + ܝܟ{0} + + + ܕܐ{0} + + + ܗ{0} + + + ܟ{0} + + + ܡܝܓ{0} + + + ܓܝ{0} + + + ܬ{0} + + + ܦܝܬ{0} + + + ܐܟ{0} + + + ܙܬ{0} + + + ܝܘ{0} + + + ܟܝܒܝ{0} + + + ܡܝܒܝ{0} + + + ܓܝܒܝ{0} + + + ܬܝܒܝ{0} + + + ܦܝܒܝ{0} + + + ܐܟܣܒܝ{0} + + + ܙܝܒܝ{0} + + + ܝܘܒܝ{0} + + + {0}/{1} + + + {0}² + {0}² + + + {0}³ + {0}³ + + + {0}⋅{1} + + + ܚܝܠܐ ܢܬܘܦܘܬܐ + {0} ܚܝܠܐ ܢܬܘܦܘܬܐ + {0} ܚܝܠܐ ܢܬܘܦܘܬܐ + + + ܡ/ܪ² + {0} ܡ/ܪ² + {0} ܡ/ܪ² + + + ܚܘܕ + {0} ܚܘܕ + {0} ܚܘܕ + + + ܪܐܕܝܐܢ + {0} ܪܐܕܝܐܢ + {0} ܪܐܕܝܐܢ + + + ܕܪ̈ܓ݂ܐ + ܕܪܓ݂ܐ + {0} ܕܪ̈ܓ݂ܐ + + + ܩܛܝ̈ܢܬܐ ܩܫܬܢܝܬܐ + ܩܛܝܢܐ ܩܫܬܢܝܐ + {0} ܩܛܝ̈ܢܬܐ ܩܫܬܢܝܬܐ + + + ܪ̈ܦܦܐ ܩܫܬܢܝܐ + ܪܦܦܐ ܩܫܬܢܝܐ + {0} ܪ̈ܦܦܐ ܩܫܬܢ̈ܝܐ + + + ܟܡ² + {0} ܟܡ² + {0} ܟܡ² + {0}/ܟܡ² + + + ܗܟܬܪ + {0} ܗܟܬܪ + {0} ܗܟܬܪ + + + ܡ² + {0} ܡ² + {0} ܡ² + {0}/ܡ² + + + ܣܡ² + {0} ܣܡ² + {0} ܣܡ² + {0}/ܣܡ² + + + 
ܡܝܠ̈ܐ² + {0} ܡܝܠ̈ܐ² + {0} ܡܝܠ̈ܐ² + {0}/ܡܝܠ̈ܐ² + + + ܦܕܢܐ + {0} ܦܕܢܐ + {0} ܦ̈ܕܢܐ + + + ܝܪ̈ܕܐ² + {0} ܝܪܕܐ² + {0} ܝܪ̈ܕܐ² + + + ܐܩܠ̈ܐ² + {0} ܐܩܠܐ² + {0} ܐܩܠ̈ܐ² + + + ܐܢ² + {0}/ܐܢ² + {0}/ܐܢ² + {0}/ܐܢ² + + + ܩܪܛܐ + ܩܪܛܐ + {0} ܩܪ̈ܛܐ + + + ܡܓ/ܕܝܣܝܠܝܬܪܐ + {0} ܡܓ/ܕܝܣܝܠܝܬܪܐ + {0} ܡܓ/ܕܝܣܝܠܝܬܪܐ + + + ܡܡܘܠ܊/ܠ + {0} ܡܡܘܠ܊/ܠ + {0} ܡܡܘܠ܊/ܠ + + + ܡܠܘܐܐ + ܡܠܘܐܐ + {0} ܡܠܘܐ̈ܐ + + + ܡܢܘ̈ܬܐ/ܡܠܝܘܢ + {0} ܡܢܘܬܐ/ܡܠ܊ + {0} ܡܢܘ̈ܬܐ/ܡܠ܊ + + + ܒܡܐܐ + {0}% + {0}% + + + ܒܐܠܦܐ + {0}‰ + {0}‰ + + + + {0}‱ + {0}‱ + + + ܡܘܠ + {0} ܡܘܠ + {0} ܡܘܠ + + + ܠ/ܟܡ + {0} ܠ/ܟܡ + {0} ܠ/ܟܡ + + + ܠ/100ܟܡ + {0} ܠ/100ܟܡ + {0} ܠ/100ܟܡ + + + ܡܝܠ̈ܐ/ܓܠܘܢܐ + {0} ܡܝܠܐ ܒܓܠܘܢܐ + {0} ܡܝܠ̈ܐ ܒܓܠܘܢܐ + + + ܡܝܠ̈ܐ/ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + {0} ܡܝܠܐ/ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + {0} ܡܝܠܐ/ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + + + ܦܝܬܐܒܐܝܬ + {0} ܦܝܬܐܒܐܝܬ + {0} ܦܝܬܐܒܐܝܬ + + + ܬܝܪܐܒܐܝܬ + {0} ܬܝܪܐܒܐܝܬ + {0} ܬܝܪܐܒܐܝܬ + + + ܬܝܪܐܒܬ + {0} ܬܝܪܐܒܬ + {0} ܬܝܪܐܒܬ + + + ܓܝܓܐܒܐܝܬ + {0} ܓܝܓܐܒܐܝܬ + {0} ܓܝܓܐܒܐܝܬ + + + ܓܝܓܐܒܬ + {0} ܓܝܓܐܒܬ + {0} ܓܝܓܐܒܬ + + + ܡܝܓܐܒܐܝܬ + {0} ܡܝܓܐܒܐܝܬ + {0} ܡܝܓܐܒܐܝܬ + + + ܡܝܓܐܒܬ + {0} ܡܝܓܐܒܬ + {0} ܡܝܓܐܒܬ + + + ܟܝܠܘܒܐܝܬ + {0} ܟܝܠܘܒܐܝܬ + {0} ܟܝܠܘܒܐܝܬ + + + ܟܝܠܘܒܬ + {0} ܟܝܠܘܒܬ + {0} ܟܝܠܘܒܬ + + + ܒܐܝܬ + {0} ܒܐܝܬ + {0} ܒܐܝܬ + + + ܒܬ + {0} ܒܬ + {0} ܒܬ + + + ܕܪܐ + {0} ܕܪܐ + {0} ܕܪ̈ܐ + + + ܥܣܝܪܘܬܐ + {0} ܥܣܝܪܘܬܐ + {0} ܥܣܝܪ̈ܘܬܐ + + + ܫܢܬܐ + {0} ܫܢܬܐ + {0} ܫ̈ܢܐ + {0}/ܫܢܬܐ + + + ܪܘܒܥܐ + {0} ܪܘܒܥܐ + {0} ܪ̈ܘܒܥܐ + {0}/ܪܘܒܥܐ + + + ܝܪܚܐ + {0} ܝܪܚܐ + {0} ܝܪ̈ܚܐ + {0}/ܝܪܚܐ + + + ܫܒܘܥܐ + {0} ܫ + {0} ܫ + {0}/ܫ + + + ܝܘܡܐ + {0} ܝ + {0} ܝ + {0}/ܝܘܡܐ + + + ܫܥܬܐ + {0} ܫܥ + {0} ܫܥ + {0}/ܫܥ + + + ܩ + {0} ܩ + {0} ܩ + {0}/ܩ + + + ܪ + {0} ܪ + {0} ܪ + {0}/ܪ + + + ܡܝܠܝ ܪ + {0} ܡܝܠܝ ܪ + {0} ܡܝܠܝ ܪ + + + ܡܝܟܪܘ ܪ + {0} ܡܝܟܪܘ ܪ + {0} ܡܝܟܪܘ ܪ + + + ܢܐܢܘ ܪ + {0} ܢܐܢܘ ܪ + {0} ܢܐܢܘ ܪ + + + ܐܡܦܝܪ + {0} ܐܡܦܝܪ + {0} ܐܡܦܝܪ + + + ܡܝܠܝܐܡܦܝܪ + {0} ܡ ܐܡܦܝܪ + {0} ܡ ܐܡܦܝܪ + + + ܘܗܡ + {0} ܘܗܡ + {0} ܘܗܡ + + + ܒܘܠܬ + {0} ܒܘܠܬ + {0} ܒܘܠܬ + + + ܟ ܟܐܠܘܪܝ + {0} ܟ ܟܐܠܘܪܝ + {0} ܟ ܟܐܠܘܪܝ + + + ܟܐܠ + {0} ܟܐܠ + {0} ܟܐܠ + + + ܟ ܓܝܐܘܠ + {0} ܟ ܓܝܐܘܠ + {0} ܟ ܓܝܐܘܠ + + + ܓܝܐܘܠ + {0} ܓܝܐܘܠ + {0} ܓܝܐܘܠ + + + ܟܘܫ܏-ܫ + {0} ܟܘܫ܏-ܫ + {0} 
ܟܘܫ܏-ܫ + + + ܐܠܝܟܬܪܘܢ ܒܘܠܬ + {0} ܐܠܝܟܬܪܘܢ ܒܘܠܬ + {0} ܐܠܝܟܬܪܘܢ ܒܘܠܬ + + + ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܒܪܝܛܢܝܐ + {0} ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܒܪܝܛܢܝܐ + {0} ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܒܪܝܛܢܝܐ + + + ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܐܡܪܝܟܐ + {0} ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܐܡܪܝܟܐ + {0} ܡܫܘܚܬܐ ܕܫܚܝܢܘܬܐ ܕܐܡܪܝܟܐ + + + ܢܝܘܬܢ + {0} ܢܝܘܬܢ + {0} ܢܝܘܬܢ + + + ܟܘܫ܊/100 ܟܡ + {0} ܟܘܫ܊ ܒ 100 ܟܡ + {0} ܟܘܫ܊ ܒ 100 ܟܡ + + + ܓܝܓܐܗܪܬܙ + {0} ܓܝܓܐܗܪܬܙ + {0} ܓܝܓܐܗܪܬܙ + + + ܡܝܓܐܗܪܬܙ + {0} ܡܝܓܐܗܪܬܙ + {0} ܡܝܓܐܗܪܬܙ + + + ܟܝܠܘܗܪܬܙ + {0} ܟܝܠܘܗܪܬܙ + {0} ܟܝܠܘܗܪܬܙ + + + ܗܪܬܙ + {0} ܗܪܬܙ + {0} ܗܪܬܙ + + + ܦܝ̈ܟܣܠܐ + {0} ܦܝܟܣܠܐ + {0} ܦܝܟܣܠܐ + + + ܡܝܓܐܦܝ̈ܟܣܠܐ + {0} ܡ.ܦܝܟܣܠܐ + {0} ܡ.ܦܝܟܣܠܐ + + + ܦܝ̈ܟܣܠܐ/ܣܡ + {0} ܦܝ̈ܟܣܠܐ/ܣܡ + {0} ܦܝ̈ܟܣܠܐ/ܣܡ + + + ܦܝ̈ܟܣܠܐ ܒܐܢܟ + {0} ܦܝ̈ܟܣܠܐ/ܐܢ + {0} ܦܝ̈ܟܣܠܐ/ܐܢ + + + ܥܘܒܐ ܐܪܥܝܐ + {0} ܥܘܒܐ ܐܪܥܝܐ + {0} ܥܘܒܐ ܐܪܥܝܐ + + + ܟܡ + {0}ܟܡ + {0}ܟܡ + {0}/ܟܡ + + + ܡ + {0}ܡ + {0}ܡ + {0}/ܡ + + + ܕܣܡ + {0}ܕܣܡ + {0}ܕܣܡ + + + ܣܡ + {0}ܣܡ + {0}ܣܡ + {0}/ܣܡ + + + ܡܡ + {0}ܡܡ + {0}ܡܡ + + + ܡܟܡ + {0}ܡܟܡ + {0}ܡܟܡ + + + ܢܡ + {0}ܢܡ + {0}ܢܡ + + + ܦܝܟܘܡܝܬܪܐ + {0} ܦܝܟܘܡܝܬܪܐ + {0} ܦܝܟܘܡܝܬܪܐ + + + ܡܝܠܐ + {0} ܡܝܠܐ + {0} ܡܝܠ̈ܐ + + + ܝܪܕ̈ܐ + {0} ܝܪܕ̈ܐ + {0} ܝܪܕ̈ܐ + + + ܐܩܠ̈ܐ + {0} ܐܩܠܐ + {0} ܐܩܠ̈ܐ + {0}/ܐܩܠܐ + + + ܐܢ܊ + ܚܕܐ ܐܢ܊ + {0} ܐܢ܊ + {0}/ܐܢܟ + + + ܦܪܣܚܐ + {0} ܦܪܣܚܐ + {0} ܦܪ̈ܣܚܐ + + + ܫ̈ܢܐ ܕܢܘܗܪܐ + {0} ܫ ܢ + {0} ܫ ܢ + + + ܡ.ܪ. + {0} ܡ.ܪ. + {0} ܡ.ܪ. 
+ + + ܦܘܪܠܢܓ + {0} ܦܘܪܠܢܓ + {0} ܦܘܪ̈ܠܢܓ + + + ܡܝ̈ܠܐ ܝܡܝ̈ܐ + {0} ܡܝܠܐ ܝܡܝܐ + {0} ܡܝ̈ܠܐ ܝܡܝ̈ܐ + + + ܡܝ̈ܠܐ-ܐܣܟܢܕܝܢܒܝܝܢ + {0} ܡܝ̈ܠܐ-ܐܣܟܢܕܝܢܒܝܝܢ + {0} ܡܝ̈ܠܐ-ܐܣܟܢܕܝܢܒܝܝܢ + + + ܢܘܩܙ̈ܐ + {0} ܢܘܩܙܐ + {0} ܢܘܩܙ̈ܐ + + + ܥܘܒܐ ܫܡܫܝܐ + {0} ܥܘܒܐ ܫܡܫܝܐ + {0} ܥܘܒܐ ܫܡܫܝܐ + + + ܠܘܩܣ + {0} ܠܘܩܣ + {0} ܠܘܩܣ + + + ܢܗܝܪܐ + {0} ܢܗܝܪܐ + {0} ܢܗܝܪܐ + + + ܠܘܡܝܢ + {0} ܠܘܡܝܢ + {0} ܠܘܡܝܢ + + + ܬ.ܡ + {0} ܬ.ܡ + {0} ܬ.ܡ + + + ܟܓ + {0} ܟܓ + {0} ܟܓ + {0}/ܟܓ + + + ܓ + {0} ܓ + {0} ܓ + {0}/ܓ + + + ܡܓܡ + {0} ܡܓܡ + {0} ܡܓܡ + + + ܡܟܓܡ + {0} ܡܟܓܡ + {0} ܡܟܓܡ + + + ܬܘܢܐ + {0} ܬܘܢܐ + {0} ܬܘܢ̈ܐ + + + ܣܬܘܢ + {0} ܣܛܘܢ + {0} ܣܛܘܢ + + + ܡܢܝܐ + {0} ܡܢܝܐ + {0} ܡܢܝ̈ܐ + {0}/ܡܢܝܐ + + + ܐܘܢܩ̈ܝܐ + {0} ܐܘܢܩܝܐ + {0} ܐܘܢܩ̈ܝܐ + {0}/ܐܘܢܩܝܐ + + + ܩܪܛܐ + {0} ܩܪܛܐ + {0} ܩܪ̈ܛܐ + + + ܕܐܠܬܘܢ + {0} ܕܐܠܬܘܢ + {0} ܕܐܠܬܘܢ + + + ܥܘܫܢܐ ܐܪܥܝܐ + {0} ܥܘܫܢܐ ܐܪܥܝܐ + {0} ܥܘܫܢܐ ܐܪܥܝܐ + + + ܥܘܫܢܐ ܫܡܫܝܐ + {0} ܥܘܫܢܐ ܫܡܫܝܐ + {0} ܥܘܫܢܐ ܫܡܫܝܐ + + + ܦܪܕܐ + {0} ܦܪܕܐ + {0} ܦܪ̈ܕܐ + + + ܓ ܘܐܬ + {0} ܓ ܘܐܬ + {0} ܓ ܘܐܬ + + + ܡ ܘܐܬ + {0} ܡ ܘܐܬ + {0} ܡ ܘܐܬ + + + ܟ ܘܐܬ + {0} ܟ ܘܐܬ + {0} ܟ ܘܐܬ + + + ܘܐܬ + {0} ܘܐܬ + {0} ܘܐܬ + + + ܡܝܠܝܘܐܬ + {0} ܡܝܠܝܘܐܬ + {0} ܡܝܠܝܘܐܬ + + + ܚܝܠܐ ܕܣܘܣܝܐ + {0} ܣܘܣܝܐ + {0} ܣܘܣܝܐ + + + ܦܣܟܠ + {0} ܦܣܟ̈ܠܐ + {0} ܦܣܟ̈ܠܐ + + + ܗܟܬܘܦܣܟܠ + {0} ܗܟܬܘܦܣܟ̈ܠܐ + {0} ܗܟܬܘܦܣܟ̈ܠܐ + + + ܟܝܠܘܦܣܟܠ + {0} ܟܝܠܘܦܣܟܠ + {0} ܟܝܠܘܦܣܟܠ + + + ܡܝܓܐܦܣܟܠ + {0} ܡܝܓܐܦܣܟܠ + {0} ܡܝܓܐܦܣܟܠ + + + ܟܡ/ܫ + {0} ܟܡ/ܫ + {0} ܟܡ/ܫ + + + ܡ/ܪ + {0} ܡ/ܪ + {0} ܡ/ܪ + + + ܡܝܠ̈ܐ/ܫ + {0} ܡܝܠܐ/ܫ + {0} ܡܝܠ̈ܐ/ܫ + + + ܩܛܪ̈ܐ + {0} ܩܛܪܐ + {0} ܩܛܪ̈ܐ + + + ° + {0}° + {0}° + + + °ܡ + {0}°ܡ + {0}°ܡ + + + °ܦ + {0}°ܦ + {0}°ܦ + + + ܕ ܟܠܒܝܢ + {0} ܕ ܟܠܒܝܢ + {0} ܕ ܟܠܒܝܢ + + + ܢܝܘܬܢ-ܡܝܬܪ̈ܐ + {0} ܢܝܘܬܢ-ܡܝܬܪ̈ܐ + {0} ܢܝܘܬܢ-ܡܝܬܪ̈ܐ + + + ܟܡ³ + {0} ܟܡ³ + {0} ܟܡ³ + + + ܡ³ + {0} ܡ³ + {0} ܡ³ + {0}/ܡ³ + + + ܣܡ³ + {0} ܣܡ³ + {0} ܣܡ³ + {0}/ܣܡ³ + + + ܡܝܠ̈ܐ³ + {0} ܡܝܠܐ³ + {0} ܡܝܠ̈ܐ³ + + + ܝܪ̈ܕܐ³ + {0} ܝܪܕܐ³ + {0} ܝܪ̈ܕܐ³ + + + ܐܩܠ̈ܐ³ + {0} ܐܩܠܐ³ + {0} ܐܩܠ̈ܐ³ + + + ܐܢ³ + {0} ܐܢ³ + {0} ܐܢ³ + + + ܡܝܓܐܠܝܬܪ̈ܐ + {0} ܡܝܓܐܠܝܬܪܐ + {0} ܡܝܓܐܠܝܬܪ̈ܐ + + + ܗܟܬܘܠܝܬܪ̈ܐ + {0} ܗܟܬܘܠܝܬܪܐ + {0} ܗܟܬܘܠܝܬܪ̈ܐ + + + ܠܝܬܪܐ + {0} ܠܝܬܪܐ + {0} ܠܝܬܪ̈ܐ + 
{0}/ܠ + + + ܕܝܣܝܠܝܬܪ̈ܐ + {0} ܕܝܣܝܠܝܬܪܐ + {0} ܕܝܣܝܠܝܬܪ̈ܐ + + + ܣܢܬܝܠܝܬܪ̈ܐ + {0} ܣܢܬܝܠܝܬܪܐ + {0} ܣܢܬܝܠܝܬܪ̈ܐ + + + ܡܠܬܪ + {0} ܡܠܬܪ܊ + {0} ܡܠܬܪ̈܊ + + + mpt + {0} mpt + {0} mpt + + + ܐܩܠ̈ܐ-ܦܕܢܐ + {0} ܐܩܠ̈ܐ-ܦܕܢܐ + {0} ܐܩܠ̈ܐ-ܦܕܢܐ + + + ܓܠܘܢ̈ܐ + {0} ܓܠܘܢܐ + {0} ܓܠܘܢ̈ܐ + {0}/ܓܠܘܢ̈ܐ + + + ܓܠܘܢ̈ܐ ܐܡܦܪܬܘܪܝܐ + {0} ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + {0} ܓܠܘܢ̈ܐ ܐܡܦܪܬܘܪܝܐ + {0}/ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + + + ܪ̈ܘܒܥܐ ܓܠܘܢ̈ܐ + {0} ܪܘܒܥܐ ܓܠܘܢܐ + {0} ܪ̈ܘܒܥܐ ܓܠܘܢ̈ܐ + + + ܐܘܢܩ̈ܝܐ ܪ̈ܕܘܝܐ + {0} ܐܘܢܩܝܐ ܪܕܘܝܐ + {0} ܐܘܢܩ̈ܝܐ ܪ̈ܕܘܝܐ + + + ܐܘܢܩ̈ܝܐ ܪ̈ܕܘܝܐ ܐܡܦܪܬܘܪܝܐ + {0} ܐܘܢܩܝܐ ܪܕܘܝܐ ܐܡܦܪܬܘܪܝܐ + {0} ܐܘܢܩ̈ܝܐ ܪ̈ܕܘܝܐ ܐܡܦܪܬܘܪܝܐ + + + ܬܪ̈ܘܕܐ ܪ̈ܒܐ + {0} ܬܪܘܕܐ ܪܒܐ + {0} ܬܪ̈ܘܕܐ ܪ̈ܒܐ + + + ܬܪ̈ܘܕܐ ܙܥܘܪ̈ܐ + {0} ܬܪܘܕܐ ܙܥܘܪܐ + {0} ܬܪ̈ܘܕܐ ܙܥܘܪ̈ܐ + + + ܬܪ̈ܘܕܐ ܚܠܝ̈ܐ + {0} ܬܪܘܕܐ ܚܠܝܐ + {0} ܬܪ̈ܘܕܐ ܚܠܝ̈ܐ + + + ܬܪ̈ܘܕܐ ܚܠܝ̈ܐ ܐܡܦܪܬܘܪܝܐ + {0} ܬܪܘܕܐ ܚܠܝܐ ܐܡܦܪܬܘܪܝܐ + {0} ܬܪ̈ܘܕܐ ܚܠܝ̈ܐ ܐܡܦܪܬܘܪܝܐ + + + ܛܘܦܬܐ + {0} ܛܘܦܬܐ + {0} ܛܘܦ̈ܐ + + + ܕܪܟܡܐ ܪܕܘܝܐ + {0} ܕܪܟܡܐ ܪܕܘܝܐ + {0} ܕܪ̈ܟܡܐ ܪ̈ܕܘܝܐ + + + ܪ̈ܘܒܥܐ ܓܠܘܢ̈ܐ ܐܡܦܪܬܘܪܝܐ + {0} ܪܘܒܥܐ ܓܠܘܢܐ ܐܡܦܪܬܘܪܝܐ + {0} ܪ̈ܘܒܥܐ ܓܠܘܢ̈ܐ ܐܡܦܪܬܘܪܝܐ + + + ܦܢܝܬܐ ܫܪܫܢܝܬܐ + {0} ܡܕܢܚܐ + {0} ܓܪܒܝܐ + {0} ܬܝܡܢܐ + {0} ܡܥܪܒ݂ܐ + + + + + + {0}، ܘ{1} + {0} ܘ{1} + + + {0}، ܝܐ {1} + {0} ܝܐ {1} + + + {0}, {1} + {0}, {1} + {0}، ܝܐ {1} + {0} ܝܐ {1} + + + {0}, {1} + {0}, {1} + {0}، ܝܐ {1} + {0} ܝܐ {1} + + + {0}, {1} + {0}, {1} + {0}، ܘ{1} + {0} ܘ{1} + + + {0}, {1} + {0}, {1} + {0}، ܘ{1} + {0} ܘ{1} + + + {0}, {1} + {0}, {1} + {0}، ܘ{1} + {0} ܘ{1} + + + {0}, {1} + {0}, {1} + {0}، ܘ{1} + {0} ܘ{1} + + + {0}, {1} + {0}, {1} + {0}، ܘ{1} + {0} ܘ{1} + + + + + ܗܐ:ܗ + ܠܐ:ܠ + + + + und syr + {0} {1} + + {title} {given} {given2} {surname} + + + {given-informal} {surname} + + + {title} {given} {given2} {surname} + + + {title} {given} {given2} {surname} + + + {given-monogram-allCaps}{given2-monogram-allCaps}{surname-monogram-allCaps} + + + {given-informal-monogram-allCaps}.{surname-monogram-allCaps} + + + {title} {given} {given2-initial} {surname} + + + {given-informal} {surname} + + + {title} {surname} + + + {given-informal} + + + 
{surname-monogram-allCaps} + + + {given-monogram-allCaps}.{surname-monogram-allCaps} + + + {title} {given-initial} {surname} + + + {given-informal-initial}. {surname} + + + {title} {surname} + + + {given-informal} + + + {surname-monogram-allCaps} + + + {given-monogram-allCaps}.{surname-monogram-allCaps} + + + {surname}، {given} {given2} + + + {surname} {given-informal} + + + {title} {surname} + + + {given-informal} + + + {surname-monogram-allCaps}.{given-monogram-allCaps}.{given2-monogram-allCaps} + + + {surname-monogram-allCaps}.{given-informal-monogram-allCaps} + + + {surname}، {given} {given2-initial} + + + {surname} {given-informal} + + + {title} {surname} + + + {given-informal} + + + {surname-monogram-allCaps} + + + {given-informal-monogram-allCaps} + + + {surname}، {given-initial} + + + {surname} {given-initial} + + + {title} {surname} + + + {given-informal} + + + {surname-monogram-allCaps} + + + {given-informal-monogram-allCaps} + + + {surname-prefix} {surname-core}، {given} {given2} + + + {surname}، {given-informal} + + + {surname-prefix} {surname-core}، {given} {given2-initial} + + + {surname}، {given-informal} + + + {surname-prefix} {surname-core}، {given} {given2-initial} + + + {surname}، {given-informal} + + + ܣܪܓܘܢ + + + ܐܝܫܬܪ + ܒܝܬ ܣܪܓܝܣ + + + ܝܘܚܢܢ + ܢܝܢܘܣ + ܢܝܢܘܝܐ + + + ܕܘܟܬܘܪ ܡܠܦܢܐ + ܫܡܝܪܡ + ܨܦܪܐ + ܣܒܝܢܐ + ܕ + ܐܠܩܘܫ + ܒܪܣܝܢ ܣܒܪܬܘ + ܐܣܝܐ + + + diff --git a/make/data/cldr/common/main/syr_IQ.xml b/make/data/cldr/common/main/syr_IQ.xml new file mode 100644 index 00000000000..01741894830 --- /dev/null +++ b/make/data/cldr/common/main/syr_IQ.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/syr_SY.xml b/make/data/cldr/common/main/syr_SY.xml new file mode 100644 index 00000000000..7a342c7a419 --- /dev/null +++ b/make/data/cldr/common/main/syr_SY.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/szl.xml b/make/data/cldr/common/main/szl.xml new file mode 100644 index 00000000000..f4e2e7e9fd2 --- 
/dev/null +++ b/make/data/cldr/common/main/szl.xml @@ -0,0 +1,5412 @@ + + + + + + + + + + + afrikaans + aghym + akan + amharski + arabski + asamski + asu + asturyjski + azerbejdżański + azerski + basaa + biołoruski + bymba + byna + bułgarski + bambara + byngalski + tybetański + bretōński + bodo + bośniacki + katalōński + czakma + czeczyński + cebuano + chiga + czirokeski + sorani + korsykański + czeski + cerkiewnosłowiański + walijski + duński + taita + niymiecki + austriacki niymiecki + szwajcarski wysokoniymiecki + dżerma + dolnołużycki + duala + diola + dzongkha + ymbu + ewe + grecki + angelski + australijski angelski + kanadyjski angelski + brytyjski angelski + angelski (Wlk. Bryt.) + amerykański angelski + angelski (USA) + esperanto + hiszpański + amerykański hiszpański + europejski hiszpański + meksykański hiszpański + estōński + baskijski + ewōndo + perski + fulani + fiński + filipino + farerski + francuski + kanadyjski francuski + szwajcarski francuski + cajuński + friulski + zachodniofryzyjski + irlandzki + szkocki gaelicki + galicyjski + szwajcarski niymiecki + gudżarati + gusii + manx + hausa + hawajski + hebrajski + hindi + hmōng + chorwacki + gōrnołużycki + kreolski haitański + wyngerski + ôrmiański + interlingua + indōnezyjski + igbo + syczuański + islandzki + italijański + japōński + ngōmbe + machame + jawajski + gruziński + kabylski + kamba + makōnde + kreolski Wysp Zielōnego Przilōndka + koyra chiini + kikuju + kazachski + kako + grynlandzki + kalynjin + khmerski + kannada + koreański + kōnkani + kaszmirski + sambala + bafia + gwara kolōńsko + kurdyjski + kornijski + kirgiski + łaciński + langi + luksymburski + ganda + lakota + lingala + laotański + kreolski luizjański + luryjski pōłnocny + litewski + luba-katanga + luo + luhya + łotewski + masajski + meru + kreolski Mauritiusa + malgaski + makua + meta + maoryjski + macedōński + malajalam + mōngolski + marathi + malajski + maltański + mundang + moc jynzykōw + birmański + mazanderański + nama + 
norweski (bokmål) + ndebele pōłnocny + dolnoniymiecki + dolnozaksōński + nepalski + niderlandzki + flamandzki + ngumba + norweski (nynorsk) + ngymboōn + nuer + njandża + nyankole + ôrōmo + ôrija + ôsetyjski + pyndżabski + polski + pruski + paszto + portugalski + brazylijski portugalski + europejski portugalski + keczua + retorōmański + rundi + rumuński + mołdawski + rōmbo + ruski + kinya-ruanda + rwa + sanskryt + jakucki + samburu + sangu + sindhi + pōłnocnolapōński + syna + koyraboro synni + sango + tashelhiyt + syngaleski + słowacki + słowyński + samoański + inari + shōna + sōmalijski + albański + serbski + sotho połedniowy + sundajski + szwedzki + suahili + kōngijski suahili + ślōnski + tamilski + telugu + ateso + tadżycki + tajski + tigrinia + turkmyński + tōnga + turecki + tatarski + tasawaq + tamazight (Atlas Postrzodkowy) + ujgurski + ukraiński + niyznōmy jynzyk + urdu + uzbecki + wai + wietnamski + wolapik + vunjo + walser + wolof + khosa + soga + yangbyn + jidysz + joruba + kantōński + standardowy marokański tamazight + chiński + chiński mandaryński + chiński uproszczōny + uproszczōny chiński mandaryński + chiński tradycyjny + tradycyjny chiński mandaryński + zulu + brak treści natury jynzykowyj + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Świat + Afryka + Pōłnocno Ameryka + Połedniowo Ameryka + Ôceanijo + Zachodnio Afryka + Postrzodkowo Ameryka + Wschodnio Afryka + Pōłnocno Afryka + Postrzodkowo Afryka + Połedniowo Afryka + Ameryka + Pōłnocno Ameryka (USA, Kanada) + Karajiby + Wschodnio Azyjo + Połedniowo Azyjo + Połedniowo-wschodnio Azyjo + Połedniowo Europa + Australazyjo + Melanezyjo + Regiōn Mikrōnezyje + Polinezyjo + Azyjo + Postrzodkowo Azyjo + Zachodnio Azyjo + Europa + Wschodnio Europa + Pōłnocno Europa + Zachodnio 
Europa + Subsaharyjsko Afryka + Łacińsko Ameryka + Wyspa Wniebostōmpiynio + Andora + Zjednoczōne Ymiraty Arabske + Afganistan + Antigua i Barbuda + Anguilla + Albanijo + Armynijo + Angola + Antarktyda + Argyntyna + Amerykańske Samoa + Austryjo + Australijo + Aruba + Wyspy Alandzkie + Azerbejdżan + Bośnia i Hercegowina + Barbados + Bangladesz + Belgijo + Burkina Faso + Bułgaryjo + Bahrajn + Burundi + Bynin + Saint-Barthélemy + Bermudy + Brunei + Boliwijo + Karajibske Niderlandy + Brazylijo + Bahamy + Bhutan + Wyspa Bouveta + Botswana + Biołoruś + Belize + Kanada + Kokosowe Wyspy + Dymokratyczno Republika Kōnga + Kōngo (DRK) + Republika Postrzodkowoafrykańsko + Kōngo + Republika Kōnga + Szwajcaryjo + Côte d’Ivoire + Wybrzeże Kości Słōniowyj + Wyspy Cooka + Chile + Kamerun + Chiny + Kolumbijo + Wyspa Clippertona + Kostaryka + Kuba + Republika Zielōnego Przilōndka + Curaçao + Godnio Wyspa + Cypr + Czechy + Czesko Republika + Niymcy + Diego Garcia + Dżibuti + Danijo + Dōminika + Dōminikana + Algeryjo + Ceuta i Melilla + Ekwador + Estōnijo + Egipt + Zachodnio Sahara + Erytrea + Hiszpanijo + Etiopijo + Europejsko Unijo + Strefa euro + Finlandyjo + Fidżi + Falklandy + Falklandy (Malwiny) + Mikrōnezyjo + Wyspy Ôwcze + Francyjo + Gabōn + Wielko Brytanijo + Wlk. Bryt. 
+ Grynada + Gruzyjo + Francusko Gujana + Guernsey + Ghana + Gibraltar + Grynlandyjo + Gambijo + Gwinea + Gwadelupa + Rōwnikowo Gwinea + Grecyjo + Połedniowo Georgia i Połedniowy Sandwich + Gwatymala + Guam + Gwinea Bissau + Gujana + SRA Hōngkōng (Chiny) + Hōngkōng + Wyspy Heard i McDonalda + Hōnduras + Chorwacyjo + Haiti + Wyngry + Kanaryjske Wyspy + Indōnezyjo + Irlandyjo + Izrael + Wyspa Man + Indyjo + Brytyjske Terytorium Indyjskigo Ôceanu + Irak + Iran + Islandyjo + Italijo + Jersey + Jamajka + Jordanijo + Japōnijo + Kynijo + Kirgistan + Kambodża + Kiribati + Kōmory + Saint Kitts i Nevis + Pōłnocno Korea + Połedniowo Korea + Kuwejt + Kajmany + Kazachstan + Laos + Liban + Saint Lucia + Liechtenstein + Sri Lanka + Liberyjo + Lesotho + Litwa + Luksymburg + Łotwa + Libijo + Maroko + Mōnako + Mołdawijo + Czornogōra + Saint-Martin + Madagaskar + Wyspy Marshalla + Pōłnocno Macedōnijo + Mali + Mjanma (Birma) + Mōngolijo + SRA Makau (Chiny) + Makau + Pōłnocne Mariany + Martynika + Mauretanijo + Mōntserrat + Malta + Mauritius + Malediwy + Malawi + Meksyk + Malezyjo + Mozambik + Namibijo + Nowo Kaledōnijo + Niger + Norfolk + Nigeryjo + Nikaragua + Niderlandy + Norwegijo + Nepal + Nauru + Niue + Nowo Zelandyjo + Ōman + Panama + Peru + Francusko Polinezyjo + Papua-Nowo Gwinea + Filipiny + Pakistan + Polska + Saint-Pierre i Miquelon + Pitcairn + Portoryko + Palestyńske Terytoria + Palestyna + Portugalijo + Palau + Paragwaj + Katar + Ôceanijo — wyspy daleke + Reunion + Rumunijo + Serbijo + Rusyjo + Rwanda + Saudyjsko Arabijo + Wyspy Salōmōna + Seszele + Sudan + Szwecyjo + Singapur + Wyspa Świyntyj Helyny + Słowynijo + Svalbard i Jan Mayen + Słowacyjo + Sierra Leone + San Marino + Synegal + Sōmalijo + Surinam + Połedniowy Sudan + Wyspy Świyntego Tōmasza i Princowa + Salwador + Sint Maarten + Syryjo + Eswatini + Suazi + Tristan da Cunha + Turks i Caicos + Czad + Francuske Terytoria Połedniowe i Antarktyczne + Togo + Tajlandyjo + Tadżykistan + Tokelau + Timor-Leste + Wschodni 
Timor + Turkmynistan + Tunezyjo + Tōnga + Turcyjo + Trynidad i Tobago + Tuvalu + Tajwan + Tanzanijo + Ukrajina + Uganda + Daleke Myńsze Wyspy Stanōw Zjednoczōnych + Ôrganizacyjo Norodōw Zjednoczōnych + Stany Zjednoczōne + USA + Urugwaj + Uzbekistan + Watykan + Saint Vincent i Grynadyny + Wynezuela + Brytyjske Wyspy Dziewicze + Wyspy Dziewicze Stanōw Zjednoczōnych + Wietnam + Vanuatu + Wallis i Futuna + Samoa + Pseudoakcynty + Pseudodwurychtōnkowe + Kosowo + Jymyn + Majotta + Republika Połedniowyj Afryki + Zambijo + Zimbabwe + Niyznōmy regiōn + + + Gregoriański kalyndorz + Kalyndorz ISO-8601 + Sztandardowy porzōndek zortowanio + Cyfry zachodnie + + + brytyjski + amerykański + + + Jynzyk: {0} + Pismo: {0} + Regiōn: {0} + + + + [a ã b c ć d e f g h i j k l ł m n ń o ŏ ô õ ō p r s ś t u w y z ź ż] + [à ă â å ä ą ā æ č ç ď é è ĕ ê ě ë ę ē í ì ĭ î ï ī ľ ň ñ ó ò ö ø œ q ŕ ř š ß ť ú ù ŭ û ů ü ū v x ý ÿ ž] + [A B C Ć D E F G H I J K L Ł M N Ń O Ô Ō P Q R S Ś T U V W X Y Z Ź Ż] + [  \- ‑ , % ‰ + 0 1 2 3 4 5 6 7 8 9] + [\- ‐ ‑ – — , ; \: ! ? . 
… ' " ” „ « » ( ) \[ \] \{ \} § @ * / \& # % † ‡ ′ ″ ° ~] + + + + + » + « + + + + + + + + EEEE, d MMMM y G + GyMMMMEEEEd + + + + + d MMMM y G + GyMMMMd + + + + + d MMM y G + GyMMMd + + + + + dd.MM.y G + GyMMdd + + + + + + + {1} 'ô' {0} + + + {1} 'ô' {0} + + + + + {1} 'ô' {0} + + + {1} 'ô' {0} + + + + + {1}, {0} + + + {1}, {0} + + + + + {1}, {0} + + + {1}, {0} + + + + E, d + E, h:mm a + E, HH:mm + E, h:mm:ss a + E, HH:mm:ss + y G + MMM y G + d MMM y G + E, d MMM y G + d.MM + E, d.MM + d MMM + E, d MMM + d MMMM + y G + y G + MM.y G + d.MM.y G + E, d.MM.y G + LLL y G + d MMM y G + E, d MMM y G + LLLL y G + QQQ y G + QQQQ y G + + + {0}–{1} + + h B – h B + h–h B + + + h:mm B – h:mm B + h:mm–h:mm B + h:mm–h:mm B + + + d–d + + + y G – y G + y–y G + + + M.y GGGGG – M.y GGGGG + M.y – M.y GGGGG + M.y – M.y GGGGG + + + d.M.y – d.M.y GGGGG + d.M.y GGGGG – d.M.y GGGGG + d.M.y – d.M.y GGGGG + d.M.y – d.M.y GGGGG + + + E, d.M.y – E, d.M.y GGGGG + E, d.M.y GGGGG – E, d.M.y GGGGG + E, d.M.y – E, d.M.y GGGGG + E, d.M.y – E, d.M.y GGGGG + + + MMM y G – MMM y G + MMM – MMM y G + MMM y – MMM y G + + + d–d MMM y G + d MMM y G – d MMM y G + d MMM – d MMM y G + d MMM y – d MMM y G + + + E, d MMM – E, d MMM y G + E, d MMM y G – E, d MMM y G + E, d MMM – E, d MMM y G + E, d MMM y – E, d MMM y G + + + h a–h a + h–h a + + + HH–HH + + + h:mm a–h:mm a + h:mm–h:mm a + h:mm–h:mm a + + + HH:mm–HH:mm + HH:mm–HH:mm + + + h:mm a–h:mm a v + h:mm–h:mm a v + h:mm–h:mm a v + + + HH:mm–HH:mm v + HH:mm–HH:mm v + + + h a – h a v + h–h a v + + + HH–HH v + + + MM–MM + + + dd.MM–dd.MM + dd.MM–dd.MM + + + E, dd.MM–E, dd.MM + E, dd.MM–E, dd.MM + + + LLL–LLL + + + d–d MMM + d MMM–d MMM + + + E, d MMM–E, d MMM + E, d MMM–E, d MMM + + + y–y + + + MM.y–MM.y + MM.y–MM.y + + + dd–dd.MM.y + dd.MM–dd.MM.y + dd.MM.y–dd.MM.y + + + E, dd.MM.y–E, dd.MM.y + E, dd.MM.y–E, dd.MM.y + E, dd.MM.y–E, dd.MM.y + + + LLL–LLL y + LLL y–LLL y + + + d–d MMM y + d MMM–d MMM y + d MMM y–d MMM y + + + E, d–E, d MMM y + E, d MMM y–E, d MMM 
y + E, d MMM y–E, d MMM y + + + LLLL–LLLL y + LLLL y–LLLL y + + + + + + + + + sty + lut + mar + kwi + moj + czy + lip + siy + wrz + paź + lis + gru + + + S + L + M + K + M + C + L + S + W + P + L + G + + + stycznia + lutego + marca + kwietnia + moja + czyrwca + lipca + siyrpnia + września + października + listopada + grudnia + + + + + sty + lut + mar + kwi + moj + czy + lip + siy + wrz + paź + lis + gru + + + S + L + M + K + M + C + L + S + W + P + L + G + + + styczyń + luty + marzec + kwieciyń + moj + czyrwiec + lipiec + siyrpiyń + wrzesiyń + październik + listopad + grudziyń + + + + + + + niy + pyń + wto + str + szt + piō + sob + + + n + p + w + s + s + p + s + + + nd + + wt + st + sz + pt + sb + + + niydziela + pyńdziałek + wtorek + strzoda + sztwortek + piōntek + sobota + + + + + niy + pyń + wto + str + szt + piō + sob + + + n + p + w + s + s + p + s + + + nd + + wt + st + sz + pt + sb + + + niydziela + pyńdziałek + wtorek + strzoda + sztwortek + piōntek + sobota + + + + + + + I szr. + II szr. + III szr. + IV szr. + + + I + II + III + IV + + + I sztwierć roku + II sztwierć roku + III sztwierć roku + IV sztwierć roku + + + + + I szr. + II szr. + III szr. + IV szr. + + + I + II + III + IV + + + I sztwierć roku + II sztwierć roku + III sztwierć roku + IV sztwierć roku + + + + + + + do połedniŏ + po połedniu + + + do połedniŏ + po połedniu + + + do połedniŏ + po połedniu + + + + + do połedniŏ + po połedniu + + + do połedniŏ + po połedniu + + + do połedniŏ + po połedniu + + + + + + przed naszōm erōm + p.n.e. + naszyj ery + n.e. + + + p.n.e. + n.e. 
+ + + + + + EEEE, d MMMM y + yMMMMEEEEd + + + + + d MMMM y + yMMMMd + + + + + d MMM y + yMMMd + + + + + dd.MM.y + yMMdd + + + + + + + HH:mm:ss zzzz + + + + + HH:mm:ss z + + + + + HH:mm:ss + + + + + HH:mm + + + + + + + {1} 'ô' {0} + + + {1} 'ô' {0} + + + + + {1} 'ô' {0} + + + {1} 'ô' {0} + + + + + {1}, {0} + + + {1}, {0} + + + + + {1}, {0} + + + {1}, {0} + + + + E, d + E, HH:mm + E, HH:mm:ss + y G + MMM y G + d MMM y G + E, d MMM y G + d.MM + E, d.MM + d MMM + E, d MMM + d MMMM + MMMM, 'tydz'. W + MM.y + dd.MM.y + E, dd.MM.y + MMM y + d MMM y + E, d MMM y + MMMM y + QQQ y + QQQQ y + Y, 'tydz'. w + + + {0} {1} + + + {0}–{1} + + h B – h B + h–h B + + + h:mm B – h:mm B + h:mm–h:mm B + h:mm–h:mm B + + + d–d + + + y G – y G + y–y G + + + M.y GGGGG – M.y GGGGG + M.y – M.y GGGGG + M.y – M.y GGGGG + + + d.M.y – d.M.y GGGGG + d.M.y GGGGG – d.M.y GGGGG + d.M.y – d.M.y GGGGG + d.M.y – d.M.y GGGGG + + + E, d.M.y – E, d.M.y GGGGG + E, d.M.y GGGGG – E, d.M.y GGGGG + E, d.M.y – E, d.M.y GGGGG + E, d.M.y – E, d.M.y GGGGG + + + MMM y G – MMM y G + MMM – MMM y G + MMM y – MMM y G + + + d–d MMM y G + d MMM y G – d MMM y G + d MMM – d MMM y G + d MMM y – d MMM y G + + + E, d MMM – E, d MMM y G + E, d MMM y G – E, d MMM y G + E, d MMM – E, d MMM y G + E, d MMM y – E, d MMM y G + + + h a–h a + h–h a + + + HH–HH + + + h:mm a–h:mm a + h:mm–h:mm a + h:mm–h:mm a + + + HH:mm–HH:mm + HH:mm–HH:mm + + + h:mm a–h:mm a v + h:mm–h:mm a v + h:mm–h:mm a v + + + HH:mm–HH:mm v + HH:mm–HH:mm v + + + h a – h a v + h–h a v + + + HH–HH v + + + MM–MM + + + dd.MM–dd.MM + dd.MM–dd.MM + + + E, dd.MM–E, dd.MM + E, dd.MM–E, dd.MM + + + LLL–LLL + + + d–d MMM + d MMM–d MMM + + + E, d MMM–E, d MMM + E, d MMM–E, d MMM + + + y–y + + + MM.y–MM.y + MM.y–MM.y + + + dd–dd.MM.y + dd.MM–dd.MM.y + dd.MM.y–dd.MM.y + + + E, dd.MM.y–E, dd.MM.y + E, dd.MM.y–E, dd.MM.y + E, dd.MM.y–E, dd.MM.y + + + LLL–LLL y + LLL y–LLL y + + + d–d MMM y + d MMM–d MMM y + d MMM y–d MMM y + + + E, d–E, d MMM y + E, d MMM y–E, d MMM y + E, d MMM 
y–E, d MMM y + + + LLLL–LLLL y + LLLL y–LLLL y + + + + + + + + era + + + era + + + era + + + rok + łōńskigo roku + latoś + na bezrok + + za {0} roku + + + {0} roku tymu + + + + r. + łōńskigo roku + latoś + na bezrok + + za {0} roku + + + {0} roku tymu + + + + r. + łōńskigo roku + latoś + na bezrok + + za {0} roku + + + {0} roku tymu + + + + sztwierć roku + w zeszłyj sztwierci roku + w tyj sztwierci roku + w prziszłyj sztwierci roku + + za {0} sztwierci roku + + + {0} sztwierci roku tymu + + + + szr. + w zeszłyj sztwierci roku + w tyj sztwierci roku + w prziszłyj sztwierci roku + + za {0} szr. + + + {0} szr. tymu + + + + szr. + w zeszłyj sztwierci roku + w tyj sztwierci roku + w prziszłyj sztwierci roku + + za {0} szr. + + + {0} szr. tymu + + + + miesiōnc + w zeszłym miesiōncu + w tym miesiōncu + na drugi miesiōnc + + za {0} miesiōnca + + + {0} miesiōnca tymu + + + + mies. + w zeszłym miesiōncu + w tym miesiōncu + na drugi miesiōnc + + za {0} mies. + + + {0} mies. tymu + + + + mc + w zeszłym miesiōncu + w tym miesiōncu + na drugi miesiōnc + + za {0} mies. + + + {0} mies. tymu + + + + tydziyń + w zeszłym tydniu + w tym tydniu + na drugi tydziyń + + za {0} tydnia + + + {0} tydnia tymu + + tydziyń {0} + + + tydz. + w zeszłym tydniu + w tym tydniu + na drugi tydziyń + + za {0} tyd. + + + {0} tyd. tymu + + tydziyń {0} + + + tydz. + w zeszłym tydniu + w tym tydniu + na drugi tydziyń + + za {0} tyd. + + + {0} tyd. tymu + + tydziyń {0} + + + tydziyń miesiōnca + + + tydz. mies. + + + tydz. mies. + + + dziyń + wczorej + dzisiej + jutro + + za {0} dnia + + + {0} dnia tymu + + + + dziyń + wczorej + dzisiej + jutro + + za {0} dnia + + + {0} dnia tymu + + + + dziyń + wczorej + dzisiej + jutro + + za {0} dnia + + + {0} dnia tymu + + + + dziyń roku + + + dz. roku + + + dz. r. + + + dziyń tydnia + + + dziyń tyd. + + + dziyń tyd. + + + dziyń miesiōnca + + + dziyń mies. + + + dziyń mies. 
+ + + w zeszło niydziela + w ta niydziela + w prziszło niydziela + + za {0} niydziele + + + {0} niydziele tymu + + + + w zeszło niydziela + w ta niydziela + w prziszło niydziela + + za {0} niydziele + + + {0} niydziele tymu + + + + w zeszło niydziela + w ta niydziela + w prziszło niydziela + + za {0} niydziele + + + {0} niydziele tymu + + + + w zeszły pyńdziałek + w tyn pyńdziałek + w prziszły pyńdziałek + + za {0} pyńdziałku + + + {0} pyńdziałku tymu + + + + w zeszły pyńdziałek + w tyn pyńdziałek + w prziszły pyńdziałek + + za {0} pyńdziałku + + + {0} pyńdziałku tymu + + + + w zeszły pyńdziałek + w tyn pyńdziałek + w prziszły pyńdziałek + + za {0} pyńdziałku + + + {0} pyńdziałku tymu + + + + w zeszły wtorek + w tyn wtorek + w prziszły wtorek + + za {0} wtorku + + + {0} wtorku tymu + + + + w zeszły wtorek + w tyn wtorek + w prziszły wtorek + + za {0} wtorku + + + {0} wtorku tymu + + + + w zeszły wtorek + w tyn wtorek + w prziszły wtorek + + za {0} wtorku + + + {0} wtorku tymu + + + + w zeszło strzoda + w ta strzoda + w prziszło strzoda + + za {0} strzody + + + {0} strzody tymu + + + + w zeszło strzoda + w ta strzoda + w prziszło strzoda + + za {0} strzody + + + {0} strzody tymu + + + + w zeszło strzoda + w ta strzoda + w prziszło strzoda + + za {0} strzody + + + {0} strzody tymu + + + + w zeszły sztwortek + w tyn sztwortek + w prziszły sztwortek + + za {0} sztwortku + + + {0} sztwortku tymu + + + + w zeszły sztwortek + w tyn sztwortek + w prziszły sztwortek + + za {0} sztwortku + + + {0} sztwortku tymu + + + + w zeszły sztwortek + w tyn sztwortek + w prziszły sztwortek + + za {0} sztwortku + + + {0} sztwortku tymu + + + + w zeszły piōntek + w tyn piōntek + w prziszły piōntek + + za {0} piōntku + + + {0} piōntku tymu + + + + w zeszły piōntek + w tyn piōntek + w prziszły piōntek + + za {0} piōntku + + + {0} piōntku tymu + + + + w zeszły piōntek + w tyn piōntek + w prziszły piōntek + + za {0} piōntku + + + {0} piōntku tymu + + + + w zeszło sobota + w ta sobota + w 
prziszło sobota + + za {0} soboty + + + {0} soboty tymu + + + + w zeszło sobota + w ta sobota + w prziszło sobota + + za {0} soboty + + + {0} soboty tymu + + + + w zeszło sobota + w ta sobota + w prziszło sobota + + za {0} soboty + + + {0} soboty tymu + + + + rano / po połedniu / na wieczōr + + + rano / po połedniu / na wieczōr + + + rano / po poł. / na wiecz. + + + godzina + ta godzina + + za {0} godziny + + + {0} godziny tymu + + + + godz. + + za {0} godz. + + + {0} godz. tymu + + + + g. + + za {0} g. + + + {0} g. tymu + + + + minuta + ta minuta + + za {0} minuty + + + {0} minuty tymu + + + + min + + za {0} min + + + {0} min tymu + + + + min + + za {0} min + + + {0} min tymu + + + + sekunda + teraz + + za {0} sekundy + + + {0} sekundy tymu + + + + sek. + + za {0} sek. + + + {0} sek. tymu + + + + s + + za {0} s + + + {0} s tymu + + + + czasowo strefa + + + str. czasowo + + + str. czas. + + + + +HH:mm;-HH:mm + GMT{0} + GMT + czas: {0} + {0} (latowy czas) + {0} (sztandardowy czas) + {1} ({0}) + + + uniwersalny koordynowany czas + + + + Niyznōme miasto + + + Andora + + + Dubaj + + + Kabul + + + Antigua + + + Anguilla + + + Tirana + + + Erywań + + + Luanda + + + Rothera + + + Palmer + + + Troll + + + Syowa + + + Mawson + + + Davis + + + Wostok + + + Casey + + + Dumont d’Urville + + + McMurdo + + + Rio Gallegos + + + Myndoza + + + San Juan + + + Ushuaia + + + La Rioja + + + San Luis + + + Catamarca + + + Salta + + + Jujuy + + + Tucuman + + + Cordoba + + + Buenos Aires + + + Pago Pago + + + Wiedyń + + + Perth + + + Eucla + + + Darwin + + + Adelaide + + + Broken Hill + + + Melbourne + + + Currie + + + Hobart + + + Lindeman + + + Sydney + + + Brisbane + + + Macquarie + + + Lord Howe + + + Aruba + + + Maarianhamina + + + Baku + + + Sarajewo + + + Barbados + + + Dhaka + + + Bruksela + + + Wagadugu + + + Sofia + + + Bahrajn + + + Bużumbura + + + Porto Novo + + + Saint-Barthélymy + + + Bermudy + + + Brunei + + + La Paz + + + Kralendijk + + + Eirunepe + + + Rio Branco + + + 
Porto Velho + + + Boa Vista + + + Manaus + + + Cuiabá + + + Santarem + + + Campo Grande + + + Belém + + + Araguaina + + + Sao Paulo + + + Salvador + + + Fortaleza + + + Maceiō + + + Recife + + + Noronha + + + Nassau + + + Thimphu + + + Gaborone + + + Mińsk + + + Belize + + + Dawson + + + Whitehorse + + + Inuvik + + + Vancouver + + + Fort Nelson + + + Dawson Creek + + + Creston + + + Yellowknife + + + Edmonton + + + Swift Current + + + Cambridge Bay + + + Regina + + + Winnipeg + + + Resolute + + + Rainy River + + + Rankin Inlet + + + Atikokan + + + Thunder Bay + + + Nipigon + + + Toronto + + + Iqaluit + + + Pangnirtung + + + Moncton + + + Halifax + + + Goose Bay + + + Glace Bay + + + Blanc-Sablon + + + St. John’s + + + Wyspy Kokosowe + + + Kinszasa + + + Lubumbashi + + + Bangi + + + Brazzaville + + + Zurych + + + Abidżan + + + Rarotonga + + + Wyspa Wielkanocno + + + Punta Arenas + + + Santiago + + + Duala + + + Urumczi + + + Szanghaj + + + Bogota + + + Kostaryka + + + Hawana + + + Republika Zielōnego Przilōndka + + + Curaçao + + + Godnio Wyspa + + + Nikozja + + + Famagusta + + + Praga + + + Büsingen am Hochrhein + + + Berlin + + + Dżibuti + + + Kopynhaga + + + Dōminika + + + Santo Domingo + + + Alger + + + Galapagos + + + Guayaquil + + + Tallin + + + Kair + + + Al-Ujun + + + Asmara + + + Wyspy Kanaryjske + + + Ceuta + + + Madryt + + + Addis Abeba + + + Helsinki + + + Fidżi + + + Stanley + + + Chuuk + + + Pohnpei + + + Kosrae + + + Wyspy Ôwcze + + + Paryż + + + Libreville + + + + Brytyjski latowy czas + + Lōndyn + + + Grynada + + + Tbilisi + + + Kajynna + + + Guernsey + + + Akra + + + Gibraltar + + + Qaanaaq + + + Nuuk + + + Ittoqqortoormiit + + + Danmarkshavn + + + Bandżul + + + Kōnakry + + + Gwadelupa + + + Malabo + + + Atyny + + + Georgia Połedniowo + + + Gwatymala + + + Guam + + + Bissau + + + Gujana + + + Hōngkōng + + + Tegucigalpa + + + Zagrzeb + + + Port-au-Prince + + + Budapeszt + + + Dżakarta + + + Pontianak + + + Makassar + + + Jayapura + + + + Irlandyjo 
(latowy czas) + + Dublin + + + Jerozolima + + + Wyspa Man + + + Kalkuta + + + Czagos + + + Bagdad + + + Teheran + + + Reykjavik + + + Rzym + + + Jersey + + + Jamajka + + + Amman + + + Tokio + + + Nairobi + + + Biszkek + + + Phnom Penh + + + Enderbury + + + Kiritimati + + + Tarawa + + + Kōmory + + + Saint Kitts + + + Pjōngjang + + + Seul + + + Kuwejt + + + Kajmany + + + Aktau + + + Ôral + + + Atyrau + + + Aktiubińsk + + + Kustanaj + + + Kyzyłorda + + + Ałmaty + + + Wiyntian + + + Bejrut + + + Saint Lucia + + + Vaduz + + + Kolōmbo + + + Monrovia + + + Maseru + + + Wilno + + + Luksymburg + + + Ryga + + + Trypolis + + + Casablanca + + + Mōnako + + + Kiszyniōw + + + Podgorica + + + Marigot + + + Antananarywa + + + Kwajalein + + + Majuro + + + Skopje + + + Bamako + + + Rangun + + + Kobdo + + + Ułan Bator + + + Czojbalsan + + + Makau + + + Saipan + + + Martynika + + + Nawakszut + + + Mōntserrat + + + Malta + + + Malediwy + + + Blantyre + + + Tijuana + + + Hermosillo + + + Mazatlan + + + Chihuahua + + + Bahia Banderas + + + Ojinaga + + + Monterrey + + + Meksyk (miasto) + + + Matamoros + + + Merida + + + Cancún + + + Kuala Lumpur + + + Kuching + + + Maputo + + + Windhuk + + + Numea + + + Niamey + + + Norfolk + + + Lagos + + + Managua + + + Amsterdam + + + Ôslo + + + Katmandu + + + Nauru + + + Niue + + + Chatham + + + Auckland + + + Maskat + + + Panama + + + Lima + + + Tahiti + + + Markizy + + + Wyspy Gambiera + + + Port Moresby + + + Bougainville’owa Wyspa + + + Manila + + + Karaczi + + + Warszawa + + + Miquelon + + + Pitcairn + + + Portoryko + + + Gaza + + + Hebrōn + + + Azory + + + Madera + + + Lizbōna + + + Palau + + + Asuńcion + + + Katar + + + Réunion + + + Bukareszt + + + Belgrad + + + Kaliningrad + + + Moskwa + + + Wołgograd + + + Saratōw + + + Astrachań + + + Uljanowsk + + + Kirow + + + Samara + + + Jekaterynburg + + + Ômsk + + + Nowosybirsk + + + Barnauł + + + Tōmsk + + + Nowokuźnieck + + + Krasnojarsk + + + Irkuck + + + Czyta + + + Jakuck + + + Władywostok + + + 
Chandyga + + + Sachalin + + + Ust-Niera + + + Magadan + + + Sriedniekołymsk + + + Kamczatka + + + Anadyr + + + Kigali + + + Rijad + + + Guadalcanal + + + Mahé + + + Chartum + + + Sztokholm + + + Singapur + + + Świynto Helyna + + + Lublana + + + Longyearbyyn + + + Bratysława + + + Freetown + + + San Marino + + + Dakar + + + Mogadiszu + + + Paramaribo + + + Dżuba + + + São Tomé + + + Salwador + + + Lower Prince’s Quarter + + + Damaszek + + + Mbabane + + + Grand Turk + + + Ndżamena + + + Kerguelenowe Wyspy + + + Lomé + + + Bangkok + + + Duszanbe + + + Fakaofo + + + Dili + + + Aszchabad + + + Tunis + + + Tongatapu + + + Stambuł + + + Port-of-Spain + + + Funafuti + + + Tajpej + + + Dar es Salaam + + + Użgorod + + + Kijōw + + + Symferopol + + + Zaporoże + + + Kampala + + + Midway + + + Wake + + + Adak + + + Nome + + + Johnston + + + Anchorage + + + Yakutat + + + Sitka + + + Juneau + + + Metlakatla + + + Los Angeles + + + Boise + + + Phoenix + + + Denver + + + Beulah, Pōłnocno Dakota + + + New Salem, Pōłnocno Dakota + + + Center, Pōłnocno Dakota + + + Chicago + + + Mynominee + + + Vincennes, Indiana + + + Petersburg, Indiana + + + Tell City, Indiana + + + Knox, Indiana + + + Winamac, Indiana + + + Marengo, Indiana + + + Indianapolis + + + Louisville + + + Vevay, Indiana + + + Monticello + + + Detroit + + + Nowy Jork + + + Montevideo + + + Samarkanda + + + Taszkynt + + + Watykan + + + Saint Vincent + + + Caracas + + + Tortola + + + Saint Thomas + + + Ho Chi Minh + + + Efate + + + Wallis + + + Apia + + + Adyn + + + Majotta + + + Johannesburg + + + Lusaka + + + Harare + + + + Afganistan + + + + + postrzodkowoafrykański czas + + + + + wschodnioafrykański czas + + + + + połedniowoafrykański czas + + + + + zachodnioafrykański czas + zachodnioafrykański sztandardowy czas + zachodnioafrykański latowy czas + + + + + czas alaskański + alaskański czas sztandardowy + alaskański czas latowy + + + + + czas ałmacki + czas ałmacki sztandardowy + czas ałmacki latowy + + + + + amazōński 
czas + amazōński sztandardowy czas + amazōński latowy czas + + + + + czas postrzodkowoamerykański + czas postrzodkowoamerykański sztandardowy + czas postrzodkowoamerykański latowy + + + + + czas wschodnioamerykański + czas wschodnioamerykański sztandardowy + czas wschodnioamerykański latowy + + + + + czas gōrski + czas gōrski sztandardowy + czas gōrski latowy + + + + + czas pacyficzny + czas pacyficzny sztandardowy + czas pacyficzny latowy + + + + + czas Anadyr + sztandardowy czas Anadyr + latowy czas Anadyr + + + + + czas auktaucki + czas auktaucki sztandardowy + czas auktaucki latowy + + + + + czas aktiubiński + czas aktiubiński sztandardowy + czas aktiubiński latowy + + + + + Pōłwysep Arabski + Pōłwysep Arabski (sztandardowy czas) + Pōłwysep Arabski (latowy czas) + + + + + Argyntyna + Argyntyna (sztandardowy czas) + Argyntyna (latowy czas) + + + + + Argyntyna Zachodnio + Argyntyna Zachodnio (sztandardowy czas) + Argyntyna Zachodnio (latowy czas) + + + + + Armynijo + Armynijo (sztandardowy czas) + Armynijo (latowy czas) + + + + + czas atlantycki + czas atlantycki sztandardowy + czas atlantycki latowy + + + + + postrzodkowoaustralijski czas + postrzodkowoaustralijski sztandardowy czas + postrzodkowoaustralijski latowy czas + + + + + postrzodkowo-zachodnioaustralijski czas + postrzodkowo-zachodnioaustralijski sztandardowy czas + postrzodkowo-zachodnioaustralijski latowy czas + + + + + wschodnioaustralijski czas + wschodnioaustralijski sztandardowy czas + wschodnioaustralijski latowy czas + + + + + zachodnioaustralijski czas + zachodnioaustralijski sztandardowy czas + zachodnioaustralijski latowy czas + + + + + Azerbejdżan + Azerbejdżan (sztandardowy czas) + Azerbejdżan (latowy czas) + + + + + Azory + Azory (sztandardowy czas) + Azory (latowy czas) + + + + + Bangladesz + Bangladesz (sztandardowy czas) + Bangladesz (latowy czas) + + + + + Boliwijo + + + + + Brasília + Brasília (sztandardowy czas) + Brasília (latowy czas) + + + + + Wyspy Zielōnego Przilōndka + Wyspy 
Zielōnego Przilōndka (sztandardowy czas) + Wyspy Zielōnego Przilōndka (latowy czas) + + + + + Czamorro + + + + + czas Chile + Chile (sztandardowy czas) + Chile (latowy czas) + + + + + Chiny + Chiny (sztandardowy czas) + Chiny (latowy czas) + + + + + Czojbalsan + Czojbalsan (sztandardowy czas) + Czojbalsan (latowy czas) + + + + + Godnio Wyspa + + + + + Wyspy Kokosowe + + + + + Kolumbijo + Kolumbijo (sztandardowy czas) + Kolumbijo (latowy czas) + + + + + Wyspy Cooka + Wyspy Cooka (sztandardowy czas) + Wyspy Cooka (latowy czas) + + + + + Kuba + Kuba (sztandardowy czas) + Kuba (latowy czas) + + + + + Dumont-d’Urville + + + + + Timor Wschodni + + + + + Wyspa Wielkanocno + Wyspa Wielkanocno (sztandardowy czas) + Wyspa Wielkanocno (latowy czas) + + + + + Ekwador + + + + + postrzodkowoeuropejski czas + postrzodkowoeuropejski sztandardowy czas + postrzodkowoeuropejski latowy czas + + + CET + CET + CEST + + + + + wschodnioeuropejski czas + wschodnioeuropejski sztandardowy czas + wschodnioeuropejski latowy czas + + + EET + EET + EEST + + + + + wschodnioeuropejski dalszy czas + + + + + zachodnioeuropejski czas + zachodnioeuropejski sztandardowy czas + zachodnioeuropejski latowy czas + + + WET + WET + WEST + + + + + Falklandy + Falklandy (sztandardowy czas) + Falklandy (latowy czas) + + + + + Fidżi + Fidżi (sztandardowy czas) + Fidżi (latowy czas) + + + + + Gujana Francusko + + + + + Francuske Terytoria Połedniowe i Antarktyczne + + + + + czas Galapagos + + + + + Wyspy Gambiera + + + + + Gruzyjo + Gruzyjo (sztandardowy czas) + Gruzyjo (latowy czas) + + + + + Gilbertowe Wyspy + + + + + uniwersalny czas + + + + + Grynlandyjo Wschodnia + Grynlandyjo Wschodnia (sztandardowy czas) + Grynlandyjo Wschodnia (latowy czas) + + + + + Grynlandyjo Zachodnio + Grynlandyjo Zachodnio (sztandardowy czas) + Grynlandyjo Zachodnio (latowy czas) + + + + + Zatoka Perska + + + + + Gujana + + + + + Hawaje-Aleuty + Hawaje-Aleuty (sztandardowy czas) + Hawaje-Aleuty (latowy czas) + + + + + Hōngkōng + 
Hōngkōng (sztandardowy czas) + Hōngkōng (latowy czas) + + + + + Kobdo + Kobdo (sztandardowy czas) + Kobdo (latowy czas) + + + + + indyjski sztandardowy czas + + + + + Ôcean Indyjski + + + + + indochiński czas + + + + + Indōnezyjo Postrzodkowo + + + + + Indōnezyjo Wschodnio + + + + + Indōnezyjo Zachodnio + + + + + Irkuck + Irkuck (sztandardowy czas) + Irkuck (latowy czas) + + + + + Izrael + Izrael (sztandardowy czas) + Izrael (latowy czas) + + + + + Japōnijo + Japōnijo (sztandardowy czas) + Japōnijo (latowy czas) + + + + + czas Pietropawłowsk Kamczacki + sztandardowy czas Pietropawłowsk Kamczacki + czas Pietropawłowsk Kamczacki latowy + + + + + Kazachstan Wschodni + + + + + Kazachstan Zachodni + + + + + Krasnojarsk + Krasnojarsk (sztandardowy czas) + Krasnojarsk (latowy czas) + + + + + Kirgistan + + + + + Line Islands + + + + + Lord Howe + Lord Howe (sztandardowy czas) + Lord Howe (latowy czas) + + + + + Malezyjo + + + + + Malediwy + + + + + Markizy + + + + + Wyspy Marshalla + + + + + Meksyk Pōłnocno-Zachodni + Meksyk Pōłnocno-Zachodni (sztandardowy czas) + Meksyk Pōłnocno-Zachodni (latowy czas) + + + + + Meksyk (czas pacyficzny) + Meksyk (czas pacyficzny sztandardowy) + Meksyk (czas pacyficzny latowy) + + + + + Ułan Bator + Ułan Bator (sztandardowy czas) + Ułan Bator (latowy czas) + + + + + Moskwa + Moskwa (sztandardowy) + Moskwa (latowy) + + + + + Mjanma + + + + + Nowo Kaledōnijo + Nowo Kaledōnijo (sztandardowy czas) + Nowo Kaledōnijo (latowy czas) + + + + + Nowo Zelandyjo + Nowo Zelandyjo (sztandardowy czas) + Nowo Zelandyjo (latowy czas) + + + + + Nowo Fundlandyjo + Nowo Fundlandyjo (sztandardowy czas) + Nowo Fundlandyjo (latowy czas) + + + + + Fernando de Noronha + Fernando de Noronha (sztandardowy czas) + Fernando de Noronha (latowy czas) + + + + + Nowosybirsk + Nowosybirsk (sztandardowy czas) + Nowosybirsk (latowy czas) + + + + + Ômsk + Ômsk (sztandardowy czas) + Ômsk (latowy czas) + + + + + Papua-Nowo Gwinea + + + + + Paragwaj + Paragwaj (sztandardowy czas) 
+ Paragwaj (latowy czas) + + + + + czas Peru + Peru (sztandardowy czas) + Peru (latowy czas) + + + + + Filipiny + Filipiny (sztandardowy czas) + Filipiny (latowy czas) + + + + + Fyniks + + + + + Saint-Pierre i Miquelon + Saint-Pierre i Miquelon (sztandardowy czas) + Saint-Pierre i Miquelon (latowy czas) + + + + + Pohnpei + + + + + Pjōngjang + + + + + czas kyzyłordzki + czas kyzyłordzki sztandardowy + czas kyzyłordzki latowy + + + + + Sachalin + Sachalin (sztandardowy czas) + Sachalin (latowy czas) + + + + + czas Samara + sztandardowy czas Samara + czas Samara latowy + + + + + Seszele + + + + + Singapur + + + + + Wyspy Salōmōna + + + + + Georgia Połedniowo + + + + + Surinam + + + + + Tajpej + Tajpej (sztandardowy czas) + Tajpej (latowy czas) + + + + + Tadżykistan + + + + + Tōnga + Tōnga (sztandardowy czas) + Tōnga (latowy czas) + + + + + Chuuk + + + + + Turkmynistan + Turkmynistan (sztandardowy czas) + Turkmynistan (latowy czas) + + + + + Urugwaj + Urugwaj (sztandardowy czas) + Urugwaj (latowy czas) + + + + + Wynezuela + + + + + Władywostok + Władywostok (sztandardowy czas) + Władywostok (latowy czas) + + + + + Wołgograd + Wołgograd (sztandardowy czas) + Wołgograd (latowy czas) + + + + + Wostok + + + + + Wallis i Futuna + + + + + Jakuck + Jakuck (sztandardowy czas) + Jakuck (latowy czas) + + + + + Jekaterynburg + Jekaterynburg (sztandardowy czas) + Jekaterynburg (latowy czas) + + + + + czas jukōński + + + + + + + , +   + + + + + ¤ #,##0.00 + #,##0.00 + + + + + + + + + + {0} dni + Skryńć we {0}. prawo. 
+ + + + + + {0} na {1} + + + {0}⋅{1} + + + stało grawitacyje + {0} stałyj grawitacyje + + + metry na sekunda do kwadratu + {0} metra na sekunda do kwadratu + + + ôbrōt + {0} ôbrotu + + + radiany + {0} radiana + + + stopnie + {0} stopnia + + + minuty kōntowe + {0} minuty kōntowyj + + + sekundy kōntowe + {0} sekundy kōntowyj + + + kilōmetry kwadratowe + {0} kilōmetra kwadratowego + {0} na kilōmeter kwadratowy + + + hektary + {0} hektara + + + metry kwadratowe + {0} metra kwadratowego + {0} na meter kwadratowy + + + cyntymetry kwadratowe + {0} cyntymetra kwadratowego + {0} na cyntymeter kwadratowy + + + mile kwadratowe + {0} mili kwadratowyj + {0} na mila kwadratowōm + + + akry + {0} akra + + + jardy kwadratowe + {0} jarda kwadratowego + + + stopy kwadratowe + {0} stopy kwadratowyj + + + cale kwadratowe + {0} cala kwadratowego + {0} na cal kwadratowy + + + dunamy + {0} dunama + + + karaty + {0} karata + + + miligramy na decyliter + {0} miligrama na decyliter + + + milimole na liter + {0} milimola na liter + + + czyńści na milijōn + {0} czyńści na milijōn + + + procynt + {0} procynt + + + prōmil + {0} prōmila + + + punkt bazowy + {0} punktu bazowego + + + mol + {0} mola + + + litry na kilōmeter + {0} litra na kilōmeter + + + litry na 100 kilōmetrōw + {0} litra na 100 kilōmetrōw + + + mile na galōn + {0} mile na galōn + + + mile na galōn angelski + {0} mile na galōn angelski + + + petabajty + {0} petabajta + + + terabajty + {0} terabajta + + + terabity + {0} terabitu + + + gigabajty + {0} gigabajta + + + gigabity + {0} gigabitu + + + megabajty + {0} megabajta + + + megabity + {0} megabitu + + + kilobajty + {0} kilobajta + + + kilobity + {0} kilobitu + + + bajty + {0} bajta + + + bity + {0} bitu + + + stolecie + {0} stolecio + + + dekady + {0} dekady + + + lata + {0} roku + {0} na rok + + + miesiōnce + {0} miesiōnca + {0} na miesiōnc + + + tydnie + {0} tydnia + {0} na tydziyń + + + dni + {0} dnia + {0} na dziyń + + + godziny + {0} godziny + {0} na godzina + + + minuty + 
{0} minuty + {0} na minuta + + + sekundy + {0} sekundy + {0} na sekunda + + + milisekundy + {0} milisekundy + + + mikrosekundy + {0} mikrosekundy + + + nanosekundy + {0} nanosekundy + + + ampry + {0} ampra + + + miliampry + {0} miliampra + + + ōmy + {0} ōma + + + wolty + {0} wolta + + + kilokaloryje + {0} kilokaloryje + + + kaloryje + {0} kaloryje + + + kaloryje + {0} kaloryje + + + kilodżule + {0} kilodżula + + + dżule + {0} dżula + + + kilowatogodziny + {0} kilowatogodziny + + + elektrōnowolty + {0} elektrōnowolta + + + brytyjsko jednostka ciepła + {0} brytyjskij jednostki ciepła + + + term amerykański + {0} terma amerykańskigo + + + fōnt-siła + {0} fōnta-siły + + + niutōny + {0} niutōna + + + gigaherce + {0} gigaherca + + + megaherce + {0} megaherca + + + kiloherce + {0} kiloherca + + + herce + {0} herca + + + typograficzne ym + {0} yma + + + piksele + {0} piksela + + + megapiksele + {0} megapiksela + + + piksele na cyntymeter + {0} piksela na cyntymeter + + + piksele na col + {0} piksela na col + + + pōnkty na cyntymeter + {0} pōnkta na cyntymeter + + + pōnkty na col + {0} pōnkta na col + + + kilōmetry + {0} kilōmetra + {0} na kilōmeter + + + metry + {0} metra + {0} na meter + + + decymetry + {0} decymetra + + + cyntymetry + {0} cyntymetra + {0} na cyntymeter + + + milimetry + {0} milimetra + + + mikrōmetry + {0} mikrōmetra + + + nanōmetry + {0} nanōmetra + + + pikōmetry + {0} pikōmetra + + + mile + {0} mile + + + jardy + {0} jarda + + + stopy + {0} stopy + {0} na stopa + + + cole + {0} cola + {0} na col + + + parseki + {0} parseka + + + lata świytlne + {0} roku świytlnego + + + jednostki astrōnōmiczne + {0} jednostki astrōnōmicznyj + + + mile morske + {0} mile morskij + + + mila skandynawsko + {0} mile skandynawskij + + + punkty + {0} pkt. 
+ + + prōmiynie Słōńca + {0} prōmiynia Słōńca + + + luksy + {0} luksu + + + jasność Słōńca + {0} jasności Słōńca + + + tōny + {0} tōny + + + kilogramy + {0} kilograma + {0} na kilogram + + + gramy + {0} grama + {0} na gram + + + miligramy + {0} miligrama + + + mikrogramy + {0} mikrograma + + + krōtke tōny + {0} krōtkij tōny + + + fōnty + {0} fōnta + {0} na fōnt + + + uncyje + {0} uncyje + {0} na uncyjo + + + uncyjo trojańsko + {0} uncyje trojańskij + + + karaty + {0} karata + + + daltōny + {0} daltōna + + + masa Ziymie + {0} masy Ziymie + + + masa Słōńca + {0} masy Słōńca + + + gigawaty + {0} gigawata + + + megawaty + {0} megawata + + + kilowaty + {0} kilowata + + + waty + {0} wata + + + miliwaty + {0} miliwata + + + kōnie mechaniczne + {0} kōnia mechanicznego + + + milimetry supa rtyńci + {0} milimetra supa rtyńci + + + fōnty na col kwadratowy + {0} fōnta na col kwadratowy + + + cole supa rtyńci + {0} cola supa rtyńci + + + bary + {0} bara + + + milibary + {0} millibara + + + atmosfery + {0} atmosfery + + + paskale + {0} paskala + + + hektopaskale + {0} hektopaskala + + + kilopaskale + {0} kilopaskala + + + megapaskale + {0} megapaskala + + + kilōmetry na godzina + {0} kilōmetra na godzina + + + metry na sekunda + {0} metra na sekunda + + + mile na godzina + {0} mile na godzina + + + wynzeł + {0} wynzła + + + stopnie + {0} stopnia + + + stopnie Celsjusza + {0} stopnia Celsjusza + + + stopnie Fahrenheita + {0} stopnia Fahrenheita + + + kelwiny + {0} kelwina + + + stopofunty + {0} stopofunt + + + niutōnōmetry + {0} niutōnōmetra + + + kilōmetry sześciynne + {0} kilōmetra sześciynnego + + + metry sześciynne + {0} metra sześciynnego + {0} na meter sześciynny + + + cyntymetry sześciynne + {0} cyntymetra sześciynnego + {0} na cyntymeter sześciynny + + + mile sześciynne + {0} mile sześciynnyj + + + jardy sześciynne + {0} jarda sześciynnego + + + stopy sześciynne + {0} stopy sześciynnyj + + + cale sześciynne + {0} cala sześciynnego + + + megalitry + {0} megalitra + + + 
hektolitry + {0} hektolitra + + + litry + {0} litra + {0} na liter + + + decylitry + {0} decylitra + + + cyntylitry + {0} cyntylitra + + + mililitry + {0} mililitra + + + pōłkworty metryczne + {0} pōłkworty metrycznyj + + + ćwierćkwarty metryczne + {0} ćwierćkwarty metrycznyj + + + akro-stopy + {0} akro-stopy + + + galōny + {0} galōna + {0} na galōn + + + galōny angelske + {0} galōna angelskigo + {0} na galōn angelski + + + kworty + {0} kworty + + + pōłkworty + {0} pōłkworty + + + ćwierćkworty + {0} ćwierćkworty + + + uncyje płynu + {0} uncyje płynu + + + uncyje płynu imp. + {0} uncyje płynu imp. + + + łyżki stołowe + {0} łyżki stołowyj + + + łyżeczki + {0} łyżeczki + + + baryłki + {0} baryłki + + + rychtōnek świata + {0} dugości geograficznyj wschodnij + {0} szyrokości geograficznyj pōłnocnyj + {0} szyrokości geograficznyj połedniowyj + {0} dugości geograficznyj zachodnij + + + + + G + {0} G + + + ôbr. + {0} ôbr. + + + stopnie + + + minuty + + + sekundy + + + ha + + + akry + + + jd² + {0} jd² + + + st² + {0} st² + + + + {0} c² + {0}/c² + + + dunamy + {0} dunama + + + karaty + + + mg/dl + {0} mg/dl + + + milimole/liter + {0} mmol/l + + + czyńści/miliōn + {0} cz/mln + + + procynt + + + prōmil + + + pōnkt bazowy + {0}‱ + + + mol + {0} mola + + + l/km + {0} l/km + + + l/100 km + {0} l/100 km + + + mpg + {0} mpg + + + mile/gal ang. + {0} mi/gal ang. + + + bajty + {0} B + + + bity + {0} b + + + st. + {0} st. + + + dek. + {0} dek. + + + lata + {0} roku + {0}/rok + + + miesiōnce + {0} mies. + {0}/mies. + + + tydnie + {0} tyd. + {0}/tydz. + + + dni + {0} dnia + {0}/dziyń + + + godziny + {0} godz. + + + minuty + + + sekundy + {0} sek. 
+ + + milisekundy + + + ampry + + + ōmy + + + wolty + + + col + {0} col + + + J + + + elektrōnowolt + {0} eV + + + BTU + {0} Btu + + + term USA + {0} terma USA + + + fōnt-siła + {0} lbf + + + niutōn + {0} N + + + ym + {0} yma + + + piksele + {0} pks + + + megapiksele + {0} mp + + + pks/cm + {0} pks/cm + + + pks/c + {0} pks/c + + + pk/cm + {0} pk/cm + + + pk/c + {0} pk/c + + + metry + + + cyntymetry + + + mile + {0} mili + + + stopy + + + cale + {0} cala + {0}/cal + + + lata świetlne + {0} lś + + + j.a. + {0} j.a. + + + Mm + {0} Mm + + + punkty + {0} pkt. + + + prōmiynie Słōńca + {0} R☉ + + + lks + {0} lks + + + jasność Słōńca + {0} L☉ + + + g + + + fōnty + + + karaty + {0} kt + + + daltōny + {0} Da + + + masa Ziymie + {0} M⊕ + + + masa Słōńca + {0} M☉ + + + waty + + + KM + {0} KM + + + kPa + {0} kPa + + + MPa + {0} MPa + + + w. + {0} w. + + + lbf⋅ft + {0} lbf⋅ft + + + N⋅m + {0} N⋅m + + + jd³ + {0} jd³ + + + st³ + {0} st³ + + + + {0} c³ + + + Ml + {0} Ml + + + hl + {0} hl + + + litry + + + dl + {0} dl + + + cl + {0} cl + + + ml + {0} ml + + + pkwm. + {0} pkwm. + + + ćwkwm. + {0} ćwkwm. + + + akst. + {0} akst. + + + gal + {0} gal + {0}/gal + + + gal ang. + {0} gal ang. + {0}/gal ang. + + + kw. + {0} kw. + + + pōłkworty + {0} pkw. + + + ćwierćkworty + {0} ćwkw. + + + fl oz + {0} fl oz + + + fl oz imp. + {0} fl oz imp. + + + ł. stoł. + {0} ł. stoł. + + + łyżeczki + {0} łyżeczki + + + baryłki + {0} bbl + + + rychtōnek + + + + + {0}/{1} + + + {0}⋅{1} + + + {0} G + + + {0}′ + + + {0}″ + + + procynt + {0}% + + + l/100 km + {0} l/100 km + + + rok + {0} r. + + + miesiōnc + {0} mies. + + + tydziyń + {0} tyd. + + + dziyń + {0} dn. + {0}/d. + + + godzina + {0} g. + + + minuty + {0} min + + + sekundy + {0} sek. 
+ + + milisekundy + {0} ms + + + km + {0} km + + + metry + {0} m + + + cyntymetry + {0} cm + + + mm + {0} mm + + + mile + + + stopy + + + cale + {0}″ + {0}/cal + + + kg + {0} kg + + + g + {0} g + + + funty + + + {0} KM + + + km/h + {0} km/h + + + °C + {0}°C + + + liter + {0} l + + + rychtōnek + {0}E + {0}N + {0}S + {0}W + + + + h:mm + + + h:mm:ss + + + m:ss + + + + + {0} i {1} + {0} i {1} + + + {0} abo {1} + {0} abo {1} + + + {0}, {1} + {0}, {1} + {0}, abo {1} + {0} abo {1} + + + {0}, {1} + {0}, {1} + {0}, abo {1} + {0} abo {1} + + + {0}, {1} + {0}, {1} + {0} i {1} + {0} i {1} + + + {0}, {1} + {0}, {1} + {0} i {1} + {0} i {1} + + + {0}, {1} + {0}, {1} + {0} i {1} + {0} i {1} + + + {0}, {1} + {0}, {1} + {0} i {1} + {0} i {1} + + + {0}, {1} + {0}, {1} + {0} i {1} + {0} i {1} + + + + + niy:n + ja:j + + + diff --git a/make/data/cldr/common/main/szl_PL.xml b/make/data/cldr/common/main/szl_PL.xml new file mode 100644 index 00000000000..bb8c99c383c --- /dev/null +++ b/make/data/cldr/common/main/szl_PL.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/ta.xml b/make/data/cldr/common/main/ta.xml index 46a0903e2b0..9eaa6f37d39 100644 --- a/make/data/cldr/common/main/ta.xml +++ b/make/data/cldr/common/main/ta.xml @@ -1,6 +1,6 @@ - + + + + + + + + አፋርኛ + አብሐዚኛ + አፍሪቃንስኛ + አምሐረኛ + ዐርቢኛ + አሳሜዛዊ + አያማርኛ + አዜርባይጃንኛ + ባስኪርኛ + ቤላራሻኛ + ቡልጋሪኛ + ቢስላምኛ + በንጋሊኛ + ትበትንኛ + ብሬቶንኛ + ብሊን + ካታላንኛ + ኮርሲካኛ + ቼክኛ + ወልሽ + ዴኒሽ + ጀርመን + ድዞንግኻኛ + ግሪክኛ + እንግሊዝኛ + ኤስፐራንቶ + ስፓኒሽ + ኤስቶኒአን + ባስክኛ + ፐርሲያኛ + ፊኒሽ + ፊጂኛ + ፋሮኛ + ፈረንሳይኛ + ፍሪስኛ + አይሪሽ + እስኮትስ ጌልክኛ + ግዕዝኛ + ጋለጋኛ + ጓራኒኛ + ጉጃርቲኛ + ሃውሳኛ + ዕብራስጥ + ሐንድኛ + ክሮሽያንኛ + ሀንጋሪኛ + አርመናዊ + ኢንቴርሊንጓ + እንዶኒሲኛ + እንተርሊንግወ + እኑፒያቅኛ + አይስላንድኛ + ጣሊያንኛ + እኑክቲቱትኛ + ጃፓንኛ + ጃቫንኛ + ጊዮርጊያን + ካዛክኛ + ካላሊሱትኛ + ክመርኛ + ካናዳኛ + ኮሪያኛ + ካሽሚርኛ + ኩርድሽኛ + ኪርጊዝኛ + ላቲንኛ + ሊንጋላኛ + ላውስኛ + ሊቱአኒያን + ላትቪያን + ማላጋስኛ + ማዮሪኛ + ማከዶኒኛ + ማላያላምኛ + ሞንጎላዊኛ + ማራዚኛ + ማላይኛ + ማልቲስኛ + ቡርማኛ + ናኡሩ + ኔፓሊኛ + ደች + ኖርዌጂያን + ኦኪታንኛ + ኦሮምኛ + ኦሪያኛ + ፓንጃቢኛ + ፖሊሽ + ፑሽቶኛ + ፖርቱጋሊኛ + ኵቿኛ + 
ሮማንስ + ሩንዲኛ + ሮማኒያን + ሞልዳቫዊና + ራሽኛ + ኪንያርዋንድኛ + ሳንስክሪትኛ + ሲንድሂኛ + ሳንጎኛ + ስንሃልኛ + ሲዳምኛ + ስሎቫክኛ + ስሎቪኛ + ሳሞአኛ + ሾናኛ + ሱማልኛ + ልቤኒኛ + ሰርቢኛ + ስዋቲኛ + ሶዞኛ + ሱዳንኛ + ስዊድንኛ + ስዋሂሊኛ + ታሚልኛ + ተሉጉኛ + ታጂኪኛ + ታይኛ + ትግርኛ + ትግረ + ቱርክመንኛ + ታጋሎገኛ + ጽዋናዊኛ + ቶንጋ + ቱርክኛ + ጾንጋኛ + ታታርኛ + ትዊኛ + ኡዊግሁርኛ + ዩክረኒኛ + ኡርዱኛ + ኡዝበክኛ + ቪትናምኛ + ቮላፑክኛ + ዎሎፍኛ + ዞሳኛ + ይዲሻዊኛ + ዮሩባዊኛ + ዡዋንግኛ + ቻይንኛ + ዙሉኛ + + + + + + አንዶራ + የተባበሩት አረብ ኤምሬትስ + አልባኒያ + አርሜኒያ + አርጀንቲና + ኦስትሪያ + አውስትሬሊያ + አዘርባጃን + ቦስኒያ እና ሄርዞጎቪኒያ + ባርቤዶስ + ቤልጄም + ቡልጌሪያ + ባህሬን + ቤርሙዳ + ቦሊቪያ + ብራዚል + ቡህታን + ቤላሩስ + ቤሊዘ + ኮንጎ + የመካከለኛው አፍሪካ ሪፐብሊክ + ስዊዘርላንድ + ቺሊ + ካሜሩን + ቻይና + ኮሎምቢያ + ኬፕ ቬርዴ + ሳይፕረስ + ቼክ ሪፑብሊክ + ጀርመን + ዴንማርክ + ዶሚኒካ + ዶሚኒክ ሪፑብሊክ + አልጄሪያ + ኢኳዶር + ኤስቶኒያ + ግብጽ + ምዕራባዊ ሳህራ + ኤርትራ + ስፔን + ኢትዮጵያ + ፊንላንድ + ፊጂ + ሚክሮኔዢያ + ፈረንሳይ + እንግሊዝ + ጆርጂያ + የፈረንሳይ ጉዊአና + ጋምቢያ + ጊኒ + ኢኳቶሪያል ጊኒ + ግሪክ + ቢሳዎ + ጉያና + ሆንግ ኮንግ + ክሮኤሽያ + ሀይቲ + ሀንጋሪ + ኢንዶኔዢያ + አየርላንድ + እስራኤል + ህንድ + ኢራቅ + አይስላንድ + ጣሊያን + ጃማይካ + ጆርዳን + ጃፓን + ካምቦዲያ + ኮሞሮስ + ሰሜን ኮሪያ + ደቡብ ኮሪያ + ክዌት + ሊባኖስ + ሊቱዌኒያ + ላትቪያ + ሊቢያ + ሞሮኮ + ሞልዶቫ + ማከዶኒያ + ሞንጎሊያ + ማካዎ + ሞሪቴኒያ + ማልታ + ማሩሸስ + ሜክሲኮ + ማሌዢያ + ናሚቢያ + ኒው ካሌዶኒያ + ናይጄሪያ + ኔዘርላንድ + ኖርዌ + ኔፓል + ኒው ዚላንድ + ፔሩ + የፈረንሳይ ፖሊኔዢያ + ፓፑዋ ኒው ጊኒ + ፖላንድ + ፖርታ ሪኮ + ሮሜኒያ + ራሺያ + ሳውድአረቢያ + ሱዳን + ስዊድን + ሲንጋፖር + ስሎቬኒያ + ስሎቫኪያ + ሴኔጋል + ሱማሌ + ሲሪያ + ቻድ + የፈረንሳይ ደቡባዊ ግዛቶች + ታይላንድ + ታጃኪስታን + ምስራቅ ቲሞር + ቱኒዚያ + ቱርክ + ትሪኒዳድ እና ቶባጎ + ታንዛኒያ + ዩጋንዳ + አሜሪካ + ዩዝበኪስታን + ቬንዙዌላ + የእንግሊዝ ድንግል ደሴቶች + የአሜሪካ ቨርጂን ደሴቶች + የመን + ደቡብ አፍሪካ + ዛምቢያ + + + + [\u135F ፡ ፣-፧ ። ፠ ፨ ᎐-᎙ ፲-፼ ሀ-ሏ ⶀ ሐ-ሟ ᎀ-ᎃ ⶁ ሠ-ሯ ⶂ ሰ-ሷ ⶃ ሸ-ሿ ⶄ ቀ-ቈ ቊ-ቍ ቐ-ቖ ቘ ቚ-ቝ በ-ቧ ᎄ-ᎇ ⶅ ቨ-ቷ ⶆ ቸ-ቿ ⶇ ኀ-ኈ ኊ-ኍ ነ-ኗ ⶈ ኘ-ኟ ⶉ አ-ኧ ⶊ ከ-ኰ ኲ-ኵ ኸ-ኾ ዀ ዂ-ዅ ወ-ዖ ዘ-ዟ ⶋ ዠ-ዷ ⶌ ዸ-ዿ ⶍ ጀ-ጇ ⶎ ገ-ጐ ጒ-ጕ ጘ-ጟ ⶓ-ⶖ ጠ-ጧ ⶏ ጨ-ጯ ⶐ ጰ-ጷ ⶑ ጸ-ፏ ᎈ-ᎋ ፐ-ፗ ᎌ-ᎏ ⶒ ፘ-ፚ ⶠ-ⶦ ⶨ-ⶮ ⶰ-ⶶ ⶸ-ⶾ ⷀ-ⷆ ⷈ-ⷎ ⷐ-ⷖ ⷘ-ⷞ] + [ሀ ለ ⶀ መ ᎀ ᎁ ᎃ ⶁ ረ ⶂ ሰ ሸ ⶄ ቈ ቐ ቘ ᎄ ᎅ ᎇ ⶅ ቨ ⶆ ቸ ኀ ኈ ነ ኘ ⶉ ⶊ ከ ኰ ዀ ወ ዐ ⶋ ዠ ደ ⶌ ዸ ጀ ⶎ ጐ ጘ ⶓ ⶕ ⶖ ⶏ ጨ ⶐ ⶑ ጸ ፈ ᎈ ᎉ ᎋ ፐ ᎍ ᎎ ᎏ ፘ ⶠ ⶢ ⶣ ⶤ ⶦ ⶨ ⶩ ⶫ ⶬ ⶮ ⶰ ⶱ ⶳ ⶴ ⶶ ⶸ ⶹ ⶻ ⶼ ⶾ ⷀ ⷁ ⷃ ⷄ ⷆ ⷈ ⷉ ⷋ ⷌ ⷎ ⷐ ⷑ ⷓ ⷔ ⷖ ⷘ ⷙ ⷛ ⷜ ⷝ] + + + + + + + + EEEE፡ dd MMMM ዮም y G + GyMMMMEEEEdd + + + + + dd MMMM y G + 
GyMMMMdd + + + + + dd-MMM-y G + GyMMMdd + + + + + dd/MM/yy GGGGG + GGGGGyyMMdd + + + + + + + + + ጃንዩ + ፌብሩ + ማርች + ኤፕረ + ሜይ + ጁን + ጁላይ + ኦገስ + ሴፕቴ + ኦክተ + ኖቬም + ዲሴም + + + ጃንዩወሪ + ፌብሩወሪ + ማርች + ኤፕረል + ሜይ + ጁን + ጁላይ + ኦገስት + ሴፕቴምበር + ኦክተውበር + ኖቬምበር + ዲሴምበር + + + + + + + + + + + + + + + + + + + + + + + ሰ/ዓ + ሰኖ + ታላሸ + ኣረር + ከሚሽ + ጅምዓ + ሰ/ን + + + ሰንበት ዓባይ + ሰኖ + ታላሸኖ + ኣረርባዓ + ከሚሽ + ጅምዓት + ሰንበት ንኢሽ + + + + + + + + + + + + + + + + + + ቀደም ሰርምዕል + ሓቆ ስርምዕል + + + ቀደም ሰርምዕል + ሓቆ ስርምዕል + + + + + + ዓ/ዓ + ዓ/ም + + + + + + EEEE፡ dd MMMM ዮም y G + GyMMMMEEEEdd + + + + + dd MMMM y + yMMMMdd + + + + + dd-MMM-y + yMMMdd + + + + + dd/MM/yy + yyMMdd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + latn + + latn + ethi + + + + + ¤#,##0.00 + + + + + + የብራዚል ሪል + + + የቻይና ዩአን ረንሚንቢ + + + Nfk + + + የኢትዮጵያ ብር + + + አውሮ + + + የእንግሊዝ ፓውንድ ስተርሊንግ + + + የሕንድ ሩፒ + + + የጃፓን የን + + + የራሻ ሩብል + + + የአሜሪካን ዶላር + + + + diff --git a/make/data/cldr/common/main/tig_ER.xml b/make/data/cldr/common/main/tig_ER.xml new file mode 100644 index 00000000000..4a946eb9da7 --- /dev/null +++ b/make/data/cldr/common/main/tig_ER.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/tk.xml b/make/data/cldr/common/main/tk.xml index 25746e16259..c31689fa46e 100644 --- a/make/data/cldr/common/main/tk.xml +++ b/make/data/cldr/common/main/tk.xml @@ -1,6 +1,6 @@ - + + + + + + + + Seburu + Amhariki + Arabic + Azerbaijani + Belarusian + Bulgarian + Bengali + SeBosnia + Catalan + Se Czeck + Welsh + Danish + German + SeGerika + Sekgoa + Esperanto + Spanish + Estonian + Basque + Mo/SePerishia + Se-Finland + Tagalog + Faroese + Se Fora + Frisian + Irish + Scots Gaelic + Galician + Gujarati + Se heberu + Hindi + Croatian + Hungarian + Interlingua + Indonesian + Icelandic + Se Italiano + Se Japan + Javanese + Mo/SeJojia + Kannada + Se Korea + Latin + Lithuanian + Latvian + Macedonian + Malayalam + Marathi + 
Malay + Maltese + Nepali + Se Dutch + Puo ya kwa Norway + Occitan + Punjabi + Se Poland + Se Potoketsi + Se Roma + Russian + Slovak + Slovenian + Albanian + Serbian + Mo/SeSundane + Swedish + Swahili + Tamil + Telugu + Thai + Tigrinya + Klingon + Setswana + Turkish + Ukrainian + Urdu + Uzbek + Vietnamese + IsiXhosa + IsiZulu + + + Aforika Borwa + + + + [a b d e ê f g h i j k l m n o ô p r s t u w y] + [c q v x z] + [A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + + + + + + + + + + + + + + G y MMMM d, EEEE + GyMMMMEEEEd + + + + + G y MMMM d + GyMMMMd + + + + + G y MMM d + GyMMMd + + + + + GGGGG y-MM-dd + GGGGGyMMdd + + + + + + + + + Fer + Tlh + Mop + Mor + Mot + See + Phu + Pha + Lwe + Dip + Ngw + Sed + + + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + + + Ferikgong + Tlhakole + Mopitlo + Moranang + Motsheganang + Seetebosigo + Phukwi + Phatwe + Lwetse + Diphalane + Ngwanatsele + Sedimonthole + + + + + Fer + Tlh + Mop + Mor + Mot + See + Phu + Pha + Lwe + Dip + Ngw + Sed + + + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + + + Ferikgong + Tlhakole + Mopitlo + Moranang + Motsheganang + Seetebosigo + Phukwi + Phatwe + Lwetse + Diphalane + Ngwanatsele + Sedimonthole + + + + + + + Tsh + Mos + Labb + Labr + Labn + Labt + Mat + + + S + M + T + W + T + F + S + + + Tsh + Mos + Labb + Labr + Labn + Labt + Mat + + + Tshipi + Mosopulogo + Labobedi + Laboraro + Labone + Labotlhano + Matlhatso + + + + + Tsh + Mos + Labb + Labr + Labn + Labt + Mat + + + S + M + T + W + T + F + S + + + Tsh + Mos + Labb + Labr + Labn + Labt + Mat + + + Tshipi + Mosopulogo + Labobedi + Laboraro + Labone + Labotlhano + Matlhatso + + + + + + + Q1 + Q2 + Q3 + Q4 + + + 1 + 2 + 3 + 4 + + + Sephatlho sa ntlha sa ngwaga + Sephatlho sa bobedi + Sephatlho sa boraro + Sephatlho sa bone + + + + + Q1 + Q2 + Q3 + Q4 + + + 1 + 2 + 3 + 4 + + + Sephatlho sa ntlha sa ngwaga + Sephatlho sa bobedi + Sephatlho sa boraro + Sephatlho sa bone + + + + + + + AM + PM + + + a + p + + + AM + PM + + + + + AM + 
PM + + + AM + PM + + + AM + PM + + + + + + Pele ga tsalo ya Morena Jeso + Pele ga Krestie + Morago ga Leso la Morena Jeso + Morago ga Krestie + + + BC + BCE + AD + CE + + + + + + HH:mm:ss zzzz + HHmmsszzzz + + + + + HH:mm:ss z + HHmmssz + + + + + HH:mm:ss + HHmmss + + + + + HH:mm + HHmm + + + + + + + {1} 'ka' {0} + + + + + {1} 'ka' {0} + + + + + {1}, {0} + + + + + {1}, {0} + + + + d + ccc + d, E + E h:mm a + E HH:mm + E h:mm:ss a + E HH:mm:ss + G y + G y MMM + G y MMM d + G y MMM d, E + h a + HH + h:mm a + HH:mm + h:mm:ss a + HH:mm:ss + h:mm:ss a v + HH:mm:ss v + h:mm a v + HH:mm v + L + MM-dd + MM-dd, E + LLL + MMM d + MMM d, E + MMMM d + 'beke' 'ya' W 'ya' MMM + 'beke' 'ya' W 'ya' MMM + mm:ss + y + y-MM + y-MM-dd + y-MM-dd, E + y MMM + y MMM d + y MMM d, E + y MMMM + y QQQ + y QQQQ + 'beke' w 'ya' Y + 'beke' w 'ya' Y + + + {0} {1} + + + {0} – {1} + + d–d + + + h a – h a + h–h a + + + HH–HH + + + h:mm a – h:mm a + h:mm–h:mm a + h:mm–h:mm a + + + HH:mm–HH:mm + HH:mm–HH:mm + + + h:mm a – h:mm a v + h:mm–h:mm a v + h:mm–h:mm a v + + + HH:mm–HH:mm v + HH:mm–HH:mm v + + + h a – h a v + h–h a v + + + HH–HH v + + + MM–MM + + + MM-dd – MM-dd + MM-dd – MM-dd + + + MM-dd, E – MM-dd, E + MM-dd, E – MM-dd, E + + + LLL–LLL + + + MMM d–d + MMM d – MMM d + + + MMM d, E – MMM d, E + MMM d, E – MMM d, E + + + y–y + + + y-MM – y-MM + y-MM – y-MM + + + y-MM-dd – y-MM-dd + y-MM-dd – y-MM-dd + y-MM-dd – y-MM-dd + + + y-MM-dd, E – y-MM-dd, E + y-MM-dd, E – y-MM-dd, E + y-MM-dd, E – y-MM-dd, E + + + y MMM–MMM + y MMM – y MMM + + + y MMM d–d + y MMM d – MMM d + y MMM d – y MMM d + + + y MMM d, E – MMM d, E + y MMM d, E – MMM d, E + y MMM d, E – y MMM d, E + + + y MMMM–MMMM + y MMMM – y MMMM + + + + + + + + 1 + + . 
+   + % + + + - + E + × + + + NaN + : + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + ¤#,##0.00 + + + ¤#,##0.00 + + + {0} {1} + {0} {1} + + + + R + + + + ≥{0} + {0}–{1} + + + diff --git a/make/data/cldr/common/main/tn_BW.xml b/make/data/cldr/common/main/tn_BW.xml new file mode 100644 index 00000000000..9a07e5f9c3c --- /dev/null +++ b/make/data/cldr/common/main/tn_BW.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + + + P + + + + diff --git a/make/data/cldr/common/main/tn_ZA.xml b/make/data/cldr/common/main/tn_ZA.xml new file mode 100644 index 00000000000..d0a698ebd79 --- /dev/null +++ b/make/data/cldr/common/main/tn_ZA.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/to.xml b/make/data/cldr/common/main/to.xml index 3d38ee20179..4185fe4579e 100644 --- a/make/data/cldr/common/main/to.xml +++ b/make/data/cldr/common/main/to.xml @@ -1,6 +1,6 @@ - + + + + + + + + Jeman + Austria Jeman + Swis Jeman + Inglis + Australian Inglis + Kenedien Inglis + Britis Inglis + Amerikan Inglis + Spenis + Saut Amerikan Spenis + Spenis (Spein) + Meksikan Spenis + Frens + Kenedien Frens + Swis Frens + Italien + Japanis + Potigis + Brasilien Potigis + Yurop Potigis + Rasien + Tok Pisin + Tok ples i no stap + Sainis + Isipela Sainis + Tredisinol Sainis + + + + + + + + + + + + Brasil + Saina + Jemani + Frans + Yunaited Kingdom + India + Itali + Papua Niugini + Rijen i no stap + + + Gregorien kalenda + stendet karensi fomet + stendet oda bilong skelim + westen namba + + + tok ples: {0} + skript: {0} + rijen: {0} + + + + [a b d e f g h i j k l m n o p r s t u w y] + [c q v x z] + [A B D E F G H I J K L M N O P R S T U W Y] + + + + + + + + Jan + Feb + Mas + Epr + Me + Jun + Jul + Oga + Sep + Okt + Nov + Des + + + Janueri + Februeri + Mas + Epril + Me + Jun + Julai + Ogas + Septemba + Oktoba + Novemba + Desemba + + + + + + + San + Man + Tun + Tri + Fon + Fra + Sar + + + Sande + Mande + Tunde + Trinde + Fonde + Fraide + Sarere + + + + + + + AM + PM 
+ + + + + + + EEE, dd MMMM y + yyyyMMMMEEEdd + + + + + dd MMMM y + yyyyMMMMdd + + + + + dd MMM y + yyyyMMMdd + + + + + dd/MM/yy + yyMMdd + + + + + + + hh:mm:ss a zzzz + ahhmmsszzzz + + + + + hh:mm:ss a zzz + ahhmmsszzz + + + + + hh:mm:ss a + ahhmmss + + + + + hh:mm a + ahhmm + + + + + + d/M/y + d MMM y + + + + + + + latn + + . + , + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + #,##0.00 ¤ + + + + + + + yes:y + no:n + + + diff --git a/make/data/cldr/common/main/tpi_PG.xml b/make/data/cldr/common/main/tpi_PG.xml new file mode 100644 index 00000000000..59c299a7362 --- /dev/null +++ b/make/data/cldr/common/main/tpi_PG.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/tr.xml b/make/data/cldr/common/main/tr.xml index d41cc2f3d9e..28a8d7b4c06 100644 --- a/make/data/cldr/common/main/tr.xml +++ b/make/data/cldr/common/main/tr.xml @@ -1,6 +1,6 @@ - + + + + + + + + patas Monchiara + patas Towjih + patas Ingrisi + patas Espanniu + patas Bosey + patas Heyti + patas Itariya + patas Nihong + patas Bowdu + patas Pajey + patas Ruski + patas Srpian + patas Taroko + Ini klayna patas ni + patas Yurtu + patas Ipaw + Qantan Ipaw patas + Baday Ipaw patas + + + + + + + + + + + + alang Nanci + alang Posniya + alang Pajey + alang Puwei + alang Switjrrant + alang Ipaw + alang Towjih + alang Posey + alang Inglis + alang Nanjiouya ni Nansanminji + alang Htee ni Mayktan + alang Krowtia + alang Intu + alang Inglis niq Intu + alang Itariya + alang Nihong + alang Mondineygrw + alang Srbia + alang Ruski + alang Snmarinow + alang Posey niq Nan + alang Amarika + ini klayi na alang ni + + + Jiyax Yisu Thulang + + + Snamrika + Snyunaydi + + + Kari: {0} + Patas: {0} + Alang: {0} + + + + [a b c d e g h i j k l m n {ng} o p q r s t u w x y] + [ḏ f ɨ ḻ ṟ ṯ ʉ v z ʼ] + [A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + + + + + + + + + + + + + + EEEE, G y MMMM dd + GyMMMMEEEEdd + + + + + G y MMMM d + GyMMMMd + + + + + G y MMM d + GyMMMd + + + + + 
GGGGG y-MM-dd + GGGGGyMMdd + + + + + + d + H:mm + L + M-d + E, M-d + LLL + MMM d + E MMM d + MMMM d + E MMMM d + mm:ss + y + y-M + E, y-M-d + y MMM + E, y MMM d + y MMMM + y Q + y QQQ + + + + + + + + Kii + Dhi + Tri + Spi + Rii + Mti + Emi + Mai + Mni + Mxi + Mxk + Mxd + + + Kingal idas + Dha idas + Tru idas + Spat idas + Rima idas + Mataru idas + Empitu idas + Maspat idas + Mngari idas + Maxal idas + Maxal kingal idas + Maxal dha idas + + + + + K + D + T + S + R + M + E + P + A + M + K + D + + + + + + + Emp + Kin + Dha + Tru + Spa + Rim + Mat + + + Jiyax sngayan + tgKingal jiyax iyax sngayan + tgDha jiyax iyax sngayan + tgTru jiyax iyax sngayan + tgSpac jiyax iyax sngayan + tgRima jiyax iyax sngayan + tgMataru jiyax iyax sngayan + + + + + E + K + D + T + S + R + M + + + + + + + mn1 + mn2 + mn3 + mn4 + + + mnprxan + mndha + mntru + mnspat + + + + + + + AM + PM + + + Brax kndaax + Baubau kndaax + + + + + + Brah jikan Yisu Thulang + Bukuy jikan Yisu Thulang + + + BRY + BUY + + + + + + EEEE, y MMMM dd + yMMMMEEEEdd + + + + + y MMMM d + yMMMMd + + + + + y MMM d + yMMMd + + + + + y-MM-dd + yMMdd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + d + H:mm + L + M-d + E, M-d + LLL + MMM d + E MMM d + MMMM d + E MMMM d + mm:ss + y + y-M + E, y-M-d + y MMM + E, y MMM d + y MMMM + y Q + y QQQ + + + + + + + Hngkawas + + + hngkawas + + + Idas + + + Jiyax iyax sngayan + + + Jiyax + Shiga + Jiyax sayang + Saman + + + Jiyax quri jiyax iyax sngayan + + + Jikan + + + Tuki + + + Spngan + + + Seykn + + + Alang + + + + +HH:mm;-HH:mm + JQG{0} + Jikan {0} + + Ini klayi ka Jikan hini + + + Jikan alang Purank + + + Jikan alang Grad + + + Jikan alang Snpaurow + + + Jikan alang Honoruru + + + Jikan alang Ankriji + + + Jikan alang Rosanci + + + Jikan alang Bonhuan + + + Jikan alang Tanbo + + + Jikan alang Jiciak + + + Jikan alang Intiannaporis + + + Jikan alang Niuyue + + + + Jikan Con-Amarika + Snegun 
Jikan Con-Amarika + Jikan Con-Amarika o Karat Rbagan + + + JCA + SJCA + JCAKR + + + + + Jikan Ton-Amarika + Snegun Jikan Ton-Amarika + Jikan Ton-Amarika o Karat Rbagan + + + JTA + SJTA + JTAKR + + + + + Jikan Yama-Amarika + Snegun Jikan Yama-Amarika + Jikan Hidaw niq Yama-Amarika + + + JYA + SJYA + JHYA + + + + + Jikan Daybinyan + Snegun Jikan Amarika-Daybinyan + Jikan Amarika-Daybinyan o Karat Rbagan + + + JD + SJAD + JADKR + + + + + Jikan Yayun Tasiyan + Snegun Jikan Yayun Tasiyan + Jikan Yayun Tasiyan o Karat Rbagan + + + JYT + SJYT + JYTKR + + + + + Jikan Conow + Snegun Jikan Conow + Jikan Conow o Karat Rbagan + + + JC + JC + JCKR + + + + + Jikan Tonow + Snegun Jikan Tonow + Jikan Tonow o Karat Rbagan + + + JT + JT + JTKR + + + + + Jikan Siow + Snegun Jikan Siow + Jikan Siow o Karat Rbagan + + + JS + JS + JSKR + + + + + Jikan Quri Grinweyji + + + JQG + + + + + + + . + , + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + ¤ #,##0.00 + + + + + + pila Autaria + + + pila Pajey + + + pila Ipaw + + + pila Irow + + + pila Inglis + + + pila Hong Kong + + + pila Intia + + + pila Nihong + + + pila Macao + + + pila Nowey + + + pila Ruski + + + pila Taiwan + + + pila America + + + ini klayi pila ni + + + + + + + {0} Hnkawas + + + {0} Idas + + + {0} Jiyax iyax sngayan + + + {0} Jiyax + + + {0} Tuki + + + {0} spngan + + + {0} Seykn + + + + + + yiru:y + mnan:m + + + diff --git a/make/data/cldr/common/main/trv_TW.xml b/make/data/cldr/common/main/trv_TW.xml new file mode 100644 index 00000000000..f8194882dcf --- /dev/null +++ b/make/data/cldr/common/main/trv_TW.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/trw.xml b/make/data/cldr/common/main/trw.xml new file mode 100644 index 00000000000..5589a7966a1 --- /dev/null +++ b/make/data/cldr/common/main/trw.xml @@ -0,0 +1,6145 @@ + + + + + + + + + + + {0} ({1}) + {0}،{1} + {0}: {1} + + + افریقی + اگھیم + آکان + امھاریک + عربی + ماڈرن اسٹینڈرڈ عربی + اسامی + اسو + استوری + ازری + 
باسا + بیلاروسی + بیمبا + بینا + بلغاری + بمبارا + بنگلہ + بریٹون + بوڈو + بوسنیائی + جرمن + آسٹریائی جرمن + سوئس ہائی جرمن + انگریزی + آسٹریلیائی انگریزی + کینیڈین انگریزی + برطانوی انگریزی + انگریزی (یو کے) + امریکی انگریزی + ہسپانوی + لاطینی امریکی ہسپانوی + یورپی ہسپانوی + میکسیکن ہسپانوی + فرانسیسی + کینیڈین فرانسیسی + سوئس فرینچ + ہندی + ارمینی + انڈونیثیائی + اطالوی + جاپانی + کوریائی + بافیہ + برمی + ڈچ + فلیمِش + پولش + پُرتگالی + برازیلی پرتگالی + یورپی پرتگالی + روسی + البانی + تھائی + ترکی + توروالی + نامعلوم جِب + چینی + چینی، مندارن + چینی (آسان کوئیل) + سادہ مندارن چینی + روایتی چینی + روایتی مندارن چینی + + + + + + + + + + + + + + + + دونیئ + افریقہ + شمالی امریکہ + جنوبی امریکہ + اوشیانیا + مغربی افریقہ + وسطی امریکہ + مشرقی افریقہ + شمالی افریقہ + وسطی افریقہ + جنوبی افریقہ سی علاقہ + امیریکاز + شمالی امریکہ سی علاقہ + کریبیائی + مشرقی ایشیا + جنوبی ایشیا + جنوب مشرقی ایشیا + جنوبی یورپ + آسٹریلیشیا + مالینیشیا + مائکرونیشیائی علاقہ + پولینیشیا + ایشیا + وسطی ایشیا + مغربی ایشیا + یورپ + مشرقی یورپ + شمالی یورپ + مغربی یورپ + ذیلی صحارن افریقہ + لاطینی امریکہ + اسینشن آئلینڈ + انڈورا + متحدہ عرب امارات + افغانستان + انٹیگوا اور باربودا + انگوئیلا + البانیہ + آرمینیا + انگولا + انٹارکٹیکا + ارجنٹینا + امریکی ساموآ + آسٹریا + اسٹریلیا + اروبا + آلینڈ آئلینڈز + آذربائیجان + بوسنیا آں ہرزیگووینا + بارباڈوس + بنگلہ دیش + بیلجیم + برکینا فاسو + بلغاریہ + بحرین + برونڈی + بینن + سینٹ برتھلیمی + برمودا + برونائی + بولیویا + کریبیائی نیدرلینڈز + برازیل + بہاماس + بھوٹان + بؤویٹ آئلینڈ + بوتسوانا + بیلاروس + بیلائز + کینیڈا + کوکوس (کیلنگ) جزائر + کانگو - کنشاسا + کانگو (DRC) + وسط افریقی جمہوریہ + کانگو - برازاویلے + کانگو (جمہوریہ) + سوئٹزر لینڈ + کوٹ ڈی آئیوری + آئیوری کوسٹ + کک آئلینڈز + چلی + کیمرون + چین + کولمبیا + کلپرٹن آئلینڈ + کوسٹا ریکا + کیوبا + کیپ ورڈی + کیوراکاؤ + جزیرہ کرسمس + قبرص + چیکیا + چیک جمہوریہ + جرمنی + ڈائجو گارسیا + جبوتی + ڈنمارک + ڈومنیکا + جمہوریہ ڈومينيکن + الجیریا + سیئوٹا آں میلیلا + ایکواڈور + اسٹونیا + مصر + مغربی صحارا 
+ اریٹیریا + ہسپانیہ + ایتھوپیا + یوروپی یونین + یوروزون + فن لینڈ + فجی + فاکلینڈ جزائر + فاکلینڈ جزائر (مالویناس) + مائکرونیشیا + جزائر فارو + فرانس + گیبون + سلطنت متحدہ + یو کے سلطنت متحدہ + گریناڈا + جارجیا + فرینچ گیانا + گوئرنسی + گھانا + جبل الطارق + گرین لینڈ + گیمبیا + گنی + گواڈیلوپ + استوائی گیانا + یونان + جنوبی جارجیا آں جنوبی سینڈوچ جزائر + گواٹے مالا + گوام + گنی بساؤ + گیانا + ہانگ کانگ SAR چین + ہانگ کانگ ہانگ کانگ SAR چین + ہیرڈ جزیرہ و میکڈولینڈ جزائر + ہونڈاروس + کروشیا + ہیٹی + ہنگری + کینری آئلینڈز + انڈونیشیا + آئرلینڈ + اسرائیل + آئل آف مین + بھارت + برطانوی بحر ہند سی علاقہ + عراق + ایران + آئس لینڈ + اٹلی + جرسی + جمائیکا + اردن + جاپان + کینیا + کرغزستان + کمبوڈیا + کریباتی + کوموروس + سینٹ کٹس اور نیویس + شمالی کوریا + جنوبی کوریا + کویت + کیمین آئلینڈز + قزاخستان + لاؤس + لبنان + سینٹ لوسیا + لیشٹنسٹائن + سری لنکا + لائبیریا + لیسوتھو + لیتھونیا + لکسمبرگ + لٹویا + لیبیا + مراکش + موناکو + مالدووا + مونٹے نیگرو + سینٹ مارٹن + مڈغاسکر + مارشل آئلینڈز + شمالی مقدونیہ + مالی + میانمار (برما) + منگولیا + مکاؤ SAR چین + مکاؤ مکاؤ SAR چین + شمالی ماریانا آئلینڈز + مارٹینک + موریطانیہ + مونٹسیراٹ + مالٹا + ماریشس + مالدیپ + ملاوی + میکسیکو + ملائشیا + موزمبیق + نامیبیا + نیو کلیڈونیا + نائجر + نارفوک آئلینڈ + نائجیریا + نکاراگووا + نیدر لینڈز + ناروے + نیپال + نؤرو + نیئو + نیوزی لینڈ + عمان + پانامہ + پیرو + فرانسیسی پولینیشیا + پاپوآ نیو گنی + فلپائن + پاکستان + پولینڈ + سینٹ پیئر آں میکلیئون + پٹکائرن جزائر + پیورٹو ریکو + فلسطینی خطے + فلسطین فلسطینی خطے + پرتگال + پلاؤ + پیراگوئے + قطر + بیرونی اوشیانیا + ری یونین + رومانیہ + سربیا + روس + روانڈا + سعودی عرب + سولومن آئلینڈز + سشلیز + سوڈان + سویڈن + سنگاپور + سینٹ ہیلینا + سلووینیا + سوالبرڈ آں جان ماین + سلوواکیہ + سیرالیون + سان مارینو + سینیگل + صومالیہ + سورینام + جنوبی سوڈان + ساؤ ٹومے آں پرنسپے + ال سلواڈور + سنٹ مارٹن + شام + سواتنی + سوازی لینڈ + ٹرسٹن ڈا کیونہا + ٹرکس آں کیکوس جزائر + چاڈ + فرانسیسی جنوبی خطے + ٹوگو + تھائی لینڈ + تاجکستان + ٹوکیلاؤ + تیمور لیسٹ + مشرقی تیمور 
تیمور لیسٹ + ترکمانستان + تونس + ٹونگا + ترکی + ترینیداد آں ٹوباگو + ٹووالو + تائیوان + تنزانیہ + یوکرین + یوگنڈا + امریکہ ما باہرسی لَو جزائز + اقوام متحدہ + ریاست ہائے متحدہ امریکہ + امریکا + یوروگوئے + ازبکستان + ویٹیکن سٹی + سینٹ ونسنٹ آں گرینیڈائنز + وینزوئیلا + برٹش ورجن آئلینڈز + امریکی ورجن آئلینڈز + ویتنام + وینوآٹو + ویلیز آں فیوٹیونا + ساموآ + بناوٹی لہجے + مصنوعی بیڑی + کوسووو + یمن + مایوٹ + جنوبی افریقہ + زامبیا + زمبابوے + نامعلوم علاقہ + + + جارجیائی کیلنڈر + ISO-8601 کیلنڈر + معیاری چھانٹی سی ترتیب + عربی ہندی ہندسے + مغربی ہندسے + + + میٹرک + برطانیہ + ریاست ہائے متحدہ امریکہ + + + جِب:{0} + لِک:{0} + علاقہ:{0} + + + + + right-to-left + top-to-bottom + + + + [ء ٶ آ أ ئ ا ب پ ت ث ٹ ج چ ڇ ح خ څ ݲ د ذ ڈ ر ز ڑ ژ ڙ س ش ݜ ص ض ط ظ ع غ ف ق ک گ ل م ن ں ھ ہ و ی ے] + [؀؁؂؃\u200C\u200D\u200E\u200F \u064B \u064C \u064D \u064E \u064F \u0650 \u0651 \u0652 \u0654 \u0656 \u0657 \u0658 \u0670 ٻ ٺ ټ ٽ ۃ ي] + [ء ٶ آ أ ئ ا ب پ ت ث ٹ ج چ ڇ ح خ څ ݲ د ذ ڈ ر ز ڑ ژ ڙ س ش ݜ ص ض ط ظ ع غ ف ق ک گ ل م ن ں ھ ہ و ی ے] + [\u200E \- ‑ , ٫ ٬ . % ‰ + 0۰ 1۱ 2۲ 3۳ 4۴ 5۵ 6۶ 7۷ 8۸ 9۹] + [، ؍ ٫ ٬ ؛ \: ؟ . 
۔ ( ) \[ \]] + ؟ + + + + + + + + EEEE، d MMMM، y G + GyMMMMEEEEd + + + + + d MMMM، y G + GyMMMMd + + + + + d MMM، y G + GyMMMd + + + + + d/M/y GGGGG + GGGGGyMd + + + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + y G + MMM y G + d MMM، y G + E، d MMM، y G + d/M + E، d/M + d MMM + E، d MMM + d MMMM + y G + y G + M/y G + d/M/y G + E، d/M/y G + MMM y G + d MMM، y G + E، d MMM، y G + MMMM y G + QQQ y G + QQQQ y G + + + + y G – y G + y – y G + + + M/y GGGGG – M/y GGGGG + M/y – M/y GGGGG + M/y – M/y GGGGG + + + M/d/y – M/d/y GGGGG + M/d/y GGGGG – M/d/y GGGGG + M/d/y – M/d/y GGGGG + M/d/y – M/d/y GGGGG + + + E, M/d/y – E, M/d/y GGGGG + E, M/d/y GGGGG – E, M/d/y GGGGG + E, M/d/y – E, M/d/y GGGGG + E, M/d/y – E, M/d/y GGGGG + + + MMM y G – MMM y G + MMM – MMM y G + MMM y – MMM y G + + + MMM d – d, y G + MMM d, y G – MMM d, y G + MMM d – MMM d, y G + MMM d, y – MMM d, y G + + + E, MMM d – E, MMM d, y G + E, MMM d, y G – E, MMM d, y G + E, MMM d – E, MMM d, y G + E, MMM d, y – E, MMM d, y G + + + M–M + + + d/M – d/M + d/M – d/M + + + E، d/M – E، d/M + E، d/M – E، d/M + + + MMM–MMM + + + d–d MMM + d MMM – d MMM + + + E، d MMM – E، d MMM + E، d MMM – E، d MMM + + + y–y G + + + M/y – M/y G + M/y – M/y G + + + d/M/y – d/M/y G + d/M/y – d/M/y G + d/M/y – d/M/y G + + + E، d/M/y – E، d/M/y G + E، d/M/y – E، d/M/y G + E، d/M/y – E، d/M/y G + + + MMM–MMM y G + MMM y – MMM y G + + + d–d MMM، y G + d MMM – d MMM، y G + d MMM، y – d MMM، y G + + + E، d MMM – E، d MMM، y G + E، d MMM – E، d MMM، y G + E، d MMM، y – E، d MMM، y G + + + MMMM–MMMM y G + MMMM y – MMMM y G + + + + + + + + + جنوری + فروری + مارچ + اپریل + مئ + جون + جولائی + اگست + ستمبر + اکتوبر + نومبر + دسمبر + + + ج + ف + م + ا + م + ج + ج + ا + س + ا + ن + د + + + جنوری + فروری + مارچ + اپریل + مئ + جون + جولائی + اگست + ستمبر + اکتوبر + نومبر + دسمبر + + + + + جنوری + فروری + مارچ + اپریل + مئ + جون + جولائی + اگست + ستمبر + اکتوبر + نومبر + دسمبر + + + ج + ف + م + ا + م + ج + ج + ا + س + ا 
+ ن + د + + + جنوری + فروری + مارچ + اپریل + مئ + جون + جولائی + اگست + ستمبر + اکتوبر + نومبر + دسمبر + + + + + + + ایکشیمے + دُوشیمے + گھن آنگا + چارشیمے + پَئ شیمے + شُوگار + لَو آنگا + + + ا + د + گ + چ + پ + ش + ل + + + ایکشیمے + دُوشیمے + گھن آنگا + چارشیمے + پَئ شیمے + شُوگار + لَو آنگا + + + ایکشیمے + دُوشیمے + گھن آنگا + چارشیمے + پَئ شیمے + شُوگار + لَو آنگا + + + + + ایکشیمے + دُوشیمے + گھن آنگا + چارشیمے + پَئ شیمے + شُوگار + لَو آنگا + + + ا + د + گ + چ + پ + ش + ل + + + ایکشیمے + دُوشیمے + گھن آنگا + چارشیمے + پَئ شیمے + شُوگار + لَو آنگا + + + ایکشیمے + دُوشیمے + گھن آنگا + چارشیمے + پَئ شیمے + شُوگار + لَو آنگا + + + + + + + اول ڇامای + دوھیم ڇامای + ڇوی ڇامای + چوٹھوم ڇامای + + + 1 + 2 + 3 + 4 + + + اول ڇامای + دھویم ڇامای + ڇوی ڇامای + چوٹھوم ڇامای + + + + + اول ڇامای + دوھیم ڇامای + ڇوی ڇامای + چوٹھوم ڇامای + + + اول ڇامای + دوھیم ڇامای + ڇوی ڇامای + چوٹھوم ڇامای + + + + + + + a + p + + + AM + PM + + + + + AM + PM + + + AM + PM + + + AM + PM + + + + + + عیسٰیؑ ما مُش + ع-م + عیسوی + عام دور + + + ع-م + ع + ع-د + + + + + + EEEE، d MMMM، y + yMMMMEEEEd + + + + + d MMMM، y + yMMMMd + + + + + d MMM، y + yMMMd + + + + + d/M/yy + yyMd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + E h:mm B + y G + MMM y G + d MMM، y G + E، d MMM، y G + d/M + E، d/M + d MMM + E، d MMM + d MMMM + MMMM سی ہفتہ W + M/y + d/M/y + E، d/M/y + MMM y + d MMM، y + E، d MMM، y + MMMM y + QQQ y + QQQQ y + Yسی w ہفتہ + + + + y G – y G + y – y G + + + M/y GGGGG – M/y GGGGG + M/y – M/y GGGGG + M/y – M/y GGGGG + + + M/d/y – M/d/y GGGGG + M/d/y GGGGG – M/d/y GGGGG + M/d/y – M/d/y GGGGG + M/d/y – M/d/y GGGGG + + + E, M/d/y GGGGG – E, M/d/y GGGGG + E, M/d/y GGGGG – E, M/d/y GGGGG + E, M/d/y – E, M/d/y GGGGG + E, M/d/y – E, M/d/y GGGGG + + + MMM y G – MMM y G + MMM – MMM y G + MMM y – MMM y G + + + MMM d – d, y G + MMM d, y G – 
MMM d, y G + MMM d – MMM d, y G + MMM d, y – MMM d, y G + + + E, MMM d – E, MMM d, y G + E, MMM d, y G – E, MMM d, y G + E, MMM d – E, MMM d, y G + E, MMM d, y – E, MMM d, y G + + + M–M + + + d/M – d/M + d/M – d/M + + + E، d/M – E، d/M + E، d/M – E، d/M + + + MMM–MMM + + + d–d MMM + d MMM – d MMM + + + E، d MMM – E، d MMM + E، d MMM – E، d MMM + + + y–y + + + M/y – M/y + M/y – M/y + + + d/M/y – d/M/y + d/M/y – d/M/y + d/M/y – d/M/y + + + E، d/M/y – E، d/M/y + E، d/M/y – E، d/M/y + E، d/M/y – E، d/M/y + + + MMM–MMM y + MMM y – MMM y + + + d–d MMM y + d MMM – d MMM، y + d MMM، y – d MMM، y + + + E، d MMM – E، d MMM، y + E، d MMM – E، d MMM، y + E، d MMM، y – E، d MMM، y + + + MMMM–MMMM y + MMMM y – MMMM y + + + + + + + + دور + + + دور + + + دور + + + کال + پیوک کال + ایݜ + کال گے + + {0} کالا میں + + + {0} کالا موش + + + + کال + پیوک کال + ایݜ + کال گے + + {0} کالا میں + + + {0} کالا موش + + + + کال + پیوک کال + ایݜ + کال گے + + {0} کالا میں + + + {0} کالا موش + + + + ڇا مای + + + ڇا مای + + + ڇا مای + + + ما + مُشُم ما + میں ما + دُوئی ما + + {0} ما میں + + + {0} مائے موش + + + + ما + مُشُم ما + میں ما + دُوئی ما + + {0} ما میں + + + {0} مائے موش + + + + ما + مُشُم ما + میں ما + دُوئی ما + + {0} ما میں + + + {0} مائے موش + + + + ہفتہ + مُشُم ہفتہ + میں ہفتہ + دُوئی ہفتہ + + {0} ہفتہ میں + + + {0} ہفتائے موش + + {0} سی ہفتہ + + + ہفتہ + مُشُم ہفتہ + میں ہفتہ + دُوئی ہفتہ + + {0} ہفتہ میں + + + {0} ہفتائے موش + + {0} سی ہفتہ + + + ہفتہ + مُشُم ہفتہ + میں ہفتہ + دُوئی ہفتہ + + {0} ہفتہ میں + + + {0} ہفتائے موش + + {0} سی ہفتہ + + + ما سی ہفتہ + + + ما سی ہفتہ + + + ما سی ہفتہ + + + دی + بأل + آش + بول + + {0} دیا میں + + + {0} دیےموش + + + + دی + بأل + آش + بول + + {0} دیا میں + + + {0} دیےموش + + + + دی + بأل + آش + بول + + {0} دیا میں + + + {0} دیےموش + + + + کال سی دی + + + کال سی دی + + + کال سی دی + + + ہفتہ سی دی + + + ہفتہ سی دی + + + ہفتہ سی دی + + + موشوم ایکشیمے + مے ایکشیمے + دوی ایکشیمے + + +{0} ایکشیمے + + + -{0} ایکشیمے + + + + موشوم ایکشیمے + مے ایکشیمے 
+ دوی ایکشیمے + + +{0} ایکشیمے + + + -{0} ایکشیمے + + + + موشوم ایکشیمے + مے ایکشیمے + دوی ایکشیمے + + +{0} ایکشیمے + + + -{0} ایکشیمے + + + + موشوم دوشیمے + مے دوشیمے + دوی دوشیمے + + +{0} دوشیمے + + + -{0} دوشیمے + + + + موشوم دوشیمے + مے دوشیمے + دوی دوشیمے + + +{0} دوشیمے + + + -{0} دوشیمے + + + + موشوم دوشیمے + مے دوشیمے + دوی دوشیمے + + +{0} دوشیمے + + + -{0} دوشیمے + + + + موشوم گھن آنگا + مے گھن آنگا + دوی گھن آنگا + + +{0} گھن آنگا + + + -{0} گھن آنگا + + + + موشوم گھن آنگا + مے گھن آنگا + دوی گھن آنگا + + +{0} گھن آنگا + + + -{0} گھن آنگا + + + + موشوم گھن آنگا + مے گھن آنگا + دوی گھن آنگا + + +{0} گھن آنگا + + + -{0} گھن آنگا + + + + موشوم چارشیمے + مے چارشیمے + دوی چارشیمے + + +{0} چارشیمے + + + -{0} چارشیمے + + + + موشوم چارشیمے + مے چارشیمے + دوی چارشیمے + + +{0} چارشیمے + + + -{0} چارشیمے + + + + موشوم چارشیمے + مے چارشیمے + دوی چارشیمے + + +{0} چارشیمے + + + -{0} چارشیمے + + + + موشوم پئی شیمے + مے پئی شیمے + دوی پئی شیمے + + +{0} پئی شیمے + + + -{0} پئی شیمے + + + + موشوم پئی شیمے + مے پئی شیمے + دوی پئی شیمے + + +{0} پئی شیمے + + + -{0} پئی شیمے + + + + موشوم پئی شیمے + مے پئی شیمے + دوی پئی شیمے + + +{0} پئی شیمے + + + -{0} پئی شیمے + + + + موشوم شُوگار + مے شُوگار + دوی شُوگار + + +{0} شُوگار + + + -{0} شُوگار + + + + موشوم شُوگار + مے شُوگار + دوی شُوگار + + +{0} شُوگار + + + -{0} شُوگار + + + + موشوم شُوگار + مے شُوگار + دوی شُوگار + + +{0} شُوگار + + + -{0} شُوگار + + + + موشوم لَو آنگا + مے لَو آنگا + دوی لَو آنگا + + +{0} لَو آنگا + + + -{0} لَو آنگا + + + + موشوم لَو آنگا + مے لَو آنگا + دوی لَو آنگا + + +{0} لَو آنگا + + + -{0} لَو آنگا + + + + موشوم لَو آنگا + مے لَو آنگا + دوی لَو آنگا + + +{0} لَو آنگا + + + -{0} لَو آنگا + + + + پیشیا موش/پیشیا پأش + + + پیشیا موش/پیشیا پأش + + + پیشیا موش/پیشیا پأش + + + گینٹہ + میں گینٹہ + + {0} گینٹہ میں + + + {0} گینٹائے موش + + + + گینٹہ + میں گینٹہ + + {0} گینٹہ میں + + + {0} گینٹائے موش + + + + گینٹہ + میں گینٹہ + + {0} گینٹہ میں + + + {0} گینٹائے موش + + + + میلٹ + میں میلٹ + + +{0} میلڑا میں 
+ + + -{0} میلڑے موش + + + + میلٹ + میں میلٹ + + +{0} میلڑا میں + + + -{0} میلڑے موش + + + + میلٹ + میں میلٹ + + +{0} میلٹ میں + + + -{0} میلٹ موش + + + + سیکنڈ + مھیرے + + +{0} سیکنڑا میں + + + -{0} سیکنڑے موش + + + + سیکنڈ + مھیرے + + +{0} سیکنڈ میں + + + -{0} سیکنڈ موش + + + + سیکنڈ + مھیرے + + +{0} سیکنڈ میں + + + -{0} سیکنڈ موش + + + + منطقۂ وَخ + + + منطقۂ وَخ + + + منطقۂ وَخ + + + + {0} وَخ + {0} دھات + {0} معیاری وَخ + + + کوآرڈینیٹڈ یونیورسل ٹائم + + + + نامعلوم خار + + + انڈورا + + + دبئی + + + سیبل + + + انٹیگوا + + + انگویلا + + + ٹیرانی + + + یریوان + + + لوانڈا + + + روتھیرا + + + پلمیر + + + ٹرول + + + سیووا + + + ماؤسن + + + ڈیوس + + + ووستوک + + + کیسی + + + ڈومونٹ ڈی ارویلے + + + میک مرڈو + + + ریو گالیگوس + + + مینڈوزا + + + سان جوآن + + + اوشوآئیا + + + لا ریئوجا + + + سان لوئس + + + کیٹامارسی + + + سالٹا + + + جوجوئی + + + ٹوکومین + + + کورڈوبا + + + بیونس آئرس + + + پاگو پاگو + + + ویانا + + + پرتھ + + + ایوکلا + + + ڈارون + + + اڈیلائڈ + + + بروکن ہِل + + + ملبورن + + + کیوری + + + ہوبارٹ + + + لِنڈمین + + + سڈنی + + + برسبین + + + میکواری + + + لارڈ ہووے + + + اروبا + + + میریہام + + + باکو + + + سراجیوو + + + بارباڈوس + + + ڈھاکہ + + + برسلز + + + اؤگاڈؤگوو + + + صوفیہ + + + بحرین + + + بجمبرا + + + پورٹو نووو + + + سینٹ برتھیلمی + + + برمودا + + + برونئی + + + لا پاز + + + کرالینڈیجک + + + ایرونیپ + + + ریئو برینکو + + + پورٹو ویلہو + + + بوآ وسٹا + + + مناؤس + + + کوئیابا + + + سنٹارین + + + کیمپو گرینڈ + + + بیلیم + + + اراگویانا + + + ساؤ پالو + + + باہیا + + + فورٹالیزا + + + میسیئو + + + ریسائف + + + نورونہا + + + نساؤ + + + تھمپو + + + گبرون + + + مِنسک + + + بیلائز + + + ڈاؤسن + + + وہائٹ ہارس + + + انووِک + + + وینکوور + + + فورٹ نیلسن + + + ڈاؤسن کریک + + + کریسٹون + + + ایلو نائف + + + ایڈمونٹن + + + سوِفٹ کرنٹ + + + کیمبرج سی کھاڑی + + + ریجینا + + + ونّیپیگ + + + ریزولیوٹ + + + رینی ریور + + + رینکن انلیٹ + + + اٹیکوکن + + + تھنڈر بے + + + نپیگون + + + ٹورنٹو + + + ایکالوئٹ + + + پینگنِرٹنگ + + + مونکٹن + + + ہیلیفیکس + + + گوس 
سی کھاڑی + + + گلیس سی کھاڑی + + + بلانک سبلون + + + سینٹ جانز + + + کوکوس + + + کنشاسا + + + لوبمباشی + + + بنگوئی + + + برازاویلے + + + زیورخ + + + عابدجان + + + راروٹونگا + + + ایسٹر + + + پنٹا اریناس + + + سنٹیاگو + + + ڈوآلا + + + یورومکی + + + شنگھائی + + + بگوٹا + + + کوسٹا ریکا + + + ہوانا + + + کیپ ورڈی + + + کیوراکاؤ + + + کرسمس + + + نکوسیا + + + فاماگوسٹا + + + پراگ + + + بزنجن + + + برلن + + + جبوتی + + + کوپن ہیگن + + + ڈومنیکا + + + سانتو ڈومنگو + + + الجیئرس + + + گیلاپیگوس + + + گوآیاکوئل + + + ٹالن + + + قاہرہ + + + العیون + + + اسمارا + + + کینری + + + سیوٹا + + + میڈرڈ + + + عدیس ابابا + + + ہیلسنکی + + + فجی + + + اسٹینلے + + + چیوک + + + پونپیئی + + + کوسرائی + + + فارو + + + پیرس + + + لبرے ویلے + + + + برٹش سمر ٹائم + + لندن + + + غرناطہ + + + طبلیسی + + + سیئین + + + گرنزی + + + اکّرا + + + جبل الطارق + + + تھولو + + + نوک + + + اسکورز بائی سنڈ + + + ڈنمارک شاون + + + بنجول + + + کونکری + + + گواڈیلوپ + + + ملابو + + + ایتھنز + + + جنوبی جارجیا + + + گواٹے مالا + + + گوآم + + + بِساؤ + + + گیانا + + + ہانگ سینگ + + + ٹیگوسیگالپے + + + زیگریب + + + پورٹ او پرنس + + + بڈاپسٹ + + + جکارتہ + + + پونٹیانک + + + مکاسر + + + جے پورہ + + + + آئرش اسٹینڈرڈ ٹائم + + ڈبلن + + + یروشلم + + + آئل آف مین + + + کولسیتا + + + چاگوس + + + بغداد + + + تہران + + + ریکجاوک + + + روم + + + جرسی + + + جمائیکا + + + امّان + + + ٹوکیو + + + نیروبی + + + بشکیک + + + پنوم پن + + + اینڈربری + + + کریتیماٹی + + + ٹراوا + + + کومورو + + + سینٹ کٹس + + + پیونگ یانگ + + + سیئول + + + کویت + + + کیمین + + + اکتاؤ + + + اورال + + + آتیراؤ + + + اکٹوب + + + کوستانے + + + کیزیلورڈا + + + الماٹی + + + وینٹیانا + + + بیروت + + + سینٹ لوسیا + + + ویڈوز + + + کولمبو + + + مونروویا + + + مسیرو + + + وِلنیئس + + + لگژمبرگ + + + ریگا + + + ٹریپولی + + + کیسا بلانکا + + + موناکو + + + چیسیناؤ + + + پوڈگورسیا + + + میریگوٹ + + + انٹاناناریوو + + + کواجیلین + + + مجورو + + + اسکوپجے + + + بماکو + + + رنگون + + + ہووارڈ + + + اولان باتار + + + چوئبالسان + + + مسیؤ + + + سائپین + + + 
مارٹینک + + + نواکشوط + + + مونٹسیراٹ + + + مالٹا + + + ماریشس + + + مالدیپ + + + بلینٹائر + + + تیجوآنا + + + ہرموسیلو + + + میزٹلان + + + چیہوآہوآ + + + بہیا بندراز + + + اوجیناگا + + + مونٹیری + + + میکسیکو سٹی + + + میٹاموروس + + + میریڈا + + + کنکیون + + + کوالا لمپور + + + کیوچنگ + + + مپوٹو + + + ونڈہوک + + + نؤمیا + + + نیامی + + + نورفوک + + + لاگوس + + + مناگوآ + + + ایمسٹرڈم + + + اوسلو + + + سیٹھمنڈو + + + ناؤرو + + + نیئو + + + چیتھم + + + آکلینڈ + + + مسقط + + + پنامہ + + + لیما + + + تاہیتی + + + مارکیساس + + + گامبیئر + + + پورٹ موریسبی + + + بوگینولے + + + منیلا + + + کراچی + + + وارسا + + + میکلیئون + + + پٹکائرن + + + پیورٹو ریکو + + + غزہ + + + ہیبرون + + + ازوریس + + + مڈیئرا + + + لسبن + + + پلاؤ + + + اسنسیئن + + + قطر + + + ری یونین + + + بخارسٹ + + + بلغراد + + + کالينينغراد + + + ماسکو + + + وولگوگراد + + + سیراٹو + + + استراخان + + + الیانوسک + + + کیروف + + + سمارا + + + یکاٹیرِنبرگ + + + اومسک + + + نوووسِبِرسک + + + برنال + + + ٹامسک + + + نوووکیوزنیسک + + + کریسنویارسک + + + ارکتسک + + + چیتا + + + یکوتسک + + + ولادی ووستک + + + خندیگا + + + سخالین + + + اوست-نیرا + + + میگیدن + + + سرہدنیکولیمسک + + + کیمچٹکا + + + انیدر + + + کگالی + + + ریاض + + + گواڈل کینال + + + ماہی + + + خرطوم + + + اسٹاک ہوم + + + سنگاپور + + + سینٹ ہیلینا + + + لیوبلیانا + + + لانگ ایئر بین + + + بریٹِسلاوا + + + فری ٹاؤن + + + سان ماریانو + + + ڈکار + + + موگادیشو + + + پراماریبو + + + جوبا + + + ساؤ ٹوم + + + ال سلواڈور + + + لوور پرنسس کوارٹر + + + دمشق + + + مبابین + + + عظیم ترک + + + اینجامینا + + + کرگیولین + + + لوم + + + بنکاک + + + دوشانبے + + + فکاؤفو + + + ڈلی + + + اشغبت + + + تیونس + + + ٹونگاٹاپو + + + استنبول + + + پورٹ آف اسپین + + + فیونافیوٹی + + + تائپے + + + دار السلام + + + ازہوراڈ + + + کیو + + + سمفروپول + + + زیپوروزائی + + + کیمپالا + + + مڈوے + + + ویک + + + اداک + + + نوم + + + جانسٹن + + + اینکریج + + + یکوٹیٹ + + + سیٹکا + + + جونیئو + + + میٹلا کاٹلا + + + لاس اینجلس + + + بوائس + + + فینکس + + + ڈینور + + + بیولاہ، شمالی ڈکوٹا 
+ + + نیو سلیم، شمالی ڈکوٹا + + + وسط، شمالی ڈکوٹا + + + شکاگو + + + مینومینی + + + ونسینیز، انڈیانا + + + پیٹرزبرگ، انڈیانا + + + ٹیل سٹی، انڈیانا + + + کنوکس، انڈیانا + + + وینامیک، انڈیانا + + + مرینگو، انڈیانا + + + انڈیاناپولس + + + لوئس ویلے + + + ویوے، انڈیانا + + + مونٹیسیلو، کینٹوکی + + + ڈیٹرائٹ + + + نیو یارک + + + مونٹی ویڈیو + + + سمرقند + + + تاشقند + + + واٹیکن + + + سینٹ ونسنٹ + + + کراسیس + + + ٹورٹولا + + + سینٹ تھامس + + + ہو چی منہ سٹی + + + ایفیٹ + + + ولّیس + + + اپیا + + + عدن + + + مایوٹ + + + جوہانسبرگ + + + لیوساکا + + + ہرارے + + + + افغانستان سی وَخ + + + + + وسطی افریقہ ٹائم + + + + + مشرقی افریقہ ٹائم + + + + + جنوبی افریقہ سٹینڈرڈ ٹائم + + + + + مغربی افریقہ ٹائم + مغربی افریقہ سٹینڈرڈ ٹائم + مغربی افریقہ سمر ٹائم + + + + + الاسکا ٹائم + الاسکا اسٹینڈرڈ ٹائم + الاسکا ڈے لائٹ ٹائم + + + + + امیزون ٹائم + ایمیزون سی معیاری وَخ + امیزون سی موسم گرما سی وَخ + + + + + سنٹرل ٹائم + سنٹرل اسٹینڈرڈ ٹائم + سنٹرل ڈے لائٹ ٹائم + + + + + ایسٹرن ٹائم + ایسٹرن اسٹینڈرڈ ٹائم + ایسٹرن ڈے لائٹ ٹائم + + + + + ماؤنٹین ٹائم + ماؤنٹین اسٹینڈرڈ ٹائم + ماؤنٹین ڈے لائٹ ٹائم + + + + + پیسفک ٹائم + پیسفک اسٹینڈرڈ ٹائم + پیسفک ڈے لائٹ ٹائم + + + + + ایپیا ٹائم + ایپیا سٹینڈرڈ ٹائم + ایپیا ڈے لائٹ ٹائم + + + + + عرب سی وَخ + عرب سی معیاری وَخ + عرب ڈے لائٹ ٹائم + + + + + ارجنٹینا سی وَخ + ارجنٹینا سی معیاری وَخ + ارجنٹینا سی موسم گرما سی وَخ + + + + + مغربی ارجنٹینا سی وَخ + مغربی ارجنٹینا سی معیاری وَخ + مغربی ارجنٹینا سی موسم گرما سی وَخ + + + + + آرمینیا سی وَخ + آرمینیا سی معیاری وَخ + آرمینیا سی موسم گرما سی وَخ + + + + + اٹلانٹک ٹائم + اٹلانٹک اسٹینڈرڈ ٹائم + اٹلانٹک ڈے لائٹ ٹائم + + + + + سنٹرل آسٹریلیا ٹائم + آسٹریلین سنٹرل اسٹینڈرڈ ٹائم + آسٹریلین سنٹرل ڈے لائٹ ٹائم + + + + + آسٹریلین سنٹرل ویسٹرن ٹائم + آسٹریلین سنٹرل ویسٹرن اسٹینڈرڈ ٹائم + آسٹریلین سنٹرل ویسٹرن ڈے لائٹ ٹائم + + + + + ایسٹرن آسٹریلیا ٹائم + آسٹریلین ایسٹرن اسٹینڈرڈ ٹائم + آسٹریلین ایسٹرن ڈے لائٹ ٹائم + + + + + ویسٹرن آسٹریلیا ٹائم + سٹریلیا ویسٹرن اسٹینڈرڈ ٹائم + آسٹریلین ویسٹرن ڈے 
لائٹ ٹائم + + + + + آذربائیجان سی وَخ + آذربائیجان سی معیاری وَخ + آذربائیجان سی موسم گرما سی وَخ + + + + + ازوریس سی وَخ + ازوریس سی معیاری وَخ + ازوریس سی موسم گرما سی وَخ + + + + + بنگلہ دیش سی وَخ + بنگلہ دیش سی معیاری وَخ + بنگلہ دیش سی موسم گرما سی وَخ + + + + + بھوٹان سی وَخ + + + + + بولیویا سی وَخ + + + + + برازیلیا ٹائم + برازیلیا اسٹینڈرڈ ٹائم + برازیلیا سمر ٹائم + + + + + برونئی دارالسلام ٹائم + + + + + کیپ ورڈی ٹائم + کیپ ورڈی سٹینڈرڈ ٹائم + کیپ ورڈی سمر ٹائم + + + + + چامورو سٹینڈرڈ ٹائم + + + + + چیتھم ٹائم + چیتھم اسٹینڈرڈ ٹائم + چیتھم ڈے لائٹ ٹائم + + + + + چلی سی وَخ + چلی سی معیاری وَخ + چلی سی موسم گرما سی وَخ + + + + + چین سی وَخ + چین سٹینڈرڈ ٹائم + چینی ڈے لائٹ ٹائم + + + + + کوئبلسان ٹائم + کوئبلسان سٹینڈرڈ ٹائم + کوائبلسان سمر ٹائم + + + + + کرسمس آئلینڈ ٹائم + + + + + کوکوس آئلینڈز ٹائم + + + + + کولمبیا ٹائم + کولمبیا سی معیاری وَخ + کولمبیا سی موسم گرما سی وَخ + + + + + کک آئلینڈز ٹائم + کک آئلینڈز سٹینڈرڈ ٹائم + کک آئلینڈز نصف سمر ٹائم + + + + + کیوبا ٹائم + کیوبا اسٹینڈرڈ ٹائم + کیوبا ڈے لائٹ ٹائم + + + + + ڈیوس ٹائم + + + + + ڈومونٹ-ڈی’ارویلے ٹائم + + + + + مشرقی تیمور ٹائم + + + + + ایسٹر آئلینڈ سی وَخ + ایسٹر آئلینڈ سی معیاری وَخ + ایسٹر آئلینڈ سی موسم گرما سی وَخ + + + + + ایکواڈور سی وَخ + + + + + وسط یورپ سی وَخ + وسطی یورپ سی معیاری وَخ + وسطی یورپ سی موسم گرما سی وَخ + + + + + مشرقی یورپ سی وَخ + مشرقی یورپ سی معیاری وَخ + مشرقی یورپ سی موسم گرما سی وَخ + + + + + بعید مشرقی یورپی وَخ + + + + + مغربی یورپ سی وَخ + مغربی یورپ سی معیاری وَخ + مغربی یورپ سی موسم گرما سی وَخ + + + + + فاک لینڈ آئلینڈز سی وَخ + فاک لینڈ آئلینڈز سی معیاری وَخ + فاک لینڈ آئلینڈز سی موسم گرما سی وَخ + + + + + فجی ٹائم + فجی سٹینڈرڈ ٹائم + فجی سمر ٹائم + + + + + فرینچ گیانا سی وَخ + + + + + فرینچ جنوبی آں انٹارکٹک ٹائم + + + + + گالاپاگوز سی وَخ + + + + + گیمبیئر ٹائم + + + + + جارجیا سی وَخ + جارجیا سی معیاری وَخ + جارجیا سی موسم گرما سی وَخ + + + + + جلبرٹ آئلینڈز ٹائم + + + + + گرین وچ سی اصل وَخ + + + + + مشرقی گرین لینڈ ٹائم + مشرقی گرین لینڈ 
اسٹینڈرڈ ٹائم + مشرقی گرین لینڈ سی موسم گرما سی وَخ + + + + + مغربی گرین لینڈ ٹائم + مغربی گرین لینڈ اسٹینڈرڈ ٹائم + مغربی گرین لینڈ سی موسم گرما سی وَخ + + + + + خلیج سی معیاری وَخ + + + + + گیانا سی وَخ + + + + + ہوائی الیوٹیئن ٹائم + ہوائی الیوٹیئن اسٹینڈرڈ ٹائم + ہوائی الیوٹیئن ڈے لائٹ ٹائم + + + + + ہانگ سینگ ٹائم + ہانگ سینگ سٹینڈرڈ ٹائم + ہانگ سینگ سمر ٹائم + + + + + ہووڈ ٹائم + ہووڈ سٹینڈرڈ ٹائم + ہووڈ سمر ٹائم + + + + + ہندوستان سی معیاری وَخ + + + + + بحر ہند ٹائم + + + + + ہند چین ٹائم + + + + + وسطی انڈونیشیا ٹائم + + + + + مشرقی انڈونیشیا ٹائم + + + + + مغربی انڈونیشیا ٹائم + + + + + ایران سی وَخ + ایران سی معیاری وَخ + ایران ڈے لائٹ ٹائم + + + + + ارکتسک ٹائم + ارکتسک سٹینڈرڈ ٹائم + ارکتسک سمر ٹائم + + + + + اسرائیل سی وَخ + اسرائیل سی معیاری وَخ + اسرائیل ڈے لائٹ ٹائم + + + + + جاپان ٹائم + جاپان سٹینڈرڈ ٹائم + جاپان ڈے لائٹ ٹائم + + + + + مشرقی قزاخستان سی وَخ + + + + + مغربی قزاخستان سی وَخ + + + + + کوریا ٹائم + کوریا سٹینڈرڈ ٹائم + کوریا ڈے لائٹ ٹائم + + + + + کوسرے ٹائم + + + + + کریسنویارسک ٹائم + کرسنویارسک سٹینڈرڈ ٹائم + کریسنویارسک سمر ٹائم + + + + + کرغستان سی وَخ + + + + + لائن آئلینڈز ٹائم + + + + + لارڈ ہووے ٹائم + لارڈ ہووے اسٹینڈرڈ ٹائم + لارڈ ہووے ڈے لائٹ ٹائم + + + + + مکوآری آئلینڈ سی وَخ + + + + + میگیدن ٹائم + مگادان اسٹینڈرڈ ٹائم + میگیدن سمر ٹائم + + + + + ملیشیا ٹائم + + + + + مالدیپ سی وَخ + + + + + مارکیسس ٹائم + + + + + مارشل آئلینڈز ٹائم + + + + + ماریشس ٹائم + ماریشس سٹینڈرڈ ٹائم + ماریشس سمر ٹائم + + + + + ماؤسن ٹائم + + + + + شمال مغربی میکسیکو ٹائم + شمال مغربی میکسیکو اسٹینڈرڈ ٹائم + شمال مغربی میکسیکو ڈے لائٹ ٹائم + + + + + میکسیکن پیسفک ٹائم + میکسیکن پیسفک اسٹینڈرڈ ٹائم + میکسیکن پیسفک ڈے لائٹ ٹائم + + + + + یولان بیتور ٹائم + یولان بیتور سٹینڈرڈ ٹائم + یولان بیتور سمر ٹائم + + + + + ماسکو ٹائم + ماسکو اسٹینڈرڈ ٹائم + ماسکو سمر ٹائم + + + + + میانمار ٹائم + + + + + ناؤرو ٹائم + + + + + نیپال سی وَخ + + + + + نیو کیلیڈونیا ٹائم + نیو کیلیڈونیا سٹینڈرڈ ٹائم + نیو کیلیڈونیا سمر ٹائم + + + + + نیوزی لینڈ سی وَخ + نیوزی 
لینڈ سی معیاری وَخ + نیوزی لینڈ ڈے لائٹ ٹائم + + + + + نیو فاؤنڈ لینڈ ٹائم + نیو فاؤنڈ لینڈ اسٹینڈرڈ ٹائم + نیو فاؤنڈ لینڈ ڈے لائٹ ٹائم + + + + + نیئو ٹائم + + + + + نارفوک آئلینڈ سی وَخ + نارفوک آئلینڈ سی معیاری وَخ + نارفوک آئلینڈ سی موسم گرما سی وَخ + + + + + فرنانڈو ڈی نورنہا سی وَخ + فرنانڈو ڈی نورنہا سی معیاری وَخ + فرنانڈو ڈی نورونہا سمر ٹائم + + + + + نوووسیبرسک ٹائم + نوووسیبرسک سٹینڈرڈ ٹائم + نوووسیبرسک سمر ٹائم + + + + + اومسک ٹائم + اومسک سٹینڈرڈ ٹائم + اومسک سمر ٹائم + + + + + پاکستان سی وَخ + پاکستان سی معیاری وَخ + پاکستان سی موسم گرما سی وَخ + + + + + پلاؤ ٹائم + + + + + پاپوآ نیو گنی ٹائم + + + + + پیراگوئے سی وَخ + پیراگوئے سی معیاری وَخ + پیراگوئے سی موسم گرما سی وَخ + + + + + پیرو سی وَخ + پیرو سی معیاری وَخ + پیرو سی موسم گرما سی وَخ + + + + + فلپائن ٹائم + فلپائن سٹینڈرڈ ٹائم + فلپائن سمر ٹائم + + + + + فینکس آئلینڈز ٹائم + + + + + سینٹ پیئر آں مکلیئون ٹائم + سینٹ پیئر آں مکلیئون اسٹینڈرڈ ٹائم + سینٹ پیئر آں مکلیئون ڈے لائٹ ٹائم + + + + + پٹکائرن ٹائم + + + + + پوناپے ٹائم + + + + + پیانگ یانگ وَخ + + + + + ری یونین ٹائم + + + + + روتھیرا سی وَخ + + + + + سخالین ٹائم + سخالین سٹینڈرڈ ٹائم + سخالین سمر ٹائم + + + + + ساموآ ٹائم + ساموآ سٹینڈرڈ ٹائم + ساموآ ڈے لائٹ ٹائم + + + + + سیشلیز ٹائم + + + + + سنگاپور سٹینڈرڈ ٹائم + + + + + سولمن آئلینڈز ٹائم + + + + + جنوبی جارجیا ٹائم + + + + + سورینام سی وَخ + + + + + سیووا ٹائم + + + + + تاہیتی ٹائم + + + + + تائی پیئی ٹائم + تائی پیئی اسٹینڈرڈ ٹائم + تئی پیئی ڈے لائٹ ٹائم + + + + + تاجکستان سی وَخ + + + + + ٹوکیلاؤ ٹائم + + + + + ٹونگا ٹائم + ٹونگا سٹینڈرڈ ٹائم + ٹونگا سمر ٹائم + + + + + چوک ٹائم + + + + + ترکمانستان سی وَخ + ترکمانستان سی معیاری وَخ + ترکمانستان سی موسم گرما سی وَخ + + + + + ٹوالو ٹائم + + + + + یوروگوئے سی وَخ + یوروگوئے سی معیاری وَخ + یوروگوئے سی موسم گرما سی وَخ + + + + + ازبکستان سی وَخ + ازبکستان سی معیاری وَخ + ازبکستان سی موسم گرما سی وَخ + + + + + وانوآٹو ٹائم + وانوآٹو سٹینڈرڈ ٹائم + وانوآٹو سمر ٹائم + + + + + وینزوئیلا سی وَخ + + + + + ولادی ووستک ٹائم + ولادی ووستک 
سٹینڈرڈ ٹائم + ولادی ووستک سمر ٹائم + + + + + وولگوگراد ٹائم + وولگوگراد اسٹینڈرڈ ٹائم + وولگوگراد سمر ٹائم + + + + + ووسٹاک سی وَخ + + + + + ویک آئلینڈ ٹائم + + + + + والیز اور فٹونا ٹائم + + + + + یکوتسک ٹائم + یکوتسک اسٹینڈرڈ ٹائم + یکوتسک سمر ٹائم + + + + + یکاٹیرِنبرگ ٹائم + یکاٹیرِنبرگ اسٹینڈرڈ ٹائم + یکاٹیرِنبرگ سمر ٹائم + + + + + یوکون ٹائم + + + + + + + arabext + + + + + ¤ #,##0.00 + + + + + + متحدہ عرب اماراتی درہم + متحدہ عرب اماراتی درہم + + + افغان افغانی + افغان افغانی + + + البانیا سی لیک + البانیا سی لیک + + + آرمینیائی ڈرم + آرمینیائی ڈرم + + + نیدر لینڈز انٹیلیئن گلڈر + نیدر لینڈز انٹیلیئن گلڈر + + + انگولا سی کوانزا + انگولا سی کوانزا + + + ارجنٹائن پیسہ + ارجنٹائن پیسہ + + + آسٹریلین ڈالر + آسٹریلین ڈالر + + + اروبن فلورِن + اروبن فلورِن + + + آذربائجانی منات + آذربائجانی منات + + + بوسنیا ہرزیگووینا کا قابل منتقلی نشان + بوسنیا ہرزیگووینا کا قابل منتقلی نشان + + + باربیڈین ڈالر + باربیڈین ڈالر + + + بنگلہ دیشی ٹکا + بنگلہ دیشی ٹکا + + + بلغارین لیو + بلغارین لیو + + + بحرینی دینار + بحرینی دینار + + + برونڈیئن فرانک + برونڈیئن فرانک + + + برموڈا ڈالر + برموڈا ڈالر + + + برونئی ڈالر + برونئی ڈالر + + + بولیوین بولیویانو + بولیوین بولیویانو + + + برازیلی ریئل + برازیلی ریئل + + + بہامانی ڈالر + بہامانی ڈالر + + + بھوٹانی گُلٹرم + بھوٹانی گُلٹرم + + + بوتسوانا سی پولا + بوتسوانا سی پولا + + + بیلاروسی روبل + بیلاروسی روبل + BYN + + + بیلیز ڈالر + بیلیز ڈالر + + + کنیڈین ڈالر + کنیڈین ڈالر + CA$ + + + کانگولیز فرانک + کانگولیز فرانک + + + سوئس فرانکس + سوئس فرانکس + + + چلّین پیسہ + چلّین پیسہ + + + چینی یوآن (آف شور) + چینی یوآن (آف شور) + + + چینی یوآن + چینی یوآن + + + کولمبین پیسہ + کولمبین پیسہ + + + کوسٹا ریکا کا کولن + کوسٹا ریکا کا کولن + + + کیوبا کا قابل منتقلی پیسو + کیوبا کا قابل منتقلی پیسو + + + کیوبا سی پیسو + کیوبا سی پیسو + + + کیپ ورڈی سی اسکیوڈو + کیپ ورڈی سی اسکیوڈو + + + چیک کرونا + چیک کروناز + + + جبوتی فرانک + جبوتی فرانک + + + ڈنمارک کرون + ڈنمارک کرون + + + ڈومنیکن پیسو + ڈومنیکن پیسو + + + الجیریائی دینار + الجیریائی 
دینار + + + مصری پاؤنڈ + مصری پاؤنڈ + + + اریٹیریا سی نافکا + اریٹیریا سی نافکا + + + ایتھوپیائی بِرّ + ایتھوپیائی بِرّ + + + یورو + یورو + + + فجی سی ڈالر + فجی سی ڈالر + + + فاکلینڈ آئلینڈز پونڈ + فاکلینڈ آئلینڈز پونڈ + + + برطانوی پاؤنڈ + برطانوی پاؤنڈ + + + جارجیائی لاری + جارجیائی لاری + + + گھانا سی سیڈی + گھانا سی سیڈی + + + جبل الطارق پونڈ + جبل الطارق پونڈ + + + گامبیا سی ڈلاسی + گامبیا سی ڈلاسی + + + گنی فرانک + گنی فرانک + + + گواٹے مالا کا کوئٹزل + گواٹے مالا کا کوئٹزل + + + گویانیز ڈالر + گویانیز ڈالر + + + ھانگ کانگ ڈالر + ھانگ کانگ ڈالر + + + ہونڈوران لیمپیرا + ہونڈوران لیمپیرا + + + کروشین کونا + کروشین کونا + + + ہیتی کا گؤرڈی + ہیتی کا گؤرڈی + + + ہنگرین فورنٹ + ہنگرین فورنٹ + + + انڈونیشین روپیہ + انڈونیشین روپیہ + + + اسرائیلی نم شیکل + اسرائیلی نم شیکل + + + بھارتی روپیہ + بھارتی روپیہ + + + عراقی دینار + عراقی دینار + + + ایرانی ریال + ایرانی ریال + + + آئس لينڈی کرونا + آئس لينڈی کرونا + + + جمائیکن ڈالر + جمائیکن ڈالر + + + اردنی دینار + اردنی دینار + + + جاپانی ین + جاپانی ین + + + کینیائی شلنگ + کینیائی شلنگ + + + کرغستانی سوم + کرغستانی سوم + + + کمبوڈیائی ریئل + کمبوڈیائی ریئل + + + کوموریئن فرانک + کوموریئن فرانک + + + شمالی کوریائی وون + شمالی کوریائی وون + + + جنوبی کوریائی وون + جنوبی کوریائی وون + + + کویتی دینار + کویتی دینار + + + کیمین آئلینڈز ڈالر + کیمین آئلینڈز ڈالر + + + قزاخستانی ٹینگ + قزاخستانی ٹینگ + + + لاؤشیائی کِپ + لاؤشیائی کِپ + + + لبنانی پونڈ + لبنانی پونڈ + + + سری لنکائی روپیہ + سری لنکائی روپیہ + + + لائبریائی ڈالر + لائبریائی ڈالر + + + لیسوتو لوٹی + لیسوتو لوٹیس + + + لیبیائی دینار + لیبیائی دینار + + + مراکشی درہم + مراکشی درہم + + + مالدووی لیو + مالدووی لیو + + + ملاگاسی اریاری + ملاگاسی اریاری + + + مقدونیائی دینار + مقدونیائی دینار + + + میانمار کیاٹ + میانمار کیاٹ + + + منگولیائی ٹگرِ + منگولیائی ٹگرِ + + + میکانیز پٹاکا + میکانیز پٹاکا + + + موریطانیائی اوگوئیا + موریطانیائی اوگوئیا + + + ماریشس کا روپیہ + ماریشس کا روپیہ + + + مالدیپ سی روفیہ + مالدیپ سی روفیہ + + + ملاوی کواچا + ملاوی کواچا + + + 
میکسیکی پیسہ + میکسیکی پیسہ + + + ملیشیائی رنگِٹ + ملیشیائی رنگِٹ + + + موزامبیقی میٹیکل + موزامبیقی میٹیکل + + + نامیبیائی ڈالر + نامیبیائی ڈالر + + + نائیجیریائی نائرا + نائیجیریائی نائرا + + + نکارا گوا کا کورڈوبا + نکارا گوا کا کورڈوبا + + + ناروے کرون + ناروے کرون + + + نیپالی روپیہ + نیپالی روپیہ + + + نیوزی لینڈ ڈالر + نیوزی لینڈ ڈالر + + + عمانی ریال + عمانی ریال + + + پنامہ کا بالبوآ + پنامہ کا بالبوآ + + + پیروویئن سول + پیروویئن سول + + + پاپوآ نم گنی سی کینا + پاپوآ نم گنی سی کینا + + + فلپائینی پیسہ + فلپائینی پیسہ + + + پاکستانی روپیہ + پاکستانی روپیہ + + + پولش زلوٹی + پولش زلوٹی + + + پیراگوئے سی گوآرنی + پیراگوئے سی گوآرنی + + + قطری ریال + قطری ریال + + + رومانیائی لیو + رومانیائی لیو + + + سربین دینار + سربین دینار + + + روسی روبل + روسی روبل + + + روانڈا سی فرانک + روانڈا سی فرانک + + + سعودی ریال + سعودی ریال + + + سولومن آئلینڈز ڈالر + سولومن آئلینڈز ڈالر + + + سشلی کا روپیہ + سشلی کا روپیہ + + + سوڈانی پاؤنڈ + سوڈانی پاؤنڈ + + + سویڈن کرونا + سویڈن کرونا + + + سنگا پور ڈالر + سنگا پور ڈالر + + + سینٹ ہیلینا پاؤنڈ + سینٹ ہیلینا پاؤنڈ + + + سیئرا لیون لیون + سیئرا لیون لیون + + + صومالی شلنگ + صومالی شلنگ + + + سورینامی ڈالر + سورینامی ڈالر + + + جنوبی سوڈانی پاؤنڈ + جنوبی سوڈانی پاؤنڈ + + + ساؤ ٹومے آں پرنسپے ڈوبرا + ساؤ ٹومے آں پرنسپے ڈوبرا + + + شامی پونڈ + شامی پونڈ + + + سوازی لیلانجینی + سوازی لیلانجینی + + + تھائی باہت + تھائی باہت + + + تاجکستانی سومونی + تاجکستانی سومونی + + + ترکمانستانی منات + ترکمانستانی منات + + + تیونیسیائی دینار + تیونیسیائی دینار + + + ٹونگن پانگا + ٹونگن پانگا + + + ترکی لیرا + ترکی لیرا + + + ترینیداد آں ٹوباگو سی ڈالر + ترینیداد آں ٹوباگو سی ڈالر + + + نیو تائیوان ڈالر + نیو تائیوان ڈالر + + + تنزانیائی شلنگ + تنزانیائی شلنگ + + + یوکرینیائی ہریونیا + یوکرینیائی ہریونیا + + + یوگانڈا شلنگ + یوگانڈا شلنگ + + + امریکی ڈالر + امریکی ڈالر + $ + + + یوروگویان پیسو + یوروگویان پیسو + + + ازبکستانی سوم + ازبکستانی سوم + + + وینزویلا بولیور + وینزویلا بولیور + + + ویتنامی ڈانگ + ویتنامی ڈانگ + + + وینوواتو واتو + 
وینوواتو واتو + + + ساموآ سی ٹالا + ساموآ سی ٹالا + + + وسطی افریقی [CFA] فرانک + وسطی افریقی [CFA] فرانک + + + مشرقی کریبیا سی ڈالر + مشرقی کریبیا سی ڈالر + + + مغربی افریقی [CFA] فرانک + مغربی افریقی [CFA] فرانک + + + CFP فرانک + CFP فرانک + + + نامعلوم پیس + نامعلوم پیس + + + یمنی ریال + یمنی ریال + + + جنوبی افریقی رانڈ + جنوبی افریقی رانڈ + + + زامبیائی کواچا + زامبیائی کواچا + + + + {0} گینٹہ + دایاں موڑ نمبر {0} مڑیں + + + + + + ڈیسی {0} + + + سینٹی {0} + + + ملی {0} + + + مائکرو {0} + + + نینو {0} + + + پکو{0} + + + فیمٹو{0} + + + اٹو{0} + + + زپٹو{0} + + + یوکٹو{0} + + + ڈیکا{0} + + + ہیکٹو{0} + + + کلو{0} + + + میگا{0} + + + گیگا {0} + + + ٹیرا{0} + + + پیٹا{0} + + + اکسا{0} + + + زیٹا{0} + + + یوٹا{0} + + + کیبی{0} + + + میبی{0} + + + جیبی{0} + + + ٹیبی{0} + + + پیبی{0} + + + ایکسبی{0} + + + زیبی{0} + + + یوب{0} + + + {0} فی {1} + + + مربع {0} + + + کیوبک {0} + + + {0}⋅{1} + + + جی-فورس + {0} جی-فورس + + + میٹر فی مربع سیکنڈ + {0} میٹر فی مربع سیکنڈ + + + گردش + {0} rev + + + ریڈینس + {0} ریڈین + + + ڈگری + {0} ڈگری + + + آرک منٹ + {0} آرک منٹ + + + آرک سیکنڈ + {0} آرک سیکنڈ + + + مربع کلو میٹر + {0} مربع کلو میٹر + {0} فی مربع کلو میٹر + + + ہیکٹر + {0} ہیکٹر + + + مربع میٹر + {0} مربع میٹر + {0} فی مربع میٹر + + + مربع سینٹی میٹر + {0} مربع سینٹی میٹر + {0} فی مربع سینٹی میٹر + + + مربع میل + {0} مربع میل + {0} فی مربع میل + + + ایکڑ + {0} ایکڑ + + + مربع گز + {0} مربع گز + + + مربع فٹ + {0} مربع فٹ + + + مربع انچا + {0} مربع انچا + {0} فی مربع انچا + + + دُنامز + {0} دُنام + + + قیراط + {0} قیراط + + + ملی گرام فی ڈیسی لیٹر + {0} ملی گرام فی ڈیسی لیٹر + + + ملی مولس فی لیٹر + {0} ملی مول فی لیٹر + + + فی ملین حصے + {0} فی ملین حصے + + + فیصد + {0} فیصد + + + فی ملی + {0} فی ملی + + + پرمرئیڈ + {0} پرمرئیڈ + + + مولز + {0} مول + + + لیٹر فی کلومیٹر + {0} لیٹر فی کلومیٹر + + + لیٹر فی 100 کلو میٹر + {0} لیٹر فی 100 کلو میٹر + + + میل فی گیلن + {0} میل فی گیلن + + + میل فی امپیریل گیلن + {0} میل فی امپیریل گیلن + + + پیٹا بائٹس + {0} پیٹا بائٹ + + + 
ٹیرابائٹس + {0} ٹیرابائٹ + + + ٹیرابٹس + {0} ٹیرابٹ + + + گیگابائٹس + {0} گیگابائٹ + + + گیگابٹس + {0} گیگابٹ + + + ميگابائٹس + {0} میگابائٹ + + + میگابٹس + {0} میگابٹ + + + کلوبائٹس + {0} کلوبائٹ + + + کلوبٹس + {0} کلوبٹ + + + بائٹ + {0} بائٹ + + + بٹس + {0} بٹ + + + قرن + {0} قرن + + + دہائیاں + {0} دہائی + + + کال + {0} کال + فی کال {0} + + + ما + {0} ما + فی ما {0} + + + ہفتہ + {0} ہفتہ + {0} فی ہفتہ + + + دی + {0} دی + {0} فی دی + + + گینٹہ + {0} گینٹہ + {0} فی گینٹہ + + + میلٹ + {0} میلٹ + {0} فی میلٹ + + + سیکنڈ + {0} سیکنڈ + {0} فی سیکنڈ + + + ملی سیکنڈز + {0} ملی سیکنڈ + + + مائیکرو سیکنڈز + {0} مائیکرو سیکنڈ + + + نینو سیکنڈز + {0} نینو سیکنڈ + + + ایمپیئر + {0} ایمپیئر + + + ملی ایمپیئر + {0} ملی ایمپیئر + + + اوہم + {0} اوہم + + + وولٹ + {0} وولٹ + + + کلو کیلوریز + {0} کلو کیلوری + + + کیلوریز + {0} کیلوری + + + کیلوریز + {0} کیلوری + + + کلو جول + {0} کلو جول + + + جول + {0} جول + + + کلو واٹ آور + {0} کلو واٹ آور + + + الیکٹرون وولٹس + {0} الیکٹرون وولٹ + + + برطانوی تھرمل اکائیاں + {0} برطانوی تھرمل اکائی + + + امریکی تھرمز + {0} امریکی تھرم + + + پاؤنڈز قوت + {0} پاؤنڈ قوت + + + نیوٹنز + {0} نیوٹن + + + گیگاہرٹز + {0} گیگاہرٹز + + + میگاہرٹز + {0} میگاہرٹز + + + کلوہرٹز + {0} کلوہرٹز + + + ہرٹز + {0} ہرٹز + + + ٹائپوگرافک em + {0} em + + + پکسلز + {0} پکسل + + + میگا پکسلز + {0} میگا پکسل + + + پکسلز فی سینٹی میٹر + {0} پکسل فی سینٹی میٹر + + + پکسلز فی انچا + {0} پکسل فی انچا + + + ڈاٹس فی سینٹی میٹر + {0} ڈاٹ فی سینٹی میٹر + + + ڈاٹس فی انچا + {0} ڈاٹ فی انچا + + + ڈاٹ + {0} ڈاٹ + + + زمین کا رداس + {0} زمین رداس + + + کلو میٹر + {0} کلو میٹر + {0} فی کلومیٹر + + + میٹر + {0} میٹر + {0} فی میٹر + + + ڈیسی میٹر + {0} ڈیسی میٹر + + + سینٹی میٹر + {0} سینٹی میٹر + {0} فی سینٹی میٹر + + + ملی میٹر + {0} ملیمیٹر + + + مائیکرو میٹر + {0} مائیکرو میٹر + + + نینو میٹر + {0} نینو میٹر + + + پیکو میٹر + {0} پیکو میٹر + + + میل + {0} میل + + + گز + {0} گز + + + فٹ + {0} فٹ + {0} فی فٹ + + + انچا + {0} انچا + {0} فی انچا + + + پارسیک + {0} پارسیک + + + نوری 
کال + {0} نوری کال + + + ایسٹرونومیکل یونٹس + {0} ایسٹرونومیکل یونٹ + + + فرلانگ + {0} فرلانگ + + + فیتھامز + {0} فیتھامز + + + بحری میل + {0} بحری میل + + + اسکینڈی نیویائی میل + {0} اسکینڈی نیویائی میل + + + پوائنٹس + {0} پوائنٹس + + + شمسی رداس + {0} شمسی رداس + + + lux + {0} lux + + + کنڈیلا + {0} کنڈیلا + + + لیومِن + {0} لیومِن + + + شمسی چمک + {0} شمسی چمک + + + میٹرک ٹن + {0} میٹرک ٹن + + + کلو + {0} کلو + {0} فی کلو + + + گرام + {0} گرام + {0} فی گرام + + + ملی گرام + {0} ملی گرام + + + مائکرو گرام + {0} مائکرو گرام + + + ٹن + {0} ٹن + + + اسٹونز + {0} اسٹون + + + پاؤنڈ + {0} پاؤنڈ + {0} فی پاؤنڈ + + + اونس + {0} اونس + {0} فی اونس + + + ٹرائے اونس + {0} ٹرائے اونس + + + قیراط + {0} قیراط + + + ڈالٹنز + {0} ڈالٹن + + + زمینی کمیتیں + {0} زمینی کمیت + + + شمسی کمیتیں + {0} شمسی کمیت + + + گرین + {0} گرین + + + گیگا واٹ + {0} گیگا واٹ + + + میگا واٹ + {0} میگا واٹ + + + کلو واٹ + {0} کلو واٹ + + + واٹ + {0} واٹ + + + ملی واٹ + {0} ملی واٹ + + + ہارس پاور + {0} ہارس پاور + + + ملی میٹر مرکری + {0} ملی میٹر مرکری + + + پاؤنڈز فی مربع انچا + {0} پاؤنڈ فی مربع انچا + + + انچا مرکری + {0} انچا مرکری + + + بارز + {0} بار + + + ملی بار + {0} ملی بار + + + ماحول + {0} ماحول + + + پاسکل + {0} پاسکل + + + ہیکٹو پاسکل + {0} ہیکٹو پاسکل + + + کلو پاسکلز + {0} کلو پاسکل + + + میگا پاسکلز + {0} میگا پاسکل + + + کلومیٹر فی گینٹہ + {0} کلومیٹر فی گینٹہ + + + میٹر فی سیکنڈ + {0} میٹر فی سیکنڈ + + + میل فی گینٹہ + {0} میل فی گینٹہ + + + ناٹس + {0} ناٹ + + + ° + {0}° + + + ڈگری سیلسیس + {0} ڈگری سیلسیس + + + ڈگری فارن ہائیٹ + {0} ڈگری فارن ہائیٹ + + + کیلون + {0} کیلون + + + پاؤنڈ فٹ + {0} پاؤنڈ فٹ + + + نیوٹن میٹر + {0} نیوٹن میٹر + + + کیوبک کلو میٹر + {0} کیوبک کلو میٹر + + + کیوبک میٹر + {0} کیوبک میٹر + {0} فی کیوبک میٹر + + + کیوبک سینٹی میٹر + {0} کیوبک سینٹی میٹر + {0} فی کیوبک سینٹی میٹر + + + کیوبک میل + {0} کیوبک میل + + + کیوبک گز + {0} کیوبک گز + + + کیوبک فٹ + {0} کیوبک فٹ + + + کیوبک انچا + {0} کیوبک انچا + + + میگا لیٹر + {0} میگا لیٹر + + + ہیکٹو لیٹر + {0} 
ہیکٹو لیٹر + + + لیٹر + {0} لیٹر + {0} فی لیٹر + + + ڈیسی لیٹر + {0} ڈیسی لیٹر + + + سینٹی لیٹر + {0} سینٹی لیٹر + + + ملی لیٹر + {0} ملی لیٹر + + + میٹرک پائنٹ + {0} میٹرک پائنٹ + + + میٹرک کپ + {0} میٹرک کپ + + + ایکڑ فٹ + {0} ایکڑ فٹ + + + بوشیل + {0} بوشیل + + + گیلن + {0} گیلن + {0} فی گیلن + + + امپیریل گیلن + {0} امپیریل گیلن + {0} فی امپیریل گیلن + + + کوارٹ + {0} کوارٹ + + + پائنٹ + {0} پائنٹ + + + کپ + {0} کپ + + + فلوئڈ اونس + {0} فلوئڈ اونس + + + امپیریل فلوئڈ اونس + {0} امپیریئل فلوئڈ اونس + + + ٹیبل سپون + {0} ٹیبل سپون + + + ٹی سپون + {0} ٹی سپون + + + بیرلز + {0} بیرل + + + dstspn + {0} dstspn + + + dstspn Imp + {0} dstspn Imp + + + ٹیگیل + {0} ٹیگیل + + + dram fluid + {0} dram fl + + + jigger + {0} jigger + + + پینچ + {0} پینچ + + + qt Imp + {0} qt Imp. + + + کارڈینل ڈائریکشن + {0} مشرق + {0} شمال + {0} جنوب + {0} مغرب + + + + + ڈی۔ {0} + + + سی۔ {0} + + + می۔ {0} + + + نے۔ {0} + + + پی۔{0} + + + فے۔{0} + + + ا۔{0} + + + ز۔{0} + + + یوکٹو{0} + + + ڈے۔{0} + + + ہے۔{0} + + + کی{0} + + + مے۔{0} + + + گی۔{0} + + + ٹے۔{0} + + + پے۔{0} + + + ای۔{0} + + + زے{0} + + + یو{0} + + + {0}⋅{1} + + + جی-فورس + + + ڈگری + {0} ڈگری + + + آرک منٹ + {0} آرک منٹ + + + آرک سیکنڈ + {0} آرک سیکنڈ + + + km² + {0} km² + {0}/km² + + + ہیکٹر + {0} ہیکٹر + + + مربع میٹر + {0} m² + {0}/m² + + + cm² + {0} cm² + {0}/cm² + + + مربع میل + {0} sq mi + {0}/mi² + + + ایکڑ + {0} ایکڑ + + + مربع گز + {0} yd² + + + مربع فٹ + {0} مربع فٹ + + + مربع انچا + {0} in² + {0}/in² + + + دُنامز + {0} دُنام + + + قیراط + {0} kt + + + ملی مول/لیٹر + + + حصے/ملین + + + فیصد + + + فی ملی + {0} فی ملی + + + پرمرئیڈ + {0}‱ + + + مول + {0} مول + + + لیٹر/100 کلو میٹر + {0} L/100km + + + mpg + {0} mpg + + + miles/gal Imp. + {0} mpg Imp. 
+ + + پی بائٹ + {0} پی بی + + + Tbit + + + Gbit + + + MByte + + + kByte + + + kbit + + + بائٹ + {0} byte + + + قرن + {0} قرن + + + دہائی + {0} دہائی + + + کال + {0} کال + + + ما + {0} ما + فی ما {0} + + + ہفتہ + {0} ہفتہ + {0} فی ہفتہ + + + دی + {0} دی + {0} فی دی + + + گینٹہ + {0} گینٹہ + {0} فی گینٹہ + + + میلٹ + {0} میلٹ + {0} فی میلٹ + + + سیکنڈ + {0} سیکنڈ + {0} فی سیکنڈ + + + ملی سیکنڈ + {0} ملی سیکنڈ + + + مائیکرو سیکنڈ + {0} مائیکرو سیکنڈ + + + نینو سیکنڈز + {0} نینو سیکنڈ + + + اوہم + + + وولٹ + + + kcal + {0} kcal + + + الیکٹرون وولٹ + + + امریکی تھرم + {0} امریکی تھرمز + + + پاؤنڈ قوت + + + نیوٹن + + + پکسلز + + + میگا پکسلز + + + ppcm + {0} ppcm + + + ppi + {0} ppi + + + ڈاٹ + {0} ڈاٹ + + + کلو میٹر + {0} کلو میٹر + {0} فی کلو میٹر + + + میٹر + {0} میٹر + {0}/m + + + dm + {0} dm + + + سینٹی میٹر + {0} سینٹی میٹر + {0}/سینٹی میٹر + + + ملی میٹر + {0} ملی میٹر + + + μm + {0} μm + + + nm + {0} nm + + + پیکو میٹر + {0} پیکو میٹر + + + میل + {0} میل + + + گز + {0} گز + + + فٹ + {0} فٹ + {0}/فٹ + + + انچا + {0} انچا + {0}/انچا + + + پارسیک + {0} پارسیک + + + نوری کال + {0} نوری کال + + + au + {0} au + + + فرلانگ + {0} فرلانگ + + + فیتھامز + {0} فیتھامز + + + بحری میل + {0} بحری میل + + + smi + {0} smi + + + پوائنٹس + {0} پوائنٹس + + + شمسی رداس + {0} شمسی رداس + + + lux + {0} lx + + + گرام + {0}/g + + + ٹن + + + پاؤنڈ + + + قیراط + + + ڈالٹنز + + + زمینی کمیتیں + + + شمسی کمیتیں + + + گرین + {0} گرین + + + {0} کلو واٹ + + + واٹ + {0} واٹ + + + mm Hg + {0} mm Hg + + + {0} psi + + + بار + {0} بارز + + + کلومیٹر/گھنٹہ + {0} kph + + + میٹر فی سیکنڈ + + + میل فی گینٹہ + {0} mph + + + ڈگری سیلسیس + {0}‎°C + + + ڈگری فارن ہائیٹ + + + km³ + {0} km³ + + + + {0} m³ + {0}/m³ + + + کیوبک سینٹی میٹر + {0} cm³ + {0}/cm³ + + + کیوبک میل + {0} کیوبک میل + + + کیوبک گز + {0} yd³ + + + کیوبک فٹ + {0} ft³ + + + کیوبک انچا + {0} in³ + + + ML + {0} ML + + + لیٹر + {0} لیٹر + {0} فی لیٹر + + + ڈیسی لیٹر + + + سینٹی لیٹر + + + ایکڑ فٹ + + + بوشیل + + + gal + {0} gal + {0}/gal + + + 
Imp. gal + + + qts + {0} qt + + + کپ + + + fl oz + {0} fl oz + + + بیرل + + + ٹیگیل + {0} ٹیگیل + + + پینچ + {0} پینچ + + + ڈائریکشن + {0}E + {0}N + + + + + ڈیسی {0} + + + سینٹی {0} + + + ملی {0} + + + μ{0} + + + نینو {0} + + + پکو{0} + + + فیمٹو{0} + + + اٹو{0} + + + زپٹو{0} + + + یوکٹو{0} + + + ڈیکا{0} + + + ہیکٹو{0} + + + کلو{0} + + + میگا{0} + + + گیگا {0} + + + ٹیرا{0} + + + پیٹا{0} + + + اکسا{0} + + + زیٹا{0} + + + یوٹا{0} + + + کیبی{0} + + + Mi{0} + + + Gi{0} + + + Ti{0} + + + Pi{0} + + + Ei{0} + + + Zi{0} + + + Yi{0} + + + {0}/{1} + + + {0}² + + + {0}³ + + + {0}⋅{1} + + + ہیکٹر + {0} ہیکٹر + + + مربع میٹر + {0} m² + {0}/m² + + + cm² + {0} cm² + {0}/cm² + + + مربع میل + {0} sq mi + {0}/mi² + + + ایکڑ + {0} ایکڑ + + + مربع گز + {0} yd² + + + مربع فٹ + {0} مربع فٹ + + + مربع انچا + + + دُنامز + {0} دُنام + + + فیصد + {0}% + + + لیٹر/100 کلو میٹر + {0} L/100km + + + قرن + {0} قرن + + + دہائی + {0} دہائی + + + کال + {0} کال + + + ما + {0} ما + فی ما {0} + + + ہفتہ + {0} ہفتہ + {0} فی ہفتہ + + + دی + {0} دی + + + گینٹہ + {0} گینٹہ + {0} فی گینٹہ + + + میلٹ + {0} میلٹ + {0} فی میلٹ + + + سیکنڈ + {0} سیکنڈ + {0} فی سیکنڈ + + + ملی سیکنڈ + {0} ملی س + + + مائیکرو سیکنڈ + {0} م س + + + نینو سیکنڈز + {0} ن س + + + پکسلز + + + میگا پکسلز + + + ڈاٹ + {0} ڈاٹ + + + کلو میٹر + {0} کلو میٹر + {0} فی کلو میٹر + + + میٹر + {0} میٹر + + + سینٹی میٹر + {0}cm + {0}/سینٹی میٹر + + + ملی میٹر + {0} ملی میٹر + + + پیکو میٹر + {0} پیکو میٹر + + + میل + {0} میل + + + گز + {0} گز + + + فٹ + {0} فٹ + {0}/فٹ + + + انچا + {0} انچا + {0}/انچا + + + پارسیک + {0} پارسیک + + + نوری کال + {0} نوری کال + + + فرلانگ + {0} فرلانگ + + + فیتھامز + {0} فیتھامز + + + بحری میل + {0} بحری میل + + + پوائنٹس + {0} پوائنٹس + + + شمسی رداس + {0} شمسی ر + + + kg + {0} kg + + + گرام + {0} گرام + + + km/hr + {0}kph + + + ڈگری سیلسیس + {0}‎°C + + + لیٹر + {0} لیٹر + + + ڈائریکشن + {0}E + {0}N + {0}S + {0}W + + + + + + {0}،{1} + {0}،{1} + {0} ،آں {1} + {0} آں {1} + + + {0}،{1} + {0}،{1} + {0}، یا {1} + {0} یا 
{1} + + + {0}،{1} + {0}،{1} + {0}، یا {1} + {0} یا {1} + + + {0}،{1} + {0}،{1} + {0}، یا {1} + {0} یا {1} + + + {0}،{1} + {0}،{1} + {0}،{1} + {0}،{1} + + + {0}،{1} + {0}،{1} + {0} ،آں {1} + {0} آں {1} + + + {0}،{1} + {0}،{1} + {0} ،آں {1} + {0} آں {1} + + + {0}،{1} + {0}،{1} + {0} ،آں {1} + {0} آں {1} + + + {0}،{1} + {0}،{1} + {0} ،آں {1} + {0} آں {1} + + + + + ہاں:ہاں + نأ:نأ + + + diff --git a/make/data/cldr/common/main/trw_PK.xml b/make/data/cldr/common/main/trw_PK.xml new file mode 100644 index 00000000000..c7ddcf83557 --- /dev/null +++ b/make/data/cldr/common/main/trw_PK.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/ts.xml b/make/data/cldr/common/main/ts.xml new file mode 100644 index 00000000000..062cc30c7f6 --- /dev/null +++ b/make/data/cldr/common/main/ts.xml @@ -0,0 +1,579 @@ + + + + + + + + + + + Xi Czech + Xi Danish + Xi Jarimani + Xi Giriki + Xi Nghezi + Xi spain + hi xi Estonia + Xi Finnish + Xi Furwa + XiHeberu + hi xi Hungary + hi xi Iceland + Xi Ithali + Xi Japani + Xikorea + hi xi Lithuania + hi xi Latvia + Xi bunu + Xi Norway + Xi Polixi + Putukezi + hi xi Romania + Xi Rhaxiya + Xi Swiden + Xitsonga + + + + [a b c d e f g h i j k l m n o p q r s t u v w x y z] + [A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + + + + + + + + + + + + + + Sun + Yan + Kul + Dzi + Mud + Kho + Maw + Mha + Ndz + Nhl + Huk + N’w + + + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + + + Sunguti + Nyenyenyani + Nyenyankulu + Dzivamisoko + Mudyaxihi + Khotavuxika + Mawuwani + Mhawuri + Ndzhati + Nhlangula + Hukuri + N’wendzamhala + + + + + Sun + Yan + Kul + Dzi + Mud + Kho + Maw + Mha + Ndz + Nhl + Huk + N’w + + + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + + + Sunguti + Nyenyenyani + Nyenyankulu + Dzivamisoko + Mudyaxihi + Khotavuxika + Mawuwani + Mhawuri + Ndzhati + Nhlangula + Hukuri + N’wendzamhala + + + + + + + Son + Mus + Bir + Har + Ne + Tlh + Mug + + + S + M + T + W + T + F + S + + + Son + Mus + Bir + Har + Ne + Tlh 
+ Mug + + + Sonta + Musumbhunuku + Ravumbirhi + Ravunharhu + Ravumune + Ravuntlhanu + Mugqivela + + + + + Son + Mus + Bir + Har + Ne + Tlh + Mug + + + S + M + T + W + T + F + S + + + Son + Mus + Bir + Har + Ne + Tlh + Mug + + + Sonta + Musumbhunuku + Ravumbirhi + Ravunharhu + Ravumune + Ravuntlhanu + Mugqivela + + + + + + + K1 + K2 + K3 + K4 + + + 1 + 2 + 3 + 4 + + + Kotara yo sungula + Kotara ya vumbirhi + Kotara ya vunharhu + Kotara ya vumune + + + + + K1 + K2 + K3 + K4 + + + 1 + 2 + 3 + 4 + + + Kotara yo sungula + Kotara ya vumbirhi + Kotara ya vunharhu + Kotara ya vumune + + + + + + + AM + PM + + + AM + PM + + + AM + PM + + + + + AM + PM + + + AM + PM + + + AM + PM + + + + + + BC + BCE + CE + + + + + + y MMMM d, EEEE + yMMMMEEEEd + + + + + y MMMM d + yMMMMd + + + + + y MMM d + yMMMd + + + + + y-MM-dd + yMMdd + + + + + + + HH:mm:ss zzzz + HHmmsszzzz + + + + + HH:mm:ss z + HHmmssz + + + + + HH:mm:ss + HHmmss + + + + + HH:mm + HHmm + + + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + + {1} {0} + + + + d + ccc + d, E + E h:mm a + E HH:mm + E h:mm:ss a + E HH:mm:ss + G y + G y MMM + G y MMM d + G y MMM d, E + h a + HH + h:mm a + HH:mm + h:mm:ss a + HH:mm:ss + h:mm:ss a v + HH:mm:ss v + h:mm a v + HH:mm v + L + MM-dd + MM-dd, E + LLL + MMM d + MMM d, E + MMMM d + 'week' W 'of' MMM + 'week' W 'of' MMM + mm:ss + y + y-MM + y-MM-dd + y-MM-dd, E + y MMM + y MMM d + y MMM d, E + y MMMM + y QQQ + y QQQQ + 'week' w 'of' Y + 'week' w 'of' Y + + + {0} {1} + + + {0} – {1} + + d–d + + + h a – h a + h–h a + + + HH–HH + + + h:mm a – h:mm a + h:mm–h:mm a + h:mm–h:mm a + + + HH:mm–HH:mm + HH:mm–HH:mm + + + h:mm a – h:mm a v + h:mm–h:mm a v + h:mm–h:mm a v + + + HH:mm–HH:mm v + HH:mm–HH:mm v + + + h a – h a v + h–h a v + + + HH–HH v + + + MM–MM + + + MM-dd – MM-dd + MM-dd – MM-dd + + + MM-dd, E – MM-dd, E + MM-dd, E – MM-dd, E + + + LLL–LLL + + + MMM d–d + MMM d – MMM d + + + MMM d, E – MMM d, E + MMM d, E – MMM d, E + + + y–y + + + y-MM – y-MM + y-MM – y-MM + + + 
y-MM-dd – y-MM-dd + y-MM-dd – y-MM-dd + y-MM-dd – y-MM-dd + + + y-MM-dd, E – y-MM-dd, E + y-MM-dd, E – y-MM-dd, E + y-MM-dd, E – y-MM-dd, E + + + y MMM–MMM + y MMM – y MMM + + + y MMM d–d + y MMM d – MMM d + y MMM d – y MMM d + + + y MMM d, E – MMM d, E + y MMM d, E – MMM d, E + y MMM d, E – y MMM d, E + + + y MMMM–MMMM + y MMMM – y MMMM + + + + + + + + 1 + + , +   + % + + + - + E + × + + + NaN + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + ¤ #,##0.00 + + + ¤ #,##0.00 + + + {0} {1} + {0} {1} + + + + R + + + + ≥{0} + {0}–{1} + + + diff --git a/make/data/cldr/common/main/ts_ZA.xml b/make/data/cldr/common/main/ts_ZA.xml new file mode 100644 index 00000000000..4cb6219d8d8 --- /dev/null +++ b/make/data/cldr/common/main/ts_ZA.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/tt.xml b/make/data/cldr/common/main/tt.xml index 2851a33c039..5d642d1b6d3 100644 --- a/make/data/cldr/common/main/tt.xml +++ b/make/data/cldr/common/main/tt.xml @@ -1,6 +1,6 @@ - + + + + + + + + Tshivenḓa + + + + [a b d ḓ e f g h i k l ḽ m n ṅ ṋ o p r s t ṱ u v w x y z] + [c j q] + [A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + + + + + + + + + + + + + + Pha + Luh + Ṱhf + Lam + Shu + Lwi + Lwa + Ṱha + Khu + Tsh + Ḽar + Nye + + + Phando + Luhuhi + Ṱhafamuhwe + Lambamai + Shundunthule + Fulwi + Fulwana + Ṱhangule + Khubvumedzi + Tshimedzi + Ḽara + Nyendavhusiku + + + + + + + Swo + Mus + Vhi + Rar + Ṋa + Ṱan + Mug + + + Swondaha + Musumbuluwo + Ḽavhuvhili + Ḽavhuraru + Ḽavhuṋa + Ḽavhuṱanu + Mugivhela + + + + + + + K1 + K2 + K3 + K4 + + + Kotara ya u thoma + Kotara ya vhuvhili + Kotara ya vhuraru + Kotara ya vhuṋa + + + + + + + + + , +   + + + + + #,##0.### + + + + + + + #E0 + + + + + + + #,##0% + + + + + + + ¤#,##0.00 + + + + + + R + + + + diff --git a/make/data/cldr/common/main/ve_ZA.xml b/make/data/cldr/common/main/ve_ZA.xml new file mode 100644 index 00000000000..d55f653225d --- /dev/null +++ b/make/data/cldr/common/main/ve_ZA.xml @@ 
-0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/vec.xml b/make/data/cldr/common/main/vec.xml new file mode 100644 index 00000000000..32e6fceec46 --- /dev/null +++ b/make/data/cldr/common/main/vec.xml @@ -0,0 +1,44 @@ + + + + + + + + + + + abcazo + achineze + adangme + adiga + afregan + aghem + ainu + akan + aleutian + amàrego + aragoneze + angika + àrabo + àrabo moderno + arapaho + albaneze + Veneto + + + Lengua: {0} + Scritura: {0} + Rejon: {0} + + + + [a à b c d e é è f g h i ì j l m n o ó ò p r s t u ù v x z] + [ª á ć ç ḑ ʣ ǵ í k ł º q ş ţ ʦ ú w y {z\u0327}] + [\- ‐ ‑ ‒ – — ― ⁓ , ; \: ! ? . … · ' ‘ ’ " “ ” « » ( ) \[ \] \{ \} 〈 〉 @ * / \\ \& # + = ⁄] + + diff --git a/make/data/cldr/common/main/vec_IT.xml b/make/data/cldr/common/main/vec_IT.xml new file mode 100644 index 00000000000..ee6861ddd28 --- /dev/null +++ b/make/data/cldr/common/main/vec_IT.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/vi.xml b/make/data/cldr/common/main/vi.xml index 5c88443c07f..6b6dd420fc9 100644 --- a/make/data/cldr/common/main/vi.xml +++ b/make/data/cldr/common/main/vi.xml @@ -1,6 +1,6 @@ - + + + + + + + + Deutänapük + Linglänapük + Sperantapük + Spanyänapük + Fransänapük + Litaliyänapük + Yapänapük + Portugänapük + Rusänapük + Volapük + Tsyinänapük + + + Brasilän + Tsyinän + Deutän + Spanyän + Fransän + Regän Pebalöl + Grusiyän + Grikän + Lindän + Litaliyän + Yapän + Mäxikän + Naureän + Portugän + Palauäns + Rusän + Lamerikän + + + Pük: {0} + Topäd: {0} + + + + [a ä b c d e f g h i j k l m n o ö p r s t u ü v x y z] + [q w] + [A Ä B C D E F G H I J K L M N O Ö P R S T U Ü V X Y Z] + [\- ‐ ‑ – — , ; \: ! ? . … ' ‘ ’ " “ ” « » ( ) \[ \] \{ \} § @ * / \& #] + {0}… + … {0} + {0} … {1} + + + + + + + + + + + + + + G y MMMM'a' 'd'. d'id' + GyMMMMd + + + + + G y MMMM d + GyMMMMd + + + + + G y MMM. 
d + GyMMMd + + + + + GGGGG y-MM-dd + GGGGGyMMdd + + + + + + + + + yan + feb + mäz + prl + may + yun + yul + gst + set + ton + nov + dek + + + Y + F + M + P + M + Y + Y + G + S + T + N + D + + + yanul + febul + mäzul + prilul + mayul + yunul + yulul + gustul + setul + tobul + novul + dekul + + + + + yan + feb + mäz + prl + may + yun + yul + gst + set + tob + nov + dek + + + Y + F + M + P + M + Y + Y + G + S + T + N + D + + + yanul + febul + mäzul + prilul + mayul + yunul + yulul + gustul + setul + tobul + novul + dekul + + + + + + + su. + mu. + tu. + ve. + dö. + fr. + zä. + + + sudel + mudel + tudel + vedel + dödel + fridel + zädel + + + + + Su + Mu + Tu + Ve + + Fr + + + + S + M + T + V + D + F + Z + + + sudel + mudel + tudel + vedel + dödel + fridel + zädel + + + + + + + Yf1 + Yf2 + Yf3 + Yf4 + + + 1 + 2 + 3 + 4 + + + 1id yelafoldil + 2id yelafoldil + 3id yelafoldil + 4id yelafoldil + + + + + + b. t. kr. + p. t. kr. + + + b. t. kr. + p. t. kr. + + + + + + y MMMM'a' 'd'. d'id' + yMMMMd + + + + + y MMMM d + yMMMMd + + + + + y MMM. 
d + yMMMd + + + + + y-MM-dd + yMMdd + + + + + + + HH:mm:ss zzzz + HHmmsszzzz + + + + + HH:mm:ss z + HHmmssz + + + + + HH:mm:ss + HHmmss + + + + + HH:mm + HHmm + + + + + + + + Epoche + + + yel + äyelo + ayelo + oyelo + + + mul + ämulo + amulo + omulo + + + vig + ävigo + avigo + ovigo + + + Tag + edelo + ädelo + adelo + odelo + udelo + + + vodabel + + + delalaf + + + düp + + + minut + + + sekun + + + zon + + + + + + + yels + yel {0} + yels {0} + + + muls + mul {0} + muls {0} + + + vigs + + + dels + + + + + + si:s + no:n + + + diff --git a/make/data/cldr/common/main/vo_001.xml b/make/data/cldr/common/main/vo_001.xml new file mode 100644 index 00000000000..93848d5accd --- /dev/null +++ b/make/data/cldr/common/main/vo_001.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/vun.xml b/make/data/cldr/common/main/vun.xml index 9e3b3db51d4..65b2413a02e 100644 --- a/make/data/cldr/common/main/vun.xml +++ b/make/data/cldr/common/main/vun.xml @@ -1,6 +1,6 @@ - + + + + + + + + walon + + + + [a à â å æ b c ç d e é è ê ë f g h i î ï j k l m n o ô œ p q r s t u ù û ü v w x y ÿ z] + [á ä ã ā ē í ì ī ñ ó ò ö ø ú ǔ] + [A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + [\- ‐ ‑ – — , ; \: ! ? . 
… ’ " “ ” « » ( ) \[ \] § @ * / \& # † ‡] + + diff --git a/make/data/cldr/common/main/wa_BE.xml b/make/data/cldr/common/main/wa_BE.xml new file mode 100644 index 00000000000..1b6bab96e0c --- /dev/null +++ b/make/data/cldr/common/main/wa_BE.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/wae.xml b/make/data/cldr/common/main/wae.xml index 346980341d8..8bf9305ca7b 100644 --- a/make/data/cldr/common/main/wae.xml +++ b/make/data/cldr/common/main/wae.xml @@ -1,6 +1,6 @@ - + + + + + + + + ዐርቢኛ + ጀርመን + እንግሊዝኛ + ስፓኒሽ + ፈረንሳይኛ + ሐንድኛ + ጣሊያንኛ + ጃፓንኛ + ፖርቱጋሊኛ + ራሽኛ + ወላይታቱ + ቻይንኛ + + + + + + አንዶራ + የተባበሩት አረብ ኤምሬትስ + አልባኒያ + አርሜኒያ + አርጀንቲና + ኦስትሪያ + አውስትሬሊያ + አዘርባጃን + ቦስኒያ እና ሄርዞጎቪኒያ + ባርቤዶስ + ቤልጄም + ቡልጌሪያ + ባህሬን + ቤርሙዳ + ቦሊቪያ + ብራዚል + ቡህታን + ቤላሩስ + ቤሊዘ + ኮንጎ + የመካከለኛው አፍሪካ ሪፐብሊክ + ስዊዘርላንድ + ቺሊ + ካሜሩን + ቻይና + ኮሎምቢያ + ኬፕ ቬርዴ + ሳይፕረስ + ቼክ ሪፑብሊክ + ጀርመን + ዴንማርክ + ዶሚኒካ + ዶሚኒክ ሪፑብሊክ + አልጄሪያ + ኢኳዶር + ኤስቶኒያ + ግብጽ + ምዕራባዊ ሳህራ + ኤርትራ + ስፔን + ኢትዮጵያ + ፊንላንድ + ፊጂ + ሚክሮኔዢያ + ፈረንሳይ + እንግሊዝ + ጆርጂያ + የፈረንሳይ ጉዊአና + ጋምቢያ + ጊኒ + ኢኳቶሪያል ጊኒ + ግሪክ + ቢሳዎ + ጉያና + ሆንግ ኮንግ + ክሮኤሽያ + ሀይቲ + ሀንጋሪ + ኢንዶኔዢያ + አየርላንድ + እስራኤል + ህንድ + ኢራቅ + አይስላንድ + ጣሊያን + ጃማይካ + ጆርዳን + ጃፓን + ካምቦዲያ + ኮሞሮስ + ሰሜን ኮሪያ + ደቡብ ኮሪያ + ክዌት + ሊባኖስ + ሊቱዌኒያ + ላትቪያ + ሊቢያ + ሞሮኮ + ሞልዶቫ + ማከዶኒያ + ሞንጎሊያ + ማካዎ + ሞሪቴኒያ + ማልታ + ማሩሸስ + ሜክሲኮ + ማሌዢያ + ናሚቢያ + ኒው ካሌዶኒያ + ናይጄሪያ + ኔዘርላንድ + ኖርዌ + ኔፓል + ኒው ዚላንድ + ፔሩ + የፈረንሳይ ፖሊኔዢያ + ፓፑዋ ኒው ጊኒ + ፖላንድ + ፖርታ ሪኮ + ሮሜኒያ + ራሺያ + ሳውድአረቢያ + ሱዳን + ስዊድን + ሲንጋፖር + ስሎቬኒያ + ስሎቫኪያ + ሴኔጋል + ሱማሌ + ሲሪያ + ቻድ + የፈረንሳይ ደቡባዊ ግዛቶች + ታይላንድ + ታጃኪስታን + ምስራቅ ቲሞር + ቱኒዚያ + ቱርክ + ትሪኒዳድ እና ቶባጎ + ታንዛኒያ + ዩጋንዳ + አሜሪካ + ዩዝበኪስታን + ቬንዙዌላ + የእንግሊዝ ድንግል ደሴቶች + የአሜሪካ ቨርጂን ደሴቶች + የመን + ደቡብ አፍሪካ + ዛምቢያ + + + + [\u135F ᎐-᎙ ሀ-ሏ ⶀ ሐ-ሟ ᎀ-ᎃ ⶁ ሠ-ሯ ⶂ ሰ-ሷ ⶃ ሸ-ሿ ⶄ ቀ-ቈ ቊ-ቍ ቐ-ቖ ቘ ቚ-ቝ በ-ቧ ᎄ-ᎇ ⶅ ቨ-ቷ ⶆ ቸ-ቿ ⶇ ኀ-ኈ ኊ-ኍ ነ-ኗ ⶈ ኘ-ኟ ⶉ አ-ኧ ⶊ ከ-ኰ ኲ-ኵ ኸ-ኾ ዀ ዂ-ዅ ወ-ዖ ዘ-ዟ ⶋ ዠ-ዷ ⶌ ዸ-ዿ ⶍ ጀ-ጇ ⶎ ገ-ጐ ጒ-ጕ ጘ-ጟ ⶓ-ⶖ ጠ-ጧ ⶏ ጨ-ጯ ⶐ ጰ-ጷ ⶑ ጸ-ፏ ᎈ-ᎋ ፐ-ፗ ᎌ-ᎏ ⶒ ፘ-ፚ ⶠ-ⶦ ⶨ-ⶮ ⶰ-ⶶ ⶸ-ⶾ ⷀ-ⷆ ⷈ-ⷎ ⷐ-ⷖ ⷘ-ⷞ] + [ሀ ለ ⶀ መ ᎀ ᎁ ᎃ ⶁ ረ ⶂ ሰ ሸ ⶄ ቈ ቐ ቘ ᎄ ᎅ ᎇ ⶅ ቨ ⶆ ቸ ኀ ኈ ነ ኘ ⶉ ⶊ 
ከ ኰ ዀ ወ ዐ ⶋ ዠ ደ ⶌ ዸ ጀ ⶎ ጐ ጘ ⶓ ⶕ ⶖ ⶏ ጨ ⶐ ⶑ ጸ ፈ ᎈ ᎉ ᎋ ፐ ᎍ ᎎ ᎏ ፘ ⶠ ⶢ ⶣ ⶤ ⶦ ⶨ ⶩ ⶫ ⶬ ⶮ ⶰ ⶱ ⶳ ⶴ ⶶ ⶸ ⶹ ⶻ ⶼ ⶾ ⷀ ⷁ ⷃ ⷄ ⷆ ⷈ ⷉ ⷋ ⷌ ⷎ ⷐ ⷑ ⷓ ⷔ ⷖ ⷘ ⷙ ⷛ ⷜ ⷝ] + + + + + + + + EEEE፥ dd MMMM ጋላሳ y G + GyMMMMEEEEdd + + + + + dd MMMM y G + GyMMMMdd + + + + + dd-MMM-y G + GyMMMdd + + + + + dd/MM/yy GGGGG + GGGGGyyMMdd + + + + + + + + + ጃንዩ + ፌብሩ + ማርች + ኤፕረ + ሜይ + ጁን + ጁላይ + ኦገስ + ሴፕቴ + ኦክተ + ኖቬም + ዲሴም + + + ጃንዩወሪ + ፌብሩወሪ + ማርች + ኤፕረል + ሜይ + ጁን + ጁላይ + ኦገስት + ሴፕቴምበር + ኦክተውበር + ኖቬምበር + ዲሴምበር + + + + + + + + + + + + + + + + + + + + + + + ወጋ + ሳይኖ + ማቆሳኛ + አሩዋ + ሃሙሳ + አርባ + ቄራ + + + + + + + + + + + + + + + + + + ማለዶ + ቃማ + + + ማለዶ + ቃማ + + + + + + አዳ ዎዴ + ግሮተታ ላይታ + + + + + + EEEE፥ dd MMMM ጋላሳ y G + GyMMMMEEEEdd + + + + + dd MMMM y + yMMMMdd + + + + + dd-MMM-y + yMMMdd + + + + + dd/MM/yy + yyMMdd + + + + + + + h:mm:ss a zzzz + ahmmsszzzz + + + + + h:mm:ss a z + ahmmssz + + + + + h:mm:ss a + ahmmss + + + + + h:mm a + ahmm + + + + + + + + latn + + latn + ethi + + + + + + + + ¤#,##0.00 + + + + + + የብራዚል ሪል + + + የቻይና ዩአን ረንሚንቢ + + + የኢትዮጵያ ብር + Br + + + አውሮ + + + የእንግሊዝ ፓውንድ ስተርሊንግ + + + የሕንድ ሩፒ + + + የጃፓን የን + + + የራሻ ሩብል + + + የአሜሪካን ዶላር + + + + diff --git a/make/data/cldr/common/main/wal_ET.xml b/make/data/cldr/common/main/wal_ET.xml new file mode 100644 index 00000000000..6c199c58faf --- /dev/null +++ b/make/data/cldr/common/main/wal_ET.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/wbp.xml b/make/data/cldr/common/main/wbp.xml new file mode 100644 index 00000000000..9058e0b31e5 --- /dev/null +++ b/make/data/cldr/common/main/wbp.xml @@ -0,0 +1,30 @@ + + + + + + + + + + + Yinkirliji + Warlpiri + + + + + left-to-right + top-to-bottom + + + + [a d g i j k l m n p r t u w y] + [b c e f h o q s v x z] + [J K L {LY} M N {NG} {NY} P R {RD} {RL} {RN} {RR} {RT} T W Y] + + diff --git a/make/data/cldr/common/main/wbp_AU.xml b/make/data/cldr/common/main/wbp_AU.xml new file mode 100644 index 00000000000..04068cc1078 --- /dev/null +++ 
b/make/data/cldr/common/main/wbp_AU.xml @@ -0,0 +1,14 @@ + + + + + + + + + + diff --git a/make/data/cldr/common/main/wo.xml b/make/data/cldr/common/main/wo.xml index 6406df14ba0..0676fddae95 100644 --- a/make/data/cldr/common/main/wo.xml +++ b/make/data/cldr/common/main/wo.xml @@ -1,6 +1,6 @@ - - af agq ak am ann ar as asa ast az + af agq ak am ann apc ar as asa ast az bas be bem bez bg bgc bho bm bn bo br brx bs ca ccp ce ceb cgg chr ckb cs cu cv cy da dav de dje doi dsb dua dyo dz @@ -22,13 +22,13 @@ ia id ig ii is it ja jgo jmc jv ka kab kam kde kea kgp khq ki kk kkj kl kln km kn ko kok ks ksb ksf ksh ku kw ky - lag lb lg lij lkt ln lo lrc lt lu luo luy lv + lag lb lg lij lkt lmo ln lo lrc lt lu luo luy lv mai mas mer mfe mg mgh mgo mi mk ml mn mni mr ms mt mua my mzn naq nb nd nds ne nl nmg nn nnh no nus nyn om or os - pa pcm pis pl prg ps pt + pa pap pcm pis pl prg ps pt qu - raj rm rn ro rof + raj rif rm rn ro rof und ru rw rwk sa sah saq sat sbp sc sd se seh ses sg shi si sk sl smn sms sn so sq sr su sv sw @@ -57,7 +57,7 @@ mad mag mak mdf men mh mic min moe moh mos mus mwl myv na nap new ng nia niu nog nqo nr nso nv ny oc ojb ojc ojs ojw oka - pag pam pap pau pqm + pag pam pau pqm rap rar rhg rup sad sba scn sco shn slh sm snk srn ss st str suk swb syr tce tem tet tgx tht tig tlh tli tn tpi trv ts ttm tum tvl ty tyv diff --git a/make/data/cldr/common/supplemental/languageGroup.xml b/make/data/cldr/common/supplemental/languageGroup.xml index 5bdb5ebe8d1..a854fd4de46 100644 --- a/make/data/cldr/common/supplemental/languageGroup.xml +++ b/make/data/cldr/common/supplemental/languageGroup.xml @@ -1,6 +1,6 @@ - - - + + @@ -287,7 +287,7 @@ not be patched by hand, as any changes made in that fashion may be lost. - + @@ -961,7 +961,7 @@ not be patched by hand, as any changes made in that fashion may be lost. - + @@ -972,6 +972,28 @@ not be patched by hand, as any changes made in that fashion may be lost. 
+ + + + + + + + + + + + + + + + + + + + + + @@ -1199,7 +1221,7 @@ not be patched by hand, as any changes made in that fashion may be lost. - + @@ -1325,7 +1347,7 @@ not be patched by hand, as any changes made in that fashion may be lost. - + @@ -1387,11 +1409,11 @@ not be patched by hand, as any changes made in that fashion may be lost. - + - + @@ -1529,7 +1551,7 @@ not be patched by hand, as any changes made in that fashion may be lost. - + @@ -1928,8 +1950,6 @@ not be patched by hand, as any changes made in that fashion may be lost. - - @@ -1984,8 +2004,8 @@ not be patched by hand, as any changes made in that fashion may be lost. - - + + @@ -2084,10 +2104,8 @@ not be patched by hand, as any changes made in that fashion may be lost. - - - - + + @@ -2168,10 +2186,10 @@ not be patched by hand, as any changes made in that fashion may be lost. - - + + @@ -2297,7 +2315,7 @@ not be patched by hand, as any changes made in that fashion may be lost. - + @@ -2481,9 +2499,9 @@ not be patched by hand, as any changes made in that fashion may be lost. - + - + @@ -2709,7 +2727,7 @@ not be patched by hand, as any changes made in that fashion may be lost. - + @@ -2717,9 +2735,9 @@ not be patched by hand, as any changes made in that fashion may be lost. - + - + @@ -2877,7 +2895,7 @@ not be patched by hand, as any changes made in that fashion may be lost. - + @@ -3299,7 +3317,7 @@ not be patched by hand, as any changes made in that fashion may be lost. - + @@ -3370,8 +3388,8 @@ not be patched by hand, as any changes made in that fashion may be lost. - - + + @@ -3407,7 +3425,7 @@ not be patched by hand, as any changes made in that fashion may be lost. - + @@ -3417,9 +3435,9 @@ not be patched by hand, as any changes made in that fashion may be lost. - + - + @@ -3439,7 +3457,7 @@ not be patched by hand, as any changes made in that fashion may be lost. - + @@ -3481,7 +3499,7 @@ not be patched by hand, as any changes made in that fashion may be lost. 
- + @@ -3511,7 +3529,7 @@ not be patched by hand, as any changes made in that fashion may be lost. - + @@ -3550,6 +3568,24 @@ not be patched by hand, as any changes made in that fashion may be lost. + + + + + + + + + + + + + + + + + + @@ -3599,9 +3635,9 @@ not be patched by hand, as any changes made in that fashion may be lost. - + - + @@ -3780,5 +3816,6147 @@ not be patched by hand, as any changes made in that fashion may be lost. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/make/data/cldr/common/supplemental/supplementalData.xml b/make/data/cldr/common/supplemental/supplementalData.xml index 8c390b5477c..cc5d5342568 100644 --- a/make/data/cldr/common/supplemental/supplementalData.xml +++ b/make/data/cldr/common/supplemental/supplementalData.xml @@ -546,7 +546,7 @@ The printed version of ISO-4217:2001 - + @@ -958,7 +958,7 @@ The printed version of ISO-4217:2001 - + @@ -1238,7 +1238,7 @@ XXX Code for transations where no currency is involved - + @@ -1311,6 +1311,7 @@ XXX Code for transations where no currency is involved + @@ -1532,7 +1533,7 @@ XXX Code for transations where no currency is involved - + @@ -1847,6 +1848,7 @@ XXX Code for transations where no currency is involved + @@ -2804,6 +2806,9 @@ XXX Code for transations where no currency is involved + + + @@ -3209,6 +3214,7 @@ XXX Code for transations where no currency is involved + @@ -3346,6 +3352,7 @@ XXX Code for transations where no currency is involved + @@ -3358,7 +3365,6 @@ XXX Code for transations where no currency is involved - @@ -3372,6 +3378,7 @@ XXX Code for transations where no currency is involved + @@ -3449,6 +3456,7 @@ XXX Code for transations where no currency is involved + @@ -3517,7 +3525,7 @@ XXX Code for transations where no currency is 
involved - + @@ -3732,7 +3740,7 @@ XXX Code for transations where no currency is involved - + @@ -3882,6 +3890,7 @@ XXX Code for transations where no currency is involved + @@ -4102,6 +4111,7 @@ XXX Code for transations where no currency is involved + @@ -4177,10 +4187,11 @@ XXX Code for transations where no currency is involved - + + @@ -4398,14 +4409,15 @@ XXX Code for transations where no currency is involved - - + + + @@ -4639,101 +4651,101 @@ XXX Code for transations where no currency is involved - - - - - + + + + + - + - + - + - + - + - + - + - + - - + + - + - + - - + + - + - + - - + + @@ -4868,7 +4880,7 @@ XXX Code for transations where no currency is involved + regions="AD AM AO AT AW BE BF BJ BL BR CG CI CV CW DE EE FR GA GF GN GP GW HR IL IT KZ MC MD MF MQ MZ NC NL PM PT RE RO SI SR ST TG TR WF YT"/> @@ -5407,11 +5419,24 @@ XXX Code for transations where no currency is involved - + + + + + + + + + + + + + + und hu ja km ko mn si ta te vi yue zh @@ -5584,11 +5609,14 @@ XXX Code for transations where no currency is involved Dutch is spoken as a mother tongue by about 60% of the Surinamese, while most others speak it as a second or third language. main language of trade and comm. in Isan region, except ... media where it gives way to Thai; now largely an unwritten language. 10% writing pop estimated in absence of other data - primarily written using an Arabic-derived alphabet + and https://islandstudies.com/files/2016/11/Guernsey-Herm-Sark.pdf - extrapolated GDP from per capita x population understood by 10 million, perhaps. Figure is questionable writing pop artificially set to 5% see also: http://en.wikipedia.org/wiki/Low_German (understood by 10 million people, and native to about 3 million people all around northern Germany) [missing] See the 2006 language survey data for 2nd langs = Shimaore See the 2006 language survey data for 2nd langs Common lingua franca, widely used. High literacy. 
+ but subtracting 270,000 per https://en.wikipedia.org/wiki/Swiss_Italian + [missing] [missing] [missing] 98.8% speak Spanish. Also, https://www.cia.gov/library/publications/the-world-factbook/geos/sp.html diff --git a/make/data/cldr/common/supplemental/supplementalMetadata.xml b/make/data/cldr/common/supplemental/supplementalMetadata.xml index 064239d81b6..e704136950f 100644 --- a/make/data/cldr/common/supplemental/supplementalMetadata.xml +++ b/make/data/cldr/common/supplemental/supplementalMetadata.xml @@ -830,7 +830,7 @@ For terms of use, see http://www.unicode.org/copyright.html - + @@ -1129,7 +1129,7 @@ For terms of use, see http://www.unicode.org/copyright.html - + @@ -1799,13 +1799,13 @@ For terms of use, see http://www.unicode.org/copyright.html hectare acre + + square-meter + square-foot + millimole-per-liter milligram-ofglucose-per-deciliter @@ -443,6 +447,15 @@ For terms of use, see http://www.unicode.org/copyright.html kilometer-per-hour mile-per-hour + + millimeter-per-hour + centimeter-per-hour + inch-per-hour + + + centimeter-per-hour + inch-per-hour + kilometer-per-hour meter-per-second diff --git a/make/data/cldr/common/supplemental/windowsZones.xml b/make/data/cldr/common/supplemental/windowsZones.xml index 75b7dff71b0..b49266ef9e3 100644 --- a/make/data/cldr/common/supplemental/windowsZones.xml +++ b/make/data/cldr/common/supplemental/windowsZones.xml @@ -70,13 +70,13 @@ For terms of use, see http://www.unicode.org/copyright.html - - + + - + @@ -98,7 +98,7 @@ For terms of use, see http://www.unicode.org/copyright.html - + @@ -108,7 +108,7 @@ For terms of use, see http://www.unicode.org/copyright.html - + diff --git a/make/data/hotspot-symbols/symbols-shared b/make/data/hotspot-symbols/symbols-shared index ab6adf06d4d..c5b13ef1ee8 100644 --- a/make/data/hotspot-symbols/symbols-shared +++ b/make/data/hotspot-symbols/symbols-shared @@ -1,5 +1,5 @@ # -# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 
+# Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -30,5 +30,6 @@ jio_vsnprintf JNI_CreateJavaVM JNI_GetCreatedJavaVMs JNI_GetDefaultJavaVMInitArgs +JVM_IsForeignLinkerSupported JVM_FindClassFromBootLoader JVM_InitAgentProperties diff --git a/make/data/hotspot-symbols/symbols-unix b/make/data/hotspot-symbols/symbols-unix index c08ce4590fc..fb7644b5303 100644 --- a/make/data/hotspot-symbols/symbols-unix +++ b/make/data/hotspot-symbols/symbols-unix @@ -1,5 +1,5 @@ # -# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -217,6 +217,8 @@ JVM_DefineModule JVM_SetBootLoaderUnnamedModule # Virtual thread notifications for JVMTI +JVM_VirtualThreadStart +JVM_VirtualThreadEnd JVM_VirtualThreadMount JVM_VirtualThreadUnmount JVM_VirtualThreadHideFrames diff --git a/make/devkit/createLibffiBundle.sh b/make/devkit/createLibffiBundle.sh new file mode 100644 index 00000000000..100dcc1fe0d --- /dev/null +++ b/make/devkit/createLibffiBundle.sh @@ -0,0 +1,113 @@ +#!/bin/bash +# +# Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. 
+# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# + +# This script generates a libffi bundle. On linux by building it from source +# using a devkit, which should match the devkit used to build the JDK. +# +# Set MAKE_ARGS to add parameters to make. Ex: +# +# $ MAKE_ARGS=-j32 bash createLibffiBundle.sh +# +# The script tries to behave well on multiple invocations, only performing steps +# not already done. To redo a step, manually delete the target files from that +# step. +# +# Note that the libtool and texinfo packages are needed to build libffi +# $ sudo apt install libtool texinfo + +LIBFFI_VERSION=3.4.2 + +BUNDLE_NAME=libffi-$LIBFFI_VERSION.tar.gz + +SCRIPT_FILE="$(basename $0)" +SCRIPT_DIR="$(cd "$(dirname $0)" > /dev/null && pwd)" +OUTPUT_DIR="${SCRIPT_DIR}/../../build/libffi" +SRC_DIR="$OUTPUT_DIR/src" +DOWNLOAD_DIR="$OUTPUT_DIR/download" +INSTALL_DIR="$OUTPUT_DIR/install" +IMAGE_DIR="$OUTPUT_DIR/image" + +USAGE="$0 " + +if [ "$1" = "" ]; then + echo $USAGE + exit 1 +fi +DEVKIT_DIR="$1" + +# Download source distros +mkdir -p $DOWNLOAD_DIR +cd $DOWNLOAD_DIR +SOURCE_TAR=v$LIBFFI_VERSION.tar.gz +if [ ! 
-f $SOURCE_TAR ]; then + wget https://github.com/libffi/libffi/archive/refs/tags/v$LIBFFI_VERSION.tar.gz +fi + +# Unpack src +mkdir -p $SRC_DIR +cd $SRC_DIR +LIBFFI_DIRNAME=libffi-$LIBFFI_VERSION +LIBFFI_DIR=$SRC_DIR/$LIBFFI_DIRNAME +if [ ! -d $LIBFFI_DIRNAME ]; then + echo "Unpacking $SOURCE_TAR" + tar xf $DOWNLOAD_DIR/$SOURCE_TAR +fi + +# Build +cd $LIBFFI_DIR +if [ ! -e $LIBFFI_DIR/configure ]; then + bash ./autogen.sh +fi +# For Linux/x86, add --build=i686-pc-linux-gnu CFLAGS=-m32 CXXFLAGS=-m32 LDFLAGS=-m32 +bash ./configure --prefix=$INSTALL_DIR CC=$DEVKIT_DIR/bin/gcc CXX=$DEVKIT_DIR/bin/g++ + +# Run with nice to keep system usable during build. +nice make $MAKE_ARGS install + +mkdir -p $IMAGE_DIR +# Extract what we need into an image +if [ ! -e $IMAGE_DIR/lib/libffi.so ]; then + echo "Copying libffi.so* to image" + mkdir -p $IMAGE_DIR/lib + # For Linux/x86 it's under /lib/ instead of /lib64/ + cp -a $INSTALL_DIR/lib64/libffi.so* $IMAGE_DIR/lib/ +fi +if [ ! -e $IMAGE_DIR/include/ ]; then + echo "Copying include to image" + mkdir -p $IMAGE_DIR/include + cp -a $INSTALL_DIR/include/. $IMAGE_DIR/include/ +fi +if [ ! -e $IMAGE_DIR/$SCRIPT_FILE ]; then + echo "Copying this script to image" + cp -a $SCRIPT_DIR/$SCRIPT_FILE $IMAGE_DIR/ +fi + +# Create bundle +if [ ! 
-e $OUTPUT_DIR/$BUNDLE_NAME ]; then + echo "Creating $OUTPUT_DIR/$BUNDLE_NAME" + cd $IMAGE_DIR + tar zcf $OUTPUT_DIR/$BUNDLE_NAME * +fi diff --git a/make/jdk/src/classes/build/tools/cldrconverter/OtherCommonLocales.properties b/make/jdk/src/classes/build/tools/cldrconverter/OtherCommonLocales.properties index b7bb73388a4..354bcd13c15 100644 --- a/make/jdk/src/classes/build/tools/cldrconverter/OtherCommonLocales.properties +++ b/make/jdk/src/classes/build/tools/cldrconverter/OtherCommonLocales.properties @@ -135,6 +135,3 @@ xog=Soga yav=Yangben yi=Yiddish zgh=Standard Moroccan Tamazight - -# Not listed, but existed -sr-Latn=Serbian (Latin) diff --git a/make/jdk/src/classes/build/tools/cldrconverter/SupplementDataParseHandler.java b/make/jdk/src/classes/build/tools/cldrconverter/SupplementDataParseHandler.java index 11547ccb38e..a2b5bd39ae2 100644 --- a/make/jdk/src/classes/build/tools/cldrconverter/SupplementDataParseHandler.java +++ b/make/jdk/src/classes/build/tools/cldrconverter/SupplementDataParseHandler.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -70,6 +70,9 @@ class SupplementDataParseHandler extends AbstractLDMLHandler { // Map<"preferred"/"allowed", Map<"skeleton", SortedSet<"regions">>> private final Map>> inputSkeletonMap; + // "component" specific to this parent locale chain + private String currentParentLocaleComponent; + SupplementDataParseHandler() { firstDayMap = new HashMap<>(); minDaysMap = new HashMap<>(); @@ -163,11 +166,19 @@ class SupplementDataParseHandler extends AbstractLDMLHandler { minDaysMap.put(attributes.getValue("territories"), attributes.getValue("count")); } break; + case "parentLocales": + currentParentLocaleComponent = attributes.getValue("component"); + pushContainer(qName, attributes); + break; case "parentLocale": if (!isIgnored(attributes)) { - parentLocalesMap.put( - attributes.getValue("parent").replaceAll("_", "-"), - attributes.getValue("locales").replaceAll("_", "-")); + // Ignore component for now, otherwise "zh-Hant" falling back to "zh" would happen + // https://github.com/unicode-org/cldr/pull/2664 + if (currentParentLocaleComponent == null) { + parentLocalesMap.put( + attributes.getValue("parent").replaceAll("_", "-"), + attributes.getValue("locales").replaceAll("_", "-")); + } } break; case "hours": diff --git a/make/jdk/src/classes/build/tools/generatelsrequivmaps/EquivMapsGenerator.java b/make/jdk/src/classes/build/tools/generatelsrequivmaps/EquivMapsGenerator.java index d196230bfbe..79597f4d8b2 100644 --- a/make/jdk/src/classes/build/tools/generatelsrequivmaps/EquivMapsGenerator.java +++ b/make/jdk/src/classes/build/tools/generatelsrequivmaps/EquivMapsGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ import java.util.Locale; import java.util.Map; import java.util.TimeZone; import java.util.TreeMap; -import java.util.stream.Collectors; +import java.util.regex.Pattern; /** * This tool reads the IANA Language Subtag Registry data file downloaded from @@ -136,10 +136,29 @@ public class EquivMapsGenerator { } } else { // language, extlang, legacy, and redundant if (!initialLanguageMap.containsKey(preferred)) { - sb = new StringBuilder(preferred); - sb.append(','); - sb.append(tag); - initialLanguageMap.put(preferred, sb); + // IANA update 4/13 introduced case where a preferred value + // can have a preferred value itself. + // eg: ar-ajp has pref ajp which has pref apc + boolean foundInOther = false; + Pattern pattern = Pattern.compile(","+preferred+"(,|$)"); + // Check if current pref exists inside a value for another pref + List doublePrefs = initialLanguageMap + .values() + .stream() + .filter(e -> pattern.matcher(e.toString()).find()) + .toList(); + for (StringBuilder otherPrefVal : doublePrefs) { + otherPrefVal.append(","); + otherPrefVal.append(tag); + foundInOther = true; + } + if (!foundInOther) { + // does not exist in any other pref's values, so add as new entry + sb = new StringBuilder(preferred); + sb.append(','); + sb.append(tag); + initialLanguageMap.put(preferred, sb); + } } else { sb = initialLanguageMap.get(preferred); sb.append(','); @@ -156,7 +175,7 @@ public class EquivMapsGenerator { // "yue" is defined both as extlang and redundant. Remove the dup. 
subtags = Arrays.stream(initialLanguageMap.get(preferred).toString().split(",")) .distinct() - .collect(Collectors.toList()) + .toList() .toArray(new String[0]); if (subtags.length == 2) { @@ -241,7 +260,7 @@ public class EquivMapsGenerator { + " static final Map multiEquivsMap;\n" + " static final Map regionVariantEquivMap;\n\n" + " static {\n" - + " singleEquivMap = new HashMap<>("; + + " singleEquivMap = HashMap.newHashMap("; private static final String footerText = " }\n\n" @@ -263,11 +282,11 @@ public class EquivMapsGenerator { Paths.get(fileName))) { writer.write(getOpenJDKCopyright()); writer.write(headerText - + (int)(sortedLanguageMap1.size() / 0.75f + 1) + ");\n" - + " multiEquivsMap = new HashMap<>(" - + (int)(sortedLanguageMap2.size() / 0.75f + 1) + ");\n" - + " regionVariantEquivMap = new HashMap<>(" - + (int)(sortedRegionVariantMap.size() / 0.75f + 1) + ");\n\n" + + sortedLanguageMap1.size() + ");\n" + + " multiEquivsMap = HashMap.newHashMap(" + + sortedLanguageMap2.size() + ");\n" + + " regionVariantEquivMap = HashMap.newHashMap(" + + sortedRegionVariantMap.size() + ");\n\n" + " // This is an auto-generated file and should not be manually edited.\n" + " // LSR Revision: " + LSRrevisionDate); writer.newLine(); diff --git a/make/langtools/src/classes/build/tools/symbolgenerator/CreateSymbols.java b/make/langtools/src/classes/build/tools/symbolgenerator/CreateSymbols.java index 5e3cd01a4b8..03cb62429c1 100644 --- a/make/langtools/src/classes/build/tools/symbolgenerator/CreateSymbols.java +++ b/make/langtools/src/classes/build/tools/symbolgenerator/CreateSymbols.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -2384,7 +2384,8 @@ public class CreateSymbols { MethodDescription method = (MethodDescription) feature; method.methodParameters = new ArrayList<>(); for (MethodParameters_attribute.Entry e : params.method_parameter_table) { - String name = cf.constant_pool.getUTF8Value(e.name_index); + String name = e.name_index == 0 ? null + : cf.constant_pool.getUTF8Value(e.name_index); MethodDescription.MethodParam param = new MethodDescription.MethodParam(e.flags, name); method.methodParameters.add(param); diff --git a/make/modules/java.base/Lib.gmk b/make/modules/java.base/Lib.gmk index 93c0a361671..d6ca2932914 100644 --- a/make/modules/java.base/Lib.gmk +++ b/make/modules/java.base/Lib.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -215,3 +215,18 @@ $(eval $(call SetupJdkLibrary, BUILD_SYSLOOKUPLIB, \ )) TARGETS += $(BUILD_SYSLOOKUPLIB) + +################################################################################ +# Create fallback linker lib + +ifeq ($(ENABLE_FALLBACK_LINKER), true) + $(eval $(call SetupJdkLibrary, BUILD_LIBFALLBACKLINKER, \ + NAME := fallbackLinker, \ + CFLAGS := $(CFLAGS_JDKLIB) $(LIBFFI_CFLAGS), \ + LDFLAGS := $(LDFLAGS_JDKLIB) \ + $(call SET_SHARED_LIBRARY_ORIGIN), \ + LIBS := $(LIBFFI_LIBS), \ + )) + + TARGETS += $(BUILD_LIBFALLBACKLINKER) +endif diff --git a/make/modules/java.base/gensrc/GensrcMisc.gmk b/make/modules/java.base/gensrc/GensrcMisc.gmk index b2bae11baa3..e37aa50d41c 100644 --- a/make/modules/java.base/gensrc/GensrcMisc.gmk +++ b/make/modules/java.base/gensrc/GensrcMisc.gmk @@ -50,11 +50,33 @@ $(eval $(call SetupTextFileProcessing, BUILD_VERSION_JAVA, \ @@VENDOR_URL_VM_BUG@@ => $(VENDOR_URL_VM_BUG), \ )) + +# Normalize OPENJDK_TARGET_CPU name to match jdk.internal.util.Architecture enum +ifneq ($(filter $(OPENJDK_TARGET_CPU), ppc64le), ) + OPENJDK_TARGET_ARCH_CANONICAL = ppc64 +else ifneq ($(filter $(OPENJDK_TARGET_CPU), s390x), ) + OPENJDK_TARGET_ARCH_CANONICAL = s390 +else ifneq ($(filter $(OPENJDK_TARGET_CPU), x86_64 amd64), ) + OPENJDK_TARGET_ARCH_CANONICAL = x64 +else + OPENJDK_TARGET_ARCH_CANONICAL := $(OPENJDK_TARGET_CPU) +endif + +# Normalize OPENJDK_TARGET_OS operating system name to match jdk.internal.util.OperatingSystem enum +ifeq ($(OPENJDK_TARGET_OS), macosx) + OPENJDK_TARGET_OS_CANONICAL = macos +else + OPENJDK_TARGET_OS_CANONICAL := $(OPENJDK_TARGET_OS) +endif + $(eval $(call SetupTextFileProcessing, BUILD_PLATFORMPROPERTIES_JAVA, \ - SOURCE_FILES := $(TOPDIR)/src/java.base/share/classes/jdk/internal/util/OperatingSystemProps.java.template, \ - OUTPUT_FILE := $(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/util/OperatingSystemProps.java, \ + SOURCE_FILES := 
$(TOPDIR)/src/java.base/share/classes/jdk/internal/util/PlatformProps.java.template, \ + OUTPUT_FILE := $(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/util/PlatformProps.java, \ REPLACEMENTS := \ - @@OPENJDK_TARGET_OS@@ => $(OPENJDK_TARGET_OS), \ + @@OPENJDK_TARGET_OS@@ => $(OPENJDK_TARGET_OS_CANONICAL) ; \ + @@OPENJDK_TARGET_CPU@@ => $(OPENJDK_TARGET_ARCH_CANONICAL) ; \ + @@OPENJDK_TARGET_CPU_ENDIAN@@ => $(OPENJDK_TARGET_CPU_ENDIAN) ; \ + @@OPENJDK_TARGET_CPU_BITS@@ => $(OPENJDK_TARGET_CPU_BITS), \ )) TARGETS += $(BUILD_VERSION_JAVA) $(BUILD_PLATFORMPROPERTIES_JAVA) diff --git a/make/modules/java.desktop/lib/Awt2dLibraries.gmk b/make/modules/java.desktop/lib/Awt2dLibraries.gmk index b2139188bfa..a0d70333108 100644 --- a/make/modules/java.desktop/lib/Awt2dLibraries.gmk +++ b/make/modules/java.desktop/lib/Awt2dLibraries.gmk @@ -120,12 +120,7 @@ ifeq ($(call isTargetOs, windows), true) LIBAWT_VERSIONINFO_RESOURCE := $(TOPDIR)/src/$(MODULE)/windows/native/libawt/windows/awt.rc endif -# Turn off all warnings for debug_mem.c This is needed because the specific warning -# about initializing a declared 'extern' cannot be turned off individually. Only -# applies to debug builds. 
This limitation in gcc is tracked in -# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=45977 ifeq ($(TOOLCHAIN_TYPE), gcc) - BUILD_LIBAWT_debug_mem.c_CFLAGS := -w # This option improves performance of MaskFill in Java2D by 20% for some gcc LIBAWT_CFLAGS += -fgcse-after-reload endif @@ -138,24 +133,18 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBAWT, \ OPTIMIZATION := HIGHEST, \ CFLAGS := $(CFLAGS_JDKLIB) $(LIBAWT_CFLAGS), \ EXTRA_HEADER_DIRS := $(LIBAWT_EXTRA_HEADER_DIRS), \ - DISABLED_WARNINGS_gcc_awt_ImagingLib.c := unused-function sign-compare, \ DISABLED_WARNINGS_gcc_awt_LoadLibrary.c := unused-result, \ - DISABLED_WARNINGS_gcc_awt_Mlib.c := unused-function, \ - DISABLED_WARNINGS_gcc_awt_parseImage.c := sign-compare unused-function, \ - DISABLED_WARNINGS_gcc_debug_trace.c := unused-function, \ + DISABLED_WARNINGS_gcc_debug_mem.c := format-nonliteral, \ DISABLED_WARNINGS_gcc_ProcessPath.c := maybe-uninitialized, \ DISABLED_WARNINGS_gcc_Region.c := maybe-uninitialized, \ DISABLED_WARNINGS_gcc_SurfaceData.c := unused-value, \ DISABLED_WARNINGS_gcc_TransformHelper.c := sign-compare, \ - DISABLED_WARNINGS_clang_awt_ImagingLib.c := sign-compare deprecated-non-prototype, \ - DISABLED_WARNINGS_clang_awt_parseImage.c := sign-compare, \ - DISABLED_WARNINGS_clang_debug_mem.c := extern-initializer format-nonliteral, \ + DISABLED_WARNINGS_clang_awt_ImagingLib.c := deprecated-non-prototype, \ DISABLED_WARNINGS_clang_debug_trace.c := format-nonliteral, \ DISABLED_WARNINGS_clang_Trace.c := format-nonliteral, \ DISABLED_WARNINGS_clang_TransformHelper.c := sign-compare, \ DISABLED_WARNINGS_microsoft := 4244 4996, \ DISABLED_WARNINGS_microsoft_awt_Toolkit.cpp := 4267, \ - DISABLED_WARNINGS_microsoft_OGLContext.c := 4267, \ LDFLAGS := $(LDFLAGS_JDKLIB) $(call SET_SHARED_LIBRARY_ORIGIN), \ LDFLAGS_macosx := -L$(INSTALL_LIBRARIES_HERE), \ LDFLAGS_windows := -delayload:user32.dll -delayload:gdi32.dll \ diff --git a/make/modules/jdk.compiler/Gendata.gmk 
b/make/modules/jdk.compiler/Gendata.gmk index 24d4707c54a..e005e703f2c 100644 --- a/make/modules/jdk.compiler/Gendata.gmk +++ b/make/modules/jdk.compiler/Gendata.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -45,6 +45,7 @@ CT_DATA_DESCRIPTION += $(MODULE_SRC)/share/data/symbols/symbols COMPILECREATESYMBOLS_ADD_EXPORTS := \ --add-exports java.base/jdk.internal.javac=java.compiler.interim,jdk.compiler.interim \ + --add-exports jdk.internal.opt/jdk.internal.opt=jdk.compiler.interim,jdk.javadoc.interim \ --add-exports jdk.compiler.interim/com.sun.tools.javac.api=ALL-UNNAMED \ --add-exports jdk.compiler.interim/com.sun.tools.javac.code=ALL-UNNAMED \ --add-exports jdk.compiler.interim/com.sun.tools.javac.util=ALL-UNNAMED \ diff --git a/make/modules/jdk.javadoc/Gendata.gmk b/make/modules/jdk.javadoc/Gendata.gmk index 501b0540c53..412559c5fe1 100644 --- a/make/modules/jdk.javadoc/Gendata.gmk +++ b/make/modules/jdk.javadoc/Gendata.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -43,6 +43,7 @@ CT_DATA_DESCRIPTION += $(TOPDIR)/src/jdk.compiler/share/data/symbols/symbols COMPILECREATESYMBOLS_ADD_EXPORTS := \ --add-exports java.base/jdk.internal=java.compiler.interim,jdk.compiler.interim \ + --add-exports jdk.internal.opt/jdk.internal.opt=jdk.compiler.interim,jdk.javadoc.interim \ --add-exports jdk.compiler.interim/com.sun.tools.javac.api=ALL-UNNAMED \ --add-exports jdk.compiler.interim/com.sun.tools.javac.code=ALL-UNNAMED \ --add-exports jdk.compiler.interim/com.sun.tools.javac.util=ALL-UNNAMED \ diff --git a/make/test/BuildJtregTestThreadFactory.gmk b/make/test/BuildJtregTestThreadFactory.gmk new file mode 100644 index 00000000000..b096ae303ea --- /dev/null +++ b/make/test/BuildJtregTestThreadFactory.gmk @@ -0,0 +1,65 @@ +# +# Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# + +default: build + +include $(SPEC) +include MakeBase.gmk +include JavaCompilation.gmk + +TARGETS := + +################################################################################ + +TTF_BASEDIR := $(TOPDIR)/test/jtreg_test_thread_factory +TTF_SUPPORT := $(SUPPORT_OUTPUTDIR)/test/jtreg_test_thread_factory +TTF_JAR := $(TTF_SUPPORT)/jtregTestThreadFactory.jar + +$(eval $(call SetupJavaCompilation, BUILD_JTREG_TEST_THREAD_FACTORY, \ + TARGET_RELEASE := $(TARGET_RELEASE_NEWJDK_UPGRADED), \ + SRC := $(TTF_BASEDIR)/src/share/classes, \ + BIN := $(TTF_SUPPORT)/classes, \ + JAR := $(TTF_JAR), \ +)) + +TARGETS += $(BUILD_JTREG_TEST_THREAD_FACTORY) + +################################################################################ +# Targets for building test-image. +################################################################################ + +# Copy to hotspot jtreg test image +$(eval $(call SetupCopyFiles, COPY_TTF, \ + SRC := $(TTF_SUPPORT), \ + DEST := $(TEST_IMAGE_DIR)/jtreg_test_thread_factory, \ + FILES := $(TTF_JAR), \ +)) + +IMAGES_TARGETS += $(COPY_TTF) + +build: $(TARGETS) +images: $(IMAGES_TARGETS) + +.PHONY: all images diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp index df44024d997..afeb19e906e 100644 --- a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp @@ -102,7 +102,7 @@ void Assembler::emit_data64(jlong data, RelocationHolder const& rspec, int format) { - assert(inst_mark() != NULL, "must be inside InstructionMark"); + assert(inst_mark() != nullptr, "must be inside InstructionMark"); // Do not use AbstractAssembler::relocate, which is not intended for // embedded words. Instead, relocate to the enclosing instruction. 
code_section()->relocate(inst_mark(), rspec, format); diff --git a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp index 59d721eb4bf..ca175fe1c47 100644 --- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -44,7 +44,7 @@ void C1SafepointPollStub::emit_code(LIR_Assembler* ce) { __ adr(rscratch1, safepoint_pc); __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset())); - assert(SharedRuntime::polling_page_return_handler_blob() != NULL, + assert(SharedRuntime::polling_page_return_handler_blob() != nullptr, "polling page return stub not created yet"); address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point(); @@ -334,7 +334,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) { Address resolve(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type); address call = __ trampoline_call(resolve); - if (call == NULL) { + if (call == nullptr) { ce->bailout("trampoline stub overflow"); return; } diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp index ef21b759360..b493fbc4a71 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp @@ -111,7 +111,7 @@ LIR_Opr LIR_Assembler::osrBufferPointer() { address LIR_Assembler::float_constant(float f) { address const_addr = __ float_constant(f); - if (const_addr == NULL) { + if (const_addr == nullptr) { bailout("const section overflow"); return __ code()->consts()->start(); } else { @@ -122,7 +122,7 @@ address 
LIR_Assembler::float_constant(float f) { address LIR_Assembler::double_constant(double d) { address const_addr = __ double_constant(d); - if (const_addr == NULL) { + if (const_addr == nullptr) { bailout("const section overflow"); return __ code()->consts()->start(); } else { @@ -132,7 +132,7 @@ address LIR_Assembler::double_constant(double d) { address LIR_Assembler::int_constant(jlong n) { address const_addr = __ long_constant(n); - if (const_addr == NULL) { + if (const_addr == nullptr) { bailout("const section overflow"); return __ code()->consts()->start(); } else { @@ -278,7 +278,7 @@ void LIR_Assembler::osr_entry() { Label L; __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord)); __ cbnz(rscratch1, L); - __ stop("locked object is NULL"); + __ stop("locked object is null"); __ bind(L); } #endif @@ -328,7 +328,7 @@ void LIR_Assembler::clinit_barrier(ciMethod* method) { } void LIR_Assembler::jobject2reg(jobject o, Register reg) { - if (o == NULL) { + if (o == nullptr) { __ mov(reg, zr); } else { __ movoop(reg, o); @@ -336,7 +336,7 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) { } void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) { - address target = NULL; + address target = nullptr; relocInfo::relocType reloc_type = relocInfo::none; switch (patching_id(info)) { @@ -379,7 +379,7 @@ int LIR_Assembler::initial_frame_size_in_bytes() const { int LIR_Assembler::emit_exception_handler() { // generate code for exception handler address handler_base = __ start_a_stub(exception_handler_size()); - if (handler_base == NULL) { + if (handler_base == nullptr) { // not enough space left for the handler bailout("exception handler overflow"); return -1; @@ -427,7 +427,7 @@ int LIR_Assembler::emit_unwind_handler() { } // Perform needed unlocking - MonitorExitStub* stub = NULL; + MonitorExitStub* stub = nullptr; if (method()->is_synchronized()) { monitor_address(0, FrameMap::r0_opr); stub = new MonitorExitStub(FrameMap::r0_opr, true, 0); @@ -455,7 
+455,7 @@ int LIR_Assembler::emit_unwind_handler() { __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id))); // Emit the slow path assembly - if (stub != NULL) { + if (stub != nullptr) { stub->emit_code(this); } @@ -466,7 +466,7 @@ int LIR_Assembler::emit_unwind_handler() { int LIR_Assembler::emit_deopt_handler() { // generate code for exception handler address handler_base = __ start_a_stub(deopt_handler_size()); - if (handler_base == NULL) { + if (handler_base == nullptr) { // not enough space left for the handler bailout("deopt handler overflow"); return -1; @@ -487,7 +487,7 @@ void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) { int pc_offset = code_offset(); flush_debug_info(pc_offset); info->record_debug_info(compilation()->debug_info_recorder(), pc_offset); - if (info->exception_handlers() != NULL) { + if (info->exception_handlers() != nullptr) { compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers()); } } @@ -509,7 +509,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) { } int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { - guarantee(info != NULL, "Shouldn't be NULL"); + guarantee(info != nullptr, "Shouldn't be null"); __ get_polling_page(rscratch1, relocInfo::poll_type); add_debug_info_for_branch(info); // This isn't just debug info: // it's the oop map @@ -604,14 +604,14 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { if (! 
c->as_jobject()) __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix())); else { - const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL); + const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, nullptr); reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false); } } break; case T_ADDRESS: { - const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL); + const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, nullptr); reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false); } case T_INT: @@ -775,7 +775,7 @@ void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool po void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) { LIR_Address* to_addr = dest->as_address_ptr(); - PatchingStub* patch = NULL; + PatchingStub* patch = nullptr; Register compressed_src = rscratch1; if (patch_code != lir_patch_none) { @@ -847,7 +847,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch default: ShouldNotReachHere(); } - if (info != NULL) { + if (info != nullptr) { add_debug_info_for_null_check(null_check_here, info); } } @@ -890,7 +890,7 @@ void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) { - address target = NULL; + address target = nullptr; relocInfo::relocType reloc_type = relocInfo::none; switch (patching_id(info)) { @@ -943,7 +943,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch return; } - if (info != NULL) { + if (info != nullptr) { add_debug_info_for_null_check_here(info); } int null_check_here = code_offset(); @@ -1053,13 +1053,13 @@ void LIR_Assembler::emit_op3(LIR_Op3* op) { void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { #ifdef ASSERT - assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label"); - if (op->block() != NULL) 
_branch_target_blocks.append(op->block()); - if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock()); + assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label"); + if (op->block() != nullptr) _branch_target_blocks.append(op->block()); + if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock()); #endif if (op->cond() == lir_cond_always) { - if (op->info() != NULL) add_debug_info_for_branch(op->info()); + if (op->info() != nullptr) add_debug_info_for_branch(op->info()); __ b(*(op->label())); } else { Assembler::Condition acond; @@ -1288,12 +1288,12 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L if (should_profile) { ciMethod* method = op->profiled_method(); - assert(method != NULL, "Should have method"); + assert(method != nullptr, "Should have method"); int bci = op->profiled_bci(); md = method->method_data_or_null(); - assert(md != NULL, "Sanity"); + assert(md != nullptr, "Sanity"); data = md->bci_to_data(bci); - assert(data != NULL, "need data for type check"); + assert(data != nullptr, "need data for type check"); assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); } Label profile_cast_success, profile_cast_failure; @@ -1375,7 +1375,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L } } else { // perform the fast part of the checking logic - __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); + __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); // call out-of-line instance of __ check_klass_subtype_slow_path(...): __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize))); __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); @@ -1428,12 +1428,12 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { if (should_profile) { ciMethod* method = op->profiled_method(); - 
assert(method != NULL, "Should have method"); + assert(method != nullptr, "Should have method"); int bci = op->profiled_bci(); md = method->method_data_or_null(); - assert(md != NULL, "Sanity"); + assert(md != nullptr, "Sanity"); data = md->bci_to_data(bci); - assert(data != NULL, "need data for type check"); + assert(data != nullptr, "need data for type check"); assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); } Label profile_cast_success, profile_cast_failure, done; @@ -1466,7 +1466,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { // get instance klass (it's already uncompressed) __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); // perform the fast part of the checking logic - __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); + __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); // call out-of-line instance of __ check_klass_subtype_slow_path(...): __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize))); __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); @@ -1620,7 +1620,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L } else if (opr1->is_constant()) { LIR_Opr tmp = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr; - const2reg(opr1, tmp, lir_patch_none, NULL); + const2reg(opr1, tmp, lir_patch_none, nullptr); opr1 = tmp; } @@ -1630,7 +1630,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L } else if (opr2->is_constant()) { LIR_Opr tmp = opr2->type() == T_LONG ? 
FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr; - const2reg(opr2, tmp, lir_patch_none, NULL); + const2reg(opr2, tmp, lir_patch_none, nullptr); opr2 = tmp; } @@ -1641,7 +1641,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L } void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { - assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); + assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); if (left->is_single_cpu()) { Register lreg = left->as_register(); @@ -2033,7 +2033,7 @@ void LIR_Assembler::align_call(LIR_Code code) { } void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { address call = __ trampoline_call(Address(op->addr(), rtype)); - if (call == NULL) { + if (call == nullptr) { bailout("trampoline stub overflow"); return; } @@ -2044,7 +2044,7 @@ void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { address call = __ ic_call(op->addr()); - if (call == NULL) { + if (call == nullptr) { bailout("trampoline stub overflow"); return; } @@ -2055,7 +2055,7 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { void LIR_Assembler::emit_static_call_stub() { address call_pc = __ pc(); address stub = __ start_a_stub(call_stub_size()); - if (stub == NULL) { + if (stub == nullptr) { bailout("static call stub overflow"); return; } @@ -2226,11 +2226,11 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { CodeStub* stub = op->stub(); int flags = op->flags(); - BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; + BasicType basic_type = default_type != nullptr ? 
default_type->element_type()->basic_type() : T_ILLEGAL; if (is_reference_type(basic_type)) basic_type = T_OBJECT; // if we don't know anything, just go through the generic arraycopy - if (default_type == NULL // || basic_type == T_OBJECT + if (default_type == nullptr // || basic_type == T_OBJECT ) { Label done; assert(src == r1 && src_pos == r2, "mismatch in calling convention"); @@ -2242,7 +2242,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { __ str(src, Address(sp, 4*BytesPerWord)); address copyfunc_addr = StubRoutines::generic_arraycopy(); - assert(copyfunc_addr != NULL, "generic arraycopy stub required"); + assert(copyfunc_addr != nullptr, "generic arraycopy stub required"); // The arguments are in java calling convention so we shift them // to C convention @@ -2282,7 +2282,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { return; } - assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); + assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); int elem_size = type2aelembytes(basic_type); int scale = exact_log2(elem_size); @@ -2292,7 +2292,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); - // test for NULL + // test for null if (flags & LIR_OpArrayCopy::src_null_check) { __ cbz(src, *stub->entry()); } @@ -2377,7 +2377,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { __ load_klass(src, src); __ load_klass(dst, dst); - __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL); + __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr); __ PUSH(src, dst); __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); @@ -2389,7 +2389,7 @@ void 
LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { __ POP(src, dst); address copyfunc_addr = StubRoutines::checkcast_arraycopy(); - if (copyfunc_addr != NULL) { // use stub if available + if (copyfunc_addr != nullptr) { // use stub if available // src is not a sub class of dst so we have to do a // per-element check. @@ -2559,7 +2559,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { Register hdr = op->hdr_opr()->as_register(); Register lock = op->lock_opr()->as_register(); if (UseHeavyMonitors) { - if (op->info() != NULL) { + if (op->info() != nullptr) { add_debug_info_for_null_check_here(op->info()); __ null_check(obj, -1); } @@ -2568,7 +2568,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); // add debug info for NullPointerException only if one is possible int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry()); - if (op->info() != NULL) { + if (op->info() != nullptr) { add_debug_info_for_null_check(null_check_offset, op->info()); } // done @@ -2586,7 +2586,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { Register result = op->result_opr()->as_pointer_register(); CodeEmitInfo* info = op->info(); - if (info != NULL) { + if (info != nullptr) { add_debug_info_for_null_check_here(info); } @@ -2605,9 +2605,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { // Update counter for all call types ciMethodData* md = method->method_data_or_null(); - assert(md != NULL, "Sanity"); + assert(md != nullptr, "Sanity"); ciProfileData* data = md->bci_to_data(bci); - assert(data != NULL && data->is_CounterData(), "need CounterData for calls"); + assert(data != nullptr && data->is_CounterData(), "need CounterData for calls"); assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); Register mdo = op->mdo()->as_register(); __ mov_metadata(mdo, md->constant_encoding()); @@ -2620,7 +2620,7 @@ void 
LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { assert_different_registers(mdo, recv); assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); ciKlass* known_klass = op->known_holder(); - if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { + if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) { // We know the type that will be seen at this call site; we can // statically update the MethodData* rather than needing to do // dynamic tests on the receiver type @@ -2645,7 +2645,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { // VirtualCallData rather than just the first time for (i = 0; i < VirtualCallData::row_limit(); i++) { ciKlass* receiver = vc_data->receiver(i); - if (receiver == NULL) { + if (receiver == nullptr) { Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); __ mov_metadata(rscratch1, known_klass->constant_encoding()); __ lea(rscratch2, recv_addr); @@ -2712,7 +2712,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { Label update, next, none; bool do_null = !not_null; - bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; + bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; assert(do_null || do_update, "why are we here?"); @@ -2748,7 +2748,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { if (do_update) { #ifdef ASSERT - if (exact_klass != NULL) { + if (exact_klass != nullptr) { Label ok; __ load_klass(tmp, tmp); __ mov_metadata(rscratch1, exact_klass->constant_encoding()); @@ -2759,8 +2759,8 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { } #endif if (!no_conflict) { - if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) { - if (exact_klass != NULL) { + if (exact_klass == nullptr || 
TypeEntries::is_type_none(current_klass)) { + if (exact_klass != nullptr) { __ mov_metadata(tmp, exact_klass->constant_encoding()); } else { __ load_klass(tmp, tmp); @@ -2789,7 +2789,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { __ cbz(rscratch1, next); } } else { - assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && + assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); __ ldr(tmp, mdo_addr); @@ -2810,7 +2810,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { } } else { // There's a single possible klass at this profile point - assert(exact_klass != NULL, "should be"); + assert(exact_klass != nullptr, "should be"); if (TypeEntries::is_type_none(current_klass)) { __ mov_metadata(tmp, exact_klass->constant_encoding()); __ ldr(rscratch2, mdo_addr); @@ -2839,7 +2839,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { // first time here. Set profile type. __ str(tmp, mdo_addr); } else { - assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && + assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); __ ldr(tmp, mdo_addr); @@ -2903,7 +2903,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* arg __ blr(rscratch1); } - if (info != NULL) { + if (info != nullptr) { add_call_info_here(info); } __ post_call_nop(); diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp index c31ad9b211c..43ec189255f 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -60,7 +60,7 @@ friend class ArrayCopyStub; void casw(Register addr, Register newval, Register cmpval); void casl(Register addr, Register newval, Register cmpval); - void poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info = NULL); + void poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info = nullptr); static const int max_tableswitches = 20; struct tableswitch switches[max_tableswitches]; diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp index 2d12590028a..c32f9759463 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp @@ -107,11 +107,11 @@ LIR_Opr LIRGenerator::rlock_byte(BasicType type) { bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const { - if (v->type()->as_IntConstant() != NULL) { + if (v->type()->as_IntConstant() != nullptr) { return v->type()->as_IntConstant()->value() == 0L; - } else if (v->type()->as_LongConstant() != NULL) { + } else if (v->type()->as_LongConstant() != nullptr) { return v->type()->as_LongConstant()->value() == 0L; - } else if (v->type()->as_ObjectConstant() != NULL) { + } else if (v->type()->as_ObjectConstant() != nullptr) { return v->type()->as_ObjectConstant()->value()->is_null_object(); } else { return false; @@ -120,11 +120,11 @@ bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const { bool LIRGenerator::can_inline_as_constant(Value v) const { // FIXME: Just a guess - if (v->type()->as_IntConstant() != NULL) { + if (v->type()->as_IntConstant() != nullptr) { return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value()); - } else if (v->type()->as_LongConstant() != NULL) { + } else if (v->type()->as_LongConstant() != nullptr) { return v->type()->as_LongConstant()->value() == 0L; - } else if 
(v->type()->as_ObjectConstant() != NULL) { + } else if (v->type()->as_ObjectConstant() != nullptr) { return v->type()->as_ObjectConstant()->value()->is_null_object(); } else { return false; @@ -315,7 +315,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) { // "lock" stores the address of the monitor stack slot, so this is not an oop LIR_Opr lock = new_register(T_INT); - CodeEmitInfo* info_for_exception = NULL; + CodeEmitInfo* info_for_exception = nullptr; if (x->needs_null_check()) { info_for_exception = state_for(x); } @@ -466,7 +466,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) { } } rlock_result(x); - arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL); + arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr); } } @@ -512,9 +512,9 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) { LIR_Opr ill = LIR_OprFact::illegalOpr; if (x->op() == Bytecodes::_irem) { - __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL); + __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, nullptr); } else if (x->op() == Bytecodes::_idiv) { - __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL); + __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, nullptr); } } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) { @@ -547,7 +547,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) { void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) { // when an operand with use count 1 is the left operand, then it is // likely that no move for 2-operand-LIR-form is necessary - if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) { + if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) { x->swap_operands(); } @@ -800,7 +800,7 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) { LIR_Opr calc_result = rlock_result(x); 
LIR_Opr result_reg = result_register_for(x->type()); - CallingConvention* cc = NULL; + CallingConvention* cc = nullptr; if (x->id() == vmIntrinsics::_dpow) { LIRItem value1(x->argument_at(1), this); @@ -822,49 +822,49 @@ void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) { switch (x->id()) { case vmIntrinsics::_dexp: - if (StubRoutines::dexp() != NULL) { + if (StubRoutines::dexp() != nullptr) { __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args()); } else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args()); } break; case vmIntrinsics::_dlog: - if (StubRoutines::dlog() != NULL) { + if (StubRoutines::dlog() != nullptr) { __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args()); } else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args()); } break; case vmIntrinsics::_dlog10: - if (StubRoutines::dlog10() != NULL) { + if (StubRoutines::dlog10() != nullptr) { __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args()); } else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args()); } break; case vmIntrinsics::_dpow: - if (StubRoutines::dpow() != NULL) { + if (StubRoutines::dpow() != nullptr) { __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args()); } else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args()); } break; case vmIntrinsics::_dsin: - if (StubRoutines::dsin() != NULL) { + if (StubRoutines::dsin() != nullptr) { __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args()); } else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args()); } break; case vmIntrinsics::_dcos: - if (StubRoutines::dcos() != NULL) { + if (StubRoutines::dcos() != nullptr) 
{ __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args()); } else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args()); } break; case vmIntrinsics::_dtan: - if (StubRoutines::dtan() != NULL) { + if (StubRoutines::dtan() != nullptr) { __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args()); } else { __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args()); @@ -1161,7 +1161,7 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) { LIRItem length(x->length(), this); // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction // and therefore provide the state before the parameters have been consumed - CodeEmitInfo* patching_info = NULL; + CodeEmitInfo* patching_info = nullptr; if (!x->klass()->is_loaded() || PatchALot) { patching_info = state_for(x, x->state_before()); } @@ -1194,14 +1194,14 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) { void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { Values* dims = x->dims(); int i = dims->length(); - LIRItemList* items = new LIRItemList(i, i, NULL); + LIRItemList* items = new LIRItemList(i, i, nullptr); while (i-- > 0) { LIRItem* size = new LIRItem(dims->at(i), this); items->at_put(i, size); } // Evaluate state_for early since it may emit code. 
- CodeEmitInfo* patching_info = NULL; + CodeEmitInfo* patching_info = nullptr; if (!x->klass()->is_loaded() || PatchALot) { patching_info = state_for(x, x->state_before()); @@ -1248,7 +1248,7 @@ void LIRGenerator::do_BlockBegin(BlockBegin* x) { void LIRGenerator::do_CheckCast(CheckCast* x) { LIRItem obj(x->obj(), this); - CodeEmitInfo* patching_info = NULL; + CodeEmitInfo* patching_info = nullptr; if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) { // must do this before locking the destination register as an oop register, // and before the obj is loaded (the latter is for deoptimization) @@ -1263,10 +1263,10 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { CodeStub* stub; if (x->is_incompatible_class_change_check()) { - assert(patching_info == NULL, "can't patch this"); + assert(patching_info == nullptr, "can't patch this"); stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); } else if (x->is_invokespecial_receiver_check()) { - assert(patching_info == NULL, "can't patch this"); + assert(patching_info == nullptr, "can't patch this"); stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none); @@ -1289,7 +1289,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) { // result and test object may not be in same register LIR_Opr reg = rlock_result(x); - CodeEmitInfo* patching_info = NULL; + CodeEmitInfo* patching_info = nullptr; if ((!x->klass()->is_loaded() || PatchALot)) { // must do this before locking the destination register as an oop register patching_info = state_for(x, x->state_before()); diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp index 1fd71eb6330..ccc05005822 100644 --- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp +++ 
b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp @@ -91,7 +91,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr // displaced header address in the object header - if it is not the same, get the // object header instead lea(rscratch2, Address(obj, hdr_offset)); - cmpxchgptr(hdr, disp_hdr, rscratch2, rscratch1, done, /*fallthough*/NULL); + cmpxchgptr(hdr, disp_hdr, rscratch2, rscratch1, done, /*fallthough*/nullptr); // if the object header was the same, we're done // if the object header was not the same, it is now in the hdr register // => test if it is a stack pointer into the same stack (recursive locking), i.e.: @@ -110,7 +110,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr sub(hdr, hdr, rscratch1); ands(hdr, hdr, aligned_mask - (int)os::vm_page_size()); // for recursive locking, the result is zero => save it in the displaced header - // location (NULL in the displaced hdr location indicates recursive locking) + // location (null in the displaced hdr location indicates recursive locking) str(hdr, Address(disp_hdr, 0)); // otherwise we don't care about the result and handle locking via runtime call cbnz(hdr, slow_case); @@ -129,7 +129,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_ // load displaced header ldr(hdr, Address(disp_hdr, 0)); - // if the loaded hdr is NULL we had recursive locking + // if the loaded hdr is null we had recursive locking // if we had recursive locking, we are done cbz(hdr, done); // load object @@ -294,7 +294,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) { verify_oop(receiver); - // explicit NULL check not needed since load from [klass_offset] causes a trap + // explicit null check not needed since load from [klass_offset] causes a trap // check against inline cache 
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check"); @@ -311,7 +311,7 @@ void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) { // Insert nmethod entry barrier into frame. BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); - bs->nmethod_entry_barrier(this, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */); + bs->nmethod_entry_barrier(this, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */); } void C1_MacroAssembler::remove_frame(int framesize) { diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp index 92b293ca6be..98cffb45524 100644 --- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -106,7 +106,7 @@ using MacroAssembler::null_check; void invalidate_registers(bool inv_r0, bool inv_r19, bool inv_r2, bool inv_r3, bool inv_r4, bool inv_r5) PRODUCT_RETURN; // This platform only uses signal-based null checks. The Label is not needed. 
- void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); } + void null_check(Register r, Label *Lnull = nullptr) { MacroAssembler::null_check(r); } void load_parameter(int offset_in_words, Register reg); diff --git a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp index 9c4158d630a..bdba111f6df 100644 --- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -369,7 +369,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { // Save registers, if required. OopMapSet* oop_maps = new OopMapSet(); - OopMap* oop_map = NULL; + OopMap* oop_map = nullptr; switch (id) { case forward_exception_id: // We're handling an exception in the context of a compiled frame. @@ -542,7 +542,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { // Note: This number affects also the RT-Call in generate_handle_exception because // the oop-map is shared for all calls. 
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); - assert(deopt_blob != NULL, "deoptimization blob must have been created"); + assert(deopt_blob != nullptr, "deoptimization blob must have been created"); OopMap* oop_map = save_live_registers(sasm); @@ -616,8 +616,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { bool save_fpu_registers = true; // stub code & info for the different stubs - OopMapSet* oop_maps = NULL; - OopMap* oop_map = NULL; + OopMapSet* oop_maps = nullptr; + OopMap* oop_map = nullptr; switch (id) { { case forward_exception_id: @@ -834,7 +834,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { __ ldp(r4, r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); Label miss; - __ check_klass_subtype_slow_path(r4, r0, r2, r5, NULL, &miss); + __ check_klass_subtype_slow_path(r4, r0, r2, r5, nullptr, &miss); // fallthrough on success: __ mov(rscratch1, 1); @@ -904,7 +904,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { oop_maps->add_gc_map(call_offset, oop_map); restore_live_registers(sasm); DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); - assert(deopt_blob != NULL, "deoptimization blob must have been created"); + assert(deopt_blob != nullptr, "deoptimization blob must have been created"); __ leave(); __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); } @@ -991,7 +991,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { restore_live_registers(sasm); __ leave(); DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); - assert(deopt_blob != NULL, "deoptimization blob must have been created"); + assert(deopt_blob != nullptr, "deoptimization blob must have been created"); __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); } diff --git a/src/hotspot/cpu/aarch64/c2_CodeStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_CodeStubs_aarch64.cpp index 009883d6082..81bde9a6611 100644 --- 
a/src/hotspot/cpu/aarch64/c2_CodeStubs_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c2_CodeStubs_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ int C2SafepointPollStub::max_size() const { } void C2SafepointPollStub::emit(C2_MacroAssembler& masm) { - assert(SharedRuntime::polling_page_return_handler_blob() != NULL, + assert(SharedRuntime::polling_page_return_handler_blob() != nullptr, "polling page return stub not created yet"); address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point(); diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp index d90bf8fb642..e96621ae2d3 100644 --- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp @@ -287,16 +287,16 @@ void C2_MacroAssembler::string_indexof(Register str2, Register str1, cmp(cnt1, (u1)16); // small patterns still should be handled by simple algorithm br(LT, LINEAR_MEDIUM); mov(result, zr); - RuntimeAddress stub = NULL; + RuntimeAddress stub = nullptr; if (isL) { stub = RuntimeAddress(StubRoutines::aarch64::string_indexof_linear_ll()); - assert(stub.target() != NULL, "string_indexof_linear_ll stub has not been generated"); + assert(stub.target() != nullptr, "string_indexof_linear_ll stub has not been generated"); } else if (str1_isL) { stub = RuntimeAddress(StubRoutines::aarch64::string_indexof_linear_ul()); - assert(stub.target() != NULL, "string_indexof_linear_ul stub has not been generated"); + assert(stub.target() != nullptr, "string_indexof_linear_ul stub has not been generated"); } else { stub = RuntimeAddress(StubRoutines::aarch64::string_indexof_linear_uu()); - 
assert(stub.target() != NULL, "string_indexof_linear_uu stub has not been generated"); + assert(stub.target() != nullptr, "string_indexof_linear_uu stub has not been generated"); } address call = trampoline_call(stub); if (call == nullptr) { @@ -844,7 +844,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2, } bind(STUB); - RuntimeAddress stub = NULL; + RuntimeAddress stub = nullptr; switch(ae) { case StrIntrinsicNode::LL: stub = RuntimeAddress(StubRoutines::aarch64::compare_long_string_LL()); @@ -861,7 +861,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2, default: ShouldNotReachHere(); } - assert(stub.target() != NULL, "compare_long_string stub has not been generated"); + assert(stub.target() != nullptr, "compare_long_string stub has not been generated"); address call = trampoline_call(stub); if (call == nullptr) { DEBUG_ONLY(reset_labels(DONE, SHORT_LOOP, SHORT_STRING, SHORT_LAST, SHORT_LOOP_TAIL, SHORT_LAST2, SHORT_LAST_INIT, SHORT_LOOP_START)); @@ -2049,9 +2049,9 @@ void C2_MacroAssembler::vector_signum_sve(FloatRegister dst, FloatRegister src, } bool C2_MacroAssembler::in_scratch_emit_size() { - if (ciEnv::current()->task() != NULL) { + if (ciEnv::current()->task() != nullptr) { PhaseOutput* phase_output = Compile::current()->output(); - if (phase_output != NULL && phase_output->in_scratch_emit_size()) { + if (phase_output != nullptr && phase_output->in_scratch_emit_size()) { return true; } } diff --git a/src/hotspot/cpu/aarch64/codeBuffer_aarch64.cpp b/src/hotspot/cpu/aarch64/codeBuffer_aarch64.cpp index 3ba166d39eb..31618414e31 100644 --- a/src/hotspot/cpu/aarch64/codeBuffer_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/codeBuffer_aarch64.cpp @@ -69,7 +69,7 @@ static bool emit_shared_trampolines(CodeBuffer* cb, CodeBuffer::SharedTrampoline assert(requests->number_of_entries() >= 1, "at least one"); const int total_requested_size = MacroAssembler::max_trampoline_stub_size() * requests->number_of_entries(); - if 
(cb->stubs()->maybe_expand_to_ensure_remaining(total_requested_size) && cb->blob() == NULL) { + if (cb->stubs()->maybe_expand_to_ensure_remaining(total_requested_size) && cb->blob() == nullptr) { return false; } diff --git a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp index d09375f7f0d..d1001687bd5 100644 --- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp @@ -44,7 +44,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) // mov rmethod, 0 // jmp -4 # to self - if (mark == NULL) { + if (mark == nullptr) { mark = cbuf.insts_mark(); // Get mark within main instrs section. } @@ -54,8 +54,8 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) address base = __ start_a_stub(to_interp_stub_size()); int offset = __ offset(); - if (base == NULL) { - return NULL; // CodeBuffer::expand failed + if (base == nullptr) { + return nullptr; // CodeBuffer::expand failed } // static stub relocation stores the instruction address of the call __ relocate(static_stub_Relocation::spec(mark)); @@ -88,7 +88,7 @@ int CompiledStaticCall::reloc_to_interp_stub() { void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) { address stub = find_stub(); - guarantee(stub != NULL, "stub not found"); + guarantee(stub != nullptr, "stub not found"); if (TraceICs) { ResourceMark rm; @@ -117,7 +117,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { // Reset stub. address stub = static_stub->addr(); - assert(stub != NULL, "stub not found"); + assert(stub != nullptr, "stub not found"); assert(CompiledICLocker::is_safe(stub), "mt unsafe call"); // Creation also verifies the object. 
NativeMovConstReg* method_holder @@ -138,7 +138,7 @@ void CompiledDirectStaticCall::verify() { // Verify stub. address stub = find_stub(); - assert(stub != NULL, "no stub found for static call"); + assert(stub != nullptr, "no stub found for static call"); // Creation also verifies the object. NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeInstruction::instruction_size); diff --git a/src/hotspot/cpu/aarch64/continuationFreezeThaw_aarch64.inline.hpp b/src/hotspot/cpu/aarch64/continuationFreezeThaw_aarch64.inline.hpp index b32e872c051..17c86c81071 100644 --- a/src/hotspot/cpu/aarch64/continuationFreezeThaw_aarch64.inline.hpp +++ b/src/hotspot/cpu/aarch64/continuationFreezeThaw_aarch64.inline.hpp @@ -84,11 +84,11 @@ frame FreezeBase::new_heap_frame(frame& f, frame& caller) { if (FKind::interpreted) { assert((intptr_t*)f.at(frame::interpreter_frame_last_sp_offset) == nullptr || f.unextended_sp() == (intptr_t*)f.at(frame::interpreter_frame_last_sp_offset), ""); - int locals = f.interpreter_frame_method()->max_locals(); + intptr_t locals_offset = *f.addr_at(frame::interpreter_frame_locals_offset); // If the caller.is_empty(), i.e. we're freezing into an empty chunk, then we set // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp bool overlap_caller = caller.is_interpreted_frame() || caller.is_empty(); - fp = caller.unextended_sp() - (locals + frame::sender_sp_offset) + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0); + fp = caller.unextended_sp() - 1 - locals_offset + (overlap_caller ? 
ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0); sp = fp - (f.fp() - f.unextended_sp()); assert(sp <= fp, ""); assert(fp <= caller.unextended_sp(), ""); @@ -97,7 +97,8 @@ frame FreezeBase::new_heap_frame(frame& f, frame& caller) { assert(_cont.tail()->is_in_chunk(sp), ""); frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */); - *hf.addr_at(frame::interpreter_frame_locals_offset) = frame::sender_sp_offset + locals - 1; + // copy relativized locals from the stack frame + *hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset; return hf; } else { // We need to re-read fp out of the frame because it may be an oop and we might have @@ -145,13 +146,11 @@ inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, co // on AARCH64, we may insert padding between the locals and the rest of the frame // (see TemplateInterpreterGenerator::generate_normal_entry, and AbstractInterpreter::layout_activation) - // so we compute locals "from scratch" rather than relativizing the value in the stack frame, which might include padding, - // since we don't freeze the padding word (see recurse_freeze_interpreted_frame). + // because we freeze the padding word (see recurse_freeze_interpreted_frame) in order to keep the same relativized + // locals value, we don't need to change the locals value here. 
- // at(frame::interpreter_frame_last_sp_offset) can be NULL at safepoint preempts + // at(frame::interpreter_frame_last_sp_offset) can be null at safepoint preempts *hf.addr_at(frame::interpreter_frame_last_sp_offset) = hf.unextended_sp() - hf.fp(); - // This line can be changed into an assert when we have fixed the "frame padding problem", see JDK-8300197 - *hf.addr_at(frame::interpreter_frame_locals_offset) = frame::sender_sp_offset + f.interpreter_frame_method()->max_locals() - 1; relativize_one(vfp, hfp, frame::interpreter_frame_initial_sp_offset); // == block_top == block_bottom relativize_one(vfp, hfp, frame::interpreter_frame_extended_sp_offset); @@ -222,11 +221,9 @@ template frame ThawBase::new_stack_frame(const frame& hf, frame& const int locals = hf.interpreter_frame_method()->max_locals(); intptr_t* frame_sp = caller.unextended_sp() - fsize; intptr_t* fp = frame_sp + (hf.fp() - heap_sp); - int padding = 0; if ((intptr_t)fp % frame::frame_alignment != 0) { fp--; frame_sp--; - padding++; log_develop_trace(continuations)("Adding internal interpreted frame alignment"); } DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);) @@ -235,10 +232,8 @@ template frame ThawBase::new_stack_frame(const frame& hf, frame& frame f(frame_sp, frame_sp, fp, hf.pc()); // we need to set the locals so that the caller of new_stack_frame() can call // ContinuationHelper::InterpretedFrame::frame_bottom - intptr_t offset = *hf.addr_at(frame::interpreter_frame_locals_offset); - assert((int)offset == frame::sender_sp_offset + locals - 1, ""); - // set relativized locals - *f.addr_at(frame::interpreter_frame_locals_offset) = padding + offset; + // copy relativized locals from the heap frame + *f.addr_at(frame::interpreter_frame_locals_offset) = *hf.addr_at(frame::interpreter_frame_locals_offset); assert((intptr_t)f.fp() % frame::frame_alignment == 0, ""); return f; } else { @@ -300,10 +295,4 @@ inline void 
ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, c derelativize_one(vfp, frame::interpreter_frame_extended_sp_offset); } -inline void ThawBase::set_interpreter_frame_bottom(const frame& f, intptr_t* bottom) { - // set relativized locals - // this line can be changed into an assert when we have fixed the "frame padding problem", see JDK-8300197 - *f.addr_at(frame::interpreter_frame_locals_offset) = (bottom - 1) - f.fp(); -} - #endif // CPU_AARCH64_CONTINUATIONFREEZETHAW_AARCH64_INLINE_HPP diff --git a/src/hotspot/cpu/aarch64/disassembler_aarch64.hpp b/src/hotspot/cpu/aarch64/disassembler_aarch64.hpp index 725ed30d186..39b493db412 100644 --- a/src/hotspot/cpu/aarch64/disassembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/disassembler_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -39,7 +39,7 @@ // the perfect job. In those cases, decode_instruction0 may kick in // and do it right. 
// If nothing had to be done, just return "here", otherwise return "here + instr_len(here)" - static address decode_instruction0(address here, outputStream* st, address virtual_begin = NULL) { + static address decode_instruction0(address here, outputStream* st, address virtual_begin = nullptr) { return here; } diff --git a/src/hotspot/cpu/aarch64/downcallLinker_aarch64.cpp b/src/hotspot/cpu/aarch64/downcallLinker_aarch64.cpp index 9c9d78e8d29..315980542b6 100644 --- a/src/hotspot/cpu/aarch64/downcallLinker_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/downcallLinker_aarch64.cpp @@ -47,6 +47,7 @@ class DowncallStubGenerator : public StubCodeGenerator { bool _needs_return_buffer; int _captured_state_mask; + bool _needs_transition; int _frame_complete; int _frame_size_slots; @@ -60,7 +61,8 @@ public: const GrowableArray& input_registers, const GrowableArray& output_registers, bool needs_return_buffer, - int captured_state_mask) + int captured_state_mask, + bool needs_transition) : StubCodeGenerator(buffer, PrintMethodHandleStubs), _signature(signature), _num_args(num_args), @@ -70,9 +72,10 @@ public: _output_registers(output_registers), _needs_return_buffer(needs_return_buffer), _captured_state_mask(captured_state_mask), + _needs_transition(needs_transition), _frame_complete(0), _frame_size_slots(0), - _oop_maps(NULL) { + _oop_maps(nullptr) { } void generate(); @@ -100,13 +103,15 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature, const GrowableArray& input_registers, const GrowableArray& output_registers, bool needs_return_buffer, - int captured_state_mask) { + int captured_state_mask, + bool needs_transition) { int code_size = native_invoker_code_base_size + (num_args * native_invoker_size_per_arg); int locs_size = 1; // must be non-zero CodeBuffer code("nep_invoker_blob", code_size, locs_size); DowncallStubGenerator g(&code, signature, num_args, ret_bt, abi, input_registers, output_registers, - needs_return_buffer, captured_state_mask); + 
needs_return_buffer, captured_state_mask, + needs_transition); g.generate(); code.log_section_sizes("nep_invoker_blob"); @@ -163,7 +168,7 @@ void DowncallStubGenerator::generate() { assert(_abi._shadow_space_bytes == 0, "not expecting shadow space on AArch64"); allocated_frame_size += arg_shuffle.out_arg_bytes(); - bool should_save_return_value = !_needs_return_buffer; + bool should_save_return_value = !_needs_return_buffer && _needs_transition; RegSpiller out_reg_spiller(_output_registers); int spill_offset = -1; @@ -191,7 +196,7 @@ void DowncallStubGenerator::generate() { _frame_size_slots = align_up(framesize + (allocated_frame_size >> LogBytesPerInt), 4); assert(is_even(_frame_size_slots/2), "sp not 16-byte aligned"); - _oop_maps = new OopMapSet(); + _oop_maps = _needs_transition ? new OopMapSet() : nullptr; address start = __ pc(); __ enter(); @@ -201,15 +206,17 @@ void DowncallStubGenerator::generate() { _frame_complete = __ pc() - start; - address the_pc = __ pc(); - __ set_last_Java_frame(sp, rfp, the_pc, tmp1); - OopMap* map = new OopMap(_frame_size_slots, 0); - _oop_maps->add_gc_map(the_pc - start, map); + if (_needs_transition) { + address the_pc = __ pc(); + __ set_last_Java_frame(sp, rfp, the_pc, tmp1); + OopMap* map = new OopMap(_frame_size_slots, 0); + _oop_maps->add_gc_map(the_pc - start, map); - // State transition - __ mov(tmp1, _thread_in_native); - __ lea(tmp2, Address(rthread, JavaThread::thread_state_offset())); - __ stlrw(tmp1, tmp2); + // State transition + __ mov(tmp1, _thread_in_native); + __ lea(tmp2, Address(rthread, JavaThread::thread_state_offset())); + __ stlrw(tmp1, tmp2); + } __ block_comment("{ argument shuffle"); arg_shuffle.generate(_masm, shuffle_reg, 0, _abi._shadow_space_bytes, locs); @@ -257,86 +264,89 @@ void DowncallStubGenerator::generate() { ////////////////////////////////////////////////////////////////////////////// - __ mov(tmp1, _thread_in_native_trans); - __ strw(tmp1, Address(rthread, 
JavaThread::thread_state_offset())); - - // Force this write out before the read below - if (!UseSystemMemoryBarrier) { - __ membar(Assembler::LoadLoad | Assembler::LoadStore | - Assembler::StoreLoad | Assembler::StoreStore); - } - - __ verify_sve_vector_length(tmp1); - Label L_after_safepoint_poll; Label L_safepoint_poll_slow_path; - - __ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, true /* acquire */, false /* in_nmethod */, tmp1); - - __ ldrw(tmp1, Address(rthread, JavaThread::suspend_flags_offset())); - __ cbnzw(tmp1, L_safepoint_poll_slow_path); - - __ bind(L_after_safepoint_poll); - - // change thread state - __ mov(tmp1, _thread_in_Java); - __ lea(tmp2, Address(rthread, JavaThread::thread_state_offset())); - __ stlrw(tmp1, tmp2); - - __ block_comment("reguard stack check"); Label L_reguard; Label L_after_reguard; - __ ldrb(tmp1, Address(rthread, JavaThread::stack_guard_state_offset())); - __ cmpw(tmp1, StackOverflow::stack_guard_yellow_reserved_disabled); - __ br(Assembler::EQ, L_reguard); - __ bind(L_after_reguard); + if (_needs_transition) { + __ mov(tmp1, _thread_in_native_trans); + __ strw(tmp1, Address(rthread, JavaThread::thread_state_offset())); - __ reset_last_Java_frame(true); + // Force this write out before the read below + if (!UseSystemMemoryBarrier) { + __ membar(Assembler::LoadLoad | Assembler::LoadStore | + Assembler::StoreLoad | Assembler::StoreStore); + } + + __ verify_sve_vector_length(tmp1); + + __ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, true /* acquire */, false /* in_nmethod */, tmp1); + + __ ldrw(tmp1, Address(rthread, JavaThread::suspend_flags_offset())); + __ cbnzw(tmp1, L_safepoint_poll_slow_path); + + __ bind(L_after_safepoint_poll); + + // change thread state + __ mov(tmp1, _thread_in_Java); + __ lea(tmp2, Address(rthread, JavaThread::thread_state_offset())); + __ stlrw(tmp1, tmp2); + + __ block_comment("reguard stack check"); + __ ldrb(tmp1, Address(rthread, 
JavaThread::stack_guard_state_offset())); + __ cmpw(tmp1, StackOverflow::stack_guard_yellow_reserved_disabled); + __ br(Assembler::EQ, L_reguard); + __ bind(L_after_reguard); + + __ reset_last_Java_frame(true); + } __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(lr); ////////////////////////////////////////////////////////////////////////////// - __ block_comment("{ L_safepoint_poll_slow_path"); - __ bind(L_safepoint_poll_slow_path); + if (_needs_transition) { + __ block_comment("{ L_safepoint_poll_slow_path"); + __ bind(L_safepoint_poll_slow_path); - if (should_save_return_value) { - // Need to save the native result registers around any runtime calls. - out_reg_spiller.generate_spill(_masm, spill_offset); - } + if (should_save_return_value) { + // Need to save the native result registers around any runtime calls. + out_reg_spiller.generate_spill(_masm, spill_offset); + } - __ mov(c_rarg0, rthread); - assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area"); - __ lea(tmp1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); - __ blr(tmp1); + __ mov(c_rarg0, rthread); + assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area"); + __ lea(tmp1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); + __ blr(tmp1); - if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_offset); - } + if (should_save_return_value) { + out_reg_spiller.generate_fill(_masm, spill_offset); + } - __ b(L_after_safepoint_poll); - __ block_comment("} L_safepoint_poll_slow_path"); + __ b(L_after_safepoint_poll); + __ block_comment("} L_safepoint_poll_slow_path"); ////////////////////////////////////////////////////////////////////////////// - __ block_comment("{ L_reguard"); - __ bind(L_reguard); + __ block_comment("{ L_reguard"); + __ bind(L_reguard); - if (should_save_return_value) { - 
out_reg_spiller.generate_spill(_masm, spill_offset); + if (should_save_return_value) { + out_reg_spiller.generate_spill(_masm, spill_offset); + } + + __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), tmp1); + + if (should_save_return_value) { + out_reg_spiller.generate_fill(_masm, spill_offset); + } + + __ b(L_after_reguard); + + __ block_comment("} L_reguard"); } - __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), tmp1); - - if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_offset); - } - - __ b(L_after_reguard); - - __ block_comment("} L_reguard"); - ////////////////////////////////////////////////////////////////////////////// __ flush(); diff --git a/src/hotspot/cpu/aarch64/foreignGlobals_aarch64.cpp b/src/hotspot/cpu/aarch64/foreignGlobals_aarch64.cpp index 90c1942c32d..2692054bcdc 100644 --- a/src/hotspot/cpu/aarch64/foreignGlobals_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/foreignGlobals_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2019, 2022, Arm Limited. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -33,6 +33,10 @@ #include "prims/vmstorage.hpp" #include "utilities/formatBuffer.hpp" +bool ForeignGlobals::is_foreign_linker_supported() { + return true; +} + bool ABIDescriptor::is_volatile_reg(Register reg) const { return _integer_argument_registers.contains(reg) || _integer_additional_volatile_registers.contains(reg); diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.cpp b/src/hotspot/cpu/aarch64/frame_aarch64.cpp index d8ced4195c6..ea76c9d20c1 100644 --- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp @@ -96,7 +96,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // to construct the sender and do some validation of it. 
This goes a long way // toward eliminating issues when we get in frame construction code - if (_cb != NULL ) { + if (_cb != nullptr ) { // First check if frame is complete and tester is reliable // Unfortunately we can only check frame complete for runtime stubs and nmethod @@ -122,10 +122,10 @@ bool frame::safe_for_sender(JavaThread *thread) { return fp_safe; } - intptr_t* sender_sp = NULL; - intptr_t* sender_unextended_sp = NULL; - address sender_pc = NULL; - intptr_t* saved_fp = NULL; + intptr_t* sender_sp = nullptr; + intptr_t* sender_unextended_sp = nullptr; + address sender_pc = nullptr; + intptr_t* saved_fp = nullptr; if (is_interpreted_frame()) { // fp must be safe @@ -189,7 +189,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // We must always be able to find a recognizable pc CodeBlob* sender_blob = CodeCache::find_blob(sender_pc); - if (sender_pc == NULL || sender_blob == NULL) { + if (sender_pc == nullptr || sender_blob == nullptr) { return false; } @@ -222,7 +222,7 @@ bool frame::safe_for_sender(JavaThread *thread) { } CompiledMethod* nm = sender_blob->as_compiled_method_or_null(); - if (nm != NULL) { + if (nm != nullptr) { if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) || nm->method()->is_method_handle_intrinsic()) { return false; @@ -264,7 +264,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // Will the pc we fetch be non-zero (which we'll find at the oldest frame) - if ( (address) this->fp()[return_addr_offset] == NULL) return false; + if ( (address) this->fp()[return_addr_offset] == nullptr) return false; // could try and do some more potential verification of native frame if we could think of some... 
@@ -298,7 +298,7 @@ void frame::patch_pc(Thread* thread, address pc) { *pc_addr = signed_pc; _pc = pc; // must be set before call to get_deopt_original_pc address original_pc = CompiledMethod::get_deopt_original_pc(this); - if (original_pc != NULL) { + if (original_pc != nullptr) { assert(original_pc == old_pc, "expected original PC to be stored before patching"); _deopt_state = is_deoptimized; _pc = original_pc; @@ -364,7 +364,7 @@ void frame::interpreter_frame_set_extended_sp(intptr_t* sp) { } frame frame::sender_for_entry_frame(RegisterMap* map) const { - assert(map != NULL, "map must be set"); + assert(map != nullptr, "map must be set"); // Java frame called from C; skip all C frames and return top C // frame of that chunk as the sender JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor(); @@ -392,11 +392,11 @@ bool frame::upcall_stub_frame_is_first() const { assert(is_upcall_stub_frame(), "must be optimzed entry frame"); UpcallStub* blob = _cb->as_upcall_stub(); JavaFrameAnchor* jfa = blob->jfa_for_frame(*this); - return jfa->last_Java_sp() == NULL; + return jfa->last_Java_sp() == nullptr; } frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const { - assert(map != NULL, "map must be set"); + assert(map != nullptr, "map must be set"); UpcallStub* blob = _cb->as_upcall_stub(); // Java frame called from C; skip all C frames and return top C // frame of that chunk as the sender @@ -441,9 +441,9 @@ void frame::adjust_unextended_sp() { // as any other call site. Therefore, no special action is needed when we are // returning to any of these call sites. - if (_cb != NULL) { + if (_cb != nullptr) { CompiledMethod* sender_cm = _cb->as_compiled_method_or_null(); - if (sender_cm != NULL) { + if (sender_cm != nullptr) { // If the sender PC is a deoptimization point, get the original PC. 
if (sender_cm->is_deopt_entry(_pc) || sender_cm->is_deopt_mh_entry(_pc)) { @@ -568,7 +568,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) obj = cast_to_oop(at(interpreter_frame_oop_temp_offset)); } else { oop* obj_p = (oop*)tos_addr; - obj = (obj_p == NULL) ? (oop)NULL : *obj_p; + obj = (obj_p == nullptr) ? (oop)nullptr : *obj_p; } assert(Universe::is_in_heap_or_null(obj), "sanity check"); *oop_result = obj; @@ -636,7 +636,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) { intptr_t *frame::initial_deoptimization_info() { // Not used on aarch64, but we must return something. - return NULL; + return nullptr; } #undef DESCRIBE_FP_OFFSET @@ -716,7 +716,7 @@ void internal_pf(uintptr_t sp, uintptr_t fp, uintptr_t pc, uintptr_t bcx) { printf("not a Method\n"); } else { CodeBlob *cb = CodeCache::find_blob((address)pc); - if (cb != NULL) { + if (cb != nullptr) { if (cb->is_nmethod()) { ResourceMark rm; nmethod* nm = (nmethod*)cb; @@ -782,11 +782,11 @@ frame::frame(void* sp, void* fp, void* pc) { void JavaFrameAnchor::make_walkable() { // last frame set? - if (last_Java_sp() == NULL) return; + if (last_Java_sp() == nullptr) return; // already walkable? 
if (walkable()) return; - vmassert(last_Java_sp() != NULL, "not called from Java code?"); - vmassert(last_Java_pc() == NULL, "already walkable"); + vmassert(last_Java_sp() != nullptr, "not called from Java code?"); + vmassert(last_Java_pc() == nullptr, "already walkable"); _last_Java_pc = (address)_last_Java_sp[-1]; vmassert(walkable(), "something went wrong"); } diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp index 52e919b502c..b969e180e4a 100644 --- a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp +++ b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp @@ -39,11 +39,11 @@ // Constructors: inline frame::frame() { - _pc = NULL; - _sp = NULL; - _unextended_sp = NULL; - _fp = NULL; - _cb = NULL; + _pc = nullptr; + _sp = nullptr; + _unextended_sp = nullptr; + _fp = nullptr; + _cb = nullptr; _deopt_state = unknown; _sp_is_trusted = false; _on_heap = false; @@ -60,11 +60,11 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) { _unextended_sp = sp; _fp = fp; _pc = pc; - _oop_map = NULL; + _oop_map = nullptr; _on_heap = false; DEBUG_ONLY(_frame_index = -1;) - assert(pc != NULL, "no pc?"); + assert(pc != nullptr, "no pc?"); _cb = CodeCache::find_blob(pc); setup(pc); } @@ -73,10 +73,10 @@ inline void frame::setup(address pc) { adjust_unextended_sp(); address original_pc = CompiledMethod::get_deopt_original_pc(this); - if (original_pc != NULL) { + if (original_pc != nullptr) { _pc = original_pc; _deopt_state = is_deoptimized; - assert(_cb == NULL || _cb->as_compiled_method()->insts_contains_inclusive(_pc), + assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc), "original PC must be in the main code section of the compiled method (or must be immediately following it)"); } else { if (_cb == SharedRuntime::deopt_blob()) { @@ -100,10 +100,10 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address _unextended_sp = unextended_sp; _fp = fp; _pc = 
pc; - assert(pc != NULL, "no pc?"); + assert(pc != nullptr, "no pc?"); _cb = cb; - _oop_map = NULL; - assert(_cb != NULL, "pc: " INTPTR_FORMAT, p2i(pc)); + _oop_map = nullptr; + assert(_cb != nullptr, "pc: " INTPTR_FORMAT, p2i(pc)); _on_heap = false; DEBUG_ONLY(_frame_index = -1;) @@ -124,7 +124,7 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address // In thaw, non-heap frames use this constructor to pass oop_map. I don't know why. assert(_on_heap || _cb != nullptr, "these frames are always heap frames"); - if (cb != NULL) { + if (cb != nullptr) { setup(pc); } #ifdef ASSERT @@ -144,8 +144,8 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address _fp = fp; _pc = pc; _cb = CodeCache::find_blob_fast(pc); - _oop_map = NULL; - assert(_cb != NULL, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp)); + _oop_map = nullptr; + assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp)); _on_heap = false; DEBUG_ONLY(_frame_index = -1;) @@ -171,13 +171,13 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) { // call a specilaized frame constructor instead of this one. // Then we could use the assert below. However this assert is of somewhat dubious // value. - // assert(_pc != NULL, "no pc?"); + // assert(_pc != nullptr, "no pc?"); _cb = CodeCache::find_blob(_pc); adjust_unextended_sp(); address original_pc = CompiledMethod::get_deopt_original_pc(this); - if (original_pc != NULL) { + if (original_pc != nullptr) { _pc = original_pc; _deopt_state = is_deoptimized; } else { @@ -198,19 +198,19 @@ inline bool frame::equal(frame other) const { } // Return unique id for this frame. The id must have a value where we can distinguish -// identity and younger/older relationship. 
NULL represents an invalid (incomparable) +// identity and younger/older relationship. null represents an invalid (incomparable) // frame. inline intptr_t* frame::id(void) const { return unextended_sp(); } // Return true if the frame is older (less recent activation) than the frame represented by id -inline bool frame::is_older(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id"); +inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id"); return this->id() > id ; } inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t **)addr_at(link_offset); } inline intptr_t* frame::link_or_null() const { intptr_t** ptr = (intptr_t **)addr_at(link_offset); - return os::is_readable_pointer(ptr) ? *ptr : NULL; + return os::is_readable_pointer(ptr) ? *ptr : nullptr; } inline intptr_t* frame::unextended_sp() const { assert_absolute(); return _unextended_sp; } @@ -219,7 +219,7 @@ inline int frame::offset_unextended_sp() const { assert_offset(); retu inline void frame::set_offset_unextended_sp(int value) { assert_on_heap(); _offset_unextended_sp = value; } inline intptr_t* frame::real_fp() const { - if (_cb != NULL) { + if (_cb != nullptr) { // use the frame size if valid int size = _cb->frame_size(); if (size > 0) { @@ -243,7 +243,7 @@ inline int frame::compiled_frame_stack_argsize() const { } inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const { - assert(mask != NULL, ""); + assert(mask != nullptr, ""); Method* m = interpreter_frame_method(); int bci = interpreter_frame_bci(); m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask); @@ -296,7 +296,7 @@ inline oop* frame::interpreter_frame_mirror_addr() const { // top of expression stack inline intptr_t* frame::interpreter_frame_tos_address() const { intptr_t* last_sp = interpreter_frame_last_sp(); - if (last_sp == NULL) { + if (last_sp == nullptr) { return sp(); } else { // sp() may have been 
extended or shrunk by an adapter. At least @@ -336,13 +336,13 @@ inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const { inline oop frame::saved_oop_result(RegisterMap* map) const { oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp()); - guarantee(result_adr != NULL, "bad register save location"); + guarantee(result_adr != nullptr, "bad register save location"); return *result_adr; } inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) { oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp()); - guarantee(result_adr != NULL, "bad register save location"); + guarantee(result_adr != nullptr, "bad register save location"); *result_adr = obj; } @@ -356,17 +356,17 @@ inline int frame::sender_sp_ret_address_offset() { } inline const ImmutableOopMap* frame::get_oop_map() const { - if (_cb == NULL) return NULL; - if (_cb->oop_maps() != NULL) { + if (_cb == nullptr) return nullptr; + if (_cb->oop_maps() != nullptr) { NativePostCallNop* nop = nativePostCallNop_at(_pc); - if (nop != NULL && nop->displacement() != 0) { + if (nop != nullptr && nop->displacement() != 0) { int slot = ((nop->displacement() >> 24) & 0xff); return _cb->oop_map_for_slot(slot, _pc); } const ImmutableOopMap* oop_map = OopMapSet::find_map(this); return oop_map; } - return NULL; + return nullptr; } //------------------------------------------------------------------------------ @@ -395,7 +395,7 @@ inline frame frame::sender_raw(RegisterMap* map) const { if (is_interpreted_frame()) return sender_for_interpreter_frame(map); assert(_cb == CodeCache::find_blob(pc()), "Must be the same"); - if (_cb != NULL) return sender_for_compiled_frame(map); + if (_cb != nullptr) return sender_for_compiled_frame(map); // Must be native-compiled frame, i.e. the marshaling code for native // methods that exists in the core system. @@ -428,13 +428,13 @@ inline frame frame::sender_for_compiled_frame(RegisterMap* map) const { // outside of update_register_map. 
if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread())); - if (oop_map() != NULL) { + if (oop_map() != nullptr) { _oop_map->update_register_map(this, map); } } else { assert(!_cb->caller_must_gc_arguments(map->thread()), ""); assert(!map->include_argument_oops(), ""); - assert(oop_map() == NULL || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame"); + assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame"); } // Since the prolog does the save and restore of FP there is no oopmap diff --git a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp index 3b3305a6a71..42081d422c8 100644 --- a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -159,7 +159,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, // Calling the runtime using the regular call_VM_leaf mechanism generates // code (generated by InterpreterMacroAssember::call_VM_leaf_base) - // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL. + // that checks that the *(rfp+frame::interpreter_frame_last_sp) == nullptr. // // If we care generating the pre-barrier without a frame (e.g. 
in the // intrinsified Reference.get() routine) then rfp might be pointing to @@ -210,11 +210,11 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, __ lsr(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes); __ cbz(tmp1, done); - // crosses regions, storing NULL? + // crosses regions, storing null? __ cbz(new_val, done); - // storing region crossing non-NULL, is card already dirty? + // storing region crossing non-null, is card already dirty? const Register card_addr = tmp1; @@ -234,7 +234,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, __ ldrb(tmp2, Address(card_addr)); __ cbzw(tmp2, done); - // storing a region crossing, non-NULL oop, card is clean. + // storing a region crossing, non-null oop, card is clean. // dirty card and log. __ strb(zr, Address(card_addr)); @@ -427,7 +427,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* Label done; Label runtime; - // At this point we know new_value is non-NULL and the new_value crosses regions. + // At this point we know new_value is non-null and the new_value crosses regions. // Must check to see if card is already dirty const Register thread = rthread; @@ -454,7 +454,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* __ ldrb(rscratch1, Address(byte_map_base, card_offset)); __ cbzw(rscratch1, done); - // storing region crossing non-NULL, card is clean. + // storing region crossing non-null, card is clean. // dirty card and log. 
__ strb(zr, Address(byte_map_base, card_offset)); diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp index 316548ae532..bfe29ddc8f7 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp @@ -297,7 +297,7 @@ void BarrierSetAssembler::clear_patching_epoch() { void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard) { BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); - if (bs_nm == NULL) { + if (bs_nm == nullptr) { return; } @@ -305,13 +305,13 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo Label skip_barrier; NMethodPatchingType patching_type = nmethod_patching_type(); - if (slow_path == NULL) { + if (slow_path == nullptr) { guard = &local_guard; } // If the slow path is out of line in a stub, we flip the condition - Assembler::Condition condition = slow_path == NULL ? Assembler::EQ : Assembler::NE; - Label& barrier_target = slow_path == NULL ? skip_barrier : *slow_path; + Assembler::Condition condition = slow_path == nullptr ? Assembler::EQ : Assembler::NE; + Label& barrier_target = slow_path == nullptr ? 
skip_barrier : *slow_path; __ ldrw(rscratch1, *guard); @@ -357,7 +357,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo } __ br(condition, barrier_target); - if (slow_path == NULL) { + if (slow_path == nullptr) { __ movptr(rscratch1, (uintptr_t) StubRoutines::aarch64::method_entry_barrier()); __ blr(rscratch1); __ b(skip_barrier); @@ -374,7 +374,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) { BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod(); - if (bs == NULL) { + if (bs == nullptr) { return; } @@ -417,5 +417,5 @@ void BarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register // make sure klass is 'reasonable', which is not zero. __ load_klass(obj, obj); // get klass - __ cbz(obj, error); // if klass is NULL it is broken + __ cbz(obj, error); // if klass is null it is broken } diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp index de66739daae..fe4df9b8c0d 100644 --- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp @@ -157,7 +157,7 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm, // Calling the runtime using the regular call_VM_leaf mechanism generates // code (generated by InterpreterMacroAssember::call_VM_leaf_base) - // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL. + // that checks that the *(rfp+frame::interpreter_frame_last_sp) == nullptr. // // If we care generating the pre-barrier without a frame (e.g. 
in the // intrinsified Reference.get() routine) then rfp might be pointing to @@ -447,7 +447,7 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler // b) A parallel thread may heal the contents of addr, replacing a // from-space pointer held in addr with the to-space pointer // representing the new location of the object. -// Upon entry to cmpxchg_oop, it is assured that new_val equals NULL +// Upon entry to cmpxchg_oop, it is assured that new_val equals null // or it refers to an object that is not being evacuated out of // from-space, or it refers to the to-space version of an object that // is being evacuated out of from-space. diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp b/src/hotspot/cpu/aarch64/globals_aarch64.hpp index 8b6cb162954..b26eaa4bfcd 100644 --- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp @@ -34,7 +34,7 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks define_pd_global(bool, TrapBasedNullChecks, false); -define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast +define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls past to check cast define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI); diff --git a/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp b/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp index 7d0483fb35c..bd8cfc42600 100644 --- a/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -47,7 +47,7 @@ void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached // because // (1) the value is old (i.e., doesn't matter for scavenges) // (2) these ICStubs are removed *before* a GC happens, so the roots disappear - // assert(cached_value == NULL || cached_oop->is_perm(), "must be perm oop"); + // assert(cached_value == nullptr || cached_oop->is_perm(), "must be perm oop"); address start = __ pc(); Label l; diff --git a/src/hotspot/cpu/aarch64/icache_aarch64.cpp b/src/hotspot/cpu/aarch64/icache_aarch64.cpp index 63b92247b6c..8bdddd4f151 100644 --- a/src/hotspot/cpu/aarch64/icache_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/icache_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020 Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -29,7 +29,7 @@ void ICacheStubGenerator::generate_icache_flush( ICache::flush_icache_stub_t* flush_icache_stub) { // Give anyone who calls this a surprise - *flush_icache_stub = (ICache::flush_icache_stub_t)NULL; + *flush_icache_stub = nullptr; } void ICache::initialize() {} diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp index 0cc1871e588..9c7fec16ae2 100644 --- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp @@ -140,7 +140,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) if (JvmtiExport::can_force_early_return()) { Label L; ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset())); - cbz(rscratch1, L); // if (thread->jvmti_thread_state() == NULL) exit; + cbz(rscratch1, L); // if (thread->jvmti_thread_state() == nullptr) exit; // Initiate earlyret handling only if it is not already being processed. 
// If the flag has the earlyret_processing bit set, it means that this code @@ -769,7 +769,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) "displached header must be first word in BasicObjectLock"); Label fail; - cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL); + cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr); // Fast check for recursive lock. // @@ -868,7 +868,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) cbz(header_reg, count); // Atomic swap back the old header - cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL); + cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr); // Call the runtime routine for slow case. str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj @@ -896,7 +896,7 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { Label set_mdp; stp(r0, r1, Address(pre(sp, -2 * wordSize))); - // Test MDO to avoid the call if it is NULL. + // Test MDO to avoid the call if it is null. ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset()))); cbz(r0, set_mdp); call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rmethod, rbcp); @@ -1287,7 +1287,7 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Reg } // In the fall-through case, we found no matching item, but we - // observed the item[start_row] is NULL. + // observed the item[start_row] is null. // Fill in the item field and increment the count. 
int item_offset = in_bytes(item_offset_fn(start_row)); @@ -1303,13 +1303,13 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Reg // Example state machine code for three profile rows: // // main copy of decision tree, rooted at row[1] // if (row[0].rec == rec) { row[0].incr(); goto done; } -// if (row[0].rec != NULL) { +// if (row[0].rec != nullptr) { // // inner copy of decision tree, rooted at row[1] // if (row[1].rec == rec) { row[1].incr(); goto done; } -// if (row[1].rec != NULL) { +// if (row[1].rec != nullptr) { // // degenerate decision tree, rooted at row[2] // if (row[2].rec == rec) { row[2].incr(); goto done; } -// if (row[2].rec != NULL) { count.incr(); goto done; } // overflow +// if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow // row[2].init(rec); goto done; // } else { // // remember row[1] is empty @@ -1583,7 +1583,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point, ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); cbz(rscratch1, L); stop("InterpreterMacroAssembler::call_VM_leaf_base:" - " last_sp != NULL"); + " last_sp != nullptr"); bind(L); } #endif /* ASSERT */ @@ -1611,7 +1611,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result, ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); cbz(rscratch1, L); stop("InterpreterMacroAssembler::call_VM_base:" - " last_sp != NULL"); + " last_sp != nullptr"); bind(L); } #endif /* ASSERT */ diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp index 0ff570b2e94..e5fd1431d65 100644 --- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp @@ -175,7 +175,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void empty_expression_stack() { ldr(esp, Address(rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize)); - // NULL last_sp until 
next java call + // null last_sp until next java call str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); } diff --git a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp index f5970adc412..90c7ca6f08a 100644 --- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -267,7 +267,7 @@ class SlowSignatureHandler virtual void pass_object() { intptr_t* addr = single_slot_addr(); - intptr_t value = *addr == 0 ? NULL : (intptr_t)addr; + intptr_t value = *addr == 0 ? (intptr_t)0 : (intptr_t)addr; if (pass_gpr(value) < 0) { pass_stack<>(value); } diff --git a/src/hotspot/cpu/aarch64/javaFrameAnchor_aarch64.hpp b/src/hotspot/cpu/aarch64/javaFrameAnchor_aarch64.hpp index 32e8f723609..8d125d3c027 100644 --- a/src/hotspot/cpu/aarch64/javaFrameAnchor_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/javaFrameAnchor_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -40,10 +40,10 @@ public: void clear(void) { // clearing _last_Java_sp must be first - _last_Java_sp = NULL; + _last_Java_sp = nullptr; OrderAccess::release(); - _last_Java_fp = NULL; - _last_Java_pc = NULL; + _last_Java_fp = nullptr; + _last_Java_pc = nullptr; } void copy(JavaFrameAnchor* src) { @@ -51,11 +51,11 @@ public: // We must clear _last_Java_sp before copying the rest of the new data // // Hack Alert: Temporary bugfix for 4717480/4721647 - // To act like previous version (pd_cache_state) don't NULL _last_Java_sp + // To act like previous version (pd_cache_state) don't null _last_Java_sp // unless the value is changing // if (_last_Java_sp != src->_last_Java_sp) { - _last_Java_sp = NULL; + _last_Java_sp = nullptr; OrderAccess::release(); } _last_Java_fp = src->_last_Java_fp; @@ -64,7 +64,7 @@ public: _last_Java_sp = src->_last_Java_sp; } - bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; } + bool walkable(void) { return _last_Java_sp != nullptr && _last_Java_pc != nullptr; } void make_walkable(); diff --git a/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp b/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp index 03cfafea143..e79b93651b0 100644 --- a/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -108,7 +108,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { case T_FLOAT: name = "jni_fast_GetFloatField"; break; case T_DOUBLE: name = "jni_fast_GetDoubleField"; break; default: ShouldNotReachHere(); - name = NULL; // unreachable + name = nullptr; // unreachable } ResourceMark rm; BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE); @@ -196,7 +196,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break; case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break; default: ShouldNotReachHere(); - slow_case_addr = NULL; // unreachable + slow_case_addr = nullptr; // unreachable } { diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index 67c99750dee..350f8082c34 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -648,7 +648,7 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_java_fp, address last_java_pc, Register scratch) { - assert(last_java_pc != NULL, "must provide a valid PC"); + assert(last_java_pc != nullptr, "must provide a valid PC"); adr(scratch, last_java_pc); str(scratch, Address(rthread, @@ -686,7 +686,7 @@ static inline bool target_needs_far_branch(address addr) { void MacroAssembler::far_call(Address entry, Register tmp) { assert(ReservedCodeCacheSize < 4*G, "branch out of range"); - assert(CodeCache::find_blob(entry.target()) != NULL, + assert(CodeCache::find_blob(entry.target()) != nullptr, "destination of far call not found in code cache"); assert(entry.rspec().type() == relocInfo::external_word_type || entry.rspec().type() == relocInfo::runtime_call_type @@ -705,7 +705,7 @@ void MacroAssembler::far_call(Address entry, Register tmp) { int MacroAssembler::far_jump(Address entry, Register tmp) { assert(ReservedCodeCacheSize < 4*G, "branch out of range"); - 
assert(CodeCache::find_blob(entry.target()) != NULL, + assert(CodeCache::find_blob(entry.target()) != nullptr, "destination of far call not found in code cache"); assert(entry.rspec().type() == relocInfo::external_word_type || entry.rspec().type() == relocInfo::runtime_call_type @@ -863,7 +863,7 @@ static bool is_always_within_branch_range(Address entry) { // Runtime calls are calls of a non-compiled method (stubs, adapters). // Non-compiled methods stay forever in CodeCache. // We check whether the longest possible branch is within the branch range. - assert(CodeCache::find_blob(target) != NULL && + assert(CodeCache::find_blob(target) != nullptr && !CodeCache::find_blob(target)->is_compiled(), "runtime call of compiled method"); const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size; @@ -895,9 +895,9 @@ address MacroAssembler::trampoline_call(Address entry) { code()->share_trampoline_for(entry.target(), offset()); } else { address stub = emit_trampoline_stub(offset(), target); - if (stub == NULL) { + if (stub == nullptr) { postcond(pc() == badAddress); - return NULL; // CodeCache is full + return nullptr; // CodeCache is full } } } @@ -927,8 +927,8 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset, address dest) { // Max stub size: alignment nop, TrampolineStub. address stub = start_a_stub(max_trampoline_stub_size()); - if (stub == NULL) { - return NULL; // CodeBuffer::expand failed + if (stub == nullptr) { + return nullptr; // CodeBuffer::expand failed } // Create a trampoline stub relocation which relates this trampoline stub @@ -968,7 +968,7 @@ void MacroAssembler::emit_static_call_stub() { // exact layout of this stub. isb(); - mov_metadata(rmethod, (Metadata*)NULL); + mov_metadata(rmethod, nullptr); // Jump to the entry point of the c2i stub. 
movptr(rscratch1, 0); @@ -1164,7 +1164,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass, add(recv_klass, recv_klass, itentry_off); } - // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { + // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) { // if (scan->interface() == intf) { // result = (klass + scan->offset() + itable_index); // } @@ -1223,8 +1223,8 @@ void MacroAssembler::check_klass_subtype(Register sub_klass, Register temp_reg, Label& L_success) { Label L_failure; - check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL); - check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL); + check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr); + check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr); bind(L_failure); } @@ -1247,10 +1247,10 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, Label L_fallthrough; int label_nulls = 0; - if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } - if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } - if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } - assert(label_nulls <= 1, "at most one NULL in the batch"); + if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } + if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } + if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; } + assert(label_nulls <= 1, "at most one null in the batch"); int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); int sco_offset = in_bytes(Klass::super_check_offset_offset()); @@ -1369,9 +1369,9 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, Label L_fallthrough; int label_nulls = 0; - if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 
- if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } - assert(label_nulls <= 1, "at most one NULL in the batch"); + if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } + if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } + assert(label_nulls <= 1, "at most one null in the batch"); // a couple of useful fields in sub_klass: int ss_offset = in_bytes(Klass::secondary_supers_offset()); @@ -1442,13 +1442,13 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, } void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) { - assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required"); + assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); assert_different_registers(klass, rthread, scratch); Label L_fallthrough, L_tmp; - if (L_fast_path == NULL) { + if (L_fast_path == nullptr) { L_fast_path = &L_fallthrough; - } else if (L_slow_path == NULL) { + } else if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; } // Fast path check: class is fully initialized @@ -1475,7 +1475,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, if (!VerifyOops) return; // Pass register number to verify_oop_subroutine - const char* b = NULL; + const char* b = nullptr; { ResourceMark rm; stringStream ss; @@ -1507,7 +1507,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { if (!VerifyOops) return; - const char* b = NULL; + const char* b = nullptr; { ResourceMark rm; stringStream ss; @@ -1639,13 +1639,13 @@ void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Reg void MacroAssembler::null_check(Register reg, int offset) { if (needs_explicit_null_check(offset)) { - // provoke OS NULL exception if reg = NULL by + // provoke OS 
null exception if reg is null by // accessing M[reg] w/o changing any registers // NOTE: this is plenty to provoke a segv ldr(zr, Address(reg)); } else { // nothing to do, (later) access of M[reg + offset] - // will provoke OS NULL exception if reg = NULL + // will provoke OS null exception if reg is null } } @@ -1963,7 +1963,7 @@ int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb, void MacroAssembler::membar(Membar_mask_bits order_constraint) { address prev = pc() - NativeMembar::instruction_size; address last = code()->last_insn(); - if (last != NULL && nativeInstruction_at(last)->is_Membar() && prev == last) { + if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) { NativeMembar *bar = NativeMembar_at(prev); // We are merging two memory barrier instructions. On AArch64 we // can do this simply by ORing them together. @@ -2448,8 +2448,8 @@ int MacroAssembler::pop_p(unsigned int bitset, Register stack) { void MacroAssembler::verify_heapbase(const char* msg) { #if 0 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); - assert (Universe::heap() != NULL, "java heap should be initialized"); - if (!UseCompressedOops || Universe::ptr_base() == NULL) { + assert (Universe::heap() != nullptr, "java heap should be initialized"); + if (!UseCompressedOops || Universe::ptr_base() == nullptr) { // rheapbase is allocated as general register return; } @@ -2470,7 +2470,7 @@ void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp assert_different_registers(value, tmp1, tmp2); Label done, tagged, weak_tagged; - cbz(value, done); // Use NULL as-is. + cbz(value, done); // Use null as-is. tst(value, JNIHandles::tag_mask); // Test for tag. br(Assembler::NE, tagged); @@ -2501,7 +2501,7 @@ void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Regis assert_different_registers(value, tmp1, tmp2); Label done; - cbz(value, done); // Use NULL as-is. 
+ cbz(value, done); // Use null as-is. #ifdef ASSERT { @@ -2527,7 +2527,7 @@ void MacroAssembler::stop(const char* msg) { } void MacroAssembler::unimplemented(const char* what) { - const char* buf = NULL; + const char* buf = nullptr; { ResourceMark rm; stringStream ss; @@ -3096,7 +3096,7 @@ bool MacroAssembler::ldst_can_merge(Register rt, address prev = pc() - NativeInstruction::instruction_size; address last = code()->last_insn(); - if (last == NULL || !nativeInstruction_at(last)->is_Imm_LdSt()) { + if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) { return false; } @@ -4356,7 +4356,7 @@ void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, R void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) { if (UseCompressedClassPointers) { ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); - if (CompressedKlassPointers::base() == NULL) { + if (CompressedKlassPointers::base() == nullptr) { cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift()); return; } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 @@ -4396,7 +4396,7 @@ void MacroAssembler::encode_heap_oop(Register d, Register s) { verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); #endif verify_oop_msg(s, "broken oop in encode_heap_oop"); - if (CompressedOops::base() == NULL) { + if (CompressedOops::base() == nullptr) { if (CompressedOops::shift() != 0) { assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); lsr(d, s, LogMinObjAlignmentInBytes); @@ -4429,7 +4429,7 @@ void MacroAssembler::encode_heap_oop_not_null(Register r) { } #endif verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); - if (CompressedOops::base() != NULL) { + if (CompressedOops::base() != nullptr) { sub(r, r, rheapbase); } if (CompressedOops::shift() != 0) { @@ -4451,7 +4451,7 @@ void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { verify_oop_msg(src, "broken oop in 
encode_heap_oop_not_null2"); Register data = src; - if (CompressedOops::base() != NULL) { + if (CompressedOops::base() != nullptr) { sub(dst, src, rheapbase); data = dst; } @@ -4468,7 +4468,7 @@ void MacroAssembler::decode_heap_oop(Register d, Register s) { #ifdef ASSERT verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); #endif - if (CompressedOops::base() == NULL) { + if (CompressedOops::base() == nullptr) { if (CompressedOops::shift() != 0 || d != s) { lsl(d, s, CompressedOops::shift()); } @@ -4485,37 +4485,37 @@ void MacroAssembler::decode_heap_oop(Register d, Register s) { void MacroAssembler::decode_heap_oop_not_null(Register r) { assert (UseCompressedOops, "should only be used for compressed headers"); - assert (Universe::heap() != NULL, "java heap should be initialized"); + assert (Universe::heap() != nullptr, "java heap should be initialized"); // Cannot assert, unverified entry point counts instructions (see .ad file) // vtableStubs also counts instructions in pd_code_size_limit. // Also do not verify_oop as this is called by verify_oop. if (CompressedOops::shift() != 0) { assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); - if (CompressedOops::base() != NULL) { + if (CompressedOops::base() != nullptr) { add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); } else { add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); } } else { - assert (CompressedOops::base() == NULL, "sanity"); + assert (CompressedOops::base() == nullptr, "sanity"); } } void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { assert (UseCompressedOops, "should only be used for compressed headers"); - assert (Universe::heap() != NULL, "java heap should be initialized"); + assert (Universe::heap() != nullptr, "java heap should be initialized"); // Cannot assert, unverified entry point counts instructions (see .ad file) // vtableStubs also counts instructions in pd_code_size_limit. 
// Also do not verify_oop as this is called by verify_oop. if (CompressedOops::shift() != 0) { assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); - if (CompressedOops::base() != NULL) { + if (CompressedOops::base() != nullptr) { add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); } else { add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); } } else { - assert (CompressedOops::base() == NULL, "sanity"); + assert (CompressedOops::base() == nullptr, "sanity"); if (dst != src) { mov(dst, src); } @@ -4535,7 +4535,7 @@ MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift() || 0 == CompressedKlassPointers::shift(), "decode alg wrong"); - if (CompressedKlassPointers::base() == NULL) { + if (CompressedKlassPointers::base() == nullptr) { return (_klass_decode_mode = KlassDecodeZero); } @@ -4643,8 +4643,8 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { { ThreadInVMfromUnknown tiv; assert (UseCompressedOops, "should only be used for compressed oops"); - assert (Universe::heap() != NULL, "java heap should be initialized"); - assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); + assert (Universe::heap() != nullptr, "java heap should be initialized"); + assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); } #endif @@ -4658,7 +4658,7 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { assert (UseCompressedClassPointers, "should only be used for compressed headers"); - assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); + assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); int index = oop_recorder()->find_index(k); assert(! 
Universe::heap()->is_in(k), "should not be an oop"); @@ -4711,13 +4711,13 @@ void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); } -// Used for storing NULLs. +// Used for storing nulls. void MacroAssembler::store_heap_oop_null(Address dst) { access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); } Address MacroAssembler::allocate_metadata_address(Metadata* obj) { - assert(oop_recorder() != NULL, "this assembler needs a Recorder"); + assert(oop_recorder() != nullptr, "this assembler needs a Recorder"); int index = oop_recorder()->allocate_metadata_index(obj); RelocationHolder rspec = metadata_Relocation::spec(index); return Address((address)obj, rspec); @@ -4726,7 +4726,7 @@ Address MacroAssembler::allocate_metadata_address(Metadata* obj) { // Move an oop into a register. void MacroAssembler::movoop(Register dst, jobject obj) { int oop_index; - if (obj == NULL) { + if (obj == nullptr) { oop_index = oop_recorder()->allocate_oop_index(obj); } else { #ifdef ASSERT @@ -4751,7 +4751,7 @@ void MacroAssembler::movoop(Register dst, jobject obj) { // Move a metadata address into a register. 
void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { int oop_index; - if (obj == NULL) { + if (obj == nullptr) { oop_index = oop_recorder()->allocate_metadata_index(obj); } else { oop_index = oop_recorder()->find_index(obj); @@ -4764,7 +4764,7 @@ Address MacroAssembler::constant_oop_address(jobject obj) { #ifdef ASSERT { ThreadInVMfromUnknown tiv; - assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); + assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop"); } #endif @@ -4978,23 +4978,23 @@ address MacroAssembler::count_positives(Register ary1, Register len, Register re BIND(STUB); RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives()); - assert(count_pos.target() != NULL, "count_positives stub has not been generated"); + assert(count_pos.target() != nullptr, "count_positives stub has not been generated"); address tpc1 = trampoline_call(count_pos); - if (tpc1 == NULL) { + if (tpc1 == nullptr) { DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE)); postcond(pc() == badAddress); - return NULL; + return nullptr; } b(DONE); BIND(STUB_LONG); RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long()); - assert(count_pos_long.target() != NULL, "count_positives_long stub has not been generated"); + assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated"); address tpc2 = trampoline_call(count_pos_long); - if (tpc2 == NULL) { + if (tpc2 == nullptr) { DEBUG_ONLY(reset_labels(SET_RESULT, DONE)); postcond(pc() == badAddress); - return NULL; + return nullptr; } b(DONE); @@ -5041,7 +5041,7 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, if (UseSimpleArrayEquals) { Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL; - // if (a1 == null || a2 == null) + // if (a1 == nullptr || a2 == nullptr) // return false; 
// a1 & a2 == 0 means (some-pointer is null) or // (very-rare-or-even-probably-impossible-pointer-values) @@ -5172,12 +5172,12 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, eor(tmp5, tmp3, tmp4); cbnz(tmp5, DONE); RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals()); - assert(stub.target() != NULL, "array_equals_long stub has not been generated"); + assert(stub.target() != nullptr, "array_equals_long stub has not been generated"); address tpc = trampoline_call(stub); - if (tpc == NULL) { + if (tpc == nullptr) { DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE)); postcond(pc() == badAddress); - return NULL; + return nullptr; } b(DONE); @@ -5324,14 +5324,14 @@ address MacroAssembler::zero_words(Register ptr, Register cnt) BLOCK_COMMENT("zero_words {"); assert(ptr == r10 && cnt == r11, "mismatch in register usage"); RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); - assert(zero_blocks.target() != NULL, "zero_blocks stub has not been generated"); + assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); subs(rscratch1, cnt, zero_words_block_size); Label around; br(LO, around); { RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks()); - assert(zero_blocks.target() != NULL, "zero_blocks stub has not been generated"); + assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated"); // Make sure this is a C2 compilation. 
C1 allocates space only for // trampoline stubs generated by Call LIR ops, and in any case it // makes sense for a C1 compilation task to proceed as quickly as @@ -5342,9 +5342,9 @@ address MacroAssembler::zero_words(Register ptr, Register cnt) && (task = ciEnv::current()->task()) && is_c2_compile(task->comp_level())) { address tpc = trampoline_call(zero_blocks); - if (tpc == NULL) { + if (tpc == nullptr) { DEBUG_ONLY(reset_labels(around)); - return NULL; + return nullptr; } } else { far_call(zero_blocks); @@ -5693,12 +5693,12 @@ address MacroAssembler::byte_array_inflate(Register src, Register dst, Register if (SoftwarePrefetchHintDistance >= 0) { bind(to_stub); RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate()); - assert(stub.target() != NULL, "large_byte_array_inflate stub has not been generated"); + assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated"); address tpc = trampoline_call(stub); - if (tpc == NULL) { + if (tpc == nullptr) { DEBUG_ONLY(reset_labels(big, done)); postcond(pc() == badAddress); - return NULL; + return nullptr; } b(after_init); } @@ -6085,7 +6085,7 @@ void MacroAssembler::object_move( Register rHandle = dst.first()->is_stack() ? 
rscratch2 : dst.first()->as_Register(); - // See if oop is NULL if it is we need no handle + // See if oop is null if it is we need no handle if (src.first()->is_stack()) { @@ -6098,13 +6098,13 @@ void MacroAssembler::object_move( ldr(rscratch1, Address(rfp, reg2offset_in(src.first()))); lea(rHandle, Address(rfp, reg2offset_in(src.first()))); - // conditionally move a NULL + // conditionally move a null cmp(rscratch1, zr); csel(rHandle, zr, rHandle, Assembler::EQ); } else { // Oop is in an a register we must store it to the space we reserve - // on the stack for oop_handles and pass a handle if oop is non-NULL + // on the stack for oop_handles and pass a handle if oop is non-null const Register rOop = src.first()->as_Register(); int oop_slot; @@ -6131,7 +6131,7 @@ void MacroAssembler::object_move( int offset = oop_slot*VMRegImpl::stack_slot_size; map->set_oop(VMRegImpl::stack2reg(oop_slot)); - // Store oop in handle area, may be NULL + // Store oop in handle area, may be null str(rOop, Address(sp, offset)); if (is_receiver) { *receiver_offset = offset; @@ -6139,7 +6139,7 @@ void MacroAssembler::object_move( cmp(rOop, zr); lea(rHandle, Address(sp, offset)); - // conditionally move a NULL + // conditionally move a null csel(rHandle, zr, rHandle, Assembler::EQ); } diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp index ac3b946b2fe..6211f1e74f2 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp @@ -57,7 +57,7 @@ class MacroAssembler: public Assembler { virtual void call_VM_leaf_base( address entry_point, // the entry point int number_of_arguments, // the number of arguments to pop after the call - Label *retaddr = NULL + Label *retaddr = nullptr ); virtual void call_VM_leaf_base( @@ -602,9 +602,9 @@ public: int corrected_idivq(Register result, Register ra, Register rb, bool want_remainder, Register tmp = rscratch1); - // Support for 
NULL-checks + // Support for null-checks // - // Generates code that causes a NULL OS exception if the content of reg is NULL. + // Generates code that causes a null OS exception if the content of reg is null. // If the accessed location is M[reg + offset] and the offset is known, provide the // offset. No explicit code generation is needed if the offset is within a certain // range (0 <= offset <= page_size). @@ -627,7 +627,7 @@ public: // Required platform-specific helpers for Label::patch_instructions. // They _shadow_ the declarations in AbstractAssembler, which are undefined. static int pd_patch_instruction_size(address branch, address target); - static void pd_patch_instruction(address branch, address target, const char* file = NULL, int line = 0) { + static void pd_patch_instruction(address branch, address target, const char* file = nullptr, int line = 0) { pd_patch_instruction_size(branch, target); } static address pd_call_destination(address branch) { @@ -872,14 +872,14 @@ public: Register tmp2, Register tmp3, DecoratorSet decorators = 0); // currently unimplemented - // Used for storing NULL. All other oop constants should be + // Used for storing null. All other oop constants should be // stored using routines that take a jobject. void store_heap_oop_null(Address dst); void store_klass_gap(Register dst, Register src); // This dummy is to prevent a call to store_heap_oop from - // converting a zero (like NULL) into a Register by giving + // converting a zero (like null) into a Register by giving // the compiler two choices it can't resolve void store_heap_oop(Address dst, void* dummy); @@ -951,7 +951,7 @@ public: // Test sub_klass against super_klass, with fast and slow paths. // The fast path produces a tri-state answer: yes / no / maybe-slow. - // One of the three labels can be NULL, meaning take the fall-through. + // One of the three labels can be null, meaning take the fall-through. 
// If super_check_offset is -1, the value is loaded up from super_klass. // No registers are killed, except temp_reg. void check_klass_subtype_fast_path(Register sub_klass, @@ -984,8 +984,8 @@ public: void clinit_barrier(Register klass, Register thread, - Label* L_fast_path = NULL, - Label* L_slow_path = NULL); + Label* L_fast_path = nullptr, + Label* L_slow_path = nullptr); Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0); @@ -1188,7 +1188,7 @@ public: // - relocInfo::static_call_type // - relocInfo::virtual_call_type // - // Return: the call PC or NULL if CodeCache is full. + // Return: the call PC or null if CodeCache is full. address trampoline_call(Address entry); static bool far_branches() { diff --git a/src/hotspot/cpu/aarch64/matcher_aarch64.hpp b/src/hotspot/cpu/aarch64/matcher_aarch64.hpp index 5108c5c802b..b05f1a47e4a 100644 --- a/src/hotspot/cpu/aarch64/matcher_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/matcher_aarch64.hpp @@ -94,12 +94,12 @@ static bool const_oop_prefer_decode() { // Prefer ConN+DecodeN over ConP in simple compressed oops mode. - return CompressedOops::base() == NULL; + return CompressedOops::base() == nullptr; } static bool const_klass_prefer_decode() { // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode. - return CompressedKlassPointers::base() == NULL; + return CompressedKlassPointers::base() == nullptr; } // Is it better to copy float constants, or load them directly from diff --git a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp index 6ff5c82d478..b0a3d858fe0 100644 --- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -175,14 +175,14 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod. // They all allow an appendix argument. __ hlt(0); // empty stubs make SG sick - return NULL; + return nullptr; } // No need in interpreter entry for linkToNative for now. // Interpreter calls compiled entry through i2c. if (iid == vmIntrinsics::_linkToNative) { __ hlt(0); - return NULL; + return nullptr; } // r19_sender_sp: sender SP (must preserve; see prepare_to_jump_from_interpreted) diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp index 0844fdf6021..27bf35e12c8 100644 --- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -197,7 +197,7 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) { // Patch the constant in the call's trampoline stub. address trampoline_stub_addr = get_trampoline(); - if (trampoline_stub_addr != NULL) { + if (trampoline_stub_addr != nullptr) { assert (! 
is_NativeCallTrampolineStub_at(dest), "chained trampolines"); nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest); } @@ -206,7 +206,7 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) { if (reachable) { set_destination(dest); } else { - assert (trampoline_stub_addr != NULL, "we need a trampoline"); + assert (trampoline_stub_addr != nullptr, "we need a trampoline"); set_destination(trampoline_stub_addr); } @@ -217,7 +217,7 @@ address NativeCall::get_trampoline() { address call_addr = addr_at(0); CodeBlob *code = CodeCache::find_blob(call_addr); - assert(code != NULL, "Could not find the containing code blob"); + assert(code != nullptr, "Could not find the containing code blob"); address bl_destination = MacroAssembler::pd_call_destination(call_addr); @@ -229,7 +229,7 @@ address NativeCall::get_trampoline() { return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code); } - return NULL; + return nullptr; } // Inserts a native call instruction at a given pc @@ -270,7 +270,7 @@ void NativeMovConstReg::set_data(intptr_t x) { // instruction in oops section. CodeBlob* cb = CodeCache::find_blob(instruction_address()); nmethod* nm = cb->as_nmethod_or_null(); - if (nm != NULL) { + if (nm != nullptr) { RelocIterator iter(nm, instruction_address(), next_instruction_address()); while (iter.next()) { if (iter.type() == relocInfo::oop_type) { diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp index bb9eb9c4dbf..1740fde772f 100644 --- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2108, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -294,7 +294,7 @@ public: else if (is_ldr_literal_at(instruction_address())) return(addr_at(4)); assert(false, "Unknown instruction in NativeMovConstReg"); - return NULL; + return nullptr; } intptr_t data() const; @@ -589,7 +589,7 @@ public: next_instruction_offset = 4 * 4 }; - address destination(nmethod* nm = NULL) const; + address destination(nmethod* nm = nullptr) const; void set_destination(address new_destination); ptrdiff_t destination_offset() const; }; @@ -709,7 +709,7 @@ inline NativePostCallNop* nativePostCallNop_at(address address) { if (nop->check()) { return nop; } - return NULL; + return nullptr; } inline NativePostCallNop* nativePostCallNop_unsafe_at(address address) { @@ -731,7 +731,7 @@ class NativeDeoptInstruction: public NativeInstruction { void verify(); static bool is_deopt_at(address instr) { - assert(instr != NULL, ""); + assert(instr != nullptr, ""); uint32_t value = *(uint32_t *) instr; return value == 0xd4ade001; } diff --git a/src/hotspot/cpu/aarch64/registerMap_aarch64.cpp b/src/hotspot/cpu/aarch64/registerMap_aarch64.cpp index 6e36e877fdc..7bf513eba31 100644 --- a/src/hotspot/cpu/aarch64/registerMap_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/registerMap_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, Arm Limited. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -37,10 +37,10 @@ address RegisterMap::pd_location(VMReg base_reg, int slot_idx) const { FloatRegister::max_slots_per_register; intptr_t offset_in_bytes = slot_idx * VMRegImpl::stack_slot_size; address base_location = location(base_reg, nullptr); - if (base_location != NULL) { + if (base_location != nullptr) { return base_location + offset_in_bytes; } else { - return NULL; + return nullptr; } } else { return location(base_reg->next(slot_idx), nullptr); diff --git a/src/hotspot/cpu/aarch64/registerMap_aarch64.hpp b/src/hotspot/cpu/aarch64/registerMap_aarch64.hpp index f7d70e70342..63ed3be2ea9 100644 --- a/src/hotspot/cpu/aarch64/registerMap_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/registerMap_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -32,7 +32,7 @@ private: // This is the hook for finding a register in an "well-known" location, // such as a register block of a predetermined format. - address pd_location(VMReg reg) const { return NULL; } + address pd_location(VMReg reg) const { return nullptr; } address pd_location(VMReg base_reg, int slot_idx) const; // no PD state to clear or copy: diff --git a/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp b/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp index 49cc3207098..4b7930c94a8 100644 --- a/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/relocInfo_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -66,7 +66,7 @@ address Relocation::pd_call_destination(address orig_addr) { return nativeCallTrampolineStub_at(trampoline)->destination(); } } - if (orig_addr != NULL) { + if (orig_addr != nullptr) { address new_addr = MacroAssembler::pd_call_destination(orig_addr); // If call is branch to self, don't try to relocate it, just leave it // as branch to self. This happens during code generation if the code diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp index f8081cd0bbf..82d4cd64cd8 100644 --- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp @@ -570,7 +570,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, // caller, but with an uncorrected stack, causing delayed havoc. if (VerifyAdapterCalls && - (Interpreter::code() != NULL || StubRoutines::final_stubs_code() != NULL)) { + (Interpreter::code() != nullptr || StubRoutines::final_stubs_code() != nullptr)) { #if 0 // So, let's test for cascading c2i/i2c adapters right now. 
// assert(Interpreter::contains($return_addr) || @@ -578,18 +578,18 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, // "i2c adapter must return to an interpreter frame"); __ block_comment("verify_i2c { "); Label L_ok; - if (Interpreter::code() != NULL) { + if (Interpreter::code() != nullptr) { range_check(masm, rax, r11, Interpreter::code()->code_start(), Interpreter::code()->code_end(), L_ok); } - if (StubRoutines::initial_stubs_code() != NULL) { + if (StubRoutines::initial_stubs_code() != nullptr) { range_check(masm, rax, r11, StubRoutines::initial_stubs_code()->code_begin(), StubRoutines::initial_stubs_code()->code_end(), L_ok); } - if (StubRoutines::final_stubs_code() != NULL) { + if (StubRoutines::final_stubs_code() != nullptr) { range_check(masm, rax, r11, StubRoutines::final_stubs_code()->code_begin(), StubRoutines::final_stubs_code()->code_end(), @@ -773,7 +773,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm address c2i_entry = __ pc(); // Class initialization barrier for static methods - address c2i_no_clinit_check_entry = NULL; + address c2i_no_clinit_check_entry = nullptr; if (VM_Version::supports_fast_class_init_checks()) { Label L_skip_barrier; @@ -803,7 +803,7 @@ static int c_calling_convention_priv(const BasicType *sig_bt, VMRegPair *regs, VMRegPair *regs2, int total_args_passed) { - assert(regs2 == NULL, "not needed on AArch64"); + assert(regs2 == nullptr, "not needed on AArch64"); // We return the amount of VMRegImpl stack slots we need to reserve for all // the arguments NOT counting out_preserve_stack_slots. 
@@ -1421,10 +1421,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, stack_slots / VMRegImpl::slots_per_word, in_ByteSize(-1), in_ByteSize(-1), - (OopMapSet*)NULL); + nullptr); } address native_func = method->native_function(); - assert(native_func != NULL, "must have function"); + assert(native_func != nullptr, "must have function"); // An OopMap for lock (and class if static) OopMapSet *oop_maps = new OopMapSet(); @@ -1441,7 +1441,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); - BasicType* in_elem_bt = NULL; + BasicType* in_elem_bt = nullptr; int argc = 0; out_sig_bt[argc++] = T_ADDRESS; @@ -1456,10 +1456,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, // Now figure out where the args must be stored and how much stack space // they require. int out_arg_slots; - out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, NULL, total_c_args); + out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, nullptr, total_c_args); if (out_arg_slots < 0) { - return NULL; + return nullptr; } // Compute framesize for the wrapper. We need to handlize all oops in @@ -1581,7 +1581,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ sub(sp, sp, stack_size - 2*wordSize); BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); - bs->nmethod_entry_barrier(masm, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */); + bs->nmethod_entry_barrier(masm, nullptr /* slow_path */, nullptr /* continuation */, nullptr /* guard */); // Frame is now completed as far as size and linkage. 
int frame_complete = ((intptr_t)__ pc()) - start; @@ -1787,7 +1787,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ str(swap_reg, Address(lock_reg, mark_word_offset)); // src -> dest iff dest == r0 else r0 <- dest - __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL); + __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr); // Hmm should this move to the slow path code area??? @@ -2178,7 +2178,7 @@ void SharedRuntime::generate_deopt_blob() { CodeBuffer buffer("deopt_blob", 2048+pad, 1024); MacroAssembler* masm = new MacroAssembler(&buffer); int frame_size_in_words; - OopMap* map = NULL; + OopMap* map = nullptr; OopMapSet *oop_maps = new OopMapSet(); RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0); @@ -2384,7 +2384,7 @@ void SharedRuntime::generate_deopt_blob() { __ cmpw(rcpool, Deoptimization::Unpack_exception); // Was exception pending? __ br(Assembler::NE, noException); __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset())); - // QQQ this is useless it was NULL above + // QQQ this is useless it was null above __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset())); __ str(zr, Address(rthread, JavaThread::exception_oop_offset())); __ str(zr, Address(rthread, JavaThread::exception_pc_offset())); @@ -2764,7 +2764,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t MacroAssembler* masm = new MacroAssembler(&buffer); address start = __ pc(); - address call_pc = NULL; + address call_pc = nullptr; int frame_size_in_words; bool cause_return = (poll_type == POLL_AT_RETURN); RegisterSaver reg_save(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */); @@ -2880,7 +2880,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t // must do any gc of the args. 
// RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { - assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); + assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before"); // allocate space for the code ResourceMark rm; @@ -2892,7 +2892,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha RegisterSaver reg_save(false /* save_vectors */); OopMapSet *oop_maps = new OopMapSet(); - OopMap* map = NULL; + OopMap* map = nullptr; int start = __ offset(); diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp index 73423a1bf1c..389bb0d7d0e 100644 --- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp @@ -438,7 +438,7 @@ class StubGenerator: public StubCodeGenerator { __ strw(rscratch1, Address(rthread, Thread::exception_line_offset())); // complete return to VM - assert(StubRoutines::_call_stub_return_address != NULL, + assert(StubRoutines::_call_stub_return_address != nullptr, "_call_stub_return_address must have been generated before"); __ b(StubRoutines::_call_stub_return_address); @@ -566,7 +566,7 @@ class StubGenerator: public StubCodeGenerator { // object is in r0 // make sure object is 'reasonable' - __ cbz(r0, exit); // if obj is NULL it is OK + __ cbz(r0, exit); // if obj is null it is OK BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler(); bs_asm->check_oop(_masm, r0, c_rarg2, c_rarg3, error); @@ -791,7 +791,7 @@ class StubGenerator: public StubCodeGenerator { t4 = r7, t5 = r11, t6 = r12, t7 = r13; const Register stride = r14; const Register gct1 = rscratch1, gct2 = rscratch2, gct3 = r10; - const FloatRegister gcvt1 = v6, gcvt2 = v7, gcvt3 = v8; + const FloatRegister gcvt1 = v6, gcvt2 = v7, gcvt3 = v16; // Note that v8-v15 are callee saved ArrayCopyBarrierSetHelper bs(_masm, decorators, 
type, gct1, gct2, gct3, gcvt1, gcvt2, gcvt3); assert_different_registers(rscratch1, rscratch2, t0, t1, t2, t3, t4, t5, t6, t7); @@ -1185,7 +1185,7 @@ class StubGenerator: public StubCodeGenerator { const Register t6 = r12, t7 = r13, t8 = r14, t9 = r15; const Register send = r17, dend = r16; const Register gct1 = rscratch1, gct2 = rscratch2, gct3 = r10; - const FloatRegister gcvt1 = v6, gcvt2 = v7, gcvt3 = v8; + const FloatRegister gcvt1 = v6, gcvt2 = v7, gcvt3 = v16; // Note that v8-v15 are callee saved ArrayCopyBarrierSetHelper bs(_masm, decorators, type, gct1, gct2, gct3, gcvt1, gcvt2, gcvt3); if (PrefetchCopyIntervalInBytes > 0) @@ -1480,7 +1480,7 @@ class StubGenerator: public StubCodeGenerator { address start = __ pc(); __ enter(); - if (entry != NULL) { + if (entry != nullptr) { *entry = __ pc(); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) BLOCK_COMMENT("Entry:"); @@ -1546,7 +1546,7 @@ class StubGenerator: public StubCodeGenerator { address start = __ pc(); __ enter(); - if (entry != NULL) { + if (entry != nullptr) { *entry = __ pc(); // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) BLOCK_COMMENT("Entry:"); @@ -1824,9 +1824,9 @@ class StubGenerator: public StubCodeGenerator { Label L_miss; - __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, + __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, nullptr, super_check_offset); - __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); + __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, nullptr); // Fall through on failure! __ BIND(L_miss); @@ -1897,7 +1897,7 @@ class StubGenerator: public StubCodeGenerator { #endif //ASSERT // Caller of this entry point must set up the argument registers. 
- if (entry != NULL) { + if (entry != nullptr) { *entry = __ pc(); BLOCK_COMMENT("Entry:"); } @@ -2134,19 +2134,19 @@ class StubGenerator: public StubCodeGenerator { // (2) src_pos must not be negative. // (3) dst_pos must not be negative. // (4) length must not be negative. - // (5) src klass and dst klass should be the same and not NULL. + // (5) src klass and dst klass should be the same and not null. // (6) src and dst should be arrays. // (7) src_pos + length must not exceed length of src. // (8) dst_pos + length must not exceed length of dst. // - // if (src == NULL) return -1; + // if (src == nullptr) return -1; __ cbz(src, L_failed); // if (src_pos < 0) return -1; __ tbnz(src_pos, 31, L_failed); // i.e. sign bit set - // if (dst == NULL) return -1; + // if (dst == nullptr) return -1; __ cbz(dst, L_failed); // if (dst_pos < 0) return -1; @@ -2163,11 +2163,11 @@ class StubGenerator: public StubCodeGenerator { __ load_klass(scratch_src_klass, src); #ifdef ASSERT - // assert(src->klass() != NULL); + // assert(src->klass() != nullptr); { BLOCK_COMMENT("assert klasses not null {"); Label L1, L2; - __ cbnz(scratch_src_klass, L2); // it is broken if klass is NULL + __ cbnz(scratch_src_klass, L2); // it is broken if klass is null __ bind(L1); __ stop("broken null klass"); __ bind(L2); @@ -2575,7 +2575,7 @@ class StubGenerator: public StubCodeGenerator { "jbyte_arraycopy"); StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry, "arrayof_jbyte_disjoint_arraycopy"); - StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL, + StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, nullptr, "arrayof_jbyte_arraycopy"); //*** jshort @@ -2587,7 +2587,7 @@ class StubGenerator: public StubCodeGenerator { "jshort_arraycopy"); StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry, "arrayof_jshort_disjoint_arraycopy"); - 
StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL, + StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, nullptr, "arrayof_jshort_arraycopy"); //*** jint @@ -2630,7 +2630,7 @@ class StubGenerator: public StubCodeGenerator { = generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy_uninit", /*dest_uninitialized*/true); StubRoutines::_arrayof_oop_arraycopy_uninit - = generate_conjoint_oop_copy(aligned, entry, NULL, "arrayof_oop_arraycopy_uninit", + = generate_conjoint_oop_copy(aligned, entry, nullptr, "arrayof_oop_arraycopy_uninit", /*dest_uninitialized*/true); } @@ -2640,7 +2640,7 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit; StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); - StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, + StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr, /*dest_uninitialized*/true); StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", @@ -8090,7 +8090,7 @@ class StubGenerator: public StubCodeGenerator { generate_arraycopy_stubs(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); - if (bs_nm != NULL) { + if (bs_nm != nullptr) { StubRoutines::aarch64::_method_entry_barrier = generate_method_entry_barrier(); } diff --git a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp index 320c8be59e9..3b6d3b5c40a 100644 --- a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp @@ -33,30 +33,30 @@ // Implementation of the platform-specific part of StubRoutines - for // a description of how to extend it, see the stubRoutines.hpp file. 
-address StubRoutines::aarch64::_get_previous_sp_entry = NULL; +address StubRoutines::aarch64::_get_previous_sp_entry = nullptr; -address StubRoutines::aarch64::_f2i_fixup = NULL; -address StubRoutines::aarch64::_f2l_fixup = NULL; -address StubRoutines::aarch64::_d2i_fixup = NULL; -address StubRoutines::aarch64::_d2l_fixup = NULL; -address StubRoutines::aarch64::_vector_iota_indices = NULL; -address StubRoutines::aarch64::_float_sign_mask = NULL; -address StubRoutines::aarch64::_float_sign_flip = NULL; -address StubRoutines::aarch64::_double_sign_mask = NULL; -address StubRoutines::aarch64::_double_sign_flip = NULL; -address StubRoutines::aarch64::_zero_blocks = NULL; -address StubRoutines::aarch64::_count_positives = NULL; -address StubRoutines::aarch64::_count_positives_long = NULL; -address StubRoutines::aarch64::_large_array_equals = NULL; -address StubRoutines::aarch64::_compare_long_string_LL = NULL; -address StubRoutines::aarch64::_compare_long_string_UU = NULL; -address StubRoutines::aarch64::_compare_long_string_LU = NULL; -address StubRoutines::aarch64::_compare_long_string_UL = NULL; -address StubRoutines::aarch64::_string_indexof_linear_ll = NULL; -address StubRoutines::aarch64::_string_indexof_linear_uu = NULL; -address StubRoutines::aarch64::_string_indexof_linear_ul = NULL; -address StubRoutines::aarch64::_large_byte_array_inflate = NULL; -address StubRoutines::aarch64::_method_entry_barrier = NULL; +address StubRoutines::aarch64::_f2i_fixup = nullptr; +address StubRoutines::aarch64::_f2l_fixup = nullptr; +address StubRoutines::aarch64::_d2i_fixup = nullptr; +address StubRoutines::aarch64::_d2l_fixup = nullptr; +address StubRoutines::aarch64::_vector_iota_indices = nullptr; +address StubRoutines::aarch64::_float_sign_mask = nullptr; +address StubRoutines::aarch64::_float_sign_flip = nullptr; +address StubRoutines::aarch64::_double_sign_mask = nullptr; +address StubRoutines::aarch64::_double_sign_flip = nullptr; +address 
StubRoutines::aarch64::_zero_blocks = nullptr; +address StubRoutines::aarch64::_count_positives = nullptr; +address StubRoutines::aarch64::_count_positives_long = nullptr; +address StubRoutines::aarch64::_large_array_equals = nullptr; +address StubRoutines::aarch64::_compare_long_string_LL = nullptr; +address StubRoutines::aarch64::_compare_long_string_UU = nullptr; +address StubRoutines::aarch64::_compare_long_string_LU = nullptr; +address StubRoutines::aarch64::_compare_long_string_UL = nullptr; +address StubRoutines::aarch64::_string_indexof_linear_ll = nullptr; +address StubRoutines::aarch64::_string_indexof_linear_uu = nullptr; +address StubRoutines::aarch64::_string_indexof_linear_ul = nullptr; +address StubRoutines::aarch64::_large_byte_array_inflate = nullptr; +address StubRoutines::aarch64::_method_entry_barrier = nullptr; static void empty_spin_wait() { } address StubRoutines::aarch64::_spin_wait = CAST_FROM_FN_PTR(address, empty_spin_wait); diff --git a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp index ecd77e8e6f0..fada22ae385 100644 --- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp @@ -99,7 +99,7 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() { // stack args <- esp // garbage // expression stack bottom - // bcp (NULL) + // bcp (null) // ... 
// Restore LR @@ -162,7 +162,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M // [ arg ] // retaddr in lr - address entry_point = NULL; + address entry_point = nullptr; Register continuation = lr; switch (kind) { case Interpreter::java_lang_math_abs: @@ -242,49 +242,49 @@ void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpr address fn; switch (kind) { case Interpreter::java_lang_math_sin : - if (StubRoutines::dsin() == NULL) { + if (StubRoutines::dsin() == nullptr) { fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin); } else { fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin()); } break; case Interpreter::java_lang_math_cos : - if (StubRoutines::dcos() == NULL) { + if (StubRoutines::dcos() == nullptr) { fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos); } else { fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos()); } break; case Interpreter::java_lang_math_tan : - if (StubRoutines::dtan() == NULL) { + if (StubRoutines::dtan() == nullptr) { fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan); } else { fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan()); } break; case Interpreter::java_lang_math_log : - if (StubRoutines::dlog() == NULL) { + if (StubRoutines::dlog() == nullptr) { fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog); } else { fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog()); } break; case Interpreter::java_lang_math_log10 : - if (StubRoutines::dlog10() == NULL) { + if (StubRoutines::dlog10() == nullptr) { fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10); } else { fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10()); } break; case Interpreter::java_lang_math_exp : - if (StubRoutines::dexp() == NULL) { + if (StubRoutines::dexp() == nullptr) { fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp); } else { fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp()); } break; case Interpreter::java_lang_math_pow : - if (StubRoutines::dpow() == NULL) { + if (StubRoutines::dpow() == 
nullptr) { fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow); } else { fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow()); @@ -292,7 +292,7 @@ void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpr break; default: ShouldNotReachHere(); - fn = NULL; // unreachable + fn = nullptr; // unreachable } __ mov(rscratch1, fn); __ blr(rscratch1); @@ -342,7 +342,7 @@ address TemplateInterpreterGenerator::generate_abstract_entry(void) { // abstract method entry - // pop return address, reset last_sp to NULL + // pop return address, reset last_sp to null __ empty_expression_stack(); __ restore_bcp(); // bcp must be correct for exception handler (was destroyed) __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) @@ -427,7 +427,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() { address TemplateInterpreterGenerator::generate_exception_handler_common( const char* name, const char* message, bool pass_oop) { - assert(!pass_oop || message == NULL, "either oop or message but not both"); + assert(!pass_oop || message == nullptr, "either oop or message but not both"); address entry = __ pc(); if (pass_oop) { // object is at TOS @@ -444,9 +444,9 @@ address TemplateInterpreterGenerator::generate_exception_handler_common( create_klass_exception), c_rarg1, c_rarg2); } else { - // kind of lame ExternalAddress can't take NULL because + // kind of lame ExternalAddress can't take null because // external_word_Relocation will assert. 
- if (message != NULL) { + if (message != nullptr) { __ lea(c_rarg2, Address((address)message)); } else { __ mov(c_rarg2, NULL_WORD); @@ -465,7 +465,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, // Restore stack bottom in case i2c adjusted stack __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); - // and NULL it as marker that esp is now tos until next java call + // and null it as marker that esp is now tos until next java call __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); __ restore_bcp(); __ restore_locals(); @@ -521,7 +521,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, // Restore expression stack pointer __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); - // NULL last_sp until next java call + // null last_sp until next java call __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); #if INCLUDE_JVMCI @@ -560,7 +560,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, __ bind(L); } - if (continuation == NULL) { + if (continuation == nullptr) { __ dispatch_next(state, step); } else { __ jump_to_entry(continuation); @@ -653,8 +653,8 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) // InterpreterRuntime::frequency_counter_overflow takes two // arguments, the first (thread) is passed by call_VM, the second // indicates if the counter overflow occurs at a backwards branch - // (NULL bcp). We pass zero for it. The call returns the address - // of the verified entry point for the method or NULL if the + // (null bcp). We pass zero for it. The call returns the address + // of the verified entry point for the method or null if the // compilation did not complete (either went background or bailed // out). 
__ mov(c_rarg1, 0); @@ -746,7 +746,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) { // Note: the restored frame is not necessarily interpreted. // Use the shared runtime version of the StackOverflowError. - assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated"); + assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated"); __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry())); // all done with frame size check @@ -796,7 +796,7 @@ void TemplateInterpreterGenerator::lock_method() { { Label L; __ cbnz(r0, L); - __ stop("synchronization object is NULL"); + __ stop("synchronization object is null"); __ bind(L); } #endif // ASSERT @@ -934,7 +934,7 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) { Label slow_path; const Register local_0 = c_rarg0; - // Check if local 0 != NULL + // Check if local 0 != null // If the receiver is null then it is OK to jump to the slow path. __ ldr(local_0, Address(esp, 0)); __ cbz(local_0, slow_path); @@ -1894,7 +1894,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() { __ br(Assembler::NE, L_done); // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call. - // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL. + // Detect such a case in the InterpreterRuntime function and return the member name argument, or null. __ ldr(c_rarg0, Address(rlocals, 0)); __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp); @@ -2061,7 +2061,7 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) { // The run-time runtime saves the right registers, depending on // the tosca in-state for the given template. 
- assert(Interpreter::trace_code(t->tos_in()) != NULL, + assert(Interpreter::trace_code(t->tos_in()) != nullptr, "entry must have been generated"); __ bl(Interpreter::trace_code(t->tos_in())); __ reinit_heapbase(); diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp index 20177d8d49e..d6819b5d06e 100644 --- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp @@ -139,8 +139,8 @@ static Assembler::Condition j_not(TemplateTable::Condition cc) { // Miscellaneous helper routines -// Store an oop (or NULL) at the Address described by obj. -// If val == noreg this means store a NULL +// Store an oop (or null) at the Address described by obj. +// If val == noreg this means store a null static void do_oop_store(InterpreterMacroAssembler* _masm, Address dst, Register val, @@ -414,7 +414,7 @@ void TemplateTable::fast_aldc(LdcType type) __ resolve_oop_handle(tmp, r5, rscratch2); __ cmpoop(result, tmp); __ br(Assembler::NE, notNull); - __ mov(result, 0); // NULL object reference + __ mov(result, 0); // null object reference __ bind(notNull); } @@ -1109,7 +1109,7 @@ void TemplateTable::aastore() { index_check(r3, r2); // kills r1 __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop); - // do array store check - check for NULL value first + // do array store check - check for null value first __ cbz(r0, is_null); // Move subklass into r1 @@ -1137,11 +1137,11 @@ void TemplateTable::aastore() { do_oop_store(_masm, element_address, r0, IS_ARRAY); __ b(done); - // Have a NULL in r0, r3=array, r2=index. Store NULL at ary[idx] + // Have a null in r0, r3=array, r2=index. 
Store null at ary[idx] __ bind(is_null); __ profile_null_seen(r2); - // Store a NULL + // Store a null do_oop_store(_masm, element_address, noreg, IS_ARRAY); // Pop stack arguments @@ -1747,12 +1747,6 @@ void TemplateTable::float_cmp(bool is_float, int unordered_result) void TemplateTable::branch(bool is_jsr, bool is_wide) { - // We might be moving to a safepoint. The thread which calls - // Interpreter::notice_safepoints() will effectively flush its cache - // when it makes a system call, but we need to do something to - // ensure that we see the changed dispatch table. - __ membar(MacroAssembler::LoadLoad); - __ profile_taken_branch(r0, r1); const ByteSize be_offset = MethodCounters::backedge_counter_offset() + InvocationCounter::counter_offset(); @@ -1870,7 +1864,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) r2); __ load_unsigned_byte(r1, Address(rbcp, 0)); // restore target bytecode - // r0: osr nmethod (osr ok) or NULL (osr not possible) + // r0: osr nmethod (osr ok) or null (osr not possible) // w1: target bytecode // r2: scratch __ cbz(r0, dispatch); // test result -- no osr if null @@ -1968,12 +1962,6 @@ void TemplateTable::if_acmp(Condition cc) void TemplateTable::ret() { transition(vtos, vtos); - // We might be moving to a safepoint. The thread which calls - // Interpreter::notice_safepoints() will effectively flush its cache - // when it makes a system call, but we need to do something to - // ensure that we see the changed dispatch table. 
- __ membar(MacroAssembler::LoadLoad); - locals_index(r1); __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp __ profile_ret(r1, r2); @@ -2286,7 +2274,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no, if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) { __ load_resolved_method_at_index(byte_no, temp, Rcache); __ load_method_holder(temp, temp); - __ clinit_barrier(temp, rscratch1, NULL, &clinit_barrier_slow); + __ clinit_barrier(temp, rscratch1, nullptr, &clinit_barrier_slow); } } @@ -2445,12 +2433,12 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index, __ lea(c_rarg2, Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset()))); if (is_static) { - __ mov(c_rarg1, zr); // NULL object reference + __ mov(c_rarg1, zr); // null object reference } else { __ ldr(c_rarg1, at_tos()); // get object pointer without popping it __ verify_oop(c_rarg1); } - // c_rarg1: object pointer or NULL + // c_rarg1: object pointer or null // c_rarg2: cache entry pointer // c_rarg3: jvalue object on the stack __ call_VM(noreg, CAST_FROM_FN_PTR(address, @@ -2698,7 +2686,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is __ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset)); // object (tos) __ mov(c_rarg3, esp); - // c_rarg1: object pointer set up above (NULL if static) + // c_rarg1: object pointer set up above (null if static) // c_rarg2: cache entry pointer // c_rarg3: jvalue object on the stack __ call_VM(noreg, @@ -3695,7 +3683,7 @@ void TemplateTable::checkcast() __ bind(ok_is_subtype); __ mov(r0, r3); // Restore object in r3 - // Collect counts on whether this test sees NULLs a lot or not. + // Collect counts on whether this test sees nulls a lot or not. 
if (ProfileInterpreter) { __ b(done); __ bind(is_null); @@ -3748,7 +3736,7 @@ void TemplateTable::instanceof() { __ bind(ok_is_subtype); __ mov(r0, 1); - // Collect counts on whether this test sees NULLs a lot or not. + // Collect counts on whether this test sees nulls a lot or not. if (ProfileInterpreter) { __ b(done); __ bind(is_null); @@ -3757,8 +3745,8 @@ void TemplateTable::instanceof() { __ bind(is_null); // same as 'done' } __ bind(done); - // r0 = 0: obj == NULL or obj is not an instanceof the specified klass - // r0 = 1: obj != NULL and obj is an instanceof the specified klass + // r0 = 0: obj == nullptr or obj is not an instanceof the specified klass + // r0 = 1: obj != nullptr and obj is an instanceof the specified klass } //----------------------------------------------------------------------------- @@ -3818,7 +3806,7 @@ void TemplateTable::monitorenter() { transition(atos, vtos); - // check for NULL object + // check for null object __ null_check(r0); const Address monitor_block_top( @@ -3830,7 +3818,7 @@ void TemplateTable::monitorenter() Label allocated; // initialize entry pointer - __ mov(c_rarg1, zr); // points to free slot or NULL + __ mov(c_rarg1, zr); // points to free slot or null // find a free slot in the monitor block (result in c_rarg1) { @@ -3921,7 +3909,7 @@ void TemplateTable::monitorexit() { transition(atos, vtos); - // check for NULL object + // check for null object __ null_check(r0); const Address monitor_block_top( diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp index 7cddd6dff1f..994e32c7ad0 100644 --- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp @@ -591,7 +591,7 @@ static bool check_info_file(const char* fpath, fclose(fp); return true; } - if (virt2 != NULL && strcasestr(line, virt2) != 0) { + if (virt2 != nullptr && strcasestr(line, virt2) != 0) { Abstract_VM_Version::_detected_virtualization = vt2; fclose(fp); 
return true; @@ -609,7 +609,7 @@ void VM_Version::check_virtualizations() { if (check_info_file(pname_file, "KVM", KVM, "VMWare", VMWare)) { return; } - check_info_file(tname_file, "Xen", XenPVHVM, NULL, NoDetectedVirtualization); + check_info_file(tname_file, "Xen", XenPVHVM, nullptr, NoDetectedVirtualization); #endif } diff --git a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp index acef8d21abc..2cf3e68eebc 100644 --- a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -51,9 +51,9 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. const int stub_code_length = code_size_limit(true); VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index); - // Can be NULL if there is no free space in the code cache. - if (s == NULL) { - return NULL; + // Can be null if there is no free space in the code cache. + if (s == nullptr) { + return nullptr; } // Count unused bytes in instruction sequences of variable size. @@ -118,7 +118,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { __ cbz(rmethod, L); __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset())); __ cbnz(rscratch1, L); - __ stop("Vtable entry is NULL"); + __ stop("Vtable entry is null"); __ bind(L); } #endif // PRODUCT @@ -141,9 +141,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. 
const int stub_code_length = code_size_limit(false); VtableStub* s = new(stub_code_length) VtableStub(false, itable_index); - // Can be NULL if there is no free space in the code cache. - if (s == NULL) { - return NULL; + // Can be null if there is no free space in the code cache. + if (s == nullptr) { + return nullptr; } // Count unused bytes in instruction sequences of variable size. @@ -241,7 +241,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { // We force resolving of the call site by jumping to the "handle // wrong method" stub, and so let the interpreter runtime do all the // dirty work. - assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order"); + assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order"); __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); masm->flush(); diff --git a/src/hotspot/cpu/arm/continuationFreezeThaw_arm.inline.hpp b/src/hotspot/cpu/arm/continuationFreezeThaw_arm.inline.hpp index 1cdb89de2e3..29af5681a67 100644 --- a/src/hotspot/cpu/arm/continuationFreezeThaw_arm.inline.hpp +++ b/src/hotspot/cpu/arm/continuationFreezeThaw_arm.inline.hpp @@ -70,10 +70,6 @@ template frame ThawBase::new_stack_frame(const frame& hf, frame& return frame(); } -inline void ThawBase::set_interpreter_frame_bottom(const frame& f, intptr_t* bottom) { - Unimplemented(); -} - inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) { Unimplemented(); } diff --git a/src/hotspot/cpu/arm/downcallLinker_arm.cpp b/src/hotspot/cpu/arm/downcallLinker_arm.cpp index 37b6f43ac14..baee7d7a043 100644 --- a/src/hotspot/cpu/arm/downcallLinker_arm.cpp +++ b/src/hotspot/cpu/arm/downcallLinker_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Red Hat, Inc. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -33,7 +33,8 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature, const GrowableArray& input_registers, const GrowableArray& output_registers, bool needs_return_buffer, - int captured_state_mask) { + int captured_state_mask, + bool needs_transition) { Unimplemented(); return nullptr; } diff --git a/src/hotspot/cpu/arm/foreignGlobals_arm.cpp b/src/hotspot/cpu/arm/foreignGlobals_arm.cpp index 5438cbe5cd6..d3a318536bd 100644 --- a/src/hotspot/cpu/arm/foreignGlobals_arm.cpp +++ b/src/hotspot/cpu/arm/foreignGlobals_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -29,6 +29,10 @@ class MacroAssembler; +bool ForeignGlobals::is_foreign_linker_supported() { + return false; +} + const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) { Unimplemented(); return {}; diff --git a/src/hotspot/cpu/arm/interp_masm_arm.cpp b/src/hotspot/cpu/arm/interp_masm_arm.cpp index 84dfbdc5fa4..b3c4e85474a 100644 --- a/src/hotspot/cpu/arm/interp_masm_arm.cpp +++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp @@ -290,6 +290,27 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset( ldr(Rklass, Address(Rklass, Array::base_offset_in_bytes())); } +void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Register index) { + // Get index out of bytecode pointer, get_cache_entry_pointer_at_bcp + assert_different_registers(cache, index, Rtemp); + + get_index_at_bcp(index, 1, Rtemp, sizeof(u4)); + + // load constant pool cache pointer + ldr(cache, Address(FP, frame::interpreter_frame_cache_offset * wordSize)); + + // Get address of invokedynamic array + ldr(cache, Address(cache, 
in_bytes(ConstantPoolCache::invokedynamic_entries_offset()))); + + // Scale the index to be the entry index * sizeof(ResolvedInvokeDynamicInfo) + // On ARM32 sizeof(ResolvedIndyEntry) is 12, use mul instead of lsl + mov(Rtemp, sizeof(ResolvedIndyEntry)); + mul(index, index, Rtemp); + + add(cache, cache, Array::base_offset_in_bytes()); + add(cache, cache, index); +} + // Generate a subtype check: branch to not_subtype if sub_klass is // not a subtype of super_klass. // Profiling code for the subtype check failure (profile_typecheck_failed) diff --git a/src/hotspot/cpu/arm/interp_masm_arm.hpp b/src/hotspot/cpu/arm/interp_masm_arm.hpp index 067d50a8360..9e2c3d58a22 100644 --- a/src/hotspot/cpu/arm/interp_masm_arm.hpp +++ b/src/hotspot/cpu/arm/interp_masm_arm.hpp @@ -69,7 +69,6 @@ class InterpreterMacroAssembler: public MacroAssembler { inline void check_extended_sp(Register tmp) {} inline void check_no_cached_stack_top(Register tmp) {} - void save_bcp() { str(Rbcp, Address(FP, frame::interpreter_frame_bcp_offset * wordSize)); } void restore_bcp() { ldr(Rbcp, Address(FP, frame::interpreter_frame_bcp_offset * wordSize)); } void restore_locals() { @@ -103,6 +102,8 @@ class InterpreterMacroAssembler: public MacroAssembler { // load cpool->resolved_klass_at(index); Rtemp is corrupted upon return void load_resolved_klass_at_offset(Register Rcpool, Register Rindex, Register Rklass); + void load_resolved_indy_entry(Register cache, Register index); + void pop_ptr(Register r); void pop_i(Register r = R0_tos); void pop_l(Register lo = R0_tos_lo, Register hi = R1_tos_hi); diff --git a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp index f42c53d0f5b..d4e8c1f8859 100644 --- a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp +++ b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp @@ -364,24 +364,32 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, const Register 
Rcache = R2_tmp; const Register Rindex = R3_tmp; - __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size); - __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); - __ ldrb(Rtemp, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); - __ check_stack_top(); - __ add(Rstack_top, Rstack_top, AsmOperand(Rtemp, lsl, Interpreter::logStackElementSize)); + if (index_size == sizeof(u4)) { + __ load_resolved_indy_entry(Rcache, Rindex); + __ ldrh(Rcache, Address(Rcache, in_bytes(ResolvedIndyEntry::num_parameters_offset()))); + __ check_stack_top(); + __ add(Rstack_top, Rstack_top, AsmOperand(Rcache, lsl, Interpreter::logStackElementSize)); + } else { + // Pop N words from the stack + __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size); + + __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); + __ ldrb(Rtemp, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); + __ check_stack_top(); + __ add(Rstack_top, Rstack_top, AsmOperand(Rtemp, lsl, Interpreter::logStackElementSize)); + } __ convert_retval_to_tos(state); - __ check_and_handle_popframe(); - __ check_and_handle_earlyret(); + __ check_and_handle_popframe(); + __ check_and_handle_earlyret(); __ dispatch_next(state, step); return entry; } - address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) { address entry = __ pc(); diff --git a/src/hotspot/cpu/arm/templateTable_arm.cpp b/src/hotspot/cpu/arm/templateTable_arm.cpp index 99c814bab90..8b826d55244 100644 --- a/src/hotspot/cpu/arm/templateTable_arm.cpp +++ b/src/hotspot/cpu/arm/templateTable_arm.cpp @@ -2608,6 +2608,66 @@ void TemplateTable::load_field_cp_cache_entry(Register Rcache, } } +// The rmethod register is input and overwritten to be the adapter method for the +// indy call. Link Register (lr) is set to the return address for the adapter and +// an appendix may be pushed to the stack. 
Registers R1-R3, Rtemp (R12) are clobbered +void TemplateTable::load_invokedynamic_entry(Register method) { + // setup registers + const Register appendix = R1; + const Register cache = R2_tmp; + const Register index = R3_tmp; + assert_different_registers(method, appendix, cache, index); + + __ save_bcp(); + + Label resolved; + __ load_resolved_indy_entry(cache, index); + // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in() + __ ldr(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset()))); + TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), noreg, true); + // Compare the method to zero + __ cbnz(method, resolved); + + Bytecodes::Code code = bytecode(); + + // Call to the interpreter runtime to resolve invokedynamic + address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache); + __ mov(R1, code); // this is essentially Bytecodes::_invokedynamic, call_VM requires R1 + __ call_VM(noreg, entry, R1); + // Update registers with resolved info + __ load_resolved_indy_entry(cache, index); + // Load-acquire the adapter method to match store-release in ResolvedIndyEntry::fill_in() + __ ldr(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset()))); + TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), noreg, true); + +#ifdef ASSERT + __ cbnz(method, resolved); + __ stop("Should be resolved by now"); +#endif // ASSERT + __ bind(resolved); + + Label L_no_push; + // Check if there is an appendix + __ ldrb(index, Address(cache, in_bytes(ResolvedIndyEntry::flags_offset()))); + __ tbz(index, ResolvedIndyEntry::has_appendix_shift, L_no_push); + // Get appendix + __ ldrh(index, Address(cache, in_bytes(ResolvedIndyEntry::resolved_references_index_offset()))); + // Push the appendix as a trailing parameter + // since the parameter_size includes it. 
+ __ load_resolved_reference_at_index(appendix, index); + __ verify_oop(appendix); + __ push(appendix); // push appendix (MethodType, CallSite, etc.) + __ bind(L_no_push); + + // compute return type + __ ldrb(index, Address(cache, in_bytes(ResolvedIndyEntry::result_type_offset()))); + // load return address + { + const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); + __ mov_address(Rtemp, table_addr); + __ ldr(LR, Address(Rtemp, index, lsl, Interpreter::logStackElementSize)); + } +} // Blows all volatile registers: R0-R3, Rtemp, LR. void TemplateTable::load_invoke_cp_cache_entry(int byte_no, @@ -2616,7 +2676,7 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no, Register flags, bool is_invokevirtual, bool is_invokevfinal/*unused*/, - bool is_invokedynamic) { + bool is_invokedynamic /*unused*/) { // setup registers const Register cache = R2_tmp; const Register index = R3_tmp; @@ -2639,7 +2699,7 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no, const int index_offset = in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()); - size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2)); + size_t index_size = sizeof(u2); resolve_cache_and_index(byte_no, cache, index, index_size); __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord)); __ ldr(method, Address(temp_reg, method_offset)); @@ -3565,7 +3625,7 @@ void TemplateTable::prepare_invoke(int byte_no, load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); // maybe push extra argument - if (is_invokedynamic || is_invokehandle) { + if (is_invokehandle) { Label L_no_push; __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push); __ mov(temp, index); @@ -3810,7 +3870,7 @@ void TemplateTable::invokedynamic(int byte_no) { const Register Rcallsite = R4_tmp; const Register R5_method = R5_tmp; // can't reuse Rmethod! 
- prepare_invoke(byte_no, R5_method, Rcallsite); + load_invokedynamic_entry(R5_method); // Rcallsite: CallSite object (from cpool->resolved_references[f1]) // Rmethod: MH.linkToCallSite method (from f2) diff --git a/src/hotspot/cpu/ppc/abstractInterpreter_ppc.cpp b/src/hotspot/cpu/ppc/abstractInterpreter_ppc.cpp index 3280330f1d0..56f8fce5ce9 100644 --- a/src/hotspot/cpu/ppc/abstractInterpreter_ppc.cpp +++ b/src/hotspot/cpu/ppc/abstractInterpreter_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2015 SAP SE. All rights reserved. + * Copyright (c) 2015, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,7 +57,7 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) { // Note: This returns the conservative size assuming maximum alignment. int AbstractInterpreter::size_top_interpreter_activation(Method* method) { const int max_alignment_size = 2; - const int abi_scratch = frame::abi_reg_args_size; + const int abi_scratch = frame::top_ijava_frame_abi_size; return method->max_locals() + method->max_stack() + frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch; } @@ -76,8 +76,8 @@ int AbstractInterpreter::size_activation(int max_stack, // in TemplateInterpreterGenerator::generate_fixed_frame. assert(Interpreter::stackElementWords == 1, "sanity"); const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize; - const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) : - (frame::abi_minframe_size / Interpreter::stackElementSize); + const int abi_scratch = is_top_frame ? 
(frame::top_ijava_frame_abi_size / Interpreter::stackElementSize) : + (frame::parent_ijava_frame_abi_size / Interpreter::stackElementSize); const int size = max_stack + (callee_locals - callee_params) + @@ -122,20 +122,20 @@ void AbstractInterpreter::layout_activation(Method* method, bool is_top_frame, bool is_bottom_frame) { - const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) : - (frame::abi_minframe_size / Interpreter::stackElementSize); + const int abi_scratch = is_top_frame ? (frame::top_ijava_frame_abi_size / Interpreter::stackElementSize) : + (frame::parent_ijava_frame_abi_size / Interpreter::stackElementSize); intptr_t* locals_base = (caller->is_interpreted_frame()) ? caller->interpreter_frame_esp() + caller_actual_parameters : - caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize); + caller->sp() + method->max_locals() - 1 + (frame::java_abi_size / Interpreter::stackElementSize); intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize; intptr_t* monitor = monitor_base - (moncount * frame::interpreter_frame_monitor_size()); intptr_t* esp_base = monitor - 1; intptr_t* esp = esp_base - tempcount - popframe_extra_args; intptr_t* sp = (intptr_t *) (((intptr_t) (esp_base - callee_locals_count + callee_param_count - method->max_stack()- abi_scratch)) & -StackAlignmentInBytes); - intptr_t* sender_sp = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize; - intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize; + intptr_t* sender_sp = caller->sp() + (frame::parent_ijava_frame_abi_size - frame::top_ijava_frame_abi_size) / Interpreter::stackElementSize; + intptr_t* top_frame_sp = is_top_frame ? 
sp : sp + (frame::parent_ijava_frame_abi_size - frame::top_ijava_frame_abi_size) / Interpreter::stackElementSize; interpreter_frame->interpreter_frame_set_method(method); interpreter_frame->interpreter_frame_set_mirror(method->method_holder()->java_mirror()); diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp index dd78a3d492f..4f48f7efe50 100644 --- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2022 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1834,7 +1834,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // Set up the arraycopy stub information. ArrayCopyStub* stub = op->stub(); - const int frame_resize = frame::abi_reg_args_size - sizeof(frame::jit_abi); // C calls need larger frame. + const int frame_resize = frame::native_abi_reg_args_size - sizeof(frame::java_abi); // C calls need larger frame. // Always do stub if no type information is available. It's ok if // the known type isn't loaded since the code sanity checks diff --git a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp index b3b68034e7a..348de609901 100644 --- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2018 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -153,7 +153,7 @@ static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs]; static int frame_size_in_bytes = -1; static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) { - assert(frame_size_in_bytes > frame::abi_reg_args_size, "init"); + assert(frame_size_in_bytes > frame::native_abi_reg_args_size, "init"); sasm->set_frame_size(frame_size_in_bytes / BytesPerWord); int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); OopMap* oop_map = new OopMap(frame_size_in_slots, 0); @@ -241,7 +241,7 @@ static void restore_live_registers(StubAssembler* sasm, Register result1, Regist void Runtime1::initialize_pd() { int i; - int sp_offset = frame::abi_reg_args_size; + int sp_offset = frame::native_abi_reg_args_size; for (i = 0; i < FrameMap::nof_cpu_regs; i++) { Register r = as_Register(i); @@ -487,9 +487,9 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { __ mflr(R0); __ std(R0, _abi0(lr), R1_SP); - __ push_frame(frame::abi_reg_args_size, R0); // Empty dummy frame (no callee-save regs). - sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord); - OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0); + __ push_frame(frame::native_abi_reg_args_size, R0); // Empty dummy frame (no callee-save regs). 
+ sasm->set_frame_size(frame::native_abi_reg_args_size / BytesPerWord); + OopMap* oop_map = new OopMap(frame::native_abi_reg_args_size / sizeof(jint), 0); int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R3_ARG1); oop_maps = new OopMapSet(); @@ -732,9 +732,9 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { __ set_info("unimplemented entry", dont_gc_arguments); __ mflr(R0); __ std(R0, _abi0(lr), R1_SP); - __ push_frame(frame::abi_reg_args_size, R0); // empty dummy frame - sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord); - OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0); + __ push_frame(frame::native_abi_reg_args_size, R0); // empty dummy frame + sasm->set_frame_size(frame::native_abi_reg_args_size / BytesPerWord); + OopMap* oop_map = new OopMap(frame::native_abi_reg_args_size / sizeof(jint), 0); __ load_const_optimized(R4_ARG2, (int)id); int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R4_ARG2); diff --git a/src/hotspot/cpu/ppc/continuationEntry_ppc.hpp b/src/hotspot/cpu/ppc/continuationEntry_ppc.hpp index ae912b15cee..5928c2ef254 100644 --- a/src/hotspot/cpu/ppc/continuationEntry_ppc.hpp +++ b/src/hotspot/cpu/ppc/continuationEntry_ppc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 SAP SE. All rights reserved. + * Copyright (c) 2022, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ class ContinuationEntryPD { // This is needed to position the ContinuationEntry at the unextended sp of the entry frame - frame::abi_reg_args _abi; + frame::native_abi_reg_args _abi; }; #endif // CPU_PPC_CONTINUATIONENTRY_PPC_HPP diff --git a/src/hotspot/cpu/ppc/continuationFreezeThaw_ppc.inline.hpp b/src/hotspot/cpu/ppc/continuationFreezeThaw_ppc.inline.hpp index 0bef7b68d07..c72a4c8a54d 100644 --- a/src/hotspot/cpu/ppc/continuationFreezeThaw_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/continuationFreezeThaw_ppc.inline.hpp @@ -84,9 +84,9 @@ inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, co assert(f.fp() > (intptr_t*)f.interpreter_frame_esp(), ""); // There is alignment padding between vfp and f's locals array in the original - // frame, therefore we cannot use it to relativize the locals pointer. - // This line can be changed into an assert when we have fixed the "frame padding problem", see JDK-8300197 - *hf.addr_at(ijava_idx(locals)) = frame::metadata_words + f.interpreter_frame_method()->max_locals() - 1; + // frame, because we freeze the padding (see recurse_freeze_interpreted_frame) + // in order to keep the same relativized locals pointer, we don't need to change it here. + relativize_one(vfp, hfp, ijava_idx(monitors)); relativize_one(vfp, hfp, ijava_idx(esp)); relativize_one(vfp, hfp, ijava_idx(top_frame_sp)); @@ -131,7 +131,7 @@ inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) { // // Caller on entry New frame with resized Caller // -// | frame::abi_minframe | | | +// | frame::java_abi | | | // | |<- FP of caller | Caller's SP |<- FP of caller // ========================== ========================== // | ijava_state | | ijava_state | @@ -141,9 +141,9 @@ inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) { // | : | | | : : | // | Pn |<- unext. SP | | : Pn |<- unext. 
SP // |------------------------| + metadata overlap | : | + metadata -// | frame::abi_minframe | | | Lm | +// | frame::java_abi | | | Lm | // | (metadata_words_at_top)|<- SP == unext. SP v |------------------------|<- unextended SP of caller (1) -// ========================== of caller ----- | frame::abi_minframe | +// ========================== of caller ----- | frame::java_abi | // | (metadata_words_at_top)|<- new SP of caller / FP of new frame // overlap = stack_argsize(f) ========================== ^ // + frame::metadata_words_at_top | ijava_state | | @@ -154,7 +154,7 @@ inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) { // | : | | // | Growth | | Pi | v // v v |------------------------| --- -// | frame::abi_minframe | +// | frame::java_abi | // | (metadata_words_at_top)|<- unextended SP / // ========================== SP of new frame // ### Compiled Caller: No Overlap @@ -164,13 +164,13 @@ inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) { // // Caller on entry New frame with resized Caller // -// | frame::abi_minframe | | | +// | frame::java_abi | | | // | (metadata_words_at_top)|<- FP of caller | Caller's SP |<- FP of caller // ========================== ========================== // | | | | // | | | | // |------------------------| |------------------------| -// | frame::abi_minframe | | frame::abi_minframe | +// | frame::java_abi | | frame::java_abi | // | (metadata_words_at_top)|<- SP == unext. SP | (metadata_words_at_top)|<- unext. 
SP of caller // ========================== of caller |------------------------| // | L0 aka P0 | @@ -178,7 +178,7 @@ inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) { // | : Pn | // overlap = 0 | Lm | // |------------------------| -// f is the frame to be relocated on the heap | frame::abi_minframe | +// f is the frame to be relocated on the heap | frame::java_abi | // | (metadata_words_at_top)|<- new SP of caller / FP of new frame // ========================== ^ // | ijava_state | | @@ -189,7 +189,7 @@ inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) { // | : | | // | Pi | v // |------------------------| --- -// | frame::abi_minframe | +// | frame::java_abi | // | (metadata_words_at_top)|<- unextended SP / // ========================== SP of new frame // @@ -209,7 +209,7 @@ inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) { // // Caller on entry New frame with resized Caller // -// | frame::abi_minframe | | frame::abi_minframe | +// | frame::java_abi | | frame::java_abi | // | (metadata_words_at_top)|<- FP of caller | (metadata_words_at_top)|<- FP of caller // ========================== ========================== // | ijava_state | | ijava_state | @@ -219,19 +219,19 @@ inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) { // | : | | : | // | Pn |<- unext. SP | Pn |<- unext. SP // |------------------------| + metadata |------------------------| + metadata -// | frame::abi_minframe | | frame::abi_minframe | +// | frame::java_abi | | frame::java_abi | // | (metadata_words_at_top)|<- SP == unext. 
SP | (metadata_words_at_top)|<- unextended SP of caller (1) // ========================== of caller |------------------------| // | Stack Args | // overlap = 0 | (if any) | // |------------------------| -// f is the frame to be relocated on the heap | frame::abi_minframe | +// f is the frame to be relocated on the heap | frame::java_abi | // | (metadata_words_at_top)|<- new SP of caller / FP of new frame // ========================== // | | // | Growth | | | // v v |------------------------| -// | frame::abi_minframe | +// | frame::java_abi | // | (metadata_words_at_top)|<- SP == unext. SP of new frame // ========================== // @@ -239,7 +239,7 @@ inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) { // // Caller on entry New frame with resized Caller // -// | frame::abi_minframe | | frame::abi_minframe | +// | frame::java_abi | | frame::java_abi | // | (metadata_words_at_top)|<- FP of caller | (metadata_words_at_top)|<- FP of caller // ========================== ========================== // | | | | @@ -248,13 +248,13 @@ inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) { // | Stack Args | ^ | Stack Args | // | (if any) | | | (if any) | // |------------------------| overlap |------------------------| -// | frame::abi_minframe | | | frame::abi_minframe | +// | frame::java_abi | | | frame::java_abi | // | (metadata_words_at_top)|<- SP == unext. SP v | (metadata_words_at_top)|<- SP == unext. SP of caller // ========================== of caller ----- ========================== / FP of new frame // | | // overlap = stack_argsize(f) | | // + frame::metadata_words_at_top |------------------------| -// | frame::abi_minframe | +// | frame::java_abi | // Where f is the frame to be relocated on the heap. | (metadata_words_at_top)|<- SP == unext. SP of new frame // See also StackChunkFrameStream::frame_size(). 
========================== // @@ -264,7 +264,7 @@ frame FreezeBase::new_heap_frame(frame& f, frame& caller) { intptr_t *sp, *fp; if (FKind::interpreted) { - int locals = f.interpreter_frame_method()->max_locals(); + intptr_t locals_offset = *f.addr_at(ijava_idx(locals)); // If the caller.is_empty(), i.e. we're freezing into an empty chunk, then we set // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp // See also comment on StackChunkFrameStream::interpreter_frame_size() @@ -272,7 +272,7 @@ frame FreezeBase::new_heap_frame(frame& f, frame& caller) { (caller.is_interpreted_frame() || caller.is_empty()) ? ContinuationHelper::InterpretedFrame::stack_argsize(f) + frame::metadata_words_at_top : 0; - fp = caller.unextended_sp() + overlap - locals - frame::metadata_words_at_top; + fp = caller.unextended_sp() - 1 - locals_offset + overlap; // esp points one slot below the last argument intptr_t* x86_64_like_unextended_sp = f.interpreter_frame_esp() + 1 - frame::metadata_words_at_top; sp = fp - (f.fp() - x86_64_like_unextended_sp); @@ -286,7 +286,7 @@ frame FreezeBase::new_heap_frame(frame& f, frame& caller) { frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */); // frame_top() and frame_bottom() read these before relativize_interpreted_frame_metadata() is called - *hf.addr_at(ijava_idx(locals)) = frame::metadata_words + locals - 1; + *hf.addr_at(ijava_idx(locals)) = locals_offset; *hf.addr_at(ijava_idx(esp)) = f.interpreter_frame_esp() - f.fp(); return hf; } else { @@ -346,7 +346,7 @@ inline void ThawBase::prefetch_chunk_pd(void* start, int size) { template inline void Thaw::patch_caller_links(intptr_t* sp, intptr_t* bottom) { for (intptr_t* callers_sp; sp < bottom; sp = callers_sp) { - address pc = (address)((frame::abi_minframe*) sp)->lr; + address pc = (address)((frame::java_abi*) sp)->lr; assert(pc != nullptr, ""); // see ThawBase::patch_return() which gets called just before bool is_entry_frame = pc == 
StubRoutines::cont_returnBarrier() || pc == _cont.entryPC(); @@ -357,7 +357,7 @@ inline void Thaw::patch_caller_links(intptr_t* sp, intptr_t* bottom) { callers_sp = sp + cb->frame_size(); } // set the back link - ((frame::abi_minframe*) sp)->callers_sp = (intptr_t) callers_sp; + ((frame::java_abi*) sp)->callers_sp = (intptr_t) callers_sp; } } @@ -376,29 +376,29 @@ inline frame ThawBase::new_entry_frame() { // // | | Non-Interpreted | | // | |<- bottom Caller |----------------------| -// |----------------------| ^ | frame::abi_minframe |<- unextended SP +// |----------------------| ^ | frame::java_abi |<- unextended SP // | L0 aka P0 | | --- ======================== // | : : | | ^ | L0 aka P0 | // | : Pn | | | | : : | Parameters do // | : | | | | : Pn | not overlap with // | Lm | | | | : | caller! // |----------------------| `fsize` | | : | -// | frame::abi_minframe | | | : | +// | frame::java_abi | | | : | // ======================== | `fsize` + padding | Lm | // | | | |----------------------| // | ijava_state | | | | Opt. Align. Padding | // | | | | |----------------------| -// |----------------------| | | | frame::abi_minframe |<- new SP of caller +// |----------------------| | | | frame::java_abi |<- new SP of caller // | L0 aka P0 | | | ======================== / FP of new frame // | : : | | | | | (aligned) // | : Pn |<- unext. 
SP + metadata | | ijava_state | // | : | | | | | // | Lm | | | |----------------------| // |----------------------| v | | P0 | -// | frame::abi_minframe |<- SP / unextended SP | | : | +// | frame::java_abi |<- SP / unextended SP | | : | // ======================== | | Pi |<- unextended SP + metadata // | |----------------------| -// | Growth | v | frame::abi_minframe |<- unextended SP / SP of new frame +// | Growth | v | frame::java_abi |<- unextended SP / SP of new frame // v v --- ======================== (not yet aligned(1)) // // @@ -415,13 +415,13 @@ inline frame ThawBase::new_entry_frame() { // | : | | | | : : | // | Pn |<- unextended SP overlap | | : Pn |<- unextended SP // |----------------------| + metadata_words_at_top | | | : | + metadata_words_at_top -// | frame::abi_minframe |<- unextended SP v | | : | (unaligned) +// | frame::java_abi |<- unextended SP v | | : | (unaligned) // ======================== / SP of new frame --- | | : | of caller // (not yet aligned(1)) | | Lm | // `fsize` |----------------------| // overlap = stack_argsize(hf) + padding| Opt. Align. Padding | // + frame::metadata_words_at_top | |----------------------| -// | | frame::abi_minframe |<- new SP of caller +// | | frame::java_abi |<- new SP of caller // | ======================== / FP of new frame // | | | (aligned) // | Growth | | | ijava_state | @@ -431,7 +431,7 @@ inline frame ThawBase::new_entry_frame() { // | | : | // | | Pi |<- unextended SP // | |----------------------| + metadata_words_at_top -// v | frame::abi_minframe |<- unextended SP / SP of new frame +// v | frame::java_abi |<- unextended SP / SP of new frame // --- ======================== (not yet aligned(1)) // // @@ -459,20 +459,20 @@ inline frame ThawBase::new_entry_frame() { // g l |----------------------| | | // i e | Stack Args | | | // n r | (if any) | |----------------------| -// a |----------------------| | frame::abi_minframe | -// l | frame::abi_minframe |<- unext. 
SP / SP | (unused) |<- unal.unext.SP +// a |----------------------| | frame::java_abi | +// l | frame::java_abi |<- unext. SP / SP | (unused) |<- unal.unext.SP // - - - ======================== - - - - - - - - - - |----------------------|- - - - - - - - - - - - - - - - - - - - - - - - - - - - // N | | | Opt. Align. Padding | // e | | |----------------------| // w |----------------------| | Stack Args | -// | frame::abi_minframe |<- unext. SP / SP | (if any) | +// | frame::java_abi |<- unext. SP / SP | (if any) | // F ======================== |----------------------| -// r | frame::abi_minframe |<- caller's SP +// r | frame::java_abi |<- caller's SP // a ======================== / new frame's FP // m | | (aligned) // e | | // |----------------------| -// | frame::abi_minframe |<- unext. SP / SP +// | frame::java_abi |<- unext. SP / SP // ======================== // // If the new frame is at the bottom just above the ContinuationEntry frame then the stackargs @@ -507,10 +507,8 @@ template frame ThawBase::new_stack_frame(const frame& hf, frame& frame f(frame_sp, hf.pc(), frame_sp, fp); // we need to set the locals so that the caller of new_stack_frame() can call // ContinuationHelper::InterpretedFrame::frame_bottom - intptr_t offset = *hf.addr_at(ijava_idx(locals)) + padding; - assert((int)offset == hf.interpreter_frame_method()->max_locals() + frame::metadata_words_at_top + padding - 1, ""); - // set relativized locals - *f.addr_at(ijava_idx(locals)) = offset; + // copy relativized locals from the heap frame + *f.addr_at(ijava_idx(locals)) = *hf.addr_at(ijava_idx(locals)); return f; } else { @@ -549,12 +547,6 @@ inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, c derelativize_one(vfp, ijava_idx(top_frame_sp)); } -inline void ThawBase::set_interpreter_frame_bottom(const frame& f, intptr_t* bottom) { - // set relativized locals - // This line can be changed into an assert when we have fixed the "frame padding problem", see JDK-8300197 - 
*f.addr_at(ijava_idx(locals)) = (bottom - 1) - f.fp(); -} - inline void ThawBase::patch_pd(frame& f, const frame& caller) { patch_callee_link(caller, caller.fp()); // Prevent assertion if f gets deoptimized right away before it's fully initialized @@ -582,8 +574,8 @@ inline void ThawBase::patch_pd(frame& f, const frame& caller) { // - Note that unextended SP < SP // is possible on ppc. // -// | Minimal ABI | | Minimal ABI | | Minimal ABI | -// | (frame::abi_minframe)| | (frame::abi_minframe)| | (frame::abi_minframe)| +// | | | | | | +// | (frame::java_abi) | | (frame::java_abi) | | (frame::java_abi) | // | 4 words | | 4 words | | 4 words | // | Caller's SP |<- FP of caller | Caller's SP |<- FP of caller | Caller's SP |<- FP of caller // ======================== (aligned) ======================== ======================== @@ -599,13 +591,13 @@ inline void ThawBase::patch_pd(frame& f, const frame& caller) { // | Reserved Expr. Stack | |----------------------| |----------------------| // | | | Opt. Alignm. Padding | | Opt. Alignm. Padding | // | |<- ConstMethod |----------------------| |----------------------| -// |----------------------| ::_max_stack | Minimal ABI | | Minimal ABI | -// | Opt. Alignm. Padding | | (frame::abi_minframe)| | (frame::abi_minframe)| +// |----------------------| ::_max_stack | | | | +// | Opt. Alignm. 
Padding | | (frame::java_abi) | | (frame::java_abi) | // |----------------------| | 4 words | | 4 words | // | Large ABI | | Caller's SP |<- new SP of caller | Caller's SP |<- SP of caller / // | for C++ calls | ======================== (aligned) ======================== FP of callee -// | (frame::abi_reg_args)| | frame:: | (aligned) -// | | | ijava_state | +// | (frame:: | | frame:: | (aligned) +// | native_abi_reg_args)| | ijava_state | // | | | | // | | |----------------------| // | | | | @@ -621,8 +613,8 @@ inline void ThawBase::patch_pd(frame& f, const frame& caller) { // --------------------> ------------------------> |----------------------| // (ABI, expressions, locals) | Large ABI | // | for C++ calls | -// | (frame::abi_reg_args)| -// | | +// | (frame:: | +// | native_abi_reg_args)| // | Growth | | | // v v | | // | | diff --git a/src/hotspot/cpu/ppc/continuationHelper_ppc.inline.hpp b/src/hotspot/cpu/ppc/continuationHelper_ppc.inline.hpp index f2a1ef828c9..3fb56b6703b 100644 --- a/src/hotspot/cpu/ppc/continuationHelper_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/continuationHelper_ppc.inline.hpp @@ -107,7 +107,7 @@ inline void ContinuationHelper::Frame::patch_pc(const frame& f, address pc) { } // | Minimal ABI | -// | (frame::abi_minframe)| +// | (frame::java_abi) | // | 4 words | // | Caller's SP |<- FP of f's caller // |======================| @@ -124,7 +124,7 @@ inline void ContinuationHelper::Frame::patch_pc(const frame& f, address pc) { // | SP alignment (opt.) | // |----------------------| // | Minimal ABI | -// | (frame::abi_minframe)| +// | (frame::java_abi) | // | 4 words | // | Caller's SP |<- SP of f's caller / FP of f // |======================| @@ -145,7 +145,7 @@ inline void ContinuationHelper::Frame::patch_pc(const frame& f, address pc) { // | SP alignment (opt.) 
| // |----------------------| // | Minimal ABI | -// | (frame::abi_minframe)| +// | (frame::java_abi) | // | 4 words | // | Caller's SP |<- SP of f / FP of f's callee // |======================| diff --git a/src/hotspot/cpu/ppc/downcallLinker_ppc.cpp b/src/hotspot/cpu/ppc/downcallLinker_ppc.cpp index 13679cf6669..0a4c2c31086 100644 --- a/src/hotspot/cpu/ppc/downcallLinker_ppc.cpp +++ b/src/hotspot/cpu/ppc/downcallLinker_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2020 SAP SE. All rights reserved. - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,8 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature, const GrowableArray& input_registers, const GrowableArray& output_registers, bool needs_return_buffer, - int captured_state_mask) { + int captured_state_mask, + bool needs_transition) { Unimplemented(); return nullptr; } diff --git a/src/hotspot/cpu/ppc/foreignGlobals_ppc.cpp b/src/hotspot/cpu/ppc/foreignGlobals_ppc.cpp index b685023656d..cf9904cfae5 100644 --- a/src/hotspot/cpu/ppc/foreignGlobals_ppc.cpp +++ b/src/hotspot/cpu/ppc/foreignGlobals_ppc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 SAP SE. All rights reserved. + * Copyright (c) 2020, 2023, SAP SE. All rights reserved. * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -29,6 +29,10 @@ class MacroAssembler; +bool ForeignGlobals::is_foreign_linker_supported() { + return false; +} + // Stubbed out, implement later const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) { Unimplemented(); diff --git a/src/hotspot/cpu/ppc/frame_ppc.cpp b/src/hotspot/cpu/ppc/frame_ppc.cpp index 0657da07deb..90fcbad8f27 100644 --- a/src/hotspot/cpu/ppc/frame_ppc.cpp +++ b/src/hotspot/cpu/ppc/frame_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2022 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -117,7 +117,7 @@ bool frame::safe_for_sender(JavaThread *thread) { return false; } - abi_minframe* sender_abi = (abi_minframe*) fp; + common_abi* sender_abi = (common_abi*) fp; intptr_t* sender_sp = (intptr_t*) fp; address sender_pc = (address) sender_abi->lr;; @@ -291,7 +291,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const { if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) { return false; } - int min_frame_slots = (abi_minframe_size + ijava_state_size) / sizeof(intptr_t); + int min_frame_slots = (parent_ijava_frame_abi_size + ijava_state_size) / sizeof(intptr_t); if (fp() - min_frame_slots < sp()) { return false; } diff --git a/src/hotspot/cpu/ppc/frame_ppc.hpp b/src/hotspot/cpu/ppc/frame_ppc.hpp index 76a5d27a7a7..a4ec15d5aa0 100644 --- a/src/hotspot/cpu/ppc/frame_ppc.hpp +++ b/src/hotspot/cpu/ppc/frame_ppc.hpp @@ -62,16 +62,16 @@ // ... 
// spill slot for FR // - // ABI_48: + // ABI_MINFRAME: // 0 caller's SP // 8 space for condition register (CR) for next call // 16 space for link register (LR) for next call - // 24 reserved - // 32 reserved + // 24 reserved (ABI_ELFv2 only) + // 32 reserved (ABI_ELFv2 only) // 40 space for TOC (=R2) register for next call // // ABI_REG_ARGS: - // 0 [ABI_48] + // 0 [ABI_MINFRAME] // 48 CARG_1: spill slot for outgoing arg 1. used by next callee. // ... ... // 104 CARG_8: spill slot for outgoing arg 8. used by next callee. @@ -82,11 +82,15 @@ // C frame layout static const int alignment_in_bytes = 16; - // ABI_MINFRAME: - struct abi_minframe { + // Common ABI. On top of all frames, C and Java + struct common_abi { uint64_t callers_sp; - uint64_t cr; //_16 + uint64_t cr; uint64_t lr; + }; + + // ABI_MINFRAME. Used for native C frames. + struct native_abi_minframe : common_abi { #if !defined(ABI_ELFv2) uint64_t reserved1; //_16 uint64_t reserved2; @@ -96,11 +100,7 @@ // aligned to frame::alignment_in_bytes (16) }; - enum { - abi_minframe_size = sizeof(abi_minframe) - }; - - struct abi_reg_args : abi_minframe { + struct native_abi_reg_args : native_abi_minframe { uint64_t carg_1; uint64_t carg_2; //_16 uint64_t carg_3; @@ -113,13 +113,14 @@ }; enum { - abi_reg_args_size = sizeof(abi_reg_args) + native_abi_minframe_size = sizeof(native_abi_minframe), + native_abi_reg_args_size = sizeof(native_abi_reg_args) }; #define _abi0(_component) \ - (offset_of(frame::abi_reg_args, _component)) + (offset_of(frame::native_abi_reg_args, _component)) - struct abi_reg_args_spill : abi_reg_args { + struct native_abi_reg_args_spill : native_abi_reg_args { // additional spill slots uint64_t spill_ret; uint64_t spill_fret; //_16 @@ -127,11 +128,11 @@ }; enum { - abi_reg_args_spill_size = sizeof(abi_reg_args_spill) + native_abi_reg_args_spill_size = sizeof(native_abi_reg_args_spill) }; - #define _abi_reg_args_spill(_component) \ - (offset_of(frame::abi_reg_args_spill, _component)) + #define 
_native_abi_reg_args_spill(_component) \ + (offset_of(frame::native_abi_reg_args_spill, _component)) // non-volatile GPRs: @@ -186,6 +187,10 @@ // Frame layout for the Java template interpreter on PPC64. // + // We differentiate between TOP and PARENT frames. + // TOP frames allow for calling native C code. + // A TOP frame is trimmed to a PARENT frame when calling a Java method. + // // In these figures the stack grows upwards, while memory grows // downwards. Square brackets denote regions possibly larger than // single 64 bit slots. @@ -227,20 +232,23 @@ // [outgoing arguments] // [ENTRY_FRAME_LOCALS] - struct parent_ijava_frame_abi : abi_minframe { + // ABI for every Java frame, compiled and interpreted + struct java_abi : common_abi { + uint64_t toc; }; - enum { - parent_ijava_frame_abi_size = sizeof(parent_ijava_frame_abi) + struct parent_ijava_frame_abi : java_abi { }; #define _parent_ijava_frame_abi(_component) \ (offset_of(frame::parent_ijava_frame_abi, _component)) - struct top_ijava_frame_abi : abi_reg_args { + struct top_ijava_frame_abi : native_abi_reg_args { }; enum { + java_abi_size = sizeof(java_abi), + parent_ijava_frame_abi_size = sizeof(parent_ijava_frame_abi), top_ijava_frame_abi_size = sizeof(top_ijava_frame_abi) }; @@ -318,18 +326,10 @@ // [in_preserve] added / removed by prolog / epilog // - // JIT_ABI (TOP and PARENT) + // For JIT frames we don't differentiate between TOP and PARENT frames. + // Runtime calls go through stubs which push a new frame. - struct jit_abi { - uint64_t callers_sp; - uint64_t cr; - uint64_t lr; - uint64_t toc; - // Nothing to add here! - // NOT ALIGNED to frame::alignment_in_bytes (16). - }; - - struct jit_out_preserve : jit_abi { + struct jit_out_preserve : java_abi { // Nothing to add here!
}; @@ -390,8 +390,8 @@ void mark_not_fully_initialized() const { DEBUG_ONLY(own_abi()->callers_sp = NOT_FULLY_INITIALIZED;) } // Accessors for ABIs - inline abi_minframe* own_abi() const { return (abi_minframe*) _sp; } - inline abi_minframe* callers_abi() const { return (abi_minframe*) _fp; } + inline common_abi* own_abi() const { return (common_abi*) _sp; } + inline common_abi* callers_abi() const { return (common_abi*) _fp; } private: @@ -439,14 +439,14 @@ // normal return address is 1 bundle past PC pc_return_offset = 0, // size, in words, of frame metadata (e.g. pc and link) - metadata_words = sizeof(abi_minframe) >> LogBytesPerWord, + metadata_words = sizeof(java_abi) >> LogBytesPerWord, // size, in words, of metadata at frame bottom, i.e. it is not part of the // caller/callee overlap metadata_words_at_bottom = 0, // size, in words, of frame metadata at the frame top, i.e. it is located // between a callee frame and its stack arguments, where it is part // of the caller/callee overlap - metadata_words_at_top = sizeof(abi_minframe) >> LogBytesPerWord, + metadata_words_at_top = sizeof(java_abi) >> LogBytesPerWord, // size, in words, of frame metadata at the frame top that needs // to be reserved for callee functions in the runtime frame_alignment = 16, diff --git a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp index ac988dcad1b..cd2fd355bbb 100644 --- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2018, 2021 SAP SE. All rights reserved. + * Copyright (c) 2018, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -54,7 +54,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm int spill_slots = 3; if (preserve1 != noreg) { spill_slots++; } if (preserve2 != noreg) { spill_slots++; } - const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes); + const int frame_size = align_up(frame::native_abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes); Label filtered; // Is marking active? @@ -98,7 +98,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register preserve) { int spill_slots = (preserve != noreg) ? 1 : 0; - const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes); + const int frame_size = align_up(frame::native_abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes); __ save_LR_CR(R0); __ push_frame(frame_size, R0); diff --git a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp index 64c32337e1a..84042505089 100644 --- a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2021, 2022 SAP SE. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -411,7 +411,7 @@ class ZSaveLiveRegisters { const int register_save_size = iterate_over_register_mask(ACTION_COUNT_ONLY) * BytesPerWord; _frame_size = align_up(register_save_size, frame::alignment_in_bytes) - + frame::abi_reg_args_size; + + frame::native_abi_reg_args_size; __ save_LR_CR(R0); __ push_frame(_frame_size, R0); diff --git a/src/hotspot/cpu/ppc/globals_ppc.hpp b/src/hotspot/cpu/ppc/globals_ppc.hpp index c33aee3c125..303f06eec55 100644 --- a/src/hotspot/cpu/ppc/globals_ppc.hpp +++ b/src/hotspot/cpu/ppc/globals_ppc.hpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2020 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,7 +56,7 @@ define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES); define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES); define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES); -define_pd_global(bool, VMContinuations, true BIG_ENDIAN_ONLY(&& false)); +define_pd_global(bool, VMContinuations, true); // Use large code-entry alignment. define_pd_global(uintx, CodeCacheSegmentSize, 128); diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp index be1611739c5..8dc0b627d93 100644 --- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp @@ -1171,7 +1171,7 @@ void InterpreterMacroAssembler::call_from_interpreter(Register Rtarget_method, R // to meet the abi scratch requirements. // The max_stack pointer will get restored by means of the GR_Lmax_stack local in // the return entry of the interpreter. 
- addi(Rscratch2, R15_esp, Interpreter::stackElementSize - frame::abi_reg_args_size); + addi(Rscratch2, R15_esp, Interpreter::stackElementSize - frame::top_ijava_frame_abi_size); clrrdi(Rscratch2, Rscratch2, exact_log2(frame::alignment_in_bytes)); // round towards smaller address resize_frame_absolute(Rscratch2, Rscratch2, R0); @@ -2186,7 +2186,7 @@ void InterpreterMacroAssembler::save_interpreter_state(Register scratch) { void InterpreterMacroAssembler::restore_interpreter_state(Register scratch, bool bcp_and_mdx_only, bool restore_top_frame_sp) { ld_ptr(scratch, _abi0(callers_sp), R1_SP); // Load frame pointer. if (restore_top_frame_sp) { - // After thawing the top frame of a continuation we reach here with frame::abi_minframe. + // After thawing the top frame of a continuation we reach here with frame::java_abi. // therefore we have to restore top_frame_sp before the assertion below. assert(!bcp_and_mdx_only, "chose other registers"); Register tfsp = R18_locals; @@ -2211,7 +2211,7 @@ void InterpreterMacroAssembler::restore_interpreter_state(Register scratch, bool { Label Lok; subf(R0, R1_SP, scratch); - cmpdi(CCR0, R0, frame::abi_reg_args_size + frame::ijava_state_size); + cmpdi(CCR0, R0, frame::top_ijava_frame_abi_size + frame::ijava_state_size); bge(CCR0, Lok); stop("frame too small (restore istate)"); bind(Lok); diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp index a27c208eafe..3ca2ed3ff69 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2022 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -730,7 +730,7 @@ void MacroAssembler::clobber_carg_stack_slots(Register tmp) { li(tmp, magic_number); for (int m = 0; m <= 7; m++) { - std(tmp, frame::abi_minframe_size + m * 8, R1_SP); + std(tmp, frame::native_abi_minframe_size + m * 8, R1_SP); } } @@ -976,16 +976,16 @@ void MacroAssembler::push_frame(unsigned int bytes, Register tmp) { } } -// Push a frame of size `bytes' plus abi_reg_args on top. +// Push a frame of size `bytes' plus native_abi_reg_args on top. void MacroAssembler::push_frame_reg_args(unsigned int bytes, Register tmp) { - push_frame(bytes + frame::abi_reg_args_size, tmp); + push_frame(bytes + frame::native_abi_reg_args_size, tmp); } // Setup up a new C frame with a spill area for non-volatile GPRs and // additional space for local variables. void MacroAssembler::push_frame_reg_args_nonvolatiles(unsigned int bytes, Register tmp) { - push_frame(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size, tmp); + push_frame(bytes + frame::native_abi_reg_args_size + frame::spill_nonvolatiles_size, tmp); } // Pop current C frame. diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp index 732d35b33a2..cedc764fdbd 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2021 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -316,7 +316,7 @@ class MacroAssembler: public Assembler { // Push a frame of size `bytes'. No abi space provided. void push_frame(unsigned int bytes, Register tmp); - // Push a frame of size `bytes' plus abi_reg_args on top. 
+ // Push a frame of size `bytes' plus native_abi_reg_args on top. void push_frame_reg_args(unsigned int bytes, Register tmp); // Setup up a new C frame with a spill area for non-volatile GPRs and additional diff --git a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp index eee918e7bb0..923eb44cf17 100644 --- a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp +++ b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2022 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -506,11 +506,11 @@ void trace_method_handle_stub(const char* adaptername, ResourceMark rm; LogStream ls(lt); ls.print_cr("Registers:"); - const int abi_offset = frame::abi_reg_args_size / 8; + const int abi_offset = frame::native_abi_reg_args_size / 8; for (int i = R3->encoding(); i <= R12->encoding(); i++) { Register r = as_Register(i); int count = i - R3->encoding(); - // The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, abi_reg_args_size)). + // The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, native_abi_reg_args_size)). ls.print("%3s=" PTR_FORMAT, r->name(), saved_regs[abi_offset + count]); if ((count + 1) % 4 == 0) { ls.cr(); diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad index bc7f6a3afe6..461c2c161a4 100644 --- a/src/hotspot/cpu/ppc/ppc.ad +++ b/src/hotspot/cpu/ppc/ppc.ad @@ -3799,7 +3799,7 @@ frame %{ // out_preserve_stack_slots for calls to C. Supports the var-args // backing area for register parms. 
// - varargs_C_out_slots_killed(((frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size)); + varargs_C_out_slots_killed(((frame::native_abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size)); // The after-PROLOG location of the return address. Location of // return address specifies a type (REG or STACK) and a number diff --git a/src/hotspot/cpu/ppc/runtime_ppc.cpp b/src/hotspot/cpu/ppc/runtime_ppc.cpp index 7f2d3e627eb..8c3bfd4f37b 100644 --- a/src/hotspot/cpu/ppc/runtime_ppc.cpp +++ b/src/hotspot/cpu/ppc/runtime_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2018 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,7 +77,7 @@ void OptoRuntime::generate_exception_blob() { address start = __ pc(); - int frame_size_in_bytes = frame::abi_reg_args_size; + int frame_size_in_bytes = frame::native_abi_reg_args_size; OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); // Exception pc is 'return address' for stack walker. diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp index 71842f33dfd..9cc2e988815 100644 --- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp +++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2022 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -272,7 +272,7 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble : 0; const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size; const int frame_size_in_bytes = align_up(register_save_size, frame::alignment_in_bytes) - + frame::abi_reg_args_size; + + frame::native_abi_reg_args_size; *out_frame_size_in_bytes = frame_size_in_bytes; const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); @@ -790,7 +790,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt, int i; VMReg reg; // Leave room for C-compatible ABI_REG_ARGS. - int stk = (frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size; + int stk = (frame::native_abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size; int arg = 0; int freg = 0; @@ -1951,7 +1951,7 @@ static void gen_continuation_yield(MacroAssembler* masm, int& compiled_entry_offset) { Register tmp = R10_ARG8; - const int framesize_bytes = (int)align_up((int)frame::abi_reg_args_size, frame::alignment_in_bytes); + const int framesize_bytes = (int)align_up((int)frame::native_abi_reg_args_size, frame::alignment_in_bytes); framesize_words = framesize_bytes / wordSize; address start = __ pc(); @@ -2480,7 +2480,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, // disallows any pending_exception. // Save argument registers and leave room for C-compatible ABI_REG_ARGS. 
- int frame_size = frame::abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes); + int frame_size = frame::native_abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes); __ mr(R11_scratch1, R1_SP); RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2); @@ -2950,7 +2950,7 @@ void SharedRuntime::generate_deopt_blob() { OopMapSet *oop_maps = new OopMapSet(); // size of ABI112 plus spill slots for R3_RET and F1_RET. - const int frame_size_in_bytes = frame::abi_reg_args_spill_size; + const int frame_size_in_bytes = frame::native_abi_reg_args_spill_size; const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info. @@ -3119,8 +3119,8 @@ void SharedRuntime::generate_deopt_blob() { // ...). // Spill live volatile registers since we'll do a call. - __ std( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); - __ stfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); + __ std( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP); + __ stfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP); // Let the unpacker layout information in the skeletal frames just // allocated. @@ -3132,8 +3132,8 @@ void SharedRuntime::generate_deopt_blob() { __ reset_last_Java_frame(); // Restore the volatiles saved above. - __ ld( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP); - __ lfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP); + __ ld( R3_RET, _native_abi_reg_args_spill(spill_ret), R1_SP); + __ lfd(F1_RET, _native_abi_reg_args_spill(spill_fret), R1_SP); // Pop the unpack frame. 
__ pop_frame(); @@ -3179,7 +3179,7 @@ void SharedRuntime::generate_uncommon_trap_blob() { Register r_return_pc = R27_tmp7; OopMapSet* oop_maps = new OopMapSet(); - int frame_size_in_bytes = frame::abi_reg_args_size; + int frame_size_in_bytes = frame::native_abi_reg_args_size; OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); // stack: (deoptee, optional i2c, caller_of_deoptee, ...). diff --git a/src/hotspot/cpu/ppc/stackChunkFrameStream_ppc.inline.hpp b/src/hotspot/cpu/ppc/stackChunkFrameStream_ppc.inline.hpp index e4489660f46..f8d60ed9f93 100644 --- a/src/hotspot/cpu/ppc/stackChunkFrameStream_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/stackChunkFrameStream_ppc.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,14 +54,14 @@ inline frame StackChunkFrameStream::to_frame() const { template inline address StackChunkFrameStream::get_pc() const { assert(!is_done(), ""); - return (address)((frame::abi_minframe*) _sp)->lr; + return (address)((frame::common_abi*) _sp)->lr; } template inline intptr_t* StackChunkFrameStream::fp() const { // See FreezeBase::patch_pd() and frame::setup() assert((frame_kind == ChunkFrames::Mixed && is_interpreted()), ""); - intptr_t* fp_addr = (intptr_t*)&((frame::abi_minframe*)_sp)->callers_sp; + intptr_t* fp_addr = (intptr_t*)&((frame::common_abi*)_sp)->callers_sp; assert(*(intptr_t**)fp_addr != nullptr, ""); // derelativize return fp_addr + *fp_addr; diff --git a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp index 170c77890b3..a20baaee5c8 100644 --- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 1997, 2023, Oracle and/or its 
affiliates. All rights reserved. - * Copyright (c) 2012, 2022 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -91,8 +91,8 @@ class StubGenerator: public StubCodeGenerator { address start = __ function_entry(); // some sanity checks - assert((sizeof(frame::abi_minframe) % 16) == 0, "unaligned"); - assert((sizeof(frame::abi_reg_args) % 16) == 0, "unaligned"); + assert((sizeof(frame::native_abi_minframe) % 16) == 0, "unaligned"); + assert((sizeof(frame::native_abi_reg_args) % 16) == 0, "unaligned"); assert((sizeof(frame::spill_nonvolatiles) % 16) == 0, "unaligned"); assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned"); assert((sizeof(frame::entry_frame_locals) % 16) == 0, "unaligned"); @@ -540,7 +540,7 @@ class StubGenerator: public StubCodeGenerator { MacroAssembler* masm = new MacroAssembler(&code); OopMapSet* oop_maps = new OopMapSet(); - int frame_size_in_bytes = frame::abi_reg_args_size; + int frame_size_in_bytes = frame::native_abi_reg_args_size; OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); address start = __ pc(); @@ -4553,7 +4553,7 @@ class StubGenerator: public StubCodeGenerator { __ mtctr(tmp1); __ bctr(); __ bind(thaw_success); - __ addi(R3_RET, R3_RET, frame::abi_reg_args_size); // Large abi required for C++ calls. + __ addi(R3_RET, R3_RET, frame::native_abi_reg_args_size); // Large abi required for C++ calls. __ neg(R3_RET, R3_RET); // align down resulting in a smaller negative offset __ clrrdi(R3_RET, R3_RET, exact_log2(frame::alignment_in_bytes)); @@ -4576,8 +4576,11 @@ class StubGenerator: public StubCodeGenerator { Register ex_pc = R17_tos; // nonvolatile register __ ld(ex_pc, _abi0(lr), R1_SP); // LR __ mr(nvtmp, R3_RET); // save return value containing the exception oop + // The thawed top frame has got a frame::java_abi. 
This is not sufficient for the runtime call. + __ push_frame_reg_args(0, tmp1); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, ex_pc); __ mtlr(R3_RET); // the exception handler + __ pop_frame(); // See OptoRuntime::generate_exception_blob for register arguments __ mr(R3_ARG1, nvtmp); // exception oop __ mr(R4_ARG2, ex_pc); // exception pc @@ -4617,7 +4620,7 @@ class StubGenerator: public StubCodeGenerator { Register tmp1 = R10_ARG8; Register tmp2 = R9_ARG7; - int framesize = frame::abi_reg_args_size / VMRegImpl::stack_slot_size; + int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size; address start = __ pc(); __ mflr(tmp1); __ std(tmp1, _abi0(lr), R1_SP); // save return pc diff --git a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp index 42c179b05d1..d8747a1c981 100644 --- a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp @@ -978,10 +978,10 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist } // Compute top frame size. - __ addi(Rtop_frame_size, Rtop_frame_size, frame::abi_reg_args_size + frame::ijava_state_size); + __ addi(Rtop_frame_size, Rtop_frame_size, frame::top_ijava_frame_abi_size + frame::ijava_state_size); // Cut back area between esp and max_stack. 
- __ addi(Rparent_frame_resize, Rparent_frame_resize, frame::abi_minframe_size - Interpreter::stackElementSize); + __ addi(Rparent_frame_resize, Rparent_frame_resize, frame::parent_ijava_frame_abi_size - Interpreter::stackElementSize); __ round_to(Rtop_frame_size, frame::alignment_in_bytes); __ round_to(Rparent_frame_resize, frame::alignment_in_bytes); diff --git a/src/hotspot/cpu/ppc/vm_version_ppc.cpp b/src/hotspot/cpu/ppc/vm_version_ppc.cpp index 8db4adeb9ae..33736a9c12c 100644 --- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp +++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2020 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -380,7 +380,7 @@ void VM_Version::initialize() { // Adjust RTM (Restricted Transactional Memory) flags. if (UseRTMLocking) { // If CPU or OS do not support RTM: - if (PowerArchitecturePPC64 < 8) { + if (PowerArchitecturePPC64 < 8 || PowerArchitecturePPC64 > 9) { vm_exit_during_initialization("RTM instructions are not available on this CPU."); } @@ -673,7 +673,7 @@ void VM_Version::determine_features() { // We don't want to change this property, as user code might depend on it. // So the tests can not check on subversion 3.30, and we only enable RTM // with AIX 7.2. - if (has_lqarx()) { // POWER8 or above + if (has_lqarx() && !has_brw()) { // POWER8 or POWER9 if (os::Aix::os_version() >= 0x07020000) { // At least AIX 7.2. 
_features |= rtm_m; } diff --git a/src/hotspot/cpu/riscv/assembler_riscv.hpp b/src/hotspot/cpu/riscv/assembler_riscv.hpp index 35f36a1f1c0..d1669cd3737 100644 --- a/src/hotspot/cpu/riscv/assembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/assembler_riscv.hpp @@ -1488,6 +1488,16 @@ enum VectorMask { #undef INSN +#define INSN(NAME, op, funct3, vm, funct6) \ + void NAME(VectorRegister Vd, VectorRegister Vs2, Register Rs1) { \ + patch_VArith(op, Vd, funct3, Rs1->raw_encoding(), Vs2, vm, funct6); \ + } + + // Vector Integer Merge Instructions + INSN(vmerge_vxm, 0b1010111, 0b100, 0b0, 0b010111); + +#undef INSN + #define INSN(NAME, op, funct3, funct6) \ void NAME(VectorRegister Vd, VectorRegister Vs2, FloatRegister Rs1, VectorMask vm = unmasked) { \ patch_VArith(op, Vd, funct3, Rs1->raw_encoding(), Vs2, vm, funct6); \ @@ -1542,6 +1552,17 @@ enum VectorMask { #undef INSN +#define INSN(NAME, op, funct3, vm, funct6) \ + void NAME(VectorRegister Vd, VectorRegister Vs2, int32_t imm) { \ + guarantee(is_simm5(imm), "imm is invalid"); \ + patch_VArith(op, Vd, funct3, (uint32_t)(imm & 0x1f), Vs2, vm, funct6); \ + } + + // Vector Integer Merge Instructions + INSN(vmerge_vim, 0b1010111, 0b011, 0b0, 0b010111); + +#undef INSN + #define INSN(NAME, op, funct3, vm, funct6) \ void NAME(VectorRegister Vd, VectorRegister Vs2, VectorRegister Vs1) { \ patch_VArith(op, Vd, funct3, Vs1->raw_encoding(), Vs2, vm, funct6); \ @@ -1560,6 +1581,9 @@ enum VectorMask { INSN(vmnand_mm, 0b1010111, 0b010, 0b1, 0b011101); INSN(vmand_mm, 0b1010111, 0b010, 0b1, 0b011001); + // Vector Integer Merge Instructions + INSN(vmerge_vvm, 0b1010111, 0b000, 0b0, 0b010111); + #undef INSN #define INSN(NAME, op, funct3, Vs2, vm, funct6) \ diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp index ba8f221e291..01d99db782c 100644 --- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp @@ -1304,7 +1304,7 @@ void 
C2_MacroAssembler::enc_cmove(int cmpFlag, Register op1, Register op2, Regis } // Set dst to NaN if any NaN input. -void C2_MacroAssembler::minmax_FD(FloatRegister dst, FloatRegister src1, FloatRegister src2, +void C2_MacroAssembler::minmax_fp(FloatRegister dst, FloatRegister src1, FloatRegister src2, bool is_double, bool is_min) { assert_different_registers(dst, src1, src2); @@ -1616,7 +1616,7 @@ void C2_MacroAssembler::string_indexof_char_v(Register str1, Register cnt1, } // Set dst to NaN if any NaN input. -void C2_MacroAssembler::minmax_FD_v(VectorRegister dst, VectorRegister src1, VectorRegister src2, +void C2_MacroAssembler::minmax_fp_v(VectorRegister dst, VectorRegister src1, VectorRegister src2, bool is_double, bool is_min, int length_in_bytes) { assert_different_registers(dst, src1, src2); @@ -1632,7 +1632,7 @@ void C2_MacroAssembler::minmax_FD_v(VectorRegister dst, VectorRegister src1, Vec } // Set dst to NaN if any NaN input. -void C2_MacroAssembler::reduce_minmax_FD_v(FloatRegister dst, +void C2_MacroAssembler::reduce_minmax_fp_v(FloatRegister dst, FloatRegister src1, VectorRegister src2, VectorRegister tmp1, VectorRegister tmp2, bool is_double, bool is_min, int length_in_bytes) { @@ -1722,3 +1722,64 @@ void C2_MacroAssembler::rvv_vsetvli(BasicType bt, int length_in_bytes, Register } } } + +void C2_MacroAssembler::compare_integral_v(VectorRegister vd, BasicType bt, int length_in_bytes, + VectorRegister src1, VectorRegister src2, int cond, VectorMask vm) { + assert(is_integral_type(bt), "unsupported element type"); + assert(vm == Assembler::v0_t ? 
vd != v0 : true, "should be different registers"); + rvv_vsetvli(bt, length_in_bytes); + vmclr_m(vd); + switch (cond) { + case BoolTest::eq: vmseq_vv(vd, src1, src2, vm); break; + case BoolTest::ne: vmsne_vv(vd, src1, src2, vm); break; + case BoolTest::le: vmsle_vv(vd, src1, src2, vm); break; + case BoolTest::ge: vmsge_vv(vd, src1, src2, vm); break; + case BoolTest::lt: vmslt_vv(vd, src1, src2, vm); break; + case BoolTest::gt: vmsgt_vv(vd, src1, src2, vm); break; + default: + assert(false, "unsupported compare condition"); + ShouldNotReachHere(); + } +} + +void C2_MacroAssembler::compare_floating_point_v(VectorRegister vd, BasicType bt, int length_in_bytes, + VectorRegister src1, VectorRegister src2, + VectorRegister tmp1, VectorRegister tmp2, + VectorRegister vmask, int cond, VectorMask vm) { + assert(is_floating_point_type(bt), "unsupported element type"); + assert(vd != v0, "should be different registers"); + assert(vm == Assembler::v0_t ? vmask != v0 : true, "vmask should not be v0"); + rvv_vsetvli(bt, length_in_bytes); + // Check vector elements of src1 and src2 for quiet or signaling NaN. 
+ vfclass_v(tmp1, src1); + vfclass_v(tmp2, src2); + vsrl_vi(tmp1, tmp1, 8); + vsrl_vi(tmp2, tmp2, 8); + vmseq_vx(tmp1, tmp1, zr); + vmseq_vx(tmp2, tmp2, zr); + if (vm == Assembler::v0_t) { + vmand_mm(tmp2, tmp1, tmp2); + if (cond == BoolTest::ne) { + vmandn_mm(tmp1, vmask, tmp2); + } + vmand_mm(v0, vmask, tmp2); + } else { + vmand_mm(v0, tmp1, tmp2); + if (cond == BoolTest::ne) { + vmnot_m(tmp1, v0); + } + } + vmclr_m(vd); + switch (cond) { + case BoolTest::eq: vmfeq_vv(vd, src1, src2, Assembler::v0_t); break; + case BoolTest::ne: vmfne_vv(vd, src1, src2, Assembler::v0_t); + vmor_mm(vd, vd, tmp1); break; + case BoolTest::le: vmfle_vv(vd, src1, src2, Assembler::v0_t); break; + case BoolTest::ge: vmfge_vv(vd, src1, src2, Assembler::v0_t); break; + case BoolTest::lt: vmflt_vv(vd, src1, src2, Assembler::v0_t); break; + case BoolTest::gt: vmfgt_vv(vd, src1, src2, Assembler::v0_t); break; + default: + assert(false, "unsupported compare condition"); + ShouldNotReachHere(); + } +} \ No newline at end of file diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp index 94a5068fd9a..30aac05f40b 100644 --- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp @@ -137,13 +137,15 @@ vl1re8_v(v, t0); } - void spill_copy_vector_stack_to_stack(int src_offset, int dst_offset, int vec_reg_size_in_bytes) { - assert(vec_reg_size_in_bytes % 16 == 0, "unexpected vector reg size"); - unspill(v0, src_offset); - spill(v0, dst_offset); + void spill_copy_vector_stack_to_stack(int src_offset, int dst_offset, int vector_length_in_bytes) { + assert(vector_length_in_bytes % 16 == 0, "unexpected vector reg size"); + for (int i = 0; i < vector_length_in_bytes / 8; i++) { + unspill(t0, true, src_offset + (i * 8)); + spill(t0, true, dst_offset + (i * 8)); + } } - void minmax_FD(FloatRegister dst, + void minmax_fp(FloatRegister dst, FloatRegister src1, FloatRegister src2, bool is_double, 
bool is_min); @@ -183,11 +185,11 @@ Register tmp1, Register tmp2, bool isL); - void minmax_FD_v(VectorRegister dst, + void minmax_fp_v(VectorRegister dst, VectorRegister src1, VectorRegister src2, bool is_double, bool is_min, int length_in_bytes); - void reduce_minmax_FD_v(FloatRegister dst, + void reduce_minmax_fp_v(FloatRegister dst, FloatRegister src1, VectorRegister src2, VectorRegister tmp1, VectorRegister tmp2, bool is_double, bool is_min, int length_in_bytes); @@ -198,4 +200,34 @@ void rvv_vsetvli(BasicType bt, int length_in_bytes, Register tmp = t0); + void compare_integral_v(VectorRegister dst, BasicType bt, int length_in_bytes, + VectorRegister src1, VectorRegister src2, int cond, VectorMask vm = Assembler::unmasked); + + void compare_floating_point_v(VectorRegister dst, BasicType bt, int length_in_bytes, + VectorRegister src1, VectorRegister src2, VectorRegister tmp1, VectorRegister tmp2, + VectorRegister vmask, int cond, VectorMask vm = Assembler::unmasked); + + // In Matcher::scalable_predicate_reg_slots, + // we assume each predicate register is one-eighth of the size of + // scalable vector register, one mask bit per vector byte. 
+ void spill_vmask(VectorRegister v, int offset){ + rvv_vsetvli(T_BYTE, MaxVectorSize >> 3); + add(t0, sp, offset); + vse8_v(v, t0); + } + + void unspill_vmask(VectorRegister v, int offset){ + rvv_vsetvli(T_BYTE, MaxVectorSize >> 3); + add(t0, sp, offset); + vle8_v(v, t0); + } + + void spill_copy_vmask_stack_to_stack(int src_offset, int dst_offset, int vector_length_in_bytes) { + assert(vector_length_in_bytes % 4 == 0, "unexpected vector mask reg size"); + for (int i = 0; i < vector_length_in_bytes / 4; i++) { + unspill(t0, false, src_offset + (i * 4)); + spill(t0, false, dst_offset + (i * 4)); + } + } + #endif // CPU_RISCV_C2_MACROASSEMBLER_RISCV_HPP diff --git a/src/hotspot/cpu/riscv/continuationFreezeThaw_riscv.inline.hpp b/src/hotspot/cpu/riscv/continuationFreezeThaw_riscv.inline.hpp index b4b9b32119b..71c22ad263c 100644 --- a/src/hotspot/cpu/riscv/continuationFreezeThaw_riscv.inline.hpp +++ b/src/hotspot/cpu/riscv/continuationFreezeThaw_riscv.inline.hpp @@ -83,11 +83,11 @@ template frame FreezeBase::new_heap_frame(frame& f, frame& calle if (FKind::interpreted) { assert((intptr_t*)f.at(frame::interpreter_frame_last_sp_offset) == nullptr || f.unextended_sp() == (intptr_t*)f.at(frame::interpreter_frame_last_sp_offset), ""); - int locals = f.interpreter_frame_method()->max_locals(); + intptr_t locals_offset = *f.addr_at(frame::interpreter_frame_locals_offset); // If the caller.is_empty(), i.e. we're freezing into an empty chunk, then we set // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp bool overlap_caller = caller.is_interpreted_frame() || caller.is_empty(); - fp = caller.unextended_sp() - (locals + frame::sender_sp_offset) + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0); + fp = caller.unextended_sp() - 1 - locals_offset + (overlap_caller ? 
ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0); sp = fp - (f.fp() - f.unextended_sp()); assert(sp <= fp, ""); assert(fp <= caller.unextended_sp(), ""); @@ -96,7 +96,7 @@ template frame FreezeBase::new_heap_frame(frame& f, frame& calle assert(_cont.tail()->is_in_chunk(sp), ""); frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */); - *hf.addr_at(frame::interpreter_frame_locals_offset) = frame::sender_sp_offset + locals - 1; + *hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset; return hf; } else { // We need to re-read fp out of the frame because it may be an oop and we might have @@ -144,13 +144,11 @@ inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, co // On RISCV, we may insert padding between the locals and the rest of the frame // (see TemplateInterpreterGenerator::generate_normal_entry, and AbstractInterpreter::layout_activation) - // so we compute locals "from scratch" rather than relativizing the value in the stack frame, which might include padding, - // since we don't freeze the padding word (see recurse_freeze_interpreted_frame). + // because we freeze the padding word (see recurse_freeze_interpreted_frame) in order to keep the same relativized + // locals value, we don't need to change the locals value here. 
// at(frame::interpreter_frame_last_sp_offset) can be null at safepoint preempts *hf.addr_at(frame::interpreter_frame_last_sp_offset) = hf.unextended_sp() - hf.fp(); - // this line can be changed into an assert when we have fixed the "frame padding problem", see JDK-8300197 - *hf.addr_at(frame::interpreter_frame_locals_offset) = frame::sender_sp_offset + f.interpreter_frame_method()->max_locals() - 1; relativize_one(vfp, hfp, frame::interpreter_frame_initial_sp_offset); // == block_top == block_bottom relativize_one(vfp, hfp, frame::interpreter_frame_extended_sp_offset); @@ -225,11 +223,9 @@ template frame ThawBase::new_stack_frame(const frame& hf, frame& const int locals = hf.interpreter_frame_method()->max_locals(); intptr_t* frame_sp = caller.unextended_sp() - fsize; intptr_t* fp = frame_sp + (hf.fp() - heap_sp); - int padding = 0; if ((intptr_t)fp % frame::frame_alignment != 0) { fp--; frame_sp--; - padding++; log_develop_trace(continuations)("Adding internal interpreted frame alignment"); } DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);) @@ -238,10 +234,8 @@ template frame ThawBase::new_stack_frame(const frame& hf, frame& frame f(frame_sp, frame_sp, fp, hf.pc()); // we need to set the locals so that the caller of new_stack_frame() can call // ContinuationHelper::InterpretedFrame::frame_bottom - intptr_t offset = *hf.addr_at(frame::interpreter_frame_locals_offset); - assert((int)offset == frame::sender_sp_offset + locals - 1, ""); - // set relativized locals - *f.addr_at(frame::interpreter_frame_locals_offset) = padding + offset; + // copy relativized locals from the heap frame + *f.addr_at(frame::interpreter_frame_locals_offset) = *hf.addr_at(frame::interpreter_frame_locals_offset); assert((intptr_t)f.fp() % frame::frame_alignment == 0, ""); return f; } else { @@ -303,10 +297,4 @@ inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, c derelativize_one(vfp, 
frame::interpreter_frame_extended_sp_offset); } -inline void ThawBase::set_interpreter_frame_bottom(const frame& f, intptr_t* bottom) { - // set relativized locals - // This line can be changed into an assert when we have fixed the "frame padding problem", see JDK-8300197 - *f.addr_at(frame::interpreter_frame_locals_offset) = (bottom - 1) - f.fp(); -} - #endif // CPU_RISCV_CONTINUATIONFREEZETHAW_RISCV_INLINE_HPP diff --git a/src/hotspot/cpu/riscv/downcallLinker_riscv.cpp b/src/hotspot/cpu/riscv/downcallLinker_riscv.cpp index 994402bc2b4..6f0e4e97246 100644 --- a/src/hotspot/cpu/riscv/downcallLinker_riscv.cpp +++ b/src/hotspot/cpu/riscv/downcallLinker_riscv.cpp @@ -48,6 +48,7 @@ class DowncallStubGenerator : public StubCodeGenerator { bool _needs_return_buffer; int _captured_state_mask; + bool _needs_transition; int _frame_complete; int _frame_size_slots; @@ -61,7 +62,8 @@ public: const GrowableArray& input_registers, const GrowableArray& output_registers, bool needs_return_buffer, - int captured_state_mask) + int captured_state_mask, + bool needs_transition) : StubCodeGenerator(buffer, PrintMethodHandleStubs), _signature(signature), _num_args(num_args), @@ -71,6 +73,7 @@ public: _output_registers(output_registers), _needs_return_buffer(needs_return_buffer), _captured_state_mask(captured_state_mask), + _needs_transition(needs_transition), _frame_complete(0), _frame_size_slots(0), _oop_maps(nullptr) { @@ -101,13 +104,15 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature, const GrowableArray& input_registers, const GrowableArray& output_registers, bool needs_return_buffer, - int captured_state_mask) { + int captured_state_mask, + bool needs_transition) { int code_size = native_invoker_code_base_size + (num_args * native_invoker_size_per_arg); int locs_size = 1; // must be non-zero CodeBuffer code("nep_invoker_blob", code_size, locs_size); DowncallStubGenerator g(&code, signature, num_args, ret_bt, abi, input_registers, output_registers, - 
needs_return_buffer, captured_state_mask); + needs_return_buffer, captured_state_mask, + needs_transition); g.generate(); code.log_section_sizes("nep_invoker_blob"); @@ -160,7 +165,7 @@ void DowncallStubGenerator::generate() { assert(_abi._shadow_space_bytes == 0, "not expecting shadow space on RISCV64"); allocated_frame_size += arg_shuffle.out_arg_bytes(); - bool should_save_return_value = !_needs_return_buffer; + bool should_save_return_value = !_needs_return_buffer && _needs_transition; RegSpiller out_reg_spiller(_output_registers); int spill_offset = -1; @@ -190,7 +195,7 @@ void DowncallStubGenerator::generate() { _frame_size_slots += framesize + (allocated_frame_size >> LogBytesPerInt); assert(is_even(_frame_size_slots / 2), "sp not 16-byte aligned"); - _oop_maps = new OopMapSet(); + _oop_maps = _needs_transition ? new OopMapSet() : nullptr; address start = __ pc(); __ enter(); @@ -200,17 +205,19 @@ void DowncallStubGenerator::generate() { _frame_complete = __ pc() - start; // frame build complete. 
- __ block_comment("{ thread java2native"); - address the_pc = __ pc(); - __ set_last_Java_frame(sp, fp, the_pc, t0); - OopMap* map = new OopMap(_frame_size_slots, 0); - _oop_maps->add_gc_map(the_pc - start, map); + if (_needs_transition) { + __ block_comment("{ thread java2native"); + address the_pc = __ pc(); + __ set_last_Java_frame(sp, fp, the_pc, t0); + OopMap* map = new OopMap(_frame_size_slots, 0); + _oop_maps->add_gc_map(the_pc - start, map); - // State transition - __ mv(t0, _thread_in_native); - __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); - __ sw(t0, Address(xthread, JavaThread::thread_state_offset())); - __ block_comment("} thread java2native"); + // State transition + __ mv(t0, _thread_in_native); + __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); + __ sw(t0, Address(xthread, JavaThread::thread_state_offset())); + __ block_comment("} thread java2native"); + } __ block_comment("{ argument shuffle"); arg_shuffle.generate(_masm, shuffle_reg, 0, _abi._shadow_space_bytes, locs); @@ -260,80 +267,85 @@ void DowncallStubGenerator::generate() { ////////////////////////////////////////////////////////////////////////////// - __ block_comment("{ thread native2java"); - __ mv(t0, _thread_in_native_trans); - __ sw(t0, Address(xthread, JavaThread::thread_state_offset())); - - // Force this write out before the read below - if (!UseSystemMemoryBarrier) { - __ membar(MacroAssembler::AnyAny); - } - Label L_after_safepoint_poll; Label L_safepoint_poll_slow_path; - __ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, true /* acquire */, false /* in_nmethod */); - __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset())); - __ bnez(t0, L_safepoint_poll_slow_path); - - __ bind(L_after_safepoint_poll); - - __ mv(t0, _thread_in_Java); - __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); - __ sw(t0, Address(xthread, JavaThread::thread_state_offset())); - - __ block_comment("reguard stack check"); 
Label L_reguard; Label L_after_reguard; - __ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset())); - __ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled); - __ beq(t0, t1, L_reguard); - __ bind(L_after_reguard); + if (_needs_transition) { + __ block_comment("{ thread native2java"); + __ mv(t0, _thread_in_native_trans); + __ sw(t0, Address(xthread, JavaThread::thread_state_offset())); - __ reset_last_Java_frame(true); - __ block_comment("} thread native2java"); + // Force this write out before the read below + if (!UseSystemMemoryBarrier) { + __ membar(MacroAssembler::AnyAny); + } + + __ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, true /* acquire */, false /* in_nmethod */); + __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset())); + __ bnez(t0, L_safepoint_poll_slow_path); + + __ bind(L_after_safepoint_poll); + + // change thread state + __ mv(t0, _thread_in_Java); + __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); + __ sw(t0, Address(xthread, JavaThread::thread_state_offset())); + + __ block_comment("reguard stack check"); + __ lbu(t0, Address(xthread, JavaThread::stack_guard_state_offset())); + __ mv(t1, StackOverflow::stack_guard_yellow_reserved_disabled); + __ beq(t0, t1, L_reguard); + __ bind(L_after_reguard); + + __ reset_last_Java_frame(true); + __ block_comment("} thread native2java"); + } __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(); ////////////////////////////////////////////////////////////////////////////// - __ block_comment("{ L_safepoint_poll_slow_path"); - __ bind(L_safepoint_poll_slow_path); + if (_needs_transition) { + __ block_comment("{ L_safepoint_poll_slow_path"); + __ bind(L_safepoint_poll_slow_path); - if (should_save_return_value) { - // Need to save the native result registers around any runtime calls. 
- out_reg_spiller.generate_spill(_masm, spill_offset); - } + if (should_save_return_value) { + // Need to save the native result registers around any runtime calls. + out_reg_spiller.generate_spill(_masm, spill_offset); + } - __ mv(c_rarg0, xthread); - assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area"); - __ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)); + __ mv(c_rarg0, xthread); + assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area"); + __ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)); - if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_offset); - } - __ j(L_after_safepoint_poll); - __ block_comment("} L_safepoint_poll_slow_path"); + if (should_save_return_value) { + out_reg_spiller.generate_fill(_masm, spill_offset); + } + __ j(L_after_safepoint_poll); + __ block_comment("} L_safepoint_poll_slow_path"); ////////////////////////////////////////////////////////////////////////////// - __ block_comment("{ L_reguard"); - __ bind(L_reguard); + __ block_comment("{ L_reguard"); + __ bind(L_reguard); - if (should_save_return_value) { - // Need to save the native result registers around any runtime calls. - out_reg_spiller.generate_spill(_masm, spill_offset); + if (should_save_return_value) { + // Need to save the native result registers around any runtime calls. 
+ out_reg_spiller.generate_spill(_masm, spill_offset); + } + + __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); + + if (should_save_return_value) { + out_reg_spiller.generate_fill(_masm, spill_offset); + } + + __ j(L_after_reguard); + __ block_comment("} L_reguard"); } - __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); - - if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_offset); - } - - __ j(L_after_reguard); - __ block_comment("} L_reguard"); - ////////////////////////////////////////////////////////////////////////////// __ flush(); diff --git a/src/hotspot/cpu/riscv/foreignGlobals_riscv.cpp b/src/hotspot/cpu/riscv/foreignGlobals_riscv.cpp index 44cff28b119..7ecfe0c38d3 100644 --- a/src/hotspot/cpu/riscv/foreignGlobals_riscv.cpp +++ b/src/hotspot/cpu/riscv/foreignGlobals_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -44,6 +44,10 @@ bool ABIDescriptor::is_volatile_reg(FloatRegister reg) const { || _float_additional_volatile_registers.contains(reg); } +bool ForeignGlobals::is_foreign_linker_supported() { + return true; +} + const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) { oop abi_oop = JNIHandles::resolve_non_null(jabi); ABIDescriptor abi; diff --git a/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp index 070ec8e6338..2ad44400687 100644 --- a/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp @@ -49,7 +49,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob if (UseCondCardMark) { Label L_already_dirty; - __ membar(MacroAssembler::StoreLoad); __ lbu(t1, Address(tmp)); __ beqz(t1, L_already_dirty); __ sb(zr, Address(tmp)); diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp index 18a03eff7ee..c60d1a5ad66 100644 --- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp @@ -1264,7 +1264,7 @@ public: vmnand_mm(vd, vs, vs); } - inline void vncvt_x_x_w(VectorRegister vd, VectorRegister vs, VectorMask vm) { + inline void vncvt_x_x_w(VectorRegister vd, VectorRegister vs, VectorMask vm = unmasked) { vnsrl_wx(vd, vs, x0, vm); } @@ -1276,6 +1276,45 @@ public: vfsgnjn_vv(vd, vs, vs); } + inline void vmsgt_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) { + vmslt_vv(vd, vs1, vs2, vm); + } + + inline void vmsgtu_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) { + vmsltu_vv(vd, vs1, vs2, vm); + } + + inline void vmsge_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) { + vmsle_vv(vd, vs1, vs2, vm); + } + + inline void 
vmsgeu_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) { + vmsleu_vv(vd, vs1, vs2, vm); + } + + inline void vmfgt_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) { + vmflt_vv(vd, vs1, vs2, vm); + } + + inline void vmfge_vv(VectorRegister vd, VectorRegister vs2, VectorRegister vs1, VectorMask vm = unmasked) { + vmfle_vv(vd, vs1, vs2, vm); + } + + // Copy mask register + inline void vmmv_m(VectorRegister vd, VectorRegister vs) { + vmand_mm(vd, vs, vs); + } + + // Clear mask register + inline void vmclr_m(VectorRegister vd) { + vmxor_mm(vd, vd, vd); + } + + // Set mask register + inline void vmset_m(VectorRegister vd) { + vmxnor_mm(vd, vd, vd); + } + static const int zero_words_block_size; void cast_primitive_type(BasicType type, Register Rt) { diff --git a/src/hotspot/cpu/riscv/matcher_riscv.hpp b/src/hotspot/cpu/riscv/matcher_riscv.hpp index eeee72f3910..a2b38ee4a48 100644 --- a/src/hotspot/cpu/riscv/matcher_riscv.hpp +++ b/src/hotspot/cpu/riscv/matcher_riscv.hpp @@ -149,7 +149,7 @@ // Some microarchitectures have mask registers used on vectors static const bool has_predicated_vectors(void) { - return false; + return UseRVV; } // true means we have fast l2f conversion diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad index 708defd68e7..88dd95a1b8a 100644 --- a/src/hotspot/cpu/riscv/riscv.ad +++ b/src/hotspot/cpu/riscv/riscv.ad @@ -830,7 +830,8 @@ reg_class double_reg( F31, F31_H ); -// Class for all RVV vector registers +// Class for RVV vector registers +// Note: v0, v30 and v31 are used as mask registers. 
reg_class vectora_reg( V1, V1_H, V1_J, V1_K, V2, V2_H, V2_J, V2_K, @@ -860,9 +861,7 @@ reg_class vectora_reg( V26, V26_H, V26_J, V26_K, V27, V27_H, V27_J, V27_K, V28, V28_H, V28_J, V28_K, - V29, V29_H, V29_J, V29_K, - V30, V30_H, V30_J, V30_K, - V31, V31_H, V31_J, V31_K + V29, V29_H, V29_J, V29_K ); // Class for 64 bit register f0 @@ -912,6 +911,23 @@ reg_class v5_reg( // class for condition codes reg_class reg_flags(RFLAGS); + +// Class for RVV v0 mask register +// https://github.com/riscv/riscv-v-spec/blob/master/v-spec.adoc#53-vector-masking +// The mask value used to control execution of a masked vector +// instruction is always supplied by vector register v0. +reg_class vmask_reg_v0 ( + V0 +); + +// Class for RVV mask registers +// We need two more vmask registers to do the vector mask logical ops, +// so define v30, v31 as mask register too. +reg_class vmask_reg ( + V0, + V30, + V31 +); %} //----------DEFINITION BLOCK--------------------------------------------------- @@ -1522,7 +1538,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register"); - if (src_hi != OptoReg::Bad) { + if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) { assert((src_lo & 1) == 0 && src_lo + 1 == src_hi && (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi, "expected aligned-adjacent pairs"); @@ -1558,6 +1574,25 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo } else { ShouldNotReachHere(); } + } else if (bottom_type()->isa_vectmask() && cbuf) { + C2_MacroAssembler _masm(cbuf); + int vmask_size_in_bytes = Matcher::scalable_predicate_reg_slots() * 32 / 8; + if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) { + // stack to stack + __ spill_copy_vmask_stack_to_stack(src_offset, dst_offset, + vmask_size_in_bytes); + } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) { + // vmask to stack + __ 
spill_vmask(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo)); + } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) { + // stack to vmask + __ unspill_vmask(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo)); + } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) { + // vmask to vmask + __ vmv1r_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo])); + } else { + ShouldNotReachHere(); + } } } else if (cbuf != NULL) { C2_MacroAssembler _masm(cbuf); @@ -1642,7 +1677,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo } else { st->print("%s", Matcher::regName[dst_lo]); } - if (bottom_type()->isa_vect() != NULL) { + if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) { int vsize = 0; if (ideal_reg() == Op_VecA) { vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8; @@ -1650,6 +1685,10 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo ShouldNotReachHere(); } st->print("\t# vector spill size = %d", vsize); + } else if (ideal_reg() == Op_RegVectMask) { + assert(Matcher::supports_scalable_vector(), "bad register type for spill"); + int vsize = Matcher::scalable_predicate_reg_slots() * 32; + st->print("\t# vmask spill size = %d", vsize); } else { st->print("\t# spill size = %d", is64 ? 
64 : 32); } @@ -1863,7 +1902,59 @@ const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType } const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { - return false; + if (!UseRVV) { + return false; + } + switch (opcode) { + case Op_AddVB: + case Op_AddVS: + case Op_AddVI: + case Op_AddVL: + case Op_AddVF: + case Op_AddVD: + case Op_SubVB: + case Op_SubVS: + case Op_SubVI: + case Op_SubVL: + case Op_SubVF: + case Op_SubVD: + case Op_MulVB: + case Op_MulVS: + case Op_MulVI: + case Op_MulVL: + case Op_MulVF: + case Op_MulVD: + case Op_DivVF: + case Op_DivVD: + case Op_VectorLoadMask: + case Op_VectorMaskCmp: + case Op_AndVMask: + case Op_XorVMask: + case Op_OrVMask: + case Op_RShiftVB: + case Op_RShiftVS: + case Op_RShiftVI: + case Op_RShiftVL: + case Op_LShiftVB: + case Op_LShiftVS: + case Op_LShiftVI: + case Op_LShiftVL: + case Op_URShiftVB: + case Op_URShiftVS: + case Op_URShiftVI: + case Op_URShiftVL: + case Op_VectorBlend: + break; + case Op_LoadVector: + opcode = Op_LoadVectorMasked; + break; + case Op_StoreVector: + opcode = Op_StoreVectorMasked; + break; + default: + return false; + } + return match_rule_supported_vector(opcode, vlen, bt); } const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { @@ -1875,11 +1966,11 @@ const bool Matcher::vector_needs_load_shuffle(BasicType elem_bt, int vlen) { } const RegMask* Matcher::predicate_reg_mask(void) { - return NULL; + return &_VMASK_REG_mask; } const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) { - return NULL; + return new TypeVectMask(elemTy, length); } // Vector calling convention not yet implemented. 
@@ -3556,6 +3647,28 @@ operand vReg_V5() interface(REG_INTER); %} +operand vRegMask() +%{ + constraint(ALLOC_IN_RC(vmask_reg)); + match(RegVectMask); + match(vRegMask_V0); + op_cost(0); + format %{ %} + interface(REG_INTER); +%} + +// The mask value used to control execution of a masked +// vector instruction is always supplied by vector register v0. +operand vRegMask_V0() +%{ + constraint(ALLOC_IN_RC(vmask_reg_v0)); + match(RegVectMask); + match(vRegMask); + op_cost(0); + format %{ %} + interface(REG_INTER); +%} + // Java Thread Register operand javaThread_RegP(iRegP reg) %{ @@ -7271,7 +7384,7 @@ instruct maxF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{ format %{ "maxF $dst, $src1, $src2" %} ins_encode %{ - __ minmax_FD(as_FloatRegister($dst$$reg), + __ minmax_fp(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg), false /* is_double */, false /* is_min */); %} @@ -7287,7 +7400,7 @@ instruct minF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{ format %{ "minF $dst, $src1, $src2" %} ins_encode %{ - __ minmax_FD(as_FloatRegister($dst$$reg), + __ minmax_fp(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg), false /* is_double */, true /* is_min */); %} @@ -7303,7 +7416,7 @@ instruct maxD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{ format %{ "maxD $dst, $src1, $src2" %} ins_encode %{ - __ minmax_FD(as_FloatRegister($dst$$reg), + __ minmax_fp(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg), true /* is_double */, false /* is_min */); %} @@ -7319,7 +7432,7 @@ instruct minD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{ format %{ "minD $dst, $src1, $src2" %} ins_encode %{ - __ minmax_FD(as_FloatRegister($dst$$reg), + __ minmax_fp(as_FloatRegister($dst$$reg), as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg), true /* is_double */, true /* is_min */); %} diff --git 
a/src/hotspot/cpu/riscv/riscv_v.ad b/src/hotspot/cpu/riscv/riscv_v.ad index 700cb18eafb..240baa1b577 100644 --- a/src/hotspot/cpu/riscv/riscv_v.ad +++ b/src/hotspot/cpu/riscv/riscv_v.ad @@ -35,14 +35,18 @@ source_hpp %{ source %{ static void loadStore(C2_MacroAssembler masm, bool is_store, - VectorRegister reg, BasicType bt, Register base, int length_in_bytes) { + VectorRegister reg, BasicType bt, Register base, + int length_in_bytes, Assembler::VectorMask vm = Assembler::unmasked) { Assembler::SEW sew = Assembler::elemtype_to_sew(bt); masm.rvv_vsetvli(bt, length_in_bytes); if (is_store) { - masm.vsex_v(reg, base, sew); + masm.vsex_v(reg, base, sew, vm); } else { - masm.vlex_v(reg, base, sew); + if (vm == Assembler::v0_t) { + masm.vxor_vv(reg, reg, reg); + } + masm.vlex_v(reg, base, sew, vm); } } @@ -66,7 +70,6 @@ source %{ // Vector API specific case Op_LoadVectorGather: case Op_StoreVectorScatter: - case Op_VectorBlend: case Op_VectorCast: case Op_VectorCastB2X: case Op_VectorCastD2X: @@ -75,12 +78,9 @@ source %{ case Op_VectorCastL2X: case Op_VectorCastS2X: case Op_VectorInsert: - case Op_VectorLoadMask: case Op_VectorLoadShuffle: - case Op_VectorMaskCmp: case Op_VectorRearrange: case Op_VectorReinterpret: - case Op_VectorStoreMask: case Op_VectorTest: case Op_PopCountVI: case Op_PopCountVL: @@ -123,6 +123,112 @@ instruct storeV(vReg src, vmemA mem) %{ ins_pipe(pipe_slow); %} +// vector load mask + +instruct vloadmask(vRegMask dst, vReg src) %{ + match(Set dst (VectorLoadMask src)); + format %{ "vloadmask $dst, $src" %} + ins_encode %{ + __ rvv_vsetvli(T_BOOLEAN, Matcher::vector_length(this)); + __ vmsne_vx(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), zr); + %} + ins_pipe(pipe_slow); +%} + +instruct vloadmask_masked(vRegMask dst, vReg src, vRegMask_V0 v0) %{ + match(Set dst (VectorLoadMask src v0)); + format %{ "vloadmask_masked $dst, $src, $v0" %} + ins_encode %{ + __ rvv_vsetvli(T_BOOLEAN, Matcher::vector_length(this)); + __ 
vmsne_vx(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), zr, Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +// vector store mask + +instruct vstoremask(vReg dst, vRegMask_V0 v0, immI size) %{ + match(Set dst (VectorStoreMask v0 size)); + format %{ "vstoremask $dst, V0" %} + ins_encode %{ + __ rvv_vsetvli(T_BOOLEAN, Matcher::vector_length(this)); + __ vmv_v_x(as_VectorRegister($dst$$reg), zr); + __ vmerge_vim(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg), 1); + %} + ins_pipe(pipe_slow); +%} + +// vector mask compare + +instruct vmaskcmp(vRegMask dst, vReg src1, vReg src2, immI cond) %{ + predicate(Matcher::vector_element_basic_type(n) == T_BYTE || + Matcher::vector_element_basic_type(n) == T_SHORT || + Matcher::vector_element_basic_type(n) == T_INT || + Matcher::vector_element_basic_type(n) == T_LONG); + match(Set dst (VectorMaskCmp (Binary src1 src2) cond)); + format %{ "vmaskcmp $dst, $src1, $src2, $cond" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + uint length_in_bytes = Matcher::vector_length_in_bytes(this); + __ compare_integral_v(as_VectorRegister($dst$$reg), bt, length_in_bytes, as_VectorRegister($src1$$reg), + as_VectorRegister($src2$$reg), (int)($cond$$constant)); + %} + ins_pipe(pipe_slow); +%} + +instruct vmaskcmp_masked(vRegMask dst, vReg src1, vReg src2, immI cond, vRegMask_V0 v0) %{ + predicate(Matcher::vector_element_basic_type(n) == T_BYTE || + Matcher::vector_element_basic_type(n) == T_SHORT || + Matcher::vector_element_basic_type(n) == T_INT || + Matcher::vector_element_basic_type(n) == T_LONG); + match(Set dst (VectorMaskCmp (Binary src1 src2) (Binary cond v0))); + effect(TEMP_DEF dst); + format %{ "vmaskcmp_masked $dst, $src1, $src2, $cond, $v0" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + uint length_in_bytes = Matcher::vector_length_in_bytes(this); + __ compare_integral_v(as_VectorRegister($dst$$reg), bt, length_in_bytes, 
as_VectorRegister($src1$$reg), + as_VectorRegister($src2$$reg), (int)($cond$$constant), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +// vector mask float compare + +instruct vmaskcmp_fp(vRegMask dst, vReg src1, vReg src2, immI cond, vRegMask_V0 v0, vReg tmp1, vReg tmp2) %{ + predicate(Matcher::vector_element_basic_type(n) == T_FLOAT || + Matcher::vector_element_basic_type(n) == T_DOUBLE); + match(Set dst (VectorMaskCmp (Binary src1 src2) cond)); + effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP v0); + format %{ "vmaskcmp_fp $dst, $src1, $src2, $cond\t# KILL $tmp1, $tmp2" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + uint length_in_bytes = Matcher::vector_length_in_bytes(this); + __ compare_floating_point_v(as_VectorRegister($dst$$reg), bt, length_in_bytes, + as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg), + as_VectorRegister($tmp1$$reg), as_VectorRegister($tmp2$$reg), + as_VectorRegister($v0$$reg), (int)($cond$$constant)); + %} + ins_pipe(pipe_slow); +%} + +instruct vmaskcmp_fp_masked(vRegMask dst, vReg src1, vReg src2, immI cond, vRegMask vmask, vReg tmp1, vReg tmp2, vRegMask_V0 v0) %{ + predicate(Matcher::vector_element_basic_type(n) == T_FLOAT || + Matcher::vector_element_basic_type(n) == T_DOUBLE); + match(Set dst (VectorMaskCmp (Binary src1 src2) (Binary cond vmask))); + effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP v0); + format %{ "vmaskcmp_fp_masked $dst, $src1, $src2, $cond, $vmask\t# KILL $tmp1, $tmp2, $v0" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + uint length_in_bytes = Matcher::vector_length_in_bytes(this); + __ compare_floating_point_v(as_VectorRegister($dst$$reg), bt, length_in_bytes, + as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg), + as_VectorRegister($tmp1$$reg), as_VectorRegister($tmp2$$reg), + as_VectorRegister($vmask$$reg), (int)($cond$$constant), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + // vector abs instruct vabsB(vReg dst, 
vReg src, vReg tmp) %{ @@ -283,6 +389,40 @@ instruct vaddD(vReg dst, vReg src1, vReg src2) %{ ins_pipe(pipe_slow); %} +// vector add - predicated + +instruct vadd_masked(vReg dst_src1, vReg src2, vRegMask_V0 v0) %{ + match(Set dst_src1 (AddVB (Binary dst_src1 src2) v0)); + match(Set dst_src1 (AddVS (Binary dst_src1 src2) v0)); + match(Set dst_src1 (AddVI (Binary dst_src1 src2) v0)); + match(Set dst_src1 (AddVL (Binary dst_src1 src2) v0)); + ins_cost(VEC_COST); + format %{ "vadd.vv $dst_src1, $src2, $v0\t#@vadd_masked" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + __ vadd_vv(as_VectorRegister($dst_src1$$reg), + as_VectorRegister($dst_src1$$reg), + as_VectorRegister($src2$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct vadd_fp_masked(vReg dst_src1, vReg src2, vRegMask_V0 v0) %{ + match(Set dst_src1 (AddVF (Binary dst_src1 src2) v0)); + match(Set dst_src1 (AddVD (Binary dst_src1 src2) v0)); + ins_cost(VEC_COST); + format %{ "vfadd.vv $dst_src1, $src2, $v0\t#@vadd_fp_masked" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + __ vfadd_vv(as_VectorRegister($dst_src1$$reg), + as_VectorRegister($dst_src1$$reg), + as_VectorRegister($src2$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + // vector and instruct vand(vReg dst, vReg src1, vReg src2) %{ @@ -290,7 +430,8 @@ instruct vand(vReg dst, vReg src1, vReg src2) %{ ins_cost(VEC_COST); format %{ "vand.vv $dst, $src1, $src2\t#@vand" %} ins_encode %{ - __ rvv_vsetvli(T_LONG, Matcher::vector_length_in_bytes(this)); + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); __ vand_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg)); @@ -305,7 +446,8 @@ instruct vor(vReg dst, vReg src1, vReg src2) %{ 
ins_cost(VEC_COST); format %{ "vor.vv $dst, $src1, $src2\t#@vor" %} ins_encode %{ - __ rvv_vsetvli(T_LONG, Matcher::vector_length_in_bytes(this)); + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); __ vor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg)); @@ -320,7 +462,8 @@ instruct vxor(vReg dst, vReg src1, vReg src2) %{ ins_cost(VEC_COST); format %{ "vxor.vv $dst, $src1, $src2\t#@vxor" %} ins_encode %{ - __ rvv_vsetvli(T_LONG, Matcher::vector_length_in_bytes(this)); + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg)); @@ -356,6 +499,23 @@ instruct vdivD(vReg dst, vReg src1, vReg src2) %{ ins_pipe(pipe_slow); %} +// vector float div - predicated + +instruct vdiv_fp_masked(vReg dst_src1, vReg src2, vRegMask_V0 v0) %{ + match(Set dst_src1 (DivVF (Binary dst_src1 src2) v0)); + match(Set dst_src1 (DivVD (Binary dst_src1 src2) v0)); + ins_cost(VEC_COST); + format %{ "vfdiv.vv $dst_src1, $src2, $v0\t#@vdiv_fp_masked" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + __ vfdiv_vv(as_VectorRegister($dst_src1$$reg), + as_VectorRegister($dst_src1$$reg), + as_VectorRegister($src2$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + // vector integer max/min instruct vmax(vReg dst, vReg src1, vReg src2) %{ @@ -397,7 +557,7 @@ instruct vmaxF(vReg dst, vReg src1, vReg src2) %{ ins_cost(VEC_COST); format %{ "vmaxF $dst, $src1, $src2\t#@vmaxF" %} ins_encode %{ - __ minmax_FD_v(as_VectorRegister($dst$$reg), + __ minmax_fp_v(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg), false /* is_double */, false /* is_min */, Matcher::vector_length_in_bytes(this)); %} @@ 
-411,7 +571,7 @@ instruct vmaxD(vReg dst, vReg src1, vReg src2) %{ ins_cost(VEC_COST); format %{ "vmaxD $dst, $src1, $src2\t#@vmaxD" %} ins_encode %{ - __ minmax_FD_v(as_VectorRegister($dst$$reg), + __ minmax_fp_v(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg), true /* is_double */, false /* is_min */, Matcher::vector_length_in_bytes(this)); %} @@ -425,7 +585,7 @@ instruct vminF(vReg dst, vReg src1, vReg src2) %{ ins_cost(VEC_COST); format %{ "vminF $dst, $src1, $src2\t#@vminF" %} ins_encode %{ - __ minmax_FD_v(as_VectorRegister($dst$$reg), + __ minmax_fp_v(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg), false /* is_double */, true /* is_min */, Matcher::vector_length_in_bytes(this)); %} @@ -439,7 +599,7 @@ instruct vminD(vReg dst, vReg src1, vReg src2) %{ ins_cost(VEC_COST); format %{ "vminD $dst, $src1, $src2\t#@vminD" %} ins_encode %{ - __ minmax_FD_v(as_VectorRegister($dst$$reg), + __ minmax_fp_v(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), as_VectorRegister($src2$$reg), true /* is_double */, true /* is_min */, Matcher::vector_length_in_bytes(this)); %} @@ -756,6 +916,38 @@ instruct vmulD(vReg dst, vReg src1, vReg src2) %{ ins_pipe(pipe_slow); %} +// vector mul - predicated + +instruct vmul_masked(vReg dst_src1, vReg src2, vRegMask_V0 v0) %{ + match(Set dst_src1 (MulVB (Binary dst_src1 src2) v0)); + match(Set dst_src1 (MulVS (Binary dst_src1 src2) v0)); + match(Set dst_src1 (MulVI (Binary dst_src1 src2) v0)); + match(Set dst_src1 (MulVL (Binary dst_src1 src2) v0)); + ins_cost(VEC_COST); + format %{ "vmul.vv $dst_src1, $src2, $v0\t#@vmul_masked" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + __ vmul_vv(as_VectorRegister($dst_src1$$reg), as_VectorRegister($dst_src1$$reg), + as_VectorRegister($src2$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct 
vmul_fp_masked(vReg dst_src1, vReg src2, vRegMask_V0 v0) %{ + match(Set dst_src1 (MulVF (Binary dst_src1 src2) v0)); + match(Set dst_src1 (MulVD (Binary dst_src1 src2) v0)); + ins_cost(VEC_COST); + format %{ "vmul.vv $dst_src1, $src2, $v0\t#@vmul_fp_masked" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + __ vfmul_vv(as_VectorRegister($dst_src1$$reg), as_VectorRegister($dst_src1$$reg), + as_VectorRegister($src2$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + // vector neg instruct vnegI(vReg dst, vReg src) %{ @@ -1000,7 +1192,7 @@ instruct vreduce_maxI(iRegINoSp dst, iRegIorL2I src1, vReg src2, vReg tmp) %{ match(Set dst (MaxReductionV src1 src2)); ins_cost(VEC_COST); effect(TEMP tmp); - format %{ "vreduce_maxI $dst, $src1, $src2, $tmp" %} + format %{ "vreduce_maxI $dst, $src1, $src2\t# KILL $tmp" %} ins_encode %{ BasicType bt = Matcher::vector_element_basic_type(this, $src2); __ rvv_reduce_integral($dst$$Register, as_VectorRegister($tmp$$reg), @@ -1015,7 +1207,7 @@ instruct vreduce_maxL(iRegLNoSp dst, iRegL src1, vReg src2, vReg tmp) %{ match(Set dst (MaxReductionV src1 src2)); ins_cost(VEC_COST); effect(TEMP tmp); - format %{ "vreduce_maxL $dst, $src1, $src2, $tmp" %} + format %{ "vreduce_maxL $dst, $src1, $src2\t# KILL $tmp" %} ins_encode %{ BasicType bt = Matcher::vector_element_basic_type(this, $src2); __ rvv_reduce_integral($dst$$Register, as_VectorRegister($tmp$$reg), @@ -1034,7 +1226,7 @@ instruct vreduce_minI(iRegINoSp dst, iRegIorL2I src1, vReg src2, vReg tmp) %{ match(Set dst (MinReductionV src1 src2)); ins_cost(VEC_COST); effect(TEMP tmp); - format %{ "vreduce_minI $dst, $src1, $src2, $tmp" %} + format %{ "vreduce_minI $dst, $src1, $src2\t# KILL $tmp" %} ins_encode %{ BasicType bt = Matcher::vector_element_basic_type(this, $src2); __ rvv_reduce_integral($dst$$Register, as_VectorRegister($tmp$$reg), @@ -1049,7 +1241,7 @@ instruct vreduce_minL(iRegLNoSp 
dst, iRegL src1, vReg src2, vReg tmp) %{ match(Set dst (MinReductionV src1 src2)); ins_cost(VEC_COST); effect(TEMP tmp); - format %{ "vreduce_minL $dst, $src1, $src2, $tmp" %} + format %{ "vreduce_minL $dst, $src1, $src2\t# KILL $tmp" %} ins_encode %{ BasicType bt = Matcher::vector_element_basic_type(this, $src2); __ rvv_reduce_integral($dst$$Register, as_VectorRegister($tmp$$reg), @@ -1068,7 +1260,7 @@ instruct vreduce_maxF(fRegF dst, fRegF src1, vReg src2, vReg tmp1, vReg tmp2) %{ effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2); format %{ "reduce_maxF $dst, $src1, $src2, $tmp1, $tmp2" %} ins_encode %{ - __ reduce_minmax_FD_v($dst$$FloatRegister, + __ reduce_minmax_fp_v($dst$$FloatRegister, $src1$$FloatRegister, as_VectorRegister($src2$$reg), as_VectorRegister($tmp1$$reg), as_VectorRegister($tmp2$$reg), false /* is_double */, false /* is_min */, Matcher::vector_length_in_bytes(this, $src2)); @@ -1083,7 +1275,7 @@ instruct vreduce_maxD(fRegD dst, fRegD src1, vReg src2, vReg tmp1, vReg tmp2) %{ effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2); format %{ "reduce_maxD $dst, $src1, $src2, $tmp1, $tmp2" %} ins_encode %{ - __ reduce_minmax_FD_v($dst$$FloatRegister, + __ reduce_minmax_fp_v($dst$$FloatRegister, $src1$$FloatRegister, as_VectorRegister($src2$$reg), as_VectorRegister($tmp1$$reg), as_VectorRegister($tmp2$$reg), true /* is_double */, false /* is_min */, Matcher::vector_length_in_bytes(this, $src2)); @@ -1100,7 +1292,7 @@ instruct vreduce_minF(fRegF dst, fRegF src1, vReg src2, vReg tmp1, vReg tmp2) %{ effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2); format %{ "reduce_minF $dst, $src1, $src2, $tmp1, $tmp2" %} ins_encode %{ - __ reduce_minmax_FD_v($dst$$FloatRegister, + __ reduce_minmax_fp_v($dst$$FloatRegister, $src1$$FloatRegister, as_VectorRegister($src2$$reg), as_VectorRegister($tmp1$$reg), as_VectorRegister($tmp2$$reg), false /* is_double */, true /* is_min */, Matcher::vector_length_in_bytes(this, $src2)); @@ -1115,7 +1307,7 @@ instruct vreduce_minD(fRegD dst, fRegD src1, 
vReg src2, vReg tmp1, vReg tmp2) %{ effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2); format %{ "reduce_minD $dst, $src1, $src2, $tmp1, $tmp2" %} ins_encode %{ - __ reduce_minmax_FD_v($dst$$FloatRegister, + __ reduce_minmax_fp_v($dst$$FloatRegister, $src1$$FloatRegister, as_VectorRegister($src2$$reg), as_VectorRegister($tmp1$$reg), as_VectorRegister($tmp2$$reg), true /* is_double */, true /* is_min */, Matcher::vector_length_in_bytes(this, $src2)); @@ -1265,44 +1457,38 @@ instruct replicateD(vReg dst, fRegD src) %{ // vector shift -instruct vasrB(vReg dst, vReg src, vReg shift) %{ +instruct vasrB(vReg dst, vReg src, vReg shift, vRegMask_V0 v0) %{ match(Set dst (RShiftVB src shift)); ins_cost(VEC_COST); - effect(TEMP_DEF dst); - format %{ "vmsgtu.vi v0, $shift 7\t#@vasrB\n\t" - "vsra.vi $dst, $src, 7, Assembler::v0_t\n\t" - "vmnot.m v0, v0\n\t" - "vsra.vv $dst, $src, $shift, Assembler::v0_t" %} + effect(TEMP_DEF dst, TEMP v0); + format %{ "vasrB $dst, $src, $shift" %} ins_encode %{ __ rvv_vsetvli(T_BYTE, Matcher::vector_length_in_bytes(this)); // if shift > BitsPerByte - 1, clear the low BitsPerByte - 1 bits - __ vmsgtu_vi(v0, as_VectorRegister($shift$$reg), BitsPerByte - 1); + __ vmsgtu_vi(as_VectorRegister($v0$$reg), as_VectorRegister($shift$$reg), BitsPerByte - 1); __ vsra_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), BitsPerByte - 1, Assembler::v0_t); // otherwise, shift - __ vmnot_m(v0, v0); + __ vmnot_m(as_VectorRegister($v0$$reg), as_VectorRegister($v0$$reg)); __ vsra_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), as_VectorRegister($shift$$reg), Assembler::v0_t); %} ins_pipe(pipe_slow); %} -instruct vasrS(vReg dst, vReg src, vReg shift) %{ +instruct vasrS(vReg dst, vReg src, vReg shift, vRegMask_V0 v0) %{ match(Set dst (RShiftVS src shift)); ins_cost(VEC_COST); - effect(TEMP_DEF dst); - format %{ "vmsgtu.vi v0, $shift, 15\t#@vasrS\n\t" - "vsra.vi $dst, $src, 15, Assembler::v0_t\n\t" - "vmnot.m v0, v0\n\t" - "vsra.vv $dst, $src, 
$shift, Assembler::v0_t" %} + effect(TEMP_DEF dst, TEMP v0); + format %{ "vasrS $dst, $src, $shift" %} ins_encode %{ __ rvv_vsetvli(T_SHORT, Matcher::vector_length_in_bytes(this)); // if shift > BitsPerShort - 1, clear the low BitsPerShort - 1 bits - __ vmsgtu_vi(v0, as_VectorRegister($shift$$reg), BitsPerShort - 1); + __ vmsgtu_vi(as_VectorRegister($v0$$reg), as_VectorRegister($shift$$reg), BitsPerShort - 1); __ vsra_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), BitsPerShort - 1, Assembler::v0_t); // otherwise, shift - __ vmnot_m(v0, v0); + __ vmnot_m(as_VectorRegister($v0$$reg), as_VectorRegister($v0$$reg)); __ vsra_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), as_VectorRegister($shift$$reg), Assembler::v0_t); %} @@ -1312,7 +1498,7 @@ instruct vasrS(vReg dst, vReg src, vReg shift) %{ instruct vasrI(vReg dst, vReg src, vReg shift) %{ match(Set dst (RShiftVI src shift)); ins_cost(VEC_COST); - format %{ "vsra.vv $dst, $src, $shift\t#@vasrI" %} + format %{ "vasrI $dst, $src, $shift" %} ins_encode %{ __ rvv_vsetvli(T_INT, Matcher::vector_length_in_bytes(this)); __ vsra_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), @@ -1324,53 +1510,109 @@ instruct vasrI(vReg dst, vReg src, vReg shift) %{ instruct vasrL(vReg dst, vReg src, vReg shift) %{ match(Set dst (RShiftVL src shift)); ins_cost(VEC_COST); - format %{ "vsra.vv $dst, $src, $shift\t#@vasrL" %} + format %{ "vasrL $dst, $src, $shift" %} ins_encode %{ __ rvv_vsetvli(T_LONG, Matcher::vector_length_in_bytes(this)); __ vsra_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), - as_VectorRegister($shift$$reg)); + as_VectorRegister($shift$$reg)); %} ins_pipe(pipe_slow); %} -instruct vlslB(vReg dst, vReg src, vReg shift) %{ +instruct vasrB_masked(vReg dst_src, vReg shift, vRegMask vmask, vRegMask_V0 v0) %{ + match(Set dst_src (RShiftVB (Binary dst_src shift) vmask)); + ins_cost(VEC_COST); + effect(TEMP_DEF dst_src, TEMP v0); + format %{ "vasrB_masked $dst_src, 
$dst_src, $shift, $vmask\t# KILL $v0" %} + ins_encode %{ + __ rvv_vsetvli(T_BYTE, Matcher::vector_length_in_bytes(this)); + __ vmsgtu_vi(as_VectorRegister($v0$$reg), as_VectorRegister($shift$$reg), BitsPerByte - 1); + // if shift > BitsPerByte - 1, clear the low BitsPerByte - 1 bits + __ vmerge_vim(as_VectorRegister($shift$$reg), as_VectorRegister($shift$$reg), BitsPerByte - 1); + // otherwise, shift + __ vmv1r_v(as_VectorRegister($v0$$reg), as_VectorRegister($vmask$$reg)); + __ vsra_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($shift$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct vasrS_masked(vReg dst_src, vReg shift, vRegMask vmask, vRegMask_V0 v0) %{ + match(Set dst_src (RShiftVS (Binary dst_src shift) vmask)); + ins_cost(VEC_COST); + effect(TEMP_DEF dst_src, TEMP v0); + format %{ "vasrS_masked $dst_src, $dst_src, $shift, $vmask\t# KILL $v0" %} + ins_encode %{ + __ rvv_vsetvli(T_SHORT, Matcher::vector_length_in_bytes(this)); + __ vmsgtu_vi(as_VectorRegister($v0$$reg), as_VectorRegister($shift$$reg), BitsPerShort - 1); + // if shift > BitsPerShort - 1, clear the low BitsPerShort - 1 bits + __ vmerge_vim(as_VectorRegister($shift$$reg), as_VectorRegister($shift$$reg), BitsPerShort - 1); + // otherwise, shift + __ vmv1r_v(as_VectorRegister($v0$$reg), as_VectorRegister($vmask$$reg)); + __ vsra_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($shift$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct vasrI_masked(vReg dst_src, vReg shift, vRegMask_V0 v0) %{ + match(Set dst_src (RShiftVI (Binary dst_src shift) v0)); + ins_cost(VEC_COST); + effect(TEMP_DEF dst_src); + format %{ "vasrI_masked $dst_src, $dst_src, $shift, $v0" %} + ins_encode %{ + __ rvv_vsetvli(T_INT, Matcher::vector_length_in_bytes(this)); + __ vsra_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($shift$$reg), Assembler::v0_t); + %} + 
ins_pipe(pipe_slow); +%} + +instruct vasrL_masked(vReg dst_src, vReg shift, vRegMask_V0 v0) %{ + match(Set dst_src (RShiftVL (Binary dst_src shift) v0)); + ins_cost(VEC_COST); + effect(TEMP_DEF dst_src); + format %{ "vasrL_masked $dst_src, $dst_src, $shift, $v0" %} + ins_encode %{ + __ rvv_vsetvli(T_LONG, Matcher::vector_length_in_bytes(this)); + __ vsra_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($shift$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct vlslB(vReg dst, vReg src, vReg shift, vRegMask_V0 v0) %{ match(Set dst (LShiftVB src shift)); ins_cost(VEC_COST); - effect( TEMP_DEF dst); - format %{ "vmsgtu.vi v0, $shift, 7\t#@vlslB\n\t" - "vxor.vv $dst, $src, $src, Assembler::v0_t\n\t" - "vmnot.m v0, v0\n\t" - "vsll.vv $dst, $src, $shift, Assembler::v0_t" %} + effect(TEMP_DEF dst, TEMP v0); + format %{ "vlslB $dst, $src, $shift" %} ins_encode %{ __ rvv_vsetvli(T_BYTE, Matcher::vector_length_in_bytes(this)); // if shift > BitsPerByte - 1, clear the element - __ vmsgtu_vi(v0, as_VectorRegister($shift$$reg), BitsPerByte - 1); + __ vmsgtu_vi(as_VectorRegister($v0$$reg), as_VectorRegister($shift$$reg), BitsPerByte - 1); __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), as_VectorRegister($src$$reg), Assembler::v0_t); // otherwise, shift - __ vmnot_m(v0, v0); + __ vmnot_m(as_VectorRegister($v0$$reg), as_VectorRegister($v0$$reg)); __ vsll_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), as_VectorRegister($shift$$reg), Assembler::v0_t); %} ins_pipe(pipe_slow); %} -instruct vlslS(vReg dst, vReg src, vReg shift) %{ +instruct vlslS(vReg dst, vReg src, vReg shift, vRegMask_V0 v0) %{ match(Set dst (LShiftVS src shift)); ins_cost(VEC_COST); - effect(TEMP_DEF dst); - format %{ "vmsgtu.vi v0, $shift, 15\t#@vlslS\n\t" - "vxor.vv $dst, $src, $src, Assembler::v0_t\n\t" - "vmnot.m v0, v0\n\t" - "vsll.vv $dst, $src, $shift, Assembler::v0_t" %} + effect(TEMP_DEF dst, TEMP v0); + format 
%{ "vlslS $dst, $src, $shift" %} ins_encode %{ __ rvv_vsetvli(T_SHORT, Matcher::vector_length_in_bytes(this)); // if shift > BitsPerShort - 1, clear the element - __ vmsgtu_vi(v0, as_VectorRegister($shift$$reg), BitsPerShort - 1); + __ vmsgtu_vi(as_VectorRegister($v0$$reg), as_VectorRegister($shift$$reg), BitsPerShort - 1); __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), as_VectorRegister($src$$reg), Assembler::v0_t); // otherwise, shift - __ vmnot_m(v0, v0); + __ vmnot_m(as_VectorRegister($v0$$reg), as_VectorRegister($v0$$reg)); __ vsll_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), as_VectorRegister($shift$$reg), Assembler::v0_t); %} @@ -1380,7 +1622,7 @@ instruct vlslS(vReg dst, vReg src, vReg shift) %{ instruct vlslI(vReg dst, vReg src, vReg shift) %{ match(Set dst (LShiftVI src shift)); ins_cost(VEC_COST); - format %{ "vsll.vv $dst, $src, $shift\t#@vlslI" %} + format %{ "vlslI $dst, $src, $shift" %} ins_encode %{ __ rvv_vsetvli(T_INT, Matcher::vector_length_in_bytes(this)); __ vsll_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), @@ -1392,7 +1634,7 @@ instruct vlslI(vReg dst, vReg src, vReg shift) %{ instruct vlslL(vReg dst, vReg src, vReg shift) %{ match(Set dst (LShiftVL src shift)); ins_cost(VEC_COST); - format %{ "vsll.vv $dst, $src, $shift\t# vector (D)" %} + format %{ "vlslL $dst, $src, $shift" %} ins_encode %{ __ rvv_vsetvli(T_LONG, Matcher::vector_length_in_bytes(this)); __ vsll_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), @@ -1401,55 +1643,116 @@ instruct vlslL(vReg dst, vReg src, vReg shift) %{ ins_pipe(pipe_slow); %} -instruct vlsrB(vReg dst, vReg src, vReg shift) %{ - match(Set dst (URShiftVB src shift)); +instruct vlslB_masked(vReg dst_src, vReg shift, vRegMask vmask, vRegMask_V0 v0) %{ + match(Set dst_src (LShiftVB (Binary dst_src shift) vmask)); ins_cost(VEC_COST); - effect(TEMP_DEF dst); - format %{ "vmsgtu.vi v0, $shift, 7\t#@vlsrB\n\t" - "vxor.vv $dst, $src, $src, 
Assembler::v0_t\n\t" - "vmnot.m v0, v0, v0\n\t" - "vsll.vv $dst, $src, $shift, Assembler::v0_t" %} + effect(TEMP_DEF dst_src, TEMP v0); + format %{ "vlslB_masked $dst_src, $dst_src, $shift, $vmask\t# KILL $v0" %} ins_encode %{ __ rvv_vsetvli(T_BYTE, Matcher::vector_length_in_bytes(this)); // if shift > BitsPerByte - 1, clear the element - __ vmsgtu_vi(v0, as_VectorRegister($shift$$reg), BitsPerByte - 1); - __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), - as_VectorRegister($src$$reg), Assembler::v0_t); + __ vmsgtu_vi(as_VectorRegister($v0$$reg), as_VectorRegister($shift$$reg), BitsPerByte - 1); + __ vmand_mm(as_VectorRegister($v0$$reg), as_VectorRegister($v0$$reg), + as_VectorRegister($vmask$$reg)); + __ vxor_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($dst_src$$reg), Assembler::v0_t); // otherwise, shift - __ vmnot_m(v0, v0); - __ vsrl_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), + __ vmv1r_v(as_VectorRegister($v0$$reg), as_VectorRegister($vmask$$reg)); + __ vsll_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), as_VectorRegister($shift$$reg), Assembler::v0_t); %} ins_pipe(pipe_slow); %} -instruct vlsrS(vReg dst, vReg src, vReg shift) %{ - match(Set dst (URShiftVS src shift)); +instruct vlslS_masked(vReg dst_src, vReg shift, vRegMask vmask, vRegMask_V0 v0) %{ + match(Set dst_src (LShiftVS (Binary dst_src shift) vmask)); ins_cost(VEC_COST); - effect(TEMP_DEF dst); - format %{ "vmsgtu.vi v0, $shift, 15\t#@vlsrS\n\t" - "vxor.vv $dst, $src, $src, Assembler::v0_t\n\t" - "vmnot.m v0, v0\n\t" - "vsll.vv $dst, $src, $shift, Assembler::v0_t" %} + effect(TEMP_DEF dst_src, TEMP v0); + format %{ "vlslS_masked $dst_src, $dst_src, $shift, $vmask\t# KILL $v0" %} ins_encode %{ __ rvv_vsetvli(T_SHORT, Matcher::vector_length_in_bytes(this)); // if shift > BitsPerShort - 1, clear the element - __ vmsgtu_vi(v0, as_VectorRegister($shift$$reg), BitsPerShort - 1); + __ 
vmsgtu_vi(as_VectorRegister($v0$$reg), as_VectorRegister($shift$$reg), BitsPerShort - 1); + __ vmand_mm(as_VectorRegister($v0$$reg), as_VectorRegister($v0$$reg), + as_VectorRegister($vmask$$reg)); + __ vxor_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($dst_src$$reg), Assembler::v0_t); + // otherwise, shift + __ vmv1r_v(as_VectorRegister($v0$$reg), as_VectorRegister($vmask$$reg)); + __ vsll_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($shift$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct vlslI_masked(vReg dst_src, vReg shift, vRegMask_V0 v0) %{ + match(Set dst_src (LShiftVI (Binary dst_src shift) v0)); + ins_cost(VEC_COST); + effect(TEMP_DEF dst_src); + format %{ "vlslI_masked $dst_src, $dst_src, $shift, $v0" %} + ins_encode %{ + __ rvv_vsetvli(T_INT, Matcher::vector_length_in_bytes(this)); + __ vsll_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($shift$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct vlslL_masked(vReg dst_src, vReg shift, vRegMask_V0 v0) %{ + match(Set dst_src (LShiftVL (Binary dst_src shift) v0)); + ins_cost(VEC_COST); + effect(TEMP_DEF dst_src); + format %{ "vlslL_masked $dst_src, $dst_src, $shift, $v0" %} + ins_encode %{ + __ rvv_vsetvli(T_LONG, Matcher::vector_length_in_bytes(this)); + __ vsll_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($shift$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct vlsrB(vReg dst, vReg src, vReg shift, vRegMask_V0 v0) %{ + match(Set dst (URShiftVB src shift)); + ins_cost(VEC_COST); + effect(TEMP_DEF dst, TEMP v0); + format %{ "vlsrB $dst, $src, $shift" %} + ins_encode %{ + __ rvv_vsetvli(T_BYTE, Matcher::vector_length_in_bytes(this)); + // if shift > BitsPerByte - 1, clear the element + __ vmsgtu_vi(as_VectorRegister($v0$$reg), as_VectorRegister($shift$$reg), BitsPerByte - 1); 
__ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), as_VectorRegister($src$$reg), Assembler::v0_t); // otherwise, shift - __ vmnot_m(v0, v0); + __ vmnot_m(as_VectorRegister($v0$$reg), as_VectorRegister($v0$$reg)); __ vsrl_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), as_VectorRegister($shift$$reg), Assembler::v0_t); %} ins_pipe(pipe_slow); %} +instruct vlsrS(vReg dst, vReg src, vReg shift, vRegMask_V0 v0) %{ + match(Set dst (URShiftVS src shift)); + ins_cost(VEC_COST); + effect(TEMP_DEF dst, TEMP v0); + format %{ "vlsrS $dst, $src, $shift" %} + ins_encode %{ + __ rvv_vsetvli(T_SHORT, Matcher::vector_length_in_bytes(this)); + // if shift > BitsPerShort - 1, clear the element + __ vmsgtu_vi(as_VectorRegister($v0$$reg), as_VectorRegister($shift$$reg), BitsPerShort - 1); + __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), + as_VectorRegister($src$$reg), Assembler::v0_t); + // otherwise, shift + __ vmnot_m(as_VectorRegister($v0$$reg), as_VectorRegister($v0$$reg)); + __ vsrl_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), + as_VectorRegister($shift$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} instruct vlsrI(vReg dst, vReg src, vReg shift) %{ match(Set dst (URShiftVI src shift)); ins_cost(VEC_COST); - format %{ "vsrl.vv $dst, $src, $shift\t#@vlsrI" %} + format %{ "vlsrI $dst, $src, $shift" %} ins_encode %{ __ rvv_vsetvli(T_INT, Matcher::vector_length_in_bytes(this)); __ vsrl_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), @@ -1458,11 +1761,10 @@ instruct vlsrI(vReg dst, vReg src, vReg shift) %{ ins_pipe(pipe_slow); %} - instruct vlsrL(vReg dst, vReg src, vReg shift) %{ match(Set dst (URShiftVL src shift)); ins_cost(VEC_COST); - format %{ "vsrl.vv $dst, $src, $shift\t#@vlsrL" %} + format %{ "vlsrL $dst, $src, $shift" %} ins_encode %{ __ rvv_vsetvli(T_LONG, Matcher::vector_length_in_bytes(this)); __ vsrl_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), @@ -1471,6 
+1773,74 @@ instruct vlsrL(vReg dst, vReg src, vReg shift) %{ ins_pipe(pipe_slow); %} +instruct vlsrB_masked(vReg dst_src, vReg shift, vRegMask vmask, vRegMask_V0 v0) %{ + match(Set dst_src (URShiftVB (Binary dst_src shift) vmask)); + ins_cost(VEC_COST); + effect(TEMP_DEF dst_src, TEMP v0); + format %{ "vlsrB_masked $dst_src, $dst_src, $shift, $vmask\t# KILL $v0" %} + ins_encode %{ + __ rvv_vsetvli(T_BYTE, Matcher::vector_length_in_bytes(this)); + // if shift > BitsPerByte - 1, clear the element + __ vmsgtu_vi(as_VectorRegister($v0$$reg), as_VectorRegister($shift$$reg), BitsPerByte - 1); + __ vmand_mm(as_VectorRegister($v0$$reg), as_VectorRegister($v0$$reg), + as_VectorRegister($vmask$$reg)); + __ vxor_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($dst_src$$reg), Assembler::v0_t); + // otherwise, shift + __ vmv1r_v(as_VectorRegister($v0$$reg), as_VectorRegister($vmask$$reg)); + __ vsrl_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($shift$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct vlsrS_masked(vReg dst_src, vReg shift, vRegMask vmask, vRegMask_V0 v0) %{ + match(Set dst_src (URShiftVS (Binary dst_src shift) vmask)); + ins_cost(VEC_COST); + effect(TEMP_DEF dst_src, TEMP v0); + format %{ "vlsrS_masked $dst_src, $dst_src, $shift, $vmask\t# KILL $v0" %} + ins_encode %{ + __ rvv_vsetvli(T_SHORT, Matcher::vector_length_in_bytes(this)); + // if shift > BitsPerShort - 1, clear the element + __ vmsgtu_vi(as_VectorRegister($v0$$reg), as_VectorRegister($shift$$reg), BitsPerShort - 1); + __ vmand_mm(as_VectorRegister($v0$$reg), as_VectorRegister($v0$$reg), + as_VectorRegister($vmask$$reg)); + __ vxor_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($dst_src$$reg), Assembler::v0_t); + // otherwise, shift + __ vmv1r_v(as_VectorRegister($v0$$reg), as_VectorRegister($vmask$$reg)); + __ vsrl_vv(as_VectorRegister($dst_src$$reg), 
as_VectorRegister($dst_src$$reg), + as_VectorRegister($shift$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct vlsrI_masked(vReg dst_src, vReg shift, vRegMask_V0 v0) %{ + match(Set dst_src (URShiftVI (Binary dst_src shift) v0)); + ins_cost(VEC_COST); + effect(TEMP_DEF dst_src); + format %{ "vlsrI_masked $dst_src, $dst_src, $shift, $v0" %} + ins_encode %{ + __ rvv_vsetvli(T_INT, Matcher::vector_length_in_bytes(this)); + __ vsrl_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($shift$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct vlsrL_masked(vReg dst_src, vReg shift, vRegMask_V0 v0) %{ + match(Set dst_src (URShiftVL (Binary dst_src shift) v0)); + ins_cost(VEC_COST); + effect(TEMP_DEF dst_src); + format %{ "vlsrL_masked $dst_src, $dst_src, $shift, $v0" %} + ins_encode %{ + __ rvv_vsetvli(T_LONG, Matcher::vector_length_in_bytes(this)); + __ vsrl_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg), + as_VectorRegister($shift$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + instruct vasrB_imm(vReg dst, vReg src, immI shift) %{ match(Set dst (RShiftVB src (RShiftCntV shift))); ins_cost(VEC_COST); @@ -1827,13 +2197,45 @@ instruct vsubD(vReg dst, vReg src1, vReg src2) %{ ins_pipe(pipe_slow); %} +// vector sub - predicated + +instruct vsub_masked(vReg dst_src1, vReg src2, vRegMask_V0 v0) %{ + match(Set dst_src1 (SubVB (Binary dst_src1 src2) v0)); + match(Set dst_src1 (SubVS (Binary dst_src1 src2) v0)); + match(Set dst_src1 (SubVI (Binary dst_src1 src2) v0)); + match(Set dst_src1 (SubVL (Binary dst_src1 src2) v0)); + ins_cost(VEC_COST); + format %{ "vsub.vv $dst_src1, $src2, $v0\t#@vsub_masked" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + __ vsub_vv(as_VectorRegister($dst_src1$$reg), as_VectorRegister($dst_src1$$reg), + as_VectorRegister($src2$$reg), Assembler::v0_t); + 
%} + ins_pipe(pipe_slow); +%} + +instruct vsub_fp_masked(vReg dst_src1, vReg src2, vRegMask_V0 v0) %{ + match(Set dst_src1 (SubVF (Binary dst_src1 src2) v0)); + match(Set dst_src1 (SubVD (Binary dst_src1 src2) v0)); + ins_cost(VEC_COST); + format %{ "vfsub.vv $dst_src1, $src2, $v0\t#@vsub_fp_masked" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + __ vfsub_vv(as_VectorRegister($dst_src1$$reg), as_VectorRegister($dst_src1$$reg), + as_VectorRegister($src2$$reg), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + instruct vstring_equalsL(iRegP_R11 str1, iRegP_R13 str2, iRegI_R14 cnt, iRegI_R10 result, vReg_V1 v1, - vReg_V2 v2, vReg_V3 v3, rFlagsReg cr) + vReg_V2 v2, vReg_V3 v3, vRegMask_V0 v0, rFlagsReg cr) %{ predicate(UseRVV && ((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL); match(Set result (StrEquals (Binary str1 str2) cnt)); - effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, TEMP v1, TEMP v2, TEMP v3, KILL cr); + effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, TEMP v1, TEMP v2, TEMP v3, TEMP v0, KILL cr); format %{ "String Equals $str1, $str2, $cnt -> $result\t#@string_equalsL" %} ins_encode %{ @@ -1846,11 +2248,11 @@ instruct vstring_equalsL(iRegP_R11 str1, iRegP_R13 str2, iRegI_R14 cnt, instruct vstring_equalsU(iRegP_R11 str1, iRegP_R13 str2, iRegI_R14 cnt, iRegI_R10 result, vReg_V1 v1, - vReg_V2 v2, vReg_V3 v3, rFlagsReg cr) + vReg_V2 v2, vReg_V3 v3, vRegMask_V0 v0, rFlagsReg cr) %{ predicate(UseRVV && ((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU); match(Set result (StrEquals (Binary str1 str2) cnt)); - effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, TEMP v1, TEMP v2, TEMP v3, KILL cr); + effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, TEMP v1, TEMP v2, TEMP v3, TEMP v0, KILL cr); format %{ "String Equals $str1, $str2, $cnt -> $result\t#@string_equalsU" %} ins_encode %{ @@ -1862,11 +2264,11 @@ instruct vstring_equalsU(iRegP_R11 str1, 
iRegP_R13 str2, iRegI_R14 cnt, %} instruct varray_equalsB(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result, - vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, iRegP_R28 tmp, rFlagsReg cr) + vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vRegMask_V0 v0, iRegP_R28 tmp, rFlagsReg cr) %{ predicate(UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL); match(Set result (AryEq ary1 ary2)); - effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP v1, TEMP v2, TEMP v3, KILL cr); + effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP v1, TEMP v2, TEMP v3, TEMP v0, KILL cr); format %{ "Array Equals $ary1, ary2 -> $result\t#@array_equalsB // KILL $tmp" %} ins_encode %{ @@ -1877,11 +2279,11 @@ instruct varray_equalsB(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result, %} instruct varray_equalsC(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result, - vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, iRegP_R28 tmp, rFlagsReg cr) + vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vRegMask_V0 v0, iRegP_R28 tmp, rFlagsReg cr) %{ predicate(UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU); match(Set result (AryEq ary1 ary2)); - effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP v1, TEMP v2, TEMP v3, KILL cr); + effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP v1, TEMP v2, TEMP v3, TEMP v0, KILL cr); format %{ "Array Equals $ary1, ary2 -> $result\t#@array_equalsC // KILL $tmp" %} ins_encode %{ @@ -1893,12 +2295,12 @@ instruct varray_equalsC(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result, instruct vstring_compareU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, iRegI_R10 result, vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vReg_V4 v4, vReg_V5 v5, - iRegP_R28 tmp1, iRegL_R29 tmp2) + vRegMask_V0 v0, iRegP_R28 tmp1, iRegL_R29 tmp2) %{ predicate(UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UU); match(Set result(StrComp(Binary str1 cnt1)(Binary str2 cnt2))); effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, - TEMP v1, TEMP v2, TEMP v3, TEMP v4, TEMP v5); + 
TEMP v1, TEMP v2, TEMP v3, TEMP v4, TEMP v5, TEMP v0); format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareU" %} ins_encode %{ @@ -1912,12 +2314,12 @@ instruct vstring_compareU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_ %} instruct vstring_compareL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, iRegI_R10 result, vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vReg_V4 v4, vReg_V5 v5, - iRegP_R28 tmp1, iRegL_R29 tmp2) + vRegMask_V0 v0, iRegP_R28 tmp1, iRegL_R29 tmp2) %{ predicate(UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LL); match(Set result(StrComp(Binary str1 cnt1)(Binary str2 cnt2))); effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, - TEMP v1, TEMP v2, TEMP v3, TEMP v4, TEMP v5); + TEMP v1, TEMP v2, TEMP v3, TEMP v4, TEMP v5, TEMP v0); format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareL" %} ins_encode %{ @@ -1931,12 +2333,12 @@ instruct vstring_compareL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_ instruct vstring_compareUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, iRegI_R10 result, vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vReg_V4 v4, vReg_V5 v5, - iRegP_R28 tmp1, iRegL_R29 tmp2) + vRegMask_V0 v0, iRegP_R28 tmp1, iRegL_R29 tmp2) %{ predicate(UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UL); match(Set result(StrComp(Binary str1 cnt1)(Binary str2 cnt2))); effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, - TEMP v1, TEMP v2, TEMP v3, TEMP v4, TEMP v5); + TEMP v1, TEMP v2, TEMP v3, TEMP v4, TEMP v5, TEMP v0); format %{"String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareUL" %} ins_encode %{ @@ -1949,12 +2351,12 @@ instruct vstring_compareUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI %} instruct vstring_compareLU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2, iRegI_R10 result, vReg_V1 v1, vReg_V2 v2, 
vReg_V3 v3, vReg_V4 v4, vReg_V5 v5, - iRegP_R28 tmp1, iRegL_R29 tmp2) + vRegMask_V0 v0, iRegP_R28 tmp1, iRegL_R29 tmp2) %{ predicate(UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LU); match(Set result(StrComp(Binary str1 cnt1)(Binary str2 cnt2))); effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, - TEMP v1, TEMP v2, TEMP v3, TEMP v4, TEMP v5); + TEMP v1, TEMP v2, TEMP v3, TEMP v4, TEMP v5, TEMP v0); format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareLU" %} ins_encode %{ @@ -1968,11 +2370,11 @@ instruct vstring_compareLU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI // fast byte[] to char[] inflation instruct vstring_inflate(Universe dummy, iRegP_R10 src, iRegP_R11 dst, iRegI_R12 len, - vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, iRegLNoSp tmp) + vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vRegMask_V0 v0, iRegLNoSp tmp) %{ predicate(UseRVV); match(Set dummy (StrInflatedCopy src (Binary dst len))); - effect(TEMP v1, TEMP v2, TEMP v3, TEMP tmp, USE_KILL src, USE_KILL dst, USE_KILL len); + effect(TEMP v1, TEMP v2, TEMP v3, TEMP v0, TEMP tmp, USE_KILL src, USE_KILL dst, USE_KILL len); format %{ "String Inflate $src,$dst" %} ins_encode %{ @@ -1983,12 +2385,12 @@ instruct vstring_inflate(Universe dummy, iRegP_R10 src, iRegP_R11 dst, iRegI_R12 // encode char[] to byte[] in ISO_8859_1 instruct vencode_iso_array(iRegP_R12 src, iRegP_R11 dst, iRegI_R13 len, iRegI_R10 result, - vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, iRegLNoSp tmp) + vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vRegMask_V0 v0, iRegLNoSp tmp) %{ predicate(UseRVV); match(Set result (EncodeISOArray src (Binary dst len))); effect(TEMP_DEF result, USE_KILL src, USE_KILL dst, USE_KILL len, - TEMP v1, TEMP v2, TEMP v3, TEMP tmp); + TEMP v1, TEMP v2, TEMP v3, TEMP tmp, TEMP v0); format %{ "Encode array $src,$dst,$len -> $result" %} ins_encode %{ @@ -2000,12 +2402,12 @@ instruct vencode_iso_array(iRegP_R12 src, iRegP_R11 dst, iRegI_R13 len, iRegI_R1 // 
fast char[] to byte[] compression instruct vstring_compress(iRegP_R12 src, iRegP_R11 dst, iRegI_R13 len, iRegI_R10 result, - vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, iRegLNoSp tmp) + vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vRegMask_V0 v0, iRegLNoSp tmp) %{ predicate(UseRVV); match(Set result (StrCompressedCopy src (Binary dst len))); effect(TEMP_DEF result, USE_KILL src, USE_KILL dst, USE_KILL len, - TEMP v1, TEMP v2, TEMP v3, TEMP tmp); + TEMP v1, TEMP v2, TEMP v3, TEMP tmp, TEMP v0); format %{ "String Compress $src,$dst -> $result // KILL R11, R12, R13" %} ins_encode %{ @@ -2016,11 +2418,11 @@ instruct vstring_compress(iRegP_R12 src, iRegP_R11 dst, iRegI_R13 len, iRegI_R10 %} instruct vcount_positives(iRegP_R11 ary, iRegI_R12 len, iRegI_R10 result, - vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, iRegLNoSp tmp) + vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vRegMask_V0 v0, iRegLNoSp tmp) %{ predicate(UseRVV); match(Set result (CountPositives ary len)); - effect(TEMP_DEF result, USE_KILL ary, USE_KILL len, TEMP v1, TEMP v2, TEMP v3, TEMP tmp); + effect(TEMP_DEF result, USE_KILL ary, USE_KILL len, TEMP v1, TEMP v2, TEMP v3, TEMP tmp, TEMP v0); format %{ "count positives byte[] $ary, $len -> $result" %} ins_encode %{ @@ -2032,12 +2434,12 @@ instruct vcount_positives(iRegP_R11 ary, iRegI_R12 len, iRegI_R10 result, instruct vstringU_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, - vReg_V1 v1, vReg_V2 v2, vReg_V3 v3) + vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vRegMask_V0 v0) %{ predicate(UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U)); match(Set result (StrIndexOfChar (Binary str1 cnt1) ch)); effect(TEMP_DEF result, USE_KILL str1, USE_KILL cnt1, USE_KILL ch, - TEMP tmp1, TEMP tmp2, TEMP v1, TEMP v2, TEMP v3); + TEMP tmp1, TEMP tmp2, TEMP v1, TEMP v2, TEMP v3, TEMP v0); format %{ "StringUTF16 IndexOf char[] $str1, $cnt1, $ch -> $result" %} @@ -2052,12 +2454,12 @@ instruct vstringU_indexof_char(iRegP_R11 str1, 
iRegI_R12 cnt1, iRegI_R13 ch, instruct vstringL_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, - vReg_V1 v1, vReg_V2 v2, vReg_V3 v3) + vReg_V1 v1, vReg_V2 v2, vReg_V3 v3, vRegMask_V0 v0) %{ predicate(UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L)); match(Set result (StrIndexOfChar (Binary str1 cnt1) ch)); effect(TEMP_DEF result, USE_KILL str1, USE_KILL cnt1, USE_KILL ch, - TEMP tmp1, TEMP tmp2, TEMP v1, TEMP v2, TEMP v3); + TEMP tmp1, TEMP tmp2, TEMP v1, TEMP v2, TEMP v3, TEMP v0); format %{ "StringLatin1 IndexOf char[] $str1, $cnt1, $ch -> $result" %} @@ -2072,11 +2474,11 @@ instruct vstringL_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch, // clearing of an array instruct vclearArray_reg_reg(iRegL_R29 cnt, iRegP_R28 base, Universe dummy, - vReg_V1 vReg1, vReg_V2 vReg2, vReg_V3 vReg3) + vReg_V1 vReg1, vReg_V2 vReg2, vReg_V3 vReg3, vRegMask_V0 v0) %{ predicate(!UseBlockZeroing && UseRVV); match(Set dummy (ClearArray cnt base)); - effect(USE_KILL cnt, USE_KILL base, TEMP vReg1, TEMP vReg2, TEMP vReg3); + effect(USE_KILL cnt, USE_KILL base, TEMP vReg1, TEMP vReg2, TEMP vReg3, TEMP v0); format %{ "ClearArray $cnt, $base\t#@clearArray_reg_reg" %} @@ -2101,4 +2503,188 @@ instruct vloadcon(vReg dst, immI0 src) %{ } %} ins_pipe(pipe_slow); +%} + +instruct vmask_gen_I(vRegMask dst, iRegI src) %{ + match(Set dst (VectorMaskGen (ConvI2L src))); + format %{ "vmask_gen_I $dst, $src" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + Assembler::SEW sew = Assembler::elemtype_to_sew(bt); + __ vsetvli(t0, $src$$Register, sew); + __ vmset_m(as_VectorRegister($dst$$reg)); + %} + ins_pipe(pipe_slow); +%} + +instruct vmask_gen_L(vRegMask dst, iRegL src) %{ + match(Set dst (VectorMaskGen src)); + format %{ "vmask_gen_L $dst, $src" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + Assembler::SEW sew = Assembler::elemtype_to_sew(bt); 
+ __ vsetvli(t0, $src$$Register, sew); + __ vmset_m(as_VectorRegister($dst$$reg)); + %} + ins_pipe(pipe_slow); +%} + +instruct vmask_gen_imm(vRegMask dst, immL con) %{ + match(Set dst (VectorMaskGen con)); + format %{ "vmask_gen_imm $dst, $con" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, (uint)($con$$constant)); + __ vmset_m(as_VectorRegister($dst$$reg)); + %} + ins_pipe(pipe_slow); +%} + +instruct vmaskAll_immI(vRegMask dst, immI src) %{ + match(Set dst (MaskAll src)); + format %{ "vmaskAll_immI $dst, $src" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + int con = (int)$src$$constant; + if (con == 0) { + __ vmclr_m(as_VectorRegister($dst$$reg)); + } else { + assert(con == -1, "invalid constant value for mask"); + __ vmset_m(as_VectorRegister($dst$$reg)); + } + %} + ins_pipe(pipe_slow); +%} + +instruct vmaskAllI(vRegMask dst, iRegI src) %{ + match(Set dst (MaskAll src)); + format %{ "vmaskAllI $dst, $src" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + __ vmv_v_x(as_VectorRegister($dst$$reg), as_Register($src$$reg)); + __ vmsne_vx(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg), zr); + %} + ins_pipe(pipe_slow); +%} + +instruct vmaskAll_immL(vRegMask dst, immL src) %{ + match(Set dst (MaskAll src)); + format %{ "vmaskAll_immL $dst, $src" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + long con = (long)$src$$constant; + if (con == 0) { + __ vmclr_m(as_VectorRegister($dst$$reg)); + } else { + assert(con == -1, "invalid constant value for mask"); + __ vmset_m(as_VectorRegister($dst$$reg)); + } + %} + ins_pipe(pipe_slow); +%} + +instruct vmaskAllL(vRegMask dst, iRegL src) %{ + match(Set dst (MaskAll src)); + format %{ "vmaskAllL 
$dst, $src" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + __ vmv_v_x(as_VectorRegister($dst$$reg), as_Register($src$$reg)); + __ vmsne_vx(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg), zr); + %} + ins_pipe(pipe_slow); +%} + +// ------------------------------ Vector mask basic OPs ------------------------ + +// vector mask logical ops: and/or/xor + +instruct vmask_and(vRegMask dst, vRegMask src1, vRegMask src2) %{ + match(Set dst (AndVMask src1 src2)); + format %{ "vmask_and $dst, $src1, $src2" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + __ vmand_mm(as_VectorRegister($dst$$reg), + as_VectorRegister($src1$$reg), + as_VectorRegister($src2$$reg)); + %} + ins_pipe(pipe_slow); +%} + +instruct vmask_or(vRegMask dst, vRegMask src1, vRegMask src2) %{ + match(Set dst (OrVMask src1 src2)); + format %{ "vmask_or $dst, $src1, $src2" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + __ vmor_mm(as_VectorRegister($dst$$reg), + as_VectorRegister($src1$$reg), + as_VectorRegister($src2$$reg)); + %} + ins_pipe(pipe_slow); +%} + +instruct vmask_xor(vRegMask dst, vRegMask src1, vRegMask src2) %{ + match(Set dst (XorVMask src1 src2)); + format %{ "vmask_xor $dst, $src1, $src2" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + __ vmxor_mm(as_VectorRegister($dst$$reg), + as_VectorRegister($src1$$reg), + as_VectorRegister($src2$$reg)); + %} + ins_pipe(pipe_slow); +%} + +instruct vmaskcast(vRegMask dst_src) %{ + match(Set dst_src (VectorMaskCast dst_src)); + ins_cost(0); + format %{ "vmaskcast $dst_src\t# do nothing" %} + ins_encode(/* empty encoding */); + ins_pipe(pipe_class_empty); +%} + +// vector load/store 
- predicated + +instruct loadV_masked(vReg dst, vmemA mem, vRegMask_V0 v0) %{ + match(Set dst (LoadVectorMasked mem v0)); + format %{ "loadV_masked $dst, $mem, $v0" %} + ins_encode %{ + VectorRegister dst_reg = as_VectorRegister($dst$$reg); + loadStore(C2_MacroAssembler(&cbuf), false, dst_reg, + Matcher::vector_element_basic_type(this), as_Register($mem$$base), + Matcher::vector_length_in_bytes(this), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +instruct storeV_masked(vReg src, vmemA mem, vRegMask_V0 v0) %{ + match(Set mem (StoreVectorMasked mem (Binary src v0))); + format %{ "storeV_masked $mem, $src, $v0" %} + ins_encode %{ + VectorRegister src_reg = as_VectorRegister($src$$reg); + loadStore(C2_MacroAssembler(&cbuf), true, src_reg, + Matcher::vector_element_basic_type(this, $src), as_Register($mem$$base), + Matcher::vector_length_in_bytes(this, $src), Assembler::v0_t); + %} + ins_pipe(pipe_slow); +%} + +// ------------------------------ Vector blend --------------------------------- + +instruct vblend(vReg dst, vReg src1, vReg src2, vRegMask_V0 v0) %{ + match(Set dst (VectorBlend (Binary src1 src2) v0)); + format %{ "vmerge_vvm $dst, $src1, $src2, v0\t#@vector blend" %} + ins_encode %{ + BasicType bt = Matcher::vector_element_basic_type(this); + __ rvv_vsetvli(bt, Matcher::vector_length_in_bytes(this)); + __ vmerge_vvm(as_VectorRegister($dst$$reg), as_VectorRegister($src1$$reg), + as_VectorRegister($src2$$reg)); + %} + ins_pipe(pipe_slow); %} \ No newline at end of file diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp index 82858f0c7dc..deff3080a81 100644 --- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp +++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp @@ -1600,12 +1600,6 @@ void TemplateTable::float_cmp(bool is_float, int unordered_result) { } void TemplateTable::branch(bool is_jsr, bool is_wide) { - // We might be moving to a safepoint. 
The thread which calls - // Interpreter::notice_safepoints() will effectively flush its cache - // when it makes a system call, but we need to do something to - // ensure that we see the changed dispatch table. - __ membar(MacroAssembler::LoadLoad); - __ profile_taken_branch(x10, x11); const ByteSize be_offset = MethodCounters::backedge_counter_offset() + InvocationCounter::counter_offset(); @@ -1854,12 +1848,6 @@ void TemplateTable::if_acmp(Condition cc) { void TemplateTable::ret() { transition(vtos, vtos); - // We might be moving to a safepoint. The thread which calls - // Interpreter::notice_safepoints() will effectively flush its cache - // when it makes a system call, but we need to do something to - // ensure that we see the changed dispatch table. - __ membar(MacroAssembler::LoadLoad); - locals_index(x11); __ ld(x11, aaddress(x11, t1, _masm)); // get return bci, compute return bcp __ profile_ret(x11, x12); diff --git a/src/hotspot/cpu/s390/abstractInterpreter_s390.cpp b/src/hotspot/cpu/s390/abstractInterpreter_s390.cpp index 98f441452cf..c24c2b56bf7 100644 --- a/src/hotspot/cpu/s390/abstractInterpreter_s390.cpp +++ b/src/hotspot/cpu/s390/abstractInterpreter_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -121,17 +121,17 @@ int AbstractInterpreter::size_activation(int max_stack, // // Parameters: // -// interpreter_frame != NULL: +// interpreter_frame isn't null: // set up the method, locals, and monitors. -// The frame interpreter_frame, if not NULL, is guaranteed to be the +// The frame interpreter_frame, if not null, is guaranteed to be the // right size, as determined by a previous call to this method. 
// It is also guaranteed to be walkable even though it is in a skeletal state // -// is_top_frame == true: +// is_top_frame is true: // We're processing the *oldest* interpreter frame! // // pop_frame_extra_args: -// If this is != 0 we are returning to a deoptimized frame by popping +// If this isn't 0 we are returning to a deoptimized frame by popping // off the callee frame. We want to re-execute the call that called the // callee interpreted, but since the return to the interpreter would pop // the arguments off advance the esp by dummy popframe_extra_args slots. diff --git a/src/hotspot/cpu/s390/assembler_s390.hpp b/src/hotspot/cpu/s390/assembler_s390.hpp index 0a138151ace..1e2152ad718 100644 --- a/src/hotspot/cpu/s390/assembler_s390.hpp +++ b/src/hotspot/cpu/s390/assembler_s390.hpp @@ -137,7 +137,7 @@ class RelAddr { assert(((uint64_t)target & 0x0001L) == 0, "target of a relative address must be aligned"); assert(((uint64_t)pc & 0x0001L) == 0, "origin of a relative address must be aligned"); - if ((target == NULL) || (target == pc)) { + if ((target == nullptr) || (target == pc)) { return 0; // Yet unknown branch destination. } else { guarantee(is_in_range_of_RelAddr(target, pc, shortForm), "target not within reach"); @@ -295,7 +295,7 @@ class AddressLiteral { protected: // creation - AddressLiteral() : _address(NULL), _rspec() {} + AddressLiteral() : _address(nullptr), _rspec() {} public: AddressLiteral(address addr, RelocationHolder const& rspec) diff --git a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp index b99027f09d5..200f7ee978d 100644 --- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp +++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2018 SAP SE. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -375,7 +375,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) { address entry = __ pc(); NativeGeneralJump::insert_unconditional((address)_pc_start, entry); - address target = NULL; + address target = nullptr; relocInfo::relocType reloc_type = relocInfo::none; switch (_id) { case access_field_id: target = Runtime1::entry_for (Runtime1::access_field_patching_id); break; diff --git a/src/hotspot/cpu/s390/c1_FrameMap_s390.hpp b/src/hotspot/cpu/s390/c1_FrameMap_s390.hpp index ef20d0399c1..66ccc8de876 100644 --- a/src/hotspot/cpu/s390/c1_FrameMap_s390.hpp +++ b/src/hotspot/cpu/s390/c1_FrameMap_s390.hpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016 SAP SE. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ enum { nof_reg_args = 5, // Registers Z_ARG1 - Z_ARG5 are available for parameter passing. - first_available_sp_in_frame = frame::z_abi_16_size, + first_available_sp_in_frame = frame::z_common_abi_size, frame_pad_in_bytes = 0 }; diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp index 4fef086bc8e..c704931a445 100644 --- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016, 2019 SAP SE. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -143,7 +143,7 @@ void LIR_Assembler::osr_entry() { for (int i = 0; i < number_of_locks; i++) { int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); // Verify the interpreter's monitor has a non-null object. - __ asm_assert_mem8_isnot_zero(slot_offset + 1*BytesPerWord, OSR_buf, "locked object is NULL", __LINE__); + __ asm_assert_mem8_isnot_zero(slot_offset + 1*BytesPerWord, OSR_buf, "locked object is null", __LINE__); // Copy the lock field into the compiled activation. __ z_lg(Z_R1_scratch, slot_offset + 0, OSR_buf); __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_lock(i)); @@ -158,7 +158,7 @@ void LIR_Assembler::osr_entry() { address LIR_Assembler::emit_call_c(address a) { __ align_call_far_patchable(__ pc()); address call_addr = __ call_c_opt(a); - if (call_addr == NULL) { + if (call_addr == nullptr) { bailout("const section overflow"); } return call_addr; @@ -167,7 +167,7 @@ address LIR_Assembler::emit_call_c(address a) { int LIR_Assembler::emit_exception_handler() { // Generate code for exception handler. address handler_base = __ start_a_stub(exception_handler_size()); - if (handler_base == NULL) { + if (handler_base == nullptr) { // Not enough space left for the handler. bailout("exception handler overflow"); return -1; @@ -213,7 +213,7 @@ int LIR_Assembler::emit_unwind_handler() { } // Perform needed unlocking. - MonitorExitStub* stub = NULL; + MonitorExitStub* stub = nullptr; if (method()->is_synchronized()) { // Runtime1::monitorexit_id expects lock address in Z_R1_scratch. LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch); @@ -238,7 +238,7 @@ int LIR_Assembler::emit_unwind_handler() { // Remove the activation and dispatch to the unwind handler. 
__ pop_frame(); - __ z_lg(Z_EXC_PC, _z_abi16(return_pc), Z_SP); + __ z_lg(Z_EXC_PC, _z_common_abi(return_pc), Z_SP); // Z_EXC_OOP: exception oop // Z_EXC_PC: exception pc @@ -248,7 +248,7 @@ int LIR_Assembler::emit_unwind_handler() { __ z_br(Z_R5); // Emit the slow path assembly. - if (stub != NULL) { + if (stub != nullptr) { stub->emit_code(this); } @@ -258,7 +258,7 @@ int LIR_Assembler::emit_unwind_handler() { int LIR_Assembler::emit_deopt_handler() { // Generate code for exception handler. address handler_base = __ start_a_stub(deopt_handler_size()); - if (handler_base == NULL) { + if (handler_base == nullptr) { // Not enough space left for the handler. bailout("deopt handler overflow"); return -1; @@ -273,7 +273,7 @@ int LIR_Assembler::emit_deopt_handler() { } void LIR_Assembler::jobject2reg(jobject o, Register reg) { - if (o == NULL) { + if (o == nullptr) { __ clear_reg(reg, true/*64bit*/, false/*set cc*/); // Must not kill cc set by cmove. } else { AddressLiteral a = __ allocate_oop_address(o); @@ -286,12 +286,12 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) { void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) { // Allocate a new index in table to hold the object once it's been patched. - int oop_index = __ oop_recorder()->allocate_oop_index(NULL); + int oop_index = __ oop_recorder()->allocate_oop_index(nullptr); PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index); AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(oop_index)); assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc"); - // The NULL will be dynamically patched later so the sequence to + // The null will be dynamically patched later so the sequence to // load the address literal must not be optimized. 
__ load_const(reg, addrlit); @@ -308,11 +308,11 @@ void LIR_Assembler::metadata2reg(Metadata* md, Register reg) { void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) { // Allocate a new index in table to hold the klass once it's been patched. - int index = __ oop_recorder()->allocate_metadata_index(NULL); + int index = __ oop_recorder()->allocate_metadata_index(nullptr); PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index); AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(index)); assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be an metadata reloc"); - // The NULL will be dynamically patched later so the sequence to + // The null will be dynamically patched later so the sequence to // load the address literal must not be optimized. __ load_const(reg, addrlit); @@ -353,18 +353,18 @@ void LIR_Assembler::emit_op3(LIR_Op3* op) { void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { #ifdef ASSERT - assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label"); - if (op->block() != NULL) { _branch_target_blocks.append(op->block()); } - if (op->ublock() != NULL) { _branch_target_blocks.append(op->ublock()); } + assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label"); + if (op->block() != nullptr) { _branch_target_blocks.append(op->block()); } + if (op->ublock() != nullptr) { _branch_target_blocks.append(op->ublock()); } #endif if (op->cond() == lir_cond_always) { - if (op->info() != NULL) { add_debug_info_for_branch(op->info()); } + if (op->info() != nullptr) { add_debug_info_for_branch(op->info()); } __ branch_optimized(Assembler::bcondAlways, *(op->label())); } else { Assembler::branch_condition acond = Assembler::bcondZero; if (op->code() == lir_cond_float_branch) { - assert(op->ublock() != NULL, "must have unordered successor"); + assert(op->ublock() != nullptr, "must have unordered successor"); __ 
branch_optimized(Assembler::bcondNotOrdered, *(op->ublock()->label())); } switch (op->cond()) { @@ -504,7 +504,7 @@ void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { } void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { - address virtual_call_oop_addr = NULL; + address virtual_call_oop_addr = nullptr; AddressLiteral empty_ic((address) Universe::non_oop_word()); virtual_call_oop_addr = __ pc(); bool success = __ load_const_from_toc(Z_inline_cache, empty_ic); @@ -546,7 +546,7 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { case T_OBJECT: dest_addr = frame_map()->address_for_slot(dest->single_stack_ix()); - if (c->as_jobject() == NULL) { + if (c->as_jobject() == nullptr) { __ store_const(dest_addr, (int64_t)NULL_WORD, 8, 8); } else { jobject2reg(c->as_jobject(), Z_R1_scratch); @@ -596,7 +596,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi case T_OBJECT: // fall through case T_ARRAY: - if (c->as_jobject() == NULL) { + if (c->as_jobject() == nullptr) { if (UseCompressedOops && !wide) { __ clear_reg(Z_R1_scratch, false); store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false); @@ -666,7 +666,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi case T_OBJECT: // fall through case T_ARRAY: - if (c->as_jobject() == NULL) { + if (c->as_jobject() == nullptr) { if (UseCompressedOops && !wide) { store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4); } else { @@ -709,7 +709,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi } } - if (info != NULL) { + if (info != nullptr) { add_debug_info_for_null_check(store_offset, info); } } @@ -760,7 +760,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod Register toc_reg = Z_R1_scratch; __ load_toc(toc_reg); address const_addr = __ float_constant(c->as_jfloat()); - if (const_addr == NULL) { + if (const_addr == nullptr) { bailout("const section overflow"); 
break; } @@ -778,7 +778,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod Register toc_reg = Z_R1_scratch; __ load_toc(toc_reg); address const_addr = __ double_constant(c->as_jdouble()); - if (const_addr == NULL) { + if (const_addr == nullptr) { bailout("const section overflow"); break; } @@ -881,7 +881,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_P __ verify_oop(src, FILE_AND_LINE); } - PatchingStub* patch = NULL; + PatchingStub* patch = nullptr; if (needs_patching) { patch = new PatchingStub(_masm, PatchingStub::access_field_id); assert(!to_reg->is_double_cpu() || @@ -969,10 +969,10 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_P default : ShouldNotReachHere(); } - if (patch != NULL) { + if (patch != nullptr) { patching_epilog(patch, patch_code, src, info); } - if (info != NULL) add_debug_info_for_null_check(offset, info); + if (info != nullptr) add_debug_info_for_null_check(offset, info); } void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { @@ -1074,7 +1074,7 @@ void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type, __ verify_oop(dest, FILE_AND_LINE); } - PatchingStub* patch = NULL; + PatchingStub* patch = nullptr; if (needs_patching) { patch = new PatchingStub(_masm, PatchingStub::access_field_id); assert(!from->is_double_cpu() || @@ -1176,11 +1176,11 @@ void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type, default: ShouldNotReachHere(); } - if (patch != NULL) { + if (patch != nullptr) { patching_epilog(patch, patch_code, dest, info); } - if (info != NULL) add_debug_info_for_null_check(offset, info); + if (info != nullptr) add_debug_info_for_null_check(offset, info); } @@ -1211,7 +1211,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) { int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { const Register poll_addr = tmp->as_register_lo(); 
__ z_lg(poll_addr, Address(Z_thread, JavaThread::polling_page_offset())); - guarantee(info != NULL, "Shouldn't be NULL"); + guarantee(info != nullptr, "Shouldn't be null"); add_debug_info_for_branch(info); int offset = __ offset(); __ relocate(relocInfo::poll_type); @@ -1226,7 +1226,7 @@ void LIR_Assembler::emit_static_call_stub() { address call_pc = __ pc(); address stub = __ start_a_stub(call_stub_size()); - if (stub == NULL) { + if (stub == nullptr) { bailout("static call stub overflow"); return; } @@ -1236,7 +1236,7 @@ void LIR_Assembler::emit_static_call_stub() { __ relocate(static_stub_Relocation::spec(call_pc)); // See also Matcher::interpreter_method_reg(). - AddressLiteral meta = __ allocate_metadata_address(NULL); + AddressLiteral meta = __ allocate_metadata_address(nullptr); bool success = __ load_const_from_toc(Z_method, meta); __ set_inst_mark(); @@ -1289,10 +1289,10 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, __ z_cfi(reg1, c->as_jint()); } } else if (c->type() == T_METADATA) { - // We only need, for now, comparison with NULL for metadata. + // We only need, for now, comparison with null for metadata. assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops"); Metadata* m = c->as_metadata(); - if (m == NULL) { + if (m == nullptr) { __ z_cghi(reg1, 0); } else { ShouldNotReachHere(); @@ -1300,7 +1300,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, } else if (is_reference_type(c->type())) { // In 64bit oops are single register. 
jobject o = c->as_jobject(); - if (o == NULL) { + if (o == nullptr) { __ z_ltgr(reg1, reg1); } else { jobject2reg(o, Z_R1_scratch); @@ -1311,7 +1311,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, } // cpu register - address } else if (opr2->is_address()) { - if (op->info() != NULL) { + if (op->info() != nullptr) { add_debug_info_for_null_check_here(op->info()); } if (unsigned_comp) { @@ -1449,7 +1449,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L } else if (opr1->is_stack()) { stack2reg(opr1, result, result->type()); } else if (opr1->is_constant()) { - const2reg(opr1, result, lir_patch_none, NULL); + const2reg(opr1, result, lir_patch_none, nullptr); } else { ShouldNotReachHere(); } @@ -1478,7 +1478,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L } else if (opr2->is_stack()) { stack2reg(opr2, result, result->type()); } else if (opr2->is_constant()) { - const2reg(opr2, result, lir_patch_none, NULL); + const2reg(opr2, result, lir_patch_none, nullptr); } else { ShouldNotReachHere(); } @@ -1488,7 +1488,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { - assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); + assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); if (left->is_single_cpu()) { assert(left == dest, "left and dest must be equal"); @@ -1935,14 +1935,14 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { CodeStub* stub = op->stub(); int flags = op->flags(); - BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; + BasicType basic_type = default_type != nullptr ? 
default_type->element_type()->basic_type() : T_ILLEGAL; if (basic_type == T_ARRAY) basic_type = T_OBJECT; // If we don't know anything, just go through the generic arraycopy. - if (default_type == NULL) { + if (default_type == nullptr) { address copyfunc_addr = StubRoutines::generic_arraycopy(); - if (copyfunc_addr == NULL) { + if (copyfunc_addr == nullptr) { // Take a slow path for generic arraycopy. __ branch_optimized(Assembler::bcondAlways, *stub->entry()); __ bind(*stub->continuation()); @@ -2007,7 +2007,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { return; } - assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); + assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); int elem_size = type2aelembytes(basic_type); int shift_amount; @@ -2037,7 +2037,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // Length and pos's are all sign extended at this point on 64bit. - // test for NULL + // test for null if (flags & LIR_OpArrayCopy::src_null_check) { __ compareU64_and_branch(src, (intptr_t)0, Assembler::bcondZero, *stub->entry()); } @@ -2115,7 +2115,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { __ load_klass(src_klass, src); __ load_klass(dst_klass, dst); - __ check_klass_subtype_fast_path(src_klass, dst_klass, tmp, &cont, &slow, NULL); + __ check_klass_subtype_fast_path(src_klass, dst_klass, tmp, &cont, &slow, nullptr); store_parameter(src_klass, 0); // sub store_parameter(dst_klass, 1); // super @@ -2127,7 +2127,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { __ bind(slow); address copyfunc_addr = StubRoutines::checkcast_arraycopy(); - if (copyfunc_addr != NULL) { // use stub if available + if (copyfunc_addr != nullptr) { // use stub if available // Src is not a sub class of dst so we have to do a // per-element check. 
@@ -2456,17 +2456,17 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L assert(!op->tmp3()->is_valid(), "tmp3's not needed"); // Check if it needs to be profiled. - ciMethodData* md = NULL; - ciProfileData* data = NULL; + ciMethodData* md = nullptr; + ciProfileData* data = nullptr; if (op->should_profile()) { ciMethod* method = op->profiled_method(); - assert(method != NULL, "Should have method"); + assert(method != nullptr, "Should have method"); int bci = op->profiled_bci(); md = method->method_data_or_null(); - assert(md != NULL, "Sanity"); + assert(md != nullptr, "Sanity"); data = md->bci_to_data(bci); - assert(data != NULL, "need data for type check"); + assert(data != nullptr, "need data for type check"); assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); } @@ -2527,8 +2527,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L __ load_klass(klass_RInfo, obj); // Perform the fast part of the checking logic. __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, - (need_slow_path ? success_target : NULL), - failure_target, NULL, + (need_slow_path ? success_target : nullptr), + failure_target, nullptr, RegisterOrConstant(super_check_offset)); if (need_slow_path) { // Call out-of-line instance of __ check_klass_subtype_slow_path(...): @@ -2572,19 +2572,19 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { CodeStub* stub = op->stub(); // Check if it needs to be profiled. 
- ciMethodData* md = NULL; - ciProfileData* data = NULL; + ciMethodData* md = nullptr; + ciProfileData* data = nullptr; assert_different_registers(value, k_RInfo, klass_RInfo); if (op->should_profile()) { ciMethod* method = op->profiled_method(); - assert(method != NULL, "Should have method"); + assert(method != nullptr, "Should have method"); int bci = op->profiled_bci(); md = method->method_data_or_null(); - assert(md != NULL, "Sanity"); + assert(md != nullptr, "Sanity"); data = md->bci_to_data(bci); - assert(data != NULL, "need data for type check"); + assert(data != nullptr, "need data for type check"); assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); } NearLabel profile_cast_success, profile_cast_failure, done; @@ -2613,7 +2613,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { // Get instance klass (it's already uncompressed). __ z_lg(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); // Perform the fast part of the checking logic. - __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); + __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); // Call out-of-line instance of __ check_klass_subtype_slow_path(...): address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id); store_parameter(klass_RInfo, 0); // sub @@ -2723,7 +2723,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { Register hdr = op->hdr_opr()->as_register(); Register lock = op->lock_opr()->as_register(); if (UseHeavyMonitors) { - if (op->info() != NULL) { + if (op->info() != nullptr) { add_debug_info_for_null_check_here(op->info()); __ null_check(obj); } @@ -2731,7 +2731,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { } else if (op->code() == lir_lock) { assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); // Add debug info for NullPointerException only if one is possible. 
- if (op->info() != NULL) { + if (op->info() != nullptr) { add_debug_info_for_null_check_here(op->info()); } __ lock_object(hdr, obj, lock, *op->stub()->entry()); @@ -2750,7 +2750,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { Register result = op->result_opr()->as_pointer_register(); CodeEmitInfo* info = op->info(); - if (info != NULL) { + if (info != nullptr) { add_debug_info_for_null_check_here(info); } @@ -2768,9 +2768,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { // Update counter for all call types. ciMethodData* md = method->method_data_or_null(); - assert(md != NULL, "Sanity"); + assert(md != nullptr, "Sanity"); ciProfileData* data = md->bci_to_data(bci); - assert(data != NULL && data->is_CounterData(), "need CounterData for calls"); + assert(data != nullptr && data->is_CounterData(), "need CounterData for calls"); assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); Register mdo = op->mdo()->as_register(); assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); @@ -2786,7 +2786,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { assert_different_registers(mdo, tmp1, recv); assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); ciKlass* known_klass = op->known_holder(); - if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { + if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) { // We know the type that will be seen at this call site; we can // statically update the MethodData* rather than needing to do // dynamic tests on the receiver type. @@ -2811,7 +2811,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { // VirtualCallData rather than just the first time. 
for (i = 0; i < VirtualCallData::row_limit(); i++) { ciKlass* receiver = vc_data->receiver(i); - if (receiver == NULL) { + if (receiver == nullptr) { Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); metadata2reg(known_klass->constant_encoding(), tmp1); __ z_stg(tmp1, recv_addr); @@ -2865,7 +2865,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest, assert(!tmp->is_valid(), "don't need temporary"); emit_call_c(dest); CHECK_BAILOUT(); - if (info != NULL) { + if (info != nullptr) { add_call_info_here(info); } } @@ -2962,7 +2962,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { Label update, next, none, null_seen, init_klass; bool do_null = !not_null; - bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; + bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; assert(do_null || do_update, "why are we here?"); @@ -2984,18 +2984,18 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { __ z_bru(next); } } else { - __ asm_assert_ne("unexpected null obj", __LINE__); + __ asm_assert(Assembler::bcondNotZero, "unexpected null obj", __LINE__); } __ bind(update); if (do_update) { #ifdef ASSERT - if (exact_klass != NULL) { + if (exact_klass != nullptr) { __ load_klass(tmp1, tmp1); metadata2reg(exact_klass->constant_encoding(), tmp2); __ z_cgr(tmp1, tmp2); - __ asm_assert_eq("exact klass and actual klass differ", __LINE__); + __ asm_assert(Assembler::bcondEqual, "exact klass and actual klass differ", __LINE__); } #endif @@ -3003,8 +3003,8 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { __ z_lg(tmp2, mdo_addr); if (!no_conflict) { - if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) { - if (exact_klass != NULL) { + if (exact_klass == nullptr || 
TypeEntries::is_type_none(current_klass)) { + if (exact_klass != nullptr) { metadata2reg(exact_klass->constant_encoding(), tmp1); } else { __ load_klass(tmp1, tmp1); @@ -3027,7 +3027,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, init_klass); } } else { - assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && + assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); // Already unknown: Nothing to do anymore. @@ -3040,7 +3040,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { __ z_bru(do_update); } else { // There's a single possible klass at this profile point. - assert(exact_klass != NULL, "should be"); + assert(exact_klass != nullptr, "should be"); if (TypeEntries::is_type_none(current_klass)) { metadata2reg(exact_klass->constant_encoding(), tmp1); __ z_lgr(Z_R0_scratch, tmp2); @@ -3060,7 +3060,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { #endif } else { - assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && + assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); // Already unknown: Nothing to do anymore. diff --git a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp index d5e5c01ebde..c216897d68f 100644 --- a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp +++ b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -103,11 +103,11 @@ LIR_Opr LIRGenerator::rlock_byte(BasicType type) { // z/Architecture cannot inline all constants. bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const { - if (v->type()->as_IntConstant() != NULL) { + if (v->type()->as_IntConstant() != nullptr) { return Immediate::is_simm16(v->type()->as_IntConstant()->value()); - } else if (v->type()->as_LongConstant() != NULL) { + } else if (v->type()->as_LongConstant() != nullptr) { return Immediate::is_simm16(v->type()->as_LongConstant()->value()); - } else if (v->type()->as_ObjectConstant() != NULL) { + } else if (v->type()->as_ObjectConstant() != nullptr) { return v->type()->as_ObjectConstant()->value()->is_null_object(); } else { return false; @@ -115,9 +115,9 @@ bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const { } bool LIRGenerator::can_inline_as_constant(Value i, int bits) const { - if (i->type()->as_IntConstant() != NULL) { + if (i->type()->as_IntConstant() != nullptr) { return Assembler::is_simm(i->type()->as_IntConstant()->value(), bits); - } else if (i->type()->as_LongConstant() != NULL) { + } else if (i->type()->as_LongConstant() != nullptr) { return Assembler::is_simm(i->type()->as_LongConstant()->value(), bits); } else { return can_store_as_constant(i, as_BasicType(i->type())); @@ -267,7 +267,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) { // "lock" stores the address of the monitor stack slot, so this is not an oop. 
LIR_Opr lock = new_register(T_INT); - CodeEmitInfo* info_for_exception = NULL; + CodeEmitInfo* info_for_exception = nullptr; if (x->needs_null_check()) { info_for_exception = state_for (x); } @@ -326,7 +326,7 @@ void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) { default: ShouldNotReachHere(); } - LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL); + LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), nullptr); set_result(x, result); } else { LIR_Opr reg = rlock(x); @@ -387,7 +387,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) { __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0)); __ branch(lir_cond_equal, new DivByZeroStub(info)); // Idiv/irem cannot trap (passing info would generate an assertion). - info = NULL; + info = nullptr; } if (x->op() == Bytecodes::_lrem) { @@ -408,7 +408,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) { left.load_item(); right.load_nonconstant(32); rlock_result(x); - arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL); + arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr); } } @@ -463,7 +463,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) { __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0)); __ branch(lir_cond_equal, new DivByZeroStub(info)); // Idiv/irem cannot trap (passing info would generate an assertion). - info = NULL; + info = nullptr; } if (x->op() == Bytecodes::_irem) { @@ -519,7 +519,7 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) { void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) { // If an operand with use count 1 is the left operand, then it is // likely that no move for 2-operand-LIR-form is necessary. 
- if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) { + if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) { x->swap_operands(); } @@ -558,7 +558,7 @@ void LIRGenerator::do_ShiftOp(ShiftOp* x) { void LIRGenerator::do_LogicOp(LogicOp* x) { // IF an operand with use count 1 is the left operand, then it is // likely that no move for 2-operand-LIR-form is necessary. - if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) { + if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) { x->swap_operands(); } @@ -659,7 +659,7 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) { case vmIntrinsics::_dexp: { assert(x->number_of_arguments() == 1, "wrong type"); - address runtime_entry = NULL; + address runtime_entry = nullptr; switch (x->id()) { case vmIntrinsics::_dsin: runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin); @@ -683,14 +683,14 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) { ShouldNotReachHere(); } - LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL); + LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), nullptr); set_result(x, result); break; } case vmIntrinsics::_dpow: { assert(x->number_of_arguments() == 2, "wrong type"); address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow); - LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL); + LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), nullptr); set_result(x, result); break; } @@ -795,7 +795,7 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) { CodeEmitInfo* info = state_for (x, x->state()); // In case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction // and therefore provide the state 
before the parameters have been consumed. - CodeEmitInfo* patching_info = NULL; + CodeEmitInfo* patching_info = nullptr; if (!x->klass()->is_loaded() || PatchALot) { patching_info = state_for (x, x->state_before()); } @@ -826,14 +826,14 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) { void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { Values* dims = x->dims(); int i = dims->length(); - LIRItemList* items = new LIRItemList(i, i, NULL); + LIRItemList* items = new LIRItemList(i, i, nullptr); while (i-- > 0) { LIRItem* size = new LIRItem(dims->at(i), this); items->at_put(i, size); } // Evaluate state_for early since it may emit code. - CodeEmitInfo* patching_info = NULL; + CodeEmitInfo* patching_info = nullptr; if (!x->klass()->is_loaded() || PatchALot) { patching_info = state_for (x, x->state_before()); @@ -882,7 +882,7 @@ void LIRGenerator::do_BlockBegin(BlockBegin* x) { void LIRGenerator::do_CheckCast(CheckCast* x) { LIRItem obj(x->obj(), this); - CodeEmitInfo* patching_info = NULL; + CodeEmitInfo* patching_info = nullptr; if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) { // Must do this before locking the destination register as an oop register, // and before the obj is loaded (the latter is for deoptimization). 
@@ -897,10 +897,10 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { CodeStub* stub; if (x->is_incompatible_class_change_check()) { - assert(patching_info == NULL, "can't patch this"); + assert(patching_info == nullptr, "can't patch this"); stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); } else if (x->is_invokespecial_receiver_check()) { - assert(patching_info == NULL, "can't patch this"); + assert(patching_info == nullptr, "can't patch this"); stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none); @@ -920,7 +920,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { void LIRGenerator::do_InstanceOf(InstanceOf* x) { LIRItem obj(x->obj(), this); - CodeEmitInfo* patching_info = NULL; + CodeEmitInfo* patching_info = nullptr; if (!x->klass()->is_loaded() || PatchALot) { patching_info = state_for (x, x->state_before()); } diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp index d74be71fe9c..91f8fe16be8 100644 --- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -130,7 +130,7 @@ void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hd load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place)); z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0). // For recursive locking, the result is zero. => Save it in the displaced header - // location (NULL in the displaced hdr location indicates recursive locking). 
+ // location (null in the displaced hdr location indicates recursive locking). z_stg(hdr, Address(disp_hdr, (intptr_t)0)); // Otherwise we don't care about the result and handle locking via runtime call. branch_optimized(Assembler::bcondNotZero, slow_case); @@ -146,7 +146,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_ // Load displaced header. z_ltg(hdr, Address(disp_hdr, (intptr_t)0)); - // If the loaded hdr is NULL we had recursive locking, and we are done. + // If the loaded hdr is null we had recursive locking, and we are done. z_bre(done); // Load object. z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes())); diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp index 6afbeebec6f..1ff914b7b71 100644 --- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp +++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -96,6 +96,6 @@ Register preserve3 = noreg) PRODUCT_RETURN; // This platform only uses signal-based null checks. The Label is not needed. - void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); } + void null_check(Register r, Label *Lnull = nullptr) { MacroAssembler::null_check(r); } #endif // CPU_S390_C1_MACROASSEMBLER_S390_HPP diff --git a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp index 77329cdefd9..28acb398c1f 100644 --- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp +++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016 SAP SE. All rights reserved. 
+ * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,10 +66,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre // ARG1 must hold thread address. z_lgr(Z_ARG1, Z_thread); - address return_pc = NULL; + address return_pc = nullptr; align_call_far_patchable(this->pc()); return_pc = call_c_opt(entry_point); - assert(return_pc != NULL, "const section overflow"); + assert(return_pc != nullptr, "const section overflow"); reset_last_Java_frame(); @@ -282,7 +282,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { // deoptmized, return to the deoptimization handler entry that will // cause re-execution of the current bytecode. DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); - assert(deopt_blob != NULL, "deoptimization blob must have been created"); + assert(deopt_blob != nullptr, "deoptimization blob must have been created"); __ z_ltr(Z_RET, Z_RET); // return value == 0 @@ -311,7 +311,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { bool save_fpu_registers = true; // Stub code and info for the different stubs. - OopMapSet* oop_maps = NULL; + OopMapSet* oop_maps = nullptr; switch (id) { case forward_exception_id: { @@ -527,7 +527,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { const Register Rarray_ptr = Z_ARG5; // Current value from cache array. 
if (UseCompressedOops) { - assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub"); + assert(Universe::heap() != nullptr, "java heap must be initialized to generate partial_subtype_check stub"); } const int frame_size = 4*BytesPerWord + frame::z_abi_160_size; @@ -547,7 +547,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { __ z_lg(Rsubklass, 0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP); __ z_lg(Rsuperklass, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP); - __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, Rarray_ptr, Rlength, NULL, &miss); + __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, Rarray_ptr, Rlength, nullptr, &miss); // Match falls through here. i = 0; @@ -627,7 +627,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { oop_maps->add_gc_map(call_offset, oop_map); restore_live_registers(sasm); DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); - assert(deopt_blob != NULL, "deoptimization blob must have been created"); + assert(deopt_blob != nullptr, "deoptimization blob must have been created"); AddressLiteral dest(deopt_blob->unpack_with_reexecution()); __ load_const_optimized(Z_R1_scratch, dest); __ z_br(Z_R1_scratch); @@ -761,7 +761,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { restore_live_registers(sasm); DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); - assert(deopt_blob != NULL, "deoptimization blob must have been created"); + assert(deopt_blob != nullptr, "deoptimization blob must have been created"); __ load_const_optimized(Z_R1_scratch, deopt_blob->unpack_with_reexecution()); __ z_br(Z_R1_scratch); @@ -784,7 +784,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { // Save registers if required. 
OopMapSet* oop_maps = new OopMapSet(); - OopMap* oop_map = NULL; + OopMap* oop_map = nullptr; Register reg_fp = Z_R1_scratch; switch (id) { @@ -807,7 +807,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { // Load issuing PC (the return address for this stub). const int frame_size_in_bytes = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size; - __ z_lg(Z_EXC_PC, Address(Z_SP, frame_size_in_bytes + _z_abi16(return_pc))); + __ z_lg(Z_EXC_PC, Address(Z_SP, frame_size_in_bytes + _z_common_abi(return_pc))); DEBUG_ONLY(__ z_lay(reg_fp, Address(Z_SP, frame_size_in_bytes));) // Make sure that the vm_results are cleared (may be unnecessary). @@ -850,7 +850,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { #ifdef ASSERT { NearLabel ok; - __ z_cg(Z_EXC_PC, Address(reg_fp, _z_abi16(return_pc))); + __ z_cg(Z_EXC_PC, Address(reg_fp, _z_common_abi(return_pc))); __ branch_optimized(Assembler::bcondEqual, ok); __ stop("use throwing pc as return address (has bci & oop map)"); __ bind(ok); diff --git a/src/hotspot/cpu/s390/c2_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c2_MacroAssembler_s390.cpp index 6fac285f738..62c1bd943b6 100644 --- a/src/hotspot/cpu/s390/c2_MacroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/c2_MacroAssembler_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, 2022 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -1016,7 +1016,7 @@ unsigned int C2_MacroAssembler::array_equals(bool is_array_equ, Register ary1, R // Return true if the same array. compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true); - // Return false if one of them is NULL. + // Return false if one of them is null. 
compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false); compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false); diff --git a/src/hotspot/cpu/s390/compiledIC_s390.cpp b/src/hotspot/cpu/s390/compiledIC_s390.cpp index 6660a34aeff..09822425e12 100644 --- a/src/hotspot/cpu/s390/compiledIC_s390.cpp +++ b/src/hotspot/cpu/s390/compiledIC_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -40,34 +40,34 @@ #undef __ #define __ _masm. -address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = NULL*/) { +address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) { #ifdef COMPILER2 // Stub is fixed up when the corresponding call is converted from calling // compiled code to calling interpreted code. - if (mark == NULL) { + if (mark == nullptr) { // Get the mark within main instrs section which is set to the address of the call. mark = cbuf.insts_mark(); } - assert(mark != NULL, "mark must not be NULL"); + assert(mark != nullptr, "mark must not be null"); // Note that the code buffer's insts_mark is always relative to insts. // That's why we must use the macroassembler to generate a stub. MacroAssembler _masm(&cbuf); address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size()); - if (stub == NULL) { - return NULL; // CodeBuffer::expand failed. + if (stub == nullptr) { + return nullptr; // CodeBuffer::expand failed. 
} __ relocate(static_stub_Relocation::spec(mark)); - AddressLiteral meta = __ allocate_metadata_address(NULL); + AddressLiteral meta = __ allocate_metadata_address(nullptr); bool success = __ load_const_from_toc(as_Register(Matcher::inline_cache_reg_encode()), meta); __ set_inst_mark(); AddressLiteral a((address)-1); success = success && __ load_const_from_toc(Z_R1, a); if (!success) { - return NULL; // CodeCache is full. + return nullptr; // CodeCache is full. } __ z_br(Z_R1); @@ -75,7 +75,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* return stub; #else ShouldNotReachHere(); - return NULL; + return nullptr; #endif } @@ -93,7 +93,7 @@ int CompiledStaticCall::reloc_to_interp_stub() { void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) { address stub = find_stub(); - guarantee(stub != NULL, "stub not found"); + guarantee(stub != nullptr, "stub not found"); if (TraceICs) { ResourceMark rm; @@ -118,7 +118,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { // Reset stub. address stub = static_stub->addr(); - assert(stub != NULL, "stub not found"); + assert(stub != nullptr, "stub not found"); assert(CompiledICLocker::is_safe(stub), "mt unsafe call"); // Creation also verifies the object. NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub()); @@ -138,7 +138,7 @@ void CompiledDirectStaticCall::verify() { // Verify stub. address stub = find_stub(); - assert(stub != NULL, "no stub found for static call"); + assert(stub != nullptr, "no stub found for static call"); // Creation also verifies the object. 
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub()); NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); diff --git a/src/hotspot/cpu/s390/continuationFreezeThaw_s390.inline.hpp b/src/hotspot/cpu/s390/continuationFreezeThaw_s390.inline.hpp index 5b9c5d9a8bd..084ee189211 100644 --- a/src/hotspot/cpu/s390/continuationFreezeThaw_s390.inline.hpp +++ b/src/hotspot/cpu/s390/continuationFreezeThaw_s390.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,17 +70,13 @@ template frame ThawBase::new_stack_frame(const frame& hf, frame& return frame(); } -inline void ThawBase::set_interpreter_frame_bottom(const frame& f, intptr_t* bottom) { - Unimplemented(); -} - inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) { Unimplemented(); } inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) { Unimplemented(); - return NULL; + return nullptr; } inline void ThawBase::patch_pd(frame& f, const frame& caller) { diff --git a/src/hotspot/cpu/s390/continuationHelper_s390.inline.hpp b/src/hotspot/cpu/s390/continuationHelper_s390.inline.hpp index 6c1a152339f..4cea0459551 100644 --- a/src/hotspot/cpu/s390/continuationHelper_s390.inline.hpp +++ b/src/hotspot/cpu/s390/continuationHelper_s390.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ template static inline intptr_t** link_address(const frame& f) { Unimplemented(); - return NULL; + return nullptr; } inline int ContinuationHelper::frame_align_words(int size) { @@ -42,7 +42,7 @@ inline int ContinuationHelper::frame_align_words(int size) { inline intptr_t* ContinuationHelper::frame_align_pointer(intptr_t* sp) { Unimplemented(); - return NULL; + return nullptr; } template @@ -75,18 +75,18 @@ inline bool ContinuationHelper::Frame::assert_frame_laid_out(frame f) { inline intptr_t** ContinuationHelper::Frame::callee_link_address(const frame& f) { Unimplemented(); - return NULL; + return nullptr; } template static inline intptr_t* real_fp(const frame& f) { Unimplemented(); - return NULL; + return nullptr; } inline address* ContinuationHelper::InterpretedFrame::return_pc_address(const frame& f) { Unimplemented(); - return NULL; + return nullptr; } inline void ContinuationHelper::InterpretedFrame::patch_sender_sp(frame& f, const frame& caller) { @@ -95,12 +95,12 @@ inline void ContinuationHelper::InterpretedFrame::patch_sender_sp(frame& f, cons inline address* ContinuationHelper::Frame::return_pc_address(const frame& f) { Unimplemented(); - return NULL; + return nullptr; } inline address ContinuationHelper::Frame::real_pc(const frame& f) { Unimplemented(); - return NULL; + return nullptr; } inline void ContinuationHelper::Frame::patch_pc(const frame& f, address pc) { @@ -109,22 +109,22 @@ inline void ContinuationHelper::Frame::patch_pc(const frame& f, address pc) { inline intptr_t* ContinuationHelper::InterpretedFrame::frame_top(const frame& f, InterpreterOopMap* mask) { // inclusive; this will be copied with the frame Unimplemented(); - return NULL; + return nullptr; } inline intptr_t* ContinuationHelper::InterpretedFrame::frame_bottom(const frame& f) { // exclusive; this will not be copied with the frame Unimplemented(); - return NULL; + return nullptr; } inline 
intptr_t* ContinuationHelper::InterpretedFrame::frame_top(const frame& f, int callee_argsize, bool callee_interpreted) { Unimplemented(); - return NULL; + return nullptr; } inline intptr_t* ContinuationHelper::InterpretedFrame::callers_sp(const frame& f) { Unimplemented(); - return NULL; + return nullptr; } #endif // CPU_S390_CONTINUATIONHELPER_S390_INLINE_HPP diff --git a/src/hotspot/cpu/s390/disassembler_s390.hpp b/src/hotspot/cpu/s390/disassembler_s390.hpp index 53019c2f0d7..4549bc3611b 100644 --- a/src/hotspot/cpu/s390/disassembler_s390.hpp +++ b/src/hotspot/cpu/s390/disassembler_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -39,7 +39,7 @@ // the perfect job. In those cases, decode_instruction0 may kick in // and do it right. // If nothing had to be done, just return "here", otherwise return "here + instr_len(here)" - static address decode_instruction0(address here, outputStream* st, address virtual_begin = NULL); + static address decode_instruction0(address here, outputStream* st, address virtual_begin = nullptr); // platform-specific instruction annotations (like value of loaded constants) static void annotate(address pc, outputStream* st); diff --git a/src/hotspot/cpu/s390/downcallLinker_s390.cpp b/src/hotspot/cpu/s390/downcallLinker_s390.cpp index 37b6f43ac14..baee7d7a043 100644 --- a/src/hotspot/cpu/s390/downcallLinker_s390.cpp +++ b/src/hotspot/cpu/s390/downcallLinker_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -33,7 +33,8 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature, const GrowableArray& input_registers, const GrowableArray& output_registers, bool needs_return_buffer, - int captured_state_mask) { + int captured_state_mask, + bool needs_transition) { Unimplemented(); return nullptr; } diff --git a/src/hotspot/cpu/s390/foreignGlobals_s390.cpp b/src/hotspot/cpu/s390/foreignGlobals_s390.cpp index 5438cbe5cd6..d3a318536bd 100644 --- a/src/hotspot/cpu/s390/foreignGlobals_s390.cpp +++ b/src/hotspot/cpu/s390/foreignGlobals_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -29,6 +29,10 @@ class MacroAssembler; +bool ForeignGlobals::is_foreign_linker_supported() { + return false; +} + const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) { Unimplemented(); return {}; diff --git a/src/hotspot/cpu/s390/frame_s390.cpp b/src/hotspot/cpu/s390/frame_s390.cpp index 72d78d23d33..23547fa6617 100644 --- a/src/hotspot/cpu/s390/frame_s390.cpp +++ b/src/hotspot/cpu/s390/frame_s390.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016, 2022 SAP SE. All rights reserved. + * Copyright (c) 2016, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -82,7 +82,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // construct the sender and do some validation of it. This goes a long way // toward eliminating issues when we get in frame construction code - if (_cb != NULL ) { + if (_cb != nullptr ) { // First check if the frame is complete and the test is reliable. 
// Unfortunately we can only check frame completeness for runtime stubs. @@ -111,19 +111,19 @@ bool frame::safe_for_sender(JavaThread *thread) { } // At this point, there still is a chance that fp_safe is false. - // In particular, (fp == NULL) might be true. So let's check and + // In particular, fp might be null. So let's check and // bail out before we actually dereference from fp. if (!fp_safe) { return false; } - z_abi_16* sender_abi = (z_abi_16*)fp; + z_common_abi* sender_abi = (z_common_abi*)fp; intptr_t* sender_sp = (intptr_t*) fp; address sender_pc = (address) sender_abi->return_pc; // We must always be able to find a recognizable pc. CodeBlob* sender_blob = CodeCache::find_blob(sender_pc); - if (sender_blob == NULL) { + if (sender_blob == nullptr) { return false; } @@ -196,7 +196,7 @@ intptr_t* frame::interpreter_frame_sender_sp() const { } frame frame::sender_for_entry_frame(RegisterMap *map) const { - assert(map != NULL, "map must be set"); + assert(map != nullptr, "map must be set"); // Java frame called from C. Skip all C frames and return top C // frame of that chunk as the sender. 
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor(); @@ -208,7 +208,7 @@ frame frame::sender_for_entry_frame(RegisterMap *map) const { assert(map->include_argument_oops(), "should be set by clear"); - if (jfa->last_Java_pc() != NULL) { + if (jfa->last_Java_pc() != nullptr) { frame fr(jfa->last_Java_sp(), jfa->last_Java_pc()); return fr; } @@ -249,7 +249,7 @@ void frame::patch_pc(Thread* thread, address pc) { own_abi()->return_pc = (uint64_t)pc; _pc = pc; // must be set before call to get_deopt_original_pc address original_pc = CompiledMethod::get_deopt_original_pc(this); - if (original_pc != NULL) { + if (original_pc != nullptr) { // assert(original_pc == _pc, "expected original to be stored before patching"); _deopt_state = is_deoptimized; _pc = original_pc; @@ -278,7 +278,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const { if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) { return false; } - int min_frame_slots = (z_abi_16_size + z_ijava_state_size) / sizeof(intptr_t); + int min_frame_slots = (z_common_abi_size + z_ijava_state_size) / sizeof(intptr_t); if (fp() - min_frame_slots < sp()) { return false; } @@ -403,12 +403,12 @@ void frame::back_trace(outputStream* st, intptr_t* start_sp, intptr_t* top_pc, u st->print("#%-3d ", num); const char* type_name = " "; - const char* function_name = NULL; + const char* function_name = nullptr; // Detect current frame's frame_type, default to 'C frame'. frame_type = 0; - CodeBlob* blob = NULL; + CodeBlob* blob = nullptr; if (Interpreter::contains(current_pc)) { frame_type = 1; diff --git a/src/hotspot/cpu/s390/frame_s390.hpp b/src/hotspot/cpu/s390/frame_s390.hpp index a7c53f23c70..2c2aee26270 100644 --- a/src/hotspot/cpu/s390/frame_s390.hpp +++ b/src/hotspot/cpu/s390/frame_s390.hpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016 SAP SE. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2016, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,7 +47,7 @@ // 0 [ABI_160] // // ABI_160: - // 0 [ABI_16] + // 0 [Z_COMMON_ABI] // 16 CARG_1: spill slot for outgoing arg 1. used by next callee. // 24 CARG_2: spill slot for outgoing arg 2. used by next callee. // 32 CARG_3: spill slot for outgoing arg 3. used by next callee. @@ -61,7 +61,7 @@ // 152 CFARG_4: spill slot for outgoing fp arg 4. used by next callee. // 160 [REMAINING CARGS] // - // ABI_16: + // Z_COMMON_ABI: // 0 callers_sp // 8 return_pc @@ -76,17 +76,23 @@ log_2_of_alignment_in_bits = 6 } frame_constants; - struct z_abi_16 { + // Common ABI. On top of all frames, C and Java + struct z_common_abi { uint64_t callers_sp; uint64_t return_pc; }; enum { - z_abi_16_size = sizeof(z_abi_16) + z_common_abi_size = sizeof(z_common_abi) }; - #define _z_abi16(_component) \ - (offset_of(frame::z_abi_16, _component)) + #define _z_common_abi(_component) \ + (offset_of(frame::z_common_abi, _component)) + + // Z_NATIVE_ABI for native C frames. + struct z_native_abi: z_common_abi { + // Nothing to add here! + }; // ABI_160: @@ -98,9 +104,7 @@ // long as we do not provide extra infrastructure, one should use // either z_abi_160_size, or _z_abi(remaining_cargs) instead of // sizeof(...). - struct z_abi_160 { - uint64_t callers_sp; - uint64_t return_pc; + struct z_abi_160 : z_native_abi { uint64_t carg_1; uint64_t carg_2; uint64_t carg_3; @@ -123,6 +127,7 @@ }; enum { + z_native_abi_size = sizeof(z_native_abi), z_abi_160_size = 160 }; @@ -158,6 +163,10 @@ // Frame layout for the Java template interpreter on z/Architecture. // + // We differentiate between TOP and PARENT frames. + // TOP frames allow for calling native C code. + // A TOP frame is trimmed to a PARENT frame when calling a Java method. 
+ // // In these figures the stack grows upwards, while memory grows // downwards. Square brackets denote regions possibly larger than // single 64 bit slots. @@ -250,13 +259,14 @@ public: - // PARENT_IJAVA_FRAME_ABI + // ABI for every Java frame, compiled and interpreted - struct z_parent_ijava_frame_abi : z_abi_16 { + struct z_java_abi : z_common_abi { + // Nothing to add here! }; - enum { - z_parent_ijava_frame_abi_size = sizeof(z_parent_ijava_frame_abi) + struct z_parent_ijava_frame_abi : z_java_abi { + // Nothing to add here! }; #define _z_parent_ijava_frame_abi(_component) \ @@ -268,6 +278,8 @@ }; enum { + z_java_abi_size = sizeof(z_java_abi), + z_parent_ijava_frame_abi_size = sizeof(z_parent_ijava_frame_abi), z_top_ijava_frame_abi_size = sizeof(z_top_ijava_frame_abi) }; @@ -357,17 +369,8 @@ // [monitor] (optional) // [in_preserve] added / removed by prolog / epilog - public: - - struct z_top_jit_abi_32 { - uint64_t callers_sp; - uint64_t return_pc; - uint64_t toc; - uint64_t tmp; - }; - - #define _z_top_jit_abi(_component) \ - (offset_of(frame::z_top_jit_abi_32, _component)) + // For JIT frames we don't differentiate between TOP and PARENT frames. + // Runtime calls go through stubs which push a new frame. struct jit_monitor { uint64_t monitor[1]; @@ -378,7 +381,7 @@ // nothing to add here! }; - struct jit_out_preserve : z_top_jit_abi_32 { + struct jit_out_preserve : z_java_abi { // Nothing to add here! }; @@ -473,7 +476,7 @@ inline intptr_t sp_at( int index) const { return *sp_addr_at(index); } // Access ABIs. 
- inline z_abi_16* own_abi() const { return (z_abi_16*) sp(); } + inline z_common_abi* own_abi() const { return (z_common_abi*) sp(); } inline z_abi_160* callers_abi() const { return (z_abi_160*) fp(); } private: diff --git a/src/hotspot/cpu/s390/frame_s390.inline.hpp b/src/hotspot/cpu/s390/frame_s390.inline.hpp index ef2a1aa1a66..dfa68940bac 100644 --- a/src/hotspot/cpu/s390/frame_s390.inline.hpp +++ b/src/hotspot/cpu/s390/frame_s390.inline.hpp @@ -124,7 +124,7 @@ inline void frame::interpreter_frame_set_monitors(BasicObjectLock* monitors) { // Accessors // Return unique id for this frame. The id must have a value where we -// can distinguish identity and younger/older relationship. NULL +// can distinguish identity and younger/older relationship. null // represents an invalid (incomparable) frame. inline intptr_t* frame::id(void) const { // Use _fp. _sp or _unextended_sp wouldn't be correct due to resizing. @@ -134,7 +134,7 @@ inline intptr_t* frame::id(void) const { // Return true if this frame is older (less recent activation) than // the frame represented by id. inline bool frame::is_older(intptr_t* id) const { - assert(this->id() != NULL && id != NULL, "NULL frame id"); + assert(this->id() != nullptr && id != nullptr, "null frame id"); // Stack grows towards smaller addresses on z/Architecture. 
return this->id() > id; } @@ -304,17 +304,17 @@ inline intptr_t* frame::real_fp() const { } inline const ImmutableOopMap* frame::get_oop_map() const { - if (_cb == NULL) return NULL; - if (_cb->oop_maps() != NULL) { + if (_cb == nullptr) return nullptr; + if (_cb->oop_maps() != nullptr) { NativePostCallNop* nop = nativePostCallNop_at(_pc); - if (nop != NULL && nop->displacement() != 0) { + if (nop != nullptr && nop->displacement() != 0) { int slot = ((nop->displacement() >> 24) & 0xff); return _cb->oop_map_for_slot(slot, _pc); } const ImmutableOopMap* oop_map = OopMapSet::find_map(this); return oop_map; } - return NULL; + return nullptr; } inline int frame::compiled_frame_stack_argsize() const { diff --git a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp index 8cb4b444ab7..3ed99f68c47 100644 --- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2018, 2019 SAP SE. All rights reserved. + * Copyright (c) 2018, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -107,13 +107,13 @@ void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorator bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0; bool on_reference = on_weak || on_phantom; Label done; - if (on_oop && on_reference && L_handle_null == NULL) { L_handle_null = &done; } + if (on_oop && on_reference && L_handle_null == nullptr) { L_handle_null = &done; } ModRefBarrierSetAssembler::load_at(masm, decorators, type, src, dst, tmp1, tmp2, L_handle_null); if (on_oop && on_reference) { // Generate the G1 pre-barrier code to log the value of // the referent field in an SATB buffer. 
g1_write_barrier_pre(masm, decorators | IS_NOT_NULL, - NULL /* obj */, + nullptr /* obj */, dst /* pre_val */, noreg/* preserve */ , tmp1, tmp2 /* tmp */, @@ -132,7 +132,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator ) { bool not_null = (decorators & IS_NOT_NULL) != 0, - preloaded = obj == NULL; + preloaded = obj == nullptr; const Register Robj = obj ? obj->base() : noreg, Roff = obj ? obj->index() : noreg; @@ -170,23 +170,23 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator } } - // Is the previous value NULL? + // Is the previous value null? // If so, we don't need to record it and we're done. // Note: pre_val is loaded, decompressed and stored (directly or via runtime call). // Register contents is preserved across runtime call if caller requests to do so. if (preloaded && not_null) { #ifdef ASSERT __ z_ltgr(Rpre_val, Rpre_val); - __ asm_assert_ne("null oop not allowed (G1 pre)", 0x321); // Checked by caller. + __ asm_assert(Assembler::bcondNotZero, "null oop not allowed (G1 pre)", 0x321); // Checked by caller. #endif } else { __ z_ltgr(Rpre_val, Rpre_val); - __ z_bre(filtered); // previous value is NULL, so we don't need to record it. + __ z_bre(filtered); // previous value is null, so we don't need to record it. } - // Decode the oop now. We know it's not NULL. + // Decode the oop now. We know it's not null. if (Robj != noreg && UseCompressedOops) { - __ oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false); + __ oop_decoder(Rpre_val, Rpre_val, /*maybenullptr=*/false); } // OK, it's not filtered, so we'll need to call enqueue. @@ -285,11 +285,11 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato __ z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes); __ z_bre(filtered); - // Crosses regions, storing NULL? + // Crosses regions, storing null? 
if (not_null) { #ifdef ASSERT __ z_ltgr(Rnew_val, Rnew_val); - __ asm_assert_ne("null oop not allowed (G1 post)", 0x322); // Checked by caller. + __ asm_assert(Assembler::bcondNotZero, "null oop not allowed (G1 post)", 0x322); // Checked by caller. #endif } else { __ z_ltgr(Rnew_val, Rnew_val); @@ -298,7 +298,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato Rnew_val = noreg; // end of lifetime - // Storing region crossing non-NULL, is card already dirty? + // Storing region crossing non-null, is card already dirty? assert_different_registers(Rtmp1, Rtmp2, Rtmp3); // Make sure not to use Z_R0 for any of these registers. Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3; @@ -320,7 +320,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato __ z_cli(0, Rcard_addr, G1CardTable::dirty_card_val()); // Reload after membar. __ z_bre(filtered); - // Storing a region crossing, non-NULL oop, card is clean. + // Storing a region crossing, non-null oop, card is clean. // Dirty card and log. __ z_mvi(0, Rcard_addr, G1CardTable::dirty_card_val()); @@ -380,7 +380,7 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3); - // No need for post barrier if storing NULL + // No need for post barrier if storing null if (val != noreg) { const Register base = dst.base(), idx = dst.index(); @@ -395,7 +395,7 @@ void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet deco void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2) { NearLabel Ldone, Lnot_weak; __ z_ltgr(tmp1, value); - __ z_bre(Ldone); // Use NULL result as-is. + __ z_bre(Ldone); // Use null result as-is. __ z_nill(value, ~JNIHandles::tag_mask); __ z_lg(value, 0, value); // Resolve (untagged) jobject. 
@@ -404,7 +404,7 @@ void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value __ z_braz(Lnot_weak); __ verify_oop(value, FILE_AND_LINE); DecoratorSet decorators = IN_NATIVE | ON_PHANTOM_OOP_REF; - g1_write_barrier_pre(masm, decorators, (const Address*)NULL, value, noreg, tmp1, tmp2, true); + g1_write_barrier_pre(masm, decorators, (const Address*)nullptr, value, noreg, tmp1, tmp2, true); __ bind(Lnot_weak); __ verify_oop(value, FILE_AND_LINE); __ bind(Ldone); diff --git a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.hpp b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.hpp index e13de5708a6..cc1d51d2fa1 100644 --- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.hpp +++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -42,7 +42,7 @@ class G1BarrierSetAssembler: public ModRefBarrierSetAssembler { bool do_return); void g1_write_barrier_pre(MacroAssembler* masm, DecoratorSet decorators, - const Address* obj, // Address of oop or NULL if pre-loaded. + const Address* obj, // Address of oop or null if pre-loaded. Register Rpre_val, // Ideally, this is a non-volatile register. Register Rval, // Will be preserved. 
Register Rtmp1, // If Rpre_val is volatile, either Rtmp1 @@ -65,7 +65,7 @@ class G1BarrierSetAssembler: public ModRefBarrierSetAssembler { #endif virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - const Address& src, Register dst, Register tmp1, Register tmp2, Label *L_handle_null = NULL); + const Address& src, Register dst, Register tmp1, Register tmp2, Label *L_handle_null = nullptr); virtual void resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2); }; diff --git a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp index 31fb02ebc66..9613de903d9 100644 --- a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp @@ -53,7 +53,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, case T_OBJECT: { if (UseCompressedOops && in_heap) { __ z_llgf(dst, addr); - if (L_handle_null != NULL) { // Label provided. + if (L_handle_null != nullptr) { // Label provided. __ compareU32_and_branch(dst, (intptr_t)0, Assembler::bcondEqual, *L_handle_null); __ oop_decoder(dst, dst, false); } else { @@ -61,7 +61,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, } } else { __ z_lg(dst, addr); - if (L_handle_null != NULL) { + if (L_handle_null != nullptr) { __ compareU64_and_branch(dst, (intptr_t)0, Assembler::bcondEqual, *L_handle_null); } } @@ -108,7 +108,7 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators void BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2) { NearLabel Ldone; __ z_ltgr(tmp1, value); - __ z_bre(Ldone); // Use NULL result as-is. + __ z_bre(Ldone); // Use null result as-is. __ z_nill(value, ~JNIHandles::tag_mask); __ z_lg(value, 0, value); // Resolve (untagged) jobject. 
diff --git a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp index 1590268aac8..b0f7233812b 100644 --- a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp +++ b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -40,7 +40,7 @@ public: Register dst, Register count, bool do_return = false); virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - const Address& addr, Register dst, Register tmp1, Register tmp2, Label *L_handle_null = NULL); + const Address& addr, Register dst, Register tmp1, Register tmp2, Label *L_handle_null = nullptr); virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, const Address& addr, Register val, Register tmp1, Register tmp2, Register tmp3); diff --git a/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp b/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp index 0124868e46a..760f77951fa 100644 --- a/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -160,7 +160,7 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorS BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3); - // No need for post barrier if storing NULL + // No need for post barrier if storing null if (val != noreg) { const Register base = dst.base(), idx = dst.index(); diff --git a/src/hotspot/cpu/s390/globals_s390.hpp b/src/hotspot/cpu/s390/globals_s390.hpp index a3923147185..df38f3133d7 100644 --- a/src/hotspot/cpu/s390/globals_s390.hpp +++ b/src/hotspot/cpu/s390/globals_s390.hpp @@ -34,7 +34,7 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks. define_pd_global(bool, TrapBasedNullChecks, true); -define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast. +define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls passed to check cast. define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI); diff --git a/src/hotspot/cpu/s390/interp_masm_s390.cpp b/src/hotspot/cpu/s390/interp_masm_s390.cpp index b4a7a041725..576f69e7e4d 100644 --- a/src/hotspot/cpu/s390/interp_masm_s390.cpp +++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp @@ -57,7 +57,7 @@ #endif void InterpreterMacroAssembler::jump_to_entry(address entry, Register Rscratch) { - assert(entry != NULL, "Entry must have been generated by now"); + assert(entry != nullptr, "Entry must have been generated by now"); assert(Rscratch != Z_R0, "Can't use R0 for addressing"); branch_optimized(Assembler::bcondAlways, entry); } @@ -93,7 +93,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state, address* table, bo verify_FPU(1, state); #ifdef ASSERT - address reentry = NULL; + address reentry = nullptr; { Label OK; // Check if the frame pointer in Z_fp is correct. 
z_cg(Z_fp, 0, Z_SP); @@ -274,7 +274,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) Register jvmti_thread_state = Z_ARG2; Register tmp = Z_ARG3; load_and_test_long(jvmti_thread_state, Address(Z_thread, JavaThread::jvmti_thread_state_offset())); - z_bre(L); // if (thread->jvmti_thread_state() == NULL) exit; + z_bre(L); // if (thread->jvmti_thread_state() == nullptr) exit; // Initiate earlyret handling only if it is not already being processed. // If the flag has the earlyret_processing bit set, it means that this code @@ -617,7 +617,7 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register void InterpreterMacroAssembler::verify_esp(Register Resp, Register Rtemp) { // About to read or write Resp[0]. // Make sure it is not in the monitors or the TOP_IJAVA_FRAME_ABI. - address reentry = NULL; + address reentry = nullptr; { // Check if the frame pointer in Z_fp is correct. @@ -995,7 +995,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) { // // We stored the monitor address into the object's mark word. // } else if (THREAD->is_lock_owned((address)displaced_header)) // // Simple recursive case. - // monitor->lock()->set_displaced_header(NULL); + // monitor->lock()->set_displaced_header(nullptr); // } else { // // Slow path. // InterpreterRuntime::monitorenter(THREAD, monitor); @@ -1040,7 +1040,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) { // } else if (THREAD->is_lock_owned((address)displaced_header)) // // Simple recursive case. - // monitor->lock()->set_displaced_header(NULL); + // monitor->lock()->set_displaced_header(nullptr); // We did not see an unlocked object so try the fast recursive case. @@ -1094,12 +1094,12 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object) // else { // template code: // - // if ((displaced_header = monitor->displaced_header()) == NULL) { - // // Recursive unlock. 
Mark the monitor unlocked by setting the object field to NULL. - // monitor->set_obj(NULL); + // if ((displaced_header = monitor->displaced_header()) == nullptr) { + // // Recursive unlock. Mark the monitor unlocked by setting the object field to null. + // monitor->set_obj(nullptr); // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) { // // We swapped the unlocked mark in displaced_header into the object's mark word. - // monitor->set_obj(NULL); + // monitor->set_obj(nullptr); // } else { // // Slow path. // InterpreterRuntime::monitorexit(monitor); @@ -1120,9 +1120,9 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object) assert_different_registers(monitor, object, displaced_header, current_header); - // if ((displaced_header = monitor->displaced_header()) == NULL) { - // // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL. - // monitor->set_obj(NULL); + // if ((displaced_header = monitor->displaced_header()) == nullptr) { + // // Recursive unlock. Mark the monitor unlocked by setting the object field to null. + // monitor->set_obj(nullptr); clear_mem(obj_entry, sizeof(oop)); @@ -1134,7 +1134,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object) // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) { // // We swapped the unlocked mark in displaced_header into the object's mark word. - // monitor->set_obj(NULL); + // monitor->set_obj(nullptr); // If we still have a lightweight lock, unlock the object and be done. @@ -1176,7 +1176,7 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { Register method = Z_ARG5; get_method(method); - // Test MDO to avoid the call if it is NULL. + // Test MDO to avoid the call if it is null. 
load_and_test_long(mdp, method2_(method, method_data)); z_brz(set_mdp); @@ -1462,7 +1462,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper( } // In the fall-through case, we found no matching receiver, but we - // observed the receiver[start_row] is NULL. + // observed the receiver[start_row] is null. // Fill in the receiver field and increment the count. int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row)); @@ -1478,13 +1478,13 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper( // Example state machine code for three profile rows: // // main copy of decision tree, rooted at row[1] // if (row[0].rec == rec) { row[0].incr(); goto done; } -// if (row[0].rec != NULL) { +// if (row[0].rec != nullptr) { // // inner copy of decision tree, rooted at row[1] // if (row[1].rec == rec) { row[1].incr(); goto done; } -// if (row[1].rec != NULL) { +// if (row[1].rec != nullptr) { // // degenerate decision tree, rooted at row[2] // if (row[2].rec == rec) { row[2].incr(); goto done; } -// if (row[2].rec != NULL) { count.incr(); goto done; } // overflow +// if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow // row[2].init(rec); goto done; // } else { // // remember row[1] is empty diff --git a/src/hotspot/cpu/s390/interpreterRT_s390.cpp b/src/hotspot/cpu/s390/interpreterRT_s390.cpp index 6d88d71b862..d1f4a48b93b 100644 --- a/src/hotspot/cpu/s390/interpreterRT_s390.cpp +++ b/src/hotspot/cpu/s390/interpreterRT_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -118,10 +118,10 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() { as_Register(int_arg_nr) + Z_ARG1->encoding() : Z_R0; // The handle for a receiver will never be null. 
- bool do_NULL_check = offset() != 0 || is_static(); + bool do_nullptr_check = offset() != 0 || is_static(); Label do_null; - if (do_NULL_check) { + if (do_nullptr_check) { __ clear_reg(r, true, false); __ load_and_test_long(Z_R0, locals_j_arg_at(offset())); __ z_bre(do_null); diff --git a/src/hotspot/cpu/s390/javaFrameAnchor_s390.hpp b/src/hotspot/cpu/s390/javaFrameAnchor_s390.hpp index 9f401d89c28..ae8b8766159 100644 --- a/src/hotspot/cpu/s390/javaFrameAnchor_s390.hpp +++ b/src/hotspot/cpu/s390/javaFrameAnchor_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -37,11 +37,11 @@ inline void clear(void) { // Clearing _last_Java_sp must be first. OrderAccess::release(); - _last_Java_sp = NULL; + _last_Java_sp = nullptr; // Fence? OrderAccess::fence(); - _last_Java_pc = NULL; + _last_Java_pc = nullptr; } inline void set(intptr_t* sp, address pc) { @@ -55,12 +55,12 @@ // In order to make sure the transition state is valid for "this" // we must clear _last_Java_sp before copying the rest of the new data. // Hack Alert: Temporary bugfix for 4717480/4721647 - // To act like previous version (pd_cache_state) don't NULL _last_Java_sp + // To act like previous version (pd_cache_state) don't null _last_Java_sp // unless the value is changing. // if (_last_Java_sp != src->_last_Java_sp) { OrderAccess::release(); - _last_Java_sp = NULL; + _last_Java_sp = nullptr; OrderAccess::fence(); } _last_Java_pc = src->_last_Java_pc; @@ -77,7 +77,7 @@ public: // We don't have a frame pointer. 
- intptr_t* last_Java_fp(void) { return NULL; } + intptr_t* last_Java_fp(void) { return nullptr; } intptr_t* last_Java_sp() const { return _last_Java_sp; } void set_last_Java_sp(intptr_t* sp) { OrderAccess::release(); _last_Java_sp = sp; } diff --git a/src/hotspot/cpu/s390/jniFastGetField_s390.cpp b/src/hotspot/cpu/s390/jniFastGetField_s390.cpp index c58090b5a2f..01b0bd528a8 100644 --- a/src/hotspot/cpu/s390/jniFastGetField_s390.cpp +++ b/src/hotspot/cpu/s390/jniFastGetField_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -58,7 +58,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { case T_FLOAT: name = "jni_fast_GetFloatField"; break; case T_DOUBLE: name = "jni_fast_GetDoubleField"; break; default: ShouldNotReachHere(); - name = NULL; // unreachable + name = nullptr; // unreachable } ResourceMark rm; BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE); @@ -129,7 +129,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break; case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break; default: ShouldNotReachHere(); - slow_case_addr = NULL; // unreachable + slow_case_addr = nullptr; // unreachable } __ load_const_optimized(Robj, slow_case_addr); __ z_br(Robj); // tail call diff --git a/src/hotspot/cpu/s390/jvmciCodeInstaller_s390.cpp b/src/hotspot/cpu/s390/jvmciCodeInstaller_s390.cpp index 8849c65c37d..4318703ad38 100644 --- a/src/hotspot/cpu/s390/jvmciCodeInstaller_s390.cpp +++ b/src/hotspot/cpu/s390/jvmciCodeInstaller_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. 
All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -77,7 +77,7 @@ void CodeInstaller::pd_relocate_poll(address pc, jint mark) { // Convert JVMCI register indices (as used in oop maps) to HotSpot registers. VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) { - return NULL; + return nullptr; } bool CodeInstaller::is_general_purpose_reg(VMReg hotspotRegister) { diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp index a1c72d69262..ebd678fea08 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -887,7 +887,7 @@ void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Regi // Returns 0 (zero) if no consts section exists or if it has size zero. long MacroAssembler::toc_distance() { CodeSection* cs = code()->consts(); - return (long)((cs != NULL) ? cs->start()-pc() : 0); + return (long)((cs != nullptr) ? cs->start()-pc() : 0); } // Implementation on x86/sparc assumes that constant and instruction section are @@ -1142,9 +1142,9 @@ Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, // referring to a position-fixed target location. // If not so, relocations and patching must be used. void MacroAssembler::load_absolute_address(Register d, address addr) { - assert(addr != NULL, "should not happen"); + assert(addr != nullptr, "should not happen"); BLOCK_COMMENT("load_absolute_address:"); - if (addr == NULL) { + if (addr == nullptr) { z_larl(d, pc()); // Dummy emit for size calc. 
return; } @@ -1795,27 +1795,27 @@ void MacroAssembler::compare_and_branch_optimized(Register r1, //=========================================================================== AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) { - assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); + assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); int index = oop_recorder()->allocate_metadata_index(obj); RelocationHolder rspec = metadata_Relocation::spec(index); return AddressLiteral((address)obj, rspec); } AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) { - assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); + assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); int index = oop_recorder()->find_index(obj); RelocationHolder rspec = metadata_Relocation::spec(index); return AddressLiteral((address)obj, rspec); } AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) { - assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); + assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); int oop_index = oop_recorder()->allocate_oop_index(obj); return AddressLiteral(address(obj), oop_Relocation::spec(oop_index)); } AddressLiteral MacroAssembler::constant_oop_address(jobject obj) { - assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); + assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); int oop_index = oop_recorder()->find_index(obj); return AddressLiteral(address(obj), oop_Relocation::spec(oop_index)); } @@ -2074,7 +2074,7 @@ void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, b assert_different_registers(bytes, old_sp, Z_SP); if (!copy_sp) { z_cgr(old_sp, Z_SP); - asm_assert_eq("[old_sp]!=[Z_SP]", 0x211); + asm_assert(bcondEqual, "[old_sp]!=[Z_SP]", 0x211); } #endif if (copy_sp) { z_lgr(old_sp, Z_SP); } @@ -2125,7 +2125,7 @@ void 
MacroAssembler::pop_frame() { // Pop current C frame and restore return PC register (Z_R14). void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) { BLOCK_COMMENT("pop_frame_restore_retPC:"); - int retPC_offset = _z_abi16(return_pc) + frame_size_in_bytes; + int retPC_offset = _z_common_abi(return_pc) + frame_size_in_bytes; // If possible, pop frame by add instead of load (a penny saved is a penny got :-). if (Displacement::is_validDisp(retPC_offset)) { z_lg(Z_R14, retPC_offset, Z_SP); @@ -2172,7 +2172,7 @@ void MacroAssembler::call_VM_base(Register oop_result, // ARG1 must hold thread address. z_lgr(Z_ARG1, Z_thread); - address return_pc = NULL; + address return_pc = nullptr; if (allow_relocation) { return_pc = call_c(entry_point); } else { @@ -2377,7 +2377,7 @@ address MacroAssembler::call_c_static(address function_entry) { address MacroAssembler::call_c_opt(address function_entry) { bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */); - _last_calls_return_pc = success ? pc() : NULL; + _last_calls_return_pc = success ? pc() : nullptr; return _last_calls_return_pc; } @@ -2571,7 +2571,7 @@ address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_ad call_far_patchable_size()); Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size()); ShouldNotReachHere(); - return NULL; + return nullptr; } } @@ -2632,7 +2632,7 @@ bool MacroAssembler::is_load_from_polling_page(address instr_loc) { // Extract poll address from instruction and ucontext. 
address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) { - assert(ucontext != NULL, "must have ucontext"); + assert(ucontext != nullptr, "must have ucontext"); ucontext_t* uc = (ucontext_t*) ucontext; unsigned long z_instruction; unsigned int ilen = get_instruction(instr_loc, &z_instruction); @@ -2650,7 +2650,7 @@ address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) { } ShouldNotReachHere(); - return NULL; + return nullptr; } // Extract poll register from instruction. @@ -2778,7 +2778,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass, bind(search); // Handle IncompatibleClassChangeError. - // If the entry is NULL then we've reached the end of the table + // If the entry is null then we've reached the end of the table // without finding the expected interface, so throw an exception. load_and_test_long(itable_interface, Address(itable_entry_addr)); z_bre(no_such_interface); @@ -2945,12 +2945,12 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, NearLabel L_fallthrough; int label_nulls = 0; - if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } - if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } - if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } + if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } + if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } + if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; } assert(label_nulls <= 1 || (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), - "at most one NULL in the batch, usually"); + "at most one null in the batch, usually"); BLOCK_COMMENT("check_klass_subtype_fast_path {"); // If the pointers are equal, we are done (e.g., String[] elements). 
@@ -3031,9 +3031,9 @@ void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass, assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength); NearLabel L_fallthrough; int label_nulls = 0; - if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } - if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } - assert(label_nulls <= 1, "at most one NULL in the batch"); + if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } + if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } + assert(label_nulls <= 1, "at most one null in the batch"); const int ss_offset = in_bytes(Klass::secondary_supers_offset()); const int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); @@ -3100,20 +3100,20 @@ void MacroAssembler::check_klass_subtype(Register sub_klass, NearLabel failure; BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name())); check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, - &L_success, &failure, NULL); + &L_success, &failure, nullptr); check_klass_subtype_slow_path(sub_klass, super_klass, - temp1_reg, temp2_reg, &L_success, NULL); + temp1_reg, temp2_reg, &L_success, nullptr); BIND(failure); BLOCK_COMMENT("} check_klass_subtype"); } void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) { - assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required"); + assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); Label L_fallthrough; - if (L_fast_path == NULL) { + if (L_fast_path == nullptr) { L_fast_path = &L_fallthrough; - } else if (L_slow_path == NULL) { + } else if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; } @@ -3203,10 +3203,10 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis Register zero = temp; Register monitor_tagged = displacedHeader; // Tagged with 
markWord::monitor_value. bind(object_has_monitor); - // The object's monitor m is unlocked iff m->owner == NULL, + // The object's monitor m is unlocked iff m->owner is null, // otherwise m->owner may contain a thread or a stack address. // - // Try to CAS m->owner from NULL to current thread. + // Try to CAS m->owner from null to current thread. z_lghi(zero, 0); // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ. z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged); @@ -3306,7 +3306,7 @@ void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Ja } // When returning from calling out from Java mode the frame anchor's - // last_Java_pc will always be set to NULL. It is set here so that + // last_Java_pc will always be set to null. It is set here so that // if we are doing a call to native (not VM) that we capture the // known pc and don't have to rely on the native call having a // standard frame linkage where we can find the pc. @@ -3402,13 +3402,13 @@ void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) { bind(ok); } else { if (needs_explicit_null_check((intptr_t)offset)) { - // Provoke OS NULL exception if reg = NULL by + // Provoke OS null exception if reg is null by // accessing M[reg] w/o changing any registers. z_lg(tmp, 0, reg); } // else // Nothing to do, (later) access of M[reg + offset] - // will provoke OS NULL exception if reg = NULL. + // will provoke OS null exception if reg is null. } } @@ -3447,7 +3447,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) { current = dst; } - if (base != NULL) { + if (base != nullptr) { // Use scaled-down base address parts to match scaled-down klass pointer. 
unsigned int base_h = ((unsigned long)base)>>(32+shift); unsigned int base_l = (unsigned int)(((unsigned long)base)>>shift); @@ -3514,7 +3514,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) { // This function calculates the size of the code generated by // decode_klass_not_null(register dst, Register src) -// when (Universe::heap() != NULL). Hence, if the instructions +// when Universe::heap() isn't null. Hence, if the instructions // it generates change, then this method needs to be updated. int MacroAssembler::instr_size_for_decode_klass_not_null() { address base = CompressedKlassPointers::base(); @@ -3522,7 +3522,7 @@ int MacroAssembler::instr_size_for_decode_klass_not_null() { int addbase_size = 0; assert(UseCompressedClassPointers, "only for compressed klass ptrs"); - if (base != NULL) { + if (base != nullptr) { unsigned int base_h = ((unsigned long)base)>>32; unsigned int base_l = (unsigned int)((unsigned long)base); if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { @@ -3557,7 +3557,7 @@ void MacroAssembler::decode_klass_not_null(Register dst) { if (shift != 0) { // Shift required? z_sllg(dst, dst, shift); } - if (base != NULL) { + if (base != nullptr) { unsigned int base_h = ((unsigned long)base)>>32; unsigned int base_l = (unsigned int)((unsigned long)base); if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { @@ -3604,7 +3604,7 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) { lgr_if_needed(dst, src); } - if (base != NULL) { + if (base != nullptr) { unsigned int base_h = ((unsigned long)base)>>32; unsigned int base_l = (unsigned int)((unsigned long)base); if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { @@ -3679,8 +3679,8 @@ void MacroAssembler::store_klass_gap(Register s, Register d) { // Rop1 - klass in register, always uncompressed. // disp - Offset of klass in memory, compressed/uncompressed, depending on runtime flag. 
// Rbase - Base address of cKlass in memory. -// maybeNULL - True if Rop1 possibly is a NULL. -void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) { +// maybenull - True if Rop1 possibly is a null. +void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybenull) { BLOCK_COMMENT("compare klass ptr {"); @@ -3694,7 +3694,7 @@ void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rba // First encode register oop and then compare with cOop in memory. // This sequence saves an unnecessary cOop load and decode. - if (base == NULL) { + if (base == nullptr) { if (shift == 0) { z_cl(Rop1, disp, Rbase); // Unscaled } else { @@ -3709,7 +3709,7 @@ void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rba Register current = Rop1; Label done; - if (maybeNULL) { // NULL ptr must be preserved! + if (maybenull) { // null ptr must be preserved! z_ltgr(Z_R0, current); z_bre(done); current = Z_R0; @@ -3812,9 +3812,9 @@ int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) { // Rop1 - Oop in register. // disp - Offset of cOop in memory. // Rbase - Base address of cOop in memory. -// maybeNULL - True if Rop1 possibly is a NULL. -// maybeNULLtarget - Branch target for Rop1 == NULL, if flow control shall NOT continue with compare instruction. -void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) { +// maybenull - True if Rop1 possibly is a null. +// maybenulltarget - Branch target for Rop1 == nullptr, if flow control shall NOT continue with compare instruction. 
+void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybenull) { Register Rbase = mem.baseOrR0(); Register Rindex = mem.indexOrR0(); int64_t disp = mem.disp(); @@ -3823,7 +3823,7 @@ void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL address base = CompressedOops::base(); assert(UseCompressedOops, "must be on to call this method"); - assert(Universe::heap() != NULL, "java heap must be initialized to call this method"); + assert(Universe::heap() != nullptr, "java heap must be initialized to call this method"); assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift"); assert_different_registers(Rop1, Z_R0); assert_different_registers(Rop1, Rbase, Z_R1); @@ -3833,7 +3833,7 @@ void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL // First encode register oop and then compare with cOop in memory. // This sequence saves an unnecessary cOop load and decode. - if (base == NULL) { + if (base == nullptr) { if (shift == 0) { z_cl(Rop1, disp, Rindex, Rbase); // Unscaled } else { @@ -3848,7 +3848,7 @@ void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL Label done; int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base)); - if (maybeNULL) { // NULL ptr must be preserved! + if (maybenull) { // null ptr must be preserved! z_ltgr(Z_R0, Rop1); z_bre(done); } @@ -3928,7 +3928,7 @@ void MacroAssembler::store_heap_oop(Register Roop, const Address &a, // // only32bitValid is set, if later code only uses the lower 32 bits. In this // case we must not fix the upper 32 bits. 
-void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL, +void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybenull, Register Rbase, int pow2_offset, bool only32bitValid) { const address oop_base = CompressedOops::base(); @@ -3936,20 +3936,20 @@ void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL, const bool disjoint = CompressedOops::base_disjoint(); assert(UseCompressedOops, "must be on to call this method"); - assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder"); + assert(Universe::heap() != nullptr, "java heap must be initialized to call this encoder"); assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift"); - if (disjoint || (oop_base == NULL)) { + if (disjoint || (oop_base == nullptr)) { BLOCK_COMMENT("cOop encoder zeroBase {"); if (oop_shift == 0) { - if (oop_base != NULL && !only32bitValid) { + if (oop_base != nullptr && !only32bitValid) { z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again. } else { lgr_if_needed(Rdst, Rsrc); } } else { z_srlg(Rdst, Rsrc, oop_shift); - if (oop_base != NULL && !only32bitValid) { + if (oop_base != nullptr && !only32bitValid) { z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again. } } @@ -3963,7 +3963,7 @@ void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL, BLOCK_COMMENT("cOop encoder general {"); assert_different_registers(Rdst, Z_R1); assert_different_registers(Rsrc, Rbase); - if (maybeNULL) { + if (maybenull) { Label done; // We reorder shifting and subtracting, so that we can compare // and shift in parallel: @@ -3990,7 +3990,7 @@ void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL, } assert_different_registers(Rdst, Rbase); - // Check for NULL oop (must be left alone) and shift. + // Check for null oop (must be left alone) and shift. 
if (oop_shift != 0) { // Shift out alignment bits if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set. z_srag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code. @@ -4001,7 +4001,7 @@ void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL, // z_cghi(Rsrc, 0); } } else { - z_ltgr(Rdst, Rsrc); // Move NULL to result register. + z_ltgr(Rdst, Rsrc); // Move null to result register. } z_bre(done); @@ -4064,20 +4064,20 @@ void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL, // - avoid Z_R0 for any of the argument registers. // - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance. // - avoid Z_R1 for Rdst if Rdst == Rbase. -void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) { +void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybenull, Register Rbase, int pow2_offset) { const address oop_base = CompressedOops::base(); const int oop_shift = CompressedOops::shift(); const bool disjoint = CompressedOops::base_disjoint(); assert(UseCompressedOops, "must be on to call this method"); - assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder"); + assert(Universe::heap() != nullptr, "java heap must be initialized to call this decoder"); assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift"); // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary. 
- if (oop_base != NULL) { + if (oop_base != nullptr) { unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff; unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff; unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff; @@ -4088,7 +4088,7 @@ void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, R Label done; // Rsrc contains a narrow oop. Thus we are sure the leftmost bits will never be set. - if (maybeNULL) { // NULL ptr must be preserved! + if (maybenull) { // null ptr must be preserved! z_slag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code. z_bre(done); } else { @@ -4148,9 +4148,9 @@ void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, R } if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase); - // Scale oop and check for NULL. + // Scale oop and check for null. // Rsrc contains a narrow oop. Thus we are sure the leftmost bits will never be set. - if (maybeNULL) { // NULL ptr must be preserved! + if (maybenull) { // null ptr must be preserved! z_slag(Rdst_tmp, Rsrc, oop_shift); // Arithmetic shift sets the condition code. z_bre(done); } else { @@ -4428,11 +4428,11 @@ int MacroAssembler::store_const_in_toc(AddressLiteral& val) { long value = val.value(); address tocPos = long_constant(value); - if (tocPos != NULL) { + if (tocPos != nullptr) { int tocOffset = (int)(tocPos - code()->consts()->start()); return tocOffset; } - // Address_constant returned NULL, so no constant entry has been created. + // Address_constant returned null, so no constant entry has been created. // In that case, we return a "fatal" offset, just in case that subsequently // generated access code is executed. return -1; @@ -4446,7 +4446,7 @@ int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) { // where x is the address of the constant pool entry. 
address tocPos = address_constant((address)oop.value(), RelocationHolder::none); - if (tocPos != NULL) { + if (tocPos != nullptr) { int tocOffset = (int)(tocPos - code()->consts()->start()); RelocationHolder rsp = oop.rspec(); Relocation *rel = rsp.reloc(); @@ -4460,7 +4460,7 @@ int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) { return tocOffset; } - // Address_constant returned NULL, so no constant entry has been created + // Address_constant returned null, so no constant entry has been created // in that case, we return a "fatal" offset, just in case that subsequently // generated access code is executed. return -1; @@ -4470,7 +4470,7 @@ bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Regist int tocOffset = store_const_in_toc(a); if (tocOffset == -1) return false; address tocPos = tocOffset + code()->consts()->start(); - assert((address)code()->consts()->start() != NULL, "Please add CP address"); + assert((address)code()->consts()->start() != nullptr, "Please add CP address"); relocate(a.rspec()); load_long_pcrelative(dst, tocPos); return true; @@ -4480,7 +4480,7 @@ bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register int tocOffset = store_oop_in_toc(a); if (tocOffset == -1) return false; address tocPos = tocOffset + code()->consts()->start(); - assert((address)code()->consts()->start() != NULL, "Please add CP address"); + assert((address)code()->consts()->start() != nullptr, "Please add CP address"); load_addr_pcrelative(dst, tocPos); return true; @@ -4494,7 +4494,7 @@ intptr_t MacroAssembler::get_const_from_toc(address pc) { assert(is_load_const_from_toc(pc), "must be load_const_from_pool"); long offset = get_load_const_from_toc_offset(pc); - address dataLoc = NULL; + address dataLoc = nullptr; if (is_load_const_from_toc_pcrelative(pc)) { dataLoc = pc + offset; } else { @@ -4513,12 +4513,12 @@ void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBl 
assert(is_load_const_from_toc(pc), "must be load_const_from_pool"); long offset = MacroAssembler::get_load_const_from_toc_offset(pc); - address dataLoc = NULL; + address dataLoc = nullptr; if (is_load_const_from_toc_pcrelative(pc)) { dataLoc = pc+offset; } else { nmethod* nm = CodeCache::find_nmethod(pc); - assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob"); + assert((cb == nullptr) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob"); dataLoc = nm->ctable_begin() + offset; } if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary. @@ -5326,47 +5326,25 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP); } -#ifndef PRODUCT +void MacroAssembler::asm_assert(branch_condition cond, const char* msg, int id, bool is_static) { +#ifdef ASSERT + Label ok; + z_brc(cond, ok); + is_static ? stop_static(msg, id) : stop(msg, id); + bind(ok); +#endif // ASSERT +} + // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false). void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) { - Label ok; - if (check_equal) { - z_bre(ok); - } else { - z_brne(ok); - } - stop(msg, id); - bind(ok); -} - -// Assert if CC indicates "low". -void MacroAssembler::asm_assert_low(const char *msg, int id) { - Label ok; - z_brnl(ok); - stop(msg, id); - bind(ok); -} - -// Assert if CC indicates "high". -void MacroAssembler::asm_assert_high(const char *msg, int id) { - Label ok; - z_brnh(ok); - stop(msg, id); - bind(ok); -} - -// Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false) -// generate non-relocatable code. -void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) { - Label ok; - if (check_equal) { z_bre(ok); } - else { z_brne(ok); } - stop_static(msg, id); - bind(ok); +#ifdef ASSERT + asm_assert(check_equal ? 
bcondEqual : bcondNotEqual, msg, id); +#endif // ASSERT } void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset, Register mem_base, const char* msg, int id) { +#ifdef ASSERT switch (size) { case 4: load_and_test_int(Z_R0, Address(mem_base, mem_offset)); @@ -5377,8 +5355,9 @@ void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocatio default: ShouldNotReachHere(); } - if (allow_relocation) { asm_assert(check_equal, msg, id); } - else { asm_assert_static(check_equal, msg, id); } + // if relocation is not allowed then stop_static() will be called otherwise call stop() + asm_assert(check_equal ? bcondEqual : bcondNotEqual, msg, id, !allow_relocation); +#endif // ASSERT } // Check the condition @@ -5387,18 +5366,13 @@ void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocatio // expected_size - FP + SP == 0 // Destroys Register expected_size if no tmp register is passed. void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) { - if (tmp == noreg) { - tmp = expected_size; - } else { - if (tmp != expected_size) { - z_lgr(tmp, expected_size); - } - z_algr(tmp, Z_SP); - z_slg(tmp, 0, Z_R0, Z_SP); - asm_assert_eq(msg, id); - } +#ifdef ASSERT + lgr_if_needed(tmp, expected_size); + z_algr(tmp, Z_SP); + z_slg(tmp, 0, Z_R0, Z_SP); + asm_assert(bcondEqual, msg, id); +#endif // ASSERT } -#endif // !PRODUCT // Save and restore functions: Exclude Z_R0. void MacroAssembler::save_volatile_regs(Register dst, int offset, bool include_fp, bool include_flags) { @@ -5519,8 +5493,8 @@ void MacroAssembler::stop(int type, const char* msg, int id) { // The plain disassembler does not recognize illtrap. It instead displays // a 32-bit value. Issuing two illtraps assures the disassembler finds // the proper beginning of the next instruction. - z_illtrap(); // Illegal instruction. - z_illtrap(); // Illegal instruction. 
+ z_illtrap(id); // Illegal instruction. + z_illtrap(id); // Illegal instruction. BLOCK_COMMENT(" } stop"); } @@ -5534,7 +5508,7 @@ void MacroAssembler::stop(int type, const char* msg, int id) { // should be given for "hand-written" code, if all chain calls are in the same code blob. // Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe. address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) { - BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg)); + BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==nullptr?"init":"cont", allow_relocation?"reloc ":"static", msg)); // Setup arguments. if (allow_relocation) { @@ -5545,7 +5519,7 @@ address MacroAssembler::stop_chain(address reentry, int type, const char* msg, i load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]); load_absolute_address(Z_ARG2, (address)msg); } - if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) { + if ((reentry != nullptr) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) { BLOCK_COMMENT("branch to reentry point:"); z_brc(bcondAlways, reentry); } else { @@ -5554,12 +5528,12 @@ address MacroAssembler::stop_chain(address reentry, int type, const char* msg, i save_return_pc(); // Saves return pc Z_R14. push_frame_abi160(0); if (allow_relocation) { - reentry = NULL; // Prevent reentry if code relocation is allowed. + reentry = nullptr; // Prevent reentry if code relocation is allowed. call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2); } else { call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2); } - z_illtrap(); // Illegal instruction as emergency stop, should the above call return. + z_illtrap(id); // Illegal instruction as emergency stop, should the above call return. 
} BLOCK_COMMENT(" } stop_chain"); @@ -5569,7 +5543,7 @@ address MacroAssembler::stop_chain(address reentry, int type, const char* msg, i // Special version of stop() for code size reduction. // Assumes constant relative addresses for data and runtime call. void MacroAssembler::stop_static(int type, const char* msg, int id) { - stop_chain(NULL, type, msg, id, false); + stop_chain(nullptr, type, msg, id, false); } void MacroAssembler::stop_subroutine() { diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.hpp b/src/hotspot/cpu/s390/macroAssembler_s390.hpp index f5181bbb6da..fad35cf08b2 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016, 2022 SAP SE. All rights reserved. + * Copyright (c) 2016, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -679,7 +679,7 @@ class MacroAssembler: public Assembler { // Test sub_klass against super_klass, with fast and slow paths. // The fast path produces a tri-state answer: yes / no / maybe-slow. - // One of the three labels can be NULL, meaning take the fall-through. + // One of the three labels can be null, meaning take the fall-through. // If super_check_offset is -1, the value is loaded up from super_klass. // No registers are killed, except temp_reg and temp2_reg. // If super_check_offset is not -1, temp1_reg is not used and can be noreg. @@ -713,8 +713,8 @@ class MacroAssembler: public Assembler { void clinit_barrier(Register klass, Register thread, - Label* L_fast_path = NULL, - Label* L_slow_path = NULL); + Label* L_fast_path = nullptr, + Label* L_slow_path = nullptr); // Increment a counter at counter_address when the eq condition code is set. // Kills registers tmp1_reg and tmp2_reg and preserves the condition code. 
@@ -747,9 +747,9 @@ class MacroAssembler: public Assembler { // Vm result is currently getting hijacked to for oop preservation. void set_vm_result(Register oop_result); - // Support for NULL-checks + // Support for null-checks // - // Generates code that causes a NULL OS exception if the content of reg is NULL. + // Generates code that causes a null OS exception if the content of reg is null. // If the accessed location is M[reg + offset] and the offset is known, provide the // offset. No explicit code generation is needed if the offset is within a certain // range (0 <= offset <= page_size). @@ -771,7 +771,7 @@ class MacroAssembler: public Assembler { // This function calculates the size of the code generated by // decode_klass_not_null(register dst) - // when (Universe::heap() != NULL). Hence, if the instructions + // when Universe::heap() isn't null. Hence, if the instructions // it generates change, then this method needs to be updated. static int instr_size_for_decode_klass_not_null(); @@ -781,8 +781,8 @@ class MacroAssembler: public Assembler { static int get_oop_base_pow2_offset(uint64_t oop_base); int get_oop_base(Register Rbase, uint64_t oop_base); int get_oop_base_complement(Register Rbase, uint64_t oop_base); - void compare_heap_oop(Register Rop1, Address mem, bool maybeNULL); - void compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL); + void compare_heap_oop(Register Rop1, Address mem, bool maybenull); + void compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybenull); // Access heap oop, handle encoding and GC barriers. 
private: @@ -791,20 +791,20 @@ class MacroAssembler: public Assembler { Register tmp1, Register tmp2, Register tmp3); void access_load_at(BasicType type, DecoratorSet decorators, const Address& addr, Register dst, - Register tmp1, Register tmp2, Label *is_null = NULL); + Register tmp1, Register tmp2, Label *is_null = nullptr); public: // tmp1 and tmp2 are used with decorators ON_PHANTOM_OOP_REF or ON_WEAK_OOP_REF. void load_heap_oop(Register dest, const Address &a, Register tmp1, Register tmp2, - DecoratorSet decorators = 0, Label *is_null = NULL); + DecoratorSet decorators = 0, Label *is_null = nullptr); void store_heap_oop(Register Roop, const Address &a, Register tmp1, Register tmp2, Register tmp3, DecoratorSet decorators = 0); - void oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL, + void oop_encoder(Register Rdst, Register Rsrc, bool maybenull, Register Rbase = Z_R1, int pow2_offset = -1, bool only32bitValid = false); - void oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, + void oop_decoder(Register Rdst, Register Rsrc, bool maybenull, Register Rbase = Z_R1, int pow2_offset = -1); void resolve_oop_handle(Register result); @@ -863,18 +863,13 @@ class MacroAssembler: public Assembler { // // Assert on CC (condition code in CPU state). - void asm_assert(bool check_equal, const char* msg, int id) PRODUCT_RETURN; - void asm_assert_low(const char *msg, int id) PRODUCT_RETURN; - void asm_assert_high(const char *msg, int id) PRODUCT_RETURN; - void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); } - void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); } - - void asm_assert_static(bool check_equal, const char* msg, int id) PRODUCT_RETURN; + void asm_assert(branch_condition cond, const char* msg, int id, bool is_static=true); + void asm_assert(bool check_equal, const char* msg, int id); private: // Emit assertions. 
void asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset, - Register mem_base, const char* msg, int id) PRODUCT_RETURN; + Register mem_base, const char* msg, int id); public: inline void asm_assert_mem4_is_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) { @@ -889,7 +884,6 @@ class MacroAssembler: public Assembler { inline void asm_assert_mem8_isnot_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) { asm_assert_mems_zero(false, true, 8, mem_offset, mem_base, msg, id); } - inline void asm_assert_mem4_is_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) { asm_assert_mems_zero(true, false, 4, mem_offset, mem_base, msg, id); } @@ -902,7 +896,7 @@ class MacroAssembler: public Assembler { inline void asm_assert_mem8_isnot_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) { asm_assert_mems_zero(false, false, 8, mem_offset, mem_base, msg, id); } - void asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) PRODUCT_RETURN; + void asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id); // Save and restore functions: Exclude Z_R0. void save_volatile_regs( Register dst, int offset, bool include_fp, bool include_flags); diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.inline.hpp b/src/hotspot/cpu/s390/macroAssembler_s390.inline.hpp index 08c81e908f4..d81562d9e9a 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.inline.hpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.inline.hpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016 SAP SE. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -250,11 +250,11 @@ inline bool MacroAssembler::is_load_addr_pcrel(address a) { // Save the return pc in the register that should be stored as the return pc // in the current frame (default is R14). inline void MacroAssembler::save_return_pc(Register pc) { - z_stg(pc, _z_abi16(return_pc), Z_SP); + z_stg(pc, _z_common_abi(return_pc), Z_SP); } inline void MacroAssembler::restore_return_pc() { - z_lg(Z_R14, _z_abi16(return_pc), Z_SP); + z_lg(Z_R14, _z_common_abi(return_pc), Z_SP); } // Call a function with given entry. diff --git a/src/hotspot/cpu/s390/matcher_s390.hpp b/src/hotspot/cpu/s390/matcher_s390.hpp index d683f35a8a4..0ab944a5426 100644 --- a/src/hotspot/cpu/s390/matcher_s390.hpp +++ b/src/hotspot/cpu/s390/matcher_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, 2022 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -71,7 +71,7 @@ // Set this as clone_shift_expressions. static bool narrow_oop_use_complex_address() { - if (CompressedOops::base() == NULL && CompressedOops::shift() == 0) return true; + if (CompressedOops::base() == nullptr && CompressedOops::shift() == 0) return true; return false; } @@ -84,12 +84,12 @@ static bool const_oop_prefer_decode() { // Prefer ConN+DecodeN over ConP in simple compressed oops mode. - return CompressedOops::base() == NULL; + return CompressedOops::base() == nullptr; } static bool const_klass_prefer_decode() { // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode. - return CompressedKlassPointers::base() == NULL; + return CompressedKlassPointers::base() == nullptr; } // Is it better to copy float constants, or load them directly from memory? 
diff --git a/src/hotspot/cpu/s390/methodHandles_s390.cpp b/src/hotspot/cpu/s390/methodHandles_s390.cpp index 0841d22db72..318f51cf97e 100644 --- a/src/hotspot/cpu/s390/methodHandles_s390.cpp +++ b/src/hotspot/cpu/s390/methodHandles_s390.cpp @@ -180,7 +180,7 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth __ z_br(target); __ bind(L_no_such_method); - assert(StubRoutines::throw_AbstractMethodError_entry() != NULL, "not yet generated!"); + assert(StubRoutines::throw_AbstractMethodError_entry() != nullptr, "not yet generated!"); __ load_const_optimized(target, StubRoutines::throw_AbstractMethodError_entry()); __ z_br(target); } @@ -249,14 +249,14 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* // adapters via MethodHandleNatives.linkMethod. They all allow an // appendix argument. __ should_not_reach_here(); // Empty stubs make SG sick. - return NULL; + return nullptr; } // No need in interpreter entry for linkToNative for now. // Interpreter calls compiled entry through i2c. if (iid == vmIntrinsics::_linkToNative) { __ should_not_reach_here(); // Empty stubs make SG sick. - return NULL; + return nullptr; } // Z_R10: sender SP (must preserve; see prepare_to_jump_from_interprted) @@ -559,8 +559,8 @@ void trace_method_handle_stub(const char* adaptername, intptr_t* sender_sp, intptr_t* args, intptr_t* tracing_fp) { - bool has_mh = (strstr(adaptername, "/static") == NULL && - strstr(adaptername, "linkTo") == NULL); // Static linkers don't have MH. + bool has_mh = (strstr(adaptername, "/static") == nullptr && + strstr(adaptername, "linkTo") == nullptr); // Static linkers don't have MH. const char* mh_reg_name = has_mh ? 
"Z_R4_mh" : "Z_R4"; log_info(methodhandles)("MH %s %s=" INTPTR_FORMAT " sender_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, adaptername, mh_reg_name, diff --git a/src/hotspot/cpu/s390/nativeInst_s390.cpp b/src/hotspot/cpu/s390/nativeInst_s390.cpp index c1c395b6697..95178e9ae74 100644 --- a/src/hotspot/cpu/s390/nativeInst_s390.cpp +++ b/src/hotspot/cpu/s390/nativeInst_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -55,7 +55,7 @@ void NativeInstruction::verify() { // Make sure code pattern is actually an instruction address. // Do not allow: - // - NULL + // - null // - any address in first page (0x0000 .. 0x0fff) // - odd address (will cause a "specification exception") address addr = addr_at(0); @@ -68,7 +68,7 @@ void NativeInstruction::verify() { // Print location and value (hex representation) of current NativeInstruction void NativeInstruction::print(const char* msg) const { int len = Assembler::instr_len(addr_at(0)); - if (msg == NULL) { // Output line without trailing blanks. + if (msg == nullptr) { // Output line without trailing blanks. switch (len) { case 2: tty->print_cr(INTPTR_FORMAT "(len=%d): %4.4x", p2i(addr_at(0)), len, halfword_at(0)); break; case 4: tty->print_cr(INTPTR_FORMAT "(len=%d): %4.4x %4.4x", p2i(addr_at(0)), len, halfword_at(0), halfword_at(2)); break; @@ -89,20 +89,20 @@ void NativeInstruction::print(const char* msg) const { } } void NativeInstruction::print() const { - print(NULL); + print(nullptr); } // Hex-Dump of storage around current NativeInstruction. Also try disassembly. void NativeInstruction::dump(const unsigned int range, const char* msg) const { - Assembler::dump_code_range(tty, addr_at(0), range, (msg == NULL) ? 
"":msg); + Assembler::dump_code_range(tty, addr_at(0), range, (msg == nullptr) ? "":msg); } void NativeInstruction::dump(const unsigned int range) const { - dump(range, NULL); + dump(range, nullptr); } void NativeInstruction::dump() const { - dump(32, NULL); + dump(32, nullptr); } void NativeInstruction::set_halfword_at(int offset, short i) { @@ -176,7 +176,7 @@ bool NativeInstruction::is_sigill_not_entrant() { // (see implementation of is_illegal() for details). CodeBlob* cb = CodeCache::find_blob(addr_at(0)); - if (cb == NULL || !cb->is_nmethod()) { + if (cb == nullptr || !cb->is_nmethod()) { return false; } @@ -255,7 +255,7 @@ void NativeFarCall::verify() { address NativeFarCall::destination() { assert(MacroAssembler::is_call_far_patchable_at((address)this), "unexpected call type"); - address ctable = NULL; + address ctable = nullptr; return MacroAssembler::get_dest_of_call_far_patchable_at((address)this, ctable); } @@ -368,7 +368,7 @@ address NativeMovConstReg::next_instruction_address(int offset) const { #else guarantee(false, "Not a NativeMovConstReg site"); #endif - return NULL; + return nullptr; } intptr_t NativeMovConstReg::data() const { @@ -385,7 +385,7 @@ intptr_t NativeMovConstReg::data() const { #else ShouldNotReachHere(); #endif - return *(intptr_t *)NULL; + return *(intptr_t *)nullptr; } else { // Otherwise, assume data resides in TOC. Is asserted in called method. return MacroAssembler::get_const_from_toc(loc); @@ -481,15 +481,15 @@ void NativeMovConstReg::set_data(intptr_t data, relocInfo::relocType expected_ty address next_address = set_data_plain(data, cb); // 'RelocIterator' requires an nmethod - nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL; - if (nm != NULL) { + nmethod* nm = cb ? 
cb->as_nmethod_or_null() : nullptr; + if (nm != nullptr) { RelocIterator iter(nm, instruction_address(), next_address); - oop* oop_addr = NULL; - Metadata** metadata_addr = NULL; + oop* oop_addr = nullptr; + Metadata** metadata_addr = nullptr; while (iter.next()) { if (iter.type() == relocInfo::oop_type) { oop_Relocation *r = iter.oop_reloc(); - if (oop_addr == NULL) { + if (oop_addr == nullptr) { oop_addr = r->oop_addr(); *oop_addr = cast_to_oop(data); } else { @@ -498,7 +498,7 @@ void NativeMovConstReg::set_data(intptr_t data, relocInfo::relocType expected_ty } if (iter.type() == relocInfo::metadata_type) { metadata_Relocation *r = iter.metadata_reloc(); - if (metadata_addr == NULL) { + if (metadata_addr == nullptr) { metadata_addr = r->metadata_addr(); *metadata_addr = (Metadata*)data; } else { @@ -507,8 +507,8 @@ void NativeMovConstReg::set_data(intptr_t data, relocInfo::relocType expected_ty } } assert(expected_type == relocInfo::none || - (expected_type == relocInfo::metadata_type && metadata_addr != NULL) || - (expected_type == relocInfo::oop_type && oop_addr != NULL), + (expected_type == relocInfo::metadata_type && metadata_addr != nullptr) || + (expected_type == relocInfo::oop_type && oop_addr != nullptr), "%s relocation not found", expected_type == relocInfo::oop_type ? 
"oop" : "metadata"); } } @@ -540,7 +540,7 @@ void NativeMovConstReg::set_narrow_klass(intptr_t data) { ICache::invalidate_range(start, range); } -void NativeMovConstReg::set_pcrel_addr(intptr_t newTarget, CompiledMethod *passed_nm /* = NULL */) { +void NativeMovConstReg::set_pcrel_addr(intptr_t newTarget, CompiledMethod *passed_nm /* = nullptr */) { address next_address; address loc = addr_at(0); @@ -565,7 +565,7 @@ void NativeMovConstReg::set_pcrel_addr(intptr_t newTarget, CompiledMethod *passe } } -void NativeMovConstReg::set_pcrel_data(intptr_t newData, CompiledMethod *passed_nm /* = NULL */) { +void NativeMovConstReg::set_pcrel_data(intptr_t newData, CompiledMethod *passed_nm /* = nullptr */) { address next_address; address loc = addr_at(0); diff --git a/src/hotspot/cpu/s390/nativeInst_s390.hpp b/src/hotspot/cpu/s390/nativeInst_s390.hpp index 8cd03e0cfa0..65bfe499370 100644 --- a/src/hotspot/cpu/s390/nativeInst_s390.hpp +++ b/src/hotspot/cpu/s390/nativeInst_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -257,7 +257,7 @@ class NativeCall: public NativeInstruction { ((NativeCall*)iaddr)->print(); guarantee(false, "Not a NativeCall site"); - return NULL; + return nullptr; } address return_address() const { @@ -325,7 +325,7 @@ class NativeCall: public NativeInstruction { // instruction, is always prepended with a NOP. This measure avoids // ambiguities with load_const_from_toc_call. 
friend NativeCall* nativeCall_before(address return_address) { - NativeCall *call = NULL; + NativeCall *call = nullptr; // Make sure not to return garbage address instp = return_address - MacroAssembler::load_const_call_size(); @@ -486,8 +486,8 @@ class NativeMovConstReg: public NativeInstruction { // Patch narrow oop constant in code stream. void set_narrow_oop(intptr_t data); void set_narrow_klass(intptr_t data); - void set_pcrel_addr(intptr_t addr, CompiledMethod *nm = NULL); - void set_pcrel_data(intptr_t data, CompiledMethod *nm = NULL); + void set_pcrel_addr(intptr_t addr, CompiledMethod *nm = nullptr); + void set_pcrel_data(intptr_t data, CompiledMethod *nm = nullptr); void verify(); @@ -664,13 +664,13 @@ public: inline NativePostCallNop* nativePostCallNop_at(address address) { // Unimplemented(); - return NULL; + return nullptr; } class NativeDeoptInstruction: public NativeInstruction { public: - address instruction_address() const { Unimplemented(); return NULL; } - address next_instruction_address() const { Unimplemented(); return NULL; } + address instruction_address() const { Unimplemented(); return nullptr; } + address next_instruction_address() const { Unimplemented(); return nullptr; } void verify() { Unimplemented(); } diff --git a/src/hotspot/cpu/s390/registerMap_s390.hpp b/src/hotspot/cpu/s390/registerMap_s390.hpp index 74cf3855fa6..827e3b44e04 100644 --- a/src/hotspot/cpu/s390/registerMap_s390.hpp +++ b/src/hotspot/cpu/s390/registerMap_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -33,8 +33,8 @@ private: // This is the hook for finding a register in a "well-known" location, // such as a register block of a predetermined format. - // Since there is none, we just return NULL. 
- address pd_location(VMReg reg) const {return NULL;} + // Since there is none, we just return null. + address pd_location(VMReg reg) const {return nullptr;} address pd_location(VMReg base_reg, int slot_idx) const { return location(base_reg->next(slot_idx), nullptr); diff --git a/src/hotspot/cpu/s390/relocInfo_s390.cpp b/src/hotspot/cpu/s390/relocInfo_s390.cpp index 9fcefd77962..747ae9c535d 100644 --- a/src/hotspot/cpu/s390/relocInfo_s390.cpp +++ b/src/hotspot/cpu/s390/relocInfo_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -99,7 +99,7 @@ address Relocation::pd_call_destination(address orig_addr) { return (address)(-1); } NativeFarCall* call; - if (orig_addr == NULL) { + if (orig_addr == nullptr) { call = nativeFarCall_at(inst_addr); } else { // must access location (in CP) where destination is stored in unmoved code, because load from CP is pc-relative diff --git a/src/hotspot/cpu/s390/runtime_s390.cpp b/src/hotspot/cpu/s390/runtime_s390.cpp index 978c036316e..18f40e87876 100644 --- a/src/hotspot/cpu/s390/runtime_s390.cpp +++ b/src/hotspot/cpu/s390/runtime_s390.cpp @@ -114,12 +114,12 @@ void OptoRuntime::generate_exception_blob() { // Pop the exception blob's C frame that has been pushed before. __ z_lgr(Z_SP, saved_sp); - // [Z_RET]!=NULL was possible in hotspot5 but not in sapjvm6. + // [Z_RET] isn't null was possible in hotspot5 but not in sapjvm6. // C2I adapter extensions are now removed by a resize in the frame manager // (unwind_initial_activation_pending_exception). 
#ifdef ASSERT __ z_ltgr(handle_exception, handle_exception); - __ asm_assert_ne("handler must not be NULL", 0x852); + __ asm_assert(Assembler::bcondNotZero, "handler must not be null", 0x852); #endif // Handle_exception contains the handler address. If the associated frame @@ -145,6 +145,6 @@ void OptoRuntime::generate_exception_blob() { masm->flush(); // Set exception blob. - OopMapSet *oop_maps = NULL; + OopMapSet *oop_maps = nullptr; _exception_blob = ExceptionBlob::create(&buffer, oop_maps, frame_size/wordSize); } diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp index c0ef3bb8c78..089ccb51163 100644 --- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp +++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016, 2019 SAP SE. All rights reserved. + * Copyright (c) 2016, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -293,7 +293,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, RegisterSet reg OopMap* map = new OopMap(frame_size_in_slots, 0); int regstosave_num = 0; - const RegisterSaver::LiveRegType* live_regs = NULL; + const RegisterSaver::LiveRegType* live_regs = nullptr; switch (reg_set) { case all_registers: @@ -329,7 +329,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, RegisterSet reg // We have to restore return_pc right away. // Nobody else will. Furthermore, return_pc isn't necessarily the default (Z_R14). // Nobody else knows which register we saved. - __ z_lg(return_pc, _z_abi16(return_pc) + frame_size_in_bytes, Z_SP); + __ z_lg(return_pc, _z_common_abi(return_pc) + frame_size_in_bytes, Z_SP); // Register save area in new frame starts above z_abi_160 area. 
int offset = register_save_offset; @@ -398,7 +398,7 @@ OopMap* RegisterSaver::generate_oop_map(MacroAssembler* masm, RegisterSet reg_se OopMap* map = new OopMap(frame_size_in_slots, 0); int regstosave_num = 0; - const RegisterSaver::LiveRegType* live_regs = NULL; + const RegisterSaver::LiveRegType* live_regs = nullptr; switch (reg_set) { case all_registers: @@ -448,7 +448,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, RegisterSet reg bool float_spilled = false; int regstosave_num = 0; - const RegisterSaver::LiveRegType* live_regs = NULL; + const RegisterSaver::LiveRegType* live_regs = nullptr; switch (reg_set) { case all_registers: @@ -762,7 +762,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, VMRegPair *regs2, int total_args_passed) { - assert(regs2 == NULL, "second VMRegPair array not used on this platform"); + assert(regs2 == nullptr, "second VMRegPair array not used on this platform"); // Calling conventions for C runtime calls and calls to JNI native methods. const VMReg z_iarg_reg[5] = { @@ -1017,7 +1017,7 @@ static void object_move(MacroAssembler *masm, __ add2reg(rHandle, reg2offset(src.first())+frame_offset, Z_SP); __ load_and_test_long(Z_R0, Address(rHandle)); __ z_brne(skip); - // Use a NULL handle if oop is NULL. + // Use a null handle if oop is null. __ clear_reg(rHandle, true, false); __ bind(skip); @@ -1043,7 +1043,7 @@ static void object_move(MacroAssembler *masm, __ z_stg(rOop, oop_slot_offset, Z_SP); __ add2reg(rHandle, oop_slot_offset, Z_SP); - // If Oop == NULL, use a NULL handle. + // If Oop is null, use a null handle. 
__ compare64_and_branch(rOop, (RegisterOrConstant)0L, Assembler::bcondNotEqual, skip); __ clear_reg(rHandle, true, false); __ bind(skip); @@ -1324,7 +1324,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, stack_slots / VMRegImpl::slots_per_word, in_ByteSize(-1), in_ByteSize(-1), - (OopMapSet *) NULL); + (OopMapSet *) nullptr); } @@ -1335,7 +1335,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, /////////////////////////////////////////////////////////////////////// address native_func = method->native_function(); - assert(native_func != NULL, "must have function"); + assert(native_func != nullptr, "must have function"); //--------------------------------------------------------------------- // We have received a description of where all the java args are located @@ -1363,7 +1363,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); VMRegPair *out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); - BasicType* in_elem_bt = NULL; + BasicType* in_elem_bt = nullptr; // Create the signature for the C call: // 1) add the JNIEnv* @@ -1457,7 +1457,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, // *_slot_offset indicates offset from SP in #stack slots // *_offset indicates offset from SP in #bytes - int stack_slots = c_calling_convention(out_sig_bt, out_regs, /*regs2=*/NULL, total_c_args) + // 1+2 + int stack_slots = c_calling_convention(out_sig_bt, out_regs, /*regs2=*/nullptr, total_c_args) + // 1+2 SharedRuntime::out_preserve_stack_slots(); // see c_calling_convention // Now the space for the inbound oop handle area. @@ -2075,7 +2075,7 @@ static address gen_c2i_adapter(MacroAssembler *masm, // Call patching needed? __ load_and_test_long(Z_R0_scratch, method_(code)); __ z_lg(ientry, method_(interpreter_entry)); // Preload interpreter entry (also if patching). 
- __ z_brne(patch_callsite); // Patch required if code != NULL (compiled target exists). + __ z_brne(patch_callsite); // Patch required if code isn't null (compiled target exists). __ bind(skip_fixup); // Return point from patch_callsite. @@ -2358,7 +2358,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm address c2i_entry = __ pc(); // Class initialization barrier for static methods - address c2i_no_clinit_check_entry = NULL; + address c2i_no_clinit_check_entry = nullptr; if (VM_Version::supports_fast_class_init_checks()) { Label L_skip_barrier; @@ -2478,7 +2478,7 @@ static void push_skeleton_frames(MacroAssembler* masm, bool deopt, // Make sure that there is at least one entry in the array. DEBUG_ONLY(__ z_ltgr(number_of_frames_reg, number_of_frames_reg)); - __ asm_assert_ne("array_size must be > 0", 0x205); + __ asm_assert(Assembler::bcondNotZero, "array_size must be > 0", 0x205); __ z_bru(loop_entry); @@ -2510,7 +2510,7 @@ void SharedRuntime::generate_deopt_blob() { CodeBuffer buffer("deopt_blob", 2048, 1024); InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); Label exec_mode_initialized; - OopMap* map = NULL; + OopMap* map = nullptr; OopMapSet *oop_maps = new OopMapSet(); unsigned int start_off = __ offset(); @@ -2627,7 +2627,7 @@ void SharedRuntime::generate_deopt_blob() { // occur so we don't need an oopmap. the value of the pc in the // frame is not particularly important. it just needs to identify the blob. - // Don't set last_Java_pc anymore here (is implicitly NULL then). + // Don't set last_Java_pc anymore here (is implicitly null then). // the correct PC is retrieved in pd_last_frame() in that case. 
__ set_last_Java_frame(/*sp*/Z_SP, noreg); // With EscapeAnalysis turned on, this call may safepoint @@ -2788,7 +2788,7 @@ void SharedRuntime::generate_uncommon_trap_blob() { } else { __ z_cliy(unpack_kind_byte_offset, unroll_block_reg, Deoptimization::Unpack_uncommon_trap); } - __ asm_assert_eq("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap", 0); + __ asm_assert(Assembler::bcondEqual, "SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap", 0); #endif __ zap_from_to(Z_SP, Z_SP, Z_R0_scratch, Z_R1, 500, -1); @@ -2844,7 +2844,7 @@ void SharedRuntime::generate_uncommon_trap_blob() { __ z_br(Z_R14); masm->flush(); - _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, framesize_in_bytes/wordSize); + _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, nullptr, framesize_in_bytes/wordSize); } #endif // COMPILER2 @@ -2854,7 +2854,7 @@ void SharedRuntime::generate_uncommon_trap_blob() { // Generate a special Compile2Runtime blob that saves all registers, // and setup oopmap. 
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) { - assert(StubRoutines::forward_exception_entry() != NULL, + assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before"); ResourceMark rm; @@ -2866,7 +2866,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t MacroAssembler* masm = new MacroAssembler(&buffer); unsigned int start_off = __ offset(); - address call_pc = NULL; + address call_pc = nullptr; int frame_size_in_bytes; bool cause_return = (poll_type == POLL_AT_RETURN); @@ -2921,7 +2921,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t if (!cause_return) { Label no_adjust; // If our stashed return pc was modified by the runtime we avoid touching it - const int offset_of_return_pc = _z_abi16(return_pc) + RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers); + const int offset_of_return_pc = _z_common_abi(return_pc) + RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers); __ z_cg(Z_R6, offset_of_return_pc, Z_SP); __ z_brne(no_adjust); @@ -2955,7 +2955,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t // must do any gc of the args. 
// RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { - assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); + assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before"); // allocate space for the code ResourceMark rm; @@ -2964,7 +2964,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha MacroAssembler* masm = new MacroAssembler(&buffer); OopMapSet *oop_maps = new OopMapSet(); - OopMap* map = NULL; + OopMap* map = nullptr; unsigned int start_off = __ offset(); diff --git a/src/hotspot/cpu/s390/smallRegisterMap_s390.inline.hpp b/src/hotspot/cpu/s390/smallRegisterMap_s390.inline.hpp index aaef8670c50..8c74eb7dd6d 100644 --- a/src/hotspot/cpu/s390/smallRegisterMap_s390.inline.hpp +++ b/src/hotspot/cpu/s390/smallRegisterMap_s390.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -54,7 +54,7 @@ public: inline address location(VMReg reg, intptr_t* sp) const { Unimplemented(); - return NULL; + return nullptr; } inline void set_location(VMReg reg, address loc) { assert_is_rfp(reg); } @@ -77,7 +77,7 @@ public: bool should_skip_missing() const { return false; } VMReg find_register_spilled_here(void* p, intptr_t* sp) { Unimplemented(); - return NULL; + return nullptr; } void print() const { print_on(tty); } void print_on(outputStream* st) const { st->print_cr("Small register map"); } diff --git a/src/hotspot/cpu/s390/stackChunkFrameStream_s390.inline.hpp b/src/hotspot/cpu/s390/stackChunkFrameStream_s390.inline.hpp index 4f372c982d5..d94dea33e55 100644 --- a/src/hotspot/cpu/s390/stackChunkFrameStream_s390.inline.hpp +++ b/src/hotspot/cpu/s390/stackChunkFrameStream_s390.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -46,25 +46,25 @@ inline frame StackChunkFrameStream::to_frame() const { template inline address StackChunkFrameStream::get_pc() const { Unimplemented(); - return NULL; + return nullptr; } template inline intptr_t* StackChunkFrameStream::fp() const { Unimplemented(); - return NULL; + return nullptr; } template inline intptr_t* StackChunkFrameStream::derelativize(int offset) const { Unimplemented(); - return NULL; + return nullptr; } template inline intptr_t* StackChunkFrameStream::unextended_sp_for_interpreter_frame() const { Unimplemented(); - return NULL; + return nullptr; } template diff --git a/src/hotspot/cpu/s390/stubGenerator_s390.cpp b/src/hotspot/cpu/s390/stubGenerator_s390.cpp index 5d6b836f1e2..d5465343131 100644 --- a/src/hotspot/cpu/s390/stubGenerator_s390.cpp +++ b/src/hotspot/cpu/s390/stubGenerator_s390.cpp @@ -372,9 +372,9 @@ class StubGenerator: public StubCodeGenerator { #ifdef ASSERT char assertMsg[] = "check BasicType definition in globalDefinitions.hpp"; __ z_chi(r_arg_result_type, T_BOOLEAN); - __ asm_assert_low(assertMsg, 0x0234); + __ asm_assert(Assembler::bcondNotLow, assertMsg, 0x0234); __ z_chi(r_arg_result_type, T_NARROWOOP); - __ asm_assert_high(assertMsg, 0x0235); + __ asm_assert(Assembler::bcondNotHigh, assertMsg, 0x0235); #endif __ add2reg(r_arg_result_type, -T_BOOLEAN); // Remove offset. __ z_larl(Z_R1, firstHandler); // location of first handler @@ -483,7 +483,7 @@ class StubGenerator: public StubCodeGenerator { __ z_st(exception_line, thread_(exception_line)); // Complete return to VM. - assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before"); + assert(StubRoutines::_call_stub_return_address != nullptr, "must have been generated before"); // Continue in call stub. 
__ z_br(Z_ARG2); @@ -649,7 +649,7 @@ class StubGenerator: public StubCodeGenerator { RuntimeStub::new_runtime_stub(name, &code, frame_complete_pc - start, framesize_in_bytes/wordSize, - NULL /*oop_maps*/, false); + nullptr /*oop_maps*/, false); return stub->entry_point(); } @@ -685,12 +685,12 @@ class StubGenerator: public StubCodeGenerator { const Register Rarray_ptr = Z_ARG5; // Current value from cache array. if (UseCompressedOops) { - assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub"); + assert(Universe::heap() != nullptr, "java heap must be initialized to generate partial_subtype_check stub"); } // Always take the slow path. __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, - Rarray_ptr, Rlength, NULL, &miss); + Rarray_ptr, Rlength, nullptr, &miss); // Match falls through here. __ clear_reg(Z_RET); // Zero indicates a match. Set EQ flag in CC. @@ -740,7 +740,7 @@ class StubGenerator: public StubCodeGenerator { void assert_positive_int(Register count) { #ifdef ASSERT __ z_srag(Z_R0, count, 31); // Just leave the sign (must be zero) in Z_R0. - __ asm_assert_eq("missing zero extend", 0xAFFE); + __ asm_assert(Assembler::bcondZero, "missing zero extend", 0xAFFE); #endif } @@ -3155,7 +3155,7 @@ class StubGenerator: public StubCodeGenerator { // nmethod entry barriers for concurrent class unloading BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); - if (bs_nm != NULL) { + if (bs_nm != nullptr) { StubRoutines::zarch::_nmethod_entry_barrier = generate_nmethod_entry_barrier(); } @@ -3171,7 +3171,7 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_AES_encrypt("AES_encryptBlock_chaining"); StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_AES_decrypt("AES_decryptBlock_chaining"); } else { - // In PRODUCT builds, the function pointers will keep their initial (NULL) value. 
+ // In PRODUCT builds, the function pointers will keep their initial (null) value. // LibraryCallKit::try_to_inline() will return false then, preventing the intrinsic to be called. assert(VM_Version::has_Crypto_AES(), "Inconsistent settings. Check vm_version_s390.cpp"); } @@ -3181,7 +3181,7 @@ class StubGenerator: public StubCodeGenerator { if (VM_Version::has_Crypto_AES_CTR()) { StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt("counterMode_AESCrypt"); } else { - // In PRODUCT builds, the function pointers will keep their initial (NULL) value. + // In PRODUCT builds, the function pointers will keep their initial (null) value. // LibraryCallKit::try_to_inline() will return false then, preventing the intrinsic to be called. assert(VM_Version::has_Crypto_AES_CTR(), "Inconsistent settings. Check vm_version_s390.cpp"); } diff --git a/src/hotspot/cpu/s390/stubRoutines_s390.cpp b/src/hotspot/cpu/s390/stubRoutines_s390.cpp index eac2ccf5f20..711de63fdea 100644 --- a/src/hotspot/cpu/s390/stubRoutines_s390.cpp +++ b/src/hotspot/cpu/s390/stubRoutines_s390.cpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016, 2017 SAP SE. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,12 +33,12 @@ // Implementation of the platform-specific part of StubRoutines - for // a description of how to extend it, see the stubRoutines.hpp file. -address StubRoutines::zarch::_partial_subtype_check = NULL; +address StubRoutines::zarch::_partial_subtype_check = nullptr; // Comapct string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction. 
-address StubRoutines::zarch::_trot_table_addr = NULL; +address StubRoutines::zarch::_trot_table_addr = nullptr; -address StubRoutines::zarch::_nmethod_entry_barrier = NULL; +address StubRoutines::zarch::_nmethod_entry_barrier = nullptr; int StubRoutines::zarch::_atomic_memory_operation_lock = StubRoutines::zarch::unlocked; @@ -48,15 +48,14 @@ void StubRoutines::zarch::generate_load_absolute_address(MacroAssembler* masm, R __ load_absolute_address(table, table_addr); #ifdef ASSERT - assert(table_addr != NULL, "CRC lookup table address must be initialized by now"); + assert(table_addr != nullptr, "CRC lookup table address must be initialized by now"); assert(*((uint32_t*)(table_addr+4)) == (uint32_t)table_contents, "Bad CRC lookup table: 0x%8.8x, expected 0x%8.8x", *((uint32_t*)(table_addr+4)), (uint32_t)table_contents); { Label L; __ load_const_optimized(Z_R0, table_addr); __ z_cgr(table, Z_R0); // safety net __ z_bre(L); - __ z_illtrap(); - __ asm_assert_eq("crc_table: external word relocation required for load_absolute_address", 0x33); + __ stop("crc_table: external word relocation required for load_absolute_address", 0x33); __ bind(L); } { @@ -65,8 +64,7 @@ void StubRoutines::zarch::generate_load_absolute_address(MacroAssembler* masm, R __ z_cl(Z_R0, Address(table, 4)); // safety net __ z_bre(L); __ z_l(Z_R0, Address(table, 4)); // Load data from memory, we know the constant we compared against. 
- __ z_illtrap(); - __ asm_assert_eq("crc_table: address or contents seems to be messed up", 0x22); + __ stop("crc_table: address or contents seems to be messed up", 0x22); __ bind(L); } #endif @@ -90,7 +88,7 @@ void StubRoutines::zarch::generate_load_trot_table_addr(MacroAssembler* masm, Re __ relocate(rspec); __ load_absolute_address(table, _trot_table_addr); #ifdef ASSERT - assert(_trot_table_addr != NULL, "Translate table address must be initialized by now"); + assert(_trot_table_addr != nullptr, "Translate table address must be initialized by now"); assert((p2i(_trot_table_addr) & (TROT_ALIGNMENT-1)) == 0, "Translate table alignment error"); for (int i = 0; i < 256; i++) { assert(i == *((jshort*)(_trot_table_addr+2*i)), "trot_table[%d] = %d", i, *((jshort*)(_trot_table_addr+2*i))); @@ -100,8 +98,7 @@ void StubRoutines::zarch::generate_load_trot_table_addr(MacroAssembler* masm, Re __ load_const_optimized(Z_R0, StubRoutines::zarch::_trot_table_addr); __ z_cgr(table, Z_R0); // safety net __ z_bre(L); - __ z_illtrap(); - __ asm_assert_eq("crc_table: external word relocation does not work for load_absolute_address", 0x33); + __ stop("crc_table: external word relocation does not work for load_absolute_address", 0x33); __ bind(L); } { @@ -110,8 +107,7 @@ void StubRoutines::zarch::generate_load_trot_table_addr(MacroAssembler* masm, Re __ z_clg(Z_R0, Address(table, 8)); // safety net __ z_bre(L); __ z_lg(Z_R0, Address(table, 8)); // Load data from memory, we know the constant we compared against. 
- __ z_illtrap(); - __ asm_assert_eq("trot_table: address or contents seems to be messed up", 0x22); + __ stop("trot_table: address or contents seems to be messed up", 0x22); __ bind(L); } #endif diff --git a/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp b/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp index 60ca0020907..efb4a38315b 100644 --- a/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp +++ b/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp @@ -281,7 +281,7 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() { // don't dereference it as in case of ints, floats, etc.. // UNBOX argument - // Load reference and check for NULL. + // Load reference and check for null. Label do_int_Entry4Boxed; __ bind(do_boxed); { @@ -589,7 +589,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() { } address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) { - assert(!pass_oop || message == NULL, "either oop or message but not both"); + assert(!pass_oop || message == nullptr, "either oop or message but not both"); address entry = __ pc(); BLOCK_COMMENT("exception_handler_common {"); @@ -597,7 +597,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch // Expression stack must be empty before entering the VM if an // exception happened. 
__ empty_expression_stack(); - if (name != NULL) { + if (name != nullptr) { __ load_absolute_address(Z_ARG2, (address)name); } else { __ clear_reg(Z_ARG2, true, false); @@ -608,7 +608,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), Z_ARG2, Z_tos /*object (see TT::aastore())*/); } else { - if (message != NULL) { + if (message != nullptr) { __ load_absolute_address(Z_ARG3, (address)message); } else { __ clear_reg(Z_ARG3, true, false); @@ -638,7 +638,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for (TosState state, __ resize_frame_absolute(sp_before_i2c_extension, Z_locals/*tmp*/, true/*load_fp*/); // TODO(ZASM): necessary?? - // // and NULL it as marker that esp is now tos until next java call + // // and null it as marker that esp is now tos until next java call // __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); __ restore_bcp(); @@ -683,7 +683,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, BLOCK_COMMENT("deopt_entry {"); - // TODO(ZASM): necessary? NULL last_sp until next java call + // TODO(ZASM): necessary? null last_sp until next java call // __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); __ z_lg(Z_fp, _z_abi(callers_sp), Z_SP); // Restore frame pointer. 
__ restore_bcp(); @@ -701,7 +701,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, __ should_not_reach_here(); __ bind(L); } - if (continuation == NULL) { + if (continuation == nullptr) { __ dispatch_next(state, step); } else { __ jump_to_entry(continuation, Z_R1_scratch); @@ -780,8 +780,8 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) // InterpreterRuntime::frequency_counter_overflow takes two // arguments, the first (thread) is passed by call_VM, the second // indicates if the counter overflow occurs at a backwards branch - // (NULL bcp). We pass zero for it. The call returns the address - // of the verified entry point for the method or NULL if the + // (null bcp). We pass zero for it. The call returns the address + // of the verified entry point for the method or null if the // compilation did not complete (either went background or bailed // out). __ clear_reg(Z_ARG2); @@ -812,7 +812,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register frame_ // Get the stack base, and in debug, verify it is non-zero. __ z_lg(tmp1, thread_(stack_base)); #ifdef ASSERT - address reentry = NULL; + address reentry = nullptr; NearLabel base_not_zero; __ compareU64_and_branch(tmp1, (intptr_t)0L, Assembler::bcondNotEqual, base_not_zero); reentry = __ stop_chain_static(reentry, "stack base is zero in generate_stack_overflow_check"); @@ -850,7 +850,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register frame_ // Note also that the restored frame is not necessarily interpreted. // Use the shared runtime version of the StackOverflowError. 
- assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated"); + assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated"); AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry()); __ load_absolute_address(tmp1, StubRoutines::throw_StackOverflowError_entry()); __ z_br(tmp1); @@ -875,7 +875,7 @@ void TemplateInterpreterGenerator::lock_method(void) { __ get_method(method); #ifdef ASSERT - address reentry = NULL; + address reentry = nullptr; { Label L; __ testbit(method2_(method, access_flags), JVM_ACC_SYNCHRONIZED_BIT); @@ -909,7 +909,7 @@ void TemplateInterpreterGenerator::lock_method(void) { { NearLabel L; __ compare64_and_branch(object, (intptr_t) 0, Assembler::bcondNotEqual, L); - reentry = __ stop_chain_static(reentry, "synchronization object is NULL"); + reentry = __ stop_chain_static(reentry, "synchronization object is null"); __ bind(L); } #endif // ASSERT @@ -1087,8 +1087,8 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { } // asm_assert* is a nop in product builds - NOT_PRODUCT(__ z_cg(Z_R14, _z_abi16(return_pc), Z_SP)); - NOT_PRODUCT(__ asm_assert_eq("killed Z_R14", 0)); + NOT_PRODUCT(__ z_cg(Z_R14, _z_common_abi(return_pc), Z_SP)); + NOT_PRODUCT(__ asm_assert(Assembler::bcondEqual, "killed Z_R14", 0)); __ resize_frame_absolute(sp_after_resize, fp, true); __ save_return_pc(Z_R14); @@ -1132,12 +1132,12 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { // z_ijava_state->locals = Z_esp + parameter_count bytes __ z_stg(Z_locals, _z_ijava_state_neg(locals), fp); - // z_ijava_state->oop_temp = NULL; + // z_ijava_state->oop_temp = nullptr; __ store_const(Address(fp, oop_tmp_offset), 0); // Initialize z_ijava_state->mdx. 
Register Rmdp = Z_bcp; - // native_call: assert that mdo == NULL + // native_call: assert that mdo is null const bool check_for_mdo = !native_call DEBUG_ONLY(|| native_call); if (ProfileInterpreter && check_for_mdo) { Label get_continue; @@ -1207,7 +1207,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M // Decide what to do: Use same platform specific instructions and runtime calls as compilers. bool use_instruction = false; - address runtime_entry = NULL; + address runtime_entry = nullptr; int num_args = 1; bool double_precision = true; @@ -1236,7 +1236,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M } // Use normal entry if neither instruction nor runtime call is used. - if (!use_instruction && runtime_entry == NULL) return NULL; + if (!use_instruction && runtime_entry == nullptr) return nullptr; address entry = __ pc(); @@ -1343,7 +1343,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { // Make sure method is native and not abstract. #ifdef ASSERT - address reentry = NULL; + address reentry = nullptr; { Label L; __ testbit(method_(access_flags), JVM_ACC_NATIVE_BIT); __ z_btrue(L); @@ -1711,7 +1711,7 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { // Make sure method is not native and not abstract. // Rethink these assertions - they can be simplified and shared. #ifdef ASSERT - address reentry = NULL; + address reentry = nullptr; { Label L; __ testbit(method_(access_flags), JVM_ACC_NATIVE_BIT); __ z_bfalse(L); @@ -2165,7 +2165,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() { // The member name argument must be restored if _invokestatic is // re-executed after a PopFrame call. Detect such a case in the // InterpreterRuntime function and return the member name - // argument, or NULL. + // argument, or null. 
__ z_lg(Z_ARG2, Address(Z_locals)); __ get_method(Z_ARG3); __ call_VM(Z_tmp_1, @@ -2378,7 +2378,7 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) { // The run-time runtime saves the right registers, depending on // the tosca in-state for the given template. address entry = Interpreter::trace_code(t->tos_in()); - guarantee(entry != NULL, "entry must have been generated"); + guarantee(entry != nullptr, "entry must have been generated"); __ call_stub(entry); } diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp index c072cf5fa70..441600eea38 100644 --- a/src/hotspot/cpu/s390/templateTable_s390.cpp +++ b/src/hotspot/cpu/s390/templateTable_s390.cpp @@ -138,7 +138,7 @@ static inline Address aaddress(int n) { return iaddress(n); } -// Pass NULL, if no shift instruction should be emitted. +// Pass null, if no shift instruction should be emitted. static inline Address iaddress(InterpreterMacroAssembler *masm, Register r) { if (masm) { masm->z_sllg(r, r, LogBytesPerWord); // index2bytes @@ -146,7 +146,7 @@ static inline Address iaddress(InterpreterMacroAssembler *masm, Register r) { return Address(Z_locals, r, Interpreter::local_offset_in_bytes(0)); } -// Pass NULL, if no shift instruction should be emitted. +// Pass null, if no shift instruction should be emitted. static inline Address laddress(InterpreterMacroAssembler *masm, Register r) { if (masm) { masm->z_sllg(r, r, LogBytesPerWord); // index2bytes @@ -464,7 +464,7 @@ void TemplateTable::fast_aldc(LdcType type) { __ z_ltgr(Z_tos, Z_tos); __ z_bre(L_do_resolve); - // Convert null sentinel to NULL. + // Convert null sentinel to null. __ load_const_optimized(Z_R1_scratch, (intptr_t)Universe::the_null_sentinel_addr()); __ resolve_oop_handle(Z_R1_scratch); __ z_cg(Z_tos, Address(Z_R1_scratch)); @@ -1167,7 +1167,7 @@ void TemplateTable::aastore() { // Address where the store goes to, i.e. 
&(Rarry[index]) __ load_address(Rstore_addr, Address(Rarray, Rindex, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); - // do array store check - check for NULL value first. + // do array store check - check for null value first. __ compareU64_and_branch(Rvalue, (intptr_t)0, Assembler::bcondEqual, is_null); Register Rsub_klass = Z_ARG4; @@ -1191,11 +1191,11 @@ void TemplateTable::aastore() { Register tmp3 = Rsub_klass; - // Have a NULL in Rvalue. + // Have a null in Rvalue. __ bind(is_null); __ profile_null_seen(tmp1); - // Store a NULL. + // Store a null. do_oop_store(_masm, Address(Rstore_addr, (intptr_t)0), noreg, tmp3, tmp2, tmp1, IS_ARRAY); __ z_bru(done); @@ -1937,7 +1937,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { const Address mask(mdo, MethodData::backedge_mask_offset()); __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, Z_ARG2, false, Assembler::bcondZero, - UseOnStackReplacement ? &backedge_counter_overflow : NULL); + UseOnStackReplacement ? &backedge_counter_overflow : nullptr); __ z_bru(dispatch); __ bind(no_mdo); } @@ -1948,7 +1948,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { __ increment_mask_and_jump(Address(m_counters, be_offset), increment, mask, Z_ARG2, false, Assembler::bcondZero, - UseOnStackReplacement ? &backedge_counter_overflow : NULL); + UseOnStackReplacement ? &backedge_counter_overflow : nullptr); __ bind(dispatch); } @@ -1972,7 +1972,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), Z_ARG2); - // Z_RET: osr nmethod (osr ok) or NULL (osr not possible). + // Z_RET: osr nmethod (osr ok) or null (osr not possible). __ compare64_and_branch(Z_RET, (intptr_t) 0, Assembler::bcondEqual, dispatch); // Nmethod may have been invalidated (VM may block upon call_VM return). 
@@ -2385,7 +2385,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no, __ load_resolved_method_at_index(byte_no, cache, cpe_offset, method); __ load_method_holder(klass, method); - __ clinit_barrier(klass, Z_thread, NULL /*L_fast_path*/, &clinit_barrier_slow); + __ clinit_barrier(klass, Z_thread, nullptr /*L_fast_path*/, &clinit_barrier_slow); } BLOCK_COMMENT("} resolve_cache_and_index"); @@ -2425,7 +2425,7 @@ void TemplateTable::load_invokedynamic_entry(Register method) { __ load_resolved_indy_entry(cache, index); __ z_lg(method, Address(cache, in_bytes(ResolvedIndyEntry::method_offset()))); - // The invokedynamic is unresolved iff method is NULL + // The invokedynamic is unresolved iff method is null __ z_clgij(method, (unsigned long)nullptr, Assembler::bcondNotEqual, resolved); // method != 0, jump to resolved Bytecodes::Code code = bytecode(); // Call to the interpreter runtime to resolve invokedynamic @@ -2547,12 +2547,12 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index, Z_ARG3, Z_R1_scratch); if (is_static) { - __ clear_reg(Z_ARG2, true, false); // NULL object reference. Don't set CC. + __ clear_reg(Z_ARG2, true, false); // null object reference. Don't set CC. } else { __ mem2reg_opt(Z_ARG2, at_tos()); // Get object pointer without popping it. 
__ verify_oop(Z_ARG2); } - // Z_ARG2: object pointer or NULL + // Z_ARG2: object pointer or null // Z_ARG3: cache entry pointer __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), @@ -2864,7 +2864,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, // object(tos) __ load_address(Z_ARG4, Address(Z_esp, Interpreter::stackElementSize)); - // Z_ARG2: object pointer set up above (NULL if static) + // Z_ARG2: object pointer set up above (null if static) // Z_ARG3: cache entry pointer // Z_ARG4: jvalue object on the stack __ call_VM(noreg, @@ -3786,7 +3786,7 @@ void TemplateTable::invokedynamic(int byte_no) { // spec jbb2005 shows no measurable performance degradation. void TemplateTable::_new() { transition(vtos, atos); - address prev_instr_address = NULL; + address prev_instr_address = nullptr; Register tags = Z_tmp_1; Register RallocatedObject = Z_tos; Register cpool = Z_ARG2; @@ -3930,7 +3930,7 @@ void TemplateTable::checkcast() { NearLabel done, is_null, ok_is_subtype, quicked, resolved; BLOCK_COMMENT("checkcast {"); - // If object is NULL, we are almost done. + // If object is null, we are almost done. __ compareU64_and_branch(Z_tos, (intptr_t) 0, Assembler::bcondZero, is_null); // Get cpool & tags index. @@ -3984,7 +3984,7 @@ void TemplateTable::checkcast() { __ z_lgr(Z_tos, receiver); // Restore object. - // Collect counts on whether this test sees NULLs a lot or not. + // Collect counts on whether this test sees nulls a lot or not. if (ProfileInterpreter) { __ z_bru(done); __ bind(is_null); @@ -4003,7 +4003,7 @@ void TemplateTable::instanceof() { NearLabel done, is_null, ok_is_subtype, quicked, resolved; BLOCK_COMMENT("instanceof {"); - // If object is NULL, we are almost done. + // If object is null, we are almost done. __ compareU64_and_branch(Z_tos, (intptr_t) 0, Assembler::bcondZero, is_null); // Get cpool & tags index. 
@@ -4054,7 +4054,7 @@ void TemplateTable::instanceof() { __ bind(ok_is_subtype); __ load_const_optimized(Z_tos, 1); - // Collect counts on whether this test sees NULLs a lot or not. + // Collect counts on whether this test sees nulls a lot or not. if (ProfileInterpreter) { __ z_bru(done); __ bind(is_null); @@ -4064,8 +4064,8 @@ void TemplateTable::instanceof() { } __ bind(done); - // tos = 0: obj == NULL or obj is not an instanceof the specified klass - // tos = 1: obj != NULL and obj is an instanceof the specified klass + // tos = 0: obj is null or obj is not an instanceof the specified klass + // tos = 1: obj isn't null and obj is an instanceof the specified klass BLOCK_COMMENT("} instanceof"); } @@ -4141,13 +4141,13 @@ void TemplateTable::monitorenter() { BLOCK_COMMENT("monitorenter {"); - // Check for NULL object. + // Check for null object. __ null_check(Z_tos); const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; NearLabel allocated; // Initialize entry pointer. const Register Rfree_slot = Z_tmp_1; - __ clear_reg(Rfree_slot, true, false); // Points to free slot or NULL. Don't set CC. + __ clear_reg(Rfree_slot, true, false); // Points to free slot or null. Don't set CC. // Find a free slot in the monitor block from top to bot (result in Rfree_slot). { @@ -4160,7 +4160,7 @@ void TemplateTable::monitorenter() { __ add2reg(Rbot, -frame::z_ijava_state_size, Z_fp); #ifdef ASSERT - address reentry = NULL; + address reentry = nullptr; { NearLabel ok; __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotHigh, ok); reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors points below monitor block bottom"); @@ -4193,7 +4193,7 @@ void TemplateTable::monitorenter() { __ bind(exit); } - // Rfree_slot != NULL -> found one + // Rfree_slot isn't null -> found one __ compareU64_and_branch(Rfree_slot, (intptr_t)0L, Assembler::bcondNotEqual, allocated); // Allocate one if there's no free slot. 
@@ -4230,7 +4230,7 @@ void TemplateTable::monitorexit() { BLOCK_COMMENT("monitorexit {"); - // Check for NULL object. + // Check for null object. __ null_check(Z_tos); NearLabel found, not_found; @@ -4248,7 +4248,7 @@ void TemplateTable::monitorexit() { __ add2reg(Rbot, -frame::z_ijava_state_size, Z_fp); #ifdef ASSERT - address reentry = NULL; + address reentry = nullptr; { NearLabel ok; __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotHigh, ok); reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors points below monitor block bottom"); diff --git a/src/hotspot/cpu/s390/vm_version_s390.cpp b/src/hotspot/cpu/s390/vm_version_s390.cpp index e53d6fc5635..60f726edacc 100644 --- a/src/hotspot/cpu/s390/vm_version_s390.cpp +++ b/src/hotspot/cpu/s390/vm_version_s390.cpp @@ -717,7 +717,7 @@ void VM_Version::print_platform_virtualization_info(outputStream* st) { // - LPAR // - whole "Box" (CPUs ) // - z/VM / KVM (VM); this is not available in an LPAR-only setup - const char* kw[] = { "LPAR", "CPUs", "VM", NULL }; + const char* kw[] = { "LPAR", "CPUs", "VM", nullptr }; const char* info_file = "/proc/sysinfo"; if (!print_matching_lines_from_file(info_file, st, kw)) { @@ -842,7 +842,7 @@ void VM_Version::set_features_from(const char* march) { bool err = false; bool prt = false; - if ((march != NULL) && (march[0] != '\0')) { + if ((march != nullptr) && (march[0] != '\0')) { const int buf_len = 16; const int hdr_len = 5; char buf[buf_len]; @@ -909,10 +909,10 @@ void VM_Version::set_features_from(const char* march) { // < 0: failure: required number of feature bit string words (buffer too small). // == 0: failure: operation aborted. 
// -static long (*getFeatures)(unsigned long*, int, int) = NULL; +static long (*getFeatures)(unsigned long*, int, int) = nullptr; void VM_Version::set_getFeatures(address entryPoint) { - if (getFeatures == NULL) { + if (getFeatures == nullptr) { getFeatures = (long(*)(unsigned long*, int, int))entryPoint; } } diff --git a/src/hotspot/cpu/s390/vtableStubs_s390.cpp b/src/hotspot/cpu/s390/vtableStubs_s390.cpp index e89cf421f6c..6d6eb5ebf2a 100644 --- a/src/hotspot/cpu/s390/vtableStubs_s390.cpp +++ b/src/hotspot/cpu/s390/vtableStubs_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2021 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -49,9 +49,9 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. const int stub_code_length = code_size_limit(true); VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index); - // Can be NULL if there is no free space in the code cache. - if (s == NULL) { - return NULL; + // Can be null if there is no free space in the code cache. + if (s == nullptr) { + return nullptr; } // Count unused bytes in instruction sequences of variable size. @@ -82,7 +82,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { assert(VtableStub::receiver_location() == Z_R2->as_VMReg(), "receiver expected in Z_ARG1"); const Register rcvr_klass = Z_R1_scratch; - address npe_addr = __ pc(); // npe == NULL ptr exception + address npe_addr = __ pc(); // npe is short for null pointer exception // Get receiver klass. __ load_klass(rcvr_klass, Z_ARG1); @@ -152,9 +152,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. 
const int stub_code_length = code_size_limit(false); VtableStub* s = new(stub_code_length) VtableStub(false, itable_index); - // Can be NULL if there is no free space in the code cache. - if (s == NULL) { - return NULL; + // Can be null if there is no free space in the code cache. + if (s == nullptr) { + return nullptr; } // Count unused bytes in instruction sequences of variable size. @@ -193,7 +193,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { // Get receiver klass. // Must do an explicit check if offset too large or implicit checks are disabled. - address npe_addr = __ pc(); // npe == NULL ptr exception + address npe_addr = __ pc(); // npe is short for null pointer exception __ load_klass(rcvr_klass, Z_ARG1); // Receiver subtype check against REFC. diff --git a/src/hotspot/cpu/x86/assembler_x86.cpp b/src/hotspot/cpu/x86/assembler_x86.cpp index ccf3b236afc..bc2611ac801 100644 --- a/src/hotspot/cpu/x86/assembler_x86.cpp +++ b/src/hotspot/cpu/x86/assembler_x86.cpp @@ -217,6 +217,17 @@ void Assembler::init_attributes(void) { _attributes = nullptr; } +void Assembler::set_attributes(InstructionAttr* attributes) { + // Record the assembler in the attributes, so the attributes destructor can + // clear the assembler's attributes, cleaning up the otherwise dangling + // pointer. gcc13 has a false positive warning, because it doesn't tie that + // cleanup to the assignment of _attributes here. 
+ attributes->set_current_assembler(this); + PRAGMA_DIAG_PUSH + PRAGMA_DANGLING_POINTER_IGNORED + _attributes = attributes; + PRAGMA_DIAG_POP +} void Assembler::membar(Membar_mask_bits order_constraint) { // We only have to handle StoreLoad @@ -11442,7 +11453,6 @@ void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix vex_x = adr.index_needs_rex(); } set_attributes(attributes); - attributes->set_current_assembler(this); // For EVEX instruction (which is not marked as pure EVEX instruction) check and see if this instruction // is allowed in legacy mode and has resources which will fit in it. @@ -11489,7 +11499,6 @@ int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexS bool vex_b = (src_enc & 8) == 8; bool vex_x = false; set_attributes(attributes); - attributes->set_current_assembler(this); // For EVEX instruction (which is not marked as pure EVEX instruction) check and see if this instruction // is allowed in legacy mode and has resources which will fit in it. 
@@ -12547,7 +12556,7 @@ void Assembler::prefix(Register dst, Address adr, Prefix p) { if (adr.index_needs_rex()) { assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X"); } else { - prefix(REX_B); + p = (Prefix)(p | REX_B); } } else { if (adr.index_needs_rex()) { diff --git a/src/hotspot/cpu/x86/assembler_x86.hpp b/src/hotspot/cpu/x86/assembler_x86.hpp index a8acf7aacb1..c45a1580cb9 100644 --- a/src/hotspot/cpu/x86/assembler_x86.hpp +++ b/src/hotspot/cpu/x86/assembler_x86.hpp @@ -678,7 +678,8 @@ private: bool _legacy_mode_vlbw; NOT_LP64(bool _is_managed;) - class InstructionAttr *_attributes; + InstructionAttr *_attributes; + void set_attributes(InstructionAttr* attributes); // 64bit prefixes void prefix(Register reg); @@ -917,8 +918,6 @@ private: // belong in macro assembler but there is no need for both varieties to exist void init_attributes(void); - - void set_attributes(InstructionAttr *attributes) { _attributes = attributes; } void clear_attributes(void) { _attributes = nullptr; } void set_managed(void) { NOT_LP64(_is_managed = true;) } @@ -2892,7 +2891,6 @@ public: if (_current_assembler != nullptr) { _current_assembler->clear_attributes(); } - _current_assembler = nullptr; } private: diff --git a/src/hotspot/cpu/x86/continuationFreezeThaw_x86.inline.hpp b/src/hotspot/cpu/x86/continuationFreezeThaw_x86.inline.hpp index bc71662467e..1078e1dca67 100644 --- a/src/hotspot/cpu/x86/continuationFreezeThaw_x86.inline.hpp +++ b/src/hotspot/cpu/x86/continuationFreezeThaw_x86.inline.hpp @@ -81,11 +81,11 @@ frame FreezeBase::new_heap_frame(frame& f, frame& caller) { if (FKind::interpreted) { assert((intptr_t*)f.at(frame::interpreter_frame_last_sp_offset) == nullptr || f.unextended_sp() == (intptr_t*)f.at(frame::interpreter_frame_last_sp_offset), ""); - int locals = f.interpreter_frame_method()->max_locals(); + intptr_t locals_offset = *f.addr_at(frame::interpreter_frame_locals_offset); // If the caller.is_empty(), i.e. 
we're freezing into an empty chunk, then we set // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp bool overlap_caller = caller.is_interpreted_frame() || caller.is_empty(); - fp = caller.unextended_sp() - (locals + frame::sender_sp_offset) + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0); + fp = caller.unextended_sp() - 1 - locals_offset + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0); sp = fp - (f.fp() - f.unextended_sp()); assert(sp <= fp, ""); assert(fp <= caller.unextended_sp(), ""); @@ -94,7 +94,8 @@ frame FreezeBase::new_heap_frame(frame& f, frame& caller) { assert(_cont.tail()->is_in_chunk(sp), ""); frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */); - *hf.addr_at(frame::interpreter_frame_locals_offset) = frame::sender_sp_offset + locals - 1; + // copy relativized locals from the stack frame + *hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset; return hf; } else { // We need to re-read fp out of the frame because it may be an oop and we might have @@ -223,10 +224,10 @@ template frame ThawBase::new_stack_frame(const frame& hf, frame& frame f(frame_sp, frame_sp, fp, hf.pc()); // we need to set the locals so that the caller of new_stack_frame() can call // ContinuationHelper::InterpretedFrame::frame_bottom - intptr_t offset = *hf.addr_at(frame::interpreter_frame_locals_offset); - assert((int)offset == frame::sender_sp_offset + locals - 1, ""); - // set relativized locals - *f.addr_at(frame::interpreter_frame_locals_offset) = offset; + intptr_t locals_offset = *hf.addr_at(frame::interpreter_frame_locals_offset); + assert((int)locals_offset == frame::sender_sp_offset + locals - 1, ""); + // copy relativized locals from the heap frame + *f.addr_at(frame::interpreter_frame_locals_offset) = locals_offset; return f; } else { int fsize = FKind::size(hf); @@ -285,8 +286,4 @@ inline void 
ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, c derelativize_one(vfp, frame::interpreter_frame_initial_sp_offset); } -inline void ThawBase::set_interpreter_frame_bottom(const frame& f, intptr_t* bottom) { - // Nothing to do. Just make sure the relativized locals is already set. - assert((*f.addr_at(frame::interpreter_frame_locals_offset) == (bottom - 1) - f.fp()), ""); -} #endif // CPU_X86_CONTINUATIONFREEZE_THAW_X86_INLINE_HPP diff --git a/src/hotspot/cpu/x86/downcallLinker_x86_32.cpp b/src/hotspot/cpu/x86/downcallLinker_x86_32.cpp index 3f1241970f2..aabe49a3002 100644 --- a/src/hotspot/cpu/x86/downcallLinker_x86_32.cpp +++ b/src/hotspot/cpu/x86/downcallLinker_x86_32.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,8 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature, const GrowableArray& input_registers, const GrowableArray& output_registers, bool needs_return_buffer, - int captured_state_mask) { + int captured_state_mask, + bool needs_transition) { Unimplemented(); return nullptr; } diff --git a/src/hotspot/cpu/x86/downcallLinker_x86_64.cpp b/src/hotspot/cpu/x86/downcallLinker_x86_64.cpp index 787a66eb9e5..6c6b44a158b 100644 --- a/src/hotspot/cpu/x86/downcallLinker_x86_64.cpp +++ b/src/hotspot/cpu/x86/downcallLinker_x86_64.cpp @@ -46,6 +46,7 @@ class DowncallStubGenerator : public StubCodeGenerator { bool _needs_return_buffer; int _captured_state_mask; + bool _needs_transition; int _frame_complete; int _frame_size_slots; @@ -59,7 +60,8 @@ public: const GrowableArray& input_registers, const GrowableArray& output_registers, bool needs_return_buffer, - int captured_state_mask) + int captured_state_mask, + bool needs_transition) : 
StubCodeGenerator(buffer, PrintMethodHandleStubs), _signature(signature), _num_args(num_args), @@ -69,6 +71,7 @@ public: _output_registers(output_registers), _needs_return_buffer(needs_return_buffer), _captured_state_mask(captured_state_mask), + _needs_transition(needs_transition), _frame_complete(0), _frame_size_slots(0), _oop_maps(nullptr) { @@ -99,13 +102,15 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature, const GrowableArray& input_registers, const GrowableArray& output_registers, bool needs_return_buffer, - int captured_state_mask) { + int captured_state_mask, + bool needs_transition) { int code_size = native_invoker_code_base_size + (num_args * native_invoker_size_per_arg); int locs_size = 1; // can not be zero CodeBuffer code("nep_invoker_blob", code_size, locs_size); DowncallStubGenerator g(&code, signature, num_args, ret_bt, abi, input_registers, output_registers, - needs_return_buffer, captured_state_mask); + needs_return_buffer, captured_state_mask, + needs_transition); g.generate(); code.log_section_sizes("nep_invoker_blob"); @@ -161,7 +166,7 @@ void DowncallStubGenerator::generate() { allocated_frame_size += arg_shuffle.out_arg_bytes(); // when we don't use a return buffer we need to spill the return value around our slow path calls - bool should_save_return_value = !_needs_return_buffer; + bool should_save_return_value = !_needs_return_buffer && _needs_transition; RegSpiller out_reg_spiller(_output_registers); int spill_rsp_offset = -1; @@ -190,7 +195,7 @@ void DowncallStubGenerator::generate() { _frame_size_slots += framesize_base + (allocated_frame_size >> LogBytesPerInt); assert(is_even(_frame_size_slots/2), "sp not 16-byte aligned"); - _oop_maps = new OopMapSet(); + _oop_maps = _needs_transition ? 
new OopMapSet() : nullptr; address start = __ pc(); __ enter(); @@ -200,16 +205,17 @@ void DowncallStubGenerator::generate() { _frame_complete = __ pc() - start; - address the_pc = __ pc(); + if (_needs_transition) { + __ block_comment("{ thread java2native"); + address the_pc = __ pc(); + __ set_last_Java_frame(rsp, rbp, (address)the_pc, rscratch1); + OopMap* map = new OopMap(_frame_size_slots, 0); + _oop_maps->add_gc_map(the_pc - start, map); - __ block_comment("{ thread java2native"); - __ set_last_Java_frame(rsp, rbp, (address)the_pc, rscratch1); - OopMap* map = new OopMap(_frame_size_slots, 0); - _oop_maps->add_gc_map(the_pc - start, map); - - // State transition - __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native); - __ block_comment("} thread java2native"); + // State transition + __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native); + __ block_comment("} thread java2native"); + } __ block_comment("{ argument shuffle"); arg_shuffle.generate(_masm, shuffle_reg, 0, _abi._shadow_space_bytes, locs); @@ -263,93 +269,95 @@ void DowncallStubGenerator::generate() { ////////////////////////////////////////////////////////////////////////////// - __ block_comment("{ thread native2java"); - __ restore_cpu_control_state_after_jni(rscratch1); - - __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans); - - // Force this write out before the read below - if (!UseSystemMemoryBarrier) { - __ membar(Assembler::Membar_mask_bits( - Assembler::LoadLoad | Assembler::LoadStore | - Assembler::StoreLoad | Assembler::StoreStore)); - } - Label L_after_safepoint_poll; Label L_safepoint_poll_slow_path; - - __ safepoint_poll(L_safepoint_poll_slow_path, r15_thread, true /* at_return */, false /* in_nmethod */); - __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0); - __ jcc(Assembler::notEqual, L_safepoint_poll_slow_path); - - __ bind(L_after_safepoint_poll); - - // change 
thread state - __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java); - - __ block_comment("reguard stack check"); Label L_reguard; Label L_after_reguard; - __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled); - __ jcc(Assembler::equal, L_reguard); - __ bind(L_after_reguard); + if (_needs_transition) { + __ block_comment("{ thread native2java"); + __ restore_cpu_control_state_after_jni(rscratch1); - __ reset_last_Java_frame(r15_thread, true); - __ block_comment("} thread native2java"); + __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans); + + // Force this write out before the read below + if (!UseSystemMemoryBarrier) { + __ membar(Assembler::Membar_mask_bits( + Assembler::LoadLoad | Assembler::LoadStore | + Assembler::StoreLoad | Assembler::StoreStore)); + } + + __ safepoint_poll(L_safepoint_poll_slow_path, r15_thread, true /* at_return */, false /* in_nmethod */); + __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0); + __ jcc(Assembler::notEqual, L_safepoint_poll_slow_path); + + __ bind(L_after_safepoint_poll); + + // change thread state + __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java); + + __ block_comment("reguard stack check"); + __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled); + __ jcc(Assembler::equal, L_reguard); + __ bind(L_after_reguard); + + __ reset_last_Java_frame(r15_thread, true); + __ block_comment("} thread native2java"); + } __ leave(); // required for proper stackwalking of RuntimeStub frame __ ret(0); ////////////////////////////////////////////////////////////////////////////// - __ block_comment("{ L_safepoint_poll_slow_path"); - __ bind(L_safepoint_poll_slow_path); - __ vzeroupper(); + if (_needs_transition) { + __ block_comment("{ L_safepoint_poll_slow_path"); + __ 
bind(L_safepoint_poll_slow_path); + __ vzeroupper(); - if (should_save_return_value) { - out_reg_spiller.generate_spill(_masm, spill_rsp_offset); - } + if (should_save_return_value) { + out_reg_spiller.generate_spill(_masm, spill_rsp_offset); + } - __ mov(c_rarg0, r15_thread); - __ mov(r12, rsp); // remember sp - __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows - __ andptr(rsp, -16); // align stack as required by ABI - __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); - __ mov(rsp, r12); // restore sp - __ reinit_heapbase(); + __ mov(c_rarg0, r15_thread); + __ mov(r12, rsp); // remember sp + __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows + __ andptr(rsp, -16); // align stack as required by ABI + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); + __ mov(rsp, r12); // restore sp + __ reinit_heapbase(); - if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_rsp_offset); - } + if (should_save_return_value) { + out_reg_spiller.generate_fill(_masm, spill_rsp_offset); + } - __ jmp(L_after_safepoint_poll); - __ block_comment("} L_safepoint_poll_slow_path"); + __ jmp(L_after_safepoint_poll); + __ block_comment("} L_safepoint_poll_slow_path"); ////////////////////////////////////////////////////////////////////////////// - __ block_comment("{ L_reguard"); - __ bind(L_reguard); - __ vzeroupper(); + __ block_comment("{ L_reguard"); + __ bind(L_reguard); + __ vzeroupper(); - if (should_save_return_value) { - out_reg_spiller.generate_spill(_masm, spill_rsp_offset); + if (should_save_return_value) { + out_reg_spiller.generate_spill(_masm, spill_rsp_offset); + } + + __ mov(r12, rsp); // remember sp + __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows + __ andptr(rsp, -16); // align stack as required by ABI + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); + __ mov(rsp, r12); 
// restore sp + __ reinit_heapbase(); + + if (should_save_return_value) { + out_reg_spiller.generate_fill(_masm, spill_rsp_offset); + } + + __ jmp(L_after_reguard); + + __ block_comment("} L_reguard"); } - - __ mov(r12, rsp); // remember sp - __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows - __ andptr(rsp, -16); // align stack as required by ABI - __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); - __ mov(rsp, r12); // restore sp - __ reinit_heapbase(); - - if (should_save_return_value) { - out_reg_spiller.generate_fill(_masm, spill_rsp_offset); - } - - __ jmp(L_after_reguard); - - __ block_comment("} L_reguard"); - ////////////////////////////////////////////////////////////////////////////// __ flush(); diff --git a/src/hotspot/cpu/x86/foreignGlobals_x86_32.cpp b/src/hotspot/cpu/x86/foreignGlobals_x86_32.cpp index 8a31955f4d1..3752bf577d5 100644 --- a/src/hotspot/cpu/x86/foreignGlobals_x86_32.cpp +++ b/src/hotspot/cpu/x86/foreignGlobals_x86_32.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,10 @@ class MacroAssembler; +bool ForeignGlobals::is_foreign_linker_supported() { + return false; +} + const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) { Unimplemented(); return {}; diff --git a/src/hotspot/cpu/x86/foreignGlobals_x86_64.cpp b/src/hotspot/cpu/x86/foreignGlobals_x86_64.cpp index 74afbe4fd61..8710d4f79f9 100644 --- a/src/hotspot/cpu/x86/foreignGlobals_x86_64.cpp +++ b/src/hotspot/cpu/x86/foreignGlobals_x86_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,10 @@ #include "runtime/sharedRuntime.hpp" #include "utilities/formatBuffer.hpp" +bool ForeignGlobals::is_foreign_linker_supported() { + return true; +} + bool ABIDescriptor::is_volatile_reg(Register reg) const { return _integer_argument_registers.contains(reg) || _integer_additional_volatile_registers.contains(reg); diff --git a/src/hotspot/cpu/x86/vm_version_x86.cpp b/src/hotspot/cpu/x86/vm_version_x86.cpp index b194f42e195..b4e9e721b5a 100644 --- a/src/hotspot/cpu/x86/vm_version_x86.cpp +++ b/src/hotspot/cpu/x86/vm_version_x86.cpp @@ -2079,11 +2079,14 @@ bool VM_Version::is_default_intel_cascade_lake() { return FLAG_IS_DEFAULT(UseAVX) && FLAG_IS_DEFAULT(MaxVectorSize) && UseAVX > 2 && - is_intel_skylake() && - _stepping >= 5; + is_intel_cascade_lake(); } #endif +bool VM_Version::is_intel_cascade_lake() { + return is_intel_skylake() && _stepping >= 5; +} + // avx3_threshold() sets the threshold at which 64-byte instructions are used // for implementing the array copy and clear operations. 
// The Intel platforms that supports the serialize instruction diff --git a/src/hotspot/cpu/x86/vm_version_x86.hpp b/src/hotspot/cpu/x86/vm_version_x86.hpp index 65f8c5b3cba..3074621229a 100644 --- a/src/hotspot/cpu/x86/vm_version_x86.hpp +++ b/src/hotspot/cpu/x86/vm_version_x86.hpp @@ -716,6 +716,8 @@ public: static bool is_default_intel_cascade_lake(); #endif + static bool is_intel_cascade_lake(); + static int avx3_threshold(); static bool is_intel_tsc_synched_at_init(); diff --git a/src/hotspot/cpu/x86/x86_32.ad b/src/hotspot/cpu/x86/x86_32.ad index 46e489f45e5..0b8bd0b2157 100644 --- a/src/hotspot/cpu/x86/x86_32.ad +++ b/src/hotspot/cpu/x86/x86_32.ad @@ -1243,18 +1243,30 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo if (src_first_rc == rc_stack && dst_first_rc == rc_kreg) { assert((src_first & 1) == 0 && src_first + 1 == src_second, "invalid register pair"); assert((dst_first & 1) == 0 && dst_first + 1 == dst_second, "invalid register pair"); - MacroAssembler _masm(cbuf); int offset = ra_->reg2offset(src_first); - __ kmov(as_KRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset)); + if (cbuf != nullptr) { + MacroAssembler _masm(cbuf); + __ kmov(as_KRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset)); +#ifndef PRODUCT + } else { + st->print("KMOV %s, [ESP + %d]", Matcher::regName[dst_first], offset); +#endif + } return 0; } if (src_first_rc == rc_kreg && dst_first_rc == rc_stack) { assert((src_first & 1) == 0 && src_first + 1 == src_second, "invalid register pair"); assert((dst_first & 1) == 0 && dst_first + 1 == dst_second, "invalid register pair"); - MacroAssembler _masm(cbuf); int offset = ra_->reg2offset(dst_first); - __ kmov(Address(rsp, offset), as_KRegister(Matcher::_regEncode[src_first])); + if (cbuf != nullptr) { + MacroAssembler _masm(cbuf); + __ kmov(Address(rsp, offset), as_KRegister(Matcher::_regEncode[src_first])); +#ifndef PRODUCT + } else { + st->print("KMOV [ESP + %d], %s", offset, 
Matcher::regName[src_first]); +#endif + } return 0; } @@ -1271,8 +1283,14 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo if (src_first_rc == rc_kreg && dst_first_rc == rc_kreg) { assert((src_first & 1) == 0 && src_first + 1 == src_second, "invalid register pair"); assert((dst_first & 1) == 0 && dst_first + 1 == dst_second, "invalid register pair"); - MacroAssembler _masm(cbuf); - __ kmov(as_KRegister(Matcher::_regEncode[dst_first]), as_KRegister(Matcher::_regEncode[src_first])); + if (cbuf != nullptr) { + MacroAssembler _masm(cbuf); + __ kmov(as_KRegister(Matcher::_regEncode[dst_first]), as_KRegister(Matcher::_regEncode[src_first])); +#ifndef PRODUCT + } else { + st->print("KMOV %s, %s", Matcher::regName[dst_first], Matcher::regName[src_first]); +#endif + } return 0; } diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad index 7c1eb99bf58..ff381e5e7a2 100644 --- a/src/hotspot/cpu/x86/x86_64.ad +++ b/src/hotspot/cpu/x86/x86_64.ad @@ -5374,7 +5374,7 @@ instruct loadD(regD dst, memory mem) // max = java.lang.Math.max(float a, float b) instruct maxF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp) %{ - predicate(UseAVX > 0 && !n->is_reduction()); + predicate(UseAVX > 0 && !SuperWord::is_reduction(n)); match(Set dst (MaxF a b)); effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp); format %{ @@ -5396,7 +5396,7 @@ instruct maxF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, %} instruct maxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRegI tmp, rFlagsReg cr) %{ - predicate(UseAVX > 0 && n->is_reduction()); + predicate(UseAVX > 0 && SuperWord::is_reduction(n)); match(Set dst (MaxF a b)); effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr); @@ -5410,7 +5410,7 @@ instruct maxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRe // max = java.lang.Math.max(double a, double b) instruct maxD_reg(legRegD dst, legRegD a, legRegD b, legRegD 
tmp, legRegD atmp, legRegD btmp) %{ - predicate(UseAVX > 0 && !n->is_reduction()); + predicate(UseAVX > 0 && !SuperWord::is_reduction(n)); match(Set dst (MaxD a b)); effect(USE a, USE b, TEMP atmp, TEMP btmp, TEMP tmp); format %{ @@ -5432,7 +5432,7 @@ instruct maxD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, %} instruct maxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRegL tmp, rFlagsReg cr) %{ - predicate(UseAVX > 0 && n->is_reduction()); + predicate(UseAVX > 0 && SuperWord::is_reduction(n)); match(Set dst (MaxD a b)); effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr); @@ -5446,7 +5446,7 @@ instruct maxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRe // min = java.lang.Math.min(float a, float b) instruct minF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp) %{ - predicate(UseAVX > 0 && !n->is_reduction()); + predicate(UseAVX > 0 && !SuperWord::is_reduction(n)); match(Set dst (MinF a b)); effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp); format %{ @@ -5468,7 +5468,7 @@ instruct minF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, %} instruct minF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRegI tmp, rFlagsReg cr) %{ - predicate(UseAVX > 0 && n->is_reduction()); + predicate(UseAVX > 0 && SuperWord::is_reduction(n)); match(Set dst (MinF a b)); effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr); @@ -5482,7 +5482,7 @@ instruct minF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRe // min = java.lang.Math.min(double a, double b) instruct minD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, legRegD btmp) %{ - predicate(UseAVX > 0 && !n->is_reduction()); + predicate(UseAVX > 0 && !SuperWord::is_reduction(n)); match(Set dst (MinD a b)); effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp); format %{ @@ -5504,7 +5504,7 @@ instruct minD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, %} 
instruct minD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRegL tmp, rFlagsReg cr) %{ - predicate(UseAVX > 0 && n->is_reduction()); + predicate(UseAVX > 0 && SuperWord::is_reduction(n)); match(Set dst (MinD a b)); effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr); @@ -13732,6 +13732,13 @@ instruct leaL_rReg_immI2_peep(rRegL dst, rRegL src, immI2 shift) ins_pipe(ialu_reg_reg); %} +// These peephole rules replace mov + I pairs (where I is one of {add, inc, dec, +// sal}) with lea instructions. The {add, sal} rules are beneficial in +// processors with at least partial ALU support for lea +// (supports_fast_2op_lea()), whereas the {inc, dec} rules are only generally +// beneficial for processors with full ALU support +// (VM_Version::supports_fast_3op_lea()) and Intel Cascade Lake. + peephole %{ peeppredicate(VM_Version::supports_fast_2op_lea()); @@ -13750,7 +13757,8 @@ peephole peephole %{ - peeppredicate(VM_Version::supports_fast_2op_lea()); + peeppredicate(VM_Version::supports_fast_3op_lea() || + VM_Version::is_intel_cascade_lake()); peepmatch (incI_rReg); peepprocedure (lea_coalesce_imm); peepreplace (leaI_rReg_immI_peep()); @@ -13758,7 +13766,8 @@ peephole peephole %{ - peeppredicate(VM_Version::supports_fast_2op_lea()); + peeppredicate(VM_Version::supports_fast_3op_lea() || + VM_Version::is_intel_cascade_lake()); peepmatch (decI_rReg); peepprocedure (lea_coalesce_imm); peepreplace (leaI_rReg_immI_peep()); @@ -13790,7 +13799,8 @@ peephole peephole %{ - peeppredicate(VM_Version::supports_fast_2op_lea()); + peeppredicate(VM_Version::supports_fast_3op_lea() || + VM_Version::is_intel_cascade_lake()); peepmatch (incL_rReg); peepprocedure (lea_coalesce_imm); peepreplace (leaL_rReg_immL32_peep()); @@ -13798,7 +13808,8 @@ peephole peephole %{ - peeppredicate(VM_Version::supports_fast_2op_lea()); + peeppredicate(VM_Version::supports_fast_3op_lea() || + VM_Version::is_intel_cascade_lake()); peepmatch (decL_rReg); peepprocedure (lea_coalesce_imm); 
peepreplace (leaL_rReg_immL32_peep()); diff --git a/src/hotspot/cpu/zero/continuationFreezeThaw_zero.inline.hpp b/src/hotspot/cpu/zero/continuationFreezeThaw_zero.inline.hpp index 83f163d2bd5..cb05ab1389e 100644 --- a/src/hotspot/cpu/zero/continuationFreezeThaw_zero.inline.hpp +++ b/src/hotspot/cpu/zero/continuationFreezeThaw_zero.inline.hpp @@ -70,10 +70,6 @@ template frame ThawBase::new_stack_frame(const frame& hf, frame& return frame(); } -inline void ThawBase::set_interpreter_frame_bottom(const frame& f, intptr_t* bottom) { - Unimplemented(); -} - inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) { Unimplemented(); } diff --git a/src/hotspot/cpu/zero/downcallLinker_zero.cpp b/src/hotspot/cpu/zero/downcallLinker_zero.cpp index 3f1241970f2..aabe49a3002 100644 --- a/src/hotspot/cpu/zero/downcallLinker_zero.cpp +++ b/src/hotspot/cpu/zero/downcallLinker_zero.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,8 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature, const GrowableArray& input_registers, const GrowableArray& output_registers, bool needs_return_buffer, - int captured_state_mask) { + int captured_state_mask, + bool needs_transition) { Unimplemented(); return nullptr; } diff --git a/src/hotspot/cpu/zero/foreignGlobals_zero.cpp b/src/hotspot/cpu/zero/foreignGlobals_zero.cpp index 7c35da7e3e0..2cd83af6b6e 100644 --- a/src/hotspot/cpu/zero/foreignGlobals_zero.cpp +++ b/src/hotspot/cpu/zero/foreignGlobals_zero.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,10 @@ class MacroAssembler; +bool ForeignGlobals::is_foreign_linker_supported() { + return false; +} + const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) { ShouldNotCallThis(); return {}; diff --git a/src/hotspot/cpu/zero/globalDefinitions_zero.hpp b/src/hotspot/cpu/zero/globalDefinitions_zero.hpp index 9db2060b8dd..271d95ee72c 100644 --- a/src/hotspot/cpu/zero/globalDefinitions_zero.hpp +++ b/src/hotspot/cpu/zero/globalDefinitions_zero.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright 2009, 2021, Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -32,7 +32,7 @@ #define SUPPORT_MONITOR_COUNT -#ifndef FFI_GO_CLOSURES +#ifdef __APPLE__ #define FFI_GO_CLOSURES 0 #endif diff --git a/src/hotspot/os/aix/osThread_aix.cpp b/src/hotspot/os/aix/osThread_aix.cpp index ba564899253..4049d6b58b7 100644 --- a/src/hotspot/os/aix/osThread_aix.cpp +++ b/src/hotspot/os/aix/osThread_aix.cpp @@ -34,7 +34,6 @@ #include "runtime/vmThread.hpp" void OSThread::pd_initialize() { - assert(this != nullptr, "check"); _thread_id = 0; _kernel_thread_id = 0; _siginfo = nullptr; diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp index a4c5a03b847..fe86adcc40a 100644 --- a/src/hotspot/os/aix/os_aix.cpp +++ b/src/hotspot/os/aix/os_aix.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2021 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -892,9 +892,9 @@ bool os::create_attached_thread(JavaThread* thread) { PosixSignals::hotspot_sigmask(thread); log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT - ", stack: " PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "k) ).", + ", stack: " PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "K) ).", os::current_thread_id(), (uintx) kernel_thread_id, - p2i(thread->stack_base()), p2i(thread->stack_end()), thread->stack_size()); + p2i(thread->stack_base()), p2i(thread->stack_end()), thread->stack_size() / K); return true; } @@ -1077,7 +1077,17 @@ bool os::dll_address_to_library_name(address addr, char* buf, return false; } - return AixSymbols::get_module_name(addr, buf, buflen); + address base = nullptr; + if (!AixSymbols::get_module_name_and_base(addr, buf, buflen, &base) + || base == nullptr) { + return false; + } + assert(addr >= base && addr <= base + INT_MAX, "address not in library text range"); + if (offset != nullptr) { + *offset = addr - base; + } + + return true; } // Loads .dll/.so and in case of error it checks if .dll/.so was built diff --git a/src/hotspot/os/aix/porting_aix.cpp b/src/hotspot/os/aix/porting_aix.cpp index 1d6ce26f5a1..ab84dc81027 100644 --- a/src/hotspot/os/aix/porting_aix.cpp +++ b/src/hotspot/os/aix/porting_aix.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2019 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -278,6 +278,24 @@ bool AixSymbols::get_module_name(address pc, return false; } +bool AixSymbols::get_module_name_and_base(address pc, + char* p_name, size_t namelen, + address* p_base) { + + if (p_base && p_name && namelen > 0) { + p_name[0] = '\0'; + loaded_module_t lm; + if (LoadedLibraries::find_for_text_address(pc, &lm)) { + strncpy(p_name, lm.shortname, namelen); + p_name[namelen - 1] = '\0'; + *p_base = (address) lm.text; + return true; + } + } + + return false; +} + // Special implementation of dladdr for Aix based on LoadedLibraries // Note: dladdr returns non-zero for ok, 0 for error! // Note: dladdr is not posix, but a non-standard GNU extension. So this tries to diff --git a/src/hotspot/os/aix/porting_aix.hpp b/src/hotspot/os/aix/porting_aix.hpp index 5c02d0efa88..2c4c0e002a8 100644 --- a/src/hotspot/os/aix/porting_aix.hpp +++ b/src/hotspot/os/aix/porting_aix.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2015 SAP SE. All rights reserved. + * Copyright (c) 2012, 2023 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -83,6 +83,14 @@ class AixSymbols { char* p_name, size_t namelen // [out] module name ); + // Given a program counter, returns the name of the module (library and module) the pc points to + // and the base address of the module the pc points to + static bool get_module_name_and_base ( + address pc, // [in] program counter + char* p_name, size_t namelen, // [out] module name + address* p_base // [out] base address of library + ); + }; class AixNativeCallstack { diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp index c8af751250b..f4799e76a32 100644 --- a/src/hotspot/os/bsd/os_bsd.cpp +++ b/src/hotspot/os/bsd/os_bsd.cpp @@ -708,9 +708,9 @@ bool os::create_attached_thread(JavaThread* thread) { PosixSignals::hotspot_sigmask(thread); log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT - ", stack: " PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "k) ).", + ", stack: " PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "K) ).", os::current_thread_id(), (uintx) pthread_self(), - p2i(thread->stack_base()), p2i(thread->stack_end()), thread->stack_size()); + p2i(thread->stack_base()), p2i(thread->stack_end()), thread->stack_size() / K); return true; } diff --git a/src/hotspot/os/linux/osThread_linux.cpp b/src/hotspot/os/linux/osThread_linux.cpp index eb4d062511c..9c77cb32f6d 100644 --- a/src/hotspot/os/linux/osThread_linux.cpp +++ b/src/hotspot/os/linux/osThread_linux.cpp @@ -30,7 +30,6 @@ #include void OSThread::pd_initialize() { - assert(this != nullptr, "check"); _thread_id = 0; _pthread_id = 0; _siginfo = nullptr; diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp index 86de46df3d1..b6d2721343c 100644 --- a/src/hotspot/os/linux/os_linux.cpp +++ b/src/hotspot/os/linux/os_linux.cpp @@ -760,20 +760,14 @@ static void *thread_native_entry(Thread *thread) { // As a workaround, we call a private but assumed-stable glibc 
function, // __pthread_get_minstack() to obtain the minstack size and derive the // static TLS size from it. We then increase the user requested stack -// size by this TLS size. +// size by this TLS size. The same function is used to determine whether +// adjustStackSizeForGuardPages() needs to be true. // // Due to compatibility concerns, this size adjustment is opt-in and // controlled via AdjustStackSizeForTLS. typedef size_t (*GetMinStack)(const pthread_attr_t *attr); -GetMinStack _get_minstack_func = nullptr; - -static void get_minstack_init() { - _get_minstack_func = - (GetMinStack)dlsym(RTLD_DEFAULT, "__pthread_get_minstack"); - log_info(os, thread)("Lookup of __pthread_get_minstack %s", - _get_minstack_func == nullptr ? "failed" : "succeeded"); -} +GetMinStack _get_minstack_func = nullptr; // Initialized via os::init_2() // Returns the size of the static TLS area glibc puts on thread stacks. // The value is cached on first use, which occurs when the first thread @@ -786,8 +780,8 @@ static size_t get_static_tls_area_size(const pthread_attr_t *attr) { // Remove non-TLS area size included in minstack size returned // by __pthread_get_minstack() to get the static TLS size. - // In glibc before 2.27, minstack size includes guard_size. - // In glibc 2.27 and later, guard_size is automatically added + // If adjustStackSizeForGuardPages() is true, minstack size includes + // guard_size. Otherwise guard_size is automatically added // to the stack size by pthread_create and is no longer included // in minstack size. In both cases, the guard_size is taken into // account, so there is no need to adjust the result for that. @@ -816,6 +810,42 @@ static size_t get_static_tls_area_size(const pthread_attr_t *attr) { return tls_size; } +// In glibc versions prior to 2.27 the guard size mechanism +// was not implemented properly. The POSIX standard requires adding +// the size of the guard pages to the stack size, instead glibc +// took the space out of 'stacksize'. 
Thus we need to adapt the requested +// stack_size by the size of the guard pages to mimic proper behaviour. +// The fix in glibc 2.27 has now been backported to numerous earlier +// glibc versions so we need to do a dynamic runtime check. +static bool _adjustStackSizeForGuardPages = true; +bool os::Linux::adjustStackSizeForGuardPages() { + return _adjustStackSizeForGuardPages; +} + +#ifdef __GLIBC__ +static void init_adjust_stacksize_for_guard_pages() { + assert(_get_minstack_func == nullptr, "initialization error"); + _get_minstack_func =(GetMinStack)dlsym(RTLD_DEFAULT, "__pthread_get_minstack"); + log_info(os, thread)("Lookup of __pthread_get_minstack %s", + _get_minstack_func == nullptr ? "failed" : "succeeded"); + + if (_get_minstack_func != nullptr) { + pthread_attr_t attr; + pthread_attr_init(&attr); + size_t min_stack = _get_minstack_func(&attr); + size_t guard = 16 * K; // Actual value doesn't matter as it is not examined + pthread_attr_setguardsize(&attr, guard); + size_t min_stack2 = _get_minstack_func(&attr); + pthread_attr_destroy(&attr); + // If the minimum stack size changed when we added the guard page space + // then we need to perform the adjustment. + _adjustStackSizeForGuardPages = (min_stack2 != min_stack); + log_info(os)("Glibc stack size guard page adjustment is %sneeded", + _adjustStackSizeForGuardPages ? "" : "not "); + } +} +#endif // GLIBC + bool os::create_thread(Thread* thread, ThreadType thr_type, size_t req_stack_size) { assert(thread->osthread() == nullptr, "caller responsible"); @@ -841,23 +871,18 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, // Calculate stack size if it's not specified by caller. size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size); - // In glibc versions prior to 2.27 the guard size mechanism - // is not implemented properly. The posix standard requires adding - // the size of the guard pages to the stack size, instead Linux - // takes the space out of 'stacksize'. 
Thus we adapt the requested - // stack_size by the size of the guard pages to mimic proper - // behaviour. However, be careful not to end up with a size - // of zero due to overflow. Don't add the guard page in that case. size_t guard_size = os::Linux::default_guard_size(thr_type); // Configure glibc guard page. Must happen before calling // get_static_tls_area_size(), which uses the guard_size. pthread_attr_setguardsize(&attr, guard_size); + // Apply stack size adjustments if needed. However, be careful not to end up + // with a size of zero due to overflow. Don't add the adjustment in that case. size_t stack_adjust_size = 0; if (AdjustStackSizeForTLS) { // Adjust the stack_size for on-stack TLS - see get_static_tls_area_size(). stack_adjust_size += get_static_tls_area_size(&attr); - } else { + } else if (os::Linux::adjustStackSizeForGuardPages()) { stack_adjust_size += guard_size; } @@ -1000,9 +1025,9 @@ bool os::create_attached_thread(JavaThread* thread) { PosixSignals::hotspot_sigmask(thread); log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT - ", stack: " PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "k) ).", + ", stack: " PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "K) ).", os::current_thread_id(), (uintx) pthread_self(), - p2i(thread->stack_base()), p2i(thread->stack_end()), thread->stack_size()); + p2i(thread->stack_base()), p2i(thread->stack_end()), thread->stack_size() / K); return true; } @@ -1326,7 +1351,7 @@ void os::Linux::fast_thread_clock_init() { // Note, that some kernels may support the current thread // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks // returned by the pthread_getcpuclockid(). - // If the fast Posix clocks are supported then the clock_getres() + // If the fast POSIX clocks are supported then the clock_getres() // must return at least tp.tv_sec == 0 which means a resolution // better than 1 sec. This is extra check for reliability. 
@@ -4499,10 +4524,6 @@ jint os::init_2(void) { return JNI_ERR; } - if (AdjustStackSizeForTLS) { - get_minstack_init(); - } - // Check and sets minimum stack sizes against command line options if (set_minimum_stack_sizes() == JNI_ERR) { return JNI_ERR; @@ -4525,6 +4546,11 @@ jint os::init_2(void) { log_info(os)("HotSpot is running with %s, %s", Linux::libc_version(), Linux::libpthread_version()); +#ifdef __GLIBC__ + // Check if we need to adjust the stack size for glibc guard pages. + init_adjust_stacksize_for_guard_pages(); +#endif + if (UseNUMA || UseNUMAInterleaving) { Linux::numa_init(); } @@ -5244,9 +5270,9 @@ bool os::start_debugging(char *buf, int buflen) { // // ** P1 (aka bottom) and size (P2 = P1 - size) are the address and stack size // returned from pthread_attr_getstack(). -// ** Due to NPTL implementation error, linux takes the glibc guard page out -// of the stack size given in pthread_attr. We work around this for -// threads created by the VM. (We adapt bottom to be P1 and size accordingly.) +// ** If adjustStackSizeForGuardPages() is true the guard pages have been taken +// out of the stack size given in pthread_attr. We work around this for +// threads created by the VM. We adjust bottom to be P1 and size accordingly. // #ifndef ZERO static void current_stack_region(address * bottom, size_t * size) { @@ -5273,14 +5299,15 @@ static void current_stack_region(address * bottom, size_t * size) { fatal("Cannot locate current stack attributes!"); } - // Work around NPTL stack guard error. 
- size_t guard_size = 0; - rslt = pthread_attr_getguardsize(&attr, &guard_size); - if (rslt != 0) { - fatal("pthread_attr_getguardsize failed with error = %d", rslt); + if (os::Linux::adjustStackSizeForGuardPages()) { + size_t guard_size = 0; + rslt = pthread_attr_getguardsize(&attr, &guard_size); + if (rslt != 0) { + fatal("pthread_attr_getguardsize failed with error = %d", rslt); + } + *bottom += guard_size; + *size -= guard_size; } - *bottom += guard_size; - *size -= guard_size; pthread_attr_destroy(&attr); diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp index 20639e4031f..e33a1af1072 100644 --- a/src/hotspot/os/linux/os_linux.hpp +++ b/src/hotspot/os/linux/os_linux.hpp @@ -149,6 +149,8 @@ class os::Linux { // Return default guard size for the specified thread type static size_t default_guard_size(os::ThreadType thr_type); + static bool adjustStackSizeForGuardPages(); // See comments in os_linux.cpp + static void capture_initial_stack(size_t max_size); // Stack overflow handling diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp index 9bd67526600..b12a1160ac3 100644 --- a/src/hotspot/os/posix/os_posix.cpp +++ b/src/hotspot/os/posix/os_posix.cpp @@ -905,8 +905,8 @@ char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_a int detachstate = 0; pthread_attr_getstacksize(attr, &stack_size); pthread_attr_getguardsize(attr, &guard_size); - // Work around linux NPTL implementation error, see also os::create_thread() in os_linux.cpp. - LINUX_ONLY(stack_size -= guard_size); + // Work around glibc stack guard issue, see os::create_thread() in os_linux.cpp. 
+ LINUX_ONLY(if (os::Linux::adjustStackSizeForGuardPages()) stack_size -= guard_size;) pthread_attr_getdetachstate(attr, &detachstate); jio_snprintf(buf, buflen, "stacksize: " SIZE_FORMAT "k, guardsize: " SIZE_FORMAT "k, %s", stack_size / K, guard_size / K, @@ -1538,6 +1538,12 @@ void PlatformEvent::park() { // AKA "down()" } int PlatformEvent::park(jlong millis) { + return park_nanos(millis_to_nanos_bounded(millis)); +} + +int PlatformEvent::park_nanos(jlong nanos) { + assert(nanos > 0, "nanos are positive"); + // Transitions for _event: // -1 => -1 : illegal // 1 => 0 : pass - return immediately @@ -1557,7 +1563,7 @@ int PlatformEvent::park(jlong millis) { if (v == 0) { // Do this the hard way by blocking ... struct timespec abst; - to_abstime(&abst, millis_to_nanos_bounded(millis), false, false); + to_abstime(&abst, nanos, false, false); int ret = OS_TIMEOUT; int status = pthread_mutex_lock(_mutex); diff --git a/src/hotspot/os/posix/park_posix.hpp b/src/hotspot/os/posix/park_posix.hpp index 36aefce23b6..c0e3bd48db1 100644 --- a/src/hotspot/os/posix/park_posix.hpp +++ b/src/hotspot/os/posix/park_posix.hpp @@ -54,6 +54,7 @@ class PlatformEvent : public CHeapObj { PlatformEvent(); void park(); int park(jlong millis); + int park_nanos(jlong nanos); void unpark(); // Use caution with reset() and fired() -- they may require MEMBARs diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp index 686e27add56..3a5dade91af 100644 --- a/src/hotspot/os/windows/os_windows.cpp +++ b/src/hotspot/os/windows/os_windows.cpp @@ -621,9 +621,9 @@ bool os::create_attached_thread(JavaThread* thread) { thread->set_osthread(osthread); log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", stack: " - PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "k) ).", + PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "K) ).", os::current_thread_id(), p2i(thread->stack_base()), - p2i(thread->stack_end()), thread->stack_size()); + p2i(thread->stack_end()), 
thread->stack_size() / K); return true; } @@ -5249,6 +5249,21 @@ class HighResolutionInterval : public CHeapObj { // explicit "PARKED" == 01b and "SIGNALED" == 10b bits. // +int PlatformEvent::park_nanos(jlong nanos) { + assert(nanos > 0, "nanos are positive"); + + // Windows timers are still quite unpredictable to handle sub-millisecond granularity. + // Instead of implementing sub-millisecond sleeps, fall back to the usual behavior of + // rounding up any excess requested nanos to the full millisecond. This is how + // Thread.sleep(millis, nanos) has always behaved with only millisecond granularity. + jlong millis = nanos / NANOSECS_PER_MILLISEC; + if (nanos > millis * NANOSECS_PER_MILLISEC) { + millis++; + } + assert(millis > 0, "should always be positive"); + return park(millis); +} + int PlatformEvent::park(jlong Millis) { // Transitions for _Event: // -1 => -1 : illegal diff --git a/src/hotspot/os/windows/park_windows.hpp b/src/hotspot/os/windows/park_windows.hpp index 3754201799d..41bf18b39f9 100644 --- a/src/hotspot/os/windows/park_windows.hpp +++ b/src/hotspot/os/windows/park_windows.hpp @@ -48,9 +48,10 @@ class PlatformEvent : public CHeapObj { // Exercise caution using reset() and fired() - they may require MEMBARs void reset() { _Event = 0 ; } int fired() { return _Event; } - void park () ; - void unpark () ; - int park (jlong millis) ; + void park(); + void unpark(); + int park(jlong millis); + int park_nanos(jlong nanos); }; class PlatformParker { diff --git a/src/hotspot/share/adlc/main.cpp b/src/hotspot/share/adlc/main.cpp index dce3f2309f8..ff379809b0c 100644 --- a/src/hotspot/share/adlc/main.cpp +++ b/src/hotspot/share/adlc/main.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -271,6 +271,7 @@ int main(int argc, char *argv[]) AD.addInclude(AD._DFA_file, "opto/narrowptrnode.hpp"); AD.addInclude(AD._DFA_file, "opto/opcodes.hpp"); AD.addInclude(AD._DFA_file, "opto/convertnode.hpp"); + AD.addInclude(AD._DFA_file, "opto/superword.hpp"); AD.addInclude(AD._DFA_file, "utilities/powerOfTwo.hpp"); // Make sure each .cpp file starts with include lines: diff --git a/src/hotspot/share/asm/codeBuffer.hpp b/src/hotspot/share/asm/codeBuffer.hpp index 616265ca6c0..029691c95fd 100644 --- a/src/hotspot/share/asm/codeBuffer.hpp +++ b/src/hotspot/share/asm/codeBuffer.hpp @@ -35,6 +35,11 @@ #include "utilities/resizeableResourceHash.hpp" #include "utilities/macros.hpp" +template +static inline void put_native(address p, T x) { + memcpy((void*)p, &x, sizeof x); +} + class PhaseCFG; class Compile; class BufferBlob; @@ -218,7 +223,10 @@ class CodeSection { set_end(curr); } - void emit_int16(uint16_t x) { *((uint16_t*) end()) = x; set_end(end() + sizeof(uint16_t)); } + template + void emit_native(T x) { put_native(end(), x); set_end(end() + sizeof x); } + + void emit_int16(uint16_t x) { emit_native(x); } void emit_int16(uint8_t x1, uint8_t x2) { address curr = end(); *((uint8_t*) curr++) = x1; @@ -234,11 +242,7 @@ class CodeSection { set_end(curr); } - void emit_int32(uint32_t x) { - address curr = end(); - *((uint32_t*) curr) = x; - set_end(curr + sizeof(uint32_t)); - } + void emit_int32(uint32_t x) { emit_native(x); } void emit_int32(uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4) { address curr = end(); *((uint8_t*) curr++) = x1; @@ -248,11 +252,10 @@ class CodeSection { set_end(curr); } - void emit_int64( uint64_t x) { *((uint64_t*) end()) = x; set_end(end() + sizeof(uint64_t)); } - - void emit_float( jfloat x) { *((jfloat*) end()) = x; set_end(end() + sizeof(jfloat)); } - void emit_double(jdouble x) { *((jdouble*) end()) = x; set_end(end() + sizeof(jdouble)); } - void 
emit_address(address x) { *((address*) end()) = x; set_end(end() + sizeof(address)); } + void emit_int64(uint64_t x) { emit_native(x); } + void emit_float(jfloat x) { emit_native(x); } + void emit_double(jdouble x) { emit_native(x); } + void emit_address(address x) { emit_native(x); } // Share a scratch buffer for relocinfo. (Hacky; saves a resource allocation.) void initialize_shared_locs(relocInfo* buf, int length); diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp index f69596dd41e..1ab95f889da 100644 --- a/src/hotspot/share/cds/archiveBuilder.cpp +++ b/src/hotspot/share/cds/archiveBuilder.cpp @@ -161,8 +161,7 @@ ArchiveBuilder::ArchiveBuilder() : _ro_src_objs(), _src_obj_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE), _buffered_to_src_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE), - _total_closed_heap_region_size(0), - _total_open_heap_region_size(0), + _total_heap_region_size(0), _estimated_metaspaceobj_bytes(0), _estimated_hashtable_bytes(0) { @@ -331,7 +330,7 @@ address ArchiveBuilder::reserve_buffer() { ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), os::vm_page_size()); if (!rs.is_reserved()) { log_error(cds)("Failed to reserve " SIZE_FORMAT " bytes of output buffer.", buffer_size); - os::_exit(0); + MetaspaceShared::unrecoverable_writing_error(); } // buffer_bottom is the lowest address of the 2 core regions (rw, ro) when @@ -381,7 +380,7 @@ address ArchiveBuilder::reserve_buffer() { log_error(cds)("my_archive_requested_top = " INTPTR_FORMAT, p2i(my_archive_requested_top)); log_error(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is too high. 
" "Please rerun java -Xshare:dump with a lower value", p2i(_requested_static_archive_bottom)); - os::_exit(0); + MetaspaceShared::unrecoverable_writing_error(); } if (DumpSharedSpaces) { @@ -1051,42 +1050,40 @@ class ArchiveBuilder::CDSMapLogger : AllStatic { } #if INCLUDE_CDS_JAVA_HEAP - // open and closed archive regions - static void log_heap_regions(const char* which, GrowableArray *regions) { - for (int i = 0; i < regions->length(); i++) { - address start = address(regions->at(i).start()); - address end = address(regions->at(i).end()); - log_region(which, start, end, to_requested(start)); + static void log_heap_region(ArchiveHeapInfo* heap_info) { + MemRegion r = heap_info->memregion(); + address start = address(r.start()); + address end = address(r.end()); + log_region("heap", start, end, to_requested(start)); - while (start < end) { - size_t byte_size; - oop original_oop = ArchiveHeapWriter::buffered_addr_to_source_obj(start); - if (original_oop != nullptr) { - ResourceMark rm; - log_info(cds, map)(PTR_FORMAT ": @@ Object %s", - p2i(to_requested(start)), original_oop->klass()->external_name()); - byte_size = original_oop->size() * BytesPerWord; - } else if (start == ArchiveHeapWriter::buffered_heap_roots_addr()) { - // HeapShared::roots() is copied specially so it doesn't exist in - // HeapShared::OriginalObjectTable. See HeapShared::copy_roots(). 
- log_info(cds, map)(PTR_FORMAT ": @@ Object HeapShared::roots (ObjArray)", - p2i(to_requested(start))); - byte_size = ArchiveHeapWriter::heap_roots_word_size() * BytesPerWord; - } else { - // We have reached the end of the region - break; - } - address oop_end = start + byte_size; - log_data(start, oop_end, to_requested(start), /*is_heap=*/true); - start = oop_end; - } - if (start < end) { + while (start < end) { + size_t byte_size; + oop original_oop = ArchiveHeapWriter::buffered_addr_to_source_obj(start); + if (original_oop != nullptr) { + ResourceMark rm; + log_info(cds, map)(PTR_FORMAT ": @@ Object %s", + p2i(to_requested(start)), original_oop->klass()->external_name()); + byte_size = original_oop->size() * BytesPerWord; + } else if (start == ArchiveHeapWriter::buffered_heap_roots_addr()) { + // HeapShared::roots() is copied specially so it doesn't exist in + // HeapShared::OriginalObjectTable. See HeapShared::copy_roots(). + log_info(cds, map)(PTR_FORMAT ": @@ Object HeapShared::roots (ObjArray)", + p2i(to_requested(start))); + byte_size = ArchiveHeapWriter::heap_roots_word_size() * BytesPerWord; + } else { + // We have reached the end of the region, but have some unused space + // at the end. 
log_info(cds, map)(PTR_FORMAT ": @@ Unused heap space " SIZE_FORMAT " bytes", p2i(to_requested(start)), size_t(end - start)); log_data(start, end, to_requested(start), /*is_heap=*/true); + break; } + address oop_end = start + byte_size; + log_data(start, oop_end, to_requested(start), /*is_heap=*/true); + start = oop_end; } } + static address to_requested(address p) { return ArchiveHeapWriter::buffered_addr_to_requested_addr(p); } @@ -1118,8 +1115,7 @@ class ArchiveBuilder::CDSMapLogger : AllStatic { public: static void log(ArchiveBuilder* builder, FileMapInfo* mapinfo, - GrowableArray *closed_heap_regions, - GrowableArray *open_heap_regions, + ArchiveHeapInfo* heap_info, char* bitmap, size_t bitmap_size_in_bytes) { log_info(cds, map)("%s CDS archive map for %s", DumpSharedSpaces ? "Static" : "Dynamic", mapinfo->full_path()); @@ -1140,11 +1136,8 @@ public: log_data((address)bitmap, bitmap_end, 0); #if INCLUDE_CDS_JAVA_HEAP - if (closed_heap_regions != nullptr) { - log_heap_regions("closed heap region", closed_heap_regions); - } - if (open_heap_regions != nullptr) { - log_heap_regions("open heap region", open_heap_regions); + if (heap_info->is_used()) { + log_heap_region(heap_info); } #endif @@ -1161,11 +1154,7 @@ void ArchiveBuilder::clean_up_src_obj_table() { _src_obj_table.iterate(&cleaner); } -void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, - GrowableArray* closed_heap_regions, - GrowableArray* open_heap_regions, - GrowableArray* closed_heap_bitmaps, - GrowableArray* open_heap_bitmaps) { +void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, ArchiveHeapInfo* heap_info) { // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with // MetaspaceShared::n_regions (internal to hotspot). 
assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity"); @@ -1174,23 +1163,14 @@ void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false); size_t bitmap_size_in_bytes; - char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_heap_bitmaps, open_heap_bitmaps, + char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), heap_info, bitmap_size_in_bytes); - if (closed_heap_regions != nullptr) { - _total_closed_heap_region_size = mapinfo->write_heap_regions( - closed_heap_regions, - closed_heap_bitmaps, - MetaspaceShared::first_closed_heap_region, - MetaspaceShared::max_num_closed_heap_regions); - _total_open_heap_region_size = mapinfo->write_heap_regions( - open_heap_regions, - open_heap_bitmaps, - MetaspaceShared::first_open_heap_region, - MetaspaceShared::max_num_open_heap_regions); + if (heap_info->is_used()) { + _total_heap_region_size = mapinfo->write_heap_region(heap_info); } - print_region_stats(mapinfo, closed_heap_regions, open_heap_regions); + print_region_stats(mapinfo, heap_info); mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address()); mapinfo->set_header_crc(mapinfo->compute_header_crc()); @@ -1204,7 +1184,7 @@ void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, } if (log_is_enabled(Info, cds, map)) { - CDSMapLogger::log(this, mapinfo, closed_heap_regions, open_heap_regions, + CDSMapLogger::log(this, mapinfo, heap_info, bitmap, bitmap_size_in_bytes); } CDS_JAVA_HEAP_ONLY(HeapShared::destroy_archived_object_cache()); @@ -1215,20 +1195,16 @@ void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegi mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec); } -void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo, - GrowableArray* closed_heap_regions, - GrowableArray* open_heap_regions) { +void 
ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo, ArchiveHeapInfo* heap_info) { // Print statistics of all the regions const size_t bitmap_used = mapinfo->region_at(MetaspaceShared::bm)->used(); const size_t bitmap_reserved = mapinfo->region_at(MetaspaceShared::bm)->used_aligned(); const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() + bitmap_reserved + - _total_closed_heap_region_size + - _total_open_heap_region_size; + _total_heap_region_size; const size_t total_bytes = _ro_region.used() + _rw_region.used() + bitmap_used + - _total_closed_heap_region_size + - _total_open_heap_region_size; + _total_heap_region_size; const double total_u_perc = percent_of(total_bytes, total_reserved); _rw_region.print(total_reserved); @@ -1236,30 +1212,25 @@ void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo, print_bitmap_region_stats(bitmap_used, total_reserved); - if (closed_heap_regions != nullptr) { - print_heap_region_stats(closed_heap_regions, "ca", total_reserved); - print_heap_region_stats(open_heap_regions, "oa", total_reserved); + if (heap_info->is_used()) { + print_heap_region_stats(heap_info, total_reserved); } - log_debug(cds)("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]", + log_debug(cds)("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]", total_bytes, total_reserved, total_u_perc); } void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) { - log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]", + log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]", size, size/double(total_size)*100.0, size); } -void ArchiveBuilder::print_heap_region_stats(GrowableArray* regions, - const char *name, size_t total_size) { - int arr_len = regions == nullptr ? 
0 : regions->length(); - for (int i = 0; i < arr_len; i++) { - char* start = (char*)regions->at(i).start(); - size_t size = regions->at(i).byte_size(); - char* top = start + size; - log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT, - name, i, size, size/double(total_size)*100.0, size, p2i(start)); - } +void ArchiveBuilder::print_heap_region_stats(ArchiveHeapInfo *info, size_t total_size) { + char* start = info->start(); + size_t size = info->byte_size(); + char* top = start + size; + log_debug(cds)("hp space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT, + size, size/double(total_size)*100.0, size, p2i(start)); } void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) { @@ -1269,8 +1240,8 @@ void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) _rw_region.print_out_of_space_msg(name, needed_bytes); _ro_region.print_out_of_space_msg(name, needed_bytes); - vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name), - "Please reduce the number of shared classes."); + log_error(cds)("Unable to allocate from '%s' region: Please reduce the number of shared classes.", name); + MetaspaceShared::unrecoverable_writing_error(); } diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp index 9d39892fdc4..222a13e660a 100644 --- a/src/hotspot/share/cds/archiveBuilder.hpp +++ b/src/hotspot/share/cds/archiveBuilder.hpp @@ -36,7 +36,7 @@ #include "utilities/resizeableResourceHash.hpp" #include "utilities/resourceHash.hpp" -struct ArchiveHeapBitmapInfo; +class ArchiveHeapInfo; class CHeapBitMap; class FileMapInfo; class Klass; @@ -234,15 +234,11 @@ private: // statistics DumpAllocStats _alloc_stats; - size_t _total_closed_heap_region_size; - size_t _total_open_heap_region_size; + size_t _total_heap_region_size; - void 
print_region_stats(FileMapInfo *map_info, - GrowableArray* closed_heap_regions, - GrowableArray* open_heap_regions); + void print_region_stats(FileMapInfo *map_info, ArchiveHeapInfo* heap_info); void print_bitmap_region_stats(size_t size, size_t total_size); - void print_heap_region_stats(GrowableArray* regions, - const char *name, size_t total_size); + void print_heap_region_stats(ArchiveHeapInfo* heap_info, size_t total_size); // For global access. static ArchiveBuilder* _current; @@ -403,11 +399,7 @@ public: void relocate_vm_classes(); void make_klasses_shareable(); void relocate_to_requested(); - void write_archive(FileMapInfo* mapinfo, - GrowableArray* closed_heap_regions, - GrowableArray* open_heap_regions, - GrowableArray* closed_heap_oopmaps, - GrowableArray* open_heap_oopmaps); + void write_archive(FileMapInfo* mapinfo, ArchiveHeapInfo* heap_info); void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec); diff --git a/src/hotspot/share/cds/archiveHeapLoader.cpp b/src/hotspot/share/cds/archiveHeapLoader.cpp index d674ab5e3c1..28d2718b75a 100644 --- a/src/hotspot/share/cds/archiveHeapLoader.cpp +++ b/src/hotspot/share/cds/archiveHeapLoader.cpp @@ -38,8 +38,7 @@ #if INCLUDE_CDS_JAVA_HEAP -bool ArchiveHeapLoader::_closed_regions_mapped = false; -bool ArchiveHeapLoader::_open_regions_mapped = false; +bool ArchiveHeapLoader::_is_mapped = false; bool ArchiveHeapLoader::_is_loaded = false; bool ArchiveHeapLoader::_narrow_oop_base_initialized = false; @@ -49,15 +48,9 @@ int ArchiveHeapLoader::_narrow_oop_shift; // Support for loaded heap. 
uintptr_t ArchiveHeapLoader::_loaded_heap_bottom = 0; uintptr_t ArchiveHeapLoader::_loaded_heap_top = 0; -uintptr_t ArchiveHeapLoader::_dumptime_base_0 = UINTPTR_MAX; -uintptr_t ArchiveHeapLoader::_dumptime_base_1 = UINTPTR_MAX; -uintptr_t ArchiveHeapLoader::_dumptime_base_2 = UINTPTR_MAX; -uintptr_t ArchiveHeapLoader::_dumptime_base_3 = UINTPTR_MAX; -uintptr_t ArchiveHeapLoader::_dumptime_top = 0; -intx ArchiveHeapLoader::_runtime_offset_0 = 0; -intx ArchiveHeapLoader::_runtime_offset_1 = 0; -intx ArchiveHeapLoader::_runtime_offset_2 = 0; -intx ArchiveHeapLoader::_runtime_offset_3 = 0; +uintptr_t ArchiveHeapLoader::_dumptime_base = UINTPTR_MAX; +uintptr_t ArchiveHeapLoader::_dumptime_top = 0; +intx ArchiveHeapLoader::_runtime_offset = 0; bool ArchiveHeapLoader::_loading_failed = false; // Support for mapped heap. @@ -84,10 +77,10 @@ void ArchiveHeapLoader::init_narrow_oop_decoding(address base, int shift) { _narrow_oop_shift = shift; } -void ArchiveHeapLoader::fixup_regions() { +void ArchiveHeapLoader::fixup_region() { FileMapInfo* mapinfo = FileMapInfo::current_info(); if (is_mapped()) { - mapinfo->fixup_mapped_heap_regions(); + mapinfo->fixup_mapped_heap_region(); } else if (_loading_failed) { fill_failed_loaded_heap(); } @@ -160,9 +153,8 @@ class PatchUncompressedEmbeddedPointers: public BitMapClosure { void ArchiveHeapLoader::patch_compressed_embedded_pointers(BitMapView bm, FileMapInfo* info, - FileMapRegion* map_region, MemRegion region) { - narrowOop dt_encoded_bottom = info->encoded_heap_region_dumptime_address(map_region); + narrowOop dt_encoded_bottom = info->encoded_heap_region_dumptime_address(); narrowOop rt_encoded_bottom = CompressedOops::encode_not_null(cast_to_oop(region.start())); log_info(cds)("patching heap embedded pointers: narrowOop 0x%8x -> 0x%8x", (uint)dt_encoded_bottom, (uint)rt_encoded_bottom); @@ -188,7 +180,6 @@ void ArchiveHeapLoader::patch_compressed_embedded_pointers(BitMapView bm, // Patch all the non-null pointers that are 
embedded in the archived heap objects // in this (mapped) region void ArchiveHeapLoader::patch_embedded_pointers(FileMapInfo* info, - FileMapRegion* map_region, MemRegion region, address oopmap, size_t oopmap_size_in_bits) { BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits); @@ -200,7 +191,7 @@ void ArchiveHeapLoader::patch_embedded_pointers(FileMapInfo* info, #endif if (UseCompressedOops) { - patch_compressed_embedded_pointers(bm, info, map_region, region); + patch_compressed_embedded_pointers(bm, info, region); } else { PatchUncompressedEmbeddedPointers patcher((oop*)region.start()); bm.iterate(&patcher); @@ -219,44 +210,15 @@ struct LoadedArchiveHeapRegion { uintptr_t _dumptime_base; // The dump-time (decoded) address of the first object in this region intx _runtime_offset; // If an object's dump time address P is within in this region, its // runtime address is P + _runtime_offset - - static int comparator(const void* a, const void* b) { - LoadedArchiveHeapRegion* reg_a = (LoadedArchiveHeapRegion*)a; - LoadedArchiveHeapRegion* reg_b = (LoadedArchiveHeapRegion*)b; - if (reg_a->_dumptime_base < reg_b->_dumptime_base) { - return -1; - } else if (reg_a->_dumptime_base == reg_b->_dumptime_base) { - return 0; - } else { - return 1; - } - } - uintptr_t top() { return _dumptime_base + _region_size; } }; -void ArchiveHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_regions, - int num_loaded_regions) { - _dumptime_base_0 = loaded_regions[0]._dumptime_base; - _dumptime_base_1 = loaded_regions[1]._dumptime_base; - _dumptime_base_2 = loaded_regions[2]._dumptime_base; - _dumptime_base_3 = loaded_regions[3]._dumptime_base; - _dumptime_top = loaded_regions[num_loaded_regions-1].top(); - - _runtime_offset_0 = loaded_regions[0]._runtime_offset; - _runtime_offset_1 = loaded_regions[1]._runtime_offset; - _runtime_offset_2 = loaded_regions[2]._runtime_offset; - _runtime_offset_3 = loaded_regions[3]._runtime_offset; - - assert(2 <= 
num_loaded_regions && num_loaded_regions <= 4, "must be"); - if (num_loaded_regions < 4) { - _dumptime_base_3 = UINTPTR_MAX; - } - if (num_loaded_regions < 3) { - _dumptime_base_2 = UINTPTR_MAX; - } +void ArchiveHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_region) { + _dumptime_base = loaded_region->_dumptime_base; + _dumptime_top = loaded_region->top(); + _runtime_offset = loaded_region->_runtime_offset; } bool ArchiveHeapLoader::can_load() { @@ -267,36 +229,18 @@ bool ArchiveHeapLoader::can_load() { return Universe::heap()->can_load_archived_objects(); } -template -class PatchLoadedRegionPointers: public BitMapClosure { +class ArchiveHeapLoader::PatchLoadedRegionPointers: public BitMapClosure { narrowOop* _start; - intx _offset_0; - intx _offset_1; - intx _offset_2; - intx _offset_3; - uintptr_t _base_0; - uintptr_t _base_1; - uintptr_t _base_2; - uintptr_t _base_3; + intx _offset; + uintptr_t _base; uintptr_t _top; - static_assert(MetaspaceShared::max_num_heap_regions == 4, "can't handle more than 4 regions"); - static_assert(NUM_LOADED_REGIONS >= 2, "we have at least 2 loaded regions"); - static_assert(NUM_LOADED_REGIONS <= 4, "we have at most 4 loaded regions"); - public: - PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_regions) + PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_region) : _start(start), - _offset_0(loaded_regions[0]._runtime_offset), - _offset_1(loaded_regions[1]._runtime_offset), - _offset_2(loaded_regions[2]._runtime_offset), - _offset_3(loaded_regions[3]._runtime_offset), - _base_0(loaded_regions[0]._dumptime_base), - _base_1(loaded_regions[1]._dumptime_base), - _base_2(loaded_regions[2]._dumptime_base), - _base_3(loaded_regions[3]._dumptime_base) { - _top = loaded_regions[NUM_LOADED_REGIONS-1].top(); - } + _offset(loaded_region->_runtime_offset), + _base(loaded_region->_dumptime_base), + _top(loaded_region->top()) {} bool do_bit(size_t offset) { 
assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented"); @@ -304,138 +248,94 @@ class PatchLoadedRegionPointers: public BitMapClosure { narrowOop v = *p; assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time"); uintptr_t o = cast_from_oop(ArchiveHeapLoader::decode_from_archive(v)); - assert(_base_0 <= o && o < _top, "must be"); + assert(_base <= o && o < _top, "must be"); - // We usually have only 2 regions for the default archive. Use template to avoid unnecessary comparisons. - if (NUM_LOADED_REGIONS > 3 && o >= _base_3) { - o += _offset_3; - } else if (NUM_LOADED_REGIONS > 2 && o >= _base_2) { - o += _offset_2; - } else if (o >= _base_1) { - o += _offset_1; - } else { - o += _offset_0; - } + o += _offset; ArchiveHeapLoader::assert_in_loaded_heap(o); RawAccess::oop_store(p, cast_to_oop(o)); return true; } }; -int ArchiveHeapLoader::init_loaded_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions, +bool ArchiveHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region, MemRegion& archive_space) { size_t total_bytes = 0; - int num_loaded_regions = 0; - for (int i = MetaspaceShared::first_archive_heap_region; - i <= MetaspaceShared::last_archive_heap_region; i++) { - FileMapRegion* r = mapinfo->region_at(i); - r->assert_is_heap_region(); - if (r->used() > 0) { - assert(is_aligned(r->used(), HeapWordSize), "must be"); - total_bytes += r->used(); - LoadedArchiveHeapRegion* ri = &loaded_regions[num_loaded_regions++]; - ri->_region_index = i; - ri->_region_size = r->used(); - ri->_dumptime_base = (uintptr_t)mapinfo->heap_region_dumptime_address(r); - } + FileMapRegion* r = mapinfo->region_at(MetaspaceShared::hp); + r->assert_is_heap_region(); + if (r->used() == 0) { + return false; } + assert(is_aligned(r->used(), HeapWordSize), "must be"); + total_bytes += r->used(); + loaded_region->_region_index = MetaspaceShared::hp; + 
loaded_region->_region_size = r->used(); + loaded_region->_dumptime_base = (uintptr_t)mapinfo->heap_region_dumptime_address(); + assert(is_aligned(total_bytes, HeapWordSize), "must be"); size_t word_size = total_bytes / HeapWordSize; HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size); if (buffer == nullptr) { - return 0; + return false; } archive_space = MemRegion(buffer, word_size); _loaded_heap_bottom = (uintptr_t)archive_space.start(); _loaded_heap_top = _loaded_heap_bottom + total_bytes; - return num_loaded_regions; + loaded_region->_runtime_offset = _loaded_heap_bottom - loaded_region->_dumptime_base; + + return true; } -void ArchiveHeapLoader::sort_loaded_regions(LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions, - uintptr_t buffer) { - // Find the relocation offset of the pointers in each region - qsort(loaded_regions, num_loaded_regions, sizeof(LoadedArchiveHeapRegion), - LoadedArchiveHeapRegion::comparator); - - uintptr_t p = buffer; - for (int i = 0; i < num_loaded_regions; i++) { - // This region will be loaded at p, so all objects inside this - // region will be shifted by ri->offset - LoadedArchiveHeapRegion* ri = &loaded_regions[i]; - ri->_runtime_offset = p - ri->_dumptime_base; - p += ri->_region_size; - } - assert(p == _loaded_heap_top, "must be"); -} - -bool ArchiveHeapLoader::load_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions, - int num_loaded_regions, uintptr_t buffer) { +bool ArchiveHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region, + uintptr_t load_address) { uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region(); if (bitmap_base == 0) { _loading_failed = true; return false; // OOM or CRC error } - uintptr_t load_address = buffer; - for (int i = 0; i < num_loaded_regions; i++) { - LoadedArchiveHeapRegion* ri = &loaded_regions[i]; - FileMapRegion* r = mapinfo->region_at(ri->_region_index); - if 
(!mapinfo->read_region(ri->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) { - // There's no easy way to free the buffer, so we will fill it with zero later - // in fill_failed_loaded_heap(), and it will eventually be GC'ed. - log_warning(cds)("Loading of heap region %d has failed. Archived objects are disabled", i); - _loading_failed = true; - return false; - } - log_info(cds)("Loaded heap region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT - " size " SIZE_FORMAT_W(6) " delta " INTX_FORMAT, - ri->_region_index, load_address, load_address + ri->_region_size, - ri->_region_size, ri->_runtime_offset); - - uintptr_t oopmap = bitmap_base + r->oopmap_offset(); - BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits()); - - if (num_loaded_regions == 4) { - PatchLoadedRegionPointers<4> patcher((narrowOop*)load_address, loaded_regions); - bm.iterate(&patcher); - } else if (num_loaded_regions == 3) { - PatchLoadedRegionPointers<3> patcher((narrowOop*)load_address, loaded_regions); - bm.iterate(&patcher); - } else { - assert(num_loaded_regions == 2, "must be"); - PatchLoadedRegionPointers<2> patcher((narrowOop*)load_address, loaded_regions); - bm.iterate(&patcher); - } - - assert(r->mapped_base() == (char*)load_address, "sanity"); - load_address += r->used(); + FileMapRegion* r = mapinfo->region_at(loaded_region->_region_index); + if (!mapinfo->read_region(loaded_region->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) { + // There's no easy way to free the buffer, so we will fill it with zero later + // in fill_failed_loaded_heap(), and it will eventually be GC'ed. + log_warning(cds)("Loading of heap region %d has failed. 
Archived objects are disabled", loaded_region->_region_index); + _loading_failed = true; + return false; } + assert(r->mapped_base() == (char*)load_address, "sanity"); + log_info(cds)("Loaded heap region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT + " size " SIZE_FORMAT_W(6) " delta " INTX_FORMAT, + loaded_region->_region_index, load_address, load_address + loaded_region->_region_size, + loaded_region->_region_size, loaded_region->_runtime_offset); + uintptr_t oopmap = bitmap_base + r->oopmap_offset(); + BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits()); + + PatchLoadedRegionPointers patcher((narrowOop*)load_address, loaded_region); + bm.iterate(&patcher); return true; } -bool ArchiveHeapLoader::load_heap_regions(FileMapInfo* mapinfo) { +bool ArchiveHeapLoader::load_heap_region(FileMapInfo* mapinfo) { assert(UseCompressedOops, "loaded heap for !UseCompressedOops is unimplemented"); init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift()); - LoadedArchiveHeapRegion loaded_regions[MetaspaceShared::max_num_heap_regions]; - memset(loaded_regions, 0, sizeof(loaded_regions)); + LoadedArchiveHeapRegion loaded_region; + memset(&loaded_region, 0, sizeof(loaded_region)); MemRegion archive_space; - int num_loaded_regions = init_loaded_regions(mapinfo, loaded_regions, archive_space); - if (num_loaded_regions <= 0) { + if (!init_loaded_region(mapinfo, &loaded_region, archive_space)) { return false; } - sort_loaded_regions(loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start()); - if (!load_regions(mapinfo, loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start())) { + + if (!load_heap_region_impl(mapinfo, &loaded_region, (uintptr_t)archive_space.start())) { assert(_loading_failed, "must be"); return false; } - init_loaded_heap_relocation(loaded_regions, num_loaded_regions); + init_loaded_heap_relocation(&loaded_region); _is_loaded = true; return true; @@ -448,14 +348,14 @@ class 
VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure { VerifyLoadedHeapEmbeddedPointers(ResourceHashtable* table) : _table(table) {} virtual void do_oop(narrowOop* p) { - // This should be called before the loaded regions are modified, so all the embedded pointers - // must be null, or must point to a valid object in the loaded regions. + // This should be called before the loaded region is modified, so all the embedded pointers + // must be null, or must point to a valid object in the loaded region. narrowOop v = *p; if (!CompressedOops::is_null(v)) { oop o = CompressedOops::decode_not_null(v); uintptr_t u = cast_from_oop(o); ArchiveHeapLoader::assert_in_loaded_heap(u); - guarantee(_table->contains(u), "must point to beginning of object in loaded archived regions"); + guarantee(_table->contains(u), "must point to beginning of object in loaded archived region"); } } virtual void do_oop(oop* p) { @@ -539,15 +439,12 @@ void ArchiveHeapLoader::patch_native_pointers() { return; } - for (int i = MetaspaceShared::first_archive_heap_region; - i <= MetaspaceShared::last_archive_heap_region; i++) { - FileMapRegion* r = FileMapInfo::current_info()->region_at(i); - if (r->mapped_base() != nullptr && r->has_ptrmap()) { - log_info(cds, heap)("Patching native pointers in heap region %d", i); - BitMapView bm = r->ptrmap_view(); - PatchNativePointers patcher((Metadata**)r->mapped_base()); - bm.iterate(&patcher); - } + FileMapRegion* r = FileMapInfo::current_info()->region_at(MetaspaceShared::hp); + if (r->mapped_base() != nullptr && r->has_ptrmap()) { + log_info(cds, heap)("Patching native pointers in heap region"); + BitMapView bm = r->ptrmap_view(); + PatchNativePointers patcher((Metadata**)r->mapped_base()); + bm.iterate(&patcher); } } #endif // INCLUDE_CDS_JAVA_HEAP diff --git a/src/hotspot/share/cds/archiveHeapLoader.hpp b/src/hotspot/share/cds/archiveHeapLoader.hpp index 530e4ff5bc2..ac87efdadb1 100644 --- a/src/hotspot/share/cds/archiveHeapLoader.hpp +++ 
b/src/hotspot/share/cds/archiveHeapLoader.hpp @@ -40,24 +40,21 @@ struct LoadedArchiveHeapRegion; class ArchiveHeapLoader : AllStatic { public: - // At runtime, heap regions in the CDS archive can be used in two different ways, + // At runtime, the heap region in the CDS archive can be used in two different ways, // depending on the GC type: - // - Mapped: (G1 only) the regions are directly mapped into the Java heap - // - Loaded: At VM start-up, the objects in the heap regions are copied into the + // - Mapped: (G1 only) the region is directly mapped into the Java heap + // - Loaded: At VM start-up, the objects in the heap region are copied into the // Java heap. This is easier to implement than mapping but // slightly less efficient, as the embedded pointers need to be relocated. static bool can_use() { return can_map() || can_load(); } - // Can this VM map archived heap regions? Currently only G1+compressed{oops,cp} + // Can this VM map archived heap region? Currently only G1+compressed{oops,cp} static bool can_map() { CDS_JAVA_HEAP_ONLY(return (UseG1GC && UseCompressedClassPointers);) NOT_CDS_JAVA_HEAP(return false;) } - static bool is_mapped() { - return closed_regions_mapped() && open_regions_mapped(); - } - // Can this VM load the objects from archived heap regions into the heap at start-up? + // Can this VM load the objects from archived heap region into the heap at start-up? 
static bool can_load() NOT_CDS_JAVA_HEAP_RETURN_(false); static void finish_initialization() NOT_CDS_JAVA_HEAP_RETURN; static bool is_loaded() { @@ -76,25 +73,17 @@ public: NOT_CDS_JAVA_HEAP_RETURN_(0L); } - static void set_closed_regions_mapped() { - CDS_JAVA_HEAP_ONLY(_closed_regions_mapped = true;) + static void set_mapped() { + CDS_JAVA_HEAP_ONLY(_is_mapped = true;) NOT_CDS_JAVA_HEAP_RETURN; } - static bool closed_regions_mapped() { - CDS_JAVA_HEAP_ONLY(return _closed_regions_mapped;) - NOT_CDS_JAVA_HEAP_RETURN_(false); - } - static void set_open_regions_mapped() { - CDS_JAVA_HEAP_ONLY(_open_regions_mapped = true;) - NOT_CDS_JAVA_HEAP_RETURN; - } - static bool open_regions_mapped() { - CDS_JAVA_HEAP_ONLY(return _open_regions_mapped;) + static bool is_mapped() { + CDS_JAVA_HEAP_ONLY(return _is_mapped;) NOT_CDS_JAVA_HEAP_RETURN_(false); } // NarrowOops stored in the CDS archive may use a different encoding scheme - // than CompressedOops::{base,shift} -- see FileMapInfo::map_heap_regions_impl. + // than CompressedOops::{base,shift} -- see FileMapInfo::map_heap_region_impl. // To decode them, do not use CompressedOops::decode_not_null. Use this // function instead. 
inline static oop decode_from_archive(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(nullptr); @@ -104,34 +93,25 @@ public: static void patch_compressed_embedded_pointers(BitMapView bm, FileMapInfo* info, - FileMapRegion* map_region, MemRegion region) NOT_CDS_JAVA_HEAP_RETURN; static void patch_embedded_pointers(FileMapInfo* info, - FileMapRegion* map_region, MemRegion region, address oopmap, size_t oopmap_size_in_bits) NOT_CDS_JAVA_HEAP_RETURN; - static void fixup_regions() NOT_CDS_JAVA_HEAP_RETURN; + static void fixup_region() NOT_CDS_JAVA_HEAP_RETURN; #if INCLUDE_CDS_JAVA_HEAP static void init_mapped_heap_relocation(ptrdiff_t delta, int dumptime_oop_shift); private: - static bool _closed_regions_mapped; - static bool _open_regions_mapped; + static bool _is_mapped; static bool _is_loaded; // Support for loaded archived heap. These are cached values from // LoadedArchiveHeapRegion's. - static uintptr_t _dumptime_base_0; - static uintptr_t _dumptime_base_1; - static uintptr_t _dumptime_base_2; - static uintptr_t _dumptime_base_3; + static uintptr_t _dumptime_base; static uintptr_t _dumptime_top; - static intx _runtime_offset_0; - static intx _runtime_offset_1; - static intx _runtime_offset_2; - static intx _runtime_offset_3; + static intx _runtime_offset; static uintptr_t _loaded_heap_bottom; static uintptr_t _loaded_heap_top; @@ -148,14 +128,10 @@ private: static bool _mapped_heap_relocation_initialized; static void init_narrow_oop_decoding(address base, int shift); - static int init_loaded_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions, + static bool init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region, MemRegion& archive_space); - static void sort_loaded_regions(LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions, - uintptr_t buffer); - static bool load_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions, - int num_loaded_regions, uintptr_t buffer); - static void 
init_loaded_heap_relocation(LoadedArchiveHeapRegion* reloc_info, - int num_loaded_regions); + static bool load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region, uintptr_t buffer); + static void init_loaded_heap_relocation(LoadedArchiveHeapRegion* reloc_info); static void patch_native_pointers(); static void finish_loaded_heap(); static void verify_loaded_heap(); @@ -168,9 +144,11 @@ private: template inline static oop decode_from_archive_impl(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(nullptr); + class PatchLoadedRegionPointers; + public: - static bool load_heap_regions(FileMapInfo* mapinfo); + static bool load_heap_region(FileMapInfo* mapinfo); static void assert_in_loaded_heap(uintptr_t o) { assert(is_in_loaded_heap(o), "must be"); } diff --git a/src/hotspot/share/cds/archiveHeapLoader.inline.hpp b/src/hotspot/share/cds/archiveHeapLoader.inline.hpp index 6f344ddf526..9efd39b8d26 100644 --- a/src/hotspot/share/cds/archiveHeapLoader.inline.hpp +++ b/src/hotspot/share/cds/archiveHeapLoader.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -38,18 +38,10 @@ inline oop ArchiveHeapLoader::decode_from_archive_impl(narrowOop v) { assert(_narrow_oop_base_initialized, "relocation information must have been initialized"); uintptr_t p = ((uintptr_t)_narrow_oop_base) + ((uintptr_t)v << _narrow_oop_shift); if (IS_MAPPED) { - assert(_dumptime_base_0 == UINTPTR_MAX, "must be"); - } else if (p >= _dumptime_base_0) { + assert(_dumptime_base == UINTPTR_MAX, "must be"); + } else if (p >= _dumptime_base) { assert(p < _dumptime_top, "must be"); - if (p >= _dumptime_base_3) { - p += _runtime_offset_3; - } else if (p >= _dumptime_base_2) { - p += _runtime_offset_2; - } else if (p >= _dumptime_base_1) { - p += _runtime_offset_1; - } else { - p += _runtime_offset_0; - } + p += _runtime_offset; } oop result = cast_to_oop((uintptr_t)p); diff --git a/src/hotspot/share/cds/archiveHeapWriter.cpp b/src/hotspot/share/cds/archiveHeapWriter.cpp index 91cd15c762d..530754d2e03 100644 --- a/src/hotspot/share/cds/archiveHeapWriter.cpp +++ b/src/hotspot/share/cds/archiveHeapWriter.cpp @@ -47,26 +47,16 @@ #if INCLUDE_CDS_JAVA_HEAP - GrowableArrayCHeap* ArchiveHeapWriter::_buffer; // The following are offsets from buffer_bottom() -size_t ArchiveHeapWriter::_buffer_top; -size_t ArchiveHeapWriter::_open_bottom; -size_t ArchiveHeapWriter::_open_top; -size_t ArchiveHeapWriter::_closed_bottom; -size_t ArchiveHeapWriter::_closed_top; -size_t ArchiveHeapWriter::_heap_roots_bottom; +size_t ArchiveHeapWriter::_buffer_used; +size_t ArchiveHeapWriter::_heap_roots_bottom_offset; size_t ArchiveHeapWriter::_heap_roots_word_size; -address ArchiveHeapWriter::_requested_open_region_bottom; -address ArchiveHeapWriter::_requested_open_region_top; -address ArchiveHeapWriter::_requested_closed_region_bottom; -address ArchiveHeapWriter::_requested_closed_region_top; - -ResourceBitMap* ArchiveHeapWriter::_closed_oopmap; -ResourceBitMap* ArchiveHeapWriter::_open_oopmap; +address 
ArchiveHeapWriter::_requested_bottom; +address ArchiveHeapWriter::_requested_top; GrowableArrayCHeap* ArchiveHeapWriter::_native_pointers; GrowableArrayCHeap* ArchiveHeapWriter::_source_objs; @@ -80,10 +70,8 @@ void ArchiveHeapWriter::init() { _buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(); - _requested_open_region_bottom = nullptr; - _requested_open_region_top = nullptr; - _requested_closed_region_bottom = nullptr; - _requested_closed_region_top = nullptr; + _requested_bottom = nullptr; + _requested_top = nullptr; _native_pointers = new GrowableArrayCHeap(2048); _source_objs = new GrowableArrayCHeap(10000); @@ -97,17 +85,13 @@ void ArchiveHeapWriter::add_source_obj(oop src_obj) { _source_objs->append(src_obj); } -// For the time being, always support two regions (to be strictly compatible with existing G1 -// mapping code. We might eventually use a single region (JDK-8298048). void ArchiveHeapWriter::write(GrowableArrayCHeap* roots, - GrowableArray* closed_regions, GrowableArray* open_regions, - GrowableArray* closed_bitmaps, - GrowableArray* open_bitmaps) { + ArchiveHeapInfo* heap_info) { assert(HeapShared::can_write(), "sanity"); allocate_buffer(); copy_source_objs_to_buffer(roots); - set_requested_address_for_regions(closed_regions, open_regions); - relocate_embedded_oops(roots, closed_bitmaps, open_bitmaps); + set_requested_address(heap_info); + relocate_embedded_oops(roots, heap_info); } bool ArchiveHeapWriter::is_too_large_to_archive(oop o) { @@ -133,18 +117,15 @@ bool ArchiveHeapWriter::is_too_large_to_archive(size_t size) { } // Various lookup functions between source_obj, buffered_obj and requested_obj -bool ArchiveHeapWriter::is_in_requested_regions(oop o) { - assert(_requested_open_region_bottom != nullptr, "do not call before this is initialized"); - assert(_requested_closed_region_bottom != nullptr, "do not call before this is initialized"); - +bool ArchiveHeapWriter::is_in_requested_range(oop o) { + 
assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized"); address a = cast_from_oop
(o); - return (_requested_open_region_bottom <= a && a < _requested_open_region_top) || - (_requested_closed_region_bottom <= a && a < _requested_closed_region_top); + return (_requested_bottom <= a && a < _requested_top); } oop ArchiveHeapWriter::requested_obj_from_buffer_offset(size_t offset) { - oop req_obj = cast_to_oop(_requested_open_region_bottom + offset); - assert(is_in_requested_regions(req_obj), "must be"); + oop req_obj = cast_to_oop(_requested_bottom + offset); + assert(is_in_requested_range(req_obj), "must be"); return req_obj; } @@ -168,30 +149,22 @@ oop ArchiveHeapWriter::buffered_addr_to_source_obj(address buffered_addr) { } address ArchiveHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) { - return _requested_open_region_bottom + buffered_address_to_offset(buffered_addr); + return _requested_bottom + buffered_address_to_offset(buffered_addr); } oop ArchiveHeapWriter::heap_roots_requested_address() { - return requested_obj_from_buffer_offset(_heap_roots_bottom); + return cast_to_oop(_requested_bottom + _heap_roots_bottom_offset); } -address ArchiveHeapWriter::heap_region_requested_bottom(int heap_region_idx) { +address ArchiveHeapWriter::requested_address() { assert(_buffer != nullptr, "must be initialized"); - switch (heap_region_idx) { - case MetaspaceShared::first_closed_heap_region: - return _requested_closed_region_bottom; - case MetaspaceShared::first_open_heap_region: - return _requested_open_region_bottom; - default: - ShouldNotReachHere(); - return nullptr; - } + return _requested_bottom; } void ArchiveHeapWriter::allocate_buffer() { int initial_buffer_size = 100000; _buffer = new GrowableArrayCHeap(initial_buffer_size); - _open_bottom = _buffer_top = 0; + _buffer_used = 0; ensure_buffer_space(1); // so that buffer_bottom() works } @@ -203,7 +176,7 @@ void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) { void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap* roots) { Klass* k = 
Universe::objectArrayKlassObj(); // already relocated to point to archived klass - int length = roots != nullptr ? roots->length() : 0; + int length = roots->length(); _heap_roots_word_size = objArrayOopDesc::object_size(length); size_t byte_size = _heap_roots_word_size * HeapWordSize; if (byte_size >= MIN_GC_REGION_ALIGNMENT) { @@ -213,10 +186,10 @@ void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap(_buffer_top); + HeapWord* mem = offset_to_buffered_address(_buffer_used); memset(mem, 0, byte_size); { // This is copied from MemAllocator::finish @@ -238,40 +211,27 @@ void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeapobj_at_addr(i) = o; } } - log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem); + log_info(cds, heap)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem); - _heap_roots_bottom = _buffer_top; - _buffer_top = new_top; + _heap_roots_bottom_offset = _buffer_used; + _buffer_used = new_used; } void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap* roots) { - copy_source_objs_to_buffer_by_region(/*copy_open_region=*/true); - copy_roots_to_buffer(roots); - _open_top = _buffer_top; - - // Align the closed region to the next G1 region - _buffer_top = _closed_bottom = align_up(_buffer_top, HeapRegion::GrainBytes); - copy_source_objs_to_buffer_by_region(/*copy_open_region=*/false); - _closed_top = _buffer_top; - - log_info(cds, heap)("Size of open region = " SIZE_FORMAT " bytes", _open_top - _open_bottom); - log_info(cds, heap)("Size of closed region = " SIZE_FORMAT " bytes", _closed_top - _closed_bottom); -} - -void ArchiveHeapWriter::copy_source_objs_to_buffer_by_region(bool copy_open_region) { for (int i = 0; i < _source_objs->length(); i++) { oop src_obj = _source_objs->at(i); HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj); assert(info != nullptr, "must be"); - if 
(info->in_open_region() == copy_open_region) { - // For region-based collectors such as G1, we need to make sure that we don't have - // an object that can possible span across two regions. - size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj); - info->set_buffer_offset(buffer_offset); + size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj); + info->set_buffer_offset(buffer_offset); - _buffer_offset_to_source_obj_table->put(buffer_offset, src_obj); - } + _buffer_offset_to_source_obj_table->put(buffer_offset, src_obj); } + + copy_roots_to_buffer(roots); + + log_info(cds)("Size of heap region = " SIZE_FORMAT " bytes, %d objects, %d roots", + _buffer_used, _source_objs->length() + 1, roots->length()); } size_t ArchiveHeapWriter::filler_array_byte_size(int length) { @@ -298,7 +258,7 @@ int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) { void ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) { assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses"); Klass* oak = Universe::objectArrayKlassObj(); // already relocated to point to archived klass - HeapWord* mem = offset_to_buffered_address(_buffer_top); + HeapWord* mem = offset_to_buffered_address(_buffer_used); memset(mem, 0, fill_bytes); oopDesc::set_mark(mem, markWord::prototype()); narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak); @@ -313,10 +273,10 @@ void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) { // required_byte_size has been allocated. If not, fill the remainder of the current // region. 
size_t min_filler_byte_size = filler_array_byte_size(0); - size_t new_top = _buffer_top + required_byte_size + min_filler_byte_size; + size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size; - const size_t cur_min_region_bottom = align_down(_buffer_top, MIN_GC_REGION_ALIGNMENT); - const size_t next_min_region_bottom = align_down(new_top, MIN_GC_REGION_ALIGNMENT); + const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT); + const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT); if (cur_min_region_bottom != next_min_region_bottom) { // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way @@ -326,16 +286,16 @@ void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) { "no buffered object can be larger than %d bytes", MIN_GC_REGION_ALIGNMENT); const size_t filler_end = next_min_region_bottom; - const size_t fill_bytes = filler_end - _buffer_top; + const size_t fill_bytes = filler_end - _buffer_used; assert(fill_bytes > 0, "must be"); ensure_buffer_space(filler_end); int array_length = filler_array_length(fill_bytes); log_info(cds, heap)("Inserting filler obj array of %d elements (" SIZE_FORMAT " bytes total) @ buffer offset " SIZE_FORMAT, - array_length, fill_bytes, _buffer_top); + array_length, fill_bytes, _buffer_used); init_filler_array_at_buffer_top(array_length, fill_bytes); - _buffer_top = filler_end; + _buffer_used = filler_end; } } @@ -344,72 +304,58 @@ size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) { size_t byte_size = src_obj->size() * HeapWordSize; assert(byte_size > 0, "no zero-size objects"); + // For region-based collectors such as G1, the archive heap may be mapped into + // multiple regions. We need to make sure that we don't have an object that can possible + // span across two regions. 
maybe_fill_gc_region_gap(byte_size); - size_t new_top = _buffer_top + byte_size; - assert(new_top > _buffer_top, "no wrap around"); + size_t new_used = _buffer_used + byte_size; + assert(new_used > _buffer_used, "no wrap around"); - size_t cur_min_region_bottom = align_down(_buffer_top, MIN_GC_REGION_ALIGNMENT); - size_t next_min_region_bottom = align_down(new_top, MIN_GC_REGION_ALIGNMENT); + size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT); + size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT); assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries"); - ensure_buffer_space(new_top); + ensure_buffer_space(new_used); address from = cast_from_oop
(src_obj); - address to = offset_to_buffered_address
(_buffer_top); - assert(is_object_aligned(_buffer_top), "sanity"); + address to = offset_to_buffered_address
(_buffer_used); + assert(is_object_aligned(_buffer_used), "sanity"); assert(is_object_aligned(byte_size), "sanity"); memcpy(to, from, byte_size); - size_t buffered_obj_offset = _buffer_top; - _buffer_top = new_top; + size_t buffered_obj_offset = _buffer_used; + _buffer_used = new_used; return buffered_obj_offset; } -void ArchiveHeapWriter::set_requested_address_for_regions(GrowableArray* closed_regions, - GrowableArray* open_regions) { - assert(closed_regions->length() == 0, "must be"); - assert(open_regions->length() == 0, "must be"); - +void ArchiveHeapWriter::set_requested_address(ArchiveHeapInfo* info) { + assert(!info->is_used(), "only set once"); assert(UseG1GC, "must be"); address heap_end = (address)G1CollectedHeap::heap()->reserved().end(); log_info(cds, heap)("Heap end = %p", heap_end); - size_t closed_region_byte_size = _closed_top - _closed_bottom; - size_t open_region_byte_size = _open_top - _open_bottom; - assert(closed_region_byte_size > 0, "must archived at least one object for closed region!"); - assert(open_region_byte_size > 0, "must archived at least one object for open region!"); + size_t heap_region_byte_size = _buffer_used; + assert(heap_region_byte_size > 0, "must archived at least one object!"); - // The following two asserts are ensured by copy_source_objs_to_buffer_by_region(). 
- assert(is_aligned(_closed_bottom, HeapRegion::GrainBytes), "sanity"); - assert(is_aligned(_open_bottom, HeapRegion::GrainBytes), "sanity"); + _requested_bottom = align_down(heap_end - heap_region_byte_size, HeapRegion::GrainBytes); + assert(is_aligned(_requested_bottom, HeapRegion::GrainBytes), "sanity"); - _requested_closed_region_bottom = align_down(heap_end - closed_region_byte_size, HeapRegion::GrainBytes); - _requested_open_region_bottom = _requested_closed_region_bottom - (_closed_bottom - _open_bottom); + _requested_top = _requested_bottom + _buffer_used; - assert(is_aligned(_requested_closed_region_bottom, HeapRegion::GrainBytes), "sanity"); - assert(is_aligned(_requested_open_region_bottom, HeapRegion::GrainBytes), "sanity"); - - _requested_open_region_top = _requested_open_region_bottom + (_open_top - _open_bottom); - _requested_closed_region_top = _requested_closed_region_bottom + (_closed_top - _closed_bottom); - - assert(_requested_open_region_top <= _requested_closed_region_bottom, "no overlap"); - - closed_regions->append(MemRegion(offset_to_buffered_address(_closed_bottom), - offset_to_buffered_address(_closed_top))); - open_regions->append( MemRegion(offset_to_buffered_address(_open_bottom), - offset_to_buffered_address(_open_top))); + info->set_memregion(MemRegion(offset_to_buffered_address(0), + offset_to_buffered_address(_buffer_used))); } // Oop relocation template T* ArchiveHeapWriter::requested_addr_to_buffered_addr(T* p) { - assert(is_in_requested_regions(cast_to_oop(p)), "must be"); + assert(is_in_requested_range(cast_to_oop(p)), "must be"); address addr = address(p); - assert(addr >= _requested_open_region_bottom, "must be"); - size_t offset = addr - _requested_open_region_bottom; + assert(addr >= _requested_bottom, "must be"); + size_t offset = addr - _requested_bottom; return offset_to_buffered_address(offset); } @@ -421,7 +367,7 @@ template oop ArchiveHeapWriter::load_source_oop_from_buffer(T* buff template void 
ArchiveHeapWriter::store_requested_oop_in_buffer(T* buffered_addr, oop request_oop) { - assert(is_in_requested_regions(request_oop), "must be"); + assert(is_in_requested_range(request_oop), "must be"); store_oop_in_buffer(buffered_addr, request_oop); } @@ -445,30 +391,22 @@ oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) { return CompressedOops::decode(*buffered_addr); } -template void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer) { +template void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap) { oop source_referent = load_source_oop_from_buffer(field_addr_in_buffer); if (!CompressedOops::is_null(source_referent)) { oop request_referent = source_obj_to_requested_obj(source_referent); store_requested_oop_in_buffer(field_addr_in_buffer, request_referent); - mark_oop_pointer(field_addr_in_buffer); + mark_oop_pointer(field_addr_in_buffer, oopmap); } } -template void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr) { +template void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) { T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr)); - ResourceBitMap* oopmap; address requested_region_bottom; - if (request_p >= (T*)_requested_closed_region_bottom) { - assert(request_p < (T*)_requested_closed_region_top, "sanity"); - oopmap = _closed_oopmap; - requested_region_bottom = _requested_closed_region_bottom; - } else { - assert(request_p >= (T*)_requested_open_region_bottom, "sanity"); - assert(request_p < (T*)_requested_open_region_top, "sanity"); - oopmap = _open_oopmap; - requested_region_bottom = _requested_open_region_bottom; - } + assert(request_p >= (T*)_requested_bottom, "sanity"); + assert(request_p < (T*)_requested_top, "sanity"); + requested_region_bottom = _requested_bottom; // Mark the pointer in the oopmap T* region_bottom = (T*)requested_region_bottom; @@ -501,18 +439,19 @@ void 
ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s } // Relocate an element in the buffered copy of HeapShared::roots() -template void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index) { +template void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap) { size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset(index); - relocate_field_in_buffer((T*)(buffered_heap_roots_addr() + offset)); + relocate_field_in_buffer((T*)(buffered_heap_roots_addr() + offset), oopmap); } class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure { oop _src_obj; address _buffered_obj; + CHeapBitMap* _oopmap; public: - EmbeddedOopRelocator(oop src_obj, address buffered_obj) : - _src_obj(src_obj), _buffered_obj(buffered_obj) {} + EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) : + _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap) {} void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); } void do_oop( oop *p) { EmbeddedOopRelocator::do_oop_work(p); } @@ -520,82 +459,40 @@ public: private: template void do_oop_work(T *p) { size_t field_offset = pointer_delta(p, _src_obj, sizeof(char)); - ArchiveHeapWriter::relocate_field_in_buffer((T*)(_buffered_obj + field_offset)); + ArchiveHeapWriter::relocate_field_in_buffer((T*)(_buffered_obj + field_offset), _oopmap); } }; // Update all oop fields embedded in the buffered objects void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap* roots, - GrowableArray* closed_bitmaps, - GrowableArray* open_bitmaps) { + ArchiveHeapInfo* heap_info) { size_t oopmap_unit = (UseCompressedOops ? 
sizeof(narrowOop) : sizeof(oop)); - size_t closed_region_byte_size = _closed_top - _closed_bottom; - size_t open_region_byte_size = _open_top - _open_bottom; - ResourceBitMap closed_oopmap(closed_region_byte_size / oopmap_unit); - ResourceBitMap open_oopmap (open_region_byte_size / oopmap_unit); - - _closed_oopmap = &closed_oopmap; - _open_oopmap = &open_oopmap; + size_t heap_region_byte_size = _buffer_used; + heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit); auto iterator = [&] (oop src_obj, HeapShared::CachedOopInfo& info) { oop requested_obj = requested_obj_from_buffer_offset(info.buffer_offset()); update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass()); - address buffered_obj = offset_to_buffered_address
(info.buffer_offset()); - EmbeddedOopRelocator relocator(src_obj, buffered_obj); - + EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap()); src_obj->oop_iterate(&relocator); }; HeapShared::archived_object_cache()->iterate_all(iterator); // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it. - oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_bottom); + oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_bottom_offset); update_header_for_requested_obj(requested_roots, nullptr, Universe::objectArrayKlassObj()); int length = roots != nullptr ? roots->length() : 0; for (int i = 0; i < length; i++) { if (UseCompressedOops) { - relocate_root_at(requested_roots, i); + relocate_root_at(requested_roots, i, heap_info->oopmap()); } else { - relocate_root_at(requested_roots, i); + relocate_root_at(requested_roots, i, heap_info->oopmap()); } } - closed_bitmaps->append(make_bitmap_info(&closed_oopmap, /*is_open=*/false, /*is_oopmap=*/true)); - open_bitmaps ->append(make_bitmap_info(&open_oopmap, /*is_open=*/false, /*is_oopmap=*/true)); - - closed_bitmaps->append(compute_ptrmap(/*is_open=*/false)); - open_bitmaps ->append(compute_ptrmap(/*is_open=*/true)); - - _closed_oopmap = nullptr; - _open_oopmap = nullptr; -} - -ArchiveHeapBitmapInfo ArchiveHeapWriter::make_bitmap_info(ResourceBitMap* bitmap, bool is_open, bool is_oopmap) { - size_t size_in_bits = bitmap->size(); - size_t size_in_bytes; - uintptr_t* buffer; - - if (size_in_bits > 0) { - size_in_bytes = bitmap->size_in_bytes(); - buffer = (uintptr_t*)NEW_C_HEAP_ARRAY(char, size_in_bytes, mtInternal); - bitmap->write_to(buffer, size_in_bytes); - } else { - size_in_bytes = 0; - buffer = nullptr; - } - - log_info(cds, heap)("%s @ " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for %s heap region", - is_oopmap ? 
"Oopmap" : "Ptrmap", - p2i(buffer), size_in_bytes, - is_open? "open" : "closed"); - - ArchiveHeapBitmapInfo info; - info._map = (address)buffer; - info._size_in_bits = size_in_bits; - info._size_in_bytes = size_in_bytes; - - return info; + compute_ptrmap(heap_info); } void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) { @@ -608,50 +505,44 @@ void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) { } } -ArchiveHeapBitmapInfo ArchiveHeapWriter::compute_ptrmap(bool is_open) { +void ArchiveHeapWriter::compute_ptrmap(ArchiveHeapInfo* heap_info) { int num_non_null_ptrs = 0; - Metadata** bottom = (Metadata**) (is_open ? _requested_open_region_bottom: _requested_closed_region_bottom); - Metadata** top = (Metadata**) (is_open ? _requested_open_region_top: _requested_closed_region_top); // exclusive - ResourceBitMap ptrmap(top - bottom); + Metadata** bottom = (Metadata**) _requested_bottom; + Metadata** top = (Metadata**) _requested_top; // exclusive + heap_info->ptrmap()->resize(top - bottom); + BitMap::idx_t max_idx = 32; // paranoid - don't make it too small for (int i = 0; i < _native_pointers->length(); i++) { NativePointerInfo info = _native_pointers->at(i); oop src_obj = info._src_obj; int field_offset = info._field_offset; HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj); - if (p->in_open_region() == is_open) { - // requested_field_addr = the address of this field in the requested space - oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset()); - Metadata** requested_field_addr = (Metadata**)(cast_from_oop
(requested_obj) + field_offset); - assert(bottom <= requested_field_addr && requested_field_addr < top, "range check"); + // requested_field_addr = the address of this field in the requested space + oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset()); + Metadata** requested_field_addr = (Metadata**)(cast_from_oop
(requested_obj) + field_offset); + assert(bottom <= requested_field_addr && requested_field_addr < top, "range check"); - // Mark this field in the bitmap - BitMap::idx_t idx = requested_field_addr - bottom; - ptrmap.set_bit(idx); - num_non_null_ptrs ++; + // Mark this field in the bitmap + BitMap::idx_t idx = requested_field_addr - bottom; + heap_info->ptrmap()->set_bit(idx); + num_non_null_ptrs ++; + max_idx = MAX2(max_idx, idx); - // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have - // this address if the RO/RW regions are mapped at the default location). + // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have + // this address if the RO/RW regions are mapped at the default location). - Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr); - Metadata* native_ptr = *buffered_field_addr; - assert(native_ptr != nullptr, "sanity"); + Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr); + Metadata* native_ptr = *buffered_field_addr; + assert(native_ptr != nullptr, "sanity"); - address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr); - address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr); - *buffered_field_addr = (Metadata*)requested_native_ptr; - } + address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr); + address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr); + *buffered_field_addr = (Metadata*)requested_native_ptr; } - log_info(cds, heap)("compute_ptrmap: marked %d non-null native pointers for %s heap region", - num_non_null_ptrs, is_open ? 
"open" : "closed"); - - if (num_non_null_ptrs == 0) { - ResourceBitMap empty; - return make_bitmap_info(&empty, is_open, /*is_oopmap=*/ false); - } else { - return make_bitmap_info(&ptrmap, is_open, /*is_oopmap=*/ false); - } + heap_info->ptrmap()->resize(max_idx + 1); + log_info(cds, heap)("calculate_ptrmap: marked %d non-null native pointers for heap region (" SIZE_FORMAT " bits)", + num_non_null_ptrs, size_t(heap_info->ptrmap()->size())); } #endif // INCLUDE_CDS_JAVA_HEAP diff --git a/src/hotspot/share/cds/archiveHeapWriter.hpp b/src/hotspot/share/cds/archiveHeapWriter.hpp index 18eef9bcf46..6001012ba85 100644 --- a/src/hotspot/share/cds/archiveHeapWriter.hpp +++ b/src/hotspot/share/cds/archiveHeapWriter.hpp @@ -35,11 +35,28 @@ #include "utilities/macros.hpp" #include "utilities/resourceHash.hpp" -#if INCLUDE_CDS_JAVA_HEAP - -struct ArchiveHeapBitmapInfo; class MemRegion; +class ArchiveHeapInfo { + MemRegion _memregion; + CHeapBitMap _oopmap; + CHeapBitMap _ptrmap; + +public: + ArchiveHeapInfo() : _memregion(), _oopmap(128, mtClassShared), _ptrmap(128, mtClassShared) {} + bool is_used() { return !_memregion.is_empty(); } + + MemRegion memregion() { return _memregion; } + void set_memregion(MemRegion r) { _memregion = r; } + + char* start() { return (char*)_memregion.start(); } + size_t byte_size() { return _memregion.byte_size(); } + + CHeapBitMap* oopmap() { return &_oopmap; } + CHeapBitMap* ptrmap() { return &_ptrmap; } +}; + +#if INCLUDE_CDS_JAVA_HEAP class ArchiveHeapWriter : AllStatic { class EmbeddedOopRelocator; struct NativePointerInfo { @@ -72,31 +89,16 @@ class ArchiveHeapWriter : AllStatic { static GrowableArrayCHeap* _buffer; - // The exclusive top of the last object that has been copied into this->_buffer. - static size_t _buffer_top; - - // The bounds of the open region inside this->_buffer. - static size_t _open_bottom; // inclusive - static size_t _open_top; // exclusive - - // The bounds of the closed region inside this->_buffer. 
- static size_t _closed_bottom; // inclusive - static size_t _closed_top; // exclusive + // The number of bytes that have written into _buffer (may be smaller than _buffer->length()). + static size_t _buffer_used; // The bottom of the copy of Heap::roots() inside this->_buffer. - static size_t _heap_roots_bottom; + static size_t _heap_roots_bottom_offset; static size_t _heap_roots_word_size; - static address _requested_open_region_bottom; - static address _requested_open_region_top; - static address _requested_closed_region_bottom; - static address _requested_closed_region_top; - - static ResourceBitMap* _closed_oopmap; - static ResourceBitMap* _open_oopmap; - - static ArchiveHeapBitmapInfo _closed_oopmap_info; - static ArchiveHeapBitmapInfo _open_oopmap_info; + // The address range of the requested location of the archived heap objects. + static address _requested_bottom; + static address _requested_top; static GrowableArrayCHeap* _native_pointers; static GrowableArrayCHeap* _source_objs; @@ -127,8 +129,9 @@ class ArchiveHeapWriter : AllStatic { return offset_to_buffered_address
(0); } + // The exclusive end of the last object that was copied into the buffer. static address buffer_top() { - return buffer_bottom() + _buffer_top; + return buffer_bottom() + _buffer_used; } static bool in_buffer(address buffered_addr) { @@ -142,7 +145,6 @@ class ArchiveHeapWriter : AllStatic { static void copy_roots_to_buffer(GrowableArrayCHeap* roots); static void copy_source_objs_to_buffer(GrowableArrayCHeap* roots); - static void copy_source_objs_to_buffer_by_region(bool copy_open_region); static size_t copy_one_source_obj_to_buffer(oop src_obj); static void maybe_fill_gc_region_gap(size_t required_byte_size); @@ -150,14 +152,10 @@ class ArchiveHeapWriter : AllStatic { static int filler_array_length(size_t fill_bytes); static void init_filler_array_at_buffer_top(int array_length, size_t fill_bytes); - static void set_requested_address_for_regions(GrowableArray* closed_regions, - GrowableArray* open_regions); - static void relocate_embedded_oops(GrowableArrayCHeap* roots, - GrowableArray* closed_bitmaps, - GrowableArray* open_bitmaps); - static ArchiveHeapBitmapInfo compute_ptrmap(bool is_open); - static ArchiveHeapBitmapInfo make_bitmap_info(ResourceBitMap* bitmap, bool is_open, bool is_oopmap); - static bool is_in_requested_regions(oop o); + static void set_requested_address(ArchiveHeapInfo* info); + static void relocate_embedded_oops(GrowableArrayCHeap* roots, ArchiveHeapInfo* info); + static void compute_ptrmap(ArchiveHeapInfo *info); + static bool is_in_requested_range(oop o); static oop requested_obj_from_buffer_offset(size_t offset); static oop load_oop_from_buffer(oop* buffered_addr); @@ -169,9 +167,9 @@ class ArchiveHeapWriter : AllStatic { template static void store_requested_oop_in_buffer(T* buffered_addr, oop request_oop); template static T* requested_addr_to_buffered_addr(T* p); - template static void relocate_field_in_buffer(T* field_addr_in_buffer); - template static void mark_oop_pointer(T* buffered_addr); - template static void 
relocate_root_at(oop requested_roots, int index); + template static void relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap); + template static void mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap); + template static void relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap); static void update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass); public: @@ -180,14 +178,11 @@ public: static bool is_too_large_to_archive(size_t size); static bool is_too_large_to_archive(oop obj); static bool is_string_too_large_to_archive(oop string); - static void write(GrowableArrayCHeap*, - GrowableArray* closed_regions, GrowableArray* open_regions, - GrowableArray* closed_bitmaps, - GrowableArray* open_bitmaps); - static address heap_region_requested_bottom(int heap_region_idx); - static oop heap_roots_requested_address(); + static void write(GrowableArrayCHeap*, ArchiveHeapInfo* heap_info); + static address requested_address(); // requested address of the lowest achived heap object + static oop heap_roots_requested_address(); // requested address of HeapShared::roots() static address buffered_heap_roots_addr() { - return offset_to_buffered_address
(_heap_roots_bottom); + return offset_to_buffered_address
(_heap_roots_bottom_offset); } static size_t heap_roots_word_size() { return _heap_roots_word_size; diff --git a/src/hotspot/share/cds/archiveUtils.cpp b/src/hotspot/share/cds/archiveUtils.cpp index 260424fcb37..952f69749b3 100644 --- a/src/hotspot/share/cds/archiveUtils.cpp +++ b/src/hotspot/share/cds/archiveUtils.cpp @@ -164,8 +164,8 @@ char* DumpRegion::expand_top_to(char* newtop) { // This is just a sanity check and should not appear in any real world usage. This // happens only if you allocate more than 2GB of shared objects and would require // millions of shared classes. - vm_exit_during_initialization("Out of memory in the CDS archive", - "Please reduce the number of shared classes."); + log_error(cds)("Out of memory in the CDS archive: Please reduce the number of shared classes."); + MetaspaceShared::unrecoverable_writing_error(); } } @@ -190,8 +190,9 @@ void DumpRegion::commit_to(char* newtop) { assert(commit <= uncommitted, "sanity"); if (!_vs->expand_by(commit, false)) { - vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes", - need_committed_size)); + log_error(cds)("Failed to expand shared space to " SIZE_FORMAT " bytes", + need_committed_size); + MetaspaceShared::unrecoverable_writing_error(); } const char* which; @@ -225,7 +226,7 @@ void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) { } void DumpRegion::print(size_t total_bytes) const { - log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT, + log_debug(cds)("%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT, _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(ArchiveBuilder::current()->to_requested(_base))); } diff --git a/src/hotspot/share/cds/dumpAllocStats.cpp b/src/hotspot/share/cds/dumpAllocStats.cpp index 523fe858704..21a40ca8f28 100644 --- 
a/src/hotspot/share/cds/dumpAllocStats.cpp +++ b/src/hotspot/share/cds/dumpAllocStats.cpp @@ -54,7 +54,7 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all) { LogMessage(cds) msg; - msg.debug("Detailed metadata info (excluding heap regions):"); + msg.debug("Detailed metadata info (excluding heap region):"); msg.debug("%s", hdr); msg.debug("%s", sep); for (int type = 0; type < int(_number_of_types); type ++) { diff --git a/src/hotspot/share/cds/dynamicArchive.cpp b/src/hotspot/share/cds/dynamicArchive.cpp index e553f6598fd..0aa11ef8d5b 100644 --- a/src/hotspot/share/cds/dynamicArchive.cpp +++ b/src/hotspot/share/cds/dynamicArchive.cpp @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "cds/archiveBuilder.hpp" +#include "cds/archiveHeapWriter.hpp" #include "cds/archiveUtils.inline.hpp" #include "cds/cds_globals.hpp" #include "cds/classPrelinker.hpp" @@ -323,7 +324,8 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data) { assert(dynamic_info != nullptr, "Sanity"); dynamic_info->open_for_write(); - ArchiveBuilder::write_archive(dynamic_info, nullptr, nullptr, nullptr, nullptr); + ArchiveHeapInfo no_heap_for_dynamic_dump; + ArchiveBuilder::write_archive(dynamic_info, &no_heap_for_dynamic_dump); address base = _requested_dynamic_archive_bottom; address top = _requested_dynamic_archive_top; @@ -367,7 +369,8 @@ void DynamicArchive::check_for_dynamic_dump() { #define __THEMSG " is unsupported when base CDS archive is not loaded. Run with -Xlog:cds for more info." 
if (RecordDynamicDumpInfo) { - vm_exit_during_initialization("-XX:+RecordDynamicDumpInfo" __THEMSG, nullptr); + log_error(cds)("-XX:+RecordDynamicDumpInfo%s", __THEMSG); + MetaspaceShared::unrecoverable_loading_error(); } else { assert(ArchiveClassesAtExit != nullptr, "sanity"); log_warning(cds)("-XX:ArchiveClassesAtExit" __THEMSG); diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp index da3775d28e2..8e2a5b29ee8 100644 --- a/src/hotspot/share/cds/filemap.cpp +++ b/src/hotspot/share/cds/filemap.cpp @@ -80,29 +80,6 @@ #define O_BINARY 0 // otherwise do nothing. #endif -// Complain and stop. All error conditions occurring during the writing of -// an archive file should stop the process. Unrecoverable errors during -// the reading of the archive file should stop the process. - -static void fail_exit(const char *msg, va_list ap) { - // This occurs very early during initialization: tty is not initialized. - jio_fprintf(defaultStream::error_stream(), - "An error has occurred while processing the" - " shared archive file.\n"); - jio_vfprintf(defaultStream::error_stream(), msg, ap); - jio_fprintf(defaultStream::error_stream(), "\n"); - // Do not change the text of the below message because some tests check for it. - vm_exit_during_initialization("Unable to use shared archive.", nullptr); -} - - -void FileMapInfo::fail_stop(const char *msg, ...) { - va_list ap; - va_start(ap, msg); - fail_exit(msg, ap); // Never returns. - va_end(ap); // for completeness. -} - // Fill in the fileMapInfo structure with data about this VM instance. // This method copies the vm version info into header_version. If the version is too @@ -367,7 +344,8 @@ void SharedClassPathEntry::init(bool is_modules_image, // // If we can't access a jar file in the boot path, then we can't // make assumptions about where classes get loaded from. 
- FileMapInfo::fail_stop("Unable to open file %s.", cpe->name()); + log_error(cds)("Unable to open file %s.", cpe->name()); + MetaspaceShared::unrecoverable_loading_error(); } // No need to save the name of the module file, as it will be computed at run time @@ -383,6 +361,7 @@ void SharedClassPathEntry::set_name(const char* name, TRAPS) { } void SharedClassPathEntry::copy_from(SharedClassPathEntry* ent, ClassLoaderData* loader_data, TRAPS) { + assert(ent != NULL, "sanity"); _type = ent->_type; _is_module_path = ent->_is_module_path; _timestamp = ent->_timestamp; @@ -1097,7 +1076,8 @@ bool FileMapInfo::validate_shared_path_table() { const char* hint_msg = log_is_enabled(Info, class, path) ? "" : " (hint: enable -Xlog:class+path=info to diagnose the failure)"; if (RequireSharedSpaces) { - fail_stop("%s%s", mismatch_msg, hint_msg); + log_error(cds)("%s%s", mismatch_msg, hint_msg); + MetaspaceShared::unrecoverable_loading_error(); } else { log_warning(cds)("%s%s", mismatch_msg, hint_msg); } @@ -1434,7 +1414,7 @@ bool FileMapInfo::init_from_file(int fd) { size_t len = os::lseek(fd, 0, SEEK_END); - for (int i = 0; i <= MetaspaceShared::last_valid_region; i++) { + for (int i = 0; i < MetaspaceShared::n_regions; i++) { FileMapRegion* r = region_at(i); if (r->file_offset() > len || len - r->file_offset() < r->used()) { log_warning(cds)("The shared archive file has been truncated."); @@ -1447,7 +1427,8 @@ bool FileMapInfo::init_from_file(int fd) { void FileMapInfo::seek_to_position(size_t pos) { if (os::lseek(_fd, (long)pos, SEEK_SET) < 0) { - fail_stop("Unable to seek to position " SIZE_FORMAT, pos); + log_error(cds)("Unable to seek to position " SIZE_FORMAT, pos); + MetaspaceShared::unrecoverable_loading_error(); } } @@ -1493,8 +1474,9 @@ void FileMapInfo::open_for_write() { remove(_full_path); int fd = os::open(_full_path, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0444); if (fd < 0) { - fail_stop("Unable to create shared archive file %s: (%s).", _full_path, - 
os::strerror(errno)); + log_error(cds)("Unable to create shared archive file %s: (%s).", _full_path, + os::strerror(errno)); + MetaspaceShared::unrecoverable_writing_error(); } _fd = fd; _file_open = true; @@ -1534,12 +1516,14 @@ void FileMapRegion::init(int region_index, size_t mapping_offset, size_t size, b _mapped_base = nullptr; } -void FileMapRegion::init_bitmaps(ArchiveHeapBitmapInfo oopmap, ArchiveHeapBitmapInfo ptrmap) { - _oopmap_offset = oopmap._bm_region_offset; - _oopmap_size_in_bits = oopmap._size_in_bits; +void FileMapRegion::init_oopmap(size_t offset, size_t size_in_bits) { + _oopmap_offset = offset; + _oopmap_size_in_bits = size_in_bits; +} - _ptrmap_offset = ptrmap._bm_region_offset; - _ptrmap_size_in_bits = ptrmap._size_in_bits; +void FileMapRegion::init_ptrmap(size_t offset, size_t size_in_bits) { + _ptrmap_offset = offset; + _ptrmap_size_in_bits = size_in_bits; } BitMapView FileMapRegion::bitmap_view(bool is_oopmap) { @@ -1578,7 +1562,7 @@ bool FileMapRegion::check_region_crc() const { static const char* region_name(int region_index) { static const char* names[] = { - "rw", "ro", "bm", "ca0", "ca1", "oa0", "oa1" + "rw", "ro", "bm", "hp" }; const int num_regions = sizeof(names)/sizeof(names[0]); assert(0 <= region_index && region_index < num_regions, "sanity"); @@ -1619,7 +1603,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size, assert(HeapShared::can_write(), "sanity"); #if INCLUDE_CDS_JAVA_HEAP assert(!DynamicDumpSharedSpaces, "must be"); - requested_base = (char*)ArchiveHeapWriter::heap_region_requested_bottom(region); + requested_base = (char*)ArchiveHeapWriter::requested_address(); if (UseCompressedOops) { mapping_offset = (size_t)((address)requested_base - CompressedOops::base()); assert((mapping_offset >> CompressedOops::shift()) << CompressedOops::shift() == mapping_offset, "must be"); @@ -1639,7 +1623,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size, r->set_file_offset(_file_offset); int crc = 
ClassLoader::crc32(0, base, (jint)size); if (size > 0) { - log_info(cds)("Shared file region (%-3s) %d: " SIZE_FORMAT_W(8) + log_info(cds)("Shared file region (%s) %d: " SIZE_FORMAT_W(8) " bytes, addr " INTPTR_FORMAT " file offset 0x%08" PRIxPTR " crc 0x%08x", region_name(region), region, size, p2i(requested_base), _file_offset, crc); @@ -1652,112 +1636,49 @@ void FileMapInfo::write_region(int region, char* base, size_t size, } } -size_t FileMapInfo::set_bitmaps_offset(GrowableArray* bitmaps, size_t curr_size) { - for (int i = 0; i < bitmaps->length(); i++) { - bitmaps->at(i)._bm_region_offset = curr_size; - curr_size += bitmaps->at(i)._size_in_bytes; - } - return curr_size; +static size_t write_bitmap(const CHeapBitMap* map, char* output, size_t offset) { + size_t size_in_bytes = map->size_in_bytes(); + map->write_to((BitMap::bm_word_t*)(output + offset), size_in_bytes); + return offset + size_in_bytes; } -size_t FileMapInfo::write_bitmaps(GrowableArray* bitmaps, size_t curr_offset, char* buffer) { - for (int i = 0; i < bitmaps->length(); i++) { - memcpy(buffer + curr_offset, bitmaps->at(i)._map, bitmaps->at(i)._size_in_bytes); - curr_offset += bitmaps->at(i)._size_in_bytes; - } - return curr_offset; -} - -char* FileMapInfo::write_bitmap_region(const CHeapBitMap* ptrmap, - GrowableArray* closed_bitmaps, - GrowableArray* open_bitmaps, +char* FileMapInfo::write_bitmap_region(const CHeapBitMap* ptrmap, ArchiveHeapInfo* heap_info, size_t &size_in_bytes) { - size_t size_in_bits = ptrmap->size(); size_in_bytes = ptrmap->size_in_bytes(); - if (closed_bitmaps != nullptr && open_bitmaps != nullptr) { - size_in_bytes = set_bitmaps_offset(closed_bitmaps, size_in_bytes); - size_in_bytes = set_bitmaps_offset(open_bitmaps, size_in_bytes); + if (heap_info->is_used()) { + size_in_bytes += heap_info->oopmap()->size_in_bytes(); + size_in_bytes += heap_info->ptrmap()->size_in_bytes(); } + // The bitmap region contains up to 3 parts: + // ptrmap: metaspace pointers inside the ro/rw 
regions + // heap_info->oopmap(): Java oop pointers in the heap region + // heap_info->ptrmap(): metaspace pointers in the heap region char* buffer = NEW_C_HEAP_ARRAY(char, size_in_bytes, mtClassShared); - ptrmap->write_to((BitMap::bm_word_t*)buffer, ptrmap->size_in_bytes()); - header()->set_ptrmap_size_in_bits(size_in_bits); + size_t written = 0; + written = write_bitmap(ptrmap, buffer, written); + header()->set_ptrmap_size_in_bits(ptrmap->size()); - if (closed_bitmaps != nullptr && open_bitmaps != nullptr) { - size_t curr_offset = write_bitmaps(closed_bitmaps, ptrmap->size_in_bytes(), buffer); - write_bitmaps(open_bitmaps, curr_offset, buffer); + if (heap_info->is_used()) { + FileMapRegion* r = region_at(MetaspaceShared::hp); + + r->init_oopmap(written, heap_info->oopmap()->size()); + written = write_bitmap(heap_info->oopmap(), buffer, written); + + r->init_ptrmap(written, heap_info->ptrmap()->size()); + written = write_bitmap(heap_info->ptrmap(), buffer, written); } write_region(MetaspaceShared::bm, (char*)buffer, size_in_bytes, /*read_only=*/true, /*allow_exec=*/false); return buffer; } -// Write out the given archive heap memory regions. GC code combines multiple -// consecutive archive GC regions into one MemRegion whenever possible and -// produces the 'regions' array. -// -// If the archive heap memory size is smaller than a single dump time GC region -// size, there is only one MemRegion in the array. -// -// If the archive heap memory size is bigger than one dump time GC region size, -// the 'regions' array may contain more than one consolidated MemRegions. When -// the first/bottom archive GC region is a partial GC region (with the empty -// portion at the higher address within the region), one MemRegion is used for -// the bottom partial archive GC region. The rest of the consecutive archive -// GC regions are combined into another MemRegion. -// -// Here's the mapping from (archive heap GC regions) -> (GrowableArray *regions). 
-// + We have 1 or more archive heap regions: ah0, ah1, ah2 ..... ahn -// + We have 1 or 2 consolidated heap memory regions: r0 and r1 -// -// If there's a single archive GC region (ah0), then r0 == ah0, and r1 is empty. -// Otherwise: -// -// "X" represented space that's occupied by heap objects. -// "_" represented unused spaced in the heap region. -// -// -// |ah0 | ah1 | ah2| ...... | ahn| -// |XXXXXX|__ |XXXXX|XXXX|XXXXXXXX|XXXX| -// |<-r0->| |<- r1 ----------------->| -// ^^^ -// | -// +-- gap -size_t FileMapInfo::write_heap_regions(GrowableArray* regions, - GrowableArray* bitmaps, - int first_region_id, int max_num_regions) { - assert(max_num_regions <= 2, "Only support maximum 2 memory regions"); - - int arr_len = regions == nullptr ? 0 : regions->length(); - if (arr_len > max_num_regions) { - fail_stop("Unable to write archive heap memory regions: " - "number of memory regions exceeds maximum due to fragmentation. " - "Please increase java heap size " - "(current MaxHeapSize is " SIZE_FORMAT ", InitialHeapSize is " SIZE_FORMAT ").", - MaxHeapSize, InitialHeapSize); - } - - size_t total_size = 0; - for (int i = 0; i < max_num_regions; i++) { - char* start = nullptr; - size_t size = 0; - if (i < arr_len) { - start = (char*)regions->at(i).start(); - size = regions->at(i).byte_size(); - total_size += size; - } - - int region_idx = i + first_region_id; - write_region(region_idx, start, size, false, false); - if (size > 0) { - int oopmap_idx = i * 2; - int ptrmap_idx = i * 2 + 1; - region_at(region_idx)->init_bitmaps(bitmaps->at(oopmap_idx), - bitmaps->at(ptrmap_idx)); - } - } - return total_size; +size_t FileMapInfo::write_heap_region(ArchiveHeapInfo* heap_info) { + char* start = heap_info->start(); + size_t size = heap_info->byte_size(); + write_region(MetaspaceShared::hp, start, size, false, false); + return size; } // Dump bytes to file -- at the current file position. 
@@ -1769,7 +1690,7 @@ void FileMapInfo::write_bytes(const void* buffer, size_t nbytes) { // If the shared archive is corrupted, close it and remove it. close(); remove(_full_path); - fail_stop("Unable to write to shared archive file."); + MetaspaceShared::unrecoverable_writing_error("Unable to write to shared archive file."); } _file_offset += nbytes; } @@ -1810,7 +1731,7 @@ void FileMapInfo::write_bytes_aligned(const void* buffer, size_t nbytes) { void FileMapInfo::close() { if (_file_open) { if (::close(_fd) < 0) { - fail_stop("Unable to close the shared archive file."); + MetaspaceShared::unrecoverable_loading_error("Unable to close the shared archive file."); } _file_open = false; _fd = -1; @@ -1850,8 +1771,7 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() { } // Memory map a region in the address space. -static const char* shared_region_name[] = { "ReadWrite", "ReadOnly", "Bitmap", - "String1", "String2", "OpenArchive1", "OpenArchive2" }; +static const char* shared_region_name[] = { "ReadWrite", "ReadOnly", "Bitmap", "Heap" }; MapArchiveResult FileMapInfo::map_regions(int regions[], int num_regions, char* mapped_base_address, ReservedSpace rs) { DEBUG_ONLY(FileMapRegion* last_region = nullptr); @@ -2072,58 +1992,38 @@ size_t FileMapInfo::readonly_total() { return total; } -static MemRegion *closed_heap_regions = nullptr; -static MemRegion *open_heap_regions = nullptr; -static int num_closed_heap_regions = 0; -static int num_open_heap_regions = 0; - #if INCLUDE_CDS_JAVA_HEAP -bool FileMapInfo::has_heap_regions() { - return (region_at(MetaspaceShared::first_closed_heap_region)->used() > 0); +MemRegion FileMapInfo::_mapped_heap_memregion; + +bool FileMapInfo::has_heap_region() { + return (region_at(MetaspaceShared::hp)->used() > 0); } -// Returns the address range of the archived heap regions computed using the +// Returns the address range of the archived heap region computed using the // current oop encoding mode. 
This range may be different than the one seen at // dump time due to encoding mode differences. The result is used in determining // if/how these regions should be relocated at run time. -MemRegion FileMapInfo::get_heap_regions_requested_range() { - address start = (address) max_uintx; - address end = nullptr; +MemRegion FileMapInfo::get_heap_region_requested_range() { + FileMapRegion* r = region_at(MetaspaceShared::hp); + size_t size = r->used(); + assert(size > 0, "must have non-empty heap region"); - for (int i = MetaspaceShared::first_closed_heap_region; - i <= MetaspaceShared::last_valid_region; - i++) { - FileMapRegion* r = region_at(i); - size_t size = r->used(); - if (size > 0) { - address s = heap_region_requested_address(r); - address e = s + size; - log_info(cds)("Heap region %s = " INTPTR_FORMAT " - " INTPTR_FORMAT " = " SIZE_FORMAT_W(8) " bytes", - region_name(i), p2i(s), p2i(e), size); - if (start > s) { - start = s; - } - if (end < e) { - end = e; - } - } - } - assert(end != nullptr, "must have at least one used heap region"); - - start = align_down(start, HeapRegion::GrainBytes); - end = align_up(end, HeapRegion::GrainBytes); + address start = heap_region_requested_address(); + address end = start + size; + log_info(cds)("Requested heap region [" INTPTR_FORMAT " - " INTPTR_FORMAT "] = " SIZE_FORMAT_W(8) " bytes", + p2i(start), p2i(end), size); return MemRegion((HeapWord*)start, (HeapWord*)end); } -void FileMapInfo::map_or_load_heap_regions() { +void FileMapInfo::map_or_load_heap_region() { bool success = false; - if (can_use_heap_regions()) { + if (can_use_heap_region()) { if (ArchiveHeapLoader::can_map()) { - success = map_heap_regions(); + success = map_heap_region(); } else if (ArchiveHeapLoader::can_load()) { - success = ArchiveHeapLoader::load_heap_regions(this); + success = ArchiveHeapLoader::load_heap_region(this); } else { if (!UseCompressedOops && !ArchiveHeapLoader::can_map()) { // TODO - remove implicit knowledge of G1 @@ -2139,8 +2039,8 
@@ void FileMapInfo::map_or_load_heap_regions() { } } -bool FileMapInfo::can_use_heap_regions() { - if (!has_heap_regions()) { +bool FileMapInfo::can_use_heap_region() { + if (!has_heap_region()) { return false; } if (JvmtiExport::should_post_class_file_load_hook() && JvmtiExport::has_early_class_hook_env()) { @@ -2186,22 +2086,22 @@ bool FileMapInfo::can_use_heap_regions() { } // The actual address of this region during dump time. -address FileMapInfo::heap_region_dumptime_address(FileMapRegion* r) { +address FileMapInfo::heap_region_dumptime_address() { + FileMapRegion* r = region_at(MetaspaceShared::hp); assert(UseSharedSpaces, "runtime only"); - r->assert_is_heap_region(); assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be"); if (UseCompressedOops) { return /*dumptime*/ narrow_oop_base() + r->mapping_offset(); } else { - return heap_region_requested_address(r); + return heap_region_requested_address(); } } // The address where this region can be mapped into the runtime heap without // patching any of the pointers that are embedded in this region. -address FileMapInfo::heap_region_requested_address(FileMapRegion* r) { +address FileMapInfo::heap_region_requested_address() { assert(UseSharedSpaces, "runtime only"); - r->assert_is_heap_region(); + FileMapRegion* r = region_at(MetaspaceShared::hp); assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be"); assert(ArchiveHeapLoader::can_map(), "cannot be used by ArchiveHeapLoader::can_load() mode"); if (UseCompressedOops) { @@ -2227,284 +2127,171 @@ address FileMapInfo::heap_region_requested_address(FileMapRegion* r) { // The address where this shared heap region is actually mapped at runtime. This function // can be called only after we have determined the value for ArchiveHeapLoader::mapped_heap_delta(). 
-address FileMapInfo::heap_region_mapped_address(FileMapRegion* r) { +address FileMapInfo::heap_region_mapped_address() { assert(UseSharedSpaces, "runtime only"); - r->assert_is_heap_region(); assert(ArchiveHeapLoader::can_map(), "cannot be used by ArchiveHeapLoader::can_load() mode"); - return heap_region_requested_address(r) + ArchiveHeapLoader::mapped_heap_delta(); + return heap_region_requested_address() + ArchiveHeapLoader::mapped_heap_delta(); } -// -// Map the closed and open archive heap objects to the runtime java heap. -// -// The shared objects are mapped at (or close to ) the java heap top in -// closed archive regions. The mapped objects contain no out-going -// references to any other java heap regions. GC does not write into the -// mapped closed archive heap region. -// -// The open archive heap objects are mapped below the shared objects in -// the runtime java heap. The mapped open archive heap data only contains -// references to the shared objects and open archive objects initially. -// During runtime execution, out-going references to any other java heap -// regions may be added. GC may mark and update references in the mapped -// open archive objects. -void FileMapInfo::map_heap_regions_impl() { - // G1 -- always map at the very top of the heap to avoid fragmentation. 
- assert(UseG1GC, "the following code assumes G1"); - _heap_pointers_need_patching = false; - - MemRegion heap_range = G1CollectedHeap::heap()->reserved(); - MemRegion archive_range = get_heap_regions_requested_range(); - - address heap_end = (address)heap_range.end(); - address archive_end = (address)archive_range.end(); - - assert(is_aligned(heap_end, HeapRegion::GrainBytes), "must be"); - assert(is_aligned(archive_end, HeapRegion::GrainBytes), "must be"); - - if (UseCompressedOops && - (narrow_oop_mode() != CompressedOops::mode() || - narrow_oop_shift() != CompressedOops::shift())) { - log_info(cds)("CDS heap data needs to be relocated because the archive was created with an incompatible oop encoding mode."); - _heap_pointers_need_patching = true; - } else if (!heap_range.contains(archive_range)) { - log_info(cds)("CDS heap data needs to be relocated because"); - log_info(cds)("the desired range " PTR_FORMAT " - " PTR_FORMAT, p2i(archive_range.start()), p2i(archive_range.end())); - log_info(cds)("is outside of the heap " PTR_FORMAT " - " PTR_FORMAT, p2i(heap_range.start()), p2i(heap_range.end())); - _heap_pointers_need_patching = true; - } else { - assert(heap_end >= archive_end, "must be"); - if (heap_end != archive_end) { - log_info(cds)("CDS heap data needs to be relocated to the end of the runtime heap to reduce fragmentation"); - _heap_pointers_need_patching = true; - } - } - - ptrdiff_t delta = 0; - if (_heap_pointers_need_patching) { - delta = heap_end - archive_end; - } - - log_info(cds)("CDS heap data relocation delta = " INTX_FORMAT " bytes", delta); - - FileMapRegion* r = region_at(MetaspaceShared::first_closed_heap_region); - address relocated_closed_heap_region_bottom = heap_region_requested_address(r) + delta; - - if (!is_aligned(relocated_closed_heap_region_bottom, HeapRegion::GrainBytes)) { - // Align the bottom of the closed archive heap regions at G1 region boundary. 
- // This will avoid the situation where the highest open region and the lowest - // closed region sharing the same G1 region. Otherwise we will fail to map the - // open regions. - size_t align = size_t(relocated_closed_heap_region_bottom) % HeapRegion::GrainBytes; - delta -= align; - log_info(cds)("CDS heap data needs to be relocated lower by a further " SIZE_FORMAT - " bytes to " INTX_FORMAT " to be aligned with HeapRegion::GrainBytes", - align, delta); - _heap_pointers_need_patching = true; - } - - ArchiveHeapLoader::init_mapped_heap_relocation(delta, narrow_oop_shift()); - relocated_closed_heap_region_bottom = heap_region_mapped_address(r); - - assert(is_aligned(relocated_closed_heap_region_bottom, HeapRegion::GrainBytes), - "must be"); +bool FileMapInfo::map_heap_region() { + init_heap_region_relocation(); if (_heap_pointers_need_patching) { char* bitmap_base = map_bitmap_region(); if (bitmap_base == nullptr) { log_info(cds)("CDS heap cannot be used because bitmap region cannot be mapped"); _heap_pointers_need_patching = false; - return; + return false; } } - // Map the closed heap regions: GC does not write into these regions. - if (map_heap_regions(MetaspaceShared::first_closed_heap_region, - MetaspaceShared::max_num_closed_heap_regions, - /*is_open_archive=*/ false, - &closed_heap_regions, &num_closed_heap_regions)) { - ArchiveHeapLoader::set_closed_regions_mapped(); + if (map_heap_region_impl()) { +#ifdef ASSERT + // The "old" regions must be parsable -- we cannot have any unused space + // at the start of the lowest G1 region that contains archived objects. + assert(is_aligned(_mapped_heap_memregion.start(), HeapRegion::GrainBytes), "must be"); - // Now, map the open heap regions: GC can write into these regions. 
- if (map_heap_regions(MetaspaceShared::first_open_heap_region, - MetaspaceShared::max_num_open_heap_regions, - /*is_open_archive=*/ true, - &open_heap_regions, &num_open_heap_regions)) { - ArchiveHeapLoader::set_open_regions_mapped(); - } - } -} + // Make sure we map at the very top of the heap - see comments in + // init_heap_region_relocation(). + MemRegion heap_range = G1CollectedHeap::heap()->reserved(); + assert(heap_range.contains(_mapped_heap_memregion), "must be"); -bool FileMapInfo::map_heap_regions() { - map_heap_regions_impl(); + address heap_end = (address)heap_range.end(); + address mapped_heap_region_end = (address)_mapped_heap_memregion.end(); + assert(heap_end >= mapped_heap_region_end, "must be"); + assert(heap_end - mapped_heap_region_end < (intx)(HeapRegion::GrainBytes), + "must be at the top of the heap to avoid fragmentation"); +#endif - if (!ArchiveHeapLoader::closed_regions_mapped()) { - assert(closed_heap_regions == nullptr && - num_closed_heap_regions == 0, "sanity"); - } - - if (!ArchiveHeapLoader::open_regions_mapped()) { - assert(open_heap_regions == nullptr && num_open_heap_regions == 0, "sanity"); - return false; - } else { + ArchiveHeapLoader::set_mapped(); return true; + } else { + return false; } } -bool FileMapInfo::map_heap_regions(int first, int max, bool is_open_archive, - MemRegion** regions_ret, int* num_regions_ret) { - MemRegion* regions = MemRegion::create_array(max, mtInternal); +void FileMapInfo::init_heap_region_relocation() { + assert(UseG1GC, "the following code assumes G1"); + _heap_pointers_need_patching = false; - struct Cleanup { - MemRegion* _regions; - uint _length; - bool _aborted; - Cleanup(MemRegion* regions, uint length) : _regions(regions), _length(length), _aborted(true) { } - ~Cleanup() { if (_aborted) { MemRegion::destroy_array(_regions, _length); } } - } cleanup(regions, max); + MemRegion heap_range = G1CollectedHeap::heap()->reserved(); + MemRegion archive_range = get_heap_region_requested_range(); - 
FileMapRegion* r; - int num_regions = 0; + address requested_bottom = (address)archive_range.start(); + address heap_end = (address)heap_range.end(); + assert(is_aligned(heap_end, HeapRegion::GrainBytes), "must be"); - for (int i = first; - i < first + max; i++) { - r = region_at(i); - size_t size = r->used(); - if (size > 0) { - HeapWord* start = (HeapWord*)heap_region_mapped_address(r); - regions[num_regions] = MemRegion(start, size / HeapWordSize); - num_regions ++; - log_info(cds)("Trying to map heap data: region[%d] at " INTPTR_FORMAT ", size = " SIZE_FORMAT_W(8) " bytes", - i, p2i(start), size); - } + // We map the archive heap region at the very top of the heap to avoid fragmentation. + // To do that, we make sure that the bottom of the archived region is at the same + // address as the bottom of the highest possible G1 region. + address mapped_bottom = heap_end - align_up(archive_range.byte_size(), HeapRegion::GrainBytes); + + if (UseCompressedOops && + (narrow_oop_mode() != CompressedOops::mode() || + narrow_oop_shift() != CompressedOops::shift())) { + log_info(cds)("CDS heap data needs to be relocated because the archive was created with an incompatible oop encoding mode."); + _heap_pointers_need_patching = true; + } else if (requested_bottom != mapped_bottom) { + log_info(cds)("CDS heap data needs to be relocated because it is mapped at a different address @ " INTPTR_FORMAT, + p2i(mapped_bottom)); + _heap_pointers_need_patching = true; } - if (num_regions == 0) { + ptrdiff_t delta = 0; + if (_heap_pointers_need_patching) { + delta = mapped_bottom - requested_bottom; + } + + log_info(cds)("CDS heap data relocation delta = " INTX_FORMAT " bytes", delta); + ArchiveHeapLoader::init_mapped_heap_relocation(delta, narrow_oop_shift()); +} + +bool FileMapInfo::map_heap_region_impl() { + FileMapRegion* r = region_at(MetaspaceShared::hp); + size_t size = r->used(); + + if (size > 0) { + HeapWord* start = (HeapWord*)heap_region_mapped_address(); + 
_mapped_heap_memregion = MemRegion(start, size / HeapWordSize); + log_info(cds)("Trying to map heap data at " INTPTR_FORMAT ", size = " SIZE_FORMAT_W(8) " bytes", + p2i(start), size); + } else { return false; // no archived java heap data } - // Check that regions are within the java heap - if (!G1CollectedHeap::heap()->check_archive_addresses(regions, num_regions)) { + // Check that the region is within the java heap + if (!G1CollectedHeap::heap()->check_archive_addresses(_mapped_heap_memregion)) { log_info(cds)("Unable to allocate region, range is not within java heap."); return false; } // allocate from java heap - if (!G1CollectedHeap::heap()->alloc_archive_regions( - regions, num_regions, is_open_archive)) { + if (!G1CollectedHeap::heap()->alloc_archive_regions(_mapped_heap_memregion)) { log_info(cds)("Unable to allocate region, java heap range is already in use."); return false; } // Map the archived heap data. No need to call MemTracker::record_virtual_memory_type() - // for mapped regions as they are part of the reserved java heap, which is - // already recorded. - for (int i = 0; i < num_regions; i++) { - r = region_at(first + i); - char* addr = (char*)regions[i].start(); - char* base = os::map_memory(_fd, _full_path, r->file_offset(), - addr, regions[i].byte_size(), r->read_only(), - r->allow_exec()); - if (base == nullptr || base != addr) { - // dealloc the regions from java heap - dealloc_heap_regions(regions, num_regions); - log_info(cds)("Unable to map at required address in java heap. " - INTPTR_FORMAT ", size = " SIZE_FORMAT " bytes", - p2i(addr), regions[i].byte_size()); - return false; - } - - r->set_mapped_base(base); - if (VerifySharedSpaces && !r->check_region_crc()) { - // dealloc the regions from java heap - dealloc_heap_regions(regions, num_regions); - log_info(cds)("mapped heap regions are corrupt"); - return false; - } + // for mapped region as it is part of the reserved java heap, which is already recorded. 
+ char* addr = (char*)_mapped_heap_memregion.start(); + char* base = os::map_memory(_fd, _full_path, r->file_offset(), + addr, _mapped_heap_memregion.byte_size(), r->read_only(), + r->allow_exec()); + if (base == nullptr || base != addr) { + dealloc_heap_region(); + log_info(cds)("UseSharedSpaces: Unable to map at required address in java heap. " + INTPTR_FORMAT ", size = " SIZE_FORMAT " bytes", + p2i(addr), _mapped_heap_memregion.byte_size()); + return false; + } + + r->set_mapped_base(base); + if (VerifySharedSpaces && !r->check_region_crc()) { + dealloc_heap_region(); + log_info(cds)("mapped heap region is corrupt"); + return false; } - cleanup._aborted = false; - // the shared heap data is mapped successfully - *regions_ret = regions; - *num_regions_ret = num_regions; return true; } -void FileMapInfo::patch_heap_embedded_pointers() { - if (!_heap_pointers_need_patching) { - return; - } - - patch_heap_embedded_pointers(closed_heap_regions, - num_closed_heap_regions, - MetaspaceShared::first_closed_heap_region); - - patch_heap_embedded_pointers(open_heap_regions, - num_open_heap_regions, - MetaspaceShared::first_open_heap_region); -} - -narrowOop FileMapInfo::encoded_heap_region_dumptime_address(FileMapRegion* r) { +narrowOop FileMapInfo::encoded_heap_region_dumptime_address() { assert(UseSharedSpaces, "runtime only"); assert(UseCompressedOops, "sanity"); - r->assert_is_heap_region(); + FileMapRegion* r = region_at(MetaspaceShared::hp); return CompressedOops::narrow_oop_cast(r->mapping_offset() >> narrow_oop_shift()); } -void FileMapInfo::patch_heap_embedded_pointers(MemRegion* regions, int num_regions, - int first_region_idx) { +void FileMapInfo::patch_heap_embedded_pointers() { + if (!ArchiveHeapLoader::is_mapped() || !_heap_pointers_need_patching) { + return; + } + char* bitmap_base = map_bitmap_region(); assert(bitmap_base != nullptr, "must have already been mapped"); - for (int i=0; imapped_base()) + r->oopmap_offset(), r->oopmap_size_in_bits()); - } } -// 
This internally allocates objects using vmClasses::Object_klass(), so it -// must be called after the Object_klass is loaded -void FileMapInfo::fixup_mapped_heap_regions() { - assert(vmClasses::Object_klass_loaded(), "must be"); - // If any closed regions were found, call the fill routine to make them parseable. - // Note that closed_heap_regions may be non-null even if no regions were found. - if (num_closed_heap_regions != 0) { - assert(closed_heap_regions != nullptr, - "Null closed_heap_regions array with non-zero count"); - G1CollectedHeap::heap()->fill_archive_regions(closed_heap_regions, - num_closed_heap_regions); - // G1 marking uses the BOT for object chunking during marking in - // G1CMObjArrayProcessor::process_slice(); for this reason we need to - // initialize the BOT for closed archive regions too. - G1CollectedHeap::heap()->populate_archive_regions_bot_part(closed_heap_regions, - num_closed_heap_regions); - } +void FileMapInfo::fixup_mapped_heap_region() { + if (ArchiveHeapLoader::is_mapped()) { + assert(!_mapped_heap_memregion.is_empty(), "sanity"); - // do the same for mapped open archive heap regions - if (num_open_heap_regions != 0) { - assert(open_heap_regions != nullptr, "Null open_heap_regions array with non-zero count"); - G1CollectedHeap::heap()->fill_archive_regions(open_heap_regions, - num_open_heap_regions); - - // Populate the open archive regions' G1BlockOffsetTableParts. That ensures + // Populate the archive regions' G1BlockOffsetTableParts. That ensures // fast G1BlockOffsetTablePart::block_start operations for any given address - // within the open archive regions when trying to find start of an object + // within the archive regions when trying to find start of an object // (e.g. during card table scanning). 
- G1CollectedHeap::heap()->populate_archive_regions_bot_part(open_heap_regions, - num_open_heap_regions); + G1CollectedHeap::heap()->populate_archive_regions_bot_part(_mapped_heap_memregion); } } // dealloc the archive regions from java heap -void FileMapInfo::dealloc_heap_regions(MemRegion* regions, int num) { - if (num > 0) { - assert(regions != nullptr, "Null archive regions array with non-zero count"); - G1CollectedHeap::heap()->dealloc_archive_regions(regions, num); - } +void FileMapInfo::dealloc_heap_region() { + G1CollectedHeap::heap()->dealloc_archive_regions(_mapped_heap_memregion); } #endif // INCLUDE_CDS_JAVA_HEAP @@ -2537,7 +2324,7 @@ void FileMapInfo::unmap_region(int i) { void FileMapInfo::assert_mark(bool check) { if (!check) { - fail_stop("Mark mismatch while restoring from shared file."); + MetaspaceShared::unrecoverable_loading_error("Mark mismatch while restoring from shared file."); } } diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp index 0a21613e300..dce1ad94b62 100644 --- a/src/hotspot/share/cds/filemap.hpp +++ b/src/hotspot/share/cds/filemap.hpp @@ -40,6 +40,7 @@ static const int JVM_IDENT_MAX = 256; +class ArchiveHeapInfo; class BitMapView; class CHeapBitMap; class ClassFileStream; @@ -104,13 +105,6 @@ public: } }; -struct ArchiveHeapBitmapInfo { - address _map; // bitmap for relocating embedded oops - size_t _bm_region_offset; // this bitmap is stored at this offset from the bottom of the BM region - size_t _size_in_bits; - size_t _size_in_bytes; -}; - class SharedPathTable { Array* _table; int _size; @@ -173,7 +167,8 @@ public: void set_mapped_from_file(bool v) { _mapped_from_file = v; } void init(int region_index, size_t mapping_offset, size_t size, bool read_only, bool allow_exec, int crc); - void init_bitmaps(ArchiveHeapBitmapInfo oopmap, ArchiveHeapBitmapInfo ptrmap); + void init_oopmap(size_t offset, size_t size_in_bits); + void init_ptrmap(size_t offset, size_t size_in_bits); BitMapView 
oopmap_view(); BitMapView ptrmap_view(); bool has_ptrmap() { return _ptrmap_size_in_bits != 0; } @@ -451,26 +446,20 @@ public: void write_header(); void write_region(int region, char* base, size_t size, bool read_only, bool allow_exec); - char* write_bitmap_region(const CHeapBitMap* ptrmap, - GrowableArray* closed_bitmaps, - GrowableArray* open_bitmaps, + char* write_bitmap_region(const CHeapBitMap* ptrmap, ArchiveHeapInfo* heap_info, size_t &size_in_bytes); - size_t write_heap_regions(GrowableArray* regions, - GrowableArray* bitmaps, - int first_region_id, int max_num_regions); + size_t write_heap_region(ArchiveHeapInfo* heap_info); void write_bytes(const void* buffer, size_t count); void write_bytes_aligned(const void* buffer, size_t count); size_t read_bytes(void* buffer, size_t count); static size_t readonly_total(); MapArchiveResult map_regions(int regions[], int num_regions, char* mapped_base_address, ReservedSpace rs); void unmap_regions(int regions[], int num_regions); - void map_or_load_heap_regions() NOT_CDS_JAVA_HEAP_RETURN; - void fixup_mapped_heap_regions() NOT_CDS_JAVA_HEAP_RETURN; + void map_or_load_heap_region() NOT_CDS_JAVA_HEAP_RETURN; + void fixup_mapped_heap_region() NOT_CDS_JAVA_HEAP_RETURN; void patch_heap_embedded_pointers() NOT_CDS_JAVA_HEAP_RETURN; - void patch_heap_embedded_pointers(MemRegion* regions, int num_regions, - int first_region_idx) NOT_CDS_JAVA_HEAP_RETURN; - bool has_heap_regions() NOT_CDS_JAVA_HEAP_RETURN_(false); - MemRegion get_heap_regions_requested_range() NOT_CDS_JAVA_HEAP_RETURN_(MemRegion()); + bool has_heap_region() NOT_CDS_JAVA_HEAP_RETURN_(false); + MemRegion get_heap_region_requested_range() NOT_CDS_JAVA_HEAP_RETURN_(MemRegion()); bool read_region(int i, char* base, size_t size, bool do_commit); char* map_bitmap_region(); void unmap_region(int i); @@ -482,8 +471,6 @@ public: // Remap the shared readonly space to shared readwrite, private. bool remap_shared_readonly_as_readwrite(); - // Errors. 
- static void fail_stop(const char *msg, ...) ATTRIBUTE_PRINTF(1, 2); static bool memory_mapping_failed() { CDS_ONLY(return _memory_mapping_failed;) NOT_CDS(return false;) @@ -568,23 +555,22 @@ public: unsigned int runtime_prefix_len) NOT_CDS_RETURN_(false); bool validate_boot_class_paths() NOT_CDS_RETURN_(false); bool validate_app_class_paths(int shared_app_paths_len) NOT_CDS_RETURN_(false); - bool map_heap_regions(int first, int max, bool is_open_archive, - MemRegion** regions_ret, int* num_regions_ret) NOT_CDS_JAVA_HEAP_RETURN_(false); - void dealloc_heap_regions(MemRegion* regions, int num) NOT_CDS_JAVA_HEAP_RETURN; - bool can_use_heap_regions(); - bool load_heap_regions() NOT_CDS_JAVA_HEAP_RETURN_(false); - bool map_heap_regions() NOT_CDS_JAVA_HEAP_RETURN_(false); - void map_heap_regions_impl() NOT_CDS_JAVA_HEAP_RETURN; + bool map_heap_region_impl() NOT_CDS_JAVA_HEAP_RETURN_(false); + void dealloc_heap_region() NOT_CDS_JAVA_HEAP_RETURN; + bool can_use_heap_region(); + bool load_heap_region() NOT_CDS_JAVA_HEAP_RETURN_(false); + bool map_heap_region() NOT_CDS_JAVA_HEAP_RETURN_(false); + void init_heap_region_relocation(); MapArchiveResult map_region(int i, intx addr_delta, char* mapped_base_address, ReservedSpace rs); bool relocate_pointers_in_core_regions(intx addr_delta); - static size_t set_bitmaps_offset(GrowableArray *bitmaps, size_t curr_size); - static size_t write_bitmaps(GrowableArray *bitmaps, size_t curr_offset, char* buffer); + + static MemRegion _mapped_heap_memregion; public: - address heap_region_dumptime_address(FileMapRegion* r) NOT_CDS_JAVA_HEAP_RETURN_(nullptr); - address heap_region_requested_address(FileMapRegion* r) NOT_CDS_JAVA_HEAP_RETURN_(nullptr); - address heap_region_mapped_address(FileMapRegion* r) NOT_CDS_JAVA_HEAP_RETURN_(nullptr); - narrowOop encoded_heap_region_dumptime_address(FileMapRegion* r); + address heap_region_dumptime_address() NOT_CDS_JAVA_HEAP_RETURN_(nullptr); + address heap_region_requested_address() 
NOT_CDS_JAVA_HEAP_RETURN_(nullptr); + address heap_region_mapped_address() NOT_CDS_JAVA_HEAP_RETURN_(nullptr); + narrowOop encoded_heap_region_dumptime_address(); private: diff --git a/src/hotspot/share/cds/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp index 1ef4ea67734..1302d587747 100644 --- a/src/hotspot/share/cds/heapShared.cpp +++ b/src/hotspot/share/cds/heapShared.cpp @@ -82,7 +82,6 @@ struct ArchivableStaticFieldInfo { }; bool HeapShared::_disable_writing = false; -bool HeapShared::_copying_open_region_objects = false; DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr; size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS]; @@ -103,10 +102,7 @@ static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr; // If you add new entries to the following tables, you should know what you're doing! // -// Entry fields for shareable subgraphs archived in the closed archive heap -// region. Warning: Objects in the subgraphs should not have reference fields -// assigned at runtime. -static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = { +static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = { {"java/lang/Integer$IntegerCache", "archivedCache"}, {"java/lang/Long$LongCache", "archivedCache"}, {"java/lang/Byte$ByteCache", "archivedCache"}, @@ -114,10 +110,6 @@ static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = { {"java/lang/Character$CharacterCache", "archivedCache"}, {"java/util/jar/Attributes$Name", "KNOWN_NAMES"}, {"sun/util/locale/BaseLocale", "constantBaseLocales"}, - {nullptr, nullptr}, -}; -// Entry fields for subgraphs archived in the open archive heap region. 
-static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = { {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"}, {"java/util/ImmutableCollections", "archivedObjects"}, {"java/lang/ModuleLayer", "EMPTY_LAYER"}, @@ -129,8 +121,8 @@ static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = { {nullptr, nullptr}, }; -// Entry fields for subgraphs archived in the open archive heap region (full module graph). -static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = { +// full module graph +static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = { {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"}, {"jdk/internal/module/ArchivedBootLayer", "archivedBootLayer"}, {"java/lang/Module$ArchivedData", "archivedData"}, @@ -153,9 +145,8 @@ static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], Instan } bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) { - return is_subgraph_root_class_of(closed_archive_subgraph_entry_fields, ik) || - is_subgraph_root_class_of(open_archive_subgraph_entry_fields, ik) || - is_subgraph_root_class_of(fmg_open_archive_subgraph_entry_fields, ik); + return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) || + is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik); } unsigned HeapShared::oop_hash(oop const& p) { @@ -383,7 +374,7 @@ void HeapShared::archive_java_mirrors() { if (!is_reference_type(bt)) { oop m = _scratch_basic_type_mirrors[i].resolve(); assert(m != nullptr, "sanity"); - bool success = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false); + bool success = archive_reachable_objects_from(1, _default_subgraph_info, m); assert(success, "sanity"); log_trace(cds, heap, mirror)( @@ -401,7 +392,7 @@ void HeapShared::archive_java_mirrors() { oop m = scratch_java_mirror(orig_k); if (m != nullptr) { Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k); - bool 
success = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false); + bool success = archive_reachable_objects_from(1, _default_subgraph_info, m); guarantee(success, "scratch mirrors must point to only archivable objects"); buffered_k->set_archived_java_mirror(append_root(m)); ResourceMark rm; @@ -414,8 +405,7 @@ void HeapShared::archive_java_mirrors() { InstanceKlass* ik = InstanceKlass::cast(buffered_k); oop rr = ik->constants()->prepare_resolved_references_for_archiving(); if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) { - bool success = HeapShared::archive_reachable_objects_from(1, _default_subgraph_info, rr, - /*is_closed_archive=*/false); + bool success = HeapShared::archive_reachable_objects_from(1, _default_subgraph_info, rr); assert(success, "must be"); int root_index = append_root(rr); ik->constants()->cache()->set_archived_references(root_index); @@ -427,7 +417,7 @@ void HeapShared::archive_java_mirrors() { void HeapShared::archive_strings() { oop shared_strings_array = StringTable::init_shared_table(_dumped_interned_strings); - bool success = archive_reachable_objects_from(1, _default_subgraph_info, shared_strings_array, /*is_closed_archive=*/ false); + bool success = archive_reachable_objects_from(1, _default_subgraph_info, shared_strings_array); // We must succeed because: // - _dumped_interned_strings do not contain any large strings. // - StringTable::init_shared_table() doesn't create any large arrays. @@ -463,8 +453,7 @@ void HeapShared::mark_native_pointers(oop orig_obj) { // the static fields out of the archived heap. 
void HeapShared::check_enum_obj(int level, KlassSubGraphInfo* subgraph_info, - oop orig_obj, - bool is_closed_archive) { + oop orig_obj) { assert(level > 1, "must never be called at the first (outermost) level"); Klass* k = orig_obj->klass(); Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k); @@ -493,7 +482,7 @@ void HeapShared::check_enum_obj(int level, guarantee(false, "static field %s::%s is of the wrong type", ik->external_name(), fd.name()->as_C_string()); } - bool success = archive_reachable_objects_from(level, subgraph_info, oop_field, is_closed_archive); + bool success = archive_reachable_objects_from(level, subgraph_info, oop_field); assert(success, "VM should have exited with unarchivable objects for _level > 1"); int root_index = append_root(oop_field); log_info(cds, heap)("Archived enum obj @%d %s::%s (" INTPTR_FORMAT ")", @@ -532,10 +521,7 @@ bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) { return true; } -void HeapShared::archive_objects(GrowableArray* closed_regions, - GrowableArray* open_regions, - GrowableArray* closed_bitmaps, - GrowableArray* open_bitmaps) { +void HeapShared::archive_objects(ArchiveHeapInfo *heap_info) { { NoSafepointVerifier nsv; @@ -549,19 +535,13 @@ void HeapShared::archive_objects(GrowableArray* closed_regions, p2i((address)G1CollectedHeap::heap()->reserved().start()), UseCompressedOops ? 
p2i(CompressedOops::end()) : p2i((address)G1CollectedHeap::heap()->reserved().end())); - log_info(cds)("Dumping objects to closed archive heap region ..."); - copy_closed_objects(); - - _copying_open_region_objects = true; - - log_info(cds)("Dumping objects to open archive heap region ..."); - copy_open_objects(); + copy_objects(); CDSHeapVerifier::verify(); check_default_subgraph_classes(); } - ArchiveHeapWriter::write(_pending_roots, closed_regions, open_regions, closed_bitmaps, open_bitmaps); + ArchiveHeapWriter::write(_pending_roots, heap_info); } void HeapShared::copy_interned_strings() { @@ -570,9 +550,8 @@ void HeapShared::copy_interned_strings() { auto copier = [&] (oop s, bool value_ignored) { assert(s != nullptr, "sanity"); assert(!ArchiveHeapWriter::is_string_too_large_to_archive(s), "large strings must have been filtered"); - bool success = archive_reachable_objects_from(1, _default_subgraph_info, - s, /*is_closed_archive=*/true); - assert(success, "string must be short enough to be archived"); + bool success = archive_reachable_objects_from(1, _default_subgraph_info, s); + assert(success, "must be"); // Prevent string deduplication from changing the value field to // something not in the archive. 
java_lang_String::set_deduplication_forbidden(s); @@ -582,18 +561,7 @@ void HeapShared::copy_interned_strings() { delete_seen_objects_table(); } -void HeapShared::copy_closed_objects() { - assert(HeapShared::can_write(), "must be"); - - // Archive interned string objects - copy_interned_strings(); - - archive_object_subgraphs(closed_archive_subgraph_entry_fields, - true /* is_closed_archive */, - false /* is_full_module_graph */); -} - -void HeapShared::copy_special_open_objects() { +void HeapShared::copy_special_objects() { // Archive special objects that do not belong to any subgraphs init_seen_objects_table(); archive_java_mirrors(); @@ -601,17 +569,17 @@ void HeapShared::copy_special_open_objects() { delete_seen_objects_table(); } -void HeapShared::copy_open_objects() { +void HeapShared::copy_objects() { assert(HeapShared::can_write(), "must be"); - copy_special_open_objects(); + copy_interned_strings(); + copy_special_objects(); - archive_object_subgraphs(open_archive_subgraph_entry_fields, - false /* is_closed_archive */, + archive_object_subgraphs(archive_subgraph_entry_fields, false /* is_full_module_graph */); + if (MetaspaceShared::use_full_module_graph()) { - archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields, - false /* is_closed_archive */, + archive_object_subgraphs(fmg_archive_subgraph_entry_fields, true /* is_full_module_graph */); Modules::verify_archived_modules(); } @@ -645,8 +613,7 @@ KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) { } // Add an entry field to the current KlassSubGraphInfo. 
-void KlassSubGraphInfo::add_subgraph_entry_field( - int static_field_offset, oop v, bool is_closed_archive) { +void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) { assert(DumpSharedSpaces, "dump time only"); if (_subgraph_entry_fields == nullptr) { _subgraph_entry_fields = @@ -735,7 +702,7 @@ void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) { ResourceMark rm; log_error(cds, heap)("Class %s not allowed in archive heap. Must be in java.base%s", ik->external_name(), extra_msg); - os::_exit(1); + MetaspaceShared::unrecoverable_writing_error(); } bool KlassSubGraphInfo::is_non_early_klass(Klass* k) { @@ -836,7 +803,7 @@ struct CopyKlassSubGraphInfoToArchive : StackObj { // Build the records of archived subgraph infos, which include: // - Entry points to all subgraphs from the containing class mirror. The entry // points are static fields in the mirror. For each entry point, the field -// offset, value and is_closed_archive flag are recorded in the sub-graph +// offset, and value are recorded in the sub-graph // info. The value is stored back to the corresponding field at runtime. // - A list of klasses that need to be loaded/initialized before archived // java object sub-graph can be accessed at runtime. 
@@ -936,9 +903,8 @@ void HeapShared::resolve_classes(JavaThread* current) { if (!ArchiveHeapLoader::is_in_use()) { return; // nothing to do } - resolve_classes_for_subgraphs(current, closed_archive_subgraph_entry_fields); - resolve_classes_for_subgraphs(current, open_archive_subgraph_entry_fields); - resolve_classes_for_subgraphs(current, fmg_open_archive_subgraph_entry_fields); + resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields); + resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields); } void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) { @@ -1115,7 +1081,6 @@ void HeapShared::clear_archived_roots_of(Klass* k) { class WalkOopAndArchiveClosure: public BasicOopIterateClosure { int _level; - bool _is_closed_archive; bool _record_klasses_only; KlassSubGraphInfo* _subgraph_info; oop _referencing_obj; @@ -1126,11 +1091,10 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure { WalkOopAndArchiveClosure* _last; public: WalkOopAndArchiveClosure(int level, - bool is_closed_archive, bool record_klasses_only, KlassSubGraphInfo* subgraph_info, oop orig) : - _level(level), _is_closed_archive(is_closed_archive), + _level(level), _record_klasses_only(record_klasses_only), _subgraph_info(subgraph_info), _referencing_obj(orig) { @@ -1162,7 +1126,7 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure { } bool success = HeapShared::archive_reachable_objects_from( - _level + 1, _subgraph_info, obj, _is_closed_archive); + _level + 1, _subgraph_info, obj); assert(success, "VM should have exited with unarchivable objects for _level > 1"); } } @@ -1178,23 +1142,7 @@ WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = nullptr; HeapShared::CachedOopInfo HeapShared::make_cached_oop_info() { WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current(); oop referrer = (walker == nullptr) ? 
nullptr : walker->referencing_obj(); - return CachedOopInfo(referrer, _copying_open_region_objects); -} - -void HeapShared::check_closed_region_object(InstanceKlass* k) { - // Check fields in the object - for (JavaFieldStream fs(k); !fs.done(); fs.next()) { - if (!fs.access_flags().is_static()) { - BasicType ft = fs.field_descriptor().field_type(); - if (!fs.access_flags().is_final() && is_reference_type(ft)) { - ResourceMark rm; - log_warning(cds, heap)( - "Please check reference field in %s instance in closed archive heap region: %s %s", - k->external_name(), (fs.name())->as_C_string(), - (fs.signature())->as_C_string()); - } - } - } + return CachedOopInfo(referrer); } // (1) If orig_obj has not been archived yet, archive it. @@ -1203,8 +1151,7 @@ void HeapShared::check_closed_region_object(InstanceKlass* k) { // (3) Record the klasses of all orig_obj and all reachable objects. bool HeapShared::archive_reachable_objects_from(int level, KlassSubGraphInfo* subgraph_info, - oop orig_obj, - bool is_closed_archive) { + oop orig_obj) { assert(orig_obj != nullptr, "must be"); if (!JavaClasses::is_supported_for_archiving(orig_obj)) { @@ -1213,7 +1160,7 @@ bool HeapShared::archive_reachable_objects_from(int level, // these objects that are referenced (directly or indirectly) by static fields. ResourceMark rm; log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name()); - os::_exit(1); + MetaspaceShared::unrecoverable_writing_error(); } // java.lang.Class instances cannot be included in an archived object sub-graph. We only support @@ -1223,7 +1170,7 @@ bool HeapShared::archive_reachable_objects_from(int level, // object that is referenced (directly or indirectly) by static fields. 
if (java_lang_Class::is_instance(orig_obj) && subgraph_info != _default_subgraph_info) { log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level); - os::_exit(1); + MetaspaceShared::unrecoverable_writing_error(); } if (has_been_seen_during_subgraph_recording(orig_obj)) { @@ -1252,7 +1199,7 @@ bool HeapShared::archive_reachable_objects_from(int level, // We don't know how to handle an object that has been archived, but some of its reachable // objects cannot be archived. Bail out for now. We might need to fix this in the future if // we have a real use case. - os::_exit(1); + MetaspaceShared::unrecoverable_writing_error(); } } } @@ -1260,14 +1207,10 @@ bool HeapShared::archive_reachable_objects_from(int level, Klass *orig_k = orig_obj->klass(); subgraph_info->add_subgraph_object_klass(orig_k); - WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only, - subgraph_info, orig_obj); + WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj); orig_obj->oop_iterate(&walker); - if (is_closed_archive && orig_k->is_instance_klass()) { - check_closed_region_object(InstanceKlass::cast(orig_k)); - } - check_enum_obj(level + 1, subgraph_info, orig_obj, is_closed_archive); + check_enum_obj(level + 1, subgraph_info, orig_obj); return true; } @@ -1308,8 +1251,7 @@ bool HeapShared::archive_reachable_objects_from(int level, void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k, const char* klass_name, int field_offset, - const char* field_name, - bool is_closed_archive) { + const char* field_name) { assert(DumpSharedSpaces, "dump time only"); assert(k->is_shared_boot_class(), "must be boot class"); @@ -1327,8 +1269,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k, f->print_on(&out); } - bool success = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive); - + bool success = archive_reachable_objects_from(1, subgraph_info, 
f); if (!success) { log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)", klass_name, field_name); @@ -1336,13 +1277,13 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k, // Note: the field value is not preserved in the archived mirror. // Record the field as a new subGraph entry point. The recorded // information is restored from the archive at runtime. - subgraph_info->add_subgraph_entry_field(field_offset, f, is_closed_archive); + subgraph_info->add_subgraph_entry_field(field_offset, f); log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(f)); } } else { // The field contains null, we still need to record the entry point, // so it can be restored at runtime. - subgraph_info->add_subgraph_entry_field(field_offset, nullptr, false); + subgraph_info->add_subgraph_entry_field(field_offset, nullptr); } } @@ -1572,17 +1513,16 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[], void HeapShared::init_subgraph_entry_fields(TRAPS) { assert(HeapShared::can_write(), "must be"); _dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable(); - init_subgraph_entry_fields(closed_archive_subgraph_entry_fields, CHECK); - init_subgraph_entry_fields(open_archive_subgraph_entry_fields, CHECK); + init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK); if (MetaspaceShared::use_full_module_graph()) { - init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields, CHECK); + init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK); } } #ifndef PRODUCT void HeapShared::setup_test_class(const char* test_class_name) { - ArchivableStaticFieldInfo* p = open_archive_subgraph_entry_fields; - int num_slots = sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo); + ArchivableStaticFieldInfo* p = archive_subgraph_entry_fields; + int num_slots = sizeof(archive_subgraph_entry_fields) / 
sizeof(ArchivableStaticFieldInfo); assert(p[num_slots - 2].klass_name == nullptr, "must have empty slot that's patched below"); assert(p[num_slots - 1].klass_name == nullptr, "must have empty slot that marks the end of the list"); @@ -1649,7 +1589,6 @@ void HeapShared::init_for_dumping(TRAPS) { } void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[], - bool is_closed_archive, bool is_full_module_graph) { _num_total_subgraph_recordings = 0; _num_total_walked_objs = 0; @@ -1680,14 +1619,12 @@ void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[], } archive_reachable_objects_from_static_field(f->klass, f->klass_name, - f->offset, f->field_name, - is_closed_archive); + f->offset, f->field_name); } done_recording_subgraph(info->klass, klass_name); } - log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d", - is_closed_archive ? "closed" : "open", + log_info(cds, heap)("Archived subgraph records = %d", _num_total_subgraph_recordings); log_info(cds, heap)(" Walked %d objects", _num_total_walked_objs); log_info(cds, heap)(" Archived %d objects", _num_total_archived_objs); diff --git a/src/hotspot/share/cds/heapShared.hpp b/src/hotspot/share/cds/heapShared.hpp index 3e60ac9663b..35c90b237a6 100644 --- a/src/hotspot/share/cds/heapShared.hpp +++ b/src/hotspot/share/cds/heapShared.hpp @@ -47,6 +47,7 @@ class KlassToOopHandleTable; class ResourceBitMap; struct ArchivableStaticFieldInfo; +class ArchiveHeapInfo; // A dump time sub-graph info for Klass _k. It includes the entry points // (static fields in _k's mirror) of the archived sub-graphs reachable @@ -61,8 +62,7 @@ class KlassSubGraphInfo: public CHeapObj { // object sub-graphs can be accessed at runtime. GrowableArray* _subgraph_object_klasses; // A list of _k's static fields as the entry points of archived sub-graphs. - // For each entry field, it is a tuple of field_offset, field_value and - // is_closed_archive flag. 
+ // For each entry field, it is a tuple of field_offset, field_value GrowableArray* _subgraph_entry_fields; // Does this KlassSubGraphInfo belong to the archived full module graph @@ -97,8 +97,7 @@ class KlassSubGraphInfo: public CHeapObj { GrowableArray* subgraph_entry_fields() { return _subgraph_entry_fields; } - void add_subgraph_entry_field(int static_field_offset, oop v, - bool is_closed_archive); + void add_subgraph_entry_field(int static_field_offset, oop v); void add_subgraph_object_klass(Klass *orig_k); int num_subgraph_object_klasses() { return _subgraph_object_klasses == nullptr ? 0 : @@ -141,7 +140,7 @@ class HeapShared: AllStatic { friend class VerifySharedOopClosure; public: - // Can this VM write heap regions into the CDS archive? Currently only G1+compressed{oops,cp} + // Can this VM write a heap region into the CDS archive? Currently only G1+compressed{oops,cp} static bool can_write() { CDS_JAVA_HEAP_ONLY( if (_disable_writing) { @@ -165,7 +164,6 @@ public: private: #if INCLUDE_CDS_JAVA_HEAP static bool _disable_writing; - static bool _copying_open_region_objects; static DumpedInternedStrings *_dumped_interned_strings; // statistics @@ -189,22 +187,18 @@ public: // The location of this object inside ArchiveHeapWriter::_buffer size_t _buffer_offset; - bool _in_open_region; public: - CachedOopInfo(oop orig_referrer, bool in_open_region) + CachedOopInfo(oop orig_referrer) : _orig_referrer(orig_referrer), - _buffer_offset(0), _in_open_region(in_open_region) {} + _buffer_offset(0) {} oop orig_referrer() const { return _orig_referrer; } - bool in_open_region() const { return _in_open_region; } void set_buffer_offset(size_t offset) { _buffer_offset = offset; } size_t buffer_offset() const { return _buffer_offset; } }; private: - static void check_enum_obj(int level, - KlassSubGraphInfo* subgraph_info, - oop orig_obj, - bool is_closed_archive); + static void check_enum_obj(int level, KlassSubGraphInfo* subgraph_info, + oop orig_obj); typedef 
ResourceHashtable* closed_regions, - GrowableArray* open_regions, - GrowableArray* closed_bitmaps, - GrowableArray* open_bitmaps); - static void copy_closed_objects(); - static void copy_open_objects(); - static void copy_special_open_objects(); + static void archive_objects(ArchiveHeapInfo* heap_info); + static void copy_objects(); + static void copy_special_objects(); static bool archive_reachable_objects_from(int level, KlassSubGraphInfo* subgraph_info, - oop orig_obj, - bool is_closed_archive); + oop orig_obj); static ResourceBitMap calculate_oopmap(MemRegion region); // marks all the oop pointers static void add_to_dumped_interned_strings(oop string); @@ -380,7 +366,7 @@ private: static void remove_scratch_objects(Klass* k); // We use the HeapShared::roots() array to make sure that objects stored in the - // archived heap regions are not prematurely collected. These roots include: + // archived heap region are not prematurely collected. These roots include: // // - mirrors of classes that have not yet been loaded. // - ConstantPool::resolved_references() of classes that have not yet been loaded. 
@@ -410,8 +396,7 @@ private: public: static void init_scratch_objects(TRAPS) NOT_CDS_JAVA_HEAP_RETURN; static bool is_heap_region(int idx) { - CDS_JAVA_HEAP_ONLY(return (idx >= MetaspaceShared::first_closed_heap_region && - idx <= MetaspaceShared::last_open_heap_region);) + CDS_JAVA_HEAP_ONLY(return (idx == MetaspaceShared::hp);) NOT_CDS_JAVA_HEAP_RETURN_(false); } diff --git a/src/hotspot/share/cds/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp index 2e762d29a31..cfcbc6cc666 100644 --- a/src/hotspot/share/cds/metaspaceShared.cpp +++ b/src/hotspot/share/cds/metaspaceShared.cpp @@ -98,12 +98,7 @@ bool MetaspaceShared::_use_full_module_graph = true; // The CDS archive is divided into the following regions: // rw - read-write metadata // ro - read-only metadata and read-only tables -// -// ca0 - closed archive heap space #0 -// ca1 - closed archive heap space #1 (may be empty) -// oa0 - open archive heap space #0 -// oa1 - open archive heap space #1 (may be empty) -// +// hp - heap region // bm - bitmap for relocating the above 7 regions. // // The rw and ro regions are linearly allocated, in the order of rw->ro. @@ -119,8 +114,9 @@ bool MetaspaceShared::_use_full_module_graph = true; // [5] SymbolTable, StringTable, SystemDictionary, and a few other read-only data // are copied into the ro region as read-only tables. // -// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_objects. -// Their layout is independent of the rw/ro regions. +// The heap region is populated by HeapShared::archive_objects. +// +// The bitmap region is used to relocate the ro/rw/hp regions. 
static DumpRegion _symbol_region("symbols"); @@ -267,8 +263,8 @@ void MetaspaceShared::initialize_for_static_dump() { size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M); _symbol_rs = ReservedSpace(symbol_rs_size); if (!_symbol_rs.is_reserved()) { - vm_exit_during_initialization("Unable to reserve memory for symbols", - err_msg(SIZE_FORMAT " bytes.", symbol_rs_size)); + log_error(cds)("Unable to reserve memory for symbols: " SIZE_FORMAT " bytes.", symbol_rs_size); + MetaspaceShared::unrecoverable_writing_error(); } _symbol_region.init(&_symbol_rs, &_symbol_vs); } @@ -309,7 +305,8 @@ void MetaspaceShared::read_extra_data(JavaThread* current, const char* filename) ResourceMark rm(current); if (utf8_length == 0x7fffffff) { // buf_len will overflown 32-bit value. - vm_exit_during_initialization(err_msg("string length too large: %d", utf8_length)); + log_error(cds)("string length too large: %d", utf8_length); + MetaspaceShared::unrecoverable_loading_error(); } int buf_len = utf8_length+1; char* utf8_buffer = NEW_RESOURCE_ARRAY(char, buf_len); @@ -430,11 +427,7 @@ void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread class VM_PopulateDumpSharedSpace : public VM_Operation { private: - GrowableArray *_closed_heap_regions; - GrowableArray *_open_heap_regions; - - GrowableArray *_closed_heap_bitmaps; - GrowableArray *_open_heap_bitmaps; + ArchiveHeapInfo _heap_info; void dump_java_heap_objects(GrowableArray* klasses) NOT_CDS_JAVA_HEAP_RETURN; void dump_shared_symbol_table(GrowableArray* symbols) { @@ -445,11 +438,7 @@ private: public: - VM_PopulateDumpSharedSpace() : VM_Operation(), - _closed_heap_regions(nullptr), - _open_heap_regions(nullptr), - _closed_heap_bitmaps(nullptr), - _open_heap_bitmaps(nullptr) {} + VM_PopulateDumpSharedSpace() : VM_Operation(), _heap_info() {} bool skip_operation() const { return false; } @@ -549,11 +538,7 @@ void VM_PopulateDumpSharedSpace::doit() { mapinfo->set_serialized_data(serialized_data); 
mapinfo->set_cloned_vtables(cloned_vtables); mapinfo->open_for_write(); - builder.write_archive(mapinfo, - _closed_heap_regions, - _open_heap_regions, - _closed_heap_bitmaps, - _open_heap_bitmaps); + builder.write_archive(mapinfo, &_heap_info); if (PrintSystemDictionaryAtExit) { SystemDictionary::print(); @@ -564,10 +549,7 @@ void VM_PopulateDumpSharedSpace::doit() { "for testing purposes only and should not be used in a production environment"); } - // There may be pending VM operations. We have changed some global states - // (such as vmClasses::_klasses) that may cause these VM operations - // to fail. For safety, forget these operations and exit the VM directly. - os::_exit(0); + MetaspaceShared::exit_after_static_dump(); } class CollectCLDClosure : public CLDClosure { @@ -677,12 +659,13 @@ void MetaspaceShared::preload_and_dump() { preload_and_dump_impl(THREAD); if (HAS_PENDING_EXCEPTION) { if (PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())) { - vm_direct_exit(-1, err_msg("Out of memory. Please run with a larger Java heap, current MaxHeapSize = " - SIZE_FORMAT "M", MaxHeapSize/M)); + log_error(cds)("Out of memory. 
Please run with a larger Java heap, current MaxHeapSize = " + SIZE_FORMAT "M", MaxHeapSize/M); + MetaspaceShared::unrecoverable_writing_error(); } else { log_error(cds)("%s: %s", PENDING_EXCEPTION->klass()->external_name(), java_lang_String::as_utf8_string(java_lang_Throwable::message(PENDING_EXCEPTION))); - vm_direct_exit(-1, "VM exits due to exception, use -Xlog:cds,exceptions=trace for detail"); + MetaspaceShared::unrecoverable_writing_error("VM exits due to exception, use -Xlog:cds,exceptions=trace for detail"); } } else { // On success, the VM_PopulateDumpSharedSpace op should have @@ -839,6 +822,7 @@ bool MetaspaceShared::try_link_class(JavaThread* current, InstanceKlass* ik) { SystemDictionaryShared::set_class_has_failed_verification(ik); _has_error_classes = true; } + ik->compute_has_loops_flag_for_methods(); BytecodeVerificationLocal = saved; return true; } else { @@ -874,14 +858,7 @@ void VM_PopulateDumpSharedSpace::dump_java_heap_objects(GrowableArray* k } } - // The closed and open archive heap space has maximum two regions. - // See FileMapInfo::write_heap_regions() for details. - _closed_heap_regions = new GrowableArray(2); - _open_heap_regions = new GrowableArray(2); - _closed_heap_bitmaps = new GrowableArray(2); - _open_heap_bitmaps = new GrowableArray(2); - HeapShared::archive_objects(_closed_heap_regions, _open_heap_regions, - _closed_heap_bitmaps, _open_heap_bitmaps); + HeapShared::archive_objects(&_heap_info); ArchiveBuilder::OtherROAllocMark mark; HeapShared::write_subgraph_info_table(); } @@ -902,6 +879,37 @@ bool MetaspaceShared::is_shared_dynamic(void* p) { } } +// This function is called when the JVM is unable to load the specified archive(s) due to one +// of the following conditions. +// - There's an error that indicates that the archive(s) files were corrupt or otherwise damaged. +// - When -XX:+RequireSharedSpaces is specified, AND the JVM cannot load the archive(s) due +// to version or classpath mismatch. 
+void MetaspaceShared::unrecoverable_loading_error(const char* message) { + log_error(cds)("An error has occurred while processing the shared archive file."); + if (message != nullptr) { + log_error(cds)("%s", message); + } + vm_exit_during_initialization("Unable to use shared archive.", nullptr); +} + +// This function is called when the JVM is unable to write the specified CDS archive due to an +// unrecoverable error. +void MetaspaceShared::unrecoverable_writing_error(const char* message) { + log_error(cds)("An error has occurred while writing the shared archive file."); + if (message != nullptr) { + log_error(cds)("%s", message); + } + vm_direct_exit(1); +} + +// We have finished dumping the static archive. At this point, there may be pending VM +// operations. We have changed some global states (such as vmClasses::_klasses) that +// may cause these VM operations to fail. For safety, forget these operations and +// exit the VM directly. +void MetaspaceShared::exit_after_static_dump() { + os::_exit(0); +} + void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() { assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled"); MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE; @@ -950,9 +958,9 @@ void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() { DynamicDumpSharedSpaces = false; log_info(cds)("Unable to map shared spaces"); if (PrintSharedArchiveAndExit) { - vm_exit_during_initialization("Unable to use shared archive."); + MetaspaceShared::unrecoverable_loading_error("Unable to use shared archive."); } else if (RequireSharedSpaces) { - FileMapInfo::fail_stop("Unable to map shared spaces"); + MetaspaceShared::unrecoverable_loading_error("Unable to map shared spaces"); } } @@ -967,7 +975,7 @@ void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() { delete dynamic_mapinfo; } if (RequireSharedSpaces && has_failed) { - FileMapInfo::fail_stop("Unable to map shared spaces"); + 
MetaspaceShared::unrecoverable_loading_error("Unable to map shared spaces"); } } @@ -995,7 +1003,7 @@ FileMapInfo* MetaspaceShared::open_dynamic_archive() { if (!mapinfo->initialize()) { delete(mapinfo); if (RequireSharedSpaces) { - FileMapInfo::fail_stop("Failed to initialize dynamic archive"); + MetaspaceShared::unrecoverable_loading_error("Failed to initialize dynamic archive"); } return nullptr; } @@ -1146,9 +1154,9 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File assert(ccs_end > cds_base, "Sanity check"); CompressedKlassPointers::initialize(cds_base, ccs_end - cds_base); - // map_heap_regions() compares the current narrow oop and klass encodings + // map_or_load_heap_region() compares the current narrow oop and klass encodings // with the archived ones, so it must be done after all encodings are determined. - static_mapinfo->map_or_load_heap_regions(); + static_mapinfo->map_or_load_heap_region(); } }); log_info(cds)("optimized module handling: %s", MetaspaceShared::use_optimized_module_handling() ? "enabled" : "disabled"); diff --git a/src/hotspot/share/cds/metaspaceShared.hpp b/src/hotspot/share/cds/metaspaceShared.hpp index 9c79cb17a36..38fa2dbbc5e 100644 --- a/src/hotspot/share/cds/metaspaceShared.hpp +++ b/src/hotspot/share/cds/metaspaceShared.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,23 +60,9 @@ class MetaspaceShared : AllStatic { rw = 0, // read-write shared space ro = 1, // read-only shared space bm = 2, // relocation bitmaps (freed after file mapping is finished) + hp = 3, // heap region num_core_region = 2, // rw and ro - num_non_heap_regions = 3, // rw and ro and bm - - // java heap regions - first_closed_heap_region = bm + 1, - max_num_closed_heap_regions = 2, - last_closed_heap_region = first_closed_heap_region + max_num_closed_heap_regions - 1, - first_open_heap_region = last_closed_heap_region + 1, - max_num_open_heap_regions = 2, - last_open_heap_region = first_open_heap_region + max_num_open_heap_regions - 1, - max_num_heap_regions = max_num_closed_heap_regions + max_num_open_heap_regions, - - first_archive_heap_region = first_closed_heap_region, - last_archive_heap_region = last_open_heap_region, - - last_valid_region = last_open_heap_region, - n_regions = last_valid_region + 1 // total number of regions + n_regions = 4 // total number of regions }; static void prepare_for_dumping() NOT_CDS_RETURN; @@ -106,8 +92,8 @@ public: static void initialize_shared_spaces() NOT_CDS_RETURN; - // Return true if given address is in the shared metaspace regions (i.e., excluding any - // mapped heap regions.) + // Return true if given address is in the shared metaspace regions (i.e., excluding the + // mapped heap region.) 
static bool is_in_shared_metaspace(const void* p) { return MetaspaceObj::is_shared((const MetaspaceObj*)p); } @@ -116,6 +102,10 @@ public: static bool is_shared_dynamic(void* p) NOT_CDS_RETURN_(false); + static void unrecoverable_loading_error(const char* message = nullptr); + static void unrecoverable_writing_error(const char* message = nullptr); + static void exit_after_static_dump(); + static void serialize(SerializeClosure* sc) NOT_CDS_RETURN; // JVM/TI RedefineClasses() support: diff --git a/src/hotspot/share/ci/ciMethod.cpp b/src/hotspot/share/ci/ciMethod.cpp index adb7e7e2ea5..73e7c46391c 100644 --- a/src/hotspot/share/ci/ciMethod.cpp +++ b/src/hotspot/share/ci/ciMethod.cpp @@ -84,8 +84,8 @@ ciMethod::ciMethod(const methodHandle& h_m, ciInstanceKlass* holder) : _code_size = h_m->code_size(); _handler_count = h_m->exception_table_length(); _size_of_parameters = h_m->size_of_parameters(); - _uses_monitors = h_m->access_flags().has_monitor_bytecodes(); - _balanced_monitors = !_uses_monitors || h_m->access_flags().is_monitor_matching(); + _uses_monitors = h_m->has_monitor_bytecodes(); + _balanced_monitors = !_uses_monitors || h_m->guaranteed_monitor_matching(); _is_c1_compilable = !h_m->is_not_c1_compilable(); _is_c2_compilable = !h_m->is_not_c2_compilable(); _can_be_parsed = true; diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp index 74625830093..6a076c45995 100644 --- a/src/hotspot/share/classfile/classFileParser.cpp +++ b/src/hotspot/share/classfile/classFileParser.cpp @@ -1978,27 +1978,27 @@ ClassFileParser::FieldAnnotationCollector::~FieldAnnotationCollector() { void MethodAnnotationCollector::apply_to(const methodHandle& m) { if (has_annotation(_method_CallerSensitive)) - m->set_caller_sensitive(true); + m->set_caller_sensitive(); if (has_annotation(_method_ForceInline)) - m->set_force_inline(true); + m->set_force_inline(); if (has_annotation(_method_DontInline)) - m->set_dont_inline(true); + 
m->set_dont_inline(); if (has_annotation(_method_ChangesCurrentThread)) - m->set_changes_current_thread(true); + m->set_changes_current_thread(); if (has_annotation(_method_JvmtiMountTransition)) - m->set_jvmti_mount_transition(true); + m->set_jvmti_mount_transition(); if (has_annotation(_method_InjectedProfile)) - m->set_has_injected_profile(true); + m->set_has_injected_profile(); if (has_annotation(_method_LambdaForm_Compiled) && m->intrinsic_id() == vmIntrinsics::_none) m->set_intrinsic_id(vmIntrinsics::_compiledLambdaForm); if (has_annotation(_method_Hidden)) - m->set_hidden(true); + m->set_is_hidden(); if (has_annotation(_method_Scoped)) - m->set_scoped(true); + m->set_scoped(); if (has_annotation(_method_IntrinsicCandidate) && !m->is_synthetic()) - m->set_intrinsic_candidate(true); + m->set_intrinsic_candidate(); if (has_annotation(_jdk_internal_vm_annotation_ReservedStackAccess)) - m->set_has_reserved_stack_access(true); + m->set_has_reserved_stack_access(); } void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) { @@ -2739,7 +2739,7 @@ Method* ClassFileParser::parse_method(const ClassFileStream* const cfs, parsed_annotations.apply_to(methodHandle(THREAD, m)); if (is_hidden()) { // Mark methods in hidden classes as 'hidden'. - m->set_hidden(true); + m->set_is_hidden(); } // Copy annotations @@ -4070,7 +4070,7 @@ void OopMapBlocksBuilder::print_value_on(outputStream* st) const { void ClassFileParser::set_precomputed_flags(InstanceKlass* ik) { assert(ik != nullptr, "invariant"); - const Klass* const super = ik->super(); + const InstanceKlass* const super = ik->java_super(); // Check if this klass has an empty finalize method (i.e. 
one with return bytecode only), // in which case we don't have to register objects as finalizable @@ -4349,7 +4349,7 @@ static void check_final_method_override(const InstanceKlass* this_klass, TRAPS) const Symbol* const name = m->name(); const Symbol* const signature = m->signature(); - const Klass* k = this_klass->super(); + const InstanceKlass* k = this_klass->java_super(); const Method* super_m = nullptr; while (k != nullptr) { // skip supers that don't have final methods. @@ -4381,11 +4381,11 @@ static void check_final_method_override(const InstanceKlass* this_klass, TRAPS) } // continue to look from super_m's holder's super. - k = super_m->method_holder()->super(); + k = super_m->method_holder()->java_super(); continue; } - k = k->super(); + k = k->java_super(); } } } diff --git a/src/hotspot/share/classfile/classLoaderData.hpp b/src/hotspot/share/classfile/classLoaderData.hpp index 4477ce4e789..4dd259080b1 100644 --- a/src/hotspot/share/classfile/classLoaderData.hpp +++ b/src/hotspot/share/classfile/classLoaderData.hpp @@ -160,7 +160,7 @@ class ClassLoaderData : public CHeapObj { Symbol* _name_and_id; JFR_ONLY(DEFINE_TRACE_ID_FIELD;) - void set_next(ClassLoaderData* next) { _next = next; } + void set_next(ClassLoaderData* next) { Atomic::store(&_next, next); } ClassLoaderData* next() const { return Atomic::load(&_next); } ClassLoaderData(Handle h_class_loader, bool has_class_mirror_holder); diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.cpp b/src/hotspot/share/classfile/classLoaderDataGraph.cpp index 7a1c499b9e3..7c4aab966a4 100644 --- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp +++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp @@ -45,7 +45,6 @@ #include "utilities/growableArray.hpp" #include "utilities/macros.hpp" #include "utilities/ostream.hpp" -#include "utilities/vmError.hpp" volatile size_t ClassLoaderDataGraph::_num_array_classes = 0; volatile size_t ClassLoaderDataGraph::_num_instance_classes = 0; @@ -195,7 +194,7 @@ 
void ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces() { // on the stack or in the code cache, so we only have to repeat the full walk if // they were found at that time. // TODO: have redefinition clean old methods out of the code cache. They still exist in some places. - bool walk_all_metadata = InstanceKlass::has_previous_versions_and_reset(); + bool walk_all_metadata = InstanceKlass::should_clean_previous_versions_and_reset(); MetadataOnStackMark md_on_stack(walk_all_metadata, /*redefinition_walk*/false); clean_deallocate_lists(walk_all_metadata); @@ -262,8 +261,15 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool has_class_mirror_ return loader_data; } +inline void assert_is_safepoint_or_gc() { + assert(SafepointSynchronize::is_at_safepoint() || + Thread::current()->is_ConcurrentGC_thread() || + Thread::current()->is_Worker_thread(), + "Must be called by safepoint or GC"); +} + void ClassLoaderDataGraph::cld_unloading_do(CLDClosure* cl) { - assert_locked_or_safepoint_weak(ClassLoaderDataGraph_lock); + assert_is_safepoint_or_gc(); for (ClassLoaderData* cld = _unloading; cld != nullptr; cld = cld->next()) { assert(cld->is_unloading(), "invariant"); cl->do_cld(cld); @@ -273,15 +279,15 @@ void ClassLoaderDataGraph::cld_unloading_do(CLDClosure* cl) { // These are functions called by the GC, which require all of the CLDs, including the // unloading ones. 
void ClassLoaderDataGraph::cld_do(CLDClosure* cl) { - assert_locked_or_safepoint_weak(ClassLoaderDataGraph_lock); - for (ClassLoaderData* cld = _head; cld != nullptr; cld = cld->_next) { + assert_is_safepoint_or_gc(); + for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != nullptr; cld = cld->next()) { cl->do_cld(cld); } } void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) { - assert_locked_or_safepoint_weak(ClassLoaderDataGraph_lock); - for (ClassLoaderData* cld = _head; cld != nullptr; cld = cld->_next) { + assert_is_safepoint_or_gc(); + for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != nullptr; cld = cld->next()) { CLDClosure* closure = cld->keep_alive() ? strong : weak; if (closure != nullptr) { closure->do_cld(cld); @@ -290,7 +296,7 @@ void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) { } void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) { - assert_locked_or_safepoint_weak(ClassLoaderDataGraph_lock); + assert_is_safepoint_or_gc(); if (ClassUnloading) { roots_cld_do(cl, nullptr); } else { @@ -476,7 +482,7 @@ bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) { #endif // PRODUCT bool ClassLoaderDataGraph::is_valid(ClassLoaderData* loader_data) { - DEBUG_ONLY( if (!VMError::is_error_reported()) { assert_locked_or_safepoint(ClassLoaderDataGraph_lock); } ) + assert_locked_or_safepoint(ClassLoaderDataGraph_lock); if (loader_data != nullptr) { if (loader_data == ClassLoaderData::the_null_class_loader_data()) { return true; @@ -521,7 +527,8 @@ bool ClassLoaderDataGraph::do_unloading() { prev->set_next(data); } else { assert(dead == _head, "sanity check"); - _head = data; + // The GC might be walking this concurrently + Atomic::store(&_head, data); } dead->set_next(_unloading); _unloading = dead; diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp b/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp index 
4d290c0dfdd..6b390a05079 100644 --- a/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp +++ b/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp @@ -73,7 +73,7 @@ bool ClassLoaderDataGraph::should_clean_metaspaces_and_reset() { // Only clean metaspaces after full GC. bool do_cleaning = _safepoint_cleanup_needed; #if INCLUDE_JVMTI - do_cleaning = do_cleaning && (_should_clean_deallocate_lists || InstanceKlass::has_previous_versions()); + do_cleaning = do_cleaning && (_should_clean_deallocate_lists || InstanceKlass::should_clean_previous_versions()); #else do_cleaning = do_cleaning && _should_clean_deallocate_lists; #endif diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp index dddfa3435ec..d38c32b234e 100644 --- a/src/hotspot/share/classfile/javaClasses.cpp +++ b/src/hotspot/share/classfile/javaClasses.cpp @@ -765,8 +765,13 @@ void java_lang_String::print(oop java_string, outputStream* st) { st->print("\""); for (int index = 0; index < length; index++) { - st->print("%c", (!is_latin1) ? value->char_at(index) : - ((jchar) value->byte_at(index)) & 0xff ); + jchar c = (!is_latin1) ? value->char_at(index) : + ((jchar) value->byte_at(index)) & 0xff; + if (c < ' ') { + st->print("\\x%02X", c); // print control characters e.g. 
\x0A + } else { + st->print("%c", c); + } } st->print("\""); } @@ -1121,9 +1126,6 @@ bool java_lang_Class::restore_archived_mirror(Klass *k, // mirror is archived, restore log_debug(cds, mirror)("Archived mirror is: " PTR_FORMAT, p2i(m)); - if (ArchiveHeapLoader::is_mapped()) { - assert(Universe::heap()->is_archived_object(m), "must be archived mirror object"); - } assert(as_Klass(m) == k, "must be"); Handle mirror(THREAD, m); diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp index f84491ce06a..8db4c08c966 100644 --- a/src/hotspot/share/classfile/stringTable.cpp +++ b/src/hotspot/share/classfile/stringTable.cpp @@ -797,7 +797,7 @@ void StringTable::allocate_shared_strings_array(TRAPS) { // refer to more than 16384 * 16384 = 26M interned strings! Not a practical concern // but bail out for safety. log_error(cds)("Too many strings to be archived: " SIZE_FORMAT, _items_count); - os::_exit(1); + MetaspaceShared::unrecoverable_writing_error(); } objArrayOop primary = oopFactory::new_objArray(vmClasses::Object_klass(), primary_array_length, CHECK); diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp index 3dec3cb1b4a..3fdb98420c7 100644 --- a/src/hotspot/share/classfile/systemDictionary.cpp +++ b/src/hotspot/share/classfile/systemDictionary.cpp @@ -1583,11 +1583,13 @@ void SystemDictionary::methods_do(void f(Method*)) { } auto doit = [&] (InvokeMethodKey key, Method* method) { - f(method); + if (method != nullptr) { + f(method); + } }; { - MutexLocker ml(InvokeMethodTable_lock); + MutexLocker ml(InvokeMethodIntrinsicTable_lock); _invoke_method_intrinsic_table.iterate_all(doit); } @@ -1939,40 +1941,68 @@ Method* SystemDictionary::find_method_handle_intrinsic(vmIntrinsicID iid, iid != vmIntrinsics::_invokeGeneric, "must be a known MH intrinsic iid=%d: %s", iid_as_int, vmIntrinsics::name_at(iid)); + InvokeMethodKey key(signature, iid_as_int); + Method** met = 
nullptr; + + // We only want one entry in the table for this (signature/id, method) pair but the code + // to create the intrinsic method needs to be outside the lock. + // The first thread claims the entry by adding the key and the other threads wait, until the + // Method has been added as the value. { - MutexLocker ml(THREAD, InvokeMethodTable_lock); - InvokeMethodKey key(signature, iid_as_int); - Method** met = _invoke_method_intrinsic_table.get(key); - if (met != nullptr) { - return *met; + MonitorLocker ml(THREAD, InvokeMethodIntrinsicTable_lock); + while (true) { + bool created; + met = _invoke_method_intrinsic_table.put_if_absent(key, &created); + assert(met != nullptr, "either created or found"); + if (*met != nullptr) { + return *met; + } else if (created) { + // The current thread won the race and will try to create the full entry. + break; + } else { + // Another thread beat us to it, so wait for them to complete + // and return *met; or if they hit an error we get another try. + ml.wait(); + // Note it is not safe to read *met here as that entry could have + // been deleted, so we must loop and try put_if_absent again. + } } + } - bool throw_error = false; - // This function could get an OOM but it is safe to call inside of a lock because - // throwing OutOfMemoryError doesn't call Java code. - methodHandle m = Method::make_method_handle_intrinsic(iid, signature, CHECK_NULL); - if (!Arguments::is_interpreter_only() || iid == vmIntrinsics::_linkToNative) { - // Generate a compiled form of the MH intrinsic - // linkToNative doesn't have interpreter-specific implementation, so always has to go through compiled version. - AdapterHandlerLibrary::create_native_wrapper(m); - // Check if have the compiled code. 
- throw_error = (!m->has_compiled_code()); - } + methodHandle m = Method::make_method_handle_intrinsic(iid, signature, THREAD); + bool throw_error = HAS_PENDING_EXCEPTION; + if (!throw_error && (!Arguments::is_interpreter_only() || iid == vmIntrinsics::_linkToNative)) { + // Generate a compiled form of the MH intrinsic + // linkToNative doesn't have interpreter-specific implementation, so always has to go through compiled version. + AdapterHandlerLibrary::create_native_wrapper(m); + // Check if have the compiled code. + throw_error = (!m->has_compiled_code()); + } - if (!throw_error) { + { + MonitorLocker ml(THREAD, InvokeMethodIntrinsicTable_lock); + if (throw_error) { + // Remove the entry and let another thread try, or get the same exception. + bool removed = _invoke_method_intrinsic_table.remove(key); + assert(removed, "must be the owner"); + ml.notify_all(); + } else { signature->make_permanent(); // The signature is never unloaded. - bool created = _invoke_method_intrinsic_table.put(key, m()); - assert(created, "must be since we still hold the lock"); assert(Arguments::is_interpreter_only() || (m->has_compiled_code() && m->code()->entry_point() == m->from_compiled_entry()), "MH intrinsic invariant"); + *met = m(); // insert the element + ml.notify_all(); return m(); } } - // Throw error outside of the lock. - THROW_MSG_NULL(vmSymbols::java_lang_VirtualMachineError(), - "Out of space in CodeCache for method handle intrinsic"); + // Throw VirtualMachineError or the pending exception in the JavaThread + if (throw_error && !HAS_PENDING_EXCEPTION) { + THROW_MSG_NULL(vmSymbols::java_lang_VirtualMachineError(), + "Out of space in CodeCache for method handle intrinsic"); + } + return nullptr; } // Helper for unpacking the return value from linkMethod and linkCallSite. 
@@ -2115,7 +2145,7 @@ Handle SystemDictionary::find_method_handle_type(Symbol* signature, Handle empty; OopHandle* o; { - MutexLocker ml(THREAD, InvokeMethodTable_lock); + MutexLocker ml(THREAD, InvokeMethodTypeTable_lock); o = _invoke_method_type_table.get(signature); } @@ -2184,7 +2214,7 @@ Handle SystemDictionary::find_method_handle_type(Symbol* signature, if (can_be_cached) { // We can cache this MethodType inside the JVM. - MutexLocker ml(THREAD, InvokeMethodTable_lock); + MutexLocker ml(THREAD, InvokeMethodTypeTable_lock); bool created = false; assert(method_type != nullptr, "unexpected null"); OopHandle* h = _invoke_method_type_table.get(signature); diff --git a/src/hotspot/share/classfile/vmClasses.cpp b/src/hotspot/share/classfile/vmClasses.cpp index 2f004f02f90..82c2ff8fa7b 100644 --- a/src/hotspot/share/classfile/vmClasses.cpp +++ b/src/hotspot/share/classfile/vmClasses.cpp @@ -142,7 +142,7 @@ void vmClasses::resolve_all(TRAPS) { // Object_klass is resolved. See the above resolve_through() // call. No mirror objects are accessed/restored in the above call. // Mirrors are restored after java.lang.Class is loaded. 
- ArchiveHeapLoader::fixup_regions(); + ArchiveHeapLoader::fixup_region(); // Initialize the constant pool for the Object_class assert(Object_klass()->is_shared(), "must be"); diff --git a/src/hotspot/share/classfile/vmIntrinsics.hpp b/src/hotspot/share/classfile/vmIntrinsics.hpp index 96c9f78577f..86d5cc9ce5f 100644 --- a/src/hotspot/share/classfile/vmIntrinsics.hpp +++ b/src/hotspot/share/classfile/vmIntrinsics.hpp @@ -584,9 +584,11 @@ class methodHandle; do_alias( continuationDoYield_signature, void_int_signature) \ \ /* java/lang/VirtualThread */ \ - do_intrinsic(_notifyJvmtiMount, java_lang_VirtualThread, notifyJvmtiMount_name, bool_bool_void_signature, F_RN) \ - do_intrinsic(_notifyJvmtiUnmount, java_lang_VirtualThread, notifyJvmtiUnmount_name, bool_bool_void_signature, F_RN) \ - do_intrinsic(_notifyJvmtiHideFrames, java_lang_VirtualThread, notifyJvmtiHideFrames_name, bool_void_signature, F_RN) \ + do_intrinsic(_notifyJvmtiVThreadStart, java_lang_VirtualThread, notifyJvmtiStart_name, void_method_signature, F_RN) \ + do_intrinsic(_notifyJvmtiVThreadEnd, java_lang_VirtualThread, notifyJvmtiEnd_name, void_method_signature, F_RN) \ + do_intrinsic(_notifyJvmtiVThreadMount, java_lang_VirtualThread, notifyJvmtiMount_name, bool_void_signature, F_RN) \ + do_intrinsic(_notifyJvmtiVThreadUnmount, java_lang_VirtualThread, notifyJvmtiUnmount_name, bool_void_signature, F_RN) \ + do_intrinsic(_notifyJvmtiVThreadHideFrames, java_lang_VirtualThread, notifyJvmtiHideFrames_name, bool_void_signature, F_RN) \ \ /* support for UnsafeConstants */ \ do_class(jdk_internal_misc_UnsafeConstants, "jdk/internal/misc/UnsafeConstants") \ diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp index caf6788c028..df5ea30484b 100644 --- a/src/hotspot/share/classfile/vmSymbols.hpp +++ b/src/hotspot/share/classfile/vmSymbols.hpp @@ -412,6 +412,8 @@ template(run_finalization_name, "runFinalization") \ template(dispatchUncaughtException_name, 
"dispatchUncaughtException") \ template(loadClass_name, "loadClass") \ + template(notifyJvmtiStart_name, "notifyJvmtiStart") \ + template(notifyJvmtiEnd_name, "notifyJvmtiEnd") \ template(notifyJvmtiMount_name, "notifyJvmtiMount") \ template(notifyJvmtiUnmount_name, "notifyJvmtiUnmount") \ template(notifyJvmtiHideFrames_name, "notifyJvmtiHideFrames") \ @@ -756,6 +758,8 @@ template(encodeThrowable_name, "encodeThrowable") \ template(encodeThrowable_signature, "(Ljava/lang/Throwable;JI)I") \ template(decodeAndThrowThrowable_name, "decodeAndThrowThrowable") \ + template(encodeAnnotations_name, "encodeAnnotations") \ + template(encodeAnnotations_signature, "([BLjava/lang/Class;Ljdk/internal/reflect/ConstantPool;Z[Ljava/lang/Class;)[B")\ template(decodeAndThrowThrowable_signature, "(JZ)V") \ template(classRedefinedCount_name, "classRedefinedCount") \ template(classLoader_name, "classLoader") \ diff --git a/src/hotspot/share/compiler/oopMap.cpp b/src/hotspot/share/compiler/oopMap.cpp index a6b8a156450..58e9daa43a5 100644 --- a/src/hotspot/share/compiler/oopMap.cpp +++ b/src/hotspot/share/compiler/oopMap.cpp @@ -392,7 +392,7 @@ class AddDerivedOop : public DerivedOopClosure { SkipNull = true, NeedsLock = true }; - virtual void do_derived_oop(oop* base, derived_pointer* derived) { + virtual void do_derived_oop(derived_base* base, derived_pointer* derived) { #if COMPILER2_OR_JVMCI DerivedPointerTable::add(derived, base); #endif // COMPILER2_OR_JVMCI @@ -410,7 +410,7 @@ public: SkipNull = true, NeedsLock = true }; - virtual void do_derived_oop(oop* base, derived_pointer* derived) { + virtual void do_derived_oop(derived_base* base, derived_pointer* derived) { // All derived pointers must be processed before the base pointer of any derived pointer is processed. // Otherwise, if two derived pointers use the same base, the second derived pointer will get an obscured // offset, if the base pointer is processed in the first derived pointer. 
@@ -430,7 +430,7 @@ public: SkipNull = true, NeedsLock = true }; - virtual void do_derived_oop(oop* base, derived_pointer* derived) {} + virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {} }; void OopMapSet::oops_do(const frame* fr, const RegisterMap* reg_map, OopClosure* f, DerivedPointerIterationMode mode) { @@ -915,8 +915,8 @@ void DerivedPointerTable::clear() { _active = true; } -void DerivedPointerTable::add(derived_pointer* derived_loc, oop *base_loc) { - assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop"); +void DerivedPointerTable::add(derived_pointer* derived_loc, derived_base* base_loc) { + assert(Universe::heap()->is_in_or_null((void*)*base_loc), "not an oop"); assert(derived_loc != (void*)base_loc, "Base and derived in same location"); derived_pointer base_loc_as_derived_pointer = static_cast(reinterpret_cast(base_loc)); @@ -933,7 +933,7 @@ void DerivedPointerTable::add(derived_pointer* derived_loc, oop *base_loc) { "Add derived pointer@" INTPTR_FORMAT " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")", - p2i(derived_loc), derived_pointer_value(*derived_loc), p2i(*base_loc), p2i(base_loc), offset + p2i(derived_loc), derived_pointer_value(*derived_loc), intptr_t(*base_loc), p2i(base_loc), offset ); } // Set derived oop location to point to base. diff --git a/src/hotspot/share/compiler/oopMap.hpp b/src/hotspot/share/compiler/oopMap.hpp index 3c23943e097..4b20464f69c 100644 --- a/src/hotspot/share/compiler/oopMap.hpp +++ b/src/hotspot/share/compiler/oopMap.hpp @@ -48,6 +48,7 @@ class OopClosure; class CodeBlob; class ImmutableOopMap; +enum class derived_base : intptr_t {}; enum class derived_pointer : intptr_t {}; class OopMapValue: public StackObj { @@ -481,12 +482,12 @@ class DerivedPointerTable : public AllStatic { friend class VMStructs; private: class Entry; - static bool _active; // do not record pointers for verify pass etc. 
+ static bool _active; // do not record pointers for verify pass etc. public: - static void clear(); // Called before scavenge/GC - static void add(derived_pointer* derived, oop *base); // Called during scavenge/GC - static void update_pointers(); // Called after scavenge/GC + static void clear(); // Called before scavenge/GC + static void add(derived_pointer* derived, derived_base* base); // Called during scavenge/GC + static void update_pointers(); // Called after scavenge/GC static bool is_empty(); static bool is_active() { return _active; } static void set_active(bool value) { _active = value; } diff --git a/src/hotspot/share/compiler/oopMap.inline.hpp b/src/hotspot/share/compiler/oopMap.inline.hpp index 3f8a81d773b..c6531b1cd3a 100644 --- a/src/hotspot/share/compiler/oopMap.inline.hpp +++ b/src/hotspot/share/compiler/oopMap.inline.hpp @@ -84,15 +84,15 @@ void OopMapDo::iterate_oops_do(const frame } guarantee(loc != nullptr, "missing saved register"); derived_pointer* derived_loc = (derived_pointer*)loc; - void** base_loc = (void**) fr->oopmapreg_to_location(omv.content_reg(), reg_map); + derived_base* base_loc = (derived_base*) fr->oopmapreg_to_location(omv.content_reg(), reg_map); // Ignore nullptr oops and decoded null narrow oops which // equal to CompressedOops::base() when a narrow oop // implicit null check is used in compiled code. // The narrow_oop_base could be nullptr or be the address // of the page below heap depending on compressed oops mode. 
- if (base_loc != nullptr && !SkipNullValue::should_skip(*base_loc)) { - _derived_oop_fn->do_derived_oop((oop*)base_loc, derived_loc); + if (base_loc != nullptr && !SkipNullValue::should_skip((void*)*base_loc)) { + _derived_oop_fn->do_derived_oop(base_loc, derived_loc); } } } diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp index 5500f809d55..741f1d65603 100644 --- a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp +++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp @@ -54,7 +54,7 @@ public: static EpsilonHeap* heap(); EpsilonHeap() : - _memory_manager("Epsilon Heap", ""), + _memory_manager("Epsilon Heap"), _space(nullptr) {}; Name kind() const override { diff --git a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp index f4a03e50ea3..3668a51a32e 100644 --- a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp +++ b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -106,7 +106,7 @@ void G1BarrierSetC1::pre_barrier(LIRAccess& access, LIR_Opr addr_opr, assert(addr_opr == LIR_OprFact::illegalOpr, "sanity"); assert(pre_val->is_register(), "must be"); assert(pre_val->type() == T_OBJECT, "must be an object"); - assert(info == NULL, "sanity"); + assert(info == nullptr, "sanity"); slow = new G1PreBarrierStub(pre_val); } @@ -123,9 +123,9 @@ void G1BarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_v return; } - // If the "new_val" is a constant NULL, no barrier is necessary. + // If the "new_val" is a constant null, no barrier is necessary. 
if (new_val->is_constant() && - new_val->as_constant_ptr()->as_jobject() == NULL) return; + new_val->as_constant_ptr()->as_jobject() == nullptr) return; if (!new_val->is_register()) { LIR_Opr new_val_reg = gen->new_register(T_OBJECT); @@ -204,7 +204,7 @@ class C1G1PreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure { virtual OopMapSet* generate_code(StubAssembler* sasm) { G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler(); bs->generate_c1_pre_barrier_runtime_stub(sasm); - return NULL; + return nullptr; } }; @@ -212,7 +212,7 @@ class C1G1PostBarrierCodeGenClosure : public StubAssemblerCodeGenClosure { virtual OopMapSet* generate_code(StubAssembler* sasm) { G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler(); bs->generate_c1_post_barrier_runtime_stub(sasm); - return NULL; + return nullptr; } }; diff --git a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.hpp b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.hpp index b6620a22dc4..ce6f7277ed2 100644 --- a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.hpp +++ b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,7 +56,7 @@ class G1PreBarrierStub: public CodeStub { // previous value is assumed to have already been loaded into pre_val. 
G1PreBarrierStub(LIR_Opr pre_val) : _do_load(false), _addr(LIR_OprFact::illegalOpr), _pre_val(pre_val), - _patch_code(lir_patch_none), _info(NULL) + _patch_code(lir_patch_none), _info(nullptr) { assert(_pre_val->is_register(), "should be a register"); FrameMap* f = Compilation::current()->frame_map(); @@ -74,7 +74,7 @@ class G1PreBarrierStub: public CodeStub { if (_do_load) { // don't pass in the code emit info since it's processed in the fast // path - if (_info != NULL) + if (_info != nullptr) visitor->do_slow_case(_info); else visitor->do_slow_case(); @@ -134,8 +134,8 @@ class G1BarrierSetC1 : public ModRefBarrierSetC1 { public: G1BarrierSetC1() - : _pre_barrier_c1_runtime_code_blob(NULL), - _post_barrier_c1_runtime_code_blob(NULL) {} + : _pre_barrier_c1_runtime_code_blob(nullptr), + _post_barrier_c1_runtime_code_blob(nullptr) {} CodeBlob* pre_barrier_c1_runtime_code_blob() { return _pre_barrier_c1_runtime_code_blob; } CodeBlob* post_barrier_c1_runtime_code_blob() { return _post_barrier_c1_runtime_code_blob; } diff --git a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp index d08acc137c9..a553494874f 100644 --- a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp +++ b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -73,12 +73,12 @@ const TypeFunc *G1BarrierSetC2::write_ref_field_post_entry_Type() { * marking are kept alive, all reference updates need to any previous * reference stored before writing. * - * If the previous value is NULL there is no need to save the old value. 
- * References that are NULL are filtered during runtime by the barrier + * If the previous value is null there is no need to save the old value. + * References that are null are filtered during runtime by the barrier * code to avoid unnecessary queuing. * * However in the case of newly allocated objects it might be possible to - * prove that the reference about to be overwritten is NULL during compile + * prove that the reference about to be overwritten is null during compile * time and avoid adding the barrier code completely. * * The compiler needs to determine that the object in which a field is about @@ -100,7 +100,7 @@ bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit, return false; // cannot unalias unless there are precise offsets } - if (alloc == NULL) { + if (alloc == nullptr) { return false; // No allocation found } @@ -116,7 +116,7 @@ bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit, intptr_t st_offset = 0; Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset); - if (st_base == NULL) { + if (st_base == nullptr) { break; // inscrutable pointer } @@ -156,12 +156,12 @@ bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit, // Make sure that we are looking at the same allocation site. // The alloc variable is guaranteed to not be null here from earlier check. 
if (alloc == st_alloc) { - // Check that the initialization is storing NULL so that no previous store + // Check that the initialization is storing null so that no previous store // has been moved up and directly write a reference Node* captured_store = st_init->find_captured_store(offset, type2aelembytes(T_OBJECT), phase); - if (captured_store == NULL || captured_store == st_init->zero_memory()) { + if (captured_store == nullptr || captured_store == st_init->zero_memory()) { return true; } } @@ -191,10 +191,10 @@ void G1BarrierSetC2::pre_barrier(GraphKit* kit, if (do_load) { // We need to generate the load of the previous value - assert(obj != NULL, "must have a base"); - assert(adr != NULL, "where are loading from?"); - assert(pre_val == NULL, "loaded already?"); - assert(val_type != NULL, "need a type"); + assert(obj != nullptr, "must have a base"); + assert(adr != nullptr, "where are loading from?"); + assert(pre_val == nullptr, "loaded already?"); + assert(val_type != nullptr, "need a type"); if (use_ReduceInitialCardMarks() && g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) { @@ -203,7 +203,7 @@ void G1BarrierSetC2::pre_barrier(GraphKit* kit, } else { // In this case both val_type and alias_idx are unused. - assert(pre_val != NULL, "must be loaded already"); + assert(pre_val != nullptr, "must be loaded already"); // Nothing to be done if pre_val is null. 
if (pre_val->bottom_type() == TypePtr::NULL_PTR) return; assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here"); @@ -248,7 +248,7 @@ void G1BarrierSetC2::pre_barrier(GraphKit* kit, pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx, false, MemNode::unordered, LoadNode::Pinned); } - // if (pre_val != NULL) + // if (pre_val != nullptr) __ if_then(pre_val, BoolTest::ne, kit->null()); { Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw); @@ -270,7 +270,7 @@ void G1BarrierSetC2::pre_barrier(GraphKit* kit, const TypeFunc *tf = write_ref_field_pre_entry_Type(); __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), "write_ref_field_pre_entry", pre_val, tls); } __ end_if(); // (!index) - } __ end_if(); // (pre_val != NULL) + } __ end_if(); // (pre_val != nullptr) } __ end_if(); // (!marking) // Final sync IdealKit and GraphKit. @@ -288,7 +288,7 @@ void G1BarrierSetC2::pre_barrier(GraphKit* kit, * * To reduce the number of updates to the remembered set the post-barrier * filters updates to fields in objects located in the Young Generation, - * the same region as the reference, when the NULL is being written or + * the same region as the reference, when the null is being written or * if the card is already marked as dirty by an earlier write. 
* * Under certain circumstances it is possible to avoid generating the @@ -313,7 +313,7 @@ bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit, return false; // cannot unalias unless there are precise offsets } - if (alloc == NULL) { + if (alloc == nullptr) { return false; // No allocation found } @@ -377,13 +377,13 @@ void G1BarrierSetC2::post_barrier(GraphKit* kit, Node* val, BasicType bt, bool use_precise) const { - // If we are writing a NULL then we need no post barrier + // If we are writing a null then we need no post barrier - if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) { - // Must be NULL + if (val != nullptr && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) { + // Must be null const Type* t = val->bottom_type(); - assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL"); - // No post barrier if writing NULLx + assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be null"); + // No post barrier if writing null return; } @@ -406,7 +406,7 @@ void G1BarrierSetC2::post_barrier(GraphKit* kit, adr = obj; } // (Else it's an array (or unknown), and we want more precise card marks.) - assert(adr != NULL, ""); + assert(adr != nullptr, ""); IdealKit ideal(kit, true); @@ -448,7 +448,7 @@ void G1BarrierSetC2::post_barrier(GraphKit* kit, // If we know the value being stored does it cross regions? - if (val != NULL) { + if (val != nullptr) { // Does the store cause us to cross regions? // Should be able to do an unsigned compare of region_size instead of @@ -459,7 +459,7 @@ void G1BarrierSetC2::post_barrier(GraphKit* kit, // if (xor_res == 0) same region so skip __ if_then(xor_res, BoolTest::ne, zeroX, likely); { - // No barrier if we are storing a NULL + // No barrier if we are storing a null. 
__ if_then(val, BoolTest::ne, kit->null(), likely); { // Ok must mark the card if not already dirty @@ -509,7 +509,7 @@ void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* off // If offset is a constant, is it java_lang_ref_Reference::_reference_offset? const TypeX* otype = offset->find_intptr_t_type(); - if (otype != NULL && otype->is_con() && + if (otype != nullptr && otype->is_con() && otype->get_con() != java_lang_ref_Reference::referent_offset()) { // Constant offset but not the reference_offset so just return return; @@ -517,14 +517,14 @@ void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* off // We only need to generate the runtime guards for instances. const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr(); - if (btype != NULL) { + if (btype != nullptr) { if (btype->isa_aryptr()) { // Array type so nothing to do return; } const TypeInstPtr* itype = btype->isa_instptr(); - if (itype != NULL) { + if (itype != nullptr) { // Can the klass of base_oop be statically determined to be // _not_ a sub-class of Reference and _not_ Object? ciKlass* klass = itype->instance_klass(); @@ -563,7 +563,7 @@ void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* off __ sync_kit(kit); Node* one = __ ConI(1); - // is_instof == 0 if base_oop == NULL + // is_instof == 0 if base_oop == nullptr __ if_then(is_instof, BoolTest::eq, one, unlikely); { // Update graphKit from IdeakKit. 
@@ -572,7 +572,7 @@ void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* off // Use the pre-barrier to record the value in the referent field pre_barrier(kit, false /* do_load */, __ ctrl(), - NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */, + nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */, pre_val /* pre_val */, T_OBJECT); if (need_mem_bar) { @@ -647,7 +647,7 @@ Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) c // Use the pre-barrier to record the value in the referent field pre_barrier(kit, false /* do_load */, kit->control(), - NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */, + nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */, load /* pre_val */, T_OBJECT); // Add memory barrier to prevent commoning reads from this field // across safepoint since GC can change its value. @@ -669,7 +669,7 @@ bool G1BarrierSetC2::is_gc_barrier_node(Node* node) const { return false; } CallLeafNode *call = node->as_CallLeaf(); - if (call->_name == NULL) { + if (call->_name == nullptr) { return false; } @@ -720,14 +720,14 @@ void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) c // but the new allocation is passed to arraycopy stub and it could not // be scalar replaced. So we don't check the case. - // An other case of only one user (Xor) is when the value check for NULL + // An other case of only one user (Xor) is when the value check for null // in G1 post barrier is folded after CCP so the code which used URShift // is removed. // Take Region node before eliminating post barrier since it also // eliminates CastP2X node when it has only one user. Node* this_region = node->in(0); - assert(this_region != NULL, ""); + assert(this_region != nullptr, ""); // Remove G1 post barrier. 
@@ -735,7 +735,7 @@ void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) c // checks if the store done to a different from the value's region. // And replace Cmp with #0 (false) to collapse G1 post barrier. Node* xorx = node->find_out_with(Op_XorX); - if (xorx != NULL) { + if (xorx != nullptr) { Node* shift = xorx->unique_out(); Node* cmpx = shift->unique_out(); assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() && @@ -746,7 +746,7 @@ void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) c // Remove G1 pre barrier. // Search "if (marking != 0)" check and set it to "false". - // There is no G1 pre barrier if previous stored value is NULL + // There is no G1 pre barrier if previous stored value is null // (for example, after initialization). if (this_region->is_Region() && this_region->req() == 3) { int ind = 1; @@ -777,10 +777,10 @@ void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) c // Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks if the card // is marked as young_gen and replace the Cmp with 0 (false) to collapse the barrier. 
Node* shift = node->find_out_with(Op_URShiftX); - assert(shift != NULL, "missing G1 post barrier"); + assert(shift != nullptr, "missing G1 post barrier"); Node* addp = shift->unique_out(); Node* load = addp->find_out_with(Op_LoadB); - assert(load != NULL, "missing G1 post barrier"); + assert(load != nullptr, "missing G1 post barrier"); Node* cmpx = load->unique_out(); assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() && cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne, @@ -797,27 +797,27 @@ void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) c Node* G1BarrierSetC2::step_over_gc_barrier(Node* c) const { if (!use_ReduceInitialCardMarks() && - c != NULL && c->is_Region() && c->req() == 3) { + c != nullptr && c->is_Region() && c->req() == 3) { for (uint i = 1; i < c->req(); i++) { - if (c->in(i) != NULL && c->in(i)->is_Region() && + if (c->in(i) != nullptr && c->in(i)->is_Region() && c->in(i)->req() == 3) { Node* r = c->in(i); for (uint j = 1; j < r->req(); j++) { - if (r->in(j) != NULL && r->in(j)->is_Proj() && - r->in(j)->in(0) != NULL && + if (r->in(j) != nullptr && r->in(j)->is_Proj() && + r->in(j)->in(0) != nullptr && r->in(j)->in(0)->Opcode() == Op_CallLeaf && r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry)) { Node* call = r->in(j)->in(0); c = c->in(i == 1 ? 
2 : 1); - if (c != NULL && c->Opcode() != Op_Parm) { + if (c != nullptr && c->Opcode() != Op_Parm) { c = c->in(0); - if (c != NULL) { + if (c != nullptr) { c = c->in(0); - assert(call->in(0) == NULL || - call->in(0)->in(0) == NULL || - call->in(0)->in(0)->in(0) == NULL || - call->in(0)->in(0)->in(0)->in(0) == NULL || - call->in(0)->in(0)->in(0)->in(0)->in(0) == NULL || + assert(call->in(0) == nullptr || + call->in(0)->in(0) == nullptr || + call->in(0)->in(0)->in(0) == nullptr || + call->in(0)->in(0)->in(0)->in(0) == nullptr || + call->in(0)->in(0)->in(0)->in(0)->in(0) == nullptr || c == call->in(0)->in(0)->in(0)->in(0)->in(0), "bad barrier shape"); return c; } @@ -864,7 +864,7 @@ bool G1BarrierSetC2::has_cas_in_use_chain(Node *n) const { void G1BarrierSetC2::verify_pre_load(Node* marking_if, Unique_Node_List& loads /*output*/) const { assert(loads.size() == 0, "Loads list should be empty"); Node* pre_val_if = marking_if->find_out_with(Op_IfTrue)->find_out_with(Op_If); - if (pre_val_if != NULL) { + if (pre_val_if != nullptr) { Unique_Node_List visited; Node_List worklist; Node* pre_val = pre_val_if->in(1)->in(1)->in(1); @@ -891,7 +891,7 @@ void G1BarrierSetC2::verify_pre_load(Node* marking_if, Unique_Node_List& loads / continue; } if (x->is_Load() || x->is_LoadStore()) { - assert(x->in(0) != NULL, "Pre-val load has to have a control"); + assert(x->in(0) != nullptr, "Pre-val load has to have a control"); loads.push(x); continue; } @@ -935,7 +935,7 @@ void G1BarrierSetC2::verify_no_safepoints(Compile* compile, Node* marking_check_ worklist.push(marking_check_if); while (worklist.size() > 0 && found < controls.size()) { Node* x = worklist.pop(); - if (x == NULL || x == compile->top()) continue; + if (x == nullptr || x == compile->top()) continue; if (visited.member(x)) { continue; } else { @@ -973,7 +973,7 @@ void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) co worklist.push(compile->root()); while (worklist.size() > 0) { Node* x = 
worklist.pop(); - if (x == NULL || x == compile->top()) continue; + if (x == nullptr || x == compile->top()) continue; if (visited.member(x)) { continue; } else { @@ -1013,7 +1013,7 @@ void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) co if_ctrl = if_ctrl->in(0)->in(0); } } - assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match"); + assert(load_ctrl != nullptr && if_ctrl == load_ctrl, "controls must match"); Unique_Node_List loads; verify_pre_load(iff, loads); diff --git a/src/hotspot/share/gc/g1/g1AllocRegion.cpp b/src/hotspot/share/gc/g1/g1AllocRegion.cpp index 3a7f41535a2..f20f24bcde4 100644 --- a/src/hotspot/share/gc/g1/g1AllocRegion.cpp +++ b/src/hotspot/share/gc/g1/g1AllocRegion.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,25 +33,25 @@ #include "runtime/orderAccess.hpp" #include "utilities/align.hpp" -G1CollectedHeap* G1AllocRegion::_g1h = NULL; -HeapRegion* G1AllocRegion::_dummy_region = NULL; +G1CollectedHeap* G1AllocRegion::_g1h = nullptr; +HeapRegion* G1AllocRegion::_dummy_region = nullptr; void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) { - assert(_dummy_region == NULL, "should be set once"); - assert(dummy_region != NULL, "pre-condition"); + assert(_dummy_region == nullptr, "should be set once"); + assert(dummy_region != nullptr, "pre-condition"); assert(dummy_region->free() == 0, "pre-condition"); // Make sure that any allocation attempt on this region will fail // and will not trigger any asserts. 
DEBUG_ONLY(size_t assert_tmp); - assert(dummy_region->par_allocate(1, 1, &assert_tmp) == NULL, "should fail"); + assert(dummy_region->par_allocate(1, 1, &assert_tmp) == nullptr, "should fail"); _g1h = g1h; _dummy_region = dummy_region; } size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region) { - assert(alloc_region != NULL && alloc_region != _dummy_region, + assert(alloc_region != nullptr && alloc_region != _dummy_region, "pre-condition"); size_t result = 0; @@ -73,7 +73,7 @@ size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region) { while (free_word_size >= min_word_size_to_fill) { HeapWord* dummy = par_allocate(alloc_region, free_word_size); - if (dummy != NULL) { + if (dummy != nullptr) { // If the allocation was successful we should fill in the space. If the // allocation was in old any necessary BOT updates will be done. alloc_region->fill_with_dummy_object(dummy, free_word_size); @@ -115,7 +115,7 @@ size_t G1AllocRegion::retire_internal(HeapRegion* alloc_region, bool fill_up) { } size_t G1AllocRegion::retire(bool fill_up) { - assert_alloc_region(_alloc_region != NULL, "not initialized properly"); + assert_alloc_region(_alloc_region != nullptr, "not initialized properly"); size_t waste = 0; @@ -137,12 +137,12 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size, trace("attempting region allocation"); HeapRegion* new_alloc_region = allocate_new_region(word_size, force); - if (new_alloc_region != NULL) { + if (new_alloc_region != nullptr) { new_alloc_region->reset_pre_dummy_top(); // Need to do this before the allocation _used_bytes_before = new_alloc_region->used(); HeapWord* result = allocate(new_alloc_region, word_size); - assert_alloc_region(result != NULL, "the allocation should succeeded"); + assert_alloc_region(result != nullptr, "the allocation should succeeded"); OrderAccess::storestore(); // Note that we first perform the allocation and then we store the @@ -153,15 +153,15 @@ HeapWord* 
G1AllocRegion::new_alloc_region_and_allocate(size_t word_size, return result; } else { trace("region allocation failed"); - return NULL; + return nullptr; } ShouldNotReachHere(); } void G1AllocRegion::init() { trace("initializing"); - assert_alloc_region(_alloc_region == NULL && _used_bytes_before == 0, "pre-condition"); - assert_alloc_region(_dummy_region != NULL, "should have been set"); + assert_alloc_region(_alloc_region == nullptr && _used_bytes_before == 0, "pre-condition"); + assert_alloc_region(_dummy_region != nullptr, "should have been set"); _alloc_region = _dummy_region; _count = 0; trace("initialized"); @@ -171,7 +171,7 @@ void G1AllocRegion::set(HeapRegion* alloc_region) { trace("setting"); // We explicitly check that the region is not empty to make sure we // maintain the "the alloc region cannot be empty" invariant. - assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition"); + assert_alloc_region(alloc_region != nullptr && !alloc_region->is_empty(), "pre-condition"); assert_alloc_region(_alloc_region == _dummy_region && _used_bytes_before == 0 && _count == 0, "pre-condition"); @@ -186,7 +186,7 @@ void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) { trace("update"); // We explicitly check that the region is not empty to make sure we // maintain the "the alloc region cannot be empty" invariant. - assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition"); + assert_alloc_region(alloc_region != nullptr && !alloc_region->is_empty(), "pre-condition"); _alloc_region = alloc_region; _count += 1; @@ -198,9 +198,9 @@ HeapRegion* G1AllocRegion::release() { HeapRegion* alloc_region = _alloc_region; retire(false /* fill_up */); assert_alloc_region(_alloc_region == _dummy_region, "post-condition of retire()"); - _alloc_region = NULL; + _alloc_region = nullptr; trace("released"); - return (alloc_region == _dummy_region) ? NULL : alloc_region; + return (alloc_region == _dummy_region) ? 
nullptr : alloc_region; } #ifndef PRODUCT @@ -217,7 +217,7 @@ void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_ bool detailed_info = log.is_trace(); - if ((actual_word_size == 0 && result == NULL) || detailed_info) { + if ((actual_word_size == 0 && result == nullptr) || detailed_info) { ResourceMark rm; LogStream ls_trace(log.trace()); LogStream ls_debug(log.debug()); @@ -225,8 +225,8 @@ void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_ out->print("%s: %u ", _name, _count); - if (_alloc_region == NULL) { - out->print("NULL"); + if (_alloc_region == nullptr) { + out->print("null"); } else if (_alloc_region == _dummy_region) { out->print("DUMMY"); } else { @@ -236,7 +236,7 @@ void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_ out->print(" : %s", str); if (detailed_info) { - if (result != NULL) { + if (result != nullptr) { out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT " actual " SIZE_FORMAT " " PTR_FORMAT, min_word_size, desired_word_size, actual_word_size, p2i(result)); } else if (min_word_size != 0) { @@ -251,7 +251,7 @@ void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_ G1AllocRegion::G1AllocRegion(const char* name, bool bot_updates, uint node_index) - : _alloc_region(NULL), + : _alloc_region(nullptr), _count(0), _used_bytes_before(0), _name(name), @@ -269,7 +269,7 @@ void MutatorAllocRegion::retire_region(HeapRegion* alloc_region, } void MutatorAllocRegion::init() { - assert(_retained_alloc_region == NULL, "Pre-condition"); + assert(_retained_alloc_region == nullptr, "Pre-condition"); G1AllocRegion::init(); _wasted_bytes = 0; } @@ -280,7 +280,7 @@ bool MutatorAllocRegion::should_retain(HeapRegion* region) { return false; } - if (_retained_alloc_region != NULL && + if (_retained_alloc_region != nullptr && free_bytes < _retained_alloc_region->free()) { return false; } @@ -292,12 +292,12 @@ size_t MutatorAllocRegion::retire(bool fill_up) 
{ size_t waste = 0; trace("retiring"); HeapRegion* current_region = get(); - if (current_region != NULL) { + if (current_region != nullptr) { // Retain the current region if it fits a TLAB and has more // free than the currently retained region. if (should_retain(current_region)) { trace("mutator retained"); - if (_retained_alloc_region != NULL) { + if (_retained_alloc_region != nullptr) { waste = retire_internal(_retained_alloc_region, true); } _retained_alloc_region = current_region; @@ -315,12 +315,12 @@ size_t MutatorAllocRegion::retire(bool fill_up) { size_t MutatorAllocRegion::used_in_alloc_regions() { size_t used = 0; HeapRegion* hr = get(); - if (hr != NULL) { + if (hr != nullptr) { used += hr->used(); } hr = _retained_alloc_region; - if (hr != NULL) { + if (hr != nullptr) { used += hr->used(); } return used; @@ -332,9 +332,9 @@ HeapRegion* MutatorAllocRegion::release() { // The retained alloc region must be retired and this must be // done after the above call to release the mutator alloc region, // since it might update the _retained_alloc_region member. - if (_retained_alloc_region != NULL) { + if (_retained_alloc_region != nullptr) { _wasted_bytes += retire_internal(_retained_alloc_region, false); - _retained_alloc_region = NULL; + _retained_alloc_region = nullptr; } log_debug(gc, alloc, region)("Mutator Allocation stats, regions: %u, wasted size: " SIZE_FORMAT "%s (%4.1f%%)", count(), @@ -359,7 +359,7 @@ size_t G1GCAllocRegion::retire(bool fill_up) { HeapRegion* retired = get(); size_t end_waste = G1AllocRegion::retire(fill_up); // Do not count retirement of the dummy allocation region. 
- if (retired != NULL) { + if (retired != nullptr) { _stats->add_region_end_waste(end_waste / HeapWordSize); } return end_waste; diff --git a/src/hotspot/share/gc/g1/g1AllocRegion.hpp b/src/hotspot/share/gc/g1/g1AllocRegion.hpp index 5fdd37d3a01..83accb1ba38 100644 --- a/src/hotspot/share/gc/g1/g1AllocRegion.hpp +++ b/src/hotspot/share/gc/g1/g1AllocRegion.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,10 +46,10 @@ private: // of. The invariant is that if this object is initialized (i.e., // init() has been called and release() has not) then _alloc_region // is either an active allocating region or the dummy region (i.e., - // it can never be NULL) and this object can be used to satisfy + // it can never be null) and this object can be used to satisfy // allocation requests. If this object is not initialized // (i.e. init() has not been called or release() has been called) - // then _alloc_region is NULL and this object should not be used to + // then _alloc_region is null and this object should not be used to // satisfy allocation requests (it was done this way to force the // correct use of init() and release()). HeapRegion* volatile _alloc_region; @@ -75,7 +75,7 @@ private: // purpose and it is not part of the heap) that is full (i.e., top() // == end()). When we don't have a valid active region we make // _alloc_region point to this. This allows us to skip checking - // whether the _alloc_region is NULL or not. + // whether the _alloc_region is null or not. 
static HeapRegion* _dummy_region; // After a region is allocated by alloc_new_region, this @@ -144,7 +144,7 @@ public: HeapRegion* get() const { HeapRegion * hr = _alloc_region; // Make sure that the dummy region does not escape this class. - return (hr == _dummy_region) ? NULL : hr; + return (hr == _dummy_region) ? nullptr : hr; } uint count() { return _count; } @@ -153,14 +153,14 @@ public: // First-level allocation: Should be called without holding a // lock. It will try to allocate lock-free out of the active region, - // or return NULL if it was unable to. + // or return null if it was unable to. inline HeapWord* attempt_allocation(size_t word_size); // Perform an allocation out of the current allocation region, with the given // minimum and desired size. Returns the actual size allocated (between // minimum and desired size) in actual_word_size if the allocation has been // successful. // Should be called without holding a lock. It will try to allocate lock-free - // out of the active region, or return NULL if it was unable to. + // out of the active region, or return null if it was unable to. inline HeapWord* attempt_allocation(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size); @@ -199,7 +199,7 @@ public: size_t min_word_size = 0, size_t desired_word_size = 0, size_t actual_word_size = 0, - HeapWord* result = NULL) PRODUCT_RETURN; + HeapWord* result = nullptr) PRODUCT_RETURN; }; class MutatorAllocRegion : public G1AllocRegion { @@ -224,7 +224,7 @@ public: MutatorAllocRegion(uint node_index) : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */, node_index), _wasted_bytes(0), - _retained_alloc_region(NULL) { } + _retained_alloc_region(nullptr) { } // Returns the combined used memory in the current alloc region and // the retained alloc region. @@ -235,13 +235,13 @@ public: // minimum and desired size) in actual_word_size if the allocation has been // successful. // Should be called without holding a lock. 
It will try to allocate lock-free - // out of the retained region, or return NULL if it was unable to. + // out of the retained region, or return null if it was unable to. inline HeapWord* attempt_retained_allocation(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size); // This specialization of release() makes sure that the retained alloc - // region is retired and set to NULL. + // region is retired and set to null. virtual HeapRegion* release(); virtual void init(); @@ -261,7 +261,7 @@ protected: G1GCAllocRegion(const char* name, bool bot_updates, G1EvacStats* stats, G1HeapRegionAttr::region_type_t purpose, uint node_index = G1NUMA::AnyNodeIndex) : G1AllocRegion(name, bot_updates, node_index), _stats(stats), _purpose(purpose) { - assert(stats != NULL, "Must pass non-NULL PLAB statistics"); + assert(stats != nullptr, "Must pass non-null PLAB statistics"); } }; diff --git a/src/hotspot/share/gc/g1/g1AllocRegion.inline.hpp b/src/hotspot/share/gc/g1/g1AllocRegion.inline.hpp index b1ccc4cefb0..4ab202bf285 100644 --- a/src/hotspot/share/gc/g1/g1AllocRegion.inline.hpp +++ b/src/hotspot/share/gc/g1/g1AllocRegion.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -43,7 +43,7 @@ inline void G1AllocRegion::reset_alloc_region() { inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region, size_t word_size) { - assert(alloc_region != NULL, "pre-condition"); + assert(alloc_region != nullptr, "pre-condition"); return alloc_region->allocate(word_size); } @@ -57,7 +57,7 @@ inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region, size_t min_word_size, size_t desired_word_size, size_t* actual_word_size) { - assert(alloc_region != NULL, "pre-condition"); + assert(alloc_region != nullptr, "pre-condition"); assert(!alloc_region->is_empty(), "pre-condition"); return alloc_region->par_allocate(min_word_size, desired_word_size, actual_word_size); @@ -72,15 +72,15 @@ inline HeapWord* G1AllocRegion::attempt_allocation(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size) { HeapRegion* alloc_region = _alloc_region; - assert_alloc_region(alloc_region != NULL, "not initialized properly"); + assert_alloc_region(alloc_region != nullptr, "not initialized properly"); HeapWord* result = par_allocate(alloc_region, min_word_size, desired_word_size, actual_word_size); - if (result != NULL) { + if (result != nullptr) { trace("alloc", min_word_size, desired_word_size, *actual_word_size, result); return result; } trace("alloc failed", min_word_size, desired_word_size); - return NULL; + return nullptr; } inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size) { @@ -92,7 +92,7 @@ inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size) { HeapWord* result = attempt_allocation(min_word_size, desired_word_size, actual_word_size); - if (result != NULL) { + if (result != nullptr) { return result; } @@ -104,39 +104,39 @@ inline HeapWord* G1AllocRegion::attempt_allocation_using_new_region(size_t min_w size_t* actual_word_size) { retire(true /* fill_up */); 
HeapWord* result = new_alloc_region_and_allocate(desired_word_size, false /* force */); - if (result != NULL) { + if (result != nullptr) { *actual_word_size = desired_word_size; trace("alloc locked (second attempt)", min_word_size, desired_word_size, *actual_word_size, result); return result; } trace("alloc locked failed", min_word_size, desired_word_size); - return NULL; + return nullptr; } inline HeapWord* G1AllocRegion::attempt_allocation_force(size_t word_size) { - assert_alloc_region(_alloc_region != NULL, "not initialized properly"); + assert_alloc_region(_alloc_region != nullptr, "not initialized properly"); trace("forcing alloc", word_size, word_size); HeapWord* result = new_alloc_region_and_allocate(word_size, true /* force */); - if (result != NULL) { + if (result != nullptr) { trace("alloc forced", word_size, word_size, word_size, result); return result; } trace("alloc forced failed", word_size, word_size); - return NULL; + return nullptr; } inline HeapWord* MutatorAllocRegion::attempt_retained_allocation(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size) { - if (_retained_alloc_region != NULL) { + if (_retained_alloc_region != nullptr) { HeapWord* result = par_allocate(_retained_alloc_region, min_word_size, desired_word_size, actual_word_size); - if (result != NULL) { + if (result != nullptr) { trace("alloc retained", min_word_size, desired_word_size, *actual_word_size, result); return result; } } - return NULL; + return nullptr; } #endif // SHARE_GC_G1_G1ALLOCREGION_INLINE_HPP diff --git a/src/hotspot/share/gc/g1/g1Allocator.cpp b/src/hotspot/share/gc/g1/g1Allocator.cpp index 41859e910e7..a8520d78a64 100644 --- a/src/hotspot/share/gc/g1/g1Allocator.cpp +++ b/src/hotspot/share/gc/g1/g1Allocator.cpp @@ -43,10 +43,10 @@ G1Allocator::G1Allocator(G1CollectedHeap* heap) : _survivor_is_full(false), _old_is_full(false), _num_alloc_regions(_numa->num_active_nodes()), - _mutator_alloc_regions(NULL), - _survivor_gc_alloc_regions(NULL), + 
_mutator_alloc_regions(nullptr), + _survivor_gc_alloc_regions(nullptr), _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)), - _retained_old_gc_alloc_region(NULL) { + _retained_old_gc_alloc_region(nullptr) { _mutator_alloc_regions = NEW_C_HEAP_ARRAY(MutatorAllocRegion, _num_alloc_regions, mtGC); _survivor_gc_alloc_regions = NEW_C_HEAP_ARRAY(SurvivorGCAllocRegion, _num_alloc_regions, mtGC); @@ -70,13 +70,13 @@ G1Allocator::~G1Allocator() { #ifdef ASSERT bool G1Allocator::has_mutator_alloc_region() { uint node_index = current_node_index(); - return mutator_alloc_region(node_index)->get() != NULL; + return mutator_alloc_region(node_index)->get() != nullptr; } #endif void G1Allocator::init_mutator_alloc_regions() { for (uint i = 0; i < _num_alloc_regions; i++) { - assert(mutator_alloc_region(i)->get() == NULL, "pre-condition"); + assert(mutator_alloc_region(i)->get() == nullptr, "pre-condition"); mutator_alloc_region(i)->init(); } } @@ -84,7 +84,7 @@ void G1Allocator::init_mutator_alloc_regions() { void G1Allocator::release_mutator_alloc_regions() { for (uint i = 0; i < _num_alloc_regions; i++) { mutator_alloc_region(i)->release(); - assert(mutator_alloc_region(i)->get() == NULL, "post-condition"); + assert(mutator_alloc_region(i)->get() == nullptr, "post-condition"); } } @@ -96,9 +96,7 @@ void G1Allocator::reuse_retained_old_region(G1EvacInfo* evacuation_info, OldGCAllocRegion* old, HeapRegion** retained_old) { HeapRegion* retained_region = *retained_old; - *retained_old = NULL; - assert(retained_region == NULL || !retained_region->is_archive(), - "Archive region should not be alloc region (index %u)", retained_region->hrm_index()); + *retained_old = nullptr; // We will discard the current GC alloc region if: // a) it's in the collection set (it can happen!), @@ -109,7 +107,7 @@ void G1Allocator::reuse_retained_old_region(G1EvacInfo* evacuation_info, // during a cleanup and was added to the free list, but // has been subsequently used to allocate a 
humongous // object that may be less than the region size). - if (retained_region != NULL && + if (retained_region != nullptr && !retained_region->in_collection_set() && !(retained_region->top() == retained_region->end()) && !retained_region->is_empty() && @@ -152,7 +150,7 @@ void G1Allocator::release_gc_alloc_regions(G1EvacInfo* evacuation_info) { // If we have an old GC alloc region to release, we'll save it in // _retained_old_gc_alloc_region. If we don't - // _retained_old_gc_alloc_region will become NULL. This is what we + // _retained_old_gc_alloc_region will become null. This is what we // want either way so no reason to check explicitly for either // condition. _retained_old_gc_alloc_region = old_gc_alloc_region()->release(); @@ -160,10 +158,10 @@ void G1Allocator::release_gc_alloc_regions(G1EvacInfo* evacuation_info) { void G1Allocator::abandon_gc_alloc_regions() { for (uint i = 0; i < _num_alloc_regions; i++) { - assert(survivor_gc_alloc_region(i)->get() == NULL, "pre-condition"); + assert(survivor_gc_alloc_region(i)->get() == nullptr, "pre-condition"); } - assert(old_gc_alloc_region()->get() == NULL, "pre-condition"); - _retained_old_gc_alloc_region = NULL; + assert(old_gc_alloc_region()->get() == nullptr, "pre-condition"); + _retained_old_gc_alloc_region = nullptr; } bool G1Allocator::survivor_is_full() const { @@ -193,7 +191,7 @@ size_t G1Allocator::unsafe_max_tlab_alloc() { uint node_index = current_node_index(); HeapRegion* hr = mutator_alloc_region(node_index)->get(); size_t max_tlab = _g1h->max_tlab_size() * wordSize; - if (hr == NULL) { + if (hr == nullptr) { return max_tlab; } else { return clamp(hr->free(), MinTLABSize, max_tlab); @@ -201,7 +199,7 @@ size_t G1Allocator::unsafe_max_tlab_alloc() { } size_t G1Allocator::used_in_alloc_regions() { - assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf."); + assert(Heap_lock->owner() != nullptr, "Should be owned on this thread's behalf."); size_t used = 0; for (uint i = 0; i < 
_num_alloc_regions; i++) { used += mutator_alloc_region(i)->used_in_alloc_regions(); @@ -215,7 +213,7 @@ HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest, uint node_index) { size_t temp = 0; HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, node_index); - assert(result == NULL || temp == word_size, + assert(result == nullptr || temp == word_size, "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT, word_size, temp, p2i(result)); return result; @@ -233,7 +231,7 @@ HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest, return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size); default: ShouldNotReachHere(); - return NULL; // Keep some compilers happy + return nullptr; // Keep some compilers happy } } @@ -247,7 +245,7 @@ HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size, HeapWord* result = survivor_gc_alloc_region(node_index)->attempt_allocation(min_word_size, desired_word_size, actual_word_size); - if (result == NULL && !survivor_is_full()) { + if (result == nullptr && !survivor_is_full()) { MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag); // Multiple threads may have queued at the FreeList_lock above after checking whether there // actually is still memory available. 
Redo the check under the lock to avoid unnecessary work; @@ -256,12 +254,12 @@ HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size, result = survivor_gc_alloc_region(node_index)->attempt_allocation_locked(min_word_size, desired_word_size, actual_word_size); - if (result == NULL) { + if (result == nullptr) { set_survivor_full(); } } } - if (result != NULL) { + if (result != nullptr) { _g1h->dirty_young_block(result, *actual_word_size); } return result; @@ -276,7 +274,7 @@ HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size, HeapWord* result = old_gc_alloc_region()->attempt_allocation(min_word_size, desired_word_size, actual_word_size); - if (result == NULL && !old_is_full()) { + if (result == nullptr && !old_is_full()) { MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag); // Multiple threads may have queued at the FreeList_lock above after checking whether there // actually is still memory available. Redo the check under the lock to avoid unnecessary work; @@ -285,7 +283,7 @@ HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size, result = old_gc_alloc_region()->attempt_allocation_locked(min_word_size, desired_word_size, actual_word_size); - if (result == NULL) { + if (result == nullptr) { set_old_full(); } } @@ -400,15 +398,15 @@ HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest, &actual_plab_size, node_index); - assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)), + assert(buf == nullptr || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)), "Requested at minimum %zu, desired %zu words, but got %zu at " PTR_FORMAT, required_in_plab, plab_word_size, actual_plab_size, p2i(buf)); - if (buf != NULL) { + if (buf != nullptr) { alloc_buf->set_buf(buf, actual_plab_size); HeapWord* const obj = alloc_buf->allocate(word_sz); - assert(obj != NULL, "PLAB should have been big enough, tried to allocate " + 
assert(obj != nullptr, "PLAB should have been big enough, tried to allocate " "%zu requiring %zu PLAB size %zu", word_sz, required_in_plab, plab_word_size); return obj; @@ -418,7 +416,7 @@ HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest, } // Try direct allocation. HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, node_index); - if (result != NULL) { + if (result != nullptr) { plab_data->_direct_allocated += word_sz; plab_data->_num_direct_allocations++; } @@ -434,7 +432,7 @@ void G1PLABAllocator::flush_and_retire_stats(uint num_workers) { G1EvacStats* stats = _g1h->alloc_buffer_stats(state); for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) { PLAB* const buf = alloc_buffer(state, node_index); - if (buf != NULL) { + if (buf != nullptr) { buf->flush_and_retire_stats(stats); } } @@ -460,7 +458,7 @@ size_t G1PLABAllocator::waste() const { for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) { for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) { PLAB* const buf = alloc_buffer(state, node_index); - if (buf != NULL) { + if (buf != nullptr) { result += buf->waste(); } } @@ -477,7 +475,7 @@ size_t G1PLABAllocator::undo_waste() const { for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) { for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) { PLAB* const buf = alloc_buffer(state, node_index); - if (buf != NULL) { + if (buf != nullptr) { result += buf->undo_waste(); } } diff --git a/src/hotspot/share/gc/g1/g1Allocator.hpp b/src/hotspot/share/gc/g1/g1Allocator.hpp index 99eaab310c4..32f37778a18 100644 --- a/src/hotspot/share/gc/g1/g1Allocator.hpp +++ b/src/hotspot/share/gc/g1/g1Allocator.hpp @@ -203,7 +203,7 @@ public: size_t plab_size(G1HeapRegionAttr which) const; // Allocate word_sz words in dest, either directly into the regions or by - // allocating a new PLAB. 
Returns the address of the allocated memory, NULL if + // allocating a new PLAB. Returns the address of the allocated memory, null if // not successful. Plab_refill_failed indicates whether an attempt to refill the // PLAB failed or not. HeapWord* allocate_direct_or_new_plab(G1HeapRegionAttr dest, @@ -212,7 +212,7 @@ public: uint node_index); // Allocate word_sz words in the PLAB of dest. Returns the address of the - // allocated memory, NULL if not successful. + // allocated memory, null if not successful. inline HeapWord* plab_allocate(G1HeapRegionAttr dest, size_t word_sz, uint node_index); diff --git a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp index aa10347ad4e..13ae9b9bbbd 100644 --- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp +++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -55,7 +55,7 @@ inline HeapWord* G1Allocator::attempt_allocation(size_t min_word_size, uint node_index = current_node_index(); HeapWord* result = mutator_alloc_region(node_index)->attempt_retained_allocation(min_word_size, desired_word_size, actual_word_size); - if (result != NULL) { + if (result != nullptr) { return result; } @@ -66,7 +66,7 @@ inline HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size) { uint node_index = current_node_index(); HeapWord* result = mutator_alloc_region(node_index)->attempt_allocation_locked(word_size); - assert(result != NULL || mutator_alloc_region(node_index)->get() == NULL, + assert(result != nullptr || mutator_alloc_region(node_index)->get() == nullptr, "Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region(node_index)->get())); return result; } @@ -80,7 +80,7 @@ inline PLAB* G1PLABAllocator::alloc_buffer(G1HeapRegionAttr dest, uint node_inde assert(dest.is_valid(), "Allocation buffer index out of bounds: %s", dest.get_type_str()); assert(_dest_data[dest.type()]._alloc_buffer != nullptr, - "Allocation buffer is NULL: %s", dest.get_type_str()); + "Allocation buffer is null: %s", dest.get_type_str()); return alloc_buffer(dest.type(), node_index); } @@ -117,7 +117,7 @@ inline HeapWord* G1PLABAllocator::allocate(G1HeapRegionAttr dest, bool* refill_failed, uint node_index) { HeapWord* const obj = plab_allocate(dest, word_sz, node_index); - if (obj != NULL) { + if (obj != nullptr) { return obj; } return allocate_direct_or_new_plab(dest, word_sz, refill_failed, node_index); diff --git a/src/hotspot/share/gc/g1/g1Arguments.cpp b/src/hotspot/share/gc/g1/g1Arguments.cpp index b85d361fbb9..28f850938c4 100644 --- a/src/hotspot/share/gc/g1/g1Arguments.cpp +++ b/src/hotspot/share/gc/g1/g1Arguments.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -81,9 +81,9 @@ void G1Arguments::initialize_verification_types() { char* save_ptr; char* token = strtok_r(type_list, delimiter, &save_ptr); - while (token != NULL) { + while (token != nullptr) { parse_verification_type(token); - token = strtok_r(NULL, delimiter, &save_ptr); + token = strtok_r(nullptr, delimiter, &save_ptr); } FREE_C_HEAP_ARRAY(char, type_list); } @@ -167,7 +167,7 @@ void G1Arguments::initialize() { FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads()); if (ParallelGCThreads == 0) { assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0."); - vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL); + vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", nullptr); } // When dumping the CDS archive we want to reduce fragmentation by diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp index e5d871c57d1..e5b477ad156 100644 --- a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp +++ b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -98,7 +98,7 @@ inline void G1BarrierSet::enqueue_preloaded_if_weak(DecoratorSet decorators, oop const bool peek = (decorators & AS_NO_KEEPALIVE) != 0; const bool needs_enqueue = (!peek && !on_strong_oop_ref); - if (needs_enqueue && value != NULL) { + if (needs_enqueue && value != nullptr) { enqueue_preloaded(value); } } diff --git a/src/hotspot/share/gc/g1/g1BiasedArray.cpp b/src/hotspot/share/gc/g1/g1BiasedArray.cpp index ce0196ee5bb..22b07db3d85 100644 --- a/src/hotspot/share/gc/g1/g1BiasedArray.cpp +++ b/src/hotspot/share/gc/g1/g1BiasedArray.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,10 +27,10 @@ #include "memory/padded.inline.hpp" G1BiasedMappedArrayBase::G1BiasedMappedArrayBase() : - _alloc_base(NULL), - _base(NULL), + _alloc_base(nullptr), + _base(nullptr), _length(0), - _biased_base(NULL), + _biased_base(nullptr), _bias(0), _shift_by(0) { } @@ -47,19 +47,19 @@ address G1BiasedMappedArrayBase::create_new_base_array(size_t length, size_t ele #ifndef PRODUCT void G1BiasedMappedArrayBase::verify_index(idx_t index) const { - guarantee(_base != NULL, "Array not initialized"); + guarantee(_base != nullptr, "Array not initialized"); guarantee(index < length(), "Index out of bounds index: " SIZE_FORMAT " length: " SIZE_FORMAT, index, length()); } void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const { - guarantee(_biased_base != NULL, "Array not initialized"); + guarantee(_biased_base != nullptr, "Array not initialized"); guarantee(biased_index >= bias() && biased_index < (bias() + length()), "Biased index out of bounds, index: " SIZE_FORMAT " bias: " SIZE_FORMAT " length: " SIZE_FORMAT, 
biased_index, bias(), length()); } void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const { - guarantee(_biased_base != NULL, "Array not initialized"); + guarantee(_biased_base != nullptr, "Array not initialized"); guarantee(biased_index >= bias() && biased_index <= (bias() + length()), "Biased index out of inclusive bounds, index: " SIZE_FORMAT " bias: " SIZE_FORMAT " length: " SIZE_FORMAT, biased_index, bias(), length()); diff --git a/src/hotspot/share/gc/g1/g1BiasedArray.hpp b/src/hotspot/share/gc/g1/g1BiasedArray.hpp index 8080b76fe57..67b4c6bbe30 100644 --- a/src/hotspot/share/gc/g1/g1BiasedArray.hpp +++ b/src/hotspot/share/gc/g1/g1BiasedArray.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,7 +57,7 @@ protected: // Initialize the members of this class. The biased start address of this array // is the bias (in elements) multiplied by the element size. void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) { - assert(base != NULL, "just checking"); + assert(base != nullptr, "just checking"); assert(length > 0, "just checking"); assert(shift_by < sizeof(uintptr_t) * 8, "Shifting by %u, larger than word size?", shift_by); _base = base; diff --git a/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp b/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp index 32979545493..c8b60b4c982 100644 --- a/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp +++ b/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ ////////////////////////////////////////////////////////////////////// G1BlockOffsetTable::G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* storage) : - _reserved(heap), _offset_array(NULL) { + _reserved(heap), _offset_array(nullptr) { MemRegion bot_reserved = storage->reserved(); @@ -186,7 +186,7 @@ void G1BlockOffsetTablePart::update_for_block_work(HeapWord* blk_start, HeapWord* const cur_card_boundary = align_up_by_card_size(blk_start); size_t const index = _bot->index_for_raw(cur_card_boundary); - assert(blk_start != NULL && blk_end > blk_start, + assert(blk_start != nullptr && blk_end > blk_start, "phantom block"); assert(blk_end > cur_card_boundary, "should be past cur_card_boundary"); assert(blk_start <= cur_card_boundary, "blk_start should be at or before cur_card_boundary"); diff --git a/src/hotspot/share/gc/g1/g1CardSet.cpp b/src/hotspot/share/gc/g1/g1CardSet.cpp index a2eb4bbe442..4e3f08ddc9d 100644 --- a/src/hotspot/share/gc/g1/g1CardSet.cpp +++ b/src/hotspot/share/gc/g1/g1CardSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -276,18 +276,6 @@ class G1CardSetHashTable : public CHeapObj { G1CardSetHashTableValue* value() const { return _value; } }; - class G1CardSetHashTableScan : public StackObj { - G1CardSet::ContainerPtrClosure* _scan_f; - public: - explicit G1CardSetHashTableScan(G1CardSet::ContainerPtrClosure* f) : _scan_f(f) { } - - bool operator()(G1CardSetHashTableValue* value) { - _scan_f->do_containerptr(value->_region_idx, value->_num_occupied, value->_container); - return true; - } - }; - - public: static const size_t InitialLogTableSize = 2; @@ -335,14 +323,14 @@ public: return found.value(); } - void iterate_safepoint(G1CardSet::ContainerPtrClosure* cl2) { - G1CardSetHashTableScan cl(cl2); - _table_scanner.do_safepoint_scan(cl); + template + void iterate_safepoint(SCAN_FUNC& scan_f) { + _table_scanner.do_safepoint_scan(scan_f); } - void iterate(G1CardSet::ContainerPtrClosure* cl2) { - G1CardSetHashTableScan cl(cl2); - _table.do_scan(Thread::current(), cl); + template + void iterate(SCAN_FUNC& scan_f) { + _table.do_scan(Thread::current(), scan_f); } void reset() { @@ -871,7 +859,7 @@ void G1CardSet::print_info(outputStream* st, uintptr_t card) { G1CardSetHashTableValue* table_entry = get_container(card_region); if (table_entry == nullptr) { - st->print("NULL card set"); + st->print("null card set"); return; } @@ -924,10 +912,16 @@ void G1CardSet::iterate_cards_during_transfer(ContainerPtr const container, Card } void G1CardSet::iterate_containers(ContainerPtrClosure* cl, bool at_safepoint) { + auto do_value = + [&] (G1CardSetHashTableValue* value) { + cl->do_containerptr(value->_region_idx, value->_num_occupied, value->_container); + return true; + }; + if (at_safepoint) { - _table->iterate_safepoint(cl); + _table->iterate_safepoint(do_value); } else { - _table->iterate(cl); + _table->iterate(do_value); } } diff --git a/src/hotspot/share/gc/g1/g1CardTable.cpp b/src/hotspot/share/gc/g1/g1CardTable.cpp 
index 417453be967..0dc845825d6 100644 --- a/src/hotspot/share/gc/g1/g1CardTable.cpp +++ b/src/hotspot/share/gc/g1/g1CardTable.cpp @@ -55,7 +55,6 @@ void G1CardTable::initialize(G1RegionToSpaceMapper* mapper) { HeapWord* low_bound = _whole_heap.start(); HeapWord* high_bound = _whole_heap.end(); - _cur_covered_regions = 1; _covered[0] = _whole_heap; _byte_map = (CardValue*) mapper->reserved().start(); diff --git a/src/hotspot/share/gc/g1/g1CardTable.hpp b/src/hotspot/share/gc/g1/g1CardTable.hpp index 3bf1bd07d04..9d4370d27ff 100644 --- a/src/hotspot/share/gc/g1/g1CardTable.hpp +++ b/src/hotspot/share/gc/g1/g1CardTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ class G1CardTableChangedListener : public G1MappingChangedListener { private: G1CardTable* _card_table; public: - G1CardTableChangedListener() : _card_table(NULL) { } + G1CardTableChangedListener() : _card_table(nullptr) { } void set_card_table(G1CardTable* card_table) { _card_table = card_table; } @@ -103,7 +103,7 @@ public: inline void mark_range_dirty(size_t start_card_index, size_t num_cards); // Change the given range of dirty cards to "which". All of these cards must be Dirty. - inline void change_dirty_cards_to(size_t start_card_index, size_t num_cards, CardValue which); + inline void change_dirty_cards_to(CardValue* start_card, CardValue* end_card, CardValue which); inline uint region_idx_for(CardValue* p); @@ -115,11 +115,8 @@ public: // Returns how many bytes of the heap a single byte of the Card Table corresponds to. 
static size_t heap_map_factor() { return _card_size; } - void initialize() override {} void initialize(G1RegionToSpaceMapper* mapper); - void resize_covered_region(MemRegion new_region) override { ShouldNotReachHere(); } - bool is_in_young(const void* p) const override; }; diff --git a/src/hotspot/share/gc/g1/g1CardTable.inline.hpp b/src/hotspot/share/gc/g1/g1CardTable.inline.hpp index b8a0e9b5408..5d9742c629a 100644 --- a/src/hotspot/share/gc/g1/g1CardTable.inline.hpp +++ b/src/hotspot/share/gc/g1/g1CardTable.inline.hpp @@ -72,14 +72,12 @@ inline void G1CardTable::mark_range_dirty(size_t start_card_index, size_t num_ca } } -inline void G1CardTable::change_dirty_cards_to(size_t start_card_index, size_t num_cards, CardValue which) { - CardValue* start = &_byte_map[start_card_index]; - CardValue* const end = start + num_cards; - while (start < end) { - CardValue value = *start; +inline void G1CardTable::change_dirty_cards_to(CardValue* start_card, CardValue* end_card, CardValue which) { + for (CardValue* i_card = start_card; i_card < end_card; ++i_card) { + CardValue value = *i_card; assert(value == dirty_card_val(), - "Must have been dirty %d start " PTR_FORMAT " " PTR_FORMAT, value, p2i(start), p2i(end)); - *start++ = which; + "Must have been dirty %d start " PTR_FORMAT " " PTR_FORMAT, value, p2i(start_card), p2i(end_card)); + *i_card = which; } } diff --git a/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp b/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp index cb0f4e81729..77211af4364 100644 --- a/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp +++ b/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -85,7 +85,7 @@ void G1CodeBlobClosure::do_evacuation_and_fixup(nmethod* nm) { nm->mark_as_maybe_on_stack(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); - if (bs_nm != NULL) { + if (bs_nm != nullptr) { bs_nm->disarm(nm); } } @@ -101,7 +101,7 @@ void G1CodeBlobClosure::do_marking(nmethod* nm) { nm->mark_as_maybe_on_stack(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); - if (bs_nm != NULL) { + if (bs_nm != nullptr) { bs_nm->disarm(nm); } @@ -125,7 +125,7 @@ public: void G1CodeBlobClosure::do_code_blob(CodeBlob* cb) { nmethod* nm = cb->as_nmethod_or_null(); - if (nm == NULL) { + if (nm == nullptr) { return; } diff --git a/src/hotspot/share/gc/g1/g1CodeBlobClosure.hpp b/src/hotspot/share/gc/g1/g1CodeBlobClosure.hpp index 962bab81c12..e20073bc287 100644 --- a/src/hotspot/share/gc/g1/g1CodeBlobClosure.hpp +++ b/src/hotspot/share/gc/g1/g1CodeBlobClosure.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,7 @@ class G1CodeBlobClosure : public CodeBlobClosure { void do_oop_work(T* p); public: - HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {} + HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(nullptr) {} void do_oop(oop* o); void do_oop(narrowOop* o); diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index 4e249e3e2de..4be9d170d9e 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -170,7 +170,7 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, HeapRegion* res = _hrm.allocate_free_region(type, node_index); - if (res == NULL && do_expand) { + if (res == nullptr && do_expand) { // Currently, only attempts to allocate GC alloc regions set // do_expand to true. So, we should only reach here during a // safepoint. @@ -186,7 +186,7 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, // Given that expand_single_region() succeeded in expanding the heap, and we // always expand the heap by an amount aligned to the heap // region size, the free list should in theory not be empty. - // In either case allocate_free_region() will check for NULL. + // In either case allocate_free_region() will check for null. 
res = _hrm.allocate_free_region(type, node_index); } } @@ -279,7 +279,7 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate_initialize_regions(HeapRegion* first_hr, uint num_regions, size_t word_size) { - assert(first_hr != NULL, "pre-condition"); + assert(first_hr != nullptr, "pre-condition"); assert(is_humongous(word_size), "word_size should be humongous"); assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); @@ -348,12 +348,12 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { // Policy: First try to allocate a humongous object in the free list. HeapRegion* humongous_start = _hrm.allocate_humongous(obj_regions); - if (humongous_start == NULL) { + if (humongous_start == nullptr) { // Policy: We could not find enough regions for the humongous object in the // free list. Look through the heap to find a mix of free and uncommitted regions. // If so, expand the heap and allocate the humongous object. humongous_start = _hrm.expand_and_allocate_humongous(obj_regions); - if (humongous_start != NULL) { + if (humongous_start != nullptr) { // We managed to find a region by expanding the heap. log_debug(gc, ergo, heap)("Heap expansion (humongous allocation request). 
Allocation request: " SIZE_FORMAT "B", word_size * HeapWordSize); @@ -363,10 +363,10 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { } } - HeapWord* result = NULL; - if (humongous_start != NULL) { + HeapWord* result = nullptr; + if (humongous_start != nullptr) { result = humongous_obj_allocate_initialize_regions(humongous_start, obj_regions, word_size); - assert(result != NULL, "it should always return a valid result"); + assert(result != nullptr, "it should always return a valid result"); // A successful humongous object allocation changes the used space // information of the old generation so we need to recalculate the @@ -415,8 +415,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) { // We will loop until a) we manage to successfully perform the // allocation or b) we successfully schedule a collection which // fails to perform the allocation. b) is the only case when we'll - // return NULL. - HeapWord* result = NULL; + // return null. + HeapWord* result = nullptr; for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) { bool should_try_gc; uint gc_count_before; @@ -427,7 +427,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) { // Now that we have the lock, we first retry the allocation in case another // thread changed the region while we were waiting to acquire the lock. result = _allocator->attempt_allocation_locked(word_size); - if (result != NULL) { + if (result != nullptr) { return result; } @@ -438,7 +438,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) { // No need for an ergo message here, can_expand_young_list() does this when // it returns true. 
result = _allocator->attempt_allocation_force(word_size); - if (result != NULL) { + if (result != nullptr) { return result; } } @@ -454,8 +454,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) { if (should_try_gc) { bool succeeded; result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_inc_collection_pause); - if (result != NULL) { - assert(succeeded, "only way to get back a non-NULL result"); + if (result != nullptr) { + assert(succeeded, "only way to get back a non-null result"); log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT, Thread::current()->name(), p2i(result)); return result; @@ -463,10 +463,10 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) { if (succeeded) { // We successfully scheduled a collection which failed to allocate. No - // point in trying to allocate further. We'll just return NULL. + // point in trying to allocate further. We'll just return null. log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate " SIZE_FORMAT " words", Thread::current()->name(), word_size); - return NULL; + return nullptr; } log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words", Thread::current()->name(), word_size); @@ -475,7 +475,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) { if (gclocker_retry_count > GCLockerRetryAllocationCount) { log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating " SIZE_FORMAT " words", Thread::current()->name(), word_size); - return NULL; + return nullptr; } log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name()); // The GCLocker is either active or the GCLocker initiated @@ -495,7 +495,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) { // iteration (after taking the Heap_lock). 
size_t dummy = 0; result = _allocator->attempt_allocation(word_size, word_size, &dummy); - if (result != NULL) { + if (result != nullptr) { return result; } @@ -508,176 +508,124 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) { } ShouldNotReachHere(); - return NULL; + return nullptr; } -bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) { - assert(ranges != NULL, "MemRegion array NULL"); - assert(count != 0, "No MemRegions provided"); - MemRegion reserved = _hrm.reserved(); - for (size_t i = 0; i < count; i++) { - if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) { - return false; - } +bool G1CollectedHeap::check_archive_addresses(MemRegion range) { + return _hrm.reserved().contains(range); +} + +template +void G1CollectedHeap::iterate_regions_in_range(MemRegion range, const Func& func) { + // Mark each G1 region touched by the range as old, add it to + // the old set, and set top. + HeapRegion* curr_region = _hrm.addr_to_region(range.start()); + HeapRegion* end_region = _hrm.addr_to_region(range.last()); + + while (curr_region != nullptr) { + bool is_last = curr_region == end_region; + HeapRegion* next_region = is_last ? nullptr : _hrm.next_region_in_heap(curr_region); + + func(curr_region, is_last); + + curr_region = next_region; } - return true; } -bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, - size_t count, - bool open) { +bool G1CollectedHeap::alloc_archive_regions(MemRegion range) { assert(!is_init_completed(), "Expect to be called at JVM init time"); - assert(ranges != NULL, "MemRegion array NULL"); - assert(count != 0, "No MemRegions provided"); MutexLocker x(Heap_lock); MemRegion reserved = _hrm.reserved(); - HeapWord* prev_last_addr = NULL; - HeapRegion* prev_last_region = NULL; // Temporarily disable pretouching of heap pages. This interface is used // when mmap'ing archived heap data in, so pre-touching is wasted. 
FlagSetting fs(AlwaysPreTouch, false); - // For each specified MemRegion range, allocate the corresponding G1 - // regions and mark them as archive regions. We expect the ranges - // in ascending starting address order, without overlap. - for (size_t i = 0; i < count; i++) { - MemRegion curr_range = ranges[i]; - HeapWord* start_address = curr_range.start(); - size_t word_size = curr_range.word_size(); - HeapWord* last_address = curr_range.last(); - size_t commits = 0; + // For the specified MemRegion range, allocate the corresponding G1 + // region(s) and mark them as old region(s). + HeapWord* start_address = range.start(); + size_t word_size = range.word_size(); + HeapWord* last_address = range.last(); + size_t commits = 0; - guarantee(reserved.contains(start_address) && reserved.contains(last_address), - "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]", - p2i(start_address), p2i(last_address)); - guarantee(start_address > prev_last_addr, - "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT , - p2i(start_address), p2i(prev_last_addr)); - prev_last_addr = last_address; + guarantee(reserved.contains(start_address) && reserved.contains(last_address), + "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]", + p2i(start_address), p2i(last_address)); - // Check for ranges that start in the same G1 region in which the previous - // range ended, and adjust the start address so we don't try to allocate - // the same region again. If the current range is entirely within that - // region, skip it, just adjusting the recorded top. 
- HeapRegion* start_region = _hrm.addr_to_region(start_address); - if ((prev_last_region != NULL) && (start_region == prev_last_region)) { - start_address = start_region->end(); - if (start_address > last_address) { - increase_used(word_size * HeapWordSize); - start_region->set_top(last_address + 1); - continue; - } - start_region->set_top(start_address); - curr_range = MemRegion(start_address, last_address + 1); - start_region = _hrm.addr_to_region(start_address); - } - - // Perform the actual region allocation, exiting if it fails. - // Then note how much new space we have allocated. - if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) { - return false; - } - increase_used(word_size * HeapWordSize); - if (commits != 0) { - log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B", - HeapRegion::GrainWords * HeapWordSize * commits); - - } - - // Mark each G1 region touched by the range as archive, add it to - // the old set, and set top. - HeapRegion* curr_region = _hrm.addr_to_region(start_address); - HeapRegion* last_region = _hrm.addr_to_region(last_address); - prev_last_region = last_region; - - while (curr_region != NULL) { - assert(curr_region->is_empty() && !curr_region->is_pinned(), - "Region already in use (index %u)", curr_region->hrm_index()); - if (open) { - curr_region->set_open_archive(); - } else { - curr_region->set_closed_archive(); - } - _hr_printer.alloc(curr_region); - _archive_set.add(curr_region); - HeapWord* top; - HeapRegion* next_region; - if (curr_region != last_region) { - top = curr_region->end(); - next_region = _hrm.next_region_in_heap(curr_region); - } else { - top = last_address + 1; - next_region = NULL; - } - curr_region->set_top(top); - curr_region = next_region; - } + // Perform the actual region allocation, exiting if it fails. + // Then note how much new space we have allocated. 
+ if (!_hrm.allocate_containing_regions(range, &commits, workers())) { + return false; } + increase_used(word_size * HeapWordSize); + if (commits != 0) { + log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B", + HeapRegion::GrainWords * HeapWordSize * commits); + + } + + // Mark each G1 region touched by the range as old, add it to + // the old set, and set top. + auto set_region_to_old = [&] (HeapRegion* r, bool is_last) { + assert(r->is_empty(), "Region already in use (%u)", r->hrm_index()); + + HeapWord* top = is_last ? last_address + 1 : r->end(); + r->set_top(top); + + r->set_old(); + _hr_printer.alloc(r); + _old_set.add(r); + }; + + iterate_regions_in_range(range, set_region_to_old); return true; } -void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) { +void G1CollectedHeap::populate_archive_regions_bot_part(MemRegion range) { + assert(!is_init_completed(), "Expect to be called at JVM init time"); + + iterate_regions_in_range(range, + [&] (HeapRegion* r, bool is_last) { + r->update_bot(); + }); +} + +void G1CollectedHeap::dealloc_archive_regions(MemRegion range) { assert(!is_init_completed(), "Expect to be called at JVM init time"); - assert(ranges != NULL, "MemRegion array NULL"); - assert(count != 0, "No MemRegions provided"); MemRegion reserved = _hrm.reserved(); - HeapWord *prev_last_addr = NULL; - HeapRegion* prev_last_region = NULL; + size_t size_used = 0; + uint shrink_count = 0; - // For each MemRegion, create filler objects, if needed, in the G1 regions - // that contain the address range. The address range actually within the - // MemRegion will not be modified. That is assumed to have been initialized - // elsewhere, probably via an mmap of archived heap data. + // Free the G1 regions that are within the specified range. 
MutexLocker x(Heap_lock); - for (size_t i = 0; i < count; i++) { - HeapWord* start_address = ranges[i].start(); - HeapWord* last_address = ranges[i].last(); + HeapWord* start_address = range.start(); + HeapWord* last_address = range.last(); - assert(reserved.contains(start_address) && reserved.contains(last_address), - "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]", - p2i(start_address), p2i(last_address)); - assert(start_address > prev_last_addr, - "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT , - p2i(start_address), p2i(prev_last_addr)); + assert(reserved.contains(start_address) && reserved.contains(last_address), + "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]", + p2i(start_address), p2i(last_address)); + size_used += range.byte_size(); - HeapRegion* start_region = _hrm.addr_to_region(start_address); - HeapRegion* last_region = _hrm.addr_to_region(last_address); - HeapWord* bottom_address = start_region->bottom(); + // Free, empty and uncommit regions with CDS archive content. + auto dealloc_archive_region = [&] (HeapRegion* r, bool is_last) { + guarantee(r->is_old(), "Expected old region at index %u", r->hrm_index()); + _old_set.remove(r); + r->set_free(); + r->set_top(r->bottom()); + _hrm.shrink_at(r->hrm_index(), 1); + shrink_count++; + }; - // Check for a range beginning in the same region in which the - // previous one ended. - if (start_region == prev_last_region) { - bottom_address = prev_last_addr + 1; - } + iterate_regions_in_range(range, dealloc_archive_region); - // Verify that the regions were all marked as archive regions by - // alloc_archive_regions. 
- HeapRegion* curr_region = start_region; - while (curr_region != NULL) { - guarantee(curr_region->is_archive(), - "Expected archive region at index %u", curr_region->hrm_index()); - if (curr_region != last_region) { - curr_region = _hrm.next_region_in_heap(curr_region); - } else { - curr_region = NULL; - } - } - - prev_last_addr = last_address; - prev_last_region = last_region; - - // Fill the memory below the allocated range with dummy object(s), - // if the region bottom does not match the range start, or if the previous - // range ended within the same G1 region, and there is a gap. - assert(start_address >= bottom_address, "bottom address should not be greater than start address"); - if (start_address > bottom_address) { - size_t fill_size = pointer_delta(start_address, bottom_address); - G1CollectedHeap::fill_with_objects(bottom_address, fill_size); - increase_used(fill_size * HeapWordSize); - } + if (shrink_count != 0) { + log_debug(gc, ergo, heap)("Attempt heap shrinking (CDS archive regions). Total size: " SIZE_FORMAT "B", + HeapRegion::GrainWords * HeapWordSize * shrink_count); + // Explicit uncommit. 
+ uncommit_regions(shrink_count); } + decrease_used(size_used); } inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size, @@ -689,13 +637,13 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size, HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size); - if (result == NULL) { + if (result == nullptr) { *actual_word_size = desired_word_size; result = attempt_allocation_slow(desired_word_size); } assert_heap_not_locked(); - if (result != NULL) { + if (result != nullptr) { assert(*actual_word_size != 0, "Actual size must have been set here"); dirty_young_block(result, *actual_word_size); } else { @@ -705,99 +653,6 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size, return result; } -void G1CollectedHeap::populate_archive_regions_bot_part(MemRegion* ranges, size_t count) { - assert(!is_init_completed(), "Expect to be called at JVM init time"); - assert(ranges != NULL, "MemRegion array NULL"); - assert(count != 0, "No MemRegions provided"); - - HeapWord* st = ranges[0].start(); - HeapWord* last = ranges[count-1].last(); - HeapRegion* hr_st = _hrm.addr_to_region(st); - HeapRegion* hr_last = _hrm.addr_to_region(last); - - HeapRegion* hr_curr = hr_st; - while (hr_curr != NULL) { - hr_curr->update_bot(); - if (hr_curr != hr_last) { - hr_curr = _hrm.next_region_in_heap(hr_curr); - } else { - hr_curr = NULL; - } - } -} - -void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) { - assert(!is_init_completed(), "Expect to be called at JVM init time"); - assert(ranges != NULL, "MemRegion array NULL"); - assert(count != 0, "No MemRegions provided"); - MemRegion reserved = _hrm.reserved(); - HeapWord* prev_last_addr = NULL; - HeapRegion* prev_last_region = NULL; - size_t size_used = 0; - uint shrink_count = 0; - - // For each Memregion, free the G1 regions that constitute it, and - // notify mark-sweep that the range is no longer to be considered 
'archive.' - MutexLocker x(Heap_lock); - for (size_t i = 0; i < count; i++) { - HeapWord* start_address = ranges[i].start(); - HeapWord* last_address = ranges[i].last(); - - assert(reserved.contains(start_address) && reserved.contains(last_address), - "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]", - p2i(start_address), p2i(last_address)); - assert(start_address > prev_last_addr, - "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT , - p2i(start_address), p2i(prev_last_addr)); - size_used += ranges[i].byte_size(); - prev_last_addr = last_address; - - HeapRegion* start_region = _hrm.addr_to_region(start_address); - HeapRegion* last_region = _hrm.addr_to_region(last_address); - - // Check for ranges that start in the same G1 region in which the previous - // range ended, and adjust the start address so we don't try to free - // the same region again. If the current range is entirely within that - // region, skip it. - if (start_region == prev_last_region) { - start_address = start_region->end(); - if (start_address > last_address) { - continue; - } - start_region = _hrm.addr_to_region(start_address); - } - prev_last_region = last_region; - - // After verifying that each region was marked as an archive region by - // alloc_archive_regions, set it free and empty and uncommit it. - HeapRegion* curr_region = start_region; - while (curr_region != NULL) { - guarantee(curr_region->is_archive(), - "Expected archive region at index %u", curr_region->hrm_index()); - uint curr_index = curr_region->hrm_index(); - _archive_set.remove(curr_region); - curr_region->set_free(); - curr_region->set_top(curr_region->bottom()); - if (curr_region != last_region) { - curr_region = _hrm.next_region_in_heap(curr_region); - } else { - curr_region = NULL; - } - - _hrm.shrink_at(curr_index, 1); - shrink_count++; - } - } - - if (shrink_count != 0) { - log_debug(gc, ergo, heap)("Attempt heap shrinking (archive regions). 
Total size: " SIZE_FORMAT "B", - HeapRegion::GrainWords * HeapWordSize * shrink_count); - // Explicit uncommit. - uncommit_regions(shrink_count); - } - decrease_used(size_used); -} - HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) { ResourceMark rm; // For retrieving the thread names in log messages. @@ -829,8 +684,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) { // We will loop until a) we manage to successfully perform the // allocation or b) we successfully schedule a collection which // fails to perform the allocation. b) is the only case when we'll - // return NULL. - HeapWord* result = NULL; + // return null. + HeapWord* result = nullptr; for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) { bool should_try_gc; uint gc_count_before; @@ -844,7 +699,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) { // regions, we'll first try to do the allocation without doing a // collection hoping that there's enough space in the heap. 
result = humongous_obj_allocate(word_size); - if (result != NULL) { + if (result != nullptr) { policy()->old_gen_alloc_tracker()-> add_allocated_humongous_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes); return result; @@ -861,8 +716,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) { if (should_try_gc) { bool succeeded; result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_humongous_allocation); - if (result != NULL) { - assert(succeeded, "only way to get back a non-NULL result"); + if (result != nullptr) { + assert(succeeded, "only way to get back a non-null result"); log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT, Thread::current()->name(), p2i(result)); size_t size_in_regions = humongous_obj_size_in_regions(word_size); @@ -873,10 +728,10 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) { if (succeeded) { // We successfully scheduled a collection which failed to allocate. No - // point in trying to allocate further. We'll just return NULL. + // point in trying to allocate further. We'll just return null. 
log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate " SIZE_FORMAT " words", Thread::current()->name(), word_size); - return NULL; + return nullptr; } log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT "", Thread::current()->name(), word_size); @@ -885,7 +740,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) { if (gclocker_retry_count > GCLockerRetryAllocationCount) { log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating " SIZE_FORMAT " words", Thread::current()->name(), word_size); - return NULL; + return nullptr; } log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name()); // The GCLocker is either active or the GCLocker initiated @@ -913,20 +768,20 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) { } ShouldNotReachHere(); - return NULL; + return nullptr; } HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, bool expect_null_mutator_alloc_region) { assert_at_safepoint_on_vm_thread(); assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region, - "the current alloc region was unexpectedly found to be non-NULL"); + "the current alloc region was unexpectedly found to be non-null"); if (!is_humongous(word_size)) { return _allocator->attempt_allocation_locked(word_size); } else { HeapWord* result = humongous_obj_allocate(word_size); - if (result != NULL && policy()->need_to_start_conc_mark("STW humongous allocation")) { + if (result != nullptr && policy()->need_to_start_conc_mark("STW humongous allocation")) { collector_state()->set_initiate_conc_mark_if_possible(true); } return result; @@ -986,8 +841,7 @@ void G1CollectedHeap::prepare_heap_for_full_collection() { _hrm.remove_all_free_regions(); } -void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) { - assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant"); +void 
G1CollectedHeap::verify_before_full_collection() { assert_used_and_recalculate_used_equal(this); if (!VerifyBeforeGC) { return; @@ -1058,8 +912,7 @@ void G1CollectedHeap::verify_after_full_collection() { _ref_processor_cm->verify_no_references_recorded(); } -bool G1CollectedHeap::do_full_collection(bool explicit_gc, - bool clear_all_soft_refs, +bool G1CollectedHeap::do_full_collection(bool clear_all_soft_refs, bool do_maximal_compaction) { assert_at_safepoint_on_vm_thread(); @@ -1072,8 +925,8 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc, soft_ref_policy()->should_clear_all_soft_refs(); G1FullGCMark gc_mark; - GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true); - G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs, do_maximal_compaction, gc_mark.tracer()); + GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause(), true); + G1FullCollector collector(this, do_clear_all_soft_refs, do_maximal_compaction, gc_mark.tracer()); collector.prepare_collection(); collector.collect(); @@ -1088,16 +941,14 @@ void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { // the caller that the collection did not succeed (e.g., because it was locked // out by the GC locker). So, right now, we'll ignore the return value. - do_full_collection(false, /* explicit_gc */ - clear_all_soft_refs, + do_full_collection(clear_all_soft_refs, false /* do_maximal_compaction */); } bool G1CollectedHeap::upgrade_to_full_collection() { GCCauseSetter compaction(this, GCCause::_g1_compaction_pause); log_info(gc, ergo)("Attempting full compaction clearing soft references"); - bool success = do_full_collection(false /* explicit gc */, - true /* clear_all_soft_refs */, + bool success = do_full_collection(true /* clear_all_soft_refs */, false /* do_maximal_compaction */); // do_full_collection only fails if blocked by GC locker and that can't // be the case here since we only call this when already completed one gc. 
@@ -1130,7 +981,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size, HeapWord* result = attempt_allocation_at_safepoint(word_size, expect_null_mutator_alloc_region); - if (result != NULL) { + if (result != nullptr) { return result; } @@ -1139,7 +990,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size, // expansion over collection. (This might change in the future if we can // do something smarter than full collection to satisfy a failed alloc.) result = expand_and_allocate(word_size); - if (result != NULL) { + if (result != nullptr) { return result; } @@ -1153,12 +1004,11 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size, } else { log_info(gc, ergo)("Attempting full compaction"); } - *gc_succeeded = do_full_collection(false, /* explicit_gc */ - maximal_compaction /* clear_all_soft_refs */ , + *gc_succeeded = do_full_collection(maximal_compaction /* clear_all_soft_refs */ , maximal_compaction /* do_maximal_compaction */); } - return NULL; + return nullptr; } HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size, @@ -1173,7 +1023,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size, false, /* expect_null_mutator_alloc_region */ succeeded); - if (result != NULL || !*succeeded) { + if (result != nullptr || !*succeeded) { return result; } @@ -1184,7 +1034,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size, true, /* expect_null_mutator_alloc_region */ succeeded); - if (result != NULL || !*succeeded) { + if (result != nullptr || !*succeeded) { return result; } @@ -1195,7 +1045,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size, true, /* expect_null_mutator_alloc_region */ succeeded); - if (result != NULL) { + if (result != nullptr) { return result; } @@ -1206,13 +1056,13 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size, // space available is large enough for the allocation, 
then a more // complete compaction phase than we've tried so far might be // appropriate. - return NULL; + return nullptr; } // Attempting to expand the heap sufficiently // to support an allocation of the given "word_size". If // successful, perform the allocation and return the address of the -// allocated block, or else "NULL". +// allocated block, or else null. HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { assert_at_safepoint_on_vm_thread(); @@ -1230,7 +1080,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { return attempt_allocation_at_safepoint(word_size, false /* expect_null_mutator_alloc_region */); } - return NULL; + return nullptr; } bool G1CollectedHeap::expand(size_t expand_bytes, WorkerThreads* pretouch_workers, double* expand_time_ms) { @@ -1251,7 +1101,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes, WorkerThreads* pretouch_worker assert(regions_to_expand > 0, "Must expand by at least one region"); uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers); - if (expand_time_ms != NULL) { + if (expand_time_ms != nullptr) { *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS; } @@ -1344,16 +1194,6 @@ public: const char* get_description() { return "Old Regions"; } }; -class ArchiveRegionSetChecker : public HeapRegionSetChecker { -public: - void check_mt_safety() { - guarantee(!Universe::is_fully_initialized() || SafepointSynchronize::is_at_safepoint(), - "May only change archive regions during initialization or safepoint."); - } - bool is_correct_type(HeapRegion* hr) { return hr->is_archive(); } - const char* get_description() { return "Archive Regions"; } -}; - class HumongousRegionSetChecker : public HeapRegionSetChecker { public: void check_mt_safety() { @@ -1380,23 +1220,22 @@ public: G1CollectedHeap::G1CollectedHeap() : CollectedHeap(), - _service_thread(NULL), - _periodic_gc_task(NULL), - _free_arena_memory_task(NULL), - _workers(NULL), - _card_table(NULL), + 
_service_thread(nullptr), + _periodic_gc_task(nullptr), + _free_arena_memory_task(nullptr), + _workers(nullptr), + _card_table(nullptr), _collection_pause_end(Ticks::now()), _soft_ref_policy(), _old_set("Old Region Set", new OldRegionSetChecker()), - _archive_set("Archive Region Set", new ArchiveRegionSetChecker()), _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()), - _bot(NULL), + _bot(nullptr), _listener(), _numa(G1NUMA::create()), _hrm(), - _allocator(NULL), + _allocator(nullptr), _evac_failure_injector(), - _verifier(NULL), + _verifier(nullptr), _summary_bytes_used(0), _bytes_used_during_gc(0), _survivor_evac_stats("Young", YoungPLABSize, PLABWeight), @@ -1413,19 +1252,19 @@ G1CollectedHeap::G1CollectedHeap() : _gc_timer_stw(new STWGCTimer()), _gc_tracer_stw(new G1NewTracer()), _policy(new G1Policy(_gc_timer_stw)), - _heap_sizing_policy(NULL), + _heap_sizing_policy(nullptr), _collection_set(this, _policy), - _rem_set(NULL), + _rem_set(nullptr), _card_set_config(), _card_set_freelist_pool(G1CardSetConfiguration::num_mem_object_types()), - _cm(NULL), - _cm_thread(NULL), - _cr(NULL), - _task_queues(NULL), - _ref_processor_stw(NULL), + _cm(nullptr), + _cm_thread(nullptr), + _cr(nullptr), + _task_queues(nullptr), + _ref_processor_stw(nullptr), _is_alive_closure_stw(this), _is_subject_to_discovery_stw(this), - _ref_processor_cm(NULL), + _ref_processor_cm(nullptr), _is_alive_closure_cm(this), _is_subject_to_discovery_cm(this), _region_attr() { @@ -1455,7 +1294,7 @@ G1CollectedHeap::G1CollectedHeap() : _gc_tracer_stw->initialize(); - guarantee(_task_queues != NULL, "task_queues allocation failure."); + guarantee(_task_queues != nullptr, "task_queues allocation failure."); } G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description, @@ -1491,7 +1330,7 @@ jint G1CollectedHeap::initialize_concurrent_refinement() { jint G1CollectedHeap::initialize_service_thread() { _service_thread = new G1ServiceThread(); - if 
(_service_thread->osthread() == NULL) { + if (_service_thread->osthread() == nullptr) { vm_shutdown_during_initialization("Could not create G1ServiceThread"); return JNI_ENOMEM; } @@ -1539,7 +1378,6 @@ jint G1CollectedHeap::initialize() { // Create the barrier set for the entire reserved region. G1CardTable* ct = new G1CardTable(heap_rs.region()); - ct->initialize(); G1BarrierSet* bs = new G1BarrierSet(ct); bs->initialize(); assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity"); @@ -1561,7 +1399,7 @@ jint G1CollectedHeap::initialize() { HeapRegion::GrainBytes, 1, mtJavaHeap); - if(heap_storage == NULL) { + if(heap_storage == nullptr) { vm_shutdown_during_initialization("Could not initialize G1 heap"); return JNI_ERR; } @@ -1623,7 +1461,7 @@ jint G1CollectedHeap::initialize() { } _workers = new WorkerThreads("GC Thread", ParallelGCThreads); - if (_workers == NULL) { + if (_workers == nullptr) { return JNI_ENOMEM; } _workers->initialize_workers(); @@ -2084,6 +1922,35 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause, } } +bool G1CollectedHeap::try_collect_fullgc(GCCause::Cause cause, + const G1GCCounters& counters_before) { + assert_heap_not_locked(); + + while(true) { + VM_G1CollectFull op(counters_before.total_collections(), + counters_before.total_full_collections(), + cause); + VMThread::execute(&op); + + // Request is trivially finished. + if (!GCCause::is_explicit_full_gc(cause) || op.gc_succeeded()) { + return op.gc_succeeded(); + } + + { + MutexLocker ml(Heap_lock); + if (counters_before.total_full_collections() != total_full_collections()) { + return true; + } + } + + if (GCLocker::is_active_and_needs_gc()) { + // If GCLocker is active, wait until clear before retrying. 
+ GCLocker::stall_until_clear(); + } + } +} + bool G1CollectedHeap::try_collect(GCCause::Cause cause, const G1GCCounters& counters_before) { if (should_do_concurrent_full_gc(cause)) { @@ -2107,11 +1974,7 @@ bool G1CollectedHeap::try_collect(GCCause::Cause cause, return op.gc_succeeded(); } else { // Schedule a Full GC. - VM_G1CollectFull op(counters_before.total_collections(), - counters_before.total_full_collections(), - cause); - VMThread::execute(&op); - return op.gc_succeeded(); + return try_collect_fullgc(cause, counters_before); } } @@ -2230,7 +2093,7 @@ void G1CollectedHeap::par_iterate_regions_array(HeapRegionClosure* cl, do { uint region_idx = regions[cur_pos]; - if (hr_claimer == NULL || hr_claimer->claim_region(region_idx)) { + if (hr_claimer == nullptr || hr_claimer->claim_region(region_idx)) { HeapRegion* r = region_at(region_idx); bool result = cl->do_heap_region(r); guarantee(!result, "Must not cancel iteration"); @@ -2293,10 +2156,6 @@ bool G1CollectedHeap::supports_concurrent_gc_breakpoints() const { return true; } -bool G1CollectedHeap::is_archived_object(oop object) const { - return object != NULL && heap_region_containing(object)->is_archive(); -} - class PrintRegionClosure: public HeapRegionClosure { outputStream* _st; public: @@ -2378,7 +2237,6 @@ void G1CollectedHeap::print_regions_on(outputStream* st) const { st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, " "HS=humongous(starts), HC=humongous(continues), " "CS=collection set, F=free, " - "OA=open archive, CA=closed archive, " "TAMS=top-at-mark-start, " "PB=parsable bottom"); PrintRegionClosure blk(st); @@ -2396,7 +2254,7 @@ void G1CollectedHeap::print_extended_on(outputStream* st) const { void G1CollectedHeap::print_on_error(outputStream* st) const { this->CollectedHeap::print_on_error(st); - if (_cm != NULL) { + if (_cm != nullptr) { st->cr(); _cm->print_on_error(st); } @@ -2516,8 +2374,8 @@ HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size, HeapWord* 
result = op.result(); bool ret_succeeded = op.prologue_succeeded() && op.gc_succeeded(); - assert(result == NULL || ret_succeeded, - "the result should be NULL if the VM did not succeed"); + assert(result == nullptr || ret_succeeded, + "the result should be null if the VM did not succeed"); *succeeded = ret_succeeded; assert_heap_not_locked(); @@ -2739,7 +2597,7 @@ void G1CollectedHeap::complete_cleaning(bool class_unloading_occurred) { } bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) { - assert(obj != NULL, "must not be NULL"); + assert(obj != nullptr, "must not be null"); assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj)); // The areas the CM and STW ref processor manage must be disjoint. The is_in_cset() below // may falsely indicate that this is not the case here: however the collection set only @@ -2750,7 +2608,7 @@ bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) { void G1CollectedHeap::make_pending_list_reachable() { if (collector_state()->in_concurrent_start_gc()) { oop pll_head = Universe::reference_pending_list(); - if (pll_head != NULL) { + if (pll_head != nullptr) { // Any valid worker id is fine here as we are in the VM thread and single-threaded. 
_cm->mark_in_bitmap(0 /* worker_id */, pll_head); } @@ -2764,7 +2622,7 @@ void G1CollectedHeap::set_humongous_stats(uint num_humongous_total, uint num_hum bool G1CollectedHeap::should_sample_collection_set_candidates() const { G1CollectionSetCandidates* candidates = G1CollectedHeap::heap()->collection_set()->candidates(); - return candidates != NULL && candidates->num_remaining() > 0; + return candidates != nullptr && candidates->num_remaining() > 0; } void G1CollectedHeap::set_collection_set_candidates_stats(G1MonotonicArenaMemoryStats& stats) { @@ -2796,7 +2654,7 @@ void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) { hr->hr_clear(true /* clear_space */); _policy->remset_tracker()->update_at_free(hr); - if (free_list != NULL) { + if (free_list != nullptr) { free_list->add_ordered(hr); } } @@ -2809,19 +2667,17 @@ void G1CollectedHeap::free_humongous_region(HeapRegion* hr, } void G1CollectedHeap::remove_from_old_gen_sets(const uint old_regions_removed, - const uint archive_regions_removed, const uint humongous_regions_removed) { - if (old_regions_removed > 0 || archive_regions_removed > 0 || humongous_regions_removed > 0) { + if (old_regions_removed > 0 || humongous_regions_removed > 0) { MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag); _old_set.bulk_remove(old_regions_removed); - _archive_set.bulk_remove(archive_regions_removed); _humongous_set.bulk_remove(humongous_regions_removed); } } void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) { - assert(list != NULL, "list can't be null"); + assert(list != nullptr, "list can't be null"); if (!list->is_empty()) { MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag); _hrm.insert_list_into_free_list(list); @@ -2905,9 +2761,7 @@ bool G1CollectedHeap::check_young_list_empty() { // Remove the given HeapRegion from the appropriate region set. 
void G1CollectedHeap::prepare_region_for_full_compaction(HeapRegion* hr) { - if (hr->is_archive()) { - _archive_set.remove(hr); - } else if (hr->is_humongous()) { + if (hr->is_humongous()) { _humongous_set.remove(hr); } else if (hr->is_old()) { _old_set.remove(hr); @@ -2943,7 +2797,6 @@ private: bool _free_list_only; HeapRegionSet* _old_set; - HeapRegionSet* _archive_set; HeapRegionSet* _humongous_set; HeapRegionManager* _hrm; @@ -2953,15 +2806,13 @@ private: public: RebuildRegionSetsClosure(bool free_list_only, HeapRegionSet* old_set, - HeapRegionSet* archive_set, HeapRegionSet* humongous_set, HeapRegionManager* hrm) : - _free_list_only(free_list_only), _old_set(old_set), _archive_set(archive_set), + _free_list_only(free_list_only), _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm), _total_used(0) { assert(_hrm->num_free_regions() == 0, "pre-condition"); if (!free_list_only) { assert(_old_set->is_empty(), "pre-condition"); - assert(_archive_set->is_empty(), "pre-condition"); assert(_humongous_set->is_empty(), "pre-condition"); } } @@ -2977,11 +2828,9 @@ public: if (r->is_humongous()) { _humongous_set->add(r); - } else if (r->is_archive()) { - _archive_set->add(r); } else { assert(r->is_young() || r->is_free() || r->is_old(), "invariant"); - // We now move all (non-humongous, non-old, non-archive) regions to old gen, + // We now move all (non-humongous, non-old) regions to old gen, // and register them as such. 
r->move_to_old(); _old_set->add(r); @@ -3006,7 +2855,7 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) { } RebuildRegionSetsClosure cl(free_list_only, - &_old_set, &_archive_set, &_humongous_set, + &_old_set, &_humongous_set, &_hrm); heap_region_iterate(&cl); @@ -3028,14 +2877,14 @@ HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, HeapRegionType::Eden, false /* do_expand */, node_index); - if (new_alloc_region != NULL) { + if (new_alloc_region != nullptr) { set_region_short_lived_locked(new_alloc_region); _hr_printer.alloc(new_alloc_region, !should_allocate); _policy->remset_tracker()->update_at_allocate(new_alloc_region); return new_alloc_region; } } - return NULL; + return nullptr; } void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region, @@ -3068,7 +2917,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionA assert(FreeList_lock->owned_by_self(), "pre-condition"); if (!has_more_regions(dest)) { - return NULL; + return nullptr; } HeapRegionType type; @@ -3083,7 +2932,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionA true /* do_expand */, node_index); - if (new_alloc_region != NULL) { + if (new_alloc_region != nullptr) { if (type.is_survivor()) { new_alloc_region->set_survivor(); _survivor.add(new_alloc_region); @@ -3096,7 +2945,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionA _hr_printer.alloc(new_alloc_region); return new_alloc_region; } - return NULL; + return nullptr; } void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region, @@ -3128,7 +2977,7 @@ HeapRegion* G1CollectedHeap::alloc_highest_free_region() { } return _hrm.allocate_free_regions_starting_at(index, 1); } - return NULL; + return nullptr; } void G1CollectedHeap::mark_evac_failure_object(uint worker_id, const oop obj, size_t obj_size) const { @@ -3193,13 +3042,13 @@ public: }; void G1CollectedHeap::register_nmethod(nmethod* 
nm) { - guarantee(nm != NULL, "sanity"); + guarantee(nm != nullptr, "sanity"); RegisterNMethodOopClosure reg_cl(this, nm); nm->oops_do(®_cl); } void G1CollectedHeap::unregister_nmethod(nmethod* nm) { - guarantee(nm != NULL, "sanity"); + guarantee(nm != nullptr, "sanity"); UnregisterNMethodOopClosure reg_cl(this, nm); nm->oops_do(®_cl, true); } @@ -3226,7 +3075,7 @@ public: void do_code_blob(CodeBlob* cb) { nmethod* nm = cb->as_nmethod_or_null(); - if (nm != NULL) { + if (nm != nullptr) { _g1h->register_nmethod(nm); } } diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp index 3f2b433c054..73f31d3f969 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp @@ -182,9 +182,8 @@ private: static size_t _humongous_object_threshold_in_words; - // These sets keep track of old, archive and humongous regions respectively. + // These sets keep track of old and humongous regions respectively. HeapRegionSet _old_set; - HeapRegionSet _archive_set; HeapRegionSet _humongous_set; // Young gen memory statistics before GC. @@ -284,6 +283,9 @@ private: uint gc_counter, uint old_marking_started_before); + bool try_collect_fullgc(GCCause::Cause cause, + const G1GCCounters& counters_before); + // indicates whether we are in young or mixed GC mode G1CollectorState _collector_state; @@ -350,10 +352,10 @@ private: "should not be at a safepoint")); \ } while (0) -#define assert_at_safepoint_on_vm_thread() \ - do { \ - assert_at_safepoint(); \ - assert(Thread::current_or_null() != NULL, "no current thread"); \ +#define assert_at_safepoint_on_vm_thread() \ + do { \ + assert_at_safepoint(); \ + assert(Thread::current_or_null() != nullptr, "no current thread"); \ assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \ } while (0) @@ -402,7 +404,7 @@ private: size_t word_size); // Attempt to allocate a humongous object of the given size. Return - // NULL if unsuccessful. 
+ // null if unsuccessful. HeapWord* humongous_obj_allocate(size_t word_size); // The following two methods, allocate_new_tlab() and @@ -425,7 +427,7 @@ private: // retry the allocation. // // * If all allocation attempts fail, even after trying to schedule - // an evacuation pause, allocate_new_tlab() will return NULL, + // an evacuation pause, allocate_new_tlab() will return null, // whereas mem_allocate() will attempt a heap expansion and/or // schedule a Full GC. // @@ -459,7 +461,7 @@ private: // Allocation attempt that should be called during safepoints (e.g., // at the end of a successful GC). expect_null_mutator_alloc_region - // specifies whether the mutator alloc region is expected to be NULL + // specifies whether the mutator alloc region is expected to be null // or not. HeapWord* attempt_allocation_at_safepoint(size_t word_size, bool expect_null_mutator_alloc_region); @@ -477,16 +479,13 @@ private: void retire_gc_alloc_region(HeapRegion* alloc_region, size_t allocated_bytes, G1HeapRegionAttr dest); - // - if explicit_gc is true, the GC is for a System.gc() etc, - // otherwise it's for a failed allocation. // - if clear_all_soft_refs is true, all soft references should be // cleared during the GC. // - if do_maximal_compaction is true, full gc will do a maximally // compacting collection, leaving no dead wood. // - it returns false if it is unable to do the collection due to the // GC locker being active, true otherwise. - bool do_full_collection(bool explicit_gc, - bool clear_all_soft_refs, + bool do_full_collection(bool clear_all_soft_refs, bool do_maximal_compaction); // Callback from VM_G1CollectFull operation, or collect_as_vm_thread. @@ -503,7 +502,7 @@ private: // Internal helpers used during full GC to split it up to // increase readability. 
bool abort_concurrent_cycle(); - void verify_before_full_collection(bool explicit_gc); + void verify_before_full_collection(); void prepare_heap_for_full_collection(); void prepare_for_mutator_after_full_collection(); void abort_refinement(); @@ -520,7 +519,7 @@ private: // Attempting to expand the heap sufficiently // to support an allocation of the given "word_size". If // successful, perform the allocation and return the address of the - // allocated block, or else "NULL". + // allocated block, or else null. HeapWord* expand_and_allocate(size_t word_size); void verify_numa_regions(const char* desc); @@ -575,7 +574,7 @@ public: // Returns true if the heap was expanded by the requested amount; // false otherwise. // (Rounds up to a HeapRegion boundary.) - bool expand(size_t expand_bytes, WorkerThreads* pretouch_workers = NULL, double* expand_time_ms = NULL); + bool expand(size_t expand_bytes, WorkerThreads* pretouch_workers = nullptr, double* expand_time_ms = nullptr); bool expand_single_region(uint node_index); // Returns the PLAB statistics for a given destination. @@ -679,7 +678,7 @@ public: // Frees a region by resetting its metadata and adding it to the free list // passed as a parameter (this is usually a local list which will be appended - // to the master free list later or NULL if free list management is handled + // to the master free list later or null if free list management is handled // in another way). // Callers must ensure they are the only one calling free on the given region // at the same time. @@ -702,31 +701,30 @@ public: FreeRegionList* free_list); // Facility for allocating a fixed range within the heap and marking - // the containing regions as 'archive'. For use at JVM init time, when the - // caller may mmap archived heap data at the specified range(s). - // Verify that the MemRegions specified in the argument array are within the - // reserved heap. 
- bool check_archive_addresses(MemRegion* range, size_t count); + // the containing regions as 'old'. For use at JVM init time, when the + // caller may mmap archived heap data at the specified range. - // Commit the appropriate G1 regions containing the specified MemRegions - // and mark them as 'archive' regions. The regions in the array must be - // non-overlapping and in order of ascending address. - bool alloc_archive_regions(MemRegion* range, size_t count, bool open); + // Verify that the range is within the reserved heap. + bool check_archive_addresses(MemRegion range); - // Insert any required filler objects in the G1 regions around the specified - // ranges to make the regions parseable. This must be called after - // alloc_archive_regions, and after class loading has occurred. - void fill_archive_regions(MemRegion* range, size_t count); + // Execute func(HeapRegion* r, bool is_last) on every region covered by the + // given range. + template + void iterate_regions_in_range(MemRegion range, const Func& func); + + // Commit the appropriate G1 region(s) containing the specified range + // and mark them as 'old' region(s). + bool alloc_archive_regions(MemRegion range); // Populate the G1BlockOffsetTablePart for archived regions with the given - // memory ranges. - void populate_archive_regions_bot_part(MemRegion* range, size_t count); + // memory range. + void populate_archive_regions_bot_part(MemRegion range); - // For each of the specified MemRegions, uncommit the containing G1 regions + // For the specified range, uncommit the containing G1 regions // which had been allocated by alloc_archive_regions. This should be called - // rather than fill_archive_regions at JVM init time if the archive file - // mapping failed, with the same non-overlapping and sorted MemRegion array. - void dealloc_archive_regions(MemRegion* range, size_t count); + // at JVM init time if the archive heap's contents cannot be used (e.g., if + // CRC check fails). 
+ void dealloc_archive_regions(MemRegion range); private: @@ -1003,10 +1001,8 @@ public: inline void old_set_add(HeapRegion* hr); inline void old_set_remove(HeapRegion* hr); - inline void archive_set_add(HeapRegion* hr); - size_t non_young_capacity_bytes() { - return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes; + return (old_regions_count() + humongous_regions_count()) * HeapRegion::GrainBytes; } // Determine whether the given region is one that we are using as an @@ -1025,7 +1021,6 @@ public: void start_concurrent_gc_for_metadata_allocation(GCCause::Cause gc_cause); void remove_from_old_gen_sets(const uint old_regions_removed, - const uint archive_regions_removed, const uint humongous_regions_removed); void prepend_to_freelist(FreeRegionList* list); void decrement_summary_bytes(size_t bytes); @@ -1123,7 +1118,7 @@ public: // The variant with the HeapRegionClaimer guarantees that the closure will be // applied to a particular region exactly once. void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id) { - collection_set_iterate_increment_from(blk, NULL, worker_id); + collection_set_iterate_increment_from(blk, nullptr, worker_id); } void collection_set_iterate_increment_from(HeapRegionClosure *blk, HeapRegionClaimer* hr_claimer, uint worker_id); // Iterate over the array of region indexes, uint regions[length], applying @@ -1135,11 +1130,11 @@ public: size_t length, uint worker_id) const; - // Returns the HeapRegion that contains addr. addr must not be nullptr. + // Returns the HeapRegion that contains addr. addr must not be null. inline HeapRegion* heap_region_containing(const void* addr) const; - // Returns the HeapRegion that contains addr, or nullptr if that is an uncommitted - // region. addr must not be nullptr. + // Returns the HeapRegion that contains addr, or null if that is an uncommitted + // region. addr must not be null. 
inline HeapRegion* heap_region_containing_or_null(const void* addr) const; // A CollectedHeap is divided into a dense sequence of "blocks"; that is, @@ -1215,7 +1210,6 @@ public: size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); } uint young_regions_count() const { return _eden.length() + _survivor.length(); } uint old_regions_count() const { return _old_set.length(); } - uint archive_regions_count() const { return _archive_set.length(); } uint humongous_regions_count() const { return _humongous_set.length(); } #ifdef ASSERT @@ -1232,7 +1226,7 @@ public: // Determine if an object is dead, given only the object itself. // This will find the region to which the object belongs and // then call the region version of the same function. - // If obj is NULL it is not dead. + // If obj is null it is not dead. inline bool is_obj_dead(const oop obj) const; inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const; @@ -1282,8 +1276,6 @@ public: WorkerThreads* safepoint_workers() override { return _workers; } - bool is_archived_object(oop object) const override; - // The methods below are here for convenience and dispatch the // appropriate method depending on value of the given VerifyOption // parameter. The values for that parameter, and their meanings, diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp index f49c804f8b3..cf5b05b18f4 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -86,7 +86,7 @@ G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) { return &_old_evac_stats; default: ShouldNotReachHere(); - return NULL; // Keep some compilers happy + return nullptr; // Keep some compilers happy } } @@ -104,7 +104,7 @@ inline size_t G1CollectedHeap::clamp_plab_size(size_t value) const { // Return the region with the given index. It assumes the index is valid. inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); } -// Return the region with the given index, or NULL if unmapped. It assumes the index is valid. +// Return the region with the given index, or null if unmapped. It assumes the index is valid. inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); } template @@ -148,10 +148,6 @@ inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) { _old_set.remove(hr); } -inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) { - _archive_set.add(hr); -} - // It dirties the cards that cover the block so that the post // write barrier never queues anything when updating objects on this // block. 
It is assumed (and in fact we assert) that the block @@ -234,15 +230,15 @@ void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) { } inline bool G1CollectedHeap::is_in_young(const oop obj) const { - if (obj == NULL) { + if (obj == nullptr) { return false; } return heap_region_containing(obj)->is_young(); } inline bool G1CollectedHeap::requires_barriers(stackChunkOop obj) const { - assert(obj != NULL, ""); - return !heap_region_containing(obj)->is_young(); // is_in_young does an unnecessary NULL check + assert(obj != nullptr, ""); + return !heap_region_containing(obj)->is_young(); // is_in_young does an unnecessary null check } inline bool G1CollectedHeap::is_obj_filler(const oop obj) { @@ -255,14 +251,14 @@ inline bool G1CollectedHeap::is_obj_dead(const oop obj, const HeapRegion* hr) co } inline bool G1CollectedHeap::is_obj_dead(const oop obj) const { - if (obj == NULL) { + if (obj == nullptr) { return false; } return is_obj_dead(obj, heap_region_containing(obj)); } inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const { - return !is_marked(obj) && !hr->is_closed_archive(); + return !is_marked(obj); } inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const { diff --git a/src/hotspot/share/gc/g1/g1CollectionSet.cpp b/src/hotspot/share/gc/g1/g1CollectionSet.cpp index 3353294c959..180236d38b0 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp +++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -50,11 +50,11 @@ G1GCPhaseTimes* G1CollectionSet::phase_times() { G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) : _g1h(g1h), _policy(policy), - _candidates(NULL), + _candidates(nullptr), _eden_region_length(0), _survivor_region_length(0), _old_region_length(0), - _collection_set_regions(NULL), + _collection_set_regions(nullptr), _collection_set_cur_length(0), _collection_set_max_length(0), _num_optional_regions(0), @@ -83,7 +83,7 @@ void G1CollectionSet::init_region_lengths(uint eden_cset_region_length, } void G1CollectionSet::initialize(uint max_region_length) { - guarantee(_collection_set_regions == NULL, "Must only initialize once."); + guarantee(_collection_set_regions == nullptr, "Must only initialize once."); _collection_set_max_length = max_region_length; _collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC); } @@ -94,7 +94,7 @@ void G1CollectionSet::free_optional_regions() { void G1CollectionSet::clear_candidates() { delete _candidates; - _candidates = NULL; + _candidates = nullptr; } // Add the heap region at the head of the non-incremental collection set diff --git a/src/hotspot/share/gc/g1/g1CollectionSet.hpp b/src/hotspot/share/gc/g1/g1CollectionSet.hpp index e62a39d1d02..6ff6d3dcf46 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSet.hpp +++ b/src/hotspot/share/gc/g1/g1CollectionSet.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -211,7 +211,7 @@ public: void clear_candidates(); void set_candidates(G1CollectionSetCandidates* candidates) { - assert(_candidates == NULL, "Trying to replace collection set candidates."); + assert(_candidates == nullptr, "Trying to replace collection set candidates."); _candidates = candidates; } G1CollectionSetCandidates* candidates() { return _candidates; } diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp index 587aa336183..ac3cde49030 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,7 @@ void G1CollectionSetCandidates::remove_from_end(uint num_remove, size_t wasted) uint cur_idx = _num_regions - i - 1; reclaimable += at(cur_idx)->reclaimable_bytes(); // Make sure we crash if we access it. - _regions[cur_idx] = NULL; + _regions[cur_idx] = nullptr; } assert(reclaimable == wasted, "Recalculated reclaimable inconsistent"); @@ -79,17 +79,16 @@ void G1CollectionSetCandidates::verify() const { guarantee(_front_idx <= _num_regions, "Index: %u Num_regions: %u", _front_idx, _num_regions); uint idx = _front_idx; size_t sum_of_reclaimable_bytes = 0; - HeapRegion *prev = NULL; + HeapRegion *prev = nullptr; for (; idx < _num_regions; idx++) { HeapRegion *cur = _regions[idx]; - guarantee(cur != NULL, "Regions after _front_idx %u cannot be NULL but %u is", _front_idx, idx); - // The first disjunction filters out regions with objects that were explicitly - // pinned after being added to the collection set candidates. 
Archive regions - // should never have been added to the collection set though. - guarantee((cur->is_pinned() && !cur->is_archive()) || - G1CollectionSetChooser::should_add(cur), + guarantee(cur != nullptr, "Regions after _front_idx %u cannot be NULL but %u is", _front_idx, idx); + // Currently the decision whether young gc moves region contents is determined + // at region allocation time. It is not possible that a region becomes non-movable + // at a later point, which means below condition always holds true. + guarantee(G1CollectionSetChooser::should_add(cur), "Region %u should be eligible for addition.", cur->hrm_index()); - if (prev != NULL) { + if (prev != nullptr) { guarantee(prev->gc_efficiency() >= cur->gc_efficiency(), "GC efficiency for region %u: %1.4f smaller than for region %u: %1.4f", prev->hrm_index(), prev->gc_efficiency(), cur->hrm_index(), cur->gc_efficiency()); diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp index cc943a78630..472917de271 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -66,10 +66,10 @@ public: uint cur_idx() const { return _front_idx; } HeapRegion* at(uint idx) const { - HeapRegion* res = NULL; + HeapRegion* res = nullptr; if (idx < _num_regions) { res = _regions[idx]; - assert(res != NULL, "Unexpected NULL HeapRegion at index %u", idx); + assert(res != nullptr, "Unexpected null HeapRegion at index %u", idx); } return res; } diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp index 749b3565c41..efeab18cc1a 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,14 +39,14 @@ // a lot of live objects, not the ones with just a lot of live objects if we // ordered according to the amount of reclaimable bytes per region. static int order_regions(HeapRegion* hr1, HeapRegion* hr2) { - // Make sure that NULL entries are moved to the end. - if (hr1 == NULL) { - if (hr2 == NULL) { + // Make sure that null entries are moved to the end. + if (hr1 == nullptr) { + if (hr2 == nullptr) { return 0; } else { return 1; } - } else if (hr2 == NULL) { + } else if (hr2 == nullptr) { return -1; } @@ -74,7 +74,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask { // Work area for building the set of collection set candidates. Contains references // to heap regions with their GC efficiencies calculated. 
To reduce contention // on claiming array elements, worker threads claim parts of this array in chunks; - // Array elements may be NULL as threads might not get enough regions to fill + // Array elements may be null as threads might not get enough regions to fill // up their chunks completely. // Final sorting will remove them. class G1BuildCandidateArray : public StackObj { @@ -102,7 +102,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask { _data(NEW_C_HEAP_ARRAY(HeapRegion*, _max_size, mtGC)), _cur_claim_idx(0) { for (uint i = 0; i < _max_size; i++) { - _data[i] = NULL; + _data[i] = nullptr; } } @@ -123,7 +123,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask { // Set element in array. void set(uint idx, HeapRegion* hr) { assert(idx < _max_size, "Index %u out of bounds %u", idx, _max_size); - assert(_data[idx] == NULL, "Value must not have been set."); + assert(_data[idx] == nullptr, "Value must not have been set."); _data[idx] = hr; } @@ -132,11 +132,11 @@ class G1BuildCandidateRegionsTask : public WorkerTask { return; } for (uint i = _cur_claim_idx; i < _max_size; i++) { - assert(_data[i] == NULL, "must be"); + assert(_data[i] == nullptr, "must be"); } QuickSort::sort(_data, _cur_claim_idx, order_regions, true); for (uint i = num_regions; i < _max_size; i++) { - assert(_data[i] == NULL, "must be"); + assert(_data[i] == nullptr, "must be"); } for (uint i = 0; i < num_regions; i++) { dest[i] = _data[i]; @@ -192,7 +192,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask { // sets for old regions. 
r->rem_set()->clear(true /* only_cardset */); } else { - assert(r->is_archive() || !r->is_old() || !r->rem_set()->is_tracked(), + assert(!r->is_old() || !r->rem_set()->is_tracked(), "Missed to clear unused remembered set of region %u (%s) that is %s", r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str()); } @@ -252,7 +252,7 @@ uint G1CollectionSetChooser::calculate_work_chunk_size(uint num_workers, uint nu bool G1CollectionSetChooser::should_add(HeapRegion* hr) { return !hr->is_young() && - !hr->is_pinned() && + !hr->is_humongous() && region_occupancy_low_enough_for_evac(hr->live_bytes()) && hr->rem_set()->is_complete(); } diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp index 266da565e09..a253cb84808 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,8 +53,8 @@ public: } // Determine whether to add the given region to the collection set candidates or - // not. Currently, we skip pinned regions and regions whose live - // bytes are over the threshold. Humongous regions may be reclaimed during cleanup. + // not. Currently, we skip regions that we will never move during young gc, and + // regions which liveness is over the occupancy threshold. // Regions also need a complete remembered set to be a candidate. 
static bool should_add(HeapRegion* hr); diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp index a62bfd88819..4fe0959da99 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -97,7 +97,7 @@ bool G1CMBitMapClosure::do_addr(HeapWord* const addr) { G1CMMarkStack::G1CMMarkStack() : _max_chunk_capacity(0), - _base(NULL), + _base(nullptr), _chunk_capacity(0) { set_empty(); } @@ -109,12 +109,12 @@ bool G1CMMarkStack::resize(size_t new_capacity) { TaskQueueEntryChunk* new_base = MmapArrayAllocator::allocate_or_null(new_capacity, mtGC); - if (new_base == NULL) { + if (new_base == nullptr) { log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk)); return false; } // Release old mapping. 
- if (_base != NULL) { + if (_base != nullptr) { MmapArrayAllocator::free(_base, _chunk_capacity); } @@ -167,7 +167,7 @@ void G1CMMarkStack::expand() { } G1CMMarkStack::~G1CMMarkStack() { - if (_base != NULL) { + if (_base != nullptr) { MmapArrayAllocator::free(_base, _chunk_capacity); } } @@ -190,7 +190,7 @@ void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) { G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) { TaskQueueEntryChunk* result = *list; - if (result != NULL) { + if (result != nullptr) { *list = (*list)->next; } return result; @@ -199,7 +199,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQu G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() { MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag); TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list); - if (result != NULL) { + if (result != nullptr) { _chunks_in_chunk_list--; } return result; @@ -215,16 +215,16 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() { // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding // wraparound of _hwm. if (_hwm >= _chunk_capacity) { - return NULL; + return nullptr; } size_t cur_idx = Atomic::fetch_and_add(&_hwm, 1u); if (cur_idx >= _chunk_capacity) { - return NULL; + return nullptr; } TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk; - result->next = NULL; + result->next = nullptr; return result; } @@ -232,11 +232,11 @@ bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) { // Get a new chunk. TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list(); - if (new_chunk == NULL) { + if (new_chunk == nullptr) { // Did not get a chunk from the free list. Allocate from backing memory. 
new_chunk = allocate_new_chunk(); - if (new_chunk == NULL) { + if (new_chunk == nullptr) { return false; } } @@ -251,7 +251,7 @@ bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) { bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) { TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list(); - if (cur == NULL) { + if (cur == nullptr) { return false; } @@ -264,8 +264,8 @@ bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) { void G1CMMarkStack::set_empty() { _chunks_in_chunk_list = 0; _hwm = 0; - _chunk_list = NULL; - _free_list = NULL; + _chunk_list = nullptr; + _free_list = nullptr; } G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) : @@ -288,7 +288,7 @@ void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) { assert_at_safepoint(); size_t idx = Atomic::fetch_and_add(&_num_root_regions, 1u); assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions); - assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to " + assert(start != nullptr && end != nullptr && start <= end, "Start (" PTR_FORMAT ") should be less or equal to " "end (" PTR_FORMAT ")", p2i(start), p2i(end)); _root_regions[idx].set_start(start); _root_regions[idx].set_end(end); @@ -305,20 +305,20 @@ void G1CMRootMemRegions::prepare_for_scan() { const MemRegion* G1CMRootMemRegions::claim_next() { if (_should_abort) { - // If someone has set the should_abort flag, we return NULL to + // If someone has set the should_abort flag, we return null to // force the caller to bail out of their loop. 
- return NULL; + return nullptr; } if (_claimed_root_regions >= _num_root_regions) { - return NULL; + return nullptr; } size_t claimed_index = Atomic::fetch_and_add(&_claimed_root_regions, 1u); if (claimed_index < _num_root_regions) { return &_root_regions[claimed_index]; } - return NULL; + return nullptr; } uint G1CMRootMemRegions::num_root_regions() const { @@ -404,9 +404,9 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, _cleanup_times(), _total_cleanup_time(0.0), - _accum_task_vtime(NULL), + _accum_task_vtime(nullptr), - _concurrent_workers(NULL), + _concurrent_workers(nullptr), _num_concurrent_workers(0), _max_concurrent_workers(0), @@ -414,13 +414,13 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_reserved_regions(), mtGC)), _needs_remembered_set_rebuild(false) { - assert(CGC_lock != NULL, "CGC_lock must be initialized"); + assert(CGC_lock != nullptr, "CGC_lock must be initialized"); _mark_bitmap.initialize(g1h->reserved(), bitmap_storage); // Create & start ConcurrentMark thread. 
_cm_thread = new G1ConcurrentMarkThread(this); - if (_cm_thread->osthread() == NULL) { + if (_cm_thread->osthread() == nullptr) { vm_shutdown_during_initialization("Could not create ConcurrentMarkThread"); } @@ -468,7 +468,7 @@ void G1ConcurrentMark::reset() { uint max_reserved_regions = _g1h->max_reserved_regions(); for (uint i = 0; i < max_reserved_regions; i++) { - _top_at_rebuild_starts[i] = NULL; + _top_at_rebuild_starts[i] = nullptr; _region_mark_stats[i].clear(); } @@ -480,7 +480,7 @@ void G1ConcurrentMark::clear_statistics(HeapRegion* r) { for (uint j = 0; j < _max_num_tasks; ++j) { _tasks[j]->clear_mark_stats_cache(region_idx); } - _top_at_rebuild_starts[region_idx] = NULL; + _top_at_rebuild_starts[region_idx] = nullptr; _region_mark_stats[region_idx].clear(); } @@ -953,7 +953,7 @@ public: void work(uint worker_id) { G1CMRootMemRegions* root_regions = _cm->root_regions(); const MemRegion* region = root_regions->claim_next(); - while (region != NULL) { + while (region != nullptr) { _cm->scan_root_region(region, worker_id); region = root_regions->claim_next(); } @@ -1329,7 +1329,6 @@ class G1ReclaimEmptyRegionsTask : public WorkerTask { size_t _freed_bytes; FreeRegionList* _local_cleanup_list; uint _old_regions_removed; - uint _archive_regions_removed; uint _humongous_regions_removed; public: @@ -1339,26 +1338,21 @@ class G1ReclaimEmptyRegionsTask : public WorkerTask { _freed_bytes(0), _local_cleanup_list(local_cleanup_list), _old_regions_removed(0), - _archive_regions_removed(0), _humongous_regions_removed(0) { } size_t freed_bytes() { return _freed_bytes; } const uint old_regions_removed() { return _old_regions_removed; } - const uint archive_regions_removed() { return _archive_regions_removed; } const uint humongous_regions_removed() { return _humongous_regions_removed; } bool do_heap_region(HeapRegion *hr) { - if (hr->used() > 0 && hr->live_bytes() == 0 && !hr->is_young() && !hr->is_closed_archive()) { + if (hr->used() > 0 && hr->live_bytes() == 0 && 
!hr->is_young()) { log_trace(gc)("Reclaimed empty old gen region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom())); _freed_bytes += hr->used(); - hr->set_containing_set(NULL); + hr->set_containing_set(nullptr); if (hr->is_humongous()) { _humongous_regions_removed++; _g1h->free_humongous_region(hr, _local_cleanup_list); - } else if (hr->is_open_archive()) { - _archive_regions_removed++; - _g1h->free_region(hr, _local_cleanup_list); } else { _old_regions_removed++; _g1h->free_region(hr, _local_cleanup_list); @@ -1389,9 +1383,8 @@ public: _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id); assert(cl.is_complete(), "Shouldn't have aborted!"); - // Now update the old/archive/humongous region sets + // Now update the old/humongous region sets _g1h->remove_from_old_gen_sets(cl.old_regions_removed(), - cl.archive_regions_removed(), cl.humongous_regions_removed()); { MutexLocker x(G1RareEvent_lock, Mutex::_no_safepoint_check_flag); @@ -1728,7 +1721,7 @@ public: G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { } bool do_object_b(oop obj) { - return obj != NULL && + return obj != nullptr && (!_g1h->is_in_reserved(obj) || !_g1h->is_obj_dead(obj)); } }; @@ -1874,7 +1867,7 @@ HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) { HeapRegion* curr_region = _g1h->heap_region_containing_or_null(finger); // Make sure that the reads below do not float before loading curr_region. OrderAccess::loadload(); - // Above heap_region_containing may return NULL as we always scan claim + // Above heap_region_containing may return null as we always scan claim // until the end of the heap. In this case, just jump to the next region. HeapWord* end = curr_region != nullptr ? 
curr_region->end() : finger + HeapRegion::GrainWords; @@ -1890,12 +1883,11 @@ HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) { assert(_finger >= end, "the finger should have moved forward"); if (limit > bottom) { - assert(!curr_region->is_closed_archive(), "CA regions should be skipped"); return curr_region; } else { assert(limit == bottom, "the region limit should be at bottom"); - // we return NULL and the caller should try calling + // We return null and the caller should try calling // claim_region() again. return nullptr; } @@ -1955,7 +1947,7 @@ void G1ConcurrentMark::verify_no_collection_set_oops() { // Verify the global finger HeapWord* global_finger = finger(); if (global_finger != nullptr && global_finger < _heap.end()) { - // Since we always iterate over all regions, we might get a nullptr HeapRegion + // Since we always iterate over all regions, we might get a null HeapRegion // here. HeapRegion* global_hr = _g1h->heap_region_containing_or_null(global_finger); guarantee(global_hr == nullptr || global_finger == global_hr->bottom(), @@ -2088,7 +2080,7 @@ void G1ConcurrentMark::print_on_error(outputStream* st) const { static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { ReferenceProcessor* result = g1h->ref_processor_cm(); - assert(result != NULL, "CM reference processor should not be NULL"); + assert(result != nullptr, "CM reference processor should not be null"); return result; } @@ -2099,8 +2091,8 @@ G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, { } void G1CMTask::setup_for_region(HeapRegion* hr) { - assert(hr != NULL, - "claim_region() should have filtered out NULL regions"); + assert(hr != nullptr, + "claim_region() should have filtered out null regions"); _curr_region = hr; _finger = hr->bottom(); update_region_limit(); @@ -2139,29 +2131,29 @@ void G1CMTask::update_region_limit() { } void G1CMTask::giveup_current_region() { - assert(_curr_region != NULL, "invariant"); + assert(_curr_region != nullptr, 
"invariant"); clear_region_fields(); } void G1CMTask::clear_region_fields() { // Values for these three fields that indicate that we're not // holding on to a region. - _curr_region = NULL; - _finger = NULL; - _region_limit = NULL; + _curr_region = nullptr; + _finger = nullptr; + _region_limit = nullptr; } void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { - if (cm_oop_closure == NULL) { - assert(_cm_oop_closure != NULL, "invariant"); + if (cm_oop_closure == nullptr) { + assert(_cm_oop_closure != nullptr, "invariant"); } else { - assert(_cm_oop_closure == NULL, "invariant"); + assert(_cm_oop_closure == nullptr, "invariant"); } _cm_oop_closure = cm_oop_closure; } void G1CMTask::reset(G1CMBitMap* mark_bitmap) { - guarantee(mark_bitmap != NULL, "invariant"); + guarantee(mark_bitmap != nullptr, "invariant"); _mark_bitmap = mark_bitmap; clear_region_fields(); @@ -2612,10 +2604,10 @@ void G1CMTask::do_marking_step(double time_target_ms, drain_global_stack(true); do { - if (!has_aborted() && _curr_region != NULL) { + if (!has_aborted() && _curr_region != nullptr) { // This means that we're already holding on to a region. - assert(_finger != NULL, "if region is not NULL, then the finger " - "should not be NULL either"); + assert(_finger != nullptr, "if region is not null, then the finger " + "should not be null either"); // We might have restarted this task after an evacuation pause // which might have evacuated the region we're holding on to @@ -2664,7 +2656,7 @@ void G1CMTask::do_marking_step(double time_target_ms, // do_bit() method we move the _finger to point to the // object currently being looked at. So, if we bail out, we // have definitely set _finger to something non-null. - assert(_finger != NULL, "invariant"); + assert(_finger != nullptr, "invariant"); // Region iteration was actually aborted. So now _finger // points to the address of the object we last scanned. 
If we @@ -2690,18 +2682,18 @@ void G1CMTask::do_marking_step(double time_target_ms, drain_global_stack(true); // Read the note on the claim_region() method on why it might - // return NULL with potentially more regions available for + // return null with potentially more regions available for // claiming and why we have to check out_of_regions() to determine // whether we're done or not. - while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { + while (!has_aborted() && _curr_region == nullptr && !_cm->out_of_regions()) { // We are going to try to claim a new region. We should have // given up on the previous one. // Separated the asserts so that we know which one fires. - assert(_curr_region == NULL, "invariant"); - assert(_finger == NULL, "invariant"); - assert(_region_limit == NULL, "invariant"); + assert(_curr_region == nullptr, "invariant"); + assert(_finger == nullptr, "invariant"); + assert(_region_limit == nullptr, "invariant"); HeapRegion* claimed_region = _cm->claim_region(_worker_id); - if (claimed_region != NULL) { + if (claimed_region != nullptr) { // Yes, we managed to claim one setup_for_region(claimed_region); assert(_curr_region == claimed_region, "invariant"); @@ -2714,11 +2706,11 @@ void G1CMTask::do_marking_step(double time_target_ms, abort_marking_if_regular_check_fail(); } - if (!has_aborted() && _curr_region == NULL) { + if (!has_aborted() && _curr_region == nullptr) { assert(_cm->out_of_regions(), "at this point we should be out of regions"); } - } while ( _curr_region != NULL && !has_aborted()); + } while ( _curr_region != nullptr && !has_aborted()); if (!has_aborted()) { // We cannot check whether the global stack is empty, since other @@ -2800,7 +2792,7 @@ void G1CMTask::do_marking_step(double time_target_ms, // Mainly for debugging purposes to make sure that a pointer to the // closure which was statically allocated in this frame doesn't // escape it by accident. 
- set_cm_oop_closure(NULL); + set_cm_oop_closure(nullptr); double end_time_ms = os::elapsedVTime() * 1000.0; double elapsed_time_ms = end_time_ms - _start_time_ms; // Update the step history. @@ -2872,16 +2864,16 @@ G1CMTask::G1CMTask(uint worker_id, _worker_id(worker_id), _g1h(G1CollectedHeap::heap()), _cm(cm), - _mark_bitmap(NULL), + _mark_bitmap(nullptr), _task_queue(task_queue), _mark_stats_cache(mark_stats, G1RegionMarkStatsCache::RegionMarkStatsCacheSize), _calls(0), _time_target_ms(0.0), _start_time_ms(0.0), - _cm_oop_closure(NULL), - _curr_region(NULL), - _finger(NULL), - _region_limit(NULL), + _cm_oop_closure(nullptr), + _curr_region(nullptr), + _finger(nullptr), + _region_limit(nullptr), _words_scanned(0), _words_scanned_limit(0), _real_words_scanned_limit(0), @@ -2897,7 +2889,7 @@ G1CMTask::G1CMTask(uint worker_id, _termination_start_time_ms(0.0), _marking_step_diff_ms() { - guarantee(task_queue != NULL, "invariant"); + guarantee(task_queue != nullptr, "invariant"); _marking_step_diff_ms.add(0.5); } diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp index 187eaf1c2ca..a824d50ac5f 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,12 +60,12 @@ private: static const uintptr_t ArraySliceBit = 1; G1TaskQueueEntry(oop obj) : _holder(obj) { - assert(_holder != NULL, "Not allowed to set NULL task queue element"); + assert(_holder != nullptr, "Not allowed to set null task queue element"); } G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { } public: - G1TaskQueueEntry() : _holder(NULL) { } + G1TaskQueueEntry() : _holder(nullptr) { } // Trivially copyable, for use in GenericTaskQueue. static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); } @@ -83,7 +83,7 @@ public: bool is_oop() const { return !is_array_slice(); } bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; } - bool is_null() const { return _holder == NULL; } + bool is_null() const { return _holder == nullptr; } }; typedef GenericTaskQueue G1CMTaskQueue; @@ -118,7 +118,7 @@ public: // stack memory is split into evenly sized chunks of oops. Users can only // add or remove entries on that basis. // Chunks are filled in increasing address order. Not completely filled chunks -// have a NULL element as a terminating element. +// have a null element as a terminating element. // // Every chunk has a header containing a single pointer element used for memory // management. This wastes some space, but is negligible (< .1% with current sizing). @@ -152,12 +152,12 @@ private: char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)]; // Allocate a new chunk from the reserved memory, using the high water mark. Returns - // NULL if out of memory. + // null if out of memory. TaskQueueEntryChunk* allocate_new_chunk(); // Atomically add the given chunk to the list. void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem); - // Atomically remove and return a chunk from the given list. 
Returns NULL if the + // Atomically remove and return a chunk from the given list. Returns null if the // list is empty. TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list); @@ -183,19 +183,19 @@ private: // Pushes the given buffer containing at most EntriesPerChunk elements on the mark // stack. If less than EntriesPerChunk elements are to be pushed, the array must - // be terminated with a NULL. + // be terminated with a null. // Returns whether the buffer contents were successfully pushed to the global mark // stack. bool par_push_chunk(G1TaskQueueEntry* buffer); // Pops a chunk from this mark stack, copying them into the given buffer. This // chunk may contain up to EntriesPerChunk elements. If there are less, the last - // element in the array is a NULL pointer. + // element in the array is a null pointer. bool par_pop_chunk(G1TaskQueueEntry* buffer); // Return whether the chunk list is empty. Racy due to unsynchronized access to // _chunk_list. - bool is_empty() const { return _chunk_list == NULL; } + bool is_empty() const { return _chunk_list == nullptr; } size_t capacity() const { return _chunk_capacity; } @@ -250,14 +250,14 @@ public: // Reset the claiming / scanning of the root regions. void prepare_for_scan(); - // Forces get_next() to return NULL so that the iteration aborts early. + // Forces get_next() to return null so that the iteration aborts early. void abort() { _should_abort = true; } // Return true if the CM thread are actively scanning root regions, // false otherwise. bool scan_in_progress() { return _scan_in_progress; } - // Claim the next root MemRegion to scan atomically, or return NULL if + // Claim the next root MemRegion to scan atomically, or return null if // all have been claimed. const MemRegion* claim_next(); @@ -406,7 +406,7 @@ class G1ConcurrentMark : public CHeapObj { TaskTerminator* terminator() { return &_terminator; } // Claims the next available region to be scanned by a marking - // task/thread. 
It might return NULL if the next region is empty or + // task/thread. It might return null if the next region is empty or // we have run out of regions. In the latter case, out_of_regions() // determines whether we've really run out of regions or the task // should call claim_region() again. This might seem a bit @@ -454,7 +454,7 @@ class G1ConcurrentMark : public CHeapObj { // Region statistics gathered during marking. G1RegionMarkStats* _region_mark_stats; // Top pointer for each region at the start of the rebuild remembered set process - // for regions which remembered sets need to be rebuilt. A NULL for a given region + // for regions which remembered sets need to be rebuilt. A null for a given region // means that this region does not be scanned during the rebuilding remembered // set phase at all. HeapWord* volatile* _top_at_rebuild_starts; @@ -655,11 +655,11 @@ private: // Oop closure used for iterations over oops G1CMOopClosure* _cm_oop_closure; - // Region this task is scanning, NULL if we're not scanning any + // Region this task is scanning, null if we're not scanning any HeapRegion* _curr_region; - // Local finger of this task, NULL if we're not scanning a region + // Local finger of this task, null if we're not scanning a region HeapWord* _finger; - // Limit of the region this task is scanning, NULL if we're not scanning one + // Limit of the region this task is scanning, null if we're not scanning one HeapWord* _region_limit; // Number of words this task has scanned diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp index 1e5bb0de49c..9f074218d92 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,8 +43,8 @@ inline bool G1CMIsAliveClosure::do_object_b(oop obj) { // Check whether the passed in object is null. During discovery the referent // may be cleared between the initial check and being passed in here. - if (obj == NULL) { - // Return true to avoid discovery when the referent is NULL. + if (obj == nullptr) { + // Return true to avoid discovery when the referent is null. return true; } @@ -54,11 +54,6 @@ inline bool G1CMIsAliveClosure::do_object_b(oop obj) { return true; } - // All objects in closed archive regions are live. - if (hr->is_closed_archive()) { - return true; - } - // All objects that are marked are live. return _g1h->is_marked(obj); } @@ -68,11 +63,11 @@ inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) { // mutator may have changed the referent's value (i.e. cleared it) between the // time the referent was determined to be potentially alive and calling this // method. 
- if (obj == NULL) { + if (obj == nullptr) { return false; } assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj)); - return _g1h->heap_region_containing(obj)->is_old_or_humongous_or_archive(); + return _g1h->heap_region_containing(obj)->is_old_or_humongous(); } inline bool G1ConcurrentMark::mark_in_bitmap(uint const worker_id, oop const obj) { @@ -101,7 +96,7 @@ inline void G1CMMarkStack::iterate(Fn fn) const { size_t num_chunks = 0; TaskQueueEntryChunk* cur = _chunk_list; - while (cur != NULL) { + while (cur != nullptr) { guarantee(num_chunks <= _chunks_in_chunk_list, "Found " SIZE_FORMAT " oop chunks which is more than there should be", num_chunks); for (size_t i = 0; i < EntriesPerChunk; ++i) { @@ -147,13 +142,13 @@ inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const { // local check will be more accurate and so result in fewer pushes, // but may also be a little slower. HeapWord* objAddr = cast_from_oop(obj); - if (_finger != NULL) { + if (_finger != nullptr) { // We have a current region. - // Finger and region values are all NULL or all non-NULL. We + // Finger and region values are all null or all non-null. We // use _finger to check since we immediately use its value. 
- assert(_curr_region != NULL, "invariant"); - assert(_region_limit != NULL, "invariant"); + assert(_curr_region != nullptr, "invariant"); + assert(_region_limit != nullptr, "invariant"); assert(_region_limit <= global_finger, "invariant"); // True if obj is less than the local finger, or is between @@ -202,14 +197,14 @@ inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(uint region) const { inline void G1ConcurrentMark::update_top_at_rebuild_start(HeapRegion* r) { uint const region = r->hrm_index(); assert(region < _g1h->max_reserved_regions(), "Tried to access TARS for region %u out of bounds", region); - assert(_top_at_rebuild_starts[region] == NULL, - "TARS for region %u has already been set to " PTR_FORMAT " should be NULL", + assert(_top_at_rebuild_starts[region] == nullptr, + "TARS for region %u has already been set to " PTR_FORMAT " should be null", region, p2i(_top_at_rebuild_starts[region])); G1RemSetTrackingPolicy* tracker = _g1h->policy()->remset_tracker(); if (tracker->needs_scan_for_rebuild(r)) { _top_at_rebuild_starts[region] = r->top(); } else { - // Leave TARS at NULL. + // Leave TARS at null. 
} } @@ -274,7 +269,7 @@ template inline bool G1CMTask::deal_with_reference(T* p) { increment_refs_reached(); oop const obj = RawAccess::oop_load(p); - if (obj == NULL) { + if (obj == nullptr) { return false; } return make_reference_grey(obj); @@ -285,7 +280,7 @@ inline void G1ConcurrentMark::raw_mark_in_bitmap(oop obj) { } bool G1ConcurrentMark::is_marked_in_bitmap(oop p) const { - assert(p != NULL && oopDesc::is_oop(p), "expected an oop"); + assert(p != nullptr && oopDesc::is_oop(p), "expected an oop"); return _mark_bitmap.is_marked(cast_from_oop(p)); } diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp index fdb17b3366c..3df68aaf6c2 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,7 +51,7 @@ public: class G1CMBitMapMappingChangedListener : public G1MappingChangedListener { G1CMBitMap* _bm; public: - G1CMBitMapMappingChangedListener() : _bm(NULL) {} + G1CMBitMapMappingChangedListener() : _bm(nullptr) {} void set_bitmap(G1CMBitMap* bm) { _bm = bm; } diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRebuildAndScrub.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRebuildAndScrub.cpp index 272982f706c..5967b82f39e 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentRebuildAndScrub.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentRebuildAndScrub.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -54,7 +54,7 @@ // this address (live) objects need to be scanned for references // that might need to be added to the remembered sets. // -// Note that bottom <= parsable_bottom <= tars; if there is no tars (i.e. NULL), +// Note that bottom <= parsable_bottom <= tars; if there is no tars (i.e. null), // obviously there can not be a parsable_bottom. // // We need to scrub and scan objects to rebuild remembered sets until parsable_bottom; @@ -99,7 +99,7 @@ class G1RebuildRSAndScrubTask : public WorkerTask { // that there is some rebuild or scrubbing work. // // Based on the results of G1RemSetTrackingPolicy::needs_scan_for_rebuild(), - // the value may be changed to nullptr during rebuilding if the region has either: + // the value may be changed to null during rebuilding if the region has either: // - been allocated after rebuild start, or // - been eagerly reclaimed by a young collection (only humongous) bool should_rebuild_or_scrub(HeapRegion* hr) const { diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp index 3d6fd3163a9..ff4e95dceca 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -84,7 +84,7 @@ G1ConcurrentRefineThreadControl::~G1ConcurrentRefineThreadControl() { } jint G1ConcurrentRefineThreadControl::initialize(G1ConcurrentRefine* cr, uint max_num_threads) { - assert(cr != NULL, "G1ConcurrentRefine must not be NULL"); + assert(cr != nullptr, "G1ConcurrentRefine must not be null"); _cr = cr; _max_num_threads = max_num_threads; @@ -138,7 +138,7 @@ bool G1ConcurrentRefineThreadControl::activate(uint worker_id) { void G1ConcurrentRefineThreadControl::worker_threads_do(ThreadClosure* tc) { for (uint i = 0; i < _max_num_threads; i++) { - if (_threads[i] != NULL) { + if (_threads[i] != nullptr) { tc->do_thread(_threads[i]); } } @@ -146,7 +146,7 @@ void G1ConcurrentRefineThreadControl::worker_threads_do(ThreadClosure* tc) { void G1ConcurrentRefineThreadControl::stop() { for (uint i = 0; i < _max_num_threads; i++) { - if (_threads[i] != NULL) { + if (_threads[i] != nullptr) { _threads[i]->stop(); } } diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp b/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp index 333009107f1..30a8fe7f757 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -155,7 +155,7 @@ public: ~G1ConcurrentRefine(); // Returns a G1ConcurrentRefine instance if succeeded to create/initialize the - // G1ConcurrentRefine instance. Otherwise, returns nullptr with error code. + // G1ConcurrentRefine instance. Otherwise, returns null with error code. static G1ConcurrentRefine* create(G1Policy* policy, jint* ecode); // Stop all the refinement threads. 
diff --git a/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp index fa8bd43ae68..41d67dacde1 100644 --- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp +++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp @@ -121,7 +121,7 @@ size_t G1DirtyCardQueueSet::num_cards() const { } void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) { - assert(cbn != NULL, "precondition"); + assert(cbn != nullptr, "precondition"); // Increment _num_cards before adding to queue, so queue removal doesn't // need to deal with _num_cards possibly going negative. Atomic::add(&_num_cards, buffer_size() - cbn->index()); @@ -134,11 +134,11 @@ void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) { // Thread-safe attempt to remove and return the first buffer from // the _completed queue, using the NonblockingQueue::try_pop() underneath. -// It has a limitation that it may return NULL when there are objects +// It has a limitation that it may return null when there are objects // in the queue if there is a concurrent push/append operation. BufferNode* G1DirtyCardQueueSet::dequeue_completed_buffer() { Thread* current_thread = Thread::current(); - BufferNode* result = NULL; + BufferNode* result = nullptr; while (true) { // Use GlobalCounter critical section to avoid ABA problem. // The release of a buffer to its allocator's free list uses @@ -155,10 +155,10 @@ BufferNode* G1DirtyCardQueueSet::dequeue_completed_buffer() { BufferNode* G1DirtyCardQueueSet::get_completed_buffer() { BufferNode* result = dequeue_completed_buffer(); - if (result == NULL) { // Unlikely if no paused buffers. + if (result == nullptr) { // Unlikely if no paused buffers. 
enqueue_previous_paused_buffers(); result = dequeue_completed_buffer(); - if (result == NULL) return NULL; + if (result == nullptr) return nullptr; } Atomic::sub(&_num_cards, buffer_size() - result->index()); return result; @@ -179,14 +179,14 @@ void G1DirtyCardQueueSet::verify_num_cards() const { #endif // ASSERT G1DirtyCardQueueSet::PausedBuffers::PausedList::PausedList() : - _head(NULL), _tail(NULL), + _head(nullptr), _tail(nullptr), _safepoint_id(SafepointSynchronize::safepoint_id()) {} #ifdef ASSERT G1DirtyCardQueueSet::PausedBuffers::PausedList::~PausedList() { - assert(Atomic::load(&_head) == NULL, "precondition"); - assert(_tail == NULL, "precondition"); + assert(Atomic::load(&_head) == nullptr, "precondition"); + assert(_tail == nullptr, "precondition"); } #endif // ASSERT @@ -199,8 +199,8 @@ void G1DirtyCardQueueSet::PausedBuffers::PausedList::add(BufferNode* node) { assert_not_at_safepoint(); assert(is_next(), "precondition"); BufferNode* old_head = Atomic::xchg(&_head, node); - if (old_head == NULL) { - assert(_tail == NULL, "invariant"); + if (old_head == nullptr) { + assert(_tail == nullptr, "invariant"); _tail = node; } else { node->set_next(old_head); @@ -210,27 +210,27 @@ void G1DirtyCardQueueSet::PausedBuffers::PausedList::add(BufferNode* node) { G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::PausedList::take() { BufferNode* head = Atomic::load(&_head); BufferNode* tail = _tail; - Atomic::store(&_head, (BufferNode*)NULL); - _tail = NULL; + Atomic::store(&_head, (BufferNode*)nullptr); + _tail = nullptr; return HeadTail(head, tail); } -G1DirtyCardQueueSet::PausedBuffers::PausedBuffers() : _plist(NULL) {} +G1DirtyCardQueueSet::PausedBuffers::PausedBuffers() : _plist(nullptr) {} #ifdef ASSERT G1DirtyCardQueueSet::PausedBuffers::~PausedBuffers() { - assert(Atomic::load(&_plist) == NULL, "invariant"); + assert(Atomic::load(&_plist) == nullptr, "invariant"); } #endif // ASSERT void G1DirtyCardQueueSet::PausedBuffers::add(BufferNode* 
node) { assert_not_at_safepoint(); PausedList* plist = Atomic::load_acquire(&_plist); - if (plist == NULL) { + if (plist == nullptr) { // Try to install a new next list. plist = new PausedList(); - PausedList* old_plist = Atomic::cmpxchg(&_plist, (PausedList*)NULL, plist); - if (old_plist != NULL) { + PausedList* old_plist = Atomic::cmpxchg(&_plist, (PausedList*)nullptr, plist); + if (old_plist != nullptr) { // Some other thread installed a new next list. Use it instead. delete plist; plist = old_plist; @@ -248,10 +248,10 @@ G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_previous( // deleted out from under us by a concurrent take_previous(). GlobalCounter::CriticalSection cs(Thread::current()); previous = Atomic::load_acquire(&_plist); - if ((previous == NULL) || // Nothing to take. + if ((previous == nullptr) || // Nothing to take. previous->is_next() || // Not from a previous safepoint. // Some other thread stole it. - (Atomic::cmpxchg(&_plist, previous, (PausedList*)NULL) != previous)) { + (Atomic::cmpxchg(&_plist, previous, (PausedList*)nullptr) != previous)) { return HeadTail(); } } @@ -269,8 +269,8 @@ G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_all() { assert_at_safepoint(); HeadTail result; PausedList* plist = Atomic::load(&_plist); - if (plist != NULL) { - Atomic::store(&_plist, (PausedList*)NULL); + if (plist != nullptr) { + Atomic::store(&_plist, (PausedList*)nullptr); result = plist->take(); delete plist; } @@ -279,7 +279,7 @@ G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_all() { void G1DirtyCardQueueSet::record_paused_buffer(BufferNode* node) { assert_not_at_safepoint(); - assert(node->next() == NULL, "precondition"); + assert(node->next() == nullptr, "precondition"); // Ensure there aren't any paused buffers from a previous safepoint. 
enqueue_previous_paused_buffers(); // Cards for paused buffers are included in count, to contribute to @@ -291,8 +291,8 @@ void G1DirtyCardQueueSet::record_paused_buffer(BufferNode* node) { } void G1DirtyCardQueueSet::enqueue_paused_buffers_aux(const HeadTail& paused) { - if (paused._head != NULL) { - assert(paused._tail != NULL, "invariant"); + if (paused._head != nullptr) { + assert(paused._tail != nullptr, "invariant"); // Cards from paused buffers are already recorded in the queue count. _completed.append(*paused._head, *paused._tail); } @@ -311,10 +311,10 @@ void G1DirtyCardQueueSet::enqueue_all_paused_buffers() { void G1DirtyCardQueueSet::abandon_completed_buffers() { BufferNodeList list = take_all_completed_buffers(); BufferNode* buffers_to_delete = list._head; - while (buffers_to_delete != NULL) { + while (buffers_to_delete != nullptr) { BufferNode* bn = buffers_to_delete; buffers_to_delete = bn->next(); - bn->set_next(NULL); + bn->set_next(nullptr); deallocate_buffer(bn); } } @@ -324,7 +324,7 @@ void G1DirtyCardQueueSet::abandon_completed_buffers() { void G1DirtyCardQueueSet::merge_bufferlists(G1RedirtyCardsQueueSet* src) { assert(allocator() == src->allocator(), "precondition"); const BufferNodeList from = src->take_all_completed_buffers(); - if (from._head != NULL) { + if (from._head != nullptr) { Atomic::add(&_num_cards, from._entry_count); _completed.append(*from._head, *from._tail); } @@ -504,7 +504,7 @@ void G1DirtyCardQueueSet::handle_completed_buffer(BufferNode* new_node, } BufferNode* node = get_completed_buffer(); - if (node == NULL) return; // Didn't get a buffer to process. + if (node == nullptr) return; // Didn't get a buffer to process. // Refine cards in buffer. @@ -523,7 +523,7 @@ bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_id, if (Atomic::load(&_num_cards) <= stop_at) return false; BufferNode* node = get_completed_buffer(); - if (node == NULL) return false; // Didn't get a buffer to process. 
+ if (node == nullptr) return false; // Didn't get a buffer to process. bool fully_processed = refine_buffer(node, worker_id, stats); handle_refined_buffer(node, fully_processed); diff --git a/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp b/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp index 8d6e0fd92a3..b3a2237f555 100644 --- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp +++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp @@ -73,7 +73,7 @@ class G1DirtyCardQueueSet: public PtrQueueSet { struct HeadTail { BufferNode* _head; BufferNode* _tail; - HeadTail() : _head(NULL), _tail(NULL) {} + HeadTail() : _head(nullptr), _tail(nullptr) {} HeadTail(BufferNode* head, BufferNode* tail) : _head(head), _tail(tail) {} }; @@ -126,7 +126,7 @@ class G1DirtyCardQueueSet: public PtrQueueSet { }; // The most recently created list, which might be for either the next or - // a previous safepoint, or might be NULL if the next list hasn't been + // a previous safepoint, or might be null if the next list hasn't been // created yet. We only need one list because of the requirement that // threads calling add() must first ensure there are no paused buffers // from a previous safepoint. There might be many list instances existing @@ -211,10 +211,10 @@ class G1DirtyCardQueueSet: public PtrQueueSet { // Thread-safe attempt to remove and return the first buffer from // the _completed queue. - // Returns NULL if the queue is empty, or if a concurrent push/append + // Returns null if the queue is empty, or if a concurrent push/append // interferes. It uses GlobalCounter critical section to avoid ABA problem. BufferNode* dequeue_completed_buffer(); - // Remove and return a completed buffer from the list, or return NULL + // Remove and return a completed buffer from the list, or return null // if none available. 
BufferNode* get_completed_buffer(); diff --git a/src/hotspot/share/gc/g1/g1FreeIdSet.cpp b/src/hotspot/share/gc/g1/g1FreeIdSet.cpp index 37e663b9e6b..ca0961bd963 100644 --- a/src/hotspot/share/gc/g1/g1FreeIdSet.cpp +++ b/src/hotspot/share/gc/g1/g1FreeIdSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ G1FreeIdSet::G1FreeIdSet(uint start, uint size) : _sem(size), // counting semaphore for available ids - _next(NULL), // array of "next" indices + _next(nullptr), // array of "next" indices _start(start), // first id value _size(size), // number of available ids _head_index_mask(0), // mask for extracting index from a _head value. diff --git a/src/hotspot/share/gc/g1/g1FromCardCache.cpp b/src/hotspot/share/gc/g1/g1FromCardCache.cpp index 0a566af9d13..d3e7af768b5 100644 --- a/src/hotspot/share/gc/g1/g1FromCardCache.cpp +++ b/src/hotspot/share/gc/g1/g1FromCardCache.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,7 @@ #include "runtime/globals.hpp" #include "utilities/debug.hpp" -uintptr_t** G1FromCardCache::_cache = NULL; +uintptr_t** G1FromCardCache::_cache = nullptr; uint G1FromCardCache::_max_reserved_regions = 0; size_t G1FromCardCache::_static_mem_size = 0; #ifdef ASSERT @@ -40,7 +40,7 @@ uint G1FromCardCache::_max_workers = 0; void G1FromCardCache::initialize(uint max_reserved_regions) { guarantee(max_reserved_regions > 0, "Heap size must be valid"); - guarantee(_cache == NULL, "Should not call this multiple times"); + guarantee(_cache == nullptr, "Should not call this multiple times"); _max_reserved_regions = max_reserved_regions; #ifdef ASSERT diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp index 1fd30111c96..8fb36e73550 100644 --- a/src/hotspot/share/gc/g1/g1FullCollector.cpp +++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp @@ -111,12 +111,11 @@ uint G1FullCollector::calc_active_workers() { } G1FullCollector::G1FullCollector(G1CollectedHeap* heap, - bool explicit_gc, bool clear_soft_refs, bool do_maximal_compaction, G1FullGCTracer* tracer) : _heap(heap), - _scope(heap->monitoring_support(), explicit_gc, clear_soft_refs, do_maximal_compaction, tracer), + _scope(heap->monitoring_support(), clear_soft_refs, do_maximal_compaction, tracer), _num_workers(calc_active_workers()), _has_compaction_targets(false), _has_humongous(false), @@ -183,7 +182,7 @@ void G1FullCollector::prepare_collection() { // Verification needs the bitmap, so we should clear the bitmap only later. 
bool in_concurrent_cycle = _heap->abort_concurrent_cycle(); - _heap->verify_before_full_collection(scope()->is_explicit_gc()); + _heap->verify_before_full_collection(); if (in_concurrent_cycle) { GCTraceTime(Debug, gc) debug("Clear Bitmap"); _heap->concurrent_mark()->clear_bitmap(_heap->workers()); @@ -257,9 +256,9 @@ void G1FullCollector::complete_collection() { void G1FullCollector::before_marking_update_attribute_table(HeapRegion* hr) { if (hr->is_free()) { _region_attr_table.set_free(hr->hrm_index()); - } else if (hr->is_closed_archive()) { - _region_attr_table.set_skip_marking(hr->hrm_index()); - } else if (hr->is_pinned()) { + } else if (hr->is_humongous()) { + // Humongous objects will never be moved in the "main" compaction phase, but + // afterwards in a special phase if needed. _region_attr_table.set_skip_compacting(hr->hrm_index()); } else { // Everything else should be compacted. @@ -326,7 +325,10 @@ void G1FullCollector::phase1_mark_live_objects() { _heap->complete_cleaning(purged_class); } - scope()->tracer()->report_object_count_after_gc(&_is_alive); + { + GCTraceTime(Debug, gc, phases) debug("Report Object Count", scope()->timer()); + scope()->tracer()->report_object_count_after_gc(&_is_alive); + } #if TASKQUEUE_STATS oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue"); array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue"); diff --git a/src/hotspot/share/gc/g1/g1FullCollector.hpp b/src/hotspot/share/gc/g1/g1FullCollector.hpp index 619a8d0548b..b1ceb599fd2 100644 --- a/src/hotspot/share/gc/g1/g1FullCollector.hpp +++ b/src/hotspot/share/gc/g1/g1FullCollector.hpp @@ -53,7 +53,7 @@ class ReferenceProcessor; class G1FullGCSubjectToDiscoveryClosure: public BoolObjectClosure { public: bool do_object_b(oop p) { - assert(p != NULL, "must be"); + assert(p != nullptr, "must be"); return true; } }; @@ -100,7 +100,6 @@ class G1FullCollector : StackObj { public: G1FullCollector(G1CollectedHeap* heap, - bool explicit_gc, bool 
clear_soft_refs, bool do_maximal_compaction, G1FullGCTracer* tracer); @@ -130,7 +129,6 @@ public: inline bool is_compacting(oop obj) const; inline bool is_skip_compacting(uint region_index) const; - inline bool is_skip_marking(oop obj) const; // Are we (potentially) going to compact into this region? inline bool is_compaction_target(uint region_index) const; diff --git a/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp b/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp index 353143a6219..e36afc01cdb 100644 --- a/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp +++ b/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp @@ -40,10 +40,6 @@ bool G1FullCollector::is_skip_compacting(uint region_index) const { return _region_attr_table.is_skip_compacting(region_index); } -bool G1FullCollector::is_skip_marking(oop obj) const { - return _region_attr_table.is_skip_marking(cast_from_oop(obj)); -} - bool G1FullCollector::is_compaction_target(uint region_index) const { return _region_attr_table.is_compacting(region_index) || is_free(region_index); } diff --git a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp index 2d09f8e817b..a45c3eb17de 100644 --- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,10 +68,8 @@ class G1AdjustRegionClosure : public HeapRegionClosure { // work distribution. 
oop obj = cast_to_oop(r->humongous_start_region()->bottom()); obj->oop_iterate(&cl, MemRegion(r->bottom(), r->top())); - } else if (!r->is_closed_archive() && !r->is_free()) { - // Closed archive regions never change references and only contain - // references into other closed regions and are always live. Free - // regions do not contain objects to iterate. So skip both. + } else if (!r->is_free()) { + // Free regions do not contain objects to iterate. So skip them. G1AdjustLiveClosure adjust(&cl); r->apply_to_marked_objects(_bitmap, &adjust); } diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp index 8cc97c763f5..8c4fa4eb2e0 100644 --- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp @@ -67,7 +67,6 @@ void G1FullGCCompactTask::copy_object_to_new_location(oop obj) { } void G1FullGCCompactTask::compact_region(HeapRegion* hr) { - assert(!hr->is_pinned(), "Should be no pinned region in compaction queue"); assert(!hr->is_humongous(), "Should be no humongous regions in compaction queue"); if (!collector()->is_free(hr->hrm_index())) { diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp index 7a0541838bf..dabff2bc9fb 100644 --- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp @@ -57,7 +57,7 @@ bool G1FullGCCompactionPoint::has_regions() { } bool G1FullGCCompactionPoint::is_initialized() { - return _current_region != NULL; + return _current_region != nullptr; } void G1FullGCCompactionPoint::initialize(HeapRegion* hr) { @@ -71,7 +71,7 @@ HeapRegion* G1FullGCCompactionPoint::current_region() { HeapRegion* G1FullGCCompactionPoint::next_region() { HeapRegion* next = *(++_compaction_region_iterator); - assert(next != NULL, "Must return valid region"); + assert(next != nullptr, "Must return valid region"); return next; } @@ -93,7 +93,7 
@@ void G1FullGCCompactionPoint::switch_region() { } void G1FullGCCompactionPoint::forward(oop object, size_t size) { - assert(_current_region != NULL, "Must have been initialized"); + assert(_current_region != nullptr, "Must have been initialized"); // Ensure the object fit in the current region. while (!object_will_fit(size)) { diff --git a/src/hotspot/share/gc/g1/g1FullGCHeapRegionAttr.hpp b/src/hotspot/share/gc/g1/g1FullGCHeapRegionAttr.hpp index ff613c9e4af..6acac0b0fa5 100644 --- a/src/hotspot/share/gc/g1/g1FullGCHeapRegionAttr.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCHeapRegionAttr.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,17 +31,16 @@ // fast access during the full collection. In particular some parts of the // region type information is encoded in these per-region bytes. Value encoding // has been specifically chosen to make required accesses fast. In particular, -// the table specifies whether a Full GC cycle should be compacting, skip -// compacting, or skip marking (liveness analysis) a region. +// the table specifies whether a Full GC cycle should be compacting or skip +// compacting a region. // Reasons for not compacting a region: -// (1) the HeapRegion itself has been pinned at the start of Full GC. +// (1) the HeapRegion itself can not be moved during this phase of the full gc +// (e.g. Humongous regions). // (2) the occupancy of the region is too high to be considered eligible for compaction. -// The only examples for skipping marking for regions are Closed Archive regions. class G1FullGCHeapRegionAttr : public G1BiasedMappedArray { static const uint8_t Compacting = 0; // Region will be compacted. 
static const uint8_t SkipCompacting = 1; // Region should not be compacted, but otherwise handled as usual. - static const uint8_t SkipMarking = 2; // Region contents are not even marked through, but contain live objects. - static const uint8_t Free = 3; // Regions is free. + static const uint8_t Free = 2; // Region is free. static const uint8_t Invalid = 255; @@ -56,15 +55,9 @@ public: void set_invalid(uint idx) { set_by_index(idx, Invalid); } void set_compacting(uint idx) { set_by_index(idx, Compacting); } - void set_skip_marking(uint idx) { set_by_index(idx, SkipMarking); } void set_skip_compacting(uint idx) { set_by_index(idx, SkipCompacting); } void set_free(uint idx) { set_by_index(idx, Free); } - bool is_skip_marking(HeapWord* obj) const { - assert(!is_free(obj), "Should not have objects in free regions."); - return get_by_address(obj) == SkipMarking; - } - bool is_compacting(HeapWord* obj) const { assert(!is_free(obj), "Should not have objects in free regions."); return get_by_address(obj) == Compacting; diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp index 43c2d9d2f1c..8080a1d113e 100644 --- a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp @@ -45,10 +45,6 @@ #include "utilities/debug.hpp" inline bool G1FullGCMarker::mark_object(oop obj) { - if (_collector->is_skip_marking(obj)) { - return false; - } - // Try to mark. if (!_bitmap->par_mark(obj)) { // Lost mark race. 
@@ -83,11 +79,8 @@ template inline void G1FullGCMarker::mark_and_push(T* p) { oop obj = CompressedOops::decode_not_null(heap_oop); if (mark_object(obj)) { _oop_stack.push(obj); - assert(_bitmap->is_marked(obj), "Must be marked now - map self"); - } else { - assert(_bitmap->is_marked(obj) || _collector->is_skip_marking(obj), - "Must be marked by other or object in skip marking region"); } + assert(_bitmap->is_marked(obj), "Must be marked"); } } diff --git a/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp b/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp index e8c0d27bc62..ebc7852d150 100644 --- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it diff --git a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp index 73e5fee3dd8..aa194d16203 100644 --- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp @@ -78,7 +78,7 @@ inline void G1AdjustClosure::do_oop(oop* p) { do_oop_work(p); } inline void G1AdjustClosure::do_oop(narrowOop* p) { do_oop_work(p); } inline bool G1IsAliveClosure::do_object_b(oop p) { - return _bitmap->is_marked(p) || _collector->is_skip_marking(p); + return _bitmap->is_marked(p); } template diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp index 59c28e2482a..7cef6029397 100644 --- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp @@ -47,9 +47,7 @@ bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* uint region_idx = 
hr->hrm_index(); assert(_collector->is_compaction_target(region_idx), "must be"); - assert(!hr->is_pinned(), "must be"); - assert(!hr->is_closed_archive(), "must be"); - assert(!hr->is_open_archive(), "must be"); + assert(!hr->is_humongous(), "must be"); prepare_for_compaction(hr); diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp index aeefd9fb530..7f09f0553e9 100644 --- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp @@ -42,8 +42,7 @@ class G1DetermineCompactionQueueClosure : public HeapRegionClosure { G1FullCollector* _collector; uint _cur_worker; - template - inline void free_pinned_region(HeapRegion* hr); + inline void free_empty_humongous_region(HeapRegion* hr); inline bool should_compact(HeapRegion* hr) const; diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp index 70fcc4b2c19..76a647a3da3 100644 --- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp @@ -33,21 +33,16 @@ #include "gc/g1/g1FullGCScope.hpp" #include "gc/g1/heapRegion.inline.hpp" -template -void G1DetermineCompactionQueueClosure::free_pinned_region(HeapRegion* hr) { - if (is_humongous) { - _g1h->free_humongous_region(hr, nullptr); - } else { - _g1h->free_region(hr, nullptr); - } +void G1DetermineCompactionQueueClosure::free_empty_humongous_region(HeapRegion* hr) { + _g1h->free_humongous_region(hr, nullptr); _collector->set_free(hr->hrm_index()); add_to_compaction_queue(hr); } inline bool G1DetermineCompactionQueueClosure::should_compact(HeapRegion* hr) const { - // There is no need to iterate and forward objects in pinned regions ie. + // There is no need to iterate and forward objects in non-movable regions ie. // prepare them for compaction. 
- if (hr->is_pinned()) { + if (hr->is_humongous()) { return false; } size_t live_words = _collector->live_words(hr->hrm_index()); @@ -88,17 +83,10 @@ inline bool G1DetermineCompactionQueueClosure::do_heap_region(HeapRegion* hr) { oop obj = cast_to_oop(hr->humongous_start_region()->bottom()); bool is_empty = !_collector->mark_bitmap()->is_marked(obj); if (is_empty) { - free_pinned_region(hr); + free_empty_humongous_region(hr); } else { _collector->set_has_humongous(); } - } else if (hr->is_open_archive()) { - bool is_empty = _collector->live_words(hr->hrm_index()) == 0; - if (is_empty) { - free_pinned_region(hr); - } - } else if (hr->is_closed_archive()) { - // nothing to do with closed archive region } else { assert(MarkSweepDeadRatio > 0, "only skip compaction for other regions when MarkSweepDeadRatio > 0"); diff --git a/src/hotspot/share/gc/g1/g1FullGCResetMetadataTask.cpp b/src/hotspot/share/gc/g1/g1FullGCResetMetadataTask.cpp index b6a46111f14..003cc2b3f2b 100644 --- a/src/hotspot/share/gc/g1/g1FullGCResetMetadataTask.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCResetMetadataTask.cpp @@ -40,7 +40,7 @@ bool G1FullGCResetMetadataTask::G1ResetMetadataClosure::do_heap_region(HeapRegio uint const region_idx = hr->hrm_index(); if (!_collector->is_compaction_target(region_idx)) { assert(!hr->is_free(), "all free regions should be compaction targets"); - assert(_collector->is_skip_compacting(region_idx) || hr->is_closed_archive(), "must be"); + assert(_collector->is_skip_compacting(region_idx), "must be"); if (hr->needs_scrubbing_during_full_gc()) { scrub_skip_compacting_region(hr, hr->is_young()); } @@ -90,12 +90,6 @@ void G1FullGCResetMetadataTask::G1ResetMetadataClosure::reset_skip_compacting(He if (hr->is_humongous()) { oop obj = cast_to_oop(hr->humongous_start_region()->bottom()); assert(_collector->mark_bitmap()->is_marked(obj), "must be live"); - } else if (hr->is_open_archive()) { - bool is_empty = (_collector->live_words(hr->hrm_index()) == 0); - 
assert(!is_empty, "should contain at least one live obj"); - } else if (hr->is_closed_archive()) { - // should early-return above - ShouldNotReachHere(); } else { assert(_collector->live_words(region_index) > _collector->scope()->region_compaction_threshold(), "should be quite full"); diff --git a/src/hotspot/share/gc/g1/g1FullGCScope.cpp b/src/hotspot/share/gc/g1/g1FullGCScope.cpp index 25abb4b05ea..5bbc5c4e2d4 100644 --- a/src/hotspot/share/gc/g1/g1FullGCScope.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCScope.cpp @@ -37,12 +37,10 @@ G1FullGCJFRTracerMark::~G1FullGCJFRTracerMark() { } G1FullGCScope::G1FullGCScope(G1MonitoringSupport* monitoring_support, - bool explicit_gc, bool clear_soft, bool do_maximal_compaction, G1FullGCTracer* tracer) : _rm(), - _explicit_gc(explicit_gc), _do_maximal_compaction(do_maximal_compaction), _g1h(G1CollectedHeap::heap()), _svc_marker(SvcGCMarker::FULL), @@ -57,10 +55,6 @@ G1FullGCScope::G1FullGCScope(G1MonitoringSupport* monitoring_support, HeapRegion::GrainWords : (1 - MarkSweepDeadRatio / 100.0) * HeapRegion::GrainWords) { } -bool G1FullGCScope::is_explicit_gc() { - return _explicit_gc; -} - bool G1FullGCScope::should_clear_soft_refs() { return _soft_refs.should_clear(); } diff --git a/src/hotspot/share/gc/g1/g1FullGCScope.hpp b/src/hotspot/share/gc/g1/g1FullGCScope.hpp index 398b7ba386c..f3d89c7646f 100644 --- a/src/hotspot/share/gc/g1/g1FullGCScope.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCScope.hpp @@ -47,7 +47,6 @@ public: // Class used to group scoped objects used in the Full GC together. 
class G1FullGCScope : public StackObj { ResourceMark _rm; - bool _explicit_gc; bool _do_maximal_compaction; G1CollectedHeap* _g1h; SvcGCMarker _svc_marker; @@ -62,12 +61,10 @@ class G1FullGCScope : public StackObj { public: G1FullGCScope(G1MonitoringSupport* monitoring_support, - bool explicit_gc, bool clear_soft, bool do_maximal_compaction, G1FullGCTracer* tracer); - bool is_explicit_gc(); bool should_clear_soft_refs(); bool do_maximal_compaction() { return _do_maximal_compaction; } diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp index 2339996bbe5..5fc8b54df0d 100644 --- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp +++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp @@ -188,7 +188,7 @@ void G1GCPhaseTimes::reset() { _cur_verify_after_time_ms = 0.0; for (int i = 0; i < GCParPhasesSentinel; i++) { - if (_gc_par_phases[i] != NULL) { + if (_gc_par_phases[i] != nullptr) { _gc_par_phases[i]->reset(); } } @@ -203,10 +203,10 @@ void G1GCPhaseTimes::record_gc_pause_start() { } #define ASSERT_PHASE_UNINITIALIZED(phase) \ - assert(_gc_par_phases[phase] == NULL || _gc_par_phases[phase]->get(i) == uninitialized, "Phase " #phase " reported for thread that was not started"); + assert(_gc_par_phases[phase] == nullptr || _gc_par_phases[phase]->get(i) == uninitialized, "Phase " #phase " reported for thread that was not started"); double G1GCPhaseTimes::worker_time(GCParPhases phase, uint worker) { - if (_gc_par_phases[phase] == NULL) { + if (_gc_par_phases[phase] == nullptr) { return 0.0; } double value = _gc_par_phases[phase]->get(worker); @@ -287,17 +287,17 @@ size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_id, u // return the average time for a phase in milliseconds double G1GCPhaseTimes::average_time_ms(GCParPhases phase) const { - if (_gc_par_phases[phase] == NULL) { + if (_gc_par_phases[phase] == nullptr) { return 0.0; } return _gc_par_phases[phase]->average() * 1000.0; } size_t 
G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase, uint index) { - if (_gc_par_phases[phase] == NULL) { + if (_gc_par_phases[phase] == nullptr) { return 0; } - assert(_gc_par_phases[phase]->thread_work_items(index) != NULL, "No sub count"); + assert(_gc_par_phases[phase]->thread_work_items(index) != nullptr, "No sub count"); return _gc_par_phases[phase]->thread_work_items(index)->sum(); } @@ -314,7 +314,7 @@ void G1GCPhaseTimes::details(T* phase, uint indent_level) const { void G1GCPhaseTimes::print_thread_work_items(WorkerDataArray* phase, uint indent_level, outputStream* out) const { for (uint i = 0; i < phase->MaxThreadWorkItems; i++) { WorkerDataArray* work_items = phase->thread_work_items(i); - if (work_items != NULL) { + if (work_items != nullptr) { out->sp((indent_level + 1) * 2); work_items->print_summary_on(out, true); details(work_items, indent_level + 1); @@ -602,13 +602,13 @@ void G1EvacPhaseWithTrimTimeTracker::stop() { G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id, bool allow_multiple_record) : _start_time(), _phase(phase), _phase_times(phase_times), _worker_id(worker_id), _event(), _allow_multiple_record(allow_multiple_record) { - if (_phase_times != NULL) { + if (_phase_times != nullptr) { _start_time = Ticks::now(); } } G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() { - if (_phase_times != NULL) { + if (_phase_times != nullptr) { if (_allow_multiple_record) { _phase_times->record_or_add_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds()); } else { @@ -629,7 +629,7 @@ G1EvacPhaseTimesTracker::G1EvacPhaseTimesTracker(G1GCPhaseTimes* phase_times, } G1EvacPhaseTimesTracker::~G1EvacPhaseTimesTracker() { - if (_phase_times != NULL) { + if (_phase_times != nullptr) { // Explicitly stop the trim tracker since it's not yet destructed. _trim_tracker.stop(); // Exclude trim time by increasing the start time. 
diff --git a/src/hotspot/share/gc/g1/g1HeapRegionTraceType.hpp b/src/hotspot/share/gc/g1/g1HeapRegionTraceType.hpp index 1c5690e566d..0ad5712fea0 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegionTraceType.hpp +++ b/src/hotspot/share/gc/g1/g1HeapRegionTraceType.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,8 +37,6 @@ class G1HeapRegionTraceType : AllStatic { StartsHumongous, ContinuesHumongous, Old, - OpenArchive, - ClosedArchive, G1HeapRegionTypeEndSentinel }; @@ -50,9 +48,7 @@ class G1HeapRegionTraceType : AllStatic { case StartsHumongous: return "Starts Humongous"; case ContinuesHumongous: return "Continues Humongous"; case Old: return "Old"; - case OpenArchive: return "OpenArchive"; - case ClosedArchive: return "ClosedArchive"; - default: ShouldNotReachHere(); return NULL; + default: ShouldNotReachHere(); return nullptr; } } }; diff --git a/src/hotspot/share/gc/g1/g1HeapTransition.cpp b/src/hotspot/share/gc/g1/g1HeapTransition.cpp index 0c534c15317..baead337d83 100644 --- a/src/hotspot/share/gc/g1/g1HeapTransition.cpp +++ b/src/hotspot/share/gc/g1/g1HeapTransition.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,11 +33,10 @@ G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) : _eden_length(g1_heap->eden_regions_count()), _survivor_length(g1_heap->survivor_regions_count()), _old_length(g1_heap->old_regions_count()), - _archive_length(g1_heap->archive_regions_count()), _humongous_length(g1_heap->humongous_regions_count()), _meta_sizes(MetaspaceUtils::get_combined_statistics()), - _eden_length_per_node(NULL), - _survivor_length_per_node(NULL) { + _eden_length_per_node(nullptr), + _survivor_length_per_node(nullptr) { uint node_count = G1NUMA::numa()->num_active_nodes(); @@ -67,19 +66,17 @@ struct DetailedUsage : public StackObj { size_t _eden_used; size_t _survivor_used; size_t _old_used; - size_t _archive_used; size_t _humongous_used; size_t _eden_region_count; size_t _survivor_region_count; size_t _old_region_count; - size_t _archive_region_count; size_t _humongous_region_count; DetailedUsage() : - _eden_used(0), _survivor_used(0), _old_used(0), _archive_used(0), _humongous_used(0), + _eden_used(0), _survivor_used(0), _old_used(0), _humongous_used(0), _eden_region_count(0), _survivor_region_count(0), _old_region_count(0), - _archive_region_count(0), _humongous_region_count(0) {} + _humongous_region_count(0) {} }; class DetailedUsageClosure: public HeapRegionClosure { @@ -89,9 +86,6 @@ public: if (r->is_old()) { _usage._old_used += r->used(); _usage._old_region_count++; - } else if (r->is_archive()) { - _usage._archive_used += r->used(); - _usage._archive_region_count++; } else if (r->is_survivor()) { _usage._survivor_used += r->used(); _usage._survivor_region_count++; @@ -117,8 +111,8 @@ static void log_regions(const char* msg, size_t before_length, size_t after_leng ls.print("%s regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")", msg, before_length, after_length, capacity); - // Not NULL only if gc+heap+numa at Debug level is enabled. 
- if (before_per_node_length != NULL && after_per_node_length != NULL) { + // Not null only if gc+heap+numa at Debug level is enabled. + if (before_per_node_length != nullptr && after_per_node_length != nullptr) { G1NUMA* numa = G1NUMA::numa(); uint num_nodes = numa->num_active_nodes(); const int* node_ids = numa->node_ids(); @@ -152,8 +146,6 @@ void G1HeapTransition::print() { after._survivor_length, usage._survivor_region_count); assert(usage._old_region_count == after._old_length, "Expected old to be " SIZE_FORMAT " but was " SIZE_FORMAT, after._old_length, usage._old_region_count); - assert(usage._archive_region_count == after._archive_length, "Expected archive to be " SIZE_FORMAT " but was " SIZE_FORMAT, - after._archive_length, usage._archive_region_count); assert(usage._humongous_region_count == after._humongous_length, "Expected humongous to be " SIZE_FORMAT " but was " SIZE_FORMAT, after._humongous_length, usage._humongous_region_count); } @@ -172,11 +164,6 @@ void G1HeapTransition::print() { log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K", usage._old_used / K, ((after._old_length * HeapRegion::GrainBytes) - usage._old_used) / K); - log_info(gc, heap)("Archive regions: " SIZE_FORMAT "->" SIZE_FORMAT, - _before._archive_length, after._archive_length); - log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K", - usage._archive_used / K, ((after._archive_length * HeapRegion::GrainBytes) - usage._archive_used) / K); - log_info(gc, heap)("Humongous regions: " SIZE_FORMAT "->" SIZE_FORMAT, _before._humongous_length, after._humongous_length); log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K", diff --git a/src/hotspot/share/gc/g1/g1HeapTransition.hpp b/src/hotspot/share/gc/g1/g1HeapTransition.hpp index dc7325a26a1..09d901f283c 100644 --- a/src/hotspot/share/gc/g1/g1HeapTransition.hpp +++ b/src/hotspot/share/gc/g1/g1HeapTransition.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2019, Oracle and/or its 
affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,6 @@ class G1HeapTransition { size_t _eden_length; size_t _survivor_length; size_t _old_length; - size_t _archive_length; size_t _humongous_length; const MetaspaceCombinedStats _meta_sizes; diff --git a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp index 8e30eb94a73..ecdf0810e73 100644 --- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp +++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp @@ -126,7 +126,7 @@ class G1VerifyCodeRootOopClosure: public OopClosure { public: G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo): - _g1h(g1h), _root_cl(root_cl), _nm(NULL), _vo(vo), _failures(false) {} + _g1h(g1h), _root_cl(root_cl), _nm(nullptr), _vo(vo), _failures(false) {} void do_oop(oop* p) { do_oop_work(p); } void do_oop(narrowOop* p) { do_oop_work(p); } @@ -144,7 +144,7 @@ public: void do_code_blob(CodeBlob* cb) { nmethod* nm = cb->as_nmethod_or_null(); - if (nm != NULL) { + if (nm != nullptr) { _oop_cl->set_nmethod(nm); nm->oops_do(_oop_cl); } @@ -191,7 +191,7 @@ public: template void do_oop_work(T *p) { oop obj = RawAccess<>::oop_load(p); - guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo), + guarantee(obj == nullptr || !_g1h->is_obj_dead_cond(obj, _vo), "Dead object referenced by a not dead object"); } }; @@ -210,7 +210,7 @@ public: } void do_object(oop o) { VerifyLivenessOopClosure isLive(_g1h, _vo); - assert(o != NULL, "Huh?"); + assert(o != nullptr, "Huh?"); if (!_g1h->is_obj_dead_cond(o, _vo)) { // If the object is alive according to the full gc mark, // then verify that the marking information agrees. 
@@ -233,99 +233,6 @@ public: size_t live_bytes() { return _live_bytes; } }; -class VerifyArchiveOopClosure: public BasicOopIterateClosure { - HeapRegion* _hr; -public: - VerifyArchiveOopClosure(HeapRegion *hr) : _hr(hr) { } - void do_oop(narrowOop *p) { do_oop_work(p); } - void do_oop( oop *p) { do_oop_work(p); } - - template void do_oop_work(T *p) { - oop obj = RawAccess<>::oop_load(p); - - if (_hr->is_open_archive()) { - guarantee(obj == NULL || G1CollectedHeap::heap()->heap_region_containing(obj)->is_archive(), - "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT, - p2i(p), p2i(obj)); - } else { - assert(_hr->is_closed_archive(), "should be closed archive region"); - guarantee(obj == NULL || G1CollectedHeap::heap()->heap_region_containing(obj)->is_closed_archive(), - "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT, - p2i(p), p2i(obj)); - } - } -}; - -class VerifyObjectInArchiveRegionClosure: public ObjectClosure { - HeapRegion* _hr; -public: - VerifyObjectInArchiveRegionClosure(HeapRegion *hr, bool verbose) - : _hr(hr) { } - // Verify that all object pointers are to archive regions. 
- void do_object(oop o) { - VerifyArchiveOopClosure checkOop(_hr); - assert(o != NULL, "Should not be here for NULL oops"); - o->oop_iterate(&checkOop); - } -}; - -// Should be only used at CDS dump time -class VerifyReadyForArchivingRegionClosure : public HeapRegionClosure { - bool _seen_free; - bool _has_holes; - bool _has_unexpected_holes; - bool _has_humongous; -public: - bool has_holes() {return _has_holes;} - bool has_unexpected_holes() {return _has_unexpected_holes;} - bool has_humongous() {return _has_humongous;} - - VerifyReadyForArchivingRegionClosure() : HeapRegionClosure() { - _seen_free = false; - _has_holes = false; - _has_unexpected_holes = false; - _has_humongous = false; - } - virtual bool do_heap_region(HeapRegion* hr) { - const char* hole = ""; - - if (hr->is_free()) { - _seen_free = true; - } else { - if (_seen_free) { - _has_holes = true; - if (hr->is_humongous()) { - hole = " hole"; - } else { - _has_unexpected_holes = true; - hole = " hole **** unexpected ****"; - } - } - } - if (hr->is_humongous()) { - _has_humongous = true; - } - log_info(gc, region, cds)("HeapRegion " PTR_FORMAT " %s%s", p2i(hr->bottom()), hr->get_type_str(), hole); - return false; - } -}; - -class VerifyArchivePointerRegionClosure: public HeapRegionClosure { - virtual bool do_heap_region(HeapRegion* r) { - if (r->is_archive()) { - VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false); - r->object_iterate(&verify_oop_pointers); - } - return false; - } -}; - -void G1HeapVerifier::verify_archive_regions() { - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - VerifyArchivePointerRegionClosure cl; - g1h->heap_region_iterate(&cl); -} - class VerifyRegionClosure: public HeapRegionClosure { private: VerifyOption _vo; @@ -346,14 +253,7 @@ public: // Humongous and old regions regions might be of any state, so can't check here. 
guarantee(!r->is_free() || !r->rem_set()->is_tracked(), "Remembered set for free region %u must be untracked, is %s", r->hrm_index(), r->rem_set()->get_state_str()); - // For archive regions, verify there are no heap pointers to non-pinned regions. - if (r->is_closed_archive()) { - VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false); - r->object_iterate(&verify_oop_pointers); - } else if (r->is_open_archive()) { - VerifyObjsInRegionClosure verify_open_archive_oop(r, _vo); - r->object_iterate(&verify_open_archive_oop); - } else if (r->is_continues_humongous()) { + if (r->is_continues_humongous()) { // Verify that the continues humongous regions' remembered set state // matches the one from the starts humongous region. if (r->rem_set()->get_state_str() != r->humongous_start_region()->rem_set()->get_state_str()) { @@ -482,22 +382,19 @@ void G1HeapVerifier::verify(VerifyOption vo) { class VerifyRegionListsClosure : public HeapRegionClosure { private: HeapRegionSet* _old_set; - HeapRegionSet* _archive_set; HeapRegionSet* _humongous_set; HeapRegionManager* _hrm; public: uint _old_count; - uint _archive_count; uint _humongous_count; uint _free_count; VerifyRegionListsClosure(HeapRegionSet* old_set, - HeapRegionSet* archive_set, HeapRegionSet* humongous_set, HeapRegionManager* hrm) : - _old_set(old_set), _archive_set(archive_set), _humongous_set(humongous_set), _hrm(hrm), - _old_count(), _archive_count(), _humongous_count(), _free_count(){ } + _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm), + _old_count(), _humongous_count(), _free_count(){ } bool do_heap_region(HeapRegion* hr) { if (hr->is_young()) { @@ -508,24 +405,17 @@ public: } else if (hr->is_empty()) { assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index()); _free_count++; - } else if (hr->is_archive()) { - assert(hr->containing_set() == _archive_set, "Heap region %u is archive but not in the archive set.", hr->hrm_index()); - _archive_count++; } else 
if (hr->is_old()) { assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index()); _old_count++; } else { - // There are no other valid region types. Check for one invalid - // one we can identify: pinned without old or humongous set. - assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()); - ShouldNotReachHere(); + fatal("Invalid region type for region %u (%s)", hr->hrm_index(), hr->get_short_type_str()); } return false; } - void verify_counts(HeapRegionSet* old_set, HeapRegionSet* archive_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) { + void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) { guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count); - guarantee(archive_set->length() == _archive_count, "Archive set count mismatch. Expected %u, actual %u.", archive_set->length(), _archive_count); guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count); guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count); } @@ -540,9 +430,9 @@ void G1HeapVerifier::verify_region_sets() { // Finally, make sure that the region accounting in the lists is // consistent with what we see in the heap. 
- VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm); + VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm); _g1h->heap_region_iterate(&cl); - cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm); + cl.verify_counts(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm); } void G1HeapVerifier::prepare_for_verify() { @@ -693,11 +583,6 @@ public: return true; } if (region_attr.is_in_cset()) { - if (hr->is_archive()) { - log_error(gc, verify)("## is_archive in collection set for region %u", i); - _failures = true; - return true; - } if (hr->is_young() != (region_attr.is_young())) { log_error(gc, verify)("## is_young %d / region attr type %s inconsistency for region %u", hr->is_young(), region_attr.get_type_str(), i); diff --git a/src/hotspot/share/gc/g1/g1HeapVerifier.hpp b/src/hotspot/share/gc/g1/g1HeapVerifier.hpp index 8d25457e060..86482ac7a20 100644 --- a/src/hotspot/share/gc/g1/g1HeapVerifier.hpp +++ b/src/hotspot/share/gc/g1/g1HeapVerifier.hpp @@ -80,8 +80,6 @@ public: void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN; void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN; void verify_dirty_young_regions() PRODUCT_RETURN; - - static void verify_archive_regions(); }; #endif // SHARE_GC_G1_G1HEAPVERIFIER_HPP diff --git a/src/hotspot/share/gc/g1/g1MMUTracker.cpp b/src/hotspot/share/gc/g1/g1MMUTracker.cpp index 77366c0b73c..7fb29eb39f1 100644 --- a/src/hotspot/share/gc/g1/g1MMUTracker.cpp +++ b/src/hotspot/share/gc/g1/g1MMUTracker.cpp @@ -115,13 +115,13 @@ void G1MMUTracker::add_pause(double start, double end) { // GC events / pause_time // / | \ \ | / / // -------------[----]-[---]--[--]---[---]------|[--]-----> Time -// | | | -// | | | -// |<- limit | | -// | |<- balance_timestamp | -// | ^ | -// | | -// |<-------- _time_slice ------>| +// | | | +// | | | +// |<- limit | | +// | |<- balance_timestamp | +// | ^ | +// | | +// |<-------- 
_time_slice --------->| // // The MMU constraint requires that we can spend up to `max_gc_time()` on GC // pauses inside a window of `_time_slice` length. Therefore, we have a GC @@ -134,7 +134,7 @@ void G1MMUTracker::add_pause(double start, double end) { // time inside [balance_timestamp, current_timestamp] is equal to the budget. // Next, return `balance_timestamp - limit`. // -// When there are no enough GC events, i.e. we have a surplus buget, a new GC +// When there are not enough GC events, i.e. we have a surplus budget, a new GC // pause can start right away, so return 0. double G1MMUTracker::when_sec(double current_timestamp, double pause_time) { assert(pause_time > 0.0, "precondition"); diff --git a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp index 680e989c853..fccb4aa7cd5 100644 --- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp +++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -88,21 +88,21 @@ public: G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) : _g1h(g1h), - _young_gc_memory_manager("G1 Young Generation", "end of minor GC"), - _full_gc_memory_manager("G1 Old Generation", "end of major GC"), - _conc_gc_memory_manager("G1 Concurrent GC", "end of concurrent GC pause"), - _eden_space_pool(NULL), - _survivor_space_pool(NULL), - _old_gen_pool(NULL), - _young_collection_counters(NULL), - _full_collection_counters(NULL), - _conc_collection_counters(NULL), - _young_gen_counters(NULL), - _old_gen_counters(NULL), - _old_space_counters(NULL), - _eden_space_counters(NULL), - _from_space_counters(NULL), - _to_space_counters(NULL), + _young_gc_memory_manager("G1 Young Generation"), + _full_gc_memory_manager("G1 Old Generation"), + _conc_gc_memory_manager("G1 Concurrent GC"), + _eden_space_pool(nullptr), + _survivor_space_pool(nullptr), + _old_gen_pool(nullptr), + _young_collection_counters(nullptr), + _full_collection_counters(nullptr), + _conc_collection_counters(nullptr), + _young_gen_counters(nullptr), + _old_gen_counters(nullptr), + _old_space_counters(nullptr), + _eden_space_counters(nullptr), + _from_space_counters(nullptr), + _to_space_counters(nullptr), _overall_committed(0), _overall_used(0), @@ -351,11 +351,14 @@ MemoryUsage G1MonitoringSupport::old_gen_memory_usage(size_t initial_size, size_ G1MonitoringScope::G1MonitoringScope(G1MonitoringSupport* monitoring_support, CollectorCounters* collection_counters, GCMemoryManager* gc_memory_manager, + const char* end_message, bool all_memory_pools_affected) : _monitoring_support(monitoring_support), _tcs(collection_counters), _tms(gc_memory_manager, - G1CollectedHeap::heap()->gc_cause(), all_memory_pools_affected) { + G1CollectedHeap::heap()->gc_cause(), + end_message, + all_memory_pools_affected) { } G1MonitoringScope::~G1MonitoringScope() { @@ -369,17 +372,20 @@ 
G1YoungGCMonitoringScope::G1YoungGCMonitoringScope(G1MonitoringSupport* monitori G1MonitoringScope(monitoring_support, monitoring_support->_young_collection_counters, &monitoring_support->_young_gc_memory_manager, + "end of minor GC", all_memory_pools_affected) { } G1FullGCMonitoringScope::G1FullGCMonitoringScope(G1MonitoringSupport* monitoring_support) : G1MonitoringScope(monitoring_support, monitoring_support->_full_collection_counters, - &monitoring_support->_full_gc_memory_manager) { + &monitoring_support->_full_gc_memory_manager, + "end of major GC") { } G1ConcGCMonitoringScope::G1ConcGCMonitoringScope(G1MonitoringSupport* monitoring_support) : G1MonitoringScope(monitoring_support, monitoring_support->_conc_collection_counters, - &monitoring_support->_conc_gc_memory_manager) { + &monitoring_support->_conc_gc_memory_manager, + "end of concurrent GC pause") { } diff --git a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp index bae4ade0e43..d812d811aee 100644 --- a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp +++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp @@ -244,6 +244,7 @@ protected: G1MonitoringScope(G1MonitoringSupport* monitoring_support, CollectorCounters* collection_counters, GCMemoryManager* gc_memory_manager, + const char* end_message, bool all_memory_pools_affected = true); ~G1MonitoringScope(); }; diff --git a/src/hotspot/share/gc/g1/g1NUMA.cpp b/src/hotspot/share/gc/g1/g1NUMA.cpp index 10ebfa40103..c94dbc32bc6 100644 --- a/src/hotspot/share/gc/g1/g1NUMA.cpp +++ b/src/hotspot/share/gc/g1/g1NUMA.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ #include "runtime/globals.hpp" #include "runtime/os.hpp" -G1NUMA* G1NUMA::_inst = NULL; +G1NUMA* G1NUMA::_inst = nullptr; size_t G1NUMA::region_size() const { assert(_region_size > 0, "Heap region size is not yet set"); @@ -44,7 +44,7 @@ size_t G1NUMA::page_size() const { bool G1NUMA::is_enabled() const { return num_active_nodes() > 1; } G1NUMA* G1NUMA::create() { - guarantee(_inst == NULL, "Should be called once."); + guarantee(_inst == nullptr, "Should be called once."); _inst = new G1NUMA(); // NUMA only supported on Linux. @@ -72,9 +72,9 @@ uint G1NUMA::index_of_node_id(int node_id) const { } G1NUMA::G1NUMA() : - _node_id_to_index_map(NULL), _len_node_id_to_index_map(0), - _node_ids(NULL), _num_active_node_ids(0), - _region_size(0), _page_size(0), _stats(NULL) { + _node_id_to_index_map(nullptr), _len_node_id_to_index_map(0), + _node_ids(nullptr), _num_active_node_ids(0), + _region_size(0), _page_size(0), _stats(nullptr) { } void G1NUMA::initialize_without_numa() { @@ -232,7 +232,7 @@ uint G1NUMA::max_search_depth() const { void G1NUMA::update_statistics(G1NUMAStats::NodeDataItems phase, uint requested_node_index, uint allocated_node_index) { - if (_stats == NULL) { + if (_stats == nullptr) { return; } @@ -250,7 +250,7 @@ void G1NUMA::update_statistics(G1NUMAStats::NodeDataItems phase, void G1NUMA::copy_statistics(G1NUMAStats::NodeDataItems phase, uint requested_node_index, size_t* allocated_stat) { - if (_stats == NULL) { + if (_stats == nullptr) { return; } @@ -258,7 +258,7 @@ void G1NUMA::copy_statistics(G1NUMAStats::NodeDataItems phase, } void G1NUMA::print_statistics() const { - if (_stats == NULL) { + if (_stats == nullptr) { return; } diff --git a/src/hotspot/share/gc/g1/g1NUMAStats.cpp b/src/hotspot/share/gc/g1/g1NUMAStats.cpp index 5509f1e7424..e674868401a 100644 --- a/src/hotspot/share/gc/g1/g1NUMAStats.cpp +++ b/src/hotspot/share/gc/g1/g1NUMAStats.cpp @@ -1,5 
+1,5 @@ /* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,7 +64,7 @@ void G1NUMAStats::NodeDataArray::create_hit_rate(Stat* result) const { } } - assert(result != NULL, "Invariant"); + assert(result != nullptr, "Invariant"); result->_hit = hit; result->_requested = requested; } @@ -77,7 +77,7 @@ void G1NUMAStats::NodeDataArray::create_hit_rate(Stat* result, uint req_index) c requested += _data[req_index][column]; } - assert(result != NULL, "Invariant"); + assert(result != nullptr, "Invariant"); result->_hit = hit; result->_requested = requested; } @@ -112,7 +112,7 @@ size_t G1NUMAStats::NodeDataArray::get(uint req_index, uint alloc_index) { } void G1NUMAStats::NodeDataArray::copy(uint req_index, size_t* stat) { - assert(stat != NULL, "Invariant"); + assert(stat != nullptr, "Invariant"); for (uint column = 0; column < _num_column; column++) { _data[req_index][column] += stat[column]; diff --git a/src/hotspot/share/gc/g1/g1OopClosures.cpp b/src/hotspot/share/gc/g1/g1OopClosures.cpp index 50f76de3250..9b30595e82f 100644 --- a/src/hotspot/share/gc/g1/g1OopClosures.cpp +++ b/src/hotspot/share/gc/g1/g1OopClosures.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1h, G1ParScanThreadState* pa _g1h(g1h), _par_scan_state(par_scan_state), _worker_id(par_scan_state->worker_id()), - _scanned_cld(NULL), + _scanned_cld(nullptr), _cm(_g1h->concurrent_mark()) { } @@ -52,7 +52,7 @@ void G1CLDScanClosure::do_cld(ClassLoaderData* cld) { // Clean modified oops since we're going to scavenge all the metadata. cld->oops_do(_closure, ClassLoaderData::_claim_none, true /*clear_modified_oops*/); - _closure->set_scanned_cld(NULL); + _closure->set_scanned_cld(nullptr); _closure->trim_queue_partially(); } diff --git a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp index fb7b0dafa15..ea7f690d325 100644 --- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp +++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -148,7 +148,7 @@ inline void G1ConcurrentRefineOopClosure::do_oop_work(T* p) { HeapRegionRemSet* to_rem_set = _g1h->heap_region_containing(obj)->rem_set(); - assert(to_rem_set != NULL, "Need per-region 'into' remsets."); + assert(to_rem_set != nullptr, "Need per-region 'into' remsets."); if (to_rem_set->is_tracked()) { to_rem_set->add_reference(p, _worker_id); } @@ -232,7 +232,7 @@ void G1ParCopyClosure::do_oop_work(T* p) { } else { forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m); } - assert(forwardee != NULL, "forwardee should not be NULL"); + assert(forwardee != nullptr, "forwardee should not be null"); RawAccess::oop_store(p, forwardee); if (barrier == G1BarrierCLD) { @@ -257,7 +257,7 @@ void G1ParCopyClosure::do_oop_work(T* p) { template void G1RebuildRemSetClosure::do_oop_work(T* p) { oop const obj = RawAccess::oop_load(p); - if (obj == NULL) { + if (obj == nullptr) { return; } diff --git a/src/hotspot/share/gc/g1/g1OopStarChunkedList.hpp b/src/hotspot/share/gc/g1/g1OopStarChunkedList.hpp index 24542dbde22..67cc9f1ecd1 100644 --- a/src/hotspot/share/gc/g1/g1OopStarChunkedList.hpp +++ b/src/hotspot/share/gc/g1/g1OopStarChunkedList.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -48,7 +48,7 @@ class G1OopStarChunkedList : public CHeapObj { inline void push(ChunkedList** field, T* p); public: - G1OopStarChunkedList() : _used_memory(0), _roots(NULL), _croots(NULL), _oops(NULL), _coops(NULL) {} + G1OopStarChunkedList() : _used_memory(0), _roots(nullptr), _croots(nullptr), _oops(nullptr), _coops(nullptr) {} ~G1OopStarChunkedList(); size_t used_memory() { return _used_memory; } diff --git a/src/hotspot/share/gc/g1/g1OopStarChunkedList.inline.hpp b/src/hotspot/share/gc/g1/g1OopStarChunkedList.inline.hpp index 9fc33f4eef0..649d9bdebc4 100644 --- a/src/hotspot/share/gc/g1/g1OopStarChunkedList.inline.hpp +++ b/src/hotspot/share/gc/g1/g1OopStarChunkedList.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ template inline void G1OopStarChunkedList::push(ChunkedList** field, T* p) { ChunkedList* list = *field; - if (list == NULL) { + if (list == nullptr) { *field = new ChunkedList(); _used_memory += sizeof(ChunkedList); } else if (list->is_full()) { @@ -65,7 +65,7 @@ inline void G1OopStarChunkedList::push_oop(oop* p) { template void G1OopStarChunkedList::delete_list(ChunkedList* c) { - while (c != NULL) { + while (c != nullptr) { ChunkedList* next = c->next_used(); delete c; c = next; @@ -75,7 +75,7 @@ void G1OopStarChunkedList::delete_list(ChunkedList* c) { template size_t G1OopStarChunkedList::chunks_do(ChunkedList* head, OopClosure* cl) { size_t result = 0; - for (ChunkedList* c = head; c != NULL; c = c->next_used()) { + for (ChunkedList* c = head; c != nullptr; c = c->next_used()) { result += c->size(); for (size_t i = 0; i < c->size(); i++) { T* p = c->at(i); diff --git a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp index a42e5350ad1..87606d5c23f 100644 --- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp +++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp @@ -35,7 +35,7 @@ #include "utilities/bitMap.inline.hpp" G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) : - _low_boundary(NULL), _high_boundary(NULL), _tail_size(0), _page_size(0), + _low_boundary(nullptr), _high_boundary(nullptr), _tail_size(0), _page_size(0), _committed(mtGC), _dirty(mtGC), _special(false) { assert(!rs.executable(), "precondition"); initialize_with_page_size(rs, used_size, page_size); @@ -44,7 +44,7 @@ G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_s void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) { guarantee(rs.is_reserved(), "Given reserved space must have been reserved already."); - 
vmassert(_low_boundary == NULL, "VirtualSpace already initialized"); + vmassert(_low_boundary == nullptr, "VirtualSpace already initialized"); vmassert(page_size > 0, "Page size must be non-zero."); guarantee(is_aligned(rs.base(), page_size), @@ -76,8 +76,8 @@ void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() { // This does not release memory it never reserved. // Caller must release via rs.release(); - _low_boundary = NULL; - _high_boundary = NULL; + _low_boundary = nullptr; + _high_boundary = nullptr; _special = false; _page_size = 0; _tail_size = 0; diff --git a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp index cbb6dff9043..5bbc387ed4e 100644 --- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp +++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -111,7 +111,7 @@ class G1PageBasedVirtualSpace { // Uncommit the given area of pages starting at start being size_in_pages large. void uncommit(size_t start_page, size_t size_in_pages); - void pretouch(size_t start_page, size_t size_in_pages, WorkerThreads* pretouch_workers = NULL); + void pretouch(size_t start_page, size_t size_in_pages, WorkerThreads* pretouch_workers = nullptr); // Initialize the given reserved space with the given base address and the size // actually used. 
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp index 4b650004630..5c558bb237e 100644 --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, G1RedirtyCardsQueueSet* rdcqs, PreservedMarks* preserved_marks, uint worker_id, - uint n_workers, + uint num_workers, size_t young_cset_length, size_t optional_cset_length, G1EvacFailureRegions* evac_failure_regions) @@ -66,8 +66,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, _task_queue(g1h->task_queue(worker_id)), _rdc_local_qset(rdcqs), _ct(g1h->card_table()), - _closures(NULL), - _plab_allocator(NULL), + _closures(nullptr), + _plab_allocator(nullptr), _age_table(false), _tenuring_threshold(g1h->policy()->tenuring_threshold()), _scanner(g1h, this), @@ -76,16 +76,16 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1), _stack_trim_lower_threshold(GCDrainStackTargetSize), _trim_ticks(), - _surviving_young_words_base(NULL), - _surviving_young_words(NULL), + _surviving_young_words_base(nullptr), + _surviving_young_words(nullptr), _surviving_words_length(young_cset_length + 1), _old_gen_is_full(false), _partial_objarray_chunk_size(ParGCArrayScanChunk), - _partial_array_stepper(n_workers), + _partial_array_stepper(num_workers), _string_dedup_requests(), - _num_optional_regions(optional_cset_length), + _max_num_optional_regions(optional_cset_length), _numa(g1h->numa()), - _obj_alloc_stat(NULL), + _obj_alloc_stat(nullptr), 
EVAC_FAILURE_INJECTOR_ONLY(_evac_failure_inject_counter(0) COMMA) _preserved_marks(preserved_marks), _evacuation_failed_info(), @@ -106,7 +106,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h); - _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions]; + _oops_into_optional_regions = new G1OopStarChunkedList[_max_num_optional_regions]; initialize_numa_stats(); } @@ -148,7 +148,7 @@ size_t G1ParScanThreadState::lab_undo_waste_words() const { #ifdef ASSERT void G1ParScanThreadState::verify_task(narrowOop* task) const { - assert(task != NULL, "invariant"); + assert(task != nullptr, "invariant"); assert(UseCompressedOops, "sanity"); oop p = RawAccess<>::oop_load(task); assert(_g1h->is_in_reserved(p), @@ -156,7 +156,7 @@ void G1ParScanThreadState::verify_task(narrowOop* task) const { } void G1ParScanThreadState::verify_task(oop* task) const { - assert(task != NULL, "invariant"); + assert(task != nullptr, "invariant"); oop p = RawAccess<>::oop_load(task); assert(_g1h->is_in_reserved(p), "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p)); @@ -184,7 +184,7 @@ void G1ParScanThreadState::verify_task(ScannerTask task) const { template MAYBE_INLINE_EVACUATION void G1ParScanThreadState::do_oop_evac(T* p) { - // Reference should not be NULL here as such are never pushed to the task queue. + // Reference should not be null here as such are never pushed to the task queue. oop obj = RawAccess::oop_load(p); // Although we never intentionally push references outside of the collection @@ -346,7 +346,7 @@ HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest, _tenuring_threshold = 0; } - if (obj_ptr != NULL) { + if (obj_ptr != nullptr) { dest->set_old(); } else { // We just failed to allocate in old gen. 
The same idea as explained above @@ -358,7 +358,7 @@ HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest, _old_gen_is_full = previous_plab_refill_failed; assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str()); // no other space to try. - return NULL; + return nullptr; } } @@ -396,7 +396,7 @@ HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr, size_t word_sz, uint age, uint node_index) { - HeapWord* obj_ptr = NULL; + HeapWord* obj_ptr = nullptr; // Try slow-path allocation unless we're allocating old and old is already full. if (!(dest_attr->is_old() && _old_gen_is_full)) { bool plab_refill_failed = false; @@ -404,14 +404,14 @@ HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr, word_sz, &plab_refill_failed, node_index); - if (obj_ptr == NULL) { + if (obj_ptr == nullptr) { obj_ptr = allocate_in_next_plab(dest_attr, word_sz, plab_refill_failed, node_index); } } - if (obj_ptr != NULL) { + if (obj_ptr != nullptr) { update_numa_stats(node_index); if (_g1h->gc_tracer_stw()->should_report_promotion_events()) { // The events are checked individually as part of the actual commit @@ -463,17 +463,17 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index); // PLAB allocations should succeed most of the time, so we'll - // normally check against NULL once and that's it. - if (obj_ptr == NULL) { + // normally check against null once and that's it. + if (obj_ptr == nullptr) { obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index); - if (obj_ptr == NULL) { + if (obj_ptr == nullptr) { // This will either forward-to-self, or detect that someone else has // installed a forwarding pointer. 
return handle_evacuation_failure_par(old, old_mark, word_sz); } } - assert(obj_ptr != NULL, "when we get here, allocation should have succeeded"); + assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded"); assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap"); // Should this evacuation fail? @@ -494,7 +494,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio // examine its contents without other synchronization, since the contents // may not be up to date for them. const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed); - if (forward_ptr == NULL) { + if (forward_ptr == nullptr) { { const uint young_index = from_region->young_index_in_cset(); @@ -562,13 +562,15 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr region_attr, } G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) { - assert(worker_id < _n_workers, "out of bounds access"); - if (_states[worker_id] == NULL) { + assert(worker_id < _num_workers, "out of bounds access"); + if (_states[worker_id] == nullptr) { _states[worker_id] = new G1ParScanThreadState(_g1h, rdcqs(), _preserved_marks_set.get(worker_id), - worker_id, _n_workers, - _young_cset_length, _optional_cset_length, + worker_id, + _num_workers, + _young_cset_length, + _optional_cset_length, _evac_failure_regions); } return _states[worker_id]; @@ -582,7 +584,7 @@ const size_t* G1ParScanThreadStateSet::surviving_young_words() const { void G1ParScanThreadStateSet::flush_stats() { assert(!_flushed, "thread local state from the per thread states should be flushed once"); - for (uint worker_id = 0; worker_id < _n_workers; ++worker_id) { + for (uint worker_id = 0; worker_id < _num_workers; ++worker_id) { G1ParScanThreadState* pss = _states[worker_id]; assert(pss != nullptr, "must be initialized"); @@ -592,20 +594,20 @@ void G1ParScanThreadStateSet::flush_stats() { // because it resets the PLAB 
allocator where we get this info from. size_t lab_waste_bytes = pss->lab_waste_words() * HeapWordSize; size_t lab_undo_waste_bytes = pss->lab_undo_waste_words() * HeapWordSize; - size_t copied_bytes = pss->flush_stats(_surviving_young_words_total, _n_workers) * HeapWordSize; + size_t copied_bytes = pss->flush_stats(_surviving_young_words_total, _num_workers) * HeapWordSize; p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, copied_bytes, G1GCPhaseTimes::MergePSSCopiedBytes); p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_waste_bytes, G1GCPhaseTimes::MergePSSLABWasteBytes); p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_undo_waste_bytes, G1GCPhaseTimes::MergePSSLABUndoWasteBytes); delete pss; - _states[worker_id] = NULL; + _states[worker_id] = nullptr; } _flushed = true; } void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) { - for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) { + for (uint worker_index = 0; worker_index < _num_workers; ++worker_index) { G1ParScanThreadState* pss = _states[worker_index]; assert(pss != nullptr, "must be initialized"); @@ -619,7 +621,7 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old)); oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed); - if (forward_ptr == NULL) { + if (forward_ptr == nullptr) { // Forward-to-self succeeded. We are the "owner" of the object. 
HeapRegion* r = _g1h->heap_region_containing(old); @@ -674,36 +676,36 @@ void G1ParScanThreadState::initialize_numa_stats() { } void G1ParScanThreadState::flush_numa_stats() { - if (_obj_alloc_stat != NULL) { + if (_obj_alloc_stat != nullptr) { uint node_index = _numa->index_of_current_thread(); _numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat); } } void G1ParScanThreadState::update_numa_stats(uint node_index) { - if (_obj_alloc_stat != NULL) { + if (_obj_alloc_stat != nullptr) { _obj_alloc_stat[node_index]++; } } G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, - uint n_workers, + uint num_workers, size_t young_cset_length, size_t optional_cset_length, G1EvacFailureRegions* evac_failure_regions) : _g1h(g1h), _rdcqs(G1BarrierSet::dirty_card_queue_set().allocator()), _preserved_marks_set(true /* in_c_heap */), - _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)), + _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, num_workers, mtGC)), _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length + 1, mtGC)), _young_cset_length(young_cset_length), _optional_cset_length(optional_cset_length), - _n_workers(n_workers), + _num_workers(num_workers), _flushed(false), _evac_failure_regions(evac_failure_regions) { - _preserved_marks_set.init(n_workers); - for (uint i = 0; i < n_workers; ++i) { - _states[i] = NULL; + _preserved_marks_set.init(num_workers); + for (uint i = 0; i < num_workers; ++i) { + _states[i] = nullptr; } memset(_surviving_young_words_total, 0, (young_cset_length + 1) * sizeof(size_t)); } diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp index bd8da47fefc..ab31a6d88ca 100644 --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -92,7 +92,8 @@ class G1ParScanThreadState : public CHeapObj { G1CardTable* ct() { return _ct; } - size_t _num_optional_regions; + // Maximum number of optional regions at start of gc. + size_t _max_num_optional_regions; G1OopStarChunkedList* _oops_into_optional_regions; G1NUMA* _numa; @@ -115,7 +116,7 @@ public: G1RedirtyCardsQueueSet* rdcqs, PreservedMarks* preserved_marks, uint worker_id, - uint n_workers, + uint num_workers, size_t young_cset_length, size_t optional_cset_length, G1EvacFailureRegions* evac_failure_regions); @@ -184,7 +185,7 @@ private: // Tries to allocate word_sz in the PLAB of the next "generation" after trying to // allocate into dest. Previous_plab_refill_failed indicates whether previous // PLAB refill for the original (source) object failed. - // Returns a non-NULL pointer if successful, and updates dest if required. + // Returns a non-null pointer if successful, and updates dest if required. // Also determines whether we should continue to try to allocate into the various // generations or just end trying to allocate. 
HeapWord* allocate_in_next_plab(G1HeapRegionAttr* dest, @@ -236,13 +237,13 @@ class G1ParScanThreadStateSet : public StackObj { size_t* _surviving_young_words_total; size_t _young_cset_length; size_t _optional_cset_length; - uint _n_workers; + uint _num_workers; bool _flushed; G1EvacFailureRegions* _evac_failure_regions; public: G1ParScanThreadStateSet(G1CollectedHeap* g1h, - uint n_workers, + uint num_workers, size_t young_cset_length, size_t optional_cset_length, G1EvacFailureRegions* evac_failure_regions); diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp index c37f0f38e51..41553e6bf19 100644 --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp @@ -74,8 +74,8 @@ template inline void G1ParScanThreadState::remember_root_into_optional_region(T* p) { oop o = RawAccess::oop_load(p); uint index = _g1h->heap_region_containing(o)->index_in_opt_cset(); - assert(index < _num_optional_regions, - "Trying to access optional region idx %u beyond " SIZE_FORMAT, index, _num_optional_regions); + assert(index < _max_num_optional_regions, + "Trying to access optional region idx %u beyond " SIZE_FORMAT, index, _max_num_optional_regions); _oops_into_optional_regions[index].push_root(p); } @@ -83,16 +83,16 @@ template inline void G1ParScanThreadState::remember_reference_into_optional_region(T* p) { oop o = RawAccess::oop_load(p); uint index = _g1h->heap_region_containing(o)->index_in_opt_cset(); - assert(index < _num_optional_regions, - "Trying to access optional region idx %u beyond " SIZE_FORMAT, index, _num_optional_regions); + assert(index < _max_num_optional_regions, + "Trying to access optional region idx %u beyond " SIZE_FORMAT, index, _max_num_optional_regions); _oops_into_optional_regions[index].push_oop(p); verify_task(p); } G1OopStarChunkedList* G1ParScanThreadState::oops_into_optional_region(const HeapRegion* hr) { - 
assert(hr->index_in_opt_cset() < _num_optional_regions, + assert(hr->index_in_opt_cset() < _max_num_optional_regions, "Trying to access optional region idx %u beyond " SIZE_FORMAT " " HR_FORMAT, - hr->index_in_opt_cset(), _num_optional_regions, HR_FORMAT_PARAMS(hr)); + hr->index_in_opt_cset(), _max_num_optional_regions, HR_FORMAT_PARAMS(hr)); return &_oops_into_optional_regions[hr->index_in_opt_cset()]; } diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp index c37bc12b86a..7847179a55d 100644 --- a/src/hotspot/share/gc/g1/g1Policy.cpp +++ b/src/hotspot/share/gc/g1/g1Policy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -73,10 +73,10 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) : _rs_length(0), _pending_cards_at_gc_start(0), _concurrent_start_to_mixed(), - _collection_set(NULL), - _g1h(NULL), + _collection_set(nullptr), + _g1h(nullptr), _phase_times_timer(gc_timer), - _phase_times(NULL), + _phase_times(nullptr), _mark_remark_start_sec(0), _mark_cleanup_start_sec(0), _tenuring_threshold(MaxTenuringThreshold), @@ -517,7 +517,7 @@ double G1Policy::predict_survivor_regions_evac_time() const { G1GCPhaseTimes* G1Policy::phase_times() const { // Lazy allocation because it must follow initialization of all the // OopStorage objects by various other subsystems. 
- if (_phase_times == NULL) { + if (_phase_times == nullptr) { _phase_times = new G1GCPhaseTimes(_phase_times_timer, ParallelGCThreads); } return _phase_times; @@ -722,8 +722,15 @@ double G1Policy::logged_cards_processing_time() const { size_t logged_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards); size_t scan_heap_roots_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) + phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards); - // This may happen if there are duplicate cards in different log buffers. - if (logged_dirty_cards > scan_heap_roots_cards) { + // Approximate the time spent processing cards from log buffers by scaling + // the total processing time by the ratio of logged cards to total cards + // processed. There might be duplicate cards in different log buffers, + // leading to an overestimate. That effect should be relatively small + // unless there are few cards to process, because cards in buffers are + // dirtied to limit duplication. Also need to avoid scaling when both + // counts are zero, which happens especially during early GCs. So ascribe + // all of the time to the logged cards unless there are more total cards. 
+ if (logged_dirty_cards >= scan_heap_roots_cards) { return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB); } return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB); @@ -1028,7 +1035,7 @@ double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) co return 0.0; } size_t const expected_bytes = _eden_surv_rate_group->accum_surv_rate_pred(count) * HeapRegion::GrainBytes; - if (bytes_to_copy != NULL) { + if (bytes_to_copy != nullptr) { *bytes_to_copy = expected_bytes; } return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->in_young_only_phase()); @@ -1274,7 +1281,7 @@ class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure { }; void G1Policy::clear_collection_set_candidates() { - if (_collection_set->candidates() == NULL) { + if (_collection_set->candidates() == nullptr) { return; } // Clear remembered sets of remaining candidate regions and the actual candidate @@ -1366,7 +1373,7 @@ void G1Policy::abort_time_to_mixed_tracking() { bool G1Policy::next_gc_should_be_mixed(const char* no_candidates_str) const { G1CollectionSetCandidates* candidates = _collection_set->candidates(); - if (candidates == NULL || candidates->is_empty()) { + if (candidates == nullptr || candidates->is_empty()) { if (no_candidates_str != nullptr) { log_debug(gc, ergo)("%s (candidate old regions not available)", no_candidates_str); } @@ -1411,7 +1418,7 @@ void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* c double time_remaining_ms, uint& num_initial_regions, uint& num_optional_regions) { - assert(candidates != NULL, "Must be"); + assert(candidates != nullptr, "Must be"); num_initial_regions = 0; num_optional_regions = 0; @@ -1434,7 +1441,7 @@ void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* c min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms); HeapRegion* hr 
= candidates->at(candidate_idx); - while (hr != NULL) { + while (hr != nullptr) { if (num_initial_regions + num_optional_regions >= max_old_cset_length) { // Added maximum number of old regions to the CSet. log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Maximum number of regions). " @@ -1475,7 +1482,7 @@ void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* c } hr = candidates->at(++candidate_idx); } - if (hr == NULL) { + if (hr == nullptr) { log_debug(gc, ergo, cset)("Old candidate collection set empty."); } @@ -1502,7 +1509,7 @@ void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidat HeapRegion* r = candidates->at(candidate_idx); while (num_optional_regions < max_optional_regions) { - assert(r != NULL, "Region must exist"); + assert(r != nullptr, "Region must exist"); double prediction_ms = predict_region_total_time_ms(r, false); if (prediction_ms > time_remaining_ms) { diff --git a/src/hotspot/share/gc/g1/g1Policy.hpp b/src/hotspot/share/gc/g1/g1Policy.hpp index 24c8d3bd937..41ee9bfb9a8 100644 --- a/src/hotspot/share/gc/g1/g1Policy.hpp +++ b/src/hotspot/share/gc/g1/g1Policy.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -154,7 +154,7 @@ public: // Predict other time for count young regions. double predict_young_region_other_time_ms(uint count) const; // Predict copying live data time for count eden regions. Return the predict bytes if - // bytes_to_copy is non-nullptr. + // bytes_to_copy is non-null. double predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy = nullptr) const; // Total time for a region is handling remembered sets (as a single unit), copying its live data // and other time. 
@@ -313,7 +313,7 @@ public: bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0); - bool concurrent_operation_is_full_mark(const char* msg = NULL); + bool concurrent_operation_is_full_mark(const char* msg = nullptr); bool about_to_start_mixed_phase() const; diff --git a/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp b/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp index d6c2ae148be..a9a06b2c82e 100644 --- a/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp +++ b/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,8 +39,8 @@ G1RedirtyCardsLocalQueueSet::G1RedirtyCardsLocalQueueSet(G1RedirtyCardsQueueSet* #ifdef ASSERT G1RedirtyCardsLocalQueueSet::~G1RedirtyCardsLocalQueueSet() { - assert(_buffers._head == NULL, "unflushed qset"); - assert(_buffers._tail == NULL, "invariant"); + assert(_buffers._head == nullptr, "unflushed qset"); + assert(_buffers._tail == nullptr, "invariant"); assert(_buffers._entry_count == 0, "invariant"); } #endif // ASSERT @@ -49,7 +49,7 @@ void G1RedirtyCardsLocalQueueSet::enqueue_completed_buffer(BufferNode* node) { _buffers._entry_count += buffer_size() - node->index(); node->set_next(_buffers._head); _buffers._head = node; - if (_buffers._tail == NULL) { + if (_buffers._tail == nullptr) { _buffers._tail = node; } } @@ -88,7 +88,7 @@ G1RedirtyCardsQueueSet::G1RedirtyCardsQueueSet(BufferNode::Allocator* allocator) PtrQueueSet(allocator), _list(), _entry_count(0), - _tail(NULL) + _tail(nullptr) DEBUG_ONLY(COMMA _collecting(true)) {} @@ -99,7 +99,7 @@ G1RedirtyCardsQueueSet::~G1RedirtyCardsQueueSet() { #ifdef ASSERT void G1RedirtyCardsQueueSet::verify_empty() const { assert(_list.empty(), "precondition"); 
- assert(_tail == NULL, "invariant"); + assert(_tail == nullptr, "invariant"); assert(_entry_count == 0, "invariant"); } #endif // ASSERT @@ -112,7 +112,7 @@ BufferNode* G1RedirtyCardsQueueSet::all_completed_buffers() const { BufferNodeList G1RedirtyCardsQueueSet::take_all_completed_buffers() { DEBUG_ONLY(_collecting = false;) BufferNodeList result(_list.pop_all(), _tail, _entry_count); - _tail = NULL; + _tail = nullptr; _entry_count = 0; DEBUG_ONLY(_collecting = true;) return result; @@ -120,10 +120,10 @@ BufferNodeList G1RedirtyCardsQueueSet::take_all_completed_buffers() { void G1RedirtyCardsQueueSet::update_tail(BufferNode* node) { // Node is the tail of a (possibly single element) list just prepended to - // _list. If, after that prepend, node's follower is NULL, then node is + // _list. If, after that prepend, node's follower is null, then node is // also the tail of _list, so record it as such. - if (node->next() == NULL) { - assert(_tail == NULL, "invariant"); + if (node->next() == nullptr) { + assert(_tail == nullptr, "invariant"); _tail = node; } } @@ -137,8 +137,8 @@ void G1RedirtyCardsQueueSet::enqueue_completed_buffer(BufferNode* node) { void G1RedirtyCardsQueueSet::add_bufferlist(const BufferNodeList& buffers) { assert(_collecting, "precondition"); - if (buffers._head != NULL) { - assert(buffers._tail != NULL, "invariant"); + if (buffers._head != nullptr) { + assert(buffers._tail != nullptr, "invariant"); Atomic::add(&_entry_count, buffers._entry_count); _list.prepend(*buffers._head, *buffers._tail); update_tail(buffers._tail); diff --git a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp index 26e92415c32..028b2a07487 100644 --- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp +++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp @@ -41,7 +41,7 @@ G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs, size_t region_granularity, size_t commit_factor, MEMFLAGS type) : - _listener(NULL), + 
_listener(nullptr), _storage(rs, used_size, page_size), _region_commit_map(rs.size() * commit_factor / region_granularity, mtGC), _memory_type(type) { @@ -253,7 +253,7 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper { }; void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled) { - if (_listener != NULL) { + if (_listener != nullptr) { _listener->on_commit(start_idx, num_regions, zero_filled); } } diff --git a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp index 075de2bbe1f..02498b394b3 100644 --- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp +++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,7 +69,7 @@ class G1RegionToSpaceMapper : public CHeapObj { virtual ~G1RegionToSpaceMapper() {} - virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkerThreads* pretouch_workers = NULL) = 0; + virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkerThreads* pretouch_workers = nullptr) = 0; virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0; // Creates an appropriate G1RegionToSpaceMapper for the given parameters. diff --git a/src/hotspot/share/gc/g1/g1RegionsOnNodes.cpp b/src/hotspot/share/gc/g1/g1RegionsOnNodes.cpp index b580b925287..5ec3c759369 100644 --- a/src/hotspot/share/gc/g1/g1RegionsOnNodes.cpp +++ b/src/hotspot/share/gc/g1/g1RegionsOnNodes.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "gc/g1/g1RegionsOnNodes.hpp" #include "gc/g1/heapRegion.hpp" -G1RegionsOnNodes::G1RegionsOnNodes() : _count_per_node(NULL), _numa(G1NUMA::numa()) { +G1RegionsOnNodes::G1RegionsOnNodes() : _count_per_node(nullptr), _numa(G1NUMA::numa()) { _count_per_node = NEW_C_HEAP_ARRAY(uint, _numa->num_active_nodes(), mtGC); clear(); } diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp index 9e25b05935d..d300240b00d 100644 --- a/src/hotspot/share/gc/g1/g1RemSet.cpp +++ b/src/hotspot/share/gc/g1/g1RemSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -199,32 +199,26 @@ private: // collection. Subsumes common checks like filtering out everything but old and // humongous regions outside the collection set. // This is valid because we are not interested in scanning stray remembered set - // entries from free or archive regions. + // entries from free regions. 
HeapWord** _scan_top; class G1ClearCardTableTask : public G1AbstractSubTask { G1CollectedHeap* _g1h; G1DirtyRegions* _regions; - uint _chunk_length; uint volatile _cur_dirty_regions; G1RemSetScanState* _scan_state; + static constexpr uint num_cards_per_worker = M; public: G1ClearCardTableTask(G1CollectedHeap* g1h, G1DirtyRegions* regions, - uint chunk_length, G1RemSetScanState* scan_state) : G1AbstractSubTask(G1GCPhaseTimes::ClearCardTable), _g1h(g1h), _regions(regions), - _chunk_length(chunk_length), _cur_dirty_regions(0), - _scan_state(scan_state) { - - assert(chunk_length > 0, "must be"); - } + _scan_state(scan_state) {} double worker_cost() const override { uint num_regions = _regions->size(); @@ -233,9 +227,10 @@ private: // There is no card table clean work, only some cleanup of memory. return AlmostNoWork; } - return ((double)align_up((size_t)num_regions << HeapRegion::LogCardsPerRegion, chunk_size()) / chunk_size()); - } + double num_cards = num_regions << HeapRegion::LogCardsPerRegion; + return ceil(num_cards / num_cards_per_worker); + } virtual ~G1ClearCardTableTask() { _scan_state->cleanup(); @@ -244,12 +239,12 @@ private: #endif } - static uint chunk_size() { return M; } - void do_work(uint worker_id) override { + const uint num_regions_per_worker = num_cards_per_worker / (uint)HeapRegion::CardsPerRegion; + while (_cur_dirty_regions < _regions->size()) { - uint next = Atomic::fetch_and_add(&_cur_dirty_regions, _chunk_length); - uint max = MIN2(next + _chunk_length, _regions->size()); + uint next = Atomic::fetch_and_add(&_cur_dirty_regions, num_regions_per_worker); + uint max = MIN2(next + num_regions_per_worker, _regions->size()); for (uint i = next; i < max; i++) { HeapRegion* r = _g1h->region_at(_regions->at(i)); @@ -262,16 +257,16 @@ private: public: G1RemSetScanState() : _max_reserved_regions(0), - _collection_set_iter_state(NULL), - _card_table_scan_state(NULL), + _collection_set_iter_state(nullptr), + _card_table_scan_state(nullptr), 
_scan_chunks_per_region(G1CollectedHeap::get_chunks_per_region()), _log_scan_chunks_per_region(log2i(_scan_chunks_per_region)), - _region_scan_chunks(NULL), + _region_scan_chunks(nullptr), _num_total_scan_chunks(0), _scan_chunks_shift(0), - _all_dirty_regions(NULL), - _next_dirty_regions(NULL), - _scan_top(NULL) { + _all_dirty_regions(nullptr), + _next_dirty_regions(nullptr), + _scan_top(nullptr) { } ~G1RemSetScanState() { @@ -282,7 +277,7 @@ public: } void initialize(size_t max_reserved_regions) { - assert(_collection_set_iter_state == NULL, "Must not be initialized twice"); + assert(_collection_set_iter_state == nullptr, "Must not be initialized twice"); _max_reserved_regions = max_reserved_regions; _collection_set_iter_state = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_reserved_regions, mtGC); _card_table_scan_state = NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC); @@ -332,7 +327,7 @@ public: // as we do not clean up remembered sets before merging heap roots. bool contains_cards_to_process(uint const region_idx) const { HeapRegion* hr = G1CollectedHeap::heap()->region_at_or_null(region_idx); - return (hr != NULL && !hr->in_collection_set() && hr->is_old_or_humongous_or_archive()); + return (hr != nullptr && !hr->in_collection_set() && hr->is_old_or_humongous()); } size_t num_visited_cards() const { @@ -368,17 +363,15 @@ public: } G1AbstractSubTask* create_cleanup_after_scan_heap_roots_task() { - uint const chunk_length = G1ClearCardTableTask::chunk_size() / (uint)HeapRegion::CardsPerRegion; - - return new G1ClearCardTableTask(G1CollectedHeap::heap(), _all_dirty_regions, chunk_length, this); + return new G1ClearCardTableTask(G1CollectedHeap::heap(), _all_dirty_regions, this); } void cleanup() { delete _all_dirty_regions; - _all_dirty_regions = NULL; + _all_dirty_regions = nullptr; delete _next_dirty_regions; - _next_dirty_regions = NULL; + _next_dirty_regions = nullptr; } void iterate_dirty_regions_from(HeapRegionClosure* cl, uint worker_id) { @@ -433,7 +426,7 
@@ public: void add_dirty_region(uint const region) { #ifdef ASSERT HeapRegion* hr = G1CollectedHeap::heap()->region_at(region); - assert(!hr->in_collection_set() && hr->is_old_or_humongous_or_archive(), + assert(!hr->in_collection_set() && hr->is_old_or_humongous(), "Region %u is not suitable for scanning, is %sin collection set or %s", hr->hrm_index(), hr->in_collection_set() ? "" : "not ", hr->get_short_type_str()); #endif @@ -459,7 +452,7 @@ public: } void clear_scan_top(uint region_idx) { - set_scan_top(region_idx, NULL); + set_scan_top(region_idx, nullptr); } }; @@ -480,107 +473,6 @@ void G1RemSet::initialize(uint max_reserved_regions) { _scan_state->initialize(max_reserved_regions); } -// Helper class to scan and detect ranges of cards that need to be scanned on the -// card table. -class G1CardTableScanner : public StackObj { -public: - typedef CardTable::CardValue CardValue; - -private: - CardValue* const _base_addr; - - CardValue* _cur_addr; - CardValue* const _end_addr; - - static const size_t ToScanMask = G1CardTable::g1_card_already_scanned; - static const size_t ExpandedToScanMask = G1CardTable::WordAlreadyScanned; - - bool cur_addr_aligned() const { - return ((uintptr_t)_cur_addr) % sizeof(size_t) == 0; - } - - bool cur_card_is_dirty() const { - CardValue value = *_cur_addr; - return (value & ToScanMask) == 0; - } - - bool cur_word_of_cards_contains_any_dirty_card() const { - assert(cur_addr_aligned(), "Current address should be aligned"); - size_t const value = *(size_t*)_cur_addr; - return (~value & ExpandedToScanMask) != 0; - } - - bool cur_word_of_cards_all_dirty_cards() const { - size_t const value = *(size_t*)_cur_addr; - return value == G1CardTable::WordAllDirty; - } - - size_t get_and_advance_pos() { - _cur_addr++; - return pointer_delta(_cur_addr, _base_addr, sizeof(CardValue)) - 1; - } - -public: - G1CardTableScanner(CardValue* start_card, size_t size) : - _base_addr(start_card), - _cur_addr(start_card), - _end_addr(start_card + size) { - - 
assert(is_aligned(start_card, sizeof(size_t)), "Unaligned start addr " PTR_FORMAT, p2i(start_card)); - assert(is_aligned(size, sizeof(size_t)), "Unaligned size " SIZE_FORMAT, size); - } - - size_t find_next_dirty() { - while (!cur_addr_aligned()) { - if (cur_card_is_dirty()) { - return get_and_advance_pos(); - } - _cur_addr++; - } - - assert(cur_addr_aligned(), "Current address should be aligned now."); - while (_cur_addr != _end_addr) { - if (cur_word_of_cards_contains_any_dirty_card()) { - for (size_t i = 0; i < sizeof(size_t); i++) { - if (cur_card_is_dirty()) { - return get_and_advance_pos(); - } - _cur_addr++; - } - assert(false, "Should not reach here given we detected a dirty card in the word."); - } - _cur_addr += sizeof(size_t); - } - return get_and_advance_pos(); - } - - size_t find_next_non_dirty() { - assert(_cur_addr <= _end_addr, "Not allowed to search for marks after area."); - - while (!cur_addr_aligned()) { - if (!cur_card_is_dirty()) { - return get_and_advance_pos(); - } - _cur_addr++; - } - - assert(cur_addr_aligned(), "Current address should be aligned now."); - while (_cur_addr != _end_addr) { - if (!cur_word_of_cards_all_dirty_cards()) { - for (size_t i = 0; i < sizeof(size_t); i++) { - if (!cur_card_is_dirty()) { - return get_and_advance_pos(); - } - _cur_addr++; - } - assert(false, "Should not reach here given we detected a non-dirty card in the word."); - } - _cur_addr += sizeof(size_t); - } - return get_and_advance_pos(); - } -}; - // Helper class to claim dirty chunks within the card table. class G1CardTableChunkClaimer { G1RemSetScanState* _scan_state; @@ -613,9 +505,10 @@ public: // Scans a heap region for dirty cards. 
class G1ScanHRForRegionClosure : public HeapRegionClosure { + using CardValue = CardTable::CardValue; + G1CollectedHeap* _g1h; G1CardTable* _ct; - G1BlockOffsetTable* _bot; G1ParScanThreadState* _pss; @@ -636,28 +529,27 @@ class G1ScanHRForRegionClosure : public HeapRegionClosure { // The address to which this thread already scanned (walked the heap) up to during // card scanning (exclusive). HeapWord* _scanned_to; - G1CardTable::CardValue _scanned_card_value; + CardValue _scanned_card_value; HeapWord* scan_memregion(uint region_idx_for_card, MemRegion mr) { HeapRegion* const card_region = _g1h->region_at(region_idx_for_card); G1ScanCardClosure card_cl(_g1h, _pss, _heap_roots_found); HeapWord* const scanned_to = card_region->oops_on_memregion_seq_iterate_careful(mr, &card_cl); - assert(scanned_to != NULL, "Should be able to scan range"); + assert(scanned_to != nullptr, "Should be able to scan range"); assert(scanned_to >= mr.end(), "Scanned to " PTR_FORMAT " less than range " PTR_FORMAT, p2i(scanned_to), p2i(mr.end())); _pss->trim_queue_partially(); return scanned_to; } - void do_claimed_block(uint const region_idx_for_card, size_t const first_card, size_t const num_cards) { - HeapWord* const card_start = _bot->address_for_index_raw(first_card); -#ifdef ASSERT - HeapRegion* hr = _g1h->region_at_or_null(region_idx_for_card); - assert(hr == NULL || hr->is_in_reserved(card_start), - "Card start " PTR_FORMAT " to scan outside of region %u", p2i(card_start), _g1h->region_at(region_idx_for_card)->hrm_index()); -#endif - HeapWord* const top = _scan_state->scan_top(region_idx_for_card); + void do_claimed_block(uint const region_idx, CardValue* const dirty_l, CardValue* const dirty_r) { + _ct->change_dirty_cards_to(dirty_l, dirty_r, _scanned_card_value); + size_t num_cards = dirty_r - dirty_l; + _blocks_scanned++; + + HeapWord* const card_start = _ct->addr_for(dirty_l); + HeapWord* const top = _scan_state->scan_top(region_idx); if (card_start >= top) { return; } @@ -667,16 
+559,108 @@ class G1ScanHRForRegionClosure : public HeapRegionClosure { return; } MemRegion mr(MAX2(card_start, _scanned_to), scan_end); - _scanned_to = scan_memregion(region_idx_for_card, mr); + _scanned_to = scan_memregion(region_idx, mr); _cards_scanned += num_cards; } - ALWAYSINLINE void do_card_block(uint const region_idx, size_t const first_card, size_t const num_cards) { - _ct->change_dirty_cards_to(first_card, num_cards, _scanned_card_value); - do_claimed_block(region_idx, first_card, num_cards); - _blocks_scanned++; - } + // To locate consecutive dirty cards inside a chunk. + class ChunkScanner { + using Word = size_t; + + CardValue* const _start_card; + CardValue* const _end_card; + + static const size_t ExpandedToScanMask = G1CardTable::WordAlreadyScanned; + static const size_t ToScanMask = G1CardTable::g1_card_already_scanned; + + static bool is_card_dirty(const CardValue* const card) { + return (*card & ToScanMask) == 0; + } + + static bool is_word_aligned(const void* const addr) { + return ((uintptr_t)addr) % sizeof(Word) == 0; + } + + CardValue* find_first_dirty_card(CardValue* i_card) const { + while (!is_word_aligned(i_card)) { + if (is_card_dirty(i_card)) { + return i_card; + } + i_card++; + } + + for (/* empty */; i_card < _end_card; i_card += sizeof(Word)) { + Word word_value = *reinterpret_cast(i_card); + bool has_dirty_cards_in_word = (~word_value & ExpandedToScanMask) != 0; + + if (has_dirty_cards_in_word) { + for (uint i = 0; i < sizeof(Word); ++i) { + if (is_card_dirty(i_card)) { + return i_card; + } + i_card++; + } + assert(false, "should have early-returned"); + } + } + + return _end_card; + } + + CardValue* find_first_non_dirty_card(CardValue* i_card) const { + while (!is_word_aligned(i_card)) { + if (!is_card_dirty(i_card)) { + return i_card; + } + i_card++; + } + + for (/* empty */; i_card < _end_card; i_card += sizeof(Word)) { + Word word_value = *reinterpret_cast(i_card); + bool all_cards_dirty = (word_value == 
G1CardTable::WordAllDirty); + + if (!all_cards_dirty) { + for (uint i = 0; i < sizeof(Word); ++i) { + if (!is_card_dirty(i_card)) { + return i_card; + } + i_card++; + } + assert(false, "should have early-returned"); + } + } + + return _end_card; + } + + public: + ChunkScanner(CardValue* const start_card, CardValue* const end_card) : + _start_card(start_card), + _end_card(end_card) { + assert(is_word_aligned(start_card), "precondition"); + assert(is_word_aligned(end_card), "precondition"); + } + + template + void on_dirty_cards(Func&& f) { + for (CardValue* cur_card = _start_card; cur_card < _end_card; /* empty */) { + CardValue* dirty_l = find_first_dirty_card(cur_card); + CardValue* dirty_r = find_first_non_dirty_card(dirty_l); + + assert(dirty_l <= dirty_r, "inv"); + + if (dirty_l == dirty_r) { + assert(dirty_r == _end_card, "finished the entire chunk"); + return; + } + + f(dirty_l, dirty_r); + + cur_card = dirty_r + 1; + } + } + }; void scan_heap_roots(HeapRegion* r) { uint const region_idx = r->hrm_index(); @@ -685,39 +669,24 @@ class G1ScanHRForRegionClosure : public HeapRegionClosure { G1CardTableChunkClaimer claim(_scan_state, region_idx); - // Set the current scan "finger" to NULL for every heap region to scan. Since + // Set the current scan "finger" to null for every heap region to scan. Since // the claim value is monotonically increasing, the check to not scan below this // will filter out objects spanning chunks within the region too then, as opposed // to resetting this value for every claim. 
- _scanned_to = NULL; + _scanned_to = nullptr; while (claim.has_next()) { - size_t const region_card_base_idx = ((size_t)region_idx << HeapRegion::LogCardsPerRegion) + claim.value(); - CardTable::CardValue* const base_addr = _ct->byte_for_index(region_card_base_idx); - - G1CardTableScanner scan(base_addr, claim.size()); - - size_t first_scan_idx = scan.find_next_dirty(); - while (first_scan_idx != claim.size()) { -#ifdef ASSERT - { - CardTable::CardValue value = *_ct->byte_for_index(region_card_base_idx + first_scan_idx); - assert(value == CardTable::dirty_card_val(), "is %d at region %u idx " SIZE_FORMAT, value, region_idx, first_scan_idx); - } -#endif - - size_t const last_scan_idx = scan.find_next_non_dirty(); - size_t const len = last_scan_idx - first_scan_idx; - - do_card_block(region_idx, region_card_base_idx + first_scan_idx, len); - - if (last_scan_idx == claim.size()) { - break; - } - - first_scan_idx = scan.find_next_dirty(); - } _chunks_claimed++; + + size_t const region_card_base_idx = ((size_t)region_idx << HeapRegion::LogCardsPerRegion) + claim.value(); + + CardValue* const start_card = _ct->byte_for_index(region_card_base_idx); + CardValue* const end_card = start_card + claim.size(); + + ChunkScanner chunk_scanner{start_card, end_card}; + chunk_scanner.on_dirty_cards([&] (CardValue* dirty_l, CardValue* dirty_r) { + do_claimed_block(region_idx, dirty_l, dirty_r); + }); } } @@ -729,7 +698,6 @@ public: bool remember_already_scanned_cards) : _g1h(G1CollectedHeap::heap()), _ct(_g1h->card_table()), - _bot(_g1h->bot()), _pss(pss), _scan_state(scan_state), _phase(phase), @@ -740,13 +708,13 @@ public: _heap_roots_found(0), _rem_set_root_scan_time(), _rem_set_trim_partially_time(), - _scanned_to(NULL), + _scanned_to(nullptr), _scanned_card_value(remember_already_scanned_cards ? 
G1CardTable::g1_scanned_card_val() : G1CardTable::clean_card_val()) { } bool do_heap_region(HeapRegion* r) { - assert(!r->in_collection_set() && r->is_old_or_humongous_or_archive(), + assert(!r->in_collection_set() && r->is_old_or_humongous(), "Should only be called on old gen non-collection set regions but region %u is not.", r->hrm_index()); uint const region_idx = r->hrm_index(); @@ -900,7 +868,7 @@ void G1RemSet::scan_collection_set_regions(G1ParScanThreadState* pss, #ifdef ASSERT void G1RemSet::assert_scan_top_is_null(uint hrm_index) { - assert(_scan_state->scan_top(hrm_index) == NULL, + assert(_scan_state->scan_top(hrm_index) == nullptr, "scan_top of region %u is unexpectedly " PTR_FORMAT, hrm_index, p2i(_scan_state->scan_top(hrm_index))); } @@ -912,10 +880,10 @@ void G1RemSet::prepare_region_for_scan(HeapRegion* r) { r->prepare_remset_for_scan(); // Only update non-collection set old regions, others must have already been set - // to NULL (don't scan) in the initialization. + // to null (don't scan) in the initialization. if (r->in_collection_set()) { assert_scan_top_is_null(hrm_index); - } else if (r->is_old_or_humongous_or_archive()) { + } else if (r->is_old_or_humongous()) { _scan_state->set_scan_top(hrm_index, r->top()); } else { assert_scan_top_is_null(hrm_index); @@ -1476,14 +1444,14 @@ bool G1RemSet::clean_card_before_refine(CardValue** const card_ptr_addr) { HeapRegion* r = _g1h->heap_region_containing_or_null(start); // If this is a (stale) card into an uncommitted region, exit. - if (r == NULL) { + if (r == nullptr) { return false; } check_card_ptr(card_ptr, _ct); // If the card is no longer dirty, nothing to do. - // We cannot load the card value before the "r == NULL" check, because G1 + // We cannot load the card value before the "r == nullptr" check above, because G1 // could uncommit parts of the card table covering uncommitted regions. 
if (*card_ptr != G1CardTable::dirty_card_val()) { return false; @@ -1508,7 +1476,7 @@ bool G1RemSet::clean_card_before_refine(CardValue** const card_ptr_addr) { // In the normal (non-stale) case, the synchronization between the // enqueueing of the card and processing it here will have ensured // we see the up-to-date region type here. - if (!r->is_old_or_humongous_or_archive()) { + if (!r->is_old_or_humongous()) { return false; } @@ -1517,9 +1485,8 @@ bool G1RemSet::clean_card_before_refine(CardValue** const card_ptr_addr) { // (part of) an object at the end of the allocated space and extend // beyond the end of allocation. - // Non-humongous objects are either allocated in the old regions during GC, - // or mapped in archive regions during startup. So if region is old or - // archive then top is stable. + // Non-humongous objects are either allocated in the old regions during GC. + // So if region is old then top is stable. // Humongous object allocation sets top last; if top has not yet been set, // this is a stale card and we'll end up with an empty intersection. // If this is not a stale card, the synchronization between the @@ -1550,7 +1517,7 @@ void G1RemSet::refine_card_concurrently(CardValue* const card_ptr, // And find the region containing it. HeapRegion* r = _g1h->heap_region_containing(start); // This reload of the top is safe even though it happens after the full - // fence, because top is stable for old, archive and unfiltered humongous + // fence, because top is stable for old and unfiltered humongous // regions, so it must return the same value as the previous load when // cleaning the card. 
Also cleaning the card and refinement of the card // cannot span across safepoint, so we don't need to worry about top being @@ -1565,7 +1532,7 @@ void G1RemSet::refine_card_concurrently(CardValue* const card_ptr, assert(!dirty_region.is_empty(), "sanity"); G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_id); - if (r->oops_on_memregion_seq_iterate_careful(dirty_region, &conc_refine_cl) != NULL) { + if (r->oops_on_memregion_seq_iterate_careful(dirty_region, &conc_refine_cl) != nullptr) { return; } diff --git a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp index d8ec513957a..cedbefe1fe8 100644 --- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp +++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,13 +55,13 @@ void G1RemSetSummary::update() { } void G1RemSetSummary::set_rs_thread_vtime(uint thread, double value) { - assert(_rs_threads_vtimes != NULL, "just checking"); + assert(_rs_threads_vtimes != nullptr, "just checking"); assert(thread < _num_vtimes, "just checking"); _rs_threads_vtimes[thread] = value; } double G1RemSetSummary::rs_thread_vtime(uint thread) const { - assert(_rs_threads_vtimes != NULL, "just checking"); + assert(_rs_threads_vtimes != nullptr, "just checking"); assert(thread < _num_vtimes, "just checking"); return _rs_threads_vtimes[thread]; } @@ -82,14 +82,14 @@ G1RemSetSummary::~G1RemSetSummary() { } void G1RemSetSummary::set(G1RemSetSummary* other) { - assert(other != NULL, "just checking"); + assert(other != nullptr, "just checking"); assert(_num_vtimes == other->_num_vtimes, "just checking"); memcpy(_rs_threads_vtimes, other->_rs_threads_vtimes, sizeof(double) * _num_vtimes); } 
void G1RemSetSummary::subtract_from(G1RemSetSummary* other) { - assert(other != NULL, "just checking"); + assert(other != nullptr, "just checking"); assert(_num_vtimes == other->_num_vtimes, "just checking"); for (uint i = 0; i < _num_vtimes; i++) { @@ -187,7 +187,6 @@ private: RegionTypeCounter _humongous; RegionTypeCounter _free; RegionTypeCounter _old; - RegionTypeCounter _archive; RegionTypeCounter _all; size_t _max_rs_mem_sz; @@ -211,9 +210,9 @@ private: public: HRRSStatsIter() : _young("Young"), _humongous("Humongous"), - _free("Free"), _old("Old"), _archive("Archive"), _all("All"), - _max_rs_mem_sz(0), _max_rs_mem_sz_region(NULL), - _max_code_root_mem_sz(0), _max_code_root_mem_sz_region(NULL) + _free("Free"), _old("Old"), _all("All"), + _max_rs_mem_sz(0), _max_rs_mem_sz_region(nullptr), + _max_code_root_mem_sz(0), _max_code_root_mem_sz_region(nullptr) {} bool do_heap_region(HeapRegion* r) { @@ -235,7 +234,7 @@ public: } size_t code_root_elems = hrrs->code_roots_list_length(); - RegionTypeCounter* current = NULL; + RegionTypeCounter* current = nullptr; if (r->is_free()) { current = &_free; } else if (r->is_young()) { @@ -244,8 +243,6 @@ public: current = &_humongous; } else if (r->is_old()) { current = &_old; - } else if (r->is_archive()) { - current = &_archive; } else { ShouldNotReachHere(); } @@ -258,7 +255,7 @@ public: } void print_summary_on(outputStream* out) { - RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, &_archive, NULL }; + RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, nullptr }; out->print_cr(" Current rem set statistics"); out->print_cr(" Total per region rem sets sizes = " SIZE_FORMAT @@ -266,13 +263,13 @@ public: total_rs_mem_sz(), max_rs_mem_sz(), total_rs_unused_mem_sz()); - for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) { + for (RegionTypeCounter** current = &counters[0]; *current != nullptr; current++) { (*current)->print_rs_mem_info_on(out, total_rs_mem_sz()); 
} out->print_cr(" " SIZE_FORMAT " occupied cards represented.", total_cards_occupied()); - for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) { + for (RegionTypeCounter** current = &counters[0]; *current != nullptr; current++) { (*current)->print_cards_occupied_info_on(out, total_cards_occupied()); } @@ -296,13 +293,13 @@ public: proper_unit_for_byte_size(total_code_root_mem_sz()), byte_size_in_proper_unit(max_code_root_rem_set->code_roots_mem_size()), proper_unit_for_byte_size(max_code_root_rem_set->code_roots_mem_size())); - for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) { + for (RegionTypeCounter** current = &counters[0]; *current != nullptr; current++) { (*current)->print_code_root_mem_info_on(out, total_code_root_mem_sz()); } out->print_cr(" " SIZE_FORMAT " code roots represented.", total_code_root_elems()); - for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) { + for (RegionTypeCounter** current = &counters[0]; *current != nullptr; current++) { (*current)->print_code_root_elems_info_on(out, total_code_root_elems()); } diff --git a/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp index cec83231a26..6d1633786e6 100644 --- a/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp +++ b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,12 +30,11 @@ #include "runtime/safepoint.hpp" bool G1RemSetTrackingPolicy::needs_scan_for_rebuild(HeapRegion* r) const { - // All non-free, non-young, non-closed archive regions need to be scanned for references; - // At every gc we gather references to other regions in young, and closed archive - // regions by definition do not have references going outside the closed archive. + // All non-free and non-young regions need to be scanned for references; + // At every gc we gather references to other regions in young. // Free regions trivially do not need scanning because they do not contain live // objects. - return !(r->is_young() || r->is_closed_archive() || r->is_free()); + return !(r->is_young() || r->is_free()); } void G1RemSetTrackingPolicy::update_at_allocate(HeapRegion* r) { @@ -45,9 +44,6 @@ void G1RemSetTrackingPolicy::update_at_allocate(HeapRegion* r) { } else if (r->is_humongous()) { // Collect remembered sets for humongous regions by default to allow eager reclaim. r->rem_set()->set_state_complete(); - } else if (r->is_archive()) { - // Archive regions never move ever. So never build remembered sets for them. - r->rem_set()->set_state_untracked(); } else if (r->is_old()) { // By default, do not create remembered set for new old regions. 
r->rem_set()->set_state_untracked(); @@ -79,10 +75,6 @@ bool G1RemSetTrackingPolicy::update_humongous_before_rebuild(HeapRegion* r, bool assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(r->is_humongous(), "Region %u should be humongous", r->hrm_index()); - if (r->is_archive()) { - return false; - } - assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index()); bool selected_for_rebuild = false; @@ -104,9 +96,8 @@ bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_by assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(!r->is_humongous(), "Region %u is humongous", r->hrm_index()); - // Only consider updating the remembered set for old gen regions - excluding archive regions - // which never move (but are "Old" regions). - if (!r->is_old() || r->is_archive()) { + // Only consider updating the remembered set for old gen regions. + if (!r->is_old()) { return false; } @@ -137,9 +128,8 @@ bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_by void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) { assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); - if (r->is_old_or_humongous_or_archive()) { + if (r->is_old_or_humongous()) { if (r->rem_set()->is_updating()) { - assert(!r->is_archive(), "Archive region %u with remembered set", r->hrm_index()); r->rem_set()->set_state_complete(); } G1CollectedHeap* g1h = G1CollectedHeap::heap(); diff --git a/src/hotspot/share/gc/g1/g1RootClosures.cpp b/src/hotspot/share/gc/g1/g1RootClosures.cpp index 852bfcc95ac..946b2ed2fde 100644 --- a/src/hotspot/share/gc/g1/g1RootClosures.cpp +++ b/src/hotspot/share/gc/g1/g1RootClosures.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,7 +70,7 @@ public: }; G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) { - G1EvacuationRootClosures* res = NULL; + G1EvacuationRootClosures* res = nullptr; if (g1h->collector_state()->in_concurrent_start_gc()) { if (ClassUnloadingWithConcurrentMark) { res = new G1ConcurrentStartMarkClosures(g1h, pss); diff --git a/src/hotspot/share/gc/g1/g1RootProcessor.cpp b/src/hotspot/share/gc/g1/g1RootProcessor.cpp index 31579c93479..3a9e30ec403 100644 --- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp +++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -87,7 +87,7 @@ public: OopClosure* strong_oops() { return _roots; } - CLDClosure* weak_clds() { return NULL; } + CLDClosure* weak_clds() { return nullptr; } CLDClosure* strong_clds() { return _clds; } CodeBlobClosure* strong_codeblobs() { return _blobs; } @@ -98,8 +98,8 @@ void G1RootProcessor::process_strong_roots(OopClosure* oops, CodeBlobClosure* blobs) { StrongRootsClosures closures(oops, clds, blobs); - process_java_roots(&closures, NULL, 0); - process_vm_roots(&closures, NULL, 0); + process_java_roots(&closures, nullptr, 0); + process_vm_roots(&closures, nullptr, 0); // CodeCache is already processed in java roots // refProcessor is not needed since we are inside a safe point @@ -123,9 +123,9 @@ public: CLDClosure* weak_clds() { return _clds; } CLDClosure* strong_clds() { return _clds; } - // We don't want to visit code blobs more than once, so we return NULL for the + // We don't want to visit code blobs more than once, so we return null for the // strong case and walk the entire code cache as a separate step. 
- CodeBlobClosure* strong_codeblobs() { return NULL; } + CodeBlobClosure* strong_codeblobs() { return nullptr; } }; void G1RootProcessor::process_all_roots(OopClosure* oops, @@ -133,10 +133,10 @@ void G1RootProcessor::process_all_roots(OopClosure* oops, CodeBlobClosure* blobs) { AllRootsClosures closures(oops, clds); - process_java_roots(&closures, NULL, 0); - process_vm_roots(&closures, NULL, 0); + process_java_roots(&closures, nullptr, 0); + process_vm_roots(&closures, nullptr, 0); - process_code_cache_roots(blobs, NULL, 0); + process_code_cache_roots(blobs, nullptr, 0); // refProcessor is not needed since we are inside a safe point _process_strong_tasks.all_tasks_claimed(G1RP_PS_refProcessor_oops_do); diff --git a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp index 066633b6683..f1cf554ca46 100644 --- a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp +++ b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,7 +50,7 @@ SATBMarkQueue& G1SATBMarkQueueSet::satb_queue_for_thread(Thread* const t) const // requires marking. // // The entry must point into the G1 heap. In particular, it must not -// be a NULL pointer. NULL pointers are pre-filtered and never +// be a null pointer. null pointers are pre-filtered and never // inserted into a SATB buffer. // // An entry that is below the TAMS pointer for the containing heap @@ -81,7 +81,7 @@ SATBMarkQueue& G1SATBMarkQueueSet::satb_queue_for_thread(Thread* const t) const // in an unfiltered buffer refer to valid objects. static inline bool requires_marking(const void* entry, G1CollectedHeap* g1h) { - // Includes rejection of NULL pointers. 
+ // Includes rejection of null pointers. assert(g1h->is_in_reserved(entry), "Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry)); diff --git a/src/hotspot/share/gc/g1/g1ServiceThread.cpp b/src/hotspot/share/gc/g1/g1ServiceThread.cpp index 7ae7f10bf58..8cda065f047 100644 --- a/src/hotspot/share/gc/g1/g1ServiceThread.cpp +++ b/src/hotspot/share/gc/g1/g1ServiceThread.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,7 +48,7 @@ G1ServiceThread::G1ServiceThread() : void G1ServiceThread::register_task(G1ServiceTask* task, jlong delay_ms) { guarantee(!task->is_registered(), "Task already registered"); - guarantee(task->next() == NULL, "Task already in queue"); + guarantee(task->next() == nullptr, "Task already in queue"); // Make sure the service thread is still up and running, there is a race // during shutdown where the service thread has been stopped, but other @@ -70,7 +70,7 @@ void G1ServiceThread::register_task(G1ServiceTask* task, jlong delay_ms) { void G1ServiceThread::schedule(G1ServiceTask* task, jlong delay_ms, bool notify) { guarantee(task->is_registered(), "Must be registered before scheduled"); - guarantee(task->next() == NULL, "Task already in queue"); + guarantee(task->next() == nullptr, "Task already in queue"); // Schedule task by setting the task time and adding it to queue. jlong delay = TimeHelper::millis_to_counter(delay_ms); @@ -114,7 +114,7 @@ G1ServiceTask* G1ServiceThread::wait_for_task() { } } } - return nullptr; // Return nullptr when terminating. + return nullptr; // Return null when terminating. 
} void G1ServiceThread::run_task(G1ServiceTask* task) { @@ -154,15 +154,15 @@ void G1ServiceThread::stop_service() { G1ServiceTask::G1ServiceTask(const char* name) : _time(), _name(name), - _next(NULL), - _service_thread(NULL) { } + _next(nullptr), + _service_thread(nullptr) { } void G1ServiceTask::set_service_thread(G1ServiceThread* thread) { _service_thread = thread; } bool G1ServiceTask::is_registered() { - return _service_thread != NULL; + return _service_thread != nullptr; } void G1ServiceTask::schedule(jlong delay_ms) { @@ -177,7 +177,7 @@ const char* G1ServiceTask::name() { } void G1ServiceTask::set_time(jlong time) { - assert(_next == NULL, "Not allowed to update time while in queue"); + assert(_next == nullptr, "Not allowed to update time while in queue"); _time = time; } @@ -200,7 +200,7 @@ void G1ServiceTaskQueue::remove_front() { G1ServiceTask* task = _sentinel.next(); _sentinel.set_next(task->next()); - task->set_next(NULL); + task->set_next(nullptr); } G1ServiceTask* G1ServiceTaskQueue::front() { @@ -213,8 +213,8 @@ bool G1ServiceTaskQueue::is_empty() { } void G1ServiceTaskQueue::add_ordered(G1ServiceTask* task) { - assert(task != NULL, "not a valid task"); - assert(task->next() == NULL, "invariant"); + assert(task != nullptr, "not a valid task"); + assert(task->next() == nullptr, "invariant"); assert(task->time() != max_jlong, "invalid time for task"); G1ServiceTask* current = &_sentinel; diff --git a/src/hotspot/share/gc/g1/g1ServiceThread.hpp b/src/hotspot/share/gc/g1/g1ServiceThread.hpp index 7de674d42cf..b51bf0baa9e 100644 --- a/src/hotspot/share/gc/g1/g1ServiceThread.hpp +++ b/src/hotspot/share/gc/g1/g1ServiceThread.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -111,7 +111,7 @@ class G1ServiceThread: public ConcurrentGCThread { void stop_service(); // Return the next ready task, waiting until a task is ready. - // Instead returns nullptr if termination requested. + // Instead returns null if termination requested. G1ServiceTask* wait_for_task(); void run_task(G1ServiceTask* task); diff --git a/src/hotspot/share/gc/g1/g1SurvRateGroup.cpp b/src/hotspot/share/gc/g1/g1SurvRateGroup.cpp index e698a313a0a..526530276f5 100644 --- a/src/hotspot/share/gc/g1/g1SurvRateGroup.cpp +++ b/src/hotspot/share/gc/g1/g1SurvRateGroup.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,9 +32,9 @@ G1SurvRateGroup::G1SurvRateGroup() : _stats_arrays_length(0), - _accum_surv_rate_pred(NULL), + _accum_surv_rate_pred(nullptr), _last_pred(0.0), - _surv_rate_predictors(NULL), + _surv_rate_predictors(nullptr), _num_added_regions(0) { reset(); start_adding_regions(); @@ -57,7 +57,7 @@ void G1SurvRateGroup::reset() { // Seed initial _surv_rate_pred and _accum_surv_rate_pred values guarantee(_stats_arrays_length == 1, "invariant" ); - guarantee(_surv_rate_predictors[0] != NULL, "invariant" ); + guarantee(_surv_rate_predictors[0] != nullptr, "invariant" ); const double initial_surv_rate = 0.4; _surv_rate_predictors[0]->add(initial_surv_rate); _last_pred = _accum_surv_rate_pred[0] = initial_surv_rate; diff --git a/src/hotspot/share/gc/g1/g1UncommitRegionTask.cpp b/src/hotspot/share/gc/g1/g1UncommitRegionTask.cpp index c0013cfcaa8..0fbaa5a4a8d 100644 --- a/src/hotspot/share/gc/g1/g1UncommitRegionTask.cpp +++ b/src/hotspot/share/gc/g1/g1UncommitRegionTask.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle 
and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ #include "runtime/globals.hpp" #include "utilities/ticks.hpp" -G1UncommitRegionTask* G1UncommitRegionTask::_instance = NULL; +G1UncommitRegionTask* G1UncommitRegionTask::_instance = nullptr; G1UncommitRegionTask::G1UncommitRegionTask() : G1ServiceTask("G1 Uncommit Region Task"), @@ -38,7 +38,7 @@ G1UncommitRegionTask::G1UncommitRegionTask() : _summary_region_count(0) { } void G1UncommitRegionTask::initialize() { - assert(_instance == NULL, "Already initialized"); + assert(_instance == nullptr, "Already initialized"); _instance = new G1UncommitRegionTask(); // Register the task with the service thread. This will automatically @@ -48,7 +48,7 @@ void G1UncommitRegionTask::initialize() { } G1UncommitRegionTask* G1UncommitRegionTask::instance() { - if (_instance == NULL) { + if (_instance == nullptr) { initialize(); } return _instance; diff --git a/src/hotspot/share/gc/g1/g1VMOperations.cpp b/src/hotspot/share/gc/g1/g1VMOperations.cpp index 8ccd7f2c9fb..c19c399cfb6 100644 --- a/src/hotspot/share/gc/g1/g1VMOperations.cpp +++ b/src/hotspot/share/gc/g1/g1VMOperations.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -51,8 +51,7 @@ bool VM_G1CollectFull::skip_operation() const { void VM_G1CollectFull::doit() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); GCCauseSetter x(g1h, _gc_cause); - _gc_succeeded = g1h->do_full_collection(true /* explicit_gc */, - false /* clear_all_soft_refs */, + _gc_succeeded = g1h->do_full_collection(false /* clear_all_soft_refs */, false /* do_maximal_compaction */); } @@ -130,7 +129,7 @@ void VM_G1CollectForAllocation::doit() { // An allocation has been requested. So, try to do that first. _result = g1h->attempt_allocation_at_safepoint(_word_size, false /* expect_null_cur_alloc_region */); - if (_result != NULL) { + if (_result != nullptr) { // If we can successfully allocate before we actually do the // pause then we will consider this pause successful. _gc_succeeded = true; diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.cpp b/src/hotspot/share/gc/g1/g1YoungCollector.cpp index 13883085789..607b5c51092 100644 --- a/src/hotspot/share/gc/g1/g1YoungCollector.cpp +++ b/src/hotspot/share/gc/g1/g1YoungCollector.cpp @@ -95,7 +95,7 @@ public: _pause_cause(cause), // Fake a "no cause" and manually add the correct string in update_young_gc_name() // to make the string look more natural. 
- _tt(update_young_gc_name(), NULL, GCCause::_no_gc, true) { + _tt(update_young_gc_name(), nullptr, GCCause::_no_gc, true) { } ~G1YoungGCTraceTime() { @@ -793,7 +793,7 @@ public: void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } void do_oop(oop* p) { oop obj = *p; - assert(obj != NULL, "the caller should have filtered out NULL values"); + assert(obj != nullptr, "the caller should have filtered out null values"); const G1HeapRegionAttr region_attr =_g1h->region_attr(obj); if (!region_attr.is_in_cset_or_humongous_candidate()) { diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp index 0d7b21eb70e..21fc522317f 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp +++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp @@ -263,7 +263,7 @@ public: virtual ~EagerlyReclaimHumongousObjectsTask() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); - g1h->remove_from_old_gen_sets(0, 0, _humongous_regions_reclaimed); + g1h->remove_from_old_gen_sets(0, _humongous_regions_reclaimed); g1h->decrement_summary_bytes(_bytes_freed); } @@ -557,8 +557,7 @@ class FreeCSetClosure : public HeapRegionClosure { stats()->account_failed_region(r); G1GCPhaseTimes* p = _g1h->phase_times(); - assert(!r->is_pinned(), "Unexpected pinned region at index %u", r->hrm_index()); - assert(r->in_collection_set(), "bad CS"); + assert(r->in_collection_set(), "Failed evacuation of region %u not in collection set", r->hrm_index()); p->record_or_add_thread_work_item(G1GCPhaseTimes::RestoreRetainedRegions, _worker_id, diff --git a/src/hotspot/share/gc/g1/heapRegion.cpp b/src/hotspot/share/gc/g1/heapRegion.cpp index 1c47b92b5f7..0c2f05f14d9 100644 --- a/src/hotspot/share/gc/g1/heapRegion.cpp +++ b/src/hotspot/share/gc/g1/heapRegion.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -110,9 +110,9 @@ void HeapRegion::handle_evacuation_failure() { } void HeapRegion::unlink_from_list() { - set_next(NULL); - set_prev(NULL); - set_containing_set(NULL); + set_next(nullptr); + set_prev(nullptr); + set_containing_set(nullptr); } void HeapRegion::hr_clear(bool clear_space) { @@ -179,16 +179,6 @@ void HeapRegion::set_old() { _type.set_old(); } -void HeapRegion::set_open_archive() { - report_region_type_change(G1HeapRegionTraceType::OpenArchive); - _type.set_open_archive(); -} - -void HeapRegion::set_closed_archive() { - report_region_type_change(G1HeapRegionTraceType::ClosedArchive); - _type.set_closed_archive(); -} - void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) { assert(!is_humongous(), "sanity / pre-condition"); assert(top() == bottom(), "should be empty"); @@ -214,7 +204,7 @@ void HeapRegion::clear_humongous() { assert(is_humongous(), "pre-condition"); assert(capacity() == HeapRegion::GrainBytes, "pre-condition"); - _humongous_start_region = NULL; + _humongous_start_region = nullptr; } void HeapRegion::prepare_remset_for_scan() { @@ -227,23 +217,23 @@ HeapRegion::HeapRegion(uint hrm_index, G1CardSetConfiguration* config) : _bottom(mr.start()), _end(mr.end()), - _top(NULL), + _top(nullptr), _bot_part(bot, this), - _pre_dummy_top(NULL), - _rem_set(NULL), + _pre_dummy_top(nullptr), + _rem_set(nullptr), _hrm_index(hrm_index), _type(), - _humongous_start_region(NULL), + _humongous_start_region(nullptr), _index_in_opt_cset(InvalidCSetIndex), - _next(NULL), _prev(NULL), + _next(nullptr), _prev(nullptr), #ifdef ASSERT - _containing_set(NULL), + _containing_set(nullptr), #endif - _top_at_mark_start(NULL), - _parsable_bottom(NULL), + _top_at_mark_start(nullptr), + _parsable_bottom(nullptr), _garbage_bytes(0), _young_index_in_cset(-1), - _surv_rate_group(NULL), 
_age_index(G1SurvRateGroup::InvalidAgeIndex), _gc_efficiency(-1.0), + _surv_rate_group(nullptr), _age_index(G1SurvRateGroup::InvalidAgeIndex), _gc_efficiency(-1.0), _node_index(G1NUMA::UnknownNodeIndex) { assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()), @@ -364,8 +354,8 @@ public: _hr(hr), _failures(false) {} void do_code_blob(CodeBlob* cb) { - nmethod* nm = (cb == NULL) ? NULL : cb->as_compiled_method()->as_nmethod_or_null(); - if (nm != NULL) { + nmethod* nm = (cb == nullptr) ? nullptr : cb->as_compiled_method()->as_nmethod_or_null(); + if (nm != nullptr) { // Verify that the nemthod is live VerifyCodeRootOopClosure oop_cl(_hr); nm->oops_do(&oop_cl); diff --git a/src/hotspot/share/gc/g1/heapRegion.hpp b/src/hotspot/share/gc/g1/heapRegion.hpp index 08d6c26f830..218fd4d1ac5 100644 --- a/src/hotspot/share/gc/g1/heapRegion.hpp +++ b/src/hotspot/share/gc/g1/heapRegion.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -97,8 +97,8 @@ public: assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition"); _pre_dummy_top = pre_dummy_top; } - HeapWord* pre_dummy_top() const { return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top; } - void reset_pre_dummy_top() { _pre_dummy_top = NULL; } + HeapWord* pre_dummy_top() const { return (_pre_dummy_top == nullptr) ? top() : _pre_dummy_top; } + void reset_pre_dummy_top() { _pre_dummy_top = nullptr; } // Returns true iff the given the heap region contains the // given address as part of an allocated object. This may @@ -128,13 +128,13 @@ private: void mangle_unused_area() PRODUCT_RETURN; // Try to allocate at least min_word_size and up to desired_size from this region. 
- // Returns NULL if not possible, otherwise sets actual_word_size to the amount of + // Returns null if not possible, otherwise sets actual_word_size to the amount of // space allocated. // This version assumes that all allocation requests to this HeapRegion are properly // synchronized. inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size); // Try to allocate at least min_word_size and up to desired_size from this HeapRegion. - // Returns NULL if not possible, otherwise sets actual_word_size to the amount of + // Returns null if not possible, otherwise sets actual_word_size to the amount of // space allocated. // This version synchronizes with other calls to par_allocate_impl(). inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size); @@ -269,7 +269,7 @@ private: // object and apply the given closure to them. // Humongous objects are allocated directly in the old-gen. So we need special // handling for concurrent processing encountering an in-progress allocation. - // Returns the address after the last actually scanned or NULL if the area could + // Returns the address after the last actually scanned or null if the area could // not be scanned (That should only happen when invoked concurrently with the // mutator). template @@ -312,8 +312,8 @@ public: // Returns whether a field is in the same region as the obj it points to. template static bool is_in_same_region(T* p, oop obj) { - assert(p != NULL, "p can't be NULL"); - assert(obj != NULL, "obj can't be NULL"); + assert(p != nullptr, "p can't be null"); + assert(obj != nullptr, "obj can't be null"); return (((uintptr_t) p ^ cast_from_oop(obj)) >> LogOfHRGrainBytes) == 0; } @@ -375,11 +375,10 @@ public: // During the concurrent scrubbing phase, can there be any areas with unloaded // classes or dead objects in this region? 
- // This set only includes old and open archive regions - humongous regions only - // contain a single object which is either dead or live, contents of closed archive - // regions never die (so is always contiguous), and young regions are never even + // This set only includes old regions - humongous regions only + // contain a single object which is either dead or live, and young regions are never even // considered during concurrent scrub. - bool needs_scrubbing() const { return is_old() || is_open_archive(); } + bool needs_scrubbing() const { return is_old(); } // Same question as above, during full gc. Full gc needs to scrub any region that // might be skipped for compaction. This includes young generation regions as the // region relabeling to old happens later than scrubbing. @@ -403,19 +402,6 @@ public: bool is_old_or_humongous() const { return _type.is_old_or_humongous(); } - bool is_old_or_humongous_or_archive() const { return _type.is_old_or_humongous_or_archive(); } - - // A pinned region contains objects which are not moved by garbage collections. - // Humongous regions and archive regions are pinned. - bool is_pinned() const { return _type.is_pinned(); } - - // An archive region is a pinned region, also tagged as old, which - // should not be marked during mark/sweep. This allows the address - // space to be shared by JVM instances. - bool is_archive() const { return _type.is_archive(); } - bool is_open_archive() const { return _type.is_open_archive(); } - bool is_closed_archive() const { return _type.is_closed_archive(); } - void set_free(); void set_eden(); @@ -425,9 +411,6 @@ public: void move_to_old(); void set_old(); - void set_open_archive(); - void set_closed_archive(); - // For a humongous region, region in which it starts. HeapRegion* humongous_start_region() const { return _humongous_start_region; @@ -477,8 +460,8 @@ public: // available in non-product builds. 
#ifdef ASSERT void set_containing_set(HeapRegionSetBase* containing_set) { - assert((containing_set != NULL && _containing_set == NULL) || - containing_set == NULL, + assert((containing_set != nullptr && _containing_set == nullptr) || + containing_set == nullptr, "containing_set: " PTR_FORMAT " " "_containing_set: " PTR_FORMAT, p2i(containing_set), p2i(_containing_set)); @@ -559,7 +542,7 @@ public: // mr must not be empty. Must be trimmed to the allocated/parseable space in this region. // This region must be old or humongous. // Returns the next unscanned address if the designated objects were successfully - // processed, NULL if an unparseable part of the heap was encountered (That should + // processed, null if an unparseable part of the heap was encountered (That should // only happen when invoked concurrently with the mutator). template inline HeapWord* oops_on_memregion_seq_iterate_careful(MemRegion mr, Closure* cl); diff --git a/src/hotspot/share/gc/g1/heapRegion.inline.hpp b/src/hotspot/share/gc/g1/heapRegion.inline.hpp index 4b87d0c9284..a3ea47c83a8 100644 --- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp +++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,7 @@ #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp" #include "gc/g1/g1MonotonicArena.inline.hpp" +#include "gc/g1/g1Policy.hpp" #include "gc/g1/g1Predictions.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" @@ -54,7 +55,7 @@ inline HeapWord* HeapRegion::allocate_impl(size_t min_word_size, *actual_size = want_to_allocate; return obj; } else { - return NULL; + return nullptr; } } @@ -77,7 +78,7 @@ inline HeapWord* HeapRegion::par_allocate_impl(size_t min_word_size, return obj; } } else { - return NULL; + return nullptr; } } while (true); } @@ -142,11 +143,6 @@ inline bool HeapRegion::block_is_obj(const HeapWord* const p, HeapWord* const pb inline bool HeapRegion::is_obj_dead(const oop obj, HeapWord* const pb) const { assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj)); - // Objects in closed archive regions are always live. - if (is_closed_archive()) { - return false; - } - // From Remark until a region has been concurrently scrubbed, parts of the // region is not guaranteed to be parsable. Use the bitmap for liveness. if (obj_in_unparsable_area(obj, pb)) { @@ -182,7 +178,7 @@ inline size_t HeapRegion::block_size(const HeapWord* p, HeapWord* const pb) cons inline void HeapRegion::reset_compacted_after_full_gc(HeapWord* new_top) { set_top(new_top); - // After a compaction the mark bitmap in a non-pinned regions is invalid. + // After a compaction the mark bitmap in a movable region is invalid. // But all objects are live, we get this by setting TAMS to bottom. 
init_top_at_mark_start(); @@ -294,10 +290,7 @@ inline void HeapRegion::reset_parsable_bottom() { } inline void HeapRegion::note_start_of_marking() { - assert(!is_closed_archive() || top_at_mark_start() == bottom(), "CA region's TAMS must always be at bottom"); - if (!is_closed_archive()) { - set_top_at_mark_start(top()); - } + set_top_at_mark_start(top()); _gc_efficiency = -1.0; } @@ -343,14 +336,14 @@ HeapWord* HeapRegion::do_oops_on_memregion_in_humongous(MemRegion mr, HeapRegion* sr = humongous_start_region(); oop obj = cast_to_oop(sr->bottom()); - // If concurrent and klass_or_null is NULL, then space has been + // If concurrent and klass_or_null is null, then space has been // allocated but the object has not yet been published by setting // the klass. That can only happen if the card is stale. However, // we've already set the card clean, so we must return failure, // since the allocating thread could have performed a write to the // card that might be missed otherwise. - if (!in_gc_pause && (obj->klass_or_null_acquire() == NULL)) { - return NULL; + if (!in_gc_pause && (obj->klass_or_null_acquire() == nullptr)) { + return nullptr; } // We have a well-formed humongous object at the start of sr. @@ -496,10 +489,10 @@ HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr, if (is_humongous()) { return do_oops_on_memregion_in_humongous(mr, cl); } - assert(is_old() || is_archive(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str()); + assert(is_old(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str()); // Because mr has been trimmed to what's been allocated in this - // region, the objects in these parts of the heap have non-NULL + // region, the objects in these parts of the heap have non-null // klass pointers. There's no need to use klass_or_null to detect // in-progress allocation. 
// We might be in the progress of scrubbing this region and in this @@ -520,7 +513,7 @@ inline bool HeapRegion::has_valid_age_in_surv_rate() const { } inline bool HeapRegion::has_surv_rate_group() const { - return _surv_rate_group != NULL; + return _surv_rate_group != nullptr; } inline double HeapRegion::surv_rate_prediction(G1Predictions const& predictor) const { @@ -529,7 +522,7 @@ inline double HeapRegion::surv_rate_prediction(G1Predictions const& predictor) c } inline void HeapRegion::install_surv_rate_group(G1SurvRateGroup* surv_rate_group) { - assert(surv_rate_group != NULL, "pre-condition"); + assert(surv_rate_group != nullptr, "pre-condition"); assert(!has_surv_rate_group(), "pre-condition"); assert(is_young(), "pre-condition"); @@ -542,7 +535,7 @@ inline void HeapRegion::uninstall_surv_rate_group() { assert(has_valid_age_in_surv_rate(), "pre-condition"); assert(is_young(), "pre-condition"); - _surv_rate_group = NULL; + _surv_rate_group = nullptr; _age_index = G1SurvRateGroup::InvalidAgeIndex; } else { assert(!has_valid_age_in_surv_rate(), "pre-condition"); diff --git a/src/hotspot/share/gc/g1/heapRegionManager.cpp b/src/hotspot/share/gc/g1/heapRegionManager.cpp index 41421101e09..fc445142bd5 100644 --- a/src/hotspot/share/gc/g1/heapRegionManager.cpp +++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -62,12 +62,12 @@ public: }; HeapRegionManager::HeapRegionManager() : - _bot_mapper(NULL), - _cardtable_mapper(NULL), + _bot_mapper(nullptr), + _cardtable_mapper(nullptr), _committed_map(), _allocated_heapregions_length(0), - _regions(), _heap_mapper(NULL), - _bitmap_mapper(NULL), + _regions(), _heap_mapper(nullptr), + _bitmap_mapper(nullptr), _free_list("Free list", new MasterFreeRegionListChecker()) { } @@ -90,7 +90,7 @@ void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage, } HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) { - HeapRegion* hr = NULL; + HeapRegion* hr = nullptr; bool from_head = !type.is_young(); G1NUMA* numa = G1NUMA::numa(); @@ -99,14 +99,14 @@ HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint re hr = _free_list.remove_region_with_node_index(from_head, requested_node_index); } - if (hr == NULL) { + if (hr == nullptr) { // If there's a single active node or we did not get a region from our requested node, // try without requested node index. 
hr = _free_list.remove_region(from_head); } - if (hr != NULL) { - assert(hr->next() == NULL, "Single region should not have next"); + if (hr != nullptr) { + assert(hr->next() == nullptr, "Single region should not have next"); assert(is_available(hr->hrm_index()), "Must be committed"); if (numa->is_enabled() && hr->node_index() < numa->num_active_nodes()) { @@ -120,7 +120,7 @@ HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint re HeapRegion* HeapRegionManager::allocate_humongous_from_free_list(uint num_regions) { uint candidate = find_contiguous_in_free_list(num_regions); if (candidate == G1_NO_HRM_INDEX) { - return NULL; + return nullptr; } return allocate_free_regions_starting_at(candidate, num_regions); } @@ -128,7 +128,7 @@ HeapRegion* HeapRegionManager::allocate_humongous_from_free_list(uint num_region HeapRegion* HeapRegionManager::allocate_humongous_allow_expand(uint num_regions) { uint candidate = find_contiguous_allow_expand(num_regions); if (candidate == G1_NO_HRM_INDEX) { - return NULL; + return nullptr; } expand_exact(candidate, num_regions, G1CollectedHeap::heap()->workers()); return allocate_free_regions_starting_at(candidate, num_regions); @@ -164,7 +164,7 @@ void HeapRegionManager::expand(uint start, uint num_regions, WorkerThreads* pret commit_regions(start, num_regions, pretouch_workers); for (uint i = start; i < start + num_regions; i++) { HeapRegion* hr = _regions.get_by_index(i); - if (hr == NULL) { + if (hr == nullptr) { hr = new_heap_region(i); OrderAccess::storestore(); _regions.set_by_index(i, hr); @@ -198,7 +198,7 @@ void HeapRegionManager::uncommit_regions(uint start, uint num_regions) { for (uint i = start; i < end; i++) { // Can't use at() here since region is no longer marked available. 
HeapRegion* hr = _regions.get_by_index(i); - assert(hr != NULL, "Region should still be present"); + assert(hr != nullptr, "Region should still be present"); printer->uncommit(hr); } } @@ -416,7 +416,7 @@ uint HeapRegionManager::expand_on_preferred_node(uint preferred_index) { return 0; } - expand_exact(expand_candidate, 1, NULL); + expand_exact(expand_candidate, 1, nullptr); return 1; } @@ -487,7 +487,7 @@ uint HeapRegionManager::find_contiguous_allow_expand(uint num_regions) { } HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const { - guarantee(r != NULL, "Start region must be a valid region"); + guarantee(r != nullptr, "Start region must be a valid region"); guarantee(is_available(r->hrm_index()), "Trying to iterate starting from region %u which is not in the heap", r->hrm_index()); for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) { HeapRegion* hr = _regions.get_by_index(i); @@ -495,7 +495,7 @@ HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const { return hr; } } - return NULL; + return nullptr; } void HeapRegionManager::iterate(HeapRegionClosure* blk) const { @@ -505,7 +505,7 @@ void HeapRegionManager::iterate(HeapRegionClosure* blk) const { if (!is_available(i)) { continue; } - guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i); + guarantee(at(i) != nullptr, "Tried to access region %u that has a null HeapRegion*", i); bool res = blk->do_heap_region(at(i)); if (res) { blk->set_incomplete(); @@ -535,9 +535,9 @@ uint HeapRegionManager::find_highest_free(bool* expanded) { // committed, expand at that index. for (uint curr = reserved_length(); curr-- > 0;) { HeapRegion *hr = _regions.get_by_index(curr); - if (hr == NULL || !is_available(curr)) { + if (hr == nullptr || !is_available(curr)) { // Found uncommitted and free region, expand to make it available for use. 
- expand_exact(curr, 1, NULL); + expand_exact(curr, 1, nullptr); assert(at(curr)->is_free(), "Region (%u) must be available and free after expand", curr); *expanded = true; @@ -647,7 +647,7 @@ void HeapRegionManager::shrink_at(uint index, size_t num_regions) { uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const { guarantee(start_idx < _allocated_heapregions_length, "checking"); - guarantee(res_idx != NULL, "checking"); + guarantee(res_idx != nullptr, "checking"); uint num_regions_found = 0; @@ -695,7 +695,7 @@ void HeapRegionManager::verify() { } num_committed++; HeapRegion* hr = _regions.get_by_index(i); - guarantee(hr != NULL, "invariant: i: %u", i); + guarantee(hr != nullptr, "invariant: i: %u", i); guarantee(!prev_committed || hr->bottom() == prev_end, "invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT, i, HR_FORMAT_PARAMS(hr), p2i(prev_end)); @@ -711,7 +711,7 @@ void HeapRegionManager::verify() { prev_end = hr->end(); } for (uint i = _allocated_heapregions_length; i < reserved_length(); i++) { - guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i); + guarantee(_regions.get_by_index(i) == nullptr, "invariant i: %u", i); } guarantee(num_committed == length(), "Found %u committed regions, but should be %u", num_committed, length()); @@ -725,7 +725,7 @@ void HeapRegionManager::verify_optional() { #endif // PRODUCT HeapRegionClaimer::HeapRegionClaimer(uint n_workers) : - _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) { + _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(nullptr) { uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC); memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions); _claims = new_claims; @@ -798,7 +798,7 @@ public: FreeRegionList *free_list = worker_freelist(worker_id); for (uint i = start; i < end; i++) { HeapRegion *region = _hrm->at_or_null(i); - if (region != 
NULL && region->is_free()) { + if (region != nullptr && region->is_free()) { // Need to clear old links to allow to be added to new freelist. region->unlink_from_list(); free_list->add_to_tail(region); diff --git a/src/hotspot/share/gc/g1/heapRegionManager.hpp b/src/hotspot/share/gc/g1/heapRegionManager.hpp index 79a6ae9a15e..ad985e1f852 100644 --- a/src/hotspot/share/gc/g1/heapRegionManager.hpp +++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,7 @@ class WorkerThreads; class G1HeapRegionTable : public G1BiasedMappedArray { protected: - virtual HeapRegion* default_value() const { return NULL; } + virtual HeapRegion* default_value() const { return nullptr; } }; // This class keeps track of the actual heap memory, auxiliary data @@ -88,7 +88,7 @@ class HeapRegionManager: public CHeapObj { HeapWord* heap_end() const {return _regions.end_address_mapped(); } // Pass down commit calls to the VirtualSpace. - void commit_regions(uint index, size_t num_regions = 1, WorkerThreads* pretouch_workers = NULL); + void commit_regions(uint index, size_t num_regions = 1, WorkerThreads* pretouch_workers = nullptr); // Initialize the HeapRegions in the range and put them on the free list. void initialize_regions(uint start, uint num_regions); @@ -125,7 +125,7 @@ class HeapRegionManager: public CHeapObj { G1RegionToSpaceMapper* _bitmap_mapper; FreeRegionList _free_list; - void expand(uint index, uint num_regions, WorkerThreads* pretouch_workers = NULL); + void expand(uint index, uint num_regions, WorkerThreads* pretouch_workers = nullptr); // G1RegionCommittedMap helpers. 
These functions do the work that comes with // the state changes tracked by G1CommittedRegionMap. To make sure this is @@ -174,7 +174,7 @@ public: // is valid. inline HeapRegion* at(uint index) const; - // Return the HeapRegion at the given index, NULL if the index + // Return the HeapRegion at the given index, null if the index // is for an unavailable region. inline HeapRegion* at_or_null(uint index) const; @@ -186,7 +186,7 @@ public: inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const; // If addr is within the committed space return its corresponding - // HeapRegion, otherwise return NULL. + // HeapRegion, otherwise return null. inline HeapRegion* addr_to_region(HeapWord* addr) const; // Insert the given region into the free region list. diff --git a/src/hotspot/share/gc/g1/heapRegionManager.inline.hpp b/src/hotspot/share/gc/g1/heapRegionManager.inline.hpp index f4e587fa32b..1bcba292ffa 100644 --- a/src/hotspot/share/gc/g1/heapRegionManager.inline.hpp +++ b/src/hotspot/share/gc/g1/heapRegionManager.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -46,17 +46,17 @@ inline HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const { inline HeapRegion* HeapRegionManager::at(uint index) const { assert(is_available(index), "pre-condition"); HeapRegion* hr = _regions.get_by_index(index); - assert(hr != NULL, "sanity"); + assert(hr != nullptr, "sanity"); assert(hr->hrm_index() == index, "sanity"); return hr; } inline HeapRegion* HeapRegionManager::at_or_null(uint index) const { if (!is_available(index)) { - return NULL; + return nullptr; } HeapRegion* hr = _regions.get_by_index(index); - assert(hr != NULL, "All available regions must have a HeapRegion but index %u has not.", index); + assert(hr != nullptr, "All available regions must have a HeapRegion but index %u has not.", index); assert(hr->hrm_index() == index, "sanity"); return hr; } @@ -69,7 +69,7 @@ inline HeapRegion* HeapRegionManager::next_region_in_humongous(HeapRegion* hr) c if (index < reserved_length() && is_available(index) && at(index)->is_continues_humongous()) { return at(index); } else { - return NULL; + return nullptr; } } diff --git a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp index be5535a00da..8289cdf553b 100644 --- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp +++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp @@ -103,7 +103,7 @@ void HeapRegionRemSet::print_static_mem_size(outputStream* out) { // When not at safepoint the CodeCache_lock must be held during modifications. void HeapRegionRemSet::add_code_root(nmethod* nm) { - assert(nm != NULL, "sanity"); + assert(nm != nullptr, "sanity"); assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()), "should call add_code_root_locked instead. 
CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s", BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint())); @@ -113,7 +113,7 @@ void HeapRegionRemSet::add_code_root(nmethod* nm) { } void HeapRegionRemSet::add_code_root_locked(nmethod* nm) { - assert(nm != NULL, "sanity"); + assert(nm != nullptr, "sanity"); assert((CodeCache_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && (_m.owned_by_self() || Thread::current()->is_VM_thread()))), @@ -127,10 +127,10 @@ void HeapRegionRemSet::add_code_root_locked(nmethod* nm) { } void HeapRegionRemSet::remove_code_root(nmethod* nm) { - assert(nm != NULL, "sanity"); + assert(nm != nullptr, "sanity"); assert_locked_or_safepoint(CodeCache_lock); - MutexLocker ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag); + MutexLocker ml(CodeCache_lock->owned_by_self() ? nullptr : &_m, Mutex::_no_safepoint_check_flag); _code_roots.remove(nm); // Check that there were no duplicates diff --git a/src/hotspot/share/gc/g1/heapRegionSet.cpp b/src/hotspot/share/gc/g1/heapRegionSet.cpp index a4fbe33d580..d7100ef7cb7 100644 --- a/src/hotspot/share/gc/g1/heapRegionSet.cpp +++ b/src/hotspot/share/gc/g1/heapRegionSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,11 +34,10 @@ uint FreeRegionList::_unrealistically_long_length = 0; void HeapRegionSetBase::verify_region(HeapRegion* hr) { assert(hr->containing_set() == this, "Inconsistent containing set for %u", hr->hrm_index()); assert(!hr->is_young(), "Adding young region %u", hr->hrm_index()); // currently we don't use these sets for young regions - assert(_checker == NULL || _checker->is_correct_type(hr), "Wrong type of region %u (%s) and set %s", + assert(_checker == nullptr || _checker->is_correct_type(hr), "Wrong type of region %u (%s) and set %s", hr->hrm_index(), hr->get_type_str(), name()); assert(!hr->is_free() || hr->is_empty(), "Free region %u is not empty for set %s", hr->hrm_index(), name()); - assert(!hr->is_empty() || hr->is_free() || hr->is_archive(), - "Empty region %u is not free or archive for set %s", hr->hrm_index(), name()); + assert(!hr->is_empty() || hr->is_free(), "Empty region %u is not free or old for set %s", hr->hrm_index(), name()); } #endif @@ -101,13 +100,13 @@ void FreeRegionList::remove_all() { verify_optional(); HeapRegion* curr = _head; - while (curr != NULL) { + while (curr != nullptr) { verify_region(curr); HeapRegion* next = curr->next(); - curr->set_next(NULL); - curr->set_prev(NULL); - curr->set_containing_set(NULL); + curr->set_next(nullptr); + curr->set_prev(nullptr); + curr->set_containing_set(nullptr); decrease_length(curr->node_index()); @@ -128,7 +127,7 @@ void FreeRegionList::add_list_common_start(FreeRegionList* from_list) { return; } - if (_node_info != NULL && from_list->_node_info != NULL) { + if (_node_info != nullptr && from_list->_node_info != nullptr) { _node_info->add(from_list->_node_info); } @@ -137,9 +136,9 @@ void FreeRegionList::add_list_common_start(FreeRegionList* from_list) { while (iter.more_available()) { HeapRegion* hr = iter.get_next(); // In set_containing_set() we check that we either set the value - // from NULL to non-NULL or 
vice versa to catch bugs. So, we have - // to NULL it first before setting it to the value. - hr->set_containing_set(NULL); + // from null to non-null or vice versa to catch bugs. So, we have + // to null it first before setting it to the value. + hr->set_containing_set(nullptr); hr->set_containing_set(this); } #endif // ASSERT @@ -162,7 +161,7 @@ void FreeRegionList::append_ordered(FreeRegionList* from_list) { if (is_empty()) { // Make from_list the current list. - assert_free_region_list(length() == 0 && _tail == NULL, "invariant"); + assert_free_region_list(length() == 0 && _tail == nullptr, "invariant"); _head = from_list->_head; _tail = from_list->_tail; } else { @@ -186,29 +185,29 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) { } if (is_empty()) { - assert_free_region_list(length() == 0 && _tail == NULL, "invariant"); + assert_free_region_list(length() == 0 && _tail == nullptr, "invariant"); _head = from_list->_head; _tail = from_list->_tail; } else { HeapRegion* curr_to = _head; HeapRegion* curr_from = from_list->_head; - while (curr_from != NULL) { - while (curr_to != NULL && curr_to->hrm_index() < curr_from->hrm_index()) { + while (curr_from != nullptr) { + while (curr_to != nullptr && curr_to->hrm_index() < curr_from->hrm_index()) { curr_to = curr_to->next(); } - if (curr_to == NULL) { + if (curr_to == nullptr) { // The rest of the from list should be added as tail _tail->set_next(curr_from); curr_from->set_prev(_tail); - curr_from = NULL; + curr_from = nullptr; } else { HeapRegion* next_from = curr_from->next(); curr_from->set_next(curr_to); curr_from->set_prev(curr_to->prev()); - if (curr_to->prev() == NULL) { + if (curr_to->prev() == nullptr) { _head = curr_from; } else { curr_to->prev()->set_next(curr_from); @@ -230,14 +229,14 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) { #ifdef ASSERT void FreeRegionList::verify_region_to_remove(HeapRegion* curr, HeapRegion* next) { assert_free_region_list(_head != next, 
"invariant"); - if (next != NULL) { + if (next != nullptr) { assert_free_region_list(next->prev() == curr, "invariant"); assert_free_region_list(_tail != curr, "invariant"); } else { assert_free_region_list(_tail == curr, "invariant"); } HeapRegion* prev = curr->prev(); - if (prev == NULL) { + if (prev == nullptr) { assert_free_region_list(_head == curr, "invariant"); } else { assert_free_region_list(_head != curr, "invariant"); @@ -269,11 +268,11 @@ void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) { verify_region_to_remove(curr, next); if (_last == curr) { - _last = NULL; + _last = nullptr; } - curr->set_next(NULL); - curr->set_prev(NULL); + curr->set_next(nullptr); + curr->set_prev(nullptr); remove(curr); count++; @@ -283,12 +282,12 @@ void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) { curr = next; } - if (prev == NULL) { + if (prev == nullptr) { _head = next; } else { prev->set_next(next); } - if (next == NULL) { + if (next == nullptr) { _tail = prev; } else { next->set_prev(prev); @@ -320,25 +319,25 @@ void FreeRegionList::verify() { void FreeRegionList::clear() { _length = 0; - _head = NULL; - _tail = NULL; - _last = NULL; + _head = nullptr; + _tail = nullptr; + _last = nullptr; - if (_node_info!= NULL) { + if (_node_info!= nullptr) { _node_info->clear(); } } void FreeRegionList::verify_list() { HeapRegion* curr = _head; - HeapRegion* prev1 = NULL; - HeapRegion* prev0 = NULL; + HeapRegion* prev1 = nullptr; + HeapRegion* prev0 = nullptr; uint count = 0; size_t capacity = 0; uint last_index = 0; - guarantee(_head == NULL || _head->prev() == NULL, "_head should not have a prev"); - while (curr != NULL) { + guarantee(_head == nullptr || _head->prev() == nullptr, "_head should not have a prev"); + while (curr != nullptr) { verify_region(curr); count++; @@ -346,7 +345,7 @@ void FreeRegionList::verify_list() { "[%s] the calculated length: %u seems very long, is there maybe a cycle? 
curr: " PTR_FORMAT " prev0: " PTR_FORMAT " " "prev1: " PTR_FORMAT " length: %u", name(), count, p2i(curr), p2i(prev0), p2i(prev1), length()); - if (curr->next() != NULL) { + if (curr->next() != nullptr) { guarantee(curr->next()->prev() == curr, "Next or prev pointers messed up"); } guarantee(curr->hrm_index() == 0 || curr->hrm_index() > last_index, "List should be sorted"); @@ -360,25 +359,25 @@ void FreeRegionList::verify_list() { } guarantee(_tail == prev0, "Expected %s to end with %u but it ended with %u.", name(), _tail->hrm_index(), prev0->hrm_index()); - guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next"); + guarantee(_tail == nullptr || _tail->next() == nullptr, "_tail should not have a next"); guarantee(length() == count, "%s count mismatch. Expected %u, actual %u.", name(), length(), count); } FreeRegionList::FreeRegionList(const char* name, HeapRegionSetChecker* checker): HeapRegionSetBase(name, checker), - _node_info(G1NUMA::numa()->is_enabled() ? new NodeInfo() : NULL) { + _node_info(G1NUMA::numa()->is_enabled() ? new NodeInfo() : nullptr) { clear(); } FreeRegionList::~FreeRegionList() { - if (_node_info != NULL) { + if (_node_info != nullptr) { delete _node_info; } } -FreeRegionList::NodeInfo::NodeInfo() : _numa(G1NUMA::numa()), _length_of_node(NULL), +FreeRegionList::NodeInfo::NodeInfo() : _numa(G1NUMA::numa()), _length_of_node(nullptr), _num_nodes(_numa->num_active_nodes()) { assert(UseNUMA, "Invariant"); diff --git a/src/hotspot/share/gc/g1/heapRegionSet.hpp b/src/hotspot/share/gc/g1/heapRegionSet.hpp index 323c54614dc..5db2fbb696a 100644 --- a/src/hotspot/share/gc/g1/heapRegionSet.hpp +++ b/src/hotspot/share/gc/g1/heapRegionSet.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -82,7 +82,7 @@ protected: void verify_region(HeapRegion* hr) PRODUCT_RETURN; void check_mt_safety() { - if (_checker != NULL) { + if (_checker != nullptr) { _checker->check_mt_safety(); } } @@ -190,7 +190,7 @@ protected: virtual void clear(); public: - FreeRegionList(const char* name, HeapRegionSetChecker* checker = NULL); + FreeRegionList(const char* name, HeapRegionSetChecker* checker = nullptr); ~FreeRegionList(); void verify_list(); @@ -249,7 +249,7 @@ private: public: bool more_available() { - return _curr != NULL; + return _curr != nullptr; } HeapRegion* get_next() { diff --git a/src/hotspot/share/gc/g1/heapRegionSet.inline.hpp b/src/hotspot/share/gc/g1/heapRegionSet.inline.hpp index 1f509ff3e31..0d7c7ed315d 100644 --- a/src/hotspot/share/gc/g1/heapRegionSet.inline.hpp +++ b/src/hotspot/share/gc/g1/heapRegionSet.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,9 +31,9 @@ inline void HeapRegionSetBase::add(HeapRegion* hr) { check_mt_safety(); - assert_heap_region_set(hr->containing_set() == NULL, "should not already have a containing set"); - assert_heap_region_set(hr->next() == NULL, "should not already be linked"); - assert_heap_region_set(hr->prev() == NULL, "should not already be linked"); + assert_heap_region_set(hr->containing_set() == nullptr, "should not already have a containing set"); + assert_heap_region_set(hr->next() == nullptr, "should not already be linked"); + assert_heap_region_set(hr->prev() == nullptr, "should not already be linked"); _length++; hr->set_containing_set(this); @@ -43,23 +43,23 @@ inline void HeapRegionSetBase::add(HeapRegion* hr) { inline void HeapRegionSetBase::remove(HeapRegion* hr) { check_mt_safety(); verify_region(hr); - assert_heap_region_set(hr->next() == NULL, "should already be unlinked"); - assert_heap_region_set(hr->prev() == NULL, "should already be unlinked"); + assert_heap_region_set(hr->next() == nullptr, "should already be unlinked"); + assert_heap_region_set(hr->prev() == nullptr, "should already be unlinked"); - hr->set_containing_set(NULL); + hr->set_containing_set(nullptr); assert_heap_region_set(_length > 0, "pre-condition"); _length--; } inline void FreeRegionList::add_to_tail(HeapRegion* region_to_add) { - assert_free_region_list((length() == 0 && _head == NULL && _tail == NULL && _last == NULL) || - (length() > 0 && _head != NULL && _tail != NULL && _tail->hrm_index() < region_to_add->hrm_index()), + assert_free_region_list((length() == 0 && _head == nullptr && _tail == nullptr && _last == nullptr) || + (length() > 0 && _head != nullptr && _tail != nullptr && _tail->hrm_index() < region_to_add->hrm_index()), "invariant"); // add() will verify the region and check mt safety. add(region_to_add); - if (_head != NULL) { - // Link into list, next is already NULL, no need to set. 
+ if (_head != nullptr) { + // Link into list, next is already null, no need to set. region_to_add->set_prev(_tail); _tail->set_next(region_to_add); _tail = region_to_add; @@ -72,37 +72,37 @@ inline void FreeRegionList::add_to_tail(HeapRegion* region_to_add) { } inline void FreeRegionList::add_ordered(HeapRegion* hr) { - assert_free_region_list((length() == 0 && _head == NULL && _tail == NULL && _last == NULL) || - (length() > 0 && _head != NULL && _tail != NULL), + assert_free_region_list((length() == 0 && _head == nullptr && _tail == nullptr && _last == nullptr) || + (length() > 0 && _head != nullptr && _tail != nullptr), "invariant"); // add() will verify the region and check mt safety. add(hr); // Now link the region - if (_head != NULL) { + if (_head != nullptr) { HeapRegion* curr; - if (_last != NULL && _last->hrm_index() < hr->hrm_index()) { + if (_last != nullptr && _last->hrm_index() < hr->hrm_index()) { curr = _last; } else { curr = _head; } // Find first entry with a Region Index larger than entry to insert. 
- while (curr != NULL && curr->hrm_index() < hr->hrm_index()) { + while (curr != nullptr && curr->hrm_index() < hr->hrm_index()) { curr = curr->next(); } hr->set_next(curr); - if (curr == NULL) { + if (curr == nullptr) { // Adding at the end hr->set_prev(_tail); _tail->set_next(hr); _tail = hr; - } else if (curr->prev() == NULL) { + } else if (curr->prev() == nullptr) { // Adding at the beginning - hr->set_prev(NULL); + hr->set_prev(nullptr); _head = hr; curr->set_prev(hr); } else { @@ -123,12 +123,12 @@ inline void FreeRegionList::add_ordered(HeapRegion* hr) { inline HeapRegion* FreeRegionList::remove_from_head_impl() { HeapRegion* result = _head; _head = result->next(); - if (_head == NULL) { - _tail = NULL; + if (_head == nullptr) { + _tail = nullptr; } else { - _head->set_prev(NULL); + _head->set_prev(nullptr); } - result->set_next(NULL); + result->set_next(nullptr); return result; } @@ -136,12 +136,12 @@ inline HeapRegion* FreeRegionList::remove_from_tail_impl() { HeapRegion* result = _tail; _tail = result->prev(); - if (_tail == NULL) { - _head = NULL; + if (_tail == nullptr) { + _head = nullptr; } else { - _tail->set_next(NULL); + _tail->set_next(nullptr); } - result->set_prev(NULL); + result->set_prev(nullptr); return result; } @@ -150,9 +150,9 @@ inline HeapRegion* FreeRegionList::remove_region(bool from_head) { verify_optional(); if (is_empty()) { - return NULL; + return nullptr; } - assert_free_region_list(length() > 0 && _head != NULL && _tail != NULL, "invariant"); + assert_free_region_list(length() > 0 && _head != nullptr && _tail != nullptr, "invariant"); HeapRegion* hr; @@ -163,7 +163,7 @@ inline HeapRegion* FreeRegionList::remove_region(bool from_head) { } if (_last == hr) { - _last = NULL; + _last = nullptr; } // remove() will verify the region and check mt safety. 
@@ -185,7 +185,7 @@ inline HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_head, size_t cur_depth = 0; if (from_head) { for (cur = _head; - cur != NULL && cur_depth < max_search_depth; + cur != nullptr && cur_depth < max_search_depth; cur = cur->next(), ++cur_depth) { if (requested_node_index == cur->node_index()) { break; @@ -193,7 +193,7 @@ inline HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_head, } } else { for (cur = _tail; - cur != NULL && cur_depth < max_search_depth; + cur != nullptr && cur_depth < max_search_depth; cur = cur->prev(), ++cur_depth) { if (requested_node_index == cur->node_index()) { break; @@ -202,28 +202,28 @@ inline HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_head, } // Didn't find a region to use. - if (cur == NULL || cur_depth >= max_search_depth) { - return NULL; + if (cur == nullptr || cur_depth >= max_search_depth) { + return nullptr; } // Splice the region out of the list. HeapRegion* prev = cur->prev(); HeapRegion* next = cur->next(); - if (prev == NULL) { + if (prev == nullptr) { _head = next; } else { prev->set_next(next); } - if (next == NULL) { + if (next == nullptr) { _tail = prev; } else { next->set_prev(prev); } - cur->set_prev(NULL); - cur->set_next(NULL); + cur->set_prev(nullptr); + cur->set_next(nullptr); if (_last == cur) { - _last = NULL; + _last = nullptr; } remove(cur); @@ -252,19 +252,19 @@ inline uint FreeRegionList::NodeInfo::length(uint node_index) const { } inline void FreeRegionList::increase_length(uint node_index) { - if (_node_info != NULL) { + if (_node_info != nullptr) { return _node_info->increase_length(node_index); } } inline void FreeRegionList::decrease_length(uint node_index) { - if (_node_info != NULL) { + if (_node_info != nullptr) { return _node_info->decrease_length(node_index); } } inline uint FreeRegionList::length(uint node_index) const { - if (_node_info != NULL) { + if (_node_info != nullptr) { return 
_node_info->length(node_index); } else { return 0; diff --git a/src/hotspot/share/gc/g1/heapRegionType.cpp b/src/hotspot/share/gc/g1/heapRegionType.cpp index 35f11950eb9..bb5c4e9392c 100644 --- a/src/hotspot/share/gc/g1/heapRegionType.cpp +++ b/src/hotspot/share/gc/g1/heapRegionType.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,8 +39,6 @@ bool HeapRegionType::is_valid(Tag tag) { case StartsHumongousTag: case ContinuesHumongousTag: case OldTag: - case OpenArchiveTag: - case ClosedArchiveTag: return true; default: return false; @@ -56,11 +54,9 @@ const char* HeapRegionType::get_str() const { case StartsHumongousTag: return "HUMS"; case ContinuesHumongousTag: return "HUMC"; case OldTag: return "OLD"; - case OpenArchiveTag: return "OARC"; - case ClosedArchiveTag: return "CARC"; default: ShouldNotReachHere(); - return NULL; // keep some compilers happy + return nullptr; // keep some compilers happy } } @@ -73,11 +69,9 @@ const char* HeapRegionType::get_short_str() const { case StartsHumongousTag: return "HS"; case ContinuesHumongousTag: return "HC"; case OldTag: return "O"; - case OpenArchiveTag: return "OA"; - case ClosedArchiveTag: return "CA"; default: ShouldNotReachHere(); - return NULL; // keep some compilers happy + return nullptr; // keep some compilers happy } } @@ -90,8 +84,6 @@ G1HeapRegionTraceType::Type HeapRegionType::get_trace_type() { case StartsHumongousTag: return G1HeapRegionTraceType::StartsHumongous; case ContinuesHumongousTag: return G1HeapRegionTraceType::ContinuesHumongous; case OldTag: return G1HeapRegionTraceType::Old; - case OpenArchiveTag: return G1HeapRegionTraceType::OpenArchive; - case ClosedArchiveTag: return G1HeapRegionTraceType::ClosedArchive; default: 
ShouldNotReachHere(); return G1HeapRegionTraceType::Free; // keep some compilers happy diff --git a/src/hotspot/share/gc/g1/heapRegionType.hpp b/src/hotspot/share/gc/g1/heapRegionType.hpp index 9e1c79ea0d0..85a89f77a94 100644 --- a/src/hotspot/share/gc/g1/heapRegionType.hpp +++ b/src/hotspot/share/gc/g1/heapRegionType.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ private: // We encode the value of the heap region type so the generation can be // determined quickly. The tag is split into two parts: // - // major type (young, old, humongous, archive) : top N-1 bits + // major type (young, old, humongous) : top N-1 bits // minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit // // If there's need to increase the number of minor types in the @@ -52,15 +52,11 @@ private: // 00001 1 [ 3] Survivor // // 00010 0 [ 4] Humongous Mask - // 00100 0 [ 8] Pinned Mask - // 00110 0 [12] Starts Humongous - // 00110 1 [13] Continues Humongous + // 00010 0 [ 4] Starts Humongous + // 00010 1 [ 5] Continues Humongous // - // 01000 0 [16] Old Mask - // - // 10000 0 [32] Archive Mask - // 10100 0 [40] Open Archive - // 10100 1 [41] Closed Archive + // 00100 0 [ 8] Old Mask + // 00100 0 [ 8] Old // typedef enum { FreeTag = 0, @@ -70,25 +66,11 @@ private: SurvTag = YoungMask + 1, HumongousMask = 4, - PinnedMask = 8, - StartsHumongousTag = HumongousMask | PinnedMask, - ContinuesHumongousTag = HumongousMask | PinnedMask + 1, + StartsHumongousTag = HumongousMask, + ContinuesHumongousTag = HumongousMask + 1, - OldMask = 16, - OldTag = OldMask, - - // Archive regions are regions with immutable content (i.e. not reclaimed, and - // not allocated into during regular operation). 
They differ in the kind of references - // allowed for the contained objects: - // - Closed archive regions form a separate self-contained (closed) object graph - // within the set of all of these regions. No references outside of closed - // archive regions are allowed. - // - Open archive regions have no restrictions on the references of their objects. - // Objects within these regions are allowed to have references to objects - // contained in any other kind of regions. - ArchiveMask = 32, - OpenArchiveTag = ArchiveMask | PinnedMask, - ClosedArchiveTag = ArchiveMask | PinnedMask + 1 + OldMask = 8, + OldTag = OldMask } Tag; volatile Tag _tag; @@ -134,20 +116,10 @@ public: bool is_starts_humongous() const { return get() == StartsHumongousTag; } bool is_continues_humongous() const { return get() == ContinuesHumongousTag; } - bool is_archive() const { return (get() & ArchiveMask) != 0; } - bool is_open_archive() const { return get() == OpenArchiveTag; } - bool is_closed_archive() const { return get() == ClosedArchiveTag; } - - // is_old regions may or may not also be pinned bool is_old() const { return (get() & OldMask) != 0; } bool is_old_or_humongous() const { return (get() & (OldMask | HumongousMask)) != 0; } - bool is_old_or_humongous_or_archive() const { return (get() & (OldMask | HumongousMask | ArchiveMask)) != 0; } - - // is_pinned regions may be archive or humongous - bool is_pinned() const { return (get() & PinnedMask) != 0; } - // Setters void set_free() { set(FreeTag); } @@ -180,8 +152,6 @@ public: return true; } } - void set_open_archive() { set_from(OpenArchiveTag, FreeTag); } - void set_closed_archive() { set_from(ClosedArchiveTag, FreeTag); } // Misc diff --git a/src/hotspot/share/gc/g1/vmStructs_g1.hpp b/src/hotspot/share/gc/g1/vmStructs_g1.hpp index bf2a8762937..351c1baf5df 100644 --- a/src/hotspot/share/gc/g1/vmStructs_g1.hpp +++ b/src/hotspot/share/gc/g1/vmStructs_g1.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2020, Oracle and/or its 
affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,7 +57,6 @@ nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \ nonstatic_field(G1CollectedHeap, _monitoring_support, G1MonitoringSupport*) \ nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \ - nonstatic_field(G1CollectedHeap, _archive_set, HeapRegionSetBase) \ nonstatic_field(G1CollectedHeap, _humongous_set, HeapRegionSetBase) \ \ nonstatic_field(G1MonitoringSupport, _eden_space_committed, size_t) \ @@ -79,8 +78,6 @@ declare_constant(HeapRegionType::EdenTag) \ declare_constant(HeapRegionType::SurvTag) \ declare_constant(HeapRegionType::HumongousMask) \ - declare_constant(HeapRegionType::PinnedMask) \ - declare_constant(HeapRegionType::ArchiveMask) \ declare_constant(HeapRegionType::StartsHumongousTag) \ declare_constant(HeapRegionType::ContinuesHumongousTag) \ declare_constant(HeapRegionType::OldMask) \ diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 8d665d6bc60..39bbccf7785 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -69,22 +69,18 @@ jint ParallelScavengeHeap::initialize() { trace_actual_reserved_page_size(reserved_heap_size, heap_rs); initialize_reserved_region(heap_rs); - - PSCardTable* card_table = new PSCardTable(heap_rs.region()); - card_table->initialize(); - CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table); - barrier_set->initialize(); - BarrierSet::set_barrier_set(barrier_set); - - // Make up the generations - assert(MinOldSize <= OldSize && OldSize <= MaxOldSize, "Parameter check"); - assert(MinNewSize <= NewSize && NewSize <= MaxNewSize, "Parameter check"); - // Layout the reserved 
space for the generations. ReservedSpace old_rs = heap_rs.first_part(MaxOldSize); ReservedSpace young_rs = heap_rs.last_part(MaxOldSize); assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap"); + PSCardTable* card_table = new PSCardTable(heap_rs.region()); + card_table->initialize(old_rs.base(), young_rs.base()); + + CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table); + barrier_set->initialize(); + BarrierSet::set_barrier_set(barrier_set); + // Set up WorkerThreads _workers.initialize_workers(); @@ -151,8 +147,8 @@ void ParallelScavengeHeap::initialize_serviceability() { "PS Old Gen", true /* support_usage_threshold */); - _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC"); - _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC"); + _young_manager = new GCMemoryManager("PS Scavenge"); + _old_manager = new GCMemoryManager("PS MarkSweep"); _old_manager->add_pool(_eden_pool); _old_manager->add_pool(_survivor_pool); @@ -549,8 +545,26 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) { return; } - VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause); - VMThread::execute(&op); + while (true) { + VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause); + VMThread::execute(&op); + + if (!GCCause::is_explicit_full_gc(cause) || op.full_gc_succeeded()) { + return; + } + + { + MutexLocker ml(Heap_lock); + if (full_gc_count != total_full_collections()) { + return; + } + } + + if (GCLocker::is_active_and_needs_gc()) { + // If GCLocker is active, wait until clear before retrying. 
+ GCLocker::stall_until_clear(); + } + } } void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) { diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp index ab981c5851f..abf87b0e019 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp @@ -211,7 +211,7 @@ class ParallelScavengeHeap : public CollectedHeap { // will then attempt a full gc. The second collects the entire heap; if // maximum_compaction is true, it will compact everything and clear all soft // references. - inline void invoke_scavenge(); + inline bool invoke_scavenge(); // Perform a full collection void do_full_collection(bool clear_all_soft_refs) override; diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp index e852c11265f..ef5f6dedd29 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -35,8 +35,8 @@ inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const return size < eden_size / 2; } -inline void ParallelScavengeHeap::invoke_scavenge() { - PSScavenge::invoke(); +inline bool ParallelScavengeHeap::invoke_scavenge() { + return PSScavenge::invoke(); } inline bool ParallelScavengeHeap::is_in_young(const void* p) const { diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp index 7130cb7880a..3a882fb9b83 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -1678,7 +1678,7 @@ void PSParallelCompact::summary_phase(bool maximum_compaction) // may be true because this method can be called without intervening // activity. For example when the heap space is tight and full measure // are being taken to free space. -void PSParallelCompact::invoke(bool maximum_heap_compaction) { +bool PSParallelCompact::invoke(bool maximum_heap_compaction) { assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); @@ -1695,8 +1695,8 @@ void PSParallelCompact::invoke(bool maximum_heap_compaction) { const bool clear_all_soft_refs = heap->soft_ref_policy()->should_clear_all_soft_refs(); - PSParallelCompact::invoke_no_policy(clear_all_soft_refs || - maximum_heap_compaction); + return PSParallelCompact::invoke_no_policy(clear_all_soft_refs || + maximum_heap_compaction); } // This method contains no policy. 
You should probably @@ -1749,7 +1749,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { heap->pre_full_gc_dump(&_gc_timer); TraceCollectorStats tcs(counters()); - TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause); + TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC"); if (log_is_enabled(Debug, gc, heap, exit)) { accumulated_time()->start(); @@ -2067,7 +2067,10 @@ void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) { JVMCI_ONLY(JVMCI::do_unloading(purged_class)); } - _gc_tracer.report_object_count_after_gc(is_alive_closure()); + { + GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer); + _gc_tracer.report_object_count_after_gc(is_alive_closure()); + } #if TASKQUEUE_STATS ParCompactionManager::oop_task_queues()->print_and_reset_taskqueue_stats("Oop Queue"); ParCompactionManager::_objarray_task_queues->print_and_reset_taskqueue_stats("ObjArrayOop Queue"); diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.hpp b/src/hotspot/share/gc/parallel/psParallelCompact.hpp index 194dc70ad0a..07c420bc0dd 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp @@ -1141,7 +1141,7 @@ class PSParallelCompact : AllStatic { PSParallelCompact(); - static void invoke(bool maximum_heap_compaction); + static bool invoke(bool maximum_heap_compaction); static bool invoke_no_policy(bool maximum_heap_compaction); static void post_initialize(); diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp index c76adc5fb9b..e148d0cebe8 100644 --- a/src/hotspot/share/gc/parallel/psScavenge.cpp +++ b/src/hotspot/share/gc/parallel/psScavenge.cpp @@ -408,7 +408,7 @@ bool PSScavenge::invoke_no_policy() { GCTraceCPUTime tcpu(&_gc_tracer); GCTraceTime(Info, gc) tm("Pause Young", nullptr, gc_cause, true); TraceCollectorStats tcs(counters()); - TraceMemoryManagerStats 
tms(heap->young_gc_manager(), gc_cause); + TraceMemoryManagerStats tms(heap->young_gc_manager(), gc_cause, "end of minor GC"); if (log_is_enabled(Debug, gc, heap, exit)) { accumulated_time()->start(); diff --git a/src/hotspot/share/gc/parallel/psVMOperations.cpp b/src/hotspot/share/gc/parallel/psVMOperations.cpp index a9f99a4dbf5..47eeffb34a5 100644 --- a/src/hotspot/share/gc/parallel/psVMOperations.cpp +++ b/src/hotspot/share/gc/parallel/psVMOperations.cpp @@ -58,7 +58,8 @@ static bool is_cause_full(GCCause::Cause cause) { VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count, uint full_gc_count, GCCause::Cause gc_cause) : - VM_GC_Operation(gc_count, gc_cause, full_gc_count, is_cause_full(gc_cause)) + VM_GC_Operation(gc_count, gc_cause, full_gc_count, is_cause_full(gc_cause)), + _full_gc_succeeded(false) { } @@ -70,8 +71,8 @@ void VM_ParallelGCSystemGC::doit() { GCCauseSetter gccs(heap, _gc_cause); if (!_full) { // If (and only if) the scavenge fails, this will invoke a full gc. - heap->invoke_scavenge(); + _full_gc_succeeded = heap->invoke_scavenge(); } else { - heap->do_full_collection(false); + _full_gc_succeeded = PSParallelCompact::invoke(false); } } diff --git a/src/hotspot/share/gc/parallel/psVMOperations.hpp b/src/hotspot/share/gc/parallel/psVMOperations.hpp index 0cddac3d616..cc49eb631c7 100644 --- a/src/hotspot/share/gc/parallel/psVMOperations.hpp +++ b/src/hotspot/share/gc/parallel/psVMOperations.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,10 +40,12 @@ class VM_ParallelGCFailedAllocation : public VM_CollectForAllocation { }; class VM_ParallelGCSystemGC: public VM_GC_Operation { + bool _full_gc_succeeded; public: VM_ParallelGCSystemGC(uint gc_count, uint full_gc_count, GCCause::Cause gc_cause); virtual VMOp_Type type() const { return VMOp_ParallelGCSystemGC; } virtual void doit(); + bool full_gc_succeeded() const { return _full_gc_succeeded; } }; #endif // SHARE_GC_PARALLEL_PSVMOPERATIONS_HPP diff --git a/src/hotspot/share/gc/serial/defNewGeneration.cpp b/src/hotspot/share/gc/serial/defNewGeneration.cpp index 31ca98f9570..bd256c9c555 100644 --- a/src/hotspot/share/gc/serial/defNewGeneration.cpp +++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp @@ -394,8 +394,12 @@ void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size, uintx survivor_size = compute_survivor_size(size, SpaceAlignment); uintx eden_size = size - (2*survivor_size); if (eden_size > max_eden_size()) { - eden_size = max_eden_size(); - survivor_size = (size - eden_size)/2; + // Need to reduce eden_size to satisfy the max constraint. The delta needs + // to be 2*SpaceAlignment aligned so that both survivors are properly + // aligned. 
+ uintx eden_delta = align_up(eden_size - max_eden_size(), 2*SpaceAlignment); + eden_size -= eden_delta; + survivor_size += eden_delta/2; } assert(eden_size > 0 && survivor_size <= eden_size, "just checking"); diff --git a/src/hotspot/share/gc/serial/genMarkSweep.cpp b/src/hotspot/share/gc/serial/genMarkSweep.cpp index a700307e7aa..d3a15be7e55 100644 --- a/src/hotspot/share/gc/serial/genMarkSweep.cpp +++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp @@ -210,7 +210,10 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) { JVMCI_ONLY(JVMCI::do_unloading(purged_class)); } - gc_tracer()->report_object_count_after_gc(&is_alive); + { + GCTraceTime(Debug, gc, phases) tm_m("Report Object Count", gc_timer()); + gc_tracer()->report_object_count_after_gc(&is_alive); + } } diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp b/src/hotspot/share/gc/serial/serialHeap.cpp index ae77be8cf89..8f126c3129e 100644 --- a/src/hotspot/share/gc/serial/serialHeap.cpp +++ b/src/hotspot/share/gc/serial/serialHeap.cpp @@ -45,8 +45,8 @@ SerialHeap::SerialHeap() : _eden_pool(nullptr), _survivor_pool(nullptr), _old_pool(nullptr) { - _young_manager = new GCMemoryManager("Copy", "end of minor GC"); - _old_manager = new GCMemoryManager("MarkSweepCompact", "end of major GC"); + _young_manager = new GCMemoryManager("Copy"); + _old_manager = new GCMemoryManager("MarkSweepCompact"); } void SerialHeap::initialize_serviceability() { diff --git a/src/hotspot/share/gc/shared/ageTable.cpp b/src/hotspot/share/gc/shared/ageTable.cpp index b8586aa931c..879bf28797f 100644 --- a/src/hotspot/share/gc/shared/ageTable.cpp +++ b/src/hotspot/share/gc/shared/ageTable.cpp @@ -33,15 +33,16 @@ #include "oops/oop.inline.hpp" #include "runtime/perfData.hpp" #include "utilities/copy.hpp" +#include "logging/logStream.hpp" /* Copyright (c) 1992, 2021, Oracle and/or its affiliates, and Stanford University. See the LICENSE file for license information. 
*/ -AgeTable::AgeTable(bool global) { +AgeTable::AgeTable(bool global) : _use_perf_data(UsePerfData && global) { clear(); - if (UsePerfData && global) { + if (_use_perf_data) { ResourceMark rm; EXCEPTION_MARK; @@ -70,7 +71,7 @@ void AgeTable::clear() { } } -void AgeTable::merge(AgeTable* subTable) { +void AgeTable::merge(const AgeTable* subTable) { for (int i = 0; i < table_size; i++) { sizes[i]+= subTable->sizes[i]; } @@ -105,25 +106,30 @@ uint AgeTable::compute_tenuring_threshold(size_t desired_survivor_size) { } void AgeTable::print_age_table(uint tenuring_threshold) { - if (log_is_enabled(Trace, gc, age) || UsePerfData || AgeTableTracer::is_tenuring_distribution_event_enabled()) { - log_trace(gc, age)("Age table with threshold %u (max threshold " UINTX_FORMAT ")", - tenuring_threshold, MaxTenuringThreshold); - - size_t total = 0; - uint age = 1; - while (age < table_size) { - size_t wordSize = sizes[age]; - total += wordSize; - if (wordSize > 0) { - log_trace(gc, age)("- age %3u: " SIZE_FORMAT_W(10) " bytes, " SIZE_FORMAT_W(10) " total", - age, wordSize * oopSize, total * oopSize); - } - AgeTableTracer::send_tenuring_distribution_event(age, wordSize * oopSize); - if (UsePerfData) { - _perf_sizes[age]->set_value(wordSize * oopSize); - } - age++; - } + LogTarget(Trace, gc, age) lt; + if (lt.is_enabled() || _use_perf_data || AgeTableTracer::is_tenuring_distribution_event_enabled()) { + LogStream st(lt); + print_on(&st, tenuring_threshold); } } +void AgeTable::print_on(outputStream* st, uint tenuring_threshold) { + st->print_cr("Age table with threshold %u (max threshold " UINTX_FORMAT ")", + tenuring_threshold, MaxTenuringThreshold); + + size_t total = 0; + uint age = 1; + while (age < table_size) { + size_t word_size = sizes[age]; + total += word_size; + if (word_size > 0) { + st->print_cr("- age %3u: " SIZE_FORMAT_W(10) " bytes, " SIZE_FORMAT_W(10) " total", + age, word_size * oopSize, total * oopSize); + } + AgeTableTracer::send_tenuring_distribution_event(age, 
word_size * oopSize); + if (_use_perf_data) { + _perf_sizes[age]->set_value(word_size * oopSize); + } + age++; + } +} diff --git a/src/hotspot/share/gc/shared/ageTable.hpp b/src/hotspot/share/gc/shared/ageTable.hpp index b05e1a161f2..9f0c10ec312 100644 --- a/src/hotspot/share/gc/shared/ageTable.hpp +++ b/src/hotspot/share/gc/shared/ageTable.hpp @@ -63,14 +63,15 @@ class AgeTable { // Merge another age table with the current one. Used // for parallel young generation gc. - void merge(AgeTable* subTable); + void merge(const AgeTable* subTable); // Calculate new tenuring threshold based on age information. uint compute_tenuring_threshold(size_t desired_survivor_size); void print_age_table(uint tenuring_threshold); + void print_on(outputStream* st, uint tenuring_threshold); private: - + bool _use_perf_data; PerfVariable* _perf_sizes[table_size]; }; diff --git a/src/hotspot/share/gc/shared/cardTable.cpp b/src/hotspot/share/gc/shared/cardTable.cpp index 7d9cd15997d..4f2e0abbe95 100644 --- a/src/hotspot/share/gc/shared/cardTable.cpp +++ b/src/hotspot/share/gc/shared/cardTable.cpp @@ -30,6 +30,7 @@ #include "gc/shared/space.inline.hpp" #include "logging/log.hpp" #include "memory/virtualspace.hpp" +#include "runtime/init.hpp" #include "runtime/java.hpp" #include "runtime/os.hpp" #include "services/memTracker.hpp" @@ -73,21 +74,13 @@ CardTable::CardTable(MemRegion whole_heap) : _byte_map_size(0), _byte_map(nullptr), _byte_map_base(nullptr), - _cur_covered_regions(0), - _covered(MemRegion::create_array(_max_covered_regions, mtGC)), - _committed(MemRegion::create_array(_max_covered_regions, mtGC)), _guard_region() { assert((uintptr_t(_whole_heap.start()) & (_card_size - 1)) == 0, "heap must start at card boundary"); assert((uintptr_t(_whole_heap.end()) & (_card_size - 1)) == 0, "heap must end at card boundary"); } -CardTable::~CardTable() { - MemRegion::destroy_array(_covered, _max_covered_regions); - MemRegion::destroy_array(_committed, _max_covered_regions); -} - -void 
CardTable::initialize() { +void CardTable::initialize(void* region0_start, void* region1_start) { size_t num_cards = cards_required(_whole_heap.word_size()); // each card takes 1 byte; + 1 for the guard card @@ -97,8 +90,6 @@ void CardTable::initialize() { HeapWord* low_bound = _whole_heap.start(); HeapWord* high_bound = _whole_heap.end(); - _cur_covered_regions = 0; - const size_t rs_align = _page_size == os::vm_page_size() ? 0 : MAX2(_page_size, os::vm_allocation_granularity()); ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size); @@ -125,215 +116,99 @@ void CardTable::initialize() { assert(is_aligned(guard_card, _page_size), "must be on its own OS page"); _guard_region = MemRegion((HeapWord*)guard_card, _page_size); + initialize_covered_region(region0_start, region1_start); + log_trace(gc, barrier)("CardTable::CardTable: "); log_trace(gc, barrier)(" &_byte_map[0]: " PTR_FORMAT " &_byte_map[last_valid_index()]: " PTR_FORMAT, - p2i(&_byte_map[0]), p2i(&_byte_map[last_valid_index()])); + p2i(&_byte_map[0]), p2i(&_byte_map[last_valid_index()])); log_trace(gc, barrier)(" _byte_map_base: " PTR_FORMAT, p2i(_byte_map_base)); } -int CardTable::find_covering_region_by_base(HeapWord* base) { - int i; - for (i = 0; i < _cur_covered_regions; i++) { - if (_covered[i].start() == base) return i; - if (_covered[i].start() > base) break; +MemRegion CardTable::committed_for(const MemRegion mr) const { + HeapWord* addr_l = (HeapWord*)align_down(byte_for(mr.start()), _page_size); + HeapWord* addr_r = mr.is_empty() + ? addr_l + : (HeapWord*)align_up(byte_after(mr.last()), _page_size); + + if (mr.start() == _covered[0].start()) { + // In case the card for gen-boundary is not page-size aligned, the crossing page belongs to _covered[1]. + addr_r = MIN2(addr_r, (HeapWord*)align_down(byte_for(_covered[1].start()), _page_size)); } - // If we didn't find it, create a new one. 
- assert(_cur_covered_regions < _max_covered_regions, - "too many covered regions"); - // Move the ones above up, to maintain sorted order. - for (int j = _cur_covered_regions; j > i; j--) { - _covered[j] = _covered[j-1]; - _committed[j] = _committed[j-1]; - } - int res = i; - _cur_covered_regions++; - _covered[res].set_start(base); - _covered[res].set_word_size(0); - CardValue* ct_start = byte_for(base); - HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size); - _committed[res].set_start(ct_start_aligned); - _committed[res].set_word_size(0); - return res; + + return MemRegion(addr_l, addr_r); } -HeapWord* CardTable::largest_prev_committed_end(int ind) const { - HeapWord* max_end = nullptr; - for (int j = 0; j < ind; j++) { - HeapWord* this_end = _committed[j].end(); - if (this_end > max_end) max_end = this_end; - } - return max_end; -} +void CardTable::initialize_covered_region(void* region0_start, void* region1_start) { + assert(_whole_heap.start() == region0_start, "precondition"); + assert(region0_start < region1_start, "precondition"); -MemRegion CardTable::committed_unique_to_self(int self, MemRegion mr) const { - assert(mr.intersection(_guard_region).is_empty(), "precondition"); - MemRegion result = mr; - for (int r = 0; r < _cur_covered_regions; r += 1) { - if (r != self) { - result = result.minus(_committed[r]); - } - } - return result; + assert(_covered[0].start() == nullptr, "precondition"); + assert(_covered[1].start() == nullptr, "precondition"); + + _covered[0] = MemRegion((HeapWord*)region0_start, (size_t)0); + _covered[1] = MemRegion((HeapWord*)region1_start, (size_t)0); } void CardTable::resize_covered_region(MemRegion new_region) { - // We don't change the start of a region, only the end. 
+ assert(UseSerialGC || UseParallelGC, "only these two collectors"); assert(_whole_heap.contains(new_region), - "attempt to cover area not in reserved area"); - // collided is true if the expansion would push into another committed region - debug_only(bool collided = false;) - int const ind = find_covering_region_by_base(new_region.start()); - MemRegion const old_region = _covered[ind]; - assert(old_region.start() == new_region.start(), "just checking"); - if (new_region.word_size() != old_region.word_size()) { - // Commit new or uncommit old pages, if necessary. - MemRegion cur_committed = _committed[ind]; - // Extend the end of this _committed region - // to cover the end of any lower _committed regions. - // This forms overlapping regions, but never interior regions. - HeapWord* const max_prev_end = largest_prev_committed_end(ind); - if (max_prev_end > cur_committed.end()) { - cur_committed.set_end(max_prev_end); - } - // Align the end up to a page size (starts are already aligned). - HeapWord* new_end = (HeapWord*) byte_after(new_region.last()); - HeapWord* new_end_aligned = align_up(new_end, _page_size); - assert(new_end_aligned >= new_end, "align up, but less"); - // Check the other regions (excludes "ind") to ensure that - // the new_end_aligned does not intrude onto the committed - // space of another region. - int ri = 0; - for (ri = ind + 1; ri < _cur_covered_regions; ri++) { - if (new_end_aligned > _committed[ri].start()) { - assert(new_end_aligned <= _committed[ri].end(), - "An earlier committed region can't cover a later committed region"); - // Any region containing the new end - // should start at or beyond the region found (ind) - // for the new end (committed regions are not expected to - // be proper subsets of other committed regions). 
- assert(_committed[ri].start() >= _committed[ind].start(), - "New end of committed region is inconsistent"); - new_end_aligned = _committed[ri].start(); - // new_end_aligned can be equal to the start of its - // committed region (i.e., of "ind") if a second - // region following "ind" also start at the same location - // as "ind". - assert(new_end_aligned >= _committed[ind].start(), - "New end of committed region is before start"); - debug_only(collided = true;) - // Should only collide with 1 region - break; - } - } -#ifdef ASSERT - for (++ri; ri < _cur_covered_regions; ri++) { - assert(!_committed[ri].contains(new_end_aligned), - "New end of committed region is in a second committed region"); - } -#endif - // The guard page is always committed and should not be committed over. - // "guarded" is used for assertion checking below and recalls the fact - // that the would-be end of the new committed region would have - // penetrated the guard page. - HeapWord* new_end_for_commit = new_end_aligned; + "attempt to cover area not in reserved area"); + assert(_covered[0].start() != nullptr, "precondition"); + assert(_covered[1].start() != nullptr, "precondition"); - DEBUG_ONLY(bool guarded = false;) - if (new_end_for_commit > _guard_region.start()) { - new_end_for_commit = _guard_region.start(); - DEBUG_ONLY(guarded = true;) - } + int idx = new_region.start() == _whole_heap.start() ? 0 : 1; - if (new_end_for_commit > cur_committed.end()) { - // Must commit new pages. - MemRegion const new_committed = - MemRegion(cur_committed.end(), new_end_for_commit); + // We don't allow changes to the start of a region, only the end. 
+ assert(_covered[idx].start() == new_region.start(), "inv"); - assert(!new_committed.is_empty(), "Region should not be empty here"); - os::commit_memory_or_exit((char*)new_committed.start(), - new_committed.byte_size(), _page_size, - !ExecMem, "card table expansion"); - // Use new_end_aligned (as opposed to new_end_for_commit) because - // the cur_committed region may include the guard region. - } else if (new_end_aligned < cur_committed.end()) { - // Must uncommit pages. - MemRegion const uncommit_region = - committed_unique_to_self(ind, MemRegion(new_end_aligned, - cur_committed.end())); - if (!uncommit_region.is_empty()) { - if (!os::uncommit_memory((char*)uncommit_region.start(), - uncommit_region.byte_size())) { - assert(false, "Card table contraction failed"); - // The call failed so don't change the end of the - // committed region. This is better than taking the - // VM down. - new_end_aligned = _committed[ind].end(); - } - } - } - // In any case, we can reset the end of the current committed entry. - _committed[ind].set_end(new_end_aligned); + MemRegion old_committed = committed_for(_covered[idx]); -#ifdef ASSERT - // Check that the last card in the new region is committed according - // to the tables. - bool covered = false; - for (int cr = 0; cr < _cur_covered_regions; cr++) { - if (_committed[cr].contains(new_end - 1)) { - covered = true; - break; - } - } - assert(covered, "Card for end of new region not committed"); -#endif + _covered[idx] = new_region; - // The default of 0 is not necessarily clean cards. - CardValue* entry; - if (old_region.last() < _whole_heap.start()) { - entry = byte_for(_whole_heap.start()); - } else { - entry = byte_after(old_region.last()); - } - assert(index_for(new_region.last()) <= last_valid_index(), - "The guard card will be overwritten"); - // This line commented out cleans the newly expanded region and - // not the aligned up expanded region. 
- // CardValue* const end = byte_after(new_region.last()); - CardValue* const end = (CardValue*) new_end_for_commit; - assert((end >= byte_after(new_region.last())) || collided || guarded, - "Expect to be beyond new region unless impacting another region"); - // do nothing if we resized downward. -#ifdef ASSERT - for (int ri = 0; ri < _cur_covered_regions; ri++) { - if (ri != ind) { - // The end of the new committed region should not - // be in any existing region unless it matches - // the start of the next region. - assert(!_committed[ri].contains(end) || - (_committed[ri].start() == (HeapWord*) end), - "Overlapping committed regions"); - } - } -#endif - if (entry < end) { - memset(entry, clean_card, pointer_delta(end, entry, sizeof(CardValue))); - } + MemRegion new_committed = committed_for(new_region); + + if (new_committed.word_size() == old_committed.word_size()) { + return; + } + + if (new_committed.word_size() > old_committed.word_size()) { + // Expand. + MemRegion delta = MemRegion(old_committed.end(), + new_committed.word_size() - old_committed.word_size()); + + os::commit_memory_or_exit((char*)delta.start(), + delta.byte_size(), + _page_size, + !ExecMem, + "card table expansion"); + + memset(delta.start(), clean_card, delta.byte_size()); + } else { + // Shrink. + MemRegion delta = MemRegion(new_committed.end(), + old_committed.word_size() - new_committed.word_size()); + bool res = os::uncommit_memory((char*)delta.start(), + delta.byte_size()); + assert(res, "uncommit should succeed"); } - // In any case, the covered size changes. 
- _covered[ind].set_word_size(new_region.word_size()); log_trace(gc, barrier)("CardTable::resize_covered_region: "); log_trace(gc, barrier)(" _covered[%d].start(): " PTR_FORMAT " _covered[%d].last(): " PTR_FORMAT, - ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last())); - log_trace(gc, barrier)(" _committed[%d].start(): " PTR_FORMAT " _committed[%d].last(): " PTR_FORMAT, - ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last())); + idx, p2i(_covered[idx].start()), idx, p2i(_covered[idx].last())); + log_trace(gc, barrier)(" committed_start: " PTR_FORMAT " committed_last: " PTR_FORMAT, + p2i(new_committed.start()), p2i(new_committed.last())); log_trace(gc, barrier)(" byte_for(start): " PTR_FORMAT " byte_for(last): " PTR_FORMAT, - p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last()))); + p2i(byte_for(_covered[idx].start())), p2i(byte_for(_covered[idx].last()))); log_trace(gc, barrier)(" addr_for(start): " PTR_FORMAT " addr_for(last): " PTR_FORMAT, - p2i(addr_for((CardValue*) _committed[ind].start())), p2i(addr_for((CardValue*) _committed[ind].last()))); + p2i(addr_for((CardValue*) new_committed.start())), p2i(addr_for((CardValue*) new_committed.last()))); +#ifdef ASSERT // Touch the last card of the covered region to show that it // is committed (or SEGV). - debug_only((void) (*byte_for(_covered[ind].last()));) + if (is_init_completed()) { + (void) (*(volatile CardValue*)byte_for(_covered[idx].last())); + } +#endif } // Note that these versions are precise! 
The scanning code has to handle the @@ -371,7 +246,7 @@ uintx CardTable::ct_max_alignment_constraint() { void CardTable::invalidate(MemRegion mr) { assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); - for (int i = 0; i < _cur_covered_regions; i++) { + for (int i = 0; i < max_covered_regions; i++) { MemRegion mri = mr.intersection(_covered[i]); if (!mri.is_empty()) dirty_MemRegion(mri); } diff --git a/src/hotspot/share/gc/shared/cardTable.hpp b/src/hotspot/share/gc/shared/cardTable.hpp index 7f27b9fdab6..f66c5b7c508 100644 --- a/src/hotspot/share/gc/shared/cardTable.hpp +++ b/src/hotspot/share/gc/shared/cardTable.hpp @@ -49,46 +49,20 @@ protected: CardValue* _byte_map; // the card marking array CardValue* _byte_map_base; - int _cur_covered_regions; - - // The covered regions should be in address order. - MemRegion* _covered; - // The committed regions correspond one-to-one to the covered regions. - // They represent the card-table memory that has been committed to service - // the corresponding covered region. It may be that committed region for - // one covered region corresponds to a larger region because of page-size - // roundings. Thus, a committed region for one covered region may - // actually extend onto the card-table space for the next covered region. - MemRegion* _committed; - - // The last card is a guard card; never committed. - MemRegion _guard_region; - - inline size_t compute_byte_map_size(size_t num_bytes); - - // Finds and return the index of the region, if any, to which the given - // region would be contiguous. If none exists, assign a new region and - // returns its index. Requires that no more than the maximum number of - // covered regions defined in the constructor are ever in use. 
- int find_covering_region_by_base(HeapWord* base); - - // Returns the leftmost end of a committed region corresponding to a - // covered region before covered region "ind", or else "null" if "ind" is - // the first covered region. - HeapWord* largest_prev_committed_end(int ind) const; - - // Returns the part of the region mr that doesn't intersect with - // any committed region other than self. Used to prevent uncommitting - // regions that are also committed by other regions. Also protects - // against uncommitting the guard region. - MemRegion committed_unique_to_self(int self, MemRegion mr) const; - // Some barrier sets create tables whose elements correspond to parts of // the heap; the CardTableBarrierSet is an example. Such barrier sets will // normally reserve space for such tables, and commit parts of the table // "covering" parts of the heap that are committed. At most one covered // region per generation is needed. - static const int _max_covered_regions = 2; + static constexpr int max_covered_regions = 2; + + // The covered regions should be in address order. + MemRegion _covered[max_covered_regions]; + + // The last card is a guard card; never committed. + MemRegion _guard_region; + + inline size_t compute_byte_map_size(size_t num_bytes); enum CardValues { clean_card = (CardValue)-1, @@ -108,10 +82,16 @@ protected: size_t last_valid_index() const { return cards_required(_whole_heap.word_size()) - 1; } + +private: + void initialize_covered_region(void* region0_start, void* region1_start); + + MemRegion committed_for(const MemRegion mr) const; public: CardTable(MemRegion whole_heap); - virtual ~CardTable(); - virtual void initialize(); + virtual ~CardTable() = default; + + void initialize(void* region0_start, void* region1_start); // *** Barrier set functions. @@ -155,7 +135,7 @@ public: return byte_for(p) + 1; } - virtual void invalidate(MemRegion mr); + void invalidate(MemRegion mr); // Provide read-only access to the card table array. 
const CardValue* byte_for_const(const void* p) const { @@ -196,7 +176,7 @@ public: } // Resize one of the regions covered by the remembered set. - virtual void resize_covered_region(MemRegion new_region); + void resize_covered_region(MemRegion new_region); // *** Card-table-RemSet-specific things. diff --git a/src/hotspot/share/gc/shared/cardTableBarrierSet.hpp b/src/hotspot/share/gc/shared/cardTableBarrierSet.hpp index c59eba1af70..9d9d379cafe 100644 --- a/src/hotspot/share/gc/shared/cardTableBarrierSet.hpp +++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.hpp @@ -60,13 +60,13 @@ protected: CardTable* card_table, const BarrierSet::FakeRtti& fake_rtti); - public: +public: CardTableBarrierSet(CardTable* card_table); - ~CardTableBarrierSet(); + virtual ~CardTableBarrierSet(); CardTable* card_table() const { return _card_table; } - virtual void initialize(); + void initialize(); void write_region(JavaThread* thread, MemRegion mr) { invalidate(mr); diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index de17e24628f..4c22dccd0f8 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -638,10 +638,6 @@ void CollectedHeap::reset_promotion_should_fail() { #endif // #ifndef PRODUCT -bool CollectedHeap::is_archived_object(oop object) const { - return false; -} - // It's the caller's responsibility to ensure glitch-freedom // (if required). 
void CollectedHeap::update_capacity_and_used_at_gc() { diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp index d03aa76f4cd..e427e6de668 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -513,9 +513,6 @@ class CollectedHeap : public CHeapObj { virtual void pin_object(JavaThread* thread, oop obj) = 0; virtual void unpin_object(JavaThread* thread, oop obj) = 0; - // Is the given object inside a CDS archive area? - virtual bool is_archived_object(oop object) const; - // Support for loading objects from CDS archive into the heap // (usually as a snapshot of the old generation). virtual bool can_load_archived_objects() const { return false; } diff --git a/src/hotspot/share/gc/shared/gcCause.hpp b/src/hotspot/share/gc/shared/gcCause.hpp index bf794fad9f5..152ca787fc2 100644 --- a/src/hotspot/share/gc/shared/gcCause.hpp +++ b/src/hotspot/share/gc/shared/gcCause.hpp @@ -95,6 +95,12 @@ class GCCause : public AllStatic { cause == GCCause::_dcmd_gc_run); } + inline static bool is_explicit_full_gc(GCCause::Cause cause) { + return (is_user_requested_gc(cause) || + is_serviceability_requested_gc(cause) || + cause == GCCause::_wb_full_gc); + } + inline static bool is_serviceability_requested_gc(GCCause::Cause cause) { return (cause == GCCause::_jvmti_force_gc || diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp index 56cc45e2e80..42f174d10de 100644 --- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp @@ -117,17 +117,17 @@ jint GenCollectedHeap::initialize() { initialize_reserved_region(heap_rs); + ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size()); + ReservedSpace old_rs = heap_rs.last_part(_young_gen_spec->max_size()); + _rem_set = create_rem_set(heap_rs.region()); - _rem_set->initialize(); + 
_rem_set->initialize(young_rs.base(), old_rs.base()); + CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set); bs->initialize(); BarrierSet::set_barrier_set(bs); - ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size()); _young_gen = _young_gen_spec->init(young_rs, rem_set()); - ReservedSpace old_rs = heap_rs.last_part(_young_gen_spec->max_size()); - - old_rs = old_rs.first_part(_old_gen_spec->max_size()); _old_gen = _old_gen_spec->init(old_rs, rem_set()); GCInitLogger::print(); @@ -432,7 +432,7 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz FormatBuffer<> title("Collect gen: %s", gen->short_name()); GCTraceTime(Trace, gc, phases) t1(title); TraceCollectorStats tcs(gen->counters()); - TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause()); + TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause(), heap()->is_young_gen(gen) ? "end of minor GC" : "end of major GC"); gen->stat_record()->invocations++; gen->stat_record()->accumulated_time.start(); @@ -796,9 +796,28 @@ void GenCollectedHeap::collect(GCCause::Cause cause) { ? YoungGen : OldGen; - VM_GenCollectFull op(gc_count_before, full_gc_count_before, - cause, max_generation); - VMThread::execute(&op); + while (true) { + VM_GenCollectFull op(gc_count_before, full_gc_count_before, + cause, max_generation); + VMThread::execute(&op); + + if (!GCCause::is_explicit_full_gc(cause)) { + return; + } + + { + MutexLocker ml(Heap_lock); + // Read the GC count while holding the Heap_lock + if (full_gc_count_before != total_full_collections()) { + return; + } + } + + if (GCLocker::is_active_and_needs_gc()) { + // If GCLocker is active, wait until clear before retrying. 
+ GCLocker::stall_until_clear(); + } + } } void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) { diff --git a/src/hotspot/share/gc/shared/plab.hpp b/src/hotspot/share/gc/shared/plab.hpp index 1be6e2e10f7..cb1a44ec167 100644 --- a/src/hotspot/share/gc/shared/plab.hpp +++ b/src/hotspot/share/gc/shared/plab.hpp @@ -138,6 +138,10 @@ public: // Fills in the unallocated portion of the buffer with a garbage object and updates // statistics. To be called during GC. void retire(); + + HeapWord* top() const { + return _top; + } }; // PLAB book-keeping. diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedup.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedup.cpp index 91270da832b..7c37d642836 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedup.cpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedup.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -35,6 +35,7 @@ #include "gc/shared/stringdedup/stringDedupStat.hpp" #include "gc/shared/stringdedup/stringDedupStorageUse.hpp" #include "gc/shared/stringdedup/stringDedupTable.hpp" +#include "gc/shared/stringdedup/stringDedupThread.hpp" #include "logging/log.hpp" #include "memory/allocation.hpp" #include "memory/iterator.hpp" @@ -43,7 +44,6 @@ #include "oops/markWord.hpp" #include "oops/oopsHierarchy.hpp" #include "runtime/globals.hpp" -#include "runtime/javaThread.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/orderAccess.hpp" #include "runtime/safepoint.hpp" @@ -57,9 +57,12 @@ StringDedup::Processor* StringDedup::_processor = nullptr; StringDedup::Stat StringDedup::_cur_stat{}; StringDedup::Stat StringDedup::_total_stat{}; -const Klass* StringDedup::_string_klass_or_null = nullptr; -uint StringDedup::_enabled_age_threshold = 0; -uint StringDedup::_enabled_age_limit = 0; +// Configuration for predicates used to decide whether to deduplicate. +// The initial values are suitable for deduplication being disabled. +const Klass* StringDedup::_string_klass_or_null = nullptr; // No klass will match. +static_assert(markWord::max_age < UINT_MAX, "assumption"); +uint StringDedup::_enabled_age_threshold = UINT_MAX; // Age never equals max. +uint StringDedup::_enabled_age_limit = 0; // Age is never less than zero. bool StringDedup::ergo_initialize() { return Config::ergo_initialize(); @@ -82,30 +85,16 @@ void StringDedup::initialize() { _enabled_age_limit = Config::age_threshold(); Table::initialize(); Processor::initialize(); + // Don't create the thread yet. JavaThreads need to be created later. _enabled = true; log_info_p(stringdedup, init)("String Deduplication is enabled"); - } else { - // No klass will ever match. - _string_klass_or_null = nullptr; - // Age can never equal UINT_MAX. 
- static_assert(markWord::max_age < UINT_MAX, "assumption"); - _enabled_age_threshold = UINT_MAX; - // Age can never be less than zero. - _enabled_age_limit = 0; } _initialized = true; } -void StringDedup::stop() { +void StringDedup::start() { assert(is_enabled(), "precondition"); - assert(_processor != nullptr, "invariant"); - _processor->stop(); -} - -void StringDedup::threads_do(ThreadClosure* tc) { - assert(is_enabled(), "precondition"); - assert(_processor != nullptr, "invariant"); - tc->do_thread(_processor); + StringDedupThread::initialize(); } void StringDedup::forbid_deduplication(oop java_string) { diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedup.hpp b/src/hotspot/share/gc/shared/stringdedup/stringDedup.hpp index a0b77cab654..79689ab8648 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedup.hpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedup.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -93,6 +93,10 @@ // but before weak reference processing, the GC should flush or delete all // of its Requests objects. // +// The deduplication thread is a daemon JavaThread. No thread visitor is +// needed, as it is handled via the normal JavaThread visiting mechanism. +// Similarly, there is no need for a stop() function. +// // For additional information on string deduplication, please see JEP 192, // https://openjdk.org/jeps/192 @@ -102,6 +106,7 @@ #include "utilities/globalDefinitions.hpp" class Klass; +class StringDedupThread; class ThreadClosure; // The StringDedup class provides the API for the deduplication mechanism. @@ -110,6 +115,8 @@ class ThreadClosure; // feature. 
Other functions in the StringDedup class are called where // needed, without requiring GC-specific code. class StringDedup : public AllStatic { + friend class StringDedupThread; + class Config; class Processor; class Stat; @@ -140,13 +147,9 @@ public: // Returns true if string deduplication is enabled. static bool is_enabled() { return _enabled; } - // Stop the deduplication processor thread. + // Create and start the deduplication processor thread. // precondition: is_enabled() - static void stop(); - - // Visit the deduplication processor thread. - // precondition: is_enabled() - static void threads_do(ThreadClosure* tc); + static void start(); // Marks the String as not being subject to deduplication. This can be // used to prevent deduplication of Strings whose value array must remain diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp index 744ce1d7a08..163800e9abf 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,21 +33,17 @@ #include "gc/shared/stringdedup/stringDedupStat.hpp" #include "gc/shared/stringdedup/stringDedupStorageUse.hpp" #include "gc/shared/stringdedup/stringDedupTable.hpp" -#include "gc/shared/suspendibleThreadSet.hpp" #include "logging/log.hpp" #include "memory/allocation.hpp" #include "memory/iterator.hpp" #include "oops/access.inline.hpp" #include "runtime/atomic.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/mutexLocker.hpp" #include "utilities/debug.hpp" #include "utilities/globalCounter.hpp" #include "utilities/globalDefinitions.hpp" -StringDedup::Processor::Processor() : ConcurrentGCThread() { - set_name("StringDedupProcessor"); -} - OopStorage* StringDedup::Processor::_storages[2] = {}; StringDedup::StorageUse* volatile StringDedup::Processor::_storage_for_requests = nullptr; @@ -64,71 +60,63 @@ void StringDedup::Processor::initialize_storage() { _storage_for_processing = new StorageUse(_storages[1]); } +StringDedup::Processor::Processor() : _thread(nullptr) {} + void StringDedup::Processor::initialize() { _processor = new Processor(); - _processor->create_and_start(); } -bool StringDedup::Processor::wait_for_requests() const { - // Wait for the current request storage object to be non-empty. The - // num-dead notification from the Table notifies the monitor. - if (!should_terminate()) { +void StringDedup::Processor::wait_for_requests() const { + assert(Thread::current() == _thread, "precondition"); + // Wait for the current request storage object to be non-empty, or for the + // table to need cleanup. The num-dead notification from the Table notifies + // the monitor. 
+ { + ThreadBlockInVM tbivm(_thread); MonitorLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag); OopStorage* storage = Atomic::load(&_storage_for_requests)->storage(); - while (!should_terminate() && - (storage->allocation_count() == 0) && + while ((storage->allocation_count() == 0) && !Table::is_dead_entry_removal_needed()) { ml.wait(); } } // Swap the request and processing storage objects. - if (!should_terminate()) { - log_trace(stringdedup)("swapping request storages"); - _storage_for_processing = Atomic::xchg(&_storage_for_requests, _storage_for_processing); - GlobalCounter::write_synchronize(); - } + log_trace(stringdedup)("swapping request storages"); + _storage_for_processing = Atomic::xchg(&_storage_for_requests, _storage_for_processing); + GlobalCounter::write_synchronize(); // Wait for the now current processing storage object to no longer be used // by an in-progress GC. Again here, the num-dead notification from the // Table notifies the monitor. - if (!should_terminate()) { + { log_trace(stringdedup)("waiting for storage to process"); + ThreadBlockInVM tbivm(_thread); MonitorLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag); - while (_storage_for_processing->is_used_acquire() && !should_terminate()) { + while (_storage_for_processing->is_used_acquire()) { ml.wait(); } } - return !should_terminate(); } StringDedup::StorageUse* StringDedup::Processor::storage_for_requests() { return StorageUse::obtain(&_storage_for_requests); } -bool StringDedup::Processor::yield_or_continue(SuspendibleThreadSetJoiner* joiner, - Stat::Phase phase) const { - if (joiner->should_yield()) { - _cur_stat.block_phase(phase); - joiner->yield(); - _cur_stat.unblock_phase(); - } - return !should_terminate(); +void StringDedup::Processor::yield() const { + assert(Thread::current() == _thread, "precondition"); + ThreadBlockInVM tbivm(_thread); } -void StringDedup::Processor::cleanup_table(SuspendibleThreadSetJoiner* joiner, - bool grow_only, - bool force) 
const { +void StringDedup::Processor::cleanup_table(bool grow_only, bool force) const { if (Table::cleanup_start_if_needed(grow_only, force)) { - Stat::Phase phase = Table::cleanup_phase(); - while (yield_or_continue(joiner, phase)) { - if (!Table::cleanup_step()) break; - } + do { + yield(); + } while (Table::cleanup_step()); Table::cleanup_end(); } } class StringDedup::Processor::ProcessRequest final : public OopClosure { OopStorage* _storage; - SuspendibleThreadSetJoiner* _joiner; size_t _release_index; oop* _bulk_release[OopStorage::bulk_allocate_limit]; @@ -143,9 +131,8 @@ class StringDedup::Processor::ProcessRequest final : public OopClosure { } public: - ProcessRequest(OopStorage* storage, SuspendibleThreadSetJoiner* joiner) : + ProcessRequest(OopStorage* storage) : _storage(storage), - _joiner(joiner), _release_index(0), _bulk_release() {} @@ -157,64 +144,52 @@ public: virtual void do_oop(narrowOop*) { ShouldNotReachHere(); } virtual void do_oop(oop* ref) { - if (_processor->yield_or_continue(_joiner, Stat::Phase::process)) { - oop java_string = NativeAccess::oop_load(ref); - release_ref(ref); - // Dedup java_string, after checking for various reasons to skip it. - if (java_string == nullptr) { - // String became unreachable before we got a chance to process it. - _cur_stat.inc_skipped_dead(); - } else if (java_lang_String::value(java_string) == nullptr) { - // Request during String construction, before its value array has - // been initialized. - _cur_stat.inc_skipped_incomplete(); - } else { - Table::deduplicate(java_string); - if (Table::is_grow_needed()) { - _cur_stat.report_process_pause(); - _processor->cleanup_table(_joiner, true /* grow_only */, false /* force */); - _cur_stat.report_process_resume(); - } + _processor->yield(); + oop java_string = NativeAccess::oop_load(ref); + release_ref(ref); + // Dedup java_string, after checking for various reasons to skip it. 
+ if (java_string == nullptr) { + // String became unreachable before we got a chance to process it. + _cur_stat.inc_skipped_dead(); + } else if (java_lang_String::value(java_string) == nullptr) { + // Request during String construction, before its value array has + // been initialized. + _cur_stat.inc_skipped_incomplete(); + } else { + Table::deduplicate(java_string); + if (Table::is_grow_needed()) { + _cur_stat.report_process_pause(); + _processor->cleanup_table(true /* grow_only */, false /* force */); + _cur_stat.report_process_resume(); } } } }; -void StringDedup::Processor::process_requests(SuspendibleThreadSetJoiner* joiner) const { +void StringDedup::Processor::process_requests() const { + _cur_stat.report_process_start(); OopStorage::ParState par_state{_storage_for_processing->storage(), 1}; - ProcessRequest processor{_storage_for_processing->storage(), joiner}; + ProcessRequest processor{_storage_for_processing->storage()}; par_state.oops_do(&processor); + _cur_stat.report_process_end(); } -void StringDedup::Processor::run_service() { - while (!should_terminate()) { +void StringDedup::Processor::run(JavaThread* thread) { + assert(thread == Thread::current(), "precondition"); + _thread = thread; + log_debug(stringdedup)("Starting string deduplication thread"); + while (true) { _cur_stat.report_idle_start(); - if (!wait_for_requests()) { - assert(should_terminate(), "invariant"); - break; - } - SuspendibleThreadSetJoiner sts_joiner{}; - if (should_terminate()) break; + wait_for_requests(); _cur_stat.report_idle_end(); - _cur_stat.report_concurrent_start(); - _cur_stat.report_process_start(); - process_requests(&sts_joiner); - if (should_terminate()) break; - _cur_stat.report_process_end(); - cleanup_table(&sts_joiner, - false /* grow_only */, - StringDeduplicationResizeALot /* force */); - if (should_terminate()) break; - _cur_stat.report_concurrent_end(); + _cur_stat.report_active_start(); + process_requests(); + cleanup_table(false /* grow_only */, 
StringDeduplicationResizeALot /* force */); + _cur_stat.report_active_end(); log_statistics(); } } -void StringDedup::Processor::stop_service() { - MonitorLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag); - ml.notify_all(); -} - void StringDedup::Processor::log_statistics() { _total_stat.add(&_cur_stat); Stat::log_summary(&_cur_stat, &_total_stat); diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.hpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.hpp index 07cfec73f31..3f1f1c19886 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.hpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,26 +25,23 @@ #ifndef SHARE_GC_SHARED_STRINGDEDUP_STRINGDEDUPPROCESSOR_HPP #define SHARE_GC_SHARED_STRINGDEDUP_STRINGDEDUPPROCESSOR_HPP -#include "gc/shared/concurrentGCThread.hpp" #include "gc/shared/stringdedup/stringDedup.hpp" -#include "gc/shared/stringdedup/stringDedupStat.hpp" +#include "memory/allocation.hpp" #include "utilities/macros.hpp" +class JavaThread; class OopStorage; -class SuspendibleThreadSetJoiner; -// Thread class for string deduplication. There is only one instance of -// this class. This thread processes deduplication requests. It also -// manages the deduplication table, performing resize and cleanup operations -// as needed. This includes managing the OopStorage objects used to hold -// requests. +// This class performs string deduplication. There is only one instance of +// this class. It processes deduplication requests. It also manages the +// deduplication table, performing resize and cleanup operations as needed. 
+// This includes managing the OopStorage objects used to hold requests. // -// This thread uses the SuspendibleThreadSet mechanism to take part in the -// safepoint protocol. It checks for safepoints between processing requests -// in order to minimize safepoint latency. The Table provides incremental -// operations for resizing and for removing dead entries, so this thread can -// perform safepoint checks between steps in those operations. -class StringDedup::Processor : public ConcurrentGCThread { +// Processing periodically checks for and yields at safepoints. Processing of +// requests is performed in incremental chunks. The Table provides +// incremental operations for resizing and for removing dead entries, so +// safepoint checks can be performed between steps in those operations. +class StringDedup::Processor : public CHeapObj { Processor(); ~Processor() = default; @@ -54,27 +51,32 @@ class StringDedup::Processor : public ConcurrentGCThread { static StorageUse* volatile _storage_for_requests; static StorageUse* _storage_for_processing; - // Returns !should_terminate(); - bool wait_for_requests() const; + JavaThread* _thread; - // Yield if requested. Returns !should_terminate() after possible yield. - bool yield_or_continue(SuspendibleThreadSetJoiner* joiner, Stat::Phase phase) const; + // Wait until there are requests to be processed. The storage for requests + // and storage for processing are swapped; the former requests storage + // becomes the current processing storage, and vice versa. + // precondition: the processing storage is empty. + void wait_for_requests() const; + + // Yield if requested. 
+ void yield() const; class ProcessRequest; - void process_requests(SuspendibleThreadSetJoiner* joiner) const; - void cleanup_table(SuspendibleThreadSetJoiner* joiner, bool grow_only, bool force) const; + void process_requests() const; + void cleanup_table(bool grow_only, bool force) const; void log_statistics(); -protected: - virtual void run_service(); - virtual void stop_service(); - public: static void initialize(); static void initialize_storage(); static StorageUse* storage_for_requests(); + + // Use thread as the deduplication thread. + // precondition: thread == Thread::current() + void run(JavaThread* thread); }; #endif // SHARE_GC_SHARED_STRINGDEDUP_STRINGDEDUPPROCESSOR_HPP diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.cpp index ee37083ee42..cffda333b13 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.cpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,21 +40,19 @@ StringDedup::Stat::Stat() : _skipped_dead(0), _skipped_incomplete(0), _skipped_shared(0), - _concurrent(0), + _active(0), _idle(0), _process(0), _resize_table(0), _cleanup_table(0), - _block(0), - _concurrent_start(), - _concurrent_elapsed(), + _active_start(), + _active_elapsed(), _phase_start(), _idle_elapsed(), _process_elapsed(), _resize_table_elapsed(), - _cleanup_table_elapsed(), - _block_elapsed() { -} + _cleanup_table_elapsed() +{} void StringDedup::Stat::add(const Stat* const stat) { _inspected += stat->_inspected; @@ -69,18 +67,16 @@ void StringDedup::Stat::add(const Stat* const stat) { _skipped_dead += stat->_skipped_dead; _skipped_incomplete += stat->_skipped_incomplete; _skipped_shared += stat->_skipped_shared; - _concurrent += stat->_concurrent; + _active += stat->_active; _idle += stat->_idle; _process += stat->_process; _resize_table += stat->_resize_table; _cleanup_table += stat->_cleanup_table; - _block += stat->_block; - _concurrent_elapsed += stat->_concurrent_elapsed; + _active_elapsed += stat->_active_elapsed; _idle_elapsed += stat->_idle_elapsed; _process_elapsed += stat->_process_elapsed; _resize_table_elapsed += stat->_resize_table_elapsed; _cleanup_table_elapsed += stat->_cleanup_table_elapsed; - _block_elapsed += stat->_block_elapsed; } // Support for log output formatting @@ -113,19 +109,19 @@ void StringDedup::Stat::log_summary(const Stat* last_stat, const Stat* total_sta last_stat->_deduped, STRDEDUP_BYTES_PARAM(last_stat->_deduped_bytes), total_deduped_bytes_percent, strdedup_elapsed_param_ms(last_stat->_process_elapsed), - strdedup_elapsed_param_ms(last_stat->_concurrent_elapsed)); + strdedup_elapsed_param_ms(last_stat->_active_elapsed)); } -void StringDedup::Stat::report_concurrent_start() { - log_debug(stringdedup, phases, start)("Concurrent start"); - _concurrent_start = Ticks::now(); - _concurrent++; +void 
StringDedup::Stat::report_active_start() { + log_debug(stringdedup, phases, start)("Active start"); + _active_start = Ticks::now(); + _active++; } -void StringDedup::Stat::report_concurrent_end() { - _concurrent_elapsed += (Ticks::now() - _concurrent_start); - log_debug(stringdedup, phases)("Concurrent end: " STRDEDUP_ELAPSED_FORMAT_MS, - strdedup_elapsed_param_ms(_concurrent_elapsed)); +void StringDedup::Stat::report_active_end() { + _active_elapsed += (Ticks::now() - _active_start); + log_debug(stringdedup, phases)("Active end: " STRDEDUP_ELAPSED_FORMAT_MS, + strdedup_elapsed_param_ms(_active_elapsed)); } void StringDedup::Stat::report_phase_start(const char* phase) { @@ -194,38 +190,13 @@ void StringDedup::Stat::report_cleanup_table_end() { report_phase_end("Cleanup Table", &_cleanup_table_elapsed); } -Tickspan* StringDedup::Stat::elapsed_for_phase(Phase phase) { - switch (phase) { - case Phase::process: return &_process_elapsed; - case Phase::resize_table: return &_resize_table_elapsed; - case Phase::cleanup_table: return &_cleanup_table_elapsed; - } - ShouldNotReachHere(); - return nullptr; -} - -void StringDedup::Stat::block_phase(Phase phase) { - Ticks now = Ticks::now(); - *elapsed_for_phase(phase) += now - _phase_start; - _phase_start = now; - _block++; -} - -void StringDedup::Stat::unblock_phase() { - Ticks now = Ticks::now(); - _block_elapsed += now - _phase_start; - _phase_start = now; -} - void StringDedup::Stat::log_times(const char* prefix) const { log_debug(stringdedup)( " %s Process: %zu/" STRDEDUP_ELAPSED_FORMAT_MS - ", Idle: %zu/" STRDEDUP_ELAPSED_FORMAT_MS - ", Blocked: %zu/" STRDEDUP_ELAPSED_FORMAT_MS, + ", Idle: %zu/" STRDEDUP_ELAPSED_FORMAT_MS, prefix, _process, strdedup_elapsed_param_ms(_process_elapsed), - _idle, strdedup_elapsed_param_ms(_idle_elapsed), - _block, strdedup_elapsed_param_ms(_block_elapsed)); + _idle, strdedup_elapsed_param_ms(_idle_elapsed)); if (_resize_table > 0) { log_debug(stringdedup)( " %s Resize Table: %zu/" 
STRDEDUP_ELAPSED_FORMAT_MS, diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.hpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.hpp index 546578b96a1..db753af3be5 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.hpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,14 +34,6 @@ // Operation counters are updated when deduplicating a string. // Phase timing information is collected by the processing thread. class StringDedup::Stat { -public: - // Only phases that can be blocked, so excluding "idle". - enum class Phase { - process, - resize_table, - cleanup_table - }; - private: // Counters size_t _inspected; @@ -58,26 +50,25 @@ private: size_t _skipped_shared; // Phase counters for deduplication thread - size_t _concurrent; + size_t _active; size_t _idle; size_t _process; size_t _resize_table; size_t _cleanup_table; - size_t _block; // Time spent by the deduplication thread in different phases - Ticks _concurrent_start; - Tickspan _concurrent_elapsed; + Ticks _active_start; + Tickspan _active_elapsed; Ticks _phase_start; + // These phases are disjoint, so share _phase_start. + // Some of these overlap with active, hence need _active_start. 
Tickspan _idle_elapsed; Tickspan _process_elapsed; Tickspan _resize_table_elapsed; Tickspan _cleanup_table_elapsed; - Tickspan _block_elapsed; void report_phase_start(const char* phase); void report_phase_end(const char* phase, Tickspan* elapsed); - Tickspan* elapsed_for_phase(Phase phase); void log_times(const char* prefix) const; @@ -153,11 +144,8 @@ public: void report_cleanup_table_start(size_t entry_count, size_t dead_count); void report_cleanup_table_end(); - void report_concurrent_start(); - void report_concurrent_end(); - - void block_phase(Phase phase); - void unblock_phase(); + void report_active_start(); + void report_active_end(); void add(const Stat* const stat); void log_statistics(bool total) const; diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp index 28a0872fb29..ac8fc0759ca 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -283,7 +283,6 @@ public: virtual bool step() = 0; virtual TableValue find(typeArrayOop obj, uint hash_code) const = 0; virtual void report_end() const = 0; - virtual Stat::Phase phase() const = 0; virtual void verify() const = 0; }; @@ -321,10 +320,6 @@ public: _cur_stat.report_resize_table_end(); } - virtual Stat::Phase phase() const { - return Stat::Phase::resize_table; - } - virtual void verify() const; }; @@ -389,10 +384,6 @@ public: _cur_stat.report_cleanup_table_end(); } - virtual Stat::Phase phase() const { - return Stat::Phase::cleanup_table; - } - virtual void verify() const {} // Nothing to do here. 
}; @@ -718,11 +709,6 @@ void StringDedup::Table::cleanup_end() { Atomic::store(&_dead_state, DeadState::wait2); } -StringDedup::Stat::Phase StringDedup::Table::cleanup_phase() { - assert(_cleanup_state != nullptr, "precondition"); - return _cleanup_state->phase(); -} - void StringDedup::Table::verify() { size_t total_count = 0; for (size_t i = 0; i < _number_of_buckets; ++i) { diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.hpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.hpp index ed2d379aedd..19df8184a05 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.hpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -138,10 +138,6 @@ public: // precondition: a cleanup is in progress. static void cleanup_end(); - // Return the phase kind for the cleanup being performed. - // precondition: a cleanup is in progress. - static Stat::Phase cleanup_phase(); - static void verify(); static void log_statistics(); }; diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupThread.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupThread.cpp new file mode 100644 index 00000000000..371b409abfd --- /dev/null +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupThread.cpp @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shared/stringdedup/stringDedup.hpp" +#include "gc/shared/stringdedup/stringDedupProcessor.hpp" +#include "gc/shared/stringdedup/stringDedupThread.hpp" +#include "runtime/handles.hpp" +#include "runtime/os.hpp" +#include "utilities/exceptions.hpp" + +StringDedupThread::StringDedupThread() : JavaThread(thread_entry) {} + +void StringDedupThread::initialize() { + EXCEPTION_MARK; + + const char* name = "StringDedupThread"; + Handle thread_oop = JavaThread::create_system_thread_object(name, CHECK); + StringDedupThread* thread = new StringDedupThread(); + JavaThread::vm_exit_on_osthread_failure(thread); + JavaThread::start_internal_daemon(THREAD, thread, thread_oop, NormPriority); +} + +void StringDedupThread::thread_entry(JavaThread* thread, TRAPS) { + StringDedup::_processor->run(thread); +} + +bool StringDedupThread::is_hidden_from_external_view() const { + return true; +} + diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupThread.hpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupThread.hpp new file mode 100644 index 00000000000..504e1fe617f --- /dev/null +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupThread.hpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2023, Oracle and/or its 
affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHARED_STRINGDEDUP_STRINGDEDUPTHREAD_HPP +#define SHARE_GC_SHARED_STRINGDEDUP_STRINGDEDUPTHREAD_HPP + +#include "gc/shared/stringdedup/stringDedup.hpp" +#include "runtime/javaThread.hpp" +#include "utilities/exceptions.hpp" +#include "utilities/macros.hpp" + +// Thread class for string deduplication. There is only one instance of this +// class. This class provides thread management. It uses the Processor +// to perform most of the work. +// +// Unlike most of the classes in the stringdedup implementation, this class is +// not an inner class of StringDedup. This is because we need a simple public +// identifier for use by VMStructs. 
+class StringDedupThread : public JavaThread { + friend class VMStructs; + + StringDedupThread(); + ~StringDedupThread() = default; + + NONCOPYABLE(StringDedupThread); + + static void thread_entry(JavaThread* thread, TRAPS); + +public: + static void initialize(); + + bool is_hidden_from_external_view() const override; +}; + +#endif // SHARE_GC_SHARED_STRINGDEDUP_STRINGDEDUPTHREAD_HPP diff --git a/src/hotspot/share/gc/shared/vmStructs_gc.hpp b/src/hotspot/share/gc/shared/vmStructs_gc.hpp index ecd4045cbdd..2db0f466d34 100644 --- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp +++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp @@ -89,12 +89,9 @@ nonstatic_field(CardTable, _whole_heap, const MemRegion) \ nonstatic_field(CardTable, _page_size, const size_t) \ nonstatic_field(CardTable, _byte_map_size, const size_t) \ - nonstatic_field(CardTable, _byte_map, CardTable::CardValue*) \ - nonstatic_field(CardTable, _cur_covered_regions, int) \ - nonstatic_field(CardTable, _covered, MemRegion*) \ - nonstatic_field(CardTable, _committed, MemRegion*) \ + nonstatic_field(CardTable, _byte_map, CardTable::CardValue*) \ nonstatic_field(CardTable, _guard_region, MemRegion) \ - nonstatic_field(CardTable, _byte_map_base, CardTable::CardValue*) \ + nonstatic_field(CardTable, _byte_map_base, CardTable::CardValue*) \ nonstatic_field(CardTableBarrierSet, _defer_initial_card_mark, bool) \ nonstatic_field(CardTableBarrierSet, _card_table, CardTable*) \ \ diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index b213625c80e..bfa5d9d6a6a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -58,7 +58,6 @@ #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp" #include "gc/shenandoah/shenandoahReferenceProcessor.hpp" #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" -#include "gc/shenandoah/shenandoahStringDedup.hpp" #include 
"gc/shenandoah/shenandoahSTWMark.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include "gc/shenandoah/shenandoahVerifier.hpp" @@ -478,8 +477,8 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : _phase_timings(nullptr), _monitoring_support(nullptr), _memory_pool(nullptr), - _stw_memory_manager("Shenandoah Pauses", "end of GC pause"), - _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"), + _stw_memory_manager("Shenandoah Pauses"), + _cycle_memory_manager("Shenandoah Cycles"), _gc_timer(new ConcurrentGCTimer()), _soft_ref_policy(), _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes), @@ -1186,9 +1185,6 @@ void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const { if (_safepoint_workers != nullptr) { _safepoint_workers->threads_do(tcl); } - if (ShenandoahStringDedup::is_enabled()) { - ShenandoahStringDedup::threads_do(tcl); - } } void ShenandoahHeap::print_tracing_info() const { @@ -2193,13 +2189,13 @@ bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) { } void ShenandoahHeap::safepoint_synchronize_begin() { - if (ShenandoahSuspendibleWorkers || UseStringDeduplication) { + if (ShenandoahSuspendibleWorkers) { SuspendibleThreadSet::synchronize(); } } void ShenandoahHeap::safepoint_synchronize_end() { - if (ShenandoahSuspendibleWorkers || UseStringDeduplication) { + if (ShenandoahSuspendibleWorkers) { SuspendibleThreadSet::desynchronize(); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp index 0c5c7676e58..711d906ec7c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp @@ -52,6 +52,7 @@ ShenandoahGCSession::ShenandoahGCSession(GCCause::Cause cause) : _heap->shenandoah_policy()->record_cycle_start(); _heap->heuristics()->record_cycle_start(); _trace_cycle.initialize(_heap->cycle_memory_manager(), cause, + "end of GC cycle", /* allMemoryPoolsAffected */ true, /* 
recordGCBeginTime = */ true, /* recordPreGCUsage = */ true, @@ -73,9 +74,10 @@ ShenandoahGCSession::~ShenandoahGCSession() { _heap->set_gc_cause(GCCause::_no_gc); } -ShenandoahGCPauseMark::ShenandoahGCPauseMark(uint gc_id, SvcGCMarker::reason_type type) : +ShenandoahGCPauseMark::ShenandoahGCPauseMark(uint gc_id, const char* notification_message, SvcGCMarker::reason_type type) : _heap(ShenandoahHeap::heap()), _gc_id_mark(gc_id), _svc_gc_mark(type), _is_gc_active_mark() { _trace_pause.initialize(_heap->stw_memory_manager(), _heap->gc_cause(), + notification_message, /* allMemoryPoolsAffected */ true, /* recordGCBeginTime = */ true, /* recordPreGCUsage = */ false, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp index a521e46d886..af32a20013a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp @@ -136,7 +136,7 @@ private: TraceMemoryManagerStats _trace_pause; public: - ShenandoahGCPauseMark(uint gc_id, SvcGCMarker::reason_type type); + ShenandoahGCPauseMark(uint gc_id, const char* notification_action, SvcGCMarker::reason_type type); }; class ShenandoahSafepoint : public AllStatic { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp index 18bd4d6e6aa..d6be0920558 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp @@ -47,36 +47,36 @@ void VM_ShenandoahReferenceOperation::doit_epilogue() { } void VM_ShenandoahInitMark::doit() { - ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT); + ShenandoahGCPauseMark mark(_gc_id, "Init Mark", SvcGCMarker::CONCURRENT); _gc->entry_init_mark(); } void VM_ShenandoahFinalMarkStartEvac::doit() { - ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT); + ShenandoahGCPauseMark mark(_gc_id, "Final Mark", 
SvcGCMarker::CONCURRENT); _gc->entry_final_mark(); } void VM_ShenandoahFullGC::doit() { - ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::FULL); + ShenandoahGCPauseMark mark(_gc_id, "Full GC", SvcGCMarker::FULL); _full_gc->entry_full(_gc_cause); } void VM_ShenandoahDegeneratedGC::doit() { - ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT); + ShenandoahGCPauseMark mark(_gc_id, "Degenerated GC", SvcGCMarker::CONCURRENT); _gc->entry_degenerated(); } void VM_ShenandoahInitUpdateRefs::doit() { - ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT); + ShenandoahGCPauseMark mark(_gc_id, "Init Update Refs", SvcGCMarker::CONCURRENT); _gc->entry_init_updaterefs(); } void VM_ShenandoahFinalUpdateRefs::doit() { - ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT); + ShenandoahGCPauseMark mark(_gc_id, "Final Update Refs", SvcGCMarker::CONCURRENT); _gc->entry_final_updaterefs(); } void VM_ShenandoahFinalRoots::doit() { - ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT); + ShenandoahGCPauseMark mark(_gc_id, "Final Roots", SvcGCMarker::CONCURRENT); _gc->entry_final_roots(); } diff --git a/src/hotspot/share/gc/z/zServiceability.cpp b/src/hotspot/share/gc/z/zServiceability.cpp index 744ad7270e3..c708d0d0c47 100644 --- a/src/hotspot/share/gc/z/zServiceability.cpp +++ b/src/hotspot/share/gc/z/zServiceability.cpp @@ -110,9 +110,8 @@ MemoryUsage ZServiceabilityMemoryPool::get_memory_usage() { } ZServiceabilityMemoryManager::ZServiceabilityMemoryManager(const char* name, - const char* end_message, ZServiceabilityMemoryPool* pool) : - GCMemoryManager(name, end_message) { + GCMemoryManager(name) { add_pool(pool); } @@ -120,8 +119,8 @@ ZServiceability::ZServiceability(size_t min_capacity, size_t max_capacity) : _min_capacity(min_capacity), _max_capacity(max_capacity), _memory_pool(_min_capacity, _max_capacity), - _cycle_memory_manager("ZGC Cycles", "end of GC cycle", &_memory_pool), - _pause_memory_manager("ZGC Pauses", "end of GC pause", 
&_memory_pool), + _cycle_memory_manager("ZGC Cycles", &_memory_pool), + _pause_memory_manager("ZGC Pauses", &_memory_pool), _counters(NULL) {} void ZServiceability::initialize() { @@ -147,6 +146,7 @@ ZServiceabilityCounters* ZServiceability::counters() { ZServiceabilityCycleTracer::ZServiceabilityCycleTracer() : _memory_manager_stats(ZHeap::heap()->serviceability_cycle_memory_manager(), ZCollectedHeap::heap()->gc_cause(), + "end of GC cycle", true /* allMemoryPoolsAffected */, true /* recordGCBeginTime */, true /* recordPreGCUsage */, @@ -161,6 +161,7 @@ ZServiceabilityPauseTracer::ZServiceabilityPauseTracer() : _counters_stats(ZHeap::heap()->serviceability_counters()->collector_counters()), _memory_manager_stats(ZHeap::heap()->serviceability_pause_memory_manager(), ZCollectedHeap::heap()->gc_cause(), + "end of GC pause", true /* allMemoryPoolsAffected */, true /* recordGCBeginTime */, false /* recordPreGCUsage */, diff --git a/src/hotspot/share/gc/z/zServiceability.hpp b/src/hotspot/share/gc/z/zServiceability.hpp index 3947038c4ce..5d03184d244 100644 --- a/src/hotspot/share/gc/z/zServiceability.hpp +++ b/src/hotspot/share/gc/z/zServiceability.hpp @@ -44,7 +44,6 @@ public: class ZServiceabilityMemoryManager : public GCMemoryManager { public: ZServiceabilityMemoryManager(const char* name, - const char* end_message, ZServiceabilityMemoryPool* pool); }; diff --git a/src/hotspot/share/include/cds.h b/src/hotspot/share/include/cds.h index b9ce48dddaa..af48c1a8c35 100644 --- a/src/hotspot/share/include/cds.h +++ b/src/hotspot/share/include/cds.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,11 +35,11 @@ // // Also, this is a C header file. Do not use C++ here. 
-#define NUM_CDS_REGIONS 7 // this must be the same as MetaspaceShared::n_regions +#define NUM_CDS_REGIONS 4 // this must be the same as MetaspaceShared::n_regions #define CDS_ARCHIVE_MAGIC 0xf00baba2 #define CDS_DYNAMIC_ARCHIVE_MAGIC 0xf00baba8 #define CDS_GENERIC_HEADER_SUPPORTED_MIN_VERSION 13 -#define CURRENT_CDS_ARCHIVE_VERSION 17 +#define CURRENT_CDS_ARCHIVE_VERSION 18 typedef struct CDSFileMapRegion { int _crc; // CRC checksum of this region. diff --git a/src/hotspot/share/include/jvm.h b/src/hotspot/share/include/jvm.h index 55e640cd8b0..01c8c317231 100644 --- a/src/hotspot/share/include/jvm.h +++ b/src/hotspot/share/include/jvm.h @@ -174,6 +174,9 @@ JVM_IsPreviewEnabled(void); JNIEXPORT jboolean JNICALL JVM_IsContinuationsSupported(void); +JNIEXPORT jboolean JNICALL +JVM_IsForeignLinkerSupported(void); + JNIEXPORT void JNICALL JVM_InitializeFromArchive(JNIEnv* env, jclass cls); @@ -273,7 +276,7 @@ JNIEXPORT void JNICALL JVM_Yield(JNIEnv *env, jclass threadClass); JNIEXPORT void JNICALL -JVM_Sleep(JNIEnv *env, jclass threadClass, jlong millis); +JVM_Sleep(JNIEnv *env, jclass threadClass, jlong nanos); JNIEXPORT jobject JNICALL JVM_CurrentCarrierThread(JNIEnv *env, jclass threadClass); @@ -1141,10 +1144,16 @@ JVM_GetEnclosingMethodInfo(JNIEnv* env, jclass ofClass); * Virtual thread support. 
*/ JNIEXPORT void JNICALL -JVM_VirtualThreadMount(JNIEnv* env, jobject vthread, jboolean hide, jboolean first_mount); +JVM_VirtualThreadStart(JNIEnv* env, jobject vthread); JNIEXPORT void JNICALL -JVM_VirtualThreadUnmount(JNIEnv* env, jobject vthread, jboolean hide, jboolean last_unmount); +JVM_VirtualThreadEnd(JNIEnv* env, jobject vthread); + +JNIEXPORT void JNICALL +JVM_VirtualThreadMount(JNIEnv* env, jobject vthread, jboolean hide); + +JNIEXPORT void JNICALL +JVM_VirtualThreadUnmount(JNIEnv* env, jobject vthread, jboolean hide); JNIEXPORT void JNICALL JVM_VirtualThreadHideFrames(JNIEnv* env, jobject vthread, jboolean hide); diff --git a/src/hotspot/share/interpreter/bytecodeUtils.cpp b/src/hotspot/share/interpreter/bytecodeUtils.cpp index 975c7831273..89055fb7d3f 100644 --- a/src/hotspot/share/interpreter/bytecodeUtils.cpp +++ b/src/hotspot/share/interpreter/bytecodeUtils.cpp @@ -476,7 +476,7 @@ ExceptionMessageBuilder::ExceptionMessageBuilder(Method* method, int bci) : _stacks->at_put(0, new SimulatedOperandStack()); // And initialize the start of all exception handlers. 
- if (const_method->has_exception_handler()) { + if (const_method->has_exception_table()) { ExceptionTableElement *et = const_method->exception_table_start(); for (int i = 0; i < const_method->exception_table_length(); ++i) { u2 index = et[i].handler_pc; diff --git a/src/hotspot/share/interpreter/rewriter.cpp b/src/hotspot/share/interpreter/rewriter.cpp index 40734daff0b..c348a6a2335 100644 --- a/src/hotspot/share/interpreter/rewriter.cpp +++ b/src/hotspot/share/interpreter/rewriter.cpp @@ -473,7 +473,7 @@ void Rewriter::scan_method(Thread* thread, Method* method, bool reverse, bool* i } } - // Update access flags + // Update flags if (has_monitor_bytecodes) { method->set_has_monitor_bytecodes(); } @@ -482,8 +482,6 @@ void Rewriter::scan_method(Thread* thread, Method* method, bool reverse, bool* i // have to be rewritten, so we run the oopMapGenerator on the method if (nof_jsrs > 0) { method->set_has_jsrs(); - // Second pass will revisit this method. - assert(method->has_jsrs(), "didn't we just set this?"); } } diff --git a/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp b/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp index 63901910c31..45954842223 100644 --- a/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp +++ b/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp @@ -664,8 +664,7 @@ bool JfrJavaSupport::is_jdk_jfr_module_available(outputStream* stream, TRAPS) { typedef JfrOopTraceId AccessThreadTraceId; -static JavaThread* get_native(jobject thread) { - ThreadsListHandle tlh; +static JavaThread* get_native(ThreadsListHandle& tlh, jobject thread) { JavaThread* native_thread = NULL; (void)tlh.cv_internal_thread_to_JavaThread(thread, &native_thread, NULL); return native_thread; @@ -704,7 +703,8 @@ void JfrJavaSupport::exclude(JavaThread* jt, oop ref, jobject thread) { return; } } - jt = get_native(thread); + ThreadsListHandle tlh; + jt = get_native(tlh, thread); if (jt != nullptr) { JfrThreadLocal::exclude_jvm_thread(jt); } @@ -720,7 +720,8 @@ void 
JfrJavaSupport::include(JavaThread* jt, oop ref, jobject thread) { return; } } - jt = get_native(thread); + ThreadsListHandle tlh; + jt = get_native(tlh, thread); if (jt != nullptr) { JfrThreadLocal::include_jvm_thread(jt); } diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp index 5811cc3f778..d3fc7efe87d 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp @@ -926,8 +926,8 @@ C2V_END C2V_VMENTRY(void, setNotInlinableOrCompilable,(JNIEnv* env, jobject, ARGUMENT_PAIR(method))) methodHandle method(THREAD, UNPACK_PAIR(Method, method)); - method->set_not_c1_compilable(); - method->set_not_c2_compilable(); + method->set_is_not_c1_compilable(); + method->set_is_not_c2_compilable(); method->set_dont_inline(true); C2V_END @@ -2050,14 +2050,14 @@ static jobject read_field_value(Handle obj, long displacement, jchar type_char, // folding Unsafe.get* methods with volatile semantics. switch (basic_type) { - case T_BOOLEAN: value = obj->bool_field_acquire(displacement); break; - case T_BYTE: value = obj->byte_field_acquire(displacement); break; - case T_SHORT: value = obj->short_field_acquire(displacement); break; - case T_CHAR: value = obj->char_field_acquire(displacement); break; + case T_BOOLEAN: value = HeapAccess::load(obj->field_addr(displacement)); break; + case T_BYTE: value = HeapAccess::load(obj->field_addr(displacement)); break; + case T_SHORT: value = HeapAccess::load(obj->field_addr(displacement)); break; + case T_CHAR: value = HeapAccess::load(obj->field_addr(displacement)); break; case T_FLOAT: - case T_INT: value = obj->int_field_acquire(displacement); break; + case T_INT: value = HeapAccess::load(obj->field_addr(displacement)); break; case T_DOUBLE: - case T_LONG: value = obj->long_field_acquire(displacement); break; + case T_LONG: value = HeapAccess::load(obj->field_addr(displacement)); break; case T_OBJECT: { if (displacement == 
java_lang_Class::component_mirror_offset() && java_lang_Class::is_instance(obj()) && @@ -2067,7 +2067,9 @@ static jobject read_field_value(Handle obj, long displacement, jchar type_char, return JVMCIENV->get_jobject(JVMCIENV->get_JavaConstant_NULL_POINTER()); } - oop value = obj->obj_field_acquire(displacement); + // Perform the read including any barriers required to make the reference strongly reachable + // since it will be wrapped as a JavaConstant. + oop value = obj->obj_field_access(displacement); if (value == nullptr) { return JVMCIENV->get_jobject(JVMCIENV->get_JavaConstant_NULL_POINTER()); @@ -2670,9 +2672,7 @@ C2V_VMENTRY_NULL(jobject, asReflectionExecutable, (JNIEnv* env, jobject, ARGUMEN return JNIHandles::make_local(THREAD, executable); } -C2V_VMENTRY_NULL(jobject, asReflectionField, (JNIEnv* env, jobject, ARGUMENT_PAIR(klass), jint index)) - requireInHotSpot("asReflectionField", JVMCI_CHECK_NULL); - Klass* klass = UNPACK_PAIR(Klass, klass); +static InstanceKlass* check_field(Klass* klass, jint index, JVMCI_TRAPS) { if (!klass->is_instance_klass()) { JVMCI_THROW_MSG_NULL(IllegalArgumentException, err_msg("Expected non-primitive type, got %s", klass->external_name())); @@ -2682,11 +2682,100 @@ C2V_VMENTRY_NULL(jobject, asReflectionField, (JNIEnv* env, jobject, ARGUMENT_PAI JVMCI_THROW_MSG_NULL(IllegalArgumentException, err_msg("Field index %d out of bounds for %s", index, klass->external_name())); } + return iklass; +} + +C2V_VMENTRY_NULL(jobject, asReflectionField, (JNIEnv* env, jobject, ARGUMENT_PAIR(klass), jint index)) + requireInHotSpot("asReflectionField", JVMCI_CHECK_NULL); + Klass* klass = UNPACK_PAIR(Klass, klass); + InstanceKlass* iklass = check_field(klass, index, JVMCIENV); fieldDescriptor fd(iklass, index); oop reflected = Reflection::new_field(&fd, CHECK_NULL); return JNIHandles::make_local(THREAD, reflected); } +static jbyteArray get_encoded_annotation_data(InstanceKlass* holder, AnnotationArray* annotations_array, bool for_class, + jint 
filter_length, jlong filter_klass_pointers, + JavaThread* THREAD, JVMCIEnv* JVMCIENV) { + // Get a ConstantPool object for annotation parsing + Handle jcp = reflect_ConstantPool::create(CHECK_NULL); + reflect_ConstantPool::set_cp(jcp(), holder->constants()); + + // load VMSupport + Symbol* klass = vmSymbols::jdk_internal_vm_VMSupport(); + Klass* k = SystemDictionary::resolve_or_fail(klass, true, CHECK_NULL); + + InstanceKlass* vm_support = InstanceKlass::cast(k); + if (vm_support->should_be_initialized()) { + vm_support->initialize(CHECK_NULL); + } + + typeArrayOop annotations_oop = Annotations::make_java_array(annotations_array, CHECK_NULL); + typeArrayHandle annotations = typeArrayHandle(THREAD, annotations_oop); + + InstanceKlass** filter = filter_length == 1 ? + (InstanceKlass**) &filter_klass_pointers: + (InstanceKlass**) filter_klass_pointers; + objArrayOop filter_oop = oopFactory::new_objArray(vmClasses::Class_klass(), filter_length, CHECK_NULL); + objArrayHandle filter_classes(THREAD, filter_oop); + for (int i = 0; i < filter_length; i++) { + filter_classes->obj_at_put(i, filter[i]->java_mirror()); + } + + // invoke VMSupport.encodeAnnotations + JavaValue result(T_OBJECT); + JavaCallArguments args; + args.push_oop(annotations); + args.push_oop(Handle(THREAD, holder->java_mirror())); + args.push_oop(jcp); + args.push_int(for_class); + args.push_oop(filter_classes); + Symbol* signature = vmSymbols::encodeAnnotations_signature(); + JavaCalls::call_static(&result, + vm_support, + vmSymbols::encodeAnnotations_name(), + signature, + &args, + CHECK_NULL); + + oop res = result.get_oop(); + if (JVMCIENV->is_hotspot()) { + return (jbyteArray) JNIHandles::make_local(THREAD, res); + } + + typeArrayOop ba = typeArrayOop(res); + int ba_len = ba->length(); + jbyte* ba_buf = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, jbyte, ba_len); + if (ba_buf == nullptr) { + JVMCI_THROW_MSG_NULL(InternalError, + err_msg("could not allocate %d bytes", ba_len)); + + } + 
memcpy(ba_buf, ba->byte_at_addr(0), ba_len); + JVMCIPrimitiveArray ba_dest = JVMCIENV->new_byteArray(ba_len, JVMCI_CHECK_NULL); + JVMCIENV->copy_bytes_from(ba_buf, ba_dest, 0, ba_len); + return JVMCIENV->get_jbyteArray(ba_dest); +} + +C2V_VMENTRY_NULL(jbyteArray, getEncodedClassAnnotationData, (JNIEnv* env, jobject, ARGUMENT_PAIR(klass), + jobject filter, jint filter_length, jlong filter_klass_pointers)) + InstanceKlass* holder = InstanceKlass::cast(UNPACK_PAIR(Klass, klass)); + return get_encoded_annotation_data(holder, holder->class_annotations(), true, filter_length, filter_klass_pointers, THREAD, JVMCIENV); +C2V_END + +C2V_VMENTRY_NULL(jbyteArray, getEncodedExecutableAnnotationData, (JNIEnv* env, jobject, ARGUMENT_PAIR(method), + jobject filter, jint filter_length, jlong filter_klass_pointers)) + methodHandle method(THREAD, UNPACK_PAIR(Method, method)); + return get_encoded_annotation_data(method->method_holder(), method->annotations(), false, filter_length, filter_klass_pointers, THREAD, JVMCIENV); +C2V_END + +C2V_VMENTRY_NULL(jbyteArray, getEncodedFieldAnnotationData, (JNIEnv* env, jobject, ARGUMENT_PAIR(klass), jint index, + jobject filter, jint filter_length, jlong filter_klass_pointers)) + InstanceKlass* holder = check_field(InstanceKlass::cast(UNPACK_PAIR(Klass, klass)), index, JVMCIENV); + fieldDescriptor fd(holder, index); + return get_encoded_annotation_data(holder, fd.annotations(), false, filter_length, filter_klass_pointers, THREAD, JVMCIENV); +C2V_END + C2V_VMENTRY_NULL(jobjectArray, getFailedSpeculations, (JNIEnv* env, jobject, jlong failed_speculations_address, jobjectArray current)) FailedSpeculation* head = *((FailedSpeculation**)(address) failed_speculations_address); int result_length = 0; @@ -2967,6 +3056,9 @@ JNINativeMethod CompilerToVM::methods[] = { {CC "getCode", CC "(" HS_INSTALLED_CODE ")[B", FN_PTR(getCode)}, {CC "asReflectionExecutable", CC "(" HS_METHOD2 ")" REFLECTION_EXECUTABLE, FN_PTR(asReflectionExecutable)}, {CC 
"asReflectionField", CC "(" HS_KLASS2 "I)" REFLECTION_FIELD, FN_PTR(asReflectionField)}, + {CC "getEncodedClassAnnotationData", CC "(" HS_KLASS2 OBJECT "IJ)[B", FN_PTR(getEncodedClassAnnotationData)}, + {CC "getEncodedExecutableAnnotationData", CC "(" HS_METHOD2 OBJECT "IJ)[B", FN_PTR(getEncodedExecutableAnnotationData)}, + {CC "getEncodedFieldAnnotationData", CC "(" HS_KLASS2 "I" OBJECT "IJ)[B", FN_PTR(getEncodedFieldAnnotationData)}, {CC "getFailedSpeculations", CC "(J[[B)[[B", FN_PTR(getFailedSpeculations)}, {CC "getFailedSpeculationsAddress", CC "(" HS_METHOD2 ")J", FN_PTR(getFailedSpeculationsAddress)}, {CC "releaseFailedSpeculations", CC "(J)V", FN_PTR(releaseFailedSpeculations)}, diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp index f93fdddf806..eb1fe403575 100644 --- a/src/hotspot/share/jvmci/jvmciRuntime.cpp +++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp @@ -939,7 +939,9 @@ int JVMCIRuntime::release_cleared_oop_handles() { // Example: to_release: 2 // Bulk release the handles with a null referent - object_handles()->release(_oop_handles.adr_at(num_alive), to_release); + if (to_release != 0) { + object_handles()->release(_oop_handles.adr_at(num_alive), to_release); + } // Truncate oop handles to only those with a non-null referent JVMCI_event_1("compacted oop handles in JVMCI runtime %d from %d to %d", _id, _oop_handles.length(), num_alive); diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp index 15276d44d65..78b521eafc0 100644 --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -104,6 +104,7 @@ \ static_field(Abstract_VM_Version, _features, uint64_t) \ \ + nonstatic_field(Annotations, _class_annotations, AnnotationArray*) \ nonstatic_field(Annotations, _fields_annotations, Array*) \ \ nonstatic_field(Array, _length, int) \ @@ -128,7 +129,7 @@ nonstatic_field(ConstantPool, _source_file_name_index, u2) \ \ 
nonstatic_field(ConstMethod, _constants, ConstantPool*) \ - nonstatic_field(ConstMethod, _flags, u2) \ + nonstatic_field(ConstMethod, _flags._flags, u4) \ nonstatic_field(ConstMethod, _code_size, u2) \ nonstatic_field(ConstMethod, _name_index, u2) \ nonstatic_field(ConstMethod, _signature_index, u2) \ @@ -227,7 +228,7 @@ nonstatic_field(Method, _access_flags, AccessFlags) \ nonstatic_field(Method, _vtable_index, int) \ nonstatic_field(Method, _intrinsic_id, u2) \ - nonstatic_field(Method, _flags, u2) \ + nonstatic_field(Method, _flags._status, u4) \ volatile_nonstatic_field(Method, _code, CompiledMethod*) \ volatile_nonstatic_field(Method, _from_compiled_entry, address) \ \ @@ -415,8 +416,6 @@ declare_constant(JVMCINMethodData::SPECULATION_LENGTH_BITS) \ \ declare_constant(JVM_ACC_WRITTEN_FLAGS) \ - declare_constant(JVM_ACC_MONITOR_MATCH) \ - declare_constant(JVM_ACC_HAS_MONITOR_BYTECODES) \ declare_constant(JVM_ACC_HAS_FINALIZER) \ declare_constant(JVM_ACC_IS_CLONEABLE_FAST) \ declare_constant(JVM_ACC_IS_HIDDEN_CLASS) \ @@ -581,11 +580,16 @@ declare_constant(ConstantPool::CPCACHE_INDEX_TAG) \ declare_constant(ConstantPool::_has_dynamic_constant) \ \ - declare_constant(ConstMethod::_has_linenumber_table) \ - declare_constant(ConstMethod::_has_localvariable_table) \ - declare_constant(ConstMethod::_has_exception_table) \ - declare_constant(ConstMethod::_has_method_annotations) \ - declare_constant(ConstMethod::_has_parameter_annotations) \ + declare_constant(ConstMethodFlags::_misc_has_linenumber_table) \ + declare_constant(ConstMethodFlags::_misc_has_localvariable_table) \ + declare_constant(ConstMethodFlags::_misc_has_exception_table) \ + declare_constant(ConstMethodFlags::_misc_has_method_annotations) \ + declare_constant(ConstMethodFlags::_misc_has_parameter_annotations) \ + declare_constant(ConstMethodFlags::_misc_caller_sensitive) \ + declare_constant(ConstMethodFlags::_misc_is_hidden) \ + declare_constant(ConstMethodFlags::_misc_intrinsic_candidate) \ + 
declare_constant(ConstMethodFlags::_misc_reserved_stack_access) \ + declare_constant(ConstMethodFlags::_misc_changes_current_thread) \ \ declare_constant(CounterData::count_off) \ \ @@ -682,13 +686,8 @@ \ declare_constant(markWord::no_hash) \ \ - declare_constant(Method::_caller_sensitive) \ - declare_constant(Method::_force_inline) \ - declare_constant(Method::_dont_inline) \ - declare_constant(Method::_hidden) \ - declare_constant(Method::_intrinsic_candidate) \ - declare_constant(Method::_reserved_stack_access) \ - declare_constant(Method::_changes_current_thread) \ + declare_constant(MethodFlags::_misc_force_inline) \ + declare_constant(MethodFlags::_misc_dont_inline) \ \ declare_constant(Method::nonvirtual_vtable_index) \ declare_constant(Method::invalid_vtable_index) \ diff --git a/src/hotspot/share/memory/allocation.cpp b/src/hotspot/share/memory/allocation.cpp index 55d5eebfabc..64b00a13694 100644 --- a/src/hotspot/share/memory/allocation.cpp +++ b/src/hotspot/share/memory/allocation.cpp @@ -71,11 +71,6 @@ void FreeHeap(void* p) { void* MetaspaceObj::_shared_metaspace_base = nullptr; void* MetaspaceObj::_shared_metaspace_top = nullptr; -void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; } -void StackObj::operator delete(void* p) { ShouldNotCallThis(); } -void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; } -void StackObj::operator delete [](void* p) { ShouldNotCallThis(); } - void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, TRAPS) throw() { diff --git a/src/hotspot/share/memory/allocation.hpp b/src/hotspot/share/memory/allocation.hpp index e694a2ac03f..bd3bd27f01d 100644 --- a/src/hotspot/share/memory/allocation.hpp +++ b/src/hotspot/share/memory/allocation.hpp @@ -179,13 +179,13 @@ void FreeHeap(void* p); class CHeapObjBase { public: - ALWAYSINLINE void* operator new(size_t size, MEMFLAGS f) throw() { + ALWAYSINLINE void* 
operator new(size_t size, MEMFLAGS f) { return AllocateHeap(size, f); } ALWAYSINLINE void* operator new(size_t size, MEMFLAGS f, - const NativeCallStack& stack) throw() { + const NativeCallStack& stack) { return AllocateHeap(size, f, stack); } @@ -202,13 +202,13 @@ class CHeapObjBase { return AllocateHeap(size, f, AllocFailStrategy::RETURN_NULL); } - ALWAYSINLINE void* operator new[](size_t size, MEMFLAGS f) throw() { + ALWAYSINLINE void* operator new[](size_t size, MEMFLAGS f) { return AllocateHeap(size, f); } ALWAYSINLINE void* operator new[](size_t size, MEMFLAGS f, - const NativeCallStack& stack) throw() { + const NativeCallStack& stack) { return AllocateHeap(size, f, stack); } @@ -233,12 +233,12 @@ class CHeapObjBase { template class CHeapObj { public: - ALWAYSINLINE void* operator new(size_t size) throw() { + ALWAYSINLINE void* operator new(size_t size) { return CHeapObjBase::operator new(size, F); } ALWAYSINLINE void* operator new(size_t size, - const NativeCallStack& stack) throw() { + const NativeCallStack& stack) { return CHeapObjBase::operator new(size, F, stack); } @@ -251,12 +251,12 @@ class CHeapObj { return CHeapObjBase::operator new(size, F, nt); } - ALWAYSINLINE void* operator new[](size_t size) throw() { + ALWAYSINLINE void* operator new[](size_t size) { return CHeapObjBase::operator new[](size, F); } ALWAYSINLINE void* operator new[](size_t size, - const NativeCallStack& stack) throw() { + const NativeCallStack& stack) { return CHeapObjBase::operator new[](size, F, stack); } @@ -282,11 +282,11 @@ class CHeapObj { // Calling new or delete will result in fatal error. 
class StackObj { - private: - void* operator new(size_t size) throw(); - void* operator new [](size_t size) throw(); - void operator delete(void* p); - void operator delete [](void* p); + public: + void* operator new(size_t size) = delete; + void* operator new [](size_t size) = delete; + void operator delete(void* p) = delete; + void operator delete [](void* p) = delete; }; // Base class for objects stored in Metaspace. @@ -432,7 +432,7 @@ extern void resource_free_bytes( Thread* thread, char *old, size_t size ); // Base class for objects allocated in the resource area. class ResourceObj { public: - void* operator new(size_t size) throw() { + void* operator new(size_t size) { return resource_allocate_bytes(size); } @@ -500,11 +500,11 @@ protected: void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() = delete; // Arena allocations - void* operator new(size_t size, Arena *arena) throw(); - void* operator new [](size_t size, Arena *arena) throw() = delete; + void* operator new(size_t size, Arena *arena); + void* operator new [](size_t size, Arena *arena) = delete; // Resource allocations - void* operator new(size_t size) throw() { + void* operator new(size_t size) { address res = (address)resource_allocate_bytes(size); DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);) return res; @@ -515,8 +515,8 @@ protected: return res; } - void* operator new [](size_t size) throw() = delete; - void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() = delete; + void* operator new [](size_t size) = delete; + void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) = delete; void operator delete(void* p); void operator delete [](void* p) = delete; diff --git a/src/hotspot/share/memory/iterator.hpp b/src/hotspot/share/memory/iterator.hpp index eee59334757..5d5c494ef11 100644 --- a/src/hotspot/share/memory/iterator.hpp +++ b/src/hotspot/share/memory/iterator.hpp @@ -129,11 +129,12 @@ public: 
virtual void oops_do(OopClosure* cl) = 0; }; +enum class derived_base : intptr_t; enum class derived_pointer : intptr_t; class DerivedOopClosure : public Closure { public: enum { SkipNull = true }; - virtual void do_derived_oop(oop* base, derived_pointer* derived) = 0; + virtual void do_derived_oop(derived_base* base, derived_pointer* derived) = 0; }; class KlassClosure : public Closure { diff --git a/src/hotspot/share/memory/metaspace/counters.hpp b/src/hotspot/share/memory/metaspace/counters.hpp index 33c19d1186a..c7c841df423 100644 --- a/src/hotspot/share/memory/metaspace/counters.hpp +++ b/src/hotspot/share/memory/metaspace/counters.hpp @@ -93,22 +93,22 @@ public: AbstractAtomicCounter() : _c(0) {} - T get() const { return _c; } + T get() const { return Atomic::load(&_c); } void increment() { - Atomic::inc(&_c); + Atomic::inc(&_c, memory_order_relaxed); } void decrement() { - Atomic::dec(&_c); + Atomic::dec(&_c, memory_order_relaxed); } void increment_by(T v) { - Atomic::add(&_c, v); + Atomic::add(&_c, v, memory_order_relaxed); } void decrement_by(T v) { - Atomic::sub(&_c, v); + Atomic::sub(&_c, v, memory_order_relaxed); } #ifdef ASSERT diff --git a/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp b/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp index b87554fe94a..a417ed00328 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp @@ -116,7 +116,6 @@ static void print_settings(outputStream* out, size_t scale) { print_human_readable_size(out, MetaspaceGC::capacity_until_GC(), scale); out->cr(); out->print_cr("CDS: %s", (UseSharedSpaces ? "on" : (DumpSharedSpaces ? 
"dump" : "off"))); - out->print_cr("MetaspaceReclaimPolicy: %s", MetaspaceReclaimPolicy); Settings::print_on(out); } diff --git a/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp b/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp index 0a3fb61d69c..7dc742755b6 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp @@ -36,29 +36,15 @@ namespace metaspace { -size_t Settings::_commit_granule_bytes = 0; -size_t Settings::_commit_granule_words = 0; - DEBUG_ONLY(bool Settings::_use_allocation_guard = false;) void Settings::ergo_initialize() { - if (strcmp(MetaspaceReclaimPolicy, "aggressive") == 0) { - log_info(metaspace)("Initialized with strategy: aggressive reclaim."); - // Set the granule size rather small; may increase - // mapping fragmentation but also increase chance to uncommit. - _commit_granule_bytes = MAX2(os::vm_page_size(), 16 * K); - _commit_granule_words = _commit_granule_bytes / BytesPerWord; - } else if (strcmp(MetaspaceReclaimPolicy, "balanced") == 0) { - log_info(metaspace)("Initialized with strategy: balanced reclaim."); - _commit_granule_bytes = MAX2(os::vm_page_size(), 64 * K); - _commit_granule_words = _commit_granule_bytes / BytesPerWord; - } else { - vm_exit_during_initialization("Invalid value for MetaspaceReclaimPolicy: \"%s\".", MetaspaceReclaimPolicy); - } - // Sanity checks. + // Granules must be a multiple of page size, and a power-2-value. + assert(_commit_granule_bytes >= os::vm_page_size() && + is_aligned(_commit_granule_bytes, os::vm_page_size()), + "Granule size must be a page-size-aligned power-of-2 value"); assert(commit_granule_words() <= chunklevel::MAX_CHUNK_WORD_SIZE, "Too large granule size"); - assert(is_power_of_2(commit_granule_words()), "granule size must be a power of 2"); // Off for release builds, off by default - but switchable - for debug builds. 
DEBUG_ONLY(_use_allocation_guard = MetaspaceGuardAllocations;) diff --git a/src/hotspot/share/memory/metaspace/metaspaceSettings.hpp b/src/hotspot/share/memory/metaspace/metaspaceSettings.hpp index 159ba69d194..93be0eb6fe6 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceSettings.hpp +++ b/src/hotspot/share/memory/metaspace/metaspaceSettings.hpp @@ -35,10 +35,10 @@ namespace metaspace { class Settings : public AllStatic { // Granularity, in bytes, metaspace is committed with. - static size_t _commit_granule_bytes; + static constexpr size_t _commit_granule_bytes = 64 * K; // Granularity, in words, metaspace is committed with. - static size_t _commit_granule_words; + static constexpr size_t _commit_granule_words = _commit_granule_bytes / BytesPerWord; // The default size of a VirtualSpaceNode, unless created with an explicitly specified size. // Must be a multiple of the root chunk size. diff --git a/src/hotspot/share/memory/metaspaceCriticalAllocation.cpp b/src/hotspot/share/memory/metaspaceCriticalAllocation.cpp index 8c067d215db..a25c4c68f10 100644 --- a/src/hotspot/share/memory/metaspaceCriticalAllocation.cpp +++ b/src/hotspot/share/memory/metaspaceCriticalAllocation.cpp @@ -79,12 +79,19 @@ void MetaspaceCriticalAllocation::add(MetadataAllocationRequest* request) { MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag); log_info(metaspace)("Requesting critical metaspace allocation; almost out of memory"); Atomic::store(&_has_critical_allocation, true); + // This is called by the request constructor to insert the request into the + // global list. The request's destructor will remove the request from the + // list. gcc13 has a false positive warning about the local request being + // added to the global list because it doesn't relate those operations. 
+ PRAGMA_DIAG_PUSH + PRAGMA_DANGLING_POINTER_IGNORED if (_requests_head == nullptr) { _requests_head = _requests_tail = request; } else { _requests_tail->set_next(request); _requests_tail = request; } + PRAGMA_DIAG_POP } void MetaspaceCriticalAllocation::unlink(MetadataAllocationRequest* curr, MetadataAllocationRequest* prev) { diff --git a/src/hotspot/share/oops/constMethod.cpp b/src/hotspot/share/oops/constMethod.cpp index 456cc110b50..d685932ef7c 100644 --- a/src/hotspot/share/oops/constMethod.cpp +++ b/src/hotspot/share/oops/constMethod.cpp @@ -199,7 +199,7 @@ u2* ConstMethod::checked_exceptions_length_addr() const { } u2* ConstMethod::exception_table_length_addr() const { - assert(has_exception_handler(), "called only if table is present"); + assert(has_exception_table(), "called only if table is present"); if (has_checked_exceptions()) { // If checked_exception present, locate immediately before them. return (u2*) checked_exceptions_start() - 1; @@ -217,7 +217,7 @@ u2* ConstMethod::exception_table_length_addr() const { u2* ConstMethod::localvariable_table_length_addr() const { assert(has_localvariable_table(), "called only if table is present"); - if (has_exception_handler()) { + if (has_exception_table()) { // If exception_table present, locate immediately before them. return (u2*) exception_table_start() - 1; } else { @@ -239,30 +239,29 @@ u2* ConstMethod::localvariable_table_length_addr() const { // Update the flags to indicate the presence of these optional fields. 
void ConstMethod::set_inlined_tables_length(InlineTableSizes* sizes) { - _flags = 0; if (sizes->compressed_linenumber_size() > 0) - _flags |= _has_linenumber_table; + set_has_linenumber_table(); if (sizes->generic_signature_index() != 0) - _flags |= _has_generic_signature; + set_has_generic_signature(); if (sizes->method_parameters_length() >= 0) - _flags |= _has_method_parameters; + set_has_method_parameters(); if (sizes->checked_exceptions_length() > 0) - _flags |= _has_checked_exceptions; + set_has_checked_exceptions(); if (sizes->exception_table_length() > 0) - _flags |= _has_exception_table; + set_has_exception_table(); if (sizes->localvariable_table_length() > 0) - _flags |= _has_localvariable_table; + set_has_localvariable_table(); // annotations, they are all pointer sized embedded objects so don't have // a length embedded also. if (sizes->method_annotations_length() > 0) - _flags |= _has_method_annotations; + set_has_method_annotations(); if (sizes->parameter_annotations_length() > 0) - _flags |= _has_parameter_annotations; + set_has_parameter_annotations(); if (sizes->type_annotations_length() > 0) - _flags |= _has_type_annotations; + set_has_type_annotations(); if (sizes->default_annotations_length() > 0) - _flags |= _has_default_annotations; + set_has_default_annotations(); // This code is extremely brittle and should possibly be revised. // The *_length_addr functions walk backwards through the @@ -329,7 +328,7 @@ LocalVariableTableElement* ConstMethod::localvariable_table_start() const { } int ConstMethod::exception_table_length() const { - return has_exception_handler() ? *(exception_table_length_addr()) : 0; + return has_exception_table() ? 
*(exception_table_length_addr()) : 0; } ExceptionTableElement* ConstMethod::exception_table_start() const { @@ -431,13 +430,14 @@ void ConstMethod::print_on(outputStream* st) const { ResourceMark rm; st->print_cr("%s", internal_name()); Method* m = method(); - st->print(" - method: " PTR_FORMAT " ", p2i(m)); + st->print(" - method: " PTR_FORMAT " ", p2i(m)); if (m != nullptr) { m->print_value_on(st); } st->cr(); + st->print(" - flags: 0x%x ", _flags.as_int()); _flags.print_on(st); st->cr(); if (has_stackmap_table()) { - st->print(" - stackmap data: "); + st->print(" - stackmap data: "); stackmap_data()->print_value_on(st); st->cr(); } @@ -484,7 +484,7 @@ void ConstMethod::verify_on(outputStream* st) { u2* addr = checked_exceptions_length_addr(); guarantee(*addr > 0 && (address) addr >= compressed_table_end && (address) addr < m_end, "invalid method layout"); } - if (has_exception_handler()) { + if (has_exception_table()) { u2* addr = exception_table_length_addr(); guarantee(*addr > 0 && (address) addr >= compressed_table_end && (address) addr < m_end, "invalid method layout"); } @@ -496,7 +496,7 @@ void ConstMethod::verify_on(outputStream* st) { u2* uncompressed_table_start; if (has_localvariable_table()) { uncompressed_table_start = (u2*) localvariable_table_start(); - } else if (has_exception_handler()) { + } else if (has_exception_table()) { uncompressed_table_start = (u2*) exception_table_start(); } else if (has_checked_exceptions()) { uncompressed_table_start = (u2*) checked_exceptions_start(); diff --git a/src/hotspot/share/oops/constMethod.hpp b/src/hotspot/share/oops/constMethod.hpp index cfe9b518aa2..7f2fb53e61a 100644 --- a/src/hotspot/share/oops/constMethod.hpp +++ b/src/hotspot/share/oops/constMethod.hpp @@ -25,6 +25,7 @@ #ifndef SHARE_OOPS_CONSTMETHOD_HPP #define SHARE_OOPS_CONSTMETHOD_HPP +#include "oops/constMethodFlags.hpp" #include "oops/oop.hpp" #include "utilities/align.hpp" @@ -173,19 +174,6 @@ public: typedef enum { NORMAL, OVERPASS } 
MethodType; private: - enum { - _has_linenumber_table = 0x0001, - _has_checked_exceptions = 0x0002, - _has_localvariable_table = 0x0004, - _has_exception_table = 0x0008, - _has_generic_signature = 0x0010, - _has_method_parameters = 0x0020, - _is_overpass = 0x0040, - _has_method_annotations = 0x0080, - _has_parameter_annotations = 0x0100, - _has_type_annotations = 0x0200, - _has_default_annotations = 0x0400 - }; // Bit vector of signature // Callers interpret 0=not initialized yet and @@ -204,7 +192,7 @@ private: Array* _stackmap_data; int _constMethod_size; - u2 _flags; + ConstMethodFlags _flags; // for sizing u1 _result_type; // BasicType of result // Size of Java bytecodes allocated immediately after Method*. @@ -236,33 +224,20 @@ public: // Inlined tables void set_inlined_tables_length(InlineTableSizes* sizes); - bool has_generic_signature() const - { return (_flags & _has_generic_signature) != 0; } - - bool has_linenumber_table() const - { return (_flags & _has_linenumber_table) != 0; } - - bool has_checked_exceptions() const - { return (_flags & _has_checked_exceptions) != 0; } - - bool has_localvariable_table() const - { return (_flags & _has_localvariable_table) != 0; } - - bool has_exception_handler() const - { return (_flags & _has_exception_table) != 0; } - - bool has_method_parameters() const - { return (_flags & _has_method_parameters) != 0; } + // Create getters and setters for the flag values. +#define CM_FLAGS_GET_SET(name, ignore) \ + bool name() const { return _flags.name(); } \ + void set_##name() { _flags.set_##name(); } + CM_FLAGS_DO(CM_FLAGS_GET_SET) +#undef CM_FLAGS_GET_SET MethodType method_type() const { - return ((_flags & _is_overpass) == 0) ? NORMAL : OVERPASS; + return (_flags.is_overpass()) ? 
OVERPASS : NORMAL; } void set_method_type(MethodType mt) { - if (mt == NORMAL) { - _flags &= ~(_is_overpass); - } else { - _flags |= _is_overpass; + if (mt != NORMAL) { + set_is_overpass(); } } @@ -382,20 +357,6 @@ public: int method_parameters_length() const; MethodParametersElement* method_parameters_start() const; - // method annotations - bool has_method_annotations() const - { return (_flags & _has_method_annotations) != 0; } - - bool has_parameter_annotations() const - { return (_flags & _has_parameter_annotations) != 0; } - - bool has_type_annotations() const - { return (_flags & _has_type_annotations) != 0; } - - bool has_default_annotations() const - { return (_flags & _has_default_annotations) != 0; } - - AnnotationArray** method_annotations_addr() const; AnnotationArray* method_annotations() const { return has_method_annotations() ? *(method_annotations_addr()) : nullptr; diff --git a/src/hotspot/share/oops/constMethodFlags.cpp b/src/hotspot/share/oops/constMethodFlags.cpp new file mode 100644 index 00000000000..3664cb12e01 --- /dev/null +++ b/src/hotspot/share/oops/constMethodFlags.cpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "oops/constMethodFlags.hpp" +#include "runtime/atomic.hpp" +#include "utilities/ostream.hpp" + +void ConstMethodFlags::print_on(outputStream* st) const { +#define CM_PRINT(name, ignore) \ + if (name()) st->print(" " #name " "); + CM_FLAGS_DO(CM_PRINT) +#undef CM_PRINT +} diff --git a/src/hotspot/share/oops/constMethodFlags.hpp b/src/hotspot/share/oops/constMethodFlags.hpp new file mode 100644 index 00000000000..494cb6ad9a1 --- /dev/null +++ b/src/hotspot/share/oops/constMethodFlags.hpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_OOPS_CONSTMETHODFLAGS_HPP +#define SHARE_OOPS_CONSTMETHODFLAGS_HPP + +#include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" + +class outputStream; + +// The ConstMethodFlags class contains the parse-time flags associated with +// a Method, and its associated accessors. +// These flags are JVM internal and not part of the AccessFlags classfile specification. + +class ConstMethodFlags { + friend class VMStructs; + friend class JVMCIVMStructs; + +#define CM_FLAGS_DO(flag) \ + flag(has_linenumber_table , 1 << 0) \ + flag(has_checked_exceptions , 1 << 1) \ + flag(has_localvariable_table , 1 << 2) \ + flag(has_exception_table , 1 << 3) \ + flag(has_generic_signature , 1 << 4) \ + flag(has_method_parameters , 1 << 5) \ + flag(is_overpass , 1 << 6) \ + flag(has_method_annotations , 1 << 7) \ + flag(has_parameter_annotations , 1 << 8) \ + flag(has_type_annotations , 1 << 9) \ + flag(has_default_annotations , 1 << 10) \ + flag(caller_sensitive , 1 << 11) \ + flag(is_hidden , 1 << 12) \ + flag(has_injected_profile , 1 << 13) \ + flag(intrinsic_candidate , 1 << 14) \ + flag(reserved_stack_access , 1 << 15) \ + flag(is_scoped , 1 << 16) \ + flag(changes_current_thread , 1 << 17) \ + flag(jvmti_mount_transition , 1 << 18) \ + /* end of list */ + +#define CM_FLAGS_ENUM_NAME(name, value) _misc_##name = value, + enum { + CM_FLAGS_DO(CM_FLAGS_ENUM_NAME) + }; +#undef CM_FLAGS_ENUM_NAME + + // These flags are write-once before the class is published and then read-only so don't require atomic updates. + u4 _flags; + + public: + + ConstMethodFlags() : _flags(0) {} + + // Create getters and setters for the flag values. 
+#define CM_FLAGS_GET_SET(name, ignore) \ + bool name() const { return (_flags & _misc_##name) != 0; } \ + void set_##name() { \ + _flags |= _misc_##name; \ + } + CM_FLAGS_DO(CM_FLAGS_GET_SET) +#undef CM_FLAGS_GET_SET + + int as_int() const { return _flags; } + void print_on(outputStream* st) const; +}; + +#endif // SHARE_OOPS_CONSTMETHODFLAGS_HPP diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp index 8bb3f10f868..c6efafe1526 100644 --- a/src/hotspot/share/oops/instanceKlass.cpp +++ b/src/hotspot/share/oops/instanceKlass.cpp @@ -2133,9 +2133,9 @@ void PrintClassClosure::do_klass(Klass* k) { char buf[10]; int i = 0; if (k->has_finalizer()) buf[i++] = 'F'; - if (k->has_final_method()) buf[i++] = 'f'; if (k->is_instance_klass()) { InstanceKlass* ik = InstanceKlass::cast(k); + if (ik->has_final_method()) buf[i++] = 'f'; if (ik->is_rewritten()) buf[i++] = 'W'; if (ik->is_contended()) buf[i++] = 'C'; if (ik->has_been_redefined()) buf[i++] = 'R'; @@ -2624,6 +2624,16 @@ void InstanceKlass::init_shared_package_entry() { #endif } +void InstanceKlass::compute_has_loops_flag_for_methods() { + Array* methods = this->methods(); + for (int index = 0; index < methods->length(); ++index) { + Method* m = methods->at(index); + if (!m->is_overpass()) { // work around JDK-8305771 + m->compute_has_loops_flag(); + } + } +} + void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, PackageEntry* pkg_entry, TRAPS) { // InstanceKlass::add_to_hierarchy() sets the init_state to loaded @@ -3467,6 +3477,7 @@ void InstanceKlass::print_on(outputStream* st) const { st->print(BULLET"instance size: %d", size_helper()); st->cr(); st->print(BULLET"klass size: %d", size()); st->cr(); st->print(BULLET"access: "); access_flags().print_on(st); st->cr(); + st->print(BULLET"flags: "); _misc_flags.print_on(st); st->cr(); st->print(BULLET"state: "); st->print_cr("%s", init_state_name()); st->print(BULLET"name: "); 
name()->print_value_on(st); st->cr(); st->print(BULLET"super: "); Metadata::print_value_on_maybe_null(st, super()); st->cr(); @@ -4011,18 +4022,18 @@ void InstanceKlass::set_init_state(ClassState state) { // Globally, there is at least one previous version of a class to walk // during class unloading, which is saved because old methods in the class // are still running. Otherwise the previous version list is cleaned up. -bool InstanceKlass::_has_previous_versions = false; +bool InstanceKlass::_should_clean_previous_versions = false; // Returns true if there are previous versions of a class for class // unloading only. Also resets the flag to false. purge_previous_version // will set the flag to true if there are any left, i.e., if there's any // work to do for next time. This is to avoid the expensive code cache // walk in CLDG::clean_deallocate_lists(). -bool InstanceKlass::has_previous_versions_and_reset() { - bool ret = _has_previous_versions; - log_trace(redefine, class, iklass, purge)("Class unloading: has_previous_versions = %s", +bool InstanceKlass::should_clean_previous_versions_and_reset() { + bool ret = _should_clean_previous_versions; + log_trace(redefine, class, iklass, purge)("Class unloading: should_clean_previous_versions = %s", ret ? "true" : "false"); - _has_previous_versions = false; + _should_clean_previous_versions = false; return ret; } @@ -4079,12 +4090,17 @@ void InstanceKlass::purge_previous_version_list() { version++; continue; } else { - log_trace(redefine, class, iklass, purge)("previous version " PTR_FORMAT " is alive", p2i(pv_node)); assert(pvcp->pool_holder() != nullptr, "Constant pool with no holder"); guarantee (!loader_data->is_unloading(), "unloaded classes can't be on the stack"); live_count++; - // found a previous version for next time we do class unloading - _has_previous_versions = true; + if (pvcp->is_shared()) { + // Shared previous versions can never be removed so no cleaning is needed. 
+ log_trace(redefine, class, iklass, purge)("previous version " PTR_FORMAT " is shared", p2i(pv_node)); + } else { + // Previous version alive, set that clean is needed for next time. + _should_clean_previous_versions = true; + log_trace(redefine, class, iklass, purge)("previous version " PTR_FORMAT " is alive", p2i(pv_node)); + } } // next previous version @@ -4184,13 +4200,19 @@ void InstanceKlass::add_previous_version(InstanceKlass* scratch_class, return; } - // Add previous version if any methods are still running. - // Set has_previous_version flag for processing during class unloading. - _has_previous_versions = true; - log_trace(redefine, class, iklass, add) ("scratch class added; one of its methods is on_stack."); + // Add previous version if any methods are still running or if this is + // a shared class which should never be removed. assert(scratch_class->previous_versions() == nullptr, "shouldn't have a previous version"); scratch_class->link_previous_versions(previous_versions()); link_previous_versions(scratch_class); + if (cp_ref->is_shared()) { + log_trace(redefine, class, iklass, add) ("scratch class added; class is shared"); + } else { + // We only set clean_previous_versions flag for processing during class + // unloading for non-shared classes. + _should_clean_previous_versions = true; + log_trace(redefine, class, iklass, add) ("scratch class added; one of its methods is on_stack."); + } } // end add_previous_version() #endif // INCLUDE_JVMTI diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp index 681fa7d648b..4997a122140 100644 --- a/src/hotspot/share/oops/instanceKlass.hpp +++ b/src/hotspot/share/oops/instanceKlass.hpp @@ -223,15 +223,11 @@ class InstanceKlass: public Klass { volatile u2 _idnum_allocated_count; // JNI/JVMTI: increments with the addition of methods, old ids don't change - // _is_marked_dependent can be set concurrently, thus cannot be part of the - // _misc_flags right now. 
- bool _is_marked_dependent; // used for marking during flushing and deoptimization - volatile ClassState _init_state; // state of class u1 _reference_type; // reference type - // State is set while executing, eventually atomically to not disturb other state + // State is set either at parse time or while executing, atomically to not disturb other state InstanceKlassFlags _misc_flags; Monitor* _init_monitor; // mutual exclusion to _init_state and _init_thread. @@ -531,8 +527,8 @@ public: void set_should_verify_class(bool value) { _misc_flags.set_should_verify_class(value); } // marking - bool is_marked_dependent() const { return _is_marked_dependent; } - void set_is_marked_dependent(bool value) { _is_marked_dependent = value; } + bool is_marked_dependent() const { return _misc_flags.is_marked_dependent(); } + void set_is_marked_dependent(bool value) { _misc_flags.set_is_marked_dependent(value); } // initialization (virtuals from Klass) bool should_be_initialized() const; // means that initialize should be called @@ -681,16 +677,8 @@ public: // Redefinition locking. Class can only be redefined by one thread at a time. // The flag is in access_flags so that it can be set and reset using atomic // operations, and not be reset by other misc_flag settings. 
- bool is_being_redefined() const { - return _access_flags.is_being_redefined(); - } - void set_is_being_redefined(bool value) { - if (value) { - _access_flags.set_is_being_redefined(); - } else { - _access_flags.clear_is_being_redefined(); - } - } + bool is_being_redefined() const { return _misc_flags.is_being_redefined(); } + void set_is_being_redefined(bool value) { _misc_flags.set_is_being_redefined(value); } // RedefineClasses() support for previous versions: void add_previous_version(InstanceKlass* ik, int emcp_method_count); @@ -716,13 +704,8 @@ public: bool is_scratch_class() const { return _misc_flags.is_scratch_class(); } void set_is_scratch_class() { _misc_flags.set_is_scratch_class(true); } - bool has_resolved_methods() const { - return _access_flags.has_resolved_methods(); - } - - void set_has_resolved_methods() { - _access_flags.set_has_resolved_methods(); - } + bool has_resolved_methods() const { return _misc_flags.has_resolved_methods(); } + void set_has_resolved_methods() { _misc_flags.set_has_resolved_methods(true); } public: #if INCLUDE_JVMTI @@ -732,7 +715,7 @@ public: } private: - static bool _has_previous_versions; + static bool _should_clean_previous_versions; public: static void purge_previous_versions(InstanceKlass* ik) { if (ik->has_been_redefined()) { @@ -740,8 +723,8 @@ public: } } - static bool has_previous_versions_and_reset(); - static bool has_previous_versions() { return _has_previous_versions; } + static bool should_clean_previous_versions_and_reset(); + static bool should_clean_previous_versions() { return _should_clean_previous_versions; } // JVMTI: Support for caching a class file before it is modified by an agent that can do retransformation void set_cached_class_file(JvmtiCachedClassFileData *data) { @@ -761,7 +744,7 @@ public: #else // INCLUDE_JVMTI static void purge_previous_versions(InstanceKlass* ik) { return; }; - static bool has_previous_versions_and_reset() { return false; } + static bool 
should_clean_previous_versions_and_reset() { return false; } void set_cached_class_file(JvmtiCachedClassFileData *data) { assert(data == nullptr, "unexpected call with JVMTI disabled"); @@ -776,6 +759,13 @@ public: bool declares_nonstatic_concrete_methods() const { return _misc_flags.declares_nonstatic_concrete_methods(); } void set_declares_nonstatic_concrete_methods(bool b) { _misc_flags.set_declares_nonstatic_concrete_methods(b); } + bool has_vanilla_constructor() const { return _misc_flags.has_vanilla_constructor(); } + void set_has_vanilla_constructor() { _misc_flags.set_has_vanilla_constructor(true); } + bool has_miranda_methods () const { return _misc_flags.has_miranda_methods(); } + void set_has_miranda_methods() { _misc_flags.set_has_miranda_methods(true); } + bool has_final_method() const { return _misc_flags.has_final_method(); } + void set_has_final_method() { _misc_flags.set_has_final_method(true); } + // for adding methods, ConstMethod::UNSET_IDNUM means no more ids available inline u2 next_method_idnum(); void set_initial_method_idnum(u2 value) { _idnum_allocated_count = value; } @@ -1141,6 +1131,7 @@ public: void init_shared_package_entry(); bool can_be_verified_at_dumptime() const; bool methods_contain_jsr_bytecode() const; + void compute_has_loops_flag_for_methods(); #endif jint compute_modifier_flags() const; diff --git a/src/hotspot/share/oops/instanceKlassFlags.cpp b/src/hotspot/share/oops/instanceKlassFlags.cpp index 8f845c482e0..7f0959f0005 100644 --- a/src/hotspot/share/oops/instanceKlassFlags.cpp +++ b/src/hotspot/share/oops/instanceKlassFlags.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 * * This code is free software; you can redistribute it and/or modify it @@ -26,8 +26,40 @@ #include "classfile/classLoader.hpp" #include "classfile/classLoaderData.inline.hpp" #include "oops/instanceKlassFlags.hpp" +#include "runtime/atomic.hpp" #include "runtime/safepoint.hpp" #include "utilities/macros.hpp" +#include "utilities/ostream.hpp" + +// This can be removed for the atomic bitset functions, when available. +void InstanceKlassFlags::atomic_set_bits(u1 bits) { + // Atomically update the status with the bits given + u1 old_status, new_status, f; + do { + old_status = _status; + new_status = old_status | bits; + f = Atomic::cmpxchg(&_status, old_status, new_status); + } while(f != old_status); +} + +void InstanceKlassFlags::atomic_clear_bits(u1 bits) { + // Atomically update the status with the bits given + u1 old_status, new_status, f; + do { + old_status = _status; + new_status = old_status & ~bits; + f = Atomic::cmpxchg(&_status, old_status, new_status); + } while(f != old_status); +} + +void InstanceKlassFlags::print_on(outputStream* st) const { +#define IK_FLAGS_PRINT(name, ignore) \ + if (name()) st->print(" " #name " "); + IK_FLAGS_DO(IK_FLAGS_PRINT) + IK_STATUS_DO(IK_FLAGS_PRINT) +#undef IK_FLAGS_PRINT + st->cr(); +} #if INCLUDE_CDS void InstanceKlassFlags::set_shared_class_loader_type(s2 loader_type) { diff --git a/src/hotspot/share/oops/instanceKlassFlags.hpp b/src/hotspot/share/oops/instanceKlassFlags.hpp index 6e147f6e33c..5b79f6e4214 100644 --- a/src/hotspot/share/oops/instanceKlassFlags.hpp +++ b/src/hotspot/share/oops/instanceKlassFlags.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,12 @@ class ClassLoaderData; +// The InstanceKlassFlags class contains the parse-time and writeable flags associated with +// an InstanceKlass, and their associated accessors. +// _flags are parse-time and constant in the InstanceKlass after that. _status are set at runtime and +// require atomic access. +// These flags are JVM internal and not part of the AccessFlags classfile specification. + class InstanceKlassFlags { friend class VMStructs; friend class JVMCIVMStructs; @@ -35,18 +41,19 @@ class InstanceKlassFlags { flag(rewritten , 1 << 0) /* methods rewritten. */ \ flag(has_nonstatic_fields , 1 << 1) /* for sizing with UseCompressedOops */ \ flag(should_verify_class , 1 << 2) /* allow caching of preverification */ \ - flag(unused , 1 << 3) /* not currently used */ \ - flag(is_contended , 1 << 4) /* marked with contended annotation */ \ - flag(has_nonstatic_concrete_methods , 1 << 5) /* class/superclass/implemented interfaces has non-static, concrete methods */ \ - flag(declares_nonstatic_concrete_methods, 1 << 6) /* directly declares non-static, concrete methods */ \ - flag(has_been_redefined , 1 << 7) /* class has been redefined */ \ - flag(shared_loading_failed , 1 << 8) /* class has been loaded from shared archive */ \ - flag(is_scratch_class , 1 << 9) /* class is the redefined scratch class */ \ - flag(is_shared_boot_class , 1 << 10) /* defining class loader is boot class loader */ \ - flag(is_shared_platform_class , 1 << 11) /* defining class loader is platform class loader */ \ - flag(is_shared_app_class , 1 << 12) /* defining class loader is app class loader */ \ - flag(has_contended_annotations , 1 << 13) /* has @Contended annotation */ \ - flag(has_localvariable_table , 1 << 14) /* has localvariable information */ + flag(is_contended , 1 << 3) /* marked with contended annotation */ \ + flag(has_nonstatic_concrete_methods , 1 << 4) /* class/superclass/implemented 
interfaces has non-static, concrete methods */ \ + flag(declares_nonstatic_concrete_methods, 1 << 5) /* directly declares non-static, concrete methods */ \ + flag(shared_loading_failed , 1 << 6) /* class has been loaded from shared archive */ \ + flag(is_shared_boot_class , 1 << 7) /* defining class loader is boot class loader */ \ + flag(is_shared_platform_class , 1 << 8) /* defining class loader is platform class loader */ \ + flag(is_shared_app_class , 1 << 9) /* defining class loader is app class loader */ \ + flag(has_contended_annotations , 1 << 10) /* has @Contended annotation */ \ + flag(has_localvariable_table , 1 << 11) /* has localvariable information */ \ + flag(has_miranda_methods , 1 << 12) /* True if this class has miranda methods in its vtable */ \ + flag(has_vanilla_constructor , 1 << 13) /* True if klass has a vanilla default constructor */ \ + flag(has_final_method , 1 << 14) /* True if klass has final method */ \ + /* end of list */ #define IK_FLAGS_ENUM_NAME(name, value) _misc_##name = value, enum { @@ -54,6 +61,20 @@ }; #undef IK_FLAGS_ENUM_NAME +#define IK_STATUS_DO(status) \ + status(is_being_redefined , 1 << 0) /* True if the klass is being redefined */ \ + status(has_resolved_methods , 1 << 1) /* True if the klass has resolved MethodHandle methods */ \ + status(has_been_redefined , 1 << 2) /* class has been redefined */ \ + status(is_scratch_class , 1 << 3) /* class is the redefined scratch class */ \ + status(is_marked_dependent , 1 << 4) /* used for marking during flushing and deoptimization */ \ + /* end of list */ + +#define IK_STATUS_ENUM_NAME(name, value) _misc_##name = value, + enum { + IK_STATUS_DO(IK_STATUS_ENUM_NAME) + }; +#undef IK_STATUS_ENUM_NAME + u2 shared_loader_type_bits() const { return _misc_is_shared_boot_class|_misc_is_shared_platform_class|_misc_is_shared_app_class; } @@ -61,23 +82,22 @@ // These flags are write-once before the class is published and then read-only so don't 
require atomic updates. u2 _flags; + // These flags are written during execution so require atomic stores + u1 _status; + public: - InstanceKlassFlags() : _flags(0) {} + InstanceKlassFlags() : _flags(0), _status(0) {} // Create getters and setters for the flag values. -#define IK_FLAGS_GET(name, ignore) \ - bool name() const { return (_flags & _misc_##name) != 0; } - IK_FLAGS_DO(IK_FLAGS_GET) -#undef IK_FLAGS_GET - -#define IK_FLAGS_SET(name, ignore) \ +#define IK_FLAGS_GET_SET(name, ignore) \ + bool name() const { return (_flags & _misc_##name) != 0; } \ void set_##name(bool b) { \ assert_is_safe(name()); \ if (b) _flags |= _misc_##name; \ } - IK_FLAGS_DO(IK_FLAGS_SET) -#undef IK_FLAGS_SET + IK_FLAGS_DO(IK_FLAGS_GET_SET) +#undef IK_FLAGS_GET_SET bool is_shared_unregistered_class() const { return (_flags & shared_loader_type_bits()) == 0; @@ -87,6 +107,23 @@ class InstanceKlassFlags { void assign_class_loader_type(const ClassLoaderData* cld); void assert_is_safe(bool set) NOT_DEBUG_RETURN; + + // Create getters and setters for the status values. 
+#define IK_STATUS_GET_SET(name, ignore) \ + bool name() const { return (_status & _misc_##name) != 0; } \ + void set_##name(bool b) { \ + if (b) { \ + atomic_set_bits(_misc_##name); \ + } else { \ + atomic_clear_bits(_misc_##name); \ + } \ + } + IK_STATUS_DO(IK_STATUS_GET_SET) +#undef IK_STATUS_GET_SET + + void atomic_set_bits(u1 bits); + void atomic_clear_bits(u1 bits); + void print_on(outputStream* st) const; }; #endif // SHARE_OOPS_INSTANCEKLASSFLAGS_HPP diff --git a/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp b/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp index 1d5113f5a8e..867a0580a12 100644 --- a/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp +++ b/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp @@ -56,10 +56,7 @@ void InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) { if (klass != nullptr) { if (klass->class_loader_data() == nullptr) { // This is a mirror that belongs to a shared class that has not be loaded yet. - // It's only reachable via HeapShared::roots(). All of its fields should be zero - // so there's no need to scan. assert(klass->is_shared(), "must be"); - return; } else if (klass->is_instance_klass() && klass->class_loader_data()->has_class_mirror_holder()) { // A non-strong hidden class doesn't have its own class loader, // so when handling the java mirror for the class we need to make sure its class diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp index acb9a41730f..abe8951fddd 100644 --- a/src/hotspot/share/oops/klass.hpp +++ b/src/hotspot/share/oops/klass.hpp @@ -176,6 +176,7 @@ private: // Various attributes for shared classes. Should be zero for a non-shared class. 
u2 _shared_class_flags; enum CDSSharedClassFlags { + _is_shared_class = 1 << 0, // shadows MetaspaceObj::is_shared _archived_lambda_proxy_is_available = 1 << 1, _has_value_based_class_annotation = 1 << 2, _verified_at_dump_time = 1 << 3, @@ -363,6 +364,15 @@ protected: NOT_CDS(return false;) } + bool is_shared() const { // shadows MetaspaceObj::is_shared() + CDS_ONLY(return (_shared_class_flags & _is_shared_class) != 0;) + NOT_CDS(return false;) + } + + void set_is_shared() { + CDS_ONLY(_shared_class_flags |= _is_shared_class;) + } + // Obtain the module or package for this class virtual ModuleEntry* module() const = 0; virtual PackageEntry* package() const = 0; @@ -649,15 +659,7 @@ protected: bool is_synthetic() const { return _access_flags.is_synthetic(); } void set_is_synthetic() { _access_flags.set_is_synthetic(); } bool has_finalizer() const { return _access_flags.has_finalizer(); } - bool has_final_method() const { return _access_flags.has_final_method(); } void set_has_finalizer() { _access_flags.set_has_finalizer(); } - void set_has_final_method() { _access_flags.set_has_final_method(); } - bool has_vanilla_constructor() const { return _access_flags.has_vanilla_constructor(); } - void set_has_vanilla_constructor() { _access_flags.set_has_vanilla_constructor(); } - bool has_miranda_methods () const { return access_flags().has_miranda_methods(); } - void set_has_miranda_methods() { _access_flags.set_has_miranda_methods(); } - bool is_shared() const { return access_flags().is_shared_class(); } // shadows MetaspaceObj::is_shared)() - void set_is_shared() { _access_flags.set_is_shared_class(); } bool is_hidden() const { return access_flags().is_hidden_class(); } void set_is_hidden() { _access_flags.set_is_hidden_class(); } bool is_value_based() { return _access_flags.is_value_based_class(); } diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp index 8f8e9d1793c..20bf400f55c 100644 --- a/src/hotspot/share/oops/method.cpp +++ 
b/src/hotspot/share/oops/method.cpp @@ -102,11 +102,6 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, Symbol* name) { set_constMethod(xconst); set_access_flags(access_flags); set_intrinsic_id(vmIntrinsics::_none); - set_force_inline(false); - set_hidden(false); - set_dont_inline(false); - set_changes_current_thread(false); - set_has_injected_profile(false); set_method_data(nullptr); clear_method_counters(); set_vtable_index(Method::garbage_vtable_index); @@ -736,24 +731,27 @@ bool Method::compute_has_loops_flag() { case Bytecodes::_if_acmpne: case Bytecodes::_goto: case Bytecodes::_jsr: - if (bcs.dest() < bcs.next_bci()) _access_flags.set_has_loops(); + if (bcs.dest() < bcs.next_bci()) { + return set_has_loops(); + } break; case Bytecodes::_goto_w: case Bytecodes::_jsr_w: - if (bcs.dest_w() < bcs.next_bci()) _access_flags.set_has_loops(); + if (bcs.dest_w() < bcs.next_bci()) { + return set_has_loops(); + } break; case Bytecodes::_lookupswitch: { Bytecode_lookupswitch lookupswitch(this, bcs.bcp()); if (lookupswitch.default_offset() < 0) { - _access_flags.set_has_loops(); + return set_has_loops(); } else { for (int i = 0; i < lookupswitch.number_of_pairs(); ++i) { LookupswitchPair pair = lookupswitch.pair_at(i); if (pair.offset() < 0) { - _access_flags.set_has_loops(); - break; + return set_has_loops(); } } } @@ -762,11 +760,11 @@ bool Method::compute_has_loops_flag() { case Bytecodes::_tableswitch: { Bytecode_tableswitch tableswitch(this, bcs.bcp()); if (tableswitch.default_offset() < 0) { - _access_flags.set_has_loops(); + return set_has_loops(); } else { for (int i = 0; i < tableswitch.length(); ++i) { if (tableswitch.dest_offset_at(i) < 0) { - _access_flags.set_has_loops(); + return set_has_loops(); } } } @@ -776,8 +774,9 @@ bool Method::compute_has_loops_flag() { break; } } - _access_flags.set_loops_flag_init(); - return _access_flags.has_loops(); + + _flags.set_has_loops_flag_init(true); + return false; } bool 
Method::is_final_method(AccessFlags class_access_flags) const { @@ -1108,13 +1107,13 @@ void Method::set_not_compilable(const char* reason, int comp_level, bool report) } print_made_not_compilable(comp_level, /*is_osr*/ false, report, reason); if (comp_level == CompLevel_all) { - set_not_c1_compilable(); - set_not_c2_compilable(); + set_is_not_c1_compilable(); + set_is_not_c2_compilable(); } else { if (is_c1_compile(comp_level)) - set_not_c1_compilable(); + set_is_not_c1_compilable(); if (is_c2_compile(comp_level)) - set_not_c2_compilable(); + set_is_not_c2_compilable(); } assert(!CompilationPolicy::can_be_compiled(methodHandle(Thread::current(), this), comp_level), "sanity check"); } @@ -1134,13 +1133,13 @@ bool Method::is_not_osr_compilable(int comp_level) const { void Method::set_not_osr_compilable(const char* reason, int comp_level, bool report) { print_made_not_compilable(comp_level, /*is_osr*/ true, report, reason); if (comp_level == CompLevel_all) { - set_not_c1_osr_compilable(); - set_not_c2_osr_compilable(); + set_is_not_c1_osr_compilable(); + set_is_not_c2_osr_compilable(); } else { if (is_c1_compile(comp_level)) - set_not_c1_osr_compilable(); + set_is_not_c1_osr_compilable(); if (is_c2_compile(comp_level)) - set_not_c2_osr_compilable(); + set_is_not_c2_osr_compilable(); } assert(!CompilationPolicy::can_be_osr_compiled(methodHandle(Thread::current(), this), comp_level), "sanity check"); } @@ -1663,7 +1662,7 @@ void Method::init_intrinsic_id(vmSymbolID klass_id) { set_intrinsic_id(id); if (id == vmIntrinsics::_Class_cast) { // Even if the intrinsic is rejected, we want to inline this simple method. - set_force_inline(true); + set_force_inline(); } return; } @@ -2241,8 +2240,8 @@ void Method::set_on_stack(const bool value) { // on stack means some method referring to it is also on the stack. 
constants()->set_on_stack(value); - bool already_set = on_stack(); - _access_flags.set_on_stack(value); + bool already_set = on_stack_flag(); + set_on_stack_flag(value); if (value && !already_set) { MetadataOnStackMark::record(this); } @@ -2304,6 +2303,7 @@ void Method::print_on(outputStream* st) const { st->print (" - constants: " PTR_FORMAT " ", p2i(constants())); constants()->print_value_on(st); st->cr(); st->print (" - access: 0x%x ", access_flags().as_int()); access_flags().print_on(st); st->cr(); + st->print (" - flags: 0x%x ", _flags.as_int()); _flags.print_on(st); st->cr(); st->print (" - name: "); name()->print_value_on(st); st->cr(); st->print (" - signature: "); signature()->print_value_on(st); st->cr(); st->print_cr(" - max stack: %d", max_stack()); diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp index 5757f64eed2..ab8c309afd8 100644 --- a/src/hotspot/share/oops/method.hpp +++ b/src/hotspot/share/oops/method.hpp @@ -31,6 +31,7 @@ #include "oops/annotations.hpp" #include "oops/constantPool.hpp" #include "oops/methodCounters.hpp" +#include "oops/methodFlags.hpp" #include "oops/instanceKlass.hpp" #include "oops/oop.hpp" #include "oops/typeArrayOop.hpp" @@ -79,23 +80,9 @@ class Method : public Metadata { AdapterHandlerEntry* _adapter; AccessFlags _access_flags; // Access flags int _vtable_index; // vtable index of this method (see VtableIndexFlag) - // note: can have vtables with >2**16 elements (because of inheritance) - u2 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none) + MethodFlags _flags; - // Flags - enum Flags { - _caller_sensitive = 1 << 0, - _force_inline = 1 << 1, - _dont_inline = 1 << 2, - _hidden = 1 << 3, - _has_injected_profile = 1 << 4, - _intrinsic_candidate = 1 << 5, - _reserved_stack_access = 1 << 6, - _scoped = 1 << 7, - _changes_current_thread = 1 << 8, - _jvmti_mount_transition = 1 << 9, - }; - mutable u2 _flags; + u2 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none) 
JFR_ONLY(DEFINE_TRACE_FLAG;) @@ -332,7 +319,7 @@ class Method : public Metadata { // exception handler table bool has_exception_handler() const - { return constMethod()->has_exception_handler(); } + { return constMethod()->has_exception_table(); } int exception_table_length() const { return constMethod()->exception_table_length(); } ExceptionTableElement* exception_table_start() const @@ -602,31 +589,35 @@ public: // true if method can omit stack trace in throw in compiled code. bool can_omit_stack_trace(); + // Flags getting and setting. +#define M_STATUS_GET_SET(name, ignore) \ + bool name() const { return _flags.name(); } \ + void set_##name(bool x) { _flags.set_##name(x); } \ + void set_##name() { _flags.set_##name(true); } + M_STATUS_DO(M_STATUS_GET_SET) +#undef M_STATUS_GET_SET + // returns true if the method has any backward branches. bool has_loops() { - return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag(); + return has_loops_flag_init() ? has_loops_flag() : compute_has_loops_flag(); }; bool compute_has_loops_flag(); - - bool has_jsrs() { - return access_flags().has_jsrs(); - }; - void set_has_jsrs() { - _access_flags.set_has_jsrs(); + bool set_has_loops() { + // set both the flags and that it's been initialized. + set_has_loops_flag(); + set_has_loops_flag_init(); + return true; } // returns true if the method has any monitors. - bool has_monitors() const { return is_synchronized() || access_flags().has_monitor_bytecodes(); } - bool has_monitor_bytecodes() const { return access_flags().has_monitor_bytecodes(); } - - void set_has_monitor_bytecodes() { _access_flags.set_has_monitor_bytecodes(); } + bool has_monitors() const { return is_synchronized() || has_monitor_bytecodes(); } // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes - // propererly nest in the method. It might return false, even though they actually nest properly, since the info. 
+ // properly nest in the method. It might return false, even though they actually nest properly, since the info. // has not been computed yet. - bool guaranteed_monitor_matching() const { return access_flags().is_monitor_matching(); } - void set_guaranteed_monitor_matching() { _access_flags.set_monitor_matching(); } + bool guaranteed_monitor_matching() const { return monitor_matching(); } + void set_guaranteed_monitor_matching() { set_monitor_matching(); } // returns true if the method is an accessor function (setter/getter). bool is_accessor() const; @@ -745,14 +736,7 @@ public: static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize // RedefineClasses() support: - bool is_old() const { return access_flags().is_old(); } - void set_is_old() { _access_flags.set_is_old(); } - bool is_obsolete() const { return access_flags().is_obsolete(); } - void set_is_obsolete() { _access_flags.set_is_obsolete(); } - bool is_deleted() const { return access_flags().is_deleted(); } - void set_is_deleted() { _access_flags.set_is_deleted(); } - - bool on_stack() const { return access_flags().on_stack(); } + bool on_stack() const { return on_stack_flag(); } void set_on_stack(const bool value); void record_gc_epoch(); @@ -760,10 +744,6 @@ public: // see the definition in Method*.cpp for the gory details bool should_not_be_cached() const; - // JVMTI Native method prefixing support: - bool is_prefixed_native() const { return access_flags().is_prefixed_native(); } - void set_is_prefixed_native() { _access_flags.set_is_prefixed_native(); } - // Rewriting support static methodHandle clone_with_new_data(const methodHandle& m, u_char* new_code, int new_code_length, u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS); @@ -820,78 +800,29 @@ public: void init_intrinsic_id(vmSymbolID klass_id); // updates from _none if a match static vmSymbolID klass_id_for_intrinsics(const Klass* holder); - bool caller_sensitive() { - return 
(_flags & _caller_sensitive) != 0; - } - void set_caller_sensitive(bool x) { - _flags = x ? (_flags | _caller_sensitive) : (_flags & ~_caller_sensitive); - } + bool caller_sensitive() const { return constMethod()->caller_sensitive(); } + void set_caller_sensitive() { constMethod()->set_caller_sensitive(); } - bool force_inline() { - return (_flags & _force_inline) != 0; - } - void set_force_inline(bool x) { - _flags = x ? (_flags | _force_inline) : (_flags & ~_force_inline); - } + bool changes_current_thread() const { return constMethod()->changes_current_thread(); } + void set_changes_current_thread() { constMethod()->set_changes_current_thread(); } - bool dont_inline() { - return (_flags & _dont_inline) != 0; - } - void set_dont_inline(bool x) { - _flags = x ? (_flags | _dont_inline) : (_flags & ~_dont_inline); - } + bool jvmti_mount_transition() const { return constMethod()->jvmti_mount_transition(); } + void set_jvmti_mount_transition() { constMethod()->set_jvmti_mount_transition(); } - bool changes_current_thread() { - return (_flags & _changes_current_thread) != 0; - } - void set_changes_current_thread(bool x) { - _flags = x ? (_flags | _changes_current_thread) : (_flags & ~_changes_current_thread); - } + bool is_hidden() const { return constMethod()->is_hidden(); } + void set_is_hidden() { constMethod()->set_is_hidden(); } - bool jvmti_mount_transition() { - return (_flags & _jvmti_mount_transition) != 0; - } - void set_jvmti_mount_transition(bool x) { - _flags = x ? (_flags | _jvmti_mount_transition) : (_flags & ~_jvmti_mount_transition); - } + bool is_scoped() const { return constMethod()->is_scoped(); } + void set_scoped() { constMethod()->set_is_scoped(); } - bool is_hidden() const { - return (_flags & _hidden) != 0; - } + bool intrinsic_candidate() const { return constMethod()->intrinsic_candidate(); } + void set_intrinsic_candidate() { constMethod()->set_intrinsic_candidate(); } - void set_hidden(bool x) { - _flags = x ? 
(_flags | _hidden) : (_flags & ~_hidden); - } + bool has_injected_profile() const { return constMethod()->has_injected_profile(); } + void set_has_injected_profile() { constMethod()->set_has_injected_profile(); } - bool is_scoped() const { - return (_flags & _scoped) != 0; - } - - void set_scoped(bool x) { - _flags = x ? (_flags | _scoped) : (_flags & ~_scoped); - } - - bool intrinsic_candidate() { - return (_flags & _intrinsic_candidate) != 0; - } - void set_intrinsic_candidate(bool x) { - _flags = x ? (_flags | _intrinsic_candidate) : (_flags & ~_intrinsic_candidate); - } - - bool has_injected_profile() { - return (_flags & _has_injected_profile) != 0; - } - void set_has_injected_profile(bool x) { - _flags = x ? (_flags | _has_injected_profile) : (_flags & ~_has_injected_profile); - } - - bool has_reserved_stack_access() { - return (_flags & _reserved_stack_access) != 0; - } - - void set_has_reserved_stack_access(bool x) { - _flags = x ? (_flags | _reserved_stack_access) : (_flags & ~_reserved_stack_access); - } + bool has_reserved_stack_access() const { return constMethod()->reserved_stack_access(); } + void set_has_reserved_stack_access() { constMethod()->set_reserved_stack_access(); } JFR_ONLY(DEFINE_TRACE_FLAG_ACCESSOR;) @@ -939,24 +870,17 @@ public: return _method_counters; } - bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); } - void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); } - void clear_not_c1_compilable() { _access_flags.clear_not_c1_compilable(); } - bool is_not_c2_compilable() const { return access_flags().is_not_c2_compilable(); } - void set_not_c2_compilable() { _access_flags.set_not_c2_compilable(); } - void clear_not_c2_compilable() { _access_flags.clear_not_c2_compilable(); } + void clear_is_not_c1_compilable() { set_is_not_c1_compilable(false); } + void clear_is_not_c2_compilable() { set_is_not_c2_compilable(false); } + void clear_is_not_c2_osr_compilable() { 
set_is_not_c2_osr_compilable(false); } - bool is_not_c1_osr_compilable() const { return is_not_c1_compilable(); } // don't waste an accessFlags bit - void set_not_c1_osr_compilable() { set_not_c1_compilable(); } // don't waste an accessFlags bit - void clear_not_c1_osr_compilable() { clear_not_c1_compilable(); } // don't waste an accessFlags bit - bool is_not_c2_osr_compilable() const { return access_flags().is_not_c2_osr_compilable(); } - void set_not_c2_osr_compilable() { _access_flags.set_not_c2_osr_compilable(); } - void clear_not_c2_osr_compilable() { _access_flags.clear_not_c2_osr_compilable(); } + // not_c1_osr_compilable == not_c1_compilable + bool is_not_c1_osr_compilable() const { return is_not_c1_compilable(); } + void set_is_not_c1_osr_compilable() { set_is_not_c1_compilable(); } + void clear_is_not_c1_osr_compilable() { clear_is_not_c1_compilable(); } // Background compilation support - bool queued_for_compilation() const { return access_flags().queued_for_compilation(); } - void set_queued_for_compilation() { _access_flags.set_queued_for_compilation(); } - void clear_queued_for_compilation() { _access_flags.clear_queued_for_compilation(); } + void clear_queued_for_compilation() { set_queued_for_compilation(false); } // Resolve all classes in signature, return 'true' if successful static bool load_signature_classes(const methodHandle& m, TRAPS); diff --git a/src/hotspot/share/oops/methodFlags.cpp b/src/hotspot/share/oops/methodFlags.cpp new file mode 100644 index 00000000000..8effa987a19 --- /dev/null +++ b/src/hotspot/share/oops/methodFlags.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "oops/methodFlags.hpp" +#include "runtime/atomic.hpp" +#include "utilities/ostream.hpp" + +// This can be removed for the atomic bitset functions, when available. +void MethodFlags::atomic_set_bits(u4 bits) { + // Atomically update the status with the bits given + u4 old_status, new_status, f; + do { + old_status = _status; + new_status = old_status | bits; + f = Atomic::cmpxchg(&_status, old_status, new_status); + } while(f != old_status); +} + +void MethodFlags::atomic_clear_bits(u4 bits) { + // Atomically update the status with the bits given + u4 old_status, new_status, f; + do { + old_status = _status; + new_status = old_status & ~bits; + f = Atomic::cmpxchg(&_status, old_status, new_status); + } while(f != old_status); +} + +void MethodFlags::print_on(outputStream* st) const { +#define M_PRINT(name, ignore) \ + if (name()) st->print(" " #name " "); + M_STATUS_DO(M_PRINT) +#undef M_PRINT +} diff --git a/src/hotspot/share/oops/methodFlags.hpp b/src/hotspot/share/oops/methodFlags.hpp new file mode 100644 index 00000000000..1bbc79f43be --- /dev/null +++ b/src/hotspot/share/oops/methodFlags.hpp @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_OOPS_METHODFLAGS_HPP +#define SHARE_OOPS_METHODFLAGS_HPP + +#include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" + +class outputStream; + +// The MethodFlags class contains the writeable flags aka. status associated with +// a Method, and their associated accessors. +// _status is set at runtime and requires atomic access. +// These flags are JVM internal and not part of the AccessFlags classfile specification. 
+ +class MethodFlags { + friend class VMStructs; + friend class JVMCIVMStructs; + /* end of list */ + +#define M_STATUS_DO(status) \ + status(has_monitor_bytecodes , 1 << 0) /* Method contains monitorenter/monitorexit bytecodes */ \ + status(has_jsrs , 1 << 1) \ + status(is_old , 1 << 2) /* RedefineClasses() has replaced this method */ \ + status(is_obsolete , 1 << 3) /* RedefineClasses() has made method obsolete */ \ + status(is_deleted , 1 << 4) /* RedefineClasses() has deleted this method */ \ + status(is_prefixed_native , 1 << 5) /* JVMTI has prefixed this native method */ \ + status(monitor_matching , 1 << 6) /* True if we know that monitorenter/monitorexit bytecodes match */ \ + status(queued_for_compilation , 1 << 7) \ + status(is_not_c2_compilable , 1 << 8) \ + status(is_not_c1_compilable , 1 << 9) \ + status(is_not_c2_osr_compilable , 1 << 10) \ + status(force_inline , 1 << 11) /* Annotations but also set/reset at runtime */ \ + status(dont_inline , 1 << 12) \ + status(has_loops_flag , 1 << 13) /* Method has loops */ \ + status(has_loops_flag_init , 1 << 14) /* The loop flag has been initialized */ \ + status(on_stack_flag , 1 << 15) /* RedefineClasses support to keep Metadata from being cleaned */ \ + /* end of list */ + +#define M_STATUS_ENUM_NAME(name, value) _misc_##name = value, + enum { + M_STATUS_DO(M_STATUS_ENUM_NAME) + }; +#undef M_STATUS_ENUM_NAME + + // These flags are written during execution so require atomic stores + u4 _status; + + public: + + MethodFlags() : _status(0) {} + + // Create getters and setters for the status values. 
+#define M_STATUS_GET_SET(name, ignore) \ + bool name() const { return (_status & _misc_##name) != 0; } \ + void set_##name(bool b) { \ + if (b) { \ + atomic_set_bits(_misc_##name); \ + } else { \ + atomic_clear_bits(_misc_##name); \ + } \ + } + M_STATUS_DO(M_STATUS_GET_SET) +#undef M_STATUS_GET_SET + + int as_int() const { return _status; } + void atomic_set_bits(u4 bits); + void atomic_clear_bits(u4 bits); + void print_on(outputStream* st) const; +}; + +#endif // SHARE_OOPS_METHODFLAGS_HPP diff --git a/src/hotspot/share/oops/oop.cpp b/src/hotspot/share/oops/oop.cpp index c9a39c7cad6..7d170177ab7 100644 --- a/src/hotspot/share/oops/oop.cpp +++ b/src/hotspot/share/oops/oop.cpp @@ -231,13 +231,6 @@ jdouble oopDesc::double_field_acquire(int offset) const { return A void oopDesc::release_double_field_put(int offset, jdouble value) { Atomic::release_store(field_addr(offset), value); } #ifdef ASSERT -void oopDesc::verify_forwardee(oop forwardee) { -#if INCLUDE_CDS_JAVA_HEAP - assert(!Universe::heap()->is_archived_object(forwardee) && !Universe::heap()->is_archived_object(this), - "forwarding archive object"); -#endif -} - bool oopDesc::size_might_change() { // UseParallelGC and UseG1GC can change the length field // of an "old copy" of an object array in the young gen so it indicates diff --git a/src/hotspot/share/oops/oop.hpp b/src/hotspot/share/oops/oop.hpp index 9f8e0ce1f2b..a7be060b007 100644 --- a/src/hotspot/share/oops/oop.hpp +++ b/src/hotspot/share/oops/oop.hpp @@ -257,8 +257,6 @@ class oopDesc { // Forward pointer operations for scavenge inline bool is_forwarded() const; - void verify_forwardee(oop forwardee) NOT_DEBUG_RETURN; - inline void forward_to(oop p); // Like "forward_to", but inserts the forwarding pointer atomically. 
diff --git a/src/hotspot/share/oops/oop.inline.hpp b/src/hotspot/share/oops/oop.inline.hpp index cf05750e862..4988846f172 100644 --- a/src/hotspot/share/oops/oop.inline.hpp +++ b/src/hotspot/share/oops/oop.inline.hpp @@ -266,14 +266,12 @@ bool oopDesc::is_forwarded() const { // Used by scavengers void oopDesc::forward_to(oop p) { - verify_forwardee(p); markWord m = markWord::encode_pointer_as_mark(p); assert(m.decode_pointer() == p, "encoding must be reversible"); set_mark(m); } oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) { - verify_forwardee(p); markWord m = markWord::encode_pointer_as_mark(p); assert(m.decode_pointer() == p, "encoding must be reversible"); markWord old_mark = cas_set_mark(m, compare, order); diff --git a/src/hotspot/share/oops/stackChunkOop.cpp b/src/hotspot/share/oops/stackChunkOop.cpp index 76e48cba53d..4e771939dc9 100644 --- a/src/hotspot/share/oops/stackChunkOop.cpp +++ b/src/hotspot/share/oops/stackChunkOop.cpp @@ -38,6 +38,11 @@ #include "runtime/smallRegisterMap.inline.hpp" #include "runtime/stackChunkFrameStream.inline.hpp" +// Note: Some functions in this file work with stale object pointers, e.g. +// DerivedPointerSupport. Be extra careful to not put those pointers into +// variables of the 'oop' type. There's extra GC verification around oops +// that may fail when stale oops are being used. 
+ template class FrameOopIterator : public OopIterator { private: @@ -153,43 +158,45 @@ template void stackChunkOopDesc::do_barriers(base); + uintptr_t offset = derived_int_val - base; *(uintptr_t*)derived_loc = offset; } - static void derelativize(oop* base_loc, derived_pointer* derived_loc) { - oop base = *base_loc; - if (base == nullptr) { + static void derelativize(derived_base* base_loc, derived_pointer* derived_loc) { + uintptr_t base = *(uintptr_t*)base_loc; + if (base == 0) { return; } - assert(!UseCompressedOops || !CompressedOops::is_base(base), ""); + assert(!UseCompressedOops || !CompressedOops::is_base((void*)base), ""); // All derived pointers should have been relativized into offsets uintptr_t offset = *(uintptr_t*)derived_loc; // Restore the original derived pointer - *(uintptr_t*)derived_loc = cast_from_oop(base) + offset; + *(uintptr_t*)derived_loc = base + offset; } struct RelativizeClosure : public DerivedOopClosure { - virtual void do_derived_oop(oop* base_loc, derived_pointer* derived_loc) override { + virtual void do_derived_oop(derived_base* base_loc, derived_pointer* derived_loc) override { DerivedPointersSupport::relativize(base_loc, derived_loc); } }; struct DerelativizeClosure : public DerivedOopClosure { - virtual void do_derived_oop(oop* base_loc, derived_pointer* derived_loc) override { + virtual void do_derived_oop(derived_base* base_loc, derived_pointer* derived_loc) override { DerivedPointersSupport::derelativize(base_loc, derived_loc); } }; diff --git a/src/hotspot/share/opto/addnode.cpp b/src/hotspot/share/opto/addnode.cpp index f4931085fa4..3c56f5bd770 100644 --- a/src/hotspot/share/opto/addnode.cpp +++ b/src/hotspot/share/opto/addnode.cpp @@ -1258,6 +1258,138 @@ const Type *MinINode::add_ring( const Type *t0, const Type *t1 ) const { return TypeInt::make( MIN2(r0->_lo,r1->_lo), MIN2(r0->_hi,r1->_hi), MAX2(r0->_widen,r1->_widen) ); } +// Collapse the "addition with overflow-protection" pattern, and the symmetrical +// 
"subtraction with underflow-protection" pattern. These are created during the +// unrolling, when we have to adjust the limit by subtracting the stride, but want +// to protect against underflow: MaxL(SubL(limit, stride), min_jint). +// If we have more than one of those in a sequence: +// +// x con2 +// | | +// AddL clamp2 +// | | +// Max/MinL con1 +// | | +// AddL clamp1 +// | | +// Max/MinL (n) +// +// We want to collapse it to: +// +// x con1 con2 +// | | | +// | AddLNode (new_con) +// | | +// AddLNode clamp1 +// | | +// Max/MinL (n) +// +// Note: we assume that SubL was already replaced by an AddL, and that the stride +// has its sign flipped: SubL(limit, stride) -> AddL(limit, -stride). +Node* fold_subI_no_underflow_pattern(Node* n, PhaseGVN* phase) { + assert(n->Opcode() == Op_MaxL || n->Opcode() == Op_MinL, "sanity"); + // Check that the two clamps have the correct values. + jlong clamp = (n->Opcode() == Op_MaxL) ? min_jint : max_jint; + auto is_clamp = [&](Node* c) { + const TypeLong* t = phase->type(c)->isa_long(); + return t != nullptr && t->is_con() && + t->get_con() == clamp; + }; + // Check that the constants are negative if MaxL, and positive if MinL. 
+ auto is_sub_con = [&](Node* c) { + const TypeLong* t = phase->type(c)->isa_long(); + return t != nullptr && t->is_con() && + t->get_con() < max_jint && t->get_con() > min_jint && + (t->get_con() < 0) == (n->Opcode() == Op_MaxL); + }; + // Verify the graph level by level: + Node* add1 = n->in(1); + Node* clamp1 = n->in(2); + if (add1->Opcode() == Op_AddL && is_clamp(clamp1)) { + Node* max2 = add1->in(1); + Node* con1 = add1->in(2); + if (max2->Opcode() == n->Opcode() && is_sub_con(con1)) { + Node* add2 = max2->in(1); + Node* clamp2 = max2->in(2); + if (add2->Opcode() == Op_AddL && is_clamp(clamp2)) { + Node* x = add2->in(1); + Node* con2 = add2->in(2); + if (is_sub_con(con2)) { + Node* new_con = phase->transform(new AddLNode(con1, con2)); + Node* new_sub = phase->transform(new AddLNode(x, new_con)); + n->set_req_X(1, new_sub, phase); + return n; + } + } + } + } + return nullptr; +} + +const Type* MaxLNode::add_ring(const Type* t0, const Type* t1) const { + const TypeLong* r0 = t0->is_long(); + const TypeLong* r1 = t1->is_long(); + + return TypeLong::make(MAX2(r0->_lo, r1->_lo), MAX2(r0->_hi, r1->_hi), MAX2(r0->_widen, r1->_widen)); +} + +Node* MaxLNode::Identity(PhaseGVN* phase) { + const TypeLong* t1 = phase->type(in(1))->is_long(); + const TypeLong* t2 = phase->type(in(2))->is_long(); + + // Can we determine maximum statically? 
+ if (t1->_lo >= t2->_hi) { + return in(1); + } else if (t2->_lo >= t1->_hi) { + return in(2); + } + + return MaxNode::Identity(phase); +} + +Node* MaxLNode::Ideal(PhaseGVN* phase, bool can_reshape) { + Node* n = AddNode::Ideal(phase, can_reshape); + if (n != nullptr) { + return n; + } + if (can_reshape) { + return fold_subI_no_underflow_pattern(this, phase); + } + return nullptr; +} + +const Type* MinLNode::add_ring(const Type* t0, const Type* t1) const { + const TypeLong* r0 = t0->is_long(); + const TypeLong* r1 = t1->is_long(); + + return TypeLong::make(MIN2(r0->_lo, r1->_lo), MIN2(r0->_hi, r1->_hi), MIN2(r0->_widen, r1->_widen)); +} + +Node* MinLNode::Identity(PhaseGVN* phase) { + const TypeLong* t1 = phase->type(in(1))->is_long(); + const TypeLong* t2 = phase->type(in(2))->is_long(); + + // Can we determine minimum statically? + if (t1->_lo >= t2->_hi) { + return in(2); + } else if (t2->_lo >= t1->_hi) { + return in(1); + } + + return MaxNode::Identity(phase); +} + +Node* MinLNode::Ideal(PhaseGVN* phase, bool can_reshape) { + Node* n = AddNode::Ideal(phase, can_reshape); + if (n != nullptr) { + return n; + } + if (can_reshape) { + return fold_subI_no_underflow_pattern(this, phase); + } + return nullptr; +} + //------------------------------add_ring--------------------------------------- const Type *MinFNode::add_ring( const Type *t0, const Type *t1 ) const { const TypeF *r0 = t0->is_float_constant(); diff --git a/src/hotspot/share/opto/addnode.hpp b/src/hotspot/share/opto/addnode.hpp index c6a6138adb7..fb47972e8b3 100644 --- a/src/hotspot/share/opto/addnode.hpp +++ b/src/hotspot/share/opto/addnode.hpp @@ -322,28 +322,38 @@ public: // MAXimum of 2 longs. 
class MaxLNode : public MaxNode { public: - MaxLNode(Node *in1, Node *in2) : MaxNode(in1, in2) {} + MaxLNode(Compile* C, Node* in1, Node* in2) : MaxNode(in1, in2) { + init_flags(Flag_is_macro); + C->add_macro_node(this); + } virtual int Opcode() const; - virtual const Type *add_ring(const Type*, const Type*) const { return TypeLong::LONG; } - virtual const Type *add_id() const { return TypeLong::make(min_jlong); } - virtual const Type *bottom_type() const { return TypeLong::LONG; } + virtual const Type* add_ring(const Type* t0, const Type* t1) const; + virtual const Type* add_id() const { return TypeLong::make(min_jlong); } + virtual const Type* bottom_type() const { return TypeLong::LONG; } virtual uint ideal_reg() const { return Op_RegL; } int max_opcode() const { return Op_MaxL; } int min_opcode() const { return Op_MinL; } + virtual Node* Identity(PhaseGVN* phase); + virtual Node* Ideal(PhaseGVN *phase, bool can_reshape); }; //------------------------------MinLNode--------------------------------------- // MINimum of 2 longs. 
class MinLNode : public MaxNode { public: - MinLNode(Node *in1, Node *in2) : MaxNode(in1, in2) {} + MinLNode(Compile* C, Node* in1, Node* in2) : MaxNode(in1, in2) { + init_flags(Flag_is_macro); + C->add_macro_node(this); + } virtual int Opcode() const; - virtual const Type *add_ring(const Type*, const Type*) const { return TypeLong::LONG; } - virtual const Type *add_id() const { return TypeLong::make(max_jlong); } - virtual const Type *bottom_type() const { return TypeLong::LONG; } + virtual const Type* add_ring(const Type* t0, const Type* t1) const; + virtual const Type* add_id() const { return TypeLong::make(max_jlong); } + virtual const Type* bottom_type() const { return TypeLong::LONG; } virtual uint ideal_reg() const { return Op_RegL; } int max_opcode() const { return Op_MaxL; } int min_opcode() const { return Op_MinL; } + virtual Node* Identity(PhaseGVN* phase); + virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); }; //------------------------------MaxFNode--------------------------------------- diff --git a/src/hotspot/share/opto/block.cpp b/src/hotspot/share/opto/block.cpp index 2666e7a84cf..78774ff2fd1 100644 --- a/src/hotspot/share/opto/block.cpp +++ b/src/hotspot/share/opto/block.cpp @@ -1764,21 +1764,20 @@ void PhaseBlockLayout::merge_traces(bool fall_thru_only) { // Order the sequence of the traces in some desirable way void PhaseBlockLayout::reorder_traces(int count) { - ResourceArea *area = Thread::current()->resource_area(); - Trace ** new_traces = NEW_ARENA_ARRAY(area, Trace *, count); + Trace** new_traces = NEW_RESOURCE_ARRAY(Trace*, count); Block_List worklist; int new_count = 0; // Compact the traces. for (int i = 0; i < count; i++) { - Trace *tr = traces[i]; + Trace* tr = traces[i]; if (tr != nullptr) { new_traces[new_count++] = tr; } } // The entry block should be first on the new trace list. 
- Trace *tr = trace(_cfg.get_root_block()); + Trace* tr = trace(_cfg.get_root_block()); assert(tr == new_traces[0], "entry trace misplaced"); // Sort the new trace list by frequency @@ -1787,7 +1786,7 @@ void PhaseBlockLayout::reorder_traces(int count) { // Collect all blocks from existing Traces _cfg.clear_blocks(); for (int i = 0; i < new_count; i++) { - Trace *tr = new_traces[i]; + Trace* tr = new_traces[i]; if (tr != nullptr) { // push blocks onto the CFG list for (Block* b = tr->first_block(); b != nullptr; b = tr->next(b)) { @@ -1802,16 +1801,15 @@ PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg) : Phase(BlockLayout) , _cfg(cfg) { ResourceMark rm; - ResourceArea *area = Thread::current()->resource_area(); // List of traces int size = _cfg.number_of_blocks() + 1; - traces = NEW_ARENA_ARRAY(area, Trace *, size); + traces = NEW_RESOURCE_ARRAY(Trace*, size); memset(traces, 0, size*sizeof(Trace*)); - next = NEW_ARENA_ARRAY(area, Block *, size); - memset(next, 0, size*sizeof(Block *)); - prev = NEW_ARENA_ARRAY(area, Block *, size); - memset(prev , 0, size*sizeof(Block *)); + next = NEW_RESOURCE_ARRAY(Block*, size); + memset(next, 0, size*sizeof(Block*)); + prev = NEW_RESOURCE_ARRAY(Block*, size); + memset(prev , 0, size*sizeof(Block*)); // List of edges edges = new GrowableArray; diff --git a/src/hotspot/share/opto/c2compiler.cpp b/src/hotspot/share/opto/c2compiler.cpp index 560937d9fa8..e26c992d558 100644 --- a/src/hotspot/share/opto/c2compiler.cpp +++ b/src/hotspot/share/opto/c2compiler.cpp @@ -775,9 +775,11 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method) { return EnableVectorSupport; case vmIntrinsics::_blackhole: #if INCLUDE_JVMTI - case vmIntrinsics::_notifyJvmtiMount: - case vmIntrinsics::_notifyJvmtiUnmount: - case vmIntrinsics::_notifyJvmtiHideFrames: + case vmIntrinsics::_notifyJvmtiVThreadStart: + case vmIntrinsics::_notifyJvmtiVThreadEnd: + case vmIntrinsics::_notifyJvmtiVThreadMount: + case 
vmIntrinsics::_notifyJvmtiVThreadUnmount: + case vmIntrinsics::_notifyJvmtiVThreadHideFrames: #endif break; diff --git a/src/hotspot/share/opto/castnode.hpp b/src/hotspot/share/opto/castnode.hpp index cdab33dd603..d4b2d616216 100644 --- a/src/hotspot/share/opto/castnode.hpp +++ b/src/hotspot/share/opto/castnode.hpp @@ -69,6 +69,28 @@ public: static Node* make_cast_for_type(Node* c, Node* in, const Type* type, DependencyType dependency); Node* optimize_integer_cast(PhaseGVN* phase, BasicType bt); + + // Visit all non-cast uses of the node, bypassing ConstraintCasts. + // Pattern: this (-> ConstraintCast)* -> non_cast + // In other words: find all non_cast nodes such that + // non_cast->uncast() == this. + template + static void visit_uncasted_uses(const Node* n, Callback callback) { + ResourceMark rm; + Unique_Node_List internals; + internals.push((Node*)n); // start traversal + for (uint j = 0; j < internals.size(); ++j) { + Node* internal = internals.at(j); // for every internal + for (DUIterator_Fast kmax, k = internal->fast_outs(kmax); k < kmax; k++) { + Node* internal_use = internal->fast_out(k); + if (internal_use->is_ConstraintCast()) { + internals.push(internal_use); // traverse this cast also + } else { + callback(internal_use); + } + } + } + } }; //------------------------------CastIINode------------------------------------- diff --git a/src/hotspot/share/opto/chaitin.cpp b/src/hotspot/share/opto/chaitin.cpp index 765ee800224..2f13a8bbd57 100644 --- a/src/hotspot/share/opto/chaitin.cpp +++ b/src/hotspot/share/opto/chaitin.cpp @@ -221,41 +221,60 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher, bool sc _high_frequency_lrg = MIN2(double(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency()); // Build a list of basic blocks, sorted by frequency - _blks = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks()); // Experiment with sorting strategies to speed compilation + uint nr_blocks = _cfg.number_of_blocks(); double cutoff = 
BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket Block **buckets[NUMBUCKS]; // Array of buckets uint buckcnt[NUMBUCKS]; // Array of bucket counters double buckval[NUMBUCKS]; // Array of bucket value cutoffs + + // The space which our buckets point into. + Block** start = NEW_RESOURCE_ARRAY(Block *, nr_blocks*NUMBUCKS); + for (uint i = 0; i < NUMBUCKS; i++) { - buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks()); + buckets[i] = &start[i*nr_blocks]; buckcnt[i] = 0; // Bump by three orders of magnitude each time cutoff *= 0.001; buckval[i] = cutoff; - for (uint j = 0; j < _cfg.number_of_blocks(); j++) { - buckets[i][j] = nullptr; - } } + // Sort blocks into buckets - for (uint i = 0; i < _cfg.number_of_blocks(); i++) { + for (uint i = 0; i < nr_blocks; i++) { for (uint j = 0; j < NUMBUCKS; j++) { - if ((j == NUMBUCKS - 1) || (_cfg.get_block(i)->_freq > buckval[j])) { + double bval = buckval[j]; + Block* blk = _cfg.get_block(i); + if (j == NUMBUCKS - 1 || blk->_freq > bval) { + uint cnt = buckcnt[j]; // Assign block to end of list for appropriate bucket - buckets[j][buckcnt[j]++] = _cfg.get_block(i); + buckets[j][cnt] = blk; + buckcnt[j] = cnt+1; break; // kick out of inner loop } } } - // Dump buckets into final block array + + // Squash the partially filled buckets together into the first one. + static_assert(NUMBUCKS >= 2, "must"); // If this isn't true then it'll mess up the squashing. 
+ Block** offset = &buckets[0][buckcnt[0]]; + for (int i = 1; i < NUMBUCKS; i++) { + ::memmove(offset, buckets[i], buckcnt[i]*sizeof(Block*)); + offset += buckcnt[i]; + } + assert((&buckets[0][0] + nr_blocks) == offset, "should be"); + + // Free the now unused memory + FREE_RESOURCE_ARRAY(Block*, buckets[1], (NUMBUCKS-1)*nr_blocks); + // Finally, point the _blks to our memory + _blks = buckets[0]; + +#ifdef ASSERT uint blkcnt = 0; for (uint i = 0; i < NUMBUCKS; i++) { - for (uint j = 0; j < buckcnt[i]; j++) { - _blks[blkcnt++] = buckets[i][j]; - } + blkcnt += buckcnt[i]; } - - assert(blkcnt == _cfg.number_of_blocks(), "Block array not totally filled"); + assert(blkcnt == nr_blocks, "Block array not totally filled"); +#endif } // union 2 sets together. diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index ec3bd19eca9..c49c6eccc46 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -4019,9 +4019,6 @@ bool Compile::final_graph_reshaping() { // must be infinite loops. for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) if (!frc._visited.test(n->fast_out(j)->_idx)) { - DEBUG_ONLY( n->fast_out(j)->dump(); ); - DEBUG_ONLY( n->dump_bfs(1, 0, "-"); ); - assert(false, "infinite loop"); record_method_not_compilable("infinite loop"); return true; // Found unvisited kid; must be unreach } diff --git a/src/hotspot/share/opto/convertnode.cpp b/src/hotspot/share/opto/convertnode.cpp index b276a4d1611..ab2e839424a 100644 --- a/src/hotspot/share/opto/convertnode.cpp +++ b/src/hotspot/share/opto/convertnode.cpp @@ -322,6 +322,20 @@ const Type* ConvI2LNode::Value(PhaseGVN* phase) const { return this_type; } +Node* ConvI2LNode::Identity(PhaseGVN* phase) { + // If type is in "int" sub-range, we can + // convert I2L(L2I(x)) => x + // since the conversions have no effect. 
+ if (in(1)->Opcode() == Op_ConvL2I) { + Node* x = in(1)->in(1); + const TypeLong* t = phase->type(x)->isa_long(); + if (t != nullptr && t->_lo >= min_jint && t->_hi <= max_jint) { + return x; + } + } + return this; +} + #ifdef ASSERT static inline bool long_ranges_overlap(jlong lo1, jlong hi1, jlong lo2, jlong hi2) { diff --git a/src/hotspot/share/opto/convertnode.hpp b/src/hotspot/share/opto/convertnode.hpp index e58213fbc09..dbebf337db2 100644 --- a/src/hotspot/share/opto/convertnode.hpp +++ b/src/hotspot/share/opto/convertnode.hpp @@ -190,6 +190,7 @@ class ConvI2LNode : public TypeNode { virtual int Opcode() const; virtual const Type* Value(PhaseGVN* phase) const; virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); + virtual Node* Identity(PhaseGVN* phase); virtual uint ideal_reg() const { return Op_RegL; } }; diff --git a/src/hotspot/share/opto/idealGraphPrinter.cpp b/src/hotspot/share/opto/idealGraphPrinter.cpp index 45eaecda441..1aac87bb2a0 100644 --- a/src/hotspot/share/opto/idealGraphPrinter.cpp +++ b/src/hotspot/share/opto/idealGraphPrinter.cpp @@ -462,8 +462,8 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) { if (flags & Node::Flag_has_call) { print_prop("has_call", "true"); } - if (flags & Node::Flag_is_reduction) { - print_prop("is_reduction", "true"); + if (flags & Node::Flag_has_swapped_edges) { + print_prop("has_swapped_edges", "true"); } if (C->matcher() != nullptr) { diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp index ce0957fc793..fd3b813eded 100644 --- a/src/hotspot/share/opto/library_call.cpp +++ b/src/hotspot/share/opto/library_call.cpp @@ -481,11 +481,15 @@ bool LibraryCallKit::try_to_inline(int predicate) { case vmIntrinsics::_setScopedValueCache: return inline_native_setScopedValueCache(); #if INCLUDE_JVMTI - case vmIntrinsics::_notifyJvmtiMount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_mount()), - 
"notifyJvmtiMount"); - case vmIntrinsics::_notifyJvmtiUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_unmount()), - "notifyJvmtiUnmount"); - case vmIntrinsics::_notifyJvmtiHideFrames: return inline_native_notify_jvmti_hide(); + case vmIntrinsics::_notifyJvmtiVThreadStart: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_start()), + "notifyJvmtiStart", true, false); + case vmIntrinsics::_notifyJvmtiVThreadEnd: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_end()), + "notifyJvmtiEnd", false, true); + case vmIntrinsics::_notifyJvmtiVThreadMount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()), + "notifyJvmtiMount", false, false); + case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()), + "notifyJvmtiUnmount", false, false); + case vmIntrinsics::_notifyJvmtiVThreadHideFrames: return inline_native_notify_jvmti_hide(); #endif #ifdef JFR_HAVE_INTRINSICS @@ -2873,25 +2877,24 @@ bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* func // When notifications are disabled then just update the VTMS transition bit and return. // Otherwise, the bit is updated in the given function call implementing JVMTI notification protocol. -bool LibraryCallKit::inline_native_notify_jvmti_funcs(address funcAddr, const char* funcName) { +bool LibraryCallKit::inline_native_notify_jvmti_funcs(address funcAddr, const char* funcName, bool is_start, bool is_end) { if (!DoJVMTIVirtualThreadTransitions) { return true; } IdealKit ideal(this); Node* ONE = ideal.ConI(1); - Node* hide = _gvn.transform(argument(1)); // hide argument: true for begin and false for end of VTMS transition + Node* hide = is_start ? ideal.ConI(0) : (is_end ? 
ideal.ConI(1) : _gvn.transform(argument(1))); Node* addr = makecon(TypeRawPtr::make((address)&JvmtiVTMSTransitionDisabler::_VTMS_notify_jvmti_events)); Node* notify_jvmti_enabled = ideal.load(ideal.ctrl(), addr, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw); ideal.if_then(notify_jvmti_enabled, BoolTest::eq, ONE); { // if notifyJvmti enabled then make a call to the given SharedRuntime function - const TypeFunc* tf = OptoRuntime::notify_jvmti_Type(); + const TypeFunc* tf = OptoRuntime::notify_jvmti_vthread_Type(); Node* vt_oop = _gvn.transform(must_be_not_null(argument(0), true)); // VirtualThread this argument - Node* cond = _gvn.transform(argument(2)); // firstMount or lastUnmount argument sync_kit(ideal); - make_runtime_call(RC_NO_LEAF, tf, funcAddr, funcName, TypePtr::BOTTOM, vt_oop, hide, cond); + make_runtime_call(RC_NO_LEAF, tf, funcAddr, funcName, TypePtr::BOTTOM, vt_oop, hide); ideal.sync_kit(this); } ideal.else_(); { // set hide value to the VTMS transition bit in current JavaThread and VirtualThread object @@ -3508,8 +3511,7 @@ bool LibraryCallKit::inline_native_setScopedValueCache() { Node* cache_obj_handle = scopedValueCache_helper(); const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr(); - store_to_memory(control(), cache_obj_handle, arr, T_OBJECT, adr_type, - MemNode::unordered); + access_store_at(nullptr, cache_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED); return true; } diff --git a/src/hotspot/share/opto/library_call.hpp b/src/hotspot/share/opto/library_call.hpp index ac9e8ab6a47..46dd51bf654 100644 --- a/src/hotspot/share/opto/library_call.hpp +++ b/src/hotspot/share/opto/library_call.hpp @@ -246,7 +246,7 @@ class LibraryCallKit : public GraphKit { bool inline_native_time_funcs(address method, const char* funcName); #if INCLUDE_JVMTI - bool inline_native_notify_jvmti_funcs(address funcAddr, const char* funcName); + bool inline_native_notify_jvmti_funcs(address funcAddr, const char* funcName, bool 
is_start, bool is_end); bool inline_native_notify_jvmti_hide(); #endif diff --git a/src/hotspot/share/opto/live.cpp b/src/hotspot/share/opto/live.cpp index ea1722702ce..6932fd6a697 100644 --- a/src/hotspot/share/opto/live.cpp +++ b/src/hotspot/share/opto/live.cpp @@ -54,7 +54,6 @@ PhaseLive::PhaseLive(const PhaseCFG &cfg, const LRG_List &names, Arena *arena, b void PhaseLive::compute(uint maxlrg) { _maxlrg = maxlrg; - _worklist = new (_arena) Block_List(); // Init the sparse live arrays. This data is live on exit from here! // The _live info is the live-out info. @@ -88,6 +87,8 @@ void PhaseLive::compute(uint maxlrg) { _free_IndexSet = nullptr; + Block_List worklist; + // Blocks having done pass-1 VectorSet first_pass; @@ -135,13 +136,13 @@ void PhaseLive::compute(uint maxlrg) { // Push these live-in things to predecessors for (uint l = 1; l < block->num_preds(); l++) { Block* p = _cfg.get_block_for_node(block->pred(l)); - add_liveout(p, use, first_pass); + add_liveout(worklist, p, use, first_pass); // PhiNode uses go in the live-out set of prior blocks. 
for (uint k = i; k > 0; k--) { Node *phi = block->get_node(k - 1); if (l < phi->req()) { - add_liveout(p, _names.at(phi->in(l)->_idx), first_pass); + add_liveout(worklist, p, _names.at(phi->in(l)->_idx), first_pass); } } } @@ -149,15 +150,15 @@ void PhaseLive::compute(uint maxlrg) { first_pass.set(block->_pre_order); // Inner loop: blocks that picked up new live-out values to be propagated - while (_worklist->size()) { - Block* block = _worklist->pop(); + while (worklist.size() != 0) { + Block* block = worklist.pop(); IndexSet *delta = getset(block); assert(delta->count(), "missing delta set"); // Add new-live-in to predecessors live-out sets for (uint l = 1; l < block->num_preds(); l++) { Block* predecessor = _cfg.get_block_for_node(block->pred(l)); - add_liveout(predecessor, delta, first_pass); + add_liveout(worklist, predecessor, delta, first_pass); } freeset(block); @@ -228,7 +229,7 @@ void PhaseLive::freeset(Block *p) { // Add a live-out value to a given blocks live-out set. If it is new, then // also add it to the delta set and stick the block on the worklist. -void PhaseLive::add_liveout(Block *p, uint r, VectorSet &first_pass) { +void PhaseLive::add_liveout(Block_List& worklist, Block* p, uint r, VectorSet& first_pass) { IndexSet *live = &_live[p->_pre_order-1]; if (live->insert(r)) { // If actually inserted... // We extended the live-out set. See if the value is generated locally. @@ -236,7 +237,7 @@ void PhaseLive::add_liveout(Block *p, uint r, VectorSet &first_pass) { if (!_defs[p->_pre_order-1].member(r)) { if (!_deltas[p->_pre_order-1] && // Not on worklist? first_pass.test(p->_pre_order)) { - _worklist->push(p); // Actually go on worklist if already 1st pass + worklist.push(p); // Actually go on worklist if already 1st pass } getset(p)->insert(r); } @@ -244,7 +245,7 @@ void PhaseLive::add_liveout(Block *p, uint r, VectorSet &first_pass) { } // Add a vector of live-out values to a given blocks live-out set. 
-void PhaseLive::add_liveout(Block *p, IndexSet *lo, VectorSet &first_pass) { +void PhaseLive::add_liveout(Block_List& worklist, Block* p, IndexSet* lo, VectorSet& first_pass) { IndexSet *live = &_live[p->_pre_order-1]; IndexSet *defs = &_defs[p->_pre_order-1]; IndexSet *on_worklist = _deltas[p->_pre_order-1]; @@ -265,7 +266,7 @@ void PhaseLive::add_liveout(Block *p, IndexSet *lo, VectorSet &first_pass) { _deltas[p->_pre_order-1] = delta; // Flag as on worklist now if (!on_worklist && // Not on worklist? first_pass.test(p->_pre_order)) { - _worklist->push(p); // Actually go on worklist if already 1st pass + worklist.push(p); // Actually go on worklist if already 1st pass } } else { // Nothing there; just free it delta->set_next(_free_IndexSet); diff --git a/src/hotspot/share/opto/live.hpp b/src/hotspot/share/opto/live.hpp index d5ff1570fd1..1ff93bc5767 100644 --- a/src/hotspot/share/opto/live.hpp +++ b/src/hotspot/share/opto/live.hpp @@ -57,20 +57,18 @@ class PhaseLive : public Phase { IndexSet **_deltas; IndexSet *_free_IndexSet; // Free list of same - Block_List *_worklist; // Worklist for iterative solution - const PhaseCFG &_cfg; // Basic blocks const LRG_List &_names; // Mapping from Nodes to live ranges uint _maxlrg; // Largest live-range number Arena *_arena; bool _keep_deltas; // Retain live in information - IndexSet *getset( Block *p ); + IndexSet *getset(Block* p); IndexSet *getfreeset( ); void freeset( Block *p ); - void add_liveout( Block *p, uint r, VectorSet &first_pass ); - void add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ); - void add_livein( Block *p, IndexSet *lo ); + void add_liveout(Block_List& worklist, Block* p, uint r, VectorSet& first_pass); + void add_liveout(Block_List& worklist, Block* p, IndexSet* lo, VectorSet& first_pass); + void add_livein(Block* p, IndexSet* lo); public: PhaseLive(const PhaseCFG &cfg, const LRG_List &names, Arena *arena, bool keep_deltas); diff --git a/src/hotspot/share/opto/loopTransform.cpp 
b/src/hotspot/share/opto/loopTransform.cpp index cb704b076a4..2899e7c18a8 100644 --- a/src/hotspot/share/opto/loopTransform.cpp +++ b/src/hotspot/share/opto/loopTransform.cpp @@ -1037,10 +1037,6 @@ bool IdealLoopTree::policy_unroll(PhaseIdealLoop *phase) { } if (UseSuperWord) { - if (!cl->is_reduction_loop()) { - phase->mark_reductions(this); - } - // Only attempt slp analysis when user controls do not prohibit it if (!range_checks_present() && (LoopMaxUnroll > _local_loop_unroll_factor)) { // Once policy_slp_analysis succeeds, mark the loop with the @@ -1694,15 +1690,6 @@ void PhaseIdealLoop::insert_pre_post_loops(IdealLoopTree *loop, Node_List &old_n set_idom(new_pre_exit, pre_end, dd_main_head); set_loop(new_pre_exit, outer_loop->_parent); - if (peel_only) { - // Nodes in the peeled iteration that were marked as reductions within the - // original loop might not be reductions within their new outer loop. - for (uint i = 0; i < loop->_body.size(); i++) { - Node* n = old_new[loop->_body[i]->_idx]; - n->remove_flag(Node::Flag_is_reduction); - } - } - // Step B2: Build a zero-trip guard for the main-loop. After leaving the // pre-loop, the main-loop may not execute at all. Later in life this // zero-trip guard will become the minimum-trip guard when we unroll @@ -2288,74 +2275,32 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj new_limit = _igvn.intcon(limit->get_int() - stride_con); set_ctrl(new_limit, C->root()); } else { - // Limit is not constant. - assert(loop_head->unrolled_count() != 1 || has_ctrl(opaq), "should have opaque for first unroll"); - if ((stride_con > 0 && (java_subtract(limit_type->_lo, stride_con) < limit_type->_lo)) || - (stride_con < 0 && (java_subtract(limit_type->_hi, stride_con) > limit_type->_hi))) { - // No underflow. - new_limit = new SubINode(limit, stride); + // Limit is not constant. Int subtraction could lead to underflow. + // (1) Convert to long. 
+ Node* limit_l = new ConvI2LNode(limit); + register_new_node(limit_l, get_ctrl(limit)); + Node* stride_l = _igvn.longcon(stride_con); + set_ctrl(stride_l, C->root()); + + // (2) Subtract: compute in long, to prevent underflow. + Node* new_limit_l = new SubLNode(limit_l, stride_l); + register_new_node(new_limit_l, ctrl); + + // (3) Clamp to int range, in case we had subtraction underflow. + Node* underflow_clamp_l = _igvn.longcon((stride_con > 0) ? min_jint : max_jint); + set_ctrl(underflow_clamp_l, C->root()); + Node* new_limit_no_underflow_l = nullptr; + if (stride_con > 0) { + // limit = MaxL(limit - stride, min_jint) + new_limit_no_underflow_l = new MaxLNode(C, new_limit_l, underflow_clamp_l); } else { - // (limit - stride) may underflow. - // Clamp the adjustment value with MININT or MAXINT: - // - // new_limit = limit-stride - // if (stride > 0) - // new_limit = (limit < new_limit) ? MININT : new_limit; - // else - // new_limit = (limit > new_limit) ? MAXINT : new_limit; - // - BoolTest::mask bt = loop_end->test_trip(); - assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected"); - Node* underflow_clamp = _igvn.intcon((stride_con > 0) ? min_jint : max_jint); - set_ctrl(underflow_clamp, C->root()); - Node* limit_before_underflow = nullptr; - Node* prev_limit = nullptr; - Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : nullptr; - if (loop_head->unrolled_count() > 1 && - limit->is_CMove() && limit->Opcode() == Op_CMoveI && - limit->in(CMoveNode::IfTrue) == underflow_clamp && - bol->as_Bool()->_test._test == bt && - bol->in(1)->Opcode() == Op_CmpI && - bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) { - // Loop was unrolled before, and had an unrolling protection CMoveI. 
- // Use inputs to previous CMoveI for the new one: - prev_limit = limit->in(CMoveNode::IfFalse); // unpack previous limit with underflow - limit_before_underflow = bol->in(1)->in(1); // CMoveI -> Bool -> CmpI -> limit_before_underflow - } else { - // Loop was not unrolled before, or the limit did not underflow in a previous unrolling. - prev_limit = limit; - limit_before_underflow = limit; - } - // prev_limit stride - // | | - // limit_before_underflow new_limit_with_underflow (SubI) - // | | | - // underflow_cmp | - // | | - // underflow_bool [lt/gt] | - // | | - // +----+ +------------+ - // | | - // | | underflow_clamp (min_jint/max_jint) - // | | | - // CMoveINode ([min_jint..hi] / [lo..max_jing]) - // - assert(limit_before_underflow != nullptr && prev_limit != nullptr, "must find them"); - Node* new_limit_with_underflow = new SubINode(prev_limit, stride); - register_new_node(new_limit_with_underflow, ctrl); - // We must compare with limit_before_underflow, prev_limit may already have underflowed. - Node* underflow_cmp = new CmpINode(limit_before_underflow, new_limit_with_underflow); - register_new_node(underflow_cmp, ctrl); - Node* underflow_bool = new BoolNode(underflow_cmp, bt); - register_new_node(underflow_bool, ctrl); - // Prevent type from becoming too pessimistic due to type underflow. The new limit - // may be arbitrarily decreased by unrolling, but still in [min_jint..hi] / [lo..max_jint] - const TypeInt* limit_before_underflow_t = _igvn.type(limit_before_underflow)->is_int(); - const TypeInt* no_underflow_t = TypeInt::make(stride_con > 0 ? min_jint : limit_before_underflow_t->_lo, - stride_con > 0 ? 
limit_before_underflow_t->_hi : max_jint, - Type::WidenMax); - new_limit = new CMoveINode(underflow_bool, new_limit_with_underflow, underflow_clamp, no_underflow_t); + // limit = MinL(limit - stride, max_jint) + new_limit_no_underflow_l = new MinLNode(C, new_limit_l, underflow_clamp_l); } + register_new_node(new_limit_no_underflow_l, ctrl); + + // (4) Convert back to int. + new_limit = new ConvL2INode(new_limit_no_underflow_l); register_new_node(new_limit, ctrl); } @@ -2498,72 +2443,12 @@ void PhaseIdealLoop::do_maximally_unroll(IdealLoopTree *loop, Node_List &old_new } } -void PhaseIdealLoop::mark_reductions(IdealLoopTree *loop) { - if (SuperWordReductions == false) return; - - CountedLoopNode* loop_head = loop->_head->as_CountedLoop(); - if (loop_head->unrolled_count() > 1) { - return; - } - - Node* trip_phi = loop_head->phi(); - for (DUIterator_Fast imax, i = loop_head->fast_outs(imax); i < imax; i++) { - Node* phi = loop_head->fast_out(i); - if (phi->is_Phi() && phi->outcnt() > 0 && phi != trip_phi) { - // For definitions which are loop inclusive and not tripcounts. - Node* def_node = phi->in(LoopNode::LoopBackControl); - - if (def_node != nullptr) { - Node* n_ctrl = get_ctrl(def_node); - if (n_ctrl != nullptr && loop->is_member(get_loop(n_ctrl))) { - // Now test it to see if it fits the standard pattern for a reduction operator. 
- int opc = def_node->Opcode(); - if (opc != ReductionNode::opcode(opc, def_node->bottom_type()->basic_type()) - || opc == Op_MinD || opc == Op_MinF || opc == Op_MaxD || opc == Op_MaxF) { - if (!def_node->is_reduction()) { // Not marked yet - // To be a reduction, the arithmetic node must have the phi as input and provide a def to it - bool ok = false; - for (unsigned j = 1; j < def_node->req(); j++) { - Node* in = def_node->in(j); - if (in == phi) { - ok = true; - break; - } - } - - // do nothing if we did not match the initial criteria - if (ok == false) { - continue; - } - - // The result of the reduction must not be used in the loop - for (DUIterator_Fast imax, i = def_node->fast_outs(imax); i < imax && ok; i++) { - Node* u = def_node->fast_out(i); - if (!loop->is_member(get_loop(ctrl_or_self(u)))) { - continue; - } - if (u == phi) { - continue; - } - ok = false; - } - - // iff the uses conform - if (ok) { - def_node->add_flag(Node::Flag_is_reduction); - loop_head->mark_has_reductions(); - } - } - } - } - } - } - } -} - //------------------------------adjust_limit----------------------------------- // Helper function that computes new loop limit as (rc_limit-offset)/scale Node* PhaseIdealLoop::adjust_limit(bool is_positive_stride, Node* scale, Node* offset, Node* rc_limit, Node* old_limit, Node* pre_ctrl, bool round) { + Node* old_limit_long = new ConvI2LNode(old_limit); + register_new_node(old_limit_long, pre_ctrl); + Node* sub = new SubLNode(rc_limit, offset); register_new_node(sub, pre_ctrl); Node* limit = new DivLNode(nullptr, sub, scale); @@ -2589,27 +2474,19 @@ Node* PhaseIdealLoop::adjust_limit(bool is_positive_stride, Node* scale, Node* o // - integer underflow of limit: MAXL chooses old_limit (>= MIN_INT > limit) // INT() is finally converting the limit back to an integer value. - // We use CMove nodes to implement long versions of min/max (MINL/MAXL). 
- // We use helper methods for inner MINL/MAXL which return CMoveL nodes to keep a long value for the outer MINL/MAXL comparison: - Node* inner_result_long; + Node* inner_result_long = nullptr; + Node* outer_result_long = nullptr; if (is_positive_stride) { - inner_result_long = MaxNode::signed_max(limit, _igvn.longcon(min_jint), TypeLong::LONG, _igvn); + inner_result_long = new MaxLNode(C, limit, _igvn.longcon(min_jint)); + outer_result_long = new MinLNode(C, inner_result_long, old_limit_long); } else { - inner_result_long = MaxNode::signed_min(limit, _igvn.longcon(max_jint), TypeLong::LONG, _igvn); + inner_result_long = new MinLNode(C, limit, _igvn.longcon(max_jint)); + outer_result_long = new MaxLNode(C, inner_result_long, old_limit_long); } - set_subtree_ctrl(inner_result_long, false); + register_new_node(inner_result_long, pre_ctrl); + register_new_node(outer_result_long, pre_ctrl); - // Outer MINL/MAXL: - // The comparison is done with long values but the result is the converted back to int by using CmovI. - Node* old_limit_long = new ConvI2LNode(old_limit); - register_new_node(old_limit_long, pre_ctrl); - Node* cmp = new CmpLNode(old_limit_long, limit); - register_new_node(cmp, pre_ctrl); - Node* bol = new BoolNode(cmp, is_positive_stride ? 
BoolTest::gt : BoolTest::lt); - register_new_node(bol, pre_ctrl); - Node* inner_result_int = new ConvL2INode(inner_result_long); // Could under-/overflow but that's fine as comparison was done with CmpL - register_new_node(inner_result_int, pre_ctrl); - limit = new CMoveINode(bol, old_limit, inner_result_int, TypeInt::INT); + limit = new ConvL2INode(outer_result_long); register_new_node(limit, pre_ctrl); return limit; } diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp index f62f9dac876..73cb22c6db8 100644 --- a/src/hotspot/share/opto/loopnode.cpp +++ b/src/hotspot/share/opto/loopnode.cpp @@ -2249,7 +2249,6 @@ void CountedLoopNode::dump_spec(outputStream *st) const { if (is_pre_loop ()) st->print("pre of N%d" , _main_idx); if (is_main_loop()) st->print("main of N%d", _idx); if (is_post_loop()) st->print("post of N%d", _main_idx); - if (is_reduction_loop()) st->print(" reduction"); if (is_strip_mined()) st->print(" strip mined"); } #endif @@ -3991,7 +3990,6 @@ void IdealLoopTree::dump_head() { if (cl->is_pre_loop ()) tty->print(" pre" ); if (cl->is_main_loop()) tty->print(" main"); if (cl->is_post_loop()) tty->print(" post"); - if (cl->is_reduction_loop()) tty->print(" reduction"); if (cl->is_vectorized_loop()) tty->print(" vector"); if (range_checks_present()) tty->print(" rc "); if (cl->is_multiversioned()) tty->print(" multi "); @@ -4468,7 +4466,7 @@ void PhaseIdealLoop::build_and_optimize() { AutoNodeBudget node_budget(this); if (lpt->_head->as_CountedLoop()->is_normal_loop() && lpt->policy_maximally_unroll(this)) { - memset( worklist.adr(), 0, worklist.Size()*sizeof(Node*) ); + memset( worklist.adr(), 0, worklist.max()*sizeof(Node*) ); do_maximally_unroll(lpt, worklist); } } @@ -4543,7 +4541,7 @@ void PhaseIdealLoop::build_and_optimize() { // If split-if's didn't hack the graph too bad (no CFG changes) // then do loop opts. 
if (C->has_loops() && !C->major_progress()) { - memset( worklist.adr(), 0, worklist.Size()*sizeof(Node*) ); + memset( worklist.adr(), 0, worklist.max()*sizeof(Node*) ); _ltree_root->_child->iteration_split( this, worklist ); // No verify after peeling! GCM has hoisted code out of the loop. // After peeling, the hoisted code could sink inside the peeled area. @@ -6356,7 +6354,7 @@ void PhaseIdealLoop::dump(IdealLoopTree* loop, uint idx, Node_List &rpo_list) co } } // Dump nodes it controls - for (uint k = 0; k < _nodes.Size(); k++) { + for (uint k = 0; k < _nodes.max(); k++) { // (k < C->unique() && get_ctrl(find(k)) == n) if (k < C->unique() && _nodes[k] == (Node*)((intptr_t)n + 1)) { Node* m = C->root()->find(k); diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp index c781cc4651f..459021120ed 100644 --- a/src/hotspot/share/opto/loopnode.hpp +++ b/src/hotspot/share/opto/loopnode.hpp @@ -61,23 +61,22 @@ protected: uint _loop_flags; // Names for flag bitfields enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3, - MainHasNoPreLoop = 1<<2, - HasExactTripCount = 1<<3, - InnerLoop = 1<<4, - PartialPeelLoop = 1<<5, - PartialPeelFailed = 1<<6, - HasReductions = 1<<7, - WasSlpAnalyzed = 1<<8, - PassedSlpAnalysis = 1<<9, - DoUnrollOnly = 1<<10, - VectorizedLoop = 1<<11, - HasAtomicPostLoop = 1<<12, - IsMultiversioned = 1<<13, - StripMined = 1<<14, - SubwordLoop = 1<<15, - ProfileTripFailed = 1<<16, - LoopNestInnerLoop = 1 << 17, - LoopNestLongOuterLoop = 1 << 18}; + MainHasNoPreLoop = 1<<2, + HasExactTripCount = 1<<3, + InnerLoop = 1<<4, + PartialPeelLoop = 1<<5, + PartialPeelFailed = 1<<6, + WasSlpAnalyzed = 1<<7, + PassedSlpAnalysis = 1<<8, + DoUnrollOnly = 1<<9, + VectorizedLoop = 1<<10, + HasAtomicPostLoop = 1<<11, + IsMultiversioned = 1<<12, + StripMined = 1<<13, + SubwordLoop = 1<<14, + ProfileTripFailed = 1<<15, + LoopNestInnerLoop = 1<<16, + LoopNestLongOuterLoop = 1<<17}; char _unswitch_count; enum { _unswitch_max=3 
}; char _postloop_flags; @@ -105,7 +104,6 @@ public: bool is_loop_nest_outer_loop() const { return _loop_flags & LoopNestLongOuterLoop; } void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; } - void mark_has_reductions() { _loop_flags |= HasReductions; } void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; } void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; } void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; } @@ -286,7 +284,6 @@ public: bool is_pre_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Pre; } bool is_main_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Main; } bool is_post_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Post; } - bool is_reduction_loop() const { return (_loop_flags&HasReductions) == HasReductions; } bool was_slp_analyzed () const { return (_loop_flags&WasSlpAnalyzed) == WasSlpAnalyzed; } bool has_passed_slp () const { return (_loop_flags&PassedSlpAnalysis) == PassedSlpAnalysis; } bool is_unroll_only () const { return (_loop_flags&DoUnrollOnly) == DoUnrollOnly; } @@ -1313,9 +1310,6 @@ public: // Unroll the loop body one step - make each trip do 2 iterations. void do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ); - // Mark vector reduction candidates before loop unrolling - void mark_reductions( IdealLoopTree *loop ); - // Return true if exp is a constant times an induction var bool is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_scale, bool* p_short_scale, int depth = 0); diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp index 1e485cc73dc..53fce009040 100644 --- a/src/hotspot/share/opto/loopopts.cpp +++ b/src/hotspot/share/opto/loopopts.cpp @@ -2622,10 +2622,6 @@ void PhaseIdealLoop::clone_loop_body(const Node_List& body, Node_List &old_new, Node* old = body.at(i); Node* nnn = old->clone(); old_new.map(old->_idx, nnn); - if (old->is_reduction()) { - // Reduction flag is not copied by default. 
Copy it here when cloning the entire loop body. - nnn->add_flag(Node::Flag_is_reduction); - } if (C->do_vector_loop() && cm != nullptr) { cm->verify_insert_and_clone(old, nnn, cm->clone_idx()); } diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp index f2cfe06c75c..e9132f83274 100644 --- a/src/hotspot/share/opto/macro.cpp +++ b/src/hotspot/share/opto/macro.cpp @@ -2373,6 +2373,8 @@ void PhaseMacroExpand::eliminate_macro_nodes() { assert(n->Opcode() == Op_LoopLimit || n->Opcode() == Op_Opaque3 || n->Opcode() == Op_Opaque4 || + n->Opcode() == Op_MaxL || + n->Opcode() == Op_MinL || BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(n), "unknown node type in macro list"); } @@ -2457,6 +2459,18 @@ bool PhaseMacroExpand::expand_macro_nodes() { n->as_OuterStripMinedLoop()->adjust_strip_mined_loop(&_igvn); C->remove_macro_node(n); success = true; + } else if (n->Opcode() == Op_MaxL) { + // Since MaxL and MinL are not implemented in the backend, we expand them to + // a CMoveL construct now. At least until here, the type could be computed + // precisely. CMoveL is not so smart, but we can give it at least the best + // type we know about n now. 
+ Node* repl = MaxNode::signed_max(n->in(1), n->in(2), _igvn.type(n), _igvn); + _igvn.replace_node(n, repl); + success = true; + } else if (n->Opcode() == Op_MinL) { + Node* repl = MaxNode::signed_min(n->in(1), n->in(2), _igvn.type(n), _igvn); + _igvn.replace_node(n, repl); + success = true; } assert(!success || (C->macro_count() == (old_macro_count - 1)), "elimination must have deleted one node from macro list"); progress = progress || success; diff --git a/src/hotspot/share/opto/matcher.cpp b/src/hotspot/share/opto/matcher.cpp index c9ea54bde1f..15cea8a2555 100644 --- a/src/hotspot/share/opto/matcher.cpp +++ b/src/hotspot/share/opto/matcher.cpp @@ -1735,7 +1735,7 @@ MachNode* Matcher::find_shared_node(Node* leaf, uint rule) { if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return nullptr; // See if this Con has already been reduced using this rule. - if (_shared_nodes.Size() <= leaf->_idx) return nullptr; + if (_shared_nodes.max() <= leaf->_idx) return nullptr; MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx); if (last != nullptr && rule == last->rule()) { // Don't expect control change for DecodeN diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp index 20892cfef73..f781fa28785 100644 --- a/src/hotspot/share/opto/node.cpp +++ b/src/hotspot/share/opto/node.cpp @@ -521,10 +521,6 @@ Node *Node::clone() const { // If it is applicable, it will happen anyway when the cloned node is registered with IGVN. n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn); } - if (n->is_reduction()) { - // Do not copy reduction information. This must be explicitly set by the calling code. 
- n->remove_flag(Node::Flag_is_reduction); - } BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); bs->register_potential_barrier_node(n); @@ -1310,7 +1306,7 @@ bool Node::dominates(Node* sub, Node_List &nlist) { } else if (sub == up && sub->is_Region() && sub->req() == 2) { // Take in(1) path on the way up to 'dom' for regions with only one input up = sub->in(1); - } else if (sub == up && sub->is_Region() && sub->req() == 3) { + } else if (sub == up && sub->is_Region()) { // Try both paths for Regions with 2 input paths (it may be a loop head). // It could give conservative 'false' answer without information // which region's input is the entry path. diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp index 107654752bf..a81359e8459 100644 --- a/src/hotspot/share/opto/node.hpp +++ b/src/hotspot/share/opto/node.hpp @@ -578,6 +578,12 @@ public: _in[i2] = n1; // If this node is in the hash table, make sure it doesn't need a rehash. assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code"); + // Flip swapped edges flag. + if (has_swapped_edges()) { + remove_flag(Node::Flag_has_swapped_edges); + } else { + add_flag(Node::Flag_has_swapped_edges); + } } // Iterators over input Nodes for a Node X are written as: @@ -784,7 +790,7 @@ public: Flag_avoid_back_to_back_before = 1 << 8, Flag_avoid_back_to_back_after = 1 << 9, Flag_has_call = 1 << 10, - Flag_is_reduction = 1 << 11, + Flag_has_swapped_edges = 1 << 11, Flag_is_scheduled = 1 << 12, Flag_is_expensive = 1 << 13, Flag_is_predicated_vector = 1 << 14, @@ -1001,10 +1007,8 @@ public: bool is_macro() const { return (_flags & Flag_is_macro) != 0; } // The node is expensive: the best control is set during loop opts bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != nullptr; } - - // An arithmetic node which accumulates a data in a loop. - // It must have the loop's phi as input and provide a def to the phi. 
- bool is_reduction() const { return (_flags & Flag_is_reduction) != 0; } + // The node's original edge position is swapped. + bool has_swapped_edges() const { return (_flags & Flag_has_swapped_edges) != 0; } bool is_predicated_vector() const { return (_flags & Flag_is_predicated_vector) != 0; } @@ -1548,7 +1552,7 @@ public: Copy::zero_to_bytes(_nodes, _max * sizeof(Node*)); } - uint Size() const { return _max; } + uint max() const { return _max; } void dump() const; }; diff --git a/src/hotspot/share/opto/output.cpp b/src/hotspot/share/opto/output.cpp index 5539eb5724c..524e38fcf72 100644 --- a/src/hotspot/share/opto/output.cpp +++ b/src/hotspot/share/opto/output.cpp @@ -3072,7 +3072,7 @@ void Scheduling::garbage_collect_pinch_nodes() { if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:"); #endif int trace_cnt = 0; - for (uint k = 0; k < _reg_node.Size(); k++) { + for (uint k = 0; k < _reg_node.max(); k++) { Node* pinch = _reg_node[k]; if ((pinch != nullptr) && pinch->Opcode() == Op_Node && // no predecence input edges diff --git a/src/hotspot/share/opto/parse2.cpp b/src/hotspot/share/opto/parse2.cpp index a1cf2c3508e..28b045e9bbb 100644 --- a/src/hotspot/share/opto/parse2.cpp +++ b/src/hotspot/share/opto/parse2.cpp @@ -435,7 +435,7 @@ void Parse::do_tableswitch() { // generate decision tree, using trichotomy when possible int rnum = len+2; - bool makes_backward_branch = false; + bool makes_backward_branch = (default_dest <= bci()); SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum); int rp = -1; if (lo_index != min_jint) { @@ -526,7 +526,7 @@ void Parse::do_lookupswitch() { } int rnum = len*2+1; - bool makes_backward_branch = false; + bool makes_backward_branch = (default_dest <= bci()); SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum); int rp = -1; for (int j = 0; j < len; j++) { @@ -1192,6 +1192,25 @@ static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, i } return false; } + +// Give up if too few 
(or too many, in which case the sum will overflow) counts to be meaningful. +// We also check that individual counters are positive first, otherwise the sum can become positive. +// (check for saturation, integer overflow, and immature counts) +static bool counters_are_meaningful(int counter1, int counter2, int min) { + // check for saturation, including "uint" values too big to fit in "int" + if (counter1 < 0 || counter2 < 0) { + return false; + } + // check for integer overflow of the sum + int64_t sum = (int64_t)counter1 + (int64_t)counter2; + STATIC_ASSERT(sizeof(counter1) < sizeof(sum)); + if (sum > INT_MAX) { + return false; + } + // check if mature + return (counter1 + counter2) >= min; +} + //--------------------------dynamic_branch_prediction-------------------------- // Try to gather dynamic branch prediction behavior. Return a probability // of the branch being taken and set the "cnt" field. Returns a -1.0 @@ -1218,6 +1237,8 @@ float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* t if (!data->is_JumpData()) return PROB_UNKNOWN; // get taken and not taken values + // NOTE: saturated UINT_MAX values become negative, + // as do counts above INT_MAX. taken = data->as_JumpData()->taken(); not_taken = 0; if (data->is_BranchData()) { @@ -1225,13 +1246,16 @@ float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* t } // scale the counts to be commensurate with invocation counts: + // NOTE: overflow for positive values is clamped at INT_MAX taken = method()->scale_count(taken); not_taken = method()->scale_count(not_taken); } + // At this point, saturation or overflow is indicated by INT_MAX + // or a negative value. // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful. // We also check that individual counters are positive first, otherwise the sum can become positive. 
- if (taken < 0 || not_taken < 0 || taken + not_taken < 40) { + if (!counters_are_meaningful(taken, not_taken, 40)) { if (C->log() != nullptr) { C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken); } @@ -1260,7 +1284,7 @@ float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* t } assert((cnt > 0.0f) && (prob > 0.0f), - "Bad frequency assignment in if"); + "Bad frequency assignment in if cnt=%g prob=%g taken=%d not_taken=%d", cnt, prob, taken, not_taken); if (C->log() != nullptr) { const char* prob_str = nullptr; diff --git a/src/hotspot/share/opto/phaseX.cpp b/src/hotspot/share/opto/phaseX.cpp index b00d5e2d29b..79b76624d58 100644 --- a/src/hotspot/share/opto/phaseX.cpp +++ b/src/hotspot/share/opto/phaseX.cpp @@ -664,7 +664,7 @@ void PhaseTransform::dump_old2new_map() const { } void PhaseTransform::dump_new( uint nidx ) const { - for( uint i=0; i<_nodes.Size(); i++ ) + for( uint i=0; i<_nodes.max(); i++ ) if( _nodes[i] && _nodes[i]->_idx == nidx ) { _nodes[i]->dump(); tty->cr(); @@ -1760,31 +1760,16 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) { } // If changed Cast input, notify down for Phi and Sub - both do "uncast" + // Patterns: + // ConstraintCast+ -> Sub + // ConstraintCast+ -> Phi if (use->is_ConstraintCast()) { - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - if (u->is_Phi() || u->is_Sub()) { - // Phi (.., CastII, ..) or Sub(Cast(x), x) - _worklist.push(u); - } else if (u->is_ConstraintCast()) { - // Follow cast-chains down to Sub: Sub( CastII(CastII(x)), x) - // This case is quite rare. 
Let's BFS-traverse casts, to find Subs: - ResourceMark rm; - Unique_Node_List casts; - casts.push(u); // start traversal - for (uint j = 0; j < casts.size(); ++j) { - Node* cast = casts.at(j); // for every cast - for (DUIterator_Fast kmax, k = cast->fast_outs(kmax); k < kmax; k++) { - Node* cast_use = cast->fast_out(k); - if (cast_use->is_ConstraintCast()) { - casts.push(cast_use); // traverse this cast also - } else if (cast_use->is_Sub()) { - _worklist.push(cast_use); // found Sub - } - } - } + auto push_phi_or_sub_uses_to_worklist = [&](Node* n){ + if (n->is_Phi() || n->is_Sub()) { + _worklist.push(n); } - } + }; + ConstraintCastNode::visit_uncasted_uses(use, push_phi_or_sub_uses_to_worklist); } // If changed LShift inputs, check RShift users for useless sign-ext if( use_op == Op_LShiftI ) { @@ -1965,10 +1950,15 @@ void PhaseCCP::analyze() { _types.map(i, Type::TOP); } + // CCP worklist is placed on a local arena, so that we can allow ResourceMarks on "Compile::current()->resource_arena()". + // We also do not want to put the worklist on "Compile::current()->comp_arena()", as that one only gets de-allocated after + // Compile is over. The local arena gets de-allocated at the end of its scope. + ResourceArea local_arena(mtCompiler); + Unique_Node_List worklist(&local_arena); + DEBUG_ONLY(Unique_Node_List worklist_verify(&local_arena);) + // Push root onto worklist - Unique_Node_List worklist; worklist.push(C->root()); - DEBUG_ONLY(Unique_Node_List worklist_verify;) assert(_root_and_safepoints.size() == 0, "must be empty (unused)"); _root_and_safepoints.push(C->root()); @@ -2149,17 +2139,18 @@ void PhaseCCP::push_load_barrier(Unique_Node_List& worklist, const BarrierSetC2* // AndI/L::Value() optimizes patterns similar to (v << 2) & 3 to zero if they are bitwise disjoint. // Add the AndI/L nodes back to the worklist to re-apply Value() in case the shift value changed. 
+// Pattern: parent -> LShift (use) -> ConstraintCast* -> And void PhaseCCP::push_and(Unique_Node_List& worklist, const Node* parent, const Node* use) const { uint use_op = use->Opcode(); if ((use_op == Op_LShiftI || use_op == Op_LShiftL) && use->in(2) == parent) { // is shift value (right-hand side of LShift) - for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) { - Node* and_node = use->fast_out(i); - uint and_node_op = and_node->Opcode(); - if (and_node_op == Op_AndI || and_node_op == Op_AndL) { - push_if_not_bottom_type(worklist, and_node); + auto push_and_uses_to_worklist = [&](Node* n){ + uint opc = n->Opcode(); + if (opc == Op_AndI || opc == Op_AndL) { + push_if_not_bottom_type(worklist, n); } - } + }; + ConstraintCastNode::visit_uncasted_uses(use, push_and_uses_to_worklist); } } diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp index 9a57d1743b6..e473fa68f0f 100644 --- a/src/hotspot/share/opto/runtime.cpp +++ b/src/hotspot/share/opto/runtime.cpp @@ -111,8 +111,10 @@ address OptoRuntime::_slow_arraycopy_Java = nullptr; address OptoRuntime::_register_finalizer_Java = nullptr; #if INCLUDE_JVMTI address OptoRuntime::_notify_jvmti_object_alloc = nullptr; -address OptoRuntime::_notify_jvmti_mount = nullptr; -address OptoRuntime::_notify_jvmti_unmount = nullptr; +address OptoRuntime::_notify_jvmti_vthread_start = nullptr; +address OptoRuntime::_notify_jvmti_vthread_end = nullptr; +address OptoRuntime::_notify_jvmti_vthread_mount = nullptr; +address OptoRuntime::_notify_jvmti_vthread_unmount = nullptr; #endif ExceptionBlob* OptoRuntime::_exception_blob; @@ -155,8 +157,10 @@ bool OptoRuntime::generate(ciEnv* env) { gen(env, _multianewarrayN_Java , multianewarrayN_Type , multianewarrayN_C , 0 , true, false); #if INCLUDE_JVMTI gen(env, _notify_jvmti_object_alloc , notify_jvmti_object_alloc_Type, SharedRuntime::notify_jvmti_object_alloc, 0, true, false); - gen(env, _notify_jvmti_mount , notify_jvmti_Type , 
SharedRuntime::notify_jvmti_mount, 0 , true, false); - gen(env, _notify_jvmti_unmount , notify_jvmti_Type , SharedRuntime::notify_jvmti_unmount, 0 , true, false); + gen(env, _notify_jvmti_vthread_start , notify_jvmti_vthread_Type , SharedRuntime::notify_jvmti_vthread_start, 0, true, false); + gen(env, _notify_jvmti_vthread_end , notify_jvmti_vthread_Type , SharedRuntime::notify_jvmti_vthread_end, 0, true, false); + gen(env, _notify_jvmti_vthread_mount , notify_jvmti_vthread_Type , SharedRuntime::notify_jvmti_vthread_mount, 0, true, false); + gen(env, _notify_jvmti_vthread_unmount , notify_jvmti_vthread_Type , SharedRuntime::notify_jvmti_vthread_unmount, 0, true, false); #endif gen(env, _complete_monitor_locking_Java , complete_monitor_enter_Type , SharedRuntime::complete_monitor_locking_C, 0, false, false); gen(env, _monitor_notify_Java , monitor_notify_Type , monitor_notify_C , 0 , false, false); @@ -491,6 +495,21 @@ const TypeFunc *OptoRuntime::notify_jvmti_object_alloc_Type() { return TypeFunc::make(domain, range); } + +const TypeFunc *OptoRuntime::notify_jvmti_vthread_Type() { + // create input type (domain) + const Type **fields = TypeTuple::fields(2); + fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop + fields[TypeFunc::Parms+1] = TypeInt::BOOL; // jboolean + const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields); + + // no result type needed + fields = TypeTuple::fields(1); + fields[TypeFunc::Parms+0] = NULL; // void + const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); + + return TypeFunc::make(domain,range); +} #endif const TypeFunc *OptoRuntime::athrow_Type() { @@ -1670,24 +1689,6 @@ const TypeFunc *OptoRuntime::class_id_load_barrier_Type() { } #endif -#if INCLUDE_JVMTI -const TypeFunc *OptoRuntime::notify_jvmti_Type() { - // create input type (domain) - const Type **fields = TypeTuple::fields(3); - fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // VirtualThread oop - fields[TypeFunc::Parms+1] = 
TypeInt::BOOL; // jboolean - fields[TypeFunc::Parms+2] = TypeInt::BOOL; // jboolean - const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields); - - // no result type needed - fields = TypeTuple::fields(1); - fields[TypeFunc::Parms+0] = NULL; // void - const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); - - return TypeFunc::make(domain,range); -} -#endif - //----------------------------------------------------------------------------- // Dtrace support. entry and exit probes have the same signature const TypeFunc *OptoRuntime::dtrace_method_entry_exit_Type() { diff --git a/src/hotspot/share/opto/runtime.hpp b/src/hotspot/share/opto/runtime.hpp index cc52e02a318..f54ce4241eb 100644 --- a/src/hotspot/share/opto/runtime.hpp +++ b/src/hotspot/share/opto/runtime.hpp @@ -137,8 +137,10 @@ class OptoRuntime : public AllStatic { static address _register_finalizer_Java; #if INCLUDE_JVMTI static address _notify_jvmti_object_alloc; - static address _notify_jvmti_mount; - static address _notify_jvmti_unmount; + static address _notify_jvmti_vthread_start; + static address _notify_jvmti_vthread_end; + static address _notify_jvmti_vthread_mount; + static address _notify_jvmti_vthread_unmount; #endif // @@ -215,8 +217,10 @@ private: static address register_finalizer_Java() { return _register_finalizer_Java; } #if INCLUDE_JVMTI static address notify_jvmti_object_alloc() { return _notify_jvmti_object_alloc; } - static address notify_jvmti_mount() { return _notify_jvmti_mount; } - static address notify_jvmti_unmount() { return _notify_jvmti_unmount; } + static address notify_jvmti_vthread_start() { return _notify_jvmti_vthread_start; } + static address notify_jvmti_vthread_end() { return _notify_jvmti_vthread_end; } + static address notify_jvmti_vthread_mount() { return _notify_jvmti_vthread_mount; } + static address notify_jvmti_vthread_unmount() { return _notify_jvmti_vthread_unmount; } #endif static ExceptionBlob* exception_blob() { return _exception_blob; 
} @@ -306,7 +310,7 @@ private: JFR_ONLY(static const TypeFunc* class_id_load_barrier_Type();) #if INCLUDE_JVMTI static const TypeFunc* notify_jvmti_object_alloc_Type(); - static const TypeFunc* notify_jvmti_Type(); + static const TypeFunc* notify_jvmti_vthread_Type(); #endif // Dtrace support diff --git a/src/hotspot/share/opto/superword.cpp b/src/hotspot/share/opto/superword.cpp index a782fedfec5..af8f08f722b 100644 --- a/src/hotspot/share/opto/superword.cpp +++ b/src/hotspot/share/opto/superword.cpp @@ -72,6 +72,7 @@ SuperWord::SuperWord(PhaseIdealLoop* phase) : _lpt(nullptr), // loop tree node _lp(nullptr), // CountedLoopNode _pre_loop_end(nullptr), // Pre loop CountedLoopEndNode + _loop_reductions(arena()), // reduction nodes in the current loop _bb(nullptr), // basic block _iv(nullptr), // induction var _race_possible(false), // cases where SDMU is true @@ -111,7 +112,17 @@ bool SuperWord::transform_loop(IdealLoopTree* lpt, bool do_optimization) { return false; // skip malformed counted loop } - if (cl->is_rce_post_loop() && cl->is_reduction_loop()) { + // Initialize simple data used by reduction marking early. + set_lpt(lpt); + set_lp(cl); + // For now, define one block which is the entire loop body. 
+ set_bb(cl); + + if (SuperWordReductions) { + mark_reductions(); + } + + if (cl->is_rce_post_loop() && is_marked_reduction_loop()) { // Post loop vectorization doesn't support reductions return false; } @@ -167,18 +178,12 @@ bool SuperWord::transform_loop(IdealLoopTree* lpt, bool do_optimization) { init(); // initialize data structures - set_lpt(lpt); - set_lp(cl); - - // For now, define one block which is the entire loop body - set_bb(cl); - bool success = true; if (do_optimization) { assert(_packset.length() == 0, "packset must be empty"); success = SLP_extract(); if (PostLoopMultiversioning) { - if (cl->is_vectorized_loop() && cl->is_main_loop() && !cl->is_reduction_loop()) { + if (cl->is_vectorized_loop() && cl->is_main_loop() && !is_marked_reduction_loop()) { IdealLoopTree *lpt_next = cl->is_strip_mined() ? lpt->_parent->_next : lpt->_next; CountedLoopNode *cl_next = lpt_next->_head->as_CountedLoop(); // Main loop SLP works well for manually unrolled loops. But post loop @@ -223,7 +228,7 @@ void SuperWord::unrolling_analysis(int &local_loop_unroll_factor) { for (uint i = 0; i < lpt()->_body.size(); i++) { Node* n = lpt()->_body.at(i); if (n == cl->incr() || - n->is_reduction() || + is_marked_reduction(n) || n->is_AddP() || n->is_Cmp() || n->is_Bool() || @@ -411,6 +416,139 @@ void SuperWord::unrolling_analysis(int &local_loop_unroll_factor) { } } +bool SuperWord::is_reduction(const Node* n) { + if (!is_reduction_operator(n)) { + return false; + } + // Test whether there is a reduction cycle via every edge index + // (typically indices 1 and 2). + for (uint input = 1; input < n->req(); input++) { + if (in_reduction_cycle(n, input)) { + return true; + } + } + return false; +} + +bool SuperWord::is_reduction_operator(const Node* n) { + int opc = n->Opcode(); + return (opc != ReductionNode::opcode(opc, n->bottom_type()->basic_type())); +} + +bool SuperWord::in_reduction_cycle(const Node* n, uint input) { + // First find input reduction path to phi node. 
+ auto has_my_opcode = [&](const Node* m){ return m->Opcode() == n->Opcode(); }; + PathEnd path_to_phi = find_in_path(n, input, LoopMaxUnroll, has_my_opcode, + [&](const Node* m) { return m->is_Phi(); }); + const Node* phi = path_to_phi.first; + if (phi == nullptr) { + return false; + } + // If there is an input reduction path from the phi's loop-back to n, then n + // is part of a reduction cycle. + const Node* first = phi->in(LoopNode::LoopBackControl); + PathEnd path_from_phi = find_in_path(first, input, LoopMaxUnroll, has_my_opcode, + [&](const Node* m) { return m == n; }); + return path_from_phi.first != nullptr; +} + +Node* SuperWord::original_input(const Node* n, uint i) { + if (n->has_swapped_edges()) { + assert(n->is_Add() || n->is_Mul(), "n should be commutative"); + if (i == 1) { + return n->in(2); + } else if (i == 2) { + return n->in(1); + } + } + return n->in(i); +} + +void SuperWord::mark_reductions() { + + _loop_reductions.clear(); + + // Iterate through all phi nodes associated to the loop and search for + // reduction cycles in the basic block. + for (DUIterator_Fast imax, i = lp()->fast_outs(imax); i < imax; i++) { + const Node* phi = lp()->fast_out(i); + if (!phi->is_Phi()) { + continue; + } + if (phi->outcnt() == 0) { + continue; + } + if (phi == iv()) { + continue; + } + // The phi's loop-back is considered the first node in the reduction cycle. + const Node* first = phi->in(LoopNode::LoopBackControl); + if (first == nullptr) { + continue; + } + // Test that the node fits the standard pattern for a reduction operator. + if (!is_reduction_operator(first)) { + continue; + } + // Test that 'first' is the beginning of a reduction cycle ending in 'phi'. + // To contain the number of searched paths, assume that all nodes in a + // reduction cycle are connected via the same edge index, modulo swapped + // inputs. This assumption is realistic because reduction cycles usually + // consist of nodes cloned by loop unrolling. 
+ int reduction_input = -1; + int path_nodes = -1; + for (uint input = 1; input < first->req(); input++) { + // Test whether there is a reduction path in the basic block from 'first' + // to the phi node following edge index 'input'. + PathEnd path = + find_in_path( + first, input, lpt()->_body.size(), + [&](const Node* n) { return n->Opcode() == first->Opcode() && in_bb(n); }, + [&](const Node* n) { return n == phi; }); + if (path.first != nullptr) { + reduction_input = input; + path_nodes = path.second; + break; + } + } + if (reduction_input == -1) { + continue; + } + // Test that reduction nodes do not have any users in the loop besides their + // reduction cycle successors. + const Node* current = first; + const Node* succ = phi; // current's successor in the reduction cycle. + bool used_in_loop = false; + for (int i = 0; i < path_nodes; i++) { + for (DUIterator_Fast jmax, j = current->fast_outs(jmax); j < jmax; j++) { + Node* u = current->fast_out(j); + if (!in_bb(u)) { + continue; + } + if (u == succ) { + continue; + } + used_in_loop = true; + break; + } + if (used_in_loop) { + break; + } + succ = current; + current = original_input(current, reduction_input); + } + if (used_in_loop) { + continue; + } + // Reduction cycle found. Mark all nodes in the found path as reductions. + current = first; + for (int i = 0; i < path_nodes; i++) { + _loop_reductions.set(current->_idx); + current = original_input(current, reduction_input); + } + } +} + //------------------------------SLP_extract--------------------------- // Extract the superword level parallelism // @@ -1378,7 +1516,7 @@ bool SuperWord::independent(Node* s1, Node* s2) { // those nodes, and have not found another node from the pack, we know // that all nodes in the pack are independent. 
Node* SuperWord::find_dependence(Node_List* p) { - if (p->at(0)->is_reduction()) { + if (is_marked_reduction(p->at(0))) { return nullptr; // ignore reductions } ResourceMark rm; @@ -1436,7 +1574,7 @@ bool SuperWord::reduction(Node* s1, Node* s2) { int d1 = depth(s1); int d2 = depth(s2); if (d2 > d1) { - if (s1->is_reduction() && s2->is_reduction()) { + if (is_marked_reduction(s1) && is_marked_reduction(s2)) { // This is an ordered set, so s1 should define s2 for (DUIterator_Fast imax, i = s1->fast_outs(imax); i < imax; i++) { Node* t1 = s1->fast_out(i); @@ -1653,7 +1791,7 @@ void SuperWord::order_def_uses(Node_List* p) { if (s1->is_Store()) return; // reductions are always managed beforehand - if (s1->is_reduction()) return; + if (is_marked_reduction(s1)) return; for (DUIterator_Fast imax, i = s1->fast_outs(imax); i < imax; i++) { Node* t1 = s1->fast_out(i); @@ -1689,15 +1827,15 @@ void SuperWord::order_def_uses(Node_List* p) { bool SuperWord::opnd_positions_match(Node* d1, Node* u1, Node* d2, Node* u2) { // check reductions to see if they are marshalled to represent the reduction // operator in a specified opnd - if (u1->is_reduction() && u2->is_reduction()) { + if (is_marked_reduction(u1) && is_marked_reduction(u2)) { // ensure reductions have phis and reduction definitions feeding the 1st operand Node* first = u1->in(2); - if (first->is_Phi() || first->is_reduction()) { + if (first->is_Phi() || is_marked_reduction(first)) { u1->swap_edges(1, 2); } // ensure reductions have phis and reduction definitions feeding the 1st operand first = u2->in(2); - if (first->is_Phi() || first->is_reduction()) { + if (first->is_Phi() || is_marked_reduction(first)) { u2->swap_edges(1, 2); } return true; @@ -1920,7 +2058,7 @@ void SuperWord::filter_packs() { remove_pack_at(i); } Node *n = pk->at(0); - if (n->is_reduction()) { + if (is_marked_reduction(n)) { _num_reductions++; } else { _num_work_vecs++; @@ -2171,7 +2309,7 @@ bool SuperWord::implemented(Node_List* p) { if (p0 != 
nullptr) { int opc = p0->Opcode(); uint size = p->size(); - if (p0->is_reduction()) { + if (is_marked_reduction(p0)) { const Type *arith_type = p0->bottom_type(); // Length 2 reductions of INT/LONG do not offer performance benefits if (((arith_type->basic_type() == T_INT) || (arith_type->basic_type() == T_LONG)) && (size == 2)) { @@ -2261,13 +2399,13 @@ bool SuperWord::profitable(Node_List* p) { } } // Check if reductions are connected - if (p0->is_reduction()) { + if (is_marked_reduction(p0)) { Node* second_in = p0->in(2); Node_List* second_pk = my_pack(second_in); if ((second_pk == nullptr) || (_num_work_vecs == _num_reductions)) { - // Remove reduction flag if no parent pack or if not enough work + // Unmark reduction if no parent pack or if not enough work // to cover reduction expansion overhead - p0->remove_flag(Node::Flag_is_reduction); + _loop_reductions.remove(p0->_idx); return false; } else if (second_pk->size() != p->size()) { return false; @@ -2299,7 +2437,7 @@ bool SuperWord::profitable(Node_List* p) { if (def == n) { // Reductions should only have a Phi use at the loop head or a non-phi use // outside of the loop if it is the last element of the pack (e.g. SafePoint). 
- if (def->is_reduction() && + if (is_marked_reduction(def) && ((use->is_Phi() && use->in(0) == _lpt->_head) || (!_lpt->is_member(_phase->get_loop(_phase->ctrl_or_self(use))) && i == p->size()-1))) { continue; @@ -2442,7 +2580,7 @@ public: for (DepPreds preds(n, dg); !preds.done(); preds.next()) { Node* pred = preds.current(); int pred_pid = get_pid_or_zero(pred); - if (pred_pid == pid && n->is_reduction()) { + if (pred_pid == pid && _slp->is_marked_reduction(n)) { continue; // reduction -> self-cycle is not a cyclic dependency } // Only add edges once, and only for mapped nodes (in block) @@ -2992,7 +3130,7 @@ bool SuperWord::output() { } else if (n->req() == 3 && !is_cmov_pack(p)) { // Promote operands to vector Node* in1 = nullptr; - bool node_isa_reduction = n->is_reduction(); + bool node_isa_reduction = is_marked_reduction(n); if (node_isa_reduction) { // the input to the first reduction operation is retained in1 = low_adr->in(1); @@ -3246,7 +3384,7 @@ bool SuperWord::output() { Node* SuperWord::create_post_loop_vmask() { CountedLoopNode *cl = lpt()->_head->as_CountedLoop(); assert(cl->is_rce_post_loop(), "Must be an rce post loop"); - assert(!cl->is_reduction_loop(), "no vector reduction in post loop"); + assert(!is_marked_reduction_loop(), "no vector reduction in post loop"); assert(abs(cl->stride_con()) == 1, "post loop stride can only be +/-1"); // Collect vector element types of all post loop packs. 
Also collect @@ -3524,7 +3662,7 @@ void SuperWord::insert_extracts(Node_List* p) { _n_idx_list.pop(); Node* def = use->in(idx); - if (def->is_reduction()) continue; + if (is_marked_reduction(def)) continue; // Insert extract operation _igvn.hash_delete(def); @@ -3547,7 +3685,7 @@ void SuperWord::insert_extracts(Node_List* p) { bool SuperWord::is_vector_use(Node* use, int u_idx) { Node_List* u_pk = my_pack(use); if (u_pk == nullptr) return false; - if (use->is_reduction()) return true; + if (is_marked_reduction(use)) return true; Node* def = use->in(u_idx); Node_List* d_pk = my_pack(def); if (d_pk == nullptr) { @@ -3708,7 +3846,7 @@ bool SuperWord::construct_bb() { if (in_bb(use) && !visited_test(use) && // Don't go around backedge (!use->is_Phi() || n == entry)) { - if (use->is_reduction()) { + if (is_marked_reduction(use)) { // First see if we can map the reduction on the given system we are on, then // make a data entry operation for each reduction we see. BasicType bt = use->bottom_type()->basic_type(); @@ -4345,10 +4483,6 @@ void SuperWord::init() { _iteration_last.clear(); _node_info.clear(); _align_to_ref = nullptr; - _lpt = nullptr; - _lp = nullptr; - _bb = nullptr; - _iv = nullptr; _race_possible = 0; _early_return = false; _num_work_vecs = 0; diff --git a/src/hotspot/share/opto/superword.hpp b/src/hotspot/share/opto/superword.hpp index 1317ac9bb81..70e97e9444c 100644 --- a/src/hotspot/share/opto/superword.hpp +++ b/src/hotspot/share/opto/superword.hpp @@ -29,6 +29,7 @@ #include "opto/phaseX.hpp" #include "opto/vectornode.hpp" #include "utilities/growableArray.hpp" +#include "utilities/pair.hpp" #include "libadt/dict.hpp" // @@ -357,6 +358,7 @@ class SuperWord : public ResourceObj { IdealLoopTree* _lpt; // Current loop tree node CountedLoopNode* _lp; // Current CountedLoopNode CountedLoopEndNode* _pre_loop_end; // Current CountedLoopEndNode of pre loop + VectorSet _loop_reductions; // Reduction nodes in the current loop Node* _bb; // Current basic block 
PhiNode* _iv; // Induction var bool _race_possible; // In cases where SDMU is true @@ -471,6 +473,62 @@ class SuperWord : public ResourceObj { // methods + typedef const Pair PathEnd; + + // Search for a path P = (n_1, n_2, ..., n_k) such that: + // - original_input(n_i, input) = n_i+1 for all 1 <= i < k, + // - path(n) for all n in P, + // - k <= max, and + // - there exists a node e such that original_input(n_k, input) = e and end(e). + // Return , if P is found, or otherwise. + // Note that original_input(n, i) has the same behavior as n->in(i) except + // that it commutes the inputs of binary nodes whose edges have been swapped. + template + static PathEnd find_in_path(const Node *n1, uint input, int max, + NodePredicate1 path, NodePredicate2 end) { + const PathEnd no_path(nullptr, -1); + const Node* current = n1; + int k = 0; + for (int i = 0; i <= max; i++) { + if (current == nullptr) { + return no_path; + } + if (end(current)) { + return PathEnd(current, k); + } + if (!path(current)) { + return no_path; + } + current = original_input(current, input); + k++; + } + return no_path; + } + +public: + // Whether n is a reduction operator and part of a reduction cycle. + // This function can be used for individual queries outside the SLP analysis, + // e.g. to inform matching in target-specific code. Otherwise, the + // almost-equivalent but faster SuperWord::mark_reductions() is preferable. + static bool is_reduction(const Node* n); + // Whether n is marked as a reduction node. + bool is_marked_reduction(Node* n) { return _loop_reductions.test(n->_idx); } + // Whether the current loop has any reduction node. + bool is_marked_reduction_loop() { return !_loop_reductions.is_empty(); } +private: + // Whether n is a standard reduction operator. + static bool is_reduction_operator(const Node* n); + // Whether n is part of a reduction cycle via the 'input' edge index. To bound + // the search, constrain the size of reduction cycles to LoopMaxUnroll. 
+ static bool in_reduction_cycle(const Node* n, uint input); + // Reference to the i'th input node of n, commuting the inputs of binary nodes + // whose edges have been swapped. Assumes n is a commutative operation. + static Node* original_input(const Node* n, uint i); + // Find and mark reductions in a loop. Running mark_reductions() is similar to + // querying is_reduction(n) for every n in the SuperWord loop, but stricter in + // that it assumes counted loops and requires that reduction nodes are not + // used within the loop except by their reduction cycle predecessors. + void mark_reductions(); // Extract the superword level parallelism bool SLP_extract(); // Find the adjacent memory references and create pack pairs for them. diff --git a/src/hotspot/share/opto/vector.cpp b/src/hotspot/share/opto/vector.cpp index 97b44e37f06..35ae3ab330f 100644 --- a/src/hotspot/share/opto/vector.cpp +++ b/src/hotspot/share/opto/vector.cpp @@ -299,9 +299,11 @@ void PhaseVector::scalarize_vbox_node(VectorBoxNode* vec_box) { void PhaseVector::expand_vbox_node(VectorBoxNode* vec_box) { if (vec_box->outcnt() > 0) { + VectorSet visited; Node* vbox = vec_box->in(VectorBoxNode::Box); Node* vect = vec_box->in(VectorBoxNode::Value); - Node* result = expand_vbox_node_helper(vbox, vect, vec_box->box_type(), vec_box->vec_type()); + Node* result = expand_vbox_node_helper(vbox, vect, vec_box->box_type(), + vec_box->vec_type(), visited); C->gvn_replace_by(vec_box, result); C->print_method(PHASE_EXPAND_VBOX, 3, vec_box); } @@ -311,39 +313,64 @@ void PhaseVector::expand_vbox_node(VectorBoxNode* vec_box) { Node* PhaseVector::expand_vbox_node_helper(Node* vbox, Node* vect, const TypeInstPtr* box_type, - const TypeVect* vect_type) { - if (vbox->is_Phi() && vect->is_Phi()) { - assert(vbox->as_Phi()->region() == vect->as_Phi()->region(), ""); - Node* new_phi = new PhiNode(vbox->as_Phi()->region(), box_type); - for (uint i = 1; i < vbox->req(); i++) { - Node* new_box = 
expand_vbox_node_helper(vbox->in(i), vect->in(i), box_type, vect_type); - new_phi->set_req(i, new_box); - } - new_phi = C->initial_gvn()->transform(new_phi); - return new_phi; - } else if (vbox->is_Phi() && (vect->is_Vector() || vect->is_LoadVector())) { - // Handle the case when the allocation input to VectorBoxNode is a phi - // but the vector input is not, which can definitely be the case if the - // vector input has been value-numbered. It seems to be safe to do by - // construction because VectorBoxNode and VectorBoxAllocate come in a - // specific order as a result of expanding an intrinsic call. After that, if - // any of the inputs to VectorBoxNode are value-numbered they can only - // move up and are guaranteed to dominate. - Node* new_phi = new PhiNode(vbox->as_Phi()->region(), box_type); - for (uint i = 1; i < vbox->req(); i++) { - Node* new_box = expand_vbox_node_helper(vbox->in(i), vect, box_type, vect_type); - new_phi->set_req(i, new_box); - } - new_phi = C->initial_gvn()->transform(new_phi); - return new_phi; - } else if (vbox->is_Proj() && vbox->in(0)->Opcode() == Op_VectorBoxAllocate) { + const TypeVect* vect_type, + VectorSet &visited) { + // JDK-8304948 shows an example that there may be a cycle in the graph. + if (visited.test_set(vbox->_idx)) { + assert(vbox->is_Phi(), "should be phi"); + return vbox; // already visited + } + + // Handle the case when the allocation input to VectorBoxNode is a Proj. + // This is the normal case before expanding. + if (vbox->is_Proj() && vbox->in(0)->Opcode() == Op_VectorBoxAllocate) { VectorBoxAllocateNode* vbox_alloc = static_cast(vbox->in(0)); return expand_vbox_alloc_node(vbox_alloc, vect, box_type, vect_type); - } else { - assert(!vbox->is_Phi(), ""); - // TODO: assert that expanded vbox is initialized with the same value (vect). - return vbox; // already expanded } + + // Handle the case when both the allocation input and vector input to + // VectorBoxNode are Phi. 
This case is generated after the transformation of + // Phi: Phi (VectorBox1 VectorBox2) => VectorBox (Phi1 Phi2). + // With this optimization, the relative two allocation inputs of VectorBox1 and + // VectorBox2 are gathered into Phi1 now. Similarly, the original vector + // inputs of two VectorBox nodes are in Phi2. + // + // See PhiNode::merge_through_phi in cfg.cpp for more details. + if (vbox->is_Phi() && vect->is_Phi()) { + assert(vbox->as_Phi()->region() == vect->as_Phi()->region(), ""); + for (uint i = 1; i < vbox->req(); i++) { + Node* new_box = expand_vbox_node_helper(vbox->in(i), vect->in(i), + box_type, vect_type, visited); + if (!new_box->is_Phi()) { + C->initial_gvn()->hash_delete(vbox); + vbox->set_req(i, new_box); + } + } + return C->initial_gvn()->transform(vbox); + } + + // Handle the case when the allocation input to VectorBoxNode is a phi + // but the vector input is not, which can definitely be the case if the + // vector input has been value-numbered. It seems to be safe to do by + // construction because VectorBoxNode and VectorBoxAllocate come in a + // specific order as a result of expanding an intrinsic call. After that, if + // any of the inputs to VectorBoxNode are value-numbered they can only + // move up and are guaranteed to dominate. + if (vbox->is_Phi() && (vect->is_Vector() || vect->is_LoadVector())) { + for (uint i = 1; i < vbox->req(); i++) { + Node* new_box = expand_vbox_node_helper(vbox->in(i), vect, + box_type, vect_type, visited); + if (!new_box->is_Phi()) { + C->initial_gvn()->hash_delete(vbox); + vbox->set_req(i, new_box); + } + } + return C->initial_gvn()->transform(vbox); + } + + assert(!vbox->is_Phi(), "should be expanded"); + // TODO: assert that expanded vbox is initialized with the same value (vect). 
+ return vbox; // already expanded } Node* PhaseVector::expand_vbox_alloc_node(VectorBoxAllocateNode* vbox_alloc, diff --git a/src/hotspot/share/opto/vector.hpp b/src/hotspot/share/opto/vector.hpp index 067a2280d30..e76b4df376d 100644 --- a/src/hotspot/share/opto/vector.hpp +++ b/src/hotspot/share/opto/vector.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,8 @@ class PhaseVector : public Phase { Node* expand_vbox_node_helper(Node* vbox, Node* vect, const TypeInstPtr* box_type, - const TypeVect* vect_type); + const TypeVect* vect_type, + VectorSet &visited); Node* expand_vbox_alloc_node(VectorBoxAllocateNode* vbox_alloc, Node* value, const TypeInstPtr* box_type, diff --git a/src/hotspot/share/prims/downcallLinker.cpp b/src/hotspot/share/prims/downcallLinker.cpp index ec20fd17d80..b2d5ae2e551 100644 --- a/src/hotspot/share/prims/downcallLinker.cpp +++ b/src/hotspot/share/prims/downcallLinker.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -41,12 +41,12 @@ void DowncallLinker::capture_state(int32_t* value_ptr, int captured_state_mask) #ifdef _WIN64 if (captured_state_mask & GET_LAST_ERROR) { *value_ptr = GetLastError(); - value_ptr++; } + value_ptr++; if (captured_state_mask & WSA_GET_LAST_ERROR) { *value_ptr = WSAGetLastError(); - value_ptr++; } + value_ptr++; #endif if (captured_state_mask & ERRNO) { *value_ptr = errno; diff --git a/src/hotspot/share/prims/downcallLinker.hpp b/src/hotspot/share/prims/downcallLinker.hpp index 86849c05158..6840a3c7f69 100644 --- a/src/hotspot/share/prims/downcallLinker.hpp +++ b/src/hotspot/share/prims/downcallLinker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,8 @@ public: const GrowableArray& input_registers, const GrowableArray& output_registers, bool needs_return_buffer, - int captured_state_mask); + int captured_state_mask, + bool needs_transition); static void capture_state(int32_t* value_ptr, int captured_state_mask); }; diff --git a/src/hotspot/share/prims/foreignGlobals.hpp b/src/hotspot/share/prims/foreignGlobals.hpp index d0160f23226..009fd974d0b 100644 --- a/src/hotspot/share/prims/foreignGlobals.hpp +++ b/src/hotspot/share/prims/foreignGlobals.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -76,6 +76,8 @@ private: static void parse_register_array(objArrayOop jarray, StorageType type_index, GrowableArray& array, T (*converter)(int)); public: + static bool is_foreign_linker_supported(); + static const ABIDescriptor parse_abi_descriptor(jobject jabi); static const CallRegs parse_call_regs(jobject jconv); static VMStorage parse_vmstorage(oop storage); diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp index 221c5b95d70..7df2bc634c9 100644 --- a/src/hotspot/share/prims/jvm.cpp +++ b/src/hotspot/share/prims/jvm.cpp @@ -63,6 +63,7 @@ #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "prims/foreignGlobals.hpp" #include "prims/jvm_misc.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.inline.hpp" @@ -3043,9 +3044,9 @@ JVM_LEAF(void, JVM_Yield(JNIEnv *env, jclass threadClass)) os::naked_yield(); JVM_END -JVM_ENTRY(void, JVM_Sleep(JNIEnv* env, jclass threadClass, jlong millis)) - if (millis < 0) { - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); +JVM_ENTRY(void, JVM_Sleep(JNIEnv* env, jclass threadClass, jlong nanos)) + if (nanos < 0) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nanosecond timeout value out of range"); } if (thread->is_interrupted(true) && !HAS_PENDING_EXCEPTION) { @@ -3056,14 +3057,14 @@ JVM_ENTRY(void, JVM_Sleep(JNIEnv* env, jclass threadClass, jlong millis)) // And set new thread state to SLEEPING. 
JavaThreadSleepState jtss(thread); - HOTSPOT_THREAD_SLEEP_BEGIN(millis); + HOTSPOT_THREAD_SLEEP_BEGIN(nanos / NANOSECS_PER_MILLISEC); - if (millis == 0) { + if (nanos == 0) { os::naked_yield(); } else { ThreadState old_state = thread->osthread()->get_state(); thread->osthread()->set_state(SLEEPING); - if (!thread->sleep(millis)) { // interrupted + if (!thread->sleep_nanos(nanos)) { // interrupted // An asynchronous exception could have been thrown on // us while we were sleeping. We do not overwrite those. if (!HAS_PENDING_EXCEPTION) { @@ -3462,6 +3463,10 @@ JVM_LEAF(jboolean, JVM_IsContinuationsSupported(void)) return VMContinuations ? JNI_TRUE : JNI_FALSE; JVM_END +JVM_LEAF(jboolean, JVM_IsForeignLinkerSupported(void)) + return ForeignGlobals::is_foreign_linker_supported() ? JNI_TRUE : JNI_FALSE; +JVM_END + // String support /////////////////////////////////////////////////////////////////////////// JVM_ENTRY(jstring, JVM_InternString(JNIEnv *env, jstring str)) @@ -3905,24 +3910,53 @@ JVM_LEAF(jint, JVM_FindSignal(const char *name)) return os::get_signal_number(name); JVM_END -// If notifications are disabled then just update the VTMS transition bit and return. -// Otherwise, the bit is updated in the given jvmtiVTMSTransitionDisabler function call. 
-JVM_ENTRY(void, JVM_VirtualThreadMount(JNIEnv* env, jobject vthread, jboolean hide, jboolean first_mount)) +JVM_ENTRY(void, JVM_VirtualThreadStart(JNIEnv* env, jobject vthread)) #if INCLUDE_JVMTI if (!DoJVMTIVirtualThreadTransitions) { assert(!JvmtiExport::can_support_virtual_threads(), "sanity check"); return; } - if (!JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) { - thread->set_is_in_VTMS_transition(hide); - oop vt = JNIHandles::resolve_external_guard(vthread); - java_lang_Thread::set_is_in_VTMS_transition(vt, hide); + if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) { + JvmtiVTMSTransitionDisabler::VTMS_vthread_start(vthread); + } else { + // set VTMS transition bit value in JavaThread and java.lang.VirtualThread object + JvmtiVTMSTransitionDisabler::set_is_in_VTMS_transition(thread, vthread, false); + } +#else + fatal("Should only be called with JVMTI enabled"); +#endif +JVM_END + +JVM_ENTRY(void, JVM_VirtualThreadEnd(JNIEnv* env, jobject vthread)) +#if INCLUDE_JVMTI + if (!DoJVMTIVirtualThreadTransitions) { + assert(!JvmtiExport::can_support_virtual_threads(), "sanity check"); return; } - if (hide) { - JvmtiVTMSTransitionDisabler::VTMS_mount_begin(vthread, first_mount); + if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) { + JvmtiVTMSTransitionDisabler::VTMS_vthread_end(vthread); } else { - JvmtiVTMSTransitionDisabler::VTMS_mount_end(vthread, first_mount); + // set VTMS transition bit value in JavaThread and java.lang.VirtualThread object + JvmtiVTMSTransitionDisabler::set_is_in_VTMS_transition(thread, vthread, true); + } +#else + fatal("Should only be called with JVMTI enabled"); +#endif +JVM_END + +// If notifications are disabled then just update the VTMS transition bit and return. +// Otherwise, the bit is updated in the given jvmtiVTMSTransitionDisabler function call. 
+JVM_ENTRY(void, JVM_VirtualThreadMount(JNIEnv* env, jobject vthread, jboolean hide)) +#if INCLUDE_JVMTI + if (!DoJVMTIVirtualThreadTransitions) { + assert(!JvmtiExport::can_support_virtual_threads(), "sanity check"); + return; + } + if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) { + JvmtiVTMSTransitionDisabler::VTMS_vthread_mount(vthread, hide); + } else { + // set VTMS transition bit value in JavaThread and java.lang.VirtualThread object + JvmtiVTMSTransitionDisabler::set_is_in_VTMS_transition(thread, vthread, hide); } #else fatal("Should only be called with JVMTI enabled"); @@ -3931,22 +3965,17 @@ JVM_END // If notifications are disabled then just update the VTMS transition bit and return. // Otherwise, the bit is updated in the given jvmtiVTMSTransitionDisabler function call below. -JVM_ENTRY(void, JVM_VirtualThreadUnmount(JNIEnv* env, jobject vthread, jboolean hide, jboolean last_unmount)) +JVM_ENTRY(void, JVM_VirtualThreadUnmount(JNIEnv* env, jobject vthread, jboolean hide)) #if INCLUDE_JVMTI if (!DoJVMTIVirtualThreadTransitions) { assert(!JvmtiExport::can_support_virtual_threads(), "sanity check"); return; } - if (!JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) { - thread->set_is_in_VTMS_transition(hide); - oop vt = JNIHandles::resolve_external_guard(vthread); - java_lang_Thread::set_is_in_VTMS_transition(vt, hide); - return; - } - if (hide) { - JvmtiVTMSTransitionDisabler::VTMS_unmount_begin(vthread, last_unmount); + if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) { + JvmtiVTMSTransitionDisabler::VTMS_vthread_unmount(vthread, hide); } else { - JvmtiVTMSTransitionDisabler::VTMS_unmount_end(vthread, last_unmount); + // set VTMS transition bit value in JavaThread and java.lang.VirtualThread object + JvmtiVTMSTransitionDisabler::set_is_in_VTMS_transition(thread, vthread, hide); } #else fatal("Should only be called with JVMTI enabled"); diff --git a/src/hotspot/share/prims/jvmtiAgent.hpp 
b/src/hotspot/share/prims/jvmtiAgent.hpp index e51c45351b0..9baf6698868 100644 --- a/src/hotspot/share/prims/jvmtiAgent.hpp +++ b/src/hotspot/share/prims/jvmtiAgent.hpp @@ -58,20 +58,20 @@ class JvmtiAgent : public CHeapObj { public: JvmtiAgent(const char* name, const char* options, bool is_absolute_path, bool dynamic = false); - const char* name() const; + const char* name() const NOT_JVMTI_RETURN_(nullptr); const char* options() const; - bool is_absolute_path() const; - void* os_lib() const; - void set_os_lib(void* os_lib); + bool is_absolute_path() const NOT_JVMTI_RETURN_(false); + void* os_lib() const NOT_JVMTI_RETURN_(nullptr); + void set_os_lib(void* os_lib) NOT_JVMTI_RETURN; const char* os_lib_path() const; - void set_os_lib_path(const char* path); - bool is_static_lib() const; - void set_static_lib(); + void set_os_lib_path(const char* path) NOT_JVMTI_RETURN; + bool is_static_lib() const NOT_JVMTI_RETURN_(false); + void set_static_lib() NOT_JVMTI_RETURN; bool is_dynamic() const; bool is_xrun() const; bool is_instrument_lib() const; - bool is_loaded() const; - void set_loaded(); + bool is_loaded() const NOT_JVMTI_RETURN_(false); + void set_loaded() NOT_JVMTI_RETURN; bool is_jplis() const; bool is_jplis(JvmtiEnv* env) const; void set_jplis(const void* jplis); diff --git a/src/hotspot/share/prims/jvmtiAgentList.cpp b/src/hotspot/share/prims/jvmtiAgentList.cpp index fb46ddea661..22d74e930cf 100644 --- a/src/hotspot/share/prims/jvmtiAgentList.cpp +++ b/src/hotspot/share/prims/jvmtiAgentList.cpp @@ -28,7 +28,6 @@ #include "prims/jvmtiExport.hpp" #include "runtime/atomic.hpp" #include "runtime/os.inline.hpp" -#include "utilities/growableArray.hpp" JvmtiAgent* JvmtiAgentList::_list = nullptr; @@ -82,10 +81,6 @@ JvmtiAgentList::Iterator::Iterator(JvmtiAgent** list, Filter filter) : } } -JvmtiAgentList::Iterator::~Iterator() { - delete _stack; -} - bool JvmtiAgentList::Iterator::has_next() const { assert(_stack != nullptr, "invariant"); return _stack->is_nonempty(); 
@@ -250,7 +245,6 @@ JvmtiAgent* JvmtiAgentList::lookup(JvmtiEnv* env, void* f_ptr) { return nullptr; } assert(buffer[0] != '\0', "invariant"); - assert(offset >= 0, "invariant"); const void* const os_module_address = reinterpret_cast
(f_ptr) - offset; JvmtiAgentList::Iterator it = JvmtiAgentList::agents(); diff --git a/src/hotspot/share/prims/jvmtiAgentList.hpp b/src/hotspot/share/prims/jvmtiAgentList.hpp index 5fc4e140887..cdb8b7722b0 100644 --- a/src/hotspot/share/prims/jvmtiAgentList.hpp +++ b/src/hotspot/share/prims/jvmtiAgentList.hpp @@ -27,9 +27,8 @@ #include "memory/allocation.hpp" #include "prims/jvmtiAgent.hpp" +#include "utilities/growableArray.hpp" -template -class GrowableArrayCHeap; class JvmtiEnv; // Maintains a single cas linked-list of JvmtiAgents. @@ -49,13 +48,14 @@ class JvmtiAgentList : AllStatic { }; GrowableArrayCHeap* _stack; const Filter _filter; + Iterator() : _stack(nullptr), _filter(ALL) {} Iterator(JvmtiAgent** list, Filter filter); JvmtiAgent* select(JvmtiAgent* agent) const; public: - bool has_next() const; - JvmtiAgent* next(); - const JvmtiAgent* next() const; - ~Iterator(); + bool has_next() const NOT_JVMTI_RETURN_(false); + JvmtiAgent* next() NOT_JVMTI_RETURN_(nullptr); + const JvmtiAgent* next() const NOT_JVMTI_RETURN_(nullptr); + ~Iterator() { delete _stack; } }; private: @@ -66,19 +66,19 @@ class JvmtiAgentList : AllStatic { static void convert_xrun_agents(); public: - static void add(JvmtiAgent* agent); - static void add(const char* name, char* options, bool absolute_path); - static void add_xrun(const char* name, char* options, bool absolute_path); + static void add(JvmtiAgent* agent) NOT_JVMTI_RETURN; + static void add(const char* name, char* options, bool absolute_path) NOT_JVMTI_RETURN; + static void add_xrun(const char* name, char* options, bool absolute_path) NOT_JVMTI_RETURN; - static void load_agents(); + static void load_agents() NOT_JVMTI_RETURN; static jint load_agent(const char* agent, const char* absParam, - const char* options, outputStream* st); - static void load_xrun_agents(); - static void unload_agents(); + const char* options, outputStream* st) NOT_JVMTI_RETURN_(0); + static void load_xrun_agents() NOT_JVMTI_RETURN; + static void 
unload_agents() NOT_JVMTI_RETURN; static JvmtiAgent* lookup(JvmtiEnv* env, void* f_ptr); - static Iterator agents(); + static Iterator agents() NOT_JVMTI({ Iterator it; return it; }); static Iterator java_agents(); static Iterator native_agents(); static Iterator xrun_agents(); diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp index 295edf689bb..027f55889f8 100644 --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -3651,7 +3651,7 @@ JvmtiEnv::IsMethodObsolete(Method* method, jboolean* is_obsolete_ptr) { // monitor_ptr - pre-checked for null jvmtiError JvmtiEnv::CreateRawMonitor(const char* name, jrawMonitorID* monitor_ptr) { - JvmtiRawMonitor* rmonitor = new JvmtiRawMonitor(name); + JvmtiRawMonitor* rmonitor = new (std::nothrow) JvmtiRawMonitor(name); NULL_CHECK(rmonitor, JVMTI_ERROR_OUT_OF_MEMORY); *monitor_ptr = (jrawMonitorID)rmonitor; diff --git a/src/hotspot/share/prims/jvmtiRawMonitor.hpp b/src/hotspot/share/prims/jvmtiRawMonitor.hpp index acbe6aa6fcf..2adc730ff9f 100644 --- a/src/hotspot/share/prims/jvmtiRawMonitor.hpp +++ b/src/hotspot/share/prims/jvmtiRawMonitor.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -110,11 +110,6 @@ class JvmtiRawMonitor : public CHeapObj { M_INTERRUPTED // Thread.interrupt() }; - // Non-aborting operator new - void* operator new(size_t size) throw() { - return CHeapObj::operator new(size, std::nothrow); - } - JvmtiRawMonitor(const char* name); ~JvmtiRawMonitor(); diff --git a/src/hotspot/share/prims/jvmtiThreadState.cpp b/src/hotspot/share/prims/jvmtiThreadState.cpp index f33581c6cca..40168637d79 100644 --- a/src/hotspot/share/prims/jvmtiThreadState.cpp +++ b/src/hotspot/share/prims/jvmtiThreadState.cpp @@ -292,7 +292,7 @@ JvmtiVTMSTransitionDisabler::~JvmtiVTMSTransitionDisabler() { // disable VTMS transitions for one virtual thread void JvmtiVTMSTransitionDisabler::VTMS_transition_disable_for_one() { - assert(_thread != NULL, "sanity check"); + assert(_thread != nullptr, "sanity check"); JavaThread* thread = JavaThread::current(); HandleMark hm(thread); Handle vth = Handle(thread, JNIHandles::resolve_external_guard(_thread)); @@ -511,8 +511,95 @@ JvmtiVTMSTransitionDisabler::finish_VTMS_transition(jthread vthread, bool is_mou #endif } +// set VTMS transition bit value in JavaThread and java.lang.VirtualThread object +void JvmtiVTMSTransitionDisabler::set_is_in_VTMS_transition(JavaThread* thread, jobject vthread, bool in_trans) { + oop vt = JNIHandles::resolve_external_guard(vthread); + java_lang_Thread::set_is_in_VTMS_transition(vt, in_trans); + thread->set_is_in_VTMS_transition(in_trans); +} + void -JvmtiVTMSTransitionDisabler::VTMS_mount_begin(jobject vthread, jboolean first_mount) { +JvmtiVTMSTransitionDisabler::VTMS_vthread_start(jobject vthread) { + VTMS_mount_end(vthread); + JavaThread* thread = JavaThread::current(); + + assert(!thread->is_in_VTMS_transition(), "sanity check"); + assert(!thread->is_in_tmp_VTMS_transition(), "sanity check"); + + if (JvmtiExport::can_support_virtual_threads()) { + JvmtiEventController::thread_started(thread); + if 
(JvmtiExport::should_post_vthread_start()) { + JvmtiExport::post_vthread_start(vthread); + } + } else { // compatibility for vthread unaware agents: legacy thread_start + if (PostVirtualThreadCompatibleLifecycleEvents && + JvmtiExport::should_post_thread_life()) { + // JvmtiEventController::thread_started is called here + JvmtiExport::post_thread_start(thread); + } + } + // post VirtualThreadMount event after VirtualThreadStart + if (JvmtiExport::should_post_vthread_mount()) { + JvmtiExport::post_vthread_mount(vthread); + } +} + +void +JvmtiVTMSTransitionDisabler::VTMS_vthread_end(jobject vthread) { + JavaThread* thread = JavaThread::current(); + + assert(!thread->is_in_VTMS_transition(), "sanity check"); + assert(!thread->is_in_tmp_VTMS_transition(), "sanity check"); + + // post VirtualThreadUnmount event before VirtualThreadEnd + if (JvmtiExport::should_post_vthread_unmount()) { + JvmtiExport::post_vthread_unmount(vthread); + } + if (JvmtiExport::can_support_virtual_threads()) { + if (JvmtiExport::should_post_vthread_end()) { + JvmtiExport::post_vthread_end(vthread); + } + } else { // compatibility for vthread unaware agents: legacy thread_end + if (PostVirtualThreadCompatibleLifecycleEvents && + JvmtiExport::should_post_thread_life()) { + JvmtiExport::post_thread_end(thread); + } + } + if (thread->jvmti_thread_state() != nullptr) { + JvmtiExport::cleanup_thread(thread); + thread->set_jvmti_thread_state(nullptr); + oop vt = JNIHandles::resolve(vthread); + java_lang_Thread::set_jvmti_thread_state(vt, nullptr); + } + VTMS_unmount_begin(vthread); +} + +void +JvmtiVTMSTransitionDisabler::VTMS_vthread_mount(jobject vthread, bool hide) { + if (hide) { + VTMS_mount_begin(vthread); + } else { + VTMS_mount_end(vthread); + if (JvmtiExport::should_post_vthread_mount()) { + JvmtiExport::post_vthread_mount(vthread); + } + } +} + +void +JvmtiVTMSTransitionDisabler::VTMS_vthread_unmount(jobject vthread, bool hide) { + if (hide) { + if 
(JvmtiExport::should_post_vthread_unmount()) { + JvmtiExport::post_vthread_unmount(vthread); + } + VTMS_unmount_begin(vthread); + } else { + VTMS_unmount_end(vthread); + } +} + +void +JvmtiVTMSTransitionDisabler::VTMS_mount_begin(jobject vthread) { JavaThread* thread = JavaThread::current(); assert(!thread->is_in_tmp_VTMS_transition(), "sanity check"); assert(!thread->is_in_VTMS_transition(), "sanity check"); @@ -520,7 +607,7 @@ JvmtiVTMSTransitionDisabler::VTMS_mount_begin(jobject vthread, jboolean first_mo } void -JvmtiVTMSTransitionDisabler::VTMS_mount_end(jobject vthread, jboolean first_mount) { +JvmtiVTMSTransitionDisabler::VTMS_mount_end(jobject vthread) { JavaThread* thread = JavaThread::current(); oop vt = JNIHandles::resolve(vthread); @@ -536,62 +623,21 @@ JvmtiVTMSTransitionDisabler::VTMS_mount_end(jobject vthread, jboolean first_moun assert(thread->is_in_VTMS_transition(), "sanity check"); assert(!thread->is_in_tmp_VTMS_transition(), "sanity check"); finish_VTMS_transition(vthread, /* is_mount */ true); - if (first_mount) { - // thread start - if (JvmtiExport::can_support_virtual_threads()) { - JvmtiEventController::thread_started(thread); - if (JvmtiExport::should_post_vthread_start()) { - JvmtiExport::post_vthread_start(vthread); - } - } else { // compatibility for vthread unaware agents: legacy thread_start - if (PostVirtualThreadCompatibleLifecycleEvents && - JvmtiExport::should_post_thread_life()) { - // JvmtiEventController::thread_started is called here - JvmtiExport::post_thread_start(thread); - } - } - } - if (JvmtiExport::should_post_vthread_mount()) { - JvmtiExport::post_vthread_mount(vthread); - } } void -JvmtiVTMSTransitionDisabler::VTMS_unmount_begin(jobject vthread, jboolean last_unmount) { +JvmtiVTMSTransitionDisabler::VTMS_unmount_begin(jobject vthread) { JavaThread* thread = JavaThread::current(); - HandleMark hm(thread); - Handle ct(thread, thread->threadObj()); - if (JvmtiExport::should_post_vthread_unmount()) { - 
JvmtiExport::post_vthread_unmount(vthread); - } - if (last_unmount) { - if (JvmtiExport::can_support_virtual_threads()) { - if (JvmtiExport::should_post_vthread_end()) { - JvmtiExport::post_vthread_end(vthread); - } - } else { // compatibility for vthread unaware agents: legacy thread_end - if (PostVirtualThreadCompatibleLifecycleEvents && - JvmtiExport::should_post_thread_life()) { - JvmtiExport::post_thread_end(thread); - } - } - } assert(!thread->is_in_tmp_VTMS_transition(), "sanity check"); assert(!thread->is_in_VTMS_transition(), "sanity check"); - start_VTMS_transition(vthread, /* is_mount */ false); - if (last_unmount && thread->jvmti_thread_state() != nullptr) { - JvmtiExport::cleanup_thread(thread); - thread->set_jvmti_thread_state(nullptr); - oop vt = JNIHandles::resolve(vthread); - java_lang_Thread::set_jvmti_thread_state(vt, nullptr); - } - thread->rebind_to_jvmti_thread_state_of(ct()); + start_VTMS_transition(vthread, /* is_mount */ false); + thread->rebind_to_jvmti_thread_state_of(thread->threadObj()); } void -JvmtiVTMSTransitionDisabler::VTMS_unmount_end(jobject vthread, jboolean last_unmount) { +JvmtiVTMSTransitionDisabler::VTMS_unmount_end(jobject vthread) { JavaThread* thread = JavaThread::current(); assert(thread->is_in_VTMS_transition(), "sanity check"); assert(!thread->is_in_tmp_VTMS_transition(), "sanity check"); diff --git a/src/hotspot/share/prims/jvmtiThreadState.hpp b/src/hotspot/share/prims/jvmtiThreadState.hpp index a4e9f25a6ca..c48096b90d4 100644 --- a/src/hotspot/share/prims/jvmtiThreadState.hpp +++ b/src/hotspot/share/prims/jvmtiThreadState.hpp @@ -105,13 +105,23 @@ class JvmtiVTMSTransitionDisabler { JvmtiVTMSTransitionDisabler(jthread thread); ~JvmtiVTMSTransitionDisabler(); + // set VTMS transition bit value in JavaThread and java.lang.VirtualThread object + static void set_is_in_VTMS_transition(JavaThread* thread, jobject vthread, bool in_trans); + static void start_VTMS_transition(jthread vthread, bool is_mount); static void 
finish_VTMS_transition(jthread vthread, bool is_mount); - static void VTMS_mount_begin(jobject vthread, jboolean first_mount); - static void VTMS_mount_end(jobject vthread, jboolean first_mount); - static void VTMS_unmount_begin(jobject vthread, jboolean last_unmount); - static void VTMS_unmount_end(jobject vthread, jboolean last_unmount); + static void VTMS_vthread_start(jobject vthread); + static void VTMS_vthread_end(jobject vthread); + + static void VTMS_vthread_mount(jobject vthread, bool hide); + static void VTMS_vthread_unmount(jobject vthread, bool hide); + + static void VTMS_mount_begin(jobject vthread); + static void VTMS_mount_end(jobject vthread); + + static void VTMS_unmount_begin(jobject vthread); + static void VTMS_unmount_end(jobject vthread); }; /////////////////////////////////////////////////////////////// diff --git a/src/hotspot/share/prims/nativeEntryPoint.cpp b/src/hotspot/share/prims/nativeEntryPoint.cpp index 1a7ba6fe67b..ef962f8ee41 100644 --- a/src/hotspot/share/prims/nativeEntryPoint.cpp +++ b/src/hotspot/share/prims/nativeEntryPoint.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,8 @@ JNI_ENTRY(jlong, NEP_makeDowncallStub(JNIEnv* env, jclass _unused, jobject method_type, jobject jabi, jobjectArray arg_moves, jobjectArray ret_moves, - jboolean needs_return_buffer, jint captured_state_mask)) + jboolean needs_return_buffer, jint captured_state_mask, + jboolean needs_transition)) ResourceMark rm; const ABIDescriptor abi = ForeignGlobals::parse_abi_descriptor(jabi); @@ -77,7 +78,8 @@ JNI_ENTRY(jlong, NEP_makeDowncallStub(JNIEnv* env, jclass _unused, jobject metho return (jlong) DowncallLinker::make_downcall_stub(basic_type, pslots, ret_bt, abi, input_regs, output_regs, - needs_return_buffer, captured_state_mask)->code_begin(); + needs_return_buffer, captured_state_mask, + needs_transition)->code_begin(); JNI_END JNI_ENTRY(jboolean, NEP_freeDowncallStub(JNIEnv* env, jclass _unused, jlong invoker)) @@ -97,7 +99,7 @@ JNI_END #define VM_STORAGE_ARR "[Ljdk/internal/foreign/abi/VMStorage;" static JNINativeMethod NEP_methods[] = { - {CC "makeDowncallStub", CC "(" METHOD_TYPE ABI_DESC VM_STORAGE_ARR VM_STORAGE_ARR "ZI)J", FN_PTR(NEP_makeDowncallStub)}, + {CC "makeDowncallStub", CC "(" METHOD_TYPE ABI_DESC VM_STORAGE_ARR VM_STORAGE_ARR "ZIZ)J", FN_PTR(NEP_makeDowncallStub)}, {CC "freeDowncallStub0", CC "(J)Z", FN_PTR(NEP_freeDowncallStub)}, }; diff --git a/src/hotspot/share/prims/upcallLinker.cpp b/src/hotspot/share/prims/upcallLinker.cpp index 13ae00fa027..7be41f7447f 100644 --- a/src/hotspot/share/prims/upcallLinker.cpp +++ b/src/hotspot/share/prims/upcallLinker.cpp @@ -75,6 +75,7 @@ JavaThread* UpcallLinker::maybe_attach_and_get_thread() { // modelled after JavaCallWrapper::JavaCallWrapper JavaThread* UpcallLinker::on_entry(UpcallStub::FrameData* context) { JavaThread* thread = maybe_attach_and_get_thread(); + guarantee(thread->thread_state() == _thread_in_native, "wrong thread state for upcall"); context->thread = thread; assert(thread->can_call_java(), "must be 
able to call Java"); diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp index 7e98023e18c..7891f111744 100644 --- a/src/hotspot/share/prims/whitebox.cpp +++ b/src/hotspot/share/prims/whitebox.cpp @@ -1206,9 +1206,9 @@ WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method)) mdo->clean_method_data(/*always_clean*/true); } - mh->clear_not_c1_compilable(); - mh->clear_not_c2_compilable(); - mh->clear_not_c2_osr_compilable(); + mh->clear_is_not_c1_compilable(); + mh->clear_is_not_c2_compilable(); + mh->clear_is_not_c2_osr_compilable(); NOT_PRODUCT(mh->set_compiled_invocation_count(0)); if (mcs != nullptr) { mcs->clear_counters(); @@ -2006,11 +2006,6 @@ WB_ENTRY(jboolean, WB_CDSMemoryMappingFailed(JNIEnv* env, jobject wb)) return FileMapInfo::memory_mapping_failed(); WB_END -WB_ENTRY(jboolean, WB_IsShared(JNIEnv* env, jobject wb, jobject obj)) - oop obj_oop = JNIHandles::resolve(obj); - return Universe::heap()->is_archived_object(obj_oop); -WB_END - WB_ENTRY(jboolean, WB_IsSharedInternedString(JNIEnv* env, jobject wb, jobject str)) ResourceMark rm(THREAD); oop str_oop = JNIHandles::resolve(str); @@ -2024,19 +2019,7 @@ WB_ENTRY(jboolean, WB_IsSharedClass(JNIEnv* env, jobject wb, jclass clazz)) WB_END WB_ENTRY(jboolean, WB_AreSharedStringsMapped(JNIEnv* env)) - return ArchiveHeapLoader::closed_regions_mapped(); -WB_END - -WB_ENTRY(jobject, WB_GetResolvedReferences(JNIEnv* env, jobject wb, jclass clazz)) - Klass *k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(clazz)); - if (k->is_instance_klass()) { - InstanceKlass *ik = InstanceKlass::cast(k); - ConstantPool *cp = ik->constants(); - objArrayOop refs = cp->resolved_references(); - return (jobject)JNIHandles::make_local(THREAD, refs); - } else { - return nullptr; - } + return ArchiveHeapLoader::is_mapped(); WB_END WB_ENTRY(void, WB_LinkClass(JNIEnv* env, jobject wb, jclass clazz)) @@ -2049,7 +2032,7 @@ WB_ENTRY(void, WB_LinkClass(JNIEnv* env, jobject wb, 
jclass clazz)) WB_END WB_ENTRY(jboolean, WB_AreOpenArchiveHeapObjectsMapped(JNIEnv* env)) - return ArchiveHeapLoader::open_regions_mapped(); + return ArchiveHeapLoader::is_mapped(); WB_END WB_ENTRY(jboolean, WB_IsCDSIncluded(JNIEnv* env)) @@ -2766,11 +2749,9 @@ static JNINativeMethod methods[] = { {CC"getCDSGenericHeaderMinVersion", CC"()I", (void*)&WB_GetCDSGenericHeaderMinVersion}, {CC"getCurrentCDSVersion", CC"()I", (void*)&WB_GetCDSCurrentVersion}, {CC"isSharingEnabled", CC"()Z", (void*)&WB_IsSharingEnabled}, - {CC"isShared", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsShared }, {CC"isSharedInternedString", CC"(Ljava/lang/String;)Z", (void*)&WB_IsSharedInternedString }, {CC"isSharedClass", CC"(Ljava/lang/Class;)Z", (void*)&WB_IsSharedClass }, {CC"areSharedStringsMapped", CC"()Z", (void*)&WB_AreSharedStringsMapped }, - {CC"getResolvedReferences", CC"(Ljava/lang/Class;)Ljava/lang/Object;", (void*)&WB_GetResolvedReferences}, {CC"linkClass", CC"(Ljava/lang/Class;)V", (void*)&WB_LinkClass}, {CC"areOpenArchiveHeapObjectsMapped", CC"()Z", (void*)&WB_AreOpenArchiveHeapObjectsMapped}, {CC"isCDSIncluded", CC"()Z", (void*)&WB_IsCDSIncluded }, diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp index c87c1cb03f9..51d79037959 100644 --- a/src/hotspot/share/runtime/arguments.cpp +++ b/src/hotspot/share/runtime/arguments.cpp @@ -85,7 +85,6 @@ char* Arguments::_java_command = nullptr; SystemProperty* Arguments::_system_properties = nullptr; size_t Arguments::_conservative_max_heap_alignment = 0; Arguments::Mode Arguments::_mode = _mixed; -bool Arguments::_java_compiler = false; bool Arguments::_xdebug_mode = false; const char* Arguments::_java_vendor_url_bug = nullptr; const char* Arguments::_sun_java_launcher = DEFAULT_JAVA_LAUNCHER; @@ -524,6 +523,7 @@ static SpecialFlag const special_jvm_flags[] = { { "G1UsePreventiveGC", JDK_Version::undefined(), JDK_Version::jdk(21), JDK_Version::jdk(22) }, { "G1ConcRSLogCacheSize", 
JDK_Version::undefined(), JDK_Version::jdk(21), JDK_Version::undefined() }, { "G1ConcRSHotCardLimit", JDK_Version::undefined(), JDK_Version::jdk(21), JDK_Version::undefined() }, + { "MetaspaceReclaimPolicy", JDK_Version::undefined(), JDK_Version::jdk(21), JDK_Version::undefined() }, #ifdef ASSERT { "DummyObsoleteTestFlag", JDK_Version::undefined(), JDK_Version::jdk(18), JDK_Version::undefined() }, @@ -1266,8 +1266,14 @@ bool Arguments::add_property(const char* prop, PropertyWriteable writeable, Prop #endif if (strcmp(key, "java.compiler") == 0) { - process_java_compiler_argument(value); - // Record value in Arguments, but let it get passed to Java. + // we no longer support java.compiler system property, log a warning and let it get + // passed to Java, like any other system property + if (strlen(value) == 0 || strcasecmp(value, "NONE") == 0) { + // for applications using NONE or empty value, log a more informative message + warning("The java.compiler system property is obsolete and no longer supported, use -Xint"); + } else { + warning("The java.compiler system property is obsolete and no longer supported."); + } } else if (strcmp(key, "sun.java.launcher.is_altjvm") == 0) { // sun.java.launcher.is_altjvm property is // private and is processed in process_sun_java_launcher_properties(); @@ -1372,7 +1378,6 @@ void Arguments::set_mode_flags(Mode mode) { // Set up default values for all flags. // If you add a flag to any of the branches below, // add a default value for it here. - set_java_compiler(false); _mode = mode; // Ensure Agent_OnLoad has the correct initial values. 
@@ -1862,16 +1867,6 @@ jint Arguments::set_aggressive_opts_flags() { } //=========================================================================================================== -// Parsing of java.compiler property - -void Arguments::process_java_compiler_argument(const char* arg) { - // For backwards compatibility, Djava.compiler=NONE or "" - // causes us to switch to -Xint mode UNLESS -Xdebug - // is also specified. - if (strlen(arg) == 0 || strcasecmp(arg, "NONE") == 0) { - set_java_compiler(true); // "-Djava.compiler[=...]" most recently seen. - } -} void Arguments::process_java_launcher_argument(const char* launcher, void* extra_info) { if (_sun_java_launcher != _default_java_launcher) { @@ -3004,15 +2999,6 @@ jint Arguments::finalize_vm_init_args(bool patch_mod_javabase) { } } - // This must be done after all arguments have been processed. - // java_compiler() true means set to "NONE" or empty. - if (java_compiler() && !xdebug_mode()) { - // For backwards compatibility, we switch to interpreted mode if - // -Djava.compiler="NONE" or "" is specified AND "-Xdebug" was - // not specified. - set_mode_flags(_int); - } - // CompileThresholdScaling == 0.0 is same as -Xint: Disable compilation (enable interpreter-only mode), // but like -Xint, leave compilation thresholds unaffected. // With tiered compilation disabled, setting CompileThreshold to 0 disables compilation as well. 
diff --git a/src/hotspot/share/runtime/arguments.hpp b/src/hotspot/share/runtime/arguments.hpp index 4f7eec12735..977422a2961 100644 --- a/src/hotspot/share/runtime/arguments.hpp +++ b/src/hotspot/share/runtime/arguments.hpp @@ -242,9 +242,6 @@ class Arguments : AllStatic { // Operation modi static Mode _mode; static void set_mode_flags(Mode mode); - static bool _java_compiler; - static void set_java_compiler(bool arg) { _java_compiler = arg; } - static bool java_compiler() { return _java_compiler; } // -Xdebug flag static bool _xdebug_mode; @@ -302,7 +299,6 @@ class Arguments : AllStatic { static bool parse_argument(const char* arg, JVMFlagOrigin origin); static bool process_argument(const char* arg, jboolean ignore_unrecognized, JVMFlagOrigin origin); static void process_java_launcher_argument(const char*, void*); - static void process_java_compiler_argument(const char* arg); static jint parse_options_environment_variable(const char* name, ScopedVMInitArgs* vm_args); static jint parse_java_tool_options_environment_variable(ScopedVMInitArgs* vm_args); static jint parse_java_options_environment_variable(ScopedVMInitArgs* vm_args); diff --git a/src/hotspot/share/runtime/atomic.hpp b/src/hotspot/share/runtime/atomic.hpp index f9cafaf7be4..45ebaeadbcb 100644 --- a/src/hotspot/share/runtime/atomic.hpp +++ b/src/hotspot/share/runtime/atomic.hpp @@ -160,6 +160,82 @@ public: inline static bool replace_if_null(D* volatile* dest, T* value, atomic_memory_order order = memory_order_conservative); + // Bitwise logical operations (and, or, xor) + // + // All operations apply the corresponding operation to the value in dest and + // bits, storing the result in dest. They return either the old value + // (fetch_then_BITOP) or the newly updated value (BITOP_then_fetch). + // + // Requirements: + // - T is an integral type + // - sizeof(T) == sizeof(int) || sizeof(T) == sizeof(void*) + + // Performs atomic bitwise-and of *dest and bits, storing the result in + // *dest. 
Returns the prior value of *dest. That is, atomically performs + // this sequence of operations: + // { tmp = *dest; *dest &= bits; return tmp; } + template + static T fetch_then_and(volatile T* dest, T bits, + atomic_memory_order order = memory_order_conservative) { + static_assert(std::is_integral::value, "bitop with non-integral type"); + return PlatformBitops().fetch_then_and(dest, bits, order); + } + + // Performs atomic bitwise-or of *dest and bits, storing the result in + // *dest. Returns the prior value of *dest. That is, atomically performs + // this sequence of operations: + // { tmp = *dest; *dest |= bits; return tmp; } + template + static T fetch_then_or(volatile T* dest, T bits, + atomic_memory_order order = memory_order_conservative) { + static_assert(std::is_integral::value, "bitop with non-integral type"); + return PlatformBitops().fetch_then_or(dest, bits, order); + } + + // Performs atomic bitwise-xor of *dest and bits, storing the result in + // *dest. Returns the prior value of *dest. That is, atomically performs + // this sequence of operations: + // { tmp = *dest; *dest ^= bits; return tmp; } + template + static T fetch_then_xor(volatile T* dest, T bits, + atomic_memory_order order = memory_order_conservative) { + static_assert(std::is_integral::value, "bitop with non-integral type"); + return PlatformBitops().fetch_then_xor(dest, bits, order); + } + + // Performs atomic bitwise-and of *dest and bits, storing the result in + // *dest. Returns the new value of *dest. That is, atomically performs + // this operation: + // { return *dest &= bits; } + template + static T and_then_fetch(volatile T* dest, T bits, + atomic_memory_order order = memory_order_conservative) { + static_assert(std::is_integral::value, "bitop with non-integral type"); + return PlatformBitops().and_then_fetch(dest, bits, order); + } + + // Performs atomic bitwise-or of *dest and bits, storing the result in + // *dest. Returns the new value of *dest. 
That is, atomically performs + // this operation: + // { return *dest |= bits; } + template + static T or_then_fetch(volatile T* dest, T bits, + atomic_memory_order order = memory_order_conservative) { + static_assert(std::is_integral::value, "bitop with non-integral type"); + return PlatformBitops().or_then_fetch(dest, bits, order); + } + + // Performs atomic bitwise-xor of *dest and bits, storing the result in + // *dest. Returns the new value of *dest. That is, atomically performs + // this operation: + // { return *dest ^= bits; } + template + static T xor_then_fetch(volatile T* dest, T bits, + atomic_memory_order order = memory_order_conservative) { + static_assert(std::is_integral::value, "bitop with non-integral type"); + return PlatformBitops().xor_then_fetch(dest, bits, order); + } + private: // Test whether From is implicitly convertible to To. // From and To must be pointer types. @@ -367,6 +443,44 @@ private: static T xchg_using_helper(Fn fn, T volatile* dest, T exchange_value); + + // Platform-specific implementation of the bitops (and, or, xor). Support + // for sizes of 4 bytes and (if different) pointer size bytes are required. + // The class is a function object that must be default constructable, with + // these requirements: + // + // - T is an integral type. + // - dest is of type T*. + // - bits is of type T. + // - order is of type atomic_memory_order. + // - platform_bitops is an object of type PlatformBitops. + // + // Then + // platform_bitops.fetch_then_and(dest, bits, order) + // platform_bitops.fetch_then_or(dest, bits, order) + // platform_bitops.fetch_then_xor(dest, bits, order) + // platform_bitops.and_then_fetch(dest, bits, order) + // platform_bitops.or_then_fetch(dest, bits, order) + // platform_bitops.xor_then_fetch(dest, bits, order) + // must all be valid expressions, returning a result convertible to T. + // + // A default definition is provided, which implements all of the operations + // using cmpxchg. 
+ // + // For each required size, a platform must either use the default or + // entirely specialize the class for that size by providing all of the + // required operations. + // + // The second (bool) template parameter allows platforms to provide a + // partial specialization with a parameterized size, and is otherwise + // unused. The default value for that bool parameter means specializations + // don't need to mention it. + template class PlatformBitops; + + // Helper base classes that may be used to implement PlatformBitops. + class PrefetchBitopsUsingCmpxchg; + class PostfetchBitopsUsingCmpxchg; + class PostfetchBitopsUsingPrefetch; }; template @@ -576,6 +690,99 @@ struct Atomic::PlatformXchg { atomic_memory_order order) const; }; +// Implement fetch_then_bitop operations using a CAS loop. +class Atomic::PrefetchBitopsUsingCmpxchg { + template + T bitop(T volatile* dest, atomic_memory_order order, Op operation) const { + T old_value; + T new_value; + T fetched_value = Atomic::load(dest); + do { + old_value = fetched_value; + new_value = operation(old_value); + fetched_value = Atomic::cmpxchg(dest, old_value, new_value, order); + } while (old_value != fetched_value); + return fetched_value; + } + +public: + template + T fetch_then_and(T volatile* dest, T bits, atomic_memory_order order) const { + return bitop(dest, order, [&](T value) -> T { return value & bits; }); + } + + template + T fetch_then_or(T volatile* dest, T bits, atomic_memory_order order) const { + return bitop(dest, order, [&](T value) -> T { return value | bits; }); + } + + template + T fetch_then_xor(T volatile* dest, T bits, atomic_memory_order order) const { + return bitop(dest, order, [&](T value) -> T { return value ^ bits; }); + } +}; + +// Implement bitop_then_fetch operations using a CAS loop. 
+class Atomic::PostfetchBitopsUsingCmpxchg { + template + T bitop(T volatile* dest, atomic_memory_order order, Op operation) const { + T old_value; + T new_value; + T fetched_value = Atomic::load(dest); + do { + old_value = fetched_value; + new_value = operation(old_value); + fetched_value = Atomic::cmpxchg(dest, old_value, new_value, order); + } while (old_value != fetched_value); + return new_value; + } + +public: + template + T and_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const { + return bitop(dest, order, [&](T value) -> T { return value & bits; }); + } + + template + T or_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const { + return bitop(dest, order, [&](T value) -> T { return value | bits; }); + } + + template + T xor_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const { + return bitop(dest, order, [&](T value) -> T { return value ^ bits; }); + } +}; + +// Implement bitop_then_fetch operations by calling fetch_then_bitop and +// applying the operation to the result and the bits argument. +class Atomic::PostfetchBitopsUsingPrefetch { +public: + template + T and_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const { + return bits & Atomic::fetch_then_and(dest, bits, order); + } + + template + T or_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const { + return bits | Atomic::fetch_then_or(dest, bits, order); + } + + template + T xor_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const { + return bits ^ Atomic::fetch_then_xor(dest, bits, order); + } +}; + +// The default definition uses cmpxchg. Platforms can override by defining a +// partial specialization providing size, either as a template parameter or as +// a specific value. 
+template +class Atomic::PlatformBitops + : public PrefetchBitopsUsingCmpxchg, + public PostfetchBitopsUsingCmpxchg +{}; + template class ScopedFenceGeneral: public StackObj { public: diff --git a/src/hotspot/share/runtime/continuationFreezeThaw.cpp b/src/hotspot/share/runtime/continuationFreezeThaw.cpp index b09a0090644..09505e4f210 100644 --- a/src/hotspot/share/runtime/continuationFreezeThaw.cpp +++ b/src/hotspot/share/runtime/continuationFreezeThaw.cpp @@ -487,7 +487,7 @@ FreezeBase::FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* #if !defined(PPC64) || defined(ZERO) static const int doYield_stub_frame_size = frame::metadata_words; #else - static const int doYield_stub_frame_size = frame::abi_reg_args_size >> LogBytesPerWord; + static const int doYield_stub_frame_size = frame::native_abi_reg_args_size >> LogBytesPerWord; #endif assert(SharedRuntime::cont_doYield_stub()->frame_size() == doYield_stub_frame_size, ""); @@ -1058,12 +1058,8 @@ NOINLINE freeze_result FreezeBase::recurse_freeze_interpreted_frame(frame& f, fr // The frame's top never includes the stack arguments to the callee intptr_t* const stack_frame_top = ContinuationHelper::InterpretedFrame::frame_top(f, callee_argsize, callee_interpreted); - intptr_t* const callers_sp = ContinuationHelper::InterpretedFrame::callers_sp(f); - const int locals = f.interpreter_frame_method()->max_locals(); - const int fsize = callers_sp + frame::metadata_words_at_top + locals - stack_frame_top; - intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f); - assert(stack_frame_bottom - stack_frame_top >= fsize, ""); // == on x86 + const int fsize = stack_frame_bottom - stack_frame_top; DEBUG_ONLY(verify_frame_top(f, stack_frame_top)); @@ -1093,9 +1089,9 @@ NOINLINE freeze_result FreezeBase::recurse_freeze_interpreted_frame(frame& f, fr intptr_t* heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf); assert(heap_frame_bottom == heap_frame_top + 
fsize, ""); - // on AArch64 we add padding between the locals and the rest of the frame to keep the fp 16-byte-aligned - copy_to_chunk(stack_frame_bottom - locals, heap_frame_bottom - locals, locals); // copy locals - copy_to_chunk(stack_frame_top, heap_frame_top, fsize - locals); // copy rest + // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned. + // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame. + copy_to_chunk(stack_frame_top, heap_frame_top, fsize); assert(!is_bottom_frame || !caller.is_interpreted_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), ""); relativize_interpreted_frame_metadata(f, hf); @@ -1754,7 +1750,6 @@ private: void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; } static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f); - static inline void set_interpreter_frame_bottom(const frame& f, intptr_t* bottom); public: CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; }) @@ -2149,21 +2144,19 @@ NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& c intptr_t* const heap_frame_top = hf.unextended_sp() + frame::metadata_words_at_top; intptr_t* const heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf); - assert(hf.is_heap_frame(), "should be"); - const int fsize = heap_frame_bottom - heap_frame_top; - - assert((stack_frame_bottom >= stack_frame_top + fsize) && - (stack_frame_bottom <= stack_frame_top + fsize + 1), ""); // internal alignment on aarch64 - - // on AArch64/PPC64 we add padding between the locals and the rest of the frame to keep the fp 16-byte-aligned - const int locals = hf.interpreter_frame_method()->max_locals(); assert(hf.is_heap_frame(), "should be"); assert(!f.is_heap_frame(), "should not be"); - copy_from_chunk(heap_frame_bottom - locals, 
stack_frame_bottom - locals, locals); // copy locals - copy_from_chunk(heap_frame_top, stack_frame_top, fsize - locals); // copy rest + const int fsize = heap_frame_bottom - heap_frame_top; + assert((stack_frame_bottom == stack_frame_top + fsize), ""); + + // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned. + // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame. + copy_from_chunk(heap_frame_top, stack_frame_top, fsize); + + // Make sure the relativized locals is already set. + assert(f.interpreter_frame_local_at(0) == stack_frame_bottom - 1, "invalid frame bottom"); - set_interpreter_frame_bottom(f, stack_frame_bottom); // the copy overwrites the metadata derelativize_interpreted_frame_metadata(hf, f); patch(f, caller, is_bottom_frame); @@ -2174,6 +2167,8 @@ NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& c maybe_set_fastpath(f.sp()); + const int locals = hf.interpreter_frame_method()->max_locals(); + if (!is_bottom_frame) { // can only fix caller once this frame is thawed (due to callee saved regs) _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance); diff --git a/src/hotspot/share/runtime/continuationWrapper.inline.hpp b/src/hotspot/share/runtime/continuationWrapper.inline.hpp index fc89d98be64..03b2c726a0e 100644 --- a/src/hotspot/share/runtime/continuationWrapper.inline.hpp +++ b/src/hotspot/share/runtime/continuationWrapper.inline.hpp @@ -92,10 +92,8 @@ public: } inline ~SafepointOp() { // reload oops _cont._continuation = _conth(); - if (_cont._tail != nullptr) { - _cont._tail = jdk_internal_vm_Continuation::tail(_cont._continuation); - } - _cont.disallow_safepoint(); + _cont._tail = jdk_internal_vm_Continuation::tail(_cont._continuation); + _cont.disallow_safepoint(); } }; diff --git a/src/hotspot/share/runtime/escapeBarrier.cpp b/src/hotspot/share/runtime/escapeBarrier.cpp 
index 5d80cd8f5e3..bc01d900285 100644 --- a/src/hotspot/share/runtime/escapeBarrier.cpp +++ b/src/hotspot/share/runtime/escapeBarrier.cpp @@ -123,9 +123,8 @@ bool EscapeBarrier::deoptimize_objects_all_threads() { if (!barrier_active()) return true; ResourceMark rm(calling_thread()); for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { - oop vt_oop = jt->jvmti_vthread(); - // Skip virtual threads - if (vt_oop != nullptr && java_lang_VirtualThread::is_instance(vt_oop)) { + // Skip thread with mounted continuation + if (jt->last_continuation() != nullptr) { continue; } if (jt->frames_to_pop_failed_realloc() > 0) { diff --git a/src/hotspot/share/runtime/frame.cpp b/src/hotspot/share/runtime/frame.cpp index bb4a6cc2e54..d4e7c26f18b 100644 --- a/src/hotspot/share/runtime/frame.cpp +++ b/src/hotspot/share/runtime/frame.cpp @@ -1241,7 +1241,7 @@ class FrameValuesOopClosure: public OopClosure, public DerivedOopClosure { private: GrowableArray* _oops; GrowableArray* _narrow_oops; - GrowableArray* _base; + GrowableArray* _base; GrowableArray* _derived; NoSafepointVerifier nsv; @@ -1249,7 +1249,7 @@ public: FrameValuesOopClosure() { _oops = new (mtThread) GrowableArray(100, mtThread); _narrow_oops = new (mtThread) GrowableArray(100, mtThread); - _base = new (mtThread) GrowableArray(100, mtThread); + _base = new (mtThread) GrowableArray(100, mtThread); _derived = new (mtThread) GrowableArray(100, mtThread); } ~FrameValuesOopClosure() { @@ -1261,7 +1261,7 @@ public: virtual void do_oop(oop* p) override { _oops->push(p); } virtual void do_oop(narrowOop* p) override { _narrow_oops->push(p); } - virtual void do_derived_oop(oop* base_loc, derived_pointer* derived_loc) override { + virtual void do_derived_oop(derived_base* base_loc, derived_pointer* derived_loc) override { _base->push(base_loc); _derived->push(derived_loc); } @@ -1281,7 +1281,7 @@ public: } assert(_base->length() == _derived->length(), "should be the same"); for (int i = 0; i < _base->length(); 
i++) { - oop* base = _base->at(i); + derived_base* base = _base->at(i); derived_pointer* derived = _derived->at(i); values.describe(frame_no, (intptr_t*)derived, err_msg("derived pointer (base: " INTPTR_FORMAT ") for #%d", p2i(base), frame_no)); } diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp index c67b17ae3fb..51fb9292b06 100644 --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -713,6 +713,13 @@ const int ObjectAlignmentInBytes = 8; "MonitorUsedDeflationThreshold is exceeded (0 is off).") \ range(0, max_jint) \ \ + /* notice: the max range value here is max_jint, not max_intx */ \ + /* because of overflow issue */ \ + product(intx, GuaranteedAsyncDeflationInterval, 60000, DIAGNOSTIC, \ + "Async deflate idle monitors every so many milliseconds even " \ + "when MonitorUsedDeflationThreshold is NOT exceeded (0 is off).") \ + range(0, max_jint) \ + \ product(size_t, AvgMonitorsPerThreadEstimate, 1024, DIAGNOSTIC, \ "Used to estimate a variable ceiling based on number of threads " \ "for use with MonitorUsedDeflationThreshold (0 is off).") \ @@ -727,8 +734,9 @@ const int ObjectAlignmentInBytes = 8; \ product(intx, MonitorUsedDeflationThreshold, 90, DIAGNOSTIC, \ "Percentage of used monitors before triggering deflation (0 is " \ - "off). The check is performed on GuaranteedSafepointInterval " \ - "or AsyncDeflationInterval.") \ + "off). 
The check is performed on GuaranteedSafepointInterval, " \ + "AsyncDeflationInterval or GuaranteedAsyncDeflationInterval, " \ + "whichever is lower.") \ range(0, 100) \ \ product(uintx, NoAsyncDeflationProgressMax, 3, DIAGNOSTIC, \ @@ -1415,9 +1423,6 @@ const int ObjectAlignmentInBytes = 8; "Force the class space to be allocated at this address or " \ "fails VM initialization (requires -Xshare=off.") \ \ - product(ccstr, MetaspaceReclaimPolicy, "balanced", DIAGNOSTIC, \ - "options: balanced, aggressive") \ - \ product(bool, PrintMetaspaceStatisticsAtExit, false, DIAGNOSTIC, \ "Print metaspace statistics upon VM exit.") \ \ diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp index 44c82a0a849..140d0281e2f 100644 --- a/src/hotspot/share/runtime/java.cpp +++ b/src/hotspot/share/runtime/java.cpp @@ -479,11 +479,6 @@ void before_exit(JavaThread* thread, bool halt) { StatSampler::disengage(); StatSampler::destroy(); - // Shut down string deduplication if running. - if (StringDedup::is_enabled()) { - StringDedup::stop(); - } - // Stop concurrent GC threads Universe::heap()->stop(); diff --git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp index 473ce59c751..dde001a30fc 100644 --- a/src/hotspot/share/runtime/javaThread.cpp +++ b/src/hotspot/share/runtime/javaThread.cpp @@ -1515,12 +1515,13 @@ void JavaThread::print_on_error(outputStream* st, char *buf, int buflen) const { st->print("%s \"%s\"", type_name(), get_thread_name_string(buf, buflen)); Thread* current = Thread::current_or_null_safe(); assert(current != nullptr, "cannot be called by a detached thread"); + st->fill_to(60); if (!current->is_Java_thread() || JavaThread::cast(current)->is_oop_safe()) { // Only access threadObj() if current thread is not a JavaThread // or if it is a JavaThread that can safely access oops. 
oop thread_obj = threadObj(); if (thread_obj != nullptr) { - if (java_lang_Thread::is_daemon(thread_obj)) st->print(" daemon"); + st->print(java_lang_Thread::is_daemon(thread_obj) ? " daemon" : " "); } } st->print(" ["); @@ -1528,8 +1529,9 @@ void JavaThread::print_on_error(outputStream* st, char *buf, int buflen) const { if (osthread()) { st->print(", id=%d", osthread()->thread_id()); } - st->print(", stack(" PTR_FORMAT "," PTR_FORMAT ")", - p2i(stack_end()), p2i(stack_base())); + st->print(", stack(" PTR_FORMAT "," PTR_FORMAT ") (" PROPERFMT ")", + p2i(stack_end()), p2i(stack_base()), + PROPERFMTARGS(stack_size())); st->print("]"); ThreadsSMRSupport::print_info_on(this, st); @@ -1980,11 +1982,24 @@ Klass* JavaThread::security_get_caller_class(int depth) { return nullptr; } +// Internal convenience function for millisecond resolution sleeps. +bool JavaThread::sleep(jlong millis) { + jlong nanos; + if (millis > max_jlong / NANOUNITS_PER_MILLIUNIT) { + // Conversion to nanos would overflow, saturate at max + nanos = max_jlong; + } else { + nanos = millis * NANOUNITS_PER_MILLIUNIT; + } + return sleep_nanos(nanos); +} + // java.lang.Thread.sleep support // Returns true if sleep time elapsed as expected, and false // if the thread was interrupted. 
-bool JavaThread::sleep(jlong millis) { +bool JavaThread::sleep_nanos(jlong nanos) { assert(this == Thread::current(), "thread consistency check"); + assert(nanos >= 0, "nanos are in range"); ParkEvent * const slp = this->_SleepEvent; // Because there can be races with thread interruption sending an unpark() @@ -1998,20 +2013,22 @@ bool JavaThread::sleep(jlong millis) { jlong prevtime = os::javaTimeNanos(); + jlong nanos_remaining = nanos; + for (;;) { // interruption has precedence over timing out if (this->is_interrupted(true)) { return false; } - if (millis <= 0) { + if (nanos_remaining <= 0) { return true; } { ThreadBlockInVM tbivm(this); OSThreadWaitState osts(this->osthread(), false /* not Object.wait() */); - slp->park(millis); + slp->park_nanos(nanos_remaining); } // Update elapsed time tracking @@ -2022,7 +2039,7 @@ bool JavaThread::sleep(jlong millis) { assert(false, "unexpected time moving backwards detected in JavaThread::sleep()"); } else { - millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC; + nanos_remaining -= (newtime - prevtime); } prevtime = newtime; } diff --git a/src/hotspot/share/runtime/javaThread.hpp b/src/hotspot/share/runtime/javaThread.hpp index 14c6b39a779..c56a6d3b175 100644 --- a/src/hotspot/share/runtime/javaThread.hpp +++ b/src/hotspot/share/runtime/javaThread.hpp @@ -512,9 +512,7 @@ private: virtual bool is_Java_thread() const { return true; } virtual bool can_call_java() const { return true; } - virtual bool is_active_Java_thread() const { - return on_thread_list() && !is_terminated(); - } + virtual bool is_active_Java_thread() const; // Thread oop. 
threadObj() can be null for initial JavaThread // (or for threads attached via JNI) @@ -1142,6 +1140,7 @@ private: ParkEvent * _SleepEvent; public: bool sleep(jlong millis); + bool sleep_nanos(jlong nanos); // java.lang.Thread interruption support void interrupt(); diff --git a/src/hotspot/share/runtime/javaThread.inline.hpp b/src/hotspot/share/runtime/javaThread.inline.hpp index 4e4399d77a4..7b1ad7e17e1 100644 --- a/src/hotspot/share/runtime/javaThread.inline.hpp +++ b/src/hotspot/share/runtime/javaThread.inline.hpp @@ -223,6 +223,10 @@ inline void JavaThread::set_terminated(TerminatedTypes t) { Atomic::release_store(&_terminated, t); } +inline bool JavaThread::is_active_Java_thread() const { + return on_thread_list() && !is_terminated(); +} + // Allow tracking of class initialization monitor use inline void JavaThread::set_class_to_be_initialized(InstanceKlass* k) { assert((k == nullptr && _class_to_be_initialized != nullptr) || diff --git a/src/hotspot/share/runtime/monitorDeflationThread.cpp b/src/hotspot/share/runtime/monitorDeflationThread.cpp index 2f800f296b1..01b33182973 100644 --- a/src/hotspot/share/runtime/monitorDeflationThread.cpp +++ b/src/hotspot/share/runtime/monitorDeflationThread.cpp @@ -47,6 +47,38 @@ void MonitorDeflationThread::initialize() { } void MonitorDeflationThread::monitor_deflation_thread_entry(JavaThread* jt, TRAPS) { + + // We wait for the lowest of these three intervals: + // - GuaranteedSafepointInterval + // While deflation is not related to safepoint anymore, this keeps compatibility with + // the old behavior when deflation also happened at safepoints. Users who set this + // option to get more/less frequent deflations would be served with this option. + // - AsyncDeflationInterval + // Normal threshold-based deflation heuristic checks the conditions at this interval. + // See is_async_deflation_needed(). + // - GuaranteedAsyncDeflationInterval + // Backup deflation heuristic checks the conditions at this interval. 
+ // See is_async_deflation_needed(). + // + intx wait_time = max_intx; + if (GuaranteedSafepointInterval > 0) { + wait_time = MIN2(wait_time, GuaranteedSafepointInterval); + } + if (AsyncDeflationInterval > 0) { + wait_time = MIN2(wait_time, AsyncDeflationInterval); + } + if (GuaranteedAsyncDeflationInterval > 0) { + wait_time = MIN2(wait_time, GuaranteedAsyncDeflationInterval); + } + + // If all options are disabled, then wait time is not defined, and the deflation + // is effectively disabled. In that case, exit the thread immediately after printing + // a warning message. + if (wait_time == max_intx) { + warning("Async deflation is disabled"); + return; + } + while (true) { { // Need state transition ThreadBlockInVM so that this thread @@ -58,9 +90,7 @@ void MonitorDeflationThread::monitor_deflation_thread_entry(JavaThread* jt, TRAP MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag); while (!ObjectSynchronizer::is_async_deflation_needed()) { // Wait until notified that there is some work to do. - // We wait for GuaranteedSafepointInterval so that - // is_async_deflation_needed() is checked at the same interval. 
- ml.wait(GuaranteedSafepointInterval); + ml.wait(wait_time); } } diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp index 0e3c4797b93..a94061c5d8c 100644 --- a/src/hotspot/share/runtime/mutexLocker.cpp +++ b/src/hotspot/share/runtime/mutexLocker.cpp @@ -30,16 +30,17 @@ #include "memory/universe.hpp" #include "runtime/javaThread.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/os.inline.hpp" #include "runtime/safepoint.hpp" #include "runtime/vmThread.hpp" +#include "utilities/vmError.hpp" // Mutexes used in the VM (see comment in mutexLocker.hpp): Mutex* Patching_lock = nullptr; Mutex* CompiledMethod_lock = nullptr; Monitor* SystemDictionary_lock = nullptr; -Mutex* InvokeMethodTable_lock = nullptr; +Mutex* InvokeMethodTypeTable_lock = nullptr; +Monitor* InvokeMethodIntrinsicTable_lock = nullptr; Mutex* SharedDictionary_lock = nullptr; Monitor* ClassInitError_lock = nullptr; Mutex* Module_lock = nullptr; @@ -165,7 +166,7 @@ static int _num_mutex; #ifdef ASSERT void assert_locked_or_safepoint(const Mutex* lock) { - if (DebuggingContext::is_enabled()) return; + if (DebuggingContext::is_enabled() || VMError::is_error_reported()) return; // check if this thread owns the lock (common case) assert(lock != nullptr, "Need non-null lock"); if (lock->owned_by_self()) return; @@ -174,19 +175,9 @@ void assert_locked_or_safepoint(const Mutex* lock) { fatal("must own lock %s", lock->name()); } -// a weaker assertion than the above -void assert_locked_or_safepoint_weak(const Mutex* lock) { - if (DebuggingContext::is_enabled()) return; - assert(lock != nullptr, "Need non-null lock"); - if (lock->is_locked()) return; - if (SafepointSynchronize::is_at_safepoint()) return; - if (!Universe::is_fully_initialized()) return; - fatal("must own lock %s", lock->name()); -} - // a stronger assertion than the above void assert_lock_strong(const Mutex* lock) { - if (DebuggingContext::is_enabled()) return; + if 
(DebuggingContext::is_enabled() || VMError::is_error_reported()) return; assert(lock != nullptr, "Need non-null lock"); if (lock->owned_by_self()) return; fatal("must own lock %s", lock->name()); @@ -254,7 +245,9 @@ void mutex_init() { } MUTEX_DEFN(JmethodIdCreation_lock , PaddedMutex , nosafepoint-2); // used for creating jmethodIDs. - MUTEX_DEFN(InvokeMethodTable_lock , PaddedMutex , safepoint); + MUTEX_DEFN(InvokeMethodTypeTable_lock , PaddedMutex , safepoint); + MUTEX_DEFN(InvokeMethodIntrinsicTable_lock , PaddedMonitor, safepoint); + MUTEX_DEFN(AdapterHandlerLibrary_lock , PaddedMutex , safepoint); MUTEX_DEFN(SharedDictionary_lock , PaddedMutex , safepoint); MUTEX_DEFN(VMStatistic_lock , PaddedMutex , safepoint); MUTEX_DEFN(SignatureHandlerLibrary_lock , PaddedMutex , safepoint); @@ -344,7 +337,6 @@ void mutex_init() { MUTEX_DEFL(Threads_lock , PaddedMonitor, CompileThread_lock, true); MUTEX_DEFL(Compile_lock , PaddedMutex , MethodCompileQueue_lock); - MUTEX_DEFL(AdapterHandlerLibrary_lock , PaddedMutex , InvokeMethodTable_lock); MUTEX_DEFL(Heap_lock , PaddedMonitor, AdapterHandlerLibrary_lock); MUTEX_DEFL(PerfDataMemAlloc_lock , PaddedMutex , Heap_lock); diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp index 64b527f97b2..0e7ac8810aa 100644 --- a/src/hotspot/share/runtime/mutexLocker.hpp +++ b/src/hotspot/share/runtime/mutexLocker.hpp @@ -34,7 +34,8 @@ extern Mutex* Patching_lock; // a lock used to guard code patching of compiled code extern Mutex* CompiledMethod_lock; // a lock used to guard a compiled method and OSR queues extern Monitor* SystemDictionary_lock; // a lock on the system dictionary -extern Mutex* InvokeMethodTable_lock; +extern Mutex* InvokeMethodTypeTable_lock; +extern Monitor* InvokeMethodIntrinsicTable_lock; extern Mutex* SharedDictionary_lock; // a lock on the CDS shared dictionary extern Monitor* ClassInitError_lock; // a lock on the class initialization error table extern Mutex* 
Module_lock; // a lock on module and package related data structures @@ -180,11 +181,9 @@ void print_lock_ranks(outputStream* st); // for debugging: check that we're already owning this lock (or are at a safepoint / handshake) #ifdef ASSERT void assert_locked_or_safepoint(const Mutex* lock); -void assert_locked_or_safepoint_weak(const Mutex* lock); void assert_lock_strong(const Mutex* lock); #else #define assert_locked_or_safepoint(lock) -#define assert_locked_or_safepoint_weak(lock) #define assert_lock_strong(lock) #endif diff --git a/src/hotspot/share/runtime/relocator.cpp b/src/hotspot/share/runtime/relocator.cpp index f3f8de2bf33..e61b9366321 100644 --- a/src/hotspot/share/runtime/relocator.cpp +++ b/src/hotspot/share/runtime/relocator.cpp @@ -407,11 +407,24 @@ void Relocator::adjust_exception_table(int bci, int delta) { } } +static void print_linenumber_table(unsigned char* table) { + CompressedLineNumberReadStream stream(table); + tty->print_cr("-------------------------------------------------"); + while (stream.read_pair()) { + tty->print_cr(" - line %d: %d", stream.line(), stream.bci()); + } + tty->print_cr("-------------------------------------------------"); +} // The width of instruction at "bci" is changing by "delta". Adjust the line number table. void Relocator::adjust_line_no_table(int bci, int delta) { if (method()->has_linenumber_table()) { - CompressedLineNumberReadStream reader(method()->compressed_linenumber_table()); + // if we already made adjustments then use the updated table + unsigned char *table = compressed_line_number_table(); + if (table == nullptr) { + table = method()->compressed_linenumber_table(); + } + CompressedLineNumberReadStream reader(table); CompressedLineNumberWriteStream writer(64); // plenty big for most line number tables while (reader.read_pair()) { int adjustment = (reader.bci() > bci) ? 
delta : 0; @@ -420,6 +433,10 @@ void Relocator::adjust_line_no_table(int bci, int delta) { writer.write_terminator(); set_compressed_line_number_table(writer.buffer()); set_compressed_line_number_table_size(writer.position()); + if (TraceRelocator) { + tty->print_cr("Adjusted line number table"); + print_linenumber_table(compressed_line_number_table()); + } } } diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp index ddb9dc6988f..0efc2f7ffae 100644 --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -64,7 +64,7 @@ #include "runtime/interfaceSupport.inline.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" -#include "runtime/jniHandles.hpp" +#include "runtime/jniHandles.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stackWatermarkSet.hpp" #include "runtime/stubRoutines.hpp" @@ -636,24 +636,30 @@ JRT_ENTRY(void, SharedRuntime::notify_jvmti_object_alloc(oopDesc* o, JavaThread* current->set_vm_result(h()); JRT_END -JRT_ENTRY(void, SharedRuntime::notify_jvmti_mount(oopDesc* vt, jboolean hide, jboolean first_mount, JavaThread* current)) +JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_start(oopDesc* vt, jboolean hide, JavaThread* current)) + assert(hide == JNI_FALSE, "must be VTMS transition finish"); jobject vthread = JNIHandles::make_local(const_cast(vt)); - - if (hide) { - JvmtiVTMSTransitionDisabler::VTMS_mount_begin(vthread, first_mount); - } else { - JvmtiVTMSTransitionDisabler::VTMS_mount_end(vthread, first_mount); - } + JvmtiVTMSTransitionDisabler::VTMS_vthread_start(vthread); + JNIHandles::destroy_local(vthread); JRT_END -JRT_ENTRY(void, SharedRuntime::notify_jvmti_unmount(oopDesc* vt, jboolean hide, jboolean last_unmount, JavaThread* current)) +JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_end(oopDesc* vt, jboolean hide, JavaThread* current)) + assert(hide == JNI_TRUE, "must be VTMS transition start"); jobject 
vthread = JNIHandles::make_local(const_cast(vt)); + JvmtiVTMSTransitionDisabler::VTMS_vthread_end(vthread); + JNIHandles::destroy_local(vthread); +JRT_END - if (hide) { - JvmtiVTMSTransitionDisabler::VTMS_unmount_begin(vthread, last_unmount); - } else { - JvmtiVTMSTransitionDisabler::VTMS_unmount_end(vthread, last_unmount); - } +JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_mount(oopDesc* vt, jboolean hide, JavaThread* current)) + jobject vthread = JNIHandles::make_local(const_cast(vt)); + JvmtiVTMSTransitionDisabler::VTMS_vthread_mount(vthread, hide); + JNIHandles::destroy_local(vthread); +JRT_END + +JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_unmount(oopDesc* vt, jboolean hide, JavaThread* current)) + jobject vthread = JNIHandles::make_local(const_cast(vt)); + JvmtiVTMSTransitionDisabler::VTMS_vthread_unmount(vthread, hide); + JNIHandles::destroy_local(vthread); JRT_END #endif // INCLUDE_JVMTI diff --git a/src/hotspot/share/runtime/sharedRuntime.hpp b/src/hotspot/share/runtime/sharedRuntime.hpp index 3b0e90eec3f..e38e8ab4fcc 100644 --- a/src/hotspot/share/runtime/sharedRuntime.hpp +++ b/src/hotspot/share/runtime/sharedRuntime.hpp @@ -267,8 +267,10 @@ class SharedRuntime: AllStatic { #if INCLUDE_JVMTI static void notify_jvmti_object_alloc(oopDesc* o, JavaThread* current); // Functions for JVMTI notifications - static void notify_jvmti_mount(oopDesc* vt, jboolean hide, jboolean first_mount, JavaThread* current); - static void notify_jvmti_unmount(oopDesc* vt, jboolean hide, jboolean last_unmount, JavaThread* current); + static void notify_jvmti_vthread_start(oopDesc* vt, jboolean hide, JavaThread* current); + static void notify_jvmti_vthread_end(oopDesc* vt, jboolean hide, JavaThread* current); + static void notify_jvmti_vthread_mount(oopDesc* vt, jboolean hide, JavaThread* current); + static void notify_jvmti_vthread_unmount(oopDesc* vt, jboolean hide, JavaThread* current); #endif // RedefineClasses() tracing support for obsolete method entry diff 
--git a/src/hotspot/share/runtime/stackChunkFrameStream.inline.hpp b/src/hotspot/share/runtime/stackChunkFrameStream.inline.hpp index e5d300b0016..0a279c57385 100644 --- a/src/hotspot/share/runtime/stackChunkFrameStream.inline.hpp +++ b/src/hotspot/share/runtime/stackChunkFrameStream.inline.hpp @@ -410,7 +410,7 @@ inline void StackChunkFrameStream::iterate_derived_pointers(DerivedO assert(is_in_oops(base_loc, map), "not found: " INTPTR_FORMAT, p2i(base_loc)); assert(!is_in_oops(derived_loc, map), "found: " INTPTR_FORMAT, p2i(derived_loc)); - Devirtualizer::do_derived_oop(closure, (oop*)base_loc, (derived_pointer*)derived_loc); + Devirtualizer::do_derived_oop(closure, (derived_base*)base_loc, (derived_pointer*)derived_loc); } } diff --git a/src/hotspot/share/runtime/stackValue.cpp b/src/hotspot/share/runtime/stackValue.cpp index 9934cb060f6..b08c527dd01 100644 --- a/src/hotspot/share/runtime/stackValue.cpp +++ b/src/hotspot/share/runtime/stackValue.cpp @@ -80,7 +80,19 @@ static oop oop_from_oop_location(stackChunkOop chunk, void* addr) { } // Load oop from stack - return *(oop*)addr; + oop val = *(oop*)addr; + +#if INCLUDE_SHENANDOAHGC + if (UseShenandoahGC) { + // Pass the value through the barrier to avoid capturing bad oops as + // stack values. Note: do not heal the location, to avoid accidentally + // corrupting the stack. Stack watermark barriers are supposed to handle + // the healing. + val = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(val); + } +#endif + + return val; } static oop oop_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_register) { @@ -105,7 +117,19 @@ static oop oop_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_ } // Load oop from stack - return CompressedOops::decode(*narrow_addr); + oop val = CompressedOops::decode(*narrow_addr); + +#if INCLUDE_SHENANDOAHGC + if (UseShenandoahGC) { + // Pass the value through the barrier to avoid capturing bad oops as + // stack values. 
Note: do not heal the location, to avoid accidentally + // corrupting the stack. Stack watermark barriers are supposed to handle + // the healing. + val = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(val); + } +#endif + + return val; } StackValue* StackValue::create_stack_value_from_oop_location(stackChunkOop chunk, void* addr) { diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp index 196892c65dc..8aca915ff78 100644 --- a/src/hotspot/share/runtime/synchronizer.cpp +++ b/src/hotspot/share/runtime/synchronizer.cpp @@ -262,6 +262,9 @@ void ObjectSynchronizer::initialize() { } // Start the ceiling with the estimate for one thread. set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate); + + // Start the timer for deflations, so it does not trigger immediately. + _last_async_deflation_time_ns = os::javaTimeNanos(); } MonitorList ObjectSynchronizer::_in_use_list; @@ -290,6 +293,7 @@ bool volatile ObjectSynchronizer::_is_async_deflation_requested = false; bool volatile ObjectSynchronizer::_is_final_audit = false; jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0; static uintx _no_progress_cnt = 0; +static bool _no_progress_skip_increment = false; // =====================> Quick functions @@ -1080,7 +1084,14 @@ static bool monitors_used_above_threshold(MonitorList* list) { // Check if our monitor usage is above the threshold: size_t monitor_usage = (monitors_used * 100LL) / ceiling; - return int(monitor_usage) > MonitorUsedDeflationThreshold; + if (int(monitor_usage) > MonitorUsedDeflationThreshold) { + log_info(monitorinflation)("monitors_used=" SIZE_FORMAT ", ceiling=" SIZE_FORMAT + ", monitor_usage=" SIZE_FORMAT ", threshold=" INTX_FORMAT, + monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold); + return true; + } + + return false; } size_t ObjectSynchronizer::in_use_list_ceiling() { @@ -1102,17 +1113,49 @@ void ObjectSynchronizer::set_in_use_list_ceiling(size_t new_value) { bool 
ObjectSynchronizer::is_async_deflation_needed() { if (is_async_deflation_requested()) { // Async deflation request. + log_info(monitorinflation)("Async deflation needed: explicit request"); return true; } + + jlong time_since_last = time_since_last_async_deflation_ms(); + if (AsyncDeflationInterval > 0 && - time_since_last_async_deflation_ms() > AsyncDeflationInterval && + time_since_last > AsyncDeflationInterval && monitors_used_above_threshold(&_in_use_list)) { // It's been longer than our specified deflate interval and there // are too many monitors in use. We don't deflate more frequently // than AsyncDeflationInterval (unless is_async_deflation_requested) // in order to not swamp the MonitorDeflationThread. + log_info(monitorinflation)("Async deflation needed: monitors used are above the threshold"); return true; } + + if (GuaranteedAsyncDeflationInterval > 0 && + time_since_last > GuaranteedAsyncDeflationInterval) { + // It's been longer than our specified guaranteed deflate interval. + // We need to clean up the used monitors even if the threshold is + // not reached, to keep the memory utilization at bay when many threads + // touched many monitors. + log_info(monitorinflation)("Async deflation needed: guaranteed interval (" INTX_FORMAT " ms) " + "is greater than time since last deflation (" JLONG_FORMAT " ms)", + GuaranteedAsyncDeflationInterval, time_since_last); + + // If this deflation has no progress, then it should not affect the no-progress + // tracking, otherwise threshold heuristics would think it was triggered, experienced + // no progress, and needs to backoff more aggressively. In this "no progress" case, + // the generic code would bump the no-progress counter, and we compensate for that + // by telling it to skip the update. 
+ // + // If this deflation has progress, then it should let non-progress tracking + // know about this, otherwise the threshold heuristics would kick in, potentially + // experience no-progress due to aggressive cleanup by this deflation, and think + // it is still in no-progress stride. In this "progress" case, the generic code would + // zero the counter, and we allow it to happen. + _no_progress_skip_increment = true; + + return true; + } + return false; } @@ -1530,6 +1573,8 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table) if (deflated_count != 0) { _no_progress_cnt = 0; + } else if (_no_progress_skip_increment) { + _no_progress_skip_increment = false; } else { _no_progress_cnt++; } diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp index 2d8e0291a43..206a231143c 100644 --- a/src/hotspot/share/runtime/thread.cpp +++ b/src/hotspot/share/runtime/thread.cpp @@ -58,15 +58,6 @@ THREAD_LOCAL Thread* Thread::_thr_current = nullptr; #endif // ======= Thread ======== -void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) { - return throw_excpt ? 
AllocateHeap(size, flags, CURRENT_PC) - : AllocateHeap(size, flags, CURRENT_PC, AllocFailStrategy::RETURN_NULL); -} - -void Thread::operator delete(void* p) { - FreeHeap(p); -} - // Base class for all threads: VMThread, WatcherThread, ConcurrentMarkSweepThread, // JavaThread @@ -492,10 +483,11 @@ void Thread::print_on_error(outputStream* st, char* buf, int buflen) const { OSThread* os_thr = osthread(); if (os_thr != nullptr) { + st->fill_to(67); if (os_thr->get_state() != ZOMBIE) { - st->print(" [stack: " PTR_FORMAT "," PTR_FORMAT "]", - p2i(stack_end()), p2i(stack_base())); - st->print(" [id=%d]", osthread()->thread_id()); + st->print(" [id=%d, stack(" PTR_FORMAT "," PTR_FORMAT ") (" PROPERFMT ")]", + osthread()->thread_id(), p2i(stack_end()), p2i(stack_base()), + PROPERFMTARGS(stack_size())); } else { st->print(" terminated"); } diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp index 6a25070e1d8..e4e7badc788 100644 --- a/src/hotspot/share/runtime/thread.hpp +++ b/src/hotspot/share/runtime/thread.hpp @@ -200,14 +200,6 @@ class Thread: public ThreadShadow { // with the calling Thread? static bool is_JavaThread_protected_by_TLH(const JavaThread* target); - void* operator new(size_t size) throw() { return allocate(size, true); } - void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() { - return allocate(size, false); } - void operator delete(void* p); - - protected: - static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread); - private: DEBUG_ONLY(bool _suspendible_thread;) diff --git a/src/hotspot/share/runtime/threads.cpp b/src/hotspot/share/runtime/threads.cpp index 384936b905e..ff1a41f00e6 100644 --- a/src/hotspot/share/runtime/threads.cpp +++ b/src/hotspot/share/runtime/threads.cpp @@ -691,6 +691,11 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { } #endif + // Start string deduplication thread if requested. 
+ if (StringDedup::is_enabled()) { + StringDedup::start(); + } + // Pre-initialize some JSR292 core classes to avoid deadlock during class loading. // It is done after compilers are initialized, because otherwise compilations of // signature polymorphic MH intrinsics can be missed @@ -1267,9 +1272,6 @@ void Threads::print_on(outputStream* st, bool print_stacks, PrintOnClosure cl(st); cl.do_thread(VMThread::vm_thread()); Universe::heap()->gc_threads_do(&cl); - if (StringDedup::is_enabled()) { - StringDedup::threads_do(&cl); - } cl.do_thread(WatcherThread::watcher_thread()); cl.do_thread(AsyncLogWriter::instance()); @@ -1296,14 +1298,19 @@ class PrintOnErrorClosure : public ThreadClosure { char* _buf; int _buflen; bool* _found_current; + unsigned _num_printed; public: PrintOnErrorClosure(outputStream* st, Thread* current, char* buf, int buflen, bool* found_current) : - _st(st), _current(current), _buf(buf), _buflen(buflen), _found_current(found_current) {} + _st(st), _current(current), _buf(buf), _buflen(buflen), _found_current(found_current), + _num_printed(0) {} virtual void do_thread(Thread* thread) { + _num_printed++; Threads::print_on_error(thread, _st, _current, _buf, _buflen, _found_current); } + + unsigned num_printed() const { return _num_printed; } }; // Threads::print_on_error() is called by fatal error handler. It's possible @@ -1317,12 +1324,18 @@ void Threads::print_on_error(outputStream* st, Thread* current, char* buf, bool found_current = false; st->print_cr("Java Threads: ( => current thread )"); + unsigned num_java = 0; ALL_JAVA_THREADS(thread) { print_on_error(thread, st, current, buf, buflen, &found_current); + num_java++; } + st->print_cr("Total: %u", num_java); st->cr(); st->print_cr("Other Threads:"); + unsigned num_other = ((VMThread::vm_thread() != nullptr) ? 1 : 0) + + ((WatcherThread::watcher_thread() != nullptr) ? 1 : 0) + + ((AsyncLogWriter::instance() != nullptr) ? 
1 : 0); print_on_error(VMThread::vm_thread(), st, current, buf, buflen, &found_current); print_on_error(WatcherThread::watcher_thread(), st, current, buf, buflen, &found_current); print_on_error(AsyncLogWriter::instance(), st, current, buf, buflen, &found_current); @@ -1330,26 +1343,26 @@ void Threads::print_on_error(outputStream* st, Thread* current, char* buf, if (Universe::heap() != nullptr) { PrintOnErrorClosure print_closure(st, current, buf, buflen, &found_current); Universe::heap()->gc_threads_do(&print_closure); - } - - if (StringDedup::is_enabled()) { - PrintOnErrorClosure print_closure(st, current, buf, buflen, &found_current); - StringDedup::threads_do(&print_closure); + num_other += print_closure.num_printed(); } if (!found_current) { st->cr(); st->print("=>" PTR_FORMAT " (exited) ", p2i(current)); current->print_on_error(st, buf, buflen); + num_other++; st->cr(); } + st->print_cr("Total: %u", num_other); st->cr(); st->print_cr("Threads with active compile tasks:"); - print_threads_compiling(st, buf, buflen); + unsigned num = print_threads_compiling(st, buf, buflen); + st->print_cr("Total: %u", num); } -void Threads::print_threads_compiling(outputStream* st, char* buf, int buflen, bool short_form) { +unsigned Threads::print_threads_compiling(outputStream* st, char* buf, int buflen, bool short_form) { + unsigned num = 0; ALL_JAVA_THREADS(thread) { if (thread->is_Compiler_thread()) { CompilerThread* ct = (CompilerThread*) thread; @@ -1363,9 +1376,11 @@ void Threads::print_threads_compiling(outputStream* st, char* buf, int buflen, b thread->print_name_on_error(st, buf, buflen); st->print(" "); task->print(st, nullptr, short_form, true); + num++; } } } + return num; } void Threads::verify() { diff --git a/src/hotspot/share/runtime/threads.hpp b/src/hotspot/share/runtime/threads.hpp index 520404e2e7c..b81c74caa13 100644 --- a/src/hotspot/share/runtime/threads.hpp +++ b/src/hotspot/share/runtime/threads.hpp @@ -128,7 +128,8 @@ public: static void 
print_on_error(outputStream* st, Thread* current, char* buf, int buflen); static void print_on_error(Thread* this_thread, outputStream* st, Thread* current, char* buf, int buflen, bool* found_current); - static void print_threads_compiling(outputStream* st, char* buf, int buflen, bool short_form = false); + // Print threads busy compiling, and returns the number of printed threads. + static unsigned print_threads_compiling(outputStream* st, char* buf, int buflen, bool short_form = false); // Get Java threads that are waiting to enter a monitor. static GrowableArray* get_pending_threads(ThreadsList * t_list, diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index b3d8584f82c..b74012122a7 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -48,6 +48,7 @@ #include "code/vmreg.hpp" #include "compiler/compileBroker.hpp" #include "compiler/oopMap.hpp" +#include "gc/shared/stringdedup/stringDedupThread.hpp" #include "gc/shared/vmStructs_gc.hpp" #include "interpreter/bytecodes.hpp" #include "interpreter/interpreter.hpp" @@ -236,7 +237,6 @@ nonstatic_field(InstanceKlass, _static_field_size, int) \ nonstatic_field(InstanceKlass, _static_oop_field_count, u2) \ nonstatic_field(InstanceKlass, _nonstatic_oop_map_size, int) \ - nonstatic_field(InstanceKlass, _is_marked_dependent, bool) \ volatile_nonstatic_field(InstanceKlass, _init_state, InstanceKlass::ClassState) \ volatile_nonstatic_field(InstanceKlass, _init_thread, JavaThread*) \ nonstatic_field(InstanceKlass, _itable_len, int) \ @@ -299,7 +299,6 @@ nonstatic_field(Method, _access_flags, AccessFlags) \ nonstatic_field(Method, _vtable_index, int) \ nonstatic_field(Method, _intrinsic_id, u2) \ - nonstatic_field(Method, _flags, u2) \ volatile_nonstatic_field(Method, _code, CompiledMethod*) \ nonstatic_field(Method, _i2i_entry, address) \ volatile_nonstatic_field(Method, _from_compiled_entry, address) \ @@ -308,7 +307,7 @@ 
nonstatic_field(ConstMethod, _constants, ConstantPool*) \ nonstatic_field(ConstMethod, _stackmap_data, Array*) \ nonstatic_field(ConstMethod, _constMethod_size, int) \ - nonstatic_field(ConstMethod, _flags, u2) \ + nonstatic_field(ConstMethod, _flags._flags, u4) \ nonstatic_field(ConstMethod, _code_size, u2) \ nonstatic_field(ConstMethod, _name_index, u2) \ nonstatic_field(ConstMethod, _signature_index, u2) \ @@ -1312,6 +1311,7 @@ declare_type(ServiceThread, JavaThread) \ declare_type(NotificationThread, JavaThread) \ declare_type(CompilerThread, JavaThread) \ + declare_type(StringDedupThread, JavaThread) \ declare_toplevel_type(OSThread) \ declare_toplevel_type(JavaFrameAnchor) \ \ @@ -2084,23 +2084,8 @@ /************************************************************/ \ \ declare_constant(JVM_ACC_WRITTEN_FLAGS) \ - declare_constant(JVM_ACC_MONITOR_MATCH) \ - declare_constant(JVM_ACC_HAS_MONITOR_BYTECODES) \ - declare_constant(JVM_ACC_HAS_LOOPS) \ - declare_constant(JVM_ACC_LOOPS_FLAG_INIT) \ - declare_constant(JVM_ACC_QUEUED) \ - declare_constant(JVM_ACC_NOT_C2_OSR_COMPILABLE) \ - declare_constant(JVM_ACC_HAS_LINE_NUMBER_TABLE) \ - declare_constant(JVM_ACC_HAS_CHECKED_EXCEPTIONS) \ - declare_constant(JVM_ACC_HAS_JSRS) \ - declare_constant(JVM_ACC_IS_OLD) \ - declare_constant(JVM_ACC_IS_OBSOLETE) \ - declare_constant(JVM_ACC_IS_PREFIXED_NATIVE) \ - declare_constant(JVM_ACC_HAS_MIRANDA_METHODS) \ - declare_constant(JVM_ACC_HAS_VANILLA_CONSTRUCTOR) \ declare_constant(JVM_ACC_HAS_FINALIZER) \ declare_constant(JVM_ACC_IS_CLONEABLE_FAST) \ - declare_constant(JVM_ACC_HAS_LOCAL_VARIABLE_TABLE) \ \ declare_constant(JVM_CONSTANT_Utf8) \ declare_constant(JVM_CONSTANT_Unicode) \ @@ -2182,30 +2167,23 @@ declare_constant(Klass::_lh_array_tag_type_value) \ declare_constant(Klass::_lh_array_tag_obj_value) \ \ + declare_constant(Method::nonvirtual_vtable_index) \ + declare_constant(Method::extra_stack_entries_for_jsr292) \ + \ /********************************/ \ /* ConstMethod 
anon-enum */ \ /********************************/ \ \ - declare_constant(Method::_caller_sensitive) \ - declare_constant(Method::_force_inline) \ - declare_constant(Method::_dont_inline) \ - declare_constant(Method::_hidden) \ - declare_constant(Method::_changes_current_thread) \ - \ - declare_constant(Method::nonvirtual_vtable_index) \ - \ - declare_constant(Method::extra_stack_entries_for_jsr292) \ - \ - declare_constant(ConstMethod::_has_linenumber_table) \ - declare_constant(ConstMethod::_has_checked_exceptions) \ - declare_constant(ConstMethod::_has_localvariable_table) \ - declare_constant(ConstMethod::_has_exception_table) \ - declare_constant(ConstMethod::_has_generic_signature) \ - declare_constant(ConstMethod::_has_method_parameters) \ - declare_constant(ConstMethod::_has_method_annotations) \ - declare_constant(ConstMethod::_has_parameter_annotations) \ - declare_constant(ConstMethod::_has_default_annotations) \ - declare_constant(ConstMethod::_has_type_annotations) \ + declare_constant(ConstMethodFlags::_misc_has_linenumber_table) \ + declare_constant(ConstMethodFlags::_misc_has_checked_exceptions) \ + declare_constant(ConstMethodFlags::_misc_has_localvariable_table) \ + declare_constant(ConstMethodFlags::_misc_has_exception_table) \ + declare_constant(ConstMethodFlags::_misc_has_generic_signature) \ + declare_constant(ConstMethodFlags::_misc_has_method_parameters) \ + declare_constant(ConstMethodFlags::_misc_has_method_annotations) \ + declare_constant(ConstMethodFlags::_misc_has_parameter_annotations) \ + declare_constant(ConstMethodFlags::_misc_has_default_annotations) \ + declare_constant(ConstMethodFlags::_misc_has_type_annotations) \ \ /**************/ \ /* DataLayout */ \ diff --git a/src/hotspot/share/services/memoryManager.cpp b/src/hotspot/share/services/memoryManager.cpp index d4851840175..4c92b6651e0 100644 --- a/src/hotspot/share/services/memoryManager.cpp +++ b/src/hotspot/share/services/memoryManager.cpp @@ -176,8 +176,8 @@ void 
GCStatInfo::clear() { } -GCMemoryManager::GCMemoryManager(const char* name, const char* gc_end_message) : - MemoryManager(name), _gc_end_message(gc_end_message) { +GCMemoryManager::GCMemoryManager(const char* name) : + MemoryManager(name) { _num_collections = 0; _last_gc_stat = nullptr; _last_gc_lock = new Mutex(Mutex::nosafepoint, "GCMemoryManager_lock"); @@ -241,9 +241,11 @@ void GCMemoryManager::gc_begin(bool recordGCBeginTime, bool recordPreGCUsage, // to ensure the current gc stat is placed in _last_gc_stat. void GCMemoryManager::gc_end(bool recordPostGCUsage, bool recordAccumulatedGCTime, - bool recordGCEndTime, bool countCollection, + bool recordGCEndTime, + bool countCollection, GCCause::Cause cause, - bool allMemoryPoolsAffected) { + bool allMemoryPoolsAffected, + const char* message) { if (recordAccumulatedGCTime) { _accumulated_timer.stop(); } @@ -293,7 +295,7 @@ void GCMemoryManager::gc_end(bool recordPostGCUsage, } if (is_notification_enabled()) { - GCNotifier::pushNotification(this, _gc_end_message, GCCause::to_string(cause)); + GCNotifier::pushNotification(this, message, GCCause::to_string(cause)); } } } diff --git a/src/hotspot/share/services/memoryManager.hpp b/src/hotspot/share/services/memoryManager.hpp index ec40a9fc0d2..3e4e85a194e 100644 --- a/src/hotspot/share/services/memoryManager.hpp +++ b/src/hotspot/share/services/memoryManager.hpp @@ -140,11 +140,10 @@ private: GCStatInfo* _current_gc_stat; int _num_gc_threads; volatile bool _notification_enabled; - const char* _gc_end_message; bool _pool_always_affected_by_gc[MemoryManager::max_num_pools]; public: - GCMemoryManager(const char* name, const char* gc_end_message); + GCMemoryManager(const char* name); ~GCMemoryManager(); void add_pool(MemoryPool* pool); @@ -167,7 +166,7 @@ public: bool recordAccumulatedGCTime); void gc_end(bool recordPostGCUsage, bool recordAccumulatedGCTime, bool recordGCEndTime, bool countCollection, GCCause::Cause cause, - bool allMemoryPoolsAffected); + bool 
allMemoryPoolsAffected, const char* message); void reset_gc_stat() { _num_collections = 0; _accumulated_timer.reset(); } diff --git a/src/hotspot/share/services/memoryService.cpp b/src/hotspot/share/services/memoryService.cpp index 4391a8c2611..21b773e204e 100644 --- a/src/hotspot/share/services/memoryService.cpp +++ b/src/hotspot/share/services/memoryService.cpp @@ -183,10 +183,10 @@ void MemoryService::gc_end(GCMemoryManager* manager, bool recordPostGCUsage, bool recordAccumulatedGCTime, bool recordGCEndTime, bool countCollection, GCCause::Cause cause, - bool allMemoryPoolsAffected) { + bool allMemoryPoolsAffected, const char* message) { // register the GC end statistics and memory usage manager->gc_end(recordPostGCUsage, recordAccumulatedGCTime, recordGCEndTime, - countCollection, cause, allMemoryPoolsAffected); + countCollection, cause, allMemoryPoolsAffected, message); } bool MemoryService::set_verbose(bool verbose) { @@ -220,6 +220,7 @@ Handle MemoryService::create_MemoryUsage_obj(MemoryUsage usage, TRAPS) { TraceMemoryManagerStats::TraceMemoryManagerStats(GCMemoryManager* gc_memory_manager, GCCause::Cause cause, + const char* end_message, bool allMemoryPoolsAffected, bool recordGCBeginTime, bool recordPreGCUsage, @@ -228,16 +229,17 @@ TraceMemoryManagerStats::TraceMemoryManagerStats(GCMemoryManager* gc_memory_mana bool recordAccumulatedGCTime, bool recordGCEndTime, bool countCollection) { - initialize(gc_memory_manager, cause, allMemoryPoolsAffected, - recordGCBeginTime, recordPreGCUsage, recordPeakUsage, - recordPostGCUsage, recordAccumulatedGCTime, recordGCEndTime, - countCollection); + initialize(gc_memory_manager, cause, end_message, + allMemoryPoolsAffected, recordGCBeginTime, recordPreGCUsage, + recordPeakUsage, recordPostGCUsage, recordAccumulatedGCTime, + recordGCEndTime, countCollection); } // for a subclass to create then initialize an instance before invoking // the MemoryService void TraceMemoryManagerStats::initialize(GCMemoryManager* 
gc_memory_manager, GCCause::Cause cause, + const char* end_message, bool allMemoryPoolsAffected, bool recordGCBeginTime, bool recordPreGCUsage, @@ -247,6 +249,8 @@ void TraceMemoryManagerStats::initialize(GCMemoryManager* gc_memory_manager, bool recordGCEndTime, bool countCollection) { _gc_memory_manager = gc_memory_manager; + _cause = cause; + _end_message = end_message; _allMemoryPoolsAffected = allMemoryPoolsAffected; _recordGCBeginTime = recordGCBeginTime; _recordPreGCUsage = recordPreGCUsage; @@ -255,7 +259,6 @@ void TraceMemoryManagerStats::initialize(GCMemoryManager* gc_memory_manager, _recordAccumulatedGCTime = recordAccumulatedGCTime; _recordGCEndTime = recordGCEndTime; _countCollection = countCollection; - _cause = cause; MemoryService::gc_begin(_gc_memory_manager, _recordGCBeginTime, _recordAccumulatedGCTime, _recordPreGCUsage, _recordPeakUsage); @@ -263,5 +266,6 @@ void TraceMemoryManagerStats::initialize(GCMemoryManager* gc_memory_manager, TraceMemoryManagerStats::~TraceMemoryManagerStats() { MemoryService::gc_end(_gc_memory_manager, _recordPostGCUsage, _recordAccumulatedGCTime, - _recordGCEndTime, _countCollection, _cause, _allMemoryPoolsAffected); + _recordGCEndTime, _countCollection, _cause, _allMemoryPoolsAffected, + _end_message); } diff --git a/src/hotspot/share/services/memoryService.hpp b/src/hotspot/share/services/memoryService.hpp index 3c7bc483dfb..a00e49dd0a7 100644 --- a/src/hotspot/share/services/memoryService.hpp +++ b/src/hotspot/share/services/memoryService.hpp @@ -104,7 +104,7 @@ public: bool recordAccumulatedGCTime, bool recordGCEndTime, bool countCollection, GCCause::Cause cause, - bool allMemoryPoolsAffected); + bool allMemoryPoolsAffected, const char* notificationMessage = nullptr); static bool get_verbose() { return log_is_enabled(Info, gc); } static bool set_verbose(bool verbose); @@ -116,19 +116,21 @@ public: class TraceMemoryManagerStats : public StackObj { private: GCMemoryManager* _gc_memory_manager; - bool 
_allMemoryPoolsAffected; - bool _recordGCBeginTime; - bool _recordPreGCUsage; - bool _recordPeakUsage; - bool _recordPostGCUsage; - bool _recordAccumulatedGCTime; - bool _recordGCEndTime; - bool _countCollection; - GCCause::Cause _cause; + GCCause::Cause _cause; + const char* _end_message; + bool _allMemoryPoolsAffected; + bool _recordGCBeginTime; + bool _recordPreGCUsage; + bool _recordPeakUsage; + bool _recordPostGCUsage; + bool _recordAccumulatedGCTime; + bool _recordGCEndTime; + bool _countCollection; public: TraceMemoryManagerStats() {} TraceMemoryManagerStats(GCMemoryManager* gc_memory_manager, GCCause::Cause cause, + const char* end_message, bool allMemoryPoolsAffected = true, bool recordGCBeginTime = true, bool recordPreGCUsage = true, @@ -140,6 +142,7 @@ public: void initialize(GCMemoryManager* gc_memory_manager, GCCause::Cause cause, + const char* end_message, bool allMemoryPoolsAffected, bool recordGCBeginTime, bool recordPreGCUsage, diff --git a/src/hotspot/share/utilities/accessFlags.cpp b/src/hotspot/share/utilities/accessFlags.cpp index 94219bdb686..ec68d124488 100644 --- a/src/hotspot/share/utilities/accessFlags.cpp +++ b/src/hotspot/share/utilities/accessFlags.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,27 +27,6 @@ #include "runtime/atomic.hpp" #include "utilities/accessFlags.hpp" -void AccessFlags::atomic_set_bits(jint bits) { - // Atomically update the flags with the bits given - jint old_flags, new_flags, f; - do { - old_flags = _flags; - new_flags = old_flags | bits; - f = Atomic::cmpxchg(&_flags, old_flags, new_flags); - } while(f != old_flags); -} - -void AccessFlags::atomic_clear_bits(jint bits) { - // Atomically update the flags with the bits given - jint old_flags, new_flags, f; - do { - old_flags = _flags; - new_flags = old_flags & ~bits; - f = Atomic::cmpxchg(&_flags, old_flags, new_flags); - } while(f != old_flags); -} - - #if !defined(PRODUCT) || INCLUDE_JVMTI void AccessFlags::print_on(outputStream* st) const { @@ -63,9 +42,6 @@ void AccessFlags::print_on(outputStream* st) const { if (is_interface ()) st->print("interface " ); if (is_abstract ()) st->print("abstract " ); if (is_synthetic ()) st->print("synthetic " ); - if (is_old ()) st->print("{old} " ); - if (is_obsolete ()) st->print("{obsolete} " ); - if (on_stack ()) st->print("{on_stack} " ); } #endif // !PRODUCT || INCLUDE_JVMTI diff --git a/src/hotspot/share/utilities/accessFlags.hpp b/src/hotspot/share/utilities/accessFlags.hpp index 901be7964e4..78bf179e03a 100644 --- a/src/hotspot/share/utilities/accessFlags.hpp +++ b/src/hotspot/share/utilities/accessFlags.hpp @@ -37,50 +37,24 @@ class outputStream; enum { // See jvm.h for shared JVM_ACC_XXX access flags - // HotSpot-specific access flags - // flags actually put in .class file JVM_ACC_WRITTEN_FLAGS = 0x00007FFF, - // Method* flags - JVM_ACC_MONITOR_MATCH = 0x10000000, // True if we know that monitorenter/monitorexit bytecodes match - JVM_ACC_HAS_MONITOR_BYTECODES = 0x20000000, // Method contains monitorenter/monitorexit bytecodes - JVM_ACC_HAS_LOOPS = 0x40000000, // Method has loops - JVM_ACC_LOOPS_FLAG_INIT = (int)0x80000000,// The loop flag has been 
initialized - JVM_ACC_QUEUED = 0x01000000, // Queued for compilation - JVM_ACC_NOT_C2_COMPILABLE = 0x02000000, - JVM_ACC_NOT_C1_COMPILABLE = 0x04000000, - JVM_ACC_NOT_C2_OSR_COMPILABLE = 0x08000000, - JVM_ACC_HAS_LINE_NUMBER_TABLE = 0x00100000, - JVM_ACC_HAS_CHECKED_EXCEPTIONS = 0x00400000, - JVM_ACC_HAS_JSRS = 0x00800000, - JVM_ACC_IS_OLD = 0x00010000, // RedefineClasses() has replaced this method - JVM_ACC_IS_OBSOLETE = 0x00020000, // RedefineClasses() has made method obsolete - JVM_ACC_IS_PREFIXED_NATIVE = 0x00040000, // JVMTI has prefixed this native method - JVM_ACC_ON_STACK = 0x00080000, // RedefineClasses() was used on the stack - JVM_ACC_IS_DELETED = 0x00008000, // RedefineClasses() has deleted this method - - // Klass* flags - JVM_ACC_HAS_MIRANDA_METHODS = 0x10000000, // True if this class has miranda methods in it's vtable - JVM_ACC_HAS_VANILLA_CONSTRUCTOR = 0x20000000, // True if klass has a vanilla default constructor + // HotSpot-specific access flags + // These Klass flags should be migrated, to a field such as InstanceKlass::_misc_flags, + // or to a similar flags field in Klass itself. + // Do not add new ACC flags here. JVM_ACC_HAS_FINALIZER = 0x40000000, // True if klass has a non-empty finalize() method JVM_ACC_IS_CLONEABLE_FAST = (int)0x80000000,// True if klass implements the Cloneable interface and can be optimized in generated code - JVM_ACC_HAS_FINAL_METHOD = 0x01000000, // True if klass has final method - JVM_ACC_IS_SHARED_CLASS = 0x02000000, // True if klass is shared JVM_ACC_IS_HIDDEN_CLASS = 0x04000000, // True if klass is hidden JVM_ACC_IS_VALUE_BASED_CLASS = 0x08000000, // True if klass is marked as a ValueBased class - JVM_ACC_IS_BEING_REDEFINED = 0x00100000, // True if the klass is being redefined. 
- JVM_ACC_HAS_RESOLVED_METHODS = 0x00200000, // True if the klass has resolved methods - - // Method* flags - JVM_ACC_HAS_LOCAL_VARIABLE_TABLE= 0x00400000, }; class AccessFlags { friend class VMStructs; private: - jint _flags; + jint _flags; // TODO: move 4 access flags above to Klass and change to u2 public: AccessFlags() : _flags(0) {} @@ -103,47 +77,12 @@ class AccessFlags { // Attribute flags bool is_synthetic () const { return (_flags & JVM_ACC_SYNTHETIC ) != 0; } - // Method* flags - bool is_monitor_matching () const { return (_flags & JVM_ACC_MONITOR_MATCH ) != 0; } - bool has_monitor_bytecodes () const { return (_flags & JVM_ACC_HAS_MONITOR_BYTECODES ) != 0; } - bool has_loops () const { return (_flags & JVM_ACC_HAS_LOOPS ) != 0; } - bool loops_flag_init () const { return (_flags & JVM_ACC_LOOPS_FLAG_INIT ) != 0; } - bool queued_for_compilation () const { return (_flags & JVM_ACC_QUEUED ) != 0; } - bool is_not_c1_compilable () const { return (_flags & JVM_ACC_NOT_C1_COMPILABLE ) != 0; } - bool is_not_c2_compilable () const { return (_flags & JVM_ACC_NOT_C2_COMPILABLE ) != 0; } - bool is_not_c2_osr_compilable() const { return (_flags & JVM_ACC_NOT_C2_OSR_COMPILABLE ) != 0; } - bool has_linenumber_table () const { return (_flags & JVM_ACC_HAS_LINE_NUMBER_TABLE ) != 0; } - bool has_checked_exceptions () const { return (_flags & JVM_ACC_HAS_CHECKED_EXCEPTIONS ) != 0; } - bool has_jsrs () const { return (_flags & JVM_ACC_HAS_JSRS ) != 0; } - bool is_old () const { return (_flags & JVM_ACC_IS_OLD ) != 0; } - bool is_obsolete () const { return (_flags & JVM_ACC_IS_OBSOLETE ) != 0; } - bool is_deleted () const { return (_flags & JVM_ACC_IS_DELETED ) != 0; } - bool is_prefixed_native () const { return (_flags & JVM_ACC_IS_PREFIXED_NATIVE ) != 0; } - // Klass* flags - bool has_miranda_methods () const { return (_flags & JVM_ACC_HAS_MIRANDA_METHODS ) != 0; } - bool has_vanilla_constructor () const { return (_flags & JVM_ACC_HAS_VANILLA_CONSTRUCTOR) != 0; } bool 
has_finalizer () const { return (_flags & JVM_ACC_HAS_FINALIZER ) != 0; } - bool has_final_method () const { return (_flags & JVM_ACC_HAS_FINAL_METHOD ) != 0; } bool is_cloneable_fast () const { return (_flags & JVM_ACC_IS_CLONEABLE_FAST ) != 0; } - bool is_shared_class () const { return (_flags & JVM_ACC_IS_SHARED_CLASS ) != 0; } bool is_hidden_class () const { return (_flags & JVM_ACC_IS_HIDDEN_CLASS ) != 0; } bool is_value_based_class () const { return (_flags & JVM_ACC_IS_VALUE_BASED_CLASS ) != 0; } - // Method* flags - bool has_localvariable_table () const { return (_flags & JVM_ACC_HAS_LOCAL_VARIABLE_TABLE) != 0; } - void set_has_localvariable_table() { atomic_set_bits(JVM_ACC_HAS_LOCAL_VARIABLE_TABLE); } - void clear_has_localvariable_table() { atomic_clear_bits(JVM_ACC_HAS_LOCAL_VARIABLE_TABLE); } - - bool is_being_redefined() const { return (_flags & JVM_ACC_IS_BEING_REDEFINED) != 0; } - void set_is_being_redefined() { atomic_set_bits(JVM_ACC_IS_BEING_REDEFINED); } - void clear_is_being_redefined() { atomic_clear_bits(JVM_ACC_IS_BEING_REDEFINED); } - - bool has_resolved_methods() const { return (_flags & JVM_ACC_HAS_RESOLVED_METHODS) != 0; } - void set_has_resolved_methods() { atomic_set_bits(JVM_ACC_HAS_RESOLVED_METHODS); } - - bool on_stack() const { return (_flags & JVM_ACC_ON_STACK) != 0; } - // get .class file flags jint get_flags () const { return (_flags & JVM_ACC_WRITTEN_FLAGS); } @@ -154,61 +93,23 @@ class AccessFlags { } void set_flags(jint flags) { _flags = (flags & JVM_ACC_WRITTEN_FLAGS); } - void set_queued_for_compilation() { atomic_set_bits(JVM_ACC_QUEUED); } - void clear_queued_for_compilation() { atomic_clear_bits(JVM_ACC_QUEUED); } - - // Atomic update of flags - void atomic_set_bits(jint bits); - void atomic_clear_bits(jint bits); - private: - friend class Method; friend class Klass; friend class ClassFileParser; // the functions below should only be called on the _access_flags inst var directly, // otherwise they are just changing a 
copy of the flags // attribute flags - void set_is_synthetic() { atomic_set_bits(JVM_ACC_SYNTHETIC); } + void set_is_synthetic() { _flags |= JVM_ACC_SYNTHETIC; } - // Method* flags - void set_monitor_matching() { atomic_set_bits(JVM_ACC_MONITOR_MATCH); } - void set_has_monitor_bytecodes() { atomic_set_bits(JVM_ACC_HAS_MONITOR_BYTECODES); } - void set_has_loops() { atomic_set_bits(JVM_ACC_HAS_LOOPS); } - void set_loops_flag_init() { atomic_set_bits(JVM_ACC_LOOPS_FLAG_INIT); } - void set_not_c1_compilable() { atomic_set_bits(JVM_ACC_NOT_C1_COMPILABLE); } - void set_not_c2_compilable() { atomic_set_bits(JVM_ACC_NOT_C2_COMPILABLE); } - void set_not_c2_osr_compilable() { atomic_set_bits(JVM_ACC_NOT_C2_OSR_COMPILABLE); } - void set_has_linenumber_table() { atomic_set_bits(JVM_ACC_HAS_LINE_NUMBER_TABLE); } - void set_has_checked_exceptions() { atomic_set_bits(JVM_ACC_HAS_CHECKED_EXCEPTIONS); } - void set_has_jsrs() { atomic_set_bits(JVM_ACC_HAS_JSRS); } - void set_is_old() { atomic_set_bits(JVM_ACC_IS_OLD); } - void set_is_obsolete() { atomic_set_bits(JVM_ACC_IS_OBSOLETE); } - void set_is_deleted() { atomic_set_bits(JVM_ACC_IS_DELETED); } - void set_is_prefixed_native() { atomic_set_bits(JVM_ACC_IS_PREFIXED_NATIVE); } - - void clear_not_c1_compilable() { atomic_clear_bits(JVM_ACC_NOT_C1_COMPILABLE); } - void clear_not_c2_compilable() { atomic_clear_bits(JVM_ACC_NOT_C2_COMPILABLE); } - void clear_not_c2_osr_compilable() { atomic_clear_bits(JVM_ACC_NOT_C2_OSR_COMPILABLE); } // Klass* flags - void set_has_vanilla_constructor() { atomic_set_bits(JVM_ACC_HAS_VANILLA_CONSTRUCTOR); } - void set_has_finalizer() { atomic_set_bits(JVM_ACC_HAS_FINALIZER); } - void set_has_final_method() { atomic_set_bits(JVM_ACC_HAS_FINAL_METHOD); } - void set_is_cloneable_fast() { atomic_set_bits(JVM_ACC_IS_CLONEABLE_FAST); } - void set_has_miranda_methods() { atomic_set_bits(JVM_ACC_HAS_MIRANDA_METHODS); } - void set_is_shared_class() { atomic_set_bits(JVM_ACC_IS_SHARED_CLASS); } - void 
set_is_hidden_class() { atomic_set_bits(JVM_ACC_IS_HIDDEN_CLASS); } - void set_is_value_based_class() { atomic_set_bits(JVM_ACC_IS_VALUE_BASED_CLASS); } + // These are set at classfile parsing time so do not require atomic access. + void set_has_finalizer() { _flags |= JVM_ACC_HAS_FINALIZER; } + void set_is_cloneable_fast() { _flags |= JVM_ACC_IS_CLONEABLE_FAST; } + void set_is_hidden_class() { _flags |= JVM_ACC_IS_HIDDEN_CLASS; } + void set_is_value_based_class() { _flags |= JVM_ACC_IS_VALUE_BASED_CLASS; } public: - void set_on_stack(const bool value) - { - if (value) { - atomic_set_bits(JVM_ACC_ON_STACK); - } else { - atomic_clear_bits(JVM_ACC_ON_STACK); - } - } // Conversion jshort as_short() const { return (jshort)_flags; } jint as_int() const { return _flags; } diff --git a/src/hotspot/share/utilities/compilerWarnings.hpp b/src/hotspot/share/utilities/compilerWarnings.hpp index c3de6863f8a..6fd085530eb 100644 --- a/src/hotspot/share/utilities/compilerWarnings.hpp +++ b/src/hotspot/share/utilities/compilerWarnings.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,6 +55,10 @@ #define ATTRIBUTE_SCANF(fmt, vargs) #endif +#ifndef PRAGMA_DANGLING_POINTER_IGNORED +#define PRAGMA_DANGLING_POINTER_IGNORED +#endif + #ifndef PRAGMA_FORMAT_NONLITERAL_IGNORED #define PRAGMA_FORMAT_NONLITERAL_IGNORED #endif diff --git a/src/hotspot/share/utilities/compilerWarnings_gcc.hpp b/src/hotspot/share/utilities/compilerWarnings_gcc.hpp index 3cfbd0326e7..8104c8f7684 100644 --- a/src/hotspot/share/utilities/compilerWarnings_gcc.hpp +++ b/src/hotspot/share/utilities/compilerWarnings_gcc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,11 @@ #define PRAGMA_DIAG_PUSH _Pragma("GCC diagnostic push") #define PRAGMA_DIAG_POP _Pragma("GCC diagnostic pop") +// Disable -Wdangling-pointer which is introduced in GCC 12. +#if !defined(__clang_major__) && (__GNUC__ >= 12) +#define PRAGMA_DANGLING_POINTER_IGNORED PRAGMA_DISABLE_GCC_WARNING("-Wdangling-pointer") +#endif + #define PRAGMA_FORMAT_NONLITERAL_IGNORED \ PRAGMA_DISABLE_GCC_WARNING("-Wformat-nonliteral") \ PRAGMA_DISABLE_GCC_WARNING("-Wformat-security") diff --git a/src/hotspot/share/utilities/devirtualizer.hpp b/src/hotspot/share/utilities/devirtualizer.hpp index 813a56d3740..b4d444dc5a8 100644 --- a/src/hotspot/share/utilities/devirtualizer.hpp +++ b/src/hotspot/share/utilities/devirtualizer.hpp @@ -38,7 +38,7 @@ class Devirtualizer { template static void do_klass(OopClosureType* closure, Klass* k); template static void do_cld(OopClosureType* closure, ClassLoaderData* cld); template static bool do_metadata(OopClosureType* closure); - template static void do_derived_oop(DerivedOopClosureType* closure, oop* base, derived_pointer* derived); + template static void do_derived_oop(DerivedOopClosureType* closure, derived_base* base, derived_pointer* derived); template static bool do_bit(BitMapClosureType* closure, BitMap::idx_t index); }; diff --git a/src/hotspot/share/utilities/devirtualizer.inline.hpp b/src/hotspot/share/utilities/devirtualizer.inline.hpp index 001596874cc..0cae27f87e9 100644 --- a/src/hotspot/share/utilities/devirtualizer.inline.hpp +++ b/src/hotspot/share/utilities/devirtualizer.inline.hpp @@ -154,18 +154,18 @@ void Devirtualizer::do_cld(OopClosureType* closure, ClassLoaderData* cld) { template static typename EnableIf::value, void>::type -call_do_derived_oop(void (Receiver::*)(oop*, derived_pointer*), void 
(Base::*)(oop*, derived_pointer*), DerivedOopClosureType* closure, oop* base, derived_pointer* derived) { +call_do_derived_oop(void (Receiver::*)(derived_base*, derived_pointer*), void (Base::*)(derived_base*, derived_pointer*), DerivedOopClosureType* closure, derived_base* base, derived_pointer* derived) { closure->do_derived_oop(base, derived); } template static typename EnableIf::value, void>::type -call_do_derived_oop(void (Receiver::*)(oop*, derived_pointer*), void (Base::*)(oop*, derived_pointer*), DerivedOopClosureType* closure, oop* base, derived_pointer* derived) { +call_do_derived_oop(void (Receiver::*)(derived_base*, derived_pointer*), void (Base::*)(derived_base*, derived_pointer*), DerivedOopClosureType* closure, derived_base* base, derived_pointer* derived) { closure->DerivedOopClosureType::do_derived_oop(base, derived); } template -inline void Devirtualizer::do_derived_oop(DerivedOopClosureType* closure, oop* base, derived_pointer* derived) { +inline void Devirtualizer::do_derived_oop(DerivedOopClosureType* closure, derived_base* base, derived_pointer* derived) { call_do_derived_oop(&DerivedOopClosureType::do_derived_oop, &DerivedOopClosure::do_derived_oop, closure, base, derived); } diff --git a/src/hotspot/share/utilities/globalDefinitions_gcc.hpp b/src/hotspot/share/utilities/globalDefinitions_gcc.hpp index 3da670ef476..11ffe72bd56 100644 --- a/src/hotspot/share/utilities/globalDefinitions_gcc.hpp +++ b/src/hotspot/share/utilities/globalDefinitions_gcc.hpp @@ -140,12 +140,12 @@ inline int g_isfinite(jdouble f) { return isfinite(f); } // use offsetof() instead, with the invalid-offsetof warning // temporarily disabled. 
#define offset_of(klass,field) \ -[]() { \ +([]() { \ char space[sizeof (klass)] ATTRIBUTE_ALIGNED(16); \ klass* dummyObj = (klass*)space; \ char* c = (char*)(void*)&dummyObj->field; \ return (size_t)(c - space); \ -}() +}()) #if defined(_LP64) && defined(__APPLE__) diff --git a/src/hotspot/share/utilities/growableArray.hpp b/src/hotspot/share/utilities/growableArray.hpp index be3a0f935d6..b960f004f15 100644 --- a/src/hotspot/share/utilities/growableArray.hpp +++ b/src/hotspot/share/utilities/growableArray.hpp @@ -809,13 +809,16 @@ public: this->clear_and_deallocate(); } - void* operator new(size_t size) throw() { + void* operator new(size_t size) { return AnyObj::operator new(size, F); } void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() { return AnyObj::operator new(size, nothrow_constant, F); } + void operator delete(void *p) { + AnyObj::operator delete(p); + } }; // Custom STL-style iterator to iterate over GrowableArrays diff --git a/src/hotspot/share/utilities/numberSeq.cpp b/src/hotspot/share/utilities/numberSeq.cpp index db2f8d92d73..bce17d558cd 100644 --- a/src/hotspot/share/utilities/numberSeq.cpp +++ b/src/hotspot/share/utilities/numberSeq.cpp @@ -206,8 +206,15 @@ double TruncatedSeq::oldest() const { } double TruncatedSeq::predict_next() const { - if (_num == 0) + if (_num == 0) { + // No data points, pick function: y = 0 + 0*x return 0.0; + } + + if (_num == 1) { + // Only one point P, pick function: y = P_y + 0*x + return _sequence[0]; + } double num = (double) _num; double x_squared_sum = 0.0; diff --git a/src/java.base/macosx/classes/jdk/internal/loader/ClassLoaderHelper.java b/src/java.base/macosx/classes/jdk/internal/loader/ClassLoaderHelper.java index 2e0e15e69a2..4ee692b8410 100644 --- a/src/java.base/macosx/classes/jdk/internal/loader/ClassLoaderHelper.java +++ b/src/java.base/macosx/classes/jdk/internal/loader/ClassLoaderHelper.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,21 +27,15 @@ package jdk.internal.loader; import java.io.File; import java.util.ArrayList; -import sun.security.action.GetPropertyAction; + +import jdk.internal.util.OperatingSystem; +import jdk.internal.util.Version; class ClassLoaderHelper { - private static final boolean hasDynamicLoaderCache; - static { - String osVersion = GetPropertyAction.privilegedGetProperty("os.version"); - // dynamic linker cache support on os.version >= 11.x - int major = 11; - int i = osVersion.indexOf('.'); - try { - major = Integer.parseInt(i < 0 ? osVersion : osVersion.substring(0, i)); - } catch (NumberFormatException e) {} - // SDK 10.15 and earlier always reports 10.16 instead of 11.x.x - hasDynamicLoaderCache = major >= 11 || osVersion.equals("10.16"); - } + + // SDK 10.15 and earlier always reports 10.16 instead of 11.x.x + private static final boolean hasDynamicLoaderCache = OperatingSystem.version() + .compareTo(new Version(10, 16)) >= 0; private ClassLoaderHelper() {} diff --git a/src/java.base/macosx/classes/sun/nio/fs/BsdFileStore.java b/src/java.base/macosx/classes/sun/nio/fs/BsdFileStore.java index 3f5de6bbefa..e7d488de005 100644 --- a/src/java.base/macosx/classes/sun/nio/fs/BsdFileStore.java +++ b/src/java.base/macosx/classes/sun/nio/fs/BsdFileStore.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,6 @@ import java.nio.file.attribute.FileAttributeView; import java.nio.file.attribute.UserDefinedFileAttributeView; import java.io.IOException; import java.util.Arrays; -import sun.security.action.GetPropertyAction; /** * Bsd implementation of FileStore @@ -93,11 +92,8 @@ class BsdFileStore // typical macOS file system types that are known to support xattr String fstype = entry().fstype(); - if ("hfs".equals(fstype)) + if ("hfs".equals(fstype) || "apfs".equals(fstype)) { return true; - if ("apfs".equals(fstype)) { - // fgetxattr broken on APFS prior to 10.14 - return isOsVersionGte(10, 14); } // probe file system capabilities @@ -113,17 +109,4 @@ class BsdFileStore return supportsFileAttributeView(UserDefinedFileAttributeView.class); return super.supportsFileAttributeView(name); } - - /** - * Returns true if the OS major/minor version is greater than, or equal, to the - * given major/minor version. - */ - private static boolean isOsVersionGte(int requiredMajor, int requiredMinor) { - String osVersion = GetPropertyAction.privilegedGetProperty("os.version"); - String[] vers = Util.split(osVersion, '.'); - int majorVersion = Integer.parseInt(vers[0]); - int minorVersion = Integer.parseInt(vers[1]); - return (majorVersion > requiredMajor) - || (majorVersion == requiredMajor && minorVersion >= requiredMinor); - } } diff --git a/src/java.base/share/classes/java/io/FileInputStream.java b/src/java.base/share/classes/java/io/FileInputStream.java index e882946cf74..a1797cf1246 100644 --- a/src/java.base/share/classes/java/io/FileInputStream.java +++ b/src/java.base/share/classes/java/io/FileInputStream.java @@ -41,19 +41,13 @@ import sun.nio.ch.FileChannelImpl; * {@code FileReader}. * * @apiNote - * To release resources used by this stream {@link #close} should be called - * directly or by try-with-resources. 
Subclasses are responsible for the cleanup - * of resources acquired by the subclass. - * Subclasses that override {@link #finalize} in order to perform cleanup - * should be modified to use alternative cleanup mechanisms such as - * {@link java.lang.ref.Cleaner} and remove the overriding {@code finalize} method. + * The {@link #close} method should be called to release resources used by this + * stream, either directly, or with the {@code try}-with-resources statement. * * @implSpec - * If this FileInputStream has been subclassed and the {@link #close} - * method has been overridden, the {@link #close} method will be - * called when the FileInputStream is unreachable. - * Otherwise, it is implementation specific how the resource cleanup described in - * {@link #close} is performed. + * Subclasses are responsible for the cleanup of resources acquired by the subclass. + * Subclasses requiring that resource cleanup take place after a stream becomes + * unreachable should use {@link java.lang.ref.Cleaner} or some other mechanism. * * @author Arthur van Hoff * @see java.io.File @@ -494,10 +488,10 @@ public class FileInputStream extends InputStream * @apiNote * Overriding {@link #close} to perform cleanup actions is reliable * only when called directly or when called by try-with-resources. - * Do not depend on finalization to invoke {@code close}; - * finalization is not reliable and is deprecated. - * If cleanup of native resources is needed, other mechanisms such as - * {@linkplain java.lang.ref.Cleaner} should be used. + * + * @implSpec + * Subclasses requiring that resource cleanup take place after a stream becomes + * unreachable should use the {@link java.lang.ref.Cleaner} mechanism. 
* * @throws IOException {@inheritDoc} * diff --git a/src/java.base/share/classes/java/io/FileOutputStream.java b/src/java.base/share/classes/java/io/FileOutputStream.java index 5d2dead569a..9aadf1f38be 100644 --- a/src/java.base/share/classes/java/io/FileOutputStream.java +++ b/src/java.base/share/classes/java/io/FileOutputStream.java @@ -46,19 +46,13 @@ import sun.nio.ch.FileChannelImpl; * {@code FileWriter}. * * @apiNote - * To release resources used by this stream {@link #close} should be called - * directly or by try-with-resources. Subclasses are responsible for the cleanup - * of resources acquired by the subclass. - * Subclasses that override {@link #finalize} in order to perform cleanup - * should be modified to use alternative cleanup mechanisms such as - * {@link java.lang.ref.Cleaner} and remove the overriding {@code finalize} method. + * The {@link #close} method should be called to release resources used by this + * stream, either directly, or with the {@code try}-with-resources statement. * * @implSpec - * If this FileOutputStream has been subclassed and the {@link #close} - * method has been overridden, the {@link #close} method will be - * called when the FileInputStream is unreachable. - * Otherwise, it is implementation specific how the resource cleanup described in - * {@link #close} is performed. + * Subclasses are responsible for the cleanup of resources acquired by the subclass. + * Subclasses requiring that resource cleanup take place after a stream becomes + * unreachable should use {@link java.lang.ref.Cleaner} or some other mechanism. * * @author Arthur van Hoff * @see java.io.File @@ -387,10 +381,10 @@ public class FileOutputStream extends OutputStream * @apiNote * Overriding {@link #close} to perform cleanup actions is reliable * only when called directly or when called by try-with-resources. - * Do not depend on finalization to invoke {@code close}; - * finalization is not reliable and is deprecated. 
- * If cleanup of native resources is needed, other mechanisms such as - * {@linkplain java.lang.ref.Cleaner} should be used. + * + * @implSpec + * Subclasses requiring that resource cleanup take place after a stream becomes + * unreachable should use the {@link java.lang.ref.Cleaner} mechanism. * * @throws IOException if an I/O error occurs. * diff --git a/src/java.base/share/classes/java/lang/Enum.java b/src/java.base/share/classes/java/lang/Enum.java index 355d18d621b..56016779243 100644 --- a/src/java.base/share/classes/java/lang/Enum.java +++ b/src/java.base/share/classes/java/lang/Enum.java @@ -37,6 +37,8 @@ import java.lang.constant.DynamicConstantDesc; import java.lang.invoke.MethodHandles; import java.util.Optional; +import jdk.internal.vm.annotation.Stable; + import static java.util.Objects.requireNonNull; /** @@ -166,13 +168,28 @@ public abstract class Enum> return this==other; } + /** + * The hash code of this enumeration constant. + */ + @Stable + private int hash; + /** * Returns a hash code for this enum constant. * * @return a hash code for this enum constant. */ public final int hashCode() { - return super.hashCode(); + // Once initialized, the hash field value does not change. + // HotSpot's identity hash code generation also never returns zero + // as the identity hash code. This makes zero a convenient marker + // for the un-initialized value for both @Stable and the lazy + // initialization code below. 
+ int hc = hash; + if (hc == 0) { + hc = hash = System.identityHashCode(this); + } + return hc; } /** diff --git a/src/java.base/share/classes/java/lang/FdLibm.java b/src/java.base/share/classes/java/lang/FdLibm.java index f712f3efc97..5410a0a1ad6 100644 --- a/src/java.base/share/classes/java/lang/FdLibm.java +++ b/src/java.base/share/classes/java/lang/FdLibm.java @@ -64,6 +64,16 @@ class FdLibm { private static final double TWO54 = 0x1.0p54; // 1.80143985094819840000e+16 private static final double HUGE = 1.0e+300; + /* + * Constants for bit-wise manipulation of IEEE 754 double + * values. These constants are for the high-order 32-bits of a + * 64-bit double value: 1 sign bit as the most significant bit, + * followed by 11 exponent bits, and then the remaining bits as + * the significand. + */ + private static final int SIGN_BIT = 0x8000_0000; + private static final int EXP_BITS = 0x7ff0_0000; + private static final int EXP_SIGNIF_BITS = 0x7fff_ffff; private FdLibm() { throw new UnsupportedOperationException("No FdLibm instances for you."); @@ -156,10 +166,10 @@ class FdLibm { ix = __HI(x); // |x| ~< pi/4 - ix &= 0x7fff_ffff; + ix &= EXP_SIGNIF_BITS; if (ix <= 0x3fe9_21fb) { return __kernel_sin(x, z, 0); - } else if (ix>=0x7ff0_0000) { // sin(Inf or NaN) is NaN + } else if (ix >= EXP_BITS) { // sin(Inf or NaN) is NaN return x - x; } else { // argument reduction needed n = RemPio2.__ieee754_rem_pio2(x, y); @@ -211,7 +221,7 @@ class FdLibm { static double __kernel_sin(double x, double y, int iy) { double z, r, v; int ix; - ix = __HI(x) & 0x7fff_ffff; // high word of x + ix = __HI(x) & EXP_SIGNIF_BITS; // high word of x if (ix < 0x3e40_0000) { // |x| < 2**-27 if ((int)x == 0) // generate inexact return x; @@ -269,11 +279,11 @@ class FdLibm { ix = __HI(x); // |x| ~< pi/4 - ix &= 0x7fff_ffff; + ix &= EXP_SIGNIF_BITS; if (ix <= 0x3fe9_21fb) { return __kernel_cos(x, z); - } else if (ix >= 0x7ff0_0000) { // cos(Inf or NaN) is NaN - return x-x; + } else if (ix >= EXP_BITS) 
{ // cos(Inf or NaN) is NaN + return x - x; } else { // argument reduction needed n = RemPio2.__ieee754_rem_pio2(x,y); switch (n & 3) { @@ -331,7 +341,7 @@ class FdLibm { static double __kernel_cos(double x, double y) { double a, hz, z, r, qx = 0.0; int ix; - ix = __HI(x) & 0x7fff_ffff; // ix = |x|'s high word + ix = __HI(x) & EXP_SIGNIF_BITS; // ix = |x|'s high word if (ix < 0x3e40_0000) { // if x < 2**27 if (((int)x) == 0) { // generate inexact return 1.0; @@ -395,11 +405,11 @@ class FdLibm { ix = __HI(x); // |x| ~< pi/4 - ix &= 0x7fff_ffff; + ix &= EXP_SIGNIF_BITS; if (ix <= 0x3fe9_21fb) { return __kernel_tan(x, z, 1); - } else if (ix >= 0x7ff0_0000) { // tan(Inf or NaN) is NaN - return x-x; // NaN + } else if (ix >= EXP_BITS) { // tan(Inf or NaN) is NaN + return x - x; // NaN } else { // argument reduction needed n = RemPio2.__ieee754_rem_pio2(x, y); return __kernel_tan(y[0], y[1], 1 - ((n & 1) << 1)); // 1 -- n even; -1 -- n odd @@ -462,7 +472,7 @@ class FdLibm { double z, r, v, w, s; int ix, hx; hx = __HI(x); // high word of x - ix = hx&0x7fff_ffff; // high word of |x| + ix = hx & EXP_SIGNIF_BITS; // high word of |x| if (ix < 0x3e30_0000) { // x < 2**-28 if ((int)x == 0) { // generate inexact if (((ix | __LO(x)) | (iy + 1)) == 0) { @@ -584,7 +594,7 @@ class FdLibm { int e0, i, j, nx, n, ix, hx; hx = __HI(x); // high word of x - ix = hx & 0x7fff_ffff; + ix = hx & EXP_SIGNIF_BITS; if (ix <= 0x3fe9_21fb) { // |x| ~<= pi/4 , no need for reduction y[0] = x; y[1] = 0; @@ -655,13 +665,13 @@ class FdLibm { /* * all other (large) arguments */ - if (ix >= 0x7ff0_0000) { // x is inf or NaN + if (ix >= EXP_BITS) { // x is inf or NaN y[0] = y[1] = x - x; return 0; } - // set z = scalbn(|x|,ilogb(x)-23) + // set z = scalbn(|x|, ilogb(x)-23) z = __LO(z, __LO(x)); - e0 = (ix >> 20) - 1046; /* e0 = ilogb(z)-23; */ + e0 = (ix >> 20) - 1046; // e0 = ilogb(z) - 23; z = __HI(z, ix - (e0 << 20)); for (i=0; i < 2; i++) { tx[i] = (double)((int)(z)); @@ -859,7 +869,7 @@ class FdLibm 
{ // compute n z = Math.scalb(z, q0); // actual value of z - z -= 8.0*Math.floor(z*0.125); // trim off integer >= 8 + z -= 8.0*Math.floor(z*0.125); // trim off integer >= 8 n = (int) z; z -= (double)n; ih = 0; @@ -1071,7 +1081,7 @@ class FdLibm { double t = 0, w, p, q, c, r, s; int hx, ix; hx = __HI(x); - ix = hx & 0x7fff_ffff; + ix = hx & EXP_SIGNIF_BITS; if (ix >= 0x3ff0_0000) { // |x| >= 1 if(((ix - 0x3ff0_0000) | __LO(x)) == 0) { // asin(1) = +-pi/2 with inexact @@ -1157,7 +1167,7 @@ class FdLibm { double z, p, q, r, w, s, c, df; int hx, ix; hx = __HI(x); - ix = hx & 0x7fff_ffff; + ix = hx & EXP_SIGNIF_BITS; if (ix >= 0x3ff0_0000) { // |x| >= 1 if (((ix - 0x3ff0_0000) | __LO(x)) == 0) { // |x| == 1 if (hx > 0) {// acos(1) = 0 @@ -1166,7 +1176,7 @@ class FdLibm { return Math.PI + 2.0*pio2_lo; } } - return (x-x)/(x-x); // acos(|x| > 1) is NaN + return (x - x)/(x - x); // acos(|x| > 1) is NaN } if (ix < 0x3fe0_0000) { // |x| < 0.5 if (ix <= 0x3c60_0000) { // if |x| < 2**-57 @@ -1255,10 +1265,10 @@ class FdLibm { int ix, hx, id; hx = __HI(x); - ix = hx & 0x7fff_ffff; + ix = hx & EXP_SIGNIF_BITS; if (ix >= 0x4410_0000) { // if |x| >= 2^66 - if (ix > 0x7ff0_0000 || - (ix == 0x7ff0_0000 && (__LO(x) != 0))) { + if (ix > EXP_BITS || + (ix == EXP_BITS && (__LO(x) != 0))) { return x+x; // NaN } if (hx > 0) { @@ -1352,10 +1362,10 @@ class FdLibm { /*unsigned*/ int lx, ly; hx = __HI(x); - ix = hx & 0x7fff_ffff; + ix = hx & EXP_SIGNIF_BITS; lx = __LO(x); hy = __HI(y); - iy = hy&0x7fff_ffff; + iy = hy & EXP_SIGNIF_BITS; ly = __LO(y); if (Double.isNaN(x) || Double.isNaN(y)) return x + y; @@ -1378,8 +1388,8 @@ class FdLibm { } // when x is INF - if (ix == 0x7ff0_0000) { - if (iy == 0x7ff0_0000) { + if (ix == EXP_BITS) { + if (iy == EXP_BITS) { switch(m) { case 0: return pi_o_4 + tiny; // atan(+INF, +INF) case 1: return -pi_o_4 - tiny; // atan(-INF, +INF) @@ -1396,7 +1406,7 @@ class FdLibm { } } // when y is INF - if (iy == 0x7ff0_0000) { + if (iy == EXP_BITS) { return (hy < 0)? 
-pi_o_2 - tiny : pi_o_2 + tiny; } @@ -1494,7 +1504,7 @@ class FdLibm { static double compute(double x) { double z = 0.0; - int sign = 0x8000_0000; + int sign = SIGN_BIT; /*unsigned*/ int r, t1, s1, ix1, q1; int ix0, s0, q, m, t, i; @@ -1502,7 +1512,7 @@ class FdLibm { ix1 = __LO(x); // low word of x // take care of Inf and NaN - if ((ix0 & 0x7ff0_0000) == 0x7ff0_0000) { + if ((ix0 & EXP_BITS) == EXP_BITS) { return x*x + x; // sqrt(NaN)=NaN, sqrt(+inf)=+inf, sqrt(-inf)=sNaN } // take care of zero @@ -1510,7 +1520,7 @@ class FdLibm { if (((ix0 & (~sign)) | ix1) == 0) return x; // sqrt(+-0) = +-0 else if (ix0 < 0) - return (x-x)/(x-x); // sqrt(-ve) = sNaN + return (x - x)/(x - x); // sqrt(-ve) = sNaN } // normalize x m = (ix0 >> 20); @@ -2136,7 +2146,7 @@ class FdLibm { } final int hx = __HI(x); - int ix = hx & 0x7fffffff; + int ix = hx & EXP_SIGNIF_BITS; /* * When x < 0, determine if y is an odd integer: @@ -2176,7 +2186,7 @@ class FdLibm { // (x < 0)**(non-int) is NaN if ((n | y_is_int) == 0) - return (x-x)/(x-x); + return (x - x)/(x - x); s = 1.0; // s (sign of result -ve**odd) = -1 else = 1 if ( (n | (y_is_int - 1)) == 0) @@ -2299,7 +2309,7 @@ class FdLibm { if (p_l + OVT > z - p_h) return s * INFINITY; // Overflow } - } else if ((j & 0x7fffffff) >= 0x4090cc00 ) { // z <= -1075 + } else if ((j & EXP_SIGNIF_BITS) >= 0x4090cc00 ) { // z <= -1075 if (((j - 0xc090cc00) | i)!=0) // z < -1075 return s * 0.0; // Underflow else { @@ -2319,12 +2329,12 @@ class FdLibm { final double LG2 = 0x1.62e4_2fef_a39efp-1; // 6.93147180559945286227e-01 final double LG2_H = 0x1.62e43p-1; // 6.93147182464599609375e-01 final double LG2_L = -0x1.05c6_10ca_86c39p-29; // -1.90465429995776804525e-09 - i = j & 0x7fffffff; + i = j & EXP_SIGNIF_BITS; k = (i >> 20) - 0x3ff; n = 0; if (i > 0x3fe00000) { // if |z| > 0.5, set n = [z + 0.5] n = j + (0x00100000 >> (k + 1)); - k = ((n & 0x7fffffff) >> 20) - 0x3ff; // new k for n + k = ((n & EXP_SIGNIF_BITS) >> 20) - 0x3ff; // new k for n t = 0.0; t = 
__HI(t, (n & ~(0x000fffff >> k)) ); n = ((n & 0x000fffff) | 0x00100000) >> (20 - k); @@ -2449,7 +2459,7 @@ class FdLibm { hx = __HI(x); /* high word of x */ xsb = (hx >> 31) & 1; /* sign bit of x */ - hx &= 0x7fffffff; /* high word of |x| */ + hx &= EXP_SIGNIF_BITS; /* high word of |x| */ /* filter out non-finite argument */ if (hx >= 0x40862E42) { /* if |x| >= 709.78... */ @@ -2568,8 +2578,6 @@ class FdLibm { Lg6 = 0x1.39a09d078c69fp-3, // 1.531383769920937332e-01 Lg7 = 0x1.2f112df3e5244p-3; // 1.479819860511658591e-01 - private static final double zero = 0.0; - static double compute(double x) { double hfsq, f, s, z, R, w, t1, t2, dk; int k, hx, i, j; @@ -2580,17 +2588,17 @@ class FdLibm { k=0; if (hx < 0x0010_0000) { // x < 2**-1022 - if (((hx & 0x7fff_ffff) | lx) == 0) { // log(+-0) = -inf - return -TWO54/zero; + if (((hx & EXP_SIGNIF_BITS) | lx) == 0) { // log(+-0) = -inf + return -TWO54/0.0; } if (hx < 0) { // log(-#) = NaN - return (x - x)/zero; + return (x - x)/0.0; } k -= 54; x *= TWO54; // subnormal number, scale up x hx = __HI(x); // high word of x } - if (hx >= 0x7ff0_0000) { + if (hx >= EXP_BITS) { return x + x; } k += (hx >> 20) - 1023; @@ -2600,9 +2608,9 @@ class FdLibm { k += (i >> 20); f = x - 1.0; if ((0x000f_ffff & (2 + hx)) < 3) {// |f| < 2**-20 - if (f == zero) { + if (f == 0.0) { if (k == 0) { - return zero; + return 0.0; } else { dk = (double)k; return dk*ln2_hi + dk*ln2_lo; @@ -2694,7 +2702,7 @@ class FdLibm { k=0; if (hx < 0x0010_0000) { /* x < 2**-1022 */ - if (((hx & 0x7fff_ffff) | lx) == 0) { + if (((hx & EXP_SIGNIF_BITS) | lx) == 0) { return -TWO54/0.0; /* log(+-0)=-inf */ } if (hx < 0) { @@ -2705,12 +2713,12 @@ class FdLibm { hx = __HI(x); } - if (hx >= 0x7ff0_0000) { + if (hx >= EXP_BITS) { return x + x; } k += (hx >> 20) - 1023; - i = (k & 0x8000_0000) >>> 31; // unsigned shift + i = (k & SIGN_BIT) >>> 31; // unsigned shift hx = (hx & 0x000f_ffff) | ((0x3ff - i) << 20); y = (double)(k + i); x = __HI(x, hx); // replace high word of x 
with hx @@ -2800,7 +2808,7 @@ class FdLibm { int k, hx, hu=0, ax; hx = __HI(x); /* high word of x */ - ax = hx & 0x7fff_ffff; + ax = hx & EXP_SIGNIF_BITS; k = 1; if (hx < 0x3FDA_827A) { /* x < 0.41422 */ @@ -2826,7 +2834,7 @@ class FdLibm { } } - if (hx >= 0x7ff0_0000) { + if (hx >= EXP_BITS) { return x + x; } @@ -2977,7 +2985,6 @@ class FdLibm { * to produce the hexadecimal values shown. */ static class Expm1 { - private static final double one = 1.0; private static final double huge = 1.0e+300; private static final double tiny = 1.0e-300; private static final double o_threshold = 0x1.62e42fefa39efp9; // 7.09782712893383973096e+02 @@ -2997,9 +3004,9 @@ class FdLibm { /*unsigned*/ int hx; hx = __HI(x); // high word of x - xsb = hx & 0x8000_0000; // sign bit of x + xsb = hx & SIGN_BIT; // sign bit of x y = Math.abs(x); - hx &= 0x7fff_ffff; // high word of |x| + hx &= EXP_SIGNIF_BITS; // high word of |x| // filter out huge and non-finite argument if (hx >= 0x4043_687A) { // if |x| >= 56*ln2 @@ -3017,7 +3024,7 @@ class FdLibm { } if (xsb != 0) { // x < -56*ln2, return -1.0 with inexact if (x + tiny < 0.0) { // raise inexact - return tiny - one; // return -1 + return tiny - 1.0; // return -1 } } } @@ -3052,7 +3059,7 @@ class FdLibm { // x is now in primary range hfx = 0.5*x; hxs = x*hfx; - r1 = one + hxs*(Q1 + hxs*(Q2 + hxs*(Q3 + hxs*(Q4 + hxs*Q5)))); + r1 = 1.0 + hxs*(Q1 + hxs*(Q2 + hxs*(Q3 + hxs*(Q4 + hxs*Q5)))); t = 3.0 - r1*hfx; e = hxs *((r1 - t)/(6.0 - x*t)); if (k == 0) { @@ -3067,15 +3074,15 @@ class FdLibm { if (x < -0.25) { return -2.0*(e - (x + 0.5)); } else { - return one + 2.0*(x - e); + return 1.0 + 2.0*(x - e); } } if (k <= -2 || k > 56) { // suffice to return exp(x) - 1 - y = one - (e - x); + y = 1.0 - (e - x); y = __HI(y, __HI(y) + (k << 20)); // add k to y's exponent - return y - one; + return y - 1.0; } - t = one; + t = 1.0; if (k < 20) { t = __HI(t, 0x3ff0_0000 - (0x2_00000 >> k)); // t = 1-2^-k y = t - ( e - x); @@ -3083,7 +3090,7 @@ class FdLibm { 
} else { t = __HI(t, ((0x3ff - k) << 20)); // 2^-k y = x - (e + t); - y += one; + y += 1.0; y = __HI(y, __HI(y) + (k << 20)); // add k to y's exponent } } @@ -3120,10 +3127,10 @@ class FdLibm { // High word of |x| jx = __HI(x); - ix = jx & 0x7fff_ffff; + ix = jx & EXP_SIGNIF_BITS; // x is INF or NaN - if (ix >= 0x7ff0_0000) { + if (ix >= EXP_BITS) { return x + x; } @@ -3196,10 +3203,10 @@ class FdLibm { // High word of |x| ix = __HI(x); - ix &= 0x7fff_ffff; + ix &= EXP_SIGNIF_BITS; // x is INF or NaN - if (ix >= 0x7ff0_0000) { + if (ix >= EXP_BITS) { return x*x; } @@ -3273,10 +3280,10 @@ class FdLibm { // High word of |x|. jx = __HI(x); - ix = jx & 0x7fff_ffff; + ix = jx & EXP_SIGNIF_BITS; // x is INF or NaN - if (ix >= 0x7ff0_0000) { + if (ix >= EXP_BITS) { if (jx >= 0) { // tanh(+-inf)=+-1 return 1.0/x + 1.0; } else { // tanh(NaN) = NaN @@ -3314,17 +3321,17 @@ class FdLibm { lx = __LO(x); // low word of x hp = __HI(p); // high word of p lp = __LO(p); // low word of p - sx = hx & 0x8000_0000; - hp &= 0x7fff_ffff; - hx &= 0x7fff_ffff; + sx = hx & SIGN_BIT; + hp &= EXP_SIGNIF_BITS; + hx &= EXP_SIGNIF_BITS; // purge off exception values if ((hp | lp) == 0) {// p = 0 return (x*p)/(x*p); } - if ((hx >= 0x7ff0_0000) || // not finite - ((hp >= 0x7ff0_0000) && // p is NaN - (((hp - 0x7ff0_0000) | lp) != 0))) + if ((hx >= EXP_BITS) || // not finite + ((hp >= EXP_BITS) && // p is NaN + (((hp - EXP_BITS) | lp) != 0))) return (x*p)/(x*p); if (hp <= 0x7fdf_ffff) { // now x < 2p @@ -3362,13 +3369,13 @@ class FdLibm { lx = __LO(x); // low word of x hy = __HI(y); // high word of y ly = __LO(y); // low word of y - sx = hx & 0x8000_0000; // sign of x + sx = hx & SIGN_BIT; // sign of x hx ^= sx; // |x| - hy &= 0x7fff_ffff; // |y| + hy &= EXP_SIGNIF_BITS; // |y| // purge off exception values - if ((hy | ly) == 0 || (hx >= 0x7ff0_0000)|| // y = 0, or x not finite - ((hy | ((ly | -ly) >>> 31)) > 0x7ff0_0000)) // or y is NaN, unsigned shift + if ((hy | ly) == 0 || (hx >= EXP_BITS)|| // 
y = 0, or x not finite + ((hy | ((ly | -ly) >>> 31)) > EXP_BITS)) // or y is NaN, unsigned shift return (x*y)/(x*y); if (hx <= hy) { if ((hx < hy) || (Integer.compareUnsigned(lx, ly) < 0)) { // |x| < |y| return x diff --git a/src/java.base/share/classes/java/lang/ProcessBuilder.java b/src/java.base/share/classes/java/lang/ProcessBuilder.java index e870682a38e..e7d5d9debef 100644 --- a/src/java.base/share/classes/java/lang/ProcessBuilder.java +++ b/src/java.base/share/classes/java/lang/ProcessBuilder.java @@ -1116,8 +1116,8 @@ public final class ProcessBuilder String dir = directory == null ? null : directory.toString(); - for (int i = 1; i < cmdarray.length; i++) { - if (cmdarray[i].indexOf('\u0000') >= 0) { + for (String s : cmdarray) { + if (s.indexOf('\u0000') >= 0) { throw new IOException("invalid null character in command"); } } diff --git a/src/java.base/share/classes/java/lang/Runtime.java b/src/java.base/share/classes/java/lang/Runtime.java index 9f87c68da75..e77bf4c41e3 100644 --- a/src/java.base/share/classes/java/lang/Runtime.java +++ b/src/java.base/share/classes/java/lang/Runtime.java @@ -140,19 +140,21 @@ public class Runtime { private Runtime() {} /** - * Initiates the shutdown sequence of the Java Virtual Machine. - * This method blocks indefinitely; it never returns or throws an exception (that is, it - * does not complete either normally or abruptly). The argument serves as a status code; - * by convention, a nonzero status code indicates abnormal termination. + * Initiates the {@linkplain ##shutdown shutdown sequence} of the Java Virtual Machine. + * Unless the security manager denies exiting, this method initiates the shutdown sequence + * (if it is not already initiated) and then blocks indefinitely. This method neither returns + * nor throws an exception; that is, it does not complete either normally or abruptly. * - *

Invocations of this method are serialized such that only one - * invocation will actually proceed with the shutdown sequence and - * terminate the VM with the given status code. All other invocations - * simply block indefinitely. + *

The argument serves as a status code. By convention, a nonzero status code + * indicates abnormal termination. * - *

Because this method always blocks indefinitely, if it is invoked from - * a shutdown hook, it will prevent that shutdown hook from terminating. - * Consequently, this will prevent the shutdown sequence from finishing. + *

Successful invocations of this method are serialized such that only one invocation + * initiates the shutdown sequence and terminates the VM with the given status code. + * All other invocations will perform no action and block indefinitely. + * + *

Because a successful invocation of this method blocks indefinitely, if it is invoked + * from a shutdown hook, it will prevent that shutdown hook from terminating. Consequently, + * this will prevent the shutdown sequence from finishing. * *

The {@link System#exit(int) System.exit} method is the * conventional and convenient means of invoking this method. @@ -190,7 +192,7 @@ public class Runtime { * Registers a new virtual-machine shutdown hook. * *

A shutdown hook is simply an initialized but unstarted thread. Shutdown hooks - * are started at the beginning of the shutdown sequence. + * are started at the beginning of the {@linkplain ##shutdown shutdown sequence}. * Registration and de-registration of shutdown hooks is disallowed once the shutdown * sequence has begun. *

@@ -280,15 +282,17 @@ public class Runtime { } /** - * Immediately terminates the Java Virtual Machine. Termination - * is unconditional and immediate. This method does not initiate the - * shutdown sequence, nor does it wait for the shutdown sequence - * to finish if it is already in progress. This method never returns normally. + * Immediately {@linkplain ##termination terminates} the Java Virtual Machine. + * If the security manager denies exiting, throws {@link SecurityException}. + * Otherwise, termination of the Java Virtual Machine is unconditional and immediate. + * This method does not initiate the {@linkplain ##shutdown shutdown sequence}, nor does + * it wait for the shutdown sequence to finish if it is already in progress. An + * invocation of this method never returns normally. * * @apiNote * This method should be used with extreme caution. Using it may circumvent or disrupt * any cleanup actions intended to be performed by shutdown hooks, possibly leading to - * data corruption. See the termination section above + * data corruption. See the {@linkplain ##termination termination} section above * for other possible consequences of halting the Java Virtual Machine. * * @param status diff --git a/src/java.base/share/classes/java/lang/System.java b/src/java.base/share/classes/java/lang/System.java index f15b5861670..f9cb79d2d48 100644 --- a/src/java.base/share/classes/java/lang/System.java +++ b/src/java.base/share/classes/java/lang/System.java @@ -760,8 +760,6 @@ public final class System { * List of paths to search when loading libraries * {@systemProperty java.io.tmpdir} * Default temp file path - * {@systemProperty java.compiler} - * Name of JIT compiler to use * {@systemProperty os.name} * Operating system name * {@systemProperty os.arch} @@ -1888,18 +1886,18 @@ public final class System { } /** - * Initiates the shutdown sequence of the - * Java Virtual Machine. This method always blocks indefinitely. 
The argument - * serves as a status code; by convention, a nonzero status code indicates - * abnormal termination. + * Initiates the {@linkplain Runtime##shutdown shutdown sequence} of the Java Virtual Machine. + * Unless the security manager denies exiting, this method initiates the shutdown sequence + * (if it is not already initiated) and then blocks indefinitely. This method neither returns + * nor throws an exception; that is, it does not complete either normally or abruptly. *

- * This method calls the {@code exit} method in class {@code Runtime}. This - * method never returns normally. + * The argument serves as a status code. By convention, a nonzero status code + * indicates abnormal termination. *

* The call {@code System.exit(n)} is effectively equivalent to the call: - *

-     * Runtime.getRuntime().exit(n)
-     * 
+ * {@snippet : + * Runtime.getRuntime().exit(n) + * } * * @implNote * The initiation of the shutdown sequence is logged by {@link Runtime#exit(int)}. diff --git a/src/java.base/share/classes/java/lang/Thread.java b/src/java.base/share/classes/java/lang/Thread.java index 5e30698ae43..6d33247df30 100644 --- a/src/java.base/share/classes/java/lang/Thread.java +++ b/src/java.base/share/classes/java/lang/Thread.java @@ -506,14 +506,14 @@ public class Thread implements Runnable { if (currentThread() instanceof VirtualThread vthread) { vthread.sleepNanos(nanos); } else { - sleep0(millis); + sleep0(nanos); } } finally { afterSleep(event); } } - private static native void sleep0(long millis) throws InterruptedException; + private static native void sleep0(long nanos) throws InterruptedException; /** * Causes the currently executing thread to sleep (temporarily cease @@ -555,11 +555,7 @@ public class Thread implements Runnable { if (currentThread() instanceof VirtualThread vthread) { vthread.sleepNanos(totalNanos); } else { - // millisecond precision - if (nanos > 0 && millis < Long.MAX_VALUE) { - millis++; - } - sleep0(millis); + sleep0(totalNanos); } } finally { afterSleep(event); @@ -593,12 +589,7 @@ public class Thread implements Runnable { if (currentThread() instanceof VirtualThread vthread) { vthread.sleepNanos(nanos); } else { - // millisecond precision - long millis = NANOSECONDS.toMillis(nanos); - if (nanos > MILLISECONDS.toNanos(millis)) { - millis += 1L; - } - sleep0(millis); + sleep0(nanos); } } finally { afterSleep(event); diff --git a/src/java.base/share/classes/java/lang/VirtualThread.java b/src/java.base/share/classes/java/lang/VirtualThread.java index b6530e1b929..746f93cdfac 100644 --- a/src/java.base/share/classes/java/lang/VirtualThread.java +++ b/src/java.base/share/classes/java/lang/VirtualThread.java @@ -205,22 +205,19 @@ final class VirtualThread extends BaseVirtualThread { } // set state to RUNNING - boolean firstRun; int initialState = state(); if 
(initialState == STARTED && compareAndSetState(STARTED, RUNNING)) { // first run - firstRun = true; } else if (initialState == RUNNABLE && compareAndSetState(RUNNABLE, RUNNING)) { // consume parking permit setParkPermit(false); - firstRun = false; } else { // not runnable return; } // notify JVMTI before mount - notifyJvmtiMount(/*hide*/true, firstRun); + notifyJvmtiMount(/*hide*/true); try { cont.run(); @@ -300,7 +297,7 @@ final class VirtualThread extends BaseVirtualThread { // first mount mount(); - notifyJvmtiMount(/*hide*/false, /*first*/true); + notifyJvmtiStart(); // emit JFR event if enabled if (VirtualThreadStartEvent.isTurnedOn()) { @@ -328,7 +325,7 @@ final class VirtualThread extends BaseVirtualThread { } finally { // last unmount - notifyJvmtiUnmount(/*hide*/true, /*last*/true); + notifyJvmtiEnd(); unmount(); // final state @@ -438,14 +435,14 @@ final class VirtualThread extends BaseVirtualThread { @ChangesCurrentThread private boolean yieldContinuation() { // unmount - notifyJvmtiUnmount(/*hide*/true, /*last*/false); + notifyJvmtiUnmount(/*hide*/true); unmount(); try { return Continuation.yield(VTHREAD_SCOPE); } finally { // re-mount mount(); - notifyJvmtiMount(/*hide*/false, /*first*/false); + notifyJvmtiMount(/*hide*/false); } } @@ -462,7 +459,7 @@ final class VirtualThread extends BaseVirtualThread { setState(PARKED); // notify JVMTI that unmount has completed, thread is parked - notifyJvmtiUnmount(/*hide*/false, /*last*/false); + notifyJvmtiUnmount(/*hide*/false); // may have been unparked while parking if (parkPermit && compareAndSetState(PARKED, RUNNABLE)) { @@ -478,7 +475,7 @@ final class VirtualThread extends BaseVirtualThread { setState(RUNNABLE); // notify JVMTI that unmount has completed, thread is runnable - notifyJvmtiUnmount(/*hide*/false, /*last*/false); + notifyJvmtiUnmount(/*hide*/false); // external submit if there are no tasks in the local task queue if (currentThread() instanceof CarrierThread ct && ct.getQueuedTaskCount() == 0) { 
@@ -508,7 +505,7 @@ final class VirtualThread extends BaseVirtualThread { assert (state() == TERMINATED) && (carrierThread == null); if (executed) { - notifyJvmtiUnmount(/*hide*/false, /*last*/true); + notifyJvmtiUnmount(/*hide*/false); } // notify anyone waiting for this virtual thread to terminate @@ -1086,11 +1083,19 @@ final class VirtualThread extends BaseVirtualThread { @IntrinsicCandidate @JvmtiMountTransition - private native void notifyJvmtiMount(boolean hide, boolean firstMount); + private native void notifyJvmtiStart(); @IntrinsicCandidate @JvmtiMountTransition - private native void notifyJvmtiUnmount(boolean hide, boolean lastUnmount); + private native void notifyJvmtiEnd(); + + @IntrinsicCandidate + @JvmtiMountTransition + private native void notifyJvmtiMount(boolean hide); + + @IntrinsicCandidate + @JvmtiMountTransition + private native void notifyJvmtiUnmount(boolean hide); @IntrinsicCandidate @JvmtiMountTransition diff --git a/src/java.base/share/classes/java/lang/constant/ConstantUtils.java b/src/java.base/share/classes/java/lang/constant/ConstantUtils.java index 21f52ee2f35..ff53a94d619 100644 --- a/src/java.base/share/classes/java/lang/constant/ConstantUtils.java +++ b/src/java.base/share/classes/java/lang/constant/ConstantUtils.java @@ -75,6 +75,66 @@ class ConstantUtils { return name; } + /** + * Validates the correctness of a binary package name. + * In particular checks for the presence of invalid characters in the name. + * Empty package name is allowed. 
+ * + * @param name the package name + * @return the package name passed if valid + * @throws IllegalArgumentException if the package name is invalid + * @throws NullPointerException if the package name is {@code null} + */ + public static String validateBinaryPackageName(String name) { + for (int i=0; i= 0; i--) { + char ch = name.charAt(i); + if ((ch >= '\u0000' && ch <= '\u001F') + || ((ch == '\\' || ch == ':' || ch =='@') && (i == 0 || name.charAt(--i) != '\\'))) + throw new IllegalArgumentException("Invalid module name: " + name); + } + return name; + } + /** * Validates a member name * diff --git a/src/java.base/share/classes/java/lang/constant/ModuleDesc.java b/src/java.base/share/classes/java/lang/constant/ModuleDesc.java new file mode 100644 index 00000000000..fe68d096f1d --- /dev/null +++ b/src/java.base/share/classes/java/lang/constant/ModuleDesc.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.lang.constant; + +import static java.util.Objects.requireNonNull; + +/** + * A nominal descriptor for a {@code Module} constant. + * + *

+ * To create a {@link ModuleDesc} for a module, use the {@link #of(String)} + * method. + * + * @jvms 4.4.11 The CONSTANT_Module_info Structure + * @since 21 + */ +public sealed interface ModuleDesc + permits ModuleDescImpl { + + /** + * Returns a {@link ModuleDesc} for a module, + * given the name of the module. + * + * @param name the module name + * @return a {@link ModuleDesc} describing the desired module + * @throws NullPointerException if the argument is {@code null} + * @throws IllegalArgumentException if the name string is not in the + * correct format + * @jvms 4.2.3 Module and Package Names + */ + static ModuleDesc of(String name) { + ConstantUtils.validateModuleName(requireNonNull(name)); + return new ModuleDescImpl(name); + } + + /** + * Returns the module name of this {@link ModuleDesc}. + * + * @return the module name + */ + String name(); + + /** + * Compare the specified object with this descriptor for equality. + * Returns {@code true} if and only if the specified object is + * also a {@link ModuleDesc} and both describe the same module. + * + * @param o the other object + * @return whether this descriptor is equal to the other object + */ + @Override + boolean equals(Object o); +} diff --git a/src/java.base/share/classes/java/lang/constant/ModuleDescImpl.java b/src/java.base/share/classes/java/lang/constant/ModuleDescImpl.java new file mode 100644 index 00000000000..0a01f5b2b73 --- /dev/null +++ b/src/java.base/share/classes/java/lang/constant/ModuleDescImpl.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.lang.constant; + +/* + * Implementation of {@code ModuleDesc} + * @param name must have been validated + */ +record ModuleDescImpl(String name) implements ModuleDesc { + + @Override + public String toString() { + return String.format("ModuleDesc[%s]", name()); + } +} diff --git a/src/java.base/share/classes/java/lang/constant/PackageDesc.java b/src/java.base/share/classes/java/lang/constant/PackageDesc.java new file mode 100644 index 00000000000..2dc20696a47 --- /dev/null +++ b/src/java.base/share/classes/java/lang/constant/PackageDesc.java @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.lang.constant; + +import static java.util.Objects.requireNonNull; + +/** + * A nominal descriptor for a {@code Package} constant. + * + *

+ * To create a {@link PackageDesc} for a package, + * use the {@link #of(String)} or {@link #ofInternalName(String)} method. + * + * @jvms 4.4.12 The CONSTANT_Package_info Structure + * @since 21 + */ +public sealed interface PackageDesc + permits PackageDescImpl { + + /** + * Returns a {@link PackageDesc} for a package, + * given the name of the package, such as {@code "java.lang"}. + * + * @param name the fully qualified (dot-separated) package name + * @return a {@link PackageDesc} describing the desired package + * @throws NullPointerException if the argument is {@code null} + * @throws IllegalArgumentException if the name string is not in the + * correct format + * @jls 6.5.3 Module Names and Package Names + * @see PackageDesc#ofInternalName(String) + */ + static PackageDesc of(String name) { + ConstantUtils.validateBinaryPackageName(requireNonNull(name)); + return new PackageDescImpl(ConstantUtils.binaryToInternal(name)); + } + + /** + * Returns a {@link PackageDesc} for a package, + * given the name of the package in internal form, + * such as {@code "java/lang"}. + * + * @param name the fully qualified package name, in internal + * (slash-separated) form + * @return a {@link PackageDesc} describing the desired package + * @throws NullPointerException if the argument is {@code null} + * @throws IllegalArgumentException if the name string is not in the + * correct format + * @jvms 4.2.1 Binary Class and Interface Names + * @jvms 4.2.3 Module and Package Names + * @see PackageDesc#of(String) + */ + static PackageDesc ofInternalName(String name) { + ConstantUtils.validateInternalPackageName(requireNonNull(name)); + return new PackageDescImpl(name); + } + + /** + * Returns the fully qualified (slash-separated) package name in internal form + * of this {@link PackageDesc}. 
+ * + * @return the package name in internal form, or the empty string for the + * unnamed package + * @see PackageDesc#name() + */ + String internalName(); + + /** + * Returns the fully qualified (dot-separated) package name + * of this {@link PackageDesc}. + * + * @return the package name, or the empty string for the + * unnamed package + * @see PackageDesc#internalName() + */ + default String name() { + return ConstantUtils.internalToBinary(internalName()); + } + + /** + * Compare the specified object with this descriptor for equality. + * Returns {@code true} if and only if the specified object is + * also a {@link PackageDesc} and both describe the same package. + * + * @param o the other object + * @return whether this descriptor is equal to the other object + */ + @Override + boolean equals(Object o); +} diff --git a/src/java.base/share/classes/java/lang/constant/PackageDescImpl.java b/src/java.base/share/classes/java/lang/constant/PackageDescImpl.java new file mode 100644 index 00000000000..1f0077007b8 --- /dev/null +++ b/src/java.base/share/classes/java/lang/constant/PackageDescImpl.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.lang.constant; + +/* + * Implementation of {@code PackageDesc} + * @param internalName must have been validated + */ +record PackageDescImpl(String internalName) implements PackageDesc { + + @Override + public String toString() { + return String.format("PackageDesc[%s]", name()); + } +} diff --git a/src/java.base/share/classes/java/lang/constant/package-info.java b/src/java.base/share/classes/java/lang/constant/package-info.java index fd85dfb93d7..268587f1db4 100644 --- a/src/java.base/share/classes/java/lang/constant/package-info.java +++ b/src/java.base/share/classes/java/lang/constant/package-info.java @@ -89,6 +89,11 @@ * It is also suitable for describing {@code invokedynamic} call sites in bytecode * reading and writing APIs. * + *

Other members of this package are {@link ModuleDesc} + * and {@link PackageDesc}. They represent module and package + * info structures, suitable for describing modules and their content in bytecode + * reading and writing APIs. + * * @jvms 4.4 The Constant Pool * * @since 12 diff --git a/src/java.base/share/classes/java/lang/foreign/AddressLayout.java b/src/java.base/share/classes/java/lang/foreign/AddressLayout.java new file mode 100644 index 00000000000..fc98f558133 --- /dev/null +++ b/src/java.base/share/classes/java/lang/foreign/AddressLayout.java @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package java.lang.foreign; + +import jdk.internal.foreign.layout.ValueLayouts; +import jdk.internal.javac.PreviewFeature; +import jdk.internal.reflect.CallerSensitive; + +import java.lang.foreign.Linker.Option; +import java.lang.invoke.MethodHandle; +import java.nio.ByteOrder; +import java.util.Optional; + +/** + * A value layout used to model the address of some region of memory. The carrier associated with an address layout is + * {@code MemorySegment.class}. The size and alignment of an address layout are platform dependent + * (e.g. on a 64-bit platform, the size and alignment of an address layout are set to 64 bits). + *

+ * An address layout may optionally feature a {@linkplain #targetLayout() target layout}. An address layout with + * target layout {@code T} can be used to model the address of a region of memory whose layout is {@code T}. + * For instance, an address layout with target layout {@link ValueLayout#JAVA_INT} can be used to model the address + * of a region of memory that is 4 bytes long. Specifying a target layout can be useful in the following situations: + *

+ * <ul>
+ * <li>When accessing a memory segment that has been obtained by reading an address from another
+ * memory segment, e.g. using {@link MemorySegment#getAtIndex(AddressLayout, long)};</li>
+ * <li>When creating a downcall method handle, using {@link Linker#downcallHandle(FunctionDescriptor, Option...)};</li>
+ * <li>When creating an upcall stub, using {@link Linker#upcallStub(MethodHandle, FunctionDescriptor, Arena, Option...)}.</li>
+ * </ul>
+ * + * @see #ADDRESS + * @see #ADDRESS_UNALIGNED + * @since 19 + */ +@PreviewFeature(feature = PreviewFeature.Feature.FOREIGN) +public sealed interface AddressLayout extends ValueLayout permits ValueLayouts.OfAddressImpl { + + /** + * {@inheritDoc} + */ + @Override + AddressLayout withName(String name); + + /** + * {@inheritDoc} + */ + @Override + AddressLayout withoutName(); + + /** + * {@inheritDoc} + */ + @Override + AddressLayout withBitAlignment(long bitAlignment); + + /** + * {@inheritDoc} + */ + @Override + AddressLayout withOrder(ByteOrder order); + + /** + * Returns an address layout with the same carrier, alignment constraint, name and order as this address layout, + * but associated with the specified target layout. The returned address layout allows raw addresses to be accessed + * as {@linkplain MemorySegment memory segments} whose size is set to the size of the specified layout. Moreover, + * if the accessed raw address is not compatible with the alignment constraint in the provided layout, + * {@linkplain IllegalArgumentException} will be thrown. + * @apiNote + * This method can also be used to create an address layout which, when used, creates native memory + * segments with maximal size (e.g. {@linkplain Long#MAX_VALUE}). This can be done by using a target sequence + * layout with unspecified size, as follows: + * {@snippet lang = java: + * AddressLayout addressLayout = ... + * AddressLayout unboundedLayout = addressLayout.withTargetLayout( + * MemoryLayout.sequenceLayout(ValueLayout.JAVA_BYTE)); + *} + *

+ * This method is restricted. + * Restricted methods are unsafe, and, if used incorrectly, their use might crash + * the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on + * restricted methods, and use safe and supported functionalities, where possible. + * + * @param layout the target layout. + * @return an address layout with same characteristics as this layout, but with the provided target layout. + * @throws IllegalCallerException If the caller is in a module that does not have native access enabled. + * @see #targetLayout() + */ + @CallerSensitive + AddressLayout withTargetLayout(MemoryLayout layout); + + /** + * Returns an address layout with the same carrier, alignment constraint, name and order as this address layout, + * but without any specified target layout. + *

+ *
+ * This can be useful to compare two address layouts that have different target layouts, but are otherwise equal.
+ *
+ * @return an address layout with same characteristics as this layout, but with no target layout.
+ * @see #targetLayout()
+ */
+ AddressLayout withoutTargetLayout();
+
+ /**
+ * {@return the target layout associated with this address layout (if any)}.
+ */
+ Optional<MemoryLayout> targetLayout();
+
+}
diff --git a/src/java.base/share/classes/java/lang/foreign/Arena.java b/src/java.base/share/classes/java/lang/foreign/Arena.java
index 2fff341e658..287f466b7f7 100644
--- a/src/java.base/share/classes/java/lang/foreign/Arena.java
+++ b/src/java.base/share/classes/java/lang/foreign/Arena.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,40 +27,105 @@ package java.lang.foreign;
 
 import jdk.internal.foreign.MemorySessionImpl;
 import jdk.internal.javac.PreviewFeature;
+import jdk.internal.ref.CleanerFactory;
+
+import java.lang.foreign.MemorySegment.Scope;
 
 /**
- * An arena controls the lifecycle of memory segments, providing both flexible allocation and timely deallocation.
+ * An arena controls the lifecycle of native memory segments, providing both flexible allocation and timely deallocation.
 *

- * An arena has a {@linkplain #scope() scope}, called the arena scope. When the arena is {@linkplain #close() closed}, - * the arena scope is no longer {@linkplain SegmentScope#isAlive() alive}. As a result, all the - * segments associated with the arena scope are invalidated, safely and atomically, their backing memory regions are - * deallocated (where applicable) and can no longer be accessed after the arena is closed: + * An arena has a {@linkplain MemorySegment.Scope scope} - the arena scope. All the segments allocated + * by the arena are associated with the arena scope. As such, the arena determines the temporal bounds + * of all the memory segments allocated by it. + *

+ * Moreover, an arena also determines whether access to memory segments allocated by it should be + * {@linkplain MemorySegment#isAccessibleBy(Thread) restricted} to specific threads. + * An arena is a {@link SegmentAllocator} and features several allocation methods that can be used by clients + * to obtain native segments. + *

+ * The simplest arena is the {@linkplain Arena#global() global arena}. The global arena + * features an unbounded lifetime. As such, native segments allocated with the global arena are always + * accessible and their backing regions of memory are never deallocated. Moreover, memory segments allocated with the + * global arena can be {@linkplain MemorySegment#isAccessibleBy(Thread) accessed} from any thread. + * {@snippet lang = java: + * MemorySegment segment = Arena.global().allocate(100, 1); + * ... + * // segment is never deallocated! + *} + *

+ * Alternatively, clients can obtain an {@linkplain Arena#ofAuto() automatic arena}, that is an arena + * which features a bounded lifetime that is managed, automatically, by the garbage collector. As such, the regions + * of memory backing memory segments allocated with the automatic arena are deallocated at some unspecified time + * after the automatic arena (and all the segments allocated by it) become + * unreachable, as shown below: * * {@snippet lang = java: - * try (Arena arena = Arena.openConfined()) { - * MemorySegment segment = MemorySegment.allocateNative(100, arena.scope()); - * ... - * } // memory released here + * MemorySegment segment = Arena.ofAuto().allocate(100, 1); + * ... + * segment = null; // the segment region becomes available for deallocation after this point *} - * - * Furthermore, an arena is a {@link SegmentAllocator}. All the segments {@linkplain #allocate(long, long) allocated} by the - * arena are associated with the arena scope. This makes arenas extremely useful when interacting with foreign code, as shown below: + * Memory segments allocated with an automatic arena can also be {@linkplain MemorySegment#isAccessibleBy(Thread) accessed} from any thread. + *

+ * Rather than leaving deallocation in the hands of the Java runtime, clients will often wish to exercise control over + * the timing of deallocation for regions of memory that back memory segments. Two kinds of arenas support this, + * namely {@linkplain #ofConfined() confined} and {@linkplain #ofShared() shared} arenas. They both feature + * bounded lifetimes that are managed manually. For instance, the lifetime of a confined arena starts when the confined + * arena is created, and ends when the confined arena is {@linkplain #close() closed}. As a result, the regions of memory + * backing memory segments allocated with a confined arena are deallocated when the confined arena is closed. + * When this happens, all the segments allocated with the confined arena are invalidated, and subsequent access + * operations on these segments will fail {@link IllegalStateException}: * * {@snippet lang = java: - * try (Arena arena = Arena.openConfined()) { - * MemorySegment nativeArray = arena.allocateArray(ValueLayout.JAVA_INT, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9); - * MemorySegment nativeString = arena.allocateUtf8String("Hello!"); - * MemorySegment upcallStub = linker.upcallStub(handle, desc, arena.scope()); + * MemorySegment segment = null; + * try (Arena arena = Arena.ofConfined()) { + * segment = arena.allocate(100); * ... - * } // memory released here + * } // segment region deallocated here + * segment.get(ValueLayout.JAVA_BYTE, 0); // throws IllegalStateException *} * + * Memory segments allocated with a {@linkplain #ofConfined() confined arena} can only be accessed (and closed) by the + * thread that created the arena. If access to a memory segment from multiple threads is required, clients can allocate + * segments in a {@linkplain #ofShared() shared arena} instead. + *

+ * The characteristics of the various arenas are summarized in the following table: + * + *

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Arenas characteristics
KindBounded lifetimeExplicitly closeableAccessible from multiple threads
GlobalNoNoYes
AutomaticYesNoYes
ConfinedYesYesNo
SharedYesYesYes
+ * *

Safety and thread-confinement

* * Arenas provide strong temporal safety guarantees: a memory segment allocated by an arena cannot be accessed * after the arena has been closed. The cost of providing this guarantee varies based on the * number of threads that have access to the memory segments allocated by the arena. For instance, if an arena - * is always created and closed by one thread, and the memory segments associated with the arena's scope are always + * is always created and closed by one thread, and the memory segments allocated by the arena are always * accessed by that same thread, then ensuring correctness is trivial. *

* Conversely, if an arena allocates segments that can be accessed by multiple threads, or if the arena can be closed @@ -70,34 +135,120 @@ import jdk.internal.javac.PreviewFeature; * impact, arenas are divided into thread-confined arenas, and shared arenas. *

* Confined arenas, support strong thread-confinement guarantees. Upon creation, they are assigned an - * {@linkplain #isCloseableBy(Thread) owner thread}, typically the thread which initiated the creation operation. - * The segments created by a confined arena can only be {@linkplain SegmentScope#isAccessibleBy(Thread) accessed} + * owner thread, typically the thread which initiated the creation operation. + * The segments created by a confined arena can only be {@linkplain MemorySegment#isAccessibleBy(Thread) accessed} * by the owner thread. Moreover, any attempt to close the confined arena from a thread other than the owner thread will * fail with {@link WrongThreadException}. *

* Shared arenas, on the other hand, have no owner thread. The segments created by a shared arena - * can be {@linkplain SegmentScope#isAccessibleBy(Thread) accessed} by any thread. This might be useful when + * can be {@linkplain MemorySegment#isAccessibleBy(Thread) accessed} by any thread. This might be useful when * multiple threads need to access the same memory segment concurrently (e.g. in the case of parallel processing). - * Moreover, a shared arena {@linkplain #isCloseableBy(Thread) can be closed} by any thread. + * Moreover, a shared arena can be closed by any thread. + * + *

Custom arenas

+ *
+ * Clients can define custom arenas to implement more efficient allocation strategies, or to have better control over
+ * when (and by whom) an arena can be closed. As an example, the following code defines a slicing arena that behaves
+ * like a confined arena (i.e., single-threaded access), but internally uses a
+ * {@linkplain SegmentAllocator#slicingAllocator(MemorySegment) slicing allocator} to respond to allocation requests.
+ * When the slicing arena is closed, the underlying confined arena is also closed; this will invalidate all segments
+ * allocated with the slicing arena (since the scope of the slicing arena is the same as that of the underlying
+ * confined arena):
+ *
+ * {@snippet lang = java:
+ * class SlicingArena implements Arena {
+ *     final Arena arena = Arena.ofConfined();
+ *     final SegmentAllocator slicingAllocator;
+ *
+ *     SlicingArena(long size) {
+ *         slicingAllocator = SegmentAllocator.slicingAllocator(arena.allocate(size));
+ *     }
+ *
+ *     public MemorySegment allocate(long byteSize, long byteAlignment) {
+ *         return slicingAllocator.allocate(byteSize, byteAlignment);
+ *     }
+ *
+ *     public MemorySegment.Scope scope() {
+ *         return arena.scope();
+ *     }
+ *
+ *     public void close() {
+ *         arena.close();
+ *     }
+ * }
+ * }
+ *
+ * In other words, a slicing arena provides a vastly more efficient and scalable allocation strategy, while still retaining
+ * the timely deallocation guarantee provided by the underlying confined arena:
+ *
+ * {@snippet lang = java:
+ * try (Arena slicingArena = new SlicingArena(1000)) {
+ *     for (int i = 0 ; i < 10 ; i++) {
+ *         MemorySegment s = slicingArena.allocateArray(JAVA_INT, 1, 2, 3, 4, 5);
+ *         ...
+ *     }
+ * } // all memory allocated is released here
+ * }
+ *
+ * @implSpec
+ * Implementations of this interface are thread-safe.
+ * + * @see MemorySegment * * @since 20 */ @PreviewFeature(feature=PreviewFeature.Feature.FOREIGN) public interface Arena extends SegmentAllocator, AutoCloseable { + /** + * Creates a new arena that is managed, automatically, by the garbage collector. + * Segments obtained with the returned arena can be + * {@linkplain MemorySegment#isAccessibleBy(Thread) accessed} by any thread. + * Calling {@link #close()} on the returned arena will result in an {@link UnsupportedOperationException}. + * + * @return a new arena that is managed, automatically, by the garbage collector. + */ + static Arena ofAuto() { + return MemorySessionImpl.createImplicit(CleanerFactory.cleaner()).asArena(); + } + + /** + * Obtains the global arena. Segments obtained with the global arena can be + * {@linkplain MemorySegment#isAccessibleBy(Thread) accessed} by any thread. + * Calling {@link #close()} on the returned arena will result in an {@link UnsupportedOperationException}. + * + * @return the global arena. + */ + static Arena global() { + class Holder { + static final Arena GLOBAL = MemorySessionImpl.GLOBAL.asArena(); + } + return Holder.GLOBAL; + } + + /** + * {@return a new confined arena, owned by the current thread} + */ + static Arena ofConfined() { + return MemorySessionImpl.createConfined(Thread.currentThread()).asArena(); + } + + /** + * {@return a new shared arena} + */ + static Arena ofShared() { + return MemorySessionImpl.createShared().asArena(); + } + /** * Returns a native memory segment with the given size (in bytes) and alignment constraint (in bytes). - * The returned segment is associated with the arena scope. + * The returned segment is associated with this {@linkplain #scope() arena scope}. * The segment's {@link MemorySegment#address() address} is the starting address of the * allocated off-heap memory region backing the segment, and the address is * aligned according the provided alignment constraint. 
* * @implSpec - * The default implementation of this method is equivalent to the following code: - * {@snippet lang = java: - * MemorySegment.allocateNative(bytesSize, byteAlignment, scope()); - *} - * More generally implementations of this method must return a native segment featuring the requested size, + * Implementations of this method must return a native segment featuring the requested size, * and that is compatible with the provided alignment constraint. Furthermore, for any two segments * {@code S1, S2} returned by this method, the following invariant must hold: * @@ -110,57 +261,43 @@ public interface Arena extends SegmentAllocator, AutoCloseable { * @return a new native memory segment. * @throws IllegalArgumentException if {@code bytesSize < 0}, {@code alignmentBytes <= 0}, or if {@code alignmentBytes} * is not a power of 2. - * @throws IllegalStateException if the arena has already been {@linkplain #close() closed}. - * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. - * @see MemorySegment#allocateNative(long, long, SegmentScope) + * @throws IllegalStateException if this arena has already been {@linkplain #close() closed}. + * @throws WrongThreadException if this arena is confined, and this method is called from a thread {@code T} + * other than the arena owner thread. */ @Override default MemorySegment allocate(long byteSize, long byteAlignment) { - return MemorySegment.allocateNative(byteSize, byteAlignment, scope()); + return ((MemorySessionImpl)scope()).allocate(byteSize, byteAlignment); } /** * {@return the arena scope} */ - SegmentScope scope(); + Scope scope(); /** - * Closes this arena. If this method completes normally, the arena scope is no longer {@linkplain SegmentScope#isAlive() alive}, + * Closes this arena. 
If this method completes normally, the arena scope is no longer {@linkplain Scope#isAlive() alive}, * and all the memory segments associated with it can no longer be accessed. Furthermore, any off-heap region of memory backing the - * segments associated with that scope are also released. + * segments obtained from this arena are also released. * * @apiNote This operation is not idempotent; that is, closing an already closed arena always results in an * exception being thrown. This reflects a deliberate design choice: failure to close an arena might reveal a bug * in the underlying application logic. * - * @see SegmentScope#isAlive() + * @implSpec If this method completes normally, then {@code this.scope().isAlive() == false}. + * Implementations are allowed to throw {@link UnsupportedOperationException} if an explicit close operation is + * not supported. + * + * @see Scope#isAlive() * * @throws IllegalStateException if the arena has already been closed. - * @throws IllegalStateException if the arena scope is {@linkplain SegmentScope#whileAlive(Runnable) kept alive}. - * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code isCloseableBy(T) == false}. + * @throws IllegalStateException if a segment associated with this arena is being accessed concurrently, e.g. + * by a {@linkplain Linker#downcallHandle(FunctionDescriptor, Linker.Option...) downcall method handle}. + * @throws WrongThreadException if this arena is confined, and this method is called from a thread {@code T} + * other than the arena owner thread. + * @throws UnsupportedOperationException if this arena does not support explicit closure. */ @Override void close(); - /** - * {@return {@code true} if the provided thread can close this arena} - * @param thread the thread to be tested. 
- */ - boolean isCloseableBy(Thread thread); - - /** - * {@return a new confined arena, owned by the current thread} - */ - static Arena openConfined() { - return MemorySessionImpl.createConfined(Thread.currentThread()).asArena(); - } - - /** - * {@return a new shared arena} - */ - static Arena openShared() { - return MemorySessionImpl.createShared().asArena(); - } } diff --git a/src/java.base/share/classes/java/lang/foreign/FunctionDescriptor.java b/src/java.base/share/classes/java/lang/foreign/FunctionDescriptor.java index 3b6627d7f92..27759a78845 100644 --- a/src/java.base/share/classes/java/lang/foreign/FunctionDescriptor.java +++ b/src/java.base/share/classes/java/lang/foreign/FunctionDescriptor.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ import jdk.internal.javac.PreviewFeature; * A function descriptor models the signature of foreign functions. A function descriptor is made up of zero or more * argument layouts and zero or one return layout. A function descriptor is typically used when creating * {@linkplain Linker#downcallHandle(MemorySegment, FunctionDescriptor, Linker.Option...) downcall method handles} or - * {@linkplain Linker#upcallStub(MethodHandle, FunctionDescriptor, SegmentScope) upcall stubs}. + * {@linkplain Linker#upcallStub(MethodHandle, FunctionDescriptor, Arena, Linker.Option...) upcall stubs}. * * @implSpec * Implementing classes are immutable, thread-safe and value-based. 
diff --git a/src/java.base/share/classes/java/lang/foreign/GroupLayout.java b/src/java.base/share/classes/java/lang/foreign/GroupLayout.java index 56ace4ee135..872f4923992 100644 --- a/src/java.base/share/classes/java/lang/foreign/GroupLayout.java +++ b/src/java.base/share/classes/java/lang/foreign/GroupLayout.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,5 +63,14 @@ public sealed interface GroupLayout extends MemoryLayout permits StructLayout, U * {@inheritDoc} */ @Override + GroupLayout withoutName(); + + /** + * {@inheritDoc} + * @throws IllegalArgumentException {@inheritDoc} + * @throws IllegalArgumentException if {@code bitAlignment} is less than {@code M}, where {@code M} is the maximum alignment + * constraint in any of the member layouts associated with this group layout. + */ + @Override GroupLayout withBitAlignment(long bitAlignment); } diff --git a/src/java.base/share/classes/java/lang/foreign/Linker.java b/src/java.base/share/classes/java/lang/foreign/Linker.java index 12ba7f6ce5f..71ef4200eeb 100644 --- a/src/java.base/share/classes/java/lang/foreign/Linker.java +++ b/src/java.base/share/classes/java/lang/foreign/Linker.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,8 +34,11 @@ import jdk.internal.reflect.CallerSensitive; import jdk.internal.reflect.Reflection; import java.lang.invoke.MethodHandle; -import java.util.Arrays; +import java.nio.ByteOrder; +import java.util.Objects; +import java.util.Optional; import java.util.Set; +import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -54,46 +57,351 @@ import java.util.stream.Stream; *
  • A linker allows Java code to link against foreign functions, via * {@linkplain #downcallHandle(MemorySegment, FunctionDescriptor, Option...) downcall method handles}; and
  • *
  • A linker allows foreign functions to call Java method handles, - * via the generation of {@linkplain #upcallStub(MethodHandle, FunctionDescriptor, SegmentScope) upcall stubs}.
  • + * via the generation of {@linkplain #upcallStub(MethodHandle, FunctionDescriptor, Arena, Option...) upcall stubs}. * * In addition, a linker provides a way to look up foreign functions in libraries that conform to the ABI. Each linker * chooses a set of libraries that are commonly used on the OS and processor combination associated with the ABI. * For example, a linker for Linux/x64 might choose two libraries: {@code libc} and {@code libm}. The functions in these * libraries are exposed via a {@linkplain #defaultLookup() symbol lookup}. - *

    - * The {@link #nativeLinker()} method provides a linker for the ABI associated with the OS and processor where the Java runtime - * is currently executing. This linker also provides access, via its {@linkplain #defaultLookup() default lookup}, - * to the native libraries loaded with the Java runtime. * - *

    Downcall method handles

    + *

    Calling native functions

    * - * {@linkplain #downcallHandle(FunctionDescriptor, Option...) Linking a foreign function} is a process which requires a function descriptor, - * a set of memory layouts which, together, specify the signature of the foreign function to be linked, and returns, - * when complete, a downcall method handle, that is, a method handle that can be used to invoke the target foreign function. - *

    - * The Java {@linkplain java.lang.invoke.MethodType method type} associated with the returned method handle is - * {@linkplain FunctionDescriptor#toMethodType() derived} from the argument and return layouts in the function descriptor. - * The downcall method handle type, might then be decorated by additional leading parameters, in the given order if both are present: - *

      - *
    • If the downcall method handle is created {@linkplain #downcallHandle(FunctionDescriptor, Option...) without specifying a target address}, - * the downcall method handle type features a leading parameter of type {@link MemorySegment}, from which the - * address of the target foreign function can be derived.
    • - *
    • If the function descriptor's return layout is a group layout, the resulting downcall method handle accepts - * an additional leading parameter of type {@link SegmentAllocator}, which is used by the linker runtime to allocate the - * memory region associated with the struct returned by the downcall method handle.
    • - *
    + * The {@linkplain #nativeLinker() native linker} can be used to link against functions + * defined in C libraries (native functions). Suppose we wish to downcall from Java to the {@code strlen} function + * defined in the standard C library: + * {@snippet lang = c: + * size_t strlen(const char *s); + * } + * A downcall method handle that exposes {@code strlen} is obtained, using the native linker, as follows: * - *

    Upcall stubs

+ * {@snippet lang = java: + * Linker linker = Linker.nativeLinker(); + * MethodHandle strlen = linker.downcallHandle( + * linker.defaultLookup().find("strlen").get(), + * FunctionDescriptor.of(JAVA_LONG, ADDRESS) + * ); + * } * - * {@linkplain #upcallStub(MethodHandle, FunctionDescriptor, SegmentScope) Creating an upcall stub} requires a method - * handle and a function descriptor; in this case, the set of memory layouts in the function descriptor - * specify the signature of the function pointer associated with the upcall stub. + * Note how the native linker also provides access, via its {@linkplain #defaultLookup() default lookup}, + * to the native functions defined by the C libraries loaded with the Java runtime. Above, the default lookup + * is used to search the address of the {@code strlen} native function. That address is then passed, along with + * a platform-dependent description of the signature of the function expressed as a + * {@link FunctionDescriptor} (more on that below) to the native linker's + * {@link #downcallHandle(MemorySegment, FunctionDescriptor, Option...)} method. + * The obtained downcall method handle is then invoked as follows: + * + * {@snippet lang = java: + * try (Arena arena = Arena.ofConfined()) { + * MemorySegment str = arena.allocateUtf8String("Hello"); + * long len = strlen.invoke(str); // 5 + * } + * } + *

    Describing C signatures

    + * + * When interacting with the native linker, clients must provide a platform-dependent description of the signature + * of the C function they wish to link against. This description, a {@link FunctionDescriptor function descriptor}, + * defines the layouts associated with the parameter types and return type (if any) of the C function. *

    - * The type of the provided method handle's type has to match the method type associated with the upcall stub, - * which is {@linkplain FunctionDescriptor#toMethodType() derived} from the provided function descriptor. + * Scalar C types such as {@code bool}, {@code int} are modelled as {@linkplain ValueLayout value layouts} + * of a suitable carrier. The mapping between a scalar type and its corresponding layout is dependent on the ABI + * implemented by the native linker. For instance, the C type {@code long} maps to the layout constant + * {@link ValueLayout#JAVA_LONG} on Linux/x64, but maps to the layout constant {@link ValueLayout#JAVA_INT} on + * Windows/x64. Similarly, the C type {@code size_t} maps to the layout constant {@link ValueLayout#JAVA_LONG} + * on 64-bit platforms, but maps to the layout constant {@link ValueLayout#JAVA_INT} on 32-bit platforms. *

    - * Upcall stubs are modelled by instances of type {@link MemorySegment}; upcall stubs can be passed by reference to other - * downcall method handles and, they are released via their associated {@linkplain SegmentScope scope}. + * Composite types are modelled as {@linkplain GroupLayout group layouts}. More specifically, a C {@code struct} type + * maps to a {@linkplain StructLayout struct layout}, whereas a C {@code union} type maps to a {@link UnionLayout union + * layout}. When defining a struct or union layout, clients must pay attention to the size and alignment constraint + * of the corresponding composite type definition in C. For instance, padding between two struct fields + * must be modelled explicitly, by adding an adequately sized {@linkplain PaddingLayout padding layout} member + * to the resulting struct layout. + *

    + * Finally, pointer types such as {@code int**} and {@code int(*)(size_t*, size_t*)} are modelled as + * {@linkplain AddressLayout address layouts}. When the spatial bounds of the pointer type are known statically, + * the address layout can be associated with a {@linkplain AddressLayout#targetLayout() target layout}. For instance, + * a pointer that is known to point to a C {@code int[2]} array can be modelled as an address layout whose + * target layout is a sequence layout whose element count is 2, and whose element type is {@link ValueLayout#JAVA_INT}. + *

    + * The following table shows some examples of how C types are modelled in Linux/x64: + * + *

    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Mapping C types
    C typeLayoutJava type
    {@code bool}{@link ValueLayout#JAVA_BOOLEAN}{@code boolean}
    {@code char}{@link ValueLayout#JAVA_BYTE}{@code byte}
    {@code short}{@link ValueLayout#JAVA_SHORT}{@code short}
    {@code int}{@link ValueLayout#JAVA_INT}{@code int}
    {@code long}{@link ValueLayout#JAVA_LONG}{@code long}
    {@code long long}{@link ValueLayout#JAVA_LONG}{@code long}
    {@code float}{@link ValueLayout#JAVA_FLOAT}{@code float}
    {@code double}{@link ValueLayout#JAVA_DOUBLE}{@code double}
    {@code size_t}{@link ValueLayout#JAVA_LONG}{@code long}
    {@code char*}, {@code int**}, {@code struct Point*}{@link ValueLayout#ADDRESS}{@link MemorySegment}
    {@code int (*ptr)[10]} + *
    + * ValueLayout.ADDRESS.withTargetLayout(
    + *     MemoryLayout.sequenceLayout(10,
    + *         ValueLayout.JAVA_INT)
    + * );
    + * 
    + *
    {@link MemorySegment}
    struct Point { int x; long y; }; + *
    + * MemoryLayout.structLayout(
    + *     ValueLayout.JAVA_INT.withName("x"),
    + *     MemoryLayout.paddingLayout(32),
    + *     ValueLayout.JAVA_LONG.withName("y")
    + * );
    + * 
    + *
    {@link MemorySegment}
    union Choice { float a; int b; } + *
    + * MemoryLayout.unionLayout(
    + *     ValueLayout.JAVA_FLOAT.withName("a"),
    + *     ValueLayout.JAVA_INT.withName("b")
    + * );
    + * 
    + *
    {@link MemorySegment}
    + *

    + * All the native linker implementations limit the function descriptors that they support to those that contain + * only so-called canonical layouts. A canonical layout has the following characteristics: + *

      + *
    1. Its alignment constraint is set to its natural alignment
    2. + *
    3. If it is a {@linkplain ValueLayout value layout}, its {@linkplain ValueLayout#order() byte order} is + * the {@linkplain ByteOrder#nativeOrder() native byte order}. + *
    4. If it is a {@linkplain GroupLayout group layout}, its size is a multiple of its alignment constraint, and
    5. + *
    6. It does not contain padding other than what is strictly required to align its non-padding layout elements, + * or to satisfy constraint 3
    7. + *
    + * + *

    Function pointers

+ * + * Sometimes, it is useful to pass Java code as a function pointer to some native function; this is achieved by using + * an {@linkplain #upcallStub(MethodHandle, FunctionDescriptor, Arena, Option...) upcall stub}. To demonstrate this, + * let's consider the following function from the C standard library: + * + * {@snippet lang = c: + * void qsort(void *base, size_t nmemb, size_t size, + * int (*compar)(const void *, const void *)); + * } + * + * The {@code qsort} function can be used to sort the contents of an array, using a custom comparator function which is + * passed as a function pointer (the {@code compar} parameter). To be able to call the {@code qsort} function from Java, + * we must first create a downcall method handle for it, as follows: + * + * {@snippet lang = java: + * Linker linker = Linker.nativeLinker(); + * MethodHandle qsort = linker.downcallHandle( + * linker.defaultLookup().find("qsort").get(), + * FunctionDescriptor.ofVoid(ADDRESS, JAVA_LONG, JAVA_LONG, ADDRESS) + * ); + * } + * + * As before, we use {@link ValueLayout#JAVA_LONG} to map the C type {@code size_t}, and {@link ValueLayout#ADDRESS} + * for both the first pointer parameter (the array pointer) and the last parameter (the function pointer). + *

+ * To invoke the {@code qsort} downcall handle obtained above, we need a function pointer to be passed as the last + * parameter. That is, we need to create a function pointer out of an existing method handle. First, let's write a + * Java method that can compare two int elements passed as pointers (i.e. as {@linkplain MemorySegment memory segments}): + * + * {@snippet lang = java: + * class Qsort { + * static int qsortCompare(MemorySegment elem1, MemorySegment elem2) { + * return Integer.compare(elem1.get(JAVA_INT, 0), elem2.get(JAVA_INT, 0)); + * } + * } + * } + * + * Now let's create a method handle for the comparator method defined above: + * + * {@snippet lang = java: + * FunctionDescriptor comparDesc = FunctionDescriptor.of(JAVA_INT, + * ADDRESS.withTargetLayout(JAVA_INT), + * ADDRESS.withTargetLayout(JAVA_INT)); + * MethodHandle comparHandle = MethodHandles.lookup() + * .findStatic(Qsort.class, "qsortCompare", + * comparDesc.toMethodType()); + * } + * + * First, we create a function descriptor for the function pointer type. Since we know that the parameters passed to + * the comparator method will be pointers to elements of a C {@code int[]} array, we can specify {@link ValueLayout#JAVA_INT} + * as the target layout for the address layouts of both parameters. This will allow the comparator method to access + * the contents of the array elements to be compared. We then {@linkplain FunctionDescriptor#toMethodType() turn} + * that function descriptor into a suitable {@linkplain java.lang.invoke.MethodType method type} which we then use to look up + * the comparator method handle.
We can now create an upcall stub which points to that method, and pass it, as a function + * pointer, to the {@code qsort} downcall handle, as follows: + * + * {@snippet lang = java: + * try (Arena arena = Arena.ofConfined()) { + * MemorySegment comparFunc = linker.upcallStub(comparHandle, comparDesc, arena); + * MemorySegment array = arena.allocateArray(JAVA_INT, 0, 9, 3, 4, 6, 5, 1, 8, 2, 7); + * qsort.invokeExact(array, 10L, 4L, comparFunc); + * int[] sorted = array.toArray(JAVA_INT); // [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ] + * } + * } + * + * This code creates an off-heap array, copies the contents of a Java array into it, and then passes the array to the + * {@code qsort} method handle along with the comparator function we obtained from the native linker. After the invocation, the contents + * of the off-heap array will be sorted according to our comparator function, written in Java. We then extract a + * new Java array from the segment, which contains the sorted elements. + * + *

    Functions returning pointers

    + * + * When interacting with native functions, it is common for those functions to allocate a region of memory and return + * a pointer to that region. Let's consider the following function from the C standard library: + * + * {@snippet lang = c: + * void *malloc(size_t size); + * } + * + * The {@code malloc} function allocates a region of memory of given size, + * and returns a pointer to that region of memory, which is later deallocated using another function from + * the C standard library: + * + * {@snippet lang = c: + * void free(void *ptr); + * } + * + * The {@code free} function takes a pointer to a region of memory and deallocates that region. In this section we + * will show how to interact with these native functions, with the aim of providing a safe allocation + * API (the approach outlined below can of course be generalized to allocation functions other than {@code malloc} + * and {@code free}). + *

+ * First, we need to create the downcall method handles for {@code malloc} and {@code free}, as follows: + * + * {@snippet lang = java: + * Linker linker = Linker.nativeLinker(); + * + * MethodHandle malloc = linker.downcallHandle( + * linker.defaultLookup().find("malloc").get(), + * FunctionDescriptor.of(ADDRESS, JAVA_LONG) + * ); + * + * MethodHandle free = linker.downcallHandle( + * linker.defaultLookup().find("free").get(), + * FunctionDescriptor.ofVoid(ADDRESS) + * ); + * } + * + * When interacting with native functions returning a pointer (such as {@code malloc}), the Java runtime has no insight + * into the size or the lifetime of the returned pointer. Consider the following code: + * + * {@snippet lang = java: + * MemorySegment segment = (MemorySegment)malloc.invokeExact(100L); + * } + * + * The size of the segment returned by the {@code malloc} downcall method handle is + * zero. Moreover, the scope of the + * returned segment is a fresh scope that is always alive. To provide safe access to the segment, we must, + * unsafely, resize the segment to the desired size (100, in this case). It might also be desirable to + * attach the segment to some existing {@linkplain Arena arena}, so that the lifetime of the region of memory + * backing the segment can be managed automatically, as for any other native segment created directly from Java code. + * Both these operations are accomplished using the restricted {@link MemorySegment#reinterpret(long, Arena, Consumer)} + * method, as follows: + * + * {@snippet lang = java: + * MemorySegment allocateMemory(long byteSize, Arena arena) { + * MemorySegment segment = (MemorySegment)malloc.invokeExact(byteSize); // size = 0, scope = always alive + * return segment.reinterpret(byteSize, arena, s -> free.invokeExact(s)); // size = byteSize, scope = arena.scope() + * } + * } + * + * The {@code allocateMemory} method defined above accepts two parameters: a size and an arena.
The method calls the + * {@code malloc} downcall method handle, and unsafely reinterprets the returned segment, by giving it a new size + * (the size passed to the {@code allocateMemory} method) and a new scope (the scope of the provided arena). + * The method also specifies a cleanup action to be executed when the provided arena is closed. Unsurprisingly, + * the cleanup action passes the segment to the {@code free} downcall method handle, to deallocate the underlying + * region of memory. We can use the {@code allocateMemory} method as follows: + * + * {@snippet lang = java: + * try (Arena arena = Arena.ofConfined()) { + * MemorySegment segment = allocateMemory(100, arena); + * } // 'free' called here + * } + * + * Note how the segment obtained from {@code allocateMemory} acts as any other segment managed by the confined arena. More + * specifically, the obtained segment has the desired size, can only be accessed by a single thread (the thread which created + * the confined arena), and its lifetime is tied to the surrounding try-with-resources block. + * + *

    Variadic functions

+ * + * Variadic functions (e.g. a C function declared with a trailing ellipsis {@code ...} at the end of the formal parameter + * list or with an empty formal parameter list) are not supported directly by the native linker. However, it is still possible + * to link a variadic function by using a specialized function descriptor, together with a + * {@linkplain Linker.Option#firstVariadicArg(int) linker option} which indicates the position of the first variadic argument + * in that specialized descriptor. + *

+ * A well-known variadic function is the {@code printf} function, defined in the C standard library: + * + * {@snippet lang = c: + * int printf(const char *format, ...); + * } + * + * This function takes a format string, and a number of additional arguments (the number of such arguments is + * dictated by the format string). Consider the following variadic call: + * + * {@snippet lang = c: + * printf("%d plus %d equals %d", 2, 2, 4); + * } + * + * To perform an equivalent call using a downcall method handle we must create a function descriptor which + * describes the specialized signature of the C function we want to call. This descriptor must include layouts for any + * additional variadic argument we intend to provide. In this case, the specialized signature of the C + * function is {@code (char*, int, int, int)} as the format string accepts three integer parameters. Then, we need to use + * a linker option to specify the position of the first variadic layout in the provided function descriptor (starting from 0). + * In this case, since the first parameter is the format string (a non-variadic argument), the first variadic index + * needs to be set to 1, as follows: + * + * {@snippet lang = java: + * Linker linker = Linker.nativeLinker(); + * MethodHandle printf = linker.downcallHandle( + * linker.defaultLookup().find("printf").get(), + * FunctionDescriptor.of(JAVA_INT, ADDRESS, JAVA_INT, JAVA_INT, JAVA_INT), + * Linker.Option.firstVariadicArg(1) // first int is variadic + * ); + * } + * + * We can then call the specialized downcall handle as usual: + * + * {@snippet lang = java: + * try (Arena arena = Arena.ofConfined()) { + * int res = (int)printf.invokeExact(arena.allocateUtf8String("%d plus %d equals %d"), 2, 2, 4); //prints "2 plus 2 equals 4" + * } + * } * *

    Safety considerations

    * @@ -101,21 +409,7 @@ import java.util.stream.Stream; * contain enough signature information (e.g. arity and types of foreign function parameters). As a consequence, * the linker runtime cannot validate linkage requests. When a client interacts with a downcall method handle obtained * through an invalid linkage request (e.g. by specifying a function descriptor featuring too many argument layouts), - * the result of such interaction is unspecified and can lead to JVM crashes. On downcall handle invocation, - * the linker runtime guarantees the following for any argument {@code A} of type {@link MemorySegment} whose corresponding - * layout is {@link ValueLayout#ADDRESS}: - *
      - *
    • The scope of {@code A} is {@linkplain SegmentScope#isAlive() alive}. Otherwise, the invocation throws - * {@link IllegalStateException};
    • - *
    • The invocation occurs in a thread {@code T} such that {@code A.scope().isAccessibleBy(T) == true}. - * Otherwise, the invocation throws {@link WrongThreadException}; and
    • - *
    • The scope of {@code A} is {@linkplain SegmentScope#whileAlive(Runnable) kept alive} during the invocation.
    • - *
    - * A downcall method handle created from a function descriptor whose return layout is an - * {@linkplain ValueLayout.OfAddress address layout} returns a native segment associated with - * the {@linkplain SegmentScope#global() global scope}. Under normal conditions, the size of the returned segment is {@code 0}. - * However, if the return layout is an {@linkplain ValueLayout.OfAddress#asUnbounded() unbounded} address layout, - * then the size of the returned segment is {@code Long.MAX_VALUE}. + * the result of such interaction is unspecified and can lead to JVM crashes. *

    * When creating upcall stubs the linker runtime validates the type of the target method handle against the provided * function descriptor and report an error if any mismatch is detected. As for downcalls, JVM crashes might occur, @@ -124,12 +418,6 @@ import java.util.stream.Stream; * handle associated with an upcall stub returns a {@linkplain MemorySegment memory segment}, clients must ensure * that this address cannot become invalid after the upcall completes. This can lead to unspecified behavior, * and even JVM crashes, since an upcall is typically executed in the context of a downcall method handle invocation. - *

    - * An upcall stub argument whose corresponding layout is an {@linkplain ValueLayout.OfAddress address layout} - * is a native segment associated with the {@linkplain SegmentScope#global() global scope}. - * Under normal conditions, the size of this segment argument is {@code 0}. However, if the layout associated with - * the upcall stub argument is an {@linkplain ValueLayout.OfAddress#asUnbounded() unbounded} address layout, - * then the size of the segment argument is {@code Long.MAX_VALUE}. * * @implSpec * Implementations of this interface are immutable, thread-safe and value-based. @@ -143,31 +431,6 @@ public sealed interface Linker permits AbstractLinker { * Returns a linker for the ABI associated with the underlying native platform. The underlying native platform * is the combination of OS and processor where the Java runtime is currently executing. *

    - * When interacting with the returned linker, clients must describe the signature of a foreign function using a - * {@link FunctionDescriptor function descriptor} whose argument and return layouts are specified as follows: - *

      - *
    • Scalar types are modelled by a {@linkplain ValueLayout value layout} instance of a suitable carrier. Example - * of scalar types in C are {@code int}, {@code long}, {@code size_t}, etc. The mapping between a scalar type - * and its corresponding layout is dependent on the ABI of the returned linker; - *
    • Composite types are modelled by a {@linkplain GroupLayout group layout}. Depending on the ABI of the - * returned linker, additional {@linkplain MemoryLayout#paddingLayout(long) padding} member layouts might be required to conform - * to the size and alignment constraint of a composite type definition in C (e.g. using {@code struct} or {@code union}); and
    • - *
    • Pointer types are modelled by a {@linkplain ValueLayout value layout} instance with carrier {@link MemorySegment}. - * Examples of pointer types in C are {@code int**} and {@code int(*)(size_t*, size_t*)};
    • - *
    - *

    - * Any layout not listed above is unsupported; function descriptors containing unsupported layouts - * will cause an {@link IllegalArgumentException} to be thrown, when used to create a - * {@link #downcallHandle(MemorySegment, FunctionDescriptor, Option...) downcall method handle} or an - * {@linkplain #upcallStub(MethodHandle, FunctionDescriptor, SegmentScope) upcall stub}. - *

    - * Variadic functions (e.g. a C function declared with a trailing ellipses {@code ...} at the end of the formal parameter - * list or with an empty formal parameter list) are not supported directly. However, it is possible to link a - * variadic function by using {@linkplain Linker.Option#firstVariadicArg(int) a linker option} to indicate - * the start of the list of variadic arguments, together with a specialized function descriptor describing a - * given variable arity callsite. Alternatively, where the foreign library allows it, clients might be able to - * interact with variadic functions by passing a trailing parameter of type {@link VaList} (e.g. as in {@code vsprintf}). - *

    * This method is restricted. * Restricted methods are unsafe, and, if used incorrectly, their use might crash * the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on @@ -178,7 +441,7 @@ public sealed interface Linker permits AbstractLinker { * linker are the native libraries loaded in the process where the Java runtime is currently executing. For example, * on Linux, these libraries typically include {@code libc}, {@code libm} and {@code libdl}. * - * @return a linker for the ABI associated with the OS and processor where the Java runtime is currently executing. + * @return a linker for the ABI associated with the underlying native platform. * @throws UnsupportedOperationException if the underlying native platform is not supported. * @throws IllegalCallerException If the caller is in a module that does not have native access enabled. */ @@ -189,11 +452,7 @@ public sealed interface Linker permits AbstractLinker { } /** - * Creates a method handle which can be used to call a foreign function with the given signature and address. - *

    - * If the provided method type's return type is {@code MemorySegment}, then the resulting method handle features - * an additional prefix parameter, of type {@link SegmentAllocator}, which will be used by the linker to allocate - * structs returned by-value. + * Creates a method handle which is used to call a foreign function with the given signature and address. *

    * Calling this method is equivalent to the following code: * {@snippet lang=java : @@ -214,17 +473,35 @@ public sealed interface Linker permits AbstractLinker { } /** - * Creates a method handle which can be used to call a foreign function with the given signature. - * The resulting method handle features a prefix parameter (as the first parameter) corresponding to the foreign function - * entry point, of type {@link MemorySegment}, which is used to specify the address of the target function - * to be called. + * Creates a method handle which is used to call a foreign function with the given signature. *

    - * If the provided function descriptor's return layout is a {@link GroupLayout}, then the resulting method handle features an - * additional prefix parameter (inserted immediately after the address parameter), of type {@link SegmentAllocator}), - * which will be used by the linker to allocate structs returned by-value. + * The Java {@linkplain java.lang.invoke.MethodType method type} associated with the returned method handle is + * {@linkplain FunctionDescriptor#toMethodType() derived} from the argument and return layouts in the function descriptor, + * but features an additional leading parameter of type {@link MemorySegment}, from which the address of the target + * foreign function is derived. Moreover, if the function descriptor's return layout is a group layout, the resulting + * downcall method handle accepts an additional leading parameter of type {@link SegmentAllocator}, which is used by + * the linker runtime to allocate the memory region associated with the struct returned by the downcall method handle. *

    - * The returned method handle will throw an {@link IllegalArgumentException} if the {@link MemorySegment} parameter passed to it is - * associated with the {@link MemorySegment#NULL} address, or a {@link NullPointerException} if that parameter is {@code null}. + * Upon invoking a downcall method handle, the linker runtime will guarantee the following for any argument + * {@code A} of type {@link MemorySegment} whose corresponding layout is an {@linkplain AddressLayout address layout}: + *

      + *
    • {@code A.scope().isAlive() == true}. Otherwise, the invocation throws {@link IllegalStateException};
    • + *
    • The invocation occurs in a thread {@code T} such that {@code A.isAccessibleBy(T) == true}. + * Otherwise, the invocation throws {@link WrongThreadException}; and
    • + *
• {@code A} is kept alive during the invocation. For instance, if {@code A} has been obtained using a + * {@linkplain Arena#ofShared() shared arena}, any attempt to {@linkplain Arena#close() close} + * the shared arena while the downcall method handle is executing will result in an {@link IllegalStateException}.
    • + *
    + *

+ * Moreover, if the provided function descriptor's return layout is an {@linkplain AddressLayout address layout}, + * invoking the returned method handle will return a native segment associated with + * a fresh scope that is always alive. Under normal conditions, the size of the returned segment is {@code 0}. + * However, if the function descriptor's return layout has a {@linkplain AddressLayout#targetLayout() target layout} {@code T}, + * then the size of the returned segment is set to {@code T.byteSize()}. + *

    + * The returned method handle will throw an {@link IllegalArgumentException} if the {@link MemorySegment} + * representing the target address of the foreign function is the {@link MemorySegment#NULL} address. + * The returned method handle will additionally throw {@link NullPointerException} if any argument passed to it is {@code null}. * * @param function the function descriptor of the target function. * @param options any linker options. @@ -237,12 +514,19 @@ public sealed interface Linker permits AbstractLinker { /** * Creates a stub which can be passed to other foreign functions as a function pointer, associated with the given - * scope. Calling such a function pointer from foreign code will result in the execution of the provided + * arena. Calling such a function pointer from foreign code will result in the execution of the provided * method handle. *

    * The returned memory segment's address points to the newly allocated upcall stub, and is associated with - * the provided scope. As such, the corresponding upcall stub will be deallocated - * when the scope becomes not {@linkplain SegmentScope#isAlive() alive}. + * the provided arena. As such, the lifetime of the returned upcall stub segment is controlled by the + * provided arena. For instance, if the provided arena is a confined arena, the returned + * upcall stub segment will be deallocated when the provided confined arena is {@linkplain Arena#close() closed}. + *

+ * An upcall stub argument whose corresponding layout is an {@linkplain AddressLayout address layout} + * is a native segment associated with a fresh scope that is always alive. + * Under normal conditions, the size of this segment argument is {@code 0}. + * However, if the address layout has a {@linkplain AddressLayout#targetLayout() target layout} {@code T}, then the size of the + * segment argument is set to {@code T.byteSize()}. *

    * The target method handle should not throw any exceptions. If the target method handle does throw an exception, * the VM will exit with a non-zero exit code. To avoid the VM aborting due to an uncaught exception, clients @@ -252,16 +536,17 @@ public sealed interface Linker permits AbstractLinker { * * @param target the target method handle. * @param function the upcall stub function descriptor. - * @param scope the scope associated with the returned upcall stub segment. + * @param arena the arena associated with the returned upcall stub segment. + * @param options any linker options. * @return a zero-length segment whose address is the address of the upcall stub. * @throws IllegalArgumentException if the provided function descriptor is not supported by this linker. * @throws IllegalArgumentException if it is determined that the target method handle can throw an exception, or if the target method handle * has a type that does not match the upcall stub inferred type. - * @throws IllegalStateException if {@code scope} is not {@linkplain SegmentScope#isAlive() alive}. - * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope.isAccessibleBy(T) == false}. + * @throws IllegalStateException if {@code arena.scope().isAlive() == false} + * @throws WrongThreadException if {@code arena} is a confined arena, and this method is called from a + * thread {@code T}, other than the arena's owner thread. */ - MemorySegment upcallStub(MethodHandle target, FunctionDescriptor function, SegmentScope scope); + MemorySegment upcallStub(MethodHandle target, FunctionDescriptor function, Arena arena, Linker.Option... options); /** * Returns a symbol lookup for symbols in a set of commonly used libraries. 
@@ -285,8 +570,7 @@ public sealed interface Linker permits AbstractLinker { */ @PreviewFeature(feature=PreviewFeature.Feature.FOREIGN) sealed interface Option - permits LinkerOptions.LinkerOptionImpl, - Option.CaptureCallState { + permits LinkerOptions.LinkerOptionImpl { /** * {@return a linker option used to denote the index of the first variadic argument layout in a @@ -302,70 +586,91 @@ public sealed interface Linker permits AbstractLinker { * calling a foreign function associated with a downcall method handle, * before it can be overwritten by the Java runtime, or read through conventional means} *

    - * A downcall method handle linked with this option will feature an additional {@link MemorySegment} - * parameter directly following the target address, and optional {@link SegmentAllocator} parameters. - * This memory segment must be a native segment into which the captured state is written. - * - * @param capturedState the names of the values to save. - * @see CaptureCallState#supported() - */ - static CaptureCallState captureCallState(String... capturedState) { - Set set = Stream.of(capturedState) - .map(CapturableState::forName) - .collect(Collectors.toSet()); - return new LinkerOptions.CaptureCallStateImpl(set); - } - - /** - * A linker option for saving portions of the execution state immediately - * after calling a foreign function associated with a downcall method handle, - * before it can be overwritten by the runtime, or read through conventional means. - *

    * Execution state is captured by a downcall method handle on invocation, by writing it * to a native segment provided by the user to the downcall method handle. - * For this purpose, a downcall method handle linked with the {@link #captureCallState(String[])} + * For this purpose, a downcall method handle linked with this * option will feature an additional {@link MemorySegment} parameter directly * following the target address, and optional {@link SegmentAllocator} parameters. - * This parameter represents the native segment into which the captured state is written. + * This parameter, called the 'capture state segment', represents the native segment into which + * the captured state is written. *

    - * The native segment should have the layout {@linkplain CaptureCallState#layout associated} - * with the particular {@code CaptureCallState} instance used to link the downcall handle. + * The capture state segment should have the layout returned by {@linkplain #captureStateLayout}. + * This layout is a struct layout which has a named field for each captured value. *

    - * Captured state can be retrieved from this native segment by constructing var handles - * from the {@linkplain #layout layout} associated with the {@code CaptureCallState} instance. + * Captured state can be retrieved from the capture state segment by constructing var handles + * from the {@linkplain #captureStateLayout capture state layout}. *

    * The following example demonstrates the use of this linker option: * {@snippet lang = "java": * MemorySegment targetAddress = ... - * CaptureCallState ccs = Linker.Option.captureCallState("errno"); + * Linker.Option ccs = Linker.Option.captureCallState("errno"); * MethodHandle handle = Linker.nativeLinker().downcallHandle(targetAddress, FunctionDescriptor.ofVoid(), ccs); * - * VarHandle errnoHandle = ccs.layout().varHandle(PathElement.groupElement("errno")); - * try (Arena arena = Arena.openConfined()) { - * MemorySegment capturedState = arena.allocate(ccs.layout()); + * StructLayout capturedStateLayout = Linker.Option.captureStateLayout(); + * VarHandle errnoHandle = capturedStateLayout.varHandle(PathElement.groupElement("errno")); + * try (Arena arena = Arena.ofConfined()) { + * MemorySegment capturedState = arena.allocate(capturedStateLayout); * handle.invoke(capturedState); * int errno = errnoHandle.get(capturedState); * // use errno * } * } + * + * @param capturedState the names of the values to save. + * @throws IllegalArgumentException if at least one of the provided {@code capturedState} names + * is unsupported on the current platform. + * @see #captureStateLayout() */ - @PreviewFeature(feature=PreviewFeature.Feature.FOREIGN) - sealed interface CaptureCallState extends Option - permits LinkerOptions.CaptureCallStateImpl { - /** - * {@return A struct layout that represents the layout of the native segment passed - * to a downcall handle linked with this {@code CapturedCallState} instance} - */ - StructLayout layout(); + static Option captureCallState(String... 
capturedState) { + Set set = Stream.of(Objects.requireNonNull(capturedState)) + .map(Objects::requireNonNull) + .map(CapturableState::forName) + .collect(Collectors.toSet()); + return new LinkerOptions.CaptureCallState(set); + } - /** - * {@return the names of the state that can be capture by this implementation} - */ - static Set supported() { - return Arrays.stream(CapturableState.values()) - .map(CapturableState::stateName) - .collect(Collectors.toSet()); - } + /** + * {@return A struct layout that represents the layout of the capture state segment that is passed + * to a downcall handle linked with {@link #captureCallState(String...)}}. + *

    + * The capture state layout is platform dependent but is guaranteed to be + * a {@linkplain StructLayout struct layout} containing only {@linkplain ValueLayout value layouts} + * and possibly {@linkplain PaddingLayout padding layouts}. + * As an example, on Windows, the returned layout might contain three value layouts named: + *

      + *
    • GetLastError
    • + *
    • WSAGetLastError
    • + *
    • errno
    • + *
    + * The following snippet shows how to obtain the names of the supported captured value layouts: + * {@snippet lang = java: + * String capturedNames = Linker.Option.captureStateLayout().memberLayouts().stream() + * .map(MemoryLayout::name) + * .flatMap(Optional::stream) + * .map(Objects::toString) + * .collect(Collectors.joining(", ")); + * } + * + * @see #captureCallState(String...) + */ + static StructLayout captureStateLayout() { + return CapturableState.LAYOUT; + } + + /** + * {@return A linker option used to mark a foreign function as trivial} + *

    + * A trivial function is a function that has an extremely short running time + * in all cases (similar to calling an empty function), and does not call back into Java (e.g. using an upcall stub). + *

    + * Using this linker option is a hint which some implementations may use to apply + * optimizations that are only valid for trivial functions. + *

    + * Using this linker option when linking non trivial functions is likely to have adverse effects, + * such as loss of performance, or JVM crashes. + */ + static Option isTrivial() { + return LinkerOptions.IsTrivial.INSTANCE; } } } diff --git a/src/java.base/share/classes/java/lang/foreign/MemoryLayout.java b/src/java.base/share/classes/java/lang/foreign/MemoryLayout.java index 8d56202a04a..eaac5cb936d 100644 --- a/src/java.base/share/classes/java/lang/foreign/MemoryLayout.java +++ b/src/java.base/share/classes/java/lang/foreign/MemoryLayout.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,6 @@ package java.lang.foreign; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; import java.lang.invoke.VarHandle; -import java.nio.ByteOrder; import java.util.EnumSet; import java.util.Objects; import java.util.Optional; @@ -36,6 +35,7 @@ import java.util.Set; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Stream; + import jdk.internal.foreign.LayoutPath; import jdk.internal.foreign.LayoutPath.PathElementImpl.PathKind; import jdk.internal.foreign.Utils; @@ -44,7 +44,6 @@ import jdk.internal.foreign.layout.PaddingLayoutImpl; import jdk.internal.foreign.layout.SequenceLayoutImpl; import jdk.internal.foreign.layout.StructLayoutImpl; import jdk.internal.foreign.layout.UnionLayoutImpl; -import jdk.internal.foreign.layout.ValueLayouts; import jdk.internal.javac.PreviewFeature; /** @@ -200,6 +199,17 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin */ MemoryLayout withName(String name); + /** + * Returns a memory layout of the same type with the same size and alignment constraint as this 
layout, + * but without a name. + *

    + * This can be useful to compare two layouts that have different names, but are otherwise equal. + * + * @return a memory layout without a name. + * @see MemoryLayout#name() + */ + MemoryLayout withoutName(); + /** * Returns the alignment constraint associated with this layout, expressed in bits. Layout alignment defines a power * of two {@code A} which is the bit-wise alignment of the layout. If {@code A <= 8} then {@code A/8} is the number of @@ -235,10 +245,7 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin * @return the layout alignment constraint, in bytes. * @throws UnsupportedOperationException if {@code bitAlignment()} is not a multiple of 8. */ - default long byteAlignment() { - return Utils.bitsToBytesOrThrow(bitAlignment(), - () -> new UnsupportedOperationException("Cannot compute byte alignment; bit alignment is not a multiple of 8")); - } + long byteAlignment(); /** * Returns a memory layout of the same type with the same size and name as this layout, @@ -259,12 +266,14 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin * @throws IllegalArgumentException if the layout path does not select any layout nested in this layout, or if the * layout path contains one or more path elements that select multiple sequence element indices * (see {@link PathElement#sequenceElement()} and {@link PathElement#sequenceElement(long, long)}). + * @throws IllegalArgumentException if the layout path contains one or more dereference path elements + * (see {@link PathElement#dereferenceElement()}). * @throws NullPointerException if either {@code elements == null}, or if any of the elements * in {@code elements} is {@code null}. */ default long bitOffset(PathElement... 
elements) { return computePathOp(LayoutPath.rootPath(this), LayoutPath::offset, - EnumSet.of(PathKind.SEQUENCE_ELEMENT, PathKind.SEQUENCE_RANGE), elements); + EnumSet.of(PathKind.SEQUENCE_ELEMENT, PathKind.SEQUENCE_RANGE, PathKind.DEREF_ELEMENT), elements); } /** @@ -293,10 +302,12 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin * specified by the given layout path elements, when supplied with the missing sequence element indices. * @throws IllegalArgumentException if the layout path contains one or more path elements that select * multiple sequence element indices (see {@link PathElement#sequenceElement(long, long)}). + * @throws IllegalArgumentException if the layout path contains one or more dereference path elements + * (see {@link PathElement#dereferenceElement()}). */ default MethodHandle bitOffsetHandle(PathElement... elements) { return computePathOp(LayoutPath.rootPath(this), LayoutPath::offsetHandle, - EnumSet.of(PathKind.SEQUENCE_RANGE), elements); + EnumSet.of(PathKind.SEQUENCE_RANGE, PathKind.DEREF_ELEMENT), elements); } /** @@ -308,12 +319,14 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin * @throws IllegalArgumentException if the layout path does not select any layout nested in this layout, or if the * layout path contains one or more path elements that select multiple sequence element indices * (see {@link PathElement#sequenceElement()} and {@link PathElement#sequenceElement(long, long)}). + * @throws IllegalArgumentException if the layout path contains one or more dereference path elements + * (see {@link PathElement#dereferenceElement()}). * @throws UnsupportedOperationException if {@code bitOffset(elements)} is not a multiple of 8. * @throws NullPointerException if either {@code elements == null}, or if any of the elements * in {@code elements} is {@code null}. */ default long byteOffset(PathElement... 
elements) { - return Utils.bitsToBytesOrThrow(bitOffset(elements), Utils.BITS_TO_BYTES_THROW_OFFSET); + return Utils.bitsToBytes(bitOffset(elements)); } /** @@ -346,10 +359,12 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin * specified by the given layout path elements, when supplied with the missing sequence element indices. * @throws IllegalArgumentException if the layout path contains one or more path elements that select * multiple sequence element indices (see {@link PathElement#sequenceElement(long, long)}). + * @throws IllegalArgumentException if the layout path contains one or more dereference path elements + * (see {@link PathElement#dereferenceElement()}). */ default MethodHandle byteOffsetHandle(PathElement... elements) { MethodHandle mh = bitOffsetHandle(elements); - mh = MethodHandles.filterReturnValue(mh, Utils.MH_BITS_TO_BYTES_OR_THROW_FOR_OFFSET); + mh = MethodHandles.filterReturnValue(mh, Utils.BITS_TO_BYTES); return mh; } @@ -379,6 +394,28 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin *

    * Additionally, the provided dynamic values must conform to some bound which is derived from the layout path, that is, * {@code 0 <= x_i < b_i}, where {@code 1 <= i <= n}, or {@link IndexOutOfBoundsException} is thrown. + *

    + * Multiple paths can be chained, by using {@linkplain PathElement#dereferenceElement() dereference path elements}. + * A dereference path element can be used to obtain a native memory segment whose base address is the address obtained + * by following the layout path elements immediately preceding the dereference path element. In other words, + * if a layout path contains one or more dereference path elements, the final address accessed by the returned + * var handle can be computed as follows: + * + *

    {@code
    +     * address_1 = base(segment) + offset_1
    +     * address_2 = base(segment_1) + offset_2
    +     * ...
    +     * address_k = base(segment_k-1) + offset_k
    +     * }
    + * + * where {@code k} is the number of dereference path elements in a layout path, {@code segment} is the input segment, + * {@code segment_1}, ... {@code segment_k-1} are the segments obtained by dereferencing the address associated with + * a given dereference path element (e.g. {@code segment_1} is a native segment whose base address is {@code address_1}), + * and {@code offset_1}, {@code offset_2}, ... {@code offset_k} are the offsets computed by evaluating + * the path elements after a given dereference operation (these offsets are obtained using the computation described + * above). In these more complex access operations, all memory accesses immediately preceding a dereference operation + * (e.g. those at addresses {@code address_1}, {@code address_2}, ..., {@code address_k-1} are performed using the + * {@link VarHandle.AccessMode#GET} access mode. * * @apiNote the resulting var handle will feature an additional {@code long} access coordinate for every * unspecified sequence access component contained in this layout path. Moreover, the resulting var handle @@ -388,6 +425,8 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin * @return a var handle which can be used to access a memory segment at the (possibly nested) layout selected by the layout path in {@code elements}. * @throws UnsupportedOperationException if the layout path has one or more elements with incompatible alignment constraint. * @throws IllegalArgumentException if the layout path in {@code elements} does not select a value layout (see {@link ValueLayout}). + * @throws IllegalArgumentException if the layout path in {@code elements} contains a {@linkplain PathElement#dereferenceElement() + * dereference path element} for an address layout that has no {@linkplain AddressLayout#targetLayout() target layout}. * @see MethodHandles#memorySegmentViewVarHandle(ValueLayout) */ default VarHandle varHandle(PathElement... 
elements) { @@ -432,6 +471,8 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin * @param elements the layout path elements. * @return a method handle which can be used to create a slice of the selected layout element, given a segment. * @throws UnsupportedOperationException if the size of the selected layout in bits is not a multiple of 8. + * @throws IllegalArgumentException if the layout path contains one or more dereference path elements + * (see {@link PathElement#dereferenceElement()}). */ default MethodHandle sliceHandle(PathElement... elements) { return computePathOp(LayoutPath.rootPath(this), LayoutPath::sliceHandle, @@ -446,10 +487,12 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin * @throws IllegalArgumentException if the layout path does not select any layout nested in this layout, * or if the layout path contains one or more path elements that select one or more sequence element indices * (see {@link PathElement#sequenceElement(long)} and {@link PathElement#sequenceElement(long, long)}). + * @throws IllegalArgumentException if the layout path contains one or more dereference path elements + * (see {@link PathElement#dereferenceElement()}). */ default MemoryLayout select(PathElement... elements) { return computePathOp(LayoutPath.rootPath(this), LayoutPath::layout, - EnumSet.of(PathKind.SEQUENCE_ELEMENT_INDEX, PathKind.SEQUENCE_RANGE), elements); + EnumSet.of(PathKind.SEQUENCE_ELEMENT_INDEX, PathKind.SEQUENCE_RANGE, PathKind.DEREF_ELEMENT), elements); } private static Z computePathOp(LayoutPath path, Function finalizer, @@ -489,6 +532,7 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin * * @implSpec in case multiple group elements with a matching name exist, the path element returned by this * method will select the first one; that is, the group element with the lowest offset from current path is selected. 
+ * In such cases, using {@link #groupElement(long)} might be preferable. * * @param name the name of the group element to be selected. * @return a path element which selects the group element with the given name. @@ -499,6 +543,23 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin path -> path.groupElement(name)); } + /** + * Returns a path element which selects a member layout with the given index in a group layout. + * The path element returned by this method does not alter the number of free dimensions of any path + * that is combined with such element. + * + * @param index the index of the group element to be selected. + * @return a path element which selects the group element with the given index. + * @throws IllegalArgumentException if {@code index < 0}. + */ + static PathElement groupElement(long index) { + if (index < 0) { + throw new IllegalArgumentException("Index < 0"); + } + return new LayoutPath.PathElementImpl(PathKind.GROUP_ELEMENT, + path -> path.groupElement(index)); + } + /** * Returns a path element which selects the element layout at the specified position in a sequence layout. * The path element returned by this method does not alter the number of free dimensions of any path @@ -578,6 +639,21 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin return new LayoutPath.PathElementImpl(PathKind.SEQUENCE_ELEMENT, LayoutPath::sequenceElement); } + + /** + * Returns a path element which dereferences an address layout as its + * {@linkplain AddressLayout#targetLayout() target layout} (where set). + * The path element returned by this method does not alter the number of free dimensions of any path + * that is combined with such element. Using this path layout to dereference an address layout + * that has no target layout results in an {@link IllegalArgumentException} (e.g. when + * a var handle is {@linkplain #varHandle(PathElement...) obtained}). 
+ * + * @return a path element which dereferences an address layout. + */ + static PathElement dereferenceElement() { + return new LayoutPath.PathElementImpl(PathKind.DEREF_ELEMENT, + LayoutPath::derefElement); + } } /** @@ -611,60 +687,14 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin String toString(); /** - * Creates a padding layout with the given size. + * Creates a padding layout with the given bitSize and a bit-alignment of eight. * - * @param size the padding size in bits. + * @param bitSize the padding size in bits. * @return the new selector layout. - * @throws IllegalArgumentException if {@code size <= 0}. + * @throws IllegalArgumentException if {@code bitSize <= 0} or {@code bitSize % 8 != 0} */ - static PaddingLayout paddingLayout(long size) { - MemoryLayoutUtil.checkSize(size); - return PaddingLayoutImpl.of(size); - } - - /** - * Creates a value layout of given Java carrier and byte order. The type of resulting value layout is determined - * by the carrier provided: - *
      - *
    • {@link ValueLayout.OfBoolean}, for {@code boolean.class}
    • - *
    • {@link ValueLayout.OfByte}, for {@code byte.class}
    • - *
    • {@link ValueLayout.OfShort}, for {@code short.class}
    • - *
    • {@link ValueLayout.OfChar}, for {@code char.class}
    • - *
    • {@link ValueLayout.OfInt}, for {@code int.class}
    • - *
    • {@link ValueLayout.OfFloat}, for {@code float.class}
    • - *
    • {@link ValueLayout.OfLong}, for {@code long.class}
    • - *
    • {@link ValueLayout.OfDouble}, for {@code double.class}
    • - *
    • {@link ValueLayout.OfAddress}, for {@code MemorySegment.class}
    • - *
    - * @param carrier the value layout carrier. - * @param order the value layout's byte order. - * @return a value layout with the given Java carrier and byte-order. - * @throws IllegalArgumentException if the carrier type is not supported. - */ - static ValueLayout valueLayout(Class carrier, ByteOrder order) { - Objects.requireNonNull(carrier); - Objects.requireNonNull(order); - if (carrier == boolean.class) { - return ValueLayouts.OfBooleanImpl.of(order); - } else if (carrier == char.class) { - return ValueLayouts.OfCharImpl.of(order); - } else if (carrier == byte.class) { - return ValueLayouts.OfByteImpl.of(order); - } else if (carrier == short.class) { - return ValueLayouts.OfShortImpl.of(order); - } else if (carrier == int.class) { - return ValueLayouts.OfIntImpl.of(order); - } else if (carrier == float.class) { - return ValueLayouts.OfFloatImpl.of(order); - } else if (carrier == long.class) { - return ValueLayouts.OfLongImpl.of(order); - } else if (carrier == double.class) { - return ValueLayouts.OfDoubleImpl.of(order); - } else if (carrier == MemorySegment.class) { - return ValueLayouts.OfAddressImpl.of(order); - } else { - throw new IllegalArgumentException("Unsupported carrier: " + carrier.getName()); - } + static PaddingLayout paddingLayout(long bitSize) { + return PaddingLayoutImpl.of(MemoryLayoutUtil.requireBitSizeValid(bitSize, false)); } /** @@ -674,10 +704,12 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin * @param elementLayout the sequence element layout. * @return the new sequence layout with the given element layout and size. * @throws IllegalArgumentException if {@code elementCount } is negative. + * @throws IllegalArgumentException if {@code elementLayout.bitAlignment() > elementLayout.bitSize()}. 
    */ static SequenceLayout sequenceLayout(long elementCount, MemoryLayout elementLayout) { - MemoryLayoutUtil.checkSize(elementCount, true); + MemoryLayoutUtil.requireNonNegative(elementCount); Objects.requireNonNull(elementLayout); + Utils.checkElementAlignment(elementLayout, "Element layout alignment greater than its size"); return wrapOverflow(() -> SequenceLayoutImpl.of(elementCount, elementLayout)); } /** @@ -693,6 +725,7 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin * * @param elementLayout the sequence element layout. * @return a new sequence layout with the given element layout and maximum element count. + * @throws IllegalArgumentException if {@code elementLayout.bitAlignment() > elementLayout.bitSize()}. */ static SequenceLayout sequenceLayout(MemoryLayout elementLayout) { Objects.requireNonNull(elementLayout); @@ -706,6 +739,28 @@ public sealed interface MemoryLayout permits SequenceLayout, GroupLayout, Paddin * * @return a struct layout with the given member layouts. * @throws IllegalArgumentException if the sum of the {@linkplain #bitSize() bit sizes} of the member layouts * overflows. + * @throws IllegalArgumentException if a member layout in {@code elements} occurs at an offset (relative to the start + * of the struct layout) which is not compatible with its alignment constraint. + * + * @apiNote This factory does not automatically align element layouts, by inserting additional {@linkplain PaddingLayout + * padding layout} elements. As such, the following struct layout creation will fail with an exception: + * + * {@snippet lang = java: + * structLayout(JAVA_SHORT, JAVA_INT) + * } + * + * To avoid the exception, clients can either insert additional padding layout elements: + * + * {@snippet lang = java: + * structLayout(JAVA_SHORT, MemoryLayout.paddingLayout(16), JAVA_INT) + * } + * + * Or, alternatively, they can use a member layout which features a smaller alignment constraint. 
This will result + * in a packed struct layout: + * + * {@snippet lang = java: + * structLayout(JAVA_SHORT, JAVA_INT.withBitAlignment(16)) + * } */ static StructLayout structLayout(MemoryLayout... elements) { Objects.requireNonNull(elements); diff --git a/src/java.base/share/classes/java/lang/foreign/MemorySegment.java b/src/java.base/share/classes/java/lang/foreign/MemorySegment.java index 11633df2ce7..1580787431f 100644 --- a/src/java.base/share/classes/java/lang/foreign/MemorySegment.java +++ b/src/java.base/share/classes/java/lang/foreign/MemorySegment.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,8 @@ package java.lang.foreign; import java.io.UncheckedIOException; +import java.lang.foreign.Linker.Option; +import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; import java.nio.Buffer; import java.nio.ByteBuffer; @@ -39,17 +41,17 @@ import java.util.Arrays; import java.util.Objects; import java.util.Optional; import java.util.Spliterator; +import java.util.function.Consumer; import java.util.stream.Stream; import jdk.internal.foreign.AbstractMemorySegmentImpl; import jdk.internal.foreign.HeapMemorySegmentImpl; +import jdk.internal.foreign.MemorySessionImpl; import jdk.internal.foreign.NativeMemorySegmentImpl; import jdk.internal.foreign.Utils; import jdk.internal.foreign.abi.SharedUtils; import jdk.internal.foreign.layout.ValueLayouts; import jdk.internal.javac.PreviewFeature; -import jdk.internal.misc.ScopedMemoryAccess; import jdk.internal.reflect.CallerSensitive; -import jdk.internal.reflect.Reflection; import jdk.internal.vm.annotation.ForceInline; /** @@ -63,10 +65,10 @@ import jdk.internal.vm.annotation.ForceInline; * Heap segments can be obtained by 
calling one of the {@link MemorySegment#ofArray(int[])} factory methods. * These methods return a memory segment backed by the on-heap region that holds the specified Java array. *

    - * Native segments can be obtained by calling one of the {@link MemorySegment#allocateNative(long, long, SegmentScope)} + * Native segments can be obtained by calling one of the {@link Arena#allocate(long, long)} * factory methods, which return a memory segment backed by a newly allocated off-heap region with the given size * and aligned to the given alignment constraint. Alternatively, native segments can be obtained by - * {@link FileChannel#map(MapMode, long, long, SegmentScope) mapping} a file into a new off-heap region + * {@link FileChannel#map(MapMode, long, long, Arena) mapping} a file into a new off-heap region * (in some systems, this operation is sometimes referred to as {@code mmap}). * Segments obtained in this way are called mapped segments, and their contents can be {@linkplain #force() persisted} and * {@linkplain #load() loaded} to and from the underlying memory-mapped file. @@ -91,23 +93,22 @@ import jdk.internal.vm.annotation.ForceInline; * Every memory segment has a {@linkplain #byteSize() size}. The size of a heap segment is derived from the Java array * from which it is obtained. This size is predictable across Java runtimes. * The size of a native segment is either passed explicitly - * (as in {@link MemorySegment#allocateNative(long, SegmentScope)}) or derived from a {@link MemoryLayout} - * (as in {@link MemorySegment#allocateNative(MemoryLayout, SegmentScope)}). The size of a memory segment is typically + * (as in {@link Arena#allocate(long, long)}) or derived from a {@link MemoryLayout} + * (as in {@link Arena#allocate(MemoryLayout)}). The size of a memory segment is typically * a positive number but may be zero, but never negative. *

    * The address and size of a memory segment jointly ensure that access operations on the segment cannot fall * outside the boundaries of the region of memory which backs the segment. * That is, a memory segment has spatial bounds. *

    - * Every memory segment is associated with a {@linkplain SegmentScope scope}. This ensures that access operations + * Every memory segment is associated with a {@linkplain Scope scope}. This ensures that access operations * on a memory segment cannot occur when the region of memory which backs the memory segment is no longer available - * (e.g., after the scope associated with the accessed memory segment is no longer {@linkplain SegmentScope#isAlive() alive}). + * (e.g., after the scope associated with the accessed memory segment is no longer {@linkplain Scope#isAlive() alive}). * That is, a memory segment has temporal bounds. *

    - * Finally, access operations on a memory segment are subject to the thread-confinement checks enforced by the associated - * scope; that is, if the segment is associated with the {@linkplain SegmentScope#global() global scope} or an {@linkplain SegmentScope#auto() automatic scope}, - * it can be accessed by multiple threads. If the segment is associated with an arena scope, then it can only be - * accessed compatibly with the arena confinement characteristics. + * Finally, access operations on a memory segment can be subject to additional thread-confinement checks. + * Heap segments can be accessed from any thread. Conversely, native segments can only be accessed compatibly with the + * confinement characteristics of the arena used to obtain them. * *

    Accessing memory segments

    * @@ -161,28 +162,28 @@ import jdk.internal.vm.annotation.ForceInline; * segment is derived from the address of the original segment, by adding an offset (expressed in bytes). The size of * the sliced segment is either derived implicitly (by subtracting the specified offset from the size of the original segment), * or provided explicitly. In other words, a sliced segment has stricter spatial bounds than those of the original segment: - * {@snippet lang=java : + * {@snippet lang = java: * Arena arena = ... * MemorySegment segment = arena.allocate(100); * MemorySegment slice = segment.asSlice(50, 10); * slice.get(ValueLayout.JAVA_INT, 20); // Out of bounds! * arena.close(); * slice.get(ValueLayout.JAVA_INT, 0); // Already closed! - * } + *} * The above code creates a native segment that is 100 bytes long; then, it creates a slice that starts at offset 50 * of {@code segment}, and is 10 bytes long. That is, the address of the {@code slice} is {@code segment.address() + 50}, * and its size is 10. As a result, attempting to read an int value at offset 20 of the - * {@code slice} segment will result in an exception. The {@linkplain SegmentScope temporal bounds} of the original segment - * is inherited by its slices; that is, when the scope associated with {@code segment} is no longer {@linkplain SegmentScope#isAlive() alive}, + * {@code slice} segment will result in an exception. The {@linkplain Arena temporal bounds} of the original segment + * is inherited by its slices; that is, when the scope associated with {@code segment} is no longer {@linkplain Scope#isAlive() alive}, * {@code slice} will also be become inaccessible. *

    * A client might obtain a {@link Stream} from a segment, which can then be used to slice the segment (according to a given * element layout) and even allow multiple threads to work in parallel on disjoint segment slices - * (to do this, the segment has to be associated with a scope that allows {@linkplain SegmentScope#isAccessibleBy(Thread) access} + * (to do this, the segment has to be {@linkplain MemorySegment#isAccessibleBy(Thread) accessible} * from multiple threads). The following code can be used to sum all int values in a memory segment in parallel: * * {@snippet lang = java: - * try (Arena arena = Arena.openShared()) { + * try (Arena arena = Arena.ofShared()) { * SequenceLayout SEQUENCE_LAYOUT = MemoryLayout.sequenceLayout(1024, ValueLayout.JAVA_INT); * MemorySegment segment = arena.allocate(SEQUENCE_LAYOUT); * int sum = segment.elements(ValueLayout.JAVA_INT).parallel() @@ -241,8 +242,8 @@ import jdk.internal.vm.annotation.ForceInline; *

    * The alignment constraint used to access a segment is typically dictated by the shape of the data structure stored * in the segment. For example, if the programmer wishes to store a sequence of 8-byte values in a native segment, then - * the segment should be allocated by specifying a 8-byte alignment constraint, either via {@link #allocateNative(long, long, SegmentScope)} - * or {@link #allocateNative(MemoryLayout, SegmentScope)}. These factories ensure that the off-heap region of memory backing + * the segment should be allocated by specifying a 8-byte alignment constraint, either via {@link Arena#allocate(long, long)} + * or {@link Arena#allocate(MemoryLayout)}. These factories ensure that the off-heap region of memory backing * the returned segment has a starting address that is 8-byte aligned. Subsequently, the programmer can access the * segment at the offsets of interest -- 0, 8, 16, 24, etc -- in the knowledge that every such access is aligned. *

    @@ -343,53 +344,82 @@ import jdk.internal.vm.annotation.ForceInline; * the region, stored in the pointer, is available. For example, a C function with return type {@code char*} might return * a pointer to a region containing a single {@code char} value, or to a region containing an array of {@code char} values, * where the size of the array might be provided in a separate parameter. The size of the array is not readily apparent - * to the code calling the foreign function and hoping to use its result. + * to the code calling the foreign function and hoping to use its result. In addition to having no insight + * into the size of the region of memory backing a pointer returned from a foreign function, it also has no insight + * into the lifetime intended for said region of memory by the foreign function that allocated it. *

    - * The {@link Linker} represents a pointer returned from a foreign function with a zero-length memory segment. - * The address of the segment is the address stored in the pointer. The size of the segment is zero. Similarly, when a - * client reads an address from a memory segment, a zero-length memory segment is returned. + * The {@code MemorySegment} API uses zero-length memory segments to represent: + *

    + * The address of the zero-length segment is the address stored in the pointer. The spatial and temporal bounds of the + * zero-length segment are as follows: + *
      + *
    • The size of the segment is zero. Any attempt to access these segments will fail with {@link IndexOutOfBoundsException}. + * This is a crucial safety feature: as these segments are associated with a region + * of memory whose size is not known, any access operations involving these segments cannot be validated. + * In effect, a zero-length memory segment wraps an address, and it cannot be used without explicit intent + * (see below);
    • + *
    • The segment is associated with a fresh scope that is always alive. Thus, while zero-length + * memory segments cannot be accessed directly, they can be passed, opaquely, to other pointer-accepting foreign functions.
    • + *
    *

    - * Since a zero-length segment features trivial spatial bounds, any attempt to access these segments will fail with - * {@link IndexOutOfBoundsException}. This is a crucial safety feature: as these segments are associated with a region - * of memory whose size is not known, any access operations involving these segments cannot be validated. - * In effect, a zero-length memory segment wraps an address, and it cannot be used without explicit intent. + * To demonstrate how clients can work with zero-length memory segments, consider the case of a client that wants + * to read a pointer from some memory segment. This can be done via the + * {@linkplain MemorySegment#get(AddressLayout, long)} access method. This method accepts an + * {@linkplain AddressLayout address layout} (e.g. {@link ValueLayout#ADDRESS}), the layout of the pointer + * to be read. For instance on a 64-bit platform, the size of an address layout is 64 bits. The access operation + * also accepts an offset, expressed in bytes, which indicates the position (relative to the start of the memory segment) + * at which the pointer is stored. The access operation returns a zero-length native memory segment, backed by a region + * of memory whose starting address is the 64-bit value read at the specified offset. *

    - * Zero-length memory segments obtained when interacting with foreign functions are associated with the - * {@link SegmentScope#global() global scope}. This is because the Java runtime, in addition to having no insight - * into the size of the region of memory backing a pointer returned from a foreign function, also has no insight - * into the lifetime intended for said region of memory by the foreign function that allocated it. The global scope - * ensures that the obtained segment can be passed, opaquely, to other pointer-accepting foreign functions. - *

    - * To access native zero-length memory segments, clients have two options, both of which are unsafe. Clients - * can {@linkplain java.lang.foreign.MemorySegment#ofAddress(long, long, SegmentScope) obtain} - * a new native segment, with new spatial and temporal bounds, as follows: + * The returned zero-length memory segment cannot be accessed directly by the client: since the size of the segment + * is zero, any access operation would result in out-of-bounds access. Instead, the client must, unsafely, + * assign new spatial bounds to the zero-length memory segment. This can be done via the + * {@link #reinterpret(long)} method, as follows: * * {@snippet lang = java: - * SegmentScope scope = ... // obtains a scope - * MemorySegment foreign = someSegment.get(ValueLayout.ADDRESS, 0); // wrap address into segment (size = 0) - * MemorySegment segment = MemorySegment.ofAddress(foreign.address(), 4, scope); // create new segment (size = 4) - * int x = segment.get(ValueLayout.JAVA_INT, 0); //ok + * MemorySegment z = segment.get(ValueLayout.ADDRESS, ...); // size = 0 + * MemorySegment ptr = z.reinterpret(16); // size = 16 + * int x = ptr.getAtIndex(ValueLayout.JAVA_INT, 3); // ok *} - * - * Alternatively, clients can obtain an {@linkplain java.lang.foreign.ValueLayout.OfAddress#asUnbounded() unbounded} - * address value layout. When an access operation, or a function descriptor that is passed to a downcall method handle, - * uses an unbounded address value layouts, the runtime will wrap any corresponding raw addresses with native segments - * with maximal size (i.e. {@linkplain java.lang.Long#MAX_VALUE}). As such, these segments can be accessed directly, as follows: + *

    + * In some cases, the client might additionally want to assign new temporal bounds to a zero-length memory segment. + * This can be done via the {@link #reinterpret(long, Arena, Consumer)} method, which returns a + * new native segment with the desired size and the same temporal bounds as those of the provided arena: * * {@snippet lang = java: - * MemorySegment foreign = someSegment.get(ValueLayout.ADDRESS.asUnbounded(), 0); // wrap address into segment (size = Long.MAX_VALUE) - * int x = foreign.get(ValueLayout.JAVA_INT, 0); //ok + * MemorySegment ptr = null; + * try (Arena arena = Arena.ofConfined()) { + * MemorySegment z = segment.get(ValueLayout.ADDRESS, ...); // size = 0, scope = always alive + * ptr = z.reinterpret(16, arena, null); // size = 16, scope = arena.scope() + * int x = ptr.getAtIndex(ValueLayout.JAVA_INT, 3); // ok + * } + * int x = ptr.getAtIndex(ValueLayout.JAVA_INT, 3); // throws IllegalStateException *} * - * Both {@link #ofAddress(long, long, SegmentScope)} and {@link ValueLayout.OfAddress#asUnbounded()} are + * Alternatively, if the size of the region of memory backing the zero-length memory segment is known statically, + * the client can overlay a {@linkplain AddressLayout#withTargetLayout(MemoryLayout) target layout} on the address + * layout used when reading a pointer. The target layout is then used to dynamically + * expand the size of the native memory segment returned by the access operation, so that the size + * of the segment is the same as the size of the target layout. 
In other words, the returned segment is no + * longer a zero-length memory segment, and the pointer it represents can be dereferenced directly: + * + * {@snippet lang = java: + * AddressLayout intArrPtrLayout = ValueLayout.ADDRESS.withTargetLayout( + * MemoryLayout.sequenceLayout(4, ValueLayout.JAVA_INT)); // layout for int (*ptr)[4] + * MemorySegment ptr = segment.get(intArrPtrLayout, ...); // size = 16 + * int x = ptr.getAtIndex(ValueLayout.JAVA_INT, 3); // ok + *} + *

    + * All the methods which can be used to manipulate zero-length memory segments + * ({@link #reinterpret(long)}, {@link #reinterpret(Arena, Consumer)}, {@link #reinterpret(long, Arena, Consumer)} and + * {@link AddressLayout#withTargetLayout(MemoryLayout)}) are * restricted methods, and should be used with caution: - * for instance, sizing a segment incorrectly could result in a VM crash when attempting to access the memory segment. - *

    - * Which approach is taken largely depends on the information that a client has available when obtaining a memory segment - * wrapping a native pointer. For instance, if such pointer points to a C struct, the client might prefer to resize the - * segment unsafely, to match the size of the struct (so that out-of-bounds access will be detected by the API). - * In other instances, however, there will be no, or little information as to what spatial and/or temporal bounds should - * be associated with a given native pointer. In these cases using an unbounded address layout might be preferable. + * assigning a segment incorrect spatial and/or temporal bounds could result in a VM crash when attempting to access + * the memory segment. * * @implSpec * Implementations of this interface are immutable, thread-safe and value-based. @@ -405,9 +435,13 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { long address(); /** - * {@return the Java array associated with this memory segment, if any} + * Returns the Java object stored in the on-heap memory region backing this memory segment, if any. For instance, if this + * memory segment is a heap segment created with the {@link #ofArray(byte[])} factory method, this method will return the + * {@code byte[]} object which was used to obtain the segment. This method returns an empty {@code Optional} value + * if either this segment is a {@linkplain #isNative() native} segment, or if this segment is {@linkplain #isReadOnly() read-only}. + * @return the Java object associated with this memory segment, if any. */ - Optional array(); + Optional heapBase(); /** * Returns a spliterator for this memory segment. 
The returned spliterator reports {@link Spliterator#SIZED}, @@ -418,7 +452,7 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * if the supplied layout has size N, then calling {@link Spliterator#trySplit()} will result in a spliterator serving * approximately {@code S/N} elements (depending on whether N is even or not), where {@code S} is the size of * this segment. As such, splitting is possible as long as {@code S/N >= 2}. The spliterator returns segments that - * are associated with the same scope as that associated with this segment. + * have the same lifetime as that of this segment. *

    * The returned spliterator effectively allows to slice this segment into disjoint {@linkplain #asSlice(long, long) slices}, * which can then be processed in parallel by multiple threads. @@ -451,7 +485,13 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * {@return the scope associated with this memory segment} */ - SegmentScope scope(); + Scope scope(); + + /** + * {@return {@code true} if this segment can be accessed from the provided thread} + * @param thread the thread to be tested. + */ + boolean isAccessibleBy(Thread thread); /** * {@return the size (in bytes) of this memory segment} @@ -461,8 +501,13 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * Returns a slice of this memory segment, at the given offset. The returned segment's address is the address * of this segment plus the given offset; its size is specified by the given argument. + *

    + * Equivalent to the following code: + * {@snippet lang=java : + * asSlice(offset, newSize, 1); + * } * - * @see #asSlice(long) + * @see #asSlice(long, long, long) * * @param offset The new segment base offset (relative to the address of this segment), specified in bytes. * @param newSize The new segment size, specified in bytes. @@ -471,6 +516,44 @@ */ MemorySegment asSlice(long offset, long newSize); + /** + * Returns a slice of this memory segment, at the given offset, with the provided alignment constraint. + * The returned segment's address is the address of this segment plus the given offset; its size is specified by the given argument. + * + * @param offset The new segment base offset (relative to the address of this segment), specified in bytes. + * @param newSize The new segment size, specified in bytes. + * @param byteAlignment The alignment constraint (in bytes) of the returned slice. + * @return a slice of this memory segment. + * @throws IndexOutOfBoundsException if {@code offset < 0}, {@code offset > byteSize()}, {@code newSize < 0}, or {@code newSize > byteSize() - offset} + * @throws IllegalArgumentException if this segment cannot be accessed at {@code offset} under + * the provided alignment constraint. + */ + MemorySegment asSlice(long offset, long newSize, long byteAlignment); + + /** + * Returns a slice of this memory segment with the given layout, at the given offset. The returned segment's address is the address + * of this segment plus the given offset; its size is the same as the size of the provided layout. + *

    + * Equivalent to the following code: + * {@snippet lang=java : + * asSlice(offset, layout.byteSize(), layout.byteAlignment()); + * } + * + * @see #asSlice(long, long, long) + * + * @param offset The new segment base offset (relative to the address of this segment), specified in bytes. + * @param layout The layout of the segment slice. + * @throws IndexOutOfBoundsException if {@code offset < 0}, {@code offset > byteSize()}, + * or {@code layout.byteSize() > byteSize() - offset} + * @throws IllegalArgumentException if this segment cannot be accessed at {@code offset} under + * the alignment constraint specified by {@code layout}. + * @return a slice of this memory segment. + */ + default MemorySegment asSlice(long offset, MemoryLayout layout) { + Objects.requireNonNull(layout); + return asSlice(offset, layout.byteSize(), layout.byteAlignment()); + } + /** * Returns a slice of this memory segment, at the given offset. The returned segment's address is the address * of this segment plus the given offset; its size is computed by subtracting the specified offset from this segment size. @@ -486,9 +569,110 @@ * @return a slice of this memory segment. * @throws IndexOutOfBoundsException if {@code offset < 0}, or {@code offset > byteSize()}. */ - default MemorySegment asSlice(long offset) { - return asSlice(offset, byteSize() - offset); - } + MemorySegment asSlice(long offset); + + /** + * Returns a new memory segment that has the same address and scope as this segment, but with the provided size. + *

    + * This method is restricted. + * Restricted methods are unsafe, and, if used incorrectly, their use might crash + * the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on + * restricted methods, and use safe and supported functionalities, where possible. + * + * @param newSize the size of the returned segment. + * @return a new memory segment that has the same address and scope as this segment, but the new + * provided size. + * @throws IllegalArgumentException if {@code newSize < 0}. + * @throws UnsupportedOperationException if this segment is not a {@linkplain #isNative() native} segment. + * @throws IllegalCallerException If the caller is in a module that does not have native access enabled. + */ + @CallerSensitive + MemorySegment reinterpret(long newSize); + + /** + * Returns a new memory segment with the same address and size as this segment, but with the provided scope. + * As such, the returned segment cannot be accessed after the provided arena has been closed. + * Moreover, the returned segment can be accessed compatibly with the confinement restrictions associated with the + * provided arena: that is, if the provided arena is a {@linkplain Arena#ofConfined() confined arena}, + * the returned segment can only be accessed by the arena's owner thread, regardless of the confinement restrictions + * associated with this segment. In other words, this method returns a segment that behaves as if it had been allocated + * using the provided arena. + *

    + * Clients can specify an optional cleanup action that should be executed when the provided scope becomes + * invalid. This cleanup action receives a fresh memory segment that is obtained from this segment as follows: + * {@snippet lang=java : + * MemorySegment cleanupSegment = MemorySegment.ofAddress(this.address()); + * } + * That is, the cleanup action receives a segment that is associated with a fresh scope that is always alive, + * and is accessible from any thread. The size of the segment accepted by the cleanup action is {@link #byteSize()}. + *

    + * This method is restricted. + * Restricted methods are unsafe, and, if used incorrectly, their use might crash + * the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on + * restricted methods, and use safe and supported functionalities, where possible. + * + * @apiNote The cleanup action (if present) should take care not to leak the received segment to external + * clients which might access the segment after its backing region of memory is no longer available. Furthermore, + * if the provided scope is the scope of an {@linkplain Arena#ofAuto() automatic arena}, the cleanup action + * must not prevent the scope from becoming unreachable. + * A failure to do so will permanently prevent the regions of memory allocated by the automatic arena from being deallocated. + *

    + * This method is restricted. + * Restricted methods are unsafe, and, if used incorrectly, their use might crash + * the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on + * restricted methods, and use safe and supported functionalities, where possible. + * + * @param arena the arena to be associated with the returned segment. + * @param cleanup the cleanup action that should be executed when the provided arena is closed (can be {@code null}). + * @return a new memory segment with unbounded size. + * @throws IllegalArgumentException if {@code newSize < 0}. + * @throws IllegalStateException if {@code scope.isAlive() == false}. + * @throws UnsupportedOperationException if this segment is not a {@linkplain #isNative() native} segment. + * @throws IllegalCallerException If the caller is in a module that does not have native access enabled. + */ + @CallerSensitive + MemorySegment reinterpret(Arena arena, Consumer cleanup); + + /** + * Returns a new segment with the same address as this segment, but with the provided size and scope. + * As such, the returned segment cannot be accessed after the provided arena has been closed. + * Moreover, if the returned segment can be accessed compatibly with the confinement restrictions associated with the + * provided arena: that is, if the provided arena is a {@linkplain Arena#ofConfined() confined arena}, + * the returned segment can only be accessed by the arena's owner thread, regardless of the confinement restrictions + * associated with this segment. In other words, this method returns a segment that behaves as if it had been allocated + * using the provided arena. + *

    + * Clients can specify an optional cleanup action that should be executed when the provided scope becomes + * invalid. This cleanup action receives a fresh memory segment that is obtained from this segment as follows: + * {@snippet lang=java : + * MemorySegment cleanupSegment = MemorySegment.ofAddress(this.address()); + * } + * That is, the cleanup action receives a segment that is associated with a fresh scope that is always alive, + * and is accessible from any thread. The size of the segment accepted by the cleanup action is {@code newSize}. + *

    + * This method is restricted. + * Restricted methods are unsafe, and, if used incorrectly, their use might crash + * the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on + * restricted methods, and use safe and supported functionalities, where possible. + * + * @apiNote The cleanup action (if present) should take care not to leak the received segment to external + * clients which might access the segment after its backing region of memory is no longer available. Furthermore, + * if the provided scope is the scope of an {@linkplain Arena#ofAuto() automatic arena}, the cleanup action + * must not prevent the scope from becoming unreachable. + * A failure to do so will permanently prevent the regions of memory allocated by the automatic arena from being deallocated. + * + * @param newSize the size of the returned segment. + * @param arena the arena to be associated with the returned segment. + * @param cleanup the cleanup action that should be executed when the provided arena is closed (can be {@code null}). + * @return a new segment that has the same address as this segment, but with new size and its scope set to + * that of the provided arena. + * @throws UnsupportedOperationException if this segment is not a {@linkplain #isNative() native} segment. + * @throws IllegalArgumentException if {@code newSize < 0}. + * @throws IllegalStateException if {@code scope.isAlive() == false}. + * @throws IllegalCallerException If the caller is in a module that does not have native access enabled. + */ + @CallerSensitive + MemorySegment reinterpret(long newSize, Arena arena, Consumer cleanup); /** * {@return {@code true}, if this segment is read-only} @@ -506,7 +690,7 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * Returns {@code true} if this segment is a native segment. A native segment is - * created e.g. 
using the {@link #allocateNative(long, SegmentScope)} (and related) factory, or by + * created e.g. using the {@link Arena#allocate(long, long)} (and related) factory, or by * {@linkplain #ofBuffer(Buffer) wrapping} a {@linkplain ByteBuffer#allocateDirect(int) direct buffer}. * @return {@code true} if this segment is native segment. */ @@ -514,7 +698,7 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * Returns {@code true} if this segment is a mapped segment. A mapped memory segment is created e.g. using the - * {@link FileChannel#map(FileChannel.MapMode, long, long, SegmentScope)} factory, or by + * {@link FileChannel#map(FileChannel.MapMode, long, long, Arena)} factory, or by * {@linkplain #ofBuffer(Buffer) wrapping} a {@linkplain java.nio.MappedByteBuffer mapped byte buffer}. * @return {@code true} if this segment is a mapped segment. */ @@ -581,9 +765,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param value the value to fill into this segment * @return this memory segment * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws UnsupportedOperationException if this segment is read-only (see {@link #isReadOnly()}). */ MemorySegment fill(byte value); @@ -600,13 +784,13 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param src the source segment. * @throws IndexOutOfBoundsException if {@code src.byteSize() > this.byteSize()}. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. 
* @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with {@code src} is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code src.scope().isAccessibleBy(T) == false}. + * such that {@code src.isAccessibleBy(T) == false}. * @throws UnsupportedOperationException if this segment is read-only (see {@link #isReadOnly()}). * @return this segment. */ @@ -634,13 +818,13 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @return the relative offset, in bytes, of the first mismatch between this * and the given other segment, otherwise -1 if no mismatch * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with {@code other} is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code other.scope().isAccessibleBy(T) == false}. + * such that {@code other.isAccessibleBy(T) == false}. 
*/ default long mismatch(MemorySegment other) { Objects.requireNonNull(other); @@ -666,9 +850,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * is resident in physical memory * * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws UnsupportedOperationException if this segment is not a mapped memory segment, e.g. if * {@code isMapped() == false}. */ @@ -683,9 +867,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * occur.

    * * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws UnsupportedOperationException if this segment is not a mapped memory segment, e.g. if * {@code isMapped() == false}. */ @@ -700,9 +884,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * occur (as this segment's contents might need to be paged back in).

    * * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws UnsupportedOperationException if this segment is not a mapped memory segment, e.g. if * {@code isMapped() == false}. */ @@ -729,9 +913,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { *

    * * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws UnsupportedOperationException if this segment is not a mapped memory segment, e.g. if * {@code isMapped() == false}. * @throws UncheckedIOException if there is an I/O error writing the contents of this segment to the associated storage device @@ -751,11 +935,11 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * returned if this segment' size is greater than {@link Integer#MAX_VALUE}. *

    * The life-cycle of the returned buffer will be tied to that of this segment. That is, accessing the returned buffer - * after the scope associated with this segment is no longer {@linkplain SegmentScope#isAlive() alive}, will + * after the scope associated with this segment is no longer {@linkplain Scope#isAlive() alive}, will * throw an {@link IllegalStateException}. Similarly, accessing the returned buffer from a thread {@code T} - * such that {@code scope().isAccessible(T) == false} will throw a {@link WrongThreadException}. + * such that {@code isAccessible(T) == false} will throw a {@link WrongThreadException}. *

    - * If this segment is associated with a scope that can only be accessed from a single thread, calling read/write I/O + * If this segment is accessible from a single thread, calling read/write I/O * operations on the resulting buffer might result in an unspecified exception being thrown. Examples of such problematic operations are * {@link java.nio.channels.AsynchronousSocketChannel#read(ByteBuffer)} and * {@link java.nio.channels.AsynchronousSocketChannel#write(ByteBuffer)}. @@ -776,9 +960,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * different from the {@linkplain ByteOrder#nativeOrder native order}, a byte swap operation will be performed on each array element. * @return a new byte array whose contents are copied from this memory segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalStateException if this segment's contents cannot be copied into a {@code byte[]} instance, * e.g. its size is greater than {@link Integer#MAX_VALUE}. */ @@ -790,9 +974,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * different from the {@linkplain ByteOrder#nativeOrder native order}, a byte swap operation will be performed on each array element. * @return a new short array whose contents are copied from this memory segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. 
+ * such that {@code isAccessibleBy(T) == false}. * @throws IllegalStateException if this segment's contents cannot be copied into a {@code short[]} instance, * e.g. because {@code byteSize() % 2 != 0}, or {@code byteSize() / 2 > Integer#MAX_VALUE} */ @@ -804,9 +988,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * different from the {@linkplain ByteOrder#nativeOrder native order}, a byte swap operation will be performed on each array element. * @return a new char array whose contents are copied from this memory segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalStateException if this segment's contents cannot be copied into a {@code char[]} instance, * e.g. because {@code byteSize() % 2 != 0}, or {@code byteSize() / 2 > Integer#MAX_VALUE}. */ @@ -818,9 +1002,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * different from the {@linkplain ByteOrder#nativeOrder native order}, a byte swap operation will be performed on each array element. * @return a new int array whose contents are copied from this memory segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalStateException if this segment's contents cannot be copied into a {@code int[]} instance, * e.g. 
because {@code byteSize() % 4 != 0}, or {@code byteSize() / 4 > Integer#MAX_VALUE}. */ @@ -832,9 +1016,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * different from the {@linkplain ByteOrder#nativeOrder native order}, a byte swap operation will be performed on each array element. * @return a new float array whose contents are copied from this memory segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalStateException if this segment's contents cannot be copied into a {@code float[]} instance, * e.g. because {@code byteSize() % 4 != 0}, or {@code byteSize() / 4 > Integer#MAX_VALUE}. */ @@ -846,9 +1030,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * different from the {@linkplain ByteOrder#nativeOrder native order}, a byte swap operation will be performed on each array element. * @return a new long array whose contents are copied from this memory segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalStateException if this segment's contents cannot be copied into a {@code long[]} instance, * e.g. because {@code byteSize() % 8 != 0}, or {@code byteSize() / 8 > Integer#MAX_VALUE}. 
*/ @@ -860,9 +1044,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * different from the {@linkplain ByteOrder#nativeOrder native order}, a byte swap operation will be performed on each array element. * @return a new double array whose contents are copied from this memory segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalStateException if this segment's contents cannot be copied into a {@code double[]} instance, * e.g. because {@code byteSize() % 8 != 0}, or {@code byteSize() / 8 > Integer#MAX_VALUE}. */ @@ -882,9 +1066,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @throws IndexOutOfBoundsException if {@code offset < 0} or {@code S + offset > byteSize()}, where {@code S} is the size of the UTF-8 * string (including the terminator character). * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. */ default String getUtf8String(long offset) { return SharedUtils.toJavaStringInternal(this, offset); @@ -907,9 +1091,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param str the Java string to be written into this segment. * @throws IndexOutOfBoundsException if {@code offset < 0} or {@code str.getBytes().length() + offset >= byteSize()}. 
* @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. */ default void setUtf8String(long offset, String str) { Utils.toCString(str.getBytes(StandardCharsets.UTF_8), SegmentAllocator.prefixAllocator(asSlice(offset))); @@ -924,15 +1108,13 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * {@linkplain ByteBuffer#isReadOnly() read-only}. Moreover, if the buffer is a {@linkplain Buffer#isDirect() direct buffer}, * the returned segment is a native segment; otherwise the returned memory segment is a heap segment. *

    - * The scope {@code S} associated with the returned segment is computed as follows: - *

      - *
    • if the buffer has been obtained by calling {@link #asByteBuffer()} on a memory segment whose scope - * is {@code S'}, then {@code S = S'}; or
    • - *
    • if the buffer is a heap buffer, then {@code S} is the {@linkplain SegmentScope#global() global scope}; or - *
    • if the buffer is a direct buffer, then {@code S} is a scope that is always alive and which keeps the buffer reachable. - * Therefore, the off-heap region of memory backing the buffer instance will remain available as long as the - * returned segment is reachable.
    • - *
    + * If the provided buffer has been obtained by calling {@link #asByteBuffer()} on a memory segment whose + * {@linkplain Scope scope} is {@code S}, the returned segment will be associated with the + * same scope {@code S}. Otherwise, the scope of the returned segment is a fresh scope that is always alive. + *

    + * The scope associated with the returned segment keeps the provided buffer reachable. As such, if + * the provided buffer is a direct buffer, its backing memory region will not be deallocated as long as the + * returned segment (or any of its slices) are kept reachable. * * @param buffer the buffer instance to be turned into a new memory segment. * @return a memory segment, derived from the given buffer instance. @@ -947,8 +1129,8 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * Creates a heap segment backed by the on-heap region of memory that holds the given byte array. - * The returned segment is associated with the {@linkplain SegmentScope#global() global scope}, and - * its {@link #address()} is set to zero. + * The scope of the returned segment is a fresh scope that is always alive, and keeps the given byte array reachable. + * The returned segment is always accessible, from any thread. Its {@link #address()} is set to zero. * * @param byteArray the primitive array backing the heap memory segment. * @return a heap memory segment backed by a byte array. @@ -959,8 +1141,8 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * Creates a heap segment backed by the on-heap region of memory that holds the given char array. - * The returned segment is associated with the {@linkplain SegmentScope#global() global scope}, and - * its {@link #address()} is set to zero. + * The scope of the returned segment is a fresh scope that is always alive, and keeps the given char array reachable. + * The returned segment is always accessible, from any thread. Its {@link #address()} is set to zero. * * @param charArray the primitive array backing the heap segment. * @return a heap memory segment backed by a char array. @@ -971,8 +1153,8 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * Creates a heap segment backed by the on-heap region of memory that holds the given short array. 
- * The returned segment is associated with the {@linkplain SegmentScope#global() global scope}, and - * its {@link #address()} is set to zero. + * The scope of the returned segment is a fresh scope that is always alive, and keeps the given short array reachable. + * The returned segment is always accessible, from any thread. Its {@link #address()} is set to zero. * * @param shortArray the primitive array backing the heap segment. * @return a heap memory segment backed by a short array. @@ -983,8 +1165,8 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * Creates a heap segment backed by the on-heap region of memory that holds the given int array. - * The returned segment is associated with the {@linkplain SegmentScope#global() global scope}, and - * its {@link #address()} is set to zero. + * The scope of the returned segment is a fresh scope that is always alive, and keeps the given int array reachable. + * The returned segment is always accessible, from any thread. Its {@link #address()} is set to zero. * * @param intArray the primitive array backing the heap segment. * @return a heap memory segment backed by an int array. @@ -995,8 +1177,8 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * Creates a heap segment backed by the on-heap region of memory that holds the given float array. - * The returned segment is associated with the {@linkplain SegmentScope#global() global scope}, and - * its {@link #address()} is set to zero. + * The scope of the returned segment is a fresh scope that is always alive, and keeps the given float array reachable. + * The returned segment is always accessible, from any thread. Its {@link #address()} is set to zero. * * @param floatArray the primitive array backing the heap segment. * @return a heap memory segment backed by a float array. 
@@ -1007,8 +1189,8 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * Creates a heap segment backed by the on-heap region of memory that holds the given long array. - * The returned segment is associated with the {@linkplain SegmentScope#global() global scope}, and - * its {@link #address()} is set to zero. + * The scope of the returned segment is a fresh scope that is always alive, and keeps the given long array reachable. + * The returned segment is always accessible, from any thread. Its {@link #address()} is set to zero. * * @param longArray the primitive array backing the heap segment. * @return a heap memory segment backed by a long array. @@ -1019,8 +1201,8 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * Creates a heap segment backed by the on-heap region of memory that holds the given double array. - * The returned segment is associated with the {@linkplain SegmentScope#global() global scope}, and - * its {@link #address()} is set to zero. + * The scope of the returned segment is a fresh scope that is always alive, and keeps the given double array reachable. + * The returned segment is always accessible, from any thread. Its {@link #address()} is set to zero. * * @param doubleArray the primitive array backing the heap segment. * @return a heap memory segment backed by a double array. @@ -1032,16 +1214,16 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * A zero-length native segment modelling the {@code NULL} address. */ - MemorySegment NULL = NativeMemorySegmentImpl.makeNativeSegmentUnchecked(0L, 0); + MemorySegment NULL = new NativeMemorySegmentImpl(); /** * Creates a zero-length native segment from the given {@linkplain #address() address value}. - * The returned segment is associated with the {@linkplain SegmentScope#global() global scope}. + * The returned segment is always accessible, from any thread. *

    - * This is equivalent to the following code: - * {@snippet lang = java: - * ofAddress(address, 0); - *} + * On 32-bit platforms, the given address value will be normalized such that the + * highest-order ("leftmost") 32 bits of the {@link MemorySegment#address() address} + * of the returned memory segment are set to zero. + * * @param address the address of the returned native segment. * @return a zero-length native segment with the given address. */ @@ -1049,208 +1231,6 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { return NativeMemorySegmentImpl.makeNativeSegmentUnchecked(address, 0); } - /** - * Creates a native segment with the given size and {@linkplain #address() address value}. - * The returned segment is associated with the {@linkplain SegmentScope#global() global scope}. - *

    - * This is equivalent to the following code: - * {@snippet lang = java: - * ofAddress(address, byteSize, SegmentScope.global()); - *} - * This method is restricted. - * Restricted methods are unsafe, and, if used incorrectly, their use might crash - * the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on - * restricted methods, and use safe and supported functionalities, where possible. - * @param address the address of the returned native segment. - * @param byteSize the size (in bytes) of the returned native segment. - * @return a zero-length native segment with the given address and size. - * @throws IllegalArgumentException if {@code byteSize < 0}. - * @throws IllegalCallerException If the caller is in a module that does not have native access enabled. - */ - @CallerSensitive - static MemorySegment ofAddress(long address, long byteSize) { - Reflection.ensureNativeAccess(Reflection.getCallerClass(), MemorySegment.class, "ofAddress"); - return MemorySegment.ofAddress(address, byteSize, SegmentScope.global()); - } - - /** - * Creates a native segment with the given size, address, and scope. - * This method can be useful when interacting with custom memory sources (e.g. custom allocators), - * where an address to some underlying region of memory is typically obtained from foreign code - * (often as a plain {@code long} value). - *

    - * The returned segment is not read-only (see {@link MemorySegment#isReadOnly()}), and is associated with the - * provided scope. - *

    - * This is equivalent to the following code: - * {@snippet lang = java: - * ofAddress(address, byteSize, scope, null); - *} - * This method is restricted. - * Restricted methods are unsafe, and, if used incorrectly, their use might crash - * the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on - * restricted methods, and use safe and supported functionalities, where possible. - * @param address the returned segment's address. - * @param byteSize the desired size. - * @param scope the scope associated with the returned native segment. - * @return a native segment with the given address, size and scope. - * @throws IllegalArgumentException if {@code byteSize < 0}. - * @throws IllegalStateException if {@code scope} is not {@linkplain SegmentScope#isAlive() alive}. - * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope.isAccessibleBy(T) == false}. - * @throws IllegalCallerException If the caller is in a module that does not have native access enabled. - */ - @CallerSensitive - @ForceInline - static MemorySegment ofAddress(long address, long byteSize, SegmentScope scope) { - Reflection.ensureNativeAccess(Reflection.getCallerClass(), MemorySegment.class, "ofAddress"); - Objects.requireNonNull(scope); - Utils.checkAllocationSizeAndAlign(byteSize, 1); - return NativeMemorySegmentImpl.makeNativeSegmentUnchecked(address, byteSize, scope, null); - } - - /** - * Creates a native segment with the given size, address, and scope. - * This method can be useful when interacting with custom memory sources (e.g. custom allocators), - * where an address to some underlying region of memory is typically obtained from foreign code - * (often as a plain {@code long} value). - *

    - * The returned segment is not read-only (see {@link MemorySegment#isReadOnly()}), and is associated with the - * provided scope. - *

    - * The provided cleanup action (if any) will be invoked when the scope becomes not {@linkplain SegmentScope#isAlive() alive}. - *

    - * Clients should ensure that the address and bounds refer to a valid region of memory that is accessible for reading and, - * if appropriate, writing; an attempt to access an invalid address from Java code will either return an arbitrary value, - * have no visible effect, or cause an unspecified exception to be thrown. - *

    - * This method is restricted. - * Restricted methods are unsafe, and, if used incorrectly, their use might crash - * the JVM or, worse, silently result in memory corruption. Thus, clients should refrain from depending on - * restricted methods, and use safe and supported functionalities, where possible. - * - * - * @param address the returned segment's address. - * @param byteSize the desired size. - * @param scope the scope associated with the returned native segment. - * @param cleanupAction the custom cleanup action to be associated to the returned segment (can be null). - * @return a native segment with the given address, size and scope. - * @throws IllegalArgumentException if {@code byteSize < 0}. - * @throws IllegalStateException if {@code scope} is not {@linkplain SegmentScope#isAlive() alive}. - * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope.isAccessibleBy(T) == false}. - * @throws IllegalCallerException If the caller is in a module that does not have native access enabled. - */ - @CallerSensitive - static MemorySegment ofAddress(long address, long byteSize, SegmentScope scope, Runnable cleanupAction) { - Reflection.ensureNativeAccess(Reflection.getCallerClass(), MemorySegment.class, "ofAddress"); - Objects.requireNonNull(scope); - Utils.checkAllocationSizeAndAlign(byteSize, 1); - return NativeMemorySegmentImpl.makeNativeSegmentUnchecked(address, byteSize, scope, cleanupAction); - } - - /** - * Creates a native segment with the given layout and scope. - *

    - * The lifetime off-heap region of memory associated with the returned native segment is determined by the - * provided scope. The off-heap memory region is deallocated when the scope becomes not - * {@linkplain SegmentScope#isAlive() alive}. If the scope has been obtained using an {@link Arena}, - * clients are responsible for ensuring that the arena is closed when the returned segment is no longer in use - * Failure to do so will result in off-heap memory leaks. As an alternative, an {@linkplain SegmentScope#auto() automatic scope} - * can be used, allowing the off-heap memory region associated with the returned native segment to be - * automatically released some unspecified time after the scope is no longer referenced. - *

    - * The {@linkplain #address() address} of the returned memory segment is the starting address of - * the newly allocated off-heap region backing the segment. Moreover, the {@linkplain #address() address} - * of the returned segment will be aligned according to the alignment constraint of the provided layout. - *

    - * This is equivalent to the following code: - * {@snippet lang=java : - * allocateNative(layout.bytesSize(), layout.bytesAlignment(), scope); - * } - *

    - * The region of off-heap region backing the returned native segment is initialized to zero. - * - * @param layout the layout of the off-heap memory region backing the native segment. - * @param scope the scope associated with the returned native segment. - * @return a new native segment. - * @throws IllegalStateException if {@code scope} is not {@linkplain SegmentScope#isAlive() alive}. - * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope.isAccessibleBy(T) == false}. - */ - static MemorySegment allocateNative(MemoryLayout layout, SegmentScope scope) { - Objects.requireNonNull(layout); - Objects.requireNonNull(scope); - return allocateNative(layout.byteSize(), layout.byteAlignment(), scope); - } - - /** - * Creates a native segment with the given size (in bytes) and scope. - *

    - * The lifetime off-heap region of memory associated with the returned native segment is determined by the - * provided scope. The off-heap memory region is deallocated when the scope becomes not - * {@linkplain SegmentScope#isAlive() alive}. If the scope has been obtained using an {@link Arena}, - * clients are responsible for ensuring that the arena is closed when the returned segment is no longer in use - * Failure to do so will result in off-heap memory leaks. As an alternative, an {@linkplain SegmentScope#auto() automatic scope} - * can be used, allowing the off-heap memory region associated with the returned native segment to be - * automatically released some unspecified time after the scope is no longer referenced. - *

    - * The {@linkplain #address() address} of the returned memory segment is the starting address of - * the newly allocated off-heap region backing the segment. Moreover, the {@linkplain #address() address} - * of the returned segment is guaranteed to be at least 1-byte aligned. - *

    - * This is equivalent to the following code: - * {@snippet lang=java : - * allocateNative(bytesSize, 1, scope); - * } - *

    - * The region of off-heap region backing the returned native segment is initialized to zero. - * - * @param byteSize the size (in bytes) of the off-heap memory region of memory backing the native memory segment. - * @param scope the scope associated with the returned native segment. - * @return a new native memory segment. - * @throws IllegalArgumentException if {@code byteSize < 0}. - * @throws IllegalStateException if {@code scope} is not {@linkplain SegmentScope#isAlive() alive}. - * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope.isAccessibleBy(T) == false}. - */ - static MemorySegment allocateNative(long byteSize, SegmentScope scope) { - return allocateNative(byteSize, 1, scope); - } - - /** - * Creates a native segment with the given size (in bytes), alignment (in bytes) and scope. - *

    - * The lifetime off-heap region of memory associated with the returned native segment is determined by the - * provided scope. The off-heap memory region is deallocated when the scope becomes not - * {@linkplain SegmentScope#isAlive() alive}. If the scope has been obtained using an {@link Arena}, - * clients are responsible for ensuring that the arena is closed when the returned segment is no longer in use - * Failure to do so will result in off-heap memory leaks. As an alternative, an {@linkplain SegmentScope#auto() automatic scope} - * can be used, allowing the off-heap memory region associated with the returned native segment to be - * automatically released some unspecified time after the scope is no longer referenced. - *

    - * The {@linkplain #address() address} of the returned memory segment is the starting address of - * the newly allocated off-heap region backing the segment. Moreover, the {@linkplain #address() address} - * of the returned segment will be aligned according to the provided alignment constraint. - *

    - * The region of off-heap region backing the returned native segment is initialized to zero. - * - * @param byteSize the size (in bytes) of the off-heap region of memory backing the native memory segment. - * @param byteAlignment the alignment constraint (in bytes) of the off-heap region of memory backing the native memory segment. - * @param scope the scope associated with the returned native segment. - * @return a new native memory segment. - * @throws IllegalArgumentException if {@code byteSize < 0}, {@code byteAlignment <= 0}, or if {@code byteAlignment} - * is not a power of 2. - * @throws IllegalStateException if {@code scope} is not {@linkplain SegmentScope#isAlive() alive}. - * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope.isAccessibleBy(T) == false}. - */ - static MemorySegment allocateNative(long byteSize, long byteAlignment, SegmentScope scope) { - Objects.requireNonNull(scope); - Utils.checkAllocationSizeAndAlign(byteSize, byteAlignment); - return NativeMemorySegmentImpl.makeNativeSegment(byteSize, byteAlignment, scope); - } - /** * Performs a bulk copy from source segment to destination segment. More specifically, the bytes at offset * {@code srcOffset} through {@code srcOffset + bytes - 1} in the source segment are copied into the destination @@ -1275,20 +1255,21 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param dstOffset the starting offset, in bytes, of the destination segment. * @param bytes the number of bytes to be copied. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with {@code srcSegment} is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code srcSegment.scope().isAccessibleBy(T) == false}. + * such that {@code srcSegment.isAccessibleBy(T) == false}. 
* @throws IllegalStateException if the {@linkplain #scope() scope} associated with {@code dstSegment} is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code dstSegment.scope().isAccessibleBy(T) == false}. + * such that {@code dstSegment.isAccessibleBy(T) == false}. * @throws IndexOutOfBoundsException if {@code srcOffset + bytes > srcSegment.byteSize()} or if * {@code dstOffset + bytes > dstSegment.byteSize()}, or if either {@code srcOffset}, {@code dstOffset} * or {@code bytes} are {@code < 0}. * @throws UnsupportedOperationException if the destination segment is read-only (see {@link #isReadOnly()}). */ @ForceInline - static void copy(MemorySegment srcSegment, long srcOffset, MemorySegment dstSegment, long dstOffset, long bytes) { + static void copy(MemorySegment srcSegment, long srcOffset, + MemorySegment dstSegment, long dstOffset, long bytes) { copy(srcSegment, ValueLayout.JAVA_BYTE, srcOffset, dstSegment, ValueLayout.JAVA_BYTE, dstOffset, bytes); } @@ -1322,50 +1303,27 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * incompatible with the alignment constraint in the source * (resp. destination) element layout, or if the source (resp. destination) element layout alignment is greater than its size. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with {@code srcSegment} is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code srcSegment().scope().isAccessibleBy(T) == false}. + * such that {@code srcSegment().isAccessibleBy(T) == false}. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with {@code dstSegment} is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. 
* @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code dstSegment().scope().isAccessibleBy(T) == false}. + * such that {@code dstSegment().isAccessibleBy(T) == false}. * @throws IndexOutOfBoundsException if {@code srcOffset + (elementCount * S) > srcSegment.byteSize()} or if * {@code dstOffset + (elementCount * S) > dstSegment.byteSize()}, where {@code S} is the byte size * of the element layouts, or if either {@code srcOffset}, {@code dstOffset} or {@code elementCount} are {@code < 0}. * @throws UnsupportedOperationException if the destination segment is read-only (see {@link #isReadOnly()}). */ @ForceInline - static void copy(MemorySegment srcSegment, ValueLayout srcElementLayout, long srcOffset, MemorySegment dstSegment, - ValueLayout dstElementLayout, long dstOffset, long elementCount) { + static void copy(MemorySegment srcSegment, ValueLayout srcElementLayout, long srcOffset, + MemorySegment dstSegment, ValueLayout dstElementLayout, long dstOffset, + long elementCount) { Objects.requireNonNull(srcSegment); Objects.requireNonNull(srcElementLayout); Objects.requireNonNull(dstSegment); Objects.requireNonNull(dstElementLayout); - AbstractMemorySegmentImpl srcImpl = (AbstractMemorySegmentImpl)srcSegment; - AbstractMemorySegmentImpl dstImpl = (AbstractMemorySegmentImpl)dstSegment; - if (srcElementLayout.byteSize() != dstElementLayout.byteSize()) { - throw new IllegalArgumentException("Source and destination layouts must have same size"); - } - Utils.checkElementAlignment(srcElementLayout, "Source layout alignment greater than its size"); - Utils.checkElementAlignment(dstElementLayout, "Destination layout alignment greater than its size"); - if (!srcImpl.isAlignedForElement(srcOffset, srcElementLayout)) { - throw new IllegalArgumentException("Source segment incompatible with alignment constraints"); - } - if (!dstImpl.isAlignedForElement(dstOffset, dstElementLayout)) { - throw new IllegalArgumentException("Destination 
segment incompatible with alignment constraints"); - } - long size = elementCount * srcElementLayout.byteSize(); - srcImpl.checkAccess(srcOffset, size, true); - dstImpl.checkAccess(dstOffset, size, false); - if (srcElementLayout.byteSize() == 1 || srcElementLayout.order() == dstElementLayout.order()) { - ScopedMemoryAccess.getScopedMemoryAccess().copyMemory(srcImpl.sessionImpl(), dstImpl.sessionImpl(), - srcImpl.unsafeGetBase(), srcImpl.unsafeGetOffset() + srcOffset, - dstImpl.unsafeGetBase(), dstImpl.unsafeGetOffset() + dstOffset, size); - } else { - ScopedMemoryAccess.getScopedMemoryAccess().copySwapMemory(srcImpl.sessionImpl(), dstImpl.sessionImpl(), - srcImpl.unsafeGetBase(), srcImpl.unsafeGetOffset() + srcOffset, - dstImpl.unsafeGetBase(), dstImpl.unsafeGetOffset() + dstOffset, size, srcElementLayout.byteSize()); - } + AbstractMemorySegmentImpl.copy(srcSegment, srcElementLayout, srcOffset, dstSegment, dstElementLayout, dstOffset, elementCount); } /** @@ -1375,9 +1333,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @return a byte value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. 
* @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1395,9 +1353,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @param value the byte value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1416,9 +1374,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @return a boolean value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. 
* @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1436,9 +1394,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @param value the boolean value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1457,9 +1415,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @return a char value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. 
* @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1477,9 +1435,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @param value the char value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1498,9 +1456,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @return a short value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. 
* @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1518,9 +1476,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @param value the short value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1539,9 +1497,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @return an int value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. 
* @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1559,9 +1517,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @param value the int value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1580,9 +1538,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @return a float value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. 
* @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1600,9 +1558,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @param value the float value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1621,9 +1579,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @return a long value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. 
* @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1641,9 +1599,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @param value the long value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1662,9 +1620,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @return a double value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. 
* @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1682,9 +1640,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @param value the double value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1698,24 +1656,27 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * Reads an address from this segment at the given offset, with the given layout. The read address is wrapped in - * a native segment, associated with the {@linkplain SegmentScope#global() global scope}. Under normal conditions, - * the size of the returned segment is {@code 0}. However, if the provided layout is an - * {@linkplain ValueLayout.OfAddress#asUnbounded() unbounded} address layout, then the size of the returned - * segment is {@code Long.MAX_VALUE}. + * a native segment, associated with a fresh scope that is always alive. Under normal conditions, + * the size of the returned segment is {@code 0}. However, if the provided address layout has a + * {@linkplain AddressLayout#targetLayout() target layout} {@code T}, then the size of the returned segment + * is set to {@code T.byteSize()}. * @param layout the layout of the region of memory to be read. 
* @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @return a native segment wrapping an address read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. + * @throws IllegalArgumentException if the provided address layout has a {@linkplain AddressLayout#targetLayout() target layout} + * {@code T}, and the address of the returned segment + * is incompatible with the alignment constraint in {@code T}. * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the * memory segment. */ @ForceInline - default MemorySegment get(ValueLayout.OfAddress layout, long offset) { + default MemorySegment get(AddressLayout layout, long offset) { return (MemorySegment) ((ValueLayouts.OfAddressImpl) layout).accessHandle().get(this, offset); } @@ -1726,9 +1687,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param offset offset in bytes (relative to this segment address) at which this access operation will occur. * @param value the address value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. 
* @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout. * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the @@ -1737,10 +1698,58 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @throws UnsupportedOperationException if {@code value} is not a {@linkplain #isNative() native} segment. */ @ForceInline - default void set(ValueLayout.OfAddress layout, long offset, MemorySegment value) { + default void set(AddressLayout layout, long offset, MemorySegment value) { ((ValueLayouts.OfAddressImpl) layout).accessHandle().set(this, offset, value); } + /** + * Reads a byte from this segment at the given index, scaled by the given layout size. + * + * @param layout the layout of the region of memory to be read. + * @param index a logical index. The offset in bytes (relative to this segment address) at which the access operation + * will occur can be expressed as {@code (index * layout.byteSize())}. + * @return a byte value read from this segment. + * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not + * {@linkplain Scope#isAlive() alive}. + * @throws WrongThreadException if this method is called from a thread {@code T}, + * such that {@code isAccessibleBy(T) == false}. + * @throws IllegalArgumentException if the access operation is + * incompatible with the alignment constraint in the provided layout, + * or if the layout alignment is greater than its size. + * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the + * memory segment. 
+ */ + @ForceInline + default byte getAtIndex(ValueLayout.OfByte layout, long index) { + Utils.checkElementAlignment(layout, "Layout alignment greater than its size"); + // note: we know size is a small value (as it comes from ValueLayout::byteSize()) + return (byte) ((ValueLayouts.OfByteImpl) layout).accessHandle().get(this, index * layout.byteSize()); + } + + /** + * Reads a boolean from this segment at the given index, scaled by the given layout size. + * + * @param layout the layout of the region of memory to be read. + * @param index a logical index. The offset in bytes (relative to this segment address) at which the access operation + * will occur can be expressed as {@code (index * layout.byteSize())}. + * @return a boolean value read from this segment. + * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not + * {@linkplain Scope#isAlive() alive}. + * @throws WrongThreadException if this method is called from a thread {@code T}, + * such that {@code isAccessibleBy(T) == false}. + * @throws IllegalArgumentException if the access operation is + * incompatible with the alignment constraint in the provided layout, + * or if the layout alignment is greater than its size. + * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the + * memory segment. + */ + @ForceInline + default boolean getAtIndex(ValueLayout.OfBoolean layout, long index) { + Utils.checkElementAlignment(layout, "Layout alignment greater than its size"); + // note: we know size is a small value (as it comes from ValueLayout::byteSize()) + return (boolean) ((ValueLayouts.OfBooleanImpl) layout).accessHandle().get(this, index * layout.byteSize()); + } + /** * Reads a char from this segment at the given index, scaled by the given layout size. * @@ -1749,9 +1758,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * will occur can be expressed as {@code (index * layout.byteSize())}. 
* @return a char value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. @@ -1773,9 +1782,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * will occur can be expressed as {@code (index * layout.byteSize())}. * @param value the char value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. @@ -1798,9 +1807,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * will occur can be expressed as {@code (index * layout.byteSize())}. * @return a short value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. 
* @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. @@ -1814,6 +1823,57 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { return (short) ((ValueLayouts.OfShortImpl) layout).accessHandle().get(this, index * layout.byteSize()); } + /** + * Writes a byte into this segment at the given index, scaled by the given layout size. + * + * @param layout the layout of the region of memory to be written. + * @param index a logical index. The offset in bytes (relative to this segment address) at which the access operation + * will occur can be expressed as {@code (index * layout.byteSize())}. + * @param value the byte value to be written. + * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not + * {@linkplain Scope#isAlive() alive}. + * @throws WrongThreadException if this method is called from a thread {@code T}, + * such that {@code isAccessibleBy(T) == false}. + * @throws IllegalArgumentException if the access operation is + * incompatible with the alignment constraint in the provided layout, + * or if the layout alignment is greater than its size. + * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the + * memory segment. + * @throws UnsupportedOperationException if this segment is {@linkplain #isReadOnly() read-only}. + */ + @ForceInline + default void setAtIndex(ValueLayout.OfByte layout, long index, byte value) { + Utils.checkElementAlignment(layout, "Layout alignment greater than its size"); + // note: we know size is a small value (as it comes from ValueLayout::byteSize()) + ((ValueLayouts.OfByteImpl) layout).accessHandle().set(this, index * layout.byteSize(), value); + + } + + /** + * Writes a boolean into this segment at the given index, scaled by the given layout size. 
+ * + * @param layout the layout of the region of memory to be written. + * @param index a logical index. The offset in bytes (relative to this segment address) at which the access operation + * will occur can be expressed as {@code (index * layout.byteSize())}. + * @param value the boolean value to be written. + * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not + * {@linkplain Scope#isAlive() alive}. + * @throws WrongThreadException if this method is called from a thread {@code T}, + * such that {@code isAccessibleBy(T) == false}. + * @throws IllegalArgumentException if the access operation is + * incompatible with the alignment constraint in the provided layout, + * or if the layout alignment is greater than its size. + * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the + * memory segment. + * @throws UnsupportedOperationException if this segment is {@linkplain #isReadOnly() read-only}. + */ + @ForceInline + default void setAtIndex(ValueLayout.OfBoolean layout, long index, boolean value) { + Utils.checkElementAlignment(layout, "Layout alignment greater than its size"); + // note: we know size is a small value (as it comes from ValueLayout::byteSize()) + ((ValueLayouts.OfBooleanImpl) layout).accessHandle().set(this, index * layout.byteSize(), value); + } + /** * Writes a short into this segment at the given index, scaled by the given layout size. * @@ -1822,9 +1882,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * will occur can be expressed as {@code (index * layout.byteSize())}. * @param value the short value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. 
* @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. @@ -1847,9 +1907,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * will occur can be expressed as {@code (index * layout.byteSize())}. * @return an int value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. @@ -1871,9 +1931,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * will occur can be expressed as {@code (index * layout.byteSize())}. * @param value the int value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. 
@@ -1896,9 +1956,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * will occur can be expressed as {@code (index * layout.byteSize())}. * @return a float value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. @@ -1920,9 +1980,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * will occur can be expressed as {@code (index * layout.byteSize())}. * @param value the float value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. @@ -1945,9 +2005,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * will occur can be expressed as {@code (index * layout.byteSize())}. * @return a long value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. 
* @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. @@ -1969,9 +2029,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * will occur can be expressed as {@code (index * layout.byteSize())}. * @param value the long value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. @@ -1994,9 +2054,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * will occur can be expressed as {@code (index * layout.byteSize())}. * @return a double value read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. 
@@ -2018,9 +2078,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * will occur can be expressed as {@code (index * layout.byteSize())}. * @param value the double value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. @@ -2037,27 +2097,29 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { /** * Reads an address from this segment at the given at the given index, scaled by the given layout size. The read address is wrapped in - * a native segment, associated with the {@linkplain SegmentScope#global() global scope}. Under normal conditions, - * the size of the returned segment is {@code 0}. However, if the provided layout is an - * {@linkplain ValueLayout.OfAddress#asUnbounded() unbounded} address layout, then the size of the returned - * segment is {@code Long.MAX_VALUE}. - * + * a native segment, associated with a fresh scope that is always alive. Under normal conditions, + * the size of the returned segment is {@code 0}. However, if the provided address layout has a + * {@linkplain AddressLayout#targetLayout() target layout} {@code T}, then the size of the returned segment + * is set to {@code T.byteSize()}. * @param layout the layout of the region of memory to be read. * @param index a logical index. The offset in bytes (relative to this segment address) at which the access operation * will occur can be expressed as {@code (index * layout.byteSize())}. 
* @return a native segment wrapping an address read from this segment. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. + * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. + * @throws IllegalArgumentException if the provided address layout has a {@linkplain AddressLayout#targetLayout() target layout} + * {@code T}, and the address of the returned segment + * is incompatible with the alignment constraint in {@code T}. * @throws IndexOutOfBoundsException when the access operation falls outside the spatial bounds of the * memory segment. */ @ForceInline - default MemorySegment getAtIndex(ValueLayout.OfAddress layout, long index) { + default MemorySegment getAtIndex(AddressLayout layout, long index) { Utils.checkElementAlignment(layout, "Layout alignment greater than its size"); // note: we know size is a small value (as it comes from ValueLayout::byteSize()) return (MemorySegment) ((ValueLayouts.OfAddressImpl) layout).accessHandle().get(this, index * layout.byteSize()); @@ -2071,9 +2133,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * will occur can be expressed as {@code (index * layout.byteSize())}. * @param value the address value to be written. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with this segment is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code scope().isAccessibleBy(T) == false}. 
+ * such that {@code isAccessibleBy(T) == false}. * @throws IllegalArgumentException if the access operation is * incompatible with the alignment constraint in the provided layout, * or if the layout alignment is greater than its size. @@ -2083,7 +2145,7 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @throws UnsupportedOperationException if {@code value} is not a {@linkplain #isNative() native} segment. */ @ForceInline - default void setAtIndex(ValueLayout.OfAddress layout, long index, MemorySegment value) { + default void setAtIndex(AddressLayout layout, long index, MemorySegment value) { Utils.checkElementAlignment(layout, "Layout alignment greater than its size"); // note: we know size is a small value (as it comes from ValueLayout::byteSize()) ((ValueLayouts.OfAddressImpl) layout).accessHandle().set(this, index * layout.byteSize(), value); @@ -2133,7 +2195,7 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param dstIndex the starting index of the destination array. * @param elementCount the number of array elements to be copied. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with {@code srcSegment} is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, * such that {@code srcSegment().isAccessibleBy(T) == false}. * @throws IllegalArgumentException if {@code dstArray} is not an array, or if it is an array but whose type is not supported, @@ -2167,7 +2229,7 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @param dstOffset the starting offset, in bytes, of the destination segment. * @param elementCount the number of array elements to be copied. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with {@code dstSegment} is not - * {@linkplain SegmentScope#isAlive() alive}. 
+ * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, * such that {@code dstSegment().isAccessibleBy(T) == false}. * @throws IllegalArgumentException if {@code srcArray} is not an array, or if it is an array but whose type is not supported, @@ -2209,13 +2271,13 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { * @return the relative offset, in bytes, of the first mismatch between the source and destination segments, * otherwise -1 if no mismatch. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with {@code srcSegment} is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code srcSegment.scope().isAccessibleBy(T) == false}. + * such that {@code srcSegment.isAccessibleBy(T) == false}. * @throws IllegalStateException if the {@linkplain #scope() scope} associated with {@code dstSegment} is not - * {@linkplain SegmentScope#isAlive() alive}. + * {@linkplain Scope#isAlive() alive}. * @throws WrongThreadException if this method is called from a thread {@code T}, - * such that {@code dstSegment.scope().isAccessibleBy(T) == false}. + * such that {@code dstSegment.isAccessibleBy(T) == false}. * @throws IndexOutOfBoundsException if {@code srcFromOffset < 0}, {@code srcToOffset < srcFromOffset} or * {@code srcToOffset > srcSegment.byteSize()} * @throws IndexOutOfBoundsException if {@code dstFromOffset < 0}, {@code dstToOffset < dstFromOffset} or @@ -2230,4 +2292,38 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl { dstSegment, dstFromOffset, dstToOffset); } + /** + * A scope models the lifetime of all the memory segments associated with it. That is, a memory segment + * cannot be accessed if its associated scope is not {@linkplain #isAlive() alive}. 
A new scope is typically + * obtained indirectly, by creating a new {@linkplain Arena arena}. + *

    + * Scope instances can be compared for equality. That is, two scopes + * are considered {@linkplain #equals(Object) equal} if they denote the same lifetime. + */ + @PreviewFeature(feature=PreviewFeature.Feature.FOREIGN) + sealed interface Scope permits MemorySessionImpl { + /** + * {@return {@code true}, if the regions of memory backing the memory segments associated with this scope are + * still valid} + */ + boolean isAlive(); + + /** + * {@return {@code true}, if the provided object is also a scope, which models the same lifetime as that + * modelled by this scope}. In that case, it is always the case that + * {@code this.isAlive() == ((Scope)that).isAlive()}. + * @param that the object to be tested. + */ + @Override + boolean equals(Object that); + + /** + * Returns the hash code of this scope object. + * @implSpec Implementations of this method obey the general contract of {@link Object#hashCode}. + * @return the hash code of this scope object. + * @see #equals(Object) + */ + @Override + int hashCode(); + } } diff --git a/src/java.base/share/classes/java/lang/foreign/PaddingLayout.java b/src/java.base/share/classes/java/lang/foreign/PaddingLayout.java index 47848641edc..a09689c874c 100644 --- a/src/java.base/share/classes/java/lang/foreign/PaddingLayout.java +++ b/src/java.base/share/classes/java/lang/foreign/PaddingLayout.java @@ -50,5 +50,12 @@ public sealed interface PaddingLayout extends MemoryLayout permits PaddingLayout * {@inheritDoc} */ @Override + PaddingLayout withoutName(); + + /** + * {@inheritDoc} + * @throws IllegalArgumentException {@inheritDoc} + */ + @Override PaddingLayout withBitAlignment(long bitAlignment); } diff --git a/src/java.base/share/classes/java/lang/foreign/SegmentAllocator.java b/src/java.base/share/classes/java/lang/foreign/SegmentAllocator.java index e7a3c7efb5e..54c070001cc 100644 --- a/src/java.base/share/classes/java/lang/foreign/SegmentAllocator.java +++ b/src/java.base/share/classes/java/lang/foreign/SegmentAllocator.java 
@@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,6 @@ import java.nio.charset.StandardCharsets; import java.util.Objects; import java.util.function.Function; import jdk.internal.foreign.AbstractMemorySegmentImpl; -import jdk.internal.foreign.MemorySessionImpl; import jdk.internal.foreign.SlicingAllocator; import jdk.internal.foreign.Utils; import jdk.internal.javac.PreviewFeature; @@ -46,8 +45,6 @@ import jdk.internal.javac.PreviewFeature; *

    * This interface also defines factories for commonly used allocators: *