diff --git a/jdk/make/java/management/Exportedfiles.gmk b/jdk/make/java/management/Exportedfiles.gmk index 0654fb57526..21f46bd923a 100644 --- a/jdk/make/java/management/Exportedfiles.gmk +++ b/jdk/make/java/management/Exportedfiles.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ FILES_export = \ sun/management/ClassLoadingImpl.java \ + sun/management/DiagnosticCommandImpl.java \ sun/management/FileSystemImpl.java \ sun/management/Flag.java \ sun/management/GarbageCollectorImpl.java \ diff --git a/jdk/make/java/management/FILES_c.gmk b/jdk/make/java/management/FILES_c.gmk index 8c5ddce29b5..f0d0c9c6e43 100644 --- a/jdk/make/java/management/FILES_c.gmk +++ b/jdk/make/java/management/FILES_c.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ FILES_c = \ ClassLoadingImpl.c \ + DiagnosticCommandImpl.c \ FileSystemImpl.c \ Flag.c \ GarbageCollectorImpl.c \ diff --git a/jdk/make/java/management/mapfile-vers b/jdk/make/java/management/mapfile-vers index 2e87b707463..63c4fbf03cc 100644 --- a/jdk/make/java/management/mapfile-vers +++ b/jdk/make/java/management/mapfile-vers @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,10 @@ SUNWprivate_1.1 { Java_com_sun_management_UnixOperatingSystem_getTotalSwapSpaceSize; Java_com_sun_management_UnixOperatingSystem_initialize; Java_sun_management_ClassLoadingImpl_setVerboseClass; + Java_sun_management_DiagnosticCommandImpl_executeDiagnosticCommand; + Java_sun_management_DiagnosticCommandImpl_getDiagnosticCommands; + Java_sun_management_DiagnosticCommandImpl_getDiagnosticCommandInfo; + Java_sun_management_DiagnosticCommandImpl_setNotificationEnabled; Java_sun_management_FileSystemImpl_isAccessUserOnly0; Java_sun_management_Flag_getAllFlagNames; Java_sun_management_Flag_getFlags; diff --git a/jdk/make/tools/CharsetMapping/EUC_KR.map b/jdk/make/tools/CharsetMapping/EUC_KR.map index 0b44e6c91c9..d1fae60e1aa 100644 --- a/jdk/make/tools/CharsetMapping/EUC_KR.map +++ b/jdk/make/tools/CharsetMapping/EUC_KR.map @@ -5,6 +5,8 @@ # (2)Added 2 new codepoints (KS X 1001:1998) # 0xA2E6 0x20AC # EURO Sign # 0xA2E7 0x00AE # Registered Sign +# (3) KS X 1001:2002 +# 0xA2E8 0x327E # CIRCLED KOREAN CHARACTER JUEUI (Postal Code Mark) # 0x00 0x0000 0x01 0x0001 @@ -295,6 +297,7 @@ # 0xA2E6 0x20AC # EURO Sign 0xA2E7 0x00AE # Registered Sign +0xA2E8 0x327E # CIRCLED KOREAN CHARACTER JUEUI # 0xA2E0 0x2116 # NUMERO SIGN 0xA2E1 0x33C7 # SQUARE CO diff --git a/jdk/make/tools/src/build/tools/generatebreakiteratordata/CharSet.java b/jdk/make/tools/src/build/tools/generatebreakiteratordata/CharSet.java index 2dc948a6641..b8539159a8c 100644 --- a/jdk/make/tools/src/build/tools/generatebreakiteratordata/CharSet.java +++ b/jdk/make/tools/src/build/tools/generatebreakiteratordata/CharSet.java @@ -39,6 +39,7 @@ package build.tools.generatebreakiteratordata; +import java.util.Arrays; import java.util.Hashtable; /** @@ -701,7 +702,14 @@ class CharSet { * the exact same characters as this one */ public boolean equals(Object that) { - return (that instanceof CharSet) && chars.equals(((CharSet)that).chars); + return (that instanceof CharSet) && Arrays.equals(chars, ((CharSet)that).chars); + } + + /** + * Returns the hash code for this set of characters + */ + public int hashCode() { + return Arrays.hashCode(chars); } /** diff --git a/jdk/makefiles/CompileJavaClasses.gmk b/jdk/makefiles/CompileJavaClasses.gmk index 24ce0d922f0..0b3e70dd914 100644 --- a/jdk/makefiles/CompileJavaClasses.gmk +++ b/jdk/makefiles/CompileJavaClasses.gmk @@ -342,7 +342,7 @@ $(eval $(call SetupJavaCompilation,BUILD_JOBJC,\ DISABLE_SJAVAC:=true,\ SRC:=$(JDK_TOPDIR)/src/macosx/native/jobjc/src/core/java \ $(JDK_TOPDIR)/src/macosx/native/jobjc/src/runtime-additions/java \ - $(JDK_OUTPUTDIR)/gensrc, \ + $(JDK_OUTPUTDIR)/gensrc_jobjc/src, \ INCLUDES := com/apple/jobjc,\ EXCLUDES := tests/java/com/apple/jobjc,\ BIN:=$(JDK_OUTPUTDIR)/jobjc_classes,\ @@ -355,7 +355,7 @@ $(eval $(call SetupJavaCompilation,BUILD_JOBJC_HEADERS,\ SETUP:=GENERATE_JDKBYTECODE,\ SRC:=$(JDK_TOPDIR)/src/macosx/native/jobjc/src/core/java \ $(JDK_TOPDIR)/src/macosx/native/jobjc/src/runtime-additions/java \ - $(JDK_OUTPUTDIR)/gensrc, \ + $(JDK_OUTPUTDIR)/gensrc_jobjc/src, \ INCLUDES := com/apple/jobjc,\ EXCLUDES := tests/java/com/apple/jobjc,\ BIN:=$(JDK_OUTPUTDIR)/jobjc_classes_headers,\ diff --git a/jdk/makefiles/GensrcBuffer.gmk b/jdk/makefiles/GensrcBuffer.gmk index 3e55b1c14a4..55b51d050d8 100644 --- a/jdk/makefiles/GensrcBuffer.gmk +++ b/jdk/makefiles/GensrcBuffer.gmk @@ -69,6 +69,9 @@ define typesAndBits $1_fulltype := character $1_Fulltype := Character $1_category := integralType 
+ $1_streams := streamableType + $1_streamtype := int + $1_Streamtype := Int $1_LBPV := 1 endif @@ -97,7 +100,7 @@ define typesAndBits $1_Type := Long $1_fulltype := long $1_Fulltype := Long - $1_category := integralType + $1_category := integralType $1_LBPV := 3 endif @@ -231,10 +234,13 @@ $$($1_DST) : $$($1_DEP) $(GENSRC_BUFFER_DST)/_the.buffer.dir $(TOOL_SPP) < $$($1_SRC) > $$($1_OUT).tmp \ -K$$($1_type) \ -K$$($1_category) \ + -K$$($1_streams) \ -Dtype=$$($1_type) \ -DType=$$($1_Type) \ -Dfulltype=$$($1_fulltype) \ -DFulltype=$$($1_Fulltype) \ + -Dstreamtype=$$($1_streamtype) \ + -DStreamtype=$$($1_Streamtype) \ -Dx=$$($1_x) \ -Dmemtype=$$($1_memtype) \ -DMemtype=$$($1_Memtype) \ diff --git a/jdk/makefiles/mapfiles/libmanagement/mapfile-vers b/jdk/makefiles/mapfiles/libmanagement/mapfile-vers index 2e87b707463..b934fe8b748 100644 --- a/jdk/makefiles/mapfiles/libmanagement/mapfile-vers +++ b/jdk/makefiles/mapfiles/libmanagement/mapfile-vers @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,10 @@ SUNWprivate_1.1 { Java_com_sun_management_UnixOperatingSystem_getTotalSwapSpaceSize; Java_com_sun_management_UnixOperatingSystem_initialize; Java_sun_management_ClassLoadingImpl_setVerboseClass; + Java_sun_management_DiagnosticCommandImpl_executeDiagnosticCommand; + Java_sun_management_DiagnosticCommandImpl_getDiagnosticCommands; + Java_sun_management_DiagnosticCommandImpl_getDiagnosticCommandInfo; + Java_sun_management_DiagnosticCommandImpl_setNotificationEnabled; Java_sun_management_FileSystemImpl_isAccessUserOnly0; Java_sun_management_Flag_getAllFlagNames; Java_sun_management_Flag_getFlags; diff --git a/jdk/src/share/classes/com/sun/crypto/provider/DHKeyAgreement.java b/jdk/src/share/classes/com/sun/crypto/provider/DHKeyAgreement.java index 33fa49297dd..6a2f298fe6e 100644 --- a/jdk/src/share/classes/com/sun/crypto/provider/DHKeyAgreement.java +++ b/jdk/src/share/classes/com/sun/crypto/provider/DHKeyAgreement.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -403,8 +403,9 @@ extends KeyAgreementSpi { } return skey; } else if (algorithm.equals("TlsPremasterSecret")) { - // return entire secret - return new SecretKeySpec(secret, "TlsPremasterSecret"); + // remove leading zero bytes per RFC 5246 Section 8.1.2 + return new SecretKeySpec( + KeyUtil.trimZeroes(secret), "TlsPremasterSecret"); } else { throw new NoSuchAlgorithmException("Unsupported secret key " + "algorithm: "+ algorithm); diff --git a/jdk/src/share/classes/com/sun/crypto/provider/HmacPKCS12PBESHA1.java b/jdk/src/share/classes/com/sun/crypto/provider/HmacPKCS12PBESHA1.java index d41b88c7e38..e85b8816a28 100644 --- a/jdk/src/share/classes/com/sun/crypto/provider/HmacPKCS12PBESHA1.java +++ b/jdk/src/share/classes/com/sun/crypto/provider/HmacPKCS12PBESHA1.java @@ -86,12 +86,13 @@ public final class HmacPKCS12PBESHA1 extends HmacCore { throw new InvalidKeyException("SecretKey of PBE type required"); } if (params == null) { - // generate default for salt and iteration count if necessary - if (salt == null) { - salt = new byte[20]; - SunJCE.getRandom().nextBytes(salt); + // should not auto-generate default values since current + // javax.crypto.Mac api does not have any method for caller to + // retrieve the generated defaults. + if ((salt == null) || (iCount == 0)) { + throw new InvalidAlgorithmParameterException + ("PBEParameterSpec required for salt and iteration count"); } - if (iCount == 0) iCount = 100; } else if (!(params instanceof PBEParameterSpec)) { throw new InvalidAlgorithmParameterException ("PBEParameterSpec type required"); diff --git a/jdk/src/share/classes/com/sun/crypto/provider/PBMAC1Core.java b/jdk/src/share/classes/com/sun/crypto/provider/PBMAC1Core.java index 47663943184..2f0ba131a55 100644 --- a/jdk/src/share/classes/com/sun/crypto/provider/PBMAC1Core.java +++ b/jdk/src/share/classes/com/sun/crypto/provider/PBMAC1Core.java @@ -42,12 +42,10 @@ import java.security.spec.*; */ abstract class PBMAC1Core extends HmacCore { - private static final int DEFAULT_SALT_LENGTH = 20; - private static final int DEFAULT_COUNT = 4096; - + // NOTE: this class inherits the Cloneable interface from HmacCore + // Need to override clone() if mutable fields are added. 
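For illustration (not part of this changeset): the HmacPKCS12PBESHA1 change above, and the matching PBMAC1Core change below, make these PBE Macs reject a missing PBEParameterSpec instead of inventing a salt and iteration count that the javax.crypto.Mac API gives the caller no way to retrieve. A minimal caller-side sketch; the pairing of the "PBEWithHmacSHA1" Mac with the "PBEWithHmacSHA1AndAES_128" SecretKeyFactory is an assumption about what the installed SunJCE provider registers:

    import java.security.SecureRandom;
    import javax.crypto.Mac;
    import javax.crypto.SecretKey;
    import javax.crypto.SecretKeyFactory;
    import javax.crypto.spec.PBEKeySpec;
    import javax.crypto.spec.PBEParameterSpec;

    public class PbeMacSketch {
        public static void main(String[] args) throws Exception {
            // The caller now owns the salt and iteration count explicitly.
            byte[] salt = new byte[20];
            new SecureRandom().nextBytes(salt);
            int iterationCount = 4096;

            SecretKey key = SecretKeyFactory.getInstance("PBEWithHmacSHA1AndAES_128")
                    .generateSecret(new PBEKeySpec("password".toCharArray()));

            Mac mac = Mac.getInstance("PBEWithHmacSHA1");
            // Initializing without a PBEParameterSpec now fails with
            // InvalidAlgorithmParameterException instead of using hidden defaults.
            mac.init(key, new PBEParameterSpec(salt, iterationCount));
            System.out.println(mac.doFinal("message".getBytes("UTF-8")).length);
        }
    }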
     private final String kdfAlgo;
     private final String hashAlgo;
-    private final PBKDF2Core kdf;
     private final int blockLength; // in octets

     /**
@@ -56,13 +54,15 @@ abstract class PBMAC1Core extends HmacCore {
      */
     PBMAC1Core(String kdfAlgo, String hashAlgo, int blockLength)
         throws NoSuchAlgorithmException {
-
         super(hashAlgo, blockLength);
         this.kdfAlgo = kdfAlgo;
         this.hashAlgo = hashAlgo;
         this.blockLength = blockLength;
+    }

-        switch(kdfAlgo) {
+    private static PBKDF2Core getKDFImpl(String algo) {
+        PBKDF2Core kdf = null;
+        switch(algo) {
         case "HmacSHA1":
             kdf = new PBKDF2Core.HmacSHA1();
             break;
@@ -79,9 +79,10 @@ abstract class PBMAC1Core extends HmacCore {
             kdf = new PBKDF2Core.HmacSHA512();
             break;
         default:
-            throw new NoSuchAlgorithmException(
-                "No MAC implementation for " + kdfAlgo);
+            throw new ProviderException(
+                "No MAC implementation for " + algo);
         }
+        return kdf;
     }

     /**
@@ -120,12 +121,13 @@ abstract class PBMAC1Core extends HmacCore {
             throw new InvalidKeyException("SecretKey of PBE type required");
         }
         if (params == null) {
-            // generate default for salt and iteration count if necessary
-            if (salt == null) {
-                salt = new byte[DEFAULT_SALT_LENGTH];
-                SunJCE.getRandom().nextBytes(salt);
+            // should not auto-generate default values since current
+            // javax.crypto.Mac api does not have any method for caller to
+            // retrieve the generated defaults.
+            if ((salt == null) || (iCount == 0)) {
+                throw new InvalidAlgorithmParameterException
+                    ("PBEParameterSpec required for salt and iteration count");
             }
-            if (iCount == 0) iCount = DEFAULT_COUNT;
         } else if (!(params instanceof PBEParameterSpec)) {
             throw new InvalidAlgorithmParameterException
                 ("PBEParameterSpec type required");
@@ -168,7 +170,7 @@ abstract class PBMAC1Core extends HmacCore {
         java.util.Arrays.fill(passwdChars, ' ');

         SecretKey s = null;
-
+        PBKDF2Core kdf = getKDFImpl(kdfAlgo);
         try {
             s = kdf.engineGenerateSecret(pbeSpec);
diff --git a/jdk/src/share/classes/com/sun/crypto/provider/SunJCE.java b/jdk/src/share/classes/com/sun/crypto/provider/SunJCE.java
index 7be5416d390..a6843ff4f59 100644
--- a/jdk/src/share/classes/com/sun/crypto/provider/SunJCE.java
+++ b/jdk/src/share/classes/com/sun/crypto/provider/SunJCE.java
@@ -731,10 +731,11 @@ public final class SunJCE extends Provider {
         put("Mac.HmacSHA384 SupportedKeyFormats", "RAW");
         put("Mac.HmacSHA512 SupportedKeyFormats", "RAW");
         put("Mac.HmacPBESHA1 SupportedKeyFormats", "RAW");
-        put("Mac.HmacPBESHA224 SupportedKeyFormats", "RAW");
-        put("Mac.HmacPBESHA256 SupportedKeyFormats", "RAW");
-        put("Mac.HmacPBESHA384 SupportedKeyFormats", "RAW");
-        put("Mac.HmacPBESHA512 SupportedKeyFormats", "RAW");
+        put("Mac.PBEWithHmacSHA1 SupportedKeyFormats", "RAW");
+        put("Mac.PBEWithHmacSHA224 SupportedKeyFormats", "RAW");
+        put("Mac.PBEWithHmacSHA256 SupportedKeyFormats", "RAW");
+        put("Mac.PBEWithHmacSHA384 SupportedKeyFormats", "RAW");
+        put("Mac.PBEWithHmacSHA512 SupportedKeyFormats", "RAW");
         put("Mac.SslMacMD5 SupportedKeyFormats", "RAW");
         put("Mac.SslMacSHA1 SupportedKeyFormats", "RAW");
diff --git a/jdk/src/share/classes/com/sun/management/DiagnosticCommandMBean.java b/jdk/src/share/classes/com/sun/management/DiagnosticCommandMBean.java
new file mode 100644
index 00000000000..da2d510024e
--- /dev/null
+++ b/jdk/src/share/classes/com/sun/management/DiagnosticCommandMBean.java
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.sun.management; + +import java.lang.management.PlatformManagedObject; +import javax.management.DynamicMBean; + +/** + * Management interface for the diagnostic commands for the HotSpot Virtual Machine. + * + *
+ * <p>The {@code DiagnosticCommandMBean} is registered to the
+ * {@linkplain java.lang.management.ManagementFactory#getPlatformMBeanServer
+ * platform MBeanServer} as are other platform MBeans.
+ *
+ * <p>The {@link javax.management.ObjectName ObjectName} for uniquely identifying
+ * the diagnostic MBean within an MBeanServer is:
+ * <blockquote>
+ *    {@code com.sun.management:type=DiagnosticCommand}
+ * </blockquote>
+ *
+ * <p>This MBean is a {@link javax.management.DynamicMBean DynamicMBean}
+ * and also a {@link javax.management.NotificationEmitter}.
+ * The {@code DiagnosticCommandMBean} is generated at runtime and is subject to
+ * modifications during the lifetime of the Java virtual machine.
+ *
+ * A diagnostic command is represented as an operation of
+ * the {@code DiagnosticCommandMBean} interface. Each diagnostic command has:
+ * <ul>
+ * <li>the diagnostic command name, which is the name being referenced in
+ *     the HotSpot Virtual Machine</li>
+ * <li>the MBean operation name, which is the name of the corresponding
+ *     {@code DiagnosticCommandMBean} operation; the MBean operation name is
+ *     implementation dependent</li>
+ * </ul>
+ *
+ * The recommended way to transform a diagnostic command name into an MBean
+ * operation name is as follows:
+ * <ul>
+ * <li>All characters from the first one to the first dot are set to be
+ *     lower-case characters</li>
+ * <li>Every dot or underline character is removed and the following
+ *     character is set to be an upper-case character</li>
+ * <li>All other characters are copied without modification</li>
+ * </ul>
+ *
+ * <p>The diagnostic command name is always provided with the meta-data on the
+ * operation in a field named {@code dcmd.name} (see below).
+ *
+ * <p>A diagnostic command may or may not support options or arguments.
+ * All the operations return {@code String} and either take
+ * no parameter for operations that do not support any option or argument,
+ * or take a {@code String[]} parameter for operations that support at least
+ * one option or argument.
+ * Each option or argument must be stored in a single String.
+ * Options or arguments split across several String instances are not supported.
+ *
+ * <p>The distinction between options and arguments: options are identified by
+ * the option name while arguments are identified by their position in the
+ * command line. Options and arguments are processed in the order of the array
+ * passed to the invocation method.
+ *
+ * <p>Like any operation of a dynamic MBean, each of these operations is
+ * described by a {@link javax.management.MBeanOperationInfo MBeanOperationInfo}
+ * instance. Here are the values returned by this object:
+ * <ul>
+ * <li>{@link javax.management.MBeanOperationInfo#getName() getName()}
+ *     returns the operation name generated from the diagnostic command name</li>
+ * <li>{@link javax.management.MBeanOperationInfo#getDescription() getDescription()}
+ *     returns the diagnostic command description</li>
+ * <li>{@link javax.management.MBeanOperationInfo#getImpact() getImpact()}
+ *     returns <code>ACTION_INFO</code></li>
+ * </ul>
+ *
+ * <p>The {@link javax.management.Descriptor Descriptor}
+ * is a collection of fields containing additional
+ * meta-data for a JMX element. A field is a name and an associated value.
+ * The additional meta-data provided for an operation associated with a
+ * diagnostic command are described in the table below:
+ *
+ * <table border="1" cellpadding="5">
+ *   <tr>
+ *     <th>Name</th><th>Type</th><th>Description</th>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.name</td><td>String</td>
+ *     <td>The original diagnostic command name (not the operation name)</td>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.description</td><td>String</td>
+ *     <td>The diagnostic command description</td>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.help</td><td>String</td>
+ *     <td>The full help message for this diagnostic command (same output as
+ *         the one produced by the 'help' command)</td>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.vmImpact</td><td>String</td>
+ *     <td>The impact of the diagnostic command;
+ *         this value is the same as the one printed in the 'impact'
+ *         section of the help message of the diagnostic command, and it
+ *         is different from the getImpact() of the MBeanOperationInfo</td>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.enabled</td><td>boolean</td>
+ *     <td>True if the diagnostic command is enabled, false otherwise</td>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.permissionClass</td><td>String</td>
+ *     <td>Some diagnostic commands might require a specific permission to be
+ *         executed, in addition to the MBeanPermission to invoke their
+ *         associated MBean operation. This field returns the fully qualified
+ *         name of the permission class, or null if no permission is required
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.permissionName</td><td>String</td>
+ *     <td>The first argument of the permission required to execute this
+ *         diagnostic command, or null if no permission is required</td>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.permissionAction</td><td>String</td>
+ *     <td>The second argument of the permission required to execute this
+ *         diagnostic command, or null if the permission constructor has only
+ *         one argument (like the ManagementPermission) or if no permission
+ *         is required</td>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.arguments</td><td>Descriptor</td>
+ *     <td>A Descriptor instance containing the descriptions of options and
+ *         arguments supported by the diagnostic command (see below)</td>
+ *   </tr>
+ * </table>
+ *
+ * <p>The description of parameters (options or arguments) of a diagnostic
+ * command is provided within a Descriptor instance. In this Descriptor,
+ * each field name is a parameter name, and each field value is itself
+ * a Descriptor instance. The fields provided in this second Descriptor
+ * instance are described in the table below:
+ *
+ * <table border="1" cellpadding="5">
+ *   <tr>
+ *     <th>Name</th><th>Type</th><th>Description</th>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.arg.name</td><td>String</td>
+ *     <td>The name of the parameter</td>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.arg.type</td><td>String</td>
+ *     <td>The type of the parameter. The returned String is the name of a type
+ *         recognized by the diagnostic command parser. These types are not
+ *         Java types and are implementation dependent.
+ *     </td>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.arg.description</td><td>String</td>
+ *     <td>The parameter description</td>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.arg.isMandatory</td><td>boolean</td>
+ *     <td>True if the parameter is mandatory, false otherwise</td>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.arg.isOption</td><td>boolean</td>
+ *     <td>True if the parameter is an option, false if it is an argument</td>
+ *   </tr>
+ *   <tr>
+ *     <td>dcmd.arg.isMultiple</td><td>boolean</td>
+ *     <td>True if the parameter can be specified several times, false
+ *         otherwise</td>
+ *   </tr>
+ * </table>
+ *
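For illustration (not part of this changeset): a client-side sketch of driving one of these operations through the platform MBeanServer. The operation name threadPrint assumes HotSpot's "Thread.print" command transformed by the naming rule above; as the documentation notes, actual operation names are implementation dependent.

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class DcmdClientSketch {
        public static void main(String[] args) throws Exception {
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            ObjectName name =
                new ObjectName("com.sun.management:type=DiagnosticCommand");

            // Operations that accept options/arguments take one String[]
            // parameter; each option or argument is a separate array element.
            String report = (String) server.invoke(
                name,
                "threadPrint",                                  // "Thread.print"
                new Object[] { new String[] { "-l" } },
                new String[] { String[].class.getName() });
            System.out.println(report);
        }
    }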
+ * <p>When the set of diagnostic commands currently supported by the Java
+ * Virtual Machine is modified, the {@code DiagnosticCommandMBean} emits
+ * a {@link javax.management.Notification} with a
+ * {@linkplain javax.management.Notification#getType() type} of
+ * {@code "jmx.mbean.info.changed"} and a
+ * {@linkplain javax.management.Notification#getUserData() userData} that
+ * is the new {@code MBeanInfo}.
+ *
+ * @since 8
+ */
+public interface DiagnosticCommandMBean extends DynamicMBean
+{
+
+}
diff --git a/jdk/src/share/classes/java/lang/Integer.java b/jdk/src/share/classes/java/lang/Integer.java
index a0eeecf2d98..e5de967a069 100644
--- a/jdk/src/share/classes/java/lang/Integer.java
+++ b/jdk/src/share/classes/java/lang/Integer.java
@@ -26,7 +26,6 @@ package java.lang;

 import java.lang.annotation.Native;
-import java.util.Properties;

 /**
  * The {@code Integer} class wraps a value of the primitive type
@@ -185,7 +184,7 @@ public final class Integer extends Number implements Comparable<Integer> {
      * @since 1.8
      */
     public static String toUnsignedString(int i, int radix) {
-        return Long.toString(toUnsignedLong(i), radix);
+        return Long.toUnsignedString(toUnsignedLong(i), radix);
     }

     /**
@@ -307,20 +306,39 @@ public final class Integer extends Number implements Comparable<Integer> {
     /**
      * Convert the integer to an unsigned number.
      */
-    private static String toUnsignedString0(int i, int shift) {
-        char[] buf = new char[32];
-        int charPos = 32;
+    private static String toUnsignedString0(int val, int shift) {
+        // assert shift > 0 && shift <=5 : "Illegal shift value";
+        int mag = Integer.SIZE - Integer.numberOfLeadingZeros(val);
+        int chars = Math.max(((mag + (shift - 1)) / shift), 1);
+        char[] buf = new char[chars];
+
+        formatUnsignedInt(val, shift, buf, 0, chars);
+
+        // Use special constructor which takes over "buf".
+        return new String(buf, true);
+    }
+
+    /**
+     * Format an int (treated as unsigned) into a character buffer.
+     * @param val the unsigned int to format
+     * @param shift the log2 of the base to format in (4 for hex, 3 for octal, 1 for binary)
+     * @param buf the character buffer to write to
+     * @param offset the offset in the destination buffer to start at
+     * @param len the number of characters to write
+     * @return the lowest character location used
+     */
+    static int formatUnsignedInt(int val, int shift, char[] buf, int offset, int len) {
+        int charPos = len;
         int radix = 1 << shift;
         int mask = radix - 1;
         do {
-            buf[--charPos] = digits[i & mask];
-            i >>>= shift;
-        } while (i != 0);
+            buf[offset + --charPos] = Integer.digits[val & mask];
+            val >>>= shift;
+        } while (val != 0 && charPos > 0);

-        return new String(buf, charPos, (32 - charPos));
+        return charPos;
     }

-
     final static char [] DigitTens = {
         '0', '0', '0', '0', '0', '0', '0', '0', '0', '0',
         '1', '1', '1', '1', '1', '1', '1', '1', '1', '1',
@@ -875,6 +893,7 @@ public final class Integer extends Number implements Comparable<Integer> {
      * Returns the value of this {@code Integer} as a {@code long}
      * after a widening primitive conversion.
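An aside on the two hunks above (not part of this changeset): the new sizing logic allocates exactly ceil(mag/shift) characters rather than a fixed 32-char buffer. A small standalone check of that arithmetic against the public API:

    public class UnsignedSizingSketch {
        public static void main(String[] args) {
            int val = -1;              // 0xffffffff: 32 significant bits
            int shift = 4;             // log2(16), i.e. hex digits
            int mag = Integer.SIZE - Integer.numberOfLeadingZeros(val);
            int chars = Math.max((mag + (shift - 1)) / shift, 1);

            System.out.println(chars);                              // 8
            System.out.println(Integer.toHexString(val).length());  // 8
            // The Math.max(..., 1) floor keeps one digit for zero:
            System.out.println(Integer.toBinaryString(0));          // "0"
        }
    }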
* @jls 5.1.2 Widening Primitive Conversions + * @see Integer#toUnsignedLong(int) */ public long longValue() { return (long)value; diff --git a/jdk/src/share/classes/java/lang/Long.java b/jdk/src/share/classes/java/lang/Long.java index 76dbfc38a7b..80967aca5de 100644 --- a/jdk/src/share/classes/java/lang/Long.java +++ b/jdk/src/share/classes/java/lang/Long.java @@ -28,6 +28,7 @@ package java.lang; import java.lang.annotation.Native; import java.math.*; + /** * The {@code Long} class wraps a value of the primitive type {@code * long} in an object. An object of type {@code Long} contains a @@ -344,18 +345,39 @@ public final class Long extends Number implements Comparable { } /** - * Convert the integer to an unsigned number. + * Format a long (treated as unsigned) into a String. + * @param val the value to format + * @param shift the log2 of the base to format in (4 for hex, 3 for octal, 1 for binary) */ - private static String toUnsignedString0(long i, int shift) { - char[] buf = new char[64]; - int charPos = 64; + static String toUnsignedString0(long val, int shift) { + // assert shift > 0 && shift <=5 : "Illegal shift value"; + int mag = Long.SIZE - Long.numberOfLeadingZeros(val); + int chars = Math.max(((mag + (shift - 1)) / shift), 1); + char[] buf = new char[chars]; + + formatUnsignedLong(val, shift, buf, 0, chars); + return new String(buf, true); + } + + /** + * Format a long (treated as unsigned) into a character buffer. + * @param val the unsigned long to format + * @param shift the log2 of the base to format in (4 for hex, 3 for octal, 1 for binary) + * @param buf the character buffer to write to + * @param offset the offset in the destination buffer to start at + * @param len the number of characters to write + * @return the lowest character location used + */ + static int formatUnsignedLong(long val, int shift, char[] buf, int offset, int len) { + int charPos = len; int radix = 1 << shift; - long mask = radix - 1; + int mask = radix - 1; do { - buf[--charPos] = Integer.digits[(int)(i & mask)]; - i >>>= shift; - } while (i != 0); - return new String(buf, charPos, (64 - charPos)); + buf[offset + --charPos] = Integer.digits[((int) val) & mask]; + val >>>= shift; + } while (val != 0 && charPos > 0); + + return charPos; } /** diff --git a/jdk/src/share/classes/java/lang/management/ManagementFactory.java b/jdk/src/share/classes/java/lang/management/ManagementFactory.java index 278ace1d7d1..384cf4cda06 100644 --- a/jdk/src/share/classes/java/lang/management/ManagementFactory.java +++ b/jdk/src/share/classes/java/lang/management/ManagementFactory.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,9 @@ import javax.management.StandardMBean;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Map;
 import java.security.AccessController;
 import java.security.Permission;
 import java.security.PrivilegedAction;
@@ -482,6 +484,11 @@ public class ManagementFactory {
                     }
                 }
             }
+            HashMap<ObjectName, DynamicMBean> dynmbeans =
+                ManagementFactoryHelper.getPlatformDynamicMBeans();
+            for (Map.Entry<ObjectName, DynamicMBean> e : dynmbeans.entrySet()) {
+                addDynamicMBean(platformMBeanServer, e.getValue(), e.getKey());
+            }
         }
         return platformMBeanServer;
     }
@@ -825,4 +832,24 @@ public class ManagementFactory {
         }
     }

+    /**
+     * Registers a DynamicMBean.
+     */
+    private static void addDynamicMBean(final MBeanServer mbs,
+                                        final DynamicMBean dmbean,
+                                        final ObjectName on) {
+        try {
+            AccessController.doPrivileged(new PrivilegedExceptionAction<Void>() {
+                @Override
+                public Void run() throws InstanceAlreadyExistsException,
+                                         MBeanRegistrationException,
+                                         NotCompliantMBeanException {
+                    mbs.registerMBean(dmbean, on);
+                    return null;
+                }
+            });
+        } catch (PrivilegedActionException e) {
+            throw new RuntimeException(e.getException());
+        }
+    }
 }
diff --git a/jdk/src/share/classes/java/net/HttpCookie.java b/jdk/src/share/classes/java/net/HttpCookie.java
index d5a36df507f..d265e284c26 100644
--- a/jdk/src/share/classes/java/net/HttpCookie.java
+++ b/jdk/src/share/classes/java/net/HttpCookie.java
@@ -128,8 +128,7 @@ public final class HttpCookie implements Cloneable {
      *          a {@code String} specifying the value of the cookie
      *
      * @throws IllegalArgumentException
-     *          if the cookie name contains illegal characters or it is one of
-     *          the tokens reserved for use by the cookie protocol
+     *          if the cookie name contains illegal characters
      * @throws NullPointerException
      *          if {@code name} is {@code null}
      *
@@ -142,7 +141,7 @@
     private HttpCookie(String name, String value, String header) {
         name = name.trim();
-        if (name.length() == 0 || !isToken(name)) {
+        if (name.length() == 0 || !isToken(name) || name.charAt(0) == '$') {
             throw new IllegalArgumentException("Illegal cookie name");
         }

@@ -170,9 +169,8 @@
      * @return  a List of cookie parsed from header line string
      *
      * @throws IllegalArgumentException
-     *             if header string violates the cookie specification's syntax, or
-     *             the cookie name contains illegal characters, or the cookie name
-     *             is one of the tokens reserved for use by the cookie protocol
+     *             if header string violates the cookie specification's syntax or
+     *             the cookie name contains illegal characters.
* @throws NullPointerException * if the header string is {@code null} */ diff --git a/jdk/src/share/classes/java/net/HttpURLPermission.java b/jdk/src/share/classes/java/net/HttpURLPermission.java index 52d6e79344a..55d37fda8ca 100644 --- a/jdk/src/share/classes/java/net/HttpURLPermission.java +++ b/jdk/src/share/classes/java/net/HttpURLPermission.java @@ -377,7 +377,7 @@ public final class HttpURLPermission extends Permission { throw new IllegalArgumentException ("unexpected URL scheme"); } if (!u.getSchemeSpecificPart().equals("*")) { - u = URI.create(scheme + "://" + u.getAuthority() + u.getPath()); + u = URI.create(scheme + "://" + u.getRawAuthority() + u.getRawPath()); } return u; } diff --git a/jdk/src/share/classes/java/nio/Buffer.java b/jdk/src/share/classes/java/nio/Buffer.java index 1c0591fb28f..24d3cf8c56a 100644 --- a/jdk/src/share/classes/java/nio/Buffer.java +++ b/jdk/src/share/classes/java/nio/Buffer.java @@ -25,6 +25,7 @@ package java.nio; +import java.util.Spliterator; /** * A container for data of a specific primitive type. @@ -173,6 +174,13 @@ package java.nio; public abstract class Buffer { + /** + * The characteristics of Spliterators that traverse and split elements + * maintained in Buffers. + */ + static final int SPLITERATOR_CHARACTERISTICS = + Spliterator.SIZED | Spliterator.SUBSIZED | Spliterator.ORDERED; + // Invariants: mark <= position <= limit <= capacity private int mark = -1; private int position = 0; diff --git a/jdk/src/share/classes/java/nio/ByteBufferAs-X-Buffer.java.template b/jdk/src/share/classes/java/nio/ByteBufferAs-X-Buffer.java.template index 1673da2bf45..d5be9669fb8 100644 --- a/jdk/src/share/classes/java/nio/ByteBufferAs-X-Buffer.java.template +++ b/jdk/src/share/classes/java/nio/ByteBufferAs-X-Buffer.java.template @@ -115,6 +115,12 @@ class ByteBufferAs$Type$Buffer$RW$$BO$ // package-private return Bits.get$Type$$BO$(bb, ix(checkIndex(i))); } +#if[streamableType] + $type$ getUnchecked(int i) { + return Bits.get$Type$$BO$(bb, ix(i)); + } +#end[streamableType] + #end[rw] public $Type$Buffer put($type$ x) { diff --git a/jdk/src/share/classes/java/nio/CharBufferSpliterator.java b/jdk/src/share/classes/java/nio/CharBufferSpliterator.java new file mode 100644 index 00000000000..19fd8a8f0ba --- /dev/null +++ b/jdk/src/share/classes/java/nio/CharBufferSpliterator.java @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this +* particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.nio; + +import java.util.Comparator; +import java.util.Spliterator; +import java.util.function.IntConsumer; + +/** + * A Spliterator.OfInt for sources that traverse and split elements + * maintained in a CharBuffer. + * + * @implNote + * The implementation is based on the code for the Array-based spliterators. + */ +class CharBufferSpliterator implements Spliterator.OfInt { + private final CharBuffer buffer; + private int index; // current index, modified on advance/split + private final int limit; + + CharBufferSpliterator(CharBuffer buffer) { + this(buffer, buffer.position(), buffer.limit()); + } + + CharBufferSpliterator(CharBuffer buffer, int origin, int limit) { + assert origin <= limit; + this.buffer = buffer; + this.index = (origin <= limit) ? origin : limit; + this.limit = limit; + } + + @Override + public OfInt trySplit() { + int lo = index, mid = (lo + limit) >>> 1; + return (lo >= mid) + ? null + : new CharBufferSpliterator(buffer, lo, index = mid); + } + + @Override + public void forEachRemaining(IntConsumer action) { + if (action == null) + throw new NullPointerException(); + CharBuffer cb = buffer; + int i = index; + int hi = limit; + index = hi; + while (i < hi) { + action.accept(cb.getUnchecked(i++)); + } + } + + @Override + public boolean tryAdvance(IntConsumer action) { + if (action == null) + throw new NullPointerException(); + if (index >= 0 && index < limit) { + action.accept(buffer.getUnchecked(index++)); + return true; + } + return false; + } + + @Override + public long estimateSize() { + return (long)(limit - index); + } + + @Override + public int characteristics() { + return Buffer.SPLITERATOR_CHARACTERISTICS; + } +} diff --git a/jdk/src/share/classes/java/nio/Direct-X-Buffer.java.template b/jdk/src/share/classes/java/nio/Direct-X-Buffer.java.template index d01e873697d..0523e2a2cd6 100644 --- a/jdk/src/share/classes/java/nio/Direct-X-Buffer.java.template +++ b/jdk/src/share/classes/java/nio/Direct-X-Buffer.java.template @@ -253,6 +253,12 @@ class Direct$Type$Buffer$RW$$BO$ return $fromBits$($swap$(unsafe.get$Swaptype$(ix(checkIndex(i))))); } +#if[streamableType] + $type$ getUnchecked(int i) { + return $fromBits$($swap$(unsafe.get$Swaptype$(ix(i)))); + } +#end[streamableType] + public $Type$Buffer get($type$[] dst, int offset, int length) { #if[rw] if ((length << $LG_BYTES_PER_VALUE$) > Bits.JNI_COPY_TO_ARRAY_THRESHOLD) { diff --git a/jdk/src/share/classes/java/nio/Heap-X-Buffer.java.template b/jdk/src/share/classes/java/nio/Heap-X-Buffer.java.template index cecc1bacf9a..54f7164b24c 100644 --- a/jdk/src/share/classes/java/nio/Heap-X-Buffer.java.template +++ b/jdk/src/share/classes/java/nio/Heap-X-Buffer.java.template @@ -139,6 +139,12 @@ class Heap$Type$Buffer$RW$ return hb[ix(checkIndex(i))]; } +#if[streamableType] + $type$ getUnchecked(int i) { + return hb[ix(i)]; + } +#end[streamableType] + public $Type$Buffer get($type$[] dst, int offset, int length) { checkBounds(offset, length, dst.length); if (length > remaining()) diff --git a/jdk/src/share/classes/java/nio/StringCharBuffer.java b/jdk/src/share/classes/java/nio/StringCharBuffer.java index 47b6bae3228..a277ef559bd 100644 --- a/jdk/src/share/classes/java/nio/StringCharBuffer.java +++ b/jdk/src/share/classes/java/nio/StringCharBuffer.java @@ -77,6 +77,10 @@ class StringCharBuffer // package-private return 
str.charAt(checkIndex(index) + offset); } + char getUnchecked(int index) { + return str.charAt(index + offset); + } + // ## Override bulk get methods for better performance public final CharBuffer put(char c) { diff --git a/jdk/src/share/classes/java/nio/X-Buffer.java.template b/jdk/src/share/classes/java/nio/X-Buffer.java.template index a727b000bf1..475818d300d 100644 --- a/jdk/src/share/classes/java/nio/X-Buffer.java.template +++ b/jdk/src/share/classes/java/nio/X-Buffer.java.template @@ -30,6 +30,11 @@ package java.nio; #if[char] import java.io.IOException; #end[char] +#if[streamableType] +import java.util.Spliterator; +import java.util.stream.StreamSupport; +import java.util.stream.$Streamtype$Stream; +#end[streamableType] /** * $A$ $type$ buffer. @@ -589,6 +594,19 @@ public abstract class $Type$Buffer */ public abstract $type$ get(int index); +#if[streamableType] + /** + * Absolute get method. Reads the $type$ at the given + * index without any validation of the index. + * + * @param index + * The index from which the $type$ will be read + * + * @return The $type$ at the given index + */ + abstract $type$ getUnchecked(int index); // package-private +#end[streamableType] + /** * Absolute put method  (optional operation). * @@ -1458,4 +1476,16 @@ public abstract class $Type$Buffer #end[byte] +#if[streamableType] + +#if[char] + @Override +#end[char] + public $Streamtype$Stream $type$s() { + return StreamSupport.$streamtype$Stream(() -> new $Type$BufferSpliterator(this), + Buffer.SPLITERATOR_CHARACTERISTICS); + } + +#end[streamableType] + } diff --git a/jdk/src/share/classes/java/security/AccessControlContext.java b/jdk/src/share/classes/java/security/AccessControlContext.java index ba088af8716..9ca78fe9495 100644 --- a/jdk/src/share/classes/java/security/AccessControlContext.java +++ b/jdk/src/share/classes/java/security/AccessControlContext.java @@ -85,6 +85,15 @@ public final class AccessControlContext { private DomainCombiner combiner = null; + // limited privilege scope + private Permission permissions[]; + private AccessControlContext parent; + private boolean isWrapped; + + // is constrained by limited privilege scope? + private boolean isLimited; + private ProtectionDomain limitedContext[]; + private static boolean debugInit = false; private static Debug debug = null; @@ -178,14 +187,79 @@ public final class AccessControlContext { /** * package private for AccessController + * + * This "argument wrapper" context will be passed as the actual context + * parameter on an internal doPrivileged() call used in the implementation. */ - AccessControlContext(ProtectionDomain context[], DomainCombiner combiner) { + AccessControlContext(ProtectionDomain caller, DomainCombiner combiner, + AccessControlContext parent, AccessControlContext context, + Permission[] perms) + { + /* + * Combine the domains from the doPrivileged() context into our + * wrapper context, if necessary. + */ + ProtectionDomain[] callerPDs = null; + if (caller != null) { + callerPDs = new ProtectionDomain[] { caller }; + } if (context != null) { - this.context = context.clone(); + if (combiner != null) { + this.context = combiner.combine(callerPDs, context.context); + } else { + this.context = combine(callerPDs, context.context); + } + } else { + /* + * Call combiner even if there is seemingly nothing to combine. 
+ */ + if (combiner != null) { + this.context = combiner.combine(callerPDs, null); + } else { + this.context = combine(callerPDs, null); + } } this.combiner = combiner; + + Permission[] tmp = null; + if (perms != null) { + tmp = new Permission[perms.length]; + for (int i=0; i < perms.length; i++) { + if (perms[i] == null) { + throw new NullPointerException("permission can't be null"); + } + + /* + * An AllPermission argument is equivalent to calling + * doPrivileged() without any limit permissions. + */ + if (perms[i].getClass() == AllPermission.class) { + parent = null; + } + tmp[i] = perms[i]; + } + } + + /* + * For a doPrivileged() with limited privilege scope, initialize + * the relevant fields. + * + * The limitedContext field contains the union of all domains which + * are enclosed by this limited privilege scope. In other words, + * it contains all of the domains which could potentially be checked + * if none of the limiting permissions implied a requested permission. + */ + if (parent != null) { + this.limitedContext = combine(parent.context, parent.limitedContext); + this.isLimited = true; + this.isWrapped = true; + this.permissions = tmp; + this.parent = parent; + this.privilegedContext = context; // used in checkPermission2() + } } + /** * package private constructor for AccessController.getContext() */ @@ -260,6 +334,13 @@ public final class AccessControlContext { if (sm != null) { sm.checkPermission(SecurityConstants.GET_COMBINER_PERMISSION); } + return getCombiner(); + } + + /** + * package private for AccessController + */ + DomainCombiner getCombiner() { return combiner; } @@ -335,8 +416,10 @@ public final class AccessControlContext { or the first domain was a Privileged system domain. This is to make the common case for system code very fast */ - if (context == null) + if (context == null) { + checkPermission2(perm); return; + } for (int i=0; i< context.length; i++) { if (context[i] != null && !context[i].implies(perm)) { @@ -370,20 +453,108 @@ public final class AccessControlContext { debug.println("access allowed "+perm); } - return; + checkPermission2(perm); + } + + /* + * Check the domains associated with the limited privilege scope. + */ + private void checkPermission2(Permission perm) { + if (!isLimited) { + return; + } + + /* + * Check the doPrivileged() context parameter, if present. + */ + if (privilegedContext != null) { + privilegedContext.checkPermission2(perm); + } + + /* + * Ignore the limited permissions and parent fields of a wrapper + * context since they were already carried down into the unwrapped + * context. + */ + if (isWrapped) { + return; + } + + /* + * Try to match any limited privilege scope. + */ + if (permissions != null) { + Class permClass = perm.getClass(); + for (int i=0; i < permissions.length; i++) { + Permission limit = permissions[i]; + if (limit.getClass().equals(permClass) && limit.implies(perm)) { + return; + } + } + } + + /* + * Check the limited privilege scope up the call stack or the inherited + * parent thread call stack of this ACC. + */ + if (parent != null) { + /* + * As an optimization, if the parent context is the inherited call + * stack context from a parent thread then checking the protection + * domains of the parent context is redundant since they have + * already been merged into the child thread's context by + * optimize(). When parent is set to an inherited context this + * context was not directly created by a limited scope + * doPrivileged() and it does not have its own limited permissions. 
+ */ + if (permissions == null) { + parent.checkPermission2(perm); + } else { + parent.checkPermission(perm); + } + } } /** * Take the stack-based context (this) and combine it with the - * privileged or inherited context, if need be. + * privileged or inherited context, if need be. Any limited + * privilege scope is flagged regardless of whether the assigned + * context comes from an immediately enclosing limited doPrivileged(). + * The limited privilege scope can indirectly flow from the inherited + * parent thread or an assigned context previously captured by getContext(). */ AccessControlContext optimize() { // the assigned (privileged or inherited) context AccessControlContext acc; + DomainCombiner combiner = null; + AccessControlContext parent = null; + Permission[] permissions = null; + if (isPrivileged) { acc = privilegedContext; + if (acc != null) { + /* + * If the context is from a limited scope doPrivileged() then + * copy the permissions and parent fields out of the wrapper + * context that was created to hold them. + */ + if (acc.isWrapped) { + permissions = acc.permissions; + parent = acc.parent; + } + } } else { acc = AccessController.getInheritedAccessControlContext(); + if (acc != null) { + /* + * If the inherited context is constrained by a limited scope + * doPrivileged() then set it as our parent so we will process + * the non-domain-related state. + */ + if (acc.isLimited) { + parent = acc; + } + } } // this.context could be null if only system code is on the stack; @@ -393,53 +564,98 @@ public final class AccessControlContext { // acc.context could be null if only system code was involved; // in that case, ignore the assigned context boolean skipAssigned = (acc == null || acc.context == null); + ProtectionDomain[] assigned = (skipAssigned) ? 
null : acc.context; + ProtectionDomain[] pd; + + // if there is no enclosing limited privilege scope on the stack or + // inherited from a parent thread + boolean skipLimited = ((acc == null || !acc.isWrapped) && parent == null); if (acc != null && acc.combiner != null) { // let the assigned acc's combiner do its thing - return goCombiner(context, acc); + if (getDebug() != null) { + debug.println("AccessControlContext invoking the Combiner"); + } + + // No need to clone current and assigned.context + // combine() will not update them + combiner = acc.combiner; + pd = combiner.combine(context, assigned); + } else { + if (skipStack) { + if (skipAssigned) { + calculateFields(acc, parent, permissions); + return this; + } else if (skipLimited) { + return acc; + } + } else if (assigned != null) { + if (skipLimited) { + // optimization: if there is a single stack domain and + // that domain is already in the assigned context; no + // need to combine + if (context.length == 1 && context[0] == assigned[0]) { + return acc; + } + } + } + + pd = combine(context, assigned); + if (skipLimited && !skipAssigned && pd == assigned) { + return acc; + } else if (skipAssigned && pd == context) { + calculateFields(acc, parent, permissions); + return this; + } } - // optimization: if neither have contexts; return acc if possible - // rather than this, because acc might have a combiner - if (skipAssigned && skipStack) { - return this; - } + // Reuse existing ACC + this.context = pd; + this.combiner = combiner; + this.isPrivileged = false; - // optimization: if there is no stack context; there is no reason - // to compress the assigned context, it already is compressed - if (skipStack) { - return acc; - } + calculateFields(acc, parent, permissions); + return this; + } - int slen = context.length; + + /* + * Combine the current (stack) and assigned domains. + */ + private static ProtectionDomain[] combine(ProtectionDomain[]current, + ProtectionDomain[] assigned) { + + // current could be null if only system code is on the stack; + // in that case, ignore the stack context + boolean skipStack = (current == null); + + // assigned could be null if only system code was involved; + // in that case, ignore the assigned context + boolean skipAssigned = (assigned == null); + + int slen = (skipStack) ? 0 : current.length; // optimization: if there is no assigned context and the stack length // is less then or equal to two; there is no reason to compress the // stack context, it already is if (skipAssigned && slen <= 2) { - return this; + return current; } - // optimization: if there is a single stack domain and that domain - // is already in the assigned context; no need to combine - if ((slen == 1) && (context[0] == acc.context[0])) { - return acc; - } - - int n = (skipAssigned) ? 0 : acc.context.length; + int n = (skipAssigned) ? 
0 : assigned.length; // now we combine both of them, and create a new context ProtectionDomain pd[] = new ProtectionDomain[slen + n]; // first copy in the assigned context domains, no need to compress if (!skipAssigned) { - System.arraycopy(acc.context, 0, pd, 0, n); + System.arraycopy(assigned, 0, pd, 0, n); } // now add the stack context domains, discarding nulls and duplicates outer: - for (int i = 0; i < context.length; i++) { - ProtectionDomain sd = context[i]; + for (int i = 0; i < slen; i++) { + ProtectionDomain sd = current[i]; if (sd != null) { for (int j = 0; j < n; j++) { if (sd == pd[j]) { @@ -453,54 +669,48 @@ public final class AccessControlContext { // if length isn't equal, we need to shorten the array if (n != pd.length) { // optimization: if we didn't really combine anything - if (!skipAssigned && n == acc.context.length) { - return acc; + if (!skipAssigned && n == assigned.length) { + return assigned; } else if (skipAssigned && n == slen) { - return this; + return current; } ProtectionDomain tmp[] = new ProtectionDomain[n]; System.arraycopy(pd, 0, tmp, 0, n); pd = tmp; } - // return new AccessControlContext(pd, false); - - // Reuse existing ACC - - this.context = pd; - this.combiner = null; - this.isPrivileged = false; - - return this; + return pd; } - private AccessControlContext goCombiner(ProtectionDomain[] current, - AccessControlContext assigned) { - // the assigned ACC's combiner is not null -- - // let the combiner do its thing + /* + * Calculate the additional domains that could potentially be reached via + * limited privilege scope. Mark the context as being subject to limited + * privilege scope unless the reachable domains (if any) are already + * contained in this domain context (in which case any limited + * privilege scope checking would be redundant). + */ + private void calculateFields(AccessControlContext assigned, + AccessControlContext parent, Permission[] permissions) + { + ProtectionDomain[] parentLimit = null; + ProtectionDomain[] assignedLimit = null; + ProtectionDomain[] newLimit; - // XXX we could add optimizations to 'current' here ... - - if (getDebug() != null) { - debug.println("AccessControlContext invoking the Combiner"); + parentLimit = (parent != null)? parent.limitedContext: null; + assignedLimit = (assigned != null)? assigned.limitedContext: null; + newLimit = combine(parentLimit, assignedLimit); + if (newLimit != null) { + if (context == null || !containsAllPDs(newLimit, context)) { + this.limitedContext = newLimit; + this.permissions = permissions; + this.parent = parent; + this.isLimited = true; + } } - - // No need to clone current and assigned.context - // combine() will not update them - ProtectionDomain[] combinedPds = assigned.combiner.combine( - current, assigned.context); - - // return new AccessControlContext(combinedPds, assigned.combiner); - - // Reuse existing ACC - this.context = combinedPds; - this.combiner = assigned.combiner; - this.isPrivileged = false; - - return this; } + /** * Checks two AccessControlContext objects for equality. 
* Checks that obj is @@ -520,31 +730,131 @@ public final class AccessControlContext { AccessControlContext that = (AccessControlContext) obj; - - if (context == null) { - return (that.context == null); - } - - if (that.context == null) + if (!equalContext(that)) return false; - if (!(this.containsAllPDs(that) && that.containsAllPDs(this))) - return false; - - if (this.combiner == null) - return (that.combiner == null); - - if (that.combiner == null) - return false; - - if (!this.combiner.equals(that.combiner)) + if (!equalLimitedContext(that)) return false; return true; } - private boolean containsAllPDs(AccessControlContext that) { + /* + * Compare for equality based on state that is free of limited + * privilege complications. + */ + private boolean equalContext(AccessControlContext that) { + if (!equalPDs(this.context, that.context)) + return false; + + if (this.combiner == null && that.combiner != null) + return false; + + if (this.combiner != null && !this.combiner.equals(that.combiner)) + return false; + + return true; + } + + private boolean equalPDs(ProtectionDomain[] a, ProtectionDomain[] b) { + if (a == null) { + return (b == null); + } + + if (b == null) + return false; + + if (!(containsAllPDs(a, b) && containsAllPDs(b, a))) + return false; + + return true; + } + + /* + * Compare for equality based on state that is captured during a + * call to AccessController.getContext() when a limited privilege + * scope is in effect. + */ + private boolean equalLimitedContext(AccessControlContext that) { + if (that == null) + return false; + + /* + * If neither instance has limited privilege scope then we're done. + */ + if (!this.isLimited && !that.isLimited) + return true; + + /* + * If only one instance has limited privilege scope then we're done. + */ + if (!(this.isLimited && that.isLimited)) + return false; + + /* + * Wrapped instances should never escape outside the implementation + * this class and AccessController so this will probably never happen + * but it only makes any sense to compare if they both have the same + * isWrapped state. + */ + if ((this.isWrapped && !that.isWrapped) || + (!this.isWrapped && that.isWrapped)) { + return false; + } + + if (this.permissions == null && that.permissions != null) + return false; + + if (this.permissions != null && that.permissions == null) + return false; + + if (!(this.containsAllLimits(that) && that.containsAllLimits(this))) + return false; + + /* + * Skip through any wrapped contexts. + */ + AccessControlContext thisNextPC = getNextPC(this); + AccessControlContext thatNextPC = getNextPC(that); + + /* + * The protection domains and combiner of a privilegedContext are + * not relevant because they have already been included in the context + * of this instance by optimize() so we only care about any limited + * privilege state they may have. + */ + if (thisNextPC == null && thatNextPC != null && thatNextPC.isLimited) + return false; + + if (thisNextPC != null && !thisNextPC.equalLimitedContext(thatNextPC)) + return false; + + if (this.parent == null && that.parent != null) + return false; + + if (this.parent != null && !this.parent.equals(that.parent)) + return false; + + return true; + } + + /* + * Follow the privilegedContext link making our best effort to skip + * through any wrapper contexts. 
+ */ + private static AccessControlContext getNextPC(AccessControlContext acc) { + while (acc != null && acc.privilegedContext != null) { + acc = acc.privilegedContext; + if (!acc.isWrapped) + return acc; + } + return null; + } + + private static boolean containsAllPDs(ProtectionDomain[] thisContext, + ProtectionDomain[] thatContext) { boolean match = false; + // // ProtectionDomains within an ACC currently cannot be null // and this is enforced by the constructor and the various @@ -552,17 +862,17 @@ public final class AccessControlContext { // to support the notion of a null PD and therefore this logic continues // to support that notion. ProtectionDomain thisPd; - for (int i = 0; i < context.length; i++) { + for (int i = 0; i < thisContext.length; i++) { match = false; - if ((thisPd = context[i]) == null) { - for (int j = 0; (j < that.context.length) && !match; j++) { - match = (that.context[j] == null); + if ((thisPd = thisContext[i]) == null) { + for (int j = 0; (j < thatContext.length) && !match; j++) { + match = (thatContext[j] == null); } } else { Class thisPdClass = thisPd.getClass(); ProtectionDomain thatPd; - for (int j = 0; (j < that.context.length) && !match; j++) { - thatPd = that.context[j]; + for (int j = 0; (j < thatContext.length) && !match; j++) { + thatPd = thatContext[j]; // Class check required to avoid PD exposure (4285406) match = (thatPd != null && @@ -573,6 +883,29 @@ public final class AccessControlContext { } return match; } + + private boolean containsAllLimits(AccessControlContext that) { + boolean match = false; + Permission thisPerm; + + if (this.permissions == null && that.permissions == null) + return true; + + for (int i = 0; i < this.permissions.length; i++) { + Permission limit = this.permissions[i]; + Class limitClass = limit.getClass(); + match = false; + for (int j = 0; (j < that.permissions.length) && !match; j++) { + Permission perm = that.permissions[j]; + match = (limitClass.equals(perm.getClass()) && + limit.equals(perm)); + } + if (!match) return false; + } + return match; + } + + /** * Returns the hash code value for this context. The hash code * is computed by exclusive or-ing the hash code of all the protection @@ -591,6 +924,7 @@ public final class AccessControlContext { if (context[i] != null) hashCode ^= context[i].hashCode(); } + return hashCode; } } diff --git a/jdk/src/share/classes/java/security/AccessController.java b/jdk/src/share/classes/java/security/AccessController.java index e7fbe737a8e..6eac9a619b2 100644 --- a/jdk/src/share/classes/java/security/AccessController.java +++ b/jdk/src/share/classes/java/security/AccessController.java @@ -82,9 +82,15 @@ import sun.reflect.Reflection; * else if (caller i is marked as privileged) { * if (a context was specified in the call to doPrivileged) * context.checkPermission(permission) - * return; + * if (limited permissions were specified in the call to doPrivileged) { + * for (each limited permission) { + * if (the limited permission implies the requested permission) + * return; + * } + * } else + * return; * } - * }; + * } * * // Next, check the context inherited when the thread was created. * // Whenever a new thread is created, the AccessControlContext at @@ -101,11 +107,16 @@ import sun.reflect.Reflection; * was marked as "privileged" via a doPrivileged * call without a context argument (see below for information about a * context argument). 
If that caller's domain has the - * specified permission, no further checking is done and + * specified permission and at least one limiting permission argument (if any) + * implies the requested permission, no further checking is done and * checkPermission * returns quietly, indicating that the requested access is allowed. * If that domain does not have the specified permission, an exception - * is thrown, as usual. + * is thrown, as usual. If the caller's domain has the specified permission + * but it is not implied by any limiting permission arguments given in the call + * to doPrivileged, then the permission checking continues + * until there are no more callers or another doPrivileged + * call matches the requested permission and returns normally. * *
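To make the algorithm above concrete, here is a minimal sketch of the limited doPrivileged semantics (the class name, paths and permissions are illustrative only, and whether a check succeeds depends on the policy granting the relevant domains the permission):

    import java.io.FilePermission;
    import java.security.AccessControlContext;
    import java.security.AccessControlException;
    import java.security.AccessController;
    import java.security.Permission;
    import java.security.PrivilegedAction;
    import java.util.PropertyPermission;

    public class LimitedPrivilegeSketch {
        public static void main(String[] args) {
            AccessControlContext acc = AccessController.getContext();

            AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
                // Implied by the FilePermission limit below: the stack walk
                // stops at this privileged frame, so only the caller's domain
                // (and the domains in acc) must have the permission.
                check(new FilePermission("/tmp/scratch.txt", "read"));

                // Not implied by the limit: checking continues past this
                // frame, so every caller further up the stack is checked too.
                check(new PropertyPermission("user.home", "read"));
                return null;
            }, acc, new FilePermission("/tmp/-", "read"));
        }

        static void check(Permission p) {
            try {
                AccessController.checkPermission(p);
                System.out.println("granted: " + p);
            } catch (AccessControlException e) {
                System.out.println("denied:  " + p);
            }
        }
    }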

The normal use of the "privileged" feature is as follows. If you * don't need to return a value from within the "privileged" block, do @@ -180,6 +191,9 @@ import sun.reflect.Reflection; * *
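The pattern this paragraph refers to, kept as a minimal self-contained sketch (the property names are examples only):

    import java.security.AccessController;
    import java.security.PrivilegedAction;

    public class BasicPrivilegedSketch {
        public static void main(String[] args) {
            // No return value needed: parameterize with Void and return null.
            AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
                System.setProperty("example.flag", "on"); // privileged code here
                return null;
            });

            // Returning a value: doPrivileged returns whatever run() returns.
            String home = AccessController.doPrivileged(
                    (PrivilegedAction<String>) () -> System.getProperty("user.home"));
            System.out.println(home);
        }
    }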

Be *very* careful in your use of the "privileged" construct, and * always remember to make the privileged code section as small as possible. + * You can pass Permission arguments to further limit the + * scope of the "privilege" (see below). + * * *

Note that checkPermission always performs security checks * within the context of the currently executing thread. @@ -215,7 +229,9 @@ import sun.reflect.Reflection; * *
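Because the check always runs against the current thread, a context that should be enforced somewhere else has to be captured and handed over explicitly; a minimal sketch (the outcome depends on the policy in effect):

    import java.security.AccessControlContext;
    import java.security.AccessControlException;
    import java.security.AccessController;
    import java.util.PropertyPermission;

    public class ContextHandoffSketch {
        public static void main(String[] args) throws InterruptedException {
            // Snapshot of this thread's calling context (now including any
            // limited privilege scope, per the change to getContext below).
            final AccessControlContext acc = AccessController.getContext();

            Thread worker = new Thread(() -> {
                try {
                    // Checked against the captured snapshot's domains,
                    // not just the worker thread's own stack.
                    acc.checkPermission(new PropertyPermission("user.home", "read"));
                    System.out.println("captured context grants it");
                } catch (AccessControlException e) {
                    System.out.println("captured context denies it");
                }
            });
            worker.start();
            worker.join();
        }
    }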

There are also times where you don't know a priori which permissions * to check the context against. In these cases you can use the - * doPrivileged method that takes a context: + * doPrivileged method that takes a context. You can also limit the scope + * of the privileged code by passing additional Permission + * parameters. * *

 {@code
  * somemethod() {
@@ -223,12 +239,21 @@ import sun.reflect.Reflection;
  *         public Object run() {
  *             // Code goes here. Any permission checks within this
  *             // run method will require that the intersection of the
- *             // callers protection domain and the snapshot's
- *             // context have the desired permission.
+ *             // caller's protection domain and the snapshot's
+ *             // context have the desired permission. If a requested
+ *             // permission is not implied by the limiting FilePermission
+ *             // argument then checking of the thread continues beyond the
+ *             // caller of doPrivileged.
  *         }
- *     }, acc);
+ *     }, acc, new FilePermission("/temp/*", "read"));
  *     ...normal code here...
  * }}
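The same example spelled out as compilable code (note the quoted FilePermission action string; the class and method names are hypothetical, and the AllPermission and zero-length cases restate the rules described just below):

    import java.io.FilePermission;
    import java.security.AccessControlContext;
    import java.security.AccessController;
    import java.security.AllPermission;
    import java.security.Permission;
    import java.security.PrivilegedAction;

    public class LimitedSnapshotSketch {
        Object someMethod(AccessControlContext acc) {
            // Privilege limited to reading files under /temp.
            Object r = AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
                // Code goes here; checks stop at this frame only for
                // permissions implied by the FilePermission limit below.
                return null;
            }, acc, new FilePermission("/temp/*", "read"));

            // Limiting with AllPermission behaves like the plain
            // two-argument doPrivileged(action, acc).
            AccessController.doPrivileged((PrivilegedAction<Object>) () -> null,
                    acc, new AllPermission());

            // A zero-length Permission array disables the privilege:
            // checking always continues beyond this caller.
            AccessController.doPrivileged((PrivilegedAction<Object>) () -> null,
                    acc, new Permission[0]);

            return r;
        }
    }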
+ *

Passing a limiting Permission argument of an instance of + * AllPermission is equivalent to calling the corresponding + * doPrivileged method without limiting Permission + * arguments. Passing a zero-length array of Permission disables + * the code privileges so that checking always continues beyond the caller of + * that doPrivileged method. * * @see AccessControlContext * @@ -334,6 +359,112 @@ public final class AccessController { public static native T doPrivileged(PrivilegedAction action, AccessControlContext context); + + /** + * Performs the specified PrivilegedAction with privileges + * enabled and restricted by the specified + * AccessControlContext and with a privilege scope limited + * by specified Permission arguments. + * + * The action is performed with the intersection of the permissions + * possessed by the caller's protection domain, and those possessed + * by the domains represented by the specified + * AccessControlContext. + *

+ * If the action's run method throws an (unchecked) exception, + * it will propagate through this method. + * + * @param action the action to be performed. + * @param context an access control context + * representing the restriction to be applied to the + * caller's domain's privileges before performing + * the specified action. If the context is + * null, + * then no additional restriction is applied. + * @param perms the Permission arguments which limit the + * scope of the caller's privileges. The number of arguments + * is variable. + * + * @return the value returned by the action's run method. + * + * @throws NullPointerException if action or perms or any element of + * perms is null + * + * @see #doPrivileged(PrivilegedAction) + * @see #doPrivileged(PrivilegedExceptionAction,AccessControlContext) + * + * @since 1.8 + */ + @CallerSensitive + public static T doPrivileged(PrivilegedAction action, + AccessControlContext context, Permission... perms) { + + AccessControlContext parent = getContext(); + if (perms == null) { + throw new NullPointerException("null permissions parameter"); + } + Class caller = Reflection.getCallerClass(); + return AccessController.doPrivileged(action, createWrapper(null, + caller, parent, context, perms)); + } + + + /** + * Performs the specified PrivilegedAction with privileges + * enabled and restricted by the specified + * AccessControlContext and with a privilege scope limited + * by specified Permission arguments. + * + * The action is performed with the intersection of the permissions + * possessed by the caller's protection domain, and those possessed + * by the domains represented by the specified + * AccessControlContext. + *

+ * If the action's run method throws an (unchecked) exception, + * it will propagate through this method. + * + *

This method preserves the current AccessControlContext's + * DomainCombiner (which may be null) while the action is performed. + * + * @param action the action to be performed. + * @param context an access control context + * representing the restriction to be applied to the + * caller's domain's privileges before performing + * the specified action. If the context is + * null, + * then no additional restriction is applied. + * @param perms the Permission arguments which limit the + * scope of the caller's privileges. The number of arguments + * is variable. + * + * @return the value returned by the action's run method. + * + * @throws NullPointerException if action or perms or any element of + * perms is null + * + * @see #doPrivileged(PrivilegedAction) + * @see #doPrivileged(PrivilegedExceptionAction,AccessControlContext) + * @see java.security.DomainCombiner + * + * @since 1.8 + */ + @CallerSensitive + public static T doPrivilegedWithCombiner(PrivilegedAction action, + AccessControlContext context, Permission... perms) { + + AccessControlContext parent = getContext(); + DomainCombiner dc = parent.getCombiner(); + if (dc == null && context != null) { + dc = context.getCombiner(); + } + if (perms == null) { + throw new NullPointerException("null permissions parameter"); + } + Class caller = Reflection.getCallerClass(); + return AccessController.doPrivileged(action, createWrapper(dc, caller, + parent, context, perms)); + } + /** * Performs the specified PrivilegedExceptionAction with * privileges enabled. The action is performed with all of the @@ -408,6 +539,22 @@ public final class AccessController { private static AccessControlContext preserveCombiner(DomainCombiner combiner, Class caller) { + return createWrapper(combiner, caller, null, null, null); + } + + /** + * Create a wrapper to contain the limited privilege scope data. + */ + private static AccessControlContext + createWrapper(DomainCombiner combiner, Class caller, + AccessControlContext parent, AccessControlContext context, + Permission[] perms) + { + return new AccessControlContext(getCallerPD(caller), combiner, parent, + context, perms); + } + + private static ProtectionDomain getCallerPD(final Class caller) { ProtectionDomain callerPd = doPrivileged (new PrivilegedAction() { public ProtectionDomain run() { @@ -415,18 +562,9 @@ public final class AccessController { } }); - // perform 'combine' on the caller of doPrivileged, - // even if the caller is from the bootclasspath - ProtectionDomain[] pds = new ProtectionDomain[] {callerPd}; - if (combiner == null) { - return new AccessControlContext(pds); - } else { - return new AccessControlContext(combiner.combine(pds, null), - combiner); - } + return callerPd; } - /** * Performs the specified PrivilegedExceptionAction with * privileges enabled and restricted by the specified @@ -454,7 +592,7 @@ public final class AccessController { * @exception NullPointerException if the action is null * * @see #doPrivileged(PrivilegedAction) - * @see #doPrivileged(PrivilegedExceptionAction,AccessControlContext) + * @see #doPrivileged(PrivilegedAction,AccessControlContext) */ @CallerSensitive public static native T @@ -462,6 +600,118 @@ public final class AccessController { AccessControlContext context) throws PrivilegedActionException; + + /** + * Performs the specified PrivilegedExceptionAction with + * privileges enabled and restricted by the specified + * AccessControlContext and with a privilege scope limited by + * specified Permission arguments. 
+ * + * The action is performed with the intersection of the permissions + * possessed by the caller's protection domain, and those possessed + * by the domains represented by the specified + * AccessControlContext. + *

+ * If the action's run method throws an (unchecked) exception, + * it will propagate through this method. + * + * @param action the action to be performed. + * @param context an access control context + * representing the restriction to be applied to the + * caller's domain's privileges before performing + * the specified action. If the context is + * null, + * then no additional restriction is applied. + * @param perms the Permission arguments which limit the + * scope of the caller's privileges. The number of arguments + * is variable. + * + * @return the value returned by the action's run method. + * + * @throws PrivilegedActionException if the specified action's + * run method threw a checked exception + * @throws NullPointerException if action or perms or any element of + * perms is null + * + * @see #doPrivileged(PrivilegedAction) + * @see #doPrivileged(PrivilegedAction,AccessControlContext) + * + * @since 1.8 + */ + @CallerSensitive + public static T doPrivileged(PrivilegedExceptionAction action, + AccessControlContext context, Permission... perms) + throws PrivilegedActionException + { + AccessControlContext parent = getContext(); + if (perms == null) { + throw new NullPointerException("null permissions parameter"); + } + Class caller = Reflection.getCallerClass(); + return AccessController.doPrivileged(action, createWrapper(null, caller, parent, context, perms)); + } + + + /** + * Performs the specified PrivilegedExceptionAction with + * privileges enabled and restricted by the specified + * AccessControlContext and with a privilege scope limited by + * specified Permission arguments. + * + * The action is performed with the intersection of the permissions + * possessed by the caller's protection domain, and those possessed + * by the domains represented by the specified + * AccessControlContext. + *

+ * If the action's run method throws an (unchecked) exception, + * it will propagate through this method. + * + *

This method preserves the current AccessControlContext's + * DomainCombiner (which may be null) while the action is performed. + * + * @param action the action to be performed. + * @param context an access control context + * representing the restriction to be applied to the + * caller's domain's privileges before performing + * the specified action. If the context is + * null, + * then no additional restriction is applied. + * @param perms the Permission arguments which limit the + * scope of the caller's privileges. The number of arguments + * is variable. + * + * @return the value returned by the action's run method. + * + * @throws PrivilegedActionException if the specified action's + * run method threw a checked exception + * @throws NullPointerException if action or perms or any element of + * perms is null + * + * @see #doPrivileged(PrivilegedAction) + * @see #doPrivileged(PrivilegedAction,AccessControlContext) + * @see java.security.DomainCombiner + * + * @since 1.8 + */ + @CallerSensitive + public static T doPrivilegedWithCombiner(PrivilegedExceptionAction action, + AccessControlContext context, + Permission... perms) + throws PrivilegedActionException + { + AccessControlContext parent = getContext(); + DomainCombiner dc = parent.getCombiner(); + if (dc == null && context != null) { + dc = context.getCombiner(); + } + if (perms == null) { + throw new NullPointerException("null permissions parameter"); + } + Class caller = Reflection.getCallerClass(); + return AccessController.doPrivileged(action, createWrapper(dc, caller, + parent, context, perms)); + } + /** * Returns the AccessControl context. i.e., it gets * the protection domains of all the callers on the stack, @@ -474,6 +724,7 @@ public final class AccessController { private static native AccessControlContext getStackAccessControlContext(); + /** * Returns the "inherited" AccessControl context. This is the context * that existed when the thread was created. Package private so @@ -484,9 +735,9 @@ public final class AccessController { /** * This method takes a "snapshot" of the current calling context, which - * includes the current Thread's inherited AccessControlContext, - * and places it in an AccessControlContext object. This context may then - * be checked at a later point, possibly in another thread. + * includes the current Thread's inherited AccessControlContext and any + * limited privilege scope, and places it in an AccessControlContext object. + * This context may then be checked at a later point, possibly in another thread. * * @see AccessControlContext * @@ -524,7 +775,7 @@ public final class AccessController { */ public static void checkPermission(Permission perm) - throws AccessControlException + throws AccessControlException { //System.err.println("checkPermission "+perm); //Thread.currentThread().dumpStack(); diff --git a/jdk/src/share/classes/java/security/DigestOutputStream.java b/jdk/src/share/classes/java/security/DigestOutputStream.java index 1307bdff344..31b77259ea6 100644 --- a/jdk/src/share/classes/java/security/DigestOutputStream.java +++ b/jdk/src/share/classes/java/security/DigestOutputStream.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 1999, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -112,10 +112,10 @@ public class DigestOutputStream extends FilterOutputStream { * @see MessageDigest#update(byte) */ public void write(int b) throws IOException { + out.write(b); if (on) { digest.update((byte)b); } - out.write(b); } /** @@ -142,10 +142,10 @@ public class DigestOutputStream extends FilterOutputStream { * @see MessageDigest#update(byte[], int, int) */ public void write(byte[] b, int off, int len) throws IOException { + out.write(b, off, len); if (on) { digest.update(b, off, len); } - out.write(b, off, len); } /** diff --git a/jdk/src/share/classes/java/util/HashMap.java b/jdk/src/share/classes/java/util/HashMap.java index 5e79498da9f..c9b106ff759 100644 --- a/jdk/src/share/classes/java/util/HashMap.java +++ b/jdk/src/share/classes/java/util/HashMap.java @@ -26,6 +26,8 @@ package java.util; import java.io.*; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; import java.util.function.Consumer; import java.util.function.BiFunction; import java.util.function.Function; @@ -126,7 +128,7 @@ import java.util.function.Function; */ public class HashMap - extends AbstractMap + extends AbstractMap implements Map, Cloneable, Serializable { @@ -150,12 +152,12 @@ public class HashMap /** * An empty table instance to share when the table is not inflated. */ - static final Entry[] EMPTY_TABLE = {}; + static final Object[] EMPTY_TABLE = {}; /** * The table, resized as necessary. Length MUST Always be a power of two. */ - transient Entry[] table = EMPTY_TABLE; + transient Object[] table = EMPTY_TABLE; /** * The number of key-value mappings contained in this map. @@ -186,10 +188,10 @@ public class HashMap */ transient int modCount; + /** + * Holds values which can't be initialized until after VM is booted. + */ private static class Holder { - /** - * - */ static final sun.misc.Unsafe UNSAFE; /** @@ -198,22 +200,616 @@ public class HashMap */ static final long HASHSEED_OFFSET; + static final boolean USE_HASHSEED; + static { - try { - UNSAFE = sun.misc.Unsafe.getUnsafe(); - HASHSEED_OFFSET = UNSAFE.objectFieldOffset( - HashMap.class.getDeclaredField("hashSeed")); - } catch (NoSuchFieldException | SecurityException e) { - throw new InternalError("Failed to record hashSeed offset", e); + String hashSeedProp = java.security.AccessController.doPrivileged( + new sun.security.action.GetPropertyAction( + "jdk.map.useRandomSeed")); + boolean localBool = (null != hashSeedProp) + ? Boolean.parseBoolean(hashSeedProp) : false; + USE_HASHSEED = localBool; + + if (USE_HASHSEED) { + try { + UNSAFE = sun.misc.Unsafe.getUnsafe(); + HASHSEED_OFFSET = UNSAFE.objectFieldOffset( + HashMap.class.getDeclaredField("hashSeed")); + } catch (NoSuchFieldException | SecurityException e) { + throw new InternalError("Failed to record hashSeed offset", e); + } + } else { + UNSAFE = null; + HASHSEED_OFFSET = 0; } } } - /** + /* * A randomizing value associated with this instance that is applied to * hash code of keys to make hash collisions harder to find. + * + * Non-final so it can be set lazily, but be sure not to set more than once. */ - transient final int hashSeed = sun.misc.Hashing.randomHashSeed(this); + transient final int hashSeed; + + /* + * TreeBin/TreeNode code from CHM doesn't handle the null key. Store the + * null key entry here. 
+ */ + transient Entry nullKeyEntry = null; + + /* + * In order to improve performance under high hash-collision conditions, + * HashMap will switch to storing a bin's entries in a balanced tree + * (TreeBin) instead of a linked-list once the number of entries in the bin + * passes a certain threshold (TreeBin.TREE_THRESHOLD), if at least one of + * the keys in the bin implements Comparable. This technique is borrowed + * from ConcurrentHashMap. + */ + + /* + * Code based on CHMv8 + * + * Node type for TreeBin + */ + final static class TreeNode { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + final HashMap.Entry entry; + + TreeNode(HashMap.Entry entry, Object next, TreeNode parent) { + this.entry = entry; + this.entry.next = next; + this.parent = parent; + } + } + + /** + * Returns a Class for the given object of the form "class C + * implements Comparable", if one exists, else null. See the TreeBin + * docs, below, for explanation. + */ + static Class comparableClassFor(Object x) { + Class c, s, cmpc; Type[] ts, as; Type t; ParameterizedType p; + if ((c = x.getClass()) == String.class) // bypass checks + return c; + if ((cmpc = Comparable.class).isAssignableFrom(c)) { + while (cmpc.isAssignableFrom(s = c.getSuperclass())) + c = s; // find topmost comparable class + if ((ts = c.getGenericInterfaces()) != null) { + for (int i = 0; i < ts.length; ++i) { + if (((t = ts[i]) instanceof ParameterizedType) && + ((p = (ParameterizedType)t).getRawType() == cmpc) && + (as = p.getActualTypeArguments()) != null && + as.length == 1 && as[0] == c) // type arg is c + return c; + } + } + } + return null; + } + + /* + * Code based on CHMv8 + * + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by Comparable.compareTo order if applicable. On lookup at a + * node, if elements are not comparable or compare as 0 then both + * left and right children may need to be searched in the case of + * tied hash values. (This corresponds to the full list search + * that would be necessary if all elements were non-Comparable and + * had tied hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + */ + final class TreeBin { + /* + * The bin count threshold for using a tree rather than list for a bin. The + * value reflects the approximate break-even point for using tree-based + * operations. + */ + static final int TREE_THRESHOLD = 16; + + TreeNode root; // root of tree + TreeNode first; // head of next-pointer list + + /* + * Split a TreeBin into lo and hi parts and install in given table. + * + * Existing Entrys are re-used, which maintains the before/after links for + * LinkedHashMap.Entry. + * + * No check for Comparable, though this is the same as CHM. 
+ */ + final void splitTreeBin(Object[] newTable, int i, TreeBin loTree, TreeBin hiTree) { + TreeBin oldTree = this; + int bit = newTable.length >>> 1; + int loCount = 0, hiCount = 0; + TreeNode e = oldTree.first; + TreeNode next; + + // This method is called when the table has just increased capacity, + // so indexFor() is now taking one additional bit of hash into + // account ("bit"). Entries in this TreeBin now belong in one of + // two bins, "i" or "i+bit", depending on whether the new top bit of the + // hash is set. The trees for the two bins are loTree and hiTree. + // If either tree ends up containing fewer than TREE_THRESHOLD + // entries, it is converted back to a linked list. + while (e != null) { + // Save entry.next - it will get overwritten in putTreeNode() + next = (TreeNode)e.entry.next; + + int h = e.entry.hash; + K k = (K) e.entry.key; + V v = e.entry.value; + if ((h & bit) == 0) { + ++loCount; + // Re-using e.entry + loTree.putTreeNode(h, k, v, e.entry); + } else { + ++hiCount; + hiTree.putTreeNode(h, k, v, e.entry); + } + // Iterate using the saved 'next' + e = next; + } + if (loCount < TREE_THRESHOLD) { // too small, convert back to list + HashMap.Entry loEntry = null; + TreeNode p = loTree.first; + while (p != null) { + @SuppressWarnings("unchecked") + TreeNode savedNext = (TreeNode) p.entry.next; + p.entry.next = loEntry; + loEntry = p.entry; + p = savedNext; + } + // assert newTable[i] == null; + newTable[i] = loEntry; + } else { + // assert newTable[i] == null; + newTable[i] = loTree; + } + if (hiCount < TREE_THRESHOLD) { // too small, convert back to list + HashMap.Entry hiEntry = null; + TreeNode p = hiTree.first; + while (p != null) { + @SuppressWarnings("unchecked") + TreeNode savedNext = (TreeNode) p.entry.next; + p.entry.next = hiEntry; + hiEntry = p.entry; + p = savedNext; + } + // assert newTable[i + bit] == null; + newTable[i + bit] = hiEntry; + } else { + // assert newTable[i + bit] == null; + newTable[i + bit] = hiTree; + } + } + + /* + * Populate the TreeBin with entries from the linked list e + * + * Assumes 'this' is a new/empty TreeBin + * + * Note: no check for Comparable + * Note: I believe this changes iteration order + */ + @SuppressWarnings("unchecked") + void populate(HashMap.Entry e) { + // assert root == null; + // assert first == null; + HashMap.Entry next; + while (e != null) { + // Save entry.next - it will get overwritten in putTreeNode() + next = (HashMap.Entry)e.next; + // Re-using Entry e will maintain before/after in LinkedHM + putTreeNode(e.hash, (K)e.key, (V)e.value, e); + // Iterate using the saved 'next' + e = next; + } + } + + /** + * Copied from CHMv8 + * From CLR + */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) { + rl.parent = p; + } + if ((pp = r.parent = p.parent) == null) { + root = r; + } else if (pp.left == p) { + pp.left = r; + } else { + pp.right = r; + } + r.left = p; + p.parent = r; + } + } + + /** + * Copied from CHMv8 + * From CLR + */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) { + lr.parent = p; + } + if ((pp = l.parent = p.parent) == null) { + root = l; + } else if (pp.right == p) { + pp.right = l; + } else { + pp.left = l; + } + l.right = p; + p.parent = l; + } + } + + /** + * Returns the TreeNode (or null if not found) for the given + * key. A front-end for recursive version.
+ */ + final TreeNode getTreeNode(int h, K k) { + return getTreeNode(h, k, root, comparableClassFor(k)); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") + final TreeNode getTreeNode (int h, K k, TreeNode p, Class cc) { + // assert k != null; + while (p != null) { + int dir, ph; Object pk; + if ((ph = p.entry.hash) != h) + dir = (h < ph) ? -1 : 1; + else if ((pk = p.entry.key) == k || k.equals(pk)) + return p; + else if (cc == null || comparableClassFor(pk) != cc || + (dir = ((Comparable)k).compareTo(pk)) == 0) { + // assert pk != null; + TreeNode r, pl, pr; // check both sides + if ((pr = p.right) != null && + (r = getTreeNode(h, k, pr, cc)) != null) + return r; + else if ((pl = p.left) != null) + dir = -1; + else // nothing there + break; + } + p = (dir > 0) ? p.right : p.left; + } + return null; + } + + /* + * Finds or adds a node. + * + * 'entry' should be used to recycle an existing Entry (e.g. in the case + * of converting a linked-list bin to a TreeBin). + * If entry is null, a new Entry will be created for the new TreeNode + * + * @return the TreeNode containing the mapping, or null if a new + * TreeNode was added + */ + @SuppressWarnings("unchecked") + TreeNode putTreeNode(int h, K k, V v, HashMap.Entry entry) { + // assert k != null; + //if (entry != null) { + // assert h == entry.hash; + // assert k == entry.key; + // assert v == entry.value; + // } + Class cc = comparableClassFor(k); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; Object pk; + p = pp; + if ((ph = p.entry.hash) != h) + dir = (h < ph) ? -1 : 1; + else if ((pk = p.entry.key) == k || k.equals(pk)) + return p; + else if (cc == null || comparableClassFor(pk) != cc || + (dir = ((Comparable)k).compareTo(pk)) == 0) { + TreeNode r, pr; + if ((pr = p.right) != null && + (r = getTreeNode(h, k, pr, cc)) != null) + return r; + else // continue left + dir = -1; + } + pp = (dir > 0) ? p.right : p.left; + } + + // Didn't find the mapping in the tree, so add it + TreeNode f = first; + TreeNode x; + if (entry != null) { + x = new TreeNode(entry, f, p); + } else { + x = new TreeNode(newEntry(h, k, v, null), f, p); + } + first = x; + + if (p == null) { + root = x; + } else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) { + f.prev = x; + } + if (dir <= 0) { + p.left = x; + } else { + p.right = x; + } + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red + && (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) { + r.red = false; + } + } + return null; + } + + /* + * From CHMv8 + * + * Removes the given node, that must be present before this + * call. 
This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode) p.entry.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) { + first = next; + } else { + pred.entry.next = next; + } + if (next != null) { + next.prev = pred; + } + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + { + s = sl; + } + boolean c = s.red; + s.red = p.red; + p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) { + sp.left = p; + } else { + sp.right = p; + } + } + if ((s.right = pr) != null) { + pr.parent = s; + } + } + p.left = null; + if ((p.right = sr) != null) { + sr.parent = p; + } + if ((s.left = pl) != null) { + pl.parent = s; + } + if ((s.parent = pp) == null) { + root = s; + } else if (p == pp.left) { + pp.left = s; + } else { + pp.right = s; + } + replacement = sr; + } else { + replacement = (pl != null) ? pl : pr; + } + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } else { + replacement.parent = pp; + if (pp == null) { + root = replacement; + } else if (p == pp.left) { + pp.left = replacement; + } else { + pp.right = replacement; + } + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) { + x = xp; + } else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) + && (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } else { + if (sr == null || !sr.red) { + if (sl != null) { + sl.red = false; + } + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? + null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) { + sr.red = false; + } + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib == null) { + x = xp; + } else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) + && (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } else { + if (sl == null || !sl.red) { + if (sr != null) { + sr.red = false; + } + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? + null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? 
false : xp.red; + if ((sl = sib.left) != null) { + sl.red = false; + } + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + { + pp.left = null; + } else if (p == pp.right) { + pp.right = null; + } + p.parent = null; + } + } + } /** * Constructs an empty HashMap with the specified initial @@ -233,9 +829,9 @@ public class HashMap if (loadFactor <= 0 || Float.isNaN(loadFactor)) throw new IllegalArgumentException("Illegal load factor: " + loadFactor); - this.loadFactor = loadFactor; threshold = initialCapacity; + hashSeed = initHashSeed(); init(); } @@ -269,10 +865,11 @@ public class HashMap */ public HashMap(Map m) { this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, - DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR); + DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR); inflateTable(threshold); putAllForCreate(m); + // assert size == m.size(); } private static int roundUpToPowerOf2(int number) { @@ -294,7 +891,7 @@ public class HashMap int capacity = roundUpToPowerOf2(toSize); threshold = (int) Math.min(capacity * loadFactor, MAXIMUM_CAPACITY + 1); - table = new Entry[capacity]; + table = new Object[capacity]; } // internal utilities @@ -309,18 +906,25 @@ public class HashMap void init() { } + /** + * Return an initial value for the hashSeed, or 0 if the random seed is not + * enabled. + */ + final int initHashSeed() { + if (sun.misc.VM.isBooted() && Holder.USE_HASHSEED) { + return sun.misc.Hashing.randomHashSeed(this); + } + return 0; + } + /** * Retrieve object hash code and applies a supplemental hash function to the - * result hash, which defends against poor quality hash functions. This is + * result hash, which defends against poor quality hash functions. This is * critical because HashMap uses power-of-two length hash tables, that * otherwise encounter collisions for hashCodes that do not differ * in lower bits. */ final int hash(Object k) { - if (k instanceof String) { - return ((String) k).hash32(); - } - int h = hashSeed ^ k.hashCode(); // This function ensures that hashCodes that differ only by @@ -409,19 +1013,35 @@ public class HashMap if (isEmpty()) { return null; } + if (key == null) { + return nullKeyEntry; + } + int hash = hash(key); + int bin = indexFor(hash, table.length); - int hash = (key == null) ? 0 : hash(key); - for (Entry e = table[indexFor(hash, table.length)]; - e != null; - e = e.next) { - Object k; - if (e.hash == hash && - ((k = e.key) == key || (key != null && key.equals(k)))) - return (Entry)e; + if (table[bin] instanceof Entry) { + Entry e = (Entry) table[bin]; + for (; e != null; e = (Entry)e.next) { + Object k; + if (e.hash == hash && + ((k = e.key) == key || key.equals(k))) { + return e; + } + } + } else if (table[bin] != null) { + TreeBin e = (TreeBin)table[bin]; + TreeNode p = e.getTreeNode(hash, (K)key); + if (p != null) { + // assert p.entry.hash == hash && p.entry.key.equals(key); + return (Entry)p.entry; + } else { + return null; + } } return null; } + /** * Associates the specified value with the specified key in this map. * If the map previously contained a mapping for the key, the old @@ -434,28 +1054,57 @@ public class HashMap * (A null return can also indicate that the map * previously associated null with key.) 
*/ + @SuppressWarnings("unchecked") public V put(K key, V value) { if (table == EMPTY_TABLE) { inflateTable(threshold); } - if (key == null) + if (key == null) return putForNullKey(value); int hash = hash(key); int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry e = (Entry)table[i]; - for(; e != null; e = e.next) { - Object k; - if (e.hash == hash && ((k = e.key) == key || key.equals(k))) { - V oldValue = e.value; - e.value = value; - e.recordAccess(this); - return oldValue; + boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? + + if (table[i] instanceof Entry) { + // Bin contains ordinary Entries. Search for key in the linked list + // of entries, counting the number of entries. Only check for + // TreeBin conversion if the list size is >= TREE_THRESHOLD. + // (The conversion still may not happen if the table gets resized.) + int listSize = 0; + Entry e = (Entry) table[i]; + for (; e != null; e = (Entry)e.next) { + Object k; + if (e.hash == hash && ((k = e.key) == key || key.equals(k))) { + V oldValue = e.value; + e.value = value; + e.recordAccess(this); + return oldValue; + } + listSize++; + } + // Didn't find, so fall through and call addEntry() to add the + // Entry and check for TreeBin conversion. + checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; + } else if (table[i] != null) { + TreeBin e = (TreeBin)table[i]; + TreeNode p = e.putTreeNode(hash, key, value, null); + if (p == null) { // putTreeNode() added a new node + modCount++; + size++; + if (size >= threshold) { + resize(2 * table.length); + } + return null; + } else { // putTreeNode() found an existing node + Entry pEntry = (Entry)p.entry; + V oldVal = pEntry.value; + pEntry.value = value; + pEntry.recordAccess(this); + return oldVal; } } - modCount++; - addEntry(hash, key, value, i); + addEntry(hash, key, value, i, checkIfNeedTree); return null; } @@ -463,47 +1112,79 @@ public class HashMap * Offloaded version of put for null keys */ private V putForNullKey(V value) { - @SuppressWarnings("unchecked") - Entry e = (Entry)table[0]; - for(; e != null; e = e.next) { - if (e.key == null) { - V oldValue = e.value; - e.value = value; - e.recordAccess(this); - return oldValue; - } + if (nullKeyEntry != null) { + V oldValue = nullKeyEntry.value; + nullKeyEntry.value = value; + nullKeyEntry.recordAccess(this); + return oldValue; } modCount++; - addEntry(0, null, value, 0); + size++; // newEntry() skips size++ + nullKeyEntry = newEntry(0, null, value, null); return null; } + private void putForCreateNullKey(V value) { + // Look for preexisting entry for key. This will never happen for + // clone or deserialize. It will only happen for construction if the + // input Map is a sorted map whose ordering is inconsistent w/ equals. + if (nullKeyEntry != null) { + nullKeyEntry.value = value; + } else { + nullKeyEntry = newEntry(0, null, value, null); + size++; + } + } + + /** * This method is used instead of put by constructors and * pseudoconstructors (clone, readObject). It does not resize the table, - * check for comodification, etc. It calls createEntry rather than - * addEntry. + * check for comodification, etc, though it will convert bins to TreeBins + * as needed. It calls createEntry rather than addEntry. */ + @SuppressWarnings("unchecked") private void putForCreate(K key, V value) { - int hash = null == key ? 
0 : hash(key); + if (null == key) { + putForCreateNullKey(value); + return; + } + int hash = hash(key); int i = indexFor(hash, table.length); + boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? /** * Look for preexisting entry for key. This will never happen for * clone or deserialize. It will only happen for construction if the * input Map is a sorted map whose ordering is inconsistent w/ equals. */ - for (@SuppressWarnings("unchecked") - Entry e = (Entry)table[i]; e != null; e = e.next) { - Object k; - if (e.hash == hash && - ((k = e.key) == key || (key != null && key.equals(k)))) { - e.value = value; - return; + if (table[i] instanceof Entry) { + int listSize = 0; + Entry e = (Entry) table[i]; + for (; e != null; e = (Entry)e.next) { + Object k; + if (e.hash == hash && ((k = e.key) == key || key.equals(k))) { + e.value = value; + return; + } + listSize++; } + // Didn't find, fall through to createEntry(). + // Check for conversion to TreeBin done via createEntry(). + checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; + } else if (table[i] != null) { + TreeBin e = (TreeBin)table[i]; + TreeNode p = e.putTreeNode(hash, key, value, null); + if (p != null) { + p.entry.setValue(value); // Found an existing node, set value + } else { + size++; // Added a new TreeNode, so update size + } + // don't need modCount++/check for resize - just return + return; } - createEntry(hash, key, value, i); + createEntry(hash, key, value, i, checkIfNeedTree); } private void putAllForCreate(Map m) { @@ -526,14 +1207,14 @@ public class HashMap * is irrelevant). */ void resize(int newCapacity) { - Entry[] oldTable = table; + Object[] oldTable = table; int oldCapacity = oldTable.length; if (oldCapacity == MAXIMUM_CAPACITY) { threshold = Integer.MAX_VALUE; return; } - Entry[] newTable = new Entry[newCapacity]; + Object[] newTable = new Object[newCapacity]; transfer(newTable); table = newTable; threshold = (int)Math.min(newCapacity * loadFactor, MAXIMUM_CAPACITY + 1); @@ -541,19 +1222,31 @@ public class HashMap /** * Transfers all entries from current table to newTable. + * + * Assumes newTable is larger than table */ @SuppressWarnings("unchecked") - void transfer(Entry[] newTable) { - Entry[] src = table; + void transfer(Object[] newTable) { + Object[] src = table; + // assert newTable.length > src.length : "newTable.length(" + + // newTable.length + ") expected to be > src.length("+src.length+")"; int newCapacity = newTable.length; - for (int j = 0; j < src.length; j++ ) { - Entry e = (Entry) src[j]; - while(null != e) { - Entry next = e.next; - int i = indexFor(e.hash, newCapacity); - e.next = (Entry) newTable[i]; - newTable[i] = e; - e = next; + for (int j = 0; j < src.length; j++) { + if (src[j] instanceof Entry) { + // Assume: since wasn't TreeBin before, won't need TreeBin now + Entry e = (Entry) src[j]; + while (null != e) { + Entry next = (Entry)e.next; + int i = indexFor(e.hash, newCapacity); + e.next = (Entry) newTable[i]; + newTable[i] = e; + e = next; + } + } else if (src[j] != null) { + TreeBin e = (TreeBin) src[j]; + TreeBin loTree = new TreeBin(); + TreeBin hiTree = new TreeBin(); + e.splitTreeBin(newTable, j, loTree, hiTree); } } Arrays.fill(table, null); @@ -585,20 +1278,13 @@ public class HashMap * By using the conservative calculation, we subject ourself * to at most one extra resize. 
*/ - if (numKeysToBeAdded > threshold) { - int targetCapacity = (int)(numKeysToBeAdded / loadFactor + 1); - if (targetCapacity > MAXIMUM_CAPACITY) - targetCapacity = MAXIMUM_CAPACITY; - int newCapacity = table.length; - while (newCapacity < targetCapacity) - newCapacity <<= 1; - if (newCapacity > table.length) - resize(newCapacity); + if (numKeysToBeAdded > threshold && table.length < MAXIMUM_CAPACITY) { + resize(table.length * 2); } for (Map.Entry e : m.entrySet()) put(e.getKey(), e.getValue()); - } + } /** * Removes the mapping for the specified key from this map if present. @@ -621,24 +1307,57 @@ public class HashMap if (table == EMPTY_TABLE) { inflateTable(threshold); } - int hash = (key == null) ? 0 : hash(key); - int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry e = (Entry)table[i]; - for(; e != null; e = e.next) { - if (e.hash == hash && Objects.equals(e.key, key)) { - if(e.value != null) { - return e.value; - } - e.value = value; - modCount++; - e.recordAccess(this); + if (key == null) { + if (nullKeyEntry == null || nullKeyEntry.value == null) { + putForNullKey(value); return null; + } else { + return nullKeyEntry.value; } } + int hash = hash(key); + int i = indexFor(hash, table.length); + boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? + if (table[i] instanceof Entry) { + int listSize = 0; + Entry e = (Entry) table[i]; + for (; e != null; e = (Entry)e.next) { + if (e.hash == hash && Objects.equals(e.key, key)) { + if (e.value != null) { + return e.value; + } + e.value = value; + e.recordAccess(this); + return null; + } + listSize++; + } + // Didn't find, so fall through and call addEntry() to add the + // Entry and check for TreeBin conversion. + checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; + } else if (table[i] != null) { + TreeBin e = (TreeBin)table[i]; + TreeNode p = e.putTreeNode(hash, key, value, null); + if (p == null) { // not found, putTreeNode() added a new node + modCount++; + size++; + if (size >= threshold) { + resize(2 * table.length); + } + return null; + } else { // putTreeNode() found an existing node + Entry pEntry = (Entry)p.entry; + V oldVal = pEntry.value; + if (oldVal == null) { // only replace if maps to null + pEntry.value = value; + pEntry.recordAccess(this); + } + return oldVal; + } + } modCount++; - addEntry(hash, key, value, i); + addEntry(hash, key, value, i, checkIfNeedTree); return null; } @@ -647,31 +1366,61 @@ public class HashMap if (isEmpty()) { return false; } - int hash = (key == null) ? 
0 : hash(key); - int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry prev = (Entry)table[i]; - Entry e = prev; - - while (e != null) { - Entry next = e.next; - if (e.hash == hash && Objects.equals(e.key, key)) { - if (!Objects.equals(e.value, value)) { - return false; - } - modCount++; - size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); + if (key == null) { + if (nullKeyEntry != null && + Objects.equals(nullKeyEntry.value, value)) { + removeNullKey(); return true; } - prev = e; - e = next; + return false; } + int hash = hash(key); + int i = indexFor(hash, table.length); + if (table[i] instanceof Entry) { + @SuppressWarnings("unchecked") + Entry prev = (Entry) table[i]; + Entry e = prev; + while (e != null) { + @SuppressWarnings("unchecked") + Entry next = (Entry) e.next; + if (e.hash == hash && Objects.equals(e.key, key)) { + if (!Objects.equals(e.value, value)) { + return false; + } + modCount++; + size--; + if (prev == e) + table[i] = next; + else + prev.next = next; + e.recordRemoval(this); + return true; + } + prev = e; + e = next; + } + } else if (table[i] != null) { + TreeBin tb = ((TreeBin) table[i]); + TreeNode p = tb.getTreeNode(hash, (K)key); + if (p != null) { + Entry pEntry = (Entry)p.entry; + // assert pEntry.key.equals(key); + if (Objects.equals(pEntry.value, value)) { + modCount++; + size--; + tb.deleteTreeNode(p); + pEntry.recordRemoval(this); + if (tb.root == null || tb.first == null) { + // assert tb.root == null && tb.first == null : + // "TreeBin.first and root should both be null"; + // TreeBin is now empty, we should blank this bin + table[i] = null; + } + return true; + } + } + } return false; } @@ -680,39 +1429,82 @@ public class HashMap if (isEmpty()) { return false; } - int hash = (key == null) ? 0 : hash(key); - int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry e = (Entry)table[i]; - for (; e != null; e = e.next) { - if (e.hash == hash && Objects.equals(e.key, key) && Objects.equals(e.value, oldValue)) { - e.value = newValue; - e.recordAccess(this); + if (key == null) { + if (nullKeyEntry != null && + Objects.equals(nullKeyEntry.value, oldValue)) { + putForNullKey(newValue); return true; } + return false; } + int hash = hash(key); + int i = indexFor(hash, table.length); + if (table[i] instanceof Entry) { + @SuppressWarnings("unchecked") + Entry e = (Entry) table[i]; + for (; e != null; e = (Entry)e.next) { + if (e.hash == hash && Objects.equals(e.key, key) && Objects.equals(e.value, oldValue)) { + e.value = newValue; + e.recordAccess(this); + return true; + } + } + return false; + } else if (table[i] != null) { + TreeBin tb = ((TreeBin) table[i]); + TreeNode p = tb.getTreeNode(hash, key); + if (p != null) { + Entry pEntry = (Entry)p.entry; + // assert pEntry.key.equals(key); + if (Objects.equals(pEntry.value, oldValue)) { + pEntry.value = newValue; + pEntry.recordAccess(this); + return true; + } + } + } return false; } - @Override + @Override public V replace(K key, V value) { if (isEmpty()) { return null; } - int hash = (key == null) ? 
0 : hash(key); + if (key == null) { + if (nullKeyEntry != null) { + return putForNullKey(value); + } + return null; + } + int hash = hash(key); int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry e = (Entry)table[i]; - for (; e != null; e = e.next) { - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - e.value = value; - e.recordAccess(this); + if (table[i] instanceof Entry) { + @SuppressWarnings("unchecked") + Entry e = (Entry)table[i]; + for (; e != null; e = (Entry)e.next) { + if (e.hash == hash && Objects.equals(e.key, key)) { + V oldValue = e.value; + e.value = value; + e.recordAccess(this); + return oldValue; + } + } + + return null; + } else if (table[i] != null) { + TreeBin tb = ((TreeBin) table[i]); + TreeNode p = tb.getTreeNode(hash, key); + if (p != null) { + Entry pEntry = (Entry)p.entry; + // assert pEntry.key.equals(key); + V oldValue = pEntry.value; + pEntry.value = value; + pEntry.recordAccess(this); return oldValue; } } - return null; } @@ -721,21 +1513,75 @@ public class HashMap if (table == EMPTY_TABLE) { inflateTable(threshold); } - int hash = (key == null) ? 0 : hash(key); + if (key == null) { + if (nullKeyEntry == null || nullKeyEntry.value == null) { + V newValue = mappingFunction.apply(key); + if (newValue != null) { + putForNullKey(newValue); + } + return newValue; + } + return nullKeyEntry.value; + } + int hash = hash(key); int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry e = (Entry)table[i]; - for (; e != null; e = e.next) { - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - return oldValue == null ? (e.value = mappingFunction.apply(key)) : oldValue; + boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? + + if (table[i] instanceof Entry) { + int listSize = 0; + @SuppressWarnings("unchecked") + Entry e = (Entry)table[i]; + for (; e != null; e = (Entry)e.next) { + if (e.hash == hash && Objects.equals(e.key, key)) { + V oldValue = e.value; + if (oldValue == null) { + V newValue = mappingFunction.apply(key); + if (newValue != null) { + e.value = newValue; + e.recordAccess(this); + } + return newValue; + } + return oldValue; + } + listSize++; + } + // Didn't find, fall through to call the mapping function + checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; + } else if (table[i] != null) { + TreeBin e = (TreeBin)table[i]; + V value = mappingFunction.apply(key); + if (value == null) { // Return the existing value, if any + TreeNode p = e.getTreeNode(hash, key); + if (p != null) { + return (V) p.entry.value; + } + return null; + } else { // Put the new value into the Tree, if absent + TreeNode p = e.putTreeNode(hash, key, value, null); + if (p == null) { // not found, new node was added + modCount++; + size++; + if (size >= threshold) { + resize(2 * table.length); + } + return value; + } else { // putTreeNode() found an existing node + Entry pEntry = (Entry)p.entry; + V oldVal = pEntry.value; + if (oldVal == null) { // only replace if maps to null + pEntry.value = value; + pEntry.recordAccess(this); + return value; + } + return oldVal; + } } } - V newValue = mappingFunction.apply(key); - if (newValue != null) { + if (newValue != null) { // add Entry and check for TreeBin conversion modCount++; - addEntry(hash, key, newValue, i); + addEntry(hash, key, newValue, i, checkIfNeedTree); } return newValue; @@ -746,59 +1592,34 @@ public class HashMap if (isEmpty()) { return null; } - int hash = (key == null) ? 
0 : hash(key); - int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry prev = (Entry)table[i]; - Entry e = prev; - - while (e != null) { - Entry next = e.next; - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - if (oldValue == null) - break; + if (key == null) { + V oldValue; + if (nullKeyEntry != null && (oldValue = nullKeyEntry.value) != null) { V newValue = remappingFunction.apply(key, oldValue); - modCount++; - if (newValue == null) { - size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); + if (newValue != null ) { + putForNullKey(newValue); + return newValue; } else { - e.value = newValue; - e.recordAccess(this); + removeNullKey(); } - return newValue; } - prev = e; - e = next; + return null; } - - return null; - } - - @Override - public V compute(K key, BiFunction remappingFunction) { - if (table == EMPTY_TABLE) { - inflateTable(threshold); - } - int hash = (key == null) ? 0 : hash(key); + int hash = hash(key); int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry prev = (Entry)table[i]; - Entry e = prev; - - while (e != null) { - Entry next = e.next; - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - V newValue = remappingFunction.apply(key, oldValue); - if (newValue != oldValue) { - modCount++; + if (table[i] instanceof Entry) { + @SuppressWarnings("unchecked") + Entry prev = (Entry)table[i]; + Entry e = prev; + while (e != null) { + Entry next = (Entry)e.next; + if (e.hash == hash && Objects.equals(e.key, key)) { + V oldValue = e.value; + if (oldValue == null) + break; + V newValue = remappingFunction.apply(key, oldValue); if (newValue == null) { + modCount++; size--; if (prev == e) table[i] = next; @@ -809,17 +1630,136 @@ public class HashMap e.value = newValue; e.recordAccess(this); } + return newValue; } - return newValue; + prev = e; + e = next; } - prev = e; - e = next; + } else if (table[i] != null) { + TreeBin tb = (TreeBin)table[i]; + TreeNode p = tb.getTreeNode(hash, key); + if (p != null) { + Entry pEntry = (Entry)p.entry; + // assert pEntry.key.equals(key); + V oldValue = pEntry.value; + if (oldValue != null) { + V newValue = remappingFunction.apply(key, oldValue); + if (newValue == null) { // remove mapping + modCount++; + size--; + tb.deleteTreeNode(p); + pEntry.recordRemoval(this); + if (tb.root == null || tb.first == null) { + // assert tb.root == null && tb.first == null : + // "TreeBin.first and root should both be null"; + // TreeBin is now empty, we should blank this bin + table[i] = null; + } + } else { + pEntry.value = newValue; + pEntry.recordAccess(this); + } + return newValue; + } + } + } + return null; + } + + @Override + public V compute(K key, BiFunction remappingFunction) { + if (table == EMPTY_TABLE) { + inflateTable(threshold); + } + if (key == null) { + V oldValue = nullKeyEntry == null ? null : nullKeyEntry.value; + V newValue = remappingFunction.apply(key, oldValue); + if (newValue != oldValue) { + if (newValue == null) { + removeNullKey(); + } else { + putForNullKey(newValue); + } + } + return newValue; + } + int hash = hash(key); + int i = indexFor(hash, table.length); + boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? 
+ + if (table[i] instanceof Entry) { + int listSize = 0; + @SuppressWarnings("unchecked") + Entry prev = (Entry)table[i]; + Entry e = prev; + + while (e != null) { + Entry next = (Entry)e.next; + if (e.hash == hash && Objects.equals(e.key, key)) { + V oldValue = e.value; + V newValue = remappingFunction.apply(key, oldValue); + if (newValue != oldValue) { + if (newValue == null) { + modCount++; + size--; + if (prev == e) + table[i] = next; + else + prev.next = next; + e.recordRemoval(this); + } else { + e.value = newValue; + e.recordAccess(this); + } + } + return newValue; + } + prev = e; + e = next; + listSize++; + } + checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; + } else if (table[i] != null) { + TreeBin tb = (TreeBin)table[i]; + TreeNode p = tb.getTreeNode(hash, key); + V oldValue = p == null ? null : (V)p.entry.value; + V newValue = remappingFunction.apply(key, oldValue); + if (newValue != oldValue) { + if (newValue == null) { + Entry pEntry = (Entry)p.entry; + modCount++; + size--; + tb.deleteTreeNode(p); + pEntry.recordRemoval(this); + if (tb.root == null || tb.first == null) { + // assert tb.root == null && tb.first == null : + // "TreeBin.first and root should both be null"; + // TreeBin is now empty, we should blank this bin + table[i] = null; + } + } else { + if (p != null) { // just update the value + Entry pEntry = (Entry)p.entry; + pEntry.value = newValue; + pEntry.recordAccess(this); + } else { // need to put new node + p = tb.putTreeNode(hash, key, newValue, null); + // assert p == null; // should have added a new node + modCount++; + size++; + if (size >= threshold) { + resize(2 * table.length); + } + } + } + } + return newValue; } V newValue = remappingFunction.apply(key, null); if (newValue != null) { modCount++; - addEntry(hash, key, newValue, i); + addEntry(hash, key, newValue, i, checkIfNeedTree); } return newValue; @@ -830,40 +1770,96 @@ public class HashMap if (table == EMPTY_TABLE) { inflateTable(threshold); } - int hash = (key == null) ? 0 : hash(key); - int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry prev = (Entry)table[i]; - Entry e = prev; - - while (e != null) { - Entry next = e.next; - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - V newValue = remappingFunction.apply(oldValue, value); - modCount++; - if (newValue == null) { - size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); - } else { - e.value = newValue; - e.recordAccess(this); - } - return newValue; + if (key == null) { + V oldValue = nullKeyEntry == null ? null : nullKeyEntry.value; + V newValue = oldValue == null ? value : remappingFunction.apply(oldValue, value); + if (newValue != null) { + putForNullKey(newValue); + } else if (nullKeyEntry != null) { + removeNullKey(); } - prev = e; - e = next; + return newValue; } + int hash = hash(key); + int i = indexFor(hash, table.length); + boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? + if (table[i] instanceof Entry) { + int listSize = 0; + @SuppressWarnings("unchecked") + Entry prev = (Entry)table[i]; + Entry e = prev; + + while (e != null) { + Entry next = (Entry)e.next; + if (e.hash == hash && Objects.equals(e.key, key)) { + V oldValue = e.value; + V newValue = (oldValue == null) ? 
value : + remappingFunction.apply(oldValue, value); + if (newValue == null) { + modCount++; + size--; + if (prev == e) + table[i] = next; + else + prev.next = next; + e.recordRemoval(this); + } else { + e.value = newValue; + e.recordAccess(this); + } + return newValue; + } + prev = e; + e = next; + listSize++; + } + // Didn't find, so fall through and (maybe) call addEntry() to add + // the Entry and check for TreeBin conversion. + checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; + } else if (table[i] != null) { + TreeBin tb = (TreeBin)table[i]; + TreeNode p = tb.getTreeNode(hash, key); + V oldValue = p == null ? null : (V)p.entry.value; + V newValue = (oldValue == null) ? value : + remappingFunction.apply(oldValue, value); + if (newValue == null) { + if (p != null) { + Entry pEntry = (Entry)p.entry; + modCount++; + size--; + tb.deleteTreeNode(p); + pEntry.recordRemoval(this); + + if (tb.root == null || tb.first == null) { + // assert tb.root == null && tb.first == null : + // "TreeBin.first and root should both be null"; + // TreeBin is now empty, we should blank this bin + table[i] = null; + } + } + return null; + } else if (newValue != oldValue) { + if (p != null) { // just update the value + Entry pEntry = (Entry)p.entry; + pEntry.value = newValue; + pEntry.recordAccess(this); + } else { // need to put new node + p = tb.putTreeNode(hash, key, newValue, null); + // assert p == null; // should have added a new node + modCount++; + size++; + if (size >= threshold) { + resize(2 * table.length); + } + } + } + return newValue; + } if (value != null) { modCount++; - addEntry(hash, key, value, i); + addEntry(hash, key, value, i, checkIfNeedTree); } - return value; } @@ -873,36 +1869,65 @@ public class HashMap * Removes and returns the entry associated with the specified key * in the HashMap. Returns null if the HashMap contains no mapping * for this key. + * + * We don't bother converting TreeBins back to Entry lists if the bin falls + * back below TREE_THRESHOLD, but we do clear bins when removing the last + * TreeNode in a TreeBin. */ final Entry removeEntryForKey(Object key) { if (isEmpty()) { return null; } - int hash = (key == null) ? 
0 : hash(key); + if (key == null) { + if (nullKeyEntry != null) { + return removeNullKey(); + } + return null; + } + int hash = hash(key); int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry prev = (Entry)table[i]; - Entry e = prev; - while (e != null) { - Entry next = e.next; - Object k; - if (e.hash == hash && - ((k = e.key) == key || (key != null && key.equals(k)))) { + if (table[i] instanceof Entry) { + @SuppressWarnings("unchecked") + Entry prev = (Entry)table[i]; + Entry e = prev; + + while (e != null) { + @SuppressWarnings("unchecked") + Entry next = (Entry) e.next; + if (e.hash == hash && Objects.equals(e.key, key)) { + modCount++; + size--; + if (prev == e) + table[i] = next; + else + prev.next = next; + e.recordRemoval(this); + return e; + } + prev = e; + e = next; + } + } else if (table[i] != null) { + TreeBin tb = ((TreeBin) table[i]); + TreeNode p = tb.getTreeNode(hash, (K)key); + if (p != null) { + Entry pEntry = (Entry)p.entry; + // assert pEntry.key.equals(key); modCount++; size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); - return e; + tb.deleteTreeNode(p); + pEntry.recordRemoval(this); + if (tb.root == null || tb.first == null) { + // assert tb.root == null && tb.first == null : + // "TreeBin.first and root should both be null"; + // TreeBin is now empty, we should blank this bin + table[i] = null; + } + return pEntry; } - prev = e; - e = next; } - - return e; + return null; } /** @@ -915,29 +1940,75 @@ public class HashMap Map.Entry entry = (Map.Entry) o; Object key = entry.getKey(); - int hash = (key == null) ? 0 : hash(key); - int i = indexFor(hash, table.length); - @SuppressWarnings("unchecked") - Entry prev = (Entry)table[i]; - Entry e = prev; - while (e != null) { - Entry next = e.next; - if (e.hash == hash && e.equals(entry)) { - modCount++; - size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); - return e; + if (key == null) { + if (entry.equals(nullKeyEntry)) { + return removeNullKey(); } - prev = e; - e = next; + return null; } - return e; + int hash = hash(key); + int i = indexFor(hash, table.length); + + if (table[i] instanceof Entry) { + @SuppressWarnings("unchecked") + Entry prev = (Entry)table[i]; + Entry e = prev; + + while (e != null) { + @SuppressWarnings("unchecked") + Entry next = (Entry)e.next; + if (e.hash == hash && e.equals(entry)) { + modCount++; + size--; + if (prev == e) + table[i] = next; + else + prev.next = next; + e.recordRemoval(this); + return e; + } + prev = e; + e = next; + } + } else if (table[i] != null) { + TreeBin tb = ((TreeBin) table[i]); + TreeNode p = tb.getTreeNode(hash, (K)key); + if (p != null && p.entry.equals(entry)) { + @SuppressWarnings("unchecked") + Entry pEntry = (Entry)p.entry; + // assert pEntry.key.equals(key); + modCount++; + size--; + tb.deleteTreeNode(p); + pEntry.recordRemoval(this); + if (tb.root == null || tb.first == null) { + // assert tb.root == null && tb.first == null : + // "TreeBin.first and root should both be null"; + // TreeBin is now empty, we should blank this bin + table[i] = null; + } + return pEntry; + } + } + return null; + } + + /* + * Remove the mapping for the null key, and update internal accounting + * (size, modcount, recordRemoval, etc). + * + * Assumes nullKeyEntry is non-null. 
+ */ + private Entry removeNullKey() { + // assert nullKeyEntry != null; + Entry retVal = nullKeyEntry; + modCount++; + size--; + retVal.recordRemoval(this); + nullKeyEntry = null; + return retVal; } /** @@ -946,6 +2017,9 @@ public class HashMap */ public void clear() { modCount++; + if (nullKeyEntry != null) { + nullKeyEntry = null; + } Arrays.fill(table, null); size = 0; } @@ -959,27 +2033,58 @@ public class HashMap * specified value */ public boolean containsValue(Object value) { - if (value == null) + if (value == null) { return containsNullValue(); - - Entry[] tab = table; - for (int i = 0; i < tab.length; i++) - for (Entry e = tab[i]; e != null; e = e.next) - if (value.equals(e.value)) - return true; - return false; + } + Object[] tab = table; + for (int i = 0; i < tab.length; i++) { + if (tab[i] instanceof Entry) { + Entry e = (Entry)tab[i]; + for (; e != null; e = (Entry)e.next) { + if (value.equals(e.value)) { + return true; + } + } + } else if (tab[i] != null) { + TreeBin e = (TreeBin)tab[i]; + TreeNode p = e.first; + for (; p != null; p = (TreeNode) p.entry.next) { + if (value == p.entry.value || value.equals(p.entry.value)) { + return true; + } + } + } + } + // Didn't find value in table - could be in nullKeyEntry + return (nullKeyEntry != null && (value == nullKeyEntry.value || + value.equals(nullKeyEntry.value))); } /** * Special-case code for containsValue with null argument */ private boolean containsNullValue() { - Entry[] tab = table; - for (int i = 0; i < tab.length; i++) - for (Entry e = tab[i]; e != null; e = e.next) - if (e.value == null) - return true; - return false; + Object[] tab = table; + for (int i = 0; i < tab.length; i++) { + if (tab[i] instanceof Entry) { + Entry e = (Entry)tab[i]; + for (; e != null; e = (Entry)e.next) { + if (e.value == null) { + return true; + } + } + } else if (tab[i] != null) { + TreeBin e = (TreeBin)tab[i]; + TreeNode p = e.first; + for (; p != null; p = (TreeNode) p.entry.next) { + if (p.entry.value == null) { + return true; + } + } + } + } + // Didn't find value in table - could be in nullKeyEntry + return (nullKeyEntry != null && nullKeyEntry.value == null); } /** @@ -1007,6 +2112,7 @@ public class HashMap result.entrySet = null; result.modCount = 0; result.size = 0; + result.nullKeyEntry = null; result.init(); result.putAllForCreate(this); @@ -1016,13 +2122,13 @@ public class HashMap static class Entry implements Map.Entry { final K key; V value; - Entry next; + Object next; // an Entry, or a TreeNode final int hash; /** * Creates new entry. */ - Entry(int h, K k, V v, Entry n) { + Entry(int h, K k, V v, Object n) { value = v; next = n; key = k; @@ -1054,7 +2160,7 @@ public class HashMap Object v2 = e.getValue(); if (v1 == v2 || (v1 != null && v1.equals(v2))) return true; - } + } return false; } @@ -1068,8 +2174,7 @@ public class HashMap /** * This method is invoked whenever the value in an entry is - * overwritten by an invocation of put(k,v) for a key k that's already - * in the HashMap. + * overwritten for a key that's already in the HashMap. */ void recordAccess(HashMap m) { } @@ -1082,50 +2187,96 @@ public class HashMap } } + void addEntry(int hash, K key, V value, int bucketIndex) { + addEntry(hash, key, value, bucketIndex, true); + } + /** * Adds a new entry with the specified key, value and hash code to * the specified bucket. It is the responsibility of this - * method to resize the table if appropriate. + * method to resize the table if appropriate. The new entry is then + * created by calling createEntry(). 
* * Subclass overrides this to alter the behavior of put method. + * + * If checkIfNeedTree is false, it is known that this bucket will not need + * to be converted to a TreeBin, so don't bothering checking. + * + * Assumes key is not null. */ - void addEntry(int hash, K key, V value, int bucketIndex) { + void addEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) { + // assert key != null; if ((size >= threshold) && (null != table[bucketIndex])) { resize(2 * table.length); - hash = (null != key) ? hash(key) : 0; + hash = hash(key); bucketIndex = indexFor(hash, table.length); } - - createEntry(hash, key, value, bucketIndex); + createEntry(hash, key, value, bucketIndex, checkIfNeedTree); } /** - * Like addEntry except that this version is used when creating entries + * Called by addEntry(), and also used when creating entries * as part of Map construction or "pseudo-construction" (cloning, - * deserialization). This version needn't worry about resizing the table. + * deserialization). This version does not check for resizing of the table. * - * Subclass overrides this to alter the behavior of HashMap(Map), - * clone, and readObject. + * This method is responsible for converting a bucket to a TreeBin once + * TREE_THRESHOLD is reached. However if checkIfNeedTree is false, it is known + * that this bucket will not need to be converted to a TreeBin, so don't + * bother checking. The new entry is constructed by calling newEntry(). + * + * Assumes key is not null. + * + * Note: buckets already converted to a TreeBin don't call this method, but + * instead call TreeBin.putTreeNode() to create new entries. */ - void createEntry(int hash, K key, V value, int bucketIndex) { + void createEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) { + // assert key != null; @SuppressWarnings("unchecked") Entry e = (Entry)table[bucketIndex]; - table[bucketIndex] = new Entry<>(hash, key, value, e); + table[bucketIndex] = newEntry(hash, key, value, e); size++; + + if (checkIfNeedTree) { + int listSize = 0; + for (e = (Entry) table[bucketIndex]; e != null; e = (Entry)e.next) { + listSize++; + if (listSize >= TreeBin.TREE_THRESHOLD) { // Convert to TreeBin + if (comparableClassFor(key) != null) { + TreeBin t = new TreeBin(); + t.populate((Entry)table[bucketIndex]); + table[bucketIndex] = t; + } + break; + } + } + } } + /* + * Factory method to create a new Entry object. + */ + Entry newEntry(int hash, K key, V value, Object next) { + return new HashMap.Entry<>(hash, key, value, next); + } + + private abstract class HashIterator implements Iterator { - Entry next; // next entry to return + Object next; // next entry to return, an Entry or a TreeNode int expectedModCount; // For fast-fail int index; // current slot - Entry current; // current entry + Object current; // current entry, an Entry or a TreeNode HashIterator() { expectedModCount = modCount; if (size > 0) { // advance to first entry - Entry[] t = table; - while (index < t.length && (next = t[index++]) == null) - ; + if (nullKeyEntry != null) { + // assert nullKeyEntry.next == null; + // This works with nextEntry(): nullKeyEntry isa Entry, and + // e.next will be null, so we'll hit the findNextBin() call. 
+ next = nullKeyEntry; + } else { + findNextBin(); + } } } @@ -1135,19 +2286,28 @@ public class HashMap @SuppressWarnings("unchecked") final Entry nextEntry() { - if (modCount != expectedModCount) + if (modCount != expectedModCount) { throw new ConcurrentModificationException(); - Entry e = next; + } + Object e = next; + Entry retVal; + if (e == null) throw new NoSuchElementException(); - if ((next = e.next) == null) { - Entry[] t = table; - while (index < t.length && (next = t[index++]) == null) - ; + if (e instanceof Entry) { + retVal = (Entry)e; + next = ((Entry)e).next; + } else { // TreeBin + retVal = (Entry)((TreeNode)e).entry; + next = retVal.next; + } + + if (next == null) { // Move to next bin + findNextBin(); } current = e; - return (Entry)e; + return retVal; } public void remove() { @@ -1155,11 +2315,33 @@ public class HashMap throw new IllegalStateException(); if (modCount != expectedModCount) throw new ConcurrentModificationException(); - Object k = current.key; + K k; + + if (current instanceof Entry) { + k = ((Entry)current).key; + } else { + k = ((Entry)((TreeNode)current).entry).key; + + } current = null; HashMap.this.removeEntryForKey(k); expectedModCount = modCount; } + + /* + * Set 'next' to the first entry of the next non-empty bin in the table + */ + private void findNextBin() { + // assert next == null; + Object[] t = table; + + while (index < t.length && (next = t[index++]) == null) + ; + if (next instanceof HashMap.TreeBin) { // Point to the first TreeNode + next = ((TreeBin) next).first; + // assert next != null; // There should be no empty TreeBins + } + } } private final class ValueIterator extends HashIterator { @@ -1357,7 +2539,7 @@ public class HashMap if (table==EMPTY_TABLE) { s.writeInt(roundUpToPowerOf2(threshold)); } else { - s.writeInt(table.length); + s.writeInt(table.length); } // Write out size (number of Mappings) @@ -1389,8 +2571,10 @@ public class HashMap } // set other fields that need values - Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET, - sun.misc.Hashing.randomHashSeed(this)); + if (Holder.USE_HASHSEED) { + Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET, + sun.misc.Hashing.randomHashSeed(this)); + } table = EMPTY_TABLE; // Read in number of buckets @@ -1404,9 +2588,9 @@ public class HashMap // capacity chosen by number of mappings and desired load (if >= 0.25) int capacity = (int) Math.min( - mappings * Math.min(1 / loadFactor, 4.0f), - // we have limits... - HashMap.MAXIMUM_CAPACITY); + mappings * Math.min(1 / loadFactor, 4.0f), + // we have limits... + HashMap.MAXIMUM_CAPACITY); // allocate the bucket array; if (mappings > 0) { @@ -1420,9 +2604,9 @@ public class HashMap // Read the keys and values, and put the mappings in the HashMap for (int i=0; i */ static class HashMapSpliterator { final HashMap map; - HashMap.Entry current; // current node + Object current; // current node, can be Entry or TreeNode int index; // current index, modified on advance/split int fence; // one past last index int est; // size estimate int expectedModCount; // for comodification checks + boolean acceptedNull; // Have we accepted the null key? + // Without this, we can't distinguish + // between being at the very beginning (and + // needing to accept null), or being at the + // end of the list in bin 0. In both cases, + // current == null && index == 0. 
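The null key threads through several of the hunks above: compute, merge, and the removal paths route it through nullKeyEntry instead of hashing it to bin 0, and the spliterators use the acceptedNull flag to report it exactly once even after a split. A small, illustration-only program against the public HashMap API that exercises those paths:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Spliterator;
    import java.util.concurrent.atomic.AtomicInteger;

    class NullKeyDemo {
        public static void main(String[] args) {
            Map<String, Integer> map = new HashMap<>();
            // compute() on the null key goes through nullKeyEntry, not table[0]
            map.compute(null, (k, v) -> v == null ? 1 : v + 1);
            map.compute(null, (k, v) -> v == null ? 1 : v + 1);
            System.out.println(map.get(null));          // 2
            map.put("a", 1);
            map.put("b", 2);

            // After a split, the null key must be reported exactly once in
            // total; acceptedNull marks the spliterator that owns it.
            Spliterator<String> right = map.keySet().spliterator();
            Spliterator<String> left = right.trySplit(); // may be null for tiny maps
            AtomicInteger nulls = new AtomicInteger();
            if (left != null)
                left.forEachRemaining(k -> { if (k == null) nulls.incrementAndGet(); });
            right.forEachRemaining(k -> { if (k == null) nulls.incrementAndGet(); });
            System.out.println(nulls.get());            // 1

            // Returning null removes the mapping (the removeNullKey() path)
            map.compute(null, (k, v) -> null);
            System.out.println(map.containsKey(null));  // false
        }
    }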
HashMapSpliterator(HashMap m, int origin, int fence, int est, @@ -1450,6 +2640,7 @@ public class HashMap this.fence = fence; this.est = est; this.expectedModCount = expectedModCount; + this.acceptedNull = false; } final int getFence() { // initialize fence and size on first use @@ -1479,9 +2670,15 @@ public class HashMap public KeySpliterator trySplit() { int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; - return (lo >= mid || current != null) ? null : - new KeySpliterator(map, lo, index = mid, est >>>= 1, - expectedModCount); + if (lo >= mid || current != null) { + return null; + } else { + KeySpliterator retVal = new KeySpliterator(map, lo, + index = mid, est >>>= 1, expectedModCount); + // Only 'this' Spliterator chould check for null. + retVal.acceptedNull = true; + return retVal; + } } @SuppressWarnings("unchecked") @@ -1490,21 +2687,39 @@ public class HashMap if (action == null) throw new NullPointerException(); HashMap m = map; - HashMap.Entry[] tab = (HashMap.Entry[])m.table; + Object[] tab = m.table; if ((hi = fence) < 0) { mc = expectedModCount = m.modCount; hi = fence = tab.length; } else mc = expectedModCount; - if (tab.length >= hi && (i = index) >= 0 && i < (index = hi)) { - HashMap.Entry p = current; + + if (!acceptedNull) { + acceptedNull = true; + if (m.nullKeyEntry != null) { + action.accept(m.nullKeyEntry.key); + } + } + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { + Object p = current; + current = null; do { - if (p == null) + if (p == null) { p = tab[i++]; - else { - action.accept(p.getKey()); - p = p.next; + if (p instanceof HashMap.TreeBin) { + p = ((HashMap.TreeBin)p).first; + } + } else { + HashMap.Entry entry; + if (p instanceof HashMap.Entry) { + entry = (HashMap.Entry)p; + } else { + entry = (HashMap.Entry)((TreeNode)p).entry; + } + action.accept(entry.key); + p = entry.next; } } while (p != null || i < hi); if (m.modCount != mc) @@ -1517,14 +2732,34 @@ public class HashMap int hi; if (action == null) throw new NullPointerException(); - HashMap.Entry[] tab = (HashMap.Entry[])map.table; - if (tab.length >= (hi = getFence()) && index >= 0) { + Object[] tab = map.table; + hi = getFence(); + + if (!acceptedNull) { + acceptedNull = true; + if (map.nullKeyEntry != null) { + action.accept(map.nullKeyEntry.key); + if (map.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + } + if (tab.length >= hi && index >= 0) { while (current != null || index < hi) { - if (current == null) + if (current == null) { current = tab[index++]; - else { - K k = current.getKey(); - current = current.next; + if (current instanceof HashMap.TreeBin) { + current = ((HashMap.TreeBin)current).first; + } + } else { + HashMap.Entry entry; + if (current instanceof HashMap.Entry) { + entry = (HashMap.Entry)current; + } else { + entry = (HashMap.Entry)((TreeNode)current).entry; + } + K k = entry.key; + current = entry.next; action.accept(k); if (map.modCount != expectedModCount) throw new ConcurrentModificationException(); @@ -1551,9 +2786,15 @@ public class HashMap public ValueSpliterator trySplit() { int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; - return (lo >= mid || current != null) ? null : - new ValueSpliterator(map, lo, index = mid, est >>>= 1, - expectedModCount); + if (lo >= mid || current != null) { + return null; + } else { + ValueSpliterator retVal = new ValueSpliterator(map, + lo, index = mid, est >>>= 1, expectedModCount); + // Only 'this' Spliterator chould check for null. 
+ retVal.acceptedNull = true; + return retVal; + } } @SuppressWarnings("unchecked") @@ -1562,21 +2803,39 @@ public class HashMap if (action == null) throw new NullPointerException(); HashMap m = map; - HashMap.Entry[] tab = (HashMap.Entry[])m.table; + Object[] tab = m.table; if ((hi = fence) < 0) { mc = expectedModCount = m.modCount; hi = fence = tab.length; } else mc = expectedModCount; - if (tab.length >= hi && (i = index) >= 0 && i < (index = hi)) { - HashMap.Entry p = current; + + if (!acceptedNull) { + acceptedNull = true; + if (m.nullKeyEntry != null) { + action.accept(m.nullKeyEntry.value); + } + } + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { + Object p = current; + current = null; do { - if (p == null) + if (p == null) { p = tab[i++]; - else { - action.accept(p.getValue()); - p = p.next; + if (p instanceof HashMap.TreeBin) { + p = ((HashMap.TreeBin)p).first; + } + } else { + HashMap.Entry entry; + if (p instanceof HashMap.Entry) { + entry = (HashMap.Entry)p; + } else { + entry = (HashMap.Entry)((TreeNode)p).entry; + } + action.accept(entry.value); + p = entry.next; } } while (p != null || i < hi); if (m.modCount != mc) @@ -1589,14 +2848,34 @@ public class HashMap int hi; if (action == null) throw new NullPointerException(); - HashMap.Entry[] tab = (HashMap.Entry[])map.table; - if (tab.length >= (hi = getFence()) && index >= 0) { + Object[] tab = map.table; + hi = getFence(); + + if (!acceptedNull) { + acceptedNull = true; + if (map.nullKeyEntry != null) { + action.accept(map.nullKeyEntry.value); + if (map.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + } + if (tab.length >= hi && index >= 0) { while (current != null || index < hi) { - if (current == null) + if (current == null) { current = tab[index++]; - else { - V v = current.getValue(); - current = current.next; + if (current instanceof HashMap.TreeBin) { + current = ((HashMap.TreeBin)current).first; + } + } else { + HashMap.Entry entry; + if (current instanceof HashMap.Entry) { + entry = (Entry)current; + } else { + entry = (Entry)((TreeNode)current).entry; + } + V v = entry.value; + current = entry.next; action.accept(v); if (map.modCount != expectedModCount) throw new ConcurrentModificationException(); @@ -1622,9 +2901,15 @@ public class HashMap public EntrySpliterator trySplit() { int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; - return (lo >= mid || current != null) ? null : - new EntrySpliterator(map, lo, index = mid, est >>>= 1, - expectedModCount); + if (lo >= mid || current != null) { + return null; + } else { + EntrySpliterator retVal = new EntrySpliterator(map, + lo, index = mid, est >>>= 1, expectedModCount); + // Only 'this' Spliterator chould check for null. 
+ retVal.acceptedNull = true; + return retVal; + } } @SuppressWarnings("unchecked") @@ -1633,21 +2918,40 @@ public class HashMap if (action == null) throw new NullPointerException(); HashMap m = map; - HashMap.Entry[] tab = (HashMap.Entry[])m.table; + Object[] tab = m.table; if ((hi = fence) < 0) { mc = expectedModCount = m.modCount; hi = fence = tab.length; } else mc = expectedModCount; - if (tab.length >= hi && (i = index) >= 0 && i < (index = hi)) { - HashMap.Entry p = current; + + if (!acceptedNull) { + acceptedNull = true; + if (m.nullKeyEntry != null) { + action.accept(m.nullKeyEntry); + } + } + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { + Object p = current; + current = null; do { - if (p == null) + if (p == null) { p = tab[i++]; - else { - action.accept(p); - p = p.next; + if (p instanceof HashMap.TreeBin) { + p = ((HashMap.TreeBin)p).first; + } + } else { + HashMap.Entry entry; + if (p instanceof HashMap.Entry) { + entry = (HashMap.Entry)p; + } else { + entry = (HashMap.Entry)((TreeNode)p).entry; + } + action.accept(entry); + p = entry.next; + } } while (p != null || i < hi); if (m.modCount != mc) @@ -1660,14 +2964,33 @@ public class HashMap int hi; if (action == null) throw new NullPointerException(); - HashMap.Entry[] tab = (HashMap.Entry[])map.table; - if (tab.length >= (hi = getFence()) && index >= 0) { + Object[] tab = map.table; + hi = getFence(); + + if (!acceptedNull) { + acceptedNull = true; + if (map.nullKeyEntry != null) { + action.accept(map.nullKeyEntry); + if (map.modCount != expectedModCount) + throw new ConcurrentModificationException(); + return true; + } + } + if (tab.length >= hi && index >= 0) { while (current != null || index < hi) { - if (current == null) + if (current == null) { current = tab[index++]; - else { - HashMap.Entry e = current; - current = current.next; + if (current instanceof HashMap.TreeBin) { + current = ((HashMap.TreeBin)current).first; + } + } else { + HashMap.Entry e; + if (current instanceof HashMap.Entry) { + e = (Entry)current; + } else { + e = (Entry)((TreeNode)current).entry; + } + current = e.next; action.accept(e); if (map.modCount != expectedModCount) throw new ConcurrentModificationException(); diff --git a/jdk/src/share/classes/java/util/Hashtable.java b/jdk/src/share/classes/java/util/Hashtable.java index 1e38fcaa43b..a078aa3e207 100644 --- a/jdk/src/share/classes/java/util/Hashtable.java +++ b/jdk/src/share/classes/java/util/Hashtable.java @@ -180,13 +180,27 @@ public class Hashtable */ static final long HASHSEED_OFFSET; + static final boolean USE_HASHSEED; + static { - try { - UNSAFE = sun.misc.Unsafe.getUnsafe(); - HASHSEED_OFFSET = UNSAFE.objectFieldOffset( - Hashtable.class.getDeclaredField("hashSeed")); - } catch (NoSuchFieldException | SecurityException e) { - throw new InternalError("Failed to record hashSeed offset", e); + String hashSeedProp = java.security.AccessController.doPrivileged( + new sun.security.action.GetPropertyAction( + "jdk.map.useRandomSeed")); + boolean localBool = (null != hashSeedProp) + ? 
Boolean.parseBoolean(hashSeedProp) : false; + USE_HASHSEED = localBool; + + if (USE_HASHSEED) { + try { + UNSAFE = sun.misc.Unsafe.getUnsafe(); + HASHSEED_OFFSET = UNSAFE.objectFieldOffset( + Hashtable.class.getDeclaredField("hashSeed")); + } catch (NoSuchFieldException | SecurityException e) { + throw new InternalError("Failed to record hashSeed offset", e); + } + } else { + UNSAFE = null; + HASHSEED_OFFSET = 0; } } } @@ -194,21 +208,24 @@ public class Hashtable /** * A randomizing value associated with this instance that is applied to * hash code of keys to make hash collisions harder to find. + * + * Non-final so it can be set lazily, but be sure not to set more than once. */ - transient final int hashSeed = sun.misc.Hashing.randomHashSeed(this); + transient final int hashSeed; + + /** + * Return an initial value for the hashSeed, or 0 if the random seed is not + * enabled. + */ + final int initHashSeed() { + if (sun.misc.VM.isBooted() && Holder.USE_HASHSEED) { + return sun.misc.Hashing.randomHashSeed(this); + } + return 0; + } private int hash(Object k) { - if (k instanceof String) { - return ((String)k).hash32(); - } - - int h = hashSeed ^ k.hashCode(); - - // This function ensures that hashCodes that differ only by - // constant multiples at each bit position have a bounded - // number of collisions (approximately 8 at default load factor). - h ^= (h >>> 20) ^ (h >>> 12); - return h ^ (h >>> 7) ^ (h >>> 4); + return hashSeed ^ k.hashCode(); } /** @@ -232,6 +249,7 @@ public class Hashtable this.loadFactor = loadFactor; table = new Entry[initialCapacity]; threshold = (int)Math.min(initialCapacity * loadFactor, MAX_ARRAY_SIZE + 1); + hashSeed = initHashSeed(); } /** @@ -1187,8 +1205,10 @@ public class Hashtable s.defaultReadObject(); // set hashMask - Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET, - sun.misc.Hashing.randomHashSeed(this)); + if (Holder.USE_HASHSEED) { + Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET, + sun.misc.Hashing.randomHashSeed(this)); + } // Read the original length of the array and number of elements int origlength = s.readInt(); diff --git a/jdk/src/share/classes/java/util/IntSummaryStatistics.java b/jdk/src/share/classes/java/util/IntSummaryStatistics.java index f179e67478b..fcca3296f85 100644 --- a/jdk/src/share/classes/java/util/IntSummaryStatistics.java +++ b/jdk/src/share/classes/java/util/IntSummaryStatistics.java @@ -159,7 +159,7 @@ public class IntSummaryStatistics implements IntConsumer { */ public String toString() { return String.format( - "%s{count=%d, sum=%d, min=%d, average=%d, max=%d}", + "%s{count=%d, sum=%d, min=%d, average=%f, max=%d}", this.getClass().getSimpleName(), getCount(), getSum(), diff --git a/jdk/src/share/classes/java/util/LinkedHashMap.java b/jdk/src/share/classes/java/util/LinkedHashMap.java index 1693db6c688..100710b1bdd 100644 --- a/jdk/src/share/classes/java/util/LinkedHashMap.java +++ b/jdk/src/share/classes/java/util/LinkedHashMap.java @@ -55,9 +55,9 @@ import java.io.*; * order they were presented.) * *
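The IntSummaryStatistics change earlier in this hunk replaces %d with %f for the average field because getAverage() returns a double; String.format would throw IllegalFormatConversionException when handed a double for a %d conversion. A quick check of the corrected output, using only the public API:

    import java.util.IntSummaryStatistics;
    import java.util.stream.IntStream;

    class AverageFormatDemo {
        public static void main(String[] args) {
            IntSummaryStatistics stats = IntStream.of(1, 2, 3, 4).summaryStatistics();
            // getAverage() is a double (2.5 here), so %f is the correct conversion
            System.out.println(stats);
            // -> IntSummaryStatistics{count=4, sum=10, min=1, average=2.500000, max=4}
        }
    }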

A special {@link #LinkedHashMap(int,float,boolean) constructor} is - * provided to create a linked hash map whose order of iteration is the order - * in which its entries were last accessed, from least-recently accessed to - * most-recently (access-order). This kind of map is well-suited to + * provided to create a LinkedHashMap whose order of iteration is the + * order in which its entries were last accessed, from least-recently accessed + * to most-recently (access-order). This kind of map is well-suited to * building LRU caches. Invoking the put or get method * results in an access to the corresponding entry (assuming it exists after * the invocation completes). The putAll method generates one entry @@ -242,23 +242,6 @@ public class LinkedHashMap header.before = header.after = header; } - /** - * Transfers all entries to new table array. This method is called - * by superclass resize. It is overridden for performance, as it is - * faster to iterate using our linked list. - */ - @Override - @SuppressWarnings("unchecked") - void transfer(HashMap.Entry[] newTable) { - int newCapacity = newTable.length; - for (Entry e = header.after; e != header; e = e.after) { - int index = indexFor(e.hash, newCapacity); - e.next = (HashMap.Entry)newTable[index]; - newTable[index] = e; - } - } - - /** * Returns true if this map maps one or more keys to the * specified value. @@ -320,7 +303,7 @@ public class LinkedHashMap // These fields comprise the doubly linked list used for iteration. Entry before, after; - Entry(int hash, K key, V value, HashMap.Entry next) { + Entry(int hash, K key, V value, Object next) { super(hash, key, value, next); } @@ -344,7 +327,7 @@ public class LinkedHashMap /** * This method is invoked by the superclass whenever the value - * of a pre-existing entry is read by Map.get or modified by Map.set. + * of a pre-existing entry is read by Map.get or modified by Map.put. * If the enclosing Map is access-ordered, it moves the entry * to the end of the list; otherwise, it does nothing. */ @@ -422,8 +405,9 @@ public class LinkedHashMap * allocated entry to get inserted at the end of the linked list and * removes the eldest entry if appropriate. */ - void addEntry(int hash, K key, V value, int bucketIndex) { - super.addEntry(hash, key, value, bucketIndex); + @Override + void addEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) { + super.addEntry(hash, key, value, bucketIndex, checkIfNeedTree); // Remove eldest entry if instructed Entry eldest = header.after; @@ -432,17 +416,14 @@ public class LinkedHashMap } } - /** - * This override differs from addEntry in that it doesn't resize the - * table or remove the eldest entry. 
+ /* + * Create a new LinkedHashMap.Entry and setup the before/after pointers */ - void createEntry(int hash, K key, V value, int bucketIndex) { - @SuppressWarnings("unchecked") - HashMap.Entry old = (HashMap.Entry)table[bucketIndex]; - Entry e = new Entry<>(hash, key, value, old); - table[bucketIndex] = e; - e.addBefore(header); - size++; + @Override + HashMap.Entry newEntry(int hash, K key, V value, Object next) { + Entry newEntry = new Entry<>(hash, key, value, next); + newEntry.addBefore(header); + return newEntry; } /** diff --git a/jdk/src/share/classes/java/util/LongSummaryStatistics.java b/jdk/src/share/classes/java/util/LongSummaryStatistics.java index 3c7b7aee561..0e2da71f8bc 100644 --- a/jdk/src/share/classes/java/util/LongSummaryStatistics.java +++ b/jdk/src/share/classes/java/util/LongSummaryStatistics.java @@ -171,7 +171,7 @@ public class LongSummaryStatistics implements LongConsumer, IntConsumer { */ public String toString() { return String.format( - "%s{count=%d, sum=%d, min=%d, average=%d, max=%d}", + "%s{count=%d, sum=%d, min=%d, average=%f, max=%d}", this.getClass().getSimpleName(), getCount(), getSum(), diff --git a/jdk/src/share/classes/java/util/PrimitiveIterator.java b/jdk/src/share/classes/java/util/PrimitiveIterator.java index d4e032e7430..f05d9e06605 100644 --- a/jdk/src/share/classes/java/util/PrimitiveIterator.java +++ b/jdk/src/share/classes/java/util/PrimitiveIterator.java @@ -91,6 +91,7 @@ public interface PrimitiveIterator extends Iterator { * @throws NullPointerException if the specified action is null */ default void forEachRemaining(IntConsumer action) { + Objects.requireNonNull(action); while (hasNext()) action.accept(nextInt()); } @@ -123,6 +124,8 @@ public interface PrimitiveIterator extends Iterator { forEachRemaining((IntConsumer) action); } else { + // The method reference action::accept is never null + Objects.requireNonNull(action); if (Tripwire.ENABLED) Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfInt.forEachRemainingInt(action::accept)"); forEachRemaining((IntConsumer) action::accept); @@ -162,6 +165,7 @@ public interface PrimitiveIterator extends Iterator { * @throws NullPointerException if the specified action is null */ default void forEachRemaining(LongConsumer action) { + Objects.requireNonNull(action); while (hasNext()) action.accept(nextLong()); } @@ -194,6 +198,8 @@ public interface PrimitiveIterator extends Iterator { forEachRemaining((LongConsumer) action); } else { + // The method reference action::accept is never null + Objects.requireNonNull(action); if (Tripwire.ENABLED) Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfLong.forEachRemainingLong(action::accept)"); forEachRemaining((LongConsumer) action::accept); @@ -232,6 +238,7 @@ public interface PrimitiveIterator extends Iterator { * @throws NullPointerException if the specified action is null */ default void forEachRemaining(DoubleConsumer action) { + Objects.requireNonNull(action); while (hasNext()) action.accept(nextDouble()); } @@ -265,6 +272,8 @@ public interface PrimitiveIterator extends Iterator { forEachRemaining((DoubleConsumer) action); } else { + // The method reference action::accept is never null + Objects.requireNonNull(action); if (Tripwire.ENABLED) Tripwire.trip(getClass(), "{0} calling PrimitiveIterator.OfDouble.forEachRemainingDouble(action::accept)"); forEachRemaining((DoubleConsumer) action::accept); diff --git a/jdk/src/share/classes/java/util/Spliterator.java b/jdk/src/share/classes/java/util/Spliterator.java index 
5ed0b1243de..10c551a5921 100644 --- a/jdk/src/share/classes/java/util/Spliterator.java +++ b/jdk/src/share/classes/java/util/Spliterator.java @@ -394,9 +394,9 @@ public interface Spliterator { * Convenience method that returns {@link #estimateSize()} if this * Spliterator is {@link #SIZED}, else {@code -1}. * @implSpec - * The default returns the result of {@code estimateSize()} if the - * Spliterator reports a characteristic of {@code SIZED}, and {@code -1} - * otherwise. + * The default implementation returns the result of {@code estimateSize()} + * if the Spliterator reports a characteristic of {@code SIZED}, and + * {@code -1} otherwise. * * @return the exact size, if known, else {@code -1}. */ diff --git a/jdk/src/share/classes/java/util/StringJoiner.java b/jdk/src/share/classes/java/util/StringJoiner.java index bb6f4c6deba..3157aa3a2db 100644 --- a/jdk/src/share/classes/java/util/StringJoiner.java +++ b/jdk/src/share/classes/java/util/StringJoiner.java @@ -29,14 +29,6 @@ package java.util; * by a delimiter and optionally starting with a supplied prefix * and ending with a supplied suffix. *

- * For example, the String {@code "[George:Sally:Fred]"} may
- * be constructed as follows:
- *
- * <pre> {@code
- *     StringJoiner sj = new StringJoiner(":", "[", "]");
- *     sj.add("George").add("Sally").add("Fred");
- *     String desiredString = sj.toString();
- * }</pre>
- *

* Prior to adding something to the {@code StringJoiner}, its * {@code sj.toString()} method will, by default, return {@code prefix + suffix}. * However, if the {@code setEmptyValue} method is called, the {@code emptyValue} @@ -45,17 +37,28 @@ package java.util; * "{}", where the {@code prefix} is "{", the * {@code suffix} is "}" and nothing has been added to the * {@code StringJoiner}. - *
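A compact illustration of the emptyValue rule just described, using the public StringJoiner API:

    import java.util.StringJoiner;

    class EmptyValueDemo {
        public static void main(String[] args) {
            StringJoiner sj = new StringJoiner(":", "[", "]");
            System.out.println(sj);     // [] : prefix + suffix while empty
            sj.setEmptyValue("EMPTY");
            System.out.println(sj);     // EMPTY
            sj.add("a");
            System.out.println(sj);     // [a] : emptyValue no longer applies
        }
    }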

- * <p>
- * A {@code StringJoiner} may be employed to create formatted output from a
- * collection using lambda expressions as shown in the following example.
+ *
+ * @apiNote
+ * <p>The String {@code "[George:Sally:Fred]"} may be constructed as follows:
 *
 * <pre> {@code
- *     List people = ...
- *     String commaSeparatedNames =
- *         people.map(p -> p.getName()).into(new StringJoiner(", ")).toString();
+ * StringJoiner sj = new StringJoiner(":", "[", "]");
+ * sj.add("George").add("Sally").add("Fred");
+ * String desiredString = sj.toString();
+ * }</pre>
+ * <p>
+ * A {@code StringJoiner} may be employed to create formatted output from a
+ * {@link java.util.stream.Stream} using
+ * {@link java.util.stream.Collectors#toStringJoiner}. For example:
+ *
+ * <pre> {@code
+ * List<Integer> numbers = Arrays.asList(1, 2, 3, 4);
+ * String commaSeparatedNumbers = numbers.stream()
+ *     .map(i -> i.toString())
+ *     .collect(Collectors.toStringJoiner(", ")).toString();
 * }</pre>
* - * @author Jim Gish + * @see java.util.stream.Collectors#toStringJoiner * @since 1.8 */ public final class StringJoiner { diff --git a/jdk/src/share/classes/java/util/WeakHashMap.java b/jdk/src/share/classes/java/util/WeakHashMap.java index 77f9e094c1a..183909afd1e 100644 --- a/jdk/src/share/classes/java/util/WeakHashMap.java +++ b/jdk/src/share/classes/java/util/WeakHashMap.java @@ -187,11 +187,37 @@ public class WeakHashMap */ int modCount; + private static class Holder { + static final boolean USE_HASHSEED; + + static { + String hashSeedProp = java.security.AccessController.doPrivileged( + new sun.security.action.GetPropertyAction( + "jdk.map.useRandomSeed")); + boolean localBool = (null != hashSeedProp) + ? Boolean.parseBoolean(hashSeedProp) : false; + USE_HASHSEED = localBool; + } + } + /** * A randomizing value associated with this instance that is applied to * hash code of keys to make hash collisions harder to find. + * + * Non-final so it can be set lazily, but be sure not to set more than once. */ - transient final int hashSeed = sun.misc.Hashing.randomHashSeed(this); + transient int hashSeed; + + /** + * Initialize the hashing mask value. + */ + final void initHashSeed() { + if (sun.misc.VM.isBooted() && Holder.USE_HASHSEED) { + // Do not set hashSeed more than once! + // assert hashSeed == 0; + hashSeed = sun.misc.Hashing.randomHashSeed(this); + } + } @SuppressWarnings("unchecked") private Entry[] newTable(int n) { @@ -223,6 +249,7 @@ public class WeakHashMap table = newTable(capacity); this.loadFactor = loadFactor; threshold = (int)(capacity * loadFactor); + initHashSeed(); } /** @@ -298,10 +325,7 @@ public class WeakHashMap * in lower bits. */ final int hash(Object k) { - if (k instanceof String) { - return ((String) k).hash32(); - } - int h = hashSeed ^ k.hashCode(); + int h = hashSeed ^ k.hashCode(); // This function ensures that hashCodes that differ only by // constant multiples at each bit position have a bounded @@ -1076,9 +1100,10 @@ public class WeakHashMap } else mc = expectedModCount; - if (tab.length >= hi && (i = index) >= 0 && i < hi) { - index = hi; + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { WeakHashMap.Entry p = current; + current = null; // exhaust do { if (p == null) p = tab[i++]; @@ -1155,9 +1180,10 @@ public class WeakHashMap } else mc = expectedModCount; - if (tab.length >= hi && (i = index) >= 0 && i < hi) { - index = hi; + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { WeakHashMap.Entry p = current; + current = null; // exhaust do { if (p == null) p = tab[i++]; @@ -1232,9 +1258,10 @@ public class WeakHashMap } else mc = expectedModCount; - if (tab.length >= hi && (i = index) >= 0 && i < hi) { - index = hi; + if (tab.length >= hi && (i = index) >= 0 && + (i < (index = hi) || current != null)) { WeakHashMap.Entry p = current; + current = null; // exhaust do { if (p == null) p = tab[i++]; diff --git a/jdk/src/share/classes/java/util/concurrent/ConcurrentHashMap.java b/jdk/src/share/classes/java/util/concurrent/ConcurrentHashMap.java index a80760d43ff..e62ef35916e 100644 --- a/jdk/src/share/classes/java/util/concurrent/ConcurrentHashMap.java +++ b/jdk/src/share/classes/java/util/concurrent/ConcurrentHashMap.java @@ -34,14 +34,47 @@ */ package java.util.concurrent; -import java.io.ObjectInputStream; -import java.util.concurrent.locks.*; -import java.util.*; import java.io.Serializable; +import java.io.ObjectStreamField; +import java.lang.reflect.ParameterizedType; +import 
java.lang.reflect.Type; +import java.util.AbstractMap; +import java.util.Arrays; +import java.util.Collection; +import java.util.Comparator; +import java.util.ConcurrentModificationException; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.Spliterator; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ForkJoinPool; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.locks.StampedLock; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.BinaryOperator; +import java.util.function.Consumer; +import java.util.function.DoubleBinaryOperator; +import java.util.function.Function; +import java.util.function.IntBinaryOperator; +import java.util.function.LongBinaryOperator; +import java.util.function.ToDoubleBiFunction; +import java.util.function.ToDoubleFunction; +import java.util.function.ToIntBiFunction; +import java.util.function.ToIntFunction; +import java.util.function.ToLongBiFunction; +import java.util.function.ToLongFunction; +import java.util.stream.Stream; /** * A hash table supporting full concurrency of retrievals and - * adjustable expected concurrency for updates. This class obeys the + * high expected concurrency for updates. This class obeys the * same functional specification as {@link java.util.Hashtable}, and * includes versions of methods corresponding to each method of * {@code Hashtable}. However, even though all operations are @@ -51,35 +84,61 @@ import java.io.Serializable; * interoperable with {@code Hashtable} in programs that rely on its * thread safety but not on its synchronization details. * - *

Retrieval operations (including {@code get}) generally do not - * block, so may overlap with update operations (including - * {@code put} and {@code remove}). Retrievals reflect the results - * of the most recently completed update operations holding - * upon their onset. For aggregate operations such as {@code putAll} - * and {@code clear}, concurrent retrievals may reflect insertion or - * removal of only some entries. Similarly, Iterators and - * Enumerations return elements reflecting the state of the hash table - * at some point at or since the creation of the iterator/enumeration. - * They do not throw {@link ConcurrentModificationException}. - * However, iterators are designed to be used by only one thread at a time. + *

Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently completed update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * happens-before relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. * - *
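The happens-before sentence added above is the substantive strengthening of this paragraph. A small sketch of what it licenses; the spin loop is for illustration only:

    import java.util.concurrent.ConcurrentHashMap;

    class VisibilityDemo {
        static final ConcurrentHashMap<String, int[]> map = new ConcurrentHashMap<>();

        public static void main(String[] args) throws InterruptedException {
            Thread writer = new Thread(() -> {
                int[] payload = {42};      // plain, unsynchronized write...
                map.put("k", payload);     // ...published by the put()
            });
            writer.start();
            int[] seen;
            while ((seen = map.get("k")) == null) { } // spin until visible
            // The put() happens-before this non-null get(), so reading the
            // array element is safe and must observe 42.
            System.out.println(seen[0]);
            writer.join();
        }
    }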

The allowed concurrency among update operations is guided by - * the optional {@code concurrencyLevel} constructor argument - * (default {@code 16}), which is used as a hint for internal sizing. The - * table is internally partitioned to try to permit the indicated - * number of concurrent updates without contention. Because placement - * in hash tables is essentially random, the actual concurrency will - * vary. Ideally, you should choose a value to accommodate as many - * threads as will ever concurrently modify the table. Using a - * significantly higher value than you need can waste space and time, - * and a significantly lower value can lead to thread contention. But - * overestimates and underestimates within an order of magnitude do - * not usually have much noticeable impact. A value of one is - * appropriate when it is known that only one thread will modify and - * all others will only read. Also, resizing this or any other kind of - * hash table is a relatively slow operation, so, when possible, it is - * a good idea to provide estimates of expected table sizes in - * constructors. + *

The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. To ameliorate impact, when keys are {@link Comparable}, + * this class may use comparison order among keys to help break ties. + * + *
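A short sketch of the sizing advice above; the capacities are illustrative, and the three-argument form shows the legacy concurrencyLevel hint mentioned in the paragraph:

    import java.util.concurrent.ConcurrentHashMap;

    class PresizeDemo {
        public static void main(String[] args) {
            // Expecting ~10,000 mappings: pass a size estimate up front rather
            // than growing from the default capacity through repeated resizes.
            ConcurrentHashMap<String, Long> byName = new ConcurrentHashMap<>(10_000);
            // Optional density and (legacy) concurrency-level hints:
            ConcurrentHashMap<String, Long> dense =
                new ConcurrentHashMap<>(10_000, 0.75f, 1);
            byName.put("a", 1L);
            dense.put("b", 2L);
            System.out.println(byName.size() + dense.size()); // 2
        }
    }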

A {@link Set} projection of a ConcurrentHashMap may be created
 + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
 + * (using {@link #keySet(Object)}) when only keys are of interest, and the
 + * mapped values are (perhaps transiently) not used or all take the
 + * same mapping value.
 + *
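Both projections mentioned above, sketched with the public factory and view methods:

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    class KeySetViewDemo {
        public static void main(String[] args) {
            // A concurrent Set backed by a ConcurrentHashMap with Boolean values
            Set<String> set = ConcurrentHashMap.newKeySet();
            set.add("a");
            // A Set view over an existing map: additions use the given default value
            ConcurrentHashMap<String, Boolean> map = new ConcurrentHashMap<>();
            Set<String> view = map.keySet(Boolean.TRUE);
            view.add("b");
            System.out.println(map); // {b=true}
        }
    }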

A ConcurrentHashMap can be used as a scalable frequency map (a
 + * form of histogram or multiset) by using {@link
 + * java.util.concurrent.atomic.LongAdder} values and initializing via
 + * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
 + * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
 + * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
 *
 *
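The same idiom as a complete program; note that computeIfAbsent takes the key plus a mapping function:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.LongAdder;

    class FreqMapDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, LongAdder> freqs = new ConcurrentHashMap<>();
            for (String w : new String[] {"a", "b", "a"}) {
                // creates the LongAdder on first sight of the key, then counts
                freqs.computeIfAbsent(w, k -> new LongAdder()).increment();
            }
            System.out.println(freqs.get("a").sum()); // 2
        }
    }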

This class and its views and iterators implement all of the
 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
 * interfaces.

Like {@link Hashtable} but unlike {@link HashMap}, this class * does not allow {@code null} to be used as a key or value. * + *

ConcurrentHashMaps support a set of sequential and parallel bulk + * operations that, unlike most {@link Stream} methods, are designed + * to be safely, and often sensibly, applied even with maps that are + * being concurrently updated by other threads; for example, when + * computing a snapshot summary of the values in a shared registry. + * There are three kinds of operation, each with four forms, accepting + * functions with Keys, Values, Entries, and (Key, Value) arguments + * and/or return values. Because the elements of a ConcurrentHashMap + * are not ordered in any particular way, and may be processed in + * different orders in different parallel executions, the correctness + * of supplied functions should not depend on any ordering, or on any + * other objects or values that may transiently change while + * computation is in progress; and except for forEach actions, should + * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry} + * objects do not support method {@code setValue}. + * + *

+ * <ul>
+ * <li> forEach: Perform a given action on each element.
+ * A variant form applies a given transformation on each element
+ * before performing the action.</li>
+ *
+ * <li> search: Return the first available non-null result of
+ * applying a given function on each element; skipping further
+ * search when a result is found.</li>
+ *
+ * <li> reduce: Accumulate each element. The supplied reduction
+ * function cannot rely on ordering (more formally, it should be
+ * both associative and commutative). There are five variants:
+ *
+ * <ul>
+ *
+ * <li> Plain reductions. (There is not a form of this method for
+ * (key, value) function arguments since there is no corresponding
+ * return type.)</li>
+ *
+ * <li> Mapped reductions that accumulate the results of a given
+ * function applied to each element.</li>
+ *
+ * <li> Reductions to scalar doubles, longs, and ints, using a
+ * given basis value.</li>
+ *
+ * </ul>
+ * </li>
+ * </ul>

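One example of each kind of bulk operation, using public JDK 8 signatures; the first argument is the parallelismThreshold explained in the next paragraph:

    import java.util.concurrent.ConcurrentHashMap;

    class BulkOpsDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("a", 1);
            map.put("b", 2);
            long threshold = Long.MAX_VALUE;     // run sequentially
            // forEach: perform an action on each (key, value)
            map.forEach(threshold, (k, v) -> System.out.println(k + "=" + v));
            // search: the first non-null result wins
            String hit = map.search(threshold, (k, v) -> v == 2 ? k : null);
            // reduce: a mapped reduction of the values to their sum
            int sum = map.reduceValuesToInt(threshold, v -> v, 0, Integer::sum);
            System.out.println(hit + " " + sum); // b 3
        }
    }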
These bulk operations accept a {@code parallelismThreshold} + * argument. Methods proceed sequentially if the current map size is + * estimated to be less than the given threshold. Using a value of + * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value + * of {@code 1} results in maximal parallelism by partitioning into + * enough subtasks to fully utilize the {@link + * ForkJoinPool#commonPool()} that is used for all parallel + * computations. Normally, you would initially choose one of these + * extreme values, and then measure performance of using in-between + * values that trade off overhead versus throughput. + * + *

The concurrency properties of bulk operations follow + * from those of ConcurrentHashMap: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + *
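A sketch of the identity-basis rule, using reduceValuesToInt, one of the scalar-reduction forms listed above:

    import java.util.concurrent.ConcurrentHashMap;

    class BasisDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("a", 3);
            map.put("b", 7);
            // The basis is the identity of the reduction: MAX_VALUE for min,
            // so an empty map yields the basis rather than a bogus value.
            int min = map.reduceValuesToInt(Long.MAX_VALUE, v -> v,
                                            Integer.MAX_VALUE, Math::min);
            System.out.println(min); // 3
        }
    }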

Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + *

Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + *
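The "key for the greatest value" use case mentioned above, sketched with reduceEntries:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class GreatestValueDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("a", 1);
            map.put("b", 9);
            // reduceEntries keeps key and value together, so the key of the
            // greatest value falls out of the reduction directly.
            Map.Entry<String, Integer> best = map.reduceEntries(Long.MAX_VALUE,
                (e1, e2) -> e1.getValue() >= e2.getValue() ? e1 : e2);
            System.out.println(best.getKey()); // b
        }
    }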

Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + *

Speedups for parallel compared to sequential forms are common + * but not guaranteed. Parallel operations involving brief functions + * on small maps may execute more slowly than sequential forms if the + * underlying work to parallelize the computation is more expensive + * than the computation itself. Similarly, parallelization may not + * lead to much actual parallelism if all processors are busy + * performing unrelated tasks. + * + *

All arguments to all task methods must be non-null. + * *

This class is a member of the * * Java Collections Framework. @@ -97,735 +264,2373 @@ import java.io.Serializable; * @param the type of keys maintained by this map * @param the type of mapped values */ -public class ConcurrentHashMap extends AbstractMap - implements ConcurrentMap, Serializable { +@SuppressWarnings({"unchecked", "rawtypes", "serial"}) +public class ConcurrentHashMap extends AbstractMap + implements ConcurrentMap, Serializable { + private static final long serialVersionUID = 7249069246763182397L; /* - * The basic strategy is to subdivide the table among Segments, - * each of which itself is a concurrently readable hash table. To - * reduce footprint, all but one segments are constructed only - * when first needed (see ensureSegment). To maintain visibility - * in the presence of lazy construction, accesses to segments as - * well as elements of segment's table must use volatile access, - * which is done via Unsafe within methods segmentAt etc - * below. These provide the functionality of AtomicReferenceArrays - * but reduce the levels of indirection. Additionally, - * volatile-writes of table elements and entry "next" fields - * within locked operations use the cheaper "lazySet" forms of - * writes (via putOrderedObject) because these writes are always - * followed by lock releases that maintain sequential consistency - * of table updates. + * Overview: * - * Historical note: The previous version of this class relied - * heavily on "final" fields, which avoided some volatile reads at - * the expense of a large initial footprint. Some remnants of - * that design (including forced construction of segment 0) exist - * to ensure serialization compatibility. + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node key + * fields can contain special values, they are defined using plain + * Object types (not type "K"). This leads to a lot of explicit + * casting (and the use of class-wide warning suppressions). It + * also allows some of the public methods to be factored into a + * smaller number of internal methods (although sadly not so for + * the five variants of put-related operations). The + * validation-based approach explained below leads to a lot of + * code sprawl because retry-control precludes factoring into + * smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. + * + * We use the top (sign) bit of Node hash fields for control + * purposes -- it is available anyway because of addressing + * constraints. Nodes with negative hash fields are forwarding + * nodes to either TreeBins or resized tables. The lower 31 bits + * of each normal Node's hash field contain a transformation of + * the key's hash code. 
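A sketch of the hash transformation described above; the exact spreading function in this snapshot is an assumption, but the invariant (normal node hashes are non-negative, with the sign bit reserved for control nodes) is as stated:

    final class SpreadSketch {
        // The sign bit is reserved for control nodes (e.g. forwarding nodes),
        // so a normal node's hash keeps only the low 31 bits.
        static final int HASH_BITS = 0x7fffffff;

        // Fold high bits downward so small power-of-two masks still see them,
        // then mask off the sign bit.
        static int spread(int h) {
            return (h ^ (h >>> 16)) & HASH_BITS;
        }

        public static void main(String[] args) {
            System.out.println(spread("key".hashCode()) >= 0); // always true
        }
    }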
+ * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Locking support for these locks relies on builtin + * "synchronized" monitors. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is at least twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). + * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). 
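The bin-size frequencies listed above follow directly from the Poisson probability mass function with parameter 0.5; a short sketch reproduces the table (class name is hypothetical):

public class PoissonBins {
    public static void main(String[] args) {
        double lambda = 0.5;              // mean bin occupancy at the 0.75 threshold
        double p = Math.exp(-lambda);     // P(k = 0)
        for (int k = 0; k <= 8; k++) {
            System.out.printf("%d: %.8f%n", k, p);
            p = p * lambda / (k + 1);     // exp(-l) * l^k / k! computed incrementally
        }
    }
}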
Any thread + * noticing an overfull bin may assist in resizing after the + * initiating thread allocates and sets up the replacement + * array. However, rather than stalling, these other threads may + * proceed with insertions etc. The use of TreeBins shields us + * from the worst case effects of overfilling while resizes are in + * progress. Resizing proceeds by transferring bins, one by one, + * from the table to the next table. To enable concurrency, the + * next table must be (incrementally) prefilled with place-holders + * serving as reverse forwarders to the old table. Because we are + * using power-of-two expansion, the elements from each bin must + * either stay at same index, or move with a power of two + * offset. We eliminate unnecessary node creation by catching + * cases where old nodes can be reused because their next fields + * won't change. On average, only about one-sixth of them need + * cloning when a table doubles. The nodes they replace will be + * garbage collectable as soon as they are no longer referenced by + * any reader thread that may be in the midst of concurrently + * traversing table. Upon transfer, the old table bin contains + * only a special forwarding node (with hash field "MOVED") that + * contains the next table as its key. On encountering a + * forwarding node, access and update operations restart, using + * the new table. + * + * Each bin transfer requires its bin lock, which can stall + * waiting for locks while resizing. However, because other + * threads can join in and help resize rather than contend for + * locks, average aggregate waits become shorter as resizing + * progresses. The transfer operation must also ensure that all + * accessible bins in both the old and new table are usable by any + * traversal. This is arranged by proceeding from the last bin + * (table.length - 1) up towards the first. Upon seeing a + * forwarding node, traversals (see class Traverser) arrange to + * move to the new table without revisiting nodes. However, to + * ensure that no intervening nodes are skipped, bin splitting can + * only begin after the associated reverse-forwarders are in + * place. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a specialization of + * LongAdder. We need to incorporate a specialization rather than + * just use a LongAdder in order to access implicit + * contention-sensing that leads to creation of multiple + * Cells. The counter mechanics avoid contention on + * updates but can encounter cache thrashing if read too + * frequently during concurrent access. To avoid reading so often, + * resizing under contention is attempted only upon adding to a + * bin already holding two or more nodes. 
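The stay-or-move-with-a-power-of-two-offset property that the transfer step relies on can be checked in isolation. In this sketch (hypothetical class, illustrative hash values), the index after doubling equals the old index, plus n exactly when bit (h & n) is set:

public class ResizeSplit {
    public static void main(String[] args) {
        int n = 16;                            // old table capacity (a power of two)
        int[] hashes = {0x21, 0x35, 0x4A, 0x7F};
        for (int h : hashes) {
            int oldIdx = h & (n - 1);          // index in the old table
            int newIdx = h & (2 * n - 1);      // index after the table doubles
            // newIdx is oldIdx, or oldIdx + n, depending on bit (h & n)
            System.out.printf("h=0x%02X old=%d new=%d (h & n)=%d%n",
                              h, oldIdx, newIdx, h & n);
        }
    }
}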
Under uniform hash + * distributions, the probability of this occurring at threshold + * is around 13%, meaning that only about 1 in 8 puts check + * threshold (and after resizing, many fewer do so). The bulk + * putAll operation further reduces contention by only committing + * count updates upon these size checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments referring to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. */ /* ---------------- Constants -------------- */ /** - * The default initial capacity for this table, - * used when not otherwise specified in a constructor. + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32-bit hash fields are used for + * control purposes. */ - static final int DEFAULT_INITIAL_CAPACITY = 16; + private static final int MAXIMUM_CAPACITY = 1 << 30; /** - * The default load factor for this table, used when not - * otherwise specified in a constructor. + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. */ - static final float DEFAULT_LOAD_FACTOR = 0.75f; + private static final int DEFAULT_CAPACITY = 16; /** - * The default concurrency level for this table, used when not - * otherwise specified in a constructor. + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. */ - static final int DEFAULT_CONCURRENCY_LEVEL = 16; + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; /** - * The maximum capacity, used if a higher value is implicitly - * specified by either of the constructors with arguments. MUST - * be a power of two <= 1<<30 to ensure that entries are indexable - * using ints. + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. */ - static final int MAXIMUM_CAPACITY = 1 << 30; + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; /** - * The minimum capacity for per-segment tables. Must be a power - * of two, at least two to avoid immediate resizing on next use - * after lazy construction. + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. */ - static final int MIN_SEGMENT_TABLE_CAPACITY = 2; + private static final float LOAD_FACTOR = 0.75f; /** - * The maximum number of segments to allow; used to bound - * constructor arguments. Must be power of two less than 1 << 24. + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. */ - static final int MAX_SEGMENTS = 1 << 16; // slightly conservative + private static final int TREE_THRESHOLD = 8; /** - * Number of unsynchronized retries in size and containsValue - * methods before resorting to locking.
This is used to avoid - * unbounded retries if tables undergo continuous modification - * which would make it impossible to obtain an accurate result. + * Minimum number of rebinnings per transfer step. Ranges are + * subdivided to allow multiple resizer threads. This value + * serves as a lower bound to avoid resizers encountering + * excessive memory contention. The value should be at least + * DEFAULT_CAPACITY. */ - static final int RETRIES_BEFORE_LOCK = 2; + private static final int MIN_TRANSFER_STRIDE = 16; + + /* + * Encodings for Node hash fields. See above for explanation. + */ + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash + + /** Number of CPUS, to place bounds on some sizings */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** For serialization compatibility. */ + private static final ObjectStreamField[] serialPersistentFields = { + new ObjectStreamField("segments", Segment[].class), + new ObjectStreamField("segmentMask", Integer.TYPE), + new ObjectStreamField("segmentShift", Integer.TYPE) + }; + + /** + * A padded cell for distributing counts. Adapted from LongAdder + * and Striped64. See their internal docs for explanation. + */ + @sun.misc.Contended static final class Cell { + volatile long value; + Cell(long x) { value = x; } + } /* ---------------- Fields -------------- */ /** - * A randomizing value associated with this instance that is applied to - * hash code of keys to make hash collisions harder to find. + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. */ - private transient final int hashSeed = sun.misc.Hashing.randomHashSeed(this); + transient volatile Node[] table; /** - * Mask value for indexing into segments. The upper bits of a - * key's hash code are used to choose the segment. + * The next table to use; non-null only while resizing. */ - final int segmentMask; + private transient volatile Node[] nextTable; /** - * Shift value for indexing within segments. + * Base counter value, used mainly when there is no contention, + * but also as a fallback during table initialization + * races. Updated via CAS. */ - final int segmentShift; + private transient volatile long baseCount; /** - * The segments, each of which is a specialized hash table. + * Table initialization and resizing control. When negative, the + * table is being initialized or resized: -1 for initialization, + * else -(1 + the number of active resizing threads). Otherwise, + * when table is null, holds the initial table size to use upon + * creation, or 0 for default. After initialization, holds the + * next element count value upon which to resize the table. */ - final Segment[] segments; - - transient Set keySet; - transient Set> entrySet; - transient Collection values; + private transient volatile int sizeCtl; /** - * ConcurrentHashMap list entry. Note that this is never exported - * out as a user-visible Map.Entry. + * The next table index (plus one) to split while resizing. */ - static final class HashEntry { + private transient volatile int transferIndex; + + /** + * The least available table index to split while resizing. + */ + private transient volatile int transferOrigin; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. + */ + private transient volatile int cellsBusy; + + /** + * Table of counter cells. When non-null, size is a power of 2. 
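The Cell mechanism above is a trimmed-down LongAdder. The following is a simplified sketch of the same striping idea, substituting plain AtomicLong stripes and ThreadLocalRandom.nextInt for the padded @sun.misc.Contended cells and per-thread probes used here; it illustrates the concept, not this patch's actual mechanics:

import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;

class StripedCounter {
    private final AtomicLong base = new AtomicLong();  // fast path when uncontended
    private final AtomicLong[] cells;                  // stripes that absorb contention

    StripedCounter(int stripes) {
        cells = new AtomicLong[stripes];
        for (int i = 0; i < stripes; i++)
            cells[i] = new AtomicLong();
    }

    void add(long x) {
        long b = base.get();
        if (!base.compareAndSet(b, b + x)) {           // lost a race on the base counter
            int i = ThreadLocalRandom.current().nextInt(cells.length);
            cells[i].addAndGet(x);                     // spill onto a random stripe
        }
    }

    long sum() {                                       // like sumCount(): base plus all cells
        long s = base.get();
        for (AtomicLong c : cells)
            s += c.get();
        return s;                                      // a moving snapshot, not an atomic total
    }
}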
+ */ + private transient volatile Cell[] counterCells; + + // views + private transient KeySetView keySet; + private transient ValuesView values; + private transient EntrySetView entrySet; + + /* ---------------- Table element access -------------- */ + + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. Uses are + * null checked by callers, and implicitly bounds-checked, relying + * on the invariants that tab arrays have non-zero size, and all + * indices are masked with (tab.length - 1) which is never + * negative and always less than length. Note that, to be correct + * wrt arbitrary concurrency errors by users, bounds checks must + * operate on local variables, which accounts for some odd-looking + * inline assignments below. + */ + + static final Node tabAt(Node[] tab, int i) { + return (Node)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE); + } + + static final boolean casTabAt(Node[] tab, int i, + Node c, Node v) { + return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v); + } + + static final void setTabAt(Node[] tab, int i, Node v) { + U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v); + } + + /* ---------------- Nodes -------------- */ + + /** + * Key-value entry. This class is never exported out as a + * user-mutable Map.Entry (i.e., one supporting setValue; see + * MapEntry below), but can be used for read-only traversals used + * in bulk tasks. Nodes with a hash field of MOVED are special, + * and do not contain user keys or values (and are never + * exported). Otherwise, keys and vals are never null. + */ + static class Node implements Map.Entry { final int hash; - final K key; - volatile V value; - volatile HashEntry next; + final Object key; + volatile V val; + Node next; - HashEntry(int hash, K key, V value, HashEntry next) { + Node(int hash, Object key, V val, Node next) { this.hash = hash; this.key = key; - this.value = value; + this.val = val; this.next = next; } - /** - * Sets next field with volatile write semantics. (See above - * about use of putOrderedObject.) 
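The Unsafe-based table accessors behave like AtomicReferenceArray operations with one less level of indirection. An equivalent over the public API would look like the following sketch (hypothetical TableAccess class; callers of the real methods additionally mask indices with length - 1):

import java.util.concurrent.atomic.AtomicReferenceArray;

class TableAccess<V> {
    static final class Node<V> { }                 // stand-in for the patch's Node

    final AtomicReferenceArray<Node<V>> tab = new AtomicReferenceArray<>(16);

    Node<V> tabAt(int i) {                         // volatile read of slot i
        return tab.get(i);
    }

    boolean casTabAt(int i, Node<V> c, Node<V> v) {  // CAS slot i from c to v
        return tab.compareAndSet(i, c, v);
    }

    void setTabAt(int i, Node<V> v) {              // volatile write of slot i
        tab.set(i, v);
    }
}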
- */ - final void setNext(HashEntry n) { - UNSAFE.putOrderedObject(this, nextOffset, n); + public final K getKey() { return (K)key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + public final V setValue(V value) { + throw new UnsupportedOperationException(); } - // Unsafe mechanics - static final sun.misc.Unsafe UNSAFE; - static final long nextOffset; - static { - try { - UNSAFE = sun.misc.Unsafe.getUnsafe(); - Class k = HashEntry.class; - nextOffset = UNSAFE.objectFieldOffset - (k.getDeclaredField("next")); - } catch (Exception e) { - throw new Error(e); + public final boolean equals(Object o) { + Object k, v, u; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == (u = val) || v.equals(u))); + } + } + + /** + * Exported Entry for EntryIterator + */ + static final class MapEntry implements Map.Entry { + final K key; // non-null + V val; // non-null + final ConcurrentHashMap map; + MapEntry(K key, V val, ConcurrentHashMap map) { + this.key = key; + this.val = val; + this.map = map; + } + public K getKey() { return key; } + public V getValue() { return val; } + public int hashCode() { return key.hashCode() ^ val.hashCode(); } + public String toString() { return key + "=" + val; } + + public boolean equals(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed, in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, V val, Node next, + TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * Returns a Class for the given type of the form "class C + * implements Comparable", if one exists, else null. See below + * for explanation. 
+ */ + static Class comparableClassFor(Class c) { + Class s, cmpc; Type[] ts, as; Type t; ParameterizedType p; + if (c == String.class) // bypass checks + return c; + if (c != null && (cmpc = Comparable.class).isAssignableFrom(c)) { + while (cmpc.isAssignableFrom(s = c.getSuperclass())) + c = s; // find topmost comparable class + if ((ts = c.getGenericInterfaces()) != null) { + for (int i = 0; i < ts.length; ++i) { + if (((t = ts[i]) instanceof ParameterizedType) && + ((p = (ParameterizedType)t).getRawType() == cmpc) && + (as = p.getActualTypeArguments()) != null && + as.length == 1 && as[0] == c) // type arg is c + return c; + } } } + return null; } /** - * Gets the ith element of given table (if nonnull) with volatile - * read semantics. Note: This is manually integrated into a few - * performance-sensitive methods to reduce call overhead. + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by Comparable.compareTo order if applicable. On lookup at a + * node, if elements are not comparable or compare as 0 then both + * left and right children may need to be searched in the case of + * tied hash values. (This corresponds to the full list search + * that would be necessary if all elements were non-Comparable and + * had tied hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin extends + * StampedLock to support a form of read-write lock. For update + * operations and table validation, the exclusive form of lock + * behaves in the same way as bin-head locks. However, lookups use + * shared read-lock mechanics to allow multiple readers in the + * absence of writers. Additionally, these lookups do not ever + * block: While the lock is not available, they proceed along the + * slow traversal path (via next-pointers) until the lock becomes + * available or the list is exhausted, whichever comes + * first. These cases are not fast, but maximize aggregate + * expected throughput. */ - @SuppressWarnings("unchecked") - static final HashEntry entryAt(HashEntry[] tab, int i) { - return (tab == null) ? null : - (HashEntry) UNSAFE.getObjectVolatile - (tab, ((long)i << TSHIFT) + TBASE); - } - - /** - * Sets the ith element of given table, with volatile write - * semantics. (See above about use of putOrderedObject.) - */ - static final void setEntryAt(HashEntry[] tab, int i, - HashEntry e) { - UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e); - } - - /** - * Applies a supplemental hash function to a given hashCode, which - * defends against poor quality hash functions. 
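The lock-or-walk lookup discipline described for TreeBins can be sketched in isolation with a StampedLock; in this hypothetical ReadOrWalk class, searchTree stands in for getTreeNode, and the list traversal is the non-blocking fallback:

import java.util.concurrent.locks.StampedLock;
import java.util.function.Predicate;

class ReadOrWalk<V> {
    static final class Node<V> {
        final V val; final Node<V> next;
        Node(V val, Node<V> next) { this.val = val; this.next = next; }
    }

    final StampedLock lock = new StampedLock();
    volatile Node<V> first;                        // next-pointer list, always traversable

    V find(Predicate<V> match) {
        for (Node<V> e = first; e != null; e = e.next) {
            long stamp = lock.tryReadLock();
            if (stamp != 0L) {                     // no writer: search the tree under read lock
                try {
                    return searchTree(match);
                } finally {
                    lock.unlockRead(stamp);
                }
            }
            if (match.test(e.val))                 // writer holds the lock: keep walking the list
                return e.val;
        }
        return null;
    }

    V searchTree(Predicate<V> match) { return null; }  // hypothetical stand-in for getTreeNode
}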
This is critical - * because ConcurrentHashMap uses power-of-two length hash tables, - * that otherwise encounter collisions for hashCodes that do not - * differ in lower or upper bits. - */ - private int hash(Object k) { - if (k instanceof String) { - return ((String) k).hash32(); - } - - int h = hashSeed ^ k.hashCode(); - - // Spread bits to regularize both segment and index locations, - // using variant of single-word Wang/Jenkins hash. - h += (h << 15) ^ 0xffffcd7d; - h ^= (h >>> 10); - h += (h << 3); - h ^= (h >>> 6); - h += (h << 2) + (h << 14); - return h ^ (h >>> 16); - } - - /** - * Segments are specialized versions of hash tables. This - * subclasses from ReentrantLock opportunistically, just to - * simplify some locking and avoid separate construction. - */ - static final class Segment extends ReentrantLock implements Serializable { - /* - * Segments maintain a table of entry lists that are always - * kept in a consistent state, so can be read (via volatile - * reads of segments and tables) without locking. This - * requires replicating nodes when necessary during table - * resizing, so the old lists can be traversed by readers - * still using old version of table. - * - * This class defines only mutative methods requiring locking. - * Except as noted, the methods of this class perform the - * per-segment versions of ConcurrentHashMap methods. (Other - * methods are integrated directly into ConcurrentHashMap - * methods.) These mutative methods use a form of controlled - * spinning on contention via methods scanAndLock and - * scanAndLockForPut. These intersperse tryLocks with - * traversals to locate nodes. The main benefit is to absorb - * cache misses (which are very common for hash tables) while - * obtaining locks so that traversal is faster once - * acquired. We do not actually use the found nodes since they - * must be re-acquired under lock anyway to ensure sequential - * consistency of updates (and in any case may be undetectably - * stale), but they will normally be much faster to re-locate. - * Also, scanAndLockForPut speculatively creates a fresh node - * to use in put if no node is found. - */ - + static final class TreeBin extends StampedLock { private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list - /** - * The maximum number of times to tryLock in a prescan before - * possibly blocking on acquire in preparation for a locked - * segment operation. On multiprocessors, using a bounded - * number of retries maintains cache acquired while locating - * nodes. - */ - static final int MAX_SCAN_RETRIES = - Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1; - - /** - * The per-segment table. Elements are accessed via - * entryAt/setEntryAt providing volatile semantics. - */ - transient volatile HashEntry[] table; - - /** - * The number of elements. Accessed only either within locks - * or among other volatile reads that maintain visibility. - */ - transient int count; - - /** - * The total number of mutative operations in this segment. - * Even though this may overflows 32 bits, it provides - * sufficient accuracy for stability checks in CHM isEmpty() - * and size() methods. Accessed only either within locks or - * among other volatile reads that maintain visibility. - */ - transient int modCount; - - /** - * The table is rehashed when its size exceeds this threshold. - * (The value of this field is always {@code (int)(capacity * - * loadFactor)}.) 
- */ - transient int threshold; - - /** - * The load factor for the hash table. Even though this value - * is same for all segments, it is replicated to avoid needing - * links to outer object. - * @serial - */ - final float loadFactor; - - Segment(float lf, int threshold, HashEntry[] tab) { - this.loadFactor = lf; - this.threshold = threshold; - this.table = tab; + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } } - final V put(K key, int hash, V value, boolean onlyIfAbsent) { - HashEntry node = tryLock() ? null : - scanAndLockForPut(key, hash, value); - V oldValue; - try { - HashEntry[] tab = table; - int index = (tab.length - 1) & hash; - HashEntry first = entryAt(tab, index); - for (HashEntry e = first;;) { - if (e != null) { - K k; - if ((k = e.key) == key || - (e.hash == hash && key.equals(k))) { - oldValue = e.value; - if (!onlyIfAbsent) { - e.value = value; - ++modCount; + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + final TreeNode getTreeNode(int h, Object k, TreeNode p, + Class cc) { + while (p != null) { + int dir, ph; Object pk; Class pc; + if ((ph = p.hash) != h) + dir = (h < ph) ? -1 : 1; + else if ((pk = p.key) == k || k.equals(pk)) + return p; + else if (cc == null || pk == null || + ((pc = pk.getClass()) != cc && + comparableClassFor(pc) != cc) || + (dir = ((Comparable)k).compareTo(pk)) == 0) { + TreeNode r, pr; // check both sides + if ((pr = p.right) != null && + (r = getTreeNode(h, k, pr, cc)) != null) + return r; + else // continue left + dir = -1; + } + p = (dir > 0) ? p.right : p.left; + } + return null; + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final V getValue(int h, Object k) { + Class cc = comparableClassFor(k.getClass()); + Node r = null; + for (Node e = first; e != null; e = e.next) { + long s; + if ((s = tryReadLock()) != 0L) { + try { + r = getTreeNode(h, k, root, cc); + } finally { + unlockRead(s); + } + break; + } + else if (e.hash == h && k.equals(e.key)) { + r = e; + break; + } + } + return r == null ? null : r.val; + } + + /** + * Finds or adds a node. + * @return null if added + */ + final TreeNode putTreeNode(int h, Object k, V v) { + Class cc = comparableClassFor(k.getClass()); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; Object pk; Class pc; + p = pp; + if ((ph = p.hash) != h) + dir = (h < ph) ? -1 : 1; + else if ((pk = p.key) == k || k.equals(pk)) + return p; + else if (cc == null || pk == null || + ((pc = pk.getClass()) != cc && + comparableClassFor(pc) != cc) || + (dir = ((Comparable)k).compareTo(pk)) == 0) { + TreeNode r, pr; + if ((pr = p.right) != null && + (r = getTreeNode(h, k, pr, cc)) != null) + return r; + else // continue left + dir = -1; + } + pp = (dir > 0) ? 
p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + for (TreeNode xp, xpp, xppl, xppr;;) { + if ((xp = x.parent) == null) { + (root = x).red = false; + break; + } + else if (!xp.red || (xpp = xp.parent) == null) { + TreeNode r = root; + if (r != null && r.red) + r.red = false; + break; + } + else if ((xppl = xpp.left) == xp) { + if ((xppr = xpp.right) != null && xppr.red) { + xppr.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + if (xppl != null && xppl.red) { + xppl.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + } + assert checkInvariants(); + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; + TreeNode pred = p.prev; // unlink traversal pointers + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + else if (pred == null) { + root = null; + return; + } + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + if (sr != null) + replacement = sr; + else + replacement = p; + } + else if (pl != null) + replacement = pl; + else if (pr != null) + replacement = pr; + else + replacement = p; + if (replacement != p) { + TreeNode pp = replacement.parent = p.parent; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + for (TreeNode x = replacement; x != null; ) { + TreeNode xp, xpl, xpr; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + else if ((xpl = xp.left) == x) { + if ((xpr = xp.right) != null && xpr.red) { + xpr.red = false; + xp.red = true; + rotateLeft(xp); + xpr = (xp = x.parent) == null ? 
null : xp.right; + } + if (xpr == null) + x = xp; + else { + TreeNode sl = xpr.left, sr = xpr.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + xpr.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + xpr.red = true; + rotateRight(xpr); + xpr = (xp = x.parent) == null ? + null : xp.right; + } + if (xpr != null) { + xpr.red = (xp == null) ? false : xp.red; + if ((sr = xpr.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + if (xpl != null && xpl.red) { + xpl.red = false; + xp.red = true; + rotateRight(xp); + xpl = (xp = x.parent) == null ? null : xp.left; + } + if (xpl == null) + x = xp; + else { + TreeNode sl = xpl.left, sr = xpl.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + xpl.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + xpl.red = true; + rotateLeft(xpl); + xpl = (xp = x.parent) == null ? + null : xp.left; + } + if (xpl != null) { + xpl.red = (xp == null) ? false : xp.red; + if ((sl = xpl.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement) { // detach pointers + TreeNode pp; + if ((pp = p.parent) != null) { + if (p == pp.left) + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + assert checkInvariants(); + } + + /** + * Checks linkage and balance invariants at root + */ + final boolean checkInvariants() { + TreeNode r = root; + if (r == null) + return (first == null); + else + return (first != null) && checkTreeNode(r); + } + + /** + * Recursive invariant check + */ + final boolean checkTreeNode(TreeNode t) { + TreeNode tp = t.parent, tl = t.left, tr = t.right, + tb = t.prev, tn = (TreeNode)t.next; + if (tb != null && tb.next != t) + return false; + if (tn != null && tn.prev != t) + return false; + if (tp != null && t != tp.left && t != tp.right) + return false; + if (tl != null && (tl.parent != t || tl.hash > t.hash)) + return false; + if (tr != null && (tr.parent != t || tr.hash < t.hash)) + return false; + if (t.red && tl != null && tl.red && tr != null && tr.red) + return false; + if (tl != null && !checkTreeNode(tl)) + return false; + if (tr != null && !checkTreeNode(tr)) + return false; + return true; + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top bit to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin if key is comparable. Call + * only when locked. 
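The Float example cited in the spread() comment is easy to reproduce: consecutive whole-number floats differ only in high bits, so their raw hash codes all select bin 0 of a small table, while the spread form separates them (hypothetical demo class; same transform as the method above):

public class SpreadDemo {
    static final int HASH_BITS = 0x7fffffff;

    static int spread(int h) {                 // same transform as in the patch
        h ^= (h >>> 18) ^ (h >>> 12);
        return (h ^ (h >>> 10)) & HASH_BITS;
    }

    public static void main(String[] args) {
        int mask = 15;                         // a 16-bin table
        for (float f = 1f; f <= 4f; f++) {
            int h = Float.valueOf(f).hashCode();
            System.out.printf("%.1f raw bin=%d spread bin=%d%n",
                              f, h & mask, spread(h) & mask);
        }
    }
}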
+ */ + private final void replaceWithTreeBin(Node[] tab, int index, Object key) { + if (tab != null && comparableClassFor(key.getClass()) != null) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final V internalGet(Object k) { + int h = spread(k.hashCode()); + V v = null; + Node[] tab; Node e; + if ((tab = table) != null && + (e = tabAt(tab, (tab.length - 1) & h)) != null) { + for (;;) { + int eh; Object ek; + if ((eh = e.hash) < 0) { + if ((ek = e.key) instanceof TreeBin) { // search TreeBin + v = ((TreeBin)ek).getValue(h, k); + break; + } + else if (!(ek instanceof Node[]) || // try new table + (e = tabAt(tab = (Node[])ek, + (tab.length - 1) & h)) == null) + break; + } + else if (eh == h && ((ek = e.key) == k || k.equals(ek))) { + v = e.val; + break; + } + else if ((e = e.next) == null) + break; + } + } + return v; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. + */ + private final V internalReplace(Object k, V v, Object cv) { + int h = spread(k.hashCode()); + V oldVal = null; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length - 1) & h)) == null) + break; + else if ((fh = f.hash) < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + long stamp = t.writeLock(); + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + Class cc = comparableClassFor(k.getClass()); + TreeNode p = t.getTreeNode(h, k, t.root, cc); + if (p != null) { + V pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if (v != null) + p.val = v; + else { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.unlockWrite(stamp); + } + if (validated) { + if (deleted) + addCount(-1L, -1); + break; + } + } + else + tab = (Node[])fk; + } + else { + boolean validated = false; + boolean deleted = false; + synchronized (f) { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek; + if (e.hash == h && + ((ek = e.key) == k || k.equals(ek))) { + V ev = e.val; + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if (v != null) + e.val = v; + else { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } + if (validated) { + if (deleted) + addCount(-1L, -1); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of insertion methods + * All have the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. Lock and validate; if valid, scan and add or update + * + * The putAll method differs mainly in attempting to pre-allocate + * enough table space, and also more lazily performs count updates + * and checks. 
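A stripped-down rendering of that structure, keeping only steps 2 and 5 (no lazy initialization, TreeBins, resizing, or counting) and using AtomicReferenceArray in place of Unsafe, shows the CAS-then-lock shape (hypothetical MiniPut class):

import java.util.concurrent.atomic.AtomicReferenceArray;

class MiniPut<K, V> {
    static final class Node<K, V> {
        final int hash; final K key; volatile V val; Node<K, V> next;
        Node(int hash, K key, V val, Node<K, V> next) {
            this.hash = hash; this.key = key; this.val = val; this.next = next;
        }
    }

    final AtomicReferenceArray<Node<K, V>> tab = new AtomicReferenceArray<>(16);

    V put(K k, V v) {
        int h = k.hashCode() & 0x7fffffff;     // assume the hash is already spread
        for (;;) {
            int i = h & (tab.length() - 1);
            Node<K, V> f = tab.get(i);
            if (f == null) {
                // step 2: empty bin -- publish the first node with a bare CAS, no lock
                if (tab.compareAndSet(i, null, new Node<>(h, k, v, null)))
                    return null;
            } else {
                // step 5: lock the bin head, then validate it is still the head
                synchronized (f) {
                    if (tab.get(i) == f) {
                        for (Node<K, V> e = f;; e = e.next) {
                            if (e.hash == h && e.key.equals(k)) {
                                V old = e.val; e.val = v; return old;
                            }
                            if (e.next == null) {          // append; heads never change
                                e.next = new Node<>(h, k, v, null);
                                return null;
                            }
                        }
                    }
                    // head changed under us: retry the outer loop
                }
            }
        }
    }
}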
+ * + * Most of the function-accepting methods can't be factored nicely + * because they require different functional forms, so instead + * sprawl out similar mechanics. + */ + + /** Implementation for put and putIfAbsent */ + private final V internalPut(K k, V v, boolean onlyIfAbsent) { + if (k == null || v == null) throw new NullPointerException(); + int h = spread(k.hashCode()); + int len = 0; + for (Node[] tab = table;;) { + int i, fh; Node f; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + long stamp = t.writeLock(); + V oldVal = null; + try { + if (tabAt(tab, i) == f) { + len = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + if (!onlyIfAbsent) + p.val = v; + } + } + } finally { + t.unlockWrite(stamp); + } + if (len != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node[])fk; + } + else { + V oldVal = null; + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node e = f;; ++len) { + Object ek; + if (e.hash == h && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = e.val; + if (!onlyIfAbsent) + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (len > TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } + if (len != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + } + addCount(1L, len); + return null; + } + + /** Implementation for computeIfAbsent */ + private final V internalComputeIfAbsent(K k, Function mf) { + if (k == null || mf == null) + throw new NullPointerException(); + int h = spread(k.hashCode()); + V val = null; + int len = 0; + for (Node[] tab = table;;) { + Node f; int i; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + Node node = new Node(h, k, null, null); + synchronized (node) { + if (casTabAt(tab, i, null, node)) { + len = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + } + } + } + if (len != 0) + break; + } + else if (f.hash < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + long stamp = t.writeLock(); + boolean added = false; + try { + if (tabAt(tab, i) == f) { + len = 2; + Class cc = comparableClassFor(k.getClass()); + TreeNode p = t.getTreeNode(h, k, t.root, cc); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + t.putTreeNode(h, k, val); + } + } + } finally { + t.unlockWrite(stamp); + } + if (len != 0) { + if (!added) + return val; + break; + } + } + else + tab = (Node[])fk; + } + else { + boolean added = false; + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node e = f;; ++len) { + Object ek; V ev; + if (e.hash == h && + ((ek = e.key) == k || k.equals(ek))) { + val = e.val; + break; + } + Node last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (len > TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } + if (len != 0) { + if (!added) + return val; + break; + } + } + } + if (val != null) + addCount(1L, len); + return val; + } + + /** Implementation for compute */ + private final V 
internalCompute(K k, boolean onlyIfPresent, + BiFunction mf) { + if (k == null || mf == null) + throw new NullPointerException(); + int h = spread(k.hashCode()); + V val = null; + int delta = 0; + int len = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node node = new Node(h, k, null, null); + synchronized (node) { + if (casTabAt(tab, i, null, node)) { + try { + len = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } finally { + if (delta == 0) + setTabAt(tab, i, null); + } + } + } + if (len != 0) + break; + } + else if ((fh = f.hash) < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + long stamp = t.writeLock(); + try { + if (tabAt(tab, i) == f) { + len = 2; + Class cc = comparableClassFor(k.getClass()); + TreeNode p = t.getTreeNode(h, k, t.root, cc); + if (p != null || !onlyIfPresent) { + V pv = (p == null) ? null : p.val; + if ((val = mf.apply(k, pv)) != null) { + if (p != null) + p.val = val; + else { + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } + } finally { + t.unlockWrite(stamp); + } + if (len != 0) + break; + } + else + tab = (Node[])fk; + } + else { + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node e = f, pred = null;; ++len) { + Object ek; + if (e.hash == h && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, e.val); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && + (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (len > TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } + if (len != 0) + break; + } + } + if (delta != 0) + addCount((long)delta, len); + return val; + } + + /** Implementation for merge */ + private final V internalMerge(K k, V v, + BiFunction mf) { + if (k == null || v == null || mf == null) + throw new NullPointerException(); + int h = spread(k.hashCode()); + V val = null; + int delta = 0; + int len = 0; + for (Node[] tab = table;;) { + int i; Node f; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if (f.hash < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + long stamp = t.writeLock(); + try { + if (tabAt(tab, i) == f) { + len = 2; + Class cc = comparableClassFor(k.getClass()); + TreeNode p = t.getTreeNode(h, k, t.root, cc); + val = (p == null) ? 
v : mf.apply(p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.unlockWrite(stamp); + } + if (len != 0) + break; + } + else + tab = (Node[])fk; + } + else { + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node e = f, pred = null;; ++len) { + Object ek; + if (e.hash == h && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(e.val, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + delta = 1; + val = v; + pred.next = new Node(h, k, val, null); + if (len > TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } + if (len != 0) + break; + } + } + if (delta != 0) + addCount((long)delta, len); + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry entry : m.entrySet()) { + Object k; V v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) < 0) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + long stamp = t.writeLock(); + boolean validated = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + Class cc = comparableClassFor(k.getClass()); + TreeNode p = t.getTreeNode(h, k, + t.root, cc); + if (p != null) + p.val = v; + else { + ++delta; + t.putTreeNode(h, k, v); + } + } + } finally { + t.unlockWrite(stamp); + } + if (validated) + break; + } + else + tab = (Node[])fk; + } + else { + int len = 0; + synchronized (f) { + if (tabAt(tab, i) == f) { + len = 1; + for (Node e = f;; ++len) { + Object ek; + if (e.hash == h && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (len > TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } + if (len != 0) { + if (len > 1) { + addCount(delta, len); + delta = 0L; } break; } - e = e.next; - } - else { - if (node != null) - node.setNext(first); - else - node = new HashEntry(hash, key, value, first); - int c = count + 1; - if (c > threshold && tab.length < MAXIMUM_CAPACITY) - rehash(node); - else - setEntryAt(tab, index, node); - ++modCount; - count = c; - oldValue = null; - break; } } - } finally { - unlock(); } - return oldValue; + } finally { + if (delta != 0L) + addCount(delta, 2); } + if (npe) + throw new NullPointerException(); + } - /** - * Doubles size of table and repacks entries, also adding the - * given node to new table - */ - @SuppressWarnings("unchecked") - private void rehash(HashEntry node) { - /* - * Reclassify nodes in each list to new table. Because we - * are using power-of-two expansion, the elements from - * each bin must either stay at same index, or move with a - * power of two offset. 
We eliminate unnecessary node - * creation by catching cases where old nodes can be - * reused because their next fields won't change. - * Statistically, at the default threshold, only about - * one-sixth of them need cloning when a table - * doubles. The nodes they replace will be garbage - * collectable as soon as they are no longer referenced by - * any reader thread that may be in the midst of - * concurrently traversing table. Entry accesses use plain - * array indexing because they are followed by volatile - * table write. - */ - HashEntry[] oldTable = table; - int oldCapacity = oldTable.length; - int newCapacity = oldCapacity << 1; - threshold = (int)(newCapacity * loadFactor); - HashEntry[] newTable = - (HashEntry[]) new HashEntry[newCapacity]; - int sizeMask = newCapacity - 1; - for (int i = 0; i < oldCapacity ; i++) { - HashEntry e = oldTable[i]; - if (e != null) { - HashEntry next = e.next; - int idx = e.hash & sizeMask; - if (next == null) // Single node on list - newTable[idx] = e; - else { // Reuse consecutive sequence at same slot - HashEntry lastRun = e; - int lastIdx = idx; - for (HashEntry last = next; - last != null; - last = last.next) { - int k = last.hash & sizeMask; - if (k != lastIdx) { - lastIdx = k; - lastRun = last; + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. + */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + Node[] tab = table; + while (tab != null && i < tab.length) { + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if (f.hash < 0) { + Object fk; + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + long stamp = t.writeLock(); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) + --delta; + t.first = null; + t.root = null; + ++i; + } + } finally { + t.unlockWrite(stamp); + } + } + else + tab = (Node[])fk; + } + else { + synchronized (f) { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) + --delta; + setTabAt(tab, i, null); + ++i; + } + } + } + } + if (delta != 0L) + addCount(delta, -1); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + /** + * Returns a power of two table size for the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final Node[] initTable() { + Node[] tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + table = tab = (Node[])new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * Adds to count, and if table is too small and not already + * resizing, initiates transfer. If already resizing, helps + * perform transfer if work is available. Rechecks occupancy + * after a transfer to see if another resize is already needed + * because resizings are lagging additions. 
+ * + * @param x the count to add + * @param check if <0, don't check resize, if <= 1 only check if uncontended + */ + private final void addCount(long x, int check) { + Cell[] as; long b, s; + if ((as = counterCells) != null || + !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) { + Cell a; long v; int m; + boolean uncontended = true; + if (as == null || (m = as.length - 1) < 0 || + (a = as[ThreadLocalRandom.getProbe() & m]) == null || + !(uncontended = + U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) { + fullAddCount(x, uncontended); + return; + } + if (check <= 1) + return; + s = sumCount(); + } + if (check >= 0) { + Node[] tab, nt; int sc; + while (s >= (long)(sc = sizeCtl) && (tab = table) != null && + tab.length < MAXIMUM_CAPACITY) { + if (sc < 0) { + if (sc == -1 || transferIndex <= transferOrigin || + (nt = nextTable) == null) + break; + if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) + transfer(tab, nt); + } + else if (U.compareAndSwapInt(this, SIZECTL, sc, -2)) + transfer(tab, null); + s = sumCount(); + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + Node[] tab = table; int n; + if (tab == null || (n = tab.length) == 0) { + n = (sc > c) ? sc : c; + if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { + try { + if (table == tab) { + table = (Node[])new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (tab == table && + U.compareAndSwapInt(this, SIZECTL, sc, -2)) + transfer(tab, null); + } + } + + /** + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. + */ + private final void transfer(Node[] tab, Node[] nextTab) { + int n = tab.length, stride; + if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE) + stride = MIN_TRANSFER_STRIDE; // subdivide range + if (nextTab == null) { // initiating + try { + nextTab = (Node[])new Node[n << 1]; + } catch (Throwable ex) { // try to cope with OOME + sizeCtl = Integer.MAX_VALUE; + return; + } + nextTable = nextTab; + transferOrigin = n; + transferIndex = n; + Node rev = new Node(MOVED, tab, null, null); + for (int k = n; k > 0;) { // progressively reveal ready slots + int nextk = (k > stride) ? k - stride : 0; + for (int m = nextk; m < k; ++m) + nextTab[m] = rev; + for (int m = n + nextk; m < n + k; ++m) + nextTab[m] = rev; + U.putOrderedInt(this, TRANSFERORIGIN, k = nextk); + } + } + int nextn = nextTab.length; + Node fwd = new Node(MOVED, nextTab, null, null); + boolean advance = true; + for (int i = 0, bound = 0;;) { + int nextIndex, nextBound; Node f; Object fk; + while (advance) { + if (--i >= bound) + advance = false; + else if ((nextIndex = transferIndex) <= transferOrigin) { + i = -1; + advance = false; + } + else if (U.compareAndSwapInt + (this, TRANSFERINDEX, nextIndex, + nextBound = (nextIndex > stride ? 
+ nextIndex - stride : 0))) { + bound = nextBound; + i = nextIndex - 1; + advance = false; + } + } + if (i < 0 || i >= n || i + n >= nextn) { + for (int sc;;) { + if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) { + if (sc == -1) { + nextTable = null; + table = nextTab; + sizeCtl = (n << 1) - (n >>> 1); + } + return; + } + } + } + else if ((f = tabAt(tab, i)) == null) { + if (casTabAt(tab, i, null, fwd)) { + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + advance = true; + } + } + else if (f.hash >= 0) { + synchronized (f) { + if (tabAt(tab, i) == f) { + int runBit = f.hash & n; + Node lastRun = f, lo = null, hi = null; + for (Node p = f.next; p != null; p = p.next) { + int b = p.hash & n; + if (b != runBit) { + runBit = b; + lastRun = p; } } - newTable[lastIdx] = lastRun; - // Clone remaining nodes - for (HashEntry p = e; p != lastRun; p = p.next) { - V v = p.value; - int h = p.hash; - int k = h & sizeMask; - HashEntry n = newTable[k]; - newTable[k] = new HashEntry(h, p.key, v, n); - } - } - } - } - int nodeIndex = node.hash & sizeMask; // add the new node - node.setNext(newTable[nodeIndex]); - newTable[nodeIndex] = node; - table = newTable; - } - - /** - * Scans for a node containing given key while trying to - * acquire lock, creating and returning one if not found. Upon - * return, guarantees that lock is held. UNlike in most - * methods, calls to method equals are not screened: Since - * traversal speed doesn't matter, we might as well help warm - * up the associated code and accesses as well. - * - * @return a new node if key not found, else null - */ - private HashEntry scanAndLockForPut(K key, int hash, V value) { - HashEntry first = entryForHash(this, hash); - HashEntry e = first; - HashEntry node = null; - int retries = -1; // negative while locating node - while (!tryLock()) { - HashEntry f; // to recheck first below - if (retries < 0) { - if (e == null) { - if (node == null) // speculatively create node - node = new HashEntry(hash, key, value, null); - retries = 0; - } - else if (key.equals(e.key)) - retries = 0; - else - e = e.next; - } - else if (++retries > MAX_SCAN_RETRIES) { - lock(); - break; - } - else if ((retries & 1) == 0 && - (f = entryForHash(this, hash)) != first) { - e = first = f; // re-traverse if entry changed - retries = -1; - } - } - return node; - } - - /** - * Scans for a node containing the given key while trying to - * acquire lock for a remove or replace operation. Upon - * return, guarantees that lock is held. Note that we must - * lock even if the key is not found, to ensure sequential - * consistency of updates. - */ - private void scanAndLock(Object key, int hash) { - // similar to but simpler than scanAndLockForPut - HashEntry first = entryForHash(this, hash); - HashEntry e = first; - int retries = -1; - while (!tryLock()) { - HashEntry f; - if (retries < 0) { - if (e == null || key.equals(e.key)) - retries = 0; - else - e = e.next; - } - else if (++retries > MAX_SCAN_RETRIES) { - lock(); - break; - } - else if ((retries & 1) == 0 && - (f = entryForHash(this, hash)) != first) { - e = first = f; - retries = -1; - } - } - } - - /** - * Remove; match on key only if value null, else match both. 
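The match-on-value-only-if-non-null convention noted in the old remove comment, implemented by internalReplace in the new code, surfaces in the public ConcurrentMap methods; for example (hypothetical demo class):

import java.util.concurrent.ConcurrentHashMap;

public class ConditionalOps {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
        m.put("k", 1);
        System.out.println(m.remove("k", 2));      // false: value does not match
        System.out.println(m.replace("k", 1, 2));  // true: matched 1, now mapped to 2
        System.out.println(m.remove("k", 2));      // true: matched, entry removed
        System.out.println(m.containsKey("k"));    // false
    }
}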
-    /**
-     * Remove; match on key only if value null, else match both.
-     */
-    final V remove(Object key, int hash, Object value) {
-        if (!tryLock())
-            scanAndLock(key, hash);
-        V oldValue = null;
-        try {
-            HashEntry[] tab = table;
-            int index = (tab.length - 1) & hash;
-            HashEntry e = entryAt(tab, index);
-            HashEntry pred = null;
-            while (e != null) {
-                K k;
-                HashEntry next = e.next;
-                if ((k = e.key) == key ||
-                    (e.hash == hash && key.equals(k))) {
-                    V v = e.value;
-                    if (value == null || value == v || value.equals(v)) {
-                        if (pred == null)
-                            setEntryAt(tab, index, next);
+                        if (runBit == 0)
+                            lo = lastRun;
+                        else
+                            hi = lastRun;
+                        for (Node p = f; p != lastRun; p = p.next) {
+                            int ph = p.hash; Object pk = p.key; V pv = p.val;
+                            if ((ph & n) == 0)
+                                lo = new Node(ph, pk, pv, lo);
                         else
-                            pred.setNext(next);
-                        ++modCount;
-                        --count;
-                        oldValue = v;
+                                hi = new Node(ph, pk, pv, hi);
                         }
-                    break;
+                        setTabAt(nextTab, i, lo);
+                        setTabAt(nextTab, i + n, hi);
+                        setTabAt(tab, i, fwd);
+                        advance = true;
                     }
-                pred = e;
-                e = next;
                 }
-        } finally {
-            unlock();
-        }
-        return oldValue;
-    }
-
-    final boolean replace(K key, int hash, V oldValue, V newValue) {
-        if (!tryLock())
-            scanAndLock(key, hash);
-        boolean replaced = false;
-        try {
-            HashEntry e;
-            for (e = entryForHash(this, hash); e != null; e = e.next) {
-                K k;
-                if ((k = e.key) == key ||
-                    (e.hash == hash && key.equals(k))) {
-                    if (oldValue.equals(e.value)) {
-                        e.value = newValue;
-                        ++modCount;
-                        replaced = true;
+            else if ((fk = f.key) instanceof TreeBin) {
+                TreeBin t = (TreeBin)fk;
+                long stamp = t.writeLock();
+                try {
+                    if (tabAt(tab, i) == f) {
+                        TreeNode root;
+                        Node ln = null, hn = null;
+                        if ((root = t.root) != null) {
+                            Node e, p; TreeNode lr, rr; int lh;
+                            TreeBin lt = null, ht = null;
+                            for (lr = root; lr.left != null; lr = lr.left);
+                            for (rr = root; rr.right != null; rr = rr.right);
+                            if ((lh = lr.hash) == rr.hash) { // move entire tree
+                                if ((lh & n) == 0)
+                                    lt = t;
+                                else
+                                    ht = t;
+                            }
+                            else {
+                                lt = new TreeBin();
+                                ht = new TreeBin();
+                                int lc = 0, hc = 0;
+                                for (e = t.first; e != null; e = e.next) {
+                                    int h = e.hash;
+                                    Object k = e.key; V v = e.val;
+                                    if ((h & n) == 0) {
+                                        ++lc;
+                                        lt.putTreeNode(h, k, v);
+                                    }
+                                    else {
+                                        ++hc;
+                                        ht.putTreeNode(h, k, v);
+                                    }
+                                }
+                                if (lc < TREE_THRESHOLD) { // throw away
+                                    for (p = lt.first; p != null; p = p.next)
+                                        ln = new Node(p.hash, p.key,
+                                                      p.val, ln);
+                                    lt = null;
+                                }
+                                if (hc < TREE_THRESHOLD) {
+                                    for (p = ht.first; p != null; p = p.next)
+                                        hn = new Node(p.hash, p.key,
+                                                      p.val, hn);
+                                    ht = null;
+                                }
+                            }
+                            if (ln == null && lt != null)
+                                ln = new Node(MOVED, lt, null, null);
+                            if (hn == null && ht != null)
+                                hn = new Node(MOVED, ht, null, null);
                         }
-                    break;
+                        setTabAt(nextTab, i, ln);
+                        setTabAt(nextTab, i + n, hn);
+                        setTabAt(tab, i, fwd);
+                        advance = true;
                     }
+                } finally {
+                    t.unlockWrite(stamp);
                 }
-        } finally {
-            unlock();
-        }
-        return replaced;
-    }
-
-    final V replace(K key, int hash, V value) {
-        if (!tryLock())
-            scanAndLock(key, hash);
-        V oldValue = null;
-        try {
-            HashEntry e;
-            for (e = entryForHash(this, hash); e != null; e = e.next) {
-                K k;
-                if ((k = e.key) == key ||
-                    (e.hash == hash && key.equals(k))) {
-                    oldValue = e.value;
-                    e.value = value;
-                    ++modCount;
-                    break;
-                }
-            }
-        } finally {
-            unlock();
-        }
-        return oldValue;
-    }
-
-    final void clear() {
-        lock();
-        try {
-            HashEntry[] tab = table;
-            for (int i = 0; i < tab.length ; i++)
-                setEntryAt(tab, i, null);
-            ++modCount;
-            count = 0;
-        } finally {
-            unlock();
-        }
     }
+            else
+                advance = true; // already processed
         }
     }
 
-    // Accessing segments
+    /* ---------------- Counter support -------------- */
 
-    /**
-     * Gets the jth element of given segment array (if nonnull) with
-     * volatile element access semantics via Unsafe. (The null check
-     * can trigger harmlessly only during deserialization.) Note:
-     * because each element of segments array is set only once (using
-     * fully ordered writes), some performance-sensitive methods rely
-     * on this method only as a recheck upon null reads.
-     */
-    @SuppressWarnings("unchecked")
-    static final Segment segmentAt(Segment[] ss, int j) {
-        long u = (j << SSHIFT) + SBASE;
-        return ss == null ? null :
-            (Segment) UNSAFE.getObjectVolatile(ss, u);
+    final long sumCount() {
+        Cell[] as = counterCells; Cell a;
+        long sum = baseCount;
+        if (as != null) {
+            for (int i = 0; i < as.length; ++i) {
+                if ((a = as[i]) != null)
+                    sum += a.value;
+            }
+        }
+        return sum;
     }
 
+    // See LongAdder version for explanation
+    private final void fullAddCount(long x, boolean wasUncontended) {
+        int h;
+        if ((h = ThreadLocalRandom.getProbe()) == 0) {
+            ThreadLocalRandom.localInit();      // force initialization
+            h = ThreadLocalRandom.getProbe();
+            wasUncontended = true;
+        }
+        boolean collide = false;                // True if last slot nonempty
+        for (;;) {
+            Cell[] as; Cell a; int n; long v;
+            if ((as = counterCells) != null && (n = as.length) > 0) {
+                if ((a = as[(n - 1) & h]) == null) {
+                    if (cellsBusy == 0) {            // Try to attach new Cell
+                        Cell r = new Cell(x);        // Optimistic create
+                        if (cellsBusy == 0 &&
+                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
+                            boolean created = false;
+                            try {               // Recheck under lock
+                                Cell[] rs; int m, j;
+                                if ((rs = counterCells) != null &&
+                                    (m = rs.length) > 0 &&
+                                    rs[j = (m - 1) & h] == null) {
+                                    rs[j] = r;
+                                    created = true;
+                                }
+                            } finally {
+                                cellsBusy = 0;
+                            }
+                            if (created)
+                                break;
+                            continue;           // Slot is now non-empty
+                        }
+                    }
+                    collide = false;
+                }
+                else if (!wasUncontended)       // CAS already known to fail
+                    wasUncontended = true;      // Continue after rehash
+                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
+                    break;
+                else if (counterCells != as || n >= NCPU)
+                    collide = false;            // At max size or stale
+                else if (!collide)
+                    collide = true;
+                else if (cellsBusy == 0 &&
+                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
+                    try {
+                        if (counterCells == as) {// Expand table unless stale
+                            Cell[] rs = new Cell[n << 1];
+                            for (int i = 0; i < n; ++i)
+                                rs[i] = as[i];
+                            counterCells = rs;
+                        }
+                    } finally {
+                        cellsBusy = 0;
+                    }
+                    collide = false;
+                    continue;                   // Retry with expanded table
+                }
+                h = ThreadLocalRandom.advanceProbe(h);
+            }
+            else if (cellsBusy == 0 && counterCells == as &&
+                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
+                boolean init = false;
+                try {                           // Initialize table
+                    if (counterCells == as) {
+                        Cell[] rs = new Cell[2];
+                        rs[h & 1] = new Cell(x);
+                        counterCells = rs;
+                        init = true;
+                    }
+                } finally {
+                    cellsBusy = 0;
+                }
+                if (init)
+                    break;
+            }
+            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
+                break;                          // Fall back on using base
+        }
+    }
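fullAddCount mirrors java.util.concurrent.atomic.LongAdder: contended updates are striped across Cells and summed on read. A rough equivalent using only public atomics (all names here are illustrative, not part of the patch):

    import java.util.concurrent.ThreadLocalRandom;
    import java.util.concurrent.atomic.AtomicLong;
    import java.util.concurrent.atomic.AtomicLongArray;

    final class StripedCounter {
        private final AtomicLong base = new AtomicLong();
        private final AtomicLongArray cells =
            new AtomicLongArray(Runtime.getRuntime().availableProcessors());

        void add(long x) {
            long b = base.get();
            if (!base.compareAndSet(b, b + x)) {   // contended: use a cell
                int i = ThreadLocalRandom.current().nextInt(cells.length());
                cells.addAndGet(i, x);
            }
        }

        long sum() {                               // analogous to sumCount()
            long s = base.get();
            for (int i = 0; i < cells.length(); ++i)
                s += cells.get(i);
            return s;
        }
    }
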
+
+    /* ----------------Table Traversal -------------- */
+
+    /**
+     * Encapsulates traversal for methods such as containsValue; also
+     * serves as a base class for other iterators and spliterators.
+     *
+     * Method advance visits once each still-valid node that was
+     * reachable upon iterator construction. It might miss some that
+     * were added to a bin after the bin was visited, which is OK wrt
+     * consistency guarantees. Maintaining this property in the face
+     * of possible ongoing resizes requires a fair amount of
+     * bookkeeping state that is difficult to optimize away amidst
+     * volatile accesses. Even so, traversal maintains reasonable
+     * throughput.
+     *
+     * Normally, iteration proceeds bin-by-bin traversing lists.
+     * However, if the table has been resized, then all future steps
+     * must traverse both the bin at the current index as well as at
+     * (index + baseSize); and so on for further resizings. To
+     * paranoically cope with potential sharing by users of iterators
+     * across threads, iteration terminates if a bounds check fails
+     * for a table read.
      */
-    @SuppressWarnings("unchecked")
-    private Segment ensureSegment(int k) {
-        final Segment[] ss = this.segments;
-        long u = (k << SSHIFT) + SBASE; // raw offset
-        Segment seg;
-        if ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u)) == null) {
-            Segment proto = ss[0]; // use segment 0 as prototype
-            int cap = proto.table.length;
-            float lf = proto.loadFactor;
-            int threshold = (int)(cap * lf);
-            HashEntry[] tab = (HashEntry[])new HashEntry[cap];
-            if ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u))
-                == null) { // recheck
-                Segment s = new Segment(lf, threshold, tab);
-                while ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u))
-                       == null) {
-                    if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
-                        break;
+    static class Traverser {
+        Node[] tab;           // current table; updated if resized
+        Node next;            // the next entry to use
+        int index;            // index of bin to use next
+        int baseIndex;        // current index of initial table
+        int baseLimit;        // index bound for initial table
+        final int baseSize;   // initial table size
+
+        Traverser(Node[] tab, int size, int index, int limit) {
+            this.tab = tab;
+            this.baseSize = size;
+            this.baseIndex = this.index = index;
+            this.baseLimit = limit;
+            this.next = null;
+        }
+
+        /**
+         * Advances if possible, returning next valid node, or null if none.
+         */
+        final Node advance() {
+            Node e;
+            if ((e = next) != null)
+                e = e.next;
+            for (;;) {
+                Node[] t; int i, n; Object ek; // must use locals in checks
+                if (e != null)
+                    return next = e;
+                if (baseIndex >= baseLimit || (t = tab) == null ||
+                    (n = t.length) <= (i = index) || i < 0)
+                    return next = null;
+                if ((e = tabAt(t, index)) != null && e.hash < 0) {
+                    if ((ek = e.key) instanceof TreeBin)
+                        e = ((TreeBin)ek).first;
+                    else {
+                        tab = (Node[])ek;
+                        e = null;
+                        continue;
+                    }
                 }
+                if ((index += baseSize) >= n)
+                    index = ++baseIndex; // visit upper slots if present
             }
         }
-        return seg;
-    }
-
-    // Hash-based segment and entry accesses
-
-    /**
-     * Gets the segment for the given hash code.
-     */
-    @SuppressWarnings("unchecked")
-    private Segment segmentForHash(int h) {
-        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
-        return (Segment) UNSAFE.getObjectVolatile(segments, u);
     }
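Traverser is what makes iteration weakly consistent: it never throws ConcurrentModificationException, and mutations made mid-traversal may or may not be observed. A hypothetical caller-side illustration:

    ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
    m.put("a", 1);
    m.put("b", 2);
    for (String k : m.keySet())          // backed by a Traverser
        m.putIfAbsent(k + "-copy", 0);   // mutating mid-iteration: no CME
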
 
     /**
-     * Gets the table entry for the given segment and hash code.
+     * Base of key, value, and entry Iterators. Adds fields to
+     * Traverser to support iterator.remove
      */
-    @SuppressWarnings("unchecked")
-    static final HashEntry entryForHash(Segment seg, int h) {
-        HashEntry[] tab;
-        return (seg == null || (tab = seg.table) == null) ? null :
-            (HashEntry) UNSAFE.getObjectVolatile
-            (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
+    static class BaseIterator extends Traverser {
+        final ConcurrentHashMap map;
+        Node lastReturned;
+        BaseIterator(Node[] tab, int size, int index, int limit,
+                     ConcurrentHashMap map) {
+            super(tab, size, index, limit);
+            this.map = map;
+            advance();
+        }
+
+        public final boolean hasNext() { return next != null; }
+        public final boolean hasMoreElements() { return next != null; }
+
+        public final void remove() {
+            Node p;
+            if ((p = lastReturned) == null)
+                throw new IllegalStateException();
+            lastReturned = null;
+            map.internalReplace((K)p.key, null, null);
+        }
     }
 
+    static final class KeyIterator extends BaseIterator
+        implements Iterator, Enumeration {
+        KeyIterator(Node[] tab, int index, int size, int limit,
+                    ConcurrentHashMap map) {
+            super(tab, index, size, limit, map);
+        }
+
+        public final K next() {
+            Node p;
+            if ((p = next) == null)
+                throw new NoSuchElementException();
+            K k = (K)p.key;
+            lastReturned = p;
+            advance();
+            return k;
+        }
+
+        public final K nextElement() { return next(); }
+    }
+
+    static final class ValueIterator extends BaseIterator
+        implements Iterator, Enumeration {
+        ValueIterator(Node[] tab, int index, int size, int limit,
+                      ConcurrentHashMap map) {
+            super(tab, index, size, limit, map);
+        }
+
+        public final V next() {
+            Node p;
+            if ((p = next) == null)
+                throw new NoSuchElementException();
+            V v = p.val;
+            lastReturned = p;
+            advance();
+            return v;
+        }
+
+        public final V nextElement() { return next(); }
+    }
+
+    static final class EntryIterator extends BaseIterator
+        implements Iterator> {
+        EntryIterator(Node[] tab, int index, int size, int limit,
+                      ConcurrentHashMap map) {
+            super(tab, index, size, limit, map);
+        }
+
+        public final Map.Entry next() {
+            Node p;
+            if ((p = next) == null)
+                throw new NoSuchElementException();
+            K k = (K)p.key;
+            V v = p.val;
+            lastReturned = p;
+            advance();
+            return new MapEntry(k, v, map);
+        }
+    }
+
+    static final class KeySpliterator extends Traverser
+        implements Spliterator {
+        long est;               // size estimate
+        KeySpliterator(Node[] tab, int size, int index, int limit,
+                       long est) {
+            super(tab, size, index, limit);
+            this.est = est;
+        }
+
+        public Spliterator trySplit() {
+            int i, f, h;
+            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
+                new KeySpliterator(tab, baseSize, baseLimit = h,
+                                   f, est >>>= 1);
+        }
+
+        public void forEachRemaining(Consumer action) {
+            if (action == null) throw new NullPointerException();
+            for (Node p; (p = advance()) != null;)
+                action.accept((K)p.key);
+        }
+
+        public boolean tryAdvance(Consumer action) {
+            if (action == null) throw new NullPointerException();
+            Node p;
+            if ((p = advance()) == null)
+                return false;
+            action.accept((K)p.key);
+            return true;
+        }
+
+        public long estimateSize() { return est; }
+
+        public int characteristics() {
+            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
+                Spliterator.NONNULL;
+        }
+    }
+
+    static final class ValueSpliterator extends Traverser
+        implements Spliterator {
+        long est;               // size estimate
+        ValueSpliterator(Node[] tab, int size, int index, int limit,
+                         long est) {
+            super(tab, size, index, limit);
+            this.est = est;
+        }
+
+        public Spliterator trySplit() {
+            int i, f, h;
+            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
+                new ValueSpliterator(tab, baseSize, baseLimit = h,
+                                     f, est >>>= 1);
+        }
+
+        public void forEachRemaining(Consumer action) {
+            if (action == null) throw new NullPointerException();
+            for (Node p; (p = advance()) != null;)
+                action.accept(p.val);
+        }
+
+        public boolean tryAdvance(Consumer action) {
+            if (action == null) throw new NullPointerException();
+            Node p;
+            if ((p = advance()) == null)
+                return false;
+            action.accept(p.val);
+            return true;
+        }
+
+        public long estimateSize() { return est; }
+
+        public int characteristics() {
+            return Spliterator.CONCURRENT | Spliterator.NONNULL;
+        }
+    }
+
+    static final class EntrySpliterator extends Traverser
+        implements Spliterator> {
+        final ConcurrentHashMap map; // To export MapEntry
+        long est;               // size estimate
+        EntrySpliterator(Node[] tab, int size, int index, int limit,
+                         long est, ConcurrentHashMap map) {
+            super(tab, size, index, limit);
+            this.map = map;
+            this.est = est;
+        }
+
+        public Spliterator> trySplit() {
+            int i, f, h;
+            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
+                new EntrySpliterator(tab, baseSize, baseLimit = h,
+                                     f, est >>>= 1, map);
+        }
+
+        public void forEachRemaining(Consumer> action) {
+            if (action == null) throw new NullPointerException();
+            for (Node p; (p = advance()) != null; )
+                action.accept(new MapEntry((K)p.key, p.val, map));
+        }
+
+        public boolean tryAdvance(Consumer> action) {
+            if (action == null) throw new NullPointerException();
+            Node p;
+            if ((p = advance()) == null)
+                return false;
+            action.accept(new MapEntry((K)p.key, p.val, map));
+            return true;
+        }
+
+        public long estimateSize() { return est; }
+
+        public int characteristics() {
+            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
+                Spliterator.NONNULL;
+        }
+    }
+
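The est field implements Spliterator's size contract loosely: trySplit halves the estimate rather than recomputing it. Assuming the key view exposes a spliterator (as java.util.Set does by default in JDK 8), usage might look like:

    Spliterator<String> s1 = m.keySet().spliterator();
    Spliterator<String> s2 = s1.trySplit();  // null when too small to split
    // s1.estimateSize() is roughly half its prior value; key spliterators
    // report DISTINCT | CONCURRENT | NONNULL characteristics.
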
+    /* ---------------- Public operations -------------- */
 
     /**
-     * Creates a new, empty map with the specified initial
-     * capacity, load factor and concurrency level.
-     *
-     * @param initialCapacity the initial capacity. The implementation
-     * performs internal sizing to accommodate this many elements.
-     * @param loadFactor the load factor threshold, used to control resizing.
-     * Resizing may be performed when the average number of elements per
-     * bin exceeds this threshold.
-     * @param concurrencyLevel the estimated number of concurrently
-     * updating threads. The implementation performs internal sizing
-     * to try to accommodate this many threads.
-     * @throws IllegalArgumentException if the initial capacity is
-     * negative or the load factor or concurrencyLevel are
-     * nonpositive.
+     * Creates a new, empty map with the default initial table size (16).
      */
-    @SuppressWarnings("unchecked")
-    public ConcurrentHashMap(int initialCapacity,
-                             float loadFactor, int concurrencyLevel) {
-        if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
-            throw new IllegalArgumentException();
-        if (concurrencyLevel > MAX_SEGMENTS)
-            concurrencyLevel = MAX_SEGMENTS;
-        // Find power-of-two sizes best matching arguments
-        int sshift = 0;
-        int ssize = 1;
-        while (ssize < concurrencyLevel) {
-            ++sshift;
-            ssize <<= 1;
-        }
-        this.segmentShift = 32 - sshift;
-        this.segmentMask = ssize - 1;
-        if (initialCapacity > MAXIMUM_CAPACITY)
-            initialCapacity = MAXIMUM_CAPACITY;
-        int c = initialCapacity / ssize;
-        if (c * ssize < initialCapacity)
-            ++c;
-        int cap = MIN_SEGMENT_TABLE_CAPACITY;
-        while (cap < c)
-            cap <<= 1;
-        // create segments and segments[0]
-        Segment s0 =
-            new Segment(loadFactor, (int)(cap * loadFactor),
-                        (HashEntry[])new HashEntry[cap]);
-        Segment[] ss = (Segment[])new Segment[ssize];
-        UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
-        this.segments = ss;
+    public ConcurrentHashMap() {
     }
 
     /**
-     * Creates a new, empty map with the specified initial capacity
-     * and load factor and with the default concurrencyLevel (16).
+     * Creates a new, empty map with an initial table size
+     * accommodating the specified number of elements without the need
+     * to dynamically resize.
      *
      * @param initialCapacity The implementation performs internal
      * sizing to accommodate this many elements.
-     * @param loadFactor the load factor threshold, used to control resizing.
-     * Resizing may be performed when the average number of elements per
-     * bin exceeds this threshold.
+     * @throws IllegalArgumentException if the initial capacity of
+     * elements is negative
+     */
+    public ConcurrentHashMap(int initialCapacity) {
+        if (initialCapacity < 0)
+            throw new IllegalArgumentException();
+        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
+                   MAXIMUM_CAPACITY :
+                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
+        this.sizeCtl = cap;
+    }
+
+    /**
+     * Creates a new map with the same mappings as the given map.
+     *
+     * @param m the map
+     */
+    public ConcurrentHashMap(Map m) {
+        this.sizeCtl = DEFAULT_CAPACITY;
+        internalPutAll(m);
+    }
+
+    /**
+     * Creates a new, empty map with an initial table size based on
+     * the given number of elements ({@code initialCapacity}) and
+     * initial table density ({@code loadFactor}).
+     *
+     * @param initialCapacity the initial capacity. The implementation
+     * performs internal sizing to accommodate this many elements,
+     * given the specified load factor.
+     * @param loadFactor the load factor (table density) for
+     * establishing the initial table size
      * @throws IllegalArgumentException if the initial capacity of
      * elements is negative or the load factor is nonpositive
      *
      * @since 1.6
      */
     public ConcurrentHashMap(int initialCapacity, float loadFactor) {
-        this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
+        this(initialCapacity, loadFactor, 1);
     }
 
     /**
-     * Creates a new, empty map with the specified initial capacity,
-     * and with default load factor (0.75) and concurrencyLevel (16).
+     * Creates a new, empty map with an initial table size based on
+     * the given number of elements ({@code initialCapacity}), table
+     * density ({@code loadFactor}), and number of concurrently
+     * updating threads ({@code concurrencyLevel}).
      *
      * @param initialCapacity the initial capacity. The implementation
-     * performs internal sizing to accommodate this many elements.
-     * @throws IllegalArgumentException if the initial capacity of
-     * elements is negative.
+     * performs internal sizing to accommodate this many elements,
+     * given the specified load factor.
+     * @param loadFactor the load factor (table density) for
+     * establishing the initial table size
+     * @param concurrencyLevel the estimated number of concurrently
+     * updating threads. The implementation may use this value as
+     * a sizing hint.
+     * @throws IllegalArgumentException if the initial capacity is
+     * negative or the load factor or concurrencyLevel are
+     * nonpositive
      */
-    public ConcurrentHashMap(int initialCapacity) {
-        this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
+    public ConcurrentHashMap(int initialCapacity,
+                             float loadFactor, int concurrencyLevel) {
+        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
+            throw new IllegalArgumentException();
+        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
+            initialCapacity = concurrencyLevel;   // as estimated threads
+        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
+        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
+            MAXIMUM_CAPACITY : tableSizeFor((int)size);
+        this.sizeCtl = cap;
     }
 
     /**
-     * Creates a new, empty map with a default initial capacity (16),
-     * load factor (0.75) and concurrencyLevel (16).
-     */
-    public ConcurrentHashMap() {
-        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
-    }
-
-    /**
-     * Creates a new map with the same mappings as the given map.
-     * The map is created with a capacity of 1.5 times the number
-     * of mappings in the given map or 16 (whichever is greater),
-     * and a default load factor (0.75) and concurrencyLevel (16).
+     * Creates a new {@link Set} backed by a ConcurrentHashMap
+     * from the given type to {@code Boolean.TRUE}.
      *
-     * @param m the map
+     * @return the new set
+     * @since 1.8
      */
-    public ConcurrentHashMap(Map m) {
-        this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
-                      DEFAULT_INITIAL_CAPACITY),
-             DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
-        putAll(m);
+    public static KeySetView newKeySet() {
+        return new KeySetView
+            (new ConcurrentHashMap(), Boolean.TRUE);
+    }
+
+    /**
+     * Creates a new {@link Set} backed by a ConcurrentHashMap
+     * from the given type to {@code Boolean.TRUE}.
+     *
+     * @param initialCapacity The implementation performs internal
+     * sizing to accommodate this many elements.
+     * @throws IllegalArgumentException if the initial capacity of
+     * elements is negative
+     * @return the new set
+     * @since 1.8
+     */
+    public static KeySetView newKeySet(int initialCapacity) {
+        return new KeySetView
+            (new ConcurrentHashMap(initialCapacity), Boolean.TRUE);
     }
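newKeySet gives a concurrent Set without a wrapper class; add returns false for duplicates, which makes it handy for claim-once logic. An illustrative use:

    java.util.Set<String> seen = ConcurrentHashMap.newKeySet();
    if (seen.add("job-42")) {
        // only the first thread to add "job-42" enters here
    }
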
 
     /**
@@ -834,38 +2639,7 @@ public class ConcurrentHashMap extends AbstractMap
      * @return {@code true} if this map contains no key-value mappings
      */
     public boolean isEmpty() {
-        /*
-         * Sum per-segment modCounts to avoid mis-reporting when
-         * elements are concurrently added and removed in one segment
-         * while checking another, in which case the table was never
-         * actually empty at any point. (The sum ensures accuracy up
-         * through at least 1<<31 per-segment modifications before
-         * recheck.) Methods size() and containsValue() use similar
-         * constructions for stability checks.
-         */
-        long sum = 0L;
-        final Segment[] segments = this.segments;
-        for (int j = 0; j < segments.length; ++j) {
-            Segment seg = segmentAt(segments, j);
-            if (seg != null) {
-                if (seg.count != 0)
-                    return false;
-                sum += seg.modCount;
-            }
-        }
-        if (sum != 0L) { // recheck unless no modifications
-            for (int j = 0; j < segments.length; ++j) {
-                Segment seg = segmentAt(segments, j);
-                if (seg != null) {
-                    if (seg.count != 0)
-                        return false;
-                    sum -= seg.modCount;
-                }
-            }
-            if (sum != 0L)
-                return false;
-        }
-        return true;
+        return sumCount() <= 0L; // ignore transient negative values
     }
 
     /**
@@ -876,43 +2650,25 @@ public class ConcurrentHashMap extends AbstractMap
      * @return the number of key-value mappings in this map
      */
     public int size() {
-        // Try a few times to get accurate count. On failure due to
-        // continuous async changes in table, resort to locking.
-        final Segment[] segments = this.segments;
-        int size;
-        boolean overflow; // true if size overflows 32 bits
-        long sum;         // sum of modCounts
-        long last = 0L;   // previous sum
-        int retries = -1; // first iteration isn't retry
-        try {
-            for (;;) {
-                if (retries++ == RETRIES_BEFORE_LOCK) {
-                    for (int j = 0; j < segments.length; ++j)
-                        ensureSegment(j).lock(); // force creation
-                }
-                sum = 0L;
-                size = 0;
-                overflow = false;
-                for (int j = 0; j < segments.length; ++j) {
-                    Segment seg = segmentAt(segments, j);
-                    if (seg != null) {
-                        sum += seg.modCount;
-                        int c = seg.count;
-                        if (c < 0 || (size += c) < 0)
-                            overflow = true;
-                    }
-                }
-                if (sum == last)
-                    break;
-                last = sum;
-            }
-        } finally {
-            if (retries > RETRIES_BEFORE_LOCK) {
-                for (int j = 0; j < segments.length; ++j)
-                    segmentAt(segments, j).unlock();
-            }
-        }
-        return overflow ? Integer.MAX_VALUE : size;
+        long n = sumCount();
+        return ((n < 0L) ? 0 :
+                (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
+                (int)n);
+    }
+
+    /**
+     * Returns the number of mappings. This method should be used
+     * instead of {@link #size} because a ConcurrentHashMap may
+     * contain more mappings than can be represented as an int. The
+     * value returned is an estimate; the actual count may differ if
+     * there are concurrent insertions or removals.
+     *
+     * @return the number of mappings
+     * @since 1.8
+     */
+    public long mappingCount() {
+        long n = sumCount();
+        return (n < 0L) ? 0L : n; // ignore transient negative values
     }
 
     /**
@@ -926,23 +2682,24 @@ public class ConcurrentHashMap extends AbstractMap
      *
      * @throws NullPointerException if the specified key is null
      */
-    @SuppressWarnings("unchecked")
     public V get(Object key) {
-        Segment s; // manually integrate access methods to reduce overhead
-        HashEntry[] tab;
-        int h = hash(key);
-        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
-        if ((s = (Segment)UNSAFE.getObjectVolatile(segments, u)) != null &&
-            (tab = s.table) != null) {
-            for (HashEntry e = (HashEntry) UNSAFE.getObjectVolatile
-                     (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
-                 e != null; e = e.next) {
-                K k;
-                if ((k = e.key) == key || (e.hash == h && key.equals(k)))
-                    return e.value;
-            }
-        }
-        return null;
+        return internalGet(key);
+    }
+
+    /**
+     * Returns the value to which the specified key is mapped, or the
+     * given default value if this map contains no mapping for the
+     * key.
+     *
+     * @param key the key whose associated value is to be returned
+     * @param defaultValue the value to return if this map contains
+     * no mapping for the given key
+     * @return the mapping for the key, if present; else the default value
+     * @throws NullPointerException if the specified key is null
+     */
+    public V getOrDefault(Object key, V defaultValue) {
+        V v;
+        return (v = internalGet(key)) == null ? defaultValue : v;
     }
 
     /**
@@ -954,29 +2711,14 @@ public class ConcurrentHashMap extends AbstractMap
      * {@code equals} method; {@code false} otherwise
      * @throws NullPointerException if the specified key is null
      */
-    @SuppressWarnings("unchecked")
     public boolean containsKey(Object key) {
-        Segment s; // same as get() except no need for volatile value read
-        HashEntry[] tab;
-        int h = hash(key);
-        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
-        if ((s = (Segment)UNSAFE.getObjectVolatile(segments, u)) != null &&
-            (tab = s.table) != null) {
-            for (HashEntry e = (HashEntry) UNSAFE.getObjectVolatile
-                     (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
-                 e != null; e = e.next) {
-                K k;
-                if ((k = e.key) == key || (e.hash == h && key.equals(k)))
-                    return true;
-            }
-        }
-        return false;
+        return internalGet(key) != null;
     }
 
     /**
      * Returns {@code true} if this map maps one or more keys to the
-     * specified value. Note: This method requires a full traversal
-     * of the map, and so is much slower than method {@code containsKey}.
+     * specified value. Note: This method may require a full traversal
+     * of the map, and is much slower than method {@code containsKey}.
      *
      * @param value value whose presence in this map is to be tested
      * @return {@code true} if this map maps one or more keys to the
@@ -984,49 +2726,18 @@ public class ConcurrentHashMap extends AbstractMap
      * @throws NullPointerException if the specified value is null
      */
     public boolean containsValue(Object value) {
-        // Same idea as size()
         if (value == null)
             throw new NullPointerException();
-        final Segment[] segments = this.segments;
-        boolean found = false;
-        long last = 0;
-        int retries = -1;
-        try {
-            outer: for (;;) {
-                if (retries++ == RETRIES_BEFORE_LOCK) {
-                    for (int j = 0; j < segments.length; ++j)
-                        ensureSegment(j).lock(); // force creation
-                }
-                long hashSum = 0L;
-                int sum = 0;
-                for (int j = 0; j < segments.length; ++j) {
-                    HashEntry[] tab;
-                    Segment seg = segmentAt(segments, j);
-                    if (seg != null && (tab = seg.table) != null) {
-                        for (int i = 0 ; i < tab.length; i++) {
-                            HashEntry e;
-                            for (e = entryAt(tab, i); e != null; e = e.next) {
-                                V v = e.value;
-                                if (v != null && value.equals(v)) {
-                                    found = true;
-                                    break outer;
-                                }
-                            }
-                        }
-                        sum += seg.modCount;
-                    }
-                }
-                if (retries > 0 && sum == last)
-                    break;
-                last = sum;
-            }
-        } finally {
-            if (retries > RETRIES_BEFORE_LOCK) {
-                for (int j = 0; j < segments.length; ++j)
-                    segmentAt(segments, j).unlock();
+        Node[] t;
+        if ((t = table) != null) {
+            Traverser it = new Traverser(t, t.length, 0, t.length);
+            for (Node p; (p = it.advance()) != null; ) {
+                V v;
+                if ((v = p.val) == value || value.equals(v))
+                    return true;
             }
         }
-        return found;
+        return false;
     }
 
     /**
@@ -1061,17 +2772,8 @@ public class ConcurrentHashMap extends AbstractMap
      * {@code null} if there was no mapping for {@code key}
      * @throws NullPointerException if the specified key or value is null
      */
-    @SuppressWarnings("unchecked")
     public V put(K key, V value) {
-        Segment s;
-        if (value == null)
-            throw new NullPointerException();
-        int hash = hash(key);
-        int j = (hash >>> segmentShift) & segmentMask;
-        if ((s = (Segment)UNSAFE.getObject          // nonvolatile; recheck
-             (segments, (j << SSHIFT) + SBASE)) == null) //  in ensureSegment
-            s = ensureSegment(j);
-        return s.put(key, hash, value, false);
+        return internalPut(key, value, false);
     }
 
     /**
@@ -1081,17 +2783,8 @@ public class ConcurrentHashMap extends AbstractMap
      * or {@code null} if there was no mapping for the key
     * @throws NullPointerException if the specified key or value is null
      */
-    @SuppressWarnings("unchecked")
     public V putIfAbsent(K key, V value) {
-        Segment s;
-        if (value == null)
-            throw new NullPointerException();
-        int hash = hash(key);
-        int j = (hash >>> segmentShift) & segmentMask;
-        if ((s = (Segment)UNSAFE.getObject
-             (segments, (j << SSHIFT) + SBASE)) == null)
-            s = ensureSegment(j);
-        return s.put(key, hash, value, true);
+        return internalPut(key, value, true);
    }
 
     /**
@@ -1102,8 +2795,105 @@ public class ConcurrentHashMap extends AbstractMap
      * @param m mappings to be stored in this map
      */
     public void putAll(Map m) {
-        for (Map.Entry e : m.entrySet())
-            put(e.getKey(), e.getValue());
+        internalPutAll(m);
+    }
+
+    /**
+     * If the specified key is not already associated with a value,
+     * attempts to compute its value using the given mapping function
+     * and enters it into this map unless {@code null}. The entire
+     * method invocation is performed atomically, so the function is
+     * applied at most once per key. Some attempted update operations
+     * on this map by other threads may be blocked while computation
+     * is in progress, so the computation should be short and simple,
+     * and must not attempt to update any other mappings of this map.
+     *
+     * @param key key with which the specified value is to be associated
+     * @param mappingFunction the function to compute a value
+     * @return the current (existing or computed) value associated with
+     * the specified key, or null if the computed value is null
+     * @throws NullPointerException if the specified key or mappingFunction
+     * is null
+     * @throws IllegalStateException if the computation detectably
+     * attempts a recursive update to this map that would
+     * otherwise never complete
+     * @throws RuntimeException or Error if the mappingFunction does so,
+     * in which case the mapping is left unestablished
+     */
+    public V computeIfAbsent(K key, Function mappingFunction) {
+        return internalComputeIfAbsent(key, mappingFunction);
+    }
+
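computeIfAbsent runs its function at most once per key and publishes the result atomically, which is the idiomatic way to build a concurrent multimap; the function must be short and must not touch other mappings of the map. A sketch:

    ConcurrentHashMap<String, java.util.Set<String>> index =
        new ConcurrentHashMap<>();
    index.computeIfAbsent("users", k -> ConcurrentHashMap.newKeySet())
         .add("alice");   // create-if-missing and add, with no explicit locking
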
+    /**
+     * If the value for the specified key is present, attempts to
+     * compute a new mapping given the key and its current mapped
+     * value. The entire method invocation is performed atomically.
+     * Some attempted update operations on this map by other threads
+     * may be blocked while computation is in progress, so the
+     * computation should be short and simple, and must not attempt to
+     * update any other mappings of this map.
+     *
+     * @param key key with which a value may be associated
+     * @param remappingFunction the function to compute a value
+     * @return the new value associated with the specified key, or null if none
+     * @throws NullPointerException if the specified key or remappingFunction
+     * is null
+     * @throws IllegalStateException if the computation detectably
+     * attempts a recursive update to this map that would
+     * otherwise never complete
+     * @throws RuntimeException or Error if the remappingFunction does so,
+     * in which case the mapping is unchanged
+     */
+    public V computeIfPresent(K key, BiFunction remappingFunction) {
+        return internalCompute(key, true, remappingFunction);
+    }
+
+    /**
+     * Attempts to compute a mapping for the specified key and its
+     * current mapped value (or {@code null} if there is no current
+     * mapping). The entire method invocation is performed atomically.
+     * Some attempted update operations on this map by other threads
+     * may be blocked while computation is in progress, so the
+     * computation should be short and simple, and must not attempt to
+     * update any other mappings of this Map.
+     *
+     * @param key key with which the specified value is to be associated
+     * @param remappingFunction the function to compute a value
+     * @return the new value associated with the specified key, or null if none
+     * @throws NullPointerException if the specified key or remappingFunction
+     * is null
+     * @throws IllegalStateException if the computation detectably
+     * attempts a recursive update to this map that would
+     * otherwise never complete
+     * @throws RuntimeException or Error if the remappingFunction does so,
+     * in which case the mapping is unchanged
+     */
+    public V compute(K key, BiFunction remappingFunction) {
+        return internalCompute(key, false, remappingFunction);
+    }
+
+    /**
+     * If the specified key is not already associated with a
+     * (non-null) value, associates it with the given value.
+     * Otherwise, replaces the value with the results of the given
+     * remapping function, or removes if {@code null}. The entire
+     * method invocation is performed atomically. Some attempted
+     * update operations on this map by other threads may be blocked
+     * while computation is in progress, so the computation should be
+     * short and simple, and must not attempt to update any other
+     * mappings of this Map.
+     *
+     * @param key key with which the specified value is to be associated
+     * @param value the value to use if absent
+     * @param remappingFunction the function to recompute a value if present
+     * @return the new value associated with the specified key, or null if none
+     * @throws NullPointerException if the specified key or the
+     * remappingFunction is null
+     * @throws RuntimeException or Error if the remappingFunction does so,
+     * in which case the mapping is unchanged
+     */
+    public V merge(K key, V value, BiFunction remappingFunction) {
+        return internalMerge(key, value, remappingFunction);
     }
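merge subsumes the common check-then-update patterns; a remapping result of null removes the entry. The canonical concurrent counter, for instance:

    ConcurrentHashMap<String, Long> counts = new ConcurrentHashMap<>();
    counts.merge("word", 1L, Long::sum);   // atomic per-key increment
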
 
     /**
@@ -1116,9 +2906,7 @@ public class ConcurrentHashMap extends AbstractMap
      * @throws NullPointerException if the specified key is null
      */
     public V remove(Object key) {
-        int hash = hash(key);
-        Segment s = segmentForHash(hash);
-        return s == null ? null : s.remove(key, hash, null);
+        return internalReplace(key, null, null);
     }
 
     /**
@@ -1127,10 +2915,9 @@ public class ConcurrentHashMap extends AbstractMap
      * @throws NullPointerException if the specified key is null
      */
     public boolean remove(Object key, Object value) {
-        int hash = hash(key);
-        Segment s;
-        return value != null && (s = segmentForHash(hash)) != null &&
-            s.remove(key, hash, value) != null;
+        if (key == null)
+            throw new NullPointerException();
+        return value != null && internalReplace(key, null, value) != null;
     }
 
     /**
@@ -1139,11 +2926,9 @@ public class ConcurrentHashMap extends AbstractMap
      * @throws NullPointerException if any of the arguments are null
      */
     public boolean replace(K key, V oldValue, V newValue) {
-        int hash = hash(key);
-        if (oldValue == null || newValue == null)
+        if (key == null || oldValue == null || newValue == null)
             throw new NullPointerException();
-        Segment s = segmentForHash(hash);
-        return s != null && s.replace(key, hash, oldValue, newValue);
+        return internalReplace(key, newValue, oldValue) != null;
     }
 
     /**
@@ -1154,23 +2939,16 @@ public class ConcurrentHashMap extends AbstractMap
      * @throws NullPointerException if the specified key or value is null
      */
     public V replace(K key, V value) {
-        int hash = hash(key);
-        if (value == null)
+        if (key == null || value == null)
            throw new NullPointerException();
-        Segment s = segmentForHash(hash);
-        return s == null ? null : s.replace(key, hash, value);
+        return internalReplace(key, value, null);
     }
 
     /**
      * Removes all of the mappings from this map.
      */
     public void clear() {
-        final Segment[] segments = this.segments;
-        for (int j = 0; j < segments.length; ++j) {
-            Segment s = segmentAt(segments, j);
-            if (s != null)
-                s.clear();
-        }
+        internalClear();
     }
 
     /**
@@ -1188,10 +2966,29 @@ public class ConcurrentHashMap extends AbstractMap
      * and guarantees to traverse elements as they existed upon
      * construction of the iterator, and may (but is not guaranteed to)
      * reflect any modifications subsequent to construction.
+     *
+     * @return the set view
      */
-    public Set keySet() {
-        Set ks = keySet;
-        return (ks != null) ? ks : (keySet = new KeySet());
+    public KeySetView keySet() {
+        KeySetView ks = keySet;
+        return (ks != null) ? ks : (keySet = new KeySetView(this, null));
+    }
+
+    /**
+     * Returns a {@link Set} view of the keys in this map, using the
+     * given common mapped value for any additions (i.e., {@link
+     * Collection#add} and {@link Collection#addAll(Collection)}).
+     * This is of course only appropriate if it is acceptable to use
+     * the same value for all additions from this view.
+     *
+     * @param mappedValue the mapped value to use for any additions
+     * @return the set view
+     * @throws NullPointerException if the mappedValue is null
+     */
+    public KeySetView keySet(V mappedValue) {
+        if (mappedValue == null)
+            throw new NullPointerException();
+        return new KeySetView(this, mappedValue);
    }
 
     /**
@@ -1209,10 +3006,12 @@ public class ConcurrentHashMap extends AbstractMap
      * and guarantees to traverse elements as they existed upon
      * construction of the iterator, and may (but is not guaranteed to)
      * reflect any modifications subsequent to construction.
+     *
+     * @return the collection view
      */
     public Collection values() {
-        Collection vs = values;
-        return (vs != null) ? vs : (values = new Values());
+        ValuesView vs = values;
+        return (vs != null) ? vs : (values = new ValuesView(this));
     }
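keySet(mappedValue) differs from keySet() in that the returned view also supports add and addAll, inserting the fixed mapped value. Illustratively (assuming KeySetView.add has put-if-absent semantics, as its Set contract implies):

    ConcurrentHashMap<String, Boolean> flags = new ConcurrentHashMap<>();
    ConcurrentHashMap.KeySetView<String, Boolean> on = flags.keySet(Boolean.TRUE);
    on.add("verbose");   // inserts ("verbose", TRUE) into flags if absent
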
 
     /**
@@ -1222,18 +3021,19 @@ public class ConcurrentHashMap extends AbstractMap
      * removal, which removes the corresponding mapping from the map,
      * via the {@code Iterator.remove}, {@code Set.remove},
      * {@code removeAll}, {@code retainAll}, and {@code clear}
-     * operations.  It does not support the {@code add} or
-     * {@code addAll} operations.
+     * operations.
      *
      * The view's {@code iterator} is a "weakly consistent" iterator
      * that will never throw {@link ConcurrentModificationException},
      * and guarantees to traverse elements as they existed upon
      * construction of the iterator, and may (but is not guaranteed to)
      * reflect any modifications subsequent to construction.
+     *
+     * @return the set view
      */
     public Set> entrySet() {
-        Set> es = entrySet;
-        return (es != null) ? es : (entrySet = new EntrySet());
+        EntrySetView es = entrySet;
+        return (es != null) ? es : (entrySet = new EntrySetView(this));
     }
 
     /**
@@ -1243,7 +3043,9 @@ public class ConcurrentHashMap extends AbstractMap
      * @see #keySet()
      */
     public Enumeration keys() {
-        return new KeyIterator();
+        Node[] t;
+        int f = (t = table) == null ? 0 : t.length;
+        return new KeyIterator(t, f, 0, f, this);
     }
 
     /**
@@ -1253,191 +3055,110 @@ public class ConcurrentHashMap extends AbstractMap
      * @see #values()
      */
     public Enumeration elements() {
-        return new ValueIterator();
-    }
-
-    /* ---------------- Iterator Support -------------- */
-
-    abstract class HashIterator {
-        int nextSegmentIndex;
-        int nextTableIndex;
-        HashEntry[] currentTable;
-        HashEntry nextEntry;
-        HashEntry lastReturned;
-
-        HashIterator() {
-            nextSegmentIndex = segments.length - 1;
-            nextTableIndex = -1;
-            advance();
-        }
-
-        /**
-         * Sets nextEntry to first node of next non-empty table
-         * (in backwards order, to simplify checks).
-         */
-        final void advance() {
-            for (;;) {
-                if (nextTableIndex >= 0) {
-                    if ((nextEntry = entryAt(currentTable,
-                                             nextTableIndex--)) != null)
-                        break;
-                }
-                else if (nextSegmentIndex >= 0) {
-                    Segment seg = segmentAt(segments, nextSegmentIndex--);
-                    if (seg != null && (currentTable = seg.table) != null)
-                        nextTableIndex = currentTable.length - 1;
-                }
-                else
-                    break;
-            }
-        }
-
-        final HashEntry nextEntry() {
-            HashEntry e = nextEntry;
-            if (e == null)
-                throw new NoSuchElementException();
-            lastReturned = e; // cannot assign until after null check
-            if ((nextEntry = e.next) == null)
-                advance();
-            return e;
-        }
-
-        public final boolean hasNext() { return nextEntry != null; }
-        public final boolean hasMoreElements() { return nextEntry != null; }
-
-        public final void remove() {
-            if (lastReturned == null)
-                throw new IllegalStateException();
-            ConcurrentHashMap.this.remove(lastReturned.key);
-            lastReturned = null;
-        }
-    }
-
-    final class KeyIterator
-        extends HashIterator
-        implements Iterator, Enumeration
-    {
-        public final K next()        { return super.nextEntry().key; }
-        public final K nextElement() { return super.nextEntry().key; }
-    }
-
-    final class ValueIterator
-        extends HashIterator
-        implements Iterator, Enumeration
-    {
-        public final V next()        { return super.nextEntry().value; }
-        public final V nextElement() { return super.nextEntry().value; }
+        Node[] t;
+        int f = (t = table) == null ? 0 : t.length;
+        return new ValueIterator(t, f, 0, f, this);
     }
 
     /**
-     * Custom Entry class used by EntryIterator.next(), that relays
-     * setValue changes to the underlying map.
+     * Returns the hash code value for this {@link Map}, i.e.,
+     * the sum of, for each key-value pair in the map,
+     * {@code key.hashCode() ^ value.hashCode()}.
+     *
+     * @return the hash code value for this map
      */
-    final class WriteThroughEntry
-        extends AbstractMap.SimpleEntry
-    {
-        static final long serialVersionUID = 7249069246763182397L;
-
-        WriteThroughEntry(K k, V v) {
-            super(k,v);
-        }
-
-        /**
-         * Sets our entry's value and writes through to the map. The
-         * value to return is somewhat arbitrary here. Since a
Since a - * WriteThroughEntry does not necessarily track asynchronous - * changes, the most recent "previous" value could be - * different from what we return (or could even have been - * removed in which case the put will re-establish). We do not - * and cannot guarantee more. - */ - public V setValue(V value) { - if (value == null) throw new NullPointerException(); - V v = super.setValue(value); - ConcurrentHashMap.this.put(getKey(), value); - return v; + public int hashCode() { + int h = 0; + Node[] t; + if ((t = table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) + h += p.key.hashCode() ^ p.val.hashCode(); } + return h; } - final class EntryIterator - extends HashIterator - implements Iterator> - { - public Map.Entry next() { - HashEntry e = super.nextEntry(); - return new WriteThroughEntry(e.key, e.value); + /** + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map + */ + public String toString() { + Node[] t; + int f = (t = table) == null ? 0 : t.length; + Traverser it = new Traverser(t, f, 0, f); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Node p; + if ((p = it.advance()) != null) { + for (;;) { + K k = (K)p.key; + V v = p.val; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? "(this Map)" : v); + if ((p = it.advance()) == null) + break; + sb.append(',').append(' '); + } } + return sb.append('}').toString(); } - final class KeySet extends AbstractSet { - public Iterator iterator() { - return new KeyIterator(); - } - public int size() { - return ConcurrentHashMap.this.size(); - } - public boolean isEmpty() { - return ConcurrentHashMap.this.isEmpty(); - } - public boolean contains(Object o) { - return ConcurrentHashMap.this.containsKey(o); - } - public boolean remove(Object o) { - return ConcurrentHashMap.this.remove(o) != null; - } - public void clear() { - ConcurrentHashMap.this.clear(); - } - } - - final class Values extends AbstractCollection { - public Iterator iterator() { - return new ValueIterator(); - } - public int size() { - return ConcurrentHashMap.this.size(); - } - public boolean isEmpty() { - return ConcurrentHashMap.this.isEmpty(); - } - public boolean contains(Object o) { - return ConcurrentHashMap.this.containsValue(o); - } - public void clear() { - ConcurrentHashMap.this.clear(); - } - } - - final class EntrySet extends AbstractSet> { - public Iterator> iterator() { - return new EntryIterator(); - } - public boolean contains(Object o) { - if (!(o instanceof Map.Entry)) + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. 
+ * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) return false; - Map.Entry e = (Map.Entry)o; - V v = ConcurrentHashMap.this.get(e.getKey()); - return v != null && v.equals(e.getValue()); - } - public boolean remove(Object o) { - if (!(o instanceof Map.Entry)) - return false; - Map.Entry e = (Map.Entry)o; - return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()); - } - public int size() { - return ConcurrentHashMap.this.size(); - } - public boolean isEmpty() { - return ConcurrentHashMap.this.isEmpty(); - } - public void clear() { - ConcurrentHashMap.this.clear(); + Map m = (Map) o; + Node[] t; + int f = (t = table) == null ? 0 : t.length; + Traverser it = new Traverser(t, f, 0, f); + for (Node p; (p = it.advance()) != null; ) { + V val = p.val; + Object v = m.get(p.key); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } } + return true; } /* ---------------- Serialization Support -------------- */ + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment extends ReentrantLock implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + /** * Saves the state of the {@code ConcurrentHashMap} instance to a * stream (i.e., serializes it). @@ -1448,119 +3169,2733 @@ public class ConcurrentHashMap extends AbstractMap * The key-value mappings are emitted in no particular order. */ private void writeObject(java.io.ObjectOutputStream s) - throws java.io.IOException { - // force all segments for serialization compatibility - for (int k = 0; k < segments.length; ++k) - ensureSegment(k); - s.defaultWriteObject(); + throws java.io.IOException { + // For serialization compatibility + // Emulate segment calculation from previous version of this class + int sshift = 0; + int ssize = 1; + while (ssize < DEFAULT_CONCURRENCY_LEVEL) { + ++sshift; + ssize <<= 1; + } + int segmentShift = 32 - sshift; + int segmentMask = ssize - 1; + Segment[] segments = (Segment[]) + new Segment[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment(LOAD_FACTOR); + s.putFields().put("segments", segments); + s.putFields().put("segmentShift", segmentShift); + s.putFields().put("segmentMask", segmentMask); + s.writeFields(); - final Segment[] segments = this.segments; - for (int k = 0; k < segments.length; ++k) { - Segment seg = segmentAt(segments, k); - seg.lock(); - try { - HashEntry[] tab = seg.table; - for (int i = 0; i < tab.length; ++i) { - HashEntry e; - for (e = entryAt(tab, i); e != null; e = e.next) { - s.writeObject(e.key); - s.writeObject(e.value); - } - } - } finally { - seg.unlock(); + Node[] t; + if ((t = table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) { + s.writeObject(p.key); + s.writeObject(p.val); } } s.writeObject(null); s.writeObject(null); + segments = null; // throw away } /** * Reconstitutes the instance from a stream (that is, deserializes it). 
      * @param s the stream
      */
-    @SuppressWarnings("unchecked")
     private void readObject(java.io.ObjectInputStream s)
-        throws java.io.IOException, ClassNotFoundException {
-        // Don't call defaultReadObject()
-        ObjectInputStream.GetField oisFields = s.readFields();
-        final Segment[] oisSegments = (Segment[])oisFields.get("segments", null);
+        throws java.io.IOException, ClassNotFoundException {
+        s.defaultReadObject();
 
-        final int ssize = oisSegments.length;
-        if (ssize < 1 || ssize > MAX_SEGMENTS
-            || (ssize & (ssize-1)) != 0 )  // ssize not power of two
-            throw new java.io.InvalidObjectException("Bad number of segments:"
-                                                     + ssize);
-        int sshift = 0, ssizeTmp = ssize;
-        while (ssizeTmp > 1) {
-            ++sshift;
-            ssizeTmp >>>= 1;
+        // Create all nodes, then place in table once size is known
+        long size = 0L;
+        Node p = null;
+        for (;;) {
+            K k = (K) s.readObject();
+            V v = (V) s.readObject();
+            if (k != null && v != null) {
+                int h = spread(k.hashCode());
+                p = new Node(h, k, v, p);
+                ++size;
+            }
+            else
+                break;
         }
-        UNSAFE.putIntVolatile(this, SEGSHIFT_OFFSET, 32 - sshift);
-        UNSAFE.putIntVolatile(this, SEGMASK_OFFSET, ssize - 1);
-        UNSAFE.putObjectVolatile(this, SEGMENTS_OFFSET, oisSegments);
+        if (p != null) {
+            boolean init = false;
+            int n;
+            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
+                n = MAXIMUM_CAPACITY;
+            else {
+                int sz = (int)size;
+                n = tableSizeFor(sz + (sz >>> 1) + 1);
+            }
+            int sc = sizeCtl;
+            boolean collide = false;
+            if (n > sc &&
+                U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
+                try {
+                    if (table == null) {
+                        init = true;
+                        Node[] tab = (Node[])new Node[n];
+                        int mask = n - 1;
+                        while (p != null) {
+                            int j = p.hash & mask;
+                            Node next = p.next;
+                            Node q = p.next = tabAt(tab, j);
+                            setTabAt(tab, j, p);
+                            if (!collide && q != null && q.hash == p.hash)
+                                collide = true;
+                            p = next;
+                        }
+                        table = tab;
+                        addCount(size, -1);
+                        sc = n - (n >>> 2);
+                    }
+                } finally {
+                    sizeCtl = sc;
+                }
+                if (collide) { // rescan and convert to TreeBins
+                    Node[] tab = table;
+                    for (int i = 0; i < tab.length; ++i) {
+                        int c = 0;
+                        for (Node e = tabAt(tab, i); e != null; e = e.next) {
+                            if (++c > TREE_THRESHOLD &&
+                                (e.key instanceof Comparable)) {
+                                replaceWithTreeBin(tab, i, e.key);
+                                break;
+                            }
+                        }
+                    }
+                }
+            }
+            if (!init) { // Can only happen if unsafely published.
+                while (p != null) {
+                    internalPut((K)p.key, p.val, false);
+                    p = p.next;
+                }
+            }
+        }
+    }
 
-        // set hashMask
-        UNSAFE.putIntVolatile(this, HASHSEED_OFFSET,
-                              sun.misc.Hashing.randomHashSeed(this));
 
-        // Re-initialize segments to be minimally sized, and let grow.
-        int cap = MIN_SEGMENT_TABLE_CAPACITY;
-        final Segment[] segments = this.segments;
-        for (int k = 0; k < segments.length; ++k) {
-            Segment seg = segments[k];
-            if (seg != null) {
-                seg.threshold = (int)(cap * seg.loadFactor);
-                seg.table = (HashEntry[]) new HashEntry[cap];
-            }
-        }
-    }
 
+    // -------------------------------------------------------
 
+    // Overrides of other default Map methods
 
+    public void forEach(BiConsumer action) {
+        if (action == null) throw new NullPointerException();
+        Node[] t;
+        if ((t = table) != null) {
+            Traverser it = new Traverser(t, t.length, 0, t.length);
+            for (Node p; (p = it.advance()) != null; ) {
+                action.accept((K)p.key, p.val);
+            }
+        }
+    }
 
+    public void replaceAll(BiFunction function) {
+        if (function == null) throw new NullPointerException();
+        Node[] t;
+        if ((t = table) != null) {
+            Traverser it = new Traverser(t, t.length, 0, t.length);
+            for (Node p; (p = it.advance()) != null; ) {
+                K k = (K)p.key;
+                internalPut(k, function.apply(k, p.val), false);
+            }
+        }
+    }
 
+    // -------------------------------------------------------
 
+    // Parallel bulk operations
 
+    /**
+     * Computes initial batch value for bulk tasks. The returned value
+     * is approximately exp2 of the number of times (minus one) to
+     * split task by two before executing leaf action. This value is
+     * faster to compute and more convenient to use as a guide to
+     * splitting than is the depth, since it is used while dividing by
+     * two anyway.
+     */
+    final int batchFor(long b) {
+        long n;
+        if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
+            return 0;
+        int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
+        return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
+    }
 
+    /**
+     * Performs the given action for each (key, value).
+     *
+     * @param parallelismThreshold the (estimated) number of elements
+     * needed for this operation to be executed in parallel
+     * @param action the action
+     * @since 1.8
+     */
+    public void forEach(long parallelismThreshold,
+                        BiConsumer action) {
+        if (action == null) throw new NullPointerException();
+        new ForEachMappingTask
+            (null, batchFor(parallelismThreshold), 0, 0, table,
+             action).invoke();
+    }
 
+    /**
+     * Performs the given action for each non-null transformation
+     * of each (key, value).
+     *
+     * @param parallelismThreshold the (estimated) number of elements
+     * needed for this operation to be executed in parallel
+     * @param transformer a function returning the transformation
+     * for an element, or null if there is no transformation (in
+     * which case the action is not applied)
+     * @param action the action
+     * @since 1.8
+     */
+    public void forEach(long parallelismThreshold,
+                        BiFunction transformer,
+                        Consumer action) {
+        if (transformer == null || action == null)
+            throw new NullPointerException();
+        new ForEachTransformedMappingTask
+            (null, batchFor(parallelismThreshold), 0, 0, table,
+             transformer, action).invoke();
+    }
+
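Each bulk operation takes a parallelismThreshold: Long.MAX_VALUE runs entirely in the calling thread, while 1 requests maximal splitting across the common ForkJoinPool. Illustrative calls:

    counts.forEach(1L, (k, v) -> System.out.println(k + "=" + v));
    counts.forEach(1L,
                   (k, v) -> v > 10 ? k : null,  // transformer; null filters out
                   k -> System.out.println(k));  // action sees non-null results
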
+    /**
+     * Returns a non-null result from applying the given search
+     * function on each (key, value), or null if none. Upon
+     * success, further element processing is suppressed and the
+     * results of any other parallel invocations of the search
+     * function are ignored.
+     *
+     * @param parallelismThreshold the (estimated) number of elements
+     * needed for this operation to be executed in parallel
+     * @param searchFunction a function returning a non-null
+     * result on success, else null
+     * @return a non-null result from applying the given search
+     * function on each (key, value), or null if none
+     * @since 1.8
+     */
+    public U search(long parallelismThreshold,
+                    BiFunction searchFunction) {
+        if (searchFunction == null) throw new NullPointerException();
+        return new SearchMappingsTask
+            (null, batchFor(parallelismThreshold), 0, 0, table,
+             searchFunction, new AtomicReference()).invoke();
+    }
+
+    /**
+     * Returns the result of accumulating the given transformation
+     * of all (key, value) pairs using the given reducer to
+     * combine values, or null if none.
+     *
+     * @param parallelismThreshold the (estimated) number of elements
+     * needed for this operation to be executed in parallel
+     * @param transformer a function returning the transformation
+     * for an element, or null if there is no transformation (in
+     * which case it is not combined)
+     * @param reducer a commutative associative combining function
+     * @return the result of accumulating the given transformation
+     * of all (key, value) pairs
+     * @since 1.8
+     */
+    public U reduce(long parallelismThreshold,
+                    BiFunction transformer,
+                    BiFunction reducer) {
+        if (transformer == null || reducer == null)
+            throw new NullPointerException();
+        return new MapReduceMappingsTask
+            (null, batchFor(parallelismThreshold), 0, 0, table,
+             null, transformer, reducer).invoke();
+    }
+
+    /**
+     * Returns the result of accumulating the given transformation
+     * of all (key, value) pairs using the given reducer to
+     * combine values, and the given basis as an identity value.
+     *
+     * @param parallelismThreshold the (estimated) number of elements
+     * needed for this operation to be executed in parallel
+     * @param transformer a function returning the transformation
+     * for an element
+     * @param basis the identity (initial default value) for the reduction
+     * @param reducer a commutative associative combining function
+     * @return the result of accumulating the given transformation
+     * of all (key, value) pairs
+     * @since 1.8
+     */
+    public double reduceToDouble(long parallelismThreshold,
+                                 ToDoubleBiFunction transformer,
+                                 double basis,
+                                 DoubleBinaryOperator reducer) {
+        if (transformer == null || reducer == null)
+            throw new NullPointerException();
+        return new MapReduceMappingsToDoubleTask
+            (null, batchFor(parallelismThreshold), 0, 0, table,
+             null, transformer, basis, reducer).invoke();
+    }
+
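search stops as soon as any task finds a non-null result; reduce combines transformed pairs with a commutative, associative reducer. Illustrative calls:

    String hot   = counts.search(100L, (k, v) -> v > 1000L ? k : null);
    Long   total = counts.reduce(100L, (k, v) -> v, Long::sum); // null if empty
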
+
+    /**
+     * Returns the result of accumulating the given transformation
+     * of all (key, value) pairs using the given reducer to
+     * combine values, and the given basis as an identity value.
+     *
+     * @param parallelismThreshold the (estimated) number of elements
+     * needed for this operation to be executed in parallel
+     * @param transformer a function returning the transformation
+     * for an element
+     * @param basis the identity (initial default value) for the reduction
+     * @param reducer a commutative associative combining function
+     * @return the result of accumulating the given transformation
+     * of all (key, value) pairs
+     * @since 1.8
+     */
+    public long reduceToLong(long parallelismThreshold,
+                             ToLongBiFunction<? super K, ? super V> transformer,
+                             long basis,
+                             LongBinaryOperator reducer) {
+        if (transformer == null || reducer == null)
+            throw new NullPointerException();
+        return new MapReduceMappingsToLongTask<K,V>
+            (null, batchFor(parallelismThreshold), 0, 0, table,
+             null, transformer, basis, reducer).invoke();
+    }
+
+    /**
+     * Returns the result of accumulating the given transformation
+     * of all (key, value) pairs using the given reducer to
+     * combine values, and the given basis as an identity value.
+     *
+     * @param parallelismThreshold the (estimated) number of elements
+     * needed for this operation to be executed in parallel
+     * @param transformer a function returning the transformation
+     * for an element
+     * @param basis the identity (initial default value) for the reduction
+     * @param reducer a commutative associative combining function
+     * @return the result of accumulating the given transformation
+     * of all (key, value) pairs
+     * @since 1.8
+     */
+    public int reduceToInt(long parallelismThreshold,
+                           ToIntBiFunction<? super K, ? super V> transformer,
+                           int basis,
+                           IntBinaryOperator reducer) {
+        if (transformer == null || reducer == null)
+            throw new NullPointerException();
+        return new MapReduceMappingsToIntTask<K,V>
+            (null, batchFor(parallelismThreshold), 0, 0, table,
+             null, transformer, basis, reducer).invoke();
+    }
+
+    /**
+     * Performs the given action for each key.
+     *
+     * @param parallelismThreshold the (estimated) number of elements
+     * needed for this operation to be executed in parallel
+     * @param action the action
+     * @since 1.8
+     */
+    public void forEachKey(long parallelismThreshold,
+                           Consumer<? super K> action) {
+        if (action == null) throw new NullPointerException();
+        new ForEachKeyTask<K,V>
+            (null, batchFor(parallelismThreshold), 0, 0, table,
+             action).invoke();
+    }
+
+    /**
+     * Performs the given action for each non-null transformation
+     * of each key.
+     *
+     * @param parallelismThreshold the (estimated) number of elements
+     * needed for this operation to be executed in parallel
+     * @param transformer a function returning the transformation
+     * for an element, or null if there is no transformation (in
+     * which case the action is not applied)
+     * @param action the action
+     * @since 1.8
+     */
+    public <U> void forEachKey(long parallelismThreshold,
+                               Function<? super K, ? extends U> transformer,
+                               Consumer<? super U> action) {
+        if (transformer == null || action == null)
+            throw new NullPointerException();
+        new ForEachTransformedKeyTask<K,V,U>
+            (null, batchFor(parallelismThreshold), 0, 0, table,
+             transformer, action).invoke();
+    }
+
+    /**
+     * Returns a non-null result from applying the given search
+     * function on each key, or null if none. Upon success,
+     * further element processing is suppressed and the results of
+     * any other parallel invocations of the search function are
+     * ignored.
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each key, or null if none + * @since 1.8 + */ + public U searchKeys(long parallelismThreshold, + Function searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchKeysTask + (null, batchFor(parallelismThreshold), 0, 0, table, + searchFunction, new AtomicReference()).invoke(); + } + + /** + * Returns the result of accumulating all keys using the given + * reducer to combine values, or null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param reducer a commutative associative combining function + * @return the result of accumulating all keys using the given + * reducer to combine values, or null if none + * @since 1.8 + */ + public K reduceKeys(long parallelismThreshold, + BiFunction reducer) { + if (reducer == null) throw new NullPointerException(); + return new ReduceKeysTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, or + * null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined) + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + * @since 1.8 + */ + public U reduceKeys(long parallelismThreshold, + Function transformer, + BiFunction reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + * @since 1.8 + */ + public double reduceKeysToDouble(long parallelismThreshold, + ToDoubleFunction transformer, + double basis, + DoubleBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysToDoubleTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. 
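Continuing the sketch for the key-oriented operations above (same demo map; values are illustrative):

String anyLong = map.searchKeys(100L, k -> k.length() > 4 ? k : null);
String longest = map.reduceKeys(100L,
                                (a, b) -> a.length() >= b.length() ? a : b);
double keyLen  = map.reduceKeysToDouble(100L, k -> (double) k.length(),
                                        0.0, Double::sum);
map.forEachKey(100L, k -> System.out.println(k));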
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + * @since 1.8 + */ + public long reduceKeysToLong(long parallelismThreshold, + ToLongFunction transformer, + long basis, + LongBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysToLongTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all keys using the given reducer to combine values, and + * the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all keys + * @since 1.8 + */ + public int reduceKeysToInt(long parallelismThreshold, + ToIntFunction transformer, + int basis, + IntBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceKeysToIntTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Performs the given action for each value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param action the action + * @since 1.8 + */ + public void forEachValue(long parallelismThreshold, + Consumer action) { + if (action == null) + throw new NullPointerException(); + new ForEachValueTask + (null, batchFor(parallelismThreshold), 0, 0, table, + action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + * @since 1.8 + */ + public void forEachValue(long parallelismThreshold, + Function transformer, + Consumer action) { + if (transformer == null || action == null) + throw new NullPointerException(); + new ForEachTransformedValueTask + (null, batchFor(parallelismThreshold), 0, 0, table, + transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each value, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. 
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each value, or null if none + * @since 1.8 + */ + public U searchValues(long parallelismThreshold, + Function searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchValuesTask + (null, batchFor(parallelismThreshold), 0, 0, table, + searchFunction, new AtomicReference()).invoke(); + } + + /** + * Returns the result of accumulating all values using the + * given reducer to combine values, or null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param reducer a commutative associative combining function + * @return the result of accumulating all values + * @since 1.8 + */ + public V reduceValues(long parallelismThreshold, + BiFunction reducer) { + if (reducer == null) throw new NullPointerException(); + return new ReduceValuesTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, or + * null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined) + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + * @since 1.8 + */ + public U reduceValues(long parallelismThreshold, + Function transformer, + BiFunction reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + * @since 1.8 + */ + public double reduceValuesToDouble(long parallelismThreshold, + ToDoubleFunction transformer, + double basis, + DoubleBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesToDoubleTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. 
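And for the value-based variants (same demo map; results assume the two mappings from the first sketch):

Integer big  = map.searchValues(100L, v -> v > 5 ? v : null);         // 7
Integer max  = map.reduceValues(100L, Integer::max);                  // 7
double total = map.reduceValuesToDouble(100L, Integer::doubleValue,
                                        0.0, Double::sum);            // 10.0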
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + * @since 1.8 + */ + public long reduceValuesToLong(long parallelismThreshold, + ToLongFunction transformer, + long basis, + LongBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesToLongTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all values using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all values + * @since 1.8 + */ + public int reduceValuesToInt(long parallelismThreshold, + ToIntFunction transformer, + int basis, + IntBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceValuesToIntTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Performs the given action for each entry. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param action the action + * @since 1.8 + */ + public void forEachEntry(long parallelismThreshold, + Consumer> action) { + if (action == null) throw new NullPointerException(); + new ForEachEntryTask(null, batchFor(parallelismThreshold), 0, 0, table, + action).invoke(); + } + + /** + * Performs the given action for each non-null transformation + * of each entry. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case the action is not applied) + * @param action the action + * @since 1.8 + */ + public void forEachEntry(long parallelismThreshold, + Function, ? extends U> transformer, + Consumer action) { + if (transformer == null || action == null) + throw new NullPointerException(); + new ForEachTransformedEntryTask + (null, batchFor(parallelismThreshold), 0, 0, table, + transformer, action).invoke(); + } + + /** + * Returns a non-null result from applying the given search + * function on each entry, or null if none. Upon success, + * further element processing is suppressed and the results of + * any other parallel invocations of the search function are + * ignored. 
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param searchFunction a function returning a non-null + * result on success, else null + * @return a non-null result from applying the given search + * function on each entry, or null if none + * @since 1.8 + */ + public U searchEntries(long parallelismThreshold, + Function, ? extends U> searchFunction) { + if (searchFunction == null) throw new NullPointerException(); + return new SearchEntriesTask + (null, batchFor(parallelismThreshold), 0, 0, table, + searchFunction, new AtomicReference()).invoke(); + } + + /** + * Returns the result of accumulating all entries using the + * given reducer to combine values, or null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param reducer a commutative associative combining function + * @return the result of accumulating all entries + * @since 1.8 + */ + public Map.Entry reduceEntries(long parallelismThreshold, + BiFunction, Map.Entry, ? extends Map.Entry> reducer) { + if (reducer == null) throw new NullPointerException(); + return new ReduceEntriesTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * or null if none. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element, or null if there is no transformation (in + * which case it is not combined) + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + * @since 1.8 + */ + public U reduceEntries(long parallelismThreshold, + Function, ? extends U> transformer, + BiFunction reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + * @since 1.8 + */ + public double reduceEntriesToDouble(long parallelismThreshold, + ToDoubleFunction> transformer, + double basis, + DoubleBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesToDoubleTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. 
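The entry-based variants give the transformer access to both key and value at once (sketch, same demo map; requires java.util.Map):

Map.Entry<String,Integer> maxByValue =
    map.reduceEntries(100L,
                      (e1, e2) -> e1.getValue() >= e2.getValue() ? e1 : e2);
Integer combined =
    map.reduceEntries(100L,
                      e -> e.getKey().length() + e.getValue(),
                      Integer::sum);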
+ * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + * @since 1.8 + */ + public long reduceEntriesToLong(long parallelismThreshold, + ToLongFunction> transformer, + long basis, + LongBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesToLongTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + /** + * Returns the result of accumulating the given transformation + * of all entries using the given reducer to combine values, + * and the given basis as an identity value. + * + * @param parallelismThreshold the (estimated) number of elements + * needed for this operation to be executed in parallel + * @param transformer a function returning the transformation + * for an element + * @param basis the identity (initial default value) for the reduction + * @param reducer a commutative associative combining function + * @return the result of accumulating the given transformation + * of all entries + * @since 1.8 + */ + public int reduceEntriesToInt(long parallelismThreshold, + ToIntFunction> transformer, + int basis, + IntBinaryOperator reducer) { + if (transformer == null || reducer == null) + throw new NullPointerException(); + return new MapReduceEntriesToIntTask + (null, batchFor(parallelismThreshold), 0, 0, table, + null, transformer, basis, reducer).invoke(); + } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + abstract static class CollectionView + implements Collection, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + final ConcurrentHashMap map; + CollectionView(ConcurrentHashMap map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMap getMap() { return map; } + + /** + * Removes all of the elements from this view, by removing all + * the mappings from the map backing this view. + */ + public final void clear() { map.clear(); } + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + + // implementations below rely on concrete classes supplying these + // abstract methods + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ */ + public abstract Iterator iterator(); + public abstract boolean contains(Object o); + public abstract boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + for (E e : this) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = e; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + for (E e : this) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)e; + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + /** + * Returns a string representation of this collection. + * The string representation consists of the string representations + * of the collection's elements in the order they are returned by + * its iterator, enclosed in square brackets ({@code "[]"}). + * Adjacent elements are separated by the characters {@code ", "} + * (comma and space). Elements are converted to strings as by + * {@link String#valueOf(Object)}. + * + * @return a string representation of this collection + */ + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? "(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection c) { + if (c != this) { + for (Object e : c) { + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMap as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. + * See {@link #keySet() keySet()}, + * {@link #keySet(Object) keySet(V)}, + * {@link #newKeySet() newKeySet()}, + * {@link #newKeySet(int) newKeySet(int)}. 
+ * @since 1.8 + */ + public static class KeySetView extends CollectionView + implements Set, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMap map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported + */ + public V getMappedValue() { return value; } + + /** + * {@inheritDoc} + * @throws NullPointerException if the specified key is null + */ + public boolean contains(Object o) { return map.containsKey(o); } + + /** + * Removes the key from this map view, by removing the key (and its + * corresponding value) from the backing map. This method does + * nothing if the key is not in the map. + * + * @param o the key to be removed from the backing map + * @return {@code true} if the backing map contained the specified key + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * @return an iterator over the keys of the backing map + */ + public Iterator iterator() { + Node[] t; + ConcurrentHashMap m = map; + int f = (t = m.table) == null ? 0 : t.length; + return new KeyIterator(t, f, 0, f, m); + } + + /** + * Adds the specified key to this set view by mapping the key to + * the default mapped value in the backing map, if defined. + * + * @param e key to be added + * @return {@code true} if this set changed as a result of the call + * @throws NullPointerException if the specified key is null + * @throws UnsupportedOperationException if no default mapped value + * for additions was provided + */ + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + return map.internalPut(e, v, true) == null; + } + + /** + * Adds all of the elements in the specified collection to this set, + * as if by calling {@link #add} on each one. + * + * @param c the elements to be inserted into this set + * @return {@code true} if this set changed as a result of the call + * @throws NullPointerException if the collection or any of its + * elements are {@code null} + * @throws UnsupportedOperationException if no default mapped value + * for additions was provided + */ + public boolean addAll(Collection c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (map.internalPut(e, v, true) == null) + added = true; + } + return added; + } + + public int hashCode() { + int h = 0; + for (K e : this) + h += e.hashCode(); + return h; + } + + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + + public Spliterator spliterator() { + Node[] t; + ConcurrentHashMap m = map; + long n = m.sumCount(); + int f = (t = m.table) == null ? 0 : t.length; + return new KeySpliterator(t, f, 0, f, n < 0L ? 0L : n); + } + + public void forEach(Consumer action) { + if (action == null) throw new NullPointerException(); + Node[] t; + if ((t = map.table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) + action.accept((K)p.key); + } + } + } + + /** + * A view of a ConcurrentHashMap as a {@link Collection} of + * values, in which additions are disabled. 
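A short sketch of the KeySetView contract described above (JDK 8 API; variable names are illustrative). The newKeySet() factory is the one referenced in the javadoc but defined outside this hunk; keySet(V) makes the view insertable by supplying a default mapped value:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class KeySetViewDemo {
    public static void main(String[] args) {
        // Standalone concurrent set backed by an internal map.
        Set<String> tags = ConcurrentHashMap.<String>newKeySet();
        tags.add("x");

        // View over an existing map; add() maps the key to the default 0.
        ConcurrentHashMap<String,Integer> m = new ConcurrentHashMap<>();
        ConcurrentHashMap.KeySetView<String,Integer> keys = m.keySet(0);
        keys.add("y");
        assert m.get("y") == 0;
    }
}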
This class cannot be + * directly instantiated. See {@link #values()}. + */ + static final class ValuesView extends CollectionView + implements Collection, java.io.Serializable { + private static final long serialVersionUID = 2249069246763182397L; + ValuesView(ConcurrentHashMap map) { super(map); } + public final boolean contains(Object o) { + return map.containsValue(o); + } + + public final boolean remove(Object o) { + if (o != null) { + for (Iterator it = iterator(); it.hasNext();) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + public final Iterator iterator() { + ConcurrentHashMap m = map; + Node[] t; + int f = (t = m.table) == null ? 0 : t.length; + return new ValueIterator(t, f, 0, f, m); + } + + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + + public Spliterator spliterator() { + Node[] t; + ConcurrentHashMap m = map; + long n = m.sumCount(); + int f = (t = m.table) == null ? 0 : t.length; + return new ValueSpliterator(t, f, 0, f, n < 0L ? 0L : n); + } + + public void forEach(Consumer action) { + if (action == null) throw new NullPointerException(); + Node[] t; + if ((t = map.table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) + action.accept(p.val); + } + } + } + + /** + * A view of a ConcurrentHashMap as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet()}. + */ + static final class EntrySetView extends CollectionView> + implements Set>, java.io.Serializable { + private static final long serialVersionUID = 2249069246763182397L; + EntrySetView(ConcurrentHashMap map) { super(map); } + + public boolean contains(Object o) { + Object k, v, r; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + + public boolean remove(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * @return an iterator over the entries of the backing map + */ + public Iterator> iterator() { + ConcurrentHashMap m = map; + Node[] t; + int f = (t = m.table) == null ? 0 : t.length; + return new EntryIterator(t, f, 0, f, m); + } + + public boolean add(Entry e) { + return map.internalPut(e.getKey(), e.getValue(), false) == null; + } + + public boolean addAll(Collection> c) { + boolean added = false; + for (Entry e : c) { + if (add(e)) + added = true; + } + return added; + } + + public final int hashCode() { + int h = 0; + Node[] t; + if ((t = map.table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) { + h += p.hashCode(); + } + } + return h; + } + + public final boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + + public Spliterator> spliterator() { + Node[] t; + ConcurrentHashMap m = map; + long n = m.sumCount(); + int f = (t = m.table) == null ? 0 : t.length; + return new EntrySpliterator(t, f, 0, f, n < 0L ? 
0L : n, m); + } + + public void forEach(Consumer> action) { + if (action == null) throw new NullPointerException(); + Node[] t; + if ((t = map.table) != null) { + Traverser it = new Traverser(t, t.length, 0, t.length); + for (Node p; (p = it.advance()) != null; ) + action.accept(new MapEntry((K)p.key, p.val, map)); } } - // Read the keys and values, and put the mappings in the table - for (;;) { - K key = (K) s.readObject(); - V value = (V) s.readObject(); - if (key == null) - break; - put(key, value); + } + + // ------------------------------------------------------- + + /** + * Base class for bulk tasks. Repeats some fields and code from + * class Traverser, because we need to subclass CountedCompleter. + */ + abstract static class BulkTask extends CountedCompleter { + Node[] tab; // same as Traverser + Node next; + int index; + int baseIndex; + int baseLimit; + final int baseSize; + int batch; // split control + + BulkTask(BulkTask par, int b, int i, int f, Node[] t) { + super(par); + this.batch = b; + this.index = this.baseIndex = i; + if ((this.tab = t) == null) + this.baseSize = this.baseLimit = 0; + else if (par == null) + this.baseSize = this.baseLimit = t.length; + else { + this.baseLimit = f; + this.baseSize = par.baseSize; + } + } + + /** + * Same as Traverser version + */ + final Node advance() { + Node e; + if ((e = next) != null) + e = e.next; + for (;;) { + Node[] t; int i, n; Object ek; + if (e != null) + return next = e; + if (baseIndex >= baseLimit || (t = tab) == null || + (n = t.length) <= (i = index) || i < 0) + return next = null; + if ((e = tabAt(t, index)) != null && e.hash < 0) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (Node[])ek; + e = null; + continue; + } + } + if ((index += baseSize) >= n) + index = ++baseIndex; + } + } + } + + /* + * Task classes. Coded in a regular but ugly format/style to + * simplify checks that each variant differs in the right way from + * others. The null screenings exist because compilers cannot tell + * that we've already null-checked task arguments, so we force + * simplest hoisted bypass to help avoid convoluted traps. 
+ */ + + static final class ForEachKeyTask + extends BulkTask { + final Consumer action; + ForEachKeyTask + (BulkTask p, int b, int i, int f, Node[] t, + Consumer action) { + super(p, b, i, f, t); + this.action = action; + } + public final void compute() { + final Consumer action; + if ((action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachKeyTask + (this, batch >>>= 1, baseLimit = h, f, tab, + action).fork(); + } + for (Node p; (p = advance()) != null;) + action.accept((K)p.key); + propagateCompletion(); + } + } + } + + static final class ForEachValueTask + extends BulkTask { + final Consumer action; + ForEachValueTask + (BulkTask p, int b, int i, int f, Node[] t, + Consumer action) { + super(p, b, i, f, t); + this.action = action; + } + public final void compute() { + final Consumer action; + if ((action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachValueTask + (this, batch >>>= 1, baseLimit = h, f, tab, + action).fork(); + } + for (Node p; (p = advance()) != null;) + action.accept(p.val); + propagateCompletion(); + } + } + } + + static final class ForEachEntryTask + extends BulkTask { + final Consumer> action; + ForEachEntryTask + (BulkTask p, int b, int i, int f, Node[] t, + Consumer> action) { + super(p, b, i, f, t); + this.action = action; + } + public final void compute() { + final Consumer> action; + if ((action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachEntryTask + (this, batch >>>= 1, baseLimit = h, f, tab, + action).fork(); + } + for (Node p; (p = advance()) != null; ) + action.accept(p); + propagateCompletion(); + } + } + } + + static final class ForEachMappingTask + extends BulkTask { + final BiConsumer action; + ForEachMappingTask + (BulkTask p, int b, int i, int f, Node[] t, + BiConsumer action) { + super(p, b, i, f, t); + this.action = action; + } + public final void compute() { + final BiConsumer action; + if ((action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachMappingTask + (this, batch >>>= 1, baseLimit = h, f, tab, + action).fork(); + } + for (Node p; (p = advance()) != null; ) + action.accept((K)p.key, p.val); + propagateCompletion(); + } + } + } + + static final class ForEachTransformedKeyTask + extends BulkTask { + final Function transformer; + final Consumer action; + ForEachTransformedKeyTask + (BulkTask p, int b, int i, int f, Node[] t, + Function transformer, Consumer action) { + super(p, b, i, f, t); + this.transformer = transformer; this.action = action; + } + public final void compute() { + final Function transformer; + final Consumer action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachTransformedKeyTask + (this, batch >>>= 1, baseLimit = h, f, tab, + transformer, action).fork(); + } + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply((K)p.key)) != null) + action.accept(u); + } + propagateCompletion(); + } + } + } + + static final class ForEachTransformedValueTask + extends BulkTask { + final Function transformer; + final Consumer action; + 
ForEachTransformedValueTask + (BulkTask p, int b, int i, int f, Node[] t, + Function transformer, Consumer action) { + super(p, b, i, f, t); + this.transformer = transformer; this.action = action; + } + public final void compute() { + final Function transformer; + final Consumer action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachTransformedValueTask + (this, batch >>>= 1, baseLimit = h, f, tab, + transformer, action).fork(); + } + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p.val)) != null) + action.accept(u); + } + propagateCompletion(); + } + } + } + + static final class ForEachTransformedEntryTask + extends BulkTask { + final Function, ? extends U> transformer; + final Consumer action; + ForEachTransformedEntryTask + (BulkTask p, int b, int i, int f, Node[] t, + Function, ? extends U> transformer, Consumer action) { + super(p, b, i, f, t); + this.transformer = transformer; this.action = action; + } + public final void compute() { + final Function, ? extends U> transformer; + final Consumer action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachTransformedEntryTask + (this, batch >>>= 1, baseLimit = h, f, tab, + transformer, action).fork(); + } + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p)) != null) + action.accept(u); + } + propagateCompletion(); + } + } + } + + static final class ForEachTransformedMappingTask + extends BulkTask { + final BiFunction transformer; + final Consumer action; + ForEachTransformedMappingTask + (BulkTask p, int b, int i, int f, Node[] t, + BiFunction transformer, + Consumer action) { + super(p, b, i, f, t); + this.transformer = transformer; this.action = action; + } + public final void compute() { + final BiFunction transformer; + final Consumer action; + if ((transformer = this.transformer) != null && + (action = this.action) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + new ForEachTransformedMappingTask + (this, batch >>>= 1, baseLimit = h, f, tab, + transformer, action).fork(); + } + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply((K)p.key, p.val)) != null) + action.accept(u); + } + propagateCompletion(); + } + } + } + + static final class SearchKeysTask + extends BulkTask { + final Function searchFunction; + final AtomicReference result; + SearchKeysTask + (BulkTask p, int b, int i, int f, Node[] t, + Function searchFunction, + AtomicReference result) { + super(p, b, i, f, t); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + public final void compute() { + final Function searchFunction; + final AtomicReference result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + if (result.get() != null) + return; + addToPendingCount(1); + new SearchKeysTask + (this, batch >>>= 1, baseLimit = h, f, tab, + searchFunction, result).fork(); + } + while (result.get() == null) { + U u; + Node p; + if ((p = advance()) == null) { + propagateCompletion(); + break; + } 
+ if ((u = searchFunction.apply((K)p.key)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + break; + } + } + } + } + } + + static final class SearchValuesTask + extends BulkTask { + final Function searchFunction; + final AtomicReference result; + SearchValuesTask + (BulkTask p, int b, int i, int f, Node[] t, + Function searchFunction, + AtomicReference result) { + super(p, b, i, f, t); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + public final void compute() { + final Function searchFunction; + final AtomicReference result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + if (result.get() != null) + return; + addToPendingCount(1); + new SearchValuesTask + (this, batch >>>= 1, baseLimit = h, f, tab, + searchFunction, result).fork(); + } + while (result.get() == null) { + U u; + Node p; + if ((p = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply(p.val)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + break; + } + } + } + } + } + + static final class SearchEntriesTask + extends BulkTask { + final Function, ? extends U> searchFunction; + final AtomicReference result; + SearchEntriesTask + (BulkTask p, int b, int i, int f, Node[] t, + Function, ? extends U> searchFunction, + AtomicReference result) { + super(p, b, i, f, t); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + public final void compute() { + final Function, ? extends U> searchFunction; + final AtomicReference result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + if (result.get() != null) + return; + addToPendingCount(1); + new SearchEntriesTask + (this, batch >>>= 1, baseLimit = h, f, tab, + searchFunction, result).fork(); + } + while (result.get() == null) { + U u; + Node p; + if ((p = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply(p)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + return; + } + } + } + } + } + + static final class SearchMappingsTask + extends BulkTask { + final BiFunction searchFunction; + final AtomicReference result; + SearchMappingsTask + (BulkTask p, int b, int i, int f, Node[] t, + BiFunction searchFunction, + AtomicReference result) { + super(p, b, i, f, t); + this.searchFunction = searchFunction; this.result = result; + } + public final U getRawResult() { return result.get(); } + public final void compute() { + final BiFunction searchFunction; + final AtomicReference result; + if ((searchFunction = this.searchFunction) != null && + (result = this.result) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + if (result.get() != null) + return; + addToPendingCount(1); + new SearchMappingsTask + (this, batch >>>= 1, baseLimit = h, f, tab, + searchFunction, result).fork(); + } + while (result.get() == null) { + U u; + Node p; + if ((p = advance()) == null) { + propagateCompletion(); + break; + } + if ((u = searchFunction.apply((K)p.key, p.val)) != null) { + if (result.compareAndSet(null, u)) + quietlyCompleteRoot(); + break; + } + } + } + } + } + + static final class 
ReduceKeysTask + extends BulkTask { + final BiFunction reducer; + K result; + ReduceKeysTask rights, nextRight; + ReduceKeysTask + (BulkTask p, int b, int i, int f, Node[] t, + ReduceKeysTask nextRight, + BiFunction reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.reducer = reducer; + } + public final K getRawResult() { return result; } + public final void compute() { + final BiFunction reducer; + if ((reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new ReduceKeysTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, reducer)).fork(); + } + K r = null; + for (Node p; (p = advance()) != null; ) { + K u = (K)p.key; + r = (r == null) ? u : u == null ? r : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + ReduceKeysTask + t = (ReduceKeysTask)c, + s = t.rights; + while (s != null) { + K tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class ReduceValuesTask + extends BulkTask { + final BiFunction reducer; + V result; + ReduceValuesTask rights, nextRight; + ReduceValuesTask + (BulkTask p, int b, int i, int f, Node[] t, + ReduceValuesTask nextRight, + BiFunction reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.reducer = reducer; + } + public final V getRawResult() { return result; } + public final void compute() { + final BiFunction reducer; + if ((reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new ReduceValuesTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, reducer)).fork(); + } + V r = null; + for (Node p; (p = advance()) != null; ) { + V v = p.val; + r = (r == null) ? v : reducer.apply(r, v); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + ReduceValuesTask + t = (ReduceValuesTask)c, + s = t.rights; + while (s != null) { + V tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class ReduceEntriesTask + extends BulkTask> { + final BiFunction, Map.Entry, ? extends Map.Entry> reducer; + Map.Entry result; + ReduceEntriesTask rights, nextRight; + ReduceEntriesTask + (BulkTask p, int b, int i, int f, Node[] t, + ReduceEntriesTask nextRight, + BiFunction, Map.Entry, ? extends Map.Entry> reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.reducer = reducer; + } + public final Map.Entry getRawResult() { return result; } + public final void compute() { + final BiFunction, Map.Entry, ? extends Map.Entry> reducer; + if ((reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new ReduceEntriesTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, reducer)).fork(); + } + Map.Entry r = null; + for (Node p; (p = advance()) != null; ) + r = (r == null) ? 
p : reducer.apply(r, p); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + ReduceEntriesTask + t = (ReduceEntriesTask)c, + s = t.rights; + while (s != null) { + Map.Entry tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceKeysTask + extends BulkTask { + final Function transformer; + final BiFunction reducer; + U result; + MapReduceKeysTask rights, nextRight; + MapReduceKeysTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceKeysTask nextRight, + Function transformer, + BiFunction reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + public final void compute() { + final Function transformer; + final BiFunction reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceKeysTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, reducer)).fork(); + } + U r = null; + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply((K)p.key)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceKeysTask + t = (MapReduceKeysTask)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceValuesTask + extends BulkTask { + final Function transformer; + final BiFunction reducer; + U result; + MapReduceValuesTask rights, nextRight; + MapReduceValuesTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceValuesTask nextRight, + Function transformer, + BiFunction reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + public final void compute() { + final Function transformer; + final BiFunction reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceValuesTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, reducer)).fork(); + } + U r = null; + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p.val)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceValuesTask + t = (MapReduceValuesTask)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceEntriesTask + extends BulkTask { + final Function, ? extends U> transformer; + final BiFunction reducer; + U result; + MapReduceEntriesTask rights, nextRight; + MapReduceEntriesTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceEntriesTask nextRight, + Function, ? 
extends U> transformer, + BiFunction reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + public final void compute() { + final Function, ? extends U> transformer; + final BiFunction reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceEntriesTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, reducer)).fork(); + } + U r = null; + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply(p)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceEntriesTask + t = (MapReduceEntriesTask)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceMappingsTask + extends BulkTask { + final BiFunction transformer; + final BiFunction reducer; + U result; + MapReduceMappingsTask rights, nextRight; + MapReduceMappingsTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceMappingsTask nextRight, + BiFunction transformer, + BiFunction reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.reducer = reducer; + } + public final U getRawResult() { return result; } + public final void compute() { + final BiFunction transformer; + final BiFunction reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceMappingsTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, reducer)).fork(); + } + U r = null; + for (Node p; (p = advance()) != null; ) { + U u; + if ((u = transformer.apply((K)p.key, p.val)) != null) + r = (r == null) ? u : reducer.apply(r, u); + } + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceMappingsTask + t = (MapReduceMappingsTask)c, + s = t.rights; + while (s != null) { + U tr, sr; + if ((sr = s.result) != null) + t.result = (((tr = t.result) == null) ? 
sr : + reducer.apply(tr, sr)); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceKeysToDoubleTask + extends BulkTask { + final ToDoubleFunction transformer; + final DoubleBinaryOperator reducer; + final double basis; + double result; + MapReduceKeysToDoubleTask rights, nextRight; + MapReduceKeysToDoubleTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceKeysToDoubleTask nextRight, + ToDoubleFunction transformer, + double basis, + DoubleBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + public final void compute() { + final ToDoubleFunction transformer; + final DoubleBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceKeysToDoubleTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsDouble(r, transformer.applyAsDouble((K)p.key)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceKeysToDoubleTask + t = (MapReduceKeysToDoubleTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsDouble(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceValuesToDoubleTask + extends BulkTask { + final ToDoubleFunction transformer; + final DoubleBinaryOperator reducer; + final double basis; + double result; + MapReduceValuesToDoubleTask rights, nextRight; + MapReduceValuesToDoubleTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceValuesToDoubleTask nextRight, + ToDoubleFunction transformer, + double basis, + DoubleBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + public final void compute() { + final ToDoubleFunction transformer; + final DoubleBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceValuesToDoubleTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceValuesToDoubleTask + t = (MapReduceValuesToDoubleTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsDouble(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceEntriesToDoubleTask + extends BulkTask { + final ToDoubleFunction> transformer; + final DoubleBinaryOperator reducer; + final double basis; + double result; + MapReduceEntriesToDoubleTask rights, nextRight; + MapReduceEntriesToDoubleTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceEntriesToDoubleTask nextRight, + ToDoubleFunction> transformer, + double basis, + DoubleBinaryOperator reducer) { + 
super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + public final void compute() { + final ToDoubleFunction> transformer; + final DoubleBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceEntriesToDoubleTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsDouble(r, transformer.applyAsDouble(p)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceEntriesToDoubleTask + t = (MapReduceEntriesToDoubleTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsDouble(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceMappingsToDoubleTask + extends BulkTask { + final ToDoubleBiFunction transformer; + final DoubleBinaryOperator reducer; + final double basis; + double result; + MapReduceMappingsToDoubleTask rights, nextRight; + MapReduceMappingsToDoubleTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceMappingsToDoubleTask nextRight, + ToDoubleBiFunction transformer, + double basis, + DoubleBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Double getRawResult() { return result; } + public final void compute() { + final ToDoubleBiFunction transformer; + final DoubleBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + double r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceMappingsToDoubleTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsDouble(r, transformer.applyAsDouble((K)p.key, p.val)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceMappingsToDoubleTask + t = (MapReduceMappingsToDoubleTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsDouble(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceKeysToLongTask + extends BulkTask { + final ToLongFunction transformer; + final LongBinaryOperator reducer; + final long basis; + long result; + MapReduceKeysToLongTask rights, nextRight; + MapReduceKeysToLongTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceKeysToLongTask nextRight, + ToLongFunction transformer, + long basis, + LongBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + public final void compute() { + final ToLongFunction transformer; + final LongBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + 
(rights = new MapReduceKeysToLongTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsLong(r, transformer.applyAsLong((K)p.key)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceKeysToLongTask + t = (MapReduceKeysToLongTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsLong(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceValuesToLongTask + extends BulkTask { + final ToLongFunction transformer; + final LongBinaryOperator reducer; + final long basis; + long result; + MapReduceValuesToLongTask rights, nextRight; + MapReduceValuesToLongTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceValuesToLongTask nextRight, + ToLongFunction transformer, + long basis, + LongBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + public final void compute() { + final ToLongFunction transformer; + final LongBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceValuesToLongTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsLong(r, transformer.applyAsLong(p.val)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceValuesToLongTask + t = (MapReduceValuesToLongTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsLong(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceEntriesToLongTask + extends BulkTask { + final ToLongFunction> transformer; + final LongBinaryOperator reducer; + final long basis; + long result; + MapReduceEntriesToLongTask rights, nextRight; + MapReduceEntriesToLongTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceEntriesToLongTask nextRight, + ToLongFunction> transformer, + long basis, + LongBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + public final void compute() { + final ToLongFunction> transformer; + final LongBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceEntriesToLongTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsLong(r, transformer.applyAsLong(p)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceEntriesToLongTask + t = (MapReduceEntriesToLongTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsLong(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceMappingsToLongTask + 
extends BulkTask { + final ToLongBiFunction transformer; + final LongBinaryOperator reducer; + final long basis; + long result; + MapReduceMappingsToLongTask rights, nextRight; + MapReduceMappingsToLongTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceMappingsToLongTask nextRight, + ToLongBiFunction transformer, + long basis, + LongBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Long getRawResult() { return result; } + public final void compute() { + final ToLongBiFunction transformer; + final LongBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + long r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceMappingsToLongTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsLong(r, transformer.applyAsLong((K)p.key, p.val)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceMappingsToLongTask + t = (MapReduceMappingsToLongTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsLong(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceKeysToIntTask + extends BulkTask { + final ToIntFunction transformer; + final IntBinaryOperator reducer; + final int basis; + int result; + MapReduceKeysToIntTask rights, nextRight; + MapReduceKeysToIntTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceKeysToIntTask nextRight, + ToIntFunction transformer, + int basis, + IntBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + public final void compute() { + final ToIntFunction transformer; + final IntBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceKeysToIntTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsInt(r, transformer.applyAsInt((K)p.key)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceKeysToIntTask + t = (MapReduceKeysToIntTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsInt(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceValuesToIntTask + extends BulkTask { + final ToIntFunction transformer; + final IntBinaryOperator reducer; + final int basis; + int result; + MapReduceValuesToIntTask rights, nextRight; + MapReduceValuesToIntTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceValuesToIntTask nextRight, + ToIntFunction transformer, + int basis, + IntBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + public final void compute() { + final ToIntFunction 
transformer; + final IntBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceValuesToIntTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsInt(r, transformer.applyAsInt(p.val)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceValuesToIntTask + t = (MapReduceValuesToIntTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsInt(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceEntriesToIntTask + extends BulkTask { + final ToIntFunction> transformer; + final IntBinaryOperator reducer; + final int basis; + int result; + MapReduceEntriesToIntTask rights, nextRight; + MapReduceEntriesToIntTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceEntriesToIntTask nextRight, + ToIntFunction> transformer, + int basis, + IntBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + public final void compute() { + final ToIntFunction> transformer; + final IntBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceEntriesToIntTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsInt(r, transformer.applyAsInt(p)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) { + MapReduceEntriesToIntTask + t = (MapReduceEntriesToIntTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsInt(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } + } + } + + static final class MapReduceMappingsToIntTask + extends BulkTask { + final ToIntBiFunction transformer; + final IntBinaryOperator reducer; + final int basis; + int result; + MapReduceMappingsToIntTask rights, nextRight; + MapReduceMappingsToIntTask + (BulkTask p, int b, int i, int f, Node[] t, + MapReduceMappingsToIntTask nextRight, + ToIntBiFunction transformer, + int basis, + IntBinaryOperator reducer) { + super(p, b, i, f, t); this.nextRight = nextRight; + this.transformer = transformer; + this.basis = basis; this.reducer = reducer; + } + public final Integer getRawResult() { return result; } + public final void compute() { + final ToIntBiFunction transformer; + final IntBinaryOperator reducer; + if ((transformer = this.transformer) != null && + (reducer = this.reducer) != null) { + int r = this.basis; + for (int i = baseIndex, f, h; batch > 0 && + (h = ((f = baseLimit) + i) >>> 1) > i;) { + addToPendingCount(1); + (rights = new MapReduceMappingsToIntTask + (this, batch >>>= 1, baseLimit = h, f, tab, + rights, transformer, r, reducer)).fork(); + } + for (Node p; (p = advance()) != null; ) + r = reducer.applyAsInt(r, transformer.applyAsInt((K)p.key, p.val)); + result = r; + CountedCompleter c; + for (c = firstComplete(); c != null; c = c.nextComplete()) 
{ + MapReduceMappingsToIntTask + t = (MapReduceMappingsToIntTask)c, + s = t.rights; + while (s != null) { + t.result = reducer.applyAsInt(t.result, s.result); + s = t.rights = s.nextRight; + } + } + } } } // Unsafe mechanics - private static final sun.misc.Unsafe UNSAFE; - private static final long SBASE; - private static final int SSHIFT; - private static final long TBASE; - private static final int TSHIFT; - private static final long HASHSEED_OFFSET; - private static final long SEGSHIFT_OFFSET; - private static final long SEGMASK_OFFSET; - private static final long SEGMENTS_OFFSET; + private static final sun.misc.Unsafe U; + private static final long SIZECTL; + private static final long TRANSFERINDEX; + private static final long TRANSFERORIGIN; + private static final long BASECOUNT; + private static final long CELLSBUSY; + private static final long CELLVALUE; + private static final long ABASE; + private static final int ASHIFT; static { - int ss, ts; try { - UNSAFE = sun.misc.Unsafe.getUnsafe(); - Class tc = HashEntry[].class; - Class sc = Segment[].class; - TBASE = UNSAFE.arrayBaseOffset(tc); - SBASE = UNSAFE.arrayBaseOffset(sc); - ts = UNSAFE.arrayIndexScale(tc); - ss = UNSAFE.arrayIndexScale(sc); - HASHSEED_OFFSET = UNSAFE.objectFieldOffset( - ConcurrentHashMap.class.getDeclaredField("hashSeed")); - SEGSHIFT_OFFSET = UNSAFE.objectFieldOffset( - ConcurrentHashMap.class.getDeclaredField("segmentShift")); - SEGMASK_OFFSET = UNSAFE.objectFieldOffset( - ConcurrentHashMap.class.getDeclaredField("segmentMask")); - SEGMENTS_OFFSET = UNSAFE.objectFieldOffset( - ConcurrentHashMap.class.getDeclaredField("segments")); + U = sun.misc.Unsafe.getUnsafe(); + Class k = ConcurrentHashMap.class; + SIZECTL = U.objectFieldOffset + (k.getDeclaredField("sizeCtl")); + TRANSFERINDEX = U.objectFieldOffset + (k.getDeclaredField("transferIndex")); + TRANSFERORIGIN = U.objectFieldOffset + (k.getDeclaredField("transferOrigin")); + BASECOUNT = U.objectFieldOffset + (k.getDeclaredField("baseCount")); + CELLSBUSY = U.objectFieldOffset + (k.getDeclaredField("cellsBusy")); + Class ck = Cell.class; + CELLVALUE = U.objectFieldOffset + (ck.getDeclaredField("value")); + Class sc = Node[].class; + ABASE = U.arrayBaseOffset(sc); + int scale = U.arrayIndexScale(sc); + if ((scale & (scale - 1)) != 0) + throw new Error("data type scale not a power of two"); + ASHIFT = 31 - Integer.numberOfLeadingZeros(scale); } catch (Exception e) { throw new Error(e); } - if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0) - throw new Error("data type scale not a power of two"); - SSHIFT = 31 - Integer.numberOfLeadingZeros(ss); - TSHIFT = 31 - Integer.numberOfLeadingZeros(ts); } - } diff --git a/jdk/src/share/classes/java/util/spi/LocaleServiceProvider.java b/jdk/src/share/classes/java/util/spi/LocaleServiceProvider.java index 428c8d700ed..b59dba1978c 100644 --- a/jdk/src/share/classes/java/util/spi/LocaleServiceProvider.java +++ b/jdk/src/share/classes/java/util/spi/LocaleServiceProvider.java @@ -128,6 +128,14 @@ import java.util.Locale; * installed SPI providers, and "JRE" represents the locale sensitive services * in the Java Runtime Environment, the locale sensitive services in the SPI * providers are looked up first. + *
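The MapReduce*Task classes above are the internal CountedCompleter machinery behind ConcurrentHashMap's parallel bulk operations: each task halves its table range while batch > 0, forks the right half, transforms the nodes it keeps, and folds completed right subtasks into its own result. A minimal usage sketch, assuming the public JDK 8 bulk-operation API (reduce, reduceValuesToLong) that these tasks implement; the class name is illustrative, and the parallelismThreshold argument controls when the work is split into such tasks:

import java.util.concurrent.ConcurrentHashMap;

public class BulkOpsDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);
        map.put("b", 2);
        map.put("c", 3);

        // Splits into MapReduce*ToLongTask subtasks once the map size exceeds
        // the threshold (1); Long.MAX_VALUE would force sequential execution.
        long sum = map.reduceValuesToLong(1L, Integer::longValue, 0L, Long::sum);

        // Object-typed variant backed by MapReduceMappingsTask: the transformer
        // may return null to skip an entry; the reducer combines non-null results.
        String joined = map.reduce(1L,
                (k, v) -> k + "=" + v,
                (s1, s2) -> s1 + "," + s2);

        System.out.println(sum);    // 6
        System.out.println(joined); // entries in an unspecified order
    }
}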
<p>
+ * There are two other possible locale sensitive service providers, i.e., "CLDR" + * which is a provider based on Unicode Consortium's + * CLDR Project, and "HOST" which is a + * provider that reflects the user's custom settings in the underlying operating + * system. These two providers may not be available, depending on the Java Runtime + * Environment implementation. Specifying "JRE,SPI" is identical to the default + * behavior, which is compatible with the prior releases. * * @since 1.6 */ diff --git a/jdk/src/share/classes/java/util/stream/DoubleStream.java b/jdk/src/share/classes/java/util/stream/DoubleStream.java index f105453603a..f402b5daee6 100644 --- a/jdk/src/share/classes/java/util/stream/DoubleStream.java +++ b/jdk/src/share/classes/java/util/stream/DoubleStream.java @@ -603,7 +603,7 @@ public interface DoubleStream extends BaseStream { /** * Returns an {@link OptionalDouble} describing the first element of this * stream (in the encounter order), or an empty {@code OptionalDouble} if - * the stream is empty. If the stream has no encounter order, than any + * the stream is empty. If the stream has no encounter order, then any * element may be returned. * *
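For the LocaleServiceProvider change above, the provider preference order is normally chosen at launch time through the java.locale.providers system property; a hedged sketch of typical use (the class name is illustrative, and "HOST"/"CLDR" availability is implementation-dependent, as the javadoc says):

// java -Djava.locale.providers=HOST,SPI,JRE LocaleProviderDemo
import java.text.DateFormat;
import java.util.Date;
import java.util.Locale;

public class LocaleProviderDemo {
    public static void main(String[] args) {
        // The formatting below is served by the first provider in the
        // configured order ("HOST" here, if available) that supports
        // the requested locale.
        DateFormat df = DateFormat.getDateInstance(DateFormat.LONG, Locale.getDefault());
        System.out.println(df.format(new Date()));
    }
}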
<p>
This is a short-circuiting diff --git a/jdk/src/share/classes/java/util/stream/IntStream.java b/jdk/src/share/classes/java/util/stream/IntStream.java index 9b343292752..576a6aadea2 100644 --- a/jdk/src/share/classes/java/util/stream/IntStream.java +++ b/jdk/src/share/classes/java/util/stream/IntStream.java @@ -588,7 +588,7 @@ public interface IntStream extends BaseStream { /** * Returns an {@link OptionalInt} describing the first element of this * stream (in the encounter order), or an empty {@code OptionalInt} if the - * stream is empty. If the stream has no encounter order, than any element + * stream is empty. If the stream has no encounter order, then any element * may be returned. * *
<p>
This is a short-circuiting diff --git a/jdk/src/share/classes/java/util/stream/LongStream.java b/jdk/src/share/classes/java/util/stream/LongStream.java index cde4d025e5a..22fae149baa 100644 --- a/jdk/src/share/classes/java/util/stream/LongStream.java +++ b/jdk/src/share/classes/java/util/stream/LongStream.java @@ -588,7 +588,7 @@ public interface LongStream extends BaseStream { /** * Returns an {@link OptionalLong} describing the first element of this * stream (in the encounter order), or an empty {@code OptionalLong} if the - * stream is empty. If the stream has no encounter order, than any element + * stream is empty. If the stream has no encounter order, then any element * may be returned. * *
<p>
This is a short-circuiting diff --git a/jdk/src/share/classes/java/util/stream/Stream.java b/jdk/src/share/classes/java/util/stream/Stream.java index 516976280fe..f06a01b7aea 100644 --- a/jdk/src/share/classes/java/util/stream/Stream.java +++ b/jdk/src/share/classes/java/util/stream/Stream.java @@ -754,7 +754,7 @@ public interface Stream extends BaseStream> { /** * Returns an {@link Optional} describing the first element of this stream * (in the encounter order), or an empty {@code Optional} if the stream is - * empty. If the stream has no encounter order, than any element may be + * empty. If the stream has no encounter order, then any element may be * returned. * *
<p>
This is a short-circuiting diff --git a/jdk/src/share/classes/java/util/stream/StreamBuilder.java b/jdk/src/share/classes/java/util/stream/StreamBuilder.java index cc3bc9a1842..66baa84cdfe 100644 --- a/jdk/src/share/classes/java/util/stream/StreamBuilder.java +++ b/jdk/src/share/classes/java/util/stream/StreamBuilder.java @@ -38,7 +38,7 @@ import java.util.function.LongConsumer; *
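The four stream hunks above fix the same than/then typo in the findFirst contract; a short sketch of the behavior being documented (class name illustrative):

import java.util.OptionalInt;
import java.util.stream.IntStream;

public class FindFirstDemo {
    public static void main(String[] args) {
        // Short-circuiting: consumption stops once a match is seen.
        OptionalInt first = IntStream.of(3, 1, 4, 1, 5)
                                     .filter(i -> i > 3)
                                     .findFirst();
        System.out.println(first); // OptionalInt[4]
        // On a stream with no encounter order (e.g. an unordered parallel
        // stream), any matching element may be returned instead.
    }
}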
<p>
A {@code StreamBuilder} has a lifecycle, where it starts in a building * phase, during which elements can be added, and then transitions to a built * phase, after which elements may not be added. The built phase begins - * when the {@link #build()}} method is called, which creates an ordered + * when the {@link #build()} method is called, which creates an ordered * {@code Stream} whose elements are the elements that were added to the stream * builder, in the order they were added. * @@ -98,7 +98,7 @@ public interface StreamBuilder extends Consumer { *
<p>
A stream builder has a lifecycle, where it starts in a building * phase, during which elements can be added, and then transitions to a * built phase, after which elements may not be added. The built phase - * begins when the {@link #build()}} method is called, which creates an + * begins when the {@link #build()} method is called, which creates an * ordered stream whose elements are the elements that were added to the * stream builder, in the order they were added. * @@ -155,7 +155,7 @@ public interface StreamBuilder extends Consumer { *
<p>
A stream builder has a lifecycle, where it starts in a building * phase, during which elements can be added, and then transitions to a * built phase, after which elements may not be added. The built phase - * begins when the {@link #build()}} method is called, which creates an + * begins when the {@link #build()} method is called, which creates an * ordered stream whose elements are the elements that were added to the * stream builder, in the order they were added. * @@ -209,6 +209,13 @@ public interface StreamBuilder extends Consumer { /** * A mutable builder for a {@code DoubleStream}. * + *
<p>
A stream builder has a lifecycle, where it starts in a building + * phase, during which elements can be added, and then transitions to a + * built phase, after which elements may not be added. The built phase + * begins when the {@link #build()} method is called, which creates an + * ordered stream whose elements are the elements that were added to the + * stream builder, in the order they were added. + * * @see LongStream#builder() * @since 1.8 */ @@ -217,13 +224,6 @@ public interface StreamBuilder extends Consumer { /** * Adds an element to the stream being built. * - *
<p>
A stream builder has a lifecycle, where it starts in a building - * phase, during which elements can be added, and then transitions to a - * built phase, after which elements may not be added. The built phase - * begins when the {@link #build()}} method is called, which creates an - * ordered stream whose elements are the elements that were added to the - * stream builder, in the order they were added. - * * @throws IllegalStateException if the builder has already transitioned * to the built state */ diff --git a/jdk/src/share/classes/java/util/stream/StreamSupport.java b/jdk/src/share/classes/java/util/stream/StreamSupport.java index ddc4dd5f1cb..2b3cbfaeb76 100644 --- a/jdk/src/share/classes/java/util/stream/StreamSupport.java +++ b/jdk/src/share/classes/java/util/stream/StreamSupport.java @@ -41,7 +41,11 @@ import java.util.function.Supplier; * * @since 1.8 */ -public class StreamSupport { +public final class StreamSupport { + + // Suppresses default constructor, ensuring non-instantiability. + private StreamSupport() {} + /** * Creates a new sequential {@code Stream} from a {@code Spliterator}. * @@ -50,7 +54,7 @@ public class StreamSupport { * *
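A sketch of the builder lifecycle the StreamBuilder javadoc above describes, assuming the builder() factories its @see tags reference; note that in this snapshot the interface is named StreamBuilder, while later JDK 8 builds expose it as Stream.Builder (the class name below is illustrative):

import java.util.stream.Stream;

public class BuilderLifecycleDemo {
    public static void main(String[] args) {
        Stream.Builder<String> b = Stream.builder();
        b.add("one").add("two").add("three");  // building phase
        Stream<String> s = b.build();          // built phase begins here
        s.forEach(System.out::println);        // encounter order = insertion order
        // b.add("four"); // would throw IllegalStateException once built
    }
}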
<p>
It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * late-binding. Otherwise, + * late-binding. Otherwise, * {@link #stream(Supplier, int)} should be used to * reduce the scope of potential interference with the source. See * Non-Interference for @@ -75,7 +79,7 @@ public class StreamSupport { * *
<p>
It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * late-binding. Otherwise, + * late-binding. Otherwise, * {@link #stream(Supplier, int)} should be used to * reduce the scope of potential interference with the source. See * Non-Interference for @@ -102,7 +106,7 @@ public class StreamSupport { * *
<p>
For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * late-binding, it is likely + * late-binding, it is likely * more efficient to use {@link #stream(java.util.Spliterator)} instead. * The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -138,7 +142,7 @@ public class StreamSupport { * *
<p>
For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * late-binding, it is likely + * late-binding, it is likely * more efficient to use {@link #stream(Spliterator)} instead. * The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -172,7 +176,7 @@ public class StreamSupport { * *
<p>
It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * late-binding. Otherwise, + * late-binding. Otherwise, * {@link #stream(Supplier, int)}} should be used to * reduce the scope of potential interference with the source. See * Non-Interference for @@ -195,7 +199,7 @@ public class StreamSupport { * *
<p>
It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * late-binding. Otherwise, + * late-binding. Otherwise, * {@link #stream(Supplier, int)}} should be used to * reduce the scope of potential interference with the source. See * Non-Interference for @@ -220,7 +224,7 @@ public class StreamSupport { * *
<p>
For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * late-binding, it is likely + * late-binding, it is likely * more efficient to use {@link #intStream(Spliterator.OfInt)} instead. * The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -254,7 +258,7 @@ public class StreamSupport { * *
<p>
For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * late-binding, it is likely + * late-binding, it is likely * more efficient to use {@link #intStream(Spliterator.OfInt)} instead. * The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -286,7 +290,7 @@ public class StreamSupport { * *
<p>
It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * late-binding. Otherwise, + * late-binding. Otherwise, * {@link #stream(Supplier, int)} should be used to * reduce the scope of potential interference with the source. See * Non-Interference for @@ -310,7 +314,7 @@ public class StreamSupport { * *
<p>
It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * late-binding. Otherwise, + * late-binding. Otherwise, * {@link #stream(Supplier, int)} should be used to * reduce the scope of potential interference with the source. See * Non-Interference for @@ -335,7 +339,7 @@ public class StreamSupport { * *
<p>
For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * late-binding, it is likely + * late-binding, it is likely * more efficient to use {@link #longStream(Spliterator.OfLong)} instead. * The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -369,7 +373,7 @@ public class StreamSupport { * *
<p>
For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * late-binding, it is likely + * late-binding, it is likely * more efficient to use {@link #longStream(Spliterator.OfLong)} instead. * The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -402,7 +406,7 @@ public class StreamSupport { * *
<p>
It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * late-binding. Otherwise, + * late-binding. Otherwise, * {@link #stream(Supplier, int)} should be used to * reduce the scope of potential interference with the source. See * Non-Interference for @@ -426,7 +430,7 @@ public class StreamSupport { * *
<p>
It is strongly recommended the spliterator report a characteristic of * {@code IMMUTABLE} or {@code CONCURRENT}, or be - * late-binding. Otherwise, + * late-binding. Otherwise, * {@link #stream(Supplier, int)} should be used to * reduce the scope of potential interference with the source. See * Non-Interference for @@ -451,7 +455,7 @@ public class StreamSupport { *
<p>
* For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * late-binding, it is likely + * late-binding, it is likely * more efficient to use {@link #doubleStream(Spliterator.OfDouble)} instead. * The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the @@ -485,7 +489,7 @@ public class StreamSupport { * *
<p>
For spliterators that report a characteristic of {@code IMMUTABLE} * or {@code CONCURRENT}, or that are - * late-binding, it is likely + * late-binding, it is likely * more efficient to use {@link #doubleStream(Spliterator.OfDouble)} instead. * The use of a {@code Supplier} in this form provides a level of * indirection that reduces the scope of potential interference with the diff --git a/jdk/src/share/classes/java/util/zip/ZipConstants.java b/jdk/src/share/classes/java/util/zip/ZipConstants.java index ade50f32fcb..79cefbd46e8 100644 --- a/jdk/src/share/classes/java/util/zip/ZipConstants.java +++ b/jdk/src/share/classes/java/util/zip/ZipConstants.java @@ -68,6 +68,14 @@ interface ZipConstants { static final int EXTSIZ = 8; // compressed size static final int EXTLEN = 12; // uncompressed size + /* + * Extra field header ID + */ + static final int EXTID_ZIP64 = 0x0001; // Zip64 + static final int EXTID_NTFS = 0x000a; // NTFS + static final int EXTID_UNIX = 0x000d; // UNIX + static final int EXTID_EXTT = 0x5455; // Info-ZIP Extended Timestamp + /* * Central directory (CEN) header field offsets */ diff --git a/jdk/src/share/classes/java/util/zip/ZipEntry.java b/jdk/src/share/classes/java/util/zip/ZipEntry.java index 847f8ba463a..60d440ebe46 100644 --- a/jdk/src/share/classes/java/util/zip/ZipEntry.java +++ b/jdk/src/share/classes/java/util/zip/ZipEntry.java @@ -25,8 +25,6 @@ package java.util.zip; -import java.util.Date; - /** * This class is used to represent a ZIP file entry. * @@ -35,7 +33,7 @@ import java.util.Date; public class ZipEntry implements ZipConstants, Cloneable { String name; // entry name - long time = -1; // modification time (in DOS time) + long mtime = -1; // last modification time long crc = -1; // crc-32 of entry data long size = -1; // uncompressed size of entry data long csize = -1; // compressed size of entry data @@ -79,7 +77,7 @@ class ZipEntry implements ZipConstants, Cloneable { */ public ZipEntry(ZipEntry e) { name = e.name; - time = e.time; + mtime = e.mtime; crc = e.crc; size = e.size; csize = e.csize; @@ -89,7 +87,7 @@ class ZipEntry implements ZipConstants, Cloneable { comment = e.comment; } - /* + /** * Creates a new un-initialized zip entry */ ZipEntry() {} @@ -103,22 +101,26 @@ class ZipEntry implements ZipConstants, Cloneable { } /** - * Sets the modification time of the entry. - * @param time the entry modification time in number of milliseconds - * since the epoch + * Sets the last modification time of the entry. + * + * @param time the last modification time of the entry in milliseconds since the epoch * @see #getTime() */ public void setTime(long time) { - this.time = javaToDosTime(time); + this.mtime = time; } /** - * Returns the modification time of the entry, or -1 if not specified. - * @return the modification time of the entry, or -1 if not specified + * Returns the last modification time of the entry. + *
<p>
The last modification time may come from zip entry's extensible + * data field {@code NTFS} or {@code Info-ZIP Extended Timestamp}, if + * the entry is read from {@link ZipInputStream} or {@link ZipFile}. + * + * @return the last modification time of the entry, or -1 if not specified + * @see #setTime(long) */ public long getTime() { - return time != -1 ? dosToJavaTime(time) : -1; + return mtime; } /** @@ -277,35 +279,6 @@ class ZipEntry implements ZipConstants, Cloneable { return getName(); } - /* - * Converts DOS time to Java time (number of milliseconds since epoch). - */ - private static long dosToJavaTime(long dtime) { - @SuppressWarnings("deprecation") // Use of date constructor. - Date d = new Date((int)(((dtime >> 25) & 0x7f) + 80), - (int)(((dtime >> 21) & 0x0f) - 1), - (int)((dtime >> 16) & 0x1f), - (int)((dtime >> 11) & 0x1f), - (int)((dtime >> 5) & 0x3f), - (int)((dtime << 1) & 0x3e)); - return d.getTime(); - } - - /* - * Converts Java time to DOS time. - */ - @SuppressWarnings("deprecation") // Use of date methods - private static long javaToDosTime(long time) { - Date d = new Date(time); - int year = d.getYear() + 1900; - if (year < 1980) { - return (1 << 21) | (1 << 16); - } - return (year - 1980) << 25 | (d.getMonth() + 1) << 21 | - d.getDate() << 16 | d.getHours() << 11 | d.getMinutes() << 5 | - d.getSeconds() >> 1; - } - /** * Returns the hash code value for this entry. */ diff --git a/jdk/src/share/classes/java/util/zip/ZipFile.java b/jdk/src/share/classes/java/util/zip/ZipFile.java index f334f36d1b0..be82c728de3 100644 --- a/jdk/src/share/classes/java/util/zip/ZipFile.java +++ b/jdk/src/share/classes/java/util/zip/ZipFile.java @@ -46,6 +46,7 @@ import java.util.stream.Stream; import java.util.stream.StreamSupport; import static java.util.zip.ZipConstants64.*; +import static java.util.zip.ZipUtils.*; /** * This class is used to read entries from a zip file. @@ -564,12 +565,44 @@ class ZipFile implements ZipConstants, Closeable { e.name = zc.toString(bname, bname.length); } } - e.time = getEntryTime(jzentry); e.crc = getEntryCrc(jzentry); e.size = getEntrySize(jzentry); e.
csize = getEntryCSize(jzentry); e.method = getEntryMethod(jzentry); e.extra = getEntryBytes(jzentry, JZENTRY_EXTRA); + if (e.extra != null) { + byte[] extra = e.extra; + int len = e.extra.length; + int off = 0; + while (off + 4 < len) { + int pos = off; + int tag = get16(extra, pos); + int sz = get16(extra, pos + 2); + pos += 4; + if (pos + sz > len) // invalid data + break; + switch (tag) { + case EXTID_NTFS: + pos += 4; // reserved 4 bytes + if (get16(extra, pos) != 0x0001 || get16(extra, pos + 2) != 24) + break; + e.mtime = winToJavaTime(get64(extra, pos + 4)); + break; + case EXTID_EXTT: + int flag = Byte.toUnsignedInt(extra[pos++]); + if ((flag & 0x1) != 0) { + e.mtime = unixToJavaTime(get32(extra, pos)); + pos += 4; + } + break; + default: // unknown tag + } + off += (sz + 4); + } + } + if (e.mtime == -1) { + e.mtime = dosToJavaTime(getEntryTime(jzentry)); + } byte[] bcomm = getEntryBytes(jzentry, JZENTRY_COMMENT); if (bcomm == null) { e.comment = null; diff --git a/jdk/src/share/classes/java/util/zip/ZipInputStream.java b/jdk/src/share/classes/java/util/zip/ZipInputStream.java index 7076f9be5d8..5c315d452c0 100644 --- a/jdk/src/share/classes/java/util/zip/ZipInputStream.java +++ b/jdk/src/share/classes/java/util/zip/ZipInputStream.java @@ -32,6 +32,7 @@ import java.io.PushbackInputStream; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import static java.util.zip.ZipConstants64.*; +import static java.util.zip.ZipUtils.*; /** * This class implements an input stream filter for reading files in the @@ -302,7 +303,7 @@ class ZipInputStream extends InflaterInputStream implements ZipConstants { throw new ZipException("encrypted ZIP entry not supported"); } e.method = get16(tmpbuf, LOCHOW); - e.time = get32(tmpbuf, LOCTIM); + e.mtime = dosToJavaTime(get32(tmpbuf, LOCTIM)); if ((flag & 8) == 8) { /* "Data Descriptor" present */ if (e.method != DEFLATED) { @@ -316,32 +317,51 @@ class ZipInputStream extends InflaterInputStream implements ZipConstants { } len = get16(tmpbuf, LOCEXT); if (len > 0) { - byte[] bb = new byte[len]; - readFully(bb, 0, len); - e.setExtra(bb); + byte[] extra = new byte[len]; + readFully(extra, 0, len); + e.setExtra(extra); // extra fields are in "HeaderID(2)DataSize(2)Data... format - if (e.csize == ZIP64_MAGICVAL || e.size == ZIP64_MAGICVAL) { - int off = 0; - while (off + 4 < len) { - int sz = get16(bb, off + 2); - if (get16(bb, off) == ZIP64_EXTID) { - off += 4; - // LOC extra zip64 entry MUST include BOTH original and - // compressed file size fields - if (sz < 16 || (off + sz) > len ) { - // Invalid zip64 extra fields, simply skip. Even it's - // rare, it's possible the entry size happens to be - // the magic value and it "accidnetly" has some bytes - // in extra match the id. - return e; - } - e.size = get64(bb, off); - e.csize = get64(bb, off + 8); - break; + int off = 0; + while (off + 4 < len) { + int pos = off; + int tag = get16(extra, pos); + int sz = get16(extra, pos + 2); + pos += 4; + if (pos + sz > len) // invalid data + break; + switch (tag) { + case EXTID_ZIP64 : + // LOC extra zip64 entry MUST include BOTH original and + // compressed file size fields. + // + // If invalid zip64 extra fields, simply skip. Even it's + // rare, it's possible the entry size happens to be + // the magic value and it "accidently" has some bytes + // in extra match the id. 
+ if (sz >= 16 && (pos + sz) <= len ) { + e.size = get64(extra, pos); + e.csize = get64(extra, pos + 8); } - off += (sz + 4); + break; + case EXTID_NTFS: + pos += 4; // reserved 4 bytes + if (get16(extra, pos) != 0x0001 || get16(extra, pos + 2) != 24) + break; + // override the loc field, NTFS time has 'microsecond' granularity + e.mtime = winToJavaTime(get64(extra, pos + 4)); + break; + case EXTID_EXTT: + int flag = Byte.toUnsignedInt(extra[pos++]); + if ((flag & 0x1) != 0) { + e.mtime = unixToJavaTime(get32(extra, pos)); + pos += 4; + } + break; + default: // unknown tag } + off += (sz + 4); } + } return e; } @@ -430,27 +450,4 @@ class ZipInputStream extends InflaterInputStream implements ZipConstants { } } - /* - * Fetches unsigned 16-bit value from byte array at specified offset. - * The bytes are assumed to be in Intel (little-endian) byte order. - */ - private static final int get16(byte b[], int off) { - return Byte.toUnsignedInt(b[off]) | (Byte.toUnsignedInt(b[off+1]) << 8); - } - - /* - * Fetches unsigned 32-bit value from byte array at specified offset. - * The bytes are assumed to be in Intel (little-endian) byte order. - */ - private static final long get32(byte b[], int off) { - return (get16(b, off) | ((long)get16(b, off+2) << 16)) & 0xffffffffL; - } - - /* - * Fetches signed 64-bit value from byte array at specified offset. - * The bytes are assumed to be in Intel (little-endian) byte order. - */ - private static final long get64(byte b[], int off) { - return get32(b, off) | (get32(b, off+4) << 32); - } } diff --git a/jdk/src/share/classes/java/util/zip/ZipOutputStream.java b/jdk/src/share/classes/java/util/zip/ZipOutputStream.java index 0c980823e3f..7a2cf852d30 100644 --- a/jdk/src/share/classes/java/util/zip/ZipOutputStream.java +++ b/jdk/src/share/classes/java/util/zip/ZipOutputStream.java @@ -32,6 +32,7 @@ import java.nio.charset.StandardCharsets; import java.util.Vector; import java.util.HashSet; import static java.util.zip.ZipConstants64.*; +import static java.util.zip.ZipUtils.*; /** * This class implements an output stream filter for writing files in the @@ -190,7 +191,7 @@ class ZipOutputStream extends DeflaterOutputStream implements ZipConstants { if (current != null) { closeEntry(); // close previous entry } - if (e.time == -1) { + if (e.mtime == -1) { e.setTime(System.currentTimeMillis()); } if (e.method == -1) { @@ -382,16 +383,25 @@ class ZipOutputStream extends DeflaterOutputStream implements ZipConstants { private void writeLOC(XEntry xentry) throws IOException { ZipEntry e = xentry.entry; int flag = e.flag; - int elen = (e.extra != null) ? e.extra.length : 0; boolean hasZip64 = false; - + int elen = (e.extra != null) ? e.extra.length : 0; + int eoff = 0; + boolean foundEXTT = false; // if EXTT already present + // do nothing. 
+ while (eoff + 4 < elen) { + int tag = get16(e.extra, eoff); + int sz = get16(e.extra, eoff + 2); + if (tag == EXTID_EXTT) { + foundEXTT = true; + } + eoff += (4 + sz); + } writeInt(LOCSIG); // LOC header signature - if ((flag & 8) == 8) { writeShort(version(e)); // version needed to extract writeShort(flag); // general purpose bit flag writeShort(e.method); // compression method - writeInt(e.time); // last modification time + writeInt(javaToDosTime(e.mtime)); // last modification time // store size, uncompressed size, and crc-32 in data descriptor // immediately following compressed entry data @@ -407,7 +417,7 @@ } writeShort(flag); // general purpose bit flag writeShort(e.method); // compression method - writeInt(e.time); // last modification time + writeInt(javaToDosTime(e.mtime)); // last modification time writeInt(e.crc); // crc-32 if (hasZip64) { writeInt(ZIP64_MAGICVAL); @@ -420,6 +430,8 @@ } byte[] nameBytes = zc.getBytes(e.name); writeShort(nameBytes.length); + if (!foundEXTT) + elen += 9; // use Info-ZIP's ext time in extra writeShort(elen); writeBytes(nameBytes, 0, nameBytes.length); if (hasZip64) { @@ -428,6 +440,12 @@ writeLong(e.size); writeLong(e.csize); } + if (!foundEXTT) { + writeShort(EXTID_EXTT); + writeShort(5); // size for the following data block + writeByte(0x1); // flags byte, mtime only + writeInt(javaToUnixTime(e.mtime)); + } if (e.extra != null) { writeBytes(e.extra, 0, e.extra.length); } @@ -457,25 +475,25 @@ ZipEntry e = xentry.entry; int flag = e.flag; int version = version(e); - long csize = e.csize; long size = e.size; long offset = xentry.offset; - int e64len = 0; + int elenZIP64 = 0; boolean hasZip64 = false; + if (e.csize >= ZIP64_MAGICVAL) { csize = ZIP64_MAGICVAL; - e64len += 8; // csize(8) + elenZIP64 += 8; // csize(8) hasZip64 = true; } if (e.size >= ZIP64_MAGICVAL) { size = ZIP64_MAGICVAL; // size(8) - e64len += 8; + elenZIP64 += 8; hasZip64 = true; } if (xentry.offset >= ZIP64_MAGICVAL) { offset = ZIP64_MAGICVAL; - e64len += 8; // offset(8) + elenZIP64 += 8; // offset(8) hasZip64 = true; } writeInt(CENSIG); // CEN header signature @@ -488,18 +506,32 @@ } writeShort(flag); // general purpose bit flag writeShort(e.method); // compression method - writeInt(e.time); // last modification time + writeInt(javaToDosTime(e.mtime)); // last modification time writeInt(e.crc); // crc-32 writeInt(csize); // compressed size writeInt(size); // uncompressed size byte[] nameBytes = zc.getBytes(e.name); writeShort(nameBytes.length); + + int elen = (e.extra != null) ? e.extra.length : 0; + int eoff = 0; + boolean foundEXTT = false; // if EXTT already present + // do nothing. + while (eoff + 4 < elen) { + int tag = get16(e.extra, eoff); + int sz = get16(e.extra, eoff + 2); + if (tag == EXTID_EXTT) { + foundEXTT = true; + } + eoff += (4 + sz); + } if (hasZip64) { // + headid(2) + datasize(2) - writeShort(e64len + 4 + (e.extra != null ? e.extra.length : 0)); - } else { - writeShort(e.extra != null ?
e.extra.length : 0); + elen += (elenZIP64 + 4); } + if (!foundEXTT) + elen += 9; // Info-ZIP's Extended Timestamp + writeShort(elen); byte[] commentBytes; if (e.comment != null) { commentBytes = zc.getBytes(e.comment); @@ -515,7 +547,7 @@ class ZipOutputStream extends DeflaterOutputStream implements ZipConstants { writeBytes(nameBytes, 0, nameBytes.length); if (hasZip64) { writeShort(ZIP64_EXTID);// Zip64 extra - writeShort(e64len); + writeShort(elenZIP64); if (size == ZIP64_MAGICVAL) writeLong(e.size); if (csize == ZIP64_MAGICVAL) @@ -523,6 +555,12 @@ class ZipOutputStream extends DeflaterOutputStream implements ZipConstants { if (offset == ZIP64_MAGICVAL) writeLong(xentry.offset); } + if (!foundEXTT) { + writeShort(EXTID_EXTT); + writeShort(5); + writeByte(0x1); // flags byte + writeInt(javaToUnixTime(e.mtime)); + } if (e.extra != null) { writeBytes(e.extra, 0, e.extra.length); } @@ -588,6 +626,15 @@ class ZipOutputStream extends DeflaterOutputStream implements ZipConstants { } } + /* + * Writes a 8-bit byte to the output stream. + */ + private void writeByte(int v) throws IOException { + OutputStream out = this.out; + out.write(v & 0xff); + written += 1; + } + /* * Writes a 16-bit short to the output stream in little-endian byte order. */ diff --git a/jdk/src/share/classes/java/util/zip/ZipUtils.java b/jdk/src/share/classes/java/util/zip/ZipUtils.java new file mode 100644 index 00000000000..2b2dd9a6e4b --- /dev/null +++ b/jdk/src/share/classes/java/util/zip/ZipUtils.java @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.util.zip; + +import java.util.Date; +import java.util.concurrent.TimeUnit; + +class ZipUtils { + + // used to adjust values between Windows and java epoch + private static final long WINDOWS_EPOCH_IN_MICROSECONDS = -11644473600000000L; + + /** + * Converts Windows time (in microseconds, UTC/GMT) time to Java time. + */ + public static final long winToJavaTime(long wtime) { + return TimeUnit.MILLISECONDS.convert( + wtime / 10 + WINDOWS_EPOCH_IN_MICROSECONDS, TimeUnit.MICROSECONDS); + } + + /** + * Converts Java time to Windows time. 
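Both writeLOC and writeCEN above walk the entry's extra data looking for an existing EXTT block; every extra-field block uses the same little-endian HeaderID(2) + DataSize(2) + Data layout. A standalone sketch of that walk; hasBlock and its get16 helper are hypothetical names mirroring the code above:

public class ExtraFieldScan {
    // Little-endian unsigned 16-bit read, as in ZipUtils.get16 above.
    static int get16(byte[] b, int off) {
        return (b[off] & 0xff) | ((b[off + 1] & 0xff) << 8);
    }

    // Returns true if a block with the given header ID (e.g. 0x5455 for
    // Info-ZIP's extended timestamp) is present in the extra field.
    static boolean hasBlock(byte[] extra, int targetTag) {
        int len = (extra != null) ? extra.length : 0;
        int off = 0;
        while (off + 4 < len) {             // need room for ID + size
            int tag = get16(extra, off);
            int sz = get16(extra, off + 2);
            if (off + 4 + sz > len)         // truncated block: stop scanning
                break;
            if (tag == targetTag)
                return true;
            off += 4 + sz;                  // skip to the next block
        }
        return false;
    }

    public static void main(String[] args) {
        byte[] extra = {0x55, 0x54, 0x05, 0x00, 0x01, 0, 0, 0, 0}; // EXTT, 5 data bytes
        System.out.println(hasBlock(extra, 0x5455)); // true
    }
}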
+ */ + public static final long javaToWinTime(long time) { + return (TimeUnit.MICROSECONDS.convert(time, TimeUnit.MILLISECONDS) + - WINDOWS_EPOCH_IN_MICROSECONDS) * 10; + } + + /** + * Converts "standard Unix time" (in seconds, UTC/GMT) to Java time + */ + public static final long unixToJavaTime(long utime) { + return TimeUnit.MILLISECONDS.convert(utime, TimeUnit.SECONDS); + } + + /** + * Converts Java time to "standard Unix time". + */ + public static final long javaToUnixTime(long time) { + return TimeUnit.SECONDS.convert(time, TimeUnit.MILLISECONDS); + } + + /** + * Converts DOS time to Java time (number of milliseconds since epoch). + */ + public static long dosToJavaTime(long dtime) { + @SuppressWarnings("deprecation") // Use of date constructor. + Date d = new Date((int)(((dtime >> 25) & 0x7f) + 80), + (int)(((dtime >> 21) & 0x0f) - 1), + (int)((dtime >> 16) & 0x1f), + (int)((dtime >> 11) & 0x1f), + (int)((dtime >> 5) & 0x3f), + (int)((dtime << 1) & 0x3e)); + return d.getTime(); + } + + /** + * Converts Java time to DOS time. + */ + @SuppressWarnings("deprecation") // Use of date methods + public static long javaToDosTime(long time) { + Date d = new Date(time); + int year = d.getYear() + 1900; + if (year < 1980) { + return (1 << 21) | (1 << 16); + } + return (year - 1980) << 25 | (d.getMonth() + 1) << 21 | + d.getDate() << 16 | d.getHours() << 11 | d.getMinutes() << 5 | + d.getSeconds() >> 1; + } + + + /** + * Fetches unsigned 16-bit value from byte array at specified offset. + * The bytes are assumed to be in Intel (little-endian) byte order. + */ + public static final int get16(byte b[], int off) { + return Byte.toUnsignedInt(b[off]) | (Byte.toUnsignedInt(b[off+1]) << 8); + } + + /** + * Fetches unsigned 32-bit value from byte array at specified offset. + * The bytes are assumed to be in Intel (little-endian) byte order. + */ + public static final long get32(byte b[], int off) { + return (get16(b, off) | ((long)get16(b, off+2) << 16)) & 0xffffffffL; + } + + /** + * Fetches signed 64-bit value from byte array at specified offset. + * The bytes are assumed to be in Intel (little-endian) byte order. + */ + public static final long get64(byte b[], int off) { + return get32(b, off) | (get32(b, off+4) << 32); + } + +} diff --git a/jdk/src/share/classes/javax/crypto/Cipher.java b/jdk/src/share/classes/javax/crypto/Cipher.java index bdbe2bcd98e..70d8d346601 100644 --- a/jdk/src/share/classes/javax/crypto/Cipher.java +++ b/jdk/src/share/classes/javax/crypto/Cipher.java @@ -1158,6 +1158,9 @@ public class Cipher { * determined from the given key, or if the given key has a keysize that * exceeds the maximum allowable keysize (as determined from the * configured jurisdiction policy files). + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. */ public final void init(int opmode, Key key) throws InvalidKeyException { init(opmode, key, JceSecurity.RANDOM); } @@ -1208,6 +1211,9 @@ public class Cipher { * determined from the given key, or if the given key has a keysize that * exceeds the maximum allowable keysize (as determined from the * configured jurisdiction policy files). + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}.
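javaToDosTime and dosToJavaTime above pack the timestamp into the DOS format: years since 1980 (7 bits), month (4), day (5), hours (5), minutes (6) and seconds/2 (5), so odd seconds round down. A small standalone arithmetic check of that layout (illustrative class name; not the package-private ZipUtils methods themselves):

public class DosTimeBits {
    public static void main(String[] args) {
        int year = 2013, month = 4, day = 1, hour = 12, min = 30, sec = 58;
        // Pack, as javaToDosTime does (2-second granularity for seconds).
        long dtime = (long)(year - 1980) << 25 | month << 21 | day << 16
                   | hour << 11 | min << 5 | sec >> 1;
        // Unpack, as dosToJavaTime does.
        int y = (int)((dtime >> 25) & 0x7f) + 1980;
        int s = (int)((dtime << 1) & 0x3e);
        System.out.println(y + "-" + ((dtime >> 21) & 0x0f) + "-" + ((dtime >> 16) & 0x1f)
                + " " + ((dtime >> 11) & 0x1f) + ":" + ((dtime >> 5) & 0x3f) + ":" + s);
        // Prints 2013-4-1 12:30:58; a value of 59 would also come back as 58.
    }
}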
*/ public final void init(int opmode, Key key, SecureRandom random) throws InvalidKeyException @@ -1285,6 +1291,9 @@ public class Cipher { * algorithm parameters imply a cryptographic strength that would exceed * the legal limits (as determined from the configured jurisdiction * policy files). + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. */ public final void init(int opmode, Key key, AlgorithmParameterSpec params) throws InvalidKeyException, InvalidAlgorithmParameterException @@ -1343,6 +1352,9 @@ public class Cipher { * algorithm parameters imply a cryptographic strength that would exceed * the legal limits (as determined from the configured jurisdiction * policy files). + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. */ public final void init(int opmode, Key key, AlgorithmParameterSpec params, SecureRandom random) @@ -1416,6 +1428,9 @@ public class Cipher { * algorithm parameters imply a cryptographic strength that would exceed * the legal limits (as determined from the configured jurisdiction * policy files). + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. */ public final void init(int opmode, Key key, AlgorithmParameters params) throws InvalidKeyException, InvalidAlgorithmParameterException @@ -1474,6 +1489,9 @@ public class Cipher { * algorithm parameters imply a cryptographic strength that would exceed * the legal limits (as determined from the configured jurisdiction * policy files). + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. */ public final void init(int opmode, Key key, AlgorithmParameters params, SecureRandom random) @@ -1552,6 +1570,9 @@ public class Cipher { * in the given certificate has a keysize that exceeds the maximum * allowable keysize (as determined by the configured jurisdiction policy * files). + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. */ public final void init(int opmode, Certificate certificate) throws InvalidKeyException @@ -1619,6 +1640,9 @@ public class Cipher { * in the given certificate has a keysize that exceeds the maximum * allowable keysize (as determined by the configured jurisdiction policy * files). + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the underlying {@code CipherSpi}. */ public final void init(int opmode, Certificate certificate, SecureRandom random) @@ -2410,6 +2434,9 @@ public class Cipher { * @exception InvalidKeyException if it is impossible or unsafe to * wrap the key with this cipher (e.g., a hardware protected key is * being passed to a software-only cipher). + * + * @throws UnsupportedOperationException if the corresponding method in the + * {@code CipherSpi} is not supported.
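The @throws clauses added above make explicit that a CipherSpi may leave WRAP_MODE and UNWRAP_MODE unimplemented. A hedged sketch of the wrap/unwrap flow they govern; the "AESWrap" transformation and the class name are assumptions, and provider support varies:

import java.security.Key;
import javax.crypto.Cipher;
import javax.crypto.KeyGenerator;
import javax.crypto.SecretKey;

public class WrapDemo {
    public static void main(String[] args) throws Exception {
        SecretKey kek = KeyGenerator.getInstance("AES").generateKey();     // key-encryption key
        SecretKey payload = KeyGenerator.getInstance("AES").generateKey(); // key to protect

        Cipher c = Cipher.getInstance("AESWrap"); // provider-dependent transformation
        c.init(Cipher.WRAP_MODE, kek);            // may throw UnsupportedOperationException
        byte[] wrapped = c.wrap(payload);

        c.init(Cipher.UNWRAP_MODE, kek);
        Key recovered = c.unwrap(wrapped, "AES", Cipher.SECRET_KEY);
        System.out.println(recovered.getAlgorithm()); // AES
    }
}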
*/ public final byte[] wrap(Key key) throws IllegalBlockSizeException, InvalidKeyException { @@ -2451,6 +2478,9 @@ public class Cipher { * @exception InvalidKeyException if wrappedKey does not * represent a wrapped key of type wrappedKeyType for * the wrappedKeyAlgorithm. + * + * @throws UnsupportedOperationException if the corresponding method in the + * {@code CipherSpi} is not supported. */ public final Key unwrap(byte[] wrappedKey, String wrappedKeyAlgorithm, diff --git a/jdk/src/share/classes/javax/crypto/CipherInputStream.java b/jdk/src/share/classes/javax/crypto/CipherInputStream.java index b9f3cf8d600..f062a1bc28e 100644 --- a/jdk/src/share/classes/javax/crypto/CipherInputStream.java +++ b/jdk/src/share/classes/javax/crypto/CipherInputStream.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,6 +86,8 @@ public class CipherInputStream extends FilterInputStream { private int ostart = 0; // the offset pointing to the last "new" byte private int ofinish = 0; + // stream status + private boolean closed = false; /** * private convenience function. @@ -293,14 +295,17 @@ public class CipherInputStream extends FilterInputStream { * @since JCE1.2 */ public void close() throws IOException { + if (closed) { + return; + } + + closed = true; input.close(); try { // throw away the unprocessed data cipher.doFinal(); } - catch (BadPaddingException ex) { - } - catch (IllegalBlockSizeException ex) { + catch (BadPaddingException | IllegalBlockSizeException ex) { } ostart = 0; ofinish = 0; diff --git a/jdk/src/share/classes/javax/crypto/CipherOutputStream.java b/jdk/src/share/classes/javax/crypto/CipherOutputStream.java index 15edd4585f4..6b8d2734901 100644 --- a/jdk/src/share/classes/javax/crypto/CipherOutputStream.java +++ b/jdk/src/share/classes/javax/crypto/CipherOutputStream.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,6 +74,9 @@ public class CipherOutputStream extends FilterOutputStream { // the buffer holding data ready to be written out private byte[] obuffer; + // stream status + private boolean closed = false; + /** * * Constructs a CipherOutputStream from an OutputStream and a @@ -198,11 +201,14 @@ public class CipherOutputStream extends FilterOutputStream { * @since JCE1.2 */ public void close() throws IOException { + if (closed) { + return; + } + + closed = true; try { obuffer = cipher.doFinal(); - } catch (IllegalBlockSizeException e) { - obuffer = null; - } catch (BadPaddingException e) { + } catch (IllegalBlockSizeException | BadPaddingException e) { obuffer = null; } try { diff --git a/jdk/src/share/classes/javax/crypto/CipherSpi.java b/jdk/src/share/classes/javax/crypto/CipherSpi.java index e563e920eb6..d839be7ce80 100644 --- a/jdk/src/share/classes/javax/crypto/CipherSpi.java +++ b/jdk/src/share/classes/javax/crypto/CipherSpi.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -347,6 +347,9 @@ public abstract class CipherSpi { * initializing this cipher, or requires * algorithm parameters that cannot be * determined from the given key. + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the cipher. */ protected abstract void engineInit(int opmode, Key key, SecureRandom random) @@ -399,6 +402,9 @@ public abstract class CipherSpi { * parameters are inappropriate for this cipher, * or if this cipher requires * algorithm parameters and params is null. + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the cipher. */ protected abstract void engineInit(int opmode, Key key, AlgorithmParameterSpec params, @@ -452,6 +458,9 @@ public abstract class CipherSpi { * parameters are inappropriate for this cipher, * or if this cipher requires * algorithm parameters and params is null. + * @throws UnsupportedOperationException if {@code opmode} is + * {@code WRAP_MODE} or {@code UNWRAP_MODE} but the mode is not implemented + * by the cipher. */ protected abstract void engineInit(int opmode, Key key, AlgorithmParameters params, @@ -863,6 +872,8 @@ public abstract class CipherSpi { * @exception InvalidKeyException if it is impossible or unsafe to * wrap the key with this cipher (e.g., a hardware protected key is * being passed to a software-only cipher). + * + * @throws UnsupportedOperationException if this method is not supported. */ protected byte[] engineWrap(Key key) throws IllegalBlockSizeException, InvalidKeyException @@ -899,6 +910,8 @@ public abstract class CipherSpi { * @exception InvalidKeyException if wrappedKey does not * represent a wrapped key of type wrappedKeyType for * the wrappedKeyAlgorithm. + * + * @throws UnsupportedOperationException if this method is not supported. */ protected Key engineUnwrap(byte[] wrappedKey, String wrappedKeyAlgorithm,
diff --git a/jdk/src/share/classes/sun/management/DiagnosticCommandArgumentInfo.java b/jdk/src/share/classes/sun/management/DiagnosticCommandArgumentInfo.java new file mode 100644 index 00000000000..37bccb0d3bc --- /dev/null +++ b/jdk/src/share/classes/sun/management/DiagnosticCommandArgumentInfo.java @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.management; + +/** + * Diagnostic Command Argument information. It contains the description + * of one parameter of the diagnostic command. A parameter can either be an + * option or an argument. Options are identified by the option name while + * arguments are identified by their position in the command line. The generic + * syntax of a diagnostic command is: + *

+ * <blockquote> + * <command name> [<option>=<value>] [<argument_value>] + * </blockquote> + * Example: + * <blockquote> + * command_name option1=value1 option2=value argumentA argumentB argumentC + * </blockquote>
+ * In this command line, the diagnostic command receives five parameters: two + * options named {@code option1} and {@code option2}, and three arguments. + * argumentA's position is 0, argumentB's position is 1 and argumentC's + * position is 2. + * + * @since 8 + */ + +class DiagnosticCommandArgumentInfo { + private final String name; + private final String description; + private final String type; + private final String defaultValue; + private final boolean mandatory; + private final boolean option; + private final boolean multiple; + private final int position; + + /** + * Returns the argument name. + * + * @return the argument name + */ + String getName() { + return name; + } + + /** + * Returns the argument description. + * + * @return the argument description + */ + String getDescription() { + return description; + } + + /** + * Returns the argument type. + * + * @return the argument type + */ + String getType() { + return type; + } + + /** + * Returns the default value as a String if a default value + * is defined, null otherwise. + * + * @return the default value as a String if a default value + * is defined, null otherwise. + */ + String getDefault() { + return defaultValue; + } + + /** + * Returns {@code true} if the argument is mandatory, + * {@code false} otherwise. + * + * @return {@code true} if the argument is mandatory, + * {@code false} otherwise + */ + boolean isMandatory() { + return mandatory; + } + + /** + * Returns {@code true} if the argument is an option, + * {@code false} otherwise. Options have to be specified using the + * <key>=<value> syntax on the command line, while other + * arguments are specified with a single <value> field and are + * identified by their position on the command line. + * + * @return {@code true} if the argument is an option, + * {@code false} otherwise + */ + boolean isOption() { + return option; + } + + /** + * Returns {@code true} if the argument can be specified multiple times, + * {@code false} otherwise. + * + * @return {@code true} if the argument can be specified multiple times, + * {@code false} otherwise + */ + boolean isMultiple() { + return multiple; + } + + /** + * Returns the expected position of this argument if it is not an option, + * -1 otherwise. Argument position is defined from left to right, + * starting at zero and ignoring the diagnostic command name and + * options. + * + * @return the expected position of this argument if it is not an option, + * -1 otherwise. + */ + int getPosition() { + return position; + } + + DiagnosticCommandArgumentInfo(String name, String description, + String type, String defaultValue, + boolean mandatory, boolean option, + boolean multiple, int position) { + this.name = name; + this.description = description; + this.type = type; + this.defaultValue = defaultValue; + this.mandatory = mandatory; + this.option = option; + this.multiple = multiple; + this.position = position; + } +}
diff --git a/jdk/src/share/classes/sun/management/DiagnosticCommandImpl.java b/jdk/src/share/classes/sun/management/DiagnosticCommandImpl.java new file mode 100644 index 00000000000..eeeee9af053 --- /dev/null +++ b/jdk/src/share/classes/sun/management/DiagnosticCommandImpl.java @@ -0,0 +1,380 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
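The option/argument model documented above surfaces through the DynamicMBean implemented in the file that follows: each diagnostic command becomes one MBean operation taking a single String[] of options and arguments. A hedged invocation sketch (gcClassHistogram and its -all option depend on the target VM; the object name matches the constant registered later in this patch):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class DcmdInvoke {
        public static void main(String[] args) throws Exception {
            MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
            ObjectName dcmd =
                new ObjectName("com.sun.management:type=DiagnosticCommand");
            // "GC.class_histogram" is exposed as "gcClassHistogram" once
            // transform() has camel-cased it; arguments travel as String[].
            String out = (String) mbs.invoke(dcmd, "gcClassHistogram",
                    new Object[] { new String[] { "-all" } },
                    new String[] { String[].class.getName() });
            System.out.println(out);
        }
    }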
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.management; + +import com.sun.management.DiagnosticCommandMBean; +import java.io.Serializable; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.security.Permission; +import java.util.*; +import javax.management.*; + +/** + * Implementation class for the diagnostic commands subsystem. + * + * @since 8 + */ +class DiagnosticCommandImpl extends NotificationEmitterSupport + implements DiagnosticCommandMBean { + + private final VMManagement jvm; + private volatile Map<String, Wrapper> wrappers = null; + private static final String strClassName = "".getClass().getName(); + private static final String strArrayClassName = String[].class.getName(); + private final boolean isSupported; + + @Override + public Object getAttribute(String attribute) throws AttributeNotFoundException, + MBeanException, ReflectionException { + throw new AttributeNotFoundException(attribute); + } + + @Override + public void setAttribute(Attribute attribute) throws AttributeNotFoundException, + InvalidAttributeValueException, MBeanException, ReflectionException { + throw new AttributeNotFoundException(attribute.getName()); + } + + @Override + public AttributeList getAttributes(String[] attributes) { + return new AttributeList(); + } + + @Override + public AttributeList setAttributes(AttributeList attributes) { + return new AttributeList(); + } + + private class Wrapper { + + String name; + String cmd; + DiagnosticCommandInfo info; + Permission permission; + + Wrapper(String name, String cmd, DiagnosticCommandInfo info) + throws InstantiationException { + this.name = name; + this.cmd = cmd; + this.info = info; + this.permission = null; + Exception cause = null; + if (info.getPermissionClass() != null) { + try { + Class c = Class.forName(info.getPermissionClass()); + if (info.getPermissionAction() == null) { + try { + Constructor constructor = c.getConstructor(String.class); + permission = (Permission) constructor.newInstance(info.getPermissionName()); + + } catch (InstantiationException | IllegalAccessException + | IllegalArgumentException | InvocationTargetException + | NoSuchMethodException | SecurityException ex) { + cause = ex; + } + } + if (permission == null) { + try { + Constructor constructor = c.getConstructor(String.class, String.class); + permission = (Permission) constructor.newInstance( + info.getPermissionName(), + info.getPermissionAction()); + } catch (InstantiationException | IllegalAccessException + |
IllegalArgumentException | InvocationTargetException + | NoSuchMethodException | SecurityException ex) { + cause = ex; + } + } + } catch (ClassNotFoundException ex) { } + if (permission == null) { + InstantiationException iex = + new InstantiationException("Unable to instantiate required permission"); + iex.initCause(cause); + throw iex; + } + } + + public String execute(String[] args) { + if (permission != null) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(permission); + } + } + if(args == null) { + return executeDiagnosticCommand(cmd); + } else { + StringBuilder sb = new StringBuilder(); + sb.append(cmd); + for(int i=0; i<args.length; i++) { + sb.append(" "); + sb.append(args[i]); + } + return executeDiagnosticCommand(sb.toString()); + } + } + } + + DiagnosticCommandImpl(VMManagement jvm) { + this.jvm = jvm; + isSupported = jvm.isRemoteDiagnosticCommandsSupported(); + } + + private static class OperationInfoComparator implements Serializable, + Comparator<MBeanOperationInfo> { + @Override + public int compare(MBeanOperationInfo o1, MBeanOperationInfo o2) { + return o1.getName().compareTo(o2.getName()); + } + } + + @Override + public MBeanInfo getMBeanInfo() { + SortedSet<MBeanOperationInfo> operations = new TreeSet<>(new OperationInfoComparator()); + Map<String, Wrapper> wrappersmap; + if (!isSupported) { + wrappersmap = (Map<String, Wrapper>) Collections.EMPTY_MAP; + } else { + try { + String[] command = getDiagnosticCommands(); + DiagnosticCommandInfo[] info = getDiagnosticCommandInfo(command); + MBeanParameterInfo stringArgInfo[] = new MBeanParameterInfo[]{ + new MBeanParameterInfo("arguments", strArrayClassName, + "Array of Diagnostic Commands Arguments and Options") + }; + wrappersmap = new HashMap<>(); + for (int i = 0; i < command.length; i++) { + String name = transform(command[i]); + try { + Wrapper w = new Wrapper(name, command[i], info[i]); + wrappersmap.put(name, w); + operations.add(new MBeanOperationInfo( + w.name, + w.info.getDescription(), + (w.info.getArgumentsInfo() == null + || w.info.getArgumentsInfo().isEmpty()) + ? null : stringArgInfo, + strClassName, + MBeanOperationInfo.ACTION_INFO, + commandDescriptor(w))); + } catch (InstantiationException ex) { + // If for some reason the creation of a diagnostic command + // wrapper fails, the diagnostic command is just ignored + // and won't appear in the DynamicMBean + } + } + } catch (IllegalArgumentException | UnsupportedOperationException e) { + wrappersmap = (Map<String, Wrapper>) Collections.EMPTY_MAP; + } + } + wrappers = Collections.unmodifiableMap(wrappersmap); + HashMap<String, String> map = new HashMap<>(); + map.put("immutableInfo", "false"); + map.put("interfaceClassName","com.sun.management.DiagnosticCommandMBean"); + map.put("mxbean", "false"); + Descriptor desc = new ImmutableDescriptor(map); + return new MBeanInfo( + this.getClass().getName(), + "Diagnostic Commands", + null, // attributes + null, // constructors + operations.toArray(new MBeanOperationInfo[operations.size()]), // operations + getNotificationInfo(), // notifications + desc); + } + + @Override + public Object invoke(String actionName, Object[] params, String[] signature) + throws MBeanException, ReflectionException { + if (!isSupported) { + throw new UnsupportedOperationException(); + } + if (wrappers == null) { + getMBeanInfo(); + } + Wrapper w = wrappers.get(actionName); + if (w != null) { + if (w.info.getArgumentsInfo().isEmpty() + && (params == null || params.length == 0) + && (signature == null || signature.length == 0)) { + return w.execute(null); + } else if((params != null && params.length == 1) + && (signature != null && signature.length == 1 + && signature[0] != null + && signature[0].compareTo(strArrayClassName) == 0)) { + return w.execute((String[]) params[0]); + } + } + throw new ReflectionException(new NoSuchMethodException(actionName)); + } + + private static String transform(String name) { + StringBuilder sb = new
StringBuilder(); + boolean toLower = true; + boolean toUpper = false; + for (int i = 0; i < name.length(); i++) { + char c = name.charAt(i); + if (c == '.' || c == '_') { + toLower = false; + toUpper = true; + } else { + if (toUpper) { + toUpper = false; + sb.append(Character.toUpperCase(c)); + } else if(toLower) { + sb.append(Character.toLowerCase(c)); + } else { + sb.append(c); + } + } + } + return sb.toString(); + } + + private Descriptor commandDescriptor(Wrapper w) throws IllegalArgumentException { + HashMap map = new HashMap<>(); + map.put("dcmd.name", w.info.getName()); + map.put("dcmd.description", w.info.getDescription()); + map.put("dcmd.vmImpact", w.info.getImpact()); + map.put("dcmd.permissionClass", w.info.getPermissionClass()); + map.put("dcmd.permissionName", w.info.getPermissionName()); + map.put("dcmd.permissionAction", w.info.getPermissionAction()); + map.put("dcmd.enabled", w.info.isEnabled()); + StringBuilder sb = new StringBuilder(); + sb.append("help "); + sb.append(w.info.getName()); + map.put("dcmd.help", executeDiagnosticCommand(sb.toString())); + if (w.info.getArgumentsInfo() != null && !w.info.getArgumentsInfo().isEmpty()) { + HashMap allargmap = new HashMap<>(); + for (DiagnosticCommandArgumentInfo arginfo : w.info.getArgumentsInfo()) { + HashMap argmap = new HashMap<>(); + argmap.put("dcmd.arg.name", arginfo.getName()); + argmap.put("dcmd.arg.type", arginfo.getType()); + argmap.put("dcmd.arg.description", arginfo.getDescription()); + argmap.put("dcmd.arg.isMandatory", arginfo.isMandatory()); + argmap.put("dcmd.arg.isMultiple", arginfo.isMultiple()); + boolean isOption = arginfo.isOption(); + argmap.put("dcmd.arg.isOption", isOption); + if(!isOption) { + argmap.put("dcmd.arg.position", arginfo.getPosition()); + } else { + argmap.put("dcmd.arg.position", -1); + } + allargmap.put(arginfo.getName(), new ImmutableDescriptor(argmap)); + } + map.put("dcmd.arguments", new ImmutableDescriptor(allargmap)); + } + return new ImmutableDescriptor(map); + } + + private final static String notifName = + "javax.management.Notification"; + + private final static String[] diagFramNotifTypes = { + "jmx.mbean.info.changed" + }; + + private MBeanNotificationInfo[] notifInfo = null; + + @Override + public MBeanNotificationInfo[] getNotificationInfo() { + synchronized (this) { + if (notifInfo == null) { + notifInfo = new MBeanNotificationInfo[1]; + notifInfo[0] = + new MBeanNotificationInfo(diagFramNotifTypes, + notifName, + "Diagnostic Framework Notification"); + } + } + return notifInfo; + } + + private static long seqNumber = 0; + private static long getNextSeqNumber() { + return ++seqNumber; + } + + private void createDiagnosticFrameworkNotification() { + + if (!hasListeners()) { + return; + } + ObjectName on = null; + try { + on = ObjectName.getInstance(ManagementFactoryHelper.HOTSPOT_DIAGNOSTIC_COMMAND_MBEAN_NAME); + } catch (MalformedObjectNameException e) { } + Notification notif = new Notification("jmx.mbean.info.changed", + on, + getNextSeqNumber()); + notif.setUserData(getMBeanInfo()); + sendNotification(notif); + } + + @Override + public synchronized void addNotificationListener(NotificationListener listener, + NotificationFilter filter, + Object handback) { + boolean before = hasListeners(); + super.addNotificationListener(listener, filter, handback); + boolean after = hasListeners(); + if (!before && after) { + setNotificationEnabled(true); + } + } + + @Override + public synchronized void removeNotificationListener(NotificationListener listener) + throws 
ListenerNotFoundException { + boolean before = hasListeners(); + super.removeNotificationListener(listener); + boolean after = hasListeners(); + if (before && !after) { + setNotificationEnabled(false); + } + } + + @Override + public synchronized void removeNotificationListener(NotificationListener listener, + NotificationFilter filter, + Object handback) + throws ListenerNotFoundException { + boolean before = hasListeners(); + super.removeNotificationListener(listener, filter, handback); + boolean after = hasListeners(); + if (before && !after) { + setNotificationEnabled(false); + } + } + + private native void setNotificationEnabled(boolean enabled); + private native String[] getDiagnosticCommands(); + private native DiagnosticCommandInfo[] getDiagnosticCommandInfo(String[] commands); + private native String executeDiagnosticCommand(String command); + +} diff --git a/jdk/src/share/classes/sun/management/DiagnosticCommandInfo.java b/jdk/src/share/classes/sun/management/DiagnosticCommandInfo.java new file mode 100644 index 00000000000..4ad0a963bb8 --- /dev/null +++ b/jdk/src/share/classes/sun/management/DiagnosticCommandInfo.java @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.management; + +import java.util.List; + +/** + * Diagnostic command information. It contains the description of a + * diagnostic command. + * + * @since 8 + */ + +class DiagnosticCommandInfo { + private final String name; + private final String description; + private final String impact; + private final String permissionClass; + private final String permissionName; + private final String permissionAction; + private final boolean enabled; + private final List arguments; + + /** + * Returns the diagnostic command name. + * + * @return the diagnostic command name + */ + String getName() { + return name; + } + + /** + * Returns the diagnostic command description. + * + * @return the diagnostic command description + */ + String getDescription() { + return description; + } + + /** + * Returns the potential impact of the diagnostic command execution + * on the Java virtual machine behavior. 
+ * + * @return the potential impact of the diagnostic command execution + * on the Java virtual machine behavior + */ + String getImpact() { + return impact; + } + + /** + * Returns the name of the permission class required to be allowed + * to invoke the diagnostic command, or null if no permission + * is required. + * + * @return the name of the permission class name required to be allowed + * to invoke the diagnostic command, or null if no permission + * is required + */ + String getPermissionClass() { + return permissionClass; + } + + /** + * Returns the permission name required to be allowed to invoke the + * diagnostic command, or null if no permission is required. + * + * @return the permission name required to be allowed to invoke the + * diagnostic command, or null if no permission is required + */ + String getPermissionName() { + return permissionName; + } + + /** + * Returns the permission action required to be allowed to invoke the + * diagnostic command, or null if no permission is required or + * if the permission has no action specified. + * + * @return the permission action required to be allowed to invoke the + * diagnostic command, or null if no permission is required or + * if the permission has no action specified + */ + String getPermissionAction() { + return permissionAction; + } + + /** + * Returns {@code true} if the diagnostic command is enabled, + * {@code false} otherwise. The enabled/disabled + * status of a diagnostic command can evolve during + * the lifetime of the Java virtual machine. + * + * @return {@code true} if the diagnostic command is enabled, + * {@code false} otherwise + */ + boolean isEnabled() { + return enabled; + } + + /** + * Returns the list of the diagnostic command arguments description. + * If the diagnostic command has no arguments, it returns an empty list. + * + * @return a list of the diagnostic command arguments description + */ + List getArgumentsInfo() { + return arguments; + } + + DiagnosticCommandInfo(String name, String description, + String impact, String permissionClass, + String permissionName, String permissionAction, + boolean enabled, + List arguments) + { + this.name = name; + this.description = description; + this.impact = impact; + this.permissionClass = permissionClass; + this.permissionName = permissionName; + this.permissionAction = permissionAction; + this.enabled = enabled; + this.arguments = arguments; + } +} diff --git a/jdk/src/share/classes/sun/management/ManagementFactoryHelper.java b/jdk/src/share/classes/sun/management/ManagementFactoryHelper.java index f285ca3c104..6e875a27914 100644 --- a/jdk/src/share/classes/sun/management/ManagementFactoryHelper.java +++ b/jdk/src/share/classes/sun/management/ManagementFactoryHelper.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
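The permission triple (class, name, action) described by the getters above is what DiagnosticCommandImpl.Wrapper instantiates reflectively and checks before a command string reaches the VM. A hedged sketch of the equivalent guard (ManagementPermission("monitor") is an illustrative choice, not mandated by the patch):

    import java.lang.management.ManagementPermission;
    import java.security.Permission;

    public class DcmdPermissionCheck {
        public static void main(String[] args) {
            // Mirror of Wrapper.execute(): resolve the descriptor's
            // permission, then consult the SecurityManager, if one is set.
            Permission p = new ManagementPermission("monitor");
            SecurityManager sm = System.getSecurityManager();
            if (sm != null) {
                sm.checkPermission(p); // SecurityException if denied
            }
            System.out.println("permitted");
        }
    }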
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ package sun.management; import java.lang.management.*; +import javax.management.DynamicMBean; import javax.management.InstanceAlreadyExistsException; import javax.management.InstanceNotFoundException; import javax.management.MBeanServer; @@ -42,7 +43,9 @@ import sun.util.logging.LoggingSupport; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import com.sun.management.DiagnosticCommandMBean; import com.sun.management.OSMBeanFactory; import com.sun.management.HotSpotDiagnosticMXBean; @@ -263,6 +266,7 @@ public class ManagementFactoryHelper { private static HotspotThread hsThreadMBean = null; private static HotspotCompilation hsCompileMBean = null; private static HotspotMemory hsMemoryMBean = null; + private static DiagnosticCommandImpl hsDiagCommandMBean = null; public static synchronized HotSpotDiagnosticMXBean getDiagnosticMXBean() { if (hsDiagMBean == null) { @@ -311,6 +315,14 @@ public class ManagementFactoryHelper { return hsMemoryMBean; } + public static synchronized DiagnosticCommandMBean getDiagnosticCommandMBean() { + // Remote Diagnostic Commands may not be supported + if (hsDiagCommandMBean == null && jvm.isRemoteDiagnosticCommandsSupported()) { + hsDiagCommandMBean = new DiagnosticCommandImpl(jvm); + } + return hsDiagCommandMBean; + } + /** * This method is for testing only. */ @@ -365,6 +377,18 @@ public class ManagementFactoryHelper { private final static String HOTSPOT_THREAD_MBEAN_NAME = "sun.management:type=HotspotThreading"; + final static String HOTSPOT_DIAGNOSTIC_COMMAND_MBEAN_NAME = + "com.sun.management:type=DiagnosticCommand"; + + public static HashMap getPlatformDynamicMBeans() { + HashMap map = new HashMap<>(); + DiagnosticCommandMBean diagMBean = getDiagnosticCommandMBean(); + if (diagMBean != null) { + map.put(Util.newObjectName(HOTSPOT_DIAGNOSTIC_COMMAND_MBEAN_NAME), diagMBean); + } + return map; + } + static void registerInternalMBeans(MBeanServer mbs) { // register all internal MBeans if not registered // No exception is thrown if a MBean with that object name diff --git a/jdk/src/share/classes/sun/management/VMManagement.java b/jdk/src/share/classes/sun/management/VMManagement.java index 50760baa398..a02f828ed16 100644 --- a/jdk/src/share/classes/sun/management/VMManagement.java +++ b/jdk/src/share/classes/sun/management/VMManagement.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,6 +46,7 @@ public interface VMManagement { public boolean isThreadAllocatedMemorySupported(); public boolean isThreadAllocatedMemoryEnabled(); public boolean isGcNotificationSupported(); + public boolean isRemoteDiagnosticCommandsSupported(); // Class Loading Subsystem public long getTotalClassCount(); diff --git a/jdk/src/share/classes/sun/management/VMManagementImpl.java b/jdk/src/share/classes/sun/management/VMManagementImpl.java index 88566ae99db..46e0285e622 100644 --- a/jdk/src/share/classes/sun/management/VMManagementImpl.java +++ b/jdk/src/share/classes/sun/management/VMManagementImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,6 +57,7 @@ class VMManagementImpl implements VMManagement { private static boolean synchronizerUsageSupport; private static boolean threadAllocatedMemorySupport; private static boolean gcNotificationSupport; + private static boolean remoteDiagnosticCommandsSupport; static { @@ -106,6 +107,10 @@ class VMManagementImpl implements VMManagement { return gcNotificationSupport; } + public boolean isRemoteDiagnosticCommandsSupported() { + return remoteDiagnosticCommandsSupport; + } + public native boolean isThreadContentionMonitoringEnabled(); public native boolean isThreadCpuTimeEnabled(); public native boolean isThreadAllocatedMemoryEnabled();
diff --git a/jdk/src/share/classes/sun/management/jdp/JdpPacketWriter.java b/jdk/src/share/classes/sun/management/jdp/JdpPacketWriter.java index 2af2fdc0100..f68905b6781 100644 --- a/jdk/src/share/classes/sun/management/jdp/JdpPacketWriter.java +++ b/jdk/src/share/classes/sun/management/jdp/JdpPacketWriter.java @@ -60,9 +60,12 @@ public final class JdpPacketWriter { */ public void addEntry(String entry) throws IOException { - pkt.writeShort(entry.length()); - byte[] b = entry.getBytes("UTF-8"); - pkt.write(b); + /* DataOutputStream.writeUTF() does essentially + * the same as: + * pkt.writeShort(entry.getBytes("UTF-8").length); + * pkt.write(entry.getBytes("UTF-8")); + */ + pkt.writeUTF(entry); } /**
diff --git a/jdk/src/share/classes/sun/misc/Contended.java b/jdk/src/share/classes/sun/misc/Contended.java index 6925b4242d6..2269687a9ff 100644 --- a/jdk/src/share/classes/sun/misc/Contended.java +++ b/jdk/src/share/classes/sun/misc/Contended.java @@ -31,7 +31,42 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** - * This annotation marks classes and fields as considered to be contended. + *

An annotation expressing that objects and/or their fields are + * expected to encounter memory contention, generally in the form of + * "false sharing". This annotation serves as a hint that such objects + * and fields should reside in locations isolated from those of other + * objects or fields. Susceptibility to memory contention is a + * property of the intended usages of objects and fields, not their + * types or qualifiers. The effects of this annotation will nearly + * always add significant space overhead to objects. The use of + * {@code @Contended} is warranted only when the performance impact of + * this time/space tradeoff is intrinsically worthwhile; for example, + * in concurrent contexts in which each instance of the annotated + * class is often accessed by a different thread. + * + *

A {@code @Contended} field annotation may optionally include a + * contention group tag. A contention group defines a set of one + * or more fields that collectively must be isolated from all other + * contention groups. The fields in the same contention group may not be + * pairwise isolated. With no contention group tag (or with the default + * empty tag: "") each {@code @Contended} field resides in its own + * distinct and anonymous contention group. + * + *

When the annotation is used at the class level, the effect is + * equivalent to grouping all the declared fields not already having the + * {@code @Contended} annotation into the same anonymous group. + * With the class level annotation, implementations may choose different + * isolation techniques, such as isolating the entire object, rather than + * isolating distinct fields. A contention group tag has no meaning + * in a class level {@code @Contended} annotation, and is ignored. + * + *

The class level {@code @Contended} annotation is not inherited and has + * no effect on the fields declared in any sub-classes. The effects of all + * {@code @Contended} annotations, however, remain in force for all + * subclass instances, providing isolation of all the defined contention + * groups. Contention group tags are not inherited, and the same tag used + * in a superclass and a subclass represents distinct contention groups. + * * @since 1.8 */ @Retention(RetentionPolicy.RUNTIME) @@ -39,7 +74,10 @@ import java.lang.annotation.Target; public @interface Contended { /** - Defines the contention group tag. + * The (optional) contention group tag. + * This tag is only meaningful for field level annotations. + * + * @return contention group tag. */ String value() default ""; }
diff --git a/jdk/src/share/classes/sun/misc/Hashing.java b/jdk/src/share/classes/sun/misc/Hashing.java index 1659501d4cd..39ed201da3e 100644 --- a/jdk/src/share/classes/sun/misc/Hashing.java +++ b/jdk/src/share/classes/sun/misc/Hashing.java @@ -24,7 +24,7 @@ */ package sun.misc; -import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; /** * Hashing utilities. @@ -207,28 +207,16 @@ public class Hashing { } /** - * Holds references to things that can't be initialized until after VM - * is fully booted. + * Return a non-zero 32-bit pseudo-random value. The {@code instance} object + * may be used as part of the value. + * + * @param instance an object to use, if desired, in choosing the value. + * @return a non-zero 32-bit pseudo-random value. */ - private static class Holder { - - /** - * Used for generating per-instance hash seeds. - * - * We try to improve upon the default seeding. - */ - static final Random SEED_MAKER = new Random( - Double.doubleToRawLongBits(Math.random()) - ^ System.identityHashCode(Hashing.class) - ^ System.currentTimeMillis() - ^ System.nanoTime() - ^ Runtime.getRuntime().freeMemory()); - } - public static int randomHashSeed(Object instance) { int seed; if (sun.misc.VM.isBooted()) { - seed = Holder.SEED_MAKER.nextInt(); + seed = ThreadLocalRandom.current().nextInt(); } else { // lower quality "random" seed value--still better than zero and // not practically reversible.
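The grouping rules in the javadoc above are easiest to see in code. A hedged sketch (field and class names are invented; applying the annotation outside the JDK also assumes running with -XX:-RestrictContended):

    import sun.misc.Contended;

    public class ContendedLayouts {
        // Class-level form: all declared fields join one anonymous group,
        // so the VM may isolate the whole object.
        @Contended
        static class PaddedCounter {
            volatile long count;
        }

        static class RwCounters {
            // Same tag "reader": these two may share a cache line with
            // each other but are isolated from "writer" and untagged fields.
            @Contended("reader") volatile long readHits;
            @Contended("reader") volatile long readMisses;
            @Contended("writer") volatile long writes;
            volatile long plain; // unannotated: no isolation applied
        }
    }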
diff --git a/jdk/src/share/classes/sun/net/www/protocol/http/HttpURLConnection.java b/jdk/src/share/classes/sun/net/www/protocol/http/HttpURLConnection.java index 4e002e8503e..5d06ede46cd 100644 --- a/jdk/src/share/classes/sun/net/www/protocol/http/HttpURLConnection.java +++ b/jdk/src/share/classes/sun/net/www/protocol/http/HttpURLConnection.java @@ -3158,6 +3158,7 @@ public class HttpURLConnection extends java.net.HttpURLConnection { private boolean marked = false; private int inCache = 0; private int markCount = 0; + private boolean closed; // false public HttpInputStream (InputStream is) { super (is); @@ -3233,8 +3234,14 @@ public class HttpURLConnection extends java.net.HttpURLConnection { } } + private void ensureOpen() throws IOException { + if (closed) + throw new IOException("stream is closed"); + } + @Override public int read() throws IOException { + ensureOpen(); try { byte[] b = new byte[1]; int ret = read(b); @@ -3254,6 +3261,7 @@ public class HttpURLConnection extends java.net.HttpURLConnection { @Override public int read(byte[] b, int off, int len) throws IOException { + ensureOpen(); try { int newLen = super.read(b, off, len); int nWrite; @@ -3291,7 +3299,7 @@ public class HttpURLConnection extends java.net.HttpURLConnection { @Override public long skip (long n) throws IOException { - + ensureOpen(); long remaining = n; int nr; if (skipBuffer == null) @@ -3317,6 +3325,9 @@ public class HttpURLConnection extends java.net.HttpURLConnection { @Override public void close () throws IOException { + if (closed) + return; + try { if (outputStream != null) { if (read() != -1) { @@ -3332,6 +3343,7 @@ public class HttpURLConnection extends java.net.HttpURLConnection { } throw ioex; } finally { + closed = true; HttpURLConnection.this.http = null; checkResponseCredentials (true); } diff --git a/jdk/src/share/classes/sun/security/pkcs11/P11KeyAgreement.java b/jdk/src/share/classes/sun/security/pkcs11/P11KeyAgreement.java index 11ce2b6126b..3e35ff23cec 100644 --- a/jdk/src/share/classes/sun/security/pkcs11/P11KeyAgreement.java +++ b/jdk/src/share/classes/sun/security/pkcs11/P11KeyAgreement.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -330,7 +330,7 @@ final class P11KeyAgreement extends KeyAgreementSpi { // as here we always retrieve the CKA_VALUE even for tokens // that do not have that bug. byte[] keyBytes = key.getEncoded(); - byte[] newBytes = P11Util.trimZeroes(keyBytes); + byte[] newBytes = KeyUtil.trimZeroes(keyBytes); if (keyBytes != newBytes) { key = new SecretKeySpec(newBytes, algorithm); } diff --git a/jdk/src/share/classes/sun/security/pkcs11/P11Signature.java b/jdk/src/share/classes/sun/security/pkcs11/P11Signature.java index 3c94ad6d3ab..cbbda2e0cff 100644 --- a/jdk/src/share/classes/sun/security/pkcs11/P11Signature.java +++ b/jdk/src/share/classes/sun/security/pkcs11/P11Signature.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
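With the ensureOpen() guard added in the HttpURLConnection hunk above, reads on a response stream fail fast after close() instead of touching connection state. A hedged sketch (the URL is illustrative):

    import java.io.IOException;
    import java.io.InputStream;
    import java.net.URL;

    public class ReadAfterCloseDemo {
        public static void main(String[] args) throws Exception {
            InputStream in = new URL("http://example.com/").openStream();
            in.close();
            try {
                in.read(); // now throws IOException("stream is closed")
            } catch (IOException expected) {
                System.out.println("read after close: " + expected.getMessage());
            }
        }
    }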
* * This code is free software; you can redistribute it and/or modify it @@ -41,6 +41,7 @@ import sun.security.rsa.RSAPadding; import sun.security.pkcs11.wrapper.*; import static sun.security.pkcs11.wrapper.PKCS11Constants.*; +import sun.security.util.KeyUtil; /** * Signature implementation class. This class currently supports the @@ -697,8 +698,8 @@ final class P11Signature extends SignatureSpi { BigInteger r = values[0].getPositiveBigInteger(); BigInteger s = values[1].getPositiveBigInteger(); // trim leading zeroes - byte[] br = P11Util.trimZeroes(r.toByteArray()); - byte[] bs = P11Util.trimZeroes(s.toByteArray()); + byte[] br = KeyUtil.trimZeroes(r.toByteArray()); + byte[] bs = KeyUtil.trimZeroes(s.toByteArray()); int k = Math.max(br.length, bs.length); // r and s each occupy half the array byte[] res = new byte[k << 1]; diff --git a/jdk/src/share/classes/sun/security/pkcs11/P11Util.java b/jdk/src/share/classes/sun/security/pkcs11/P11Util.java index d405743409a..38da9401f33 100644 --- a/jdk/src/share/classes/sun/security/pkcs11/P11Util.java +++ b/jdk/src/share/classes/sun/security/pkcs11/P11Util.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -131,20 +131,6 @@ public final class P11Util { return b; } - // trim leading (most significant) zeroes from the result - static byte[] trimZeroes(byte[] b) { - int i = 0; - while ((i < b.length - 1) && (b[i] == 0)) { - i++; - } - if (i == 0) { - return b; - } - byte[] t = new byte[b.length - i]; - System.arraycopy(b, i, t, 0, t.length); - return t; - } - public static byte[] getMagnitude(BigInteger bi) { byte[] b = bi.toByteArray(); if ((b.length > 1) && (b[0] == 0)) { diff --git a/jdk/src/share/classes/sun/security/util/KeyUtil.java b/jdk/src/share/classes/sun/security/util/KeyUtil.java index 6664dab38b6..cbaa8a5e23a 100644 --- a/jdk/src/share/classes/sun/security/util/KeyUtil.java +++ b/jdk/src/share/classes/sun/security/util/KeyUtil.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -200,5 +200,24 @@ public final class KeyUtil { // Don't bother to check against the y^q mod p if safe primes are used. } + + /** + * Trim leading (most significant) zeroes from the result. 
+ * + * @throws NullPointerException if {@code b} is null + */ + public static byte[] trimZeroes(byte[] b) { + int i = 0; + while ((i < b.length - 1) && (b[i] == 0)) { + i++; + } + if (i == 0) { + return b; + } + byte[] t = new byte[b.length - i]; + System.arraycopy(b, i, t, 0, t.length); + return t; + } + } diff --git a/jdk/src/share/classes/sun/tools/jconsole/SummaryTab.java b/jdk/src/share/classes/sun/tools/jconsole/SummaryTab.java index 13ee1dd7262..2c199b5f042 100644 --- a/jdk/src/share/classes/sun/tools/jconsole/SummaryTab.java +++ b/jdk/src/share/classes/sun/tools/jconsole/SummaryTab.java @@ -360,6 +360,8 @@ class SummaryTab extends Tab { Math.min(99F, elapsedCpu / (elapsedTime * 10000F * result.nCPUs)); + cpuUsage = Math.max(0F, cpuUsage); + getPlotter().addValues(result.timeStamp, Math.round(cpuUsage * Math.pow(10.0, CPU_DECIMALS))); getInfoLabel().setText(Resources.format(Messages.CPU_USAGE_FORMAT, diff --git a/jdk/src/share/classes/sun/util/locale/provider/FallbackLocaleProviderAdapter.java b/jdk/src/share/classes/sun/util/locale/provider/FallbackLocaleProviderAdapter.java index e045f875197..cb55691c782 100644 --- a/jdk/src/share/classes/sun/util/locale/provider/FallbackLocaleProviderAdapter.java +++ b/jdk/src/share/classes/sun/util/locale/provider/FallbackLocaleProviderAdapter.java @@ -25,6 +25,11 @@ package sun.util.locale.provider; +import java.util.Collections; +import java.util.HashSet; +import java.util.Locale; +import java.util.Set; + /** * FallbackProviderAdapter implementation. * @@ -32,6 +37,18 @@ package sun.util.locale.provider; */ public class FallbackLocaleProviderAdapter extends JRELocaleProviderAdapter { + /** + * Supported language tag set. + */ + private static final Set rootTagSet = + Collections.singleton(Locale.ROOT.toLanguageTag()); + + /** + * Fallback provider only provides the ROOT locale data. 
+ */ + private final LocaleResources rootLocaleResources = + new LocaleResources(this, Locale.ROOT); + /** * Returns the type of this LocaleProviderAdapter */ @@ -39,4 +56,14 @@ public class FallbackLocaleProviderAdapter extends JRELocaleProviderAdapter { public LocaleProviderAdapter.Type getAdapterType() { return Type.FALLBACK; } + + @Override + public LocaleResources getLocaleResources(Locale locale) { + return rootLocaleResources; + } + + @Override + protected Set createLanguageTagSet(String category) { + return rootTagSet; + } } diff --git a/jdk/src/share/classes/sun/util/locale/provider/JRELocaleProviderAdapter.java b/jdk/src/share/classes/sun/util/locale/provider/JRELocaleProviderAdapter.java index beabfd1032a..f0803e971f9 100644 --- a/jdk/src/share/classes/sun/util/locale/provider/JRELocaleProviderAdapter.java +++ b/jdk/src/share/classes/sun/util/locale/provider/JRELocaleProviderAdapter.java @@ -34,12 +34,10 @@ import java.text.spi.DateFormatProvider; import java.text.spi.DateFormatSymbolsProvider; import java.text.spi.DecimalFormatSymbolsProvider; import java.text.spi.NumberFormatProvider; -import java.util.Calendar; import java.util.HashSet; import java.util.Locale; import java.util.Set; import java.util.StringTokenizer; -import java.util.TimeZone; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.spi.CalendarDataProvider; diff --git a/jdk/src/share/classes/sun/util/locale/provider/LocaleProviderAdapter.java b/jdk/src/share/classes/sun/util/locale/provider/LocaleProviderAdapter.java index fa5eac14ba0..d10ccdb3ed6 100644 --- a/jdk/src/share/classes/sun/util/locale/provider/LocaleProviderAdapter.java +++ b/jdk/src/share/classes/sun/util/locale/provider/LocaleProviderAdapter.java @@ -119,6 +119,12 @@ public abstract class LocaleProviderAdapter { */ private static LocaleProviderAdapter fallbackLocaleProviderAdapter = null; + /** + * Default fallback adapter type, which should return something meaningful in any case. + * This is either JRE or FALLBACK. + */ + static LocaleProviderAdapter.Type defaultLocaleProviderAdapter = null; + /** * Adapter lookup cache. */ @@ -140,13 +146,19 @@ public abstract class LocaleProviderAdapter { // load adapter if necessary switch (aType) { case CLDR: - cldrLocaleProviderAdapter = new CLDRLocaleProviderAdapter(); + if (cldrLocaleProviderAdapter == null) { + cldrLocaleProviderAdapter = new CLDRLocaleProviderAdapter(); + } break; case HOST: - hostLocaleProviderAdapter = new HostLocaleProviderAdapter(); + if (hostLocaleProviderAdapter == null) { + hostLocaleProviderAdapter = new HostLocaleProviderAdapter(); + } break; } - typeList.add(aType); + if (!typeList.contains(aType)) { + typeList.add(aType); + } } catch (IllegalArgumentException | UnsupportedOperationException e) { // could be caused by the user specifying wrong // provider name or format in the system property @@ -160,11 +172,15 @@ public abstract class LocaleProviderAdapter { // Append FALLBACK as the last resort. 
fallbackLocaleProviderAdapter = new FallbackLocaleProviderAdapter(); typeList.add(Type.FALLBACK); + defaultLocaleProviderAdapter = Type.FALLBACK; + } else { + defaultLocaleProviderAdapter = Type.JRE; } } else { // Default preference list typeList.add(Type.JRE); typeList.add(Type.SPI); + defaultLocaleProviderAdapter = Type.JRE; } adapterPreference = Collections.unmodifiableList(typeList); diff --git a/jdk/src/share/classes/sun/util/locale/provider/LocaleServiceProviderPool.java b/jdk/src/share/classes/sun/util/locale/provider/LocaleServiceProviderPool.java index 3a60061776d..7270444c896 100644 --- a/jdk/src/share/classes/sun/util/locale/provider/LocaleServiceProviderPool.java +++ b/jdk/src/share/classes/sun/util/locale/provider/LocaleServiceProviderPool.java @@ -127,32 +127,13 @@ public final class LocaleServiceProviderPool { private LocaleServiceProviderPool (final Class c) { providerClass = c; - // Add the JRE Locale Data Adapter implementation. - providers.putIfAbsent(LocaleProviderAdapter.Type.JRE, - LocaleProviderAdapter.forJRE().getLocaleServiceProvider(c)); - - // Add the SPI Locale Data Adapter implementation. - LocaleProviderAdapter lda = LocaleProviderAdapter.forType(LocaleProviderAdapter.Type.SPI); - LocaleServiceProvider provider = lda.getLocaleServiceProvider(c); - if (provider != null) { - providers.putIfAbsent(LocaleProviderAdapter.Type.SPI, provider); - } - - // Add the CLDR Locale Data Adapter implementation, if needed. - lda = LocaleProviderAdapter.forType(LocaleProviderAdapter.Type.CLDR); - if (lda != null) { - provider = lda.getLocaleServiceProvider(c); - if (provider != null) { - providers.putIfAbsent(LocaleProviderAdapter.Type.CLDR, provider); - } - } - - // Add the Host Locale Data Adapter implementation, if needed. - lda = LocaleProviderAdapter.forType(LocaleProviderAdapter.Type.HOST); - if (lda != null) { - provider = lda.getLocaleServiceProvider(c); - if (provider != null) { - providers.putIfAbsent(LocaleProviderAdapter.Type.HOST, provider); + for (LocaleProviderAdapter.Type type : LocaleProviderAdapter.getAdapterPreference()) { + LocaleProviderAdapter lda = LocaleProviderAdapter.forType(type); + if (lda != null) { + LocaleServiceProvider provider = lda.getLocaleServiceProvider(c); + if (provider != null) { + providers.putIfAbsent(type, provider); + } } } } @@ -246,7 +227,8 @@ public final class LocaleServiceProviderPool { */ boolean hasProviders() { return providers.size() != 1 || - providers.get(LocaleProviderAdapter.Type.JRE) == null; + (providers.get(LocaleProviderAdapter.Type.JRE) == null && + providers.get(LocaleProviderAdapter.Type.FALLBACK) == null); } /** @@ -296,9 +278,8 @@ public final class LocaleServiceProviderPool { // Check whether JRE is the sole locale data provider or not, // and directly call it if it is. 
if (!hasProviders()) { - return getter.getObject( - (P)providers.get(LocaleProviderAdapter.Type.JRE), - locale, key, params); + return getter.getObject((P)providers.get(LocaleProviderAdapter.defaultLocaleProviderAdapter), + locale, key, params); } List lookupLocales = getLookupLocales(locale); diff --git a/jdk/src/share/demo/nio/zipfs/src/com/sun/nio/zipfs/ZipFileSystem.java b/jdk/src/share/demo/nio/zipfs/src/com/sun/nio/zipfs/ZipFileSystem.java index 07c71bb5d46..dc5ccfc2d14 100644 --- a/jdk/src/share/demo/nio/zipfs/src/com/sun/nio/zipfs/ZipFileSystem.java +++ b/jdk/src/share/demo/nio/zipfs/src/com/sun/nio/zipfs/ZipFileSystem.java @@ -1818,7 +1818,7 @@ public class ZipFileSystem extends FileSystem { Entry(byte[] name) { name(name); - this.mtime = System.currentTimeMillis(); + this.mtime = this.ctime = this.atime = System.currentTimeMillis(); this.crc = 0; this.size = 0; this.csize = 0; @@ -1912,17 +1912,18 @@ public class ZipFileSystem extends FileSystem { { int written = CENHDR; int version0 = version(); - long csize0 = csize; long size0 = size; long locoff0 = locoff; int elen64 = 0; // extra for ZIP64 int elenNTFS = 0; // extra for NTFS (a/c/mtime) int elenEXTT = 0; // extra for Extended Timestamp + boolean foundExtraTime = false; // if time stamp NTFS, EXTT present // confirm size/length int nlen = (name != null) ? name.length : 0; int elen = (extra != null) ? extra.length : 0; + int eoff = 0; int clen = (comment != null) ? comment.length : 0; if (csize >= ZIP64_MINVAL) { csize0 = ZIP64_MINVAL; @@ -1936,14 +1937,24 @@ public class ZipFileSystem extends FileSystem { locoff0 = ZIP64_MINVAL; elen64 += 8; // offset(8) } - if (elen64 != 0) + if (elen64 != 0) { elen64 += 4; // header and data sz 4 bytes + } - if (atime != -1) { - if (isWindows) // use NTFS + while (eoff + 4 < elen) { + int tag = SH(extra, eoff); + int sz = SH(extra, eoff + 2); + if (tag == EXTID_EXTT || tag == EXTID_NTFS) { + foundExtraTime = true; + } + eoff += (4 + sz); + } + if (!foundExtraTime) { + if (isWindows) { // use NTFS elenNTFS = 36; // total 36 bytes - else // Extended Timestamp otherwise + } else { // Extended Timestamp otherwise elenEXTT = 9; // only mtime in cen + } } writeInt(os, CENSIG); // CEN header signature if (elen64 != 0) { @@ -2092,11 +2103,13 @@ public class ZipFileSystem extends FileSystem { { writeInt(os, LOCSIG); // LOC header signature int version = version(); - int nlen = (name != null) ? name.length : 0; int elen = (extra != null) ? 
extra.length : 0; + boolean foundExtraTime = false; // if extra timestamp present + int eoff = 0; int elen64 = 0; int elenEXTT = 0; + int elenNTFS = 0; if ((flag & FLAG_DATADESCR) != 0) { writeShort(os, version()); // version needed to extract writeShort(os, flag); // general purpose bit flag @@ -2128,14 +2141,27 @@ writeInt(os, size); // uncompressed size } } - if (atime != -1 && !isWindows) { // on unix use "ext time" - if (ctime == -1) - elenEXTT = 13; - else - elenEXTT = 17; + while (eoff + 4 < elen) { + int tag = SH(extra, eoff); + int sz = SH(extra, eoff + 2); + if (tag == EXTID_EXTT || tag == EXTID_NTFS) { + foundExtraTime = true; + } + eoff += (4 + sz); + } + if (!foundExtraTime) { + if (isWindows) { + elenNTFS = 36; // NTFS, total 36 bytes + } else { // on unix use "ext time" + elenEXTT = 9; + if (atime != -1) + elenEXTT += 4; + if (ctime != -1) + elenEXTT += 4; + } } writeShort(os, name.length); - writeShort(os, elen + elen64 + elenEXTT); + writeShort(os, elen + elen64 + elenNTFS + elenEXTT); writeBytes(os, name); if (elen64 != 0) { writeShort(os, EXTID_ZIP64); @@ -2143,15 +2169,28 @@ writeLong(os, size); writeLong(os, csize); } + if (elenNTFS != 0) { + writeShort(os, EXTID_NTFS); + writeShort(os, elenNTFS - 4); + writeInt(os, 0); // reserved + writeShort(os, 0x0001); // NTFS attr tag + writeShort(os, 24); + writeLong(os, javaToWinTime(mtime)); + writeLong(os, javaToWinTime(atime)); + writeLong(os, javaToWinTime(ctime)); + } if (elenEXTT != 0) { writeShort(os, EXTID_EXTT); writeShort(os, elenEXTT - 4); // size for the following data block - if (ctime == -1) - os.write(0x3); // mtime and atime - else - os.write(0x7); // mtime, atime and ctime + int fbyte = 0x1; + if (atime != -1) // mtime and atime + fbyte |= 0x2; + if (ctime != -1) // mtime, atime and ctime + fbyte |= 0x4; + os.write(fbyte); // flags byte writeInt(os, javaToUnixTime(mtime)); - writeInt(os, javaToUnixTime(atime)); + if (atime != -1) + writeInt(os, javaToUnixTime(atime)); if (ctime != -1) writeInt(os, javaToUnixTime(ctime)); }
diff --git a/jdk/src/share/demo/nio/zipfs/src/com/sun/nio/zipfs/ZipInfo.java b/jdk/src/share/demo/nio/zipfs/src/com/sun/nio/zipfs/ZipInfo.java index e929064b9b4..67027e9fd66 100644 --- a/jdk/src/share/demo/nio/zipfs/src/com/sun/nio/zipfs/ZipInfo.java +++ b/jdk/src/share/demo/nio/zipfs/src/com/sun/nio/zipfs/ZipInfo.java @@ -214,7 +214,7 @@ public class ZipInfo { winToJavaTime(LL(extra, off + 24))); break; case EXTID_EXTT: - print(" ->Inof-ZIP Extended Timestamp: flag=%x%n",extra[off]); + print(" ->Info-ZIP Extended Timestamp: flag=%x%n",extra[off]); pos = off + 1 ; while (pos + 4 <= off + sz) { print(" *%tc%n", @@ -223,6 +223,7 @@ } break; default: + print(" ->[tag=%x, size=%d]%n", tag, sz); } off += sz; }
diff --git a/jdk/src/share/javavm/export/jmm.h b/jdk/src/share/javavm/export/jmm.h index e4b858c8187..e017e5a02c7 100644 --- a/jdk/src/share/javavm/export/jmm.h +++ b/jdk/src/share/javavm/export/jmm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -49,7 +49,8 @@ enum { JMM_VERSION_1_1 = 0x20010100, // JDK 6 JMM_VERSION_1_2 = 0x20010200, // JDK 7 JMM_VERSION_1_2_1 = 0x20010201, // JDK 7 GA - JMM_VERSION = 0x20010202 + JMM_VERSION_1_2_2 = 0x20010202, + JMM_VERSION = 0x20010203 }; typedef struct { @@ -62,7 +63,8 @@ typedef struct { unsigned int isObjectMonitorUsageSupported : 1; unsigned int isSynchronizerUsageSupported : 1; unsigned int isThreadAllocatedMemorySupported : 1; - unsigned int : 23; + unsigned int isRemoteDiagnosticCommandsSupported : 1; + unsigned int : 22; } jmmOptionalSupport; typedef enum { @@ -190,21 +192,27 @@ } jmmGCStat; typedef struct { - const char* name; - const char* description; - const char* impact; - int num_arguments; - jboolean enabled; + const char* name; /* Name of the diagnostic command */ + const char* description; /* Short description */ + const char* impact; /* Impact on the JVM */ + const char* permission_class; /* Class name of the required permission if any */ + const char* permission_name; /* Permission name of the required permission if any */ + const char* permission_action; /* Action name of the required permission if any */ + int num_arguments; /* Number of supported options or arguments */ + jboolean enabled; /* True if the diagnostic command can be invoked, false otherwise */ } dcmdInfo; typedef struct { - const char* name; - const char* description; - const char* type; - const char* default_string; - jboolean mandatory; - jboolean option; - int position; + const char* name; /* Option/Argument name */ + const char* description; /* Short description */ + const char* type; /* Type: STRING, BOOLEAN, etc. */ + const char* default_string; /* Default value in a parsable string */ + jboolean mandatory; /* True if the option/argument is mandatory */ + jboolean option; /* True if it is an option, false if it is an argument */ + /* (see diagnosticFramework.hpp for option/argument definitions) */ + jboolean multiple; /* True if the option can be specified several times */ + int position; /* Expected position for this argument (this field is */ + /* meaningless for options) */ } dcmdArgInfo; typedef struct jmmInterface_1_ { @@ -327,6 +335,9 @@ typedef struct jmmInterface_1_ { jstring (JNICALL *ExecuteDiagnosticCommand) (JNIEnv *env, jstring command); + void (JNICALL *SetDiagnosticFrameworkNotificationEnabled) + (JNIEnv *env, + jboolean enabled); } JmmInterface; #ifdef __cplusplus
diff --git a/jdk/src/share/native/sun/management/DiagnosticCommandImpl.c b/jdk/src/share/native/sun/management/DiagnosticCommandImpl.c new file mode 100644 index 00000000000..4f601ed55a7 --- /dev/null +++ b/jdk/src/share/native/sun/management/DiagnosticCommandImpl.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <stdlib.h>
+#include "management.h"
+#include "sun_management_DiagnosticCommandImpl.h"
+
+JNIEXPORT void JNICALL Java_sun_management_DiagnosticCommandImpl_setNotificationEnabled
+(JNIEnv *env, jobject dummy, jboolean enabled) {
+    if (jmm_version > JMM_VERSION_1_2_2) {
+        jmm_interface->SetDiagnosticFrameworkNotificationEnabled(env, enabled);
+    } else {
+        JNU_ThrowByName(env, "java/lang/UnsupportedOperationException",
+            "JMX interface to diagnostic framework notifications is not supported by this VM");
+    }
+}
+
+JNIEXPORT jobjectArray JNICALL
+Java_sun_management_DiagnosticCommandImpl_getDiagnosticCommands
+  (JNIEnv *env, jobject dummy)
+{
+    return jmm_interface->GetDiagnosticCommands(env);
+}
+
+jobject getDiagnosticCommandArgumentInfoArray(JNIEnv *env, jstring command,
+                                              int num_arg) {
+    int i;
+    jobject obj;
+    jobjectArray result;
+    dcmdArgInfo* dcmd_arg_info_array;
+    jclass dcmdArgInfoCls;
+    jclass arraysCls;
+    jmethodID mid;
+    jobject resultList;
+
+    dcmd_arg_info_array = (dcmdArgInfo*) malloc(num_arg * sizeof(dcmdArgInfo));
+    if (dcmd_arg_info_array == NULL) {
+        return NULL;
+    }
+    jmm_interface->GetDiagnosticCommandArgumentsInfo(env, command,
+                                                     dcmd_arg_info_array);
+    dcmdArgInfoCls = (*env)->FindClass(env,
+                                       "sun/management/DiagnosticCommandArgumentInfo");
+    result = (*env)->NewObjectArray(env, num_arg, dcmdArgInfoCls, NULL);
+    if (result == NULL) {
+        free(dcmd_arg_info_array);
+        return NULL;
+    }
+    for (i=0; i<num_arg; i++) {
+        obj = JNU_NewObjectByName(env,
+                                  "sun/management/DiagnosticCommandArgumentInfo",
+                                  "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;ZZZI)V",
+                                  (*env)->NewStringUTF(env,dcmd_arg_info_array[i].name),
+                                  (*env)->NewStringUTF(env,dcmd_arg_info_array[i].description),
+                                  (*env)->NewStringUTF(env,dcmd_arg_info_array[i].type),
+                                  dcmd_arg_info_array[i].default_string == NULL ? NULL:
+                                      (*env)->NewStringUTF(env, dcmd_arg_info_array[i].default_string),
+                                  dcmd_arg_info_array[i].mandatory,
+                                  dcmd_arg_info_array[i].option,
+                                  dcmd_arg_info_array[i].multiple,
+                                  dcmd_arg_info_array[i].position);
+        if (obj == NULL) {
+            free(dcmd_arg_info_array);
+            return NULL;
+        }
+        (*env)->SetObjectArrayElement(env, result, i, obj);
+    }
+    free(dcmd_arg_info_array);
+    arraysCls = (*env)->FindClass(env, "java/util/Arrays");
+    mid = (*env)->GetStaticMethodID(env, arraysCls,
+                                    "asList", "([Ljava/lang/Object;)Ljava/util/List;");
+    resultList = (*env)->CallStaticObjectMethod(env, arraysCls, mid, result);
+    return resultList;
+}
+
+/* Throws IllegalArgumentException if at least one of the diagnostic commands
+ * passed as argument is not supported by the JVM
+ */
+JNIEXPORT jobjectArray JNICALL
+Java_sun_management_DiagnosticCommandImpl_getDiagnosticCommandInfo
+(JNIEnv *env, jobject dummy, jobjectArray commands)
+{
+    int i;
+    jclass dcmdInfoCls;
+    jobject result;
+    jobjectArray args;
+    jobject obj;
+    jmmOptionalSupport mos;
+    jint ret = jmm_interface->GetOptionalSupport(env, &mos);
+    jsize num_commands;
+    dcmdInfo* dcmd_info_array;
+
+    if (commands == NULL) {
+        JNU_ThrowNullPointerException(env, "Invalid String Array");
+        return NULL;
+    }
+    num_commands = (*env)->GetArrayLength(env, commands);
+    dcmd_info_array = (dcmdInfo*) malloc(num_commands *
+                                         sizeof(dcmdInfo));
+    if (dcmd_info_array == NULL) {
+        JNU_ThrowOutOfMemoryError(env, NULL);
+    }
+    jmm_interface->GetDiagnosticCommandInfo(env, commands, dcmd_info_array);
+    dcmdInfoCls = (*env)->FindClass(env,
+                                    "sun/management/DiagnosticCommandInfo");
+    result = (*env)->NewObjectArray(env, num_commands, dcmdInfoCls, NULL);
+    if (result == NULL) {
+        free(dcmd_info_array);
+        JNU_ThrowOutOfMemoryError(env, 0);
+    }
+    for (i=0; i<num_commands; i++) {
+        args = getDiagnosticCommandArgumentInfoArray(env,
+                                                     (*env)->GetObjectArrayElement(env,commands,i),
+                                                     dcmd_info_array[i].num_arguments);
+        if (args == NULL) {
+            free(dcmd_info_array);
+            JNU_ThrowOutOfMemoryError(env, 0);
+        }
+        obj = JNU_NewObjectByName(env,
+                                  "sun/management/DiagnosticCommandInfo",
+                                  "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;ZLjava/util/List;)V",
+                                  (*env)->NewStringUTF(env,dcmd_info_array[i].name),
+                                  (*env)->NewStringUTF(env,dcmd_info_array[i].description),
+                                  (*env)->NewStringUTF(env,dcmd_info_array[i].impact),
+                                  dcmd_info_array[i].permission_class==NULL?NULL:(*env)->NewStringUTF(env,dcmd_info_array[i].permission_class),
+                                  dcmd_info_array[i].permission_name==NULL?NULL:(*env)->NewStringUTF(env,dcmd_info_array[i].permission_name),
+                                  dcmd_info_array[i].permission_action==NULL?NULL:(*env)->NewStringUTF(env,dcmd_info_array[i].permission_action),
+                                  dcmd_info_array[i].enabled,
+                                  args);
+        if (obj == NULL) {
+            free(dcmd_info_array);
+            JNU_ThrowOutOfMemoryError(env, 0);
+        }
+        (*env)->SetObjectArrayElement(env, result, i, obj);
+    }
+    free(dcmd_info_array);
+    return result;
+}
+
+/* Throws IllegalArgumentException if the diagnostic command
+ * passed as argument is not supported by the JVM
+ */
+JNIEXPORT jstring JNICALL
+Java_sun_management_DiagnosticCommandImpl_executeDiagnosticCommand
+(JNIEnv *env, jobject dummy, jstring command) {
+    return jmm_interface->ExecuteDiagnosticCommand(env, command);
+}
diff --git a/jdk/src/share/native/sun/management/VMManagementImpl.c b/jdk/src/share/native/sun/management/VMManagementImpl.c
index 1deb7c8c8ca..27784e5dd62 100644
--- a/jdk/src/share/native/sun/management/VMManagementImpl.c
+++ b/jdk/src/share/native/sun/management/VMManagementImpl.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
  */

 #include
+#include
 #include "jvm.h"
 #include "management.h"
 #include "sun_management_VMManagementImpl.h"
@@ -96,6 +97,9 @@ Java_sun_management_VMManagementImpl_initOptionalSupportFields
     value = mos.isThreadAllocatedMemorySupported;
     setStaticBooleanField(env, cls, "threadAllocatedMemorySupport", value);

+    value = mos.isRemoteDiagnosticCommandsSupported;
+    setStaticBooleanField(env, cls, "remoteDiagnosticCommandsSupport", value);
+
     if ((jmm_version > JMM_VERSION_1_2) ||
         (jmm_version == JMM_VERSION_1_2 && ((jmm_version&0xFF) >= 1))) {
         setStaticBooleanField(env, cls, "gcNotificationSupport", JNI_TRUE);
diff --git a/jdk/src/solaris/bin/java_md_solinux.c b/jdk/src/solaris/bin/java_md_solinux.c
index f3cf522d323..b0028bde78d 100644
--- a/jdk/src/solaris/bin/java_md_solinux.c
+++ b/jdk/src/solaris/bin/java_md_solinux.c
@@ -649,9 +649,9 @@ CreateExecutionEnvironment(int *pargc, char ***pargv,
           && (dmpath == NULL) /* data model specific variables not set */
 #endif /* __solaris__ */
         ) {
-
+        JLI_MemFree(newargv);
+        JLI_MemFree(new_runpath);
         return;
-
     }
 }
@@ -935,7 +935,7 @@ SetExecname(char **argv)
         char buf[PATH_MAX+1];
         int len = readlink(self, buf, PATH_MAX);
         if (len >= 0) {
-            buf[len] = '\0'; /* readlink doesn't nul terminate */
+            buf[len] = '\0'; /* readlink(2) doesn't NUL terminate */
             exec_path = JLI_StringDup(buf);
         }
     }
diff --git a/jdk/src/solaris/native/java/net/NetworkInterface.c b/jdk/src/solaris/native/java/net/NetworkInterface.c
index f887bbc6319..68633db5a18 100644
--- a/jdk/src/solaris/native/java/net/NetworkInterface.c
+++ b/jdk/src/solaris/native/java/net/NetworkInterface.c
@@ -658,9 +658,9 @@ jobject createNetworkInterface(JNIEnv *env, netif *ifs) {
                 if (ia2Obj) {
                     setInetAddress_addr(env, ia2Obj, htonl(((struct sockaddr_in*)addrP->brdcast)->sin_addr.s_addr));
                     (*env)->SetObjectField(env, ibObj, ni_ib4broadcastID, ia2Obj);
-                    (*env)->SetShortField(env, ibObj, ni_ib4maskID, addrP->mask);
                 }
             }
+            (*env)->SetShortField(env, ibObj, ni_ib4maskID, addrP->mask);
             (*env)->SetObjectArrayElement(env, bindArr, bind_index++, ibObj);
         }
     }
@@ -887,15 +887,12 @@ netif *addif(JNIEnv *env, int sock, const char * if_name,
     addrP->mask = prefix;
     addrP->next = 0;
     if (family == AF_INET) {
-        /*
-         * Deal with broadcast addr & subnet mask
-         */
+        // Deal with broadcast addr & subnet mask
         struct sockaddr * brdcast_to = (struct sockaddr *) ((char *) addrP + sizeof(netaddr) + addr_size);
         addrP->brdcast = getBroadcast(env, sock, name, brdcast_to );
-        if (addrP->brdcast && (mask = getSubnet(env, sock, name)) != -1) {
+        if ((mask = getSubnet(env, sock, name)) != -1)
             addrP->mask = mask;
-        }
     }

 /**
diff --git a/jdk/src/solaris/native/sun/awt/awt_InputMethod.c b/jdk/src/solaris/native/sun/awt/awt_InputMethod.c
index 5a9545550b8..faa76460213 100644
--- a/jdk/src/solaris/native/sun/awt/awt_InputMethod.c
+++ b/jdk/src/solaris/native/sun/awt/awt_InputMethod.c
@@ -185,7 +185,6 @@ extern char *XSetIMValues(
 );
 #endif

-#ifdef XAWT_HACK
 /*
  * This function is stolen from /src/solaris/hpi/src/system_md.c
  * It is used in setting the time in Java-level InputEvents
@@ -197,7 +196,6 @@ awt_util_nowMillisUTC()
 {
     struct timeval t;
     gettimeofday(&t, NULL);
     return ((jlong)t.tv_sec) * 1000 + (jlong)(t.tv_usec/1000);
 }
-#endif /* XAWT_HACK */

 /*
  * Converts the wchar_t string
to a multi-byte string calling wcstombs(). A @@ -546,11 +544,7 @@ awt_x11inputmethod_lookupString(XKeyPressedEvent *event, KeySym *keysymp) "dispatchCommittedText", "(Ljava/lang/String;J)V", javastr, -#ifndef XAWT_HACK - awt_util_nowMillisUTC_offset(event->time)); -#else event->time); -#endif } break; diff --git a/jdk/src/windows/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java b/jdk/src/windows/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java index eae2da60d41..16910379b28 100644 --- a/jdk/src/windows/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java +++ b/jdk/src/windows/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java @@ -48,7 +48,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.spi.CalendarDataProvider; -import java.util.spi.CalendarNameProvider; import java.util.spi.CurrencyNameProvider; import java.util.spi.LocaleNameProvider; import sun.util.spi.CalendarProvider; @@ -364,32 +363,6 @@ public class HostLocaleProviderAdapterImpl { }; } - public static CalendarNameProvider getCalendarNameProvider() { - return new CalendarNameProvider() { - @Override - public Locale[] getAvailableLocales() { - return getSupportedCalendarLocales(); - } - - @Override - public boolean isSupportedLocale(Locale locale) { - return isSupportedCalendarLocale(locale); - } - - @Override - public String getDisplayName(String calType, int field, int value, - int style, Locale locale) { - return null; - } - - @Override - public Map getDisplayNames(String calType, - int field, int style, Locale locale) { - return null; - } - }; - } - public static CalendarProvider getCalendarProvider() { return new CalendarProvider() { @Override diff --git a/jdk/src/windows/native/java/net/DualStackPlainDatagramSocketImpl.c b/jdk/src/windows/native/java/net/DualStackPlainDatagramSocketImpl.c index cdd969cef79..ec3c311984e 100644 --- a/jdk/src/windows/native/java/net/DualStackPlainDatagramSocketImpl.c +++ b/jdk/src/windows/native/java/net/DualStackPlainDatagramSocketImpl.c @@ -256,14 +256,14 @@ JNIEXPORT jint JNICALL Java_java_net_DualStackPlainDatagramSocketImpl_socketRece packetBuffer = (*env)->GetObjectField(env, dpObj, dp_bufID); packetBufferOffset = (*env)->GetIntField(env, dpObj, dp_offsetID); packetBufferLen = (*env)->GetIntField(env, dpObj, dp_bufLengthID); + /* Note: the buffer needn't be greater than 65,536 (0xFFFF) + * the max size of an IP packet. Anything bigger is truncated anyway. + */ + if (packetBufferLen > MAX_PACKET_LEN) { + packetBufferLen = MAX_PACKET_LEN; + } if (packetBufferLen > MAX_BUFFER_LEN) { - /* Note: the buffer needn't be greater than 65,536 (0xFFFF) - * the max size of an IP packet. Anything bigger is truncated anyway. 
-         */
-        if (packetBufferLen > MAX_PACKET_LEN) {
-            packetBufferLen = MAX_PACKET_LEN;
-        }
         fullPacket = (char *)malloc(packetBufferLen);
         if (!fullPacket) {
             JNU_ThrowOutOfMemoryError(env, "Native heap allocation failed");
diff --git a/jdk/src/windows/native/java/net/TwoStacksPlainDatagramSocketImpl.c b/jdk/src/windows/native/java/net/TwoStacksPlainDatagramSocketImpl.c
index e81f408b3c4..e92914bf832 100644
--- a/jdk/src/windows/native/java/net/TwoStacksPlainDatagramSocketImpl.c
+++ b/jdk/src/windows/native/java/net/TwoStacksPlainDatagramSocketImpl.c
@@ -145,7 +145,7 @@ static int getFD1(JNIEnv *env, jobject this) {
 /*
  * This function returns JNI_TRUE if the datagram size exceeds the underlying
  * provider's ability to send to the target address. The following OS
- * oddies have been observed :-
+ * oddities have been observed :-
  *
  * 1. On Windows 95/98 if we try to send a datagram > 12k to an application
  *    on the same machine then the send will fail silently.
@@ -218,7 +218,7 @@ jboolean exceedSizeLimit(JNIEnv *env, jint fd, jint addr, jint size)

     /*
      * Step 3: On Windows 95/98 then enumerate the IP addresses on
-     * this machine. This is necesary because we need to check if the
+     * this machine. This is necessary because we need to check if the
      * datagram is being sent to an application on the same machine.
      */
     if (is95or98) {
@@ -565,8 +565,8 @@ Java_java_net_TwoStacksPlainDatagramSocketImpl_connect0(JNIEnv *env, jobject thi

     if (xp_or_later) {
         /* SIO_UDP_CONNRESET fixes a bug introduced in Windows 2000, which
-         * returns connection reset errors un connected UDP sockets (as well
-         * as connected sockets. The solution is to only enable this feature
+         * returns connection reset errors on unconnected UDP sockets (as well
+         * as connected sockets). The solution is to only enable this feature
          * when the socket is connected
          */
         DWORD x1, x2; /* ignored result codes */
@@ -690,6 +690,12 @@ Java_java_net_TwoStacksPlainDatagramSocketImpl_send(JNIEnv *env, jobject this,
     fd = (*env)->GetIntField(env, fdObj, IO_fd_fdID);

     packetBufferLen = (*env)->GetIntField(env, packet, dp_lengthID);
+    /* Note: the buffer needn't be greater than 65,536 (0xFFFF)...
+     * the maximum size of an IP packet. Anything bigger is truncated anyway.
+     */
+    if (packetBufferLen > MAX_PACKET_LEN) {
+        packetBufferLen = MAX_PACKET_LEN;
+    }

     if (connected) {
         addrp = 0; /* arg to JVM_Sendto () null in this case */
@@ -728,7 +734,7 @@ Java_java_net_TwoStacksPlainDatagramSocketImpl_send(JNIEnv *env, jobject this,
     }

     /* When JNI-ifying the JDK's IO routines, we turned
-     * read's and write's of byte arrays of size greater
+     * reads and writes of byte arrays of size greater
      * than 2048 bytes into several operations of size 2048.
      * This saves a malloc()/memcpy()/free() for big
     * buffers.
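Both datagram hunks above hoist the same clamp out of the MAX_BUFFER_LEN branch: an IPv4 datagram payload can never exceed 64 KB, so any caller-supplied length beyond that is wasted work. The equivalent guard seen from the Java side, as a sketch; the 65536 limit mirrors the native MAX_PACKET_LEN constant, whose exact value is assumed here:

    import java.io.IOException;
    import java.net.DatagramPacket;
    import java.net.DatagramSocket;

    // Sketch of the clamp the native hunks above apply: cap the receive
    // length rather than trusting an oversized caller-supplied buffer.
    public class UdpClampSketch {
        static final int MAX_PACKET_LEN = 65536;   // assumed native limit

        static DatagramPacket receiveClamped(DatagramSocket sock, byte[] buf)
                throws IOException {
            int len = Math.min(buf.length, MAX_PACKET_LEN);
            DatagramPacket p = new DatagramPacket(buf, len);
            sock.receive(p);   // anything beyond len is truncated anyway
            return p;
        }
    }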
This is OK for file IO and TCP, but that diff --git a/jdk/test/ProblemList.txt b/jdk/test/ProblemList.txt index 5e41804c774..3707c7f11ee 100644 --- a/jdk/test/ProblemList.txt +++ b/jdk/test/ProblemList.txt @@ -122,9 +122,6 @@ # jdk_lang -# 8009615 -java/lang/instrument/IsModifiableClassAgent.java generic-all - # 6944188 java/lang/management/ThreadMXBean/ThreadStateTest.java generic-all @@ -137,6 +134,9 @@ java/lang/management/MemoryMXBean/LowMemoryTest2.sh generic-all # 8008200 java/lang/Class/asSubclass/BasicUnit.java generic-all +# 8015780 +java/lang/reflect/Method/GenericStringTest.java generic-all + ############################################################################ # jdk_management @@ -199,12 +199,6 @@ java/net/MulticastSocket/Test.java macosx-all # 7143960 java/net/DatagramSocket/SendDatagramToBadAddress.java macosx-all -# 8014720 -java/net/ResponseCache/B6181108.java generic-all - -# 8014723 -sun/misc/URLClassPath/ClassnameCharTest.java generic-all - # 8014719 sun/net/www/http/HttpClient/ProxyTest.java generic-all @@ -236,9 +230,6 @@ java/nio/channels/DatagramChannel/ChangingAddress.java macosx-all # 7132677 java/nio/channels/Selector/OutOfBand.java macosx-all -# 8003895 -java/nio/channels/AsynchronousChannelGroup/Unbounded.java windows-amd64 - ############################################################################ # jdk_rmi @@ -277,6 +268,13 @@ sun/security/pkcs11/ec/ReadCertificates.java solaris-all sun/security/pkcs11/ec/ReadPKCS12.java solaris-all sun/security/pkcs11/sslecc/ClientJSSEServerJSSE.java solaris-all +# 8005247 +sun/security/pkcs11/ec/TestECDSA.java solaris-all + +# 8009438 +sun/security/pkcs11/Secmod/AddPrivateKey.java linux-all +sun/security/pkcs11/Secmod/TrustAnchors.java linux-all + # 7041639, Solaris DSA keypair generation bug (Note: jdk_util also affected) java/security/KeyPairGenerator/SolarisShortDSA.java solaris-all sun/security/tools/jarsigner/onlymanifest.sh solaris-all @@ -331,6 +329,8 @@ sun/jvmstat/monitor/MonitoredVm/CR6672135.java generic-all # Tests take too long, on sparcs see 7143279 tools/pack200/CommandLineTests.java solaris-all, macosx-all tools/pack200/Pack200Test.java solaris-all, macosx-all +# 8015666 +tools/pack200/TimeStamp.java generic-all # 8007410 tools/launcher/FXLauncherTest.java linux-all diff --git a/jdk/test/com/sun/crypto/provider/Mac/HmacPBESHA1.java b/jdk/test/com/sun/crypto/provider/Mac/HmacPBESHA1.java index 601a766787c..374775f1b3f 100644 --- a/jdk/test/com/sun/crypto/provider/Mac/HmacPBESHA1.java +++ b/jdk/test/com/sun/crypto/provider/Mac/HmacPBESHA1.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,8 +23,8 @@ /** * @test - * @bug 4893959 - * @summary basic test for HmacPBESHA1 + * @bug 4893959 8013069 + * @summary basic test for PBE MAC algorithms. 
* @author Valerie Peng */ import java.io.PrintStream; @@ -68,8 +68,9 @@ public class HmacPBESHA1 { } Mac mac = Mac.getInstance(algo, PROVIDER); byte[] plainText = new byte[30]; - - mac.init(key); + PBEParameterSpec spec = + new PBEParameterSpec("saltValue".getBytes(), 250); + mac.init(key, spec); mac.update(plainText); byte[] value1 = mac.doFinal(); if (value1.length != length) { diff --git a/jdk/test/com/sun/crypto/provider/Mac/MacClone.java b/jdk/test/com/sun/crypto/provider/Mac/MacClone.java index 1b7ba06372e..4edc2436716 100644 --- a/jdk/test/com/sun/crypto/provider/Mac/MacClone.java +++ b/jdk/test/com/sun/crypto/provider/Mac/MacClone.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,12 +23,13 @@ /* * @test - * @bug 7087021 - * @summary MacClone + * @bug 7087021 8013069 + * @summary Clone tests for all MAC algorithms. * @author Jan Luehe */ +import java.security.spec.AlgorithmParameterSpec; import javax.crypto.*; -import javax.crypto.spec.SecretKeySpec; +import javax.crypto.spec.*; public class MacClone { @@ -39,18 +40,23 @@ public class MacClone { KeyGenerator kgen = KeyGenerator.getInstance("DES"); SecretKey skey = kgen.generateKey(); for (String algo : algos) { - doTest(algo, skey); + doTest(algo, skey, null); } - String[] algos2 = { "HmacPBESHA1" }; + String[] algos2 = { "HmacPBESHA1", "PBEWithHmacSHA1", + "PBEWithHmacSHA224", "PBEWithHmacSHA256", + "PBEWithHmacSHA384", "PBEWithHmacSHA512" }; skey = new SecretKeySpec("whatever".getBytes(), "PBE"); + PBEParameterSpec params = + new PBEParameterSpec("1234567890".getBytes(), 500); for (String algo : algos2) { - doTest(algo, skey); + doTest(algo, skey, params); } System.out.println("Test Passed"); } - private static void doTest(String algo, SecretKey skey) throws Exception { + private static void doTest(String algo, SecretKey skey, + AlgorithmParameterSpec params) throws Exception { // // Clone an uninitialized Mac object // @@ -72,7 +78,7 @@ public class MacClone { // Clone an initialized Mac object // mac = Mac.getInstance(algo, "SunJCE"); - mac.init(skey); + mac.init(skey, params); macClone = (Mac)mac.clone(); System.out.println(macClone.getProvider().toString()); System.out.println(macClone.getAlgorithm()); diff --git a/jdk/test/com/sun/crypto/provider/TLS/TestLeadingZeroes.java b/jdk/test/com/sun/crypto/provider/TLS/TestLeadingZeroes.java new file mode 100644 index 00000000000..a45f65f0a0f --- /dev/null +++ b/jdk/test/com/sun/crypto/provider/TLS/TestLeadingZeroes.java @@ -0,0 +1,420 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
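The HmacPBESHA1 and MacClone updates above reflect that the PBE-based MACs now take their salt and iteration count from a PBEParameterSpec at init time rather than from the key alone. A minimal sketch of the pattern, reusing the algorithm and provider names from the tests; the salt and iteration count are arbitrary:

    import javax.crypto.Mac;
    import javax.crypto.SecretKey;
    import javax.crypto.spec.PBEParameterSpec;
    import javax.crypto.spec.SecretKeySpec;

    // Sketch: initializing a PBE MAC with an explicit salt and iteration
    // count, as the updated tests above now do.
    public class PbeMacSketch {
        public static void main(String[] args) throws Exception {
            SecretKey key = new SecretKeySpec("password".getBytes(), "PBE");
            PBEParameterSpec spec =
                new PBEParameterSpec("saltValue".getBytes(), 250);
            Mac mac = Mac.getInstance("PBEWithHmacSHA1", "SunJCE");
            mac.init(key, spec);                  // params now required
            byte[] tag = mac.doFinal("message".getBytes());
            System.out.println(tag.length + "-byte MAC");
        }
    }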
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8014618 + * @summary Need to strip leading zeros in TlsPremasterSecret of DHKeyAgreement + * @author Pasi Eronen + */ + +import java.io.*; +import java.security.*; +import java.security.spec.*; +import java.security.interfaces.*; +import javax.crypto.*; +import javax.crypto.spec.*; +import javax.crypto.interfaces.*; +import com.sun.crypto.provider.SunJCE; + +/** + * Test that leading zeroes are stripped in TlsPremasterSecret case, + * but are left as-is in other cases. + * + * We use pre-generated keypairs, since with randomly generated keypairs, + * a leading zero happens only (roughly) 1 out of 256 cases. + */ + +public class TestLeadingZeroes { + + private static final String SUNJCE = "SunJCE"; + + private TestLeadingZeroes() {} + + public static void main(String argv[]) throws Exception { + // Add JCE to the list of providers + SunJCE jce = new SunJCE(); + Security.addProvider(jce); + + TestLeadingZeroes keyAgree = new TestLeadingZeroes(); + keyAgree.run(); + System.out.println("Test Passed"); + } + + private void run() throws Exception { + + // decode pre-generated keypairs + KeyFactory kfac = KeyFactory.getInstance("DH"); + PublicKey alicePubKey = + kfac.generatePublic(new X509EncodedKeySpec(alicePubKeyEnc)); + PublicKey bobPubKey = + kfac.generatePublic(new X509EncodedKeySpec(bobPubKeyEnc)); + PrivateKey alicePrivKey = + kfac.generatePrivate(new PKCS8EncodedKeySpec(alicePrivKeyEnc)); + PrivateKey bobPrivKey = + kfac.generatePrivate(new PKCS8EncodedKeySpec(bobPrivKeyEnc)); + + // generate normal shared secret + KeyAgreement aliceKeyAgree = KeyAgreement.getInstance("DH", SUNJCE); + aliceKeyAgree.init(alicePrivKey); + aliceKeyAgree.doPhase(bobPubKey, true); + byte[] sharedSecret = aliceKeyAgree.generateSecret(); + System.out.println("shared secret:\n" + toHexString(sharedSecret)); + + // verify that leading zero is present + if (sharedSecret.length != 128) { + throw new Exception("Unexpected shared secret length"); + } + if (sharedSecret[0] != 0) { + throw new Exception("First byte is not zero as expected"); + } + + // now, test TLS premaster secret + aliceKeyAgree.init(alicePrivKey); + aliceKeyAgree.doPhase(bobPubKey, true); + byte[] tlsPremasterSecret = + aliceKeyAgree.generateSecret("TlsPremasterSecret").getEncoded(); + System.out.println( + "tls premaster secret:\n" + toHexString(tlsPremasterSecret)); + + // check that leading zero has been stripped + if (tlsPremasterSecret.length != 127) { + throw new Exception("Unexpected TLS premaster secret length"); + } + if (tlsPremasterSecret[0] == 0) { + throw new Exception("First byte is zero"); + } + for (int i = 0; i < tlsPremasterSecret.length; i++) { + if (tlsPremasterSecret[i] != sharedSecret[i+1]) { + throw new Exception("Shared secrets differ"); + } + } + + } + + /* + * Converts a byte to hex digit and writes to the supplied buffer + */ + private void byte2hex(byte b, StringBuffer buf) { + char[] hexChars = { '0', '1', '2', '3', '4', '5', '6', '7', '8', + '9', 'A', 'B', 'C', 'D', 'E', 'F' }; + int high = ((b & 0xf0) >> 4); + int low = (b & 0x0f); + buf.append(hexChars[high]); + buf.append(hexChars[low]); + } + + /* + 
* Converts a byte array to hex string + */ + private String toHexString(byte[] block) { + StringBuffer buf = new StringBuffer(); + + int len = block.length; + + for (int i = 0; i < len; i++) { + byte2hex(block[i], buf); + if (i < len-1) { + buf.append(":"); + } + } + return buf.toString(); + } + + private static final byte alicePubKeyEnc[] = { + (byte)0x30, (byte)0x82, (byte)0x01, (byte)0x24, + (byte)0x30, (byte)0x81, (byte)0x99, (byte)0x06, + (byte)0x09, (byte)0x2A, (byte)0x86, (byte)0x48, + (byte)0x86, (byte)0xF7, (byte)0x0D, (byte)0x01, + (byte)0x03, (byte)0x01, (byte)0x30, (byte)0x81, + (byte)0x8B, (byte)0x02, (byte)0x81, (byte)0x81, + (byte)0x00, (byte)0xF4, (byte)0x88, (byte)0xFD, + (byte)0x58, (byte)0x4E, (byte)0x49, (byte)0xDB, + (byte)0xCD, (byte)0x20, (byte)0xB4, (byte)0x9D, + (byte)0xE4, (byte)0x91, (byte)0x07, (byte)0x36, + (byte)0x6B, (byte)0x33, (byte)0x6C, (byte)0x38, + (byte)0x0D, (byte)0x45, (byte)0x1D, (byte)0x0F, + (byte)0x7C, (byte)0x88, (byte)0xB3, (byte)0x1C, + (byte)0x7C, (byte)0x5B, (byte)0x2D, (byte)0x8E, + (byte)0xF6, (byte)0xF3, (byte)0xC9, (byte)0x23, + (byte)0xC0, (byte)0x43, (byte)0xF0, (byte)0xA5, + (byte)0x5B, (byte)0x18, (byte)0x8D, (byte)0x8E, + (byte)0xBB, (byte)0x55, (byte)0x8C, (byte)0xB8, + (byte)0x5D, (byte)0x38, (byte)0xD3, (byte)0x34, + (byte)0xFD, (byte)0x7C, (byte)0x17, (byte)0x57, + (byte)0x43, (byte)0xA3, (byte)0x1D, (byte)0x18, + (byte)0x6C, (byte)0xDE, (byte)0x33, (byte)0x21, + (byte)0x2C, (byte)0xB5, (byte)0x2A, (byte)0xFF, + (byte)0x3C, (byte)0xE1, (byte)0xB1, (byte)0x29, + (byte)0x40, (byte)0x18, (byte)0x11, (byte)0x8D, + (byte)0x7C, (byte)0x84, (byte)0xA7, (byte)0x0A, + (byte)0x72, (byte)0xD6, (byte)0x86, (byte)0xC4, + (byte)0x03, (byte)0x19, (byte)0xC8, (byte)0x07, + (byte)0x29, (byte)0x7A, (byte)0xCA, (byte)0x95, + (byte)0x0C, (byte)0xD9, (byte)0x96, (byte)0x9F, + (byte)0xAB, (byte)0xD0, (byte)0x0A, (byte)0x50, + (byte)0x9B, (byte)0x02, (byte)0x46, (byte)0xD3, + (byte)0x08, (byte)0x3D, (byte)0x66, (byte)0xA4, + (byte)0x5D, (byte)0x41, (byte)0x9F, (byte)0x9C, + (byte)0x7C, (byte)0xBD, (byte)0x89, (byte)0x4B, + (byte)0x22, (byte)0x19, (byte)0x26, (byte)0xBA, + (byte)0xAB, (byte)0xA2, (byte)0x5E, (byte)0xC3, + (byte)0x55, (byte)0xE9, (byte)0x2F, (byte)0x78, + (byte)0xC7, (byte)0x02, (byte)0x01, (byte)0x02, + (byte)0x02, (byte)0x02, (byte)0x02, (byte)0x00, + (byte)0x03, (byte)0x81, (byte)0x85, (byte)0x00, + (byte)0x02, (byte)0x81, (byte)0x81, (byte)0x00, + (byte)0xEE, (byte)0xD6, (byte)0xB1, (byte)0xA3, + (byte)0xB4, (byte)0x78, (byte)0x2B, (byte)0x35, + (byte)0xEF, (byte)0xCD, (byte)0x17, (byte)0x86, + (byte)0x63, (byte)0x2B, (byte)0x97, (byte)0x0E, + (byte)0x7A, (byte)0xD1, (byte)0xFF, (byte)0x7A, + (byte)0xEB, (byte)0x57, (byte)0x61, (byte)0xA1, + (byte)0xF7, (byte)0x90, (byte)0x11, (byte)0xA7, + (byte)0x79, (byte)0x28, (byte)0x69, (byte)0xBA, + (byte)0xA7, (byte)0xB2, (byte)0x37, (byte)0x17, + (byte)0xAE, (byte)0x3C, (byte)0x92, (byte)0x89, + (byte)0x88, (byte)0xE5, (byte)0x7E, (byte)0x8E, + (byte)0xF0, (byte)0x24, (byte)0xD0, (byte)0xE1, + (byte)0xC4, (byte)0xB0, (byte)0x26, (byte)0x5A, + (byte)0x1E, (byte)0xBD, (byte)0xA0, (byte)0xCF, + (byte)0x3E, (byte)0x97, (byte)0x2A, (byte)0x13, + (byte)0x92, (byte)0x3B, (byte)0x39, (byte)0xD0, + (byte)0x1D, (byte)0xA3, (byte)0x6B, (byte)0x3E, + (byte)0xC2, (byte)0xBB, (byte)0x14, (byte)0xB6, + (byte)0xE2, (byte)0x4C, (byte)0x0E, (byte)0x5B, + (byte)0x4B, (byte)0xA4, (byte)0x9D, (byte)0xA6, + (byte)0x21, (byte)0xB0, (byte)0xF9, (byte)0xDE, + (byte)0x55, (byte)0xAE, (byte)0x5C, (byte)0x29, + 
(byte)0x0E, (byte)0xC1, (byte)0xFC, (byte)0xBA, + (byte)0x51, (byte)0xD3, (byte)0xB6, (byte)0x6D, + (byte)0x75, (byte)0x72, (byte)0xDF, (byte)0x43, + (byte)0xAB, (byte)0x94, (byte)0x21, (byte)0x6E, + (byte)0x0C, (byte)0xD1, (byte)0x93, (byte)0x54, + (byte)0x56, (byte)0x7D, (byte)0x4B, (byte)0x90, + (byte)0xF1, (byte)0x94, (byte)0x45, (byte)0xD4, + (byte)0x2A, (byte)0x71, (byte)0xA1, (byte)0xB8, + (byte)0xDD, (byte)0xAA, (byte)0x05, (byte)0xF0, + (byte)0x27, (byte)0x37, (byte)0xBD, (byte)0x44 + }; + + private static final byte alicePrivKeyEnc[] = { + (byte)0x30, (byte)0x81, (byte)0xE3, (byte)0x02, + (byte)0x01, (byte)0x00, (byte)0x30, (byte)0x81, + (byte)0x99, (byte)0x06, (byte)0x09, (byte)0x2A, + (byte)0x86, (byte)0x48, (byte)0x86, (byte)0xF7, + (byte)0x0D, (byte)0x01, (byte)0x03, (byte)0x01, + (byte)0x30, (byte)0x81, (byte)0x8B, (byte)0x02, + (byte)0x81, (byte)0x81, (byte)0x00, (byte)0xF4, + (byte)0x88, (byte)0xFD, (byte)0x58, (byte)0x4E, + (byte)0x49, (byte)0xDB, (byte)0xCD, (byte)0x20, + (byte)0xB4, (byte)0x9D, (byte)0xE4, (byte)0x91, + (byte)0x07, (byte)0x36, (byte)0x6B, (byte)0x33, + (byte)0x6C, (byte)0x38, (byte)0x0D, (byte)0x45, + (byte)0x1D, (byte)0x0F, (byte)0x7C, (byte)0x88, + (byte)0xB3, (byte)0x1C, (byte)0x7C, (byte)0x5B, + (byte)0x2D, (byte)0x8E, (byte)0xF6, (byte)0xF3, + (byte)0xC9, (byte)0x23, (byte)0xC0, (byte)0x43, + (byte)0xF0, (byte)0xA5, (byte)0x5B, (byte)0x18, + (byte)0x8D, (byte)0x8E, (byte)0xBB, (byte)0x55, + (byte)0x8C, (byte)0xB8, (byte)0x5D, (byte)0x38, + (byte)0xD3, (byte)0x34, (byte)0xFD, (byte)0x7C, + (byte)0x17, (byte)0x57, (byte)0x43, (byte)0xA3, + (byte)0x1D, (byte)0x18, (byte)0x6C, (byte)0xDE, + (byte)0x33, (byte)0x21, (byte)0x2C, (byte)0xB5, + (byte)0x2A, (byte)0xFF, (byte)0x3C, (byte)0xE1, + (byte)0xB1, (byte)0x29, (byte)0x40, (byte)0x18, + (byte)0x11, (byte)0x8D, (byte)0x7C, (byte)0x84, + (byte)0xA7, (byte)0x0A, (byte)0x72, (byte)0xD6, + (byte)0x86, (byte)0xC4, (byte)0x03, (byte)0x19, + (byte)0xC8, (byte)0x07, (byte)0x29, (byte)0x7A, + (byte)0xCA, (byte)0x95, (byte)0x0C, (byte)0xD9, + (byte)0x96, (byte)0x9F, (byte)0xAB, (byte)0xD0, + (byte)0x0A, (byte)0x50, (byte)0x9B, (byte)0x02, + (byte)0x46, (byte)0xD3, (byte)0x08, (byte)0x3D, + (byte)0x66, (byte)0xA4, (byte)0x5D, (byte)0x41, + (byte)0x9F, (byte)0x9C, (byte)0x7C, (byte)0xBD, + (byte)0x89, (byte)0x4B, (byte)0x22, (byte)0x19, + (byte)0x26, (byte)0xBA, (byte)0xAB, (byte)0xA2, + (byte)0x5E, (byte)0xC3, (byte)0x55, (byte)0xE9, + (byte)0x2F, (byte)0x78, (byte)0xC7, (byte)0x02, + (byte)0x01, (byte)0x02, (byte)0x02, (byte)0x02, + (byte)0x02, (byte)0x00, (byte)0x04, (byte)0x42, + (byte)0x02, (byte)0x40, (byte)0x36, (byte)0x4D, + (byte)0xD0, (byte)0x58, (byte)0x64, (byte)0x91, + (byte)0x78, (byte)0xA2, (byte)0x4B, (byte)0x79, + (byte)0x46, (byte)0xFE, (byte)0xC9, (byte)0xD9, + (byte)0xCA, (byte)0x5C, (byte)0xF9, (byte)0xFD, + (byte)0x6C, (byte)0x5D, (byte)0x76, (byte)0x3A, + (byte)0x41, (byte)0x6D, (byte)0x44, (byte)0x62, + (byte)0x75, (byte)0x93, (byte)0x81, (byte)0x93, + (byte)0x00, (byte)0x4C, (byte)0xB1, (byte)0xD8, + (byte)0x7D, (byte)0x9D, (byte)0xF3, (byte)0x16, + (byte)0x2C, (byte)0x6C, (byte)0x9F, (byte)0x7A, + (byte)0x84, (byte)0xA3, (byte)0x7A, (byte)0xC1, + (byte)0x4F, (byte)0x60, (byte)0xE3, (byte)0xB5, + (byte)0x86, (byte)0x28, (byte)0x08, (byte)0x4D, + (byte)0x94, (byte)0xB6, (byte)0x04, (byte)0x0D, + (byte)0xAC, (byte)0xBD, (byte)0x1F, (byte)0x42, + (byte)0x8F, (byte)0x1B + }; + + private static final byte bobPubKeyEnc[] = { + (byte)0x30, (byte)0x82, (byte)0x01, (byte)0x23, + (byte)0x30, 
(byte)0x81, (byte)0x99, (byte)0x06, + (byte)0x09, (byte)0x2A, (byte)0x86, (byte)0x48, + (byte)0x86, (byte)0xF7, (byte)0x0D, (byte)0x01, + (byte)0x03, (byte)0x01, (byte)0x30, (byte)0x81, + (byte)0x8B, (byte)0x02, (byte)0x81, (byte)0x81, + (byte)0x00, (byte)0xF4, (byte)0x88, (byte)0xFD, + (byte)0x58, (byte)0x4E, (byte)0x49, (byte)0xDB, + (byte)0xCD, (byte)0x20, (byte)0xB4, (byte)0x9D, + (byte)0xE4, (byte)0x91, (byte)0x07, (byte)0x36, + (byte)0x6B, (byte)0x33, (byte)0x6C, (byte)0x38, + (byte)0x0D, (byte)0x45, (byte)0x1D, (byte)0x0F, + (byte)0x7C, (byte)0x88, (byte)0xB3, (byte)0x1C, + (byte)0x7C, (byte)0x5B, (byte)0x2D, (byte)0x8E, + (byte)0xF6, (byte)0xF3, (byte)0xC9, (byte)0x23, + (byte)0xC0, (byte)0x43, (byte)0xF0, (byte)0xA5, + (byte)0x5B, (byte)0x18, (byte)0x8D, (byte)0x8E, + (byte)0xBB, (byte)0x55, (byte)0x8C, (byte)0xB8, + (byte)0x5D, (byte)0x38, (byte)0xD3, (byte)0x34, + (byte)0xFD, (byte)0x7C, (byte)0x17, (byte)0x57, + (byte)0x43, (byte)0xA3, (byte)0x1D, (byte)0x18, + (byte)0x6C, (byte)0xDE, (byte)0x33, (byte)0x21, + (byte)0x2C, (byte)0xB5, (byte)0x2A, (byte)0xFF, + (byte)0x3C, (byte)0xE1, (byte)0xB1, (byte)0x29, + (byte)0x40, (byte)0x18, (byte)0x11, (byte)0x8D, + (byte)0x7C, (byte)0x84, (byte)0xA7, (byte)0x0A, + (byte)0x72, (byte)0xD6, (byte)0x86, (byte)0xC4, + (byte)0x03, (byte)0x19, (byte)0xC8, (byte)0x07, + (byte)0x29, (byte)0x7A, (byte)0xCA, (byte)0x95, + (byte)0x0C, (byte)0xD9, (byte)0x96, (byte)0x9F, + (byte)0xAB, (byte)0xD0, (byte)0x0A, (byte)0x50, + (byte)0x9B, (byte)0x02, (byte)0x46, (byte)0xD3, + (byte)0x08, (byte)0x3D, (byte)0x66, (byte)0xA4, + (byte)0x5D, (byte)0x41, (byte)0x9F, (byte)0x9C, + (byte)0x7C, (byte)0xBD, (byte)0x89, (byte)0x4B, + (byte)0x22, (byte)0x19, (byte)0x26, (byte)0xBA, + (byte)0xAB, (byte)0xA2, (byte)0x5E, (byte)0xC3, + (byte)0x55, (byte)0xE9, (byte)0x2F, (byte)0x78, + (byte)0xC7, (byte)0x02, (byte)0x01, (byte)0x02, + (byte)0x02, (byte)0x02, (byte)0x02, (byte)0x00, + (byte)0x03, (byte)0x81, (byte)0x84, (byte)0x00, + (byte)0x02, (byte)0x81, (byte)0x80, (byte)0x2C, + (byte)0x40, (byte)0xFA, (byte)0xF6, (byte)0xA6, + (byte)0xF8, (byte)0xAC, (byte)0xC2, (byte)0x4F, + (byte)0xCD, (byte)0xC7, (byte)0x37, (byte)0x93, + (byte)0xE5, (byte)0xE4, (byte)0x5E, (byte)0x18, + (byte)0x14, (byte)0xE6, (byte)0x50, (byte)0xDA, + (byte)0x55, (byte)0x38, (byte)0x5D, (byte)0x24, + (byte)0xF5, (byte)0x42, (byte)0x68, (byte)0x5F, + (byte)0xF5, (byte)0x15, (byte)0xC8, (byte)0x9B, + (byte)0x5D, (byte)0x06, (byte)0x3D, (byte)0xE1, + (byte)0x52, (byte)0x2F, (byte)0x98, (byte)0xFF, + (byte)0x37, (byte)0xBB, (byte)0x75, (byte)0x48, + (byte)0x48, (byte)0xE9, (byte)0x65, (byte)0x84, + (byte)0x37, (byte)0xBB, (byte)0xB3, (byte)0xE9, + (byte)0x36, (byte)0x01, (byte)0xB4, (byte)0x6A, + (byte)0x1C, (byte)0xB2, (byte)0x11, (byte)0x82, + (byte)0xCE, (byte)0x3D, (byte)0x65, (byte)0xE5, + (byte)0x3C, (byte)0x89, (byte)0xE9, (byte)0x52, + (byte)0x19, (byte)0xBD, (byte)0x58, (byte)0xF6, + (byte)0xA2, (byte)0x03, (byte)0xA8, (byte)0xB2, + (byte)0xA5, (byte)0xDB, (byte)0xEB, (byte)0xF5, + (byte)0x94, (byte)0xF9, (byte)0x46, (byte)0xBE, + (byte)0x45, (byte)0x4C, (byte)0x65, (byte)0xD2, + (byte)0xD1, (byte)0xCF, (byte)0xFF, (byte)0xFF, + (byte)0xFA, (byte)0x38, (byte)0xF1, (byte)0x72, + (byte)0xAB, (byte)0xB9, (byte)0x14, (byte)0x4E, + (byte)0xF5, (byte)0xF0, (byte)0x7A, (byte)0x8E, + (byte)0x45, (byte)0xFD, (byte)0x5B, (byte)0xF9, + (byte)0xA2, (byte)0x97, (byte)0x1B, (byte)0xAE, + (byte)0x2C, (byte)0x7B, (byte)0x6B, (byte)0x7C, + (byte)0x98, (byte)0xFE, (byte)0x58, (byte)0xDD, + (byte)0xBE, 
(byte)0xF6, (byte)0x1C, (byte)0x8E, + (byte)0xD0, (byte)0xA1, (byte)0x72 + }; + + private static final byte bobPrivKeyEnc[] = { + (byte)0x30, (byte)0x81, (byte)0xE4, (byte)0x02, + (byte)0x01, (byte)0x00, (byte)0x30, (byte)0x81, + (byte)0x99, (byte)0x06, (byte)0x09, (byte)0x2A, + (byte)0x86, (byte)0x48, (byte)0x86, (byte)0xF7, + (byte)0x0D, (byte)0x01, (byte)0x03, (byte)0x01, + (byte)0x30, (byte)0x81, (byte)0x8B, (byte)0x02, + (byte)0x81, (byte)0x81, (byte)0x00, (byte)0xF4, + (byte)0x88, (byte)0xFD, (byte)0x58, (byte)0x4E, + (byte)0x49, (byte)0xDB, (byte)0xCD, (byte)0x20, + (byte)0xB4, (byte)0x9D, (byte)0xE4, (byte)0x91, + (byte)0x07, (byte)0x36, (byte)0x6B, (byte)0x33, + (byte)0x6C, (byte)0x38, (byte)0x0D, (byte)0x45, + (byte)0x1D, (byte)0x0F, (byte)0x7C, (byte)0x88, + (byte)0xB3, (byte)0x1C, (byte)0x7C, (byte)0x5B, + (byte)0x2D, (byte)0x8E, (byte)0xF6, (byte)0xF3, + (byte)0xC9, (byte)0x23, (byte)0xC0, (byte)0x43, + (byte)0xF0, (byte)0xA5, (byte)0x5B, (byte)0x18, + (byte)0x8D, (byte)0x8E, (byte)0xBB, (byte)0x55, + (byte)0x8C, (byte)0xB8, (byte)0x5D, (byte)0x38, + (byte)0xD3, (byte)0x34, (byte)0xFD, (byte)0x7C, + (byte)0x17, (byte)0x57, (byte)0x43, (byte)0xA3, + (byte)0x1D, (byte)0x18, (byte)0x6C, (byte)0xDE, + (byte)0x33, (byte)0x21, (byte)0x2C, (byte)0xB5, + (byte)0x2A, (byte)0xFF, (byte)0x3C, (byte)0xE1, + (byte)0xB1, (byte)0x29, (byte)0x40, (byte)0x18, + (byte)0x11, (byte)0x8D, (byte)0x7C, (byte)0x84, + (byte)0xA7, (byte)0x0A, (byte)0x72, (byte)0xD6, + (byte)0x86, (byte)0xC4, (byte)0x03, (byte)0x19, + (byte)0xC8, (byte)0x07, (byte)0x29, (byte)0x7A, + (byte)0xCA, (byte)0x95, (byte)0x0C, (byte)0xD9, + (byte)0x96, (byte)0x9F, (byte)0xAB, (byte)0xD0, + (byte)0x0A, (byte)0x50, (byte)0x9B, (byte)0x02, + (byte)0x46, (byte)0xD3, (byte)0x08, (byte)0x3D, + (byte)0x66, (byte)0xA4, (byte)0x5D, (byte)0x41, + (byte)0x9F, (byte)0x9C, (byte)0x7C, (byte)0xBD, + (byte)0x89, (byte)0x4B, (byte)0x22, (byte)0x19, + (byte)0x26, (byte)0xBA, (byte)0xAB, (byte)0xA2, + (byte)0x5E, (byte)0xC3, (byte)0x55, (byte)0xE9, + (byte)0x2F, (byte)0x78, (byte)0xC7, (byte)0x02, + (byte)0x01, (byte)0x02, (byte)0x02, (byte)0x02, + (byte)0x02, (byte)0x00, (byte)0x04, (byte)0x43, + (byte)0x02, (byte)0x41, (byte)0x00, (byte)0xE0, + (byte)0x31, (byte)0xE7, (byte)0x77, (byte)0xB8, + (byte)0xD0, (byte)0x7E, (byte)0x0A, (byte)0x9B, + (byte)0x94, (byte)0xD5, (byte)0x3D, (byte)0x33, + (byte)0x62, (byte)0x32, (byte)0x51, (byte)0xCE, + (byte)0x74, (byte)0x5C, (byte)0xA5, (byte)0x72, + (byte)0xD9, (byte)0x36, (byte)0xF3, (byte)0x8A, + (byte)0x3F, (byte)0x8B, (byte)0xC6, (byte)0xFE, + (byte)0xEF, (byte)0x94, (byte)0x8B, (byte)0x50, + (byte)0x41, (byte)0x9B, (byte)0x14, (byte)0xC8, + (byte)0xE9, (byte)0x1F, (byte)0x24, (byte)0x1F, + (byte)0x65, (byte)0x8E, (byte)0xD3, (byte)0x85, + (byte)0xD0, (byte)0x68, (byte)0x6C, (byte)0xF1, + (byte)0x79, (byte)0x45, (byte)0xD0, (byte)0x06, + (byte)0xA4, (byte)0xB8, (byte)0xE0, (byte)0x64, + (byte)0xF5, (byte)0x38, (byte)0x72, (byte)0x97, + (byte)0x00, (byte)0x23, (byte)0x5F + }; +} + diff --git a/jdk/test/com/sun/jmx/remote/NotificationMarshalVersions/TestSerializationMismatch.java b/jdk/test/com/sun/jmx/remote/NotificationMarshalVersions/TestSerializationMismatch.java index e45b0d7aa57..f62a6437d51 100644 --- a/jdk/test/com/sun/jmx/remote/NotificationMarshalVersions/TestSerializationMismatch.java +++ b/jdk/test/com/sun/jmx/remote/NotificationMarshalVersions/TestSerializationMismatch.java @@ -12,7 +12,7 @@ import java.util.Arrays; * @bug 6937053 8005472 * * @run clean TestSerializationMismatch - * @run 
main TestSerializationMismatch + * @run main/othervm TestSerializationMismatch * */ public class TestSerializationMismatch { diff --git a/jdk/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanDoubleInvocationTest.java b/jdk/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanDoubleInvocationTest.java new file mode 100644 index 00000000000..91f30b0376c --- /dev/null +++ b/jdk/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanDoubleInvocationTest.java @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 7150256 + * @summary Basic Test for the DiagnosticCommandMBean + * @author Frederic Parain + * + * @run main/othervm -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=8125 DcmdMBeanDoubleInvocationTest + */ + + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.management.Descriptor; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.ReflectionException; +import javax.management.*; +import javax.management.remote.*; + +public class DcmdMBeanDoubleInvocationTest { + + private static String HOTSPOT_DIAGNOSTIC_MXBEAN_NAME = + "com.sun.management:type=DiagnosticCommand"; + + public static void main(String[] args) { + MBeanServerConnection mbs = null; + try { + JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:8125/jmxrmi"); + JMXConnector connector = JMXConnectorFactory.connect(url); + mbs = connector.getMBeanServerConnection(); + } catch(Throwable t) { + t.printStackTrace(); + } + ObjectName name; + try { + name = new ObjectName(HOTSPOT_DIAGNOSTIC_MXBEAN_NAME); + MBeanInfo info = mbs.getMBeanInfo(name); + String[] helpArgs = {"-all", "\n", "VM.version"}; + Object[] dcmdArgs = {helpArgs}; + String[] signature = {String[].class.getName()}; + String result = (String) mbs.invoke(name, "help", dcmdArgs, signature); + System.out.println(result); + } catch (RuntimeMBeanException ex) { + if (ex.getCause() instanceof IllegalArgumentException) { + System.out.println("Test passed"); + return; + } else { + ex.printStackTrace(); + throw new RuntimeException("TEST 
FAILED"); + } + } catch (InstanceNotFoundException | IntrospectionException + | ReflectionException | MalformedObjectNameException + | MBeanException|IOException ex) { + ex.printStackTrace(); + throw new RuntimeException("TEST FAILED"); + } + System.out.println("Double commands have not been detected"); + throw new RuntimeException("TEST FAILED"); + } +} diff --git a/jdk/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanInvocationTest.java b/jdk/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanInvocationTest.java new file mode 100644 index 00000000000..02132c37e5d --- /dev/null +++ b/jdk/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanInvocationTest.java @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 7150256 + * @summary Basic Test for the DiagnosticCommandMBean + * @author Frederic Parain + * + * @run main/othervm -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=8129 DcmdMBeanInvocationTest + */ + + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.management.Descriptor; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.ReflectionException; +import javax.management.*; +import javax.management.remote.*; + +public class DcmdMBeanInvocationTest { + + private static String HOTSPOT_DIAGNOSTIC_MXBEAN_NAME = + "com.sun.management:type=DiagnosticCommand"; + + public static void main(String[] args) { + MBeanServerConnection mbs = null; + try { + JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:8129/jmxrmi"); + JMXConnector connector = JMXConnectorFactory.connect(url); + mbs = connector.getMBeanServerConnection(); + } catch(Throwable t) { + t.printStackTrace(); + } + ObjectName name; + try { + name = new ObjectName(HOTSPOT_DIAGNOSTIC_MXBEAN_NAME); + MBeanInfo info = mbs.getMBeanInfo(name); + String[] helpArgs = {"-all"}; + Object[] dcmdArgs = {helpArgs}; + String[] signature = {String[].class.getName()}; + String result = (String) mbs.invoke(name, "help", dcmdArgs, signature); + System.out.println(result); + } catch 
(InstanceNotFoundException | IntrospectionException + | ReflectionException | MalformedObjectNameException + | MBeanException|IOException ex) { + ex.printStackTrace(); + throw new RuntimeException("TEST FAILED"); + } + System.out.println("Test passed"); + } +} diff --git a/jdk/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanPermissionsTest.java b/jdk/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanPermissionsTest.java new file mode 100644 index 00000000000..3499897dd9f --- /dev/null +++ b/jdk/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanPermissionsTest.java @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 7150256 + * @summary Permissions Tests for the DiagnosticCommandMBean + * @author Frederic Parain + * + * @run main/othervm DcmdMBeanPermissionsTest + */ + +import java.lang.management.ManagementFactory; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.ReflectPermission; +import java.security.Permission; +import java.util.HashSet; +import java.util.Iterator; +import javax.management.Descriptor; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanException; +import javax.management.MBeanInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanPermission; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.ReflectionException; +import javax.management.RuntimeMBeanException; + +/** + * + * @author fparain + */ +public class DcmdMBeanPermissionsTest { + + private static String HOTSPOT_DIAGNOSTIC_MXBEAN_NAME = + "com.sun.management:type=DiagnosticCommand"; + + static public class CustomSecurityManager extends SecurityManager { + + private HashSet grantedPermissions; + + public CustomSecurityManager() { + grantedPermissions = new HashSet(); + } + + public final void grantPermission(final Permission perm) { + grantedPermissions.add(perm); + } + + public final void denyPermission(final Permission perm) { + Iterator it = grantedPermissions.iterator(); + while (it.hasNext()) { + Permission p = it.next(); + if (p.equals(perm)) { + it.remove(); + } + } + } + + public final void checkPermission(final Permission perm) { + for (Permission p : grantedPermissions) { + if (p.implies(perm)) { + return; + } + } + throw new SecurityException(perm.toString()); + } + }; 
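A short usage sketch of the CustomSecurityManager defined above: it denies everything until a permission is explicitly granted, which is what lets the test toggle a single permission around each MBean invocation. This fragment assumes the nested class above is on the classpath; the permission chosen is illustrative:

    import java.security.Permission;

    // Usage sketch for the deny-by-default manager defined above.
    public class CustomSecurityManagerSketch {
        public static void main(String[] args) {
            DcmdMBeanPermissionsTest.CustomSecurityManager sm =
                new DcmdMBeanPermissionsTest.CustomSecurityManager();
            Permission perm = new RuntimePermission("modifyThread");
            try {
                sm.checkPermission(perm);        // nothing granted yet
                throw new AssertionError("expected SecurityException");
            } catch (SecurityException expected) {
                // denied, as intended
            }
            sm.grantPermission(perm);
            sm.checkPermission(perm);            // passes via Permission.implies
            sm.denyPermission(perm);             // revoked: denied again
        }
    }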
+ + static Permission createPermission(String classname, String name, + String action) { + Permission permission = null; + try { + Class c = Class.forName(classname); + if (action == null) { + try { + Constructor constructor = c.getConstructor(String.class); + permission = (Permission) constructor.newInstance(name); + + } catch (InstantiationException | IllegalAccessException + | IllegalArgumentException | InvocationTargetException + | NoSuchMethodException | SecurityException ex) { + ex.printStackTrace(); + throw new RuntimeException("TEST FAILED"); + } + } + if (permission == null) { + try { + Constructor constructor = c.getConstructor(String.class, + String.class); + permission = (Permission) constructor.newInstance( + name, + action); + } catch (InstantiationException | IllegalAccessException + | IllegalArgumentException | InvocationTargetException + | NoSuchMethodException | SecurityException ex) { + ex.printStackTrace(); + throw new RuntimeException("TEST FAILED"); + } + } + } catch (ClassNotFoundException ex) { + ex.printStackTrace(); + throw new RuntimeException("TEST FAILED"); + } + if (permission == null) { + throw new RuntimeException("TEST FAILED"); + } + return permission; + } + + // return true if invokation triggered a SecurityException + static boolean invokeOperation(MBeanServer mbs, ObjectName on, + MBeanOperationInfo opInfo) { + try { + if (opInfo.getSignature().length == 0) { + mbs.invoke(on, opInfo.getName(), + new Object[0], new String[0]); + } else { + mbs.invoke(on, opInfo.getName(), + new Object[1], new String[]{ String[].class.getName()}); + } + } catch (SecurityException ex) { + ex.printStackTrace(); + return true; + } catch (RuntimeMBeanException ex) { + if (ex.getCause() instanceof SecurityException) { + //ex.printStackTrace(); + return true; + } + } catch (MBeanException | InstanceNotFoundException + | ReflectionException ex) { + throw new RuntimeException("TEST FAILED"); + } + return false; + } + + static void testOperation(MBeanServer mbs, CustomSecurityManager sm, + ObjectName on, MBeanOperationInfo opInfo) { + System.out.println("Testing " + opInfo.getName()); + Descriptor desc = opInfo.getDescriptor(); + if (desc.getFieldValue("dcmd.permissionClass") == null) { + // No special permission required, execution should not trigger + // any security exception + if (invokeOperation(mbs, on, opInfo)) { + throw new RuntimeException("TEST FAILED"); + } + } else { + // Building the required permission + Permission reqPerm = createPermission( + (String)desc.getFieldValue("dcmd.permissionClass"), + (String)desc.getFieldValue("dcmd.permissionName"), + (String)desc.getFieldValue("dcmd.permissionAction")); + // Paranoid mode: check that the SecurityManager has not already + // been granted the permission + sm.denyPermission(reqPerm); + // A special permission is required for this operation, + // invoking it without the permission granted must trigger + // a security exception + if(!invokeOperation(mbs, on, opInfo)) { + throw new RuntimeException("TEST FAILED"); + } + // grant the permission and re-try invoking the operation + sm.grantPermission(reqPerm); + if(invokeOperation(mbs, on, opInfo)) { + throw new RuntimeException("TEST FAILED"); + } + // Clean up + sm.denyPermission(reqPerm); + } + } + + public static void main(final String[] args) { + final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); + ObjectName on = null; + try { + on = new ObjectName(HOTSPOT_DIAGNOSTIC_MXBEAN_NAME); + } catch (MalformedObjectNameException ex) { + ex.printStackTrace(); + 
throw new RuntimeException("TEST FAILED"); + } + MBeanInfo info = null; + try { + info = mbs.getMBeanInfo(on); + } catch (InstanceNotFoundException | IntrospectionException + | ReflectionException ex) { + ex.printStackTrace(); + throw new RuntimeException("TEST FAILED"); + } + CustomSecurityManager sm = new CustomSecurityManager(); + System.setSecurityManager(sm); + // Set of permission required to run the test cleanly + // Some permissions are required by the MBeanServer and other + // platform services (RuntimePermission("createClassLoader"), + // ReflectPermission("suppressAccessChecks"), + // java.util.logging.LoggingPermission("control"), + // RuntimePermission("exitVM.97")). + // Other permissions are required by commands being invoked + // in the test (for instance, RuntimePermission("modifyThreadGroup") + // and RuntimePermission("modifyThread") are checked when + // runFinalization() is invoked by the gcRunFinalization command. + sm.grantPermission(new RuntimePermission("createClassLoader")); + sm.grantPermission(new ReflectPermission("suppressAccessChecks")); + sm.grantPermission(new java.util.logging.LoggingPermission("control", "")); + sm.grantPermission(new java.lang.RuntimePermission("exitVM.97")); + sm.grantPermission(new java.lang.RuntimePermission("modifyThreadGroup")); + sm.grantPermission(new java.lang.RuntimePermission("modifyThread")); + for(MBeanOperationInfo opInfo : info.getOperations()) { + Permission opPermission = new MBeanPermission(info.getClassName(), + opInfo.getName(), + on, + "invoke"); + sm.grantPermission(opPermission); + testOperation(mbs, sm, on, opInfo); + sm.denyPermission(opPermission); + } + System.out.println("TEST PASSED"); + } +} diff --git a/jdk/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanTest.java b/jdk/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanTest.java new file mode 100644 index 00000000000..10ce4240402 --- /dev/null +++ b/jdk/test/com/sun/management/DiagnosticCommandMBean/DcmdMBeanTest.java @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 7150256 + * @summary Basic Test for the DiagnosticCommandMBean + * @author Frederic Parain + * + * @run main/othervm -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=8127 DcmdMBeanTest + */ + + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.management.Descriptor; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.ReflectionException; +import javax.management.*; +import javax.management.remote.*; + +public class DcmdMBeanTest { + + private static String HOTSPOT_DIAGNOSTIC_MXBEAN_NAME = + "com.sun.management:type=DiagnosticCommand"; + + public static void main(String[] args) { + MBeanServerConnection mbs = null; + try { + JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:8127/jmxrmi"); + JMXConnector connector = JMXConnectorFactory.connect(url); + mbs = connector.getMBeanServerConnection(); + } catch(Throwable t) { + t.printStackTrace(); + } + ObjectName name; + try { + name = new ObjectName(HOTSPOT_DIAGNOSTIC_MXBEAN_NAME); + MBeanInfo info = mbs.getMBeanInfo(name); + // the test should check that the MBean doesn't have any + // Attribute, notification or constructor. Current version only + // check operations + System.out.println("Class Name:"+info.getClassName()); + System.out.println("Description:"+info.getDescription()); + MBeanOperationInfo[] opInfo = info.getOperations(); + System.out.println("Operations:"); + for(int i=0; i"); + BasicFileAttributes dstAttrs = Files + .getFileAttributeView(dst, BasicFileAttributeView.class) + .readAttributes(); + System.out.println("mtime: " + dstAttrs.lastModifiedTime()); + System.out.println("ctime: " + dstAttrs.creationTime()); + System.out.println("atime: " + dstAttrs.lastAccessTime()); + + // 1-second granularity + if (attrs.lastModifiedTime().to(TimeUnit.SECONDS) != + dstAttrs.lastModifiedTime().to(TimeUnit.SECONDS) || + attrs.lastAccessTime().to(TimeUnit.SECONDS) != + dstAttrs.lastAccessTime().to(TimeUnit.SECONDS) || + attrs.creationTime().to(TimeUnit.SECONDS) != + dstAttrs.creationTime().to(TimeUnit.SECONDS)) { + throw new RuntimeException("Timestamp Copy Failed!"); + } + Files.delete(fsPath); + } + private static FileSystem newZipFileSystem(Path path, Map env) throws Exception { diff --git a/jdk/test/demo/zipfs/basic.sh b/jdk/test/demo/zipfs/basic.sh index 4b814f9849e..c36f286efa5 100644 --- a/jdk/test/demo/zipfs/basic.sh +++ b/jdk/test/demo/zipfs/basic.sh @@ -22,7 +22,7 @@ # # @test # @bug 6990846 7009092 7009085 7015391 7014948 7005986 7017840 7007596 -# 7157656 8002390 +# 7157656 8002390 7012868 7012856 # @summary Test ZipFileSystem demo # @build Basic PathOps ZipFSTester # @run shell basic.sh diff --git a/jdk/test/java/io/pathNames/General.java b/jdk/test/java/io/pathNames/General.java index fffb1a61118..a156be92be0 100644 --- a/jdk/test/java/io/pathNames/General.java +++ b/jdk/test/java/io/pathNames/General.java @@ -277,8 +277,8 @@ public class General { { check(ans, ask + slash); checkNames(depth, create, - ans, - ask); + ans.endsWith(File.separator) ? 
ans : ans + File.separator, + ask + slash); } @@ -308,9 +308,6 @@ public class General { String ans, String ask) throws Exception { - ans = ans.endsWith(File.separator) ? ans : ans + File.separator; - ask = ask.endsWith(File.separator) ? ask : ask + File.separator; - int d = depth - 1; File f = new File(ans); String n; diff --git a/jdk/test/java/io/pathNames/GeneralWin32.java b/jdk/test/java/io/pathNames/GeneralWin32.java index 45b007dd8dd..23c34f34ba6 100644 --- a/jdk/test/java/io/pathNames/GeneralWin32.java +++ b/jdk/test/java/io/pathNames/GeneralWin32.java @@ -50,13 +50,13 @@ public class GeneralWin32 extends General { private static final int DEPTH = 2; private static String baseDir = null; private static String userDir = null; + private static String relative = null; /* Pathnames relative to working directory */ private static void checkCaseLookup() throws IOException { /* Use long names here to avoid 8.3 format, which Samba servers often force to lowercase */ - String relative = baseDir.substring(userDir.length() + 1); File d1 = new File(relative, "XyZzY0123"); File d2 = new File(d1, "FOO_bar_BAZ"); File f = new File(d2, "GLORPified"); @@ -79,9 +79,9 @@ public class GeneralWin32 extends General { case of filenames, rather than just using the input case */ File y = new File(userDir, f.getPath()); String ans = y.getPath(); - check(ans, relative + "\\" + "XyZzY0123\\FOO_bar_BAZ\\GLORPified"); - check(ans, relative + "\\" + "xyzzy0123\\foo_bar_baz\\glorpified"); - check(ans, relative + "\\" + "XYZZY0123\\FOO_BAR_BAZ\\GLORPIFIED"); + check(ans, relative + "XyZzY0123\\FOO_bar_BAZ\\GLORPified"); + check(ans, relative + "xyzzy0123\\foo_bar_baz\\glorpified"); + check(ans, relative + "XYZZY0123\\FOO_BAR_BAZ\\GLORPIFIED"); } private static void checkWild(File f) throws Exception { @@ -103,8 +103,7 @@ public class GeneralWin32 extends General { private static void checkRelativePaths() throws Exception { checkCaseLookup(); checkWildCards(); - String relative = baseDir.substring(userDir.length() + 1); - checkNames(3, true, baseDir.toString(), relative); + checkNames(3, true, baseDir, relative); } @@ -136,7 +135,6 @@ public class GeneralWin32 extends General { String ans = exists ? df.getAbsolutePath() : d; if (!ans.endsWith("\\")) ans = ans + "\\"; - String relative = baseDir.substring(userDir.length() + 1); checkNames(depth, false, ans + relative, d + relative); } @@ -171,15 +169,16 @@ public class GeneralWin32 extends General { return; } if (args.length > 0) debug = true; - userDir = System.getProperty("user.dir"); - baseDir = initTestData(6); + userDir = System.getProperty("user.dir") + '\\'; + baseDir = initTestData(6) + '\\'; + relative = baseDir.substring(userDir.length()); checkRelativePaths(); checkDrivePaths(); checkUncPaths(); } private static String initTestData(int maxDepth) throws IOException { - File parent = new File(System.getProperty("user.dir")); + File parent = new File(userDir); String baseDir = null; maxDepth = maxDepth < DEPTH + 2 ? DEPTH + 2 : maxDepth; for (int i = 0; i < maxDepth; i ++) { diff --git a/jdk/test/java/lang/IntegralPrimitiveToString.java b/jdk/test/java/lang/IntegralPrimitiveToString.java new file mode 100644 index 00000000000..0135cd2ef44 --- /dev/null +++ b/jdk/test/java/lang/IntegralPrimitiveToString.java @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
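Returning to DcmdMBeanTest above: the same MBean can be exercised without the RMI connector by going through the platform MBeanServer directly. A hedged sketch; the operation name "vmVersion" and the String[] signature are assumptions to be checked against what getMBeanInfo() actually reports, not documented API:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class DcmdInvokeSketch {
    public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = new ObjectName("com.sun.management:type=DiagnosticCommand");
        // Diagnostic command operations are assumed to accept an optional
        // String[] of command arguments and return their output as a String.
        String out = (String) mbs.invoke(name, "vmVersion",
                new Object[] { null },                      // no command arguments
                new String[] { String[].class.getName() }); // "[Ljava.lang.String;"
        System.out.println(out);
    }
}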
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Arrays; +import java.util.List; +import java.util.function.LongFunction; +import java.util.function.Function; + +import static org.testng.Assert.assertEquals; + +/** + * @test + * @run testng IntegralPrimitiveToString + * @summary test string conversions for primitive integral types. + * @author Mike Duigou + */ +public class IntegralPrimitiveToString { + + @Test(dataProvider="numbers") + public void testToString(String description, + Function converter, + Function unsignedConverter, + N[] values, + Stringifier[] stringifiers) { + System.out.printf("%s : conversions: %d values: %d\n", description, stringifiers.length, values.length); + for( N value : values) { + BigInteger asBigInt = converter.apply(value); + BigInteger asUnsignedBigInt = unsignedConverter.apply(value); + for(Stringifier stringifier : stringifiers) { + stringifier.assertMatchingToString(value, asBigInt, asUnsignedBigInt, description); + } + } + } + + static class Stringifier { + final boolean signed; + final int radix; + final Function toString; + Stringifier(boolean signed, int radix, Function toString) { + this.signed = signed; + this.radix = radix; + this.toString = toString; + } + + public void assertMatchingToString(N value, BigInteger asSigned, BigInteger asUnsigned, String description) { + String expected = signed + ? 
asSigned.toString(radix) + : asUnsigned.toString(radix); + + String actual = toString.apply(value); + + assertEquals(actual, expected, description + " conversion should be the same"); + } + } + + @DataProvider(name="numbers", parallel=true) + public Iterator testSetProvider() { + + return Arrays.asList( + new Object[] { "Byte", + (Function) b -> BigInteger.valueOf((long) b), + (Function) b -> BigInteger.valueOf(Integer.toUnsignedLong((byte) b)), + numberProvider((LongFunction) l -> Byte.valueOf((byte) l), Byte.SIZE), + new Stringifier[] { + new Stringifier(true, 10, b -> b.toString()), + new Stringifier(true, 10, b -> Byte.toString(b)) + } + }, + new Object[] { "Short", + (Function) s -> BigInteger.valueOf((long) s), + (Function) s -> BigInteger.valueOf(Integer.toUnsignedLong((short) s)), + numberProvider((LongFunction) l -> Short.valueOf((short) l), Short.SIZE), + new Stringifier[] { + new Stringifier(true, 10, s -> s.toString()), + new Stringifier(true, 10, s -> Short.toString( s)) + } + }, + new Object[] { "Integer", + (Function) i -> BigInteger.valueOf((long) i), + (Function) i -> BigInteger.valueOf(Integer.toUnsignedLong(i)), + numberProvider((LongFunction) l -> Integer.valueOf((int) l), Integer.SIZE), + new Stringifier[] { + new Stringifier(true, 10, i -> i.toString()), + new Stringifier(true, 10, i -> Integer.toString(i)), + new Stringifier(false, 2, Integer::toBinaryString), + new Stringifier(false, 16, Integer::toHexString), + new Stringifier(false, 8, Integer::toOctalString), + new Stringifier(true, 2, i -> Integer.toString(i, 2)), + new Stringifier(true, 8, i -> Integer.toString(i, 8)), + new Stringifier(true, 10, i -> Integer.toString(i, 10)), + new Stringifier(true, 16, i -> Integer.toString(i, 16)), + new Stringifier(true, Character.MAX_RADIX, i -> Integer.toString(i, Character.MAX_RADIX)), + new Stringifier(false, 10, i -> Integer.toUnsignedString(i)), + new Stringifier(false, 2, i -> Integer.toUnsignedString(i, 2)), + new Stringifier(false, 8, i -> Integer.toUnsignedString(i, 8)), + new Stringifier(false, 10, i -> Integer.toUnsignedString(i, 10)), + new Stringifier(false, 16, i -> Integer.toUnsignedString(i, 16)), + new Stringifier(false, Character.MAX_RADIX, i -> Integer.toUnsignedString(i, Character.MAX_RADIX)) + } + }, + new Object[] { "Long", + (Function) BigInteger::valueOf, + (Function) l -> { + if (l >= 0) { + return BigInteger.valueOf((long) l); + } else { + int upper = (int)(l >>> 32); + int lower = (int) (long) l; + + // return (upper << 32) + lower + return (BigInteger.valueOf(Integer.toUnsignedLong(upper))).shiftLeft(32). 
+ add(BigInteger.valueOf(Integer.toUnsignedLong(lower))); + } + }, + numberProvider((LongFunction) Long::valueOf, Long.SIZE), + new Stringifier[] { + new Stringifier(true, 10, l -> l.toString()), + new Stringifier(true, 10, l -> Long.toString(l)), + new Stringifier(false, 2, Long::toBinaryString), + new Stringifier(false, 16, Long::toHexString), + new Stringifier(false, 8, Long::toOctalString), + new Stringifier(true, 2, l -> Long.toString(l, 2)), + new Stringifier(true, 8, l -> Long.toString(l, 8)), + new Stringifier(true, 10, l -> Long.toString(l, 10)), + new Stringifier(true, 16, l -> Long.toString(l, 16)), + new Stringifier(true, Character.MAX_RADIX, l -> Long.toString(l, Character.MAX_RADIX)), + new Stringifier(false, 10, Long::toUnsignedString), + new Stringifier(false, 2, l -> Long.toUnsignedString(l, 2)), + new Stringifier(false, 8, l-> Long.toUnsignedString(l, 8)), + new Stringifier(false, 10, l -> Long.toUnsignedString(l, 10)), + new Stringifier(false, 16, l -> Long.toUnsignedString(l, 16)), + new Stringifier(false, Character.MAX_RADIX, l -> Long.toUnsignedString(l, Character.MAX_RADIX)) + } + } + ).iterator(); + } + private static final long[] SOME_PRIMES = { + 3L, 5L, 7L, 11L, 13L, 17L, 19L, 23L, 29L, 31L, 37L, 41L, 43L, 47L, 53L, + 59L, 61L, 71L, 73L, 79L, 83L, 89L, 97L, 101L, 103L, 107L, 109L, 113L, + 5953L, 5981L, 5987L, 6007L, 6011L, 6029L, 6037L, 6043L, 6047L, 6053L, + 16369L, 16381L, 16411L, 32749L, 32771L, 65521L, 65537L, + (long) Integer.MAX_VALUE }; + + public N[] numberProvider(LongFunction boxer, int bits, N... extras) { + List numbers = new ArrayList<>(); + + for(int bitmag = 0; bitmag < bits; bitmag++) { + long value = 1L << bitmag; + numbers.add(boxer.apply(value)); + numbers.add(boxer.apply(value - 1)); + numbers.add(boxer.apply(value + 1)); + numbers.add(boxer.apply(-value)); + for(int divisor = 0; divisor < SOME_PRIMES.length && value < SOME_PRIMES[divisor]; divisor++) { + numbers.add(boxer.apply(value - SOME_PRIMES[divisor])); + numbers.add(boxer.apply(value + SOME_PRIMES[divisor])); + numbers.add(boxer.apply(value * SOME_PRIMES[divisor])); + numbers.add(boxer.apply(value / SOME_PRIMES[divisor])); + numbers.add(boxer.apply(value | SOME_PRIMES[divisor])); + numbers.add(boxer.apply(value & SOME_PRIMES[divisor])); + numbers.add(boxer.apply(value ^ SOME_PRIMES[divisor])); + } + } + + numbers.addAll(Arrays.asList(extras)); + + return (N[]) numbers.toArray(new Number[numbers.size()]); + } +} diff --git a/jdk/test/java/lang/management/MXBean/MXBeanBehavior.java b/jdk/test/java/lang/management/MXBean/MXBeanBehavior.java index ec7aa4e9333..ccffecabebd 100644 --- a/jdk/test/java/lang/management/MXBean/MXBeanBehavior.java +++ b/jdk/test/java/lang/management/MXBean/MXBeanBehavior.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013 Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,10 @@ import java.util.*; import javax.management.*; public class MXBeanBehavior { + // Exclude list: list of platform MBeans that are not MXBeans + public static final HashSet excludeList = new HashSet<>( + Arrays.asList("com.sun.management:type=DiagnosticCommand")); + public static void main(String[] args) throws Exception { MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); @@ -92,6 +96,10 @@ public class MXBeanBehavior { by generic MXBean tests. 
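The shiftLeft(32)/add expression above rebuilds the unsigned value of a negative long from its two int halves. A standalone check, using only java.math and java.lang APIs, that the reconstruction agrees with Long.toUnsignedString:

import java.math.BigInteger;

public class UnsignedLongCheck {
    static BigInteger toUnsignedBigInteger(long l) {
        if (l >= 0) {
            return BigInteger.valueOf(l);
        }
        int upper = (int) (l >>> 32);
        int lower = (int) l;
        // (upper << 32) + lower, carried out in BigInteger
        return BigInteger.valueOf(Integer.toUnsignedLong(upper)).shiftLeft(32)
                .add(BigInteger.valueOf(Integer.toUnsignedLong(lower)));
    }

    public static void main(String[] args) {
        for (long l : new long[] { -1L, Long.MIN_VALUE, -1234567890123456789L }) {
            String viaBigInteger = toUnsignedBigInteger(l).toString(10);
            String viaLong = Long.toUnsignedString(l, 10);
            if (!viaBigInteger.equals(viaLong)) {
                throw new AssertionError(l + ": " + viaBigInteger + " != " + viaLong);
            }
        }
        System.out.println("unsigned conversions agree");
    }
}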
*/ private static void test(MBeanServer mbs, ObjectName name) throws Exception { + if(excludeList.contains(name.getCanonicalName())) { + // Skipping not MXBean objects. + return; + } System.out.println("Testing: " + name); MBeanInfo mbi = mbs.getMBeanInfo(name); diff --git a/jdk/test/java/lang/management/ManagementFactory/MBeanServerMXBeanUnsupportedTest.java b/jdk/test/java/lang/management/ManagementFactory/MBeanServerMXBeanUnsupportedTest.java index b7d9b111f6b..00f3768efe6 100644 --- a/jdk/test/java/lang/management/ManagementFactory/MBeanServerMXBeanUnsupportedTest.java +++ b/jdk/test/java/lang/management/ManagementFactory/MBeanServerMXBeanUnsupportedTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2013 Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,8 @@ import java.lang.management.ManagementFactory; import java.lang.reflect.InvocationHandler; import java.lang.reflect.Method; import java.lang.reflect.Proxy; +import java.util.Arrays; +import java.util.HashSet; import javax.management.MBeanServer; import javax.management.MBeanServerBuilder; import javax.management.MBeanServerDelegate; @@ -81,6 +83,9 @@ public class MBeanServerMXBeanUnsupportedTest { public static class MBeanServerForwarderInvocationHandler implements InvocationHandler { + public static final HashSet excludeList = new HashSet( + Arrays.asList("com.sun.management:type=DiagnosticCommand")); + public static MBeanServerForwarder newProxyInstance() { final InvocationHandler handler = @@ -126,15 +131,17 @@ public class MBeanServerMXBeanUnsupportedTest { if (domain.equals("java.lang") || domain.equals("java.util.logging") || domain.equals("com.sun.management")) { - String mxbean = (String) - mbs.getMBeanInfo(name).getDescriptor().getFieldValue("mxbean"); - if (mxbean == null || !mxbean.equals("true")) { - throw new RuntimeException( + if(!excludeList.contains(name.getCanonicalName())) { + String mxbean = (String) + mbs.getMBeanInfo(name).getDescriptor().getFieldValue("mxbean"); + if (mxbean == null || !mxbean.equals("true")) { + throw new RuntimeException( "Platform MBeans must be MXBeans!"); - } - if (!(mbean instanceof StandardMBean)) { - throw new RuntimeException( + } + if (!(mbean instanceof StandardMBean)) { + throw new RuntimeException( "MXBeans must be wrapped in StandardMBean!"); + } } } return result; diff --git a/jdk/test/java/net/CookieHandler/TestHttpCookie.java b/jdk/test/java/net/CookieHandler/TestHttpCookie.java index 1975fef4959..55037a07090 100644 --- a/jdk/test/java/net/CookieHandler/TestHttpCookie.java +++ b/jdk/test/java/net/CookieHandler/TestHttpCookie.java @@ -243,6 +243,10 @@ public class TestHttpCookie { test("set-cookie2: Customer = \"WILE_E_COYOTE\"; Version = \"1\"; Path = \"/acme\"") .n("Customer").v("WILE_E_COYOTE").ver(1).p("/acme"); + // $NAME is reserved; result should be null + test("set-cookie2: $Customer = \"WILE_E_COYOTE\"; Version = \"1\"; Path = \"/acme\"") + .nil(); + // a 'full' cookie test("set-cookie2: Customer=\"WILE_E_COYOTE\"" + ";Version=\"1\"" + diff --git a/jdk/test/java/net/InterfaceAddress/NetworkPrefixLength.java b/jdk/test/java/net/InterfaceAddress/NetworkPrefixLength.java index 211086e7398..448973eb03c 100644 --- a/jdk/test/java/net/InterfaceAddress/NetworkPrefixLength.java +++ b/jdk/test/java/net/InterfaceAddress/NetworkPrefixLength.java @@ 
-22,7 +22,7 @@ */ /* @test - * @bug 6707289 + * @bug 6707289 7107883 * @summary InterfaceAddress.getNetworkPrefixLength() does not conform to Javadoc */ @@ -47,6 +47,14 @@ public class NetworkPrefixLength { passed = false; debug(nic.getName(), iaddr); } + InetAddress ia = iaddr.getAddress(); + if (ia.isLoopbackAddress() && ia instanceof Inet4Address) { + // assumption: prefix length will always be 8 + if (iaddr.getNetworkPrefixLength() != 8) { + out.println("Expected prefix of 8, got " + iaddr); + passed = false; + } + } } } diff --git a/jdk/test/java/nio/Buffer/Chars.java b/jdk/test/java/nio/Buffer/Chars.java new file mode 100644 index 00000000000..e91b6a73883 --- /dev/null +++ b/jdk/test/java/nio/Buffer/Chars.java @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8014854 + * @summary Exercises CharBuffer#chars on each of the CharBuffer types + * @run testng Chars + */ + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.CharBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import static org.testng.Assert.assertEquals; + +public class Chars { + + static final Random RAND = new Random(); + + static final int SIZE = 128 + RAND.nextInt(1024); + + /** + * Randomize the char buffer's position and limit. + */ + static CharBuffer randomizeRange(CharBuffer cb) { + int mid = cb.capacity() >>> 1; + int start = RAND.nextInt(mid); + int end = mid + RAND.nextInt(mid); + cb.position(start); + cb.limit(end); + return cb; + } + + /** + * Randomize the char buffer's contents, position and limit. + */ + static CharBuffer randomize(CharBuffer cb) { + while (cb.hasRemaining()) { + cb.put((char)RAND.nextInt()); + } + return randomizeRange(cb); + } + + /** + * Sums the remaining chars in the char buffer. + */ + static int intSum(CharBuffer cb) { + int sum = 0; + cb.mark(); + while (cb.hasRemaining()) { + sum += cb.get(); + } + cb.reset(); + return sum; + } + + /** + * Creates char buffers to test, adding them to the given list. 
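Chars.java drives CharBuffer#chars over heap, direct, view and wrapped buffers; the property it checks in each case is that the chars() stream visits exactly the remaining characters without consuming the buffer. A minimal standalone illustration (not part of the test):

import java.nio.CharBuffer;

public class CharsSketch {
    public static void main(String[] args) {
        CharBuffer cb = CharBuffer.wrap("hello");
        int streamSum = cb.chars().sum(); // streams remaining chars, position untouched
        int loopSum = 0;
        cb.mark();
        while (cb.hasRemaining()) {
            loopSum += cb.get();          // consumes; mark/reset restores position
        }
        cb.reset();
        if (streamSum != loopSum) {
            throw new AssertionError(streamSum + " != " + loopSum);
        }
        System.out.println("sum = " + streamSum + ", position = " + cb.position());
    }
}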
+ */ + static void addCases(CharBuffer cb, List buffers) { + randomize(cb); + buffers.add(cb); + + buffers.add(cb.slice()); + buffers.add(cb.duplicate()); + buffers.add(cb.asReadOnlyBuffer()); + + buffers.add(randomizeRange(cb.slice())); + buffers.add(randomizeRange(cb.duplicate())); + buffers.add(randomizeRange(cb.asReadOnlyBuffer())); + } + + @DataProvider(name = "charbuffers") + public Object[][] createCharBuffers() { + List buffers = new ArrayList<>(); + + // heap + addCases(CharBuffer.allocate(SIZE), buffers); + addCases(CharBuffer.wrap(new char[SIZE]), buffers); + addCases(ByteBuffer.allocate(SIZE*2).order(ByteOrder.BIG_ENDIAN).asCharBuffer(), + buffers); + addCases(ByteBuffer.allocate(SIZE*2).order(ByteOrder.LITTLE_ENDIAN).asCharBuffer(), + buffers); + + // direct + addCases(ByteBuffer.allocateDirect(SIZE*2).order(ByteOrder.BIG_ENDIAN).asCharBuffer(), + buffers); + addCases(ByteBuffer.allocateDirect(SIZE*2).order(ByteOrder.LITTLE_ENDIAN).asCharBuffer(), + buffers); + + // read-only buffer backed by a CharSequence + buffers.add(CharBuffer.wrap(randomize(CharBuffer.allocate(SIZE)))); + + Object[][] params = new Object[buffers.size()][]; + for (int i = 0; i < buffers.size(); i++) { + CharBuffer cb = buffers.get(i); + params[i] = new Object[] { cb.getClass().getName(), cb }; + } + + return params; + } + + @Test(dataProvider = "charbuffers") + public void testChars(String type, CharBuffer cb) { + System.out.format("%s position=%d, limit=%d%n", type, cb.position(), cb.limit()); + int expected = intSum(cb); + assertEquals(cb.chars().sum(), expected); + assertEquals(cb.chars().parallel().sum(), expected); + } +} diff --git a/jdk/test/java/nio/channels/AsynchronousChannelGroup/Unbounded.java b/jdk/test/java/nio/channels/AsynchronousChannelGroup/Unbounded.java index cc1d71e1beb..001a65bbd14 100644 --- a/jdk/test/java/nio/channels/AsynchronousChannelGroup/Unbounded.java +++ b/jdk/test/java/nio/channels/AsynchronousChannelGroup/Unbounded.java @@ -43,47 +43,24 @@ public class Unbounded { static volatile boolean finished; public static void main(String[] args) throws Exception { - // all accepted connections are added to a queue - final ArrayBlockingQueue queue = - new ArrayBlockingQueue(CONCURRENCY_COUNT); - // create listener to accept connections - final AsynchronousServerSocketChannel listener = + AsynchronousServerSocketChannel listener = AsynchronousServerSocketChannel.open() .bind(new InetSocketAddress(0)); - listener.accept((Void)null, new CompletionHandler() { - public void completed(AsynchronousSocketChannel ch, Void att) { - queue.add(ch); - listener.accept((Void)null, this); - } - public void failed(Throwable exc, Void att) { - if (!finished) { - failed = true; - System.err.println("accept failed: " + exc); - } - } - }); - System.out.println("Listener created."); - // establish lots of connections + // establish connections + + AsynchronousSocketChannel[] clients = new AsynchronousSocketChannel[CONCURRENCY_COUNT]; + AsynchronousSocketChannel[] peers = new AsynchronousSocketChannel[CONCURRENCY_COUNT]; + int port = ((InetSocketAddress)(listener.getLocalAddress())).getPort(); SocketAddress sa = new InetSocketAddress(InetAddress.getLocalHost(), port); - AsynchronousSocketChannel[] channels = - new AsynchronousSocketChannel[CONCURRENCY_COUNT]; + for (int i=0; i= 3) - throw x; - Thread.sleep(50); - } - } + clients[i] = AsynchronousSocketChannel.open(); + Future result = clients[i].connect(sa); + peers[i] = listener.accept().get(); + result.get(); } System.out.println("All connection 
established."); @@ -91,9 +68,9 @@ public class Unbounded { final CyclicBarrier barrier = new CyclicBarrier(CONCURRENCY_COUNT+1); // initiate a read operation on each channel. - for (int i=0; i() { public void completed(Integer bytesRead, AsynchronousSocketChannel ch) { try { @@ -113,13 +90,10 @@ public class Unbounded { System.out.println("All read operations outstanding."); // write data to each of the accepted connections - int remaining = CONCURRENCY_COUNT; - while (remaining > 0) { - AsynchronousSocketChannel ch = queue.take(); - ch.write(ByteBuffer.wrap("welcome".getBytes())).get(); - ch.shutdownOutput(); - ch.close(); - remaining--; + for (AsynchronousSocketChannel peer: peers) { + peer.write(ByteBuffer.wrap("welcome".getBytes())).get(); + peer.shutdownOutput(); + peer.close(); } // wait for all threads to reach the barrier diff --git a/jdk/test/java/nio/file/Files/StreamTest.java b/jdk/test/java/nio/file/Files/StreamTest.java index f89449ad77c..0033dd31f38 100644 --- a/jdk/test/java/nio/file/Files/StreamTest.java +++ b/jdk/test/java/nio/file/Files/StreamTest.java @@ -476,15 +476,25 @@ public class StreamTest { } public void testSecurityException() throws IOException { - Path triggerFile = testFolder.resolve(Paths.get("dir", "SecurityException")); - Files.createFile(triggerFile); - Path sampleFile = testFolder.resolve(Paths.get("dir", "sample")); - Files.createFile(sampleFile); - Path triggerDir = testFolder.resolve(Paths.get("dir2", "SecurityException")); - Files.createDirectories(triggerDir); + Path empty = testFolder.resolve("empty"); + Path triggerFile = Files.createFile(empty.resolve("SecurityException")); + Path sampleFile = Files.createDirectories(empty.resolve("sample")); + + Path dir2 = testFolder.resolve("dir2"); + Path triggerDir = Files.createDirectories(dir2.resolve("SecurityException")); Files.createFile(triggerDir.resolve("fileInSE")); - Path sample = testFolder.resolve(Paths.get("dir2", "file")); - Files.createFile(sample); + Path sample = Files.createFile(dir2.resolve("file")); + + Path triggerLink = null; + Path linkTriggerDir = null; + Path linkTriggerFile = null; + if (supportsLinks) { + Path dir = testFolder.resolve("dir"); + triggerLink = Files.createSymbolicLink(dir.resolve("SecurityException"), empty); + linkTriggerDir = Files.createSymbolicLink(dir.resolve("lnDirSE"), triggerDir); + linkTriggerFile = Files.createSymbolicLink(dir.resolve("lnFileSE"), triggerFile); + } + FaultyFileSystem.FaultyFSProvider fsp = FaultyFileSystem.FaultyFSProvider.getInstance(); FaultyFileSystem fs = (FaultyFileSystem) fsp.newFileSystem(testFolder, null); @@ -492,10 +502,10 @@ public class StreamTest { fsp.setFaultyMode(false); Path fakeRoot = fs.getRoot(); // validate setting - try (CloseableStream s = Files.list(fakeRoot.resolve("dir"))) { + try (CloseableStream s = Files.list(fakeRoot.resolve("empty"))) { String[] result = s.map(path -> path.getFileName().toString()) .toArray(String[]::new); - assertEqualsNoOrder(result, new String[] { "d1","f1", "lnDir2", "SecurityException", "sample" }); + assertEqualsNoOrder(result, new String[] { "SecurityException", "sample" }); } try (CloseableStream s = Files.walk(fakeRoot.resolve("dir2"))) { @@ -504,13 +514,21 @@ public class StreamTest { assertEqualsNoOrder(result, new String[] { "dir2", "SecurityException", "fileInSE", "file" }); } + if (supportsLinks) { + try (CloseableStream s = Files.list(fakeRoot.resolve("dir"))) { + String[] result = s.map(path -> path.getFileName().toString()) + .toArray(String[]::new); + 
assertEqualsNoOrder(result, new String[] { "d1", "f1", "lnDir2", "SecurityException", "lnDirSE", "lnFileSE" }); + } + } + // execute test fsp.setFaultyMode(true); // ignore file cause SecurityException - try (CloseableStream s = Files.walk(fakeRoot.resolve("dir"))) { + try (CloseableStream s = Files.walk(fakeRoot.resolve("empty"))) { String[] result = s.map(path -> path.getFileName().toString()) .toArray(String[]::new); - assertEqualsNoOrder(result, new String[] { "dir", "d1","f1", "lnDir2", "sample" }); + assertEqualsNoOrder(result, new String[] { "empty", "sample" }); } // skip folder cause SecurityException try (CloseableStream s = Files.walk(fakeRoot.resolve("dir2"))) { @@ -519,11 +537,29 @@ public class StreamTest { assertEqualsNoOrder(result, new String[] { "dir2", "file" }); } + if (supportsLinks) { + // not following links + try (CloseableStream s = Files.walk(fakeRoot.resolve("dir"))) { + String[] result = s.map(path -> path.getFileName().toString()) + .toArray(String[]::new); + assertEqualsNoOrder(result, new String[] { "dir", "d1", "f1", "lnDir2", "lnDirSE", "lnFileSE" }); + } + + // following links + try (CloseableStream s = Files.walk(fakeRoot.resolve("dir"), FileVisitOption.FOLLOW_LINKS)) { + String[] result = s.map(path -> path.getFileName().toString()) + .toArray(String[]::new); + // ?? Should fileInSE show up? + // With FaultyFS, it does as no exception thrown for link to "SecurityException" with read on "lnXxxSE" + assertEqualsNoOrder(result, new String[] { "dir", "d1", "f1", "lnDir2", "file", "lnDirSE", "lnFileSE", "fileInSE" }); + } + } + // list instead of walk - try (CloseableStream s = Files.list(fakeRoot.resolve("dir"))) { + try (CloseableStream s = Files.list(fakeRoot.resolve("empty"))) { String[] result = s.map(path -> path.getFileName().toString()) .toArray(String[]::new); - assertEqualsNoOrder(result, new String[] { "d1","f1", "lnDir2", "sample" }); + assertEqualsNoOrder(result, new String[] { "sample" }); } try (CloseableStream s = Files.list(fakeRoot.resolve("dir2"))) { String[] result = s.map(path -> path.getFileName().toString()) @@ -578,6 +614,11 @@ public class StreamTest { if (fs != null) { fs.close(); } + if (supportsLinks) { + Files.delete(triggerLink); + Files.delete(linkTriggerDir); + Files.delete(linkTriggerFile); + } Files.delete(triggerFile); Files.delete(sampleFile); Files.delete(sample); @@ -589,7 +630,6 @@ public class StreamTest { try (CloseableStream s = Files.lines(testFolder.resolve("notExist"), Charset.forName("UTF-8"))) { s.forEach(l -> fail("File is not even exist!")); } catch (IOException ioe) { - ioe.printStackTrace(System.err); assertTrue(ioe instanceof NoSuchFileException); } } diff --git a/jdk/test/java/security/AccessController/LimitedDoPrivileged.java b/jdk/test/java/security/AccessController/LimitedDoPrivileged.java new file mode 100644 index 00000000000..bbdf677fbea --- /dev/null +++ b/jdk/test/java/security/AccessController/LimitedDoPrivileged.java @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
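The StreamTest changes above lean on the difference between Files.list, which stops at the immediate children, and Files.walk, which traverses the whole tree and follows symbolic links only when asked. A standalone sketch of that distinction, written against the final JDK 8 Stream API (the test itself compiles against the interim CloseableStream type):

import java.io.IOException;
import java.nio.file.FileVisitOption;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

public class WalkVsList {
    public static void main(String[] args) throws IOException {
        Path root = Paths.get(args.length > 0 ? args[0] : ".");
        try (Stream<Path> s = Files.list(root)) {          // immediate children only
            System.out.println("children: " + s.count());
        }
        try (Stream<Path> s = Files.walk(root, FileVisitOption.FOLLOW_LINKS)) {
            System.out.println("tree (following links): " + s.count());
        }
    }
}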
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8014097 + * @summary Test the limited privilege scope version of doPrivileged + */ + +import java.security.*; +import java.util.*; + +public class LimitedDoPrivileged { + /* + * Test variations of doPrivileged() with and without a limited privilege scope + * in a sandbox with the usual default permission to read the system properties for the + * file and path separators. + * + * By passing in an "assigned" AccessControlContext that has + * no default permissions we can test how code privileges are being scoped. + */ + + private static final ProtectionDomain domain = + new ProtectionDomain(null, null, null, null); + private static final AccessControlContext acc = + new AccessControlContext(new ProtectionDomain[] { domain }); + private static final PropertyPermission pathPerm = + new PropertyPermission("path.separator", "read"); + private static final PropertyPermission filePerm = + new PropertyPermission("file.separator", "read"); + + public static void main(String[] args) throws Exception { + /* + * Verify that we have the usual default property read permission. + */ + AccessController.getContext().checkPermission(filePerm); + AccessController.getContext().checkPermission(pathPerm); + System.out.println("test 1 passed"); + + /* + * Inject the "no permission" AccessControlContext. + */ + AccessController.doPrivileged(new PrivilegedAction() { + public Object run() { + + /* + * Verify that we no longer have the "path.separator" permission. + */ + try { + AccessController.getContext().checkPermission(pathPerm); + } catch (AccessControlException ace) { + System.out.println("test 2 passed"); + } + + /* + * Verify that we can give ourselves limited privilege to read + * any system property starting with "path.". + */ + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(pathPerm); + return null; + } + }, null, new PropertyPermission("path.*", "read")); + System.out.println("test 3 passed"); + + /* + * Verify that if we give ourselves limited privilege to read + * any system property starting with "path.", it won't give us the + * ability to read "file.separator". + */ + try { + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(filePerm); + return null; + } + }, null, new PropertyPermission("path.*", "read")); + } catch (AccessControlException ace) { + System.out.println("test 4 passed"); + } + + /* + * Verify that capturing and passing in the context with no default + * system property permission grants will prevent access that succeeded + * earlier without the context assignment.
+ */ + final AccessControlContext context = AccessController.getContext(); + try { + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(pathPerm); + return null; + } + }, context, new PropertyPermission("path.*", "read")); + } catch (AccessControlException ace) { + System.out.println("test 5 passed"); + } + + /* + * Verify that we can give ourselves full privilege to read + * any system property starting with "path.". + */ + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(pathPerm); + return null; + } + }); + System.out.println("test 6 passed"); + + /* + * Verify that capturing and passing in the context with no default + * system property permission grants will prevent access that succeeded + * earlier without the context assignment. + */ + try { + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(pathPerm); + return null; + } + }, context); + } catch (AccessControlException ace) { + System.out.println("test 7 passed"); + } + + /* + * Verify that we can give ourselves limited privilege to read + * any system property starting with "path." when a limited + * privilege scope context is captured and passed to a regular + * doPrivileged() as an assigned context. + */ + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + + /* + * Capture the limited privilege scope and inject it into the + * regular doPrivileged(). + */ + final AccessControlContext limitedContext = AccessController.getContext(); + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(pathPerm); + return null; + } + }, limitedContext); + return null; + } + }, null, new PropertyPermission("path.*", "read")); + System.out.println("test 8 passed"); + + /* + * Verify that giving ourselves limited privilege to read + * any system property starting with "path." won't give us the + * ability to read "file.separator" when a limited + * privilege scope context is captured and passed to a regular + * doPrivileged() as an assigned context. + */ + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + + /* + * Capture the limited privilege scope and inject it into the + * regular doPrivileged(). + */ + final AccessControlContext limitedContext = AccessController.getContext(); + try { + AccessController.doPrivileged + (new PrivilegedAction() { + public Object run() { + AccessController.getContext().checkPermission(filePerm); + return null; + } + }, limitedContext); + } catch (AccessControlException ace) { + System.out.println("test 9 passed"); + } + return null; + } + }, null, new PropertyPermission("path.*", "read")); + + return null; + } + }, acc); + } +} diff --git a/jdk/test/java/util/Iterator/PrimitiveIteratorDefaults.java b/jdk/test/java/util/Iterator/PrimitiveIteratorDefaults.java new file mode 100644 index 00000000000..2880578b194 --- /dev/null +++ b/jdk/test/java/util/Iterator/PrimitiveIteratorDefaults.java @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
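The three-argument doPrivileged calls above are the limited form introduced by 8014097: the caller asserts only the listed permissions, and everything else still depends on the rest of the call stack. A minimal usage sketch (runs as-is when no SecurityManager is installed):

import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.PropertyPermission;

public class LimitedPrivSketch {
    public static void main(String[] args) {
        String sep = AccessController.doPrivileged(
                (PrivilegedAction<String>) () -> System.getProperty("path.separator"),
                null,                                      // null = use the captured context
                new PropertyPermission("path.*", "read")); // the only privilege asserted
        System.out.println("path.separator = " + sep);
    }
}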
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import org.testng.annotations.Test; + +import java.util.PrimitiveIterator; +import java.util.function.Consumer; +import java.util.function.DoubleConsumer; +import java.util.function.IntConsumer; +import java.util.function.LongConsumer; + +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; + +/** + * @test + * @run testng PrimitiveIteratorDefaults + * @summary test default methods on PrimitiveIterator + */ +@Test +public class PrimitiveIteratorDefaults { + + public void testIntForEachRemainingWithNull() { + PrimitiveIterator.OfInt i = new PrimitiveIterator.OfInt() { + @Override + public int nextInt() { + return 0; + } + + @Override + public boolean hasNext() { + return false; + } + }; + + executeAndCatch(() -> i.forEachRemaining((IntConsumer) null)); + executeAndCatch(() -> i.forEachRemaining((Consumer) null)); + } + + public void testLongForEachRemainingWithNull() { + PrimitiveIterator.OfLong i = new PrimitiveIterator.OfLong() { + @Override + public long nextLong() { + return 0; + } + + @Override + public boolean hasNext() { + return false; + } + }; + + executeAndCatch(() -> i.forEachRemaining((LongConsumer) null)); + executeAndCatch(() -> i.forEachRemaining((Consumer) null)); + } + + public void testDoubleForEachRemainingWithNull() { + PrimitiveIterator.OfDouble i = new PrimitiveIterator.OfDouble() { + @Override + public double nextDouble() { + return 0; + } + + @Override + public boolean hasNext() { + return false; + } + }; + + executeAndCatch(() -> i.forEachRemaining((DoubleConsumer) null)); + executeAndCatch(() -> i.forEachRemaining((Consumer) null)); + } + + private void executeAndCatch(Runnable r) { + executeAndCatch(NullPointerException.class, r); + } + + private void executeAndCatch(Class expected, Runnable r) { + Exception caught = null; + try { + r.run(); + } + catch (Exception e) { + caught = e; + } + + assertNotNull(caught, + String.format("No Exception was thrown, expected an Exception of %s to be thrown", + expected.getName())); + assertTrue(expected.isInstance(caught), + String.format("Exception thrown %s not an instance of %s", + caught.getClass().getName(), expected.getName())); + } + +} diff --git a/jdk/test/java/util/Locale/LocaleCategory.sh b/jdk/test/java/util/Locale/LocaleCategory.sh index 84b6119ec3d..7715d354225 100644 --- a/jdk/test/java/util/Locale/LocaleCategory.sh +++ b/jdk/test/java/util/Locale/LocaleCategory.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
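PrimitiveIteratorDefaults above pins down one corner of the default methods: both forEachRemaining overloads must throw NullPointerException for a null consumer, even when nothing remains. A compact illustration using an IntStream iterator instead of the test's hand-rolled ones:

import java.util.PrimitiveIterator;
import java.util.function.Consumer;
import java.util.function.IntConsumer;
import java.util.stream.IntStream;

public class ForEachRemainingSketch {
    public static void main(String[] args) {
        PrimitiveIterator.OfInt it = IntStream.rangeClosed(1, 3).iterator();
        // The boxing overload delegates to the primitive IntConsumer one.
        it.forEachRemaining((Consumer<Integer>) System.out::println);
        try {
            it.forEachRemaining((IntConsumer) null); // iterator is now empty
            throw new AssertionError("expected NullPointerException");
        } catch (NullPointerException expected) {
            System.out.println("null consumer rejected");
        }
    }
}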
@@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4700857 6997928 7079486 diff --git a/jdk/test/java/util/Locale/LocaleProviders.java b/jdk/test/java/util/Locale/LocaleProviders.java index e5324e4dd91..30ad90a4bb7 100644 --- a/jdk/test/java/util/Locale/LocaleProviders.java +++ b/jdk/test/java/util/Locale/LocaleProviders.java @@ -64,6 +64,10 @@ public class LocaleProviders { bug8013086Test(args[1], args[2]); break; + case "bug8013903Test": + bug8013903Test(); + break; + default: throw new RuntimeException("Test method '"+methodName+"' not found."); } @@ -195,4 +199,30 @@ public class LocaleProviders { // ParseException is fine in this test, as it's not "UTC" } } + + static void bug8013903Test() { + if (System.getProperty("os.name").startsWith("Windows")) { + Date sampleDate = new Date(0x10000000000L); + String fallbackResult = "Heisei 16.Nov.03 (Wed) AM 11:53:47"; + String jreResult = "\u5e73\u6210 16.11.03 (\u6c34) \u5348\u524d 11:53:47"; + Locale l = new Locale("ja", "JP", "JP"); + SimpleDateFormat sdf = new SimpleDateFormat("GGGG yyyy.MMM.dd '('E')' a hh:mm:ss", l); + String result = sdf.format(sampleDate); + System.out.println(result); + if (LocaleProviderAdapter.getAdapterPreference() + .contains(LocaleProviderAdapter.Type.JRE)) { + if (!jreResult.equals(result)) { + throw new RuntimeException("Format failed. result: \"" + + result + "\", expected: \"" + jreResult); + } + } else { + // should be FALLBACK, as Windows HOST does not return + // display names + if (!fallbackResult.equals(result)) { + throw new RuntimeException("Format failed. result: \"" + + result + "\", expected: \"" + fallbackResult); + } + } + } + } } diff --git a/jdk/test/java/util/Locale/LocaleProviders.sh b/jdk/test/java/util/Locale/LocaleProviders.sh index 4d8bd06ff08..7c238caafe6 100644 --- a/jdk/test/java/util/Locale/LocaleProviders.sh +++ b/jdk/test/java/util/Locale/LocaleProviders.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,11 +21,10 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 6336885 7196799 7197573 7198834 8000245 8000615 8001440 8010666 -# 8013086 8013233 +# 8013086 8013233 8013903 # @summary tests for "java.locale.providers" system property # @compile -XDignore.symbol.file LocaleProviders.java # @run shell/timeout=600 LocaleProviders.sh @@ -300,4 +300,18 @@ PARAM2=JP PARAM3= runTest +# testing 8013903 fix. (Windows only) +METHODNAME=bug8013903Test +PREFLIST=HOST,JRE +PARAM1= +PARAM2= +PARAM3= +runTest +METHODNAME=bug8013903Test +PREFLIST=HOST +PARAM1= +PARAM2= +PARAM3= +runTest + exit $result diff --git a/jdk/test/java/util/Locale/data/deflocale.sh b/jdk/test/java/util/Locale/data/deflocale.sh index b0ee6f82090..101194a9ed8 100644 --- a/jdk/test/java/util/Locale/data/deflocale.sh +++ b/jdk/test/java/util/Locale/data/deflocale.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. 
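The CheckRandomHashSeed test that follows reads the private hashSeed field out of each map via reflection. The pattern in isolation looks like the sketch below; note the field name is specific to the JDK7u/early-JDK8 HashMap, so this fails by design on later releases:

import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;

public class ReadPrivateField {
    public static void main(String[] args) throws Exception {
        Map<String, String> map = new HashMap<>();
        map.put("Key", "Value"); // force the map to initialize its internals first
        Field f = HashMap.class.getDeclaredField("hashSeed"); // era-specific field
        f.setAccessible(true);
        System.out.println("hashSeed = " + f.getInt(map));
    }
}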
# -#!/bin/sh # # # diff --git a/jdk/test/java/util/Map/CheckRandomHashSeed.java b/jdk/test/java/util/Map/CheckRandomHashSeed.java new file mode 100644 index 00000000000..5395ec999ee --- /dev/null +++ b/jdk/test/java/util/Map/CheckRandomHashSeed.java @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8005698 + * @summary Check operation of jdk.map.useRandomSeed property + * @run main CheckRandomHashSeed + * @run main/othervm -Djdk.map.useRandomSeed=false CheckRandomHashSeed + * @run main/othervm -Djdk.map.useRandomSeed=bogus CheckRandomHashSeed + * @run main/othervm -Djdk.map.useRandomSeed=true CheckRandomHashSeed true + * @author Brent Christian + */ +import java.lang.reflect.Field; +import java.util.Map; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Hashtable; +import java.util.WeakHashMap; + +public class CheckRandomHashSeed { + private final static String PROP_NAME = "jdk.map.useRandomSeed"; + static boolean expectRandom = false; + + public static void main(String[] args) { + if (args.length > 0 && args[0].equals("true")) { + expectRandom = true; + } + String hashSeedProp = System.getProperty(PROP_NAME); + boolean propSet = (null != hashSeedProp) + ? Boolean.parseBoolean(hashSeedProp) : false; + if (expectRandom != propSet) { + throw new Error("Error in test setup: " + (expectRandom ? "" : "not " ) + "expecting random hashSeed, but " + PROP_NAME + " is " + (propSet ? 
"" : "not ") + "enabled"); + } + + testMap(new HashMap()); + testMap(new LinkedHashMap()); + testMap(new WeakHashMap()); + testMap(new Hashtable()); + } + + private static void testMap(Map map) { + int hashSeed = getHashSeed(map); + boolean hashSeedIsZero = (hashSeed == 0); + + if (expectRandom != hashSeedIsZero) { + System.out.println("Test passed for " + map.getClass().getSimpleName() + " - expectRandom: " + expectRandom + ", hashSeed: " + hashSeed); + } else { + throw new Error ("Test FAILED for " + map.getClass().getSimpleName() + " - expectRandom: " + expectRandom + ", hashSeed: " + hashSeed); + } + } + + private static int getHashSeed(Map map) { + try { + if (map instanceof HashMap || map instanceof LinkedHashMap) { + map.put("Key", "Value"); + Field hashSeedField = HashMap.class.getDeclaredField("hashSeed"); + hashSeedField.setAccessible(true); + int hashSeed = hashSeedField.getInt(map); + return hashSeed; + } else { + map.put("Key", "Value"); + Field hashSeedField = map.getClass().getDeclaredField("hashSeed"); + hashSeedField.setAccessible(true); + int hashSeed = hashSeedField.getInt(map); + return hashSeed; + } + } catch(Exception e) { + e.printStackTrace(); + throw new Error(e); + } + } +} diff --git a/jdk/test/java/util/Map/Collisions.java b/jdk/test/java/util/Map/Collisions.java index 21f9e87c282..b7170791777 100644 --- a/jdk/test/java/util/Map/Collisions.java +++ b/jdk/test/java/util/Map/Collisions.java @@ -26,6 +26,7 @@ * @bug 7126277 * @run main Collisions -shortrun * @run main/othervm -Djdk.map.althashing.threshold=0 Collisions -shortrun + * @run main/othervm -Djdk.map.useRandomSeed=true Collisions -shortrun * @summary Ensure Maps behave well with lots of hashCode() collisions. * @author Mike Duigou */ diff --git a/jdk/test/java/util/Map/InPlaceOpsCollisions.java b/jdk/test/java/util/Map/InPlaceOpsCollisions.java new file mode 100644 index 00000000000..4a755bd4415 --- /dev/null +++ b/jdk/test/java/util/Map/InPlaceOpsCollisions.java @@ -0,0 +1,665 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8005698 + * @run main InPlaceOpsCollisions -shortrun + * @run main/othervm -Djdk.map.randomseed=true InPlaceOpsCollisions -shortrun + * @summary Ensure overrides of in-place operations in Maps behave well with lots of collisions. + * @author Brent Christian + */ +import java.util.*; +import java.util.function.*; + +public class InPlaceOpsCollisions { + + /** + * Number of elements per map. 
+ */ + private static final int TEST_SIZE = 5000; + + final static class HashableInteger implements Comparable { + + final int value; + final int hashmask; //yes duplication + + HashableInteger(int value, int hashmask) { + this.value = value; + this.hashmask = hashmask; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof HashableInteger) { + HashableInteger other = (HashableInteger) obj; + + return other.value == value; + } + + return false; + } + + @Override + public int hashCode() { + return value % hashmask; + } + + @Override + public int compareTo(HashableInteger o) { + return value - o.value; + } + + @Override + public String toString() { + return Integer.toString(value); + } + } + + static HashableInteger EXTRA_INT_VAL; + static String EXTRA_STRING_VAL; + + private static Object[][] makeTestData(int size) { + HashableInteger UNIQUE_OBJECTS[] = new HashableInteger[size]; + HashableInteger COLLIDING_OBJECTS[] = new HashableInteger[size]; + String UNIQUE_STRINGS[] = new String[size]; + String COLLIDING_STRINGS[] = new String[size]; + + for (int i = 0; i < size; i++) { + UNIQUE_OBJECTS[i] = new HashableInteger(i, Integer.MAX_VALUE); + COLLIDING_OBJECTS[i] = new HashableInteger(i, 10); + UNIQUE_STRINGS[i] = unhash(i); + COLLIDING_STRINGS[i] = (0 == i % 2) + ? UNIQUE_STRINGS[i / 2] + : "\u0000\u0000\u0000\u0000\u0000" + COLLIDING_STRINGS[i - 1]; + } + EXTRA_INT_VAL = new HashableInteger(size, Integer.MAX_VALUE); + EXTRA_STRING_VAL = new String ("Extra Value"); + + return new Object[][] { + new Object[]{"Unique Objects", UNIQUE_OBJECTS}, + new Object[]{"Colliding Objects", COLLIDING_OBJECTS}, + new Object[]{"Unique Strings", UNIQUE_STRINGS}, + new Object[]{"Colliding Strings", COLLIDING_STRINGS} + }; + } + + /** + * Returns a string with a hash equal to the argument. + * + * @return string with a hash equal to the argument. + */ + public static String unhash(int target) { + StringBuilder answer = new StringBuilder(); + if (target < 0) { + // String with hash of Integer.MIN_VALUE, 0x80000000 + answer.append("\\u0915\\u0009\\u001e\\u000c\\u0002"); + + if (target == Integer.MIN_VALUE) { + return answer.toString(); + } + // Find target without sign bit set + target = target & Integer.MAX_VALUE; + } + + unhash0(answer, target); + return answer.toString(); + } + + private static void unhash0(StringBuilder partial, int target) { + int div = target / 31; + int rem = target % 31; + + if (div <= Character.MAX_VALUE) { + if (div != 0) { + partial.append((char) div); + } + partial.append((char) rem); + } else { + unhash0(partial, div); + partial.append((char) rem); + } + } + + private static void realMain(String[] args) throws Throwable { + boolean shortRun = args.length > 0 && args[0].equals("-shortrun"); + + Object[][] mapKeys = makeTestData(shortRun ? (TEST_SIZE / 2) : TEST_SIZE); + + // loop through data sets + for (Object[] keys_desc : mapKeys) { + Map[] maps = (Map[]) new Map[]{ + new HashMap<>(), + new LinkedHashMap<>(), + }; + + // for each map type. 
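The unhash() helper above inverts String.hashCode for non-negative targets by writing the target in base 31 and emitting each digit as a character; since String.hashCode is the base-31 polynomial over the chars, the result hashes back to the target. A standalone check of that property (positive targets only; the negative case needs the special prefix used above):

public class UnhashCheck {
    static String unhash(int target) {
        StringBuilder sb = new StringBuilder();
        unhash0(sb, target);
        return sb.toString();
    }

    static void unhash0(StringBuilder partial, int target) {
        int div = target / 31;
        int rem = target % 31;
        if (div <= Character.MAX_VALUE) {
            if (div != 0) {
                partial.append((char) div); // leading "digit" may exceed 30; still one char
            }
            partial.append((char) rem);
        } else {
            unhash0(partial, div);
            partial.append((char) rem);
        }
    }

    public static void main(String[] args) {
        for (int t : new int[] { 0, 1, 31, 1024, 123456789 }) {
            if (unhash(t).hashCode() != t) {
                throw new AssertionError("hash mismatch for " + t);
            }
        }
        System.out.println("all hash targets matched");
    }
}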
+ for (Map map : maps) { + String desc = (String) keys_desc[0]; + Object[] keys = (Object[]) keys_desc[1]; + try { + testInPlaceOps(map, desc, keys); + } catch(Exception all) { + unexpected("Failed for " + map.getClass().getName() + " with " + desc, all); + } + } + } + } + + private static void testInsertion(Map map, String keys_desc, T[] keys) { + check("map empty", (map.size() == 0) && map.isEmpty()); + + for (int i = 0; i < keys.length; i++) { + check(String.format("insertion: map expected size m%d != i%d", map.size(), i), + map.size() == i); + check(String.format("insertion: put(%s[%d])", keys_desc, i), null == map.put(keys[i], keys[i])); + check(String.format("insertion: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + check(String.format("insertion: containsValue(%s[%d])", keys_desc, i), map.containsValue(keys[i])); + } + + check(String.format("map expected size m%d != k%d", map.size(), keys.length), + map.size() == keys.length); + } + + + private static void testInPlaceOps(Map map, String keys_desc, T[] keys) { + System.out.println(map.getClass() + " : " + keys_desc + ", testInPlaceOps"); + System.out.flush(); + + testInsertion(map, keys_desc, keys); + testPutIfAbsent(map, keys_desc, keys); + + map.clear(); + testInsertion(map, keys_desc, keys); + testRemoveMapping(map, keys_desc, keys); + + map.clear(); + testInsertion(map, keys_desc, keys); + testReplaceOldValue(map, keys_desc, keys); + + map.clear(); + testInsertion(map, keys_desc, keys); + testReplaceIfMapped(map, keys_desc, keys); + + map.clear(); + testInsertion(map, keys_desc, keys); + testComputeIfAbsent(map, keys_desc, keys, (k) -> getExtraVal(keys[0])); + + map.clear(); + testInsertion(map, keys_desc, keys); + testComputeIfAbsent(map, keys_desc, keys, (k) -> null); + + map.clear(); + testInsertion(map, keys_desc, keys); + testComputeIfPresent(map, keys_desc, keys, (k, v) -> getExtraVal(keys[0])); + + map.clear(); + testInsertion(map, keys_desc, keys); + testComputeIfPresent(map, keys_desc, keys, (k, v) -> null); + + if (!keys_desc.contains("Strings")) { // avoid parseInt() number format error + map.clear(); + testInsertion(map, keys_desc, keys); + testComputeNonNull(map, keys_desc, keys); + } + + map.clear(); + testInsertion(map, keys_desc, keys); + testComputeNull(map, keys_desc, keys); + + if (!keys_desc.contains("Strings")) { // avoid parseInt() number format error + map.clear(); + testInsertion(map, keys_desc, keys); + testMergeNonNull(map, keys_desc, keys); + } + + map.clear(); + testInsertion(map, keys_desc, keys); + testMergeNull(map, keys_desc, keys); + } + + + + private static void testPutIfAbsent(Map map, String keys_desc, T[] keys) { + T extraVal = getExtraVal(keys[0]); + T retVal; + removeOddKeys(map, keys); + for (int i = 0; i < keys.length; i++) { + retVal = map.putIfAbsent(keys[i], extraVal); + if (i % 2 == 0) { // even: not absent, not put + check(String.format("putIfAbsent: (%s[%d]) retVal", keys_desc, i), retVal == keys[i]); + check(String.format("putIfAbsent: get(%s[%d])", keys_desc, i), keys[i] == map.get(keys[i])); + check(String.format("putIfAbsent: containsValue(%s[%d])", keys_desc, i), map.containsValue(keys[i])); + } else { // odd: absent, was put + check(String.format("putIfAbsent: (%s[%d]) retVal", keys_desc, i), retVal == null); + check(String.format("putIfAbsent: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + check(String.format("putIfAbsent: !containsValue(%s[%d])", keys_desc, i), !map.containsValue(keys[i])); + } + check(String.format("insertion: 
containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + } + check(String.format("map expected size m%d != k%d", map.size(), keys.length), + map.size() == keys.length); + } + + private static void testRemoveMapping(Map map, String keys_desc, T[] keys) { + T extraVal = getExtraVal(keys[0]); + boolean removed; + int removes = 0; + remapOddKeys(map, keys); + for (int i = 0; i < keys.length; i++) { + removed = map.remove(keys[i], keys[i]); + if (i % 2 == 0) { // even: original mapping, should be removed + check(String.format("removeMapping: retVal(%s[%d])", keys_desc, i), removed); + check(String.format("removeMapping: get(%s[%d])", keys_desc, i), null == map.get(keys[i])); + check(String.format("removeMapping: !containsKey(%s[%d])", keys_desc, i), !map.containsKey(keys[i])); + check(String.format("removeMapping: !containsValue(%s[%d])", keys_desc, i), !map.containsValue(keys[i])); + removes++; + } else { // odd: new mapping, not removed + check(String.format("removeMapping: retVal(%s[%d])", keys_desc, i), !removed); + check(String.format("removeMapping: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + check(String.format("removeMapping: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + check(String.format("removeMapping: containsValue(%s[%d])", keys_desc, i), map.containsValue(extraVal)); + } + } + check(String.format("map expected size m%d != k%d", map.size(), keys.length - removes), + map.size() == keys.length - removes); + } + + private static void testReplaceOldValue(Map map, String keys_desc, T[] keys) { + // remap odds to extraVal + // call replace to replace for extraVal, for all keys + // check that all keys map to value from keys array + T extraVal = getExtraVal(keys[0]); + boolean replaced; + remapOddKeys(map, keys); + + for (int i = 0; i < keys.length; i++) { + replaced = map.replace(keys[i], extraVal, keys[i]); + if (i % 2 == 0) { // even: original mapping, should not be replaced + check(String.format("replaceOldValue: retVal(%s[%d])", keys_desc, i), !replaced); + } else { // odd: new mapping, should be replaced + check(String.format("replaceOldValue: get(%s[%d])", keys_desc, i), replaced); + } + check(String.format("replaceOldValue: get(%s[%d])", keys_desc, i), keys[i] == map.get(keys[i])); + check(String.format("replaceOldValue: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + check(String.format("replaceOldValue: containsValue(%s[%d])", keys_desc, i), map.containsValue(keys[i])); +// removes++; + } + check(String.format("replaceOldValue: !containsValue(%s[%s])", keys_desc, extraVal.toString()), !map.containsValue(extraVal)); + check(String.format("map expected size m%d != k%d", map.size(), keys.length), + map.size() == keys.length); + } + + // TODO: Test case for key mapped to null value + private static void testReplaceIfMapped(Map map, String keys_desc, T[] keys) { + // remove odd keys + // call replace for all keys[] + // odd keys should remain absent, even keys should be mapped to EXTRA, no value from keys[] should be in map + T extraVal = getExtraVal(keys[0]); + int expectedSize1 = 0; + removeOddKeys(map, keys); + int expectedSize2 = map.size(); + + for (int i = 0; i < keys.length; i++) { + T retVal = map.replace(keys[i], extraVal); + if (i % 2 == 0) { // even: still in map, should be replaced + check(String.format("replaceIfMapped: retVal(%s[%d])", keys_desc, i), retVal == keys[i]); + check(String.format("replaceIfMapped: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + 
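These checks encode the conditional-update contract of the Map defaults under test: putIfAbsent writes only when the key is absent, while remove(k, v) and replace(k, old, new) act only when the current mapping matches. The contract in isolation (a compact demo, not part of the test):

import java.util.HashMap;
import java.util.Map;

public class ConditionalOps {
    static void check(String what, boolean ok) {
        if (!ok) throw new AssertionError(what);
    }

    public static void main(String[] args) {
        Map<String, String> m = new HashMap<>();
        check("absent: inserted", m.putIfAbsent("k", "v1") == null);
        check("present: old value returned", "v1".equals(m.putIfAbsent("k", "v2")));
        check("value mismatch: not removed", !m.remove("k", "other"));
        check("old value matches: replaced", m.replace("k", "v1", "v3"));
        check("value matches: removed", m.remove("k", "v3"));
        System.out.println("conditional ops behaved as expected");
    }
}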
check(String.format("replaceIfMapped: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + expectedSize1++; + } else { // odd: was removed, should not be replaced + check(String.format("replaceIfMapped: retVal(%s[%d])", keys_desc, i), retVal == null); + check(String.format("replaceIfMapped: get(%s[%d])", keys_desc, i), null == map.get(keys[i])); + check(String.format("replaceIfMapped: containsKey(%s[%d])", keys_desc, i), !map.containsKey(keys[i])); + } + check(String.format("replaceIfMapped: !containsValue(%s[%d])", keys_desc, i), !map.containsValue(keys[i])); + } + check(String.format("replaceIfMapped: containsValue(%s[%s])", keys_desc, extraVal.toString()), map.containsValue(extraVal)); + check(String.format("map expected size#1 m%d != k%d", map.size(), expectedSize1), + map.size() == expectedSize1); + check(String.format("map expected size#2 m%d != k%d", map.size(), expectedSize2), + map.size() == expectedSize2); + + } + + private static void testComputeIfAbsent(Map map, String keys_desc, T[] keys, + Function mappingFunction) { + // remove a third of the keys + // call computeIfAbsent for all keys, func returns EXTRA + // check that removed keys now -> EXTRA, other keys -> original val + T expectedVal = mappingFunction.apply(keys[0]); + T retVal; + int expectedSize = 0; + removeThirdKeys(map, keys); + for (int i = 0; i < keys.length; i++) { + retVal = map.computeIfAbsent(keys[i], mappingFunction); + if (i % 3 != 2) { // key present, not computed + check(String.format("computeIfAbsent: (%s[%d]) retVal", keys_desc, i), retVal == keys[i]); + check(String.format("computeIfAbsent: get(%s[%d])", keys_desc, i), keys[i] == map.get(keys[i])); + check(String.format("computeIfAbsent: containsValue(%s[%d])", keys_desc, i), map.containsValue(keys[i])); + check(String.format("insertion: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + expectedSize++; + } else { // key absent, computed unless function return null + check(String.format("computeIfAbsent: (%s[%d]) retVal", keys_desc, i), retVal == expectedVal); + check(String.format("computeIfAbsent: get(%s[%d])", keys_desc, i), expectedVal == map.get(keys[i])); + check(String.format("computeIfAbsent: !containsValue(%s[%d])", keys_desc, i), !map.containsValue(keys[i])); + // mapping should not be added if function returns null + check(String.format("insertion: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i]) != (expectedVal == null)); + if (expectedVal != null) { expectedSize++; } + } + } + if (expectedVal != null) { + check(String.format("computeIfAbsent: containsValue(%s[%s])", keys_desc, expectedVal), map.containsValue(expectedVal)); + } + check(String.format("map expected size m%d != k%d", map.size(), expectedSize), + map.size() == expectedSize); + } + + private static void testComputeIfPresent(Map map, String keys_desc, T[] keys, + BiFunction mappingFunction) { + // remove a third of the keys + // call testComputeIfPresent for all keys[] + // removed keys should remain absent, even keys should be mapped to $RESULT + // no value from keys[] should be in map + T funcResult = mappingFunction.apply(keys[0], keys[0]); + int expectedSize1 = 0; + removeThirdKeys(map, keys); + + for (int i = 0; i < keys.length; i++) { + T retVal = map.computeIfPresent(keys[i], mappingFunction); + if (i % 3 != 2) { // key present + if (funcResult == null) { // was removed + check(String.format("replaceIfMapped: containsKey(%s[%d])", keys_desc, i), !map.containsKey(keys[i])); + } else { // value was replaced + 
check(String.format("replaceIfMapped: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + expectedSize1++; + } + check(String.format("computeIfPresent: retVal(%s[%s])", keys_desc, i), retVal == funcResult); + check(String.format("replaceIfMapped: get(%s[%d])", keys_desc, i), funcResult == map.get(keys[i])); + + } else { // odd: was removed, should not be replaced + check(String.format("replaceIfMapped: retVal(%s[%d])", keys_desc, i), retVal == null); + check(String.format("replaceIfMapped: get(%s[%d])", keys_desc, i), null == map.get(keys[i])); + check(String.format("replaceIfMapped: containsKey(%s[%d])", keys_desc, i), !map.containsKey(keys[i])); + } + check(String.format("replaceIfMapped: !containsValue(%s[%d])", keys_desc, i), !map.containsValue(keys[i])); + } + check(String.format("map expected size#1 m%d != k%d", map.size(), expectedSize1), + map.size() == expectedSize1); + } + + private static void testComputeNonNull(Map map, String keys_desc, T[] keys) { + // remove a third of the keys + // call compute() for all keys[] + // all keys should be present: removed keys -> EXTRA, others to k-1 + BiFunction mappingFunction = (k, v) -> { + if (v == null) { + return getExtraVal(keys[0]); + } else { + return keys[Integer.parseInt(k.toString()) - 1]; + } + }; + T extraVal = getExtraVal(keys[0]); + removeThirdKeys(map, keys); + for (int i = 1; i < keys.length; i++) { + T retVal = map.compute(keys[i], mappingFunction); + if (i % 3 != 2) { // key present, should be mapped to k-1 + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == keys[i-1]); + check(String.format("compute: get(%s[%d])", keys_desc, i), keys[i-1] == map.get(keys[i])); + } else { // odd: was removed, should be replaced with EXTRA + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == extraVal); + check(String.format("compute: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + } + check(String.format("compute: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + } + check(String.format("map expected size#1 m%d != k%d", map.size(), keys.length), + map.size() == keys.length); + check(String.format("compute: containsValue(%s[%s])", keys_desc, extraVal.toString()), map.containsValue(extraVal)); + check(String.format("compute: !containsValue(%s,[null])", keys_desc), !map.containsValue(null)); + } + + private static void testComputeNull(Map map, String keys_desc, T[] keys) { + // remove a third of the keys + // call compute() for all keys[] + // removed keys should -> EXTRA + // for other keys: func returns null, should have no mapping + BiFunction mappingFunction = (k, v) -> { + // if absent/null -> EXTRA + // if present -> null + if (v == null) { + return getExtraVal(keys[0]); + } else { + return null; + } + }; + T extraVal = getExtraVal(keys[0]); + int expectedSize = 0; + removeThirdKeys(map, keys); + for (int i = 0; i < keys.length; i++) { + T retVal = map.compute(keys[i], mappingFunction); + if (i % 3 != 2) { // key present, func returned null, should be absent from map + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == null); + check(String.format("compute: get(%s[%d])", keys_desc, i), null == map.get(keys[i])); + check(String.format("compute: containsKey(%s[%d])", keys_desc, i), !map.containsKey(keys[i])); + check(String.format("compute: containsValue(%s[%s])", keys_desc, i), !map.containsValue(keys[i])); + } else { // odd: was removed, should now be mapped to EXTRA + check(String.format("compute: retVal(%s[%d])", keys_desc, i), 
retVal == extraVal); + check(String.format("compute: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + check(String.format("compute: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + expectedSize++; + } + } + check(String.format("compute: containsValue(%s[%s])", keys_desc, extraVal.toString()), map.containsValue(extraVal)); + check(String.format("map expected size#1 m%d != k%d", map.size(), expectedSize), + map.size() == expectedSize); + } + + private static void testMergeNonNull(Map map, String keys_desc, T[] keys) { + // remove a third of the keys + // call merge() for all keys[] + // all keys should be present: removed keys now -> EXTRA, other keys -> k-1 + + // Map to preceding key + BiFunction mappingFunction = (k, v) -> keys[Integer.parseInt(k.toString()) - 1]; + T extraVal = getExtraVal(keys[0]); + removeThirdKeys(map, keys); + for (int i = 1; i < keys.length; i++) { + T retVal = map.merge(keys[i], extraVal, mappingFunction); + if (i % 3 != 2) { // key present, should be mapped to k-1 + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == keys[i-1]); + check(String.format("compute: get(%s[%d])", keys_desc, i), keys[i-1] == map.get(keys[i])); + } else { // odd: was removed, should be replaced with EXTRA + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == extraVal); + check(String.format("compute: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + } + check(String.format("compute: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + } + + check(String.format("map expected size#1 m%d != k%d", map.size(), keys.length), + map.size() == keys.length); + check(String.format("compute: containsValue(%s[%s])", keys_desc, extraVal.toString()), map.containsValue(extraVal)); + check(String.format("compute: !containsValue(%s,[null])", keys_desc), !map.containsValue(null)); + + } + + private static void testMergeNull(Map map, String keys_desc, T[] keys) { + // remove a third of the keys + // call merge() for all keys[] + // result: removed keys -> EXTRA, other keys absent + + BiFunction mappingFunction = (k, v) -> null; + T extraVal = getExtraVal(keys[0]); + int expectedSize = 0; + removeThirdKeys(map, keys); + for (int i = 0; i < keys.length; i++) { + T retVal = map.merge(keys[i], extraVal, mappingFunction); + if (i % 3 != 2) { // key present, func returned null, should be absent from map + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == null); + check(String.format("compute: get(%s[%d])", keys_desc, i), null == map.get(keys[i])); + check(String.format("compute: containsKey(%s[%d])", keys_desc, i), !map.containsKey(keys[i])); + } else { // odd: was removed, should now be mapped to EXTRA + check(String.format("compute: retVal(%s[%d])", keys_desc, i), retVal == extraVal); + check(String.format("compute: get(%s[%d])", keys_desc, i), extraVal == map.get(keys[i])); + check(String.format("compute: containsKey(%s[%d])", keys_desc, i), map.containsKey(keys[i])); + expectedSize++; + } + check(String.format("compute: containsValue(%s[%s])", keys_desc, i), !map.containsValue(keys[i])); + } + check(String.format("compute: containsValue(%s[%s])", keys_desc, extraVal.toString()), map.containsValue(extraVal)); + check(String.format("map expected size#1 m%d != k%d", map.size(), expectedSize), + map.size() == expectedSize); + } + + /* + * Return the EXTRA val for the key type being used + */ + private static T getExtraVal(T key) { + if (key instanceof HashableInteger) { + return (T)EXTRA_INT_VAL; + 
} else { + return (T)EXTRA_STRING_VAL; + } + } + + /* + * Remove half of the keys + */ + private static void removeOddKeys(Map map, /*String keys_desc, */ T[] keys) { + int removes = 0; + for (int i = 0; i < keys.length; i++) { + if (i % 2 != 0) { + map.remove(keys[i]); + removes++; + } + } + check(String.format("map expected size m%d != k%d", map.size(), keys.length - removes), + map.size() == keys.length - removes); + } + + /* + * Remove every third key + * This will hopefully leave some removed keys in TreeBins for, e.g., computeIfAbsent + * w/ a func that returns null. + * + * TODO: consider using this in other tests (and maybe adding a remapThirdKeys) + */ + private static void removeThirdKeys(Map map, /*String keys_desc, */ T[] keys) { + int removes = 0; + for (int i = 0; i < keys.length; i++) { + if (i % 3 == 2) { + map.remove(keys[i]); + removes++; + } + } + check(String.format("map expected size m%d != k%d", map.size(), keys.length - removes), + map.size() == keys.length - removes); + } + + /* + * Re-map the odd-numbered keys to map to the EXTRA value + */ + private static void remapOddKeys(Map map, /*String keys_desc, */ T[] keys) { + T extraVal = getExtraVal(keys[0]); + for (int i = 0; i < keys.length; i++) { + if (i % 2 != 0) { + map.put(keys[i], extraVal); + } + } + } + + //--------------------- Infrastructure --------------------------- + static volatile int passed = 0, failed = 0; + + static void pass() { + passed++; + } + + static void fail() { + failed++; + (new Error("Failure")).printStackTrace(System.err); + } + + static void fail(String msg) { + failed++; + (new Error("Failure: " + msg)).printStackTrace(System.err); + } + + static void abort() { + fail(); + System.exit(1); + } + + static void abort(String msg) { + fail(msg); + System.exit(1); + } + + static void unexpected(String msg, Throwable t) { + System.err.println("Unexpected: " + msg); + unexpected(t); + } + + static void unexpected(Throwable t) { + failed++; + t.printStackTrace(System.err); + } + + static void check(boolean cond) { + if (cond) { + pass(); + } else { + fail(); + } + } + + static void check(String desc, boolean cond) { + if (cond) { + pass(); + } else { + fail(desc); + } + } + + static void equal(Object x, Object y) { + if (Objects.equals(x, y)) { + pass(); + } else { + fail(x + " not equal to " + y); + } + } + + public static void main(String[] args) throws Throwable { + Thread.currentThread().setName(Collisions.class.getName()); +// Thread.currentThread().setPriority(Thread.MAX_PRIORITY); + try { + realMain(args); + } catch (Throwable t) { + unexpected(t); + } + + System.out.printf("%nPassed = %d, failed = %d%n%n", passed, failed); + if (failed > 0) { + throw new Error("Some tests failed"); + } + } +} diff --git a/jdk/test/java/util/Map/TreeBinSplitBackToEntries.java b/jdk/test/java/util/Map/TreeBinSplitBackToEntries.java new file mode 100644 index 00000000000..6093147a24d --- /dev/null +++ b/jdk/test/java/util/Map/TreeBinSplitBackToEntries.java @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.util.*; +import java.lang.reflect.Field; + +/* + * @test + * @bug 8005698 + * @summary Test the case where TreeBin.splitTreeBin() converts a bin back to an Entry list + * @run main TreeBinSplitBackToEntries unused + * @author Brent Christian + */ + +public class TreeBinSplitBackToEntries { + private static int EXPECTED_TREE_THRESHOLD = 16; + + // Easiest if this covers one bit higher then 'bit' in splitTreeBin() on the + // call where the TreeBin is converted back to an Entry list + private static int HASHMASK = 0x7F; + private static boolean verbose = false; + private static boolean fastFail = false; + private static boolean failed = false; + + static void printlnIfVerbose(String msg) { + if (verbose) {System.out.println(msg); } + } + + public static void main(String[] args) { + for (String arg : args) { + switch(arg) { + case "-verbose": + verbose = true; + break; + case "-fastfail": + fastFail = true; + break; + } + } + checkTreeThreshold(); + testMapHiTree(); + testMapLoTree(); + if (failed) { + System.out.println("Test Failed"); + System.exit(1); + } else { + System.out.println("Test Passed"); + } + } + + public static void checkTreeThreshold() { + int threshold = -1; + try { + Class treeBinClass = Class.forName("java.util.HashMap$TreeBin"); + Field treeThreshold = treeBinClass.getDeclaredField("TREE_THRESHOLD"); + treeThreshold.setAccessible(true); + threshold = treeThreshold.getInt(treeBinClass); + } catch (ClassNotFoundException|NoSuchFieldException|IllegalAccessException e) { + e.printStackTrace(); + throw new Error("Problem accessing TreeBin.TREE_THRESHOLD", e); + } + check("Expected TREE_THRESHOLD: " + EXPECTED_TREE_THRESHOLD +", found: " + threshold, + threshold == EXPECTED_TREE_THRESHOLD); + printlnIfVerbose("TREE_THRESHOLD: " + threshold); + } + + public static void testMapHiTree() { + Object[][] mapKeys = makeHiTreeTestData(); + testMapsForKeys(mapKeys, "hiTree"); + } + + public static void testMapLoTree() { + Object[][] mapKeys = makeLoTreeTestData(); + + testMapsForKeys(mapKeys, "loTree"); + } + + public static void testMapsForKeys(Object[][] mapKeys, String desc) { + // loop through data sets + for (Object[] keys_desc : mapKeys) { + Map[] maps = (Map[]) new Map[]{ + new HashMap<>(4, 0.8f), + new LinkedHashMap<>(4, 0.8f), + }; + // for each map type. 
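+            // (HashMap and LinkedHashMap share HashMap's bucket implementation,
+            // including the TreeBin under test, so both are exercised here)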
+ for (Map map : maps) { + Object[] keys = (Object[]) keys_desc[1]; + System.out.println(desc + ": testPutThenGet() for " + map.getClass()); + testPutThenGet(map, keys); + } + } + } + + private static void testPutThenGet(Map map, T[] keys) { + for (T key : keys) { + printlnIfVerbose("put()ing 0x" + Integer.toHexString(Integer.parseInt(key.toString())) + ", hashCode=" + Integer.toHexString(key.hashCode())); + map.put(key, key); + } + for (T key : keys) { + check("key: 0x" + Integer.toHexString(Integer.parseInt(key.toString())) + " not found in resulting " + map.getClass().getSimpleName(), map.get(key) != null); + } + } + + /* Data to force a non-empty loTree in TreeBin.splitTreeBin() to be converted back + * into an Entry list + */ + private static Object[][] makeLoTreeTestData() { + HashableInteger COLLIDING_OBJECTS[] = new HashableInteger[] { + new HashableInteger( 0x23, HASHMASK), + new HashableInteger( 0x123, HASHMASK), + new HashableInteger( 0x323, HASHMASK), + new HashableInteger( 0x523, HASHMASK), + + new HashableInteger( 0x723, HASHMASK), + new HashableInteger( 0x923, HASHMASK), + new HashableInteger( 0xB23, HASHMASK), + new HashableInteger( 0xD23, HASHMASK), + + new HashableInteger( 0xF23, HASHMASK), + new HashableInteger( 0xF123, HASHMASK), + new HashableInteger( 0x1023, HASHMASK), + new HashableInteger( 0x1123, HASHMASK), + + new HashableInteger( 0x1323, HASHMASK), + new HashableInteger( 0x1523, HASHMASK), + new HashableInteger( 0x1723, HASHMASK), + new HashableInteger( 0x1923, HASHMASK), + + new HashableInteger( 0x1B23, HASHMASK), + new HashableInteger( 0x1D23, HASHMASK), + new HashableInteger( 0x3123, HASHMASK), + new HashableInteger( 0x3323, HASHMASK), + new HashableInteger( 0x3523, HASHMASK), + + new HashableInteger( 0x3723, HASHMASK), + new HashableInteger( 0x1001, HASHMASK), + new HashableInteger( 0x4001, HASHMASK), + new HashableInteger( 0x1, HASHMASK), + }; + return new Object[][] { + new Object[]{"Colliding Objects", COLLIDING_OBJECTS}, + }; + } + + /* Data to force the hiTree in TreeBin.splitTreeBin() to be converted back + * into an Entry list + */ + private static Object[][] makeHiTreeTestData() { + HashableInteger COLLIDING_OBJECTS[] = new HashableInteger[] { + new HashableInteger( 0x1, HASHMASK), + new HashableInteger( 0x101, HASHMASK), + new HashableInteger( 0x301, HASHMASK), + new HashableInteger( 0x501, HASHMASK), + new HashableInteger( 0x701, HASHMASK), + + new HashableInteger( 0x1001, HASHMASK), + new HashableInteger( 0x1101, HASHMASK), + new HashableInteger( 0x1301, HASHMASK), + + new HashableInteger( 0x1501, HASHMASK), + new HashableInteger( 0x1701, HASHMASK), + new HashableInteger( 0x4001, HASHMASK), + new HashableInteger( 0x4101, HASHMASK), + new HashableInteger( 0x4301, HASHMASK), + + new HashableInteger( 0x4501, HASHMASK), + new HashableInteger( 0x4701, HASHMASK), + new HashableInteger( 0x8001, HASHMASK), + new HashableInteger( 0x8101, HASHMASK), + + + new HashableInteger( 0x8301, HASHMASK), + new HashableInteger( 0x8501, HASHMASK), + new HashableInteger( 0x8701, HASHMASK), + new HashableInteger( 0x9001, HASHMASK), + + new HashableInteger( 0x23, HASHMASK), + new HashableInteger( 0x123, HASHMASK), + new HashableInteger( 0x323, HASHMASK), + new HashableInteger( 0x523, HASHMASK), + }; + return new Object[][] { + new Object[]{"Colliding Objects", COLLIDING_OBJECTS}, + }; + } + + static void check(String desc, boolean cond) { + if (!cond) { + fail(desc); + } + } + + static void fail(String msg) { + failed = true; + (new Error("Failure: " + 
msg)).printStackTrace(System.err); + if (fastFail) { + System.exit(1); + } + } + + final static class HashableInteger implements Comparable { + final int value; + final int hashmask; //yes duplication + + HashableInteger(int value, int hashmask) { + this.value = value; + this.hashmask = hashmask; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof HashableInteger) { + HashableInteger other = (HashableInteger) obj; + return other.value == value; + } + return false; + } + + @Override + public int hashCode() { + // This version ANDs the mask + return value & hashmask; + } + + @Override + public int compareTo(HashableInteger o) { + return value - o.value; + } + + @Override + public String toString() { + return Integer.toString(value); + } + } +} diff --git a/jdk/test/java/util/PluggableLocale/BreakIteratorProviderTest.sh b/jdk/test/java/util/PluggableLocale/BreakIteratorProviderTest.sh index c98ca539483..050d422324b 100644 --- a/jdk/test/java/util/PluggableLocale/BreakIteratorProviderTest.sh +++ b/jdk/test/java/util/PluggableLocale/BreakIteratorProviderTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 diff --git a/jdk/test/java/util/PluggableLocale/CalendarDataProviderTest.sh b/jdk/test/java/util/PluggableLocale/CalendarDataProviderTest.sh index c4d1f90d76c..36234d7cdc7 100644 --- a/jdk/test/java/util/PluggableLocale/CalendarDataProviderTest.sh +++ b/jdk/test/java/util/PluggableLocale/CalendarDataProviderTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 7058207 8000986 diff --git a/jdk/test/java/util/PluggableLocale/ClasspathTest.sh b/jdk/test/java/util/PluggableLocale/ClasspathTest.sh index 0d0871b3e4b..99749540553 100644 --- a/jdk/test/java/util/PluggableLocale/ClasspathTest.sh +++ b/jdk/test/java/util/PluggableLocale/ClasspathTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 6388652 diff --git a/jdk/test/java/util/PluggableLocale/CollatorProviderTest.sh b/jdk/test/java/util/PluggableLocale/CollatorProviderTest.sh index ce2f6a285d3..01e1b277f5b 100644 --- a/jdk/test/java/util/PluggableLocale/CollatorProviderTest.sh +++ b/jdk/test/java/util/PluggableLocale/CollatorProviderTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. 
# -#!/bin/sh # # @test # @bug 4052440 diff --git a/jdk/test/java/util/PluggableLocale/CurrencyNameProviderTest.sh b/jdk/test/java/util/PluggableLocale/CurrencyNameProviderTest.sh index 8fb9459116c..9e8d4f90d61 100644 --- a/jdk/test/java/util/PluggableLocale/CurrencyNameProviderTest.sh +++ b/jdk/test/java/util/PluggableLocale/CurrencyNameProviderTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 7199750 8000997 diff --git a/jdk/test/java/util/PluggableLocale/DateFormatProviderTest.sh b/jdk/test/java/util/PluggableLocale/DateFormatProviderTest.sh index 228a2484a10..d8a9a288854 100644 --- a/jdk/test/java/util/PluggableLocale/DateFormatProviderTest.sh +++ b/jdk/test/java/util/PluggableLocale/DateFormatProviderTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 7003643 diff --git a/jdk/test/java/util/PluggableLocale/DateFormatSymbolsProviderTest.sh b/jdk/test/java/util/PluggableLocale/DateFormatSymbolsProviderTest.sh index 9103e8283b9..51ebe6fa630 100644 --- a/jdk/test/java/util/PluggableLocale/DateFormatSymbolsProviderTest.sh +++ b/jdk/test/java/util/PluggableLocale/DateFormatSymbolsProviderTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 7200341 diff --git a/jdk/test/java/util/PluggableLocale/DecimalFormatSymbolsProviderTest.sh b/jdk/test/java/util/PluggableLocale/DecimalFormatSymbolsProviderTest.sh index 45b84e48f71..7d6e9bf8ada 100644 --- a/jdk/test/java/util/PluggableLocale/DecimalFormatSymbolsProviderTest.sh +++ b/jdk/test/java/util/PluggableLocale/DecimalFormatSymbolsProviderTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 diff --git a/jdk/test/java/util/PluggableLocale/ExecTest.sh b/jdk/test/java/util/PluggableLocale/ExecTest.sh index 698d027a4af..3aa001c86ca 100644 --- a/jdk/test/java/util/PluggableLocale/ExecTest.sh +++ b/jdk/test/java/util/PluggableLocale/ExecTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # # diff --git a/jdk/test/java/util/PluggableLocale/GenericTest.sh b/jdk/test/java/util/PluggableLocale/GenericTest.sh index ba91be6a34d..0d53be11c3e 100644 --- a/jdk/test/java/util/PluggableLocale/GenericTest.sh +++ b/jdk/test/java/util/PluggableLocale/GenericTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved. 
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 diff --git a/jdk/test/java/util/PluggableLocale/LocaleNameProviderTest.sh b/jdk/test/java/util/PluggableLocale/LocaleNameProviderTest.sh index 0df042631d3..489c92c779e 100644 --- a/jdk/test/java/util/PluggableLocale/LocaleNameProviderTest.sh +++ b/jdk/test/java/util/PluggableLocale/LocaleNameProviderTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 8000273 diff --git a/jdk/test/java/util/PluggableLocale/NumberFormatProviderTest.sh b/jdk/test/java/util/PluggableLocale/NumberFormatProviderTest.sh index 7f4e902a914..1c952e84de8 100644 --- a/jdk/test/java/util/PluggableLocale/NumberFormatProviderTest.sh +++ b/jdk/test/java/util/PluggableLocale/NumberFormatProviderTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 7003643 diff --git a/jdk/test/java/util/PluggableLocale/TimeZoneNameProviderTest.sh b/jdk/test/java/util/PluggableLocale/TimeZoneNameProviderTest.sh index 4e34ae09814..88f05c498d3 100644 --- a/jdk/test/java/util/PluggableLocale/TimeZoneNameProviderTest.sh +++ b/jdk/test/java/util/PluggableLocale/TimeZoneNameProviderTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -20,7 +21,6 @@ # or visit www.oracle.com if you need additional information or have any # questions. # -#!/bin/sh # # @test # @bug 4052440 8003267 diff --git a/jdk/test/java/util/ResourceBundle/Bug6299235Test.sh b/jdk/test/java/util/ResourceBundle/Bug6299235Test.sh index 8bcb74f102d..d348d495d50 100644 --- a/jdk/test/java/util/ResourceBundle/Bug6299235Test.sh +++ b/jdk/test/java/util/ResourceBundle/Bug6299235Test.sh @@ -1,4 +1,4 @@ -# +#!/bin/sh # Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # diff --git a/jdk/test/java/util/Spliterator/SpliteratorCollisions.java b/jdk/test/java/util/Spliterator/SpliteratorCollisions.java new file mode 100644 index 00000000000..604d90b9f20 --- /dev/null +++ b/jdk/test/java/util/Spliterator/SpliteratorCollisions.java @@ -0,0 +1,707 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8005698 + * @run testng SpliteratorCollisions + * @summary Spliterator traversing and splitting hash maps containing colliding hashes + * @author Brent Christian + */ + +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Deque; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Spliterator; +import java.util.TreeSet; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.LongConsumer; +import java.util.function.Supplier; +import java.util.function.UnaryOperator; + +import static org.testng.Assert.*; +import static org.testng.Assert.assertEquals; + +@Test +public class SpliteratorCollisions { + + private static List SIZES = Arrays.asList(0, 1, 10, 100, 1000); + + private static class SpliteratorDataBuilder { + List data; + List exp; + Map mExp; + + SpliteratorDataBuilder(List data, List exp) { + this.data = data; + this.exp = exp; + this.mExp = createMap(exp); + } + + Map createMap(List l) { + Map m = new LinkedHashMap<>(); + for (T t : l) { + m.put(t, t); + } + return m; + } + + void add(String description, Collection expected, Supplier> s) { + description = joiner(description).toString(); + data.add(new Object[]{description, expected, s}); + } + + void add(String description, Supplier> s) { + add(description, exp, s); + } + + void addCollection(Function, ? extends Collection> c) { + add("new " + c.apply(Collections.emptyList()).getClass().getName() + ".spliterator()", + () -> c.apply(exp).spliterator()); + } + + void addList(Function, ? extends List> l) { + // @@@ If collection is instance of List then add sub-list tests + addCollection(l); + } + + void addMap(Function, ? extends Map> m) { + String description = "new " + m.apply(Collections.emptyMap()).getClass().getName(); + add(description + ".keySet().spliterator()", () -> m.apply(mExp).keySet().spliterator()); + add(description + ".values().spliterator()", () -> m.apply(mExp).values().spliterator()); + add(description + ".entrySet().spliterator()", mExp.entrySet(), () -> m.apply(mExp).entrySet().spliterator()); + } + + StringBuilder joiner(String description) { + return new StringBuilder(description). + append(" {"). + append("size=").append(exp.size()). 
+ append("}"); + } + } + + static Object[][] spliteratorDataProvider; + + @DataProvider(name = "HashableIntSpliterator") + public static Object[][] spliteratorDataProvider() { + if (spliteratorDataProvider != null) { + return spliteratorDataProvider; + } + + List data = new ArrayList<>(); + for (int size : SIZES) { + List exp = listIntRange(size, false); + SpliteratorDataBuilder db = new SpliteratorDataBuilder<>(data, exp); + + // Maps + db.addMap(HashMap::new); + db.addMap(LinkedHashMap::new); + + // Collections that use HashMap + db.addCollection(HashSet::new); + db.addCollection(LinkedHashSet::new); + db.addCollection(TreeSet::new); + } + return spliteratorDataProvider = data.toArray(new Object[0][]); + } + + static Object[][] spliteratorDataProviderWithNull; + + @DataProvider(name = "HashableIntSpliteratorWithNull") + public static Object[][] spliteratorNullDataProvider() { + if (spliteratorDataProviderWithNull != null) { + return spliteratorDataProviderWithNull; + } + + List data = new ArrayList<>(); + for (int size : SIZES) { + List exp = listIntRange(size, true); + exp.add(0, null); + SpliteratorDataBuilder db = new SpliteratorDataBuilder<>(data, exp); + + // Maps + db.addMap(HashMap::new); + db.addMap(LinkedHashMap::new); + // TODO: add this back in if we decide to keep TreeBin in WeakHashMap + //db.addMap(WeakHashMap::new); + + // Collections that use HashMap + db.addCollection(HashSet::new); + db.addCollection(LinkedHashSet::new); +// db.addCollection(TreeSet::new); + + } + return spliteratorDataProviderWithNull = data.toArray(new Object[0][]); + } + + final static class HashableInteger implements Comparable { + + final int value; + final int hashmask; //yes duplication + + HashableInteger(int value, int hashmask) { + this.value = value; + this.hashmask = hashmask; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof HashableInteger) { + HashableInteger other = (HashableInteger) obj; + + return other.value == value; + } + + return false; + } + + @Override + public int hashCode() { + return value % hashmask; + } + + @Override + public int compareTo(HashableInteger o) { + return value - o.value; + } + + @Override + public String toString() { + return Integer.toString(value); + } + } + + private static List listIntRange(int upTo, boolean withNull) { + List exp = new ArrayList<>(); + if (withNull) { + exp.add(null); + } + for (int i = 0; i < upTo; i++) { + exp.add(new HashableInteger(i, 10)); + } + return Collections.unmodifiableList(exp); + } + + @Test(dataProvider = "HashableIntSpliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testNullPointerException(String description, Collection exp, Supplier s) { + executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining(null)); + executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance(null)); + } + + @Test(dataProvider = "HashableIntSpliteratorWithNull") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testNullPointerExceptionWithNull(String description, Collection exp, Supplier s) { + executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining(null)); + executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance(null)); + } + + + @Test(dataProvider = "HashableIntSpliterator") + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testForEach(String description, Collection exp, Supplier s) { + testForEach(exp, s, (Consumer b) -> b); + } + + @Test(dataProvider = "HashableIntSpliteratorWithNull") + 
@SuppressWarnings({"unchecked", "rawtypes"})
+    public void testForEachWithNull(String description, Collection exp, Supplier<Spliterator> s) {
+        testForEach(exp, s, (Consumer<Object> b) -> b);
+    }
+
+    @Test(dataProvider = "HashableIntSpliterator")
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void testTryAdvance(String description, Collection exp, Supplier<Spliterator> s) {
+        testTryAdvance(exp, s, (Consumer<Object> b) -> b);
+    }
+
+    @Test(dataProvider = "HashableIntSpliteratorWithNull")
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void testTryAdvanceWithNull(String description, Collection exp, Supplier<Spliterator> s) {
+        testTryAdvance(exp, s, (Consumer<Object> b) -> b);
+    }
+
+/* skip this test until 8013649 is fixed
+    @Test(dataProvider = "HashableIntSpliterator")
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void testMixedTryAdvanceForEach(String description, Collection exp, Supplier<Spliterator> s) {
+        testMixedTryAdvanceForEach(exp, s, (Consumer<Object> b) -> b);
+    }
+
+    @Test(dataProvider = "HashableIntSpliteratorWithNull")
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void testMixedTryAdvanceForEachWithNull(String description, Collection exp, Supplier<Spliterator> s) {
+        testMixedTryAdvanceForEach(exp, s, (Consumer<Object> b) -> b);
+    }
+*/
+
+    @Test(dataProvider = "HashableIntSpliterator")
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void testSplitAfterFullTraversal(String description, Collection exp, Supplier<Spliterator> s) {
+        testSplitAfterFullTraversal(s, (Consumer<Object> b) -> b);
+    }
+
+    @Test(dataProvider = "HashableIntSpliteratorWithNull")
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void testSplitAfterFullTraversalWithNull(String description, Collection exp, Supplier<Spliterator> s) {
+        testSplitAfterFullTraversal(s, (Consumer<Object> b) -> b);
+    }
+
+    @Test(dataProvider = "HashableIntSpliterator")
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void testSplitOnce(String description, Collection exp, Supplier<Spliterator> s) {
+        testSplitOnce(exp, s, (Consumer<Object> b) -> b);
+    }
+
+    @Test(dataProvider = "HashableIntSpliteratorWithNull")
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void testSplitOnceWithNull(String description, Collection exp, Supplier<Spliterator> s) {
+        testSplitOnce(exp, s, (Consumer<Object> b) -> b);
+    }
+
+    @Test(dataProvider = "HashableIntSpliterator")
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void testSplitSixDeep(String description, Collection exp, Supplier<Spliterator> s) {
+        testSplitSixDeep(exp, s, (Consumer<Object> b) -> b);
+    }
+
+    @Test(dataProvider = "HashableIntSpliteratorWithNull")
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void testSplitSixDeepWithNull(String description, Collection exp, Supplier<Spliterator> s) {
+        testSplitSixDeep(exp, s, (Consumer<Object> b) -> b);
+    }
+
+    @Test(dataProvider = "HashableIntSpliterator")
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void testSplitUntilNull(String description, Collection exp, Supplier<Spliterator> s) {
+        testSplitUntilNull(exp, s, (Consumer<Object> b) -> b);
+    }
+
+    @Test(dataProvider = "HashableIntSpliteratorWithNull")
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void testSplitUntilNullWithNull(String description, Collection exp, Supplier<Spliterator> s) {
+        testSplitUntilNull(exp, s, (Consumer<Object> b) -> b);
+    }
+
+    private static <T, S extends Spliterator<T>> void testForEach(
+            Collection<T> exp,
+            Supplier<S> supplier,
+            UnaryOperator<Consumer<T>> boxingAdapter) {
+        S spliterator = supplier.get();
+        long sizeIfKnown = spliterator.getExactSizeIfKnown();
+        boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED);
+
+        ArrayList<T> fromForEach = new ArrayList<>();
+        spliterator = supplier.get();
+        Consumer<T> addToFromForEach = boxingAdapter.apply(fromForEach::add);
+        spliterator.forEachRemaining(addToFromForEach);
+
+        // Assert that forEach now produces no elements
+        spliterator.forEachRemaining(boxingAdapter.apply(
+                e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e)));
+        // Assert that tryAdvance now produces no elements
+        spliterator.tryAdvance(boxingAdapter.apply(
+                e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e)));
+
+        // assert that size, tryAdvance, and forEach are consistent
+        if (sizeIfKnown >= 0) {
+            assertEquals(sizeIfKnown, exp.size());
+        }
+        if (exp.contains(null)) {
+            assertTrue(fromForEach.contains(null));
+        }
+        assertEquals(fromForEach.size(), exp.size());
+
+        assertContents(fromForEach, exp, isOrdered);
+    }
+
+    private static <T, S extends Spliterator<T>> void testTryAdvance(
+            Collection<T> exp,
+            Supplier<S> supplier,
+            UnaryOperator<Consumer<T>> boxingAdapter) {
+        S spliterator = supplier.get();
+        long sizeIfKnown = spliterator.getExactSizeIfKnown();
+        boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED);
+
+        spliterator = supplier.get();
+        ArrayList<T> fromTryAdvance = new ArrayList<>();
+        Consumer<T> addToFromTryAdvance = boxingAdapter.apply(fromTryAdvance::add);
+        while (spliterator.tryAdvance(addToFromTryAdvance)) { }
+
+        // Assert that forEach now produces no elements
+        spliterator.forEachRemaining(boxingAdapter.apply(
+                e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e)));
+        // Assert that tryAdvance now produces no elements
+        spliterator.tryAdvance(boxingAdapter.apply(
+                e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e)));
+
+        // assert that size, tryAdvance, and forEach are consistent
+        if (sizeIfKnown >= 0) {
+            assertEquals(sizeIfKnown, exp.size());
+        }
+        assertEquals(fromTryAdvance.size(), exp.size());
+
+        assertContents(fromTryAdvance, exp, isOrdered);
+    }
+
+    private static <T, S extends Spliterator<T>> void testMixedTryAdvanceForEach(
+            Collection<T> exp,
+            Supplier<S> supplier,
+            UnaryOperator<Consumer<T>> boxingAdapter) {
+        S spliterator = supplier.get();
+        long sizeIfKnown = spliterator.getExactSizeIfKnown();
+        boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED);
+
+        // tryAdvance first few elements, then forEach the rest
+        ArrayList<T> dest = new ArrayList<>();
+        spliterator = supplier.get();
+        Consumer<T> addToDest = boxingAdapter.apply(dest::add);
+        for (int i = 0; i < 10 && spliterator.tryAdvance(addToDest); i++) { }
+        spliterator.forEachRemaining(addToDest);
+
+        // Assert that forEach now produces no elements
+        spliterator.forEachRemaining(boxingAdapter.apply(
+                e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e)));
+        // Assert that tryAdvance now produces no elements
+        spliterator.tryAdvance(boxingAdapter.apply(
+                e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e)));
+
+        if (sizeIfKnown >= 0) {
+            assertEquals(sizeIfKnown, dest.size());
+        }
+        assertEquals(dest.size(), exp.size());
+
+        if (isOrdered) {
+            assertEquals(dest, exp);
+        }
+        else {
+            assertContentsUnordered(dest, exp);
+        }
+    }
+
+    private static <T, S extends Spliterator<T>> void testSplitAfterFullTraversal(
+            Supplier<S> supplier,
+            UnaryOperator<Consumer<T>> boxingAdapter) {
+        // Full traversal using tryAdvance
+        Spliterator<T> spliterator = supplier.get();
+        while (spliterator.tryAdvance(boxingAdapter.apply(e -> { }))) { }
+        Spliterator<T> split = spliterator.trySplit();
+        assertNull(split);
+
+        // Full traversal using forEach
+        spliterator = supplier.get();
+        spliterator.forEachRemaining(boxingAdapter.apply(e -> { }));
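+        // as with tryAdvance above, an exhausted spliterator must return null from trySplit()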
+ split = spliterator.trySplit(); + assertNull(split); + + // Full traversal using tryAdvance then forEach + spliterator = supplier.get(); + spliterator.tryAdvance(boxingAdapter.apply(e -> { })); + spliterator.forEachRemaining(boxingAdapter.apply(e -> { + })); + split = spliterator.trySplit(); + assertNull(split); + } + + private static > void testSplitOnce( + Collection exp, + Supplier supplier, + UnaryOperator> boxingAdapter) { + S spliterator = supplier.get(); + long sizeIfKnown = spliterator.getExactSizeIfKnown(); + boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); + + ArrayList fromSplit = new ArrayList<>(); + Spliterator s1 = supplier.get(); + Spliterator s2 = s1.trySplit(); + long s1Size = s1.getExactSizeIfKnown(); + long s2Size = (s2 != null) ? s2.getExactSizeIfKnown() : 0; + + Consumer addToFromSplit = boxingAdapter.apply(fromSplit::add); + if (s2 != null) + s2.forEachRemaining(addToFromSplit); + s1.forEachRemaining(addToFromSplit); + + if (sizeIfKnown >= 0) { + assertEquals(sizeIfKnown, fromSplit.size()); + if (s1Size >= 0 && s2Size >= 0) + assertEquals(sizeIfKnown, s1Size + s2Size); + } + assertContents(fromSplit, exp, isOrdered); + } + + private static > void testSplitSixDeep( + Collection exp, + Supplier supplier, + UnaryOperator> boxingAdapter) { + S spliterator = supplier.get(); + boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); + + for (int depth=0; depth < 6; depth++) { + List dest = new ArrayList<>(); + spliterator = supplier.get(); + + assertSpliterator(spliterator); + + // verify splitting with forEach + visit(depth, 0, dest, spliterator, boxingAdapter, spliterator.characteristics(), false); + assertContents(dest, exp, isOrdered); + + // verify splitting with tryAdvance + dest.clear(); + spliterator = supplier.get(); + visit(depth, 0, dest, spliterator, boxingAdapter, spliterator.characteristics(), true); + assertContents(dest, exp, isOrdered); + } + } + + private static > void visit(int depth, int curLevel, + List dest, S spliterator, UnaryOperator> boxingAdapter, + int rootCharacteristics, boolean useTryAdvance) { + if (curLevel < depth) { + long beforeSize = spliterator.getExactSizeIfKnown(); + Spliterator split = spliterator.trySplit(); + if (split != null) { + assertSpliterator(split, rootCharacteristics); + assertSpliterator(spliterator, rootCharacteristics); + + if ((rootCharacteristics & Spliterator.SUBSIZED) != 0 && + (rootCharacteristics & Spliterator.SIZED) != 0) { + assertEquals(beforeSize, split.estimateSize() + spliterator.estimateSize()); + } + visit(depth, curLevel + 1, dest, split, boxingAdapter, rootCharacteristics, useTryAdvance); + } + visit(depth, curLevel + 1, dest, spliterator, boxingAdapter, rootCharacteristics, useTryAdvance); + } + else { + long sizeIfKnown = spliterator.getExactSizeIfKnown(); + if (useTryAdvance) { + Consumer addToDest = boxingAdapter.apply(dest::add); + int count = 0; + while (spliterator.tryAdvance(addToDest)) { + ++count; + } + + if (sizeIfKnown >= 0) + assertEquals(sizeIfKnown, count); + + // Assert that forEach now produces no elements + spliterator.forEachRemaining(boxingAdapter.apply(e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); + + Spliterator split = spliterator.trySplit(); + assertNull(split); + } + else { + List leafDest = new ArrayList<>(); + Consumer addToLeafDest = boxingAdapter.apply(leafDest::add); + spliterator.forEachRemaining(addToLeafDest); + + if (sizeIfKnown >= 0) + assertEquals(sizeIfKnown, leafDest.size()); + + // 
Assert that forEach now produces no elements + spliterator.tryAdvance(boxingAdapter.apply(e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); + + Spliterator split = spliterator.trySplit(); + assertNull(split); + + dest.addAll(leafDest); + } + } + } + + private static > void testSplitUntilNull( + Collection exp, + Supplier supplier, + UnaryOperator> boxingAdapter) { + Spliterator s = supplier.get(); + boolean isOrdered = s.hasCharacteristics(Spliterator.ORDERED); + assertSpliterator(s); + + List splits = new ArrayList<>(); + Consumer c = boxingAdapter.apply(splits::add); + + testSplitUntilNull(new SplitNode(c, s)); + assertContents(splits, exp, isOrdered); + } + + private static class SplitNode { + // Constant for every node + final Consumer c; + final int rootCharacteristics; + + final Spliterator s; + + SplitNode(Consumer c, Spliterator s) { + this(c, s.characteristics(), s); + } + + private SplitNode(Consumer c, int rootCharacteristics, Spliterator s) { + this.c = c; + this.rootCharacteristics = rootCharacteristics; + this.s = s; + } + + SplitNode fromSplit(Spliterator split) { + return new SplitNode<>(c, rootCharacteristics, split); + } + } + + /** + * Set the maximum stack capacity to 0.25MB. This should be more than enough to detect a bad spliterator + * while not unduly disrupting test infrastructure given the test data sizes that are used are small. + * Note that j.u.c.ForkJoinPool sets the max queue size to 64M (1 << 26). + */ + private static final int MAXIMUM_STACK_CAPACITY = 1 << 18; // 0.25MB + + private static void testSplitUntilNull(SplitNode e) { + // Use an explicit stack to avoid a StackOverflowException when testing a Spliterator + // that when repeatedly split produces a right-balanced (and maybe degenerate) tree, or + // for a spliterator that is badly behaved. 
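+        // each SplitNode carries the root spliterator's characteristics so that every
+        // child split can be validated against them (see SplitNode.fromSplit())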
+ Deque> stack = new ArrayDeque<>(); + stack.push(e); + + int iteration = 0; + while (!stack.isEmpty()) { + assertTrue(iteration++ < MAXIMUM_STACK_CAPACITY, "Exceeded maximum stack modification count of 1 << 18"); + + e = stack.pop(); + Spliterator parentAndRightSplit = e.s; + + long parentEstimateSize = parentAndRightSplit.estimateSize(); + assertTrue(parentEstimateSize >= 0, + String.format("Split size estimate %d < 0", parentEstimateSize)); + + long parentSize = parentAndRightSplit.getExactSizeIfKnown(); + Spliterator leftSplit = parentAndRightSplit.trySplit(); + if (leftSplit == null) { + parentAndRightSplit.forEachRemaining(e.c); + continue; + } + + assertSpliterator(leftSplit, e.rootCharacteristics); + assertSpliterator(parentAndRightSplit, e.rootCharacteristics); + + if (parentEstimateSize != Long.MAX_VALUE && leftSplit.estimateSize() > 0 && parentAndRightSplit.estimateSize() > 0) { + assertTrue(leftSplit.estimateSize() < parentEstimateSize, + String.format("Left split size estimate %d >= parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize)); + assertTrue(parentAndRightSplit.estimateSize() < parentEstimateSize, + String.format("Right split size estimate %d >= parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize)); + } + else { + assertTrue(leftSplit.estimateSize() <= parentEstimateSize, + String.format("Left split size estimate %d > parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize)); + assertTrue(parentAndRightSplit.estimateSize() <= parentEstimateSize, + String.format("Right split size estimate %d > parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize)); + } + + long leftSize = leftSplit.getExactSizeIfKnown(); + long rightSize = parentAndRightSplit.getExactSizeIfKnown(); + if (parentSize >= 0 && leftSize >= 0 && rightSize >= 0) + assertEquals(parentSize, leftSize + rightSize, + String.format("exact left split size %d + exact right split size %d != parent exact split size %d", + leftSize, rightSize, parentSize)); + + // Add right side to stack first so left side is popped off first + stack.push(e.fromSplit(parentAndRightSplit)); + stack.push(e.fromSplit(leftSplit)); + } + } + + private static void assertSpliterator(Spliterator s, int rootCharacteristics) { + if ((rootCharacteristics & Spliterator.SUBSIZED) != 0) { + assertTrue(s.hasCharacteristics(Spliterator.SUBSIZED), + "Child split is not SUBSIZED when root split is SUBSIZED"); + } + assertSpliterator(s); + } + + private static void assertSpliterator(Spliterator s) { + if (s.hasCharacteristics(Spliterator.SUBSIZED)) { + assertTrue(s.hasCharacteristics(Spliterator.SIZED)); + } + if (s.hasCharacteristics(Spliterator.SIZED)) { + assertTrue(s.estimateSize() != Long.MAX_VALUE); + assertTrue(s.getExactSizeIfKnown() >= 0); + } + try { + s.getComparator(); + assertTrue(s.hasCharacteristics(Spliterator.SORTED)); + } catch (IllegalStateException e) { + assertFalse(s.hasCharacteristics(Spliterator.SORTED)); + } + } + + private static void assertContents(Collection actual, Collection expected, boolean isOrdered) { + if (isOrdered) { + assertEquals(actual, expected); + } + else { + assertContentsUnordered(actual, expected); + } + } + + private static void assertContentsUnordered(Iterable actual, Iterable expected) { + assertEquals(toBoxedMultiset(actual), toBoxedMultiset(expected)); + } + + private static Map toBoxedMultiset(Iterable c) { + Map result = new HashMap<>(); + c.forEach((Consumer) e -> { + if (result.containsKey((T)e)) { + 
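// element seen before: increment the count held in the HashableInteger value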
result.put((T)e, new HashableInteger(((HashableInteger)result.get(e)).value + 1, 10)); + } else { + result.put((T)e, new HashableInteger(1, 10)); + } + }); + return result; + } + + private void executeAndCatch(Class expected, Runnable r) { + Exception caught = null; + try { + r.run(); + } + catch (Exception e) { + caught = e; + } + + assertNotNull(caught, + String.format("No Exception was thrown, expected an Exception of %s to be thrown", + expected.getName())); + assertTrue(expected.isInstance(caught), + String.format("Exception thrown %s not an instance of %s", + caught.getClass().getName(), expected.getName())); + } + +} diff --git a/jdk/test/java/util/Spliterator/SpliteratorTraversingAndSplittingTest.java b/jdk/test/java/util/Spliterator/SpliteratorTraversingAndSplittingTest.java index cb5ffa90ed3..5990dde97c5 100644 --- a/jdk/test/java/util/Spliterator/SpliteratorTraversingAndSplittingTest.java +++ b/jdk/test/java/util/Spliterator/SpliteratorTraversingAndSplittingTest.java @@ -128,6 +128,10 @@ public class SpliteratorTraversingAndSplittingTest { void addMap(Function, ? extends Map> m) { String description = "new " + m.apply(Collections.emptyMap()).getClass().getName(); + addMap(m, description); + } + + void addMap(Function, ? extends Map> m, String description) { add(description + ".keySet().spliterator()", () -> m.apply(mExp).keySet().spliterator()); add(description + ".values().spliterator()", () -> m.apply(mExp).values().spliterator()); add(description + ".entrySet().spliterator()", mExp.entrySet(), () -> m.apply(mExp).entrySet().spliterator()); @@ -399,12 +403,36 @@ public class SpliteratorTraversingAndSplittingTest { db.addMap(HashMap::new); + db.addMap(m -> { + // Create a Map ensuring that for large sizes + // buckets will contain 2 or more entries + HashMap cm = new HashMap<>(1, m.size() + 1); + // Don't use putAll which inflates the table by + // m.size() * loadFactor, thus creating a very sparse + // map for 1000 entries defeating the purpose of this test, + // in addition it will cause the split until null test to fail + // because the number of valid splits is larger than the + // threshold + for (Map.Entry e : m.entrySet()) + cm.put(e.getKey(), e.getValue()); + return cm; + }, "new java.util.HashMap(1, size + 1)"); + db.addMap(LinkedHashMap::new); db.addMap(IdentityHashMap::new); db.addMap(WeakHashMap::new); + db.addMap(m -> { + // Create a Map ensuring that for large sizes + // buckets will be consist of 2 or more entries + WeakHashMap cm = new WeakHashMap<>(1, m.size() + 1); + for (Map.Entry e : m.entrySet()) + cm.put(e.getKey(), e.getValue()); + return cm; + }, "new java.util.WeakHashMap(1, size + 1)"); + // @@@ Descending maps etc db.addMap(TreeMap::new); diff --git a/jdk/test/java/util/jar/TestExtra.java b/jdk/test/java/util/jar/TestExtra.java index b10629a1bc9..876149b9623 100644 --- a/jdk/test/java/util/jar/TestExtra.java +++ b/jdk/test/java/util/jar/TestExtra.java @@ -23,7 +23,7 @@ /** * @test - * @bug 6480504 + * @bug 6480504 6303183 * @summary Test that client-provided data in the extra field is written and * read correctly, taking into account the JAR_MAGIC written into the extra * field of the first entry of JAR files. 
@@ -117,8 +117,7 @@ public class TestExtra { ZipInputStream zis = getInputStream(); ze = zis.getNextEntry(); - byte[] e = ze.getExtra(); - check(e.length == 8, "expected extra length is 8, got " + e.length); + checkExtra(data, ze.getExtra()); checkEntry(ze, 0, 0); } @@ -140,10 +139,43 @@ public class TestExtra { ZipInputStream zis = getInputStream(); ze = zis.getNextEntry(); byte[] e = ze.getExtra(); - check(e.length == 8, "expected extra length is 8, got " + e.length); + checkExtra(data, ze.getExtra()); checkEntry(ze, 0, 0); } + // check if all "expected" extra fields equal to their + // corresponding fields in "extra". The "extra" might have + // timestamp fields added by ZOS. + static void checkExtra(byte[] expected, byte[] extra) { + if (expected == null) + return; + int off = 0; + int len = expected.length; + while (off + 4 < len) { + int tag = get16(expected, off); + int sz = get16(expected, off + 2); + int off0 = 0; + int len0 = extra.length; + boolean matched = false; + while (off0 + 4 < len0) { + int tag0 = get16(extra, off0); + int sz0 = get16(extra, off0 + 2); + if (tag == tag0 && sz == sz0) { + matched = true; + for (int i = 0; i < sz; i++) { + if (expected[off + i] != extra[off0 +i]) + matched = false; + } + break; + } + off0 += (4 + sz0); + } + if (!matched) { + fail("Expected extra data [tag=" + tag + "sz=" + sz + "] check failed"); + } + off += (4 + sz); + } + } /** Check that the entry's extra data is correct. */ void checkEntry(ZipEntry ze, int count, int dataLength) { diff --git a/jdk/test/java/util/stream/test/org/openjdk/tests/java/util/stream/SpliteratorLateBindingFailFastTest.java b/jdk/test/java/util/stream/test/org/openjdk/tests/java/util/stream/SpliteratorLateBindingFailFastTest.java deleted file mode 100644 index af7ddbf066b..00000000000 --- a/jdk/test/java/util/stream/test/org/openjdk/tests/java/util/stream/SpliteratorLateBindingFailFastTest.java +++ /dev/null @@ -1,358 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ -package org.openjdk.tests.java.util.stream; - -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.ConcurrentModificationException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.PriorityQueue; -import java.util.Set; -import java.util.Spliterator; -import java.util.Stack; -import java.util.TreeMap; -import java.util.TreeSet; -import java.util.Vector; -import java.util.WeakHashMap; -import java.util.function.Consumer; -import java.util.function.Function; -import java.util.function.Supplier; - -import static org.testng.Assert.*; - -/** - * @test - * @summary Spliterator last-binding and fail-fast tests - * @run testng SpliteratorLateBindingFailFastTest - */ - -@Test(groups = { "serialization-hostile" }) -public class SpliteratorLateBindingFailFastTest { - - private interface Source { - Collection asCollection(); - void update(); - } - - private static class SpliteratorDataBuilder { - final List data; - - final T newValue; - - final List exp; - - final Map mExp; - - SpliteratorDataBuilder(List data, T newValue, List exp) { - this.data = data; - this.newValue = newValue; - this.exp = exp; - this.mExp = createMap(exp); - } - - Map createMap(List l) { - Map m = new LinkedHashMap<>(); - for (T t : l) { - m.put(t, t); - } - return m; - } - - void add(String description, Supplier> s) { - description = joiner(description).toString(); - data.add(new Object[]{description, s}); - } - - void addCollection(Function, ? extends Collection> f) { - class CollectionSource implements Source { - final Collection c = f.apply(exp); - - final Consumer> updater; - - CollectionSource(Consumer> updater) { - this.updater = updater; - } - - @Override - public Collection asCollection() { - return c; - } - - @Override - public void update() { - updater.accept(c); - } - } - - String description = "new " + f.apply(Collections.emptyList()).getClass().getName() + ".spliterator() "; - add(description + "ADD", () -> new CollectionSource(c -> c.add(newValue))); - add(description + "REMOVE", () -> new CollectionSource(c -> c.remove(c.iterator().next()))); - } - - void addList(Function, ? extends List> l) { - // @@@ If collection is instance of List then add sub-list tests - addCollection(l); - } - - void addMap(Function, ? 
extends Map> mapConstructor) { - class MapSource implements Source { - final Map m = mapConstructor.apply(mExp); - - final Collection c; - - final Consumer> updater; - - MapSource(Function, Collection> f, Consumer> updater) { - this.c = f.apply(m); - this.updater = updater; - } - - @Override - public Collection asCollection() { - return c; - } - - @Override - public void update() { - updater.accept(m); - } - } - - Map>> actions = new HashMap<>(); - actions.put("ADD", m -> m.put(newValue, newValue)); - actions.put("REMOVE", m -> m.remove(m.keySet().iterator().next())); - - String description = "new " + mapConstructor.apply(Collections.emptyMap()).getClass().getName(); - for (Map.Entry>> e : actions.entrySet()) { - add(description + ".keySet().spliterator() " + e.getKey(), - () -> new MapSource(m -> m.keySet(), e.getValue())); - add(description + ".values().spliterator() " + e.getKey(), - () -> new MapSource(m -> m.values(), e.getValue())); - add(description + ".entrySet().spliterator() " + e.getKey(), - () -> new MapSource>(m -> m.entrySet(), e.getValue())); - } - } - - StringBuilder joiner(String description) { - return new StringBuilder(description). - append(" {"). - append("size=").append(exp.size()). - append("}"); - } - } - - static Object[][] spliteratorDataProvider; - - @DataProvider(name = "Source") - public static Object[][] spliteratorDataProvider() { - if (spliteratorDataProvider != null) { - return spliteratorDataProvider; - } - - List data = new ArrayList<>(); - SpliteratorDataBuilder db = new SpliteratorDataBuilder<>(data, 5, Arrays.asList(1, 2, 3, 4)); - - // Collections - - db.addList(ArrayList::new); - - db.addList(LinkedList::new); - - db.addList(Vector::new); - - - db.addCollection(HashSet::new); - - db.addCollection(LinkedHashSet::new); - - db.addCollection(TreeSet::new); - - - db.addCollection(c -> { Stack s = new Stack<>(); s.addAll(c); return s;}); - - db.addCollection(PriorityQueue::new); - - // ArrayDeque fails some tests since it's fail-fast support is weaker - // than other collections and limited to detecting most, but not all, - // removals. It probably requires it's own test since it is difficult - // to abstract out the conditions under which it fails-fast. 
-// db.addCollection(ArrayDeque::new); - - // Maps - - db.addMap(HashMap::new); - - db.addMap(LinkedHashMap::new); - - // This fails when run through jrteg but passes when run though - // ant -// db.addMap(IdentityHashMap::new); - - db.addMap(WeakHashMap::new); - - // @@@ Descending maps etc - db.addMap(TreeMap::new); - - return spliteratorDataProvider = data.toArray(new Object[0][]); - } - - @Test(dataProvider = "Source") - public void lateBindingTestWithForEach(String description, Supplier> ss) { - Source source = ss.get(); - Collection c = source.asCollection(); - Spliterator s = c.spliterator(); - - source.update(); - - Set r = new HashSet<>(); - s.forEachRemaining(r::add); - - assertEquals(r, new HashSet<>(c)); - } - - @Test(dataProvider = "Source") - public void lateBindingTestWithTryAdvance(String description, Supplier> ss) { - Source source = ss.get(); - Collection c = source.asCollection(); - Spliterator s = c.spliterator(); - - source.update(); - - Set r = new HashSet<>(); - while (s.tryAdvance(r::add)) { } - - assertEquals(r, new HashSet<>(c)); - } - - @Test(dataProvider = "Source") - public void lateBindingTestWithCharacteritics(String description, Supplier> ss) { - Source source = ss.get(); - Collection c = source.asCollection(); - Spliterator s = c.spliterator(); - s.characteristics(); - - Set r = new HashSet<>(); - s.forEachRemaining(r::add); - - assertEquals(r, new HashSet<>(c)); - } - - - @Test(dataProvider = "Source") - public void testFailFastTestWithTryAdvance(String description, Supplier> ss) { - { - Source source = ss.get(); - Collection c = source.asCollection(); - Spliterator s = c.spliterator(); - - s.tryAdvance(e -> { - }); - source.update(); - - executeAndCatch(() -> s.tryAdvance(e -> { })); - } - - { - Source source = ss.get(); - Collection c = source.asCollection(); - Spliterator s = c.spliterator(); - - s.tryAdvance(e -> { - }); - source.update(); - - executeAndCatch(() -> s.forEachRemaining(e -> { - })); - } - } - - @Test(dataProvider = "Source") - public void testFailFastTestWithForEach(String description, Supplier> ss) { - Source source = ss.get(); - Collection c = source.asCollection(); - Spliterator s = c.spliterator(); - - executeAndCatch(() -> s.forEachRemaining(e -> { - source.update(); - })); - } - - @Test(dataProvider = "Source") - public void testFailFastTestWithEstimateSize(String description, Supplier> ss) { - { - Source source = ss.get(); - Collection c = source.asCollection(); - Spliterator s = c.spliterator(); - - s.estimateSize(); - source.update(); - - executeAndCatch(() -> s.tryAdvance(e -> { })); - } - - { - Source source = ss.get(); - Collection c = source.asCollection(); - Spliterator s = c.spliterator(); - - s.estimateSize(); - source.update(); - - executeAndCatch(() -> s.forEachRemaining(e -> { - })); - } - } - - private void executeAndCatch(Runnable r) { - executeAndCatch(ConcurrentModificationException.class, r); - } - - private void executeAndCatch(Class expected, Runnable r) { - Exception caught = null; - try { - r.run(); - } - catch (Exception e) { - caught = e; - } - - assertNotNull(caught, - String.format("No Exception was thrown, expected an Exception of %s to be thrown", - expected.getName())); - assertTrue(expected.isInstance(caught), - String.format("Exception thrown %s not an instance of %s", - caught.getClass().getName(), expected.getName())); - } - -} diff --git a/jdk/test/java/util/stream/test/org/openjdk/tests/java/util/stream/SpliteratorTraversingAndSplittingTest.java 
b/jdk/test/java/util/stream/test/org/openjdk/tests/java/util/stream/SpliteratorTraversingAndSplittingTest.java deleted file mode 100644 index de9d51cf0d9..00000000000 --- a/jdk/test/java/util/stream/test/org/openjdk/tests/java/util/stream/SpliteratorTraversingAndSplittingTest.java +++ /dev/null @@ -1,1411 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package org.openjdk.tests.java.util.stream; - -/** - * @test - * @summary Spliterator traversing and splitting tests - * @run testng SpliteratorTraversingAndSplittingTest - */ - -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; - -import java.util.AbstractCollection; -import java.util.AbstractList; -import java.util.AbstractSet; -import java.util.ArrayDeque; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.Deque; -import java.util.HashMap; -import java.util.HashSet; -import java.util.IdentityHashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.PriorityQueue; -import java.util.Set; -import java.util.SortedSet; -import java.util.Spliterator; -import java.util.Spliterators; -import java.util.Stack; -import java.util.TreeMap; -import java.util.TreeSet; -import java.util.Vector; -import java.util.WeakHashMap; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.LinkedBlockingDeque; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.LinkedTransferQueue; -import java.util.concurrent.PriorityBlockingQueue; -import java.util.function.Consumer; -import java.util.function.DoubleConsumer; -import java.util.function.Function; -import java.util.function.IntConsumer; -import java.util.function.LongConsumer; -import java.util.function.Supplier; -import java.util.function.UnaryOperator; - -import static org.testng.Assert.*; -import static org.testng.Assert.assertEquals; - -@Test(groups = { "serialization-hostile" }) -public class SpliteratorTraversingAndSplittingTest { - - private static List SIZES = 
Arrays.asList(0, 1, 10, 100, 1000); - - private static class SpliteratorDataBuilder { - List data; - - List exp; - - Map mExp; - - SpliteratorDataBuilder(List data, List exp) { - this.data = data; - this.exp = exp; - this.mExp = createMap(exp); - } - - Map createMap(List l) { - Map m = new LinkedHashMap<>(); - for (T t : l) { - m.put(t, t); - } - return m; - } - - void add(String description, Collection expected, Supplier> s) { - description = joiner(description).toString(); - data.add(new Object[]{description, expected, s}); - } - - void add(String description, Supplier> s) { - add(description, exp, s); - } - - void addCollection(Function, ? extends Collection> c) { - add("new " + c.apply(Collections.emptyList()).getClass().getName() + ".spliterator()", - () -> c.apply(exp).spliterator()); - } - - void addList(Function, ? extends List> l) { - // @@@ If collection is instance of List then add sub-list tests - addCollection(l); - } - - void addMap(Function, ? extends Map> m) { - String description = "new " + m.apply(Collections.emptyMap()).getClass().getName(); - add(description + ".keySet().spliterator()", () -> m.apply(mExp).keySet().spliterator()); - add(description + ".values().spliterator()", () -> m.apply(mExp).values().spliterator()); - add(description + ".entrySet().spliterator()", mExp.entrySet(), () -> m.apply(mExp).entrySet().spliterator()); - } - - StringBuilder joiner(String description) { - return new StringBuilder(description). - append(" {"). - append("size=").append(exp.size()). - append("}"); - } - } - - static Object[][] spliteratorDataProvider; - - @DataProvider(name = "Spliterator") - public static Object[][] spliteratorDataProvider() { - if (spliteratorDataProvider != null) { - return spliteratorDataProvider; - } - - List data = new ArrayList<>(); - for (int size : SIZES) { - List exp = listIntRange(size); - SpliteratorDataBuilder db = new SpliteratorDataBuilder<>(data, exp); - - // Direct spliterator methods - - db.add("Spliterators.spliterator(Collection, ...)", - () -> Spliterators.spliterator(exp, 0)); - - db.add("Spliterators.spliterator(Iterator, ...)", - () -> Spliterators.spliterator(exp.iterator(), exp.size(), 0)); - - db.add("Spliterators.spliteratorUnknownSize(Iterator, ...)", - () -> Spliterators.spliteratorUnknownSize(exp.iterator(), 0)); - - db.add("Spliterators.spliterator(Spliterators.iteratorFromSpliterator(Spliterator ), ...)", - () -> Spliterators.spliterator(Spliterators.iteratorFromSpliterator(exp.spliterator()), exp.size(), 0)); - - db.add("Spliterators.spliterator(T[], ...)", - () -> Spliterators.spliterator(exp.toArray(new Integer[0]), 0)); - - db.add("Arrays.spliterator(T[], ...)", - () -> Arrays.spliterator(exp.toArray(new Integer[0]))); - - class SpliteratorFromIterator extends Spliterators.AbstractSpliterator { - Iterator it; - - SpliteratorFromIterator(Iterator it, long est) { - super(est, Spliterator.SIZED); - this.it = it; - } - - @Override - public boolean tryAdvance(Consumer action) { - if (action == null) - throw new NullPointerException(); - if (it.hasNext()) { - action.accept(it.next()); - return true; - } - else { - return false; - } - } - } - db.add("new Spliterators.AbstractSpliterator()", - () -> new SpliteratorFromIterator(exp.iterator(), exp.size())); - - // Collections - - // default method implementations - - class AbstractCollectionImpl extends AbstractCollection { - Collection c; - - AbstractCollectionImpl(Collection c) { - this.c = c; - } - - @Override - public Iterator iterator() { - return c.iterator(); - } - - @Override 
- public int size() { - return c.size(); - } - } - db.addCollection( - c -> new AbstractCollectionImpl(c)); - - class AbstractListImpl extends AbstractList { - List l; - - AbstractListImpl(Collection c) { - this.l = new ArrayList<>(c); - } - - @Override - public Integer get(int index) { - return l.get(index); - } - - @Override - public int size() { - return l.size(); - } - } - db.addCollection( - c -> new AbstractListImpl(c)); - - class AbstractSetImpl extends AbstractSet { - Set s; - - AbstractSetImpl(Collection c) { - this.s = new HashSet<>(c); - } - - @Override - public Iterator iterator() { - return s.iterator(); - } - - @Override - public int size() { - return s.size(); - } - } - db.addCollection( - c -> new AbstractSetImpl(c)); - - class AbstractSortedSetImpl extends AbstractSet implements SortedSet { - SortedSet s; - - AbstractSortedSetImpl(Collection c) { - this.s = new TreeSet<>(c); - } - - @Override - public Iterator iterator() { - return s.iterator(); - } - - @Override - public int size() { - return s.size(); - } - - @Override - public Comparator comparator() { - return s.comparator(); - } - - @Override - public SortedSet subSet(Integer fromElement, Integer toElement) { - return s.subSet(fromElement, toElement); - } - - @Override - public SortedSet headSet(Integer toElement) { - return s.headSet(toElement); - } - - @Override - public SortedSet tailSet(Integer fromElement) { - return s.tailSet(fromElement); - } - - @Override - public Integer first() { - return s.first(); - } - - @Override - public Integer last() { - return s.last(); - } - - @Override - public Spliterator spliterator() { - return SortedSet.super.spliterator(); - } - } - db.addCollection( - c -> new AbstractSortedSetImpl(c)); - - // - - db.add("Arrays.asList().spliterator()", - () -> Spliterators.spliterator(Arrays.asList(exp.toArray(new Integer[0])), 0)); - - db.addList(ArrayList::new); - - db.addList(LinkedList::new); - - db.addList(Vector::new); - - - db.addCollection(HashSet::new); - - db.addCollection(LinkedHashSet::new); - - db.addCollection(TreeSet::new); - - - db.addCollection(c -> { Stack s = new Stack<>(); s.addAll(c); return s;}); - - db.addCollection(PriorityQueue::new); - - db.addCollection(ArrayDeque::new); - - - db.addCollection(ConcurrentSkipListSet::new); - - if (size > 0) { - db.addCollection(c -> { - ArrayBlockingQueue abq = new ArrayBlockingQueue<>(size); - abq.addAll(c); - return abq; - }); - } - - db.addCollection(PriorityBlockingQueue::new); - - db.addCollection(LinkedBlockingQueue::new); - - db.addCollection(LinkedTransferQueue::new); - - db.addCollection(ConcurrentLinkedQueue::new); - - db.addCollection(LinkedBlockingDeque::new); - - db.addCollection(CopyOnWriteArrayList::new); - - db.addCollection(CopyOnWriteArraySet::new); - - if (size == 1) { - db.addCollection(c -> Collections.singleton(exp.get(0))); - db.addCollection(c -> Collections.singletonList(exp.get(0))); - } - - // Collections.synchronized/unmodifiable/checked wrappers - db.addCollection(Collections::unmodifiableCollection); - db.addCollection(c -> Collections.unmodifiableSet(new HashSet<>(c))); - db.addCollection(c -> Collections.unmodifiableSortedSet(new TreeSet<>(c))); - db.addList(c -> Collections.unmodifiableList(new ArrayList<>(c))); - db.addMap(Collections::unmodifiableMap); - db.addMap(m -> Collections.unmodifiableSortedMap(new TreeMap<>(m))); - - db.addCollection(Collections::synchronizedCollection); - db.addCollection(c -> Collections.synchronizedSet(new HashSet<>(c))); - db.addCollection(c -> 
Collections.synchronizedSortedSet(new TreeSet<>(c))); - db.addList(c -> Collections.synchronizedList(new ArrayList<>(c))); - db.addMap(Collections::synchronizedMap); - db.addMap(m -> Collections.synchronizedSortedMap(new TreeMap<>(m))); - - db.addCollection(c -> Collections.checkedCollection(c, Integer.class)); - db.addCollection(c -> Collections.checkedQueue(new ArrayDeque<>(c), Integer.class)); - db.addCollection(c -> Collections.checkedSet(new HashSet<>(c), Integer.class)); - db.addCollection(c -> Collections.checkedSortedSet(new TreeSet<>(c), Integer.class)); - db.addList(c -> Collections.checkedList(new ArrayList<>(c), Integer.class)); - db.addMap(c -> Collections.checkedMap(c, Integer.class, Integer.class)); - db.addMap(m -> Collections.checkedSortedMap(new TreeMap<>(m), Integer.class, Integer.class)); - - // Maps - - db.addMap(HashMap::new); - - db.addMap(LinkedHashMap::new); - - db.addMap(IdentityHashMap::new); - - db.addMap(WeakHashMap::new); - - // @@@ Descending maps etc - db.addMap(TreeMap::new); - - db.addMap(ConcurrentHashMap::new); - - db.addMap(ConcurrentSkipListMap::new); - } - - return spliteratorDataProvider = data.toArray(new Object[0][]); - } - - private static List listIntRange(int upTo) { - List exp = new ArrayList<>(); - for (int i = 0; i < upTo; i++) - exp.add(i); - return Collections.unmodifiableList(exp); - } - - @Test(dataProvider = "Spliterator") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testNullPointerException(String description, Collection exp, Supplier s) { - executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining(null)); - executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance(null)); - } - - @Test(dataProvider = "Spliterator") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testForEach(String description, Collection exp, Supplier s) { - testForEach(exp, s, (Consumer b) -> b); - } - - @Test(dataProvider = "Spliterator") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testTryAdvance(String description, Collection exp, Supplier s) { - testTryAdvance(exp, s, (Consumer b) -> b); - } - - @Test(dataProvider = "Spliterator") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testMixedTryAdvanceForEach(String description, Collection exp, Supplier s) { - testMixedTryAdvanceForEach(exp, s, (Consumer b) -> b); - } - - @Test(dataProvider = "Spliterator") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testMixedTraverseAndSplit(String description, Collection exp, Supplier s) { - testMixedTraverseAndSplit(exp, s, (Consumer b) -> b); - } - - @Test(dataProvider = "Spliterator") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testSplitAfterFullTraversal(String description, Collection exp, Supplier s) { - testSplitAfterFullTraversal(s, (Consumer b) -> b); - } - - @Test(dataProvider = "Spliterator") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testSplitOnce(String description, Collection exp, Supplier s) { - testSplitOnce(exp, s, (Consumer b) -> b); - } - - @Test(dataProvider = "Spliterator") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testSplitSixDeep(String description, Collection exp, Supplier s) { - testSplitSixDeep(exp, s, (Consumer b) -> b); - } - - @Test(dataProvider = "Spliterator") - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testSplitUntilNull(String description, Collection exp, Supplier s) { - testSplitUntilNull(exp, s, (Consumer b) -> b); - } - - // - - private static class SpliteratorOfIntDataBuilder { 
- List data; - - List exp; - - SpliteratorOfIntDataBuilder(List data, List exp) { - this.data = data; - this.exp = exp; - } - - void add(String description, List expected, Supplier s) { - description = joiner(description).toString(); - data.add(new Object[]{description, expected, s}); - } - - void add(String description, Supplier s) { - add(description, exp, s); - } - - StringBuilder joiner(String description) { - return new StringBuilder(description). - append(" {"). - append("size=").append(exp.size()). - append("}"); - } - } - - static Object[][] spliteratorOfIntDataProvider; - - @DataProvider(name = "Spliterator.OfInt") - public static Object[][] spliteratorOfIntDataProvider() { - if (spliteratorOfIntDataProvider != null) { - return spliteratorOfIntDataProvider; - } - - List data = new ArrayList<>(); - for (int size : SIZES) { - int exp[] = arrayIntRange(size); - SpliteratorOfIntDataBuilder db = new SpliteratorOfIntDataBuilder(data, listIntRange(size)); - - db.add("Spliterators.spliterator(int[], ...)", - () -> Spliterators.spliterator(exp, 0)); - - db.add("Arrays.spliterator(int[], ...)", - () -> Arrays.spliterator(exp)); - - db.add("Spliterators.spliterator(PrimitiveIterator.OfInt, ...)", - () -> Spliterators.spliterator(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), exp.length, 0)); - - db.add("Spliterators.spliteratorUnknownSize(PrimitiveIterator.OfInt, ...)", - () -> Spliterators.spliteratorUnknownSize(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), 0)); - - class IntSpliteratorFromArray extends Spliterators.AbstractIntSpliterator { - int[] a; - int index = 0; - - IntSpliteratorFromArray(int[] a) { - super(a.length, Spliterator.SIZED); - this.a = a; - } - - @Override - public boolean tryAdvance(IntConsumer action) { - if (action == null) - throw new NullPointerException(); - if (index < a.length) { - action.accept(a[index++]); - return true; - } - else { - return false; - } - } - } - db.add("new Spliterators.AbstractIntAdvancingSpliterator()", - () -> new IntSpliteratorFromArray(exp)); - } - - return spliteratorOfIntDataProvider = data.toArray(new Object[0][]); - } - - private static int[] arrayIntRange(int upTo) { - int[] exp = new int[upTo]; - for (int i = 0; i < upTo; i++) - exp[i] = i; - return exp; - } - - private static UnaryOperator> intBoxingConsumer() { - class BoxingAdapter implements Consumer, IntConsumer { - private final Consumer b; - - BoxingAdapter(Consumer b) { - this.b = b; - } - - @Override - public void accept(Integer value) { - throw new IllegalStateException(); - } - - @Override - public void accept(int value) { - b.accept(value); - } - } - - return b -> new BoxingAdapter(b); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntNullPointerException(String description, Collection exp, Supplier s) { - executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining((IntConsumer) null)); - executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance((IntConsumer) null)); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntForEach(String description, Collection exp, Supplier s) { - testForEach(exp, s, intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntTryAdvance(String description, Collection exp, Supplier s) { - testTryAdvance(exp, s, intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntMixedTryAdvanceForEach(String description, Collection exp, Supplier s) { - testMixedTryAdvanceForEach(exp, s, 
intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntMixedTraverseAndSplit(String description, Collection exp, Supplier s) { - testMixedTraverseAndSplit(exp, s, intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntSplitAfterFullTraversal(String description, Collection exp, Supplier s) { - testSplitAfterFullTraversal(s, intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntSplitOnce(String description, Collection exp, Supplier s) { - testSplitOnce(exp, s, intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntSplitSixDeep(String description, Collection exp, Supplier s) { - testSplitSixDeep(exp, s, intBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfInt") - public void testIntSplitUntilNull(String description, Collection exp, Supplier s) { - testSplitUntilNull(exp, s, intBoxingConsumer()); - } - - // - - private static class SpliteratorOfLongDataBuilder { - List data; - - List exp; - - SpliteratorOfLongDataBuilder(List data, List exp) { - this.data = data; - this.exp = exp; - } - - void add(String description, List expected, Supplier s) { - description = joiner(description).toString(); - data.add(new Object[]{description, expected, s}); - } - - void add(String description, Supplier s) { - add(description, exp, s); - } - - StringBuilder joiner(String description) { - return new StringBuilder(description). - append(" {"). - append("size=").append(exp.size()). - append("}"); - } - } - - static Object[][] spliteratorOfLongDataProvider; - - @DataProvider(name = "Spliterator.OfLong") - public static Object[][] spliteratorOfLongDataProvider() { - if (spliteratorOfLongDataProvider != null) { - return spliteratorOfLongDataProvider; - } - - List data = new ArrayList<>(); - for (int size : SIZES) { - long exp[] = arrayLongRange(size); - SpliteratorOfLongDataBuilder db = new SpliteratorOfLongDataBuilder(data, listLongRange(size)); - - db.add("Spliterators.spliterator(long[], ...)", - () -> Spliterators.spliterator(exp, 0)); - - db.add("Arrays.spliterator(long[], ...)", - () -> Arrays.spliterator(exp)); - - db.add("Spliterators.spliterator(PrimitiveIterator.OfLong, ...)", - () -> Spliterators.spliterator(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), exp.length, 0)); - - db.add("Spliterators.spliteratorUnknownSize(PrimitiveIterator.OfLong, ...)", - () -> Spliterators.spliteratorUnknownSize(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), 0)); - - class LongSpliteratorFromArray extends Spliterators.AbstractLongSpliterator { - long[] a; - int index = 0; - - LongSpliteratorFromArray(long[] a) { - super(a.length, Spliterator.SIZED); - this.a = a; - } - - @Override - public boolean tryAdvance(LongConsumer action) { - if (action == null) - throw new NullPointerException(); - if (index < a.length) { - action.accept(a[index++]); - return true; - } - else { - return false; - } - } - } - db.add("new Spliterators.AbstractLongAdvancingSpliterator()", - () -> new LongSpliteratorFromArray(exp)); - } - - return spliteratorOfLongDataProvider = data.toArray(new Object[0][]); - } - - private static List listLongRange(int upTo) { - List exp = new ArrayList<>(); - for (long i = 0; i < upTo; i++) - exp.add(i); - return Collections.unmodifiableList(exp); - } - - private static long[] arrayLongRange(int upTo) { - long[] exp = new long[upTo]; - for (int i = 0; i < upTo; i++) - exp[i] = i; - return exp; - } - - private static 
UnaryOperator> longBoxingConsumer() { - class BoxingAdapter implements Consumer, LongConsumer { - private final Consumer b; - - BoxingAdapter(Consumer b) { - this.b = b; - } - - @Override - public void accept(Long value) { - throw new IllegalStateException(); - } - - @Override - public void accept(long value) { - b.accept(value); - } - } - - return b -> new BoxingAdapter(b); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongNullPointerException(String description, Collection exp, Supplier s) { - executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining((LongConsumer) null)); - executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance((LongConsumer) null)); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongForEach(String description, Collection exp, Supplier s) { - testForEach(exp, s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongTryAdvance(String description, Collection exp, Supplier s) { - testTryAdvance(exp, s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongMixedTryAdvanceForEach(String description, Collection exp, Supplier s) { - testMixedTryAdvanceForEach(exp, s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongMixedTraverseAndSplit(String description, Collection exp, Supplier s) { - testMixedTraverseAndSplit(exp, s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongSplitAfterFullTraversal(String description, Collection exp, Supplier s) { - testSplitAfterFullTraversal(s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongSplitOnce(String description, Collection exp, Supplier s) { - testSplitOnce(exp, s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongSplitSixDeep(String description, Collection exp, Supplier s) { - testSplitSixDeep(exp, s, longBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfLong") - public void testLongSplitUntilNull(String description, Collection exp, Supplier s) { - testSplitUntilNull(exp, s, longBoxingConsumer()); - } - - // - - private static class SpliteratorOfDoubleDataBuilder { - List data; - - List exp; - - SpliteratorOfDoubleDataBuilder(List data, List exp) { - this.data = data; - this.exp = exp; - } - - void add(String description, List expected, Supplier s) { - description = joiner(description).toString(); - data.add(new Object[]{description, expected, s}); - } - - void add(String description, Supplier s) { - add(description, exp, s); - } - - StringBuilder joiner(String description) { - return new StringBuilder(description). - append(" {"). - append("size=").append(exp.size()). 
- append("}"); - } - } - - static Object[][] spliteratorOfDoubleDataProvider; - - @DataProvider(name = "Spliterator.OfDouble") - public static Object[][] spliteratorOfDoubleDataProvider() { - if (spliteratorOfDoubleDataProvider != null) { - return spliteratorOfDoubleDataProvider; - } - - List data = new ArrayList<>(); - for (int size : SIZES) { - double exp[] = arrayDoubleRange(size); - SpliteratorOfDoubleDataBuilder db = new SpliteratorOfDoubleDataBuilder(data, listDoubleRange(size)); - - db.add("Spliterators.spliterator(double[], ...)", - () -> Spliterators.spliterator(exp, 0)); - - db.add("Arrays.spliterator(double[], ...)", - () -> Arrays.spliterator(exp)); - - db.add("Spliterators.spliterator(PrimitiveIterator.OfDouble, ...)", - () -> Spliterators.spliterator(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), exp.length, 0)); - - db.add("Spliterators.spliteratorUnknownSize(PrimitiveIterator.OfDouble, ...)", - () -> Spliterators.spliteratorUnknownSize(Spliterators.iteratorFromSpliterator(Arrays.spliterator(exp)), 0)); - - class DoubleSpliteratorFromArray extends Spliterators.AbstractDoubleSpliterator { - double[] a; - int index = 0; - - DoubleSpliteratorFromArray(double[] a) { - super(a.length, Spliterator.SIZED); - this.a = a; - } - - @Override - public boolean tryAdvance(DoubleConsumer action) { - if (action == null) - throw new NullPointerException(); - if (index < a.length) { - action.accept(a[index++]); - return true; - } - else { - return false; - } - } - } - db.add("new Spliterators.AbstractDoubleAdvancingSpliterator()", - () -> new DoubleSpliteratorFromArray(exp)); - } - - return spliteratorOfDoubleDataProvider = data.toArray(new Object[0][]); - } - - private static List listDoubleRange(int upTo) { - List exp = new ArrayList<>(); - for (double i = 0; i < upTo; i++) - exp.add(i); - return Collections.unmodifiableList(exp); - } - - private static double[] arrayDoubleRange(int upTo) { - double[] exp = new double[upTo]; - for (int i = 0; i < upTo; i++) - exp[i] = i; - return exp; - } - - private static UnaryOperator> doubleBoxingConsumer() { - class BoxingAdapter implements Consumer, DoubleConsumer { - private final Consumer b; - - BoxingAdapter(Consumer b) { - this.b = b; - } - - @Override - public void accept(Double value) { - throw new IllegalStateException(); - } - - @Override - public void accept(double value) { - b.accept(value); - } - } - - return b -> new BoxingAdapter(b); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleNullPointerException(String description, Collection exp, Supplier s) { - executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining((DoubleConsumer) null)); - executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance((DoubleConsumer) null)); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleForEach(String description, Collection exp, Supplier s) { - testForEach(exp, s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleTryAdvance(String description, Collection exp, Supplier s) { - testTryAdvance(exp, s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleMixedTryAdvanceForEach(String description, Collection exp, Supplier s) { - testMixedTryAdvanceForEach(exp, s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleMixedTraverseAndSplit(String description, Collection exp, Supplier s) { - testMixedTraverseAndSplit(exp, 
s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleSplitAfterFullTraversal(String description, Collection exp, Supplier s) { - testSplitAfterFullTraversal(s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleSplitOnce(String description, Collection exp, Supplier s) { - testSplitOnce(exp, s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleSplitSixDeep(String description, Collection exp, Supplier s) { - testSplitSixDeep(exp, s, doubleBoxingConsumer()); - } - - @Test(dataProvider = "Spliterator.OfDouble") - public void testDoubleSplitUntilNull(String description, Collection exp, Supplier s) { - testSplitUntilNull(exp, s, doubleBoxingConsumer()); - } - - // - - private static > void testForEach( - Collection exp, - Supplier supplier, - UnaryOperator> boxingAdapter) { - S spliterator = supplier.get(); - long sizeIfKnown = spliterator.getExactSizeIfKnown(); - boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); - - ArrayList fromForEach = new ArrayList<>(); - spliterator = supplier.get(); - Consumer addToFromForEach = boxingAdapter.apply(fromForEach::add); - spliterator.forEachRemaining(addToFromForEach); - - // Assert that forEach now produces no elements - spliterator.forEachRemaining(boxingAdapter.apply( - e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); - // Assert that tryAdvance now produce no elements - spliterator.tryAdvance(boxingAdapter.apply( - e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); - - // assert that size, tryAdvance, and forEach are consistent - if (sizeIfKnown >= 0) { - assertEquals(sizeIfKnown, exp.size()); - } - assertEquals(fromForEach.size(), exp.size()); - - assertContents(fromForEach, exp, isOrdered); - } - - private static > void testTryAdvance( - Collection exp, - Supplier supplier, - UnaryOperator> boxingAdapter) { - S spliterator = supplier.get(); - long sizeIfKnown = spliterator.getExactSizeIfKnown(); - boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); - - spliterator = supplier.get(); - ArrayList fromTryAdvance = new ArrayList<>(); - Consumer addToFromTryAdvance = boxingAdapter.apply(fromTryAdvance::add); - while (spliterator.tryAdvance(addToFromTryAdvance)) { } - - // Assert that forEach now produces no elements - spliterator.forEachRemaining(boxingAdapter.apply( - e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); - // Assert that tryAdvance now produce no elements - spliterator.tryAdvance(boxingAdapter.apply( - e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); - - // assert that size, tryAdvance, and forEach are consistent - if (sizeIfKnown >= 0) { - assertEquals(sizeIfKnown, exp.size()); - } - assertEquals(fromTryAdvance.size(), exp.size()); - - assertContents(fromTryAdvance, exp, isOrdered); - } - - private static > void testMixedTryAdvanceForEach( - Collection exp, - Supplier supplier, - UnaryOperator> boxingAdapter) { - S spliterator = supplier.get(); - long sizeIfKnown = spliterator.getExactSizeIfKnown(); - boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); - - // tryAdvance first few elements, then forEach rest - ArrayList dest = new ArrayList<>(); - spliterator = supplier.get(); - Consumer addToDest = boxingAdapter.apply(dest::add); - for (int i = 0; i < 10 && 
spliterator.tryAdvance(addToDest); i++) { } - spliterator.forEachRemaining(addToDest); - - // Assert that forEach now produces no elements - spliterator.forEachRemaining(boxingAdapter.apply( - e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); - // Assert that tryAdvance now produce no elements - spliterator.tryAdvance(boxingAdapter.apply( - e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); - - if (sizeIfKnown >= 0) { - assertEquals(sizeIfKnown, dest.size()); - } - assertEquals(dest.size(), exp.size()); - - if (isOrdered) { - assertEquals(dest, exp); - } - else { - assertContentsUnordered(dest, exp); - } - } - - private static > void testMixedTraverseAndSplit( - Collection exp, - Supplier supplier, - UnaryOperator> boxingAdapter) { - S spliterator = supplier.get(); - long sizeIfKnown = spliterator.getExactSizeIfKnown(); - boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); - - ArrayList dest = new ArrayList<>(); - spliterator = supplier.get(); - Consumer b = boxingAdapter.apply(dest::add); - - Spliterator spl1, spl2, spl3; - spliterator.tryAdvance(b); - spl2 = spliterator.trySplit(); - if (spl2 != null) { - spl2.tryAdvance(b); - spl1 = spl2.trySplit(); - if (spl1 != null) { - spl1.tryAdvance(b); - spl1.forEachRemaining(b); - } - spl2.tryAdvance(b); - spl2.forEachRemaining(b); - } - spliterator.tryAdvance(b); - spl3 = spliterator.trySplit(); - if (spl3 != null) { - spl3.tryAdvance(b); - spl3.forEachRemaining(b); - } - spliterator.tryAdvance(b); - spliterator.forEachRemaining(b); - - if (sizeIfKnown >= 0) { - assertEquals(sizeIfKnown, dest.size()); - } - assertEquals(dest.size(), exp.size()); - - if (isOrdered) { - assertEquals(dest, exp); - } - else { - assertContentsUnordered(dest, exp); - } - } - - private static > void testSplitAfterFullTraversal( - Supplier supplier, - UnaryOperator> boxingAdapter) { - // Full traversal using tryAdvance - Spliterator spliterator = supplier.get(); - while (spliterator.tryAdvance(boxingAdapter.apply(e -> { }))) { } - Spliterator split = spliterator.trySplit(); - assertNull(split); - - // Full traversal using forEach - spliterator = supplier.get(); - spliterator.forEachRemaining(boxingAdapter.apply(e -> { - })); - split = spliterator.trySplit(); - assertNull(split); - - // Full traversal using tryAdvance then forEach - spliterator = supplier.get(); - spliterator.tryAdvance(boxingAdapter.apply(e -> { })); - spliterator.forEachRemaining(boxingAdapter.apply(e -> { - })); - split = spliterator.trySplit(); - assertNull(split); - } - - private static > void testSplitOnce( - Collection exp, - Supplier supplier, - UnaryOperator> boxingAdapter) { - S spliterator = supplier.get(); - long sizeIfKnown = spliterator.getExactSizeIfKnown(); - boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); - - ArrayList fromSplit = new ArrayList<>(); - Spliterator s1 = supplier.get(); - Spliterator s2 = s1.trySplit(); - long s1Size = s1.getExactSizeIfKnown(); - long s2Size = (s2 != null) ? 
s2.getExactSizeIfKnown() : 0; - Consumer addToFromSplit = boxingAdapter.apply(fromSplit::add); - if (s2 != null) - s2.forEachRemaining(addToFromSplit); - s1.forEachRemaining(addToFromSplit); - - if (sizeIfKnown >= 0) { - assertEquals(sizeIfKnown, fromSplit.size()); - if (s1Size >= 0 && s2Size >= 0) - assertEquals(sizeIfKnown, s1Size + s2Size); - } - assertContents(fromSplit, exp, isOrdered); - } - - private static > void testSplitSixDeep( - Collection exp, - Supplier supplier, - UnaryOperator> boxingAdapter) { - S spliterator = supplier.get(); - boolean isOrdered = spliterator.hasCharacteristics(Spliterator.ORDERED); - - for (int depth=0; depth < 6; depth++) { - List dest = new ArrayList<>(); - spliterator = supplier.get(); - - assertSpliterator(spliterator); - - // verify splitting with forEach - visit(depth, 0, dest, spliterator, boxingAdapter, spliterator.characteristics(), false); - assertContents(dest, exp, isOrdered); - - // verify splitting with tryAdvance - dest.clear(); - spliterator = supplier.get(); - visit(depth, 0, dest, spliterator, boxingAdapter, spliterator.characteristics(), true); - assertContents(dest, exp, isOrdered); - } - } - - private static > - void visit(int depth, int curLevel, - List dest, S spliterator, UnaryOperator> boxingAdapter, - int rootCharacteristics, boolean useTryAdvance) { - if (curLevel < depth) { - long beforeSize = spliterator.getExactSizeIfKnown(); - Spliterator split = spliterator.trySplit(); - if (split != null) { - assertSpliterator(split, rootCharacteristics); - assertSpliterator(spliterator, rootCharacteristics); - - if ((rootCharacteristics & Spliterator.SUBSIZED) != 0 && - (rootCharacteristics & Spliterator.SIZED) != 0) { - assertEquals(beforeSize, split.estimateSize() + spliterator.estimateSize()); - } - visit(depth, curLevel + 1, dest, split, boxingAdapter, rootCharacteristics, useTryAdvance); - } - visit(depth, curLevel + 1, dest, spliterator, boxingAdapter, rootCharacteristics, useTryAdvance); - } - else { - long sizeIfKnown = spliterator.getExactSizeIfKnown(); - if (useTryAdvance) { - Consumer addToDest = boxingAdapter.apply(dest::add); - int count = 0; - while (spliterator.tryAdvance(addToDest)) { - ++count; - } - - if (sizeIfKnown >= 0) - assertEquals(sizeIfKnown, count); - - // Assert that forEach now produces no elements - spliterator.forEachRemaining(boxingAdapter.apply( - e -> fail("Spliterator.forEach produced an element after spliterator exhausted: " + e))); - - Spliterator split = spliterator.trySplit(); - assertNull(split); - } - else { - List leafDest = new ArrayList<>(); - Consumer addToLeafDest = boxingAdapter.apply(leafDest::add); - spliterator.forEachRemaining(addToLeafDest); - - if (sizeIfKnown >= 0) - assertEquals(sizeIfKnown, leafDest.size()); - - // Assert that forEach now produces no elements - spliterator.tryAdvance(boxingAdapter.apply( - e -> fail("Spliterator.tryAdvance produced an element after spliterator exhausted: " + e))); - - Spliterator split = spliterator.trySplit(); - assertNull(split); - - dest.addAll(leafDest); - } - } - } - - private static > void testSplitUntilNull( - Collection exp, - Supplier supplier, - UnaryOperator> boxingAdapter) { - Spliterator s = supplier.get(); - boolean isOrdered = s.hasCharacteristics(Spliterator.ORDERED); - assertSpliterator(s); - - List splits = new ArrayList<>(); - Consumer c = boxingAdapter.apply(splits::add); - - testSplitUntilNull(new SplitNode(c, s)); - assertContents(splits, exp, isOrdered); - } - - private static class SplitNode { - // Constant for every node - 
final Consumer c; - final int rootCharacteristics; - - final Spliterator s; - - SplitNode(Consumer c, Spliterator s) { - this(c, s.characteristics(), s); - } - - private SplitNode(Consumer c, int rootCharacteristics, Spliterator s) { - this.c = c; - this.rootCharacteristics = rootCharacteristics; - this.s = s; - } - - SplitNode fromSplit(Spliterator split) { - return new SplitNode<>(c, rootCharacteristics, split); - } - } - - /** - * Set the maximum stack capacity to 0.25MB. This should be more than enough to detect a bad spliterator - * while not unduly disrupting test infrastructure given the test data sizes that are used are small. - * Note that j.u.c.ForkJoinPool sets the max queue size to 64M (1 << 26). - */ - private static final int MAXIMUM_STACK_CAPACITY = 1 << 18; // 0.25MB - - private static void testSplitUntilNull(SplitNode e) { - // Use an explicit stack to avoid a StackOverflowException when testing a Spliterator - // that when repeatedly split produces a right-balanced (and maybe degenerate) tree, or - // for a spliterator that is badly behaved. - Deque> stack = new ArrayDeque<>(); - stack.push(e); - - int iteration = 0; - while (!stack.isEmpty()) { - assertTrue(iteration++ < MAXIMUM_STACK_CAPACITY, "Exceeded maximum stack modification count of 1 << 18"); - - e = stack.pop(); - Spliterator parentAndRightSplit = e.s; - - long parentEstimateSize = parentAndRightSplit.estimateSize(); - assertTrue(parentEstimateSize >= 0, - String.format("Split size estimate %d < 0", parentEstimateSize)); - - long parentSize = parentAndRightSplit.getExactSizeIfKnown(); - Spliterator leftSplit = parentAndRightSplit.trySplit(); - if (leftSplit == null) { - parentAndRightSplit.forEachRemaining(e.c); - continue; - } - - assertSpliterator(leftSplit, e.rootCharacteristics); - assertSpliterator(parentAndRightSplit, e.rootCharacteristics); - - if (parentEstimateSize != Long.MAX_VALUE && leftSplit.estimateSize() > 0 && parentAndRightSplit.estimateSize() > 0) { - assertTrue(leftSplit.estimateSize() < parentEstimateSize, - String.format("Left split size estimate %d >= parent split size estimate %d", - leftSplit.estimateSize(), parentEstimateSize)); - assertTrue(parentAndRightSplit.estimateSize() < parentEstimateSize, - String.format("Right split size estimate %d >= parent split size estimate %d", - leftSplit.estimateSize(), parentEstimateSize)); - } - else { - assertTrue(leftSplit.estimateSize() <= parentEstimateSize, - String.format("Left split size estimate %d > parent split size estimate %d", - leftSplit.estimateSize(), parentEstimateSize)); - assertTrue(parentAndRightSplit.estimateSize() <= parentEstimateSize, - String.format("Right split size estimate %d > parent split size estimate %d", - leftSplit.estimateSize(), parentEstimateSize)); - } - - long leftSize = leftSplit.getExactSizeIfKnown(); - long rightSize = parentAndRightSplit.getExactSizeIfKnown(); - if (parentSize >= 0 && leftSize >= 0 && rightSize >= 0) - assertEquals(parentSize, leftSize + rightSize, - String.format("exact left split size %d + exact right split size %d != parent exact split size %d", - leftSize, rightSize, parentSize)); - - // Add right side to stack first so left side is popped off first - stack.push(e.fromSplit(parentAndRightSplit)); - stack.push(e.fromSplit(leftSplit)); - } - } - - private static void assertSpliterator(Spliterator s, int rootCharacteristics) { - if ((rootCharacteristics & Spliterator.SUBSIZED) != 0) { - assertTrue(s.hasCharacteristics(Spliterator.SUBSIZED), - "Child split is not SUBSIZED when root split is 
SUBSIZED"); - } - assertSpliterator(s); - } - - private static void assertSpliterator(Spliterator s) { - if (s.hasCharacteristics(Spliterator.SUBSIZED)) { - assertTrue(s.hasCharacteristics(Spliterator.SIZED)); - } - if (s.hasCharacteristics(Spliterator.SIZED)) { - assertTrue(s.estimateSize() != Long.MAX_VALUE); - assertTrue(s.getExactSizeIfKnown() >= 0); - } - try { - s.getComparator(); - assertTrue(s.hasCharacteristics(Spliterator.SORTED)); - } catch (IllegalStateException e) { - assertFalse(s.hasCharacteristics(Spliterator.SORTED)); - } - } - - private static void assertContents(Collection actual, Collection expected, boolean isOrdered) { - if (isOrdered) { - assertEquals(actual, expected); - } - else { - assertContentsUnordered(actual, expected); - } - } - - private static void assertContentsUnordered(Iterable actual, Iterable expected) { - assertEquals(toBoxedMultiset(actual), toBoxedMultiset(expected)); - } - - private static Map toBoxedMultiset(Iterable c) { - Map result = new HashMap<>(); - c.forEach(e -> { - if (result.containsKey(e)) result.put(e, result.get(e) + 1); - else result.put(e, 1); - }); - return result; - } - - private void executeAndCatch(Class expected, Runnable r) { - Exception caught = null; - try { - r.run(); - } - catch (Exception e) { - caught = e; - } - - assertNotNull(caught, - String.format("No Exception was thrown, expected an Exception of %s to be thrown", - expected.getName())); - assertTrue(expected.isInstance(caught), - String.format("Exception thrown %s not an instance of %s", - caught.getClass().getName(), expected.getName())); - } - -} diff --git a/jdk/test/java/util/zip/StoredCRC.java b/jdk/test/java/util/zip/StoredCRC.java index b636e5f0c74..8df97748d3f 100644 --- a/jdk/test/java/util/zip/StoredCRC.java +++ b/jdk/test/java/util/zip/StoredCRC.java @@ -77,9 +77,9 @@ public class StoredCRC { unexpected(t); } - // Test that data corruption is detected. Offset 39 was + // Test that data corruption is detected. "offset" was // determined to be in the entry's uncompressed data. - data[39] ^= 1; + data[getDataOffset(data) + 4] ^= 1; zis = new ZipInputStream( new ByteArrayInputStream(data)); @@ -97,6 +97,15 @@ public class StoredCRC { } } + public static final int getDataOffset(byte b[]) { + final int LOCHDR = 30; // LOC header size + final int LOCEXT = 28; // extra field length + final int LOCNAM = 26; // filename length + int lenExt = Byte.toUnsignedInt(b[LOCEXT]) | (Byte.toUnsignedInt(b[LOCEXT + 1]) << 8); + int lenNam = Byte.toUnsignedInt(b[LOCNAM]) | (Byte.toUnsignedInt(b[LOCNAM + 1]) << 8); + return LOCHDR + lenExt + lenNam; + } + //--------------------- Infrastructure --------------------------- static volatile int passed = 0, failed = 0; static boolean pass() {passed++; return true;} diff --git a/jdk/test/java/util/zip/TestExtraTime.java b/jdk/test/java/util/zip/TestExtraTime.java new file mode 100644 index 00000000000..6af11b12055 --- /dev/null +++ b/jdk/test/java/util/zip/TestExtraTime.java @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 4759491 6303183 7012868 + * @summary Test that ZOS and ZIS handle the timestamp in the extra field correctly + */ + +import java.io.*; +import java.util.TimeZone; +import java.util.concurrent.TimeUnit; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; +import java.util.zip.ZipOutputStream; + + +public class TestExtraTime { + + public static void main(String[] args) throws Throwable { + + File src = new File(System.getProperty("test.src", "."), "TestExtraTime.java"); + if (src.exists()) { + long mtime = src.lastModified(); + test(mtime, null); + test(10, null); // ms-dos 1980 epoch problem + test(mtime, TimeZone.getTimeZone("Asia/Shanghai")); + } + } + + private static void test(long mtime, TimeZone tz) throws Throwable { + TimeZone tz0 = TimeZone.getDefault(); + if (tz != null) { + TimeZone.setDefault(tz); + } + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ZipOutputStream zos = new ZipOutputStream(baos); + ZipEntry ze = new ZipEntry("TestExtraTime.java"); + + ze.setTime(mtime); + zos.putNextEntry(ze); + zos.write(new byte[] { 1, 2, 3, 4 }); + zos.close(); + if (tz != null) { + TimeZone.setDefault(tz0); + } + ZipInputStream zis = new ZipInputStream( + new ByteArrayInputStream(baos.toByteArray())); + ze = zis.getNextEntry(); + zis.close(); + + System.out.printf("%tc => %tc%n", mtime, ze.getTime()); + + if (TimeUnit.MILLISECONDS.toSeconds(mtime) != + TimeUnit.MILLISECONDS.toSeconds(ze.getTime())) + throw new RuntimeException("Timestamp storing failed!"); + + } +} diff --git a/jdk/test/java/util/zip/ZipFile/Assortment.java b/jdk/test/java/util/zip/ZipFile/Assortment.java index 7e0d4055f03..234eded3813 100644 --- a/jdk/test/java/util/zip/ZipFile/Assortment.java +++ b/jdk/test/java/util/zip/ZipFile/Assortment.java @@ -22,7 +22,7 @@ */ /* @test - * @bug 4770745 6234507 + * @bug 4770745 6234507 6303183 * @summary test a variety of zip file entries * @author Martin Buchholz */ @@ -54,6 +54,44 @@ public class Assortment { check(condition, "Something's wrong"); } + static final int get16(byte b[], int off) { + return Byte.toUnsignedInt(b[off]) | (Byte.toUnsignedInt(b[off+1]) << 8); + } + + // Check that all "expected" extra fields are equal to their + // corresponding fields in "extra". The "extra" might have + // timestamp fields added by ZOS.
+ static boolean equalsExtraData(byte[] expected, byte[] extra) { + if (expected == null) + return true; + int off = 0; + int len = expected.length; + while (off + 4 < len) { + int tag = get16(expected, off); + int sz = get16(expected, off + 2); + int off0 = 0; + int len0 = extra.length; + boolean matched = false; + while (off0 + 4 < len0) { + int tag0 = get16(extra, off0); + int sz0 = get16(extra, off0 + 2); + if (tag == tag0 && sz == sz0) { + matched = true; + for (int i = 0; i < sz; i++) { + if (expected[off + i] != extra[off0 + i]) + matched = false; + } + break; + } + off0 += (4 + sz0); + } + if (!matched) + return false; + off += (4 + sz); + } + return true; + } + private static class Entry { private String name; private int method; @@ -109,7 +147,7 @@ public class Assortment { check((((comment == null) || comment.equals("")) && (e.getComment() == null)) || comment.equals(e.getComment())); - check(Arrays.equals(extra, e.getExtra())); + check(equalsExtraData(extra, e.getExtra())); check(Arrays.equals(data, getData(f, e))); check(e.getSize() == data.length); check((method == ZipEntry.DEFLATED) || @@ -129,8 +167,7 @@ public class Assortment { byte[] extra = (this.extra != null && this.extra.length == 0) ? null : this.extra; - check(Arrays.equals(extra, e.getExtra())); - + check(equalsExtraData(extra, e.getExtra())); check(name.equals(e.getName())); check(method == e.getMethod()); check(e.getSize() == -1 || e.getSize() == data.length); diff --git a/jdk/test/javax/crypto/Cipher/CipherStreamClose.java b/jdk/test/javax/crypto/Cipher/CipherStreamClose.java new file mode 100644 index 00000000000..1e8ff16331d --- /dev/null +++ b/jdk/test/javax/crypto/Cipher/CipherStreamClose.java @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 7160837 + * @summary Make sure Cipher IO streams don't call an extra doFinal if close() + * is called multiple times. Additionally, verify the input and output streams + * match with encryption and decryption with non-stream crypto.
+ */ + +import java.io.*; +import java.security.DigestOutputStream; +import java.security.DigestInputStream; +import java.security.MessageDigest; +import java.util.Arrays; + +import javax.crypto.Cipher; +import javax.crypto.CipherOutputStream; +import javax.crypto.CipherInputStream; +import javax.crypto.SecretKey; +import javax.crypto.spec.SecretKeySpec; +import javax.xml.bind.DatatypeConverter; + +public class CipherStreamClose { + private static final String message = "This is the sample message"; + static boolean debug = false; + + /* + * This method does encryption by cipher.doFinal(), and not with + * CipherOutputStream + */ + public static byte[] blockEncrypt(String message, SecretKey key) + throws Exception { + + byte[] data; + Cipher encCipher = Cipher.getInstance("AES/ECB/PKCS5Padding"); + encCipher.init(Cipher.ENCRYPT_MODE, key); + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (ObjectOutputStream oos = new ObjectOutputStream(bos)) { + oos.writeObject(message); + } + data = bos.toByteArray(); + } + + if (debug) { + System.out.println(DatatypeConverter.printHexBinary(data)); + } + return encCipher.doFinal(data); + + } + + /* + * This method does decryption by cipher.doFinal(), and not with + * CipherIntputStream + */ + public static Object blockDecrypt(byte[] data, SecretKey key) + throws Exception { + + Cipher c = Cipher.getInstance("AES/ECB/PKCS5Padding"); + c.init(Cipher.DECRYPT_MODE, key); + data = c.doFinal(data); + try (ByteArrayInputStream bis = new ByteArrayInputStream(data)) { + try (ObjectInputStream ois = new ObjectInputStream(bis)) { + return ois.readObject(); + } + } + } + + public static byte[] streamEncrypt(String message, SecretKey key, + MessageDigest digest) + throws Exception { + + byte[] data; + Cipher encCipher = Cipher.getInstance("AES/ECB/PKCS5Padding"); + encCipher.init(Cipher.ENCRYPT_MODE, key); + try (ByteArrayOutputStream bos = new ByteArrayOutputStream(); + DigestOutputStream dos = new DigestOutputStream(bos, digest); + CipherOutputStream cos = new CipherOutputStream(dos, encCipher)) { + try (ObjectOutputStream oos = new ObjectOutputStream(cos)) { + oos.writeObject(message); + } + data = bos.toByteArray(); + } + + if (debug) { + System.out.println(DatatypeConverter.printHexBinary(data)); + } + return data; + } + + public static Object streamDecrypt(byte[] data, SecretKey key, + MessageDigest digest) throws Exception { + + Cipher decCipher = Cipher.getInstance("AES/ECB/PKCS5Padding"); + decCipher.init(Cipher.DECRYPT_MODE, key); + digest.reset(); + try (ByteArrayInputStream bis = new ByteArrayInputStream(data); + DigestInputStream dis = new DigestInputStream(bis, digest); + CipherInputStream cis = new CipherInputStream(dis, decCipher)) { + + try (ObjectInputStream ois = new ObjectInputStream(cis)) { + return ois.readObject(); + } + } + } + + public static void main(String[] args) throws Exception { + MessageDigest digest = MessageDigest.getInstance("SHA1"); + SecretKeySpec key = new SecretKeySpec( + DatatypeConverter.parseHexBinary( + "12345678123456781234567812345678"), "AES"); + + // Run 'message' through streamEncrypt + byte[] se = streamEncrypt(message, key, digest); + // 'digest' already has the value from the stream, just finish the op + byte[] sd = digest.digest(); + digest.reset(); + // Run 'message' through blockEncrypt + byte[] be = blockEncrypt(message, key); + // Take digest of encrypted blockEncrypt result + byte[] bd = digest.digest(be); + // Verify both returned the same value + if (!Arrays.equals(sd, bd)) { + 
System.err.println("Stream: "+DatatypeConverter.printHexBinary(se)+ + "\t Digest: "+DatatypeConverter.printHexBinary(sd)); + System.err.println("Block : "+DatatypeConverter.printHexBinary(be)+ + "\t Digest: "+DatatypeConverter.printHexBinary(bd)); + throw new Exception("stream & block encryption does not match"); + } + + digest.reset(); + // Sanity check: Decrypt separately from stream to verify operations + String bm = (String) blockDecrypt(be, key); + if (message.compareTo(bm) != 0) { + System.err.println("Expected: "+message+"\nBlock: "+bm); + throw new Exception("Block decryption does not match expected"); + } + + // Have decryption and digest included in the object stream + String sm = (String) streamDecrypt(se, key, digest); + if (message.compareTo(sm) != 0) { + System.err.println("Expected: "+message+"\nStream: "+sm); + throw new Exception("Stream decryption does not match expected."); + } + } +} diff --git a/jdk/test/sun/java2d/X11SurfaceData/SharedMemoryPixmapsTest/SharedMemoryPixmapsTest.sh b/jdk/test/sun/java2d/X11SurfaceData/SharedMemoryPixmapsTest/SharedMemoryPixmapsTest.sh index bf9b5d18ab2..9c1b4f81156 100644 --- a/jdk/test/sun/java2d/X11SurfaceData/SharedMemoryPixmapsTest/SharedMemoryPixmapsTest.sh +++ b/jdk/test/sun/java2d/X11SurfaceData/SharedMemoryPixmapsTest/SharedMemoryPixmapsTest.sh @@ -1,3 +1,4 @@ +#!/bin/sh # # Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -21,7 +22,6 @@ # questions. # -#!/bin/sh # @test # @bug 6363434 6588884 # @summary Verify that shared memory pixmaps are not broken diff --git a/jdk/test/sun/management/jdp/JdpUnitTest.java b/jdk/test/sun/management/jdp/JdpUnitTest.java index fed4ae216c2..2bd61407378 100644 --- a/jdk/test/sun/management/jdp/JdpUnitTest.java +++ b/jdk/test/sun/management/jdp/JdpUnitTest.java @@ -32,6 +32,12 @@ import sun.management.jdp.JdpException; public class JdpUnitTest { + + static byte[] russian_name = {(byte)0xd0,(byte)0xbf,(byte)0xd1,(byte)0x80,(byte)0xd0,(byte)0xbe,(byte)0xd0,(byte)0xb2, + (byte)0xd0,(byte)0xb5,(byte)0xd1,(byte)0x80,(byte)0xd0,(byte)0xba,(byte)0xd0,(byte)0xb0, + (byte)0x20,(byte)0xd1,(byte)0x81,(byte)0xd0,(byte)0xb2,(byte)0xd1,(byte)0x8f,(byte)0xd0, + (byte)0xb7,(byte)0xd0,(byte)0xb8,(byte)0x0a}; + /** * This test tests that complete packet is build correctly */ @@ -42,7 +48,7 @@ public class JdpUnitTest { { JdpJmxPacket p1 = new JdpJmxPacket(UUID.randomUUID(), "fake://unit-test"); p1.setMainClass("FakeUnitTest"); - p1.setInstanceName("Fake"); + p1.setInstanceName( new String(russian_name,"UTF-8")); byte[] b = p1.getPacketData(); JdpJmxPacket p2 = new JdpJmxPacket(b); diff --git a/jdk/test/sun/net/www/protocol/http/HttpStreams.java b/jdk/test/sun/net/www/protocol/http/HttpStreams.java new file mode 100644 index 00000000000..897cd6572b5 --- /dev/null +++ b/jdk/test/sun/net/www/protocol/http/HttpStreams.java @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8011719
+ * @summary Basic checks to verify behavior of returned input streams
+ */
+
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import com.sun.net.httpserver.HttpServer;
+import java.io.*;
+import java.net.*;
+import java.nio.charset.StandardCharsets;
+import java.util.*;
+
+public class HttpStreams {
+
+    void client(String u) throws Exception {
+        byte[] ba = new byte[5];
+        HttpURLConnection urlc = (HttpURLConnection)(new URL(u)).openConnection();
+        int resp = urlc.getResponseCode();
+        InputStream is;
+        if (resp == 200)
+            is = urlc.getInputStream();
+        else
+            is = urlc.getErrorStream();
+
+        expectNoThrow(() -> { is.read(); }, "read on open stream should not throw: " + u);
+        expectNoThrow(() -> { is.close(); }, "close should never throw: " + u);
+        expectNoThrow(() -> { is.close(); }, "close should never throw: " + u);
+        expectThrow(() -> { is.read(); }, "read on closed stream should throw: " + u);
+        expectThrow(() -> { is.read(ba); }, "read on closed stream should throw: " + u);
+        expectThrow(() -> { is.read(ba, 0, 2); }, "read on closed stream should throw: " + u);
+    }
+
+    void test() throws Exception {
+        HttpServer server = null;
+        try {
+            server = startHttpServer();
+            String baseUrl = "http://localhost:" + server.getAddress().getPort() + "/";
+            client(baseUrl + "chunked/");
+            client(baseUrl + "fixed/");
+            client(baseUrl + "error/");
+            client(baseUrl + "chunkedError/");
+
+            // Test with a response cache
+            ResponseCache ch = ResponseCache.getDefault();
+            ResponseCache.setDefault(new TrivialCacheHandler());
+            try {
+                client(baseUrl + "chunked/");
+                client(baseUrl + "fixed/");
+                client(baseUrl + "error/");
+                client(baseUrl + "chunkedError/");
+            } finally {
+                ResponseCache.setDefault(ch);
+            }
+        } finally {
+            if (server != null)
+                server.stop(0);
+        }
+
+        System.out.println("passed: " + pass + ", failed: " + fail);
+        if (fail > 0)
+            throw new RuntimeException("Some tests failed, check the output");
+    }
+
+    public static void main(String[] args) throws Exception {
+        (new HttpStreams()).test();
+    }
+
+    // HTTP Server
+    HttpServer startHttpServer() throws IOException {
+        HttpServer httpServer = HttpServer.create(new InetSocketAddress(0), 0);
+        httpServer.createContext("/chunked/", new ChunkedHandler());
+        httpServer.createContext("/fixed/", new FixedHandler());
+        httpServer.createContext("/error/", new ErrorHandler());
+        httpServer.createContext("/chunkedError/", new ChunkedErrorHandler());
+        httpServer.start();
+        return httpServer;
+    }
+
+    static abstract class AbstractHandler implements HttpHandler {
+        @Override
+        public void handle(HttpExchange t) throws IOException {
+            try (InputStream is = t.getRequestBody()) {
+                while (is.read() != -1);
+            }
+            t.sendResponseHeaders(respCode(), length());
+            try (OutputStream os = t.getResponseBody()) {
+                os.write(message());
+            }
+            t.close();
+        }
+
+        abstract int respCode();
+        abstract int length();
+        abstract byte[] message();
+    }
+
+    static class ChunkedHandler extends AbstractHandler {
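+        // sendResponseHeaders treats a declared length of 0 as a request
+        // for chunked transfer encoding, hence length() returns 0 here.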
+        static final byte[] ba =
+            "Hello there from chunked handler!".getBytes(StandardCharsets.US_ASCII);
+        int respCode() { return 200; }
+        int length() { return 0; }
+        byte[] message() { return ba; }
+    }
+
+    static class FixedHandler extends AbstractHandler {
+        static final byte[] ba =
+            "Hello there from fixed handler!".getBytes(StandardCharsets.US_ASCII);
+        int respCode() { return 200; }
+        int length() { return ba.length; }
+        byte[] message() { return ba; }
+    }
+
+    static class ErrorHandler extends AbstractHandler {
+        static final byte[] ba =
+            "This is an error message from the server!".getBytes(StandardCharsets.US_ASCII);
+        int respCode() { return 400; }
+        int length() { return ba.length; }
+        byte[] message() { return ba; }
+    }
+
+    static class ChunkedErrorHandler extends ErrorHandler {
+        int length() { return 0; }
+    }
+
+    static class TrivialCacheHandler extends ResponseCache
+    {
+        public CacheResponse get(URI uri, String rqstMethod, Map rqstHeaders) {
+            return null;
+        }
+
+        public CacheRequest put(URI uri, URLConnection conn) {
+            return new TrivialCacheRequest();
+        }
+    }
+
+    static class TrivialCacheRequest extends CacheRequest
+    {
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        public void abort() {}
+        public OutputStream getBody() throws IOException { return baos; }
+    }
+
+    static interface ThrowableRunnable {
+        void run() throws IOException;
+    }
+
+    void expectThrow(ThrowableRunnable r, String msg) {
+        try { r.run(); fail(msg); } catch (IOException x) { pass(); }
+    }
+
+    void expectNoThrow(ThrowableRunnable r, String msg) {
+        try { r.run(); pass(); } catch (IOException x) { fail(msg, x); }
+    }
+
+    private int pass;
+    private int fail;
+    void pass() { pass++; }
+    void fail(String msg, Exception x) { System.out.println(msg); x.printStackTrace(); fail++; }
+    void fail(String msg) { System.out.println(msg); Thread.dumpStack(); fail++; }
+}
diff --git a/jdk/test/sun/rmi/rmic/manifestClassPath/run.sh b/jdk/test/sun/rmi/rmic/manifestClassPath/run.sh
index f20ab75527e..2dca19b4d18 100644
--- a/jdk/test/sun/rmi/rmic/manifestClassPath/run.sh
+++ b/jdk/test/sun/rmi/rmic/manifestClassPath/run.sh
@@ -1,3 +1,4 @@
+#!/bin/sh
 #
 # Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -21,7 +22,6 @@
 # questions.
 #
 
-#!/bin/sh
 # @test
 # @bug 6473331 6485027 6934615
 # @summary Test handling of the Class-Path attribute in jar file manifests
diff --git a/jdk/test/sun/rmi/rmic/newrmic/equivalence/batch.sh b/jdk/test/sun/rmi/rmic/newrmic/equivalence/batch.sh
index b1d581bedf1..020202a1818 100644
--- a/jdk/test/sun/rmi/rmic/newrmic/equivalence/batch.sh
+++ b/jdk/test/sun/rmi/rmic/newrmic/equivalence/batch.sh
@@ -1,3 +1,4 @@
+#!/bin/sh
 #
 # Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -21,7 +22,6 @@
 # questions.
 #
 
-#!/bin/sh
 #
 # Usage: batch.sh classpath classes...
 #
diff --git a/jdk/test/sun/security/pkcs11/tls/TestLeadingZeroesP11.java b/jdk/test/sun/security/pkcs11/tls/TestLeadingZeroesP11.java
new file mode 100644
index 00000000000..ffaac041d33
--- /dev/null
+++ b/jdk/test/sun/security/pkcs11/tls/TestLeadingZeroesP11.java
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8014618
+ * @summary Need to strip leading zeros in TlsPremasterSecret of DHKeyAgreement
+ * @library ..
+ * @author Pasi Eronen
+ */
+
+import java.io.*;
+import java.security.*;
+import java.security.spec.*;
+import java.security.interfaces.*;
+import javax.crypto.*;
+import javax.crypto.spec.*;
+import javax.crypto.interfaces.*;
+
+/**
+ * Test that leading zeroes are stripped in the TlsPremasterSecret case,
+ * but left as-is in other cases.
+ *
+ * We use pre-generated keypairs, since with randomly generated keypairs
+ * a leading zero occurs in only (roughly) 1 out of 256 cases.
+ */
+
+public class TestLeadingZeroesP11 extends PKCS11Test {
+
+    public static void main(String[] args) throws Exception {
+        main(new TestLeadingZeroesP11());
+    }
+
+    public void main(Provider p) throws Exception {
+
+        // decode pre-generated keypairs
+        KeyFactory kfac = KeyFactory.getInstance("DH", p);
+        PublicKey alicePubKey =
+            kfac.generatePublic(new X509EncodedKeySpec(alicePubKeyEnc));
+        PublicKey bobPubKey =
+            kfac.generatePublic(new X509EncodedKeySpec(bobPubKeyEnc));
+        PrivateKey alicePrivKey =
+            kfac.generatePrivate(new PKCS8EncodedKeySpec(alicePrivKeyEnc));
+        PrivateKey bobPrivKey =
+            kfac.generatePrivate(new PKCS8EncodedKeySpec(bobPrivKeyEnc));
+
+        // generate normal shared secret
+        KeyAgreement aliceKeyAgree = KeyAgreement.getInstance("DH", p);
+        aliceKeyAgree.init(alicePrivKey);
+        aliceKeyAgree.doPhase(bobPubKey, true);
+        byte[] sharedSecret = aliceKeyAgree.generateSecret();
+        System.out.println("shared secret:\n" + toHexString(sharedSecret));
+
+        // verify that leading zero is present
+        if (sharedSecret.length != 128) {
+            throw new Exception("Unexpected shared secret length");
+        }
+        if (sharedSecret[0] != 0) {
+            throw new Exception("First byte is not zero as expected");
+        }
+
+        // now, test TLS premaster secret
+        aliceKeyAgree.init(alicePrivKey);
+        aliceKeyAgree.doPhase(bobPubKey, true);
+        byte[] tlsPremasterSecret =
+            aliceKeyAgree.generateSecret("TlsPremasterSecret").getEncoded();
+        System.out.println(
+            "tls premaster secret:\n" + toHexString(tlsPremasterSecret));
+
+        // check that leading zero has been stripped
+        if (tlsPremasterSecret.length != 127) {
+            throw new Exception("Unexpected TLS premaster secret length");
+        }
+        if (tlsPremasterSecret[0] == 0) {
+            throw new Exception("First byte is zero");
+        }
+        for (int i = 0; i < tlsPremasterSecret.length; i++) {
+            if (tlsPremasterSecret[i] != sharedSecret[i+1]) {
+                throw new Exception("Shared secrets differ");
+            }
+        }
+
+    }
+
+    /*
+     * Converts a byte to a hex digit pair and writes it to the supplied buffer
+     */
+    private
void byte2hex(byte b, StringBuffer buf) { + char[] hexChars = { '0', '1', '2', '3', '4', '5', '6', '7', '8', + '9', 'A', 'B', 'C', 'D', 'E', 'F' }; + int high = ((b & 0xf0) >> 4); + int low = (b & 0x0f); + buf.append(hexChars[high]); + buf.append(hexChars[low]); + } + + /* + * Converts a byte array to hex string + */ + private String toHexString(byte[] block) { + StringBuffer buf = new StringBuffer(); + + int len = block.length; + + for (int i = 0; i < len; i++) { + byte2hex(block[i], buf); + if (i < len-1) { + buf.append(":"); + } + } + return buf.toString(); + } + + private static final byte alicePubKeyEnc[] = { + (byte)0x30, (byte)0x82, (byte)0x01, (byte)0x24, + (byte)0x30, (byte)0x81, (byte)0x99, (byte)0x06, + (byte)0x09, (byte)0x2A, (byte)0x86, (byte)0x48, + (byte)0x86, (byte)0xF7, (byte)0x0D, (byte)0x01, + (byte)0x03, (byte)0x01, (byte)0x30, (byte)0x81, + (byte)0x8B, (byte)0x02, (byte)0x81, (byte)0x81, + (byte)0x00, (byte)0xF4, (byte)0x88, (byte)0xFD, + (byte)0x58, (byte)0x4E, (byte)0x49, (byte)0xDB, + (byte)0xCD, (byte)0x20, (byte)0xB4, (byte)0x9D, + (byte)0xE4, (byte)0x91, (byte)0x07, (byte)0x36, + (byte)0x6B, (byte)0x33, (byte)0x6C, (byte)0x38, + (byte)0x0D, (byte)0x45, (byte)0x1D, (byte)0x0F, + (byte)0x7C, (byte)0x88, (byte)0xB3, (byte)0x1C, + (byte)0x7C, (byte)0x5B, (byte)0x2D, (byte)0x8E, + (byte)0xF6, (byte)0xF3, (byte)0xC9, (byte)0x23, + (byte)0xC0, (byte)0x43, (byte)0xF0, (byte)0xA5, + (byte)0x5B, (byte)0x18, (byte)0x8D, (byte)0x8E, + (byte)0xBB, (byte)0x55, (byte)0x8C, (byte)0xB8, + (byte)0x5D, (byte)0x38, (byte)0xD3, (byte)0x34, + (byte)0xFD, (byte)0x7C, (byte)0x17, (byte)0x57, + (byte)0x43, (byte)0xA3, (byte)0x1D, (byte)0x18, + (byte)0x6C, (byte)0xDE, (byte)0x33, (byte)0x21, + (byte)0x2C, (byte)0xB5, (byte)0x2A, (byte)0xFF, + (byte)0x3C, (byte)0xE1, (byte)0xB1, (byte)0x29, + (byte)0x40, (byte)0x18, (byte)0x11, (byte)0x8D, + (byte)0x7C, (byte)0x84, (byte)0xA7, (byte)0x0A, + (byte)0x72, (byte)0xD6, (byte)0x86, (byte)0xC4, + (byte)0x03, (byte)0x19, (byte)0xC8, (byte)0x07, + (byte)0x29, (byte)0x7A, (byte)0xCA, (byte)0x95, + (byte)0x0C, (byte)0xD9, (byte)0x96, (byte)0x9F, + (byte)0xAB, (byte)0xD0, (byte)0x0A, (byte)0x50, + (byte)0x9B, (byte)0x02, (byte)0x46, (byte)0xD3, + (byte)0x08, (byte)0x3D, (byte)0x66, (byte)0xA4, + (byte)0x5D, (byte)0x41, (byte)0x9F, (byte)0x9C, + (byte)0x7C, (byte)0xBD, (byte)0x89, (byte)0x4B, + (byte)0x22, (byte)0x19, (byte)0x26, (byte)0xBA, + (byte)0xAB, (byte)0xA2, (byte)0x5E, (byte)0xC3, + (byte)0x55, (byte)0xE9, (byte)0x2F, (byte)0x78, + (byte)0xC7, (byte)0x02, (byte)0x01, (byte)0x02, + (byte)0x02, (byte)0x02, (byte)0x02, (byte)0x00, + (byte)0x03, (byte)0x81, (byte)0x85, (byte)0x00, + (byte)0x02, (byte)0x81, (byte)0x81, (byte)0x00, + (byte)0xEE, (byte)0xD6, (byte)0xB1, (byte)0xA3, + (byte)0xB4, (byte)0x78, (byte)0x2B, (byte)0x35, + (byte)0xEF, (byte)0xCD, (byte)0x17, (byte)0x86, + (byte)0x63, (byte)0x2B, (byte)0x97, (byte)0x0E, + (byte)0x7A, (byte)0xD1, (byte)0xFF, (byte)0x7A, + (byte)0xEB, (byte)0x57, (byte)0x61, (byte)0xA1, + (byte)0xF7, (byte)0x90, (byte)0x11, (byte)0xA7, + (byte)0x79, (byte)0x28, (byte)0x69, (byte)0xBA, + (byte)0xA7, (byte)0xB2, (byte)0x37, (byte)0x17, + (byte)0xAE, (byte)0x3C, (byte)0x92, (byte)0x89, + (byte)0x88, (byte)0xE5, (byte)0x7E, (byte)0x8E, + (byte)0xF0, (byte)0x24, (byte)0xD0, (byte)0xE1, + (byte)0xC4, (byte)0xB0, (byte)0x26, (byte)0x5A, + (byte)0x1E, (byte)0xBD, (byte)0xA0, (byte)0xCF, + (byte)0x3E, (byte)0x97, (byte)0x2A, (byte)0x13, + (byte)0x92, (byte)0x3B, (byte)0x39, (byte)0xD0, + (byte)0x1D, (byte)0xA3, 
(byte)0x6B, (byte)0x3E, + (byte)0xC2, (byte)0xBB, (byte)0x14, (byte)0xB6, + (byte)0xE2, (byte)0x4C, (byte)0x0E, (byte)0x5B, + (byte)0x4B, (byte)0xA4, (byte)0x9D, (byte)0xA6, + (byte)0x21, (byte)0xB0, (byte)0xF9, (byte)0xDE, + (byte)0x55, (byte)0xAE, (byte)0x5C, (byte)0x29, + (byte)0x0E, (byte)0xC1, (byte)0xFC, (byte)0xBA, + (byte)0x51, (byte)0xD3, (byte)0xB6, (byte)0x6D, + (byte)0x75, (byte)0x72, (byte)0xDF, (byte)0x43, + (byte)0xAB, (byte)0x94, (byte)0x21, (byte)0x6E, + (byte)0x0C, (byte)0xD1, (byte)0x93, (byte)0x54, + (byte)0x56, (byte)0x7D, (byte)0x4B, (byte)0x90, + (byte)0xF1, (byte)0x94, (byte)0x45, (byte)0xD4, + (byte)0x2A, (byte)0x71, (byte)0xA1, (byte)0xB8, + (byte)0xDD, (byte)0xAA, (byte)0x05, (byte)0xF0, + (byte)0x27, (byte)0x37, (byte)0xBD, (byte)0x44 + }; + + private static final byte alicePrivKeyEnc[] = { + (byte)0x30, (byte)0x81, (byte)0xE3, (byte)0x02, + (byte)0x01, (byte)0x00, (byte)0x30, (byte)0x81, + (byte)0x99, (byte)0x06, (byte)0x09, (byte)0x2A, + (byte)0x86, (byte)0x48, (byte)0x86, (byte)0xF7, + (byte)0x0D, (byte)0x01, (byte)0x03, (byte)0x01, + (byte)0x30, (byte)0x81, (byte)0x8B, (byte)0x02, + (byte)0x81, (byte)0x81, (byte)0x00, (byte)0xF4, + (byte)0x88, (byte)0xFD, (byte)0x58, (byte)0x4E, + (byte)0x49, (byte)0xDB, (byte)0xCD, (byte)0x20, + (byte)0xB4, (byte)0x9D, (byte)0xE4, (byte)0x91, + (byte)0x07, (byte)0x36, (byte)0x6B, (byte)0x33, + (byte)0x6C, (byte)0x38, (byte)0x0D, (byte)0x45, + (byte)0x1D, (byte)0x0F, (byte)0x7C, (byte)0x88, + (byte)0xB3, (byte)0x1C, (byte)0x7C, (byte)0x5B, + (byte)0x2D, (byte)0x8E, (byte)0xF6, (byte)0xF3, + (byte)0xC9, (byte)0x23, (byte)0xC0, (byte)0x43, + (byte)0xF0, (byte)0xA5, (byte)0x5B, (byte)0x18, + (byte)0x8D, (byte)0x8E, (byte)0xBB, (byte)0x55, + (byte)0x8C, (byte)0xB8, (byte)0x5D, (byte)0x38, + (byte)0xD3, (byte)0x34, (byte)0xFD, (byte)0x7C, + (byte)0x17, (byte)0x57, (byte)0x43, (byte)0xA3, + (byte)0x1D, (byte)0x18, (byte)0x6C, (byte)0xDE, + (byte)0x33, (byte)0x21, (byte)0x2C, (byte)0xB5, + (byte)0x2A, (byte)0xFF, (byte)0x3C, (byte)0xE1, + (byte)0xB1, (byte)0x29, (byte)0x40, (byte)0x18, + (byte)0x11, (byte)0x8D, (byte)0x7C, (byte)0x84, + (byte)0xA7, (byte)0x0A, (byte)0x72, (byte)0xD6, + (byte)0x86, (byte)0xC4, (byte)0x03, (byte)0x19, + (byte)0xC8, (byte)0x07, (byte)0x29, (byte)0x7A, + (byte)0xCA, (byte)0x95, (byte)0x0C, (byte)0xD9, + (byte)0x96, (byte)0x9F, (byte)0xAB, (byte)0xD0, + (byte)0x0A, (byte)0x50, (byte)0x9B, (byte)0x02, + (byte)0x46, (byte)0xD3, (byte)0x08, (byte)0x3D, + (byte)0x66, (byte)0xA4, (byte)0x5D, (byte)0x41, + (byte)0x9F, (byte)0x9C, (byte)0x7C, (byte)0xBD, + (byte)0x89, (byte)0x4B, (byte)0x22, (byte)0x19, + (byte)0x26, (byte)0xBA, (byte)0xAB, (byte)0xA2, + (byte)0x5E, (byte)0xC3, (byte)0x55, (byte)0xE9, + (byte)0x2F, (byte)0x78, (byte)0xC7, (byte)0x02, + (byte)0x01, (byte)0x02, (byte)0x02, (byte)0x02, + (byte)0x02, (byte)0x00, (byte)0x04, (byte)0x42, + (byte)0x02, (byte)0x40, (byte)0x36, (byte)0x4D, + (byte)0xD0, (byte)0x58, (byte)0x64, (byte)0x91, + (byte)0x78, (byte)0xA2, (byte)0x4B, (byte)0x79, + (byte)0x46, (byte)0xFE, (byte)0xC9, (byte)0xD9, + (byte)0xCA, (byte)0x5C, (byte)0xF9, (byte)0xFD, + (byte)0x6C, (byte)0x5D, (byte)0x76, (byte)0x3A, + (byte)0x41, (byte)0x6D, (byte)0x44, (byte)0x62, + (byte)0x75, (byte)0x93, (byte)0x81, (byte)0x93, + (byte)0x00, (byte)0x4C, (byte)0xB1, (byte)0xD8, + (byte)0x7D, (byte)0x9D, (byte)0xF3, (byte)0x16, + (byte)0x2C, (byte)0x6C, (byte)0x9F, (byte)0x7A, + (byte)0x84, (byte)0xA3, (byte)0x7A, (byte)0xC1, + (byte)0x4F, (byte)0x60, (byte)0xE3, (byte)0xB5, + (byte)0x86, 
(byte)0x28, (byte)0x08, (byte)0x4D, + (byte)0x94, (byte)0xB6, (byte)0x04, (byte)0x0D, + (byte)0xAC, (byte)0xBD, (byte)0x1F, (byte)0x42, + (byte)0x8F, (byte)0x1B + }; + + private static final byte bobPubKeyEnc[] = { + (byte)0x30, (byte)0x82, (byte)0x01, (byte)0x23, + (byte)0x30, (byte)0x81, (byte)0x99, (byte)0x06, + (byte)0x09, (byte)0x2A, (byte)0x86, (byte)0x48, + (byte)0x86, (byte)0xF7, (byte)0x0D, (byte)0x01, + (byte)0x03, (byte)0x01, (byte)0x30, (byte)0x81, + (byte)0x8B, (byte)0x02, (byte)0x81, (byte)0x81, + (byte)0x00, (byte)0xF4, (byte)0x88, (byte)0xFD, + (byte)0x58, (byte)0x4E, (byte)0x49, (byte)0xDB, + (byte)0xCD, (byte)0x20, (byte)0xB4, (byte)0x9D, + (byte)0xE4, (byte)0x91, (byte)0x07, (byte)0x36, + (byte)0x6B, (byte)0x33, (byte)0x6C, (byte)0x38, + (byte)0x0D, (byte)0x45, (byte)0x1D, (byte)0x0F, + (byte)0x7C, (byte)0x88, (byte)0xB3, (byte)0x1C, + (byte)0x7C, (byte)0x5B, (byte)0x2D, (byte)0x8E, + (byte)0xF6, (byte)0xF3, (byte)0xC9, (byte)0x23, + (byte)0xC0, (byte)0x43, (byte)0xF0, (byte)0xA5, + (byte)0x5B, (byte)0x18, (byte)0x8D, (byte)0x8E, + (byte)0xBB, (byte)0x55, (byte)0x8C, (byte)0xB8, + (byte)0x5D, (byte)0x38, (byte)0xD3, (byte)0x34, + (byte)0xFD, (byte)0x7C, (byte)0x17, (byte)0x57, + (byte)0x43, (byte)0xA3, (byte)0x1D, (byte)0x18, + (byte)0x6C, (byte)0xDE, (byte)0x33, (byte)0x21, + (byte)0x2C, (byte)0xB5, (byte)0x2A, (byte)0xFF, + (byte)0x3C, (byte)0xE1, (byte)0xB1, (byte)0x29, + (byte)0x40, (byte)0x18, (byte)0x11, (byte)0x8D, + (byte)0x7C, (byte)0x84, (byte)0xA7, (byte)0x0A, + (byte)0x72, (byte)0xD6, (byte)0x86, (byte)0xC4, + (byte)0x03, (byte)0x19, (byte)0xC8, (byte)0x07, + (byte)0x29, (byte)0x7A, (byte)0xCA, (byte)0x95, + (byte)0x0C, (byte)0xD9, (byte)0x96, (byte)0x9F, + (byte)0xAB, (byte)0xD0, (byte)0x0A, (byte)0x50, + (byte)0x9B, (byte)0x02, (byte)0x46, (byte)0xD3, + (byte)0x08, (byte)0x3D, (byte)0x66, (byte)0xA4, + (byte)0x5D, (byte)0x41, (byte)0x9F, (byte)0x9C, + (byte)0x7C, (byte)0xBD, (byte)0x89, (byte)0x4B, + (byte)0x22, (byte)0x19, (byte)0x26, (byte)0xBA, + (byte)0xAB, (byte)0xA2, (byte)0x5E, (byte)0xC3, + (byte)0x55, (byte)0xE9, (byte)0x2F, (byte)0x78, + (byte)0xC7, (byte)0x02, (byte)0x01, (byte)0x02, + (byte)0x02, (byte)0x02, (byte)0x02, (byte)0x00, + (byte)0x03, (byte)0x81, (byte)0x84, (byte)0x00, + (byte)0x02, (byte)0x81, (byte)0x80, (byte)0x2C, + (byte)0x40, (byte)0xFA, (byte)0xF6, (byte)0xA6, + (byte)0xF8, (byte)0xAC, (byte)0xC2, (byte)0x4F, + (byte)0xCD, (byte)0xC7, (byte)0x37, (byte)0x93, + (byte)0xE5, (byte)0xE4, (byte)0x5E, (byte)0x18, + (byte)0x14, (byte)0xE6, (byte)0x50, (byte)0xDA, + (byte)0x55, (byte)0x38, (byte)0x5D, (byte)0x24, + (byte)0xF5, (byte)0x42, (byte)0x68, (byte)0x5F, + (byte)0xF5, (byte)0x15, (byte)0xC8, (byte)0x9B, + (byte)0x5D, (byte)0x06, (byte)0x3D, (byte)0xE1, + (byte)0x52, (byte)0x2F, (byte)0x98, (byte)0xFF, + (byte)0x37, (byte)0xBB, (byte)0x75, (byte)0x48, + (byte)0x48, (byte)0xE9, (byte)0x65, (byte)0x84, + (byte)0x37, (byte)0xBB, (byte)0xB3, (byte)0xE9, + (byte)0x36, (byte)0x01, (byte)0xB4, (byte)0x6A, + (byte)0x1C, (byte)0xB2, (byte)0x11, (byte)0x82, + (byte)0xCE, (byte)0x3D, (byte)0x65, (byte)0xE5, + (byte)0x3C, (byte)0x89, (byte)0xE9, (byte)0x52, + (byte)0x19, (byte)0xBD, (byte)0x58, (byte)0xF6, + (byte)0xA2, (byte)0x03, (byte)0xA8, (byte)0xB2, + (byte)0xA5, (byte)0xDB, (byte)0xEB, (byte)0xF5, + (byte)0x94, (byte)0xF9, (byte)0x46, (byte)0xBE, + (byte)0x45, (byte)0x4C, (byte)0x65, (byte)0xD2, + (byte)0xD1, (byte)0xCF, (byte)0xFF, (byte)0xFF, + (byte)0xFA, (byte)0x38, (byte)0xF1, (byte)0x72, + (byte)0xAB, (byte)0xB9, (byte)0x14, 
(byte)0x4E, + (byte)0xF5, (byte)0xF0, (byte)0x7A, (byte)0x8E, + (byte)0x45, (byte)0xFD, (byte)0x5B, (byte)0xF9, + (byte)0xA2, (byte)0x97, (byte)0x1B, (byte)0xAE, + (byte)0x2C, (byte)0x7B, (byte)0x6B, (byte)0x7C, + (byte)0x98, (byte)0xFE, (byte)0x58, (byte)0xDD, + (byte)0xBE, (byte)0xF6, (byte)0x1C, (byte)0x8E, + (byte)0xD0, (byte)0xA1, (byte)0x72 + }; + + private static final byte bobPrivKeyEnc[] = { + (byte)0x30, (byte)0x81, (byte)0xE4, (byte)0x02, + (byte)0x01, (byte)0x00, (byte)0x30, (byte)0x81, + (byte)0x99, (byte)0x06, (byte)0x09, (byte)0x2A, + (byte)0x86, (byte)0x48, (byte)0x86, (byte)0xF7, + (byte)0x0D, (byte)0x01, (byte)0x03, (byte)0x01, + (byte)0x30, (byte)0x81, (byte)0x8B, (byte)0x02, + (byte)0x81, (byte)0x81, (byte)0x00, (byte)0xF4, + (byte)0x88, (byte)0xFD, (byte)0x58, (byte)0x4E, + (byte)0x49, (byte)0xDB, (byte)0xCD, (byte)0x20, + (byte)0xB4, (byte)0x9D, (byte)0xE4, (byte)0x91, + (byte)0x07, (byte)0x36, (byte)0x6B, (byte)0x33, + (byte)0x6C, (byte)0x38, (byte)0x0D, (byte)0x45, + (byte)0x1D, (byte)0x0F, (byte)0x7C, (byte)0x88, + (byte)0xB3, (byte)0x1C, (byte)0x7C, (byte)0x5B, + (byte)0x2D, (byte)0x8E, (byte)0xF6, (byte)0xF3, + (byte)0xC9, (byte)0x23, (byte)0xC0, (byte)0x43, + (byte)0xF0, (byte)0xA5, (byte)0x5B, (byte)0x18, + (byte)0x8D, (byte)0x8E, (byte)0xBB, (byte)0x55, + (byte)0x8C, (byte)0xB8, (byte)0x5D, (byte)0x38, + (byte)0xD3, (byte)0x34, (byte)0xFD, (byte)0x7C, + (byte)0x17, (byte)0x57, (byte)0x43, (byte)0xA3, + (byte)0x1D, (byte)0x18, (byte)0x6C, (byte)0xDE, + (byte)0x33, (byte)0x21, (byte)0x2C, (byte)0xB5, + (byte)0x2A, (byte)0xFF, (byte)0x3C, (byte)0xE1, + (byte)0xB1, (byte)0x29, (byte)0x40, (byte)0x18, + (byte)0x11, (byte)0x8D, (byte)0x7C, (byte)0x84, + (byte)0xA7, (byte)0x0A, (byte)0x72, (byte)0xD6, + (byte)0x86, (byte)0xC4, (byte)0x03, (byte)0x19, + (byte)0xC8, (byte)0x07, (byte)0x29, (byte)0x7A, + (byte)0xCA, (byte)0x95, (byte)0x0C, (byte)0xD9, + (byte)0x96, (byte)0x9F, (byte)0xAB, (byte)0xD0, + (byte)0x0A, (byte)0x50, (byte)0x9B, (byte)0x02, + (byte)0x46, (byte)0xD3, (byte)0x08, (byte)0x3D, + (byte)0x66, (byte)0xA4, (byte)0x5D, (byte)0x41, + (byte)0x9F, (byte)0x9C, (byte)0x7C, (byte)0xBD, + (byte)0x89, (byte)0x4B, (byte)0x22, (byte)0x19, + (byte)0x26, (byte)0xBA, (byte)0xAB, (byte)0xA2, + (byte)0x5E, (byte)0xC3, (byte)0x55, (byte)0xE9, + (byte)0x2F, (byte)0x78, (byte)0xC7, (byte)0x02, + (byte)0x01, (byte)0x02, (byte)0x02, (byte)0x02, + (byte)0x02, (byte)0x00, (byte)0x04, (byte)0x43, + (byte)0x02, (byte)0x41, (byte)0x00, (byte)0xE0, + (byte)0x31, (byte)0xE7, (byte)0x77, (byte)0xB8, + (byte)0xD0, (byte)0x7E, (byte)0x0A, (byte)0x9B, + (byte)0x94, (byte)0xD5, (byte)0x3D, (byte)0x33, + (byte)0x62, (byte)0x32, (byte)0x51, (byte)0xCE, + (byte)0x74, (byte)0x5C, (byte)0xA5, (byte)0x72, + (byte)0xD9, (byte)0x36, (byte)0xF3, (byte)0x8A, + (byte)0x3F, (byte)0x8B, (byte)0xC6, (byte)0xFE, + (byte)0xEF, (byte)0x94, (byte)0x8B, (byte)0x50, + (byte)0x41, (byte)0x9B, (byte)0x14, (byte)0xC8, + (byte)0xE9, (byte)0x1F, (byte)0x24, (byte)0x1F, + (byte)0x65, (byte)0x8E, (byte)0xD3, (byte)0x85, + (byte)0xD0, (byte)0x68, (byte)0x6C, (byte)0xF1, + (byte)0x79, (byte)0x45, (byte)0xD0, (byte)0x06, + (byte)0xA4, (byte)0xB8, (byte)0xE0, (byte)0x64, + (byte)0xF5, (byte)0x38, (byte)0x72, (byte)0x97, + (byte)0x00, (byte)0x23, (byte)0x5F + }; +} + diff --git a/jdk/test/tools/launcher/MultipleJRE.sh b/jdk/test/tools/launcher/MultipleJRE.sh index 0c8e95d29fb..799d003e08f 100644 --- a/jdk/test/tools/launcher/MultipleJRE.sh +++ b/jdk/test/tools/launcher/MultipleJRE.sh @@ -1,3 +1,4 @@ +#!/bin/sh # 
@test MultipleJRE.sh
# @bug 4811102 4953711 4955505 4956301 4991229 4998210 5018605 6387069 6733959
# @build PrintVersion