From b22f9684326910c85512a3f8a828aeae342025c7 Mon Sep 17 00:00:00 2001
Subject: Add a specialized HashMap variant for long keys
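
Introduce the experimental flag -XX:+UsePrimHashMap. When it is enabled on a
supported HiSilicon AArch64 CPU, java.util.HashMap and java.util.LinkedHashMap
are loaded from primcollection.jar (looked up next to the JDK's lib directory)
instead of from the jimage or the CDS archive. That variant backs a map whose
keys are all java.lang.Long with flat arrays (primMapKeys, primMapValues,
primMapValids) instead of the usual node table. CDS heap archiving is disabled
while the flag is on, and the flag is ignored under -Xshare:dump.

A minimal usage sketch (the class name PrimMapDemo is illustrative; the
rollback to the node table for non-Long keys is inferred from putVal's
"rollback state" parameter):

    // Run with: java -XX:+UnlockExperimentalVMOptions -XX:+UsePrimHashMap PrimMapDemo
    import java.util.HashMap;
    import java.util.Map;

    public class PrimMapDemo {
        public static void main(String[] args) {
            Map<Long, String> m = new HashMap<>();
            m.put(42L, "answer");            // Long key: stored in the flat prim arrays
            System.out.println(m.get(42L));  // array probe instead of a bucket walk
        }
    }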
---
.../cpu/aarch64/vm_version_aarch64.hpp | 5 +
src/hotspot/share/cds/filemap.cpp | 4 +-
src/hotspot/share/cds/heapShared.hpp | 3 +-
src/hotspot/share/cds/metaspaceShared.cpp | 7 +-
src/hotspot/share/classfile/classLoader.cpp | 51 +
src/hotspot/share/classfile/classLoader.hpp | 3 +
.../classfile/systemDictionaryShared.cpp | 3 +
src/hotspot/share/oops/symbol.hpp | 4 +
src/hotspot/share/runtime/arguments.cpp | 7 +
src/hotspot/share/runtime/globals.hpp | 6 +
src/hotspot/share/runtime/java.cpp | 6 +
src/hotspot/share/runtime/java.hpp | 2 +
src/hotspot/share/runtime/vm_version.cpp | 8 +
test/jdk/java/util/HashMap/HashMap.java | 3798 +++++++++++++++++
test/jdk/java/util/HashMap/LinkedHashMap.java | 798 ++++
15 files changed, 4699 insertions(+), 6 deletions(-)
create mode 100644 test/jdk/java/util/HashMap/HashMap.java
create mode 100644 test/jdk/java/util/HashMap/LinkedHashMap.java
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
index 46c77e48b..eac0db870 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
@@ -142,6 +142,11 @@ public:
static int cpu_variant() { return _variant; }
static int cpu_revision() { return _revision; }
+ static bool is_hisi_enabled() {
+ return (_cpu == CPU_HISILICON &&
+ (_model == 0xd01 || _model == 0xd02 || _model == 0xd22 || _model == 0xd45));
+ }
+
static bool model_is(int cpu_model) {
return _model == cpu_model || _model2 == cpu_model;
}
diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp
index 76e078d12..13a9f22a9 100644
--- a/src/hotspot/share/cds/filemap.cpp
+++ b/src/hotspot/share/cds/filemap.cpp
@@ -1807,8 +1807,8 @@ MemRegion FileMapInfo::get_heap_regions_range_with_current_oop_encoding_mode() {
// open archive objects.
void FileMapInfo::map_heap_regions_impl() {
if (!HeapShared::is_heap_object_archiving_allowed()) {
- log_info(cds)("CDS heap data is being ignored. UseG1GC, "
- "UseCompressedOops and UseCompressedClassPointers are required.");
+ log_info(cds)("CDS heap data is being ignored. UseG1GC, UseCompressedOops, "
+ "UseCompressedClassPointers and !UsePrimHashMap are required.");
return;
}
diff --git a/src/hotspot/share/cds/heapShared.hpp b/src/hotspot/share/cds/heapShared.hpp
index 74de74d6c..f7a1a004f 100644
--- a/src/hotspot/share/cds/heapShared.hpp
+++ b/src/hotspot/share/cds/heapShared.hpp
@@ -352,7 +352,8 @@ private:
static void run_full_gc_in_vm_thread() NOT_CDS_JAVA_HEAP_RETURN;
static bool is_heap_object_archiving_allowed() {
- CDS_JAVA_HEAP_ONLY(return (UseG1GC && UseCompressedOops && UseCompressedClassPointers);)
+ CDS_JAVA_HEAP_ONLY(return (UseG1GC && UseCompressedOops &&
+ UseCompressedClassPointers && !UsePrimHashMap);)
NOT_CDS_JAVA_HEAP(return false;)
}
diff --git a/src/hotspot/share/cds/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp
index f4c8d0f81..748166480 100644
--- a/src/hotspot/share/cds/metaspaceShared.cpp
+++ b/src/hotspot/share/cds/metaspaceShared.cpp
@@ -785,10 +785,11 @@ void VM_PopulateDumpSharedSpace::dump_java_heap_objects(GrowableArray<Klass*>* k
if(!HeapShared::is_heap_object_archiving_allowed()) {
log_info(cds)(
"Archived java heap is not supported as UseG1GC, "
- "UseCompressedOops and UseCompressedClassPointers are required."
- "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
+ "UseCompressedOops, UseCompressedClassPointers and !UsePrimHashMap are required."
+ "Current settings: UseG1GC=%s, UseCompressedOops=%s, "
+ "UseCompressedClassPointers=%s, UsePrimHashMap=%s",
BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
- BOOL_TO_STR(UseCompressedClassPointers));
+ BOOL_TO_STR(UseCompressedClassPointers), BOOL_TO_STR(UsePrimHashMap));
return;
}
// Find all the interned strings that should be dumped.
diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp
index 6e9b9bcdf..6480cd6f6 100644
--- a/src/hotspot/share/classfile/classLoader.cpp
+++ b/src/hotspot/share/classfile/classLoader.cpp
@@ -142,6 +142,9 @@ ClassPathEntry* ClassLoader::_jrt_entry = NULL;
ClassPathEntry* volatile ClassLoader::_first_append_entry_list = NULL;
ClassPathEntry* volatile ClassLoader::_last_append_entry = NULL;
+
+ClassPathEntry* ClassLoader::_prim_collection_entry = NULL;
+
#if INCLUDE_CDS
ClassPathEntry* ClassLoader::_app_classpath_entries = NULL;
ClassPathEntry* ClassLoader::_last_app_classpath_entry = NULL;
@@ -628,6 +631,36 @@ bool ClassLoader::is_in_patch_mod_entries(Symbol* module_name) {
return false;
}
+// Set up _prim_collection_entry when UsePrimHashMap is enabled
+void ClassLoader::set_prim_collection_path(JavaThread* current) {
+ if (!UsePrimHashMap) {
+ return;
+ }
+ const char *prim_collection_jar = "primcollection.jar";
+ char jvm_path[JVM_MAXPATHLEN];
+ os::jvm_path(jvm_path, sizeof(jvm_path));
+ const int trunc_times = 2; // truncate path/lib/server/libjvm.so to path/lib
+ for (int i = 0; i < trunc_times; ++i) {
+ char *end = strrchr(jvm_path, *os::file_separator());
+ if (end != NULL) *end = '\0';
+ }
+
+ size_t jvm_path_len = strlen(jvm_path);
+ if (jvm_path_len < JVM_MAXPATHLEN - strlen(os::file_separator()) - strlen(prim_collection_jar)) {
+ jio_snprintf(jvm_path + jvm_path_len,
+ JVM_MAXPATHLEN - jvm_path_len,
+ "%s%s", os::file_separator(), prim_collection_jar);
+ }
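+ // If the full path would not fit, the jar name is never appended and the
+ // open below fails, which turns UsePrimHashMap off.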
+ char* error_msg = NULL;
+ jzfile* zip = open_zip_file(jvm_path, &error_msg, current);
+ if (zip != NULL && error_msg == NULL) {
+ _prim_collection_entry = new ClassPathZipEntry(zip, jvm_path, false, false);
+ log_info(class, load)("primcollection path: %s", jvm_path);
+ } else {
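+ // primcollection.jar is missing or unreadable: disable the feature and
+ // fall back to the stock HashMap from the jimage.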
+ UsePrimHashMap = false;
+ }
+}
+
// Set up the _jrt_entry if present and boot append path
void ClassLoader::setup_bootstrap_search_path_impl(JavaThread* current, const char *class_path) {
ResourceMark rm(current);
@@ -677,6 +710,8 @@ void ClassLoader::setup_bootstrap_search_path_impl(JavaThread* current, const ch
update_class_path_entry_list(current, path, false, true, false);
}
}
+
+ set_prim_collection_path(current);
}
// During an exploded modules build, each module defined to the boot loader
@@ -1183,6 +1218,22 @@ InstanceKlass* ClassLoader::load_class(Symbol* name, bool search_append_only, TR
stream = search_module_entries(THREAD, _patch_mod_entries, class_name, file_name);
}
}
+ // Load Attempt: primcollection.jar for PrimHashMap-related classes
+ if (UsePrimHashMap && (NULL == stream) && name->is_primhashmap_related_class()) {
+ static bool is_first_loading = true;
+ static bool is_first_loading_succeeded = false;
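+ // Enforce an all-or-none invariant: every PrimHashMap-related class must
+ // resolve from primcollection.jar, or none of them may.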
+ stream = _prim_collection_entry->open_stream(THREAD, file_name);
+ if (!is_first_loading) {
+ // exit when some loads succeed while some fail
+ if ((is_first_loading_succeeded && stream == nullptr) ||
+ (!is_first_loading_succeeded && stream != nullptr)) {
+ vm_exit_during_prim_collection_loading();
+ }
+ } else {
+ is_first_loading = false;
+ is_first_loading_succeeded = (stream != nullptr);
+ }
+ }
// Load Attempt #2: [jimage | exploded build]
if (!search_append_only && (NULL == stream)) {
diff --git a/src/hotspot/share/classfile/classLoader.hpp b/src/hotspot/share/classfile/classLoader.hpp
index bac23a9dd..7c1c68317 100644
--- a/src/hotspot/share/classfile/classLoader.hpp
+++ b/src/hotspot/share/classfile/classLoader.hpp
@@ -216,6 +216,8 @@ class ClassLoader: AllStatic {
// Last entry in linked list of appended ClassPathEntry instances
static ClassPathEntry* volatile _last_append_entry;
+ static ClassPathEntry* _prim_collection_entry;
+
// Info used by CDS
CDS_ONLY(static ClassPathEntry* _app_classpath_entries;)
CDS_ONLY(static ClassPathEntry* _last_app_classpath_entry;)
@@ -243,6 +245,7 @@ class ClassLoader: AllStatic {
static void setup_bootstrap_search_path(JavaThread* current);
static void setup_bootstrap_search_path_impl(JavaThread* current, const char *class_path);
static void setup_patch_mod_entries();
+ static void set_prim_collection_path(JavaThread* current);
static void create_javabase();
static void* dll_lookup(void* lib, const char* name, const char* path);
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp
index c15e8f4df..8a6343fc3 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp
@@ -2580,6 +2580,9 @@ SystemDictionaryShared::find_record(RunTimeSharedDictionary* static_dict, RunTim
}
InstanceKlass* SystemDictionaryShared::find_builtin_class(Symbol* name) {
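+ // Never resolve HashMap/LinkedHashMap from the CDS archive while the prim
+ // variant is active; those classes must come from primcollection.jar.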
+ if (UsePrimHashMap && name->is_primhashmap_related_class()) {
+ return NULL;
+ }
const RunTimeSharedClassInfo* record = find_record(&_builtin_dictionary, &_dynamic_builtin_dictionary, name);
if (record != NULL) {
assert(!record->_klass->is_hidden(), "hidden class cannot be looked up by name");
diff --git a/src/hotspot/share/oops/symbol.hpp b/src/hotspot/share/oops/symbol.hpp
index 96562d08a..003422ca1 100644
--- a/src/hotspot/share/oops/symbol.hpp
+++ b/src/hotspot/share/oops/symbol.hpp
@@ -194,6 +194,10 @@ class Symbol : public MetaspaceObj {
}
bool equals(const char* str) const { return equals(str, (int) strlen(str)); }
+ bool is_primhashmap_related_class() const {
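+ // A prefix match also covers nested classes such as java/util/HashMap$Node.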
+ return starts_with("java/util/HashMap") || starts_with("java/util/LinkedHashMap");
+ }
+
// Tests if the symbol starts with the given prefix.
bool starts_with(const char* prefix, int len) const {
return contains_utf8_at(0, prefix, len);
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index f24cabb11..357ad4aca 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -2969,6 +2969,13 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(class, path));
}
+ if (DumpSharedSpaces && UsePrimHashMap) {
+ warning("UsePrimHashMap is confilict with -Xshare:dump. ignoring UsePrimHashMap.");
+ if (FLAG_SET_CMDLINE(UsePrimHashMap, false) != JVMFlag::SUCCESS) {
+ return JNI_EINVAL;
+ }
+ }
+
fix_appclasspath();
return JNI_OK;
diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp
index 680e78c04..4f02cb31a 100644
--- a/src/hotspot/share/runtime/globals.hpp
+++ b/src/hotspot/share/runtime/globals.hpp
@@ -2129,6 +2129,12 @@ const intx ObjectAlignmentInBytes = 8;
JFR_ONLY(product(ccstr, StartFlightRecording, NULL, \
"Start flight recording with options")) \
\
+ product(bool, UsePrimHashMap, false, EXPERIMENTAL, \
+ "The Prim HashMap is a specialized version for long key. " \
+ "Long-Key HashMap can benefit from this in most scenarios." \
+ "Note: The debugging of HashMap.java is inaccurate" \
+ " when UsePrimHashMap is enabled.") \
+ \
product(bool, UseFastSerializer, false, EXPERIMENTAL, \
"Cache-based serialization.It is extremely fast, but it" \
"can only be effective in certain scenarios.") \
diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp
index a77f30b8c..ccfb0806d 100644
--- a/src/hotspot/share/runtime/java.cpp
+++ b/src/hotspot/share/runtime/java.cpp
@@ -690,6 +690,12 @@ void vm_exit_during_cds_dumping(const char* error, const char* message) {
vm_abort(false);
}
+void vm_exit_during_prim_collection_loading() {
+ tty->print_cr("Error occurred during loading prim collection classes: must load all or none from primcollection.jar");
+ // no need to dump core
+ vm_abort(false);
+}
+
void vm_notify_during_shutdown(const char* error, const char* message) {
if (error != NULL) {
tty->print_cr("Error occurred during initialization of VM");
diff --git a/src/hotspot/share/runtime/java.hpp b/src/hotspot/share/runtime/java.hpp
index ad179662f..e796fa84c 100644
--- a/src/hotspot/share/runtime/java.hpp
+++ b/src/hotspot/share/runtime/java.hpp
@@ -59,6 +59,8 @@ extern void vm_shutdown_during_initialization(const char* error, const char* mes
extern void vm_exit_during_cds_dumping(const char* error, const char* message = NULL);
+extern void vm_exit_during_prim_collection_loading();
+
/**
* With the integration of the changes to handle the version string
* as defined by JEP-223, most of the code related to handle the version
diff --git a/src/hotspot/share/runtime/vm_version.cpp b/src/hotspot/share/runtime/vm_version.cpp
index 33a5c792c..1e6756aaa 100644
--- a/src/hotspot/share/runtime/vm_version.cpp
+++ b/src/hotspot/share/runtime/vm_version.cpp
@@ -31,6 +31,14 @@
void VM_Version_init() {
VM_Version::initialize();
+#ifdef AARCH64
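+ // is_hisi_enabled() exists only on AArch64; on other architectures, and on
+ // non-HiSilicon AArch64 parts, force the experimental flag off.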
+ if (!VM_Version::is_hisi_enabled()) {
+ UsePrimHashMap = false;
+ }
+#else
+ UsePrimHashMap = false;
+#endif
+
if (log_is_enabled(Info, os, cpu)) {
char buf[1024];
ResourceMark rm;
diff --git a/test/jdk/java/util/HashMap/HashMap.java b/test/jdk/java/util/HashMap/HashMap.java
new file mode 100644
index 000000000..4880c2e8f
--- /dev/null
+++ b/test/jdk/java/util/HashMap/HashMap.java
@@ -0,0 +1,3798 @@
+/*
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package java.util;
+
+import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import jdk.internal.access.SharedSecrets;
+
+/**
+ * Hash table based implementation of the {@code Map} interface. This
+ * implementation provides all of the optional map operations, and permits
+ * {@code null} values and the {@code null} key. (The {@code HashMap}
+ * class is roughly equivalent to {@code Hashtable}, except that it is
+ * unsynchronized and permits nulls.) This class makes no guarantees as to
+ * the order of the map; in particular, it does not guarantee that the order
+ * will remain constant over time.
+ *
+ * <p>This implementation provides constant-time performance for the basic
+ * operations ({@code get} and {@code put}), assuming the hash function
+ * disperses the elements properly among the buckets. Iteration over
+ * collection views requires time proportional to the "capacity" of the
+ * {@code HashMap} instance (the number of buckets) plus its size (the number
+ * of key-value mappings). Thus, it's very important not to set the initial
+ * capacity too high (or the load factor too low) if iteration performance is
+ * important.
+ *
+ * <p>An instance of {@code HashMap} has two parameters that affect its
+ * performance: <i>initial capacity</i> and <i>load factor</i>. The
+ * <i>capacity</i> is the number of buckets in the hash table, and the initial
+ * capacity is simply the capacity at the time the hash table is created. The
+ * <i>load factor</i> is a measure of how full the hash table is allowed to
+ * get before its capacity is automatically increased. When the number of
+ * entries in the hash table exceeds the product of the load factor and the
+ * current capacity, the hash table is <i>rehashed</i> (that is, internal data
+ * structures are rebuilt) so that the hash table has approximately twice the
+ * number of buckets.
+ *
+ * <p>As a general rule, the default load factor (.75) offers a good
+ * tradeoff between time and space costs. Higher values decrease the
+ * space overhead but increase the lookup cost (reflected in most of
+ * the operations of the {@code HashMap} class, including
+ * {@code get} and {@code put}). The expected number of entries in
+ * the map and its load factor should be taken into account when
+ * setting its initial capacity, so as to minimize the number of
+ * rehash operations. If the initial capacity is greater than the
+ * maximum number of entries divided by the load factor, no rehash
+ * operations will ever occur.
+ *
+ * <p>If many mappings are to be stored in a {@code HashMap}
+ * instance, creating it with a sufficiently large capacity will allow
+ * the mappings to be stored more efficiently than letting it perform
+ * automatic rehashing as needed to grow the table. Note that using
+ * many keys with the same {@code hashCode()} is a sure way to slow
+ * down performance of any hash table. To ameliorate impact, when keys
+ * are {@link Comparable}, this class may use comparison order among
+ * keys to help break ties.
+ *
+ * <p><strong>Note that this implementation is not synchronized.</strong>
+ * If multiple threads access a hash map concurrently, and at least one of
+ * the threads modifies the map structurally, it <i>must</i> be
+ * synchronized externally. (A structural modification is any operation
+ * that adds or deletes one or more mappings; merely changing the value
+ * associated with a key that an instance already contains is not a
+ * structural modification.) This is typically accomplished by
+ * synchronizing on some object that naturally encapsulates the map.
+ *
+ * If no such object exists, the map should be "wrapped" using the
+ * {@link Collections#synchronizedMap Collections.synchronizedMap}
+ * method. This is best done at creation time, to prevent accidental
+ * unsynchronized access to the map:<pre>
+ * Map m = Collections.synchronizedMap(new HashMap(...));</pre>
+ *
+ * <p>The iterators returned by all of this class's "collection view methods"
+ * are <i>fail-fast</i>: if the map is structurally modified at any time after
+ * the iterator is created, in any way except through the iterator's own
+ * {@code remove} method, the iterator will throw a
+ * {@link ConcurrentModificationException}. Thus, in the face of concurrent
+ * modification, the iterator fails quickly and cleanly, rather than risking
+ * arbitrary, non-deterministic behavior at an undetermined time in the
+ * future.
+ *
+ * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
+ * as it is, generally speaking, impossible to make any hard guarantees in the
+ * presence of unsynchronized concurrent modification. Fail-fast iterators
+ * throw {@code ConcurrentModificationException} on a best-effort basis.
+ * Therefore, it would be wrong to write a program that depended on this
+ * exception for its correctness: <i>the fail-fast behavior of iterators
+ * should be used only to detect bugs.</i>
+ *
+ * <p>This class is a member of the
+ * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
+ * Java Collections Framework</a>.
+ *
+ * @param <K> the type of keys maintained by this map
+ * @param <V> the type of mapped values
+ *
+ * @author Doug Lea
+ * @author Josh Bloch
+ * @author Arthur van Hoff
+ * @author Neal Gafter
+ * @see Object#hashCode()
+ * @see Collection
+ * @see Map
+ * @see TreeMap
+ * @see Hashtable
+ * @since 1.2
+ */
+public class HashMap<K,V> extends AbstractMap<K,V>
+ implements Map<K,V>, Cloneable, Serializable {
+
+ @java.io.Serial
+ private static final long serialVersionUID = 362498820763181265L;
+
+ /*
+ * Implementation notes.
+ *
+ * This map usually acts as a binned (bucketed) hash table, but
+ * when bins get too large, they are transformed into bins of
+ * TreeNodes, each structured similarly to those in
+ * java.util.TreeMap. Most methods try to use normal bins, but
+ * relay to TreeNode methods when applicable (simply by checking
+ * instanceof a node). Bins of TreeNodes may be traversed and
+ * used like any others, but additionally support faster lookup
+ * when overpopulated. However, since the vast majority of bins in
+ * normal use are not overpopulated, checking for existence of
+ * tree bins may be delayed in the course of table methods.
+ *
+ * Tree bins (i.e., bins whose elements are all TreeNodes) are
+ * ordered primarily by hashCode, but in the case of ties, if two
+ * elements are of the same "class C implements Comparable<C>",
+ * type then their compareTo method is used for ordering. (We
+ * conservatively check generic types via reflection to validate
+ * this -- see method comparableClassFor). The added complexity
+ * of tree bins is worthwhile in providing worst-case O(log n)
+ * operations when keys either have distinct hashes or are
+ * orderable. Thus, performance degrades gracefully under
+ * accidental or malicious usages in which hashCode() methods
+ * return values that are poorly distributed, as well as those in
+ * which many keys share a hashCode, so long as they are also
+ * Comparable. (If neither of these apply, we may waste about a
+ * factor of two in time and space compared to taking no
+ * precautions. But the only known cases stem from poor user
+ * programming practices that are already so slow that this makes
+ * little difference.)
+ *
+ * Because TreeNodes are about twice the size of regular nodes, we
+ * use them only when bins contain enough nodes to warrant use
+ * (see TREEIFY_THRESHOLD). And when they become too small (due to
+ * removal or resizing) they are converted back to plain bins. In
+ * usages with well-distributed user hashCodes, tree bins are
+ * rarely used. Ideally, under random hashCodes, the frequency of
+ * nodes in bins follows a Poisson distribution
+ * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
+ * parameter of about 0.5 on average for the default resizing
+ * threshold of 0.75, although with a large variance because of
+ * resizing granularity. Ignoring variance, the expected
+ * occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
+ * factorial(k)). The first values are:
+ *
+ * 0: 0.60653066
+ * 1: 0.30326533
+ * 2: 0.07581633
+ * 3: 0.01263606
+ * 4: 0.00157952
+ * 5: 0.00015795
+ * 6: 0.00001316
+ * 7: 0.00000094
+ * 8: 0.00000006
+ * more: less than 1 in ten million
+ *
+ * The root of a tree bin is normally its first node. However,
+ * sometimes (currently only upon Iterator.remove), the root might
+ * be elsewhere, but can be recovered following parent links
+ * (method TreeNode.root()).
+ *
+ * All applicable internal methods accept a hash code as an
+ * argument (as normally supplied from a public method), allowing
+ * them to call each other without recomputing user hashCodes.
+ * Most internal methods also accept a "tab" argument, that is
+ * normally the current table, but may be a new or old one when
+ * resizing or converting.
+ *
+ * When bin lists are treeified, split, or untreeified, we keep
+ * them in the same relative access/traversal order (i.e., field
+ * Node.next) to better preserve locality, and to slightly
+ * simplify handling of splits and traversals that invoke
+ * iterator.remove. When using comparators on insertion, to keep a
+ * total ordering (or as close as is required here) across
+ * rebalancings, we compare classes and identityHashCodes as
+ * tie-breakers.
+ *
+ * The use and transitions among plain vs tree modes is
+ * complicated by the existence of subclass LinkedHashMap. See
+ * below for hook methods defined to be invoked upon insertion,
+ * removal and access that allow LinkedHashMap internals to
+ * otherwise remain independent of these mechanics. (This also
+ * requires that a map instance be passed to some utility methods
+ * that may create new nodes.)
+ *
+ * The concurrent-programming-like SSA-based coding style helps
+ * avoid aliasing errors amid all of the twisty pointer operations.
+ */
+
+ /**
+ * The default initial capacity - MUST be a power of two.
+ */
+ static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16
+
+ /**
+ * The maximum capacity, used if a higher value is implicitly specified
+ * by either of the constructors with arguments.
+ * MUST be a power of two <= 1<<30.
+ */
+ static final int MAXIMUM_CAPACITY = 1 << 30;
+
+ /**
+ * The load factor used when none specified in constructor.
+ */
+ static final float DEFAULT_LOAD_FACTOR = 0.75f;
+
+ /**
+ * The bin count threshold for using a tree rather than list for a
+ * bin. Bins are converted to trees when adding an element to a
+ * bin with at least this many nodes. The value must be greater
+ * than 2 and should be at least 8 to mesh with assumptions in
+ * tree removal about conversion back to plain bins upon
+ * shrinkage.
+ */
+ static final int TREEIFY_THRESHOLD = 8;
+
+ /**
+ * The bin count threshold for untreeifying a (split) bin during a
+ * resize operation. Should be less than TREEIFY_THRESHOLD, and at
+ * most 6 to mesh with shrinkage detection under removal.
+ */
+ static final int UNTREEIFY_THRESHOLD = 6;
+
+ /**
+ * The smallest table capacity for which bins may be treeified.
+ * (Otherwise the table is resized if too many nodes in a bin.)
+ * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
+ * between resizing and treeification thresholds.
+ */
+ static final int MIN_TREEIFY_CAPACITY = 64;
+
+ /**
+ * The maximum load factor usable by the prim long hashmap. The performance
+ * of the prim long hashmap decreases sharply once the load factor exceeds 0.8f.
+ */
+ static final float MAX_LOAD_FACTOR_FOR_PRIM_MAP = 0.8f;
+
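+ // Sentinel indices returned by primHashGetIndexByKey(): -1 denotes the
+ // dedicated null-key slot, -2 means the key is absent.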
+ static final int NULL_KEY_INDEX_FOR_RPIM_MAP = -1;
+ static final int KEY_NO_EXIST_FOR_PRIM_MAP = -2;
+
+ /**
+ * Basic hash bin node, used for most entries. (See below for
+ * TreeNode subclass, and in LinkedHashMap for its Entry subclass.)
+ */
+ static class Node<K,V> implements Map.Entry<K,V> {
+ final int hash;
+ final K key;
+ V value;
+ Node<K,V> next;
+
+ Node(int hash, K key, V value, Node<K,V> next) {
+ this.hash = hash;
+ this.key = key;
+ this.value = value;
+ this.next = next;
+ }
+
+ public final K getKey() { return key; }
+ public final V getValue() { return value; }
+ public final String toString() { return key + "=" + value; }
+
+ public final int hashCode() {
+ return Objects.hashCode(key) ^ Objects.hashCode(value);
+ }
+
+ public final V setValue(V newValue) {
+ V oldValue = value;
+ value = newValue;
+ return oldValue;
+ }
+
+ public final boolean equals(Object o) {
+ if (o == this)
+ return true;
+
+ return o instanceof Map.Entry<?, ?> e
+ && Objects.equals(key, e.getKey())
+ && Objects.equals(value, e.getValue());
+ }
+ }
+
+ /* ---------------- Static utilities -------------- */
+
+ /**
+ * Computes key.hashCode() and spreads (XORs) higher bits of hash
+ * to lower. Because the table uses power-of-two masking, sets of
+ * hashes that vary only in bits above the current mask will
+ * always collide. (Among known examples are sets of Float keys
+ * holding consecutive whole numbers in small tables.) So we
+ * apply a transform that spreads the impact of higher bits
+ * downward. There is a tradeoff between speed, utility, and
+ * quality of bit-spreading. Because many common sets of hashes
+ * are already reasonably distributed (so don't benefit from
+ * spreading), and because we use trees to handle large sets of
+ * collisions in bins, we just XOR some shifted bits in the
+ * cheapest possible way to reduce systematic lossage, as well as
+ * to incorporate impact of the highest bits that would otherwise
+ * never be used in index calculations because of table bounds.
+ */
+ static final int hash(Object key) {
+ int h;
+ return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
+ }
+
+ /**
+ * Returns x's Class if it is of the form "class C implements
+ * Comparable<C>", else null.
+ */
+ static Class<?> comparableClassFor(Object x) {
+ if (x instanceof Comparable) {
+ Class<?> c; Type[] ts, as; ParameterizedType p;
+ if ((c = x.getClass()) == String.class) // bypass checks
+ return c;
+ if ((ts = c.getGenericInterfaces()) != null) {
+ for (Type t : ts) {
+ if ((t instanceof ParameterizedType) &&
+ ((p = (ParameterizedType) t).getRawType() ==
+ Comparable.class) &&
+ (as = p.getActualTypeArguments()) != null &&
+ as.length == 1 && as[0] == c) // type arg is c
+ return c;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Returns k.compareTo(x) if x matches kc (k's screened comparable
+ * class), else 0.
+ */
+ @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
+ static int compareComparables(Class<?> kc, Object k, Object x) {
+ return (x == null || x.getClass() != kc ? 0 :
+ ((Comparable)k).compareTo(x));
+ }
+
+ /**
+ * Returns a power of two size for the given target capacity.
+ */
+ static final int tableSizeFor(int cap) {
+ int n = -1 >>> Integer.numberOfLeadingZeros(cap - 1);
+ return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
+ }
+
+ /* ---------------- Fields -------------- */
+
+ /**
+ * The table, initialized on first use, and resized as
+ * necessary. When allocated, length is always a power of two.
+ * (We also tolerate length zero in some operations to allow
+ * bootstrapping mechanics that are currently not needed.)
+ */
+ transient Node<K,V>[] table;
+
+ /**
+ * Holds cached entrySet(). Note that AbstractMap fields are used
+ * for keySet() and values().
+ */
+ transient Set<Map.Entry<K,V>> entrySet;
+
+ /**
+ * The number of key-value mappings contained in this map.
+ */
+ transient int size;
+
+ /**
+ * The number of times this HashMap has been structurally modified
+ * Structural modifications are those that change the number of mappings in
+ * the HashMap or otherwise modify its internal structure (e.g.,
+ * rehash). This field is used to make iterators on Collection-views of
+ * the HashMap fail-fast. (See ConcurrentModificationException).
+ */
+ transient int modCount;
+
+ /**
+ * The next size value at which to resize (capacity * load factor).
+ *
+ * @serial
+ */
+ // (The javadoc description is true upon serialization.
+ // Additionally, if the table array has not been allocated, this
+ // field holds the initial array capacity, or zero signifying
+ // DEFAULT_INITIAL_CAPACITY.)
+ int threshold;
+
+ /**
+ * The load factor for the hash table.
+ *
+ * @serial
+ */
+ final float loadFactor;
+
+ /**
+ * The keys in the prim long hashmap.
+ */
+ transient Long[] primMapKeys;
+
+ /**
+ * The values in prim long hashmap.
+ */
+ transient V[] primMapValues;
+
+ /**
+ * Indicates, per index, whether the entry is valid (occupied) in the prim long hashmap.
+ */
+ transient boolean[] primMapValids;
+
+ /**
+ * The value of null key in prim long hashmap.
+ */
+ transient V primMapValOfNullKey;
+
+ /**
+ * Indicates whether a null key exists in the prim long hashmap.
+ */
+ transient boolean primMapNullKeyValid;
+
+ /**
+ * Indicates whether this map is currently operating in prim long map mode.
+ */
+ transient boolean usingPrimHashMap;
+
+ /* ---------------- Public operations -------------- */
+
+ /**
+ * Constructs an empty {@code HashMap} with the specified initial
+ * capacity and load factor.
+ *
+ * @param initialCapacity the initial capacity
+ * @param loadFactor the load factor
+ * @throws IllegalArgumentException if the initial capacity is negative
+ * or the load factor is nonpositive
+ */
+ public HashMap(int initialCapacity, float loadFactor) {
+ if (initialCapacity < 0)
+ throw new IllegalArgumentException("Illegal initial capacity: " +
+ initialCapacity);
+ if (initialCapacity > MAXIMUM_CAPACITY)
+ initialCapacity = MAXIMUM_CAPACITY;
+ if (loadFactor <= 0 || Float.isNaN(loadFactor))
+ throw new IllegalArgumentException("Illegal load factor: " +
+ loadFactor);
+ this.loadFactor = loadFactor;
+ this.threshold = tableSizeFor(initialCapacity);
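+ // The flat prim layout degrades badly at high occupancy, so only enable
+ // it when the requested load factor is at most MAX_LOAD_FACTOR_FOR_PRIM_MAP.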
+ if (this.loadFactor > MAX_LOAD_FACTOR_FOR_PRIM_MAP) {
+ disablePrimHashMap();
+ } else {
+ initUsingPrimHashMap();
+ }
+ }
+
+ /**
+ * Constructs an empty {@code HashMap} with the specified initial
+ * capacity and the default load factor (0.75).
+ *
+ * @param initialCapacity the initial capacity.
+ * @throws IllegalArgumentException if the initial capacity is negative.
+ */
+ public HashMap(int initialCapacity) {
+ this(initialCapacity, DEFAULT_LOAD_FACTOR);
+ }
+
+ /**
+ * Constructs an empty {@code HashMap} with the default initial capacity
+ * (16) and the default load factor (0.75).
+ */
+ public HashMap() {
+ initUsingPrimHashMap();
+ this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
+ }
+
+ /**
+ * Constructs a new {@code HashMap} with the same mappings as the
+ * specified {@code Map}. The {@code HashMap} is created with
+ * default load factor (0.75) and an initial capacity sufficient to
+ * hold the mappings in the specified {@code Map}.
+ *
+ * @param m the map whose mappings are to be placed in this map
+ * @throws NullPointerException if the specified map is null
+ */
+ public HashMap(Map<? extends K, ? extends V> m) {
+ initUsingPrimHashMap();
+ this.loadFactor = DEFAULT_LOAD_FACTOR;
+ putMapEntries(m, false);
+ }
+
+ /**
+ * Implements Map.putAll and Map constructor.
+ *
+ * @param m the map
+ * @param evict false when initially constructing this map, else
+ * true (relayed to method afterNodeInsertion).
+ */
+ final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
+ int s = m.size();
+ if (s > 0) {
+ if ((!usePrimHashMap() && table == null) ||
+ (usePrimHashMap() && primMapValids == null)) { // pre-size
+ float ft = ((float)s / loadFactor) + 1.0F;
+ int t = ((ft < (float)MAXIMUM_CAPACITY) ?
+ (int)ft : MAXIMUM_CAPACITY);
+ if (t > threshold)
+ threshold = tableSizeFor(t);
+ } else {
+ if (!usePrimHashMap()) {
+ // Because of linked-list bucket constraints, we cannot
+ // expand all at once, but can reduce total resize
+ // effort by repeated doubling now vs later
+ while (s > threshold && table.length < MAXIMUM_CAPACITY)
+ resize();
+ } else {
+ while (s > threshold && primMapValids.length < MAXIMUM_CAPACITY)
+ primHashMapResize();
+ }
+ }
+
+ for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
+ K key = e.getKey();
+ V value = e.getValue();
+ if (!usePrimHashMap(key)) {
+ putVal(key, value, false, evict, true);
+ } else {
+ primHashMapPutVal((Long)key, value, false);
+ }
+ }
+ }
+ }
+
+ /**
+ * Returns the number of key-value mappings in this map.
+ *
+ * @return the number of key-value mappings in this map
+ */
+ public int size() {
+ return size;
+ }
+
+ /**
+ * Returns {@code true} if this map contains no key-value mappings.
+ *
+ * @return {@code true} if this map contains no key-value mappings
+ */
+ public boolean isEmpty() {
+ return size == 0;
+ }
+
+ /**
+ * Returns the value to which the specified key is mapped,
+ * or {@code null} if this map contains no mapping for the key.
+ *
+ * <p>More formally, if this map contains a mapping from a key
+ * {@code k} to a value {@code v} such that {@code (key==null ? k==null :
+ * key.equals(k))}, then this method returns {@code v}; otherwise
+ * it returns {@code null}. (There can be at most one such mapping.)
+ *
+ * <p>A return value of {@code null} does not <i>necessarily</i>
+ * indicate that the map contains no mapping for the key; it's also
+ * possible that the map explicitly maps the key to {@code null}.
+ * The {@link #containsKey containsKey} operation may be used to
+ * distinguish these two cases.
+ *
+ * @see #put(Object, Object)
+ */
+ public V get(Object key) {
+ if (usePrimHashMap()) {
+ return primHashMapGet(key);
+ }
+ Node<K,V> e;
+ return (e = getNode(key)) == null ? null : e.value;
+ }
+
+ /**
+ * Implements Map.get and related methods.
+ *
+ * @param key the key
+ * @return the node, or null if none
+ */
+ final Node<K,V> getNode(Object key) {
+ Node<K,V>[] tab; Node<K,V> first, e; int n, hash; K k;
+ if ((tab = table) != null && (n = tab.length) > 0 &&
+ (first = tab[(n - 1) & (hash = hash(key))]) != null) {
+ if (first.hash == hash && // always check first node
+ ((k = first.key) == key || (key != null && key.equals(k))))
+ return first;
+ if ((e = first.next) != null) {
+ if (first instanceof TreeNode)
+ return ((TreeNode<K,V>)first).getTreeNode(hash, key);
+ do {
+ if (e.hash == hash &&
+ ((k = e.key) == key || (key != null && key.equals(k))))
+ return e;
+ } while ((e = e.next) != null);
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Returns {@code true} if this map contains a mapping for the
+ * specified key.
+ *
+ * @param key The key whose presence in this map is to be tested
+ * @return {@code true} if this map contains a mapping for the specified
+ * key.
+ */
+ public boolean containsKey(Object key) {
+ if (usePrimHashMap()) {
+ return (primHashGetIndexByKey(key) != KEY_NO_EXIST_FOR_PRIM_MAP);
+ }
+ return getNode(key) != null;
+ }
+
+ /**
+ * Associates the specified value with the specified key in this map.
+ * If the map previously contained a mapping for the key, the old
+ * value is replaced.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param value value to be associated with the specified key
+ * @return the previous value associated with {@code key}, or
+ * {@code null} if there was no mapping for {@code key}.
+ * (A {@code null} return can also indicate that the map
+ * previously associated {@code null} with {@code key}.)
+ */
+ public V put(K key, V value) {
+ if (usePrimHashMap(key)) {
+ return primHashMapPutVal((Long)key, value, false);
+ }
+ return putVal(key, value, false, true, true);
+ }
+
+ /**
+ * Implements Map.put and related methods.
+ *
+ * @param key the key
+ * @param value the value to put
+ * @param onlyIfAbsent if true, don't change existing value
+ * @param evict if false, the table is in creation mode.
+ * @param update if false (in rollback state), don't update size and modCount
+ * @return previous value, or null if none
+ */
+ final V putVal(K key, V value, boolean onlyIfAbsent,
+ boolean evict, boolean update) {
+ int hash = hash(key);
+ Node<K,V>[] tab; Node<K,V> p; int n, i;
+ if ((tab = table) == null || (n = tab.length) == 0)
+ n = (tab = resize()).length;
+ if ((p = tab[i = (n - 1) & hash]) == null)
+ tab[i] = newNode(hash, key, value, null);
+ else {
+ Node<K,V> e; K k;
+ if (p.hash == hash &&
+ ((k = p.key) == key || (key != null && key.equals(k))))
+ e = p;
+ else if (p instanceof TreeNode)
+ e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
+ else {
+ for (int binCount = 0; ; ++binCount) {
+ if ((e = p.next) == null) {
+ p.next = newNode(hash, key, value, null);
+ if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
+ treeifyBin(tab, hash);
+ break;
+ }
+ if (e.hash == hash &&
+ ((k = e.key) == key || (key != null && key.equals(k))))
+ break;
+ p = e;
+ }
+ }
+ if (e != null) { // existing mapping for key
+ V oldValue = e.value;
+ if (!onlyIfAbsent || oldValue == null)
+ e.value = value;
+ afterNodeAccess(e);
+ return oldValue;
+ }
+ }
+ if (update) {
+ ++modCount;
+ if (++size > threshold)
+ resize();
+ }
+ afterNodeInsertion(evict);
+ return null;
+ }
+
+ /**
+ * Calculate the new capacity based on the old capacity.
+ * If the old value is 0, the new capacity is set to the
+ * initial capacity saved in the threshold field.
+ * Otherwise, set the new capacity to double the old capacity.
+ * This method updates the threshold at the same time.
+ *
+ * @return the new capacity. 0 if oldCap reaches the max capacity.
+ */
+ private int calNewCapAndUpdateThreshold(int oldCap) {
+ int oldThr = threshold;
+ int newCap, newThr = 0;
+ if (oldCap > 0) {
+ if (oldCap >= MAXIMUM_CAPACITY) {
+ threshold = Integer.MAX_VALUE;
+ return 0;
+ }
+ else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
+ oldCap >= DEFAULT_INITIAL_CAPACITY)
+ newThr = oldThr << 1; // double threshold
+ }
+ else if (oldThr > 0) // initial capacity was placed in threshold
+ newCap = oldThr;
+ else { // zero initial threshold signifies using defaults
+ newCap = DEFAULT_INITIAL_CAPACITY;
+ newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
+ }
+ if (newThr == 0) {
+ float ft = (float)newCap * loadFactor;
+ newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
+ (int)ft : Integer.MAX_VALUE);
+ }
+ threshold = newThr;
+ return newCap;
+ }
+
+ /**
+ * Initializes or doubles table size. If null, allocates in
+ * accord with initial capacity target held in field threshold.
+ * Otherwise, because we are using power-of-two expansion, the
+ * elements from each bin must either stay at same index, or move
+ * with a power of two offset in the new table.
+ *
+ * @return the table
+ */
+ final Node<K,V>[] resize() {
+ Node<K,V>[] oldTab = table;
+ final int oldCap = (oldTab == null) ? 0 : oldTab.length;
+ int newCap = calNewCapAndUpdateThreshold(oldCap);
+ // 0 means oldCap reaches the MAXIMUM_CAPACITY
+ if (newCap == 0) {
+ return oldTab;
+ }
+ @SuppressWarnings({"rawtypes","unchecked"})
+ Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
+ table = newTab;
+ if (oldTab != null) {
+ for (int j = 0; j < oldCap; ++j) {
+ Node<K,V> e;
+ if ((e = oldTab[j]) != null) {
+ oldTab[j] = null;
+ if (e.next == null)
+ newTab[e.hash & (newCap - 1)] = e;
+ else if (e instanceof TreeNode)
+ ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
+ else { // preserve order
+ Node<K,V> loHead = null, loTail = null;
+ Node<K,V> hiHead = null, hiTail = null;
+ Node<K,V> next;
+ do {
+ next = e.next;
+ if ((e.hash & oldCap) == 0) {
+ if (loTail == null)
+ loHead = e;
+ else
+ loTail.next = e;
+ loTail = e;
+ }
+ else {
+ if (hiTail == null)
+ hiHead = e;
+ else
+ hiTail.next = e;
+ hiTail = e;
+ }
+ } while ((e = next) != null);
+ if (loTail != null) {
+ loTail.next = null;
+ newTab[j] = loHead;
+ }
+ if (hiTail != null) {
+ hiTail.next = null;
+ newTab[j + oldCap] = hiHead;
+ }
+ }
+ }
+ }
+ }
+ return newTab;
+ }
+
+ /**
+ * Replaces all linked nodes in bin at index for given hash unless
+ * table is too small, in which case resizes instead.
+ */
+ final void treeifyBin(Node<K,V>[] tab, int hash) {
+ int n, index; Node<K,V> e;
+ if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
+ resize();
+ else if ((e = tab[index = (n - 1) & hash]) != null) {
+ TreeNode<K,V> hd = null, tl = null;
+ do {
+ TreeNode<K,V> p = replacementTreeNode(e, null);
+ if (tl == null)
+ hd = p;
+ else {
+ p.prev = tl;
+ tl.next = p;
+ }
+ tl = p;
+ } while ((e = e.next) != null);
+ if ((tab[index] = hd) != null)
+ hd.treeify(tab);
+ }
+ }
+
+ /**
+ * Copies all of the mappings from the specified map to this map.
+ * These mappings will replace any mappings that this map had for
+ * any of the keys currently in the specified map.
+ *
+ * @param m mappings to be stored in this map
+ * @throws NullPointerException if the specified map is null
+ */
+ public void putAll(Map<? extends K, ? extends V> m) {
+ putMapEntries(m, true);
+ }
+
+ /**
+ * Removes the mapping for the specified key from this map if present.
+ *
+ * @param key key whose mapping is to be removed from the map
+ * @return the previous value associated with {@code key}, or
+ * {@code null} if there was no mapping for {@code key}.
+ * (A {@code null} return can also indicate that the map
+ * previously associated {@code null} with {@code key}.)
+ */
+ public V remove(Object key) {
+ if (usePrimHashMap(key)) {
+ return primHashMapRemoveByKey(key);
+ }
+ Node<K,V> e;
+ return (e = removeNode(hash(key), key, null, false, true)) == null ?
+ null : e.value;
+ }
+
+ /**
+ * Implements Map.remove and related methods.
+ *
+ * @param hash hash for key
+ * @param key the key
+ * @param value the value to match if matchValue, else ignored
+ * @param matchValue if true only remove if value is equal
+ * @param movable if false do not move other nodes while removing
+ * @return the node, or null if none
+ */
+ final Node<K,V> removeNode(int hash, Object key, Object value,
+ boolean matchValue, boolean movable) {
+ Node<K,V>[] tab; Node<K,V> p; int n, index;
+ if ((tab = table) != null && (n = tab.length) > 0 &&
+ (p = tab[index = (n - 1) & hash]) != null) {
+ Node<K,V> node = null, e; K k; V v;
+ if (p.hash == hash &&
+ ((k = p.key) == key || (key != null && key.equals(k))))
+ node = p;
+ else if ((e = p.next) != null) {
+ if (p instanceof TreeNode)
+ node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
+ else {
+ do {
+ if (e.hash == hash &&
+ ((k = e.key) == key ||
+ (key != null && key.equals(k)))) {
+ node = e;
+ break;
+ }
+ p = e;
+ } while ((e = e.next) != null);
+ }
+ }
+ if (node != null && (!matchValue || (v = node.value) == value ||
+ (value != null && value.equals(v)))) {
+ if (node instanceof TreeNode)
+ ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
+ else if (node == p)
+ tab[index] = node.next;
+ else
+ p.next = node.next;
+ ++modCount;
+ --size;
+ afterNodeRemoval(node);
+ return node;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Removes all of the mappings from this map.
+ * The map will be empty after this call returns.
+ */
+ public void clear() {
+ modCount++;
+ if (usePrimHashMap()) {
+ if (size > 0) {
+ size = 0;
+ if (primMapValids != null && primMapValids.length != 0) {
+ Arrays.fill(primMapValids, false);
+ Arrays.fill(primMapValues, null);
+ }
+ primMapNullKeyValid = false;
+ primMapValOfNullKey = null;
+ }
+ } else {
+ Node<K,V>[] tab;
+ if ((tab = table) != null && size > 0) {
+ size = 0;
+ for (int i = 0; i < tab.length; ++i)
+ tab[i] = null;
+ }
+ }
+ }
+
+ /**
+ * Returns {@code true} if this map maps one or more keys to the
+ * specified value.
+ *
+ * @param value value whose presence in this map is to be tested
+ * @return {@code true} if this map maps one or more keys to the
+ * specified value
+ */
+ public boolean containsValue(Object value) {
+ if (usePrimHashMap()) {
+ return primHashMapContainsValue(value);
+ }
+ Node<K,V>[] tab; V v;
+ if ((tab = table) != null && size > 0) {
+ for (Node<K,V> e : tab) {
+ for (; e != null; e = e.next) {
+ if ((v = e.value) == value ||
+ (value != null && value.equals(v)))
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Returns a {@link Set} view of the keys contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. If the map is modified
+ * while an iteration over the set is in progress (except through
+ * the iterator's own {@code remove} operation), the results of
+ * the iteration are undefined. The set supports element removal,
+ * which removes the corresponding mapping from the map, via the
+ * {@code Iterator.remove}, {@code Set.remove},
+ * {@code removeAll}, {@code retainAll}, and {@code clear}
+ * operations. It does not support the {@code add} or {@code addAll}
+ * operations.
+ *
+ * @return a set view of the keys contained in this map
+ */
+ public Set<K> keySet() {
+ Set<K> ks = keySet;
+ if (ks == null) {
+ ks = new KeySet();
+ keySet = ks;
+ }
+ return ks;
+ }
+
+ /**
+ * Prepares the array for {@link Collection#toArray(Object[])} implementation.
+ * If supplied array is smaller than this map size, a new array is allocated.
+ * If supplied array is bigger than this map size, a null is written at size index.
+ *
+ * @param a an original array passed to {@code toArray()} method
+ * @param <T> type of array elements
+ * @return an array ready to be filled and returned from {@code toArray()} method.
+ */
+ @SuppressWarnings("unchecked")
+ final <T> T[] prepareArray(T[] a) {
+ int size = this.size;
+ if (a.length < size) {
+ return (T[]) java.lang.reflect.Array
+ .newInstance(a.getClass().getComponentType(), size);
+ }
+ if (a.length > size) {
+ a[size] = null;
+ }
+ return a;
+ }
+
+ /**
+ * Fills an array with this map keys and returns it. This method assumes
+ * that input array is big enough to fit all the keys. Use
+ * {@link #prepareArray(Object[])} to ensure this.
+ *
+ * @param a an array to fill
+ * @param <T> type of array elements
+ * @return supplied array
+ */
+ <T> T[] keysToArray(T[] a) {
+ Object[] r = a;
+ int idx = 0;
+ if (usePrimHashMap()) {
+ int remaining = size;
+ if (primMapNullKeyValid) {
+ r[idx++] = null;
+ --remaining;
+ }
+ if (remaining > 0) {
+ boolean[] valids = primMapValids;
+ Long[] keys = primMapKeys;
+ int length = valids.length;
+ for (int i = 0; remaining > 0 && i < length; ++i) {
+ if (valids[i]) {
+ r[idx++] = keys[i];
+ --remaining;
+ }
+ }
+ }
+ } else {
+ Node<K,V>[] tab;
+ if (size > 0 && (tab = table) != null) {
+ for (Node<K,V> e : tab) {
+ for (; e != null; e = e.next) {
+ r[idx++] = e.key;
+ }
+ }
+ }
+ }
+ return a;
+ }
+
+ /**
+ * Fills an array with this map values and returns it. This method assumes
+ * that input array is big enough to fit all the values. Use
+ * {@link #prepareArray(Object[])} to ensure this.
+ *
+ * @param a an array to fill
+ * @param <T> type of array elements
+ * @return supplied array
+ */
+ <T> T[] valuesToArray(T[] a) {
+ Object[] r = a;
+ int idx = 0;
+ if (usePrimHashMap()) {
+ int remaining = size;
+ if (primMapNullKeyValid) {
+ r[idx++] = primMapValOfNullKey;
+ --remaining;
+ }
+ if (remaining > 0) {
+ boolean[] valids = primMapValids;
+ V[] values = primMapValues;
+ int length = valids.length;
+ for (int i = 0; remaining > 0 && i < length; ++i) {
+ if (valids[i]) {
+ r[idx++] = values[i];
+ --remaining;
+ }
+ }
+ }
+ } else {
+ Node<K,V>[] tab;
+ if (size > 0 && (tab = table) != null) {
+ for (Node<K,V> e : tab) {
+ for (; e != null; e = e.next) {
+ r[idx++] = e.value;
+ }
+ }
+ }
+ }
+ return a;
+ }
+
+ final class KeySet extends AbstractSet<K> {
+ public final int size() { return size; }
+ public final void clear() { HashMap.this.clear(); }
+ public final Iterator<K> iterator() {
+ if (usePrimHashMap()) {
+ return new primHashMapKeyIterator();
+ }
+ return new KeyIterator();
+ }
+ public final boolean contains(Object o) { return containsKey(o); }
+ public final boolean remove(Object key) {
+ if (usePrimHashMap(key)) {
+ int index = primHashGetIndexByKey(key);
+ if (index == KEY_NO_EXIST_FOR_PRIM_MAP) {
+ return false;
+ }
+ primHashMapRemoveByIndex(index);
+ return true;
+ }
+ return removeNode(hash(key), key, null, false, true) != null;
+ }
+ public final Spliterator<K> spliterator() {
+ if (usePrimHashMap()) {
+ return new primHashMapKeySpliterator(0, -1, 0, 0, primMapNullKeyValid);
+ }
+ return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0);
+ }
+
+ public Object[] toArray() {
+ return keysToArray(new Object[size]);
+ }
+
+ public <T> T[] toArray(T[] a) {
+ return keysToArray(prepareArray(a));
+ }
+
+ public final void forEach(Consumer<? super K> action) {
+ int mc = modCount;
+ if (action == null)
+ throw new NullPointerException();
+ if (usePrimHashMap()) {
+ int remaining = size;
+ if (primMapNullKeyValid) {
+ action.accept(null);
+ --remaining;
+ }
+ if (remaining > 0) {
+ boolean[] valids = primMapValids;
+ Long[] keys = primMapKeys;
+ int length = valids.length;
+ for (int i = 0; remaining > 0 && i < length; ++i) {
+ if (valids[i]) {
+ action.accept(castKeyToGenericType(keys[i]));
+ --remaining;
+ }
+ }
+ }
+ } else {
+ Node<K,V>[] tab;
+ if (size > 0 && (tab = table) != null) {
+ for (Node<K,V> e : tab) {
+ for (; e != null; e = e.next)
+ action.accept(e.key);
+ }
+ }
+ }
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ /**
+ * Returns a {@link Collection} view of the values contained in this map.
+ * The collection is backed by the map, so changes to the map are
+ * reflected in the collection, and vice-versa. If the map is
+ * modified while an iteration over the collection is in progress
+ * (except through the iterator's own {@code remove} operation),
+ * the results of the iteration are undefined. The collection
+ * supports element removal, which removes the corresponding
+ * mapping from the map, via the {@code Iterator.remove},
+ * {@code Collection.remove}, {@code removeAll},
+ * {@code retainAll} and {@code clear} operations. It does not
+ * support the {@code add} or {@code addAll} operations.
+ *
+ * @return a view of the values contained in this map
+ */
+ public Collection<V> values() {
+ Collection<V> vs = values;
+ if (vs == null) {
+ vs = new Values();
+ values = vs;
+ }
+ return vs;
+ }
+
+ final class Values extends AbstractCollection<V> {
+ public final int size() { return size; }
+ public final void clear() { HashMap.this.clear(); }
+ public final Iterator<V> iterator() {
+ if (usePrimHashMap()) {
+ return new primHashMapValueIterator();
+ }
+ return new ValueIterator();
+ }
+ public final boolean contains(Object o) { return containsValue(o); }
+ public final Spliterator<V> spliterator() {
+ if (usePrimHashMap()) {
+ return new primHashMapValueSpliterator(0, -1, 0, 0, primMapNullKeyValid);
+ }
+ return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0);
+ }
+
+ public Object[] toArray() {
+ return valuesToArray(new Object[size]);
+ }
+
+ public <T> T[] toArray(T[] a) {
+ return valuesToArray(prepareArray(a));
+ }
+
+ public final void forEach(Consumer<? super V> action) {
+ if (action == null)
+ throw new NullPointerException();
+ int mc = modCount;
+ if (usePrimHashMap()) {
+ int remaining = size;
+ if (primMapNullKeyValid) {
+ action.accept(primMapValOfNullKey);
+ --remaining;
+ }
+ if (remaining > 0) {
+ boolean[] valids = primMapValids;
+ V[] values = primMapValues;
+ int length = valids.length;
+ for (int i = 0; remaining > 0 && i < length; ++i) {
+ if (valids[i]) {
+ action.accept(values[i]);
+ --remaining;
+ }
+ }
+ }
+ } else {
+ Node<K,V>[] tab;
+ if (size > 0 && (tab = table) != null) {
+ for (Node<K,V> e : tab) {
+ for (; e != null; e = e.next)
+ action.accept(e.value);
+ }
+ }
+ }
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ /**
+ * Returns a {@link Set} view of the mappings contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. If the map is modified
+ * while an iteration over the set is in progress (except through
+ * the iterator's own {@code remove} operation, or through the
+ * {@code setValue} operation on a map entry returned by the
+ * iterator) the results of the iteration are undefined. The set
+ * supports element removal, which removes the corresponding
+ * mapping from the map, via the {@code Iterator.remove},
+ * {@code Set.remove}, {@code removeAll}, {@code retainAll} and
+ * {@code clear} operations. It does not support the
+ * {@code add} or {@code addAll} operations.
+ *
+ * @return a set view of the mappings contained in this map
+ */
+ public Set<Map.Entry<K,V>> entrySet() {
+ Set<Map.Entry<K,V>> es;
+ return (es = entrySet) == null ? (entrySet = new EntrySet()) : es;
+ }
+
+ final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
+ public final int size() { return size; }
+ public final void clear() { HashMap.this.clear(); }
+ public final Iterator<Map.Entry<K,V>> iterator() {
+ if (usePrimHashMap()) {
+ return new primHashMapEntryIterator();
+ }
+ return new EntryIterator();
+ }
+ public final boolean contains(Object o) {
+ if (!(o instanceof Map.Entry<?, ?> e))
+ return false;
+ Object key = e.getKey();
+ if (usePrimHashMap()) {
+ int index = primHashGetIndexByKey(key);
+ if (index == KEY_NO_EXIST_FOR_PRIM_MAP) {
+ return false;
+ }
+ Object value = e.getValue();
+ return (index == NULL_KEY_INDEX_FOR_RPIM_MAP) ?
+ Objects.equals(value, primMapValOfNullKey) :
+ Objects.equals(value, primMapValues[index]);
+ } else {
+ Node<K,V> candidate = getNode(key);
+ return candidate != null && candidate.equals(e);
+ }
+ }
+ public final boolean remove(Object o) {
+ if (o instanceof Map.Entry<?, ?> e) {
+ Object key = e.getKey();
+ Object value = e.getValue();
+ return HashMap.this.remove(key, value);
+ }
+ return false;
+ }
+ public final Spliterator<Map.Entry<K,V>> spliterator() {
+ if (usePrimHashMap()) {
+ return new primHashMapEntrySpliterator(0, -1, 0, 0, primMapNullKeyValid);
+ }
+ return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0);
+ }
+ public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
+ if (action == null)
+ throw new NullPointerException();
+ if (usePrimHashMap()) {
+ Iterator<Map.Entry<K,V>> iter = iterator();
+ int mc = modCount;
+ while (iter.hasNext()) {
+ Map.Entry<K,V> e = iter.next();
+ action.accept(e);
+ }
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ else {
+ Node<K,V>[] tab;
+ if (size > 0 && (tab = table) != null) {
+ int mc = modCount;
+ for (Node<K,V> e : tab) {
+ for (; e != null; e = e.next)
+ action.accept(e);
+ }
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ }
+ }
+ }
+
+ // Overrides of JDK8 Map extension methods
+
+ @Override
+ public V getOrDefault(Object key, V defaultValue) {
+ if (usePrimHashMap()) {
+ int index = primHashGetIndexByKey(key);
+ if (index == KEY_NO_EXIST_FOR_PRIM_MAP) {
+ return defaultValue;
+ }
+ return primHashMapGetValByIndex(index);
+ }
+ Node<K,V> e;
+ return (e = getNode(key)) == null ? defaultValue : e.value;
+ }
+
+ @Override
+ public V putIfAbsent(K key, V value) {
+ if (usePrimHashMap(key)) {
+ return primHashMapPutVal((Long)key, value, true);
+ }
+ return putVal(key, value, true, true, true);
+ }
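+
+ // Illustrative sketch: while the primitive path is active, putIfAbsent on a
+ // Long key goes through primHashMapPutVal and never allocates a Node:
+ //   HashMap<Long, String> m = new HashMap<>();
+ //   m.putIfAbsent(1L, "a");   // stored in primMapKeys/primMapValues, returns null
+ //   m.putIfAbsent(1L, "b");   // returns "a"; the existing mapping is kept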
+
+ @Override
+ public boolean remove(Object key, Object value) {
+ if (usePrimHashMap(key)) {
+ int index = primHashGetIndexByKey(key);
+ if (index == KEY_NO_EXIST_FOR_PRIM_MAP) {
+ return false;
+ }
+ Object val = primHashMapGetValByIndex(index);
+ if (Objects.equals(value, val)) {
+ primHashMapRemoveByIndex(index);
+ return true;
+ }
+ return false;
+ }
+ return removeNode(hash(key), key, value, true, true) != null;
+ }
+
+ @Override
+ public boolean replace(K key, V oldValue, V newValue) {
+ if (usePrimHashMap(key)) {
+ int index = primHashGetIndexByKey(key);
+ if (index == KEY_NO_EXIST_FOR_PRIM_MAP) {
+ return false;
+ }
+ if (index == NULL_KEY_INDEX_FOR_RPIM_MAP) {
+ if (Objects.equals(oldValue, primMapValOfNullKey)) {
+ primMapValOfNullKey = newValue;
+ return true;
+ }
+ } else {
+ if (Objects.equals(oldValue, primMapValues[index])) {
+ primMapValues[index] = newValue;
+ return true;
+ }
+ }
+ return false;
+ }
+ Node<K,V> e; V v;
+ if ((e = getNode(key)) != null &&
+ ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) {
+ e.value = newValue;
+ afterNodeAccess(e);
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public V replace(K key, V value) {
+ if (usePrimHashMap(key)) {
+ int index = primHashGetIndexByKey(key);
+ if (index == KEY_NO_EXIST_FOR_PRIM_MAP) {
+ return null;
+ }
+ V oldValue;
+ if (index == NULL_KEY_INDEX_FOR_RPIM_MAP) {
+ oldValue = primMapValOfNullKey;
+ primMapValOfNullKey = value;
+ } else {
+ oldValue = primMapValues[index];
+ primMapValues[index] = value;
+ }
+ return oldValue;
+ }
+ Node<K,V> e;
+ if ((e = getNode(key)) != null) {
+ V oldValue = e.value;
+ e.value = value;
+ afterNodeAccess(e);
+ return oldValue;
+ }
+ return null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * <p>This method will, on a best-effort basis, throw a
+ * {@link ConcurrentModificationException} if it is detected that the
+ * mapping function modifies this map during computation.
+ *
+ * @throws ConcurrentModificationException if it is detected that the
+ * mapping function modified this map
+ */
+ @Override
+ public V computeIfAbsent(K key,
+ Function<? super K, ? extends V> mappingFunction) {
+ if (mappingFunction == null)
+ throw new NullPointerException();
+ if (usePrimHashMap(key)) {
+ return primHashMapComputeIfAbsent(key, mappingFunction);
+ }
+ int hash = hash(key);
+ Node<K,V>[] tab; Node<K,V> first; int n, i;
+ int binCount = 0;
+ TreeNode<K,V> t = null;
+ Node<K,V> old = null;
+ if (size > threshold || (tab = table) == null ||
+ (n = tab.length) == 0)
+ n = (tab = resize()).length;
+ if ((first = tab[i = (n - 1) & hash]) != null) {
+ if (first instanceof TreeNode)
+ old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
+ else {
+ Node<K,V> e = first; K k;
+ do {
+ if (e.hash == hash &&
+ ((k = e.key) == key || (key != null && key.equals(k)))) {
+ old = e;
+ break;
+ }
+ ++binCount;
+ } while ((e = e.next) != null);
+ }
+ V oldValue;
+ if (old != null && (oldValue = old.value) != null) {
+ afterNodeAccess(old);
+ return oldValue;
+ }
+ }
+ int mc = modCount;
+ V v = mappingFunction.apply(key);
+ if (mc != modCount) { throw new ConcurrentModificationException(); }
+ if (v == null) {
+ return null;
+ } else if (old != null) {
+ old.value = v;
+ afterNodeAccess(old);
+ return v;
+ }
+ else if (t != null)
+ t.putTreeVal(this, tab, hash, key, v);
+ else {
+ tab[i] = newNode(hash, key, v, first);
+ if (binCount >= TREEIFY_THRESHOLD - 1)
+ treeifyBin(tab, hash);
+ }
+ modCount = mc + 1;
+ ++size;
+ afterNodeInsertion(true);
+ return v;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * <p>This method will, on a best-effort basis, throw a
+ * {@link ConcurrentModificationException} if it is detected that the
+ * remapping function modifies this map during computation.
+ *
+ * @throws ConcurrentModificationException if it is detected that the
+ * remapping function modified this map
+ */
+ @Override
+ public V computeIfPresent(K key,
+ BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
+ if (remappingFunction == null)
+ throw new NullPointerException();
+ if (usePrimHashMap(key)) {
+ return primHashMapComputeIfPresent(key, remappingFunction);
+ }
+ Node<K,V> e; V oldValue;
+ if ((e = getNode(key)) != null &&
+ (oldValue = e.value) != null) {
+ int mc = modCount;
+ V v = remappingFunction.apply(key, oldValue);
+ if (mc != modCount) { throw new ConcurrentModificationException(); }
+ if (v != null) {
+ e.value = v;
+ afterNodeAccess(e);
+ return v;
+ }
+ else {
+ int hash = hash(key);
+ removeNode(hash, key, null, false, true);
+ }
+ }
+ return null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * <p>This method will, on a best-effort basis, throw a
+ * {@link ConcurrentModificationException} if it is detected that the
+ * remapping function modifies this map during computation.
+ *
+ * @throws ConcurrentModificationException if it is detected that the
+ * remapping function modified this map
+ */
+ @Override
+ public V compute(K key,
+ BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
+ if (remappingFunction == null)
+ throw new NullPointerException();
+ if (usePrimHashMap(key)) {
+ return primHashMapCompute(key, remappingFunction);
+ }
+ int hash = hash(key);
+ Node<K,V>[] tab; Node<K,V> first; int n, i;
+ int binCount = 0;
+ TreeNode<K,V> t = null;
+ Node<K,V> old = null;
+ if (size > threshold || (tab = table) == null ||
+ (n = tab.length) == 0)
+ n = (tab = resize()).length;
+ if ((first = tab[i = (n - 1) & hash]) != null) {
+ if (first instanceof TreeNode)
+ old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
+ else {
+ Node<K,V> e = first; K k;
+ do {
+ if (e.hash == hash &&
+ ((k = e.key) == key || (key != null && key.equals(k)))) {
+ old = e;
+ break;
+ }
+ ++binCount;
+ } while ((e = e.next) != null);
+ }
+ }
+ V oldValue = (old == null) ? null : old.value;
+ int mc = modCount;
+ V v = remappingFunction.apply(key, oldValue);
+ if (mc != modCount) { throw new ConcurrentModificationException(); }
+ if (old != null) {
+ if (v != null) {
+ old.value = v;
+ afterNodeAccess(old);
+ }
+ else
+ removeNode(hash, key, null, false, true);
+ }
+ else if (v != null) {
+ if (t != null)
+ t.putTreeVal(this, tab, hash, key, v);
+ else {
+ tab[i] = newNode(hash, key, v, first);
+ if (binCount >= TREEIFY_THRESHOLD - 1)
+ treeifyBin(tab, hash);
+ }
+ modCount = mc + 1;
+ ++size;
+ afterNodeInsertion(true);
+ }
+ return v;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * <p>This method will, on a best-effort basis, throw a
+ * {@link ConcurrentModificationException} if it is detected that the
+ * remapping function modifies this map during computation.
+ *
+ * @throws ConcurrentModificationException if it is detected that the
+ * remapping function modified this map
+ */
+ @Override
+ public V merge(K key, V value,
+ BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
+ if (value == null || remappingFunction == null)
+ throw new NullPointerException();
+ if (usePrimHashMap(key)) {
+ return primHashMapMerge(key, value, remappingFunction);
+ }
+ int hash = hash(key);
+ Node<K,V>[] tab; Node<K,V> first; int n, i;
+ int binCount = 0;
+ TreeNode<K,V> t = null;
+ Node<K,V> old = null;
+ if (size > threshold || (tab = table) == null ||
+ (n = tab.length) == 0)
+ n = (tab = resize()).length;
+ if ((first = tab[i = (n - 1) & hash]) != null) {
+ if (first instanceof TreeNode)
+ old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
+ else {
+ Node<K,V> e = first; K k;
+ do {
+ if (e.hash == hash &&
+ ((k = e.key) == key || (key != null && key.equals(k)))) {
+ old = e;
+ break;
+ }
+ ++binCount;
+ } while ((e = e.next) != null);
+ }
+ }
+ if (old != null) {
+ V v;
+ if (old.value != null) {
+ int mc = modCount;
+ v = remappingFunction.apply(old.value, value);
+ if (mc != modCount) {
+ throw new ConcurrentModificationException();
+ }
+ } else {
+ v = value;
+ }
+ if (v != null) {
+ old.value = v;
+ afterNodeAccess(old);
+ }
+ else
+ removeNode(hash, key, null, false, true);
+ return v;
+ } else {
+ if (t != null)
+ t.putTreeVal(this, tab, hash, key, value);
+ else {
+ tab[i] = newNode(hash, key, value, first);
+ if (binCount >= TREEIFY_THRESHOLD - 1)
+ treeifyBin(tab, hash);
+ }
+ ++modCount;
+ ++size;
+ afterNodeInsertion(true);
+ return value;
+ }
+ }
+
+ @Override
+ public void forEach(BiConsumer<? super K, ? super V> action) {
+ if (action == null)
+ throw new NullPointerException();
+ int mc = modCount;
+ if (usePrimHashMap()) {
+ int remaining = size;
+ if (primMapNullKeyValid) {
+ action.accept(null, primMapValOfNullKey);
+ --remaining;
+ }
+ if (remaining > 0) {
+ boolean[] valids = primMapValids;
+ Long[] keys = primMapKeys;
+ V[] values = primMapValues;
+ int length = valids.length;
+ for (int i = 0; remaining > 0 && i < length; ++i) {
+ if (valids[i]) {
+ action.accept(castKeyToGenericType(keys[i]), values[i]);
+ --remaining;
+ }
+ }
+ }
+ if (remaining != 0) {
+ throw new ConcurrentModificationException();
+ }
+ } else {
+ Node<K,V>[] tab;
+ if (size > 0 && (tab = table) != null) {
+ for (Node<K,V> e : tab) {
+ for (; e != null; e = e.next)
+ action.accept(e.key, e.value);
+ }
+ }
+ }
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+
+ @Override
+ public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
+ if (function == null)
+ throw new NullPointerException();
+ if (usePrimHashMap()) {
+ int mc = modCount;
+ int remaining = size;
+ if (primMapNullKeyValid) {
+ primMapValOfNullKey = function.apply(null, primMapValOfNullKey);
+ --remaining;
+ }
+ if (remaining > 0) {
+ boolean[] valids = primMapValids;
+ Long[] keys = primMapKeys;
+ V[] values = primMapValues;
+ int length = valids.length;
+ for (int i = 0; remaining > 0 && i < length; ++i) {
+ if (valids[i]) {
+ values[i] = function.apply(castKeyToGenericType(keys[i]), values[i]);
+ --remaining;
+ }
+ }
+ }
+ if (remaining != 0) {
+ throw new ConcurrentModificationException();
+ }
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ } else {
+ Node<K,V>[] tab;
+ if (size > 0 && (tab = table) != null) {
+ int mc = modCount;
+ for (Node<K,V> e : tab) {
+ for (; e != null; e = e.next) {
+ e.value = function.apply(e.key, e.value);
+ }
+ }
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ }
+ }
+
+ /* ------------------------------------------------------------ */
+ // Cloning and serialization
+
+ /**
+ * Returns a shallow copy of this {@code HashMap} instance: the keys and
+ * values themselves are not cloned.
+ *
+ * @return a shallow copy of this map
+ */
+ @SuppressWarnings("unchecked")
+ @Override
+ public Object clone() {
+ HashMap<K,V> result;
+ try {
+ result = (HashMap<K,V>)super.clone();
+ } catch (CloneNotSupportedException e) {
+ // this shouldn't happen, since we are Cloneable
+ throw new InternalError(e);
+ }
+ result.reinitialize();
+ result.putMapEntries(this, false);
+ return result;
+ }
+
+ // These methods are also used when serializing HashSets
+ final float loadFactor() { return loadFactor; }
+ final int capacity() {
+ if (usePrimHashMap()) {
+ return (primMapValids != null) ? primMapValids.length :
+ (threshold > 0) ? threshold :
+ DEFAULT_INITIAL_CAPACITY;
+ }
+ return (table != null) ? table.length :
+ (threshold > 0) ? threshold :
+ DEFAULT_INITIAL_CAPACITY;
+ }
+
+ /**
+ * Saves this map to a stream (that is, serializes it).
+ *
+ * @param s the stream
+ * @throws IOException if an I/O error occurs
+ * @serialData The <i>capacity</i> of the HashMap (the length of the
+ * bucket array) is emitted (int), followed by the
+ * <i>size</i> (an int, the number of key-value
+ * mappings), followed by the key (Object) and value (Object)
+ * for each key-value mapping. The key-value mappings are
+ * emitted in no particular order.
+ */
+ @java.io.Serial
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws IOException {
+ int buckets = capacity();
+ // Write out the threshold, loadfactor, and any hidden stuff
+ s.defaultWriteObject();
+ s.writeInt(buckets);
+ s.writeInt(size);
+ internalWriteEntries(s);
+ }
+
+ /**
+ * Reconstitutes this map from a stream (that is, deserializes it).
+ * @param s the stream
+ * @throws ClassNotFoundException if the class of a serialized object
+ * could not be found
+ * @throws IOException if an I/O error occurs
+ */
+ @java.io.Serial
+ private void readObject(ObjectInputStream s)
+ throws IOException, ClassNotFoundException {
+
+ ObjectInputStream.GetField fields = s.readFields();
+
+ // Read loadFactor (ignore threshold)
+ float lf = fields.get("loadFactor", 0.75f);
+ if (lf <= 0 || Float.isNaN(lf))
+ throw new InvalidObjectException("Illegal load factor: " + lf);
+
+ lf = Math.min(Math.max(0.25f, lf), 4.0f);
+ HashMap.UnsafeHolder.putLoadFactor(this, lf);
+
+ reinitialize();
+
+ s.readInt(); // Read and ignore number of buckets
+ int mappings = s.readInt(); // Read number of mappings (size)
+ if (mappings < 0) {
+ throw new InvalidObjectException("Illegal mappings count: " + mappings);
+ } else if (mappings == 0) {
+ // use defaults
+ } else if (mappings > 0) {
+ float fc = (float)mappings / lf + 1.0f;
+ int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
+ DEFAULT_INITIAL_CAPACITY :
+ (fc >= MAXIMUM_CAPACITY) ?
+ MAXIMUM_CAPACITY :
+ tableSizeFor((int)fc));
+ // a null table uses threshold as its initial size;
+ // set threshold to cap here to make sure the table length is a power of 2
+ threshold = cap;
+
+ // Check Map.Entry[].class since it's the nearest public type to
+ // what we're actually creating.
+ SharedSecrets.getJavaObjectInputStreamAccess().checkArray(s, Map.Entry[].class, cap);
+
+ // Read the keys and values, and put the mappings in the HashMap
+ for (int i = 0; i < mappings; i++) {
+ @SuppressWarnings("unchecked")
+ K key = (K) s.readObject();
+ @SuppressWarnings("unchecked")
+ V value = (V) s.readObject();
+ if (usePrimHashMap(key)) {
+ primHashMapPutVal((Long)key, value, false);
+ } else {
+ putVal(key, value, false, false, true);
+ }
+ }
+ }
+ }
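+
+ // Worked example of the capacity computation above: deserializing
+ // mappings = 100 with lf = 0.75f gives fc = 100 / 0.75 + 1 = 134.3, which
+ // tableSizeFor(134) rounds up to the next power of two, so cap = 256.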
+
+ // Support for resetting final field during deserializing
+ private static final class UnsafeHolder {
+ private UnsafeHolder() { throw new InternalError(); }
+ private static final jdk.internal.misc.Unsafe unsafe
+ = jdk.internal.misc.Unsafe.getUnsafe();
+ private static final long LF_OFFSET
+ = unsafe.objectFieldOffset(HashMap.class, "loadFactor");
+ static void putLoadFactor(HashMap<?, ?> map, float lf) {
+ unsafe.putFloat(map, LF_OFFSET, lf);
+ }
+ }
+
+ /* ------------------------------------------------------------ */
+ // iterators
+
+ abstract class HashIterator {
+ Node<K,V> next; // next entry to return
+ Node<K,V> current; // current entry
+ int expectedModCount; // for fast-fail
+ int index; // current slot
+
+ HashIterator() {
+ expectedModCount = modCount;
+ Node<K,V>[] t = table;
+ current = next = null;
+ index = 0;
+ if (t != null && size > 0) { // advance to first entry
+ do {} while (index < t.length && (next = t[index++]) == null);
+ }
+ }
+
+ public final boolean hasNext() {
+ return next != null;
+ }
+
+ final Node<K,V> nextNode() {
+ Node<K,V>[] t;
+ Node<K,V> e = next;
+ if (modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ if (e == null)
+ throw new NoSuchElementException();
+ if ((next = (current = e).next) == null && (t = table) != null) {
+ do {} while (index < t.length && (next = t[index++]) == null);
+ }
+ return e;
+ }
+
+ public final void remove() {
+ Node<K,V> p = current;
+ if (p == null)
+ throw new IllegalStateException();
+ if (modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ current = null;
+ removeNode(p.hash, p.key, null, false, false);
+ expectedModCount = modCount;
+ }
+ }
+
+ final class KeyIterator extends HashIterator
+ implements Iterator<K> {
+ public final K next() { return nextNode().key; }
+ }
+
+ final class ValueIterator extends HashIterator
+ implements Iterator<V> {
+ public final V next() { return nextNode().value; }
+ }
+
+ final class EntryIterator extends HashIterator
+ implements Iterator<Map.Entry<K,V>> {
+ public final Map.Entry<K,V> next() { return nextNode(); }
+ }
+
+ /* ------------------------------------------------------------ */
+ // spliterators
+
+ static class HashMapSpliterator<K,V> {
+ final HashMap<K,V> map;
+ Node<K,V> current; // current node
+ int index; // current index, modified on advance/split
+ int fence; // one past last index
+ int est; // size estimate
+ int expectedModCount; // for comodification checks
+
+ HashMapSpliterator(HashMap<K,V> m, int origin,
+ int fence, int est,
+ int expectedModCount) {
+ // roll back to the generic map if this static class is created directly,
+ // e.g. by HashSet.spliterator
+ m.rollbackToGenericMap();
+ this.map = m;
+ this.index = origin;
+ this.fence = fence;
+ this.est = est;
+ this.expectedModCount = expectedModCount;
+ }
+
+ final int getFence() { // initialize fence and size on first use
+ int hi;
+ if ((hi = fence) < 0) {
+ HashMap<K,V> m = map;
+ est = m.size;
+ expectedModCount = m.modCount;
+ Node<K,V>[] tab = m.table;
+ hi = fence = (tab == null) ? 0 : tab.length;
+ }
+ return hi;
+ }
+
+ public final long estimateSize() {
+ getFence(); // force init
+ return (long) est;
+ }
+ }
+
+ static final class KeySpliterator<K,V>
+ extends HashMapSpliterator<K,V>
+ implements Spliterator<K> {
+ KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
+ int expectedModCount) {
+ super(m, origin, fence, est, expectedModCount);
+ }
+
+ public KeySpliterator<K,V> trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid || current != null) ? null :
+ new KeySpliterator<>(map, lo, index = mid, est >>>= 1,
+ expectedModCount);
+ }
+
+ public void forEachRemaining(Consumer<? super K> action) {
+ int i, hi, mc;
+ if (action == null)
+ throw new NullPointerException();
+ HashMap<K,V> m = map;
+ Node<K,V>[] tab = m.table;
+ if ((hi = fence) < 0) {
+ mc = expectedModCount = m.modCount;
+ hi = fence = (tab == null) ? 0 : tab.length;
+ }
+ else
+ mc = expectedModCount;
+ if (tab != null && tab.length >= hi &&
+ (i = index) >= 0 && (i < (index = hi) || current != null)) {
+ Node<K,V> p = current;
+ current = null;
+ do {
+ if (p == null)
+ p = tab[i++];
+ else {
+ action.accept(p.key);
+ p = p.next;
+ }
+ } while (p != null || i < hi);
+ if (m.modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ public boolean tryAdvance(Consumer<? super K> action) {
+ int hi;
+ if (action == null)
+ throw new NullPointerException();
+ Node<K,V>[] tab = map.table;
+ if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
+ while (current != null || index < hi) {
+ if (current == null)
+ current = tab[index++];
+ else {
+ K k = current.key;
+ current = current.next;
+ action.accept(k);
+ if (map.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
+ Spliterator.DISTINCT;
+ }
+ }
+
+ static final class ValueSpliterator<K,V>
+ extends HashMapSpliterator<K,V>
+ implements Spliterator<V> {
+ ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
+ int expectedModCount) {
+ super(m, origin, fence, est, expectedModCount);
+ }
+
+ public ValueSpliterator<K,V> trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid || current != null) ? null :
+ new ValueSpliterator<>(map, lo, index = mid, est >>>= 1,
+ expectedModCount);
+ }
+
+ public void forEachRemaining(Consumer<? super V> action) {
+ int i, hi, mc;
+ if (action == null)
+ throw new NullPointerException();
+ HashMap<K,V> m = map;
+ Node<K,V>[] tab = m.table;
+ if ((hi = fence) < 0) {
+ mc = expectedModCount = m.modCount;
+ hi = fence = (tab == null) ? 0 : tab.length;
+ }
+ else
+ mc = expectedModCount;
+ if (tab != null && tab.length >= hi &&
+ (i = index) >= 0 && (i < (index = hi) || current != null)) {
+ Node<K,V> p = current;
+ current = null;
+ do {
+ if (p == null)
+ p = tab[i++];
+ else {
+ action.accept(p.value);
+ p = p.next;
+ }
+ } while (p != null || i < hi);
+ if (m.modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ public boolean tryAdvance(Consumer<? super V> action) {
+ int hi;
+ if (action == null)
+ throw new NullPointerException();
+ Node<K,V>[] tab = map.table;
+ if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
+ while (current != null || index < hi) {
+ if (current == null)
+ current = tab[index++];
+ else {
+ V v = current.value;
+ current = current.next;
+ action.accept(v);
+ if (map.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
+ }
+ }
+
+ static final class EntrySpliterator<K,V>
+ extends HashMapSpliterator<K,V>
+ implements Spliterator<Map.Entry<K,V>> {
+ EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
+ int expectedModCount) {
+ super(m, origin, fence, est, expectedModCount);
+ }
+
+ public EntrySpliterator<K,V> trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid || current != null) ? null :
+ new EntrySpliterator<>(map, lo, index = mid, est >>>= 1,
+ expectedModCount);
+ }
+
+ public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
+ int i, hi, mc;
+ if (action == null)
+ throw new NullPointerException();
+ HashMap<K,V> m = map;
+ Node<K,V>[] tab = m.table;
+ if ((hi = fence) < 0) {
+ mc = expectedModCount = m.modCount;
+ hi = fence = (tab == null) ? 0 : tab.length;
+ }
+ else
+ mc = expectedModCount;
+ if (tab != null && tab.length >= hi &&
+ (i = index) >= 0 && (i < (index = hi) || current != null)) {
+ Node<K,V> p = current;
+ current = null;
+ do {
+ if (p == null)
+ p = tab[i++];
+ else {
+ action.accept(p);
+ p = p.next;
+ }
+ } while (p != null || i < hi);
+ if (m.modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
+ int hi;
+ if (action == null)
+ throw new NullPointerException();
+ Node<K,V>[] tab = map.table;
+ if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
+ while (current != null || index < hi) {
+ if (current == null)
+ current = tab[index++];
+ else {
+ Node<K,V> e = current;
+ current = current.next;
+ action.accept(e);
+ if (map.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
+ Spliterator.DISTINCT;
+ }
+ }
+
+ /* ------------------------------------------------------------ */
+ // LinkedHashMap support
+
+
+ /*
+ * The following package-protected methods are designed to be
+ * overridden by LinkedHashMap, but not by any other subclass.
+ * Nearly all other internal methods are also package-protected
+ * but are declared final, so can be used by LinkedHashMap, view
+ * classes, and HashSet.
+ */
+
+ // Create a regular (non-tree) node
+ Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {
+ return new Node<>(hash, key, value, next);
+ }
+
+ // For conversion from TreeNodes to plain nodes
+ Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
+ return new Node<>(p.hash, p.key, p.value, next);
+ }
+
+ // Create a tree bin node
+ TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
+ return new TreeNode<>(hash, key, value, next);
+ }
+
+ // For treeifyBin
+ TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
+ return new TreeNode<>(p.hash, p.key, p.value, next);
+ }
+
+ /**
+ * Reset to initial default state. Called by clone and readObject.
+ */
+ void reinitialize() {
+ table = null;
+ entrySet = null;
+ keySet = null;
+ values = null;
+ modCount = 0;
+ threshold = 0;
+ size = 0;
+ primMapValids = null;
+ primMapKeys = null;
+ primMapValues = null;
+ primMapNullKeyValid = false;
+ primMapValOfNullKey = null;
+ // loadFactor should be no greater than 0.8f to make sure there is at least one empty slot in the prim raw array
+ if (loadFactor > MAX_LOAD_FACTOR_FOR_PRIM_MAP) {
+ disablePrimHashMap();
+ } else {
+ initUsingPrimHashMap();
+ }
+ }
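+
+ // Sketch of the constraint above: a map cloned or deserialized with, say,
+ // loadFactor = 0.9f exceeds MAX_LOAD_FACTOR_FOR_PRIM_MAP and stays on the
+ // generic Node table; the prim path's open-addressing probe loops rely on at
+ // least one empty slot to terminate.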
+
+ // Callbacks to allow LinkedHashMap post-actions
+ void afterNodeAccess(Node<K,V> p) { }
+ void afterNodeInsertion(boolean evict) { }
+ void afterNodeRemoval(Node<K,V> p) { }
+
+ // Called only from writeObject, to ensure compatible ordering.
+ void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
+ int remaining = size;
+ if (usePrimHashMap()) {
+ if (primMapNullKeyValid) {
+ s.writeObject(null);
+ s.writeObject(primMapValOfNullKey);
+ --remaining;
+ }
+ if (remaining > 0) {
+ boolean[] valids = primMapValids;
+ Long[] keys = primMapKeys;
+ V[] values = primMapValues;
+ int length = valids.length;
+ for (int i = 0; remaining > 0 && i < length; ++i) {
+ if (valids[i]) {
+ s.writeObject(castKeyToGenericType(keys[i]));
+ s.writeObject(values[i]);
+ --remaining;
+ }
+ }
+ }
+ } else {
+ Node<K,V>[] tab;
+ if (size > 0 && (tab = table) != null) {
+ for (Node<K,V> e : tab) {
+ for (; e != null; e = e.next) {
+ s.writeObject(e.key);
+ s.writeObject(e.value);
+ --remaining;
+ }
+ }
+ }
+ }
+
+ if (remaining != 0) {
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ /* ------------------------------------------------------------ */
+ // Tree bins
+
+ /**
+ * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn
+ * extends Node) so can be used as extension of either regular or
+ * linked node.
+ */
+ static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> {
+ TreeNode<K,V> parent; // red-black tree links
+ TreeNode<K,V> left;
+ TreeNode<K,V> right;
+ TreeNode<K,V> prev; // needed to unlink next upon deletion
+ boolean red;
+ TreeNode(int hash, K key, V val, Node<K,V> next) {
+ super(hash, key, val, next);
+ }
+
+ /**
+ * Returns root of tree containing this node.
+ */
+ final TreeNode<K,V> root() {
+ for (TreeNode<K,V> r = this, p;;) {
+ if ((p = r.parent) == null)
+ return r;
+ r = p;
+ }
+ }
+
+ /**
+ * Ensures that the given root is the first node of its bin.
+ */
+ static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
+ int n;
+ if (root != null && tab != null && (n = tab.length) > 0) {
+ int index = (n - 1) & root.hash;
+ TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
+ if (root != first) {
+ Node<K,V> rn;
+ tab[index] = root;
+ TreeNode<K,V> rp = root.prev;
+ if ((rn = root.next) != null)
+ ((TreeNode<K,V>)rn).prev = rp;
+ if (rp != null)
+ rp.next = rn;
+ if (first != null)
+ first.prev = root;
+ root.next = first;
+ root.prev = null;
+ }
+ assert checkInvariants(root);
+ }
+ }
+
+ /**
+ * Finds the node starting at root p with the given hash and key.
+ * The kc argument caches comparableClassFor(key) upon first use
+ * comparing keys.
+ */
+ final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
+ TreeNode<K,V> p = this;
+ do {
+ int ph, dir; K pk;
+ TreeNode<K,V> pl = p.left, pr = p.right, q;
+ if ((ph = p.hash) > h)
+ p = pl;
+ else if (ph < h)
+ p = pr;
+ else if ((pk = p.key) == k || (k != null && k.equals(pk)))
+ return p;
+ else if (pl == null)
+ p = pr;
+ else if (pr == null)
+ p = pl;
+ else if ((kc != null ||
+ (kc = comparableClassFor(k)) != null) &&
+ (dir = compareComparables(kc, k, pk)) != 0)
+ p = (dir < 0) ? pl : pr;
+ else if ((q = pr.find(h, k, kc)) != null)
+ return q;
+ else
+ p = pl;
+ } while (p != null);
+ return null;
+ }
+
+ /**
+ * Calls find for root node.
+ */
+ final TreeNode<K,V> getTreeNode(int h, Object k) {
+ return ((parent != null) ? root() : this).find(h, k, null);
+ }
+
+ /**
+ * Tie-breaking utility for ordering insertions when equal
+ * hashCodes and non-comparable. We don't require a total
+ * order, just a consistent insertion rule to maintain
+ * equivalence across rebalancings. Tie-breaking further than
+ * necessary simplifies testing a bit.
+ */
+ static int tieBreakOrder(Object a, Object b) {
+ int d;
+ if (a == null || b == null ||
+ (d = a.getClass().getName().
+ compareTo(b.getClass().getName())) == 0)
+ d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
+ -1 : 1);
+ return d;
+ }
+
+ /**
+ * Forms tree of the nodes linked from this node.
+ */
+ final void treeify(Node<K,V>[] tab) {
+ TreeNode<K,V> root = null;
+ for (TreeNode<K,V> x = this, next; x != null; x = next) {
+ next = (TreeNode<K,V>)x.next;
+ x.left = x.right = null;
+ if (root == null) {
+ x.parent = null;
+ x.red = false;
+ root = x;
+ }
+ else {
+ K k = x.key;
+ int h = x.hash;
+ Class<?> kc = null;
+ for (TreeNode<K,V> p = root;;) {
+ int dir, ph;
+ K pk = p.key;
+ if ((ph = p.hash) > h)
+ dir = -1;
+ else if (ph < h)
+ dir = 1;
+ else if ((kc == null &&
+ (kc = comparableClassFor(k)) == null) ||
+ (dir = compareComparables(kc, k, pk)) == 0)
+ dir = tieBreakOrder(k, pk);
+
+ TreeNode<K,V> xp = p;
+ if ((p = (dir <= 0) ? p.left : p.right) == null) {
+ x.parent = xp;
+ if (dir <= 0)
+ xp.left = x;
+ else
+ xp.right = x;
+ root = balanceInsertion(root, x);
+ break;
+ }
+ }
+ }
+ }
+ moveRootToFront(tab, root);
+ }
+
+ /**
+ * Returns a list of non-TreeNodes replacing those linked from
+ * this node.
+ */
+ final Node<K,V> untreeify(HashMap<K,V> map) {
+ Node<K,V> hd = null, tl = null;
+ for (Node<K,V> q = this; q != null; q = q.next) {
+ Node<K,V> p = map.replacementNode(q, null);
+ if (tl == null)
+ hd = p;
+ else
+ tl.next = p;
+ tl = p;
+ }
+ return hd;
+ }
+
+ /**
+ * Tree version of putVal.
+ */
+ final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,
+ int h, K k, V v) {
+ Class<?> kc = null;
+ boolean searched = false;
+ TreeNode<K,V> root = (parent != null) ? root() : this;
+ for (TreeNode<K,V> p = root;;) {
+ int dir, ph; K pk;
+ if ((ph = p.hash) > h)
+ dir = -1;
+ else if (ph < h)
+ dir = 1;
+ else if ((pk = p.key) == k || (k != null && k.equals(pk)))
+ return p;
+ else if ((kc == null &&
+ (kc = comparableClassFor(k)) == null) ||
+ (dir = compareComparables(kc, k, pk)) == 0) {
+ if (!searched) {
+ TreeNode<K,V> q, ch;
+ searched = true;
+ if (((ch = p.left) != null &&
+ (q = ch.find(h, k, kc)) != null) ||
+ ((ch = p.right) != null &&
+ (q = ch.find(h, k, kc)) != null))
+ return q;
+ }
+ dir = tieBreakOrder(k, pk);
+ }
+
+ TreeNode<K,V> xp = p;
+ if ((p = (dir <= 0) ? p.left : p.right) == null) {
+ Node<K,V> xpn = xp.next;
+ TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
+ if (dir <= 0)
+ xp.left = x;
+ else
+ xp.right = x;
+ xp.next = x;
+ x.parent = x.prev = xp;
+ if (xpn != null)
+ ((TreeNode<K,V>)xpn).prev = x;
+ moveRootToFront(tab, balanceInsertion(root, x));
+ return null;
+ }
+ }
+ }
+
+ /**
+ * Removes the given node, that must be present before this call.
+ * This is messier than typical red-black deletion code because we
+ * cannot swap the contents of an interior node with a leaf
+ * successor that is pinned by "next" pointers that are accessible
+ * independently during traversal. So instead we swap the tree
+ * linkages. If the current tree appears to have too few nodes,
+ * the bin is converted back to a plain bin. (The test triggers
+ * somewhere between 2 and 6 nodes, depending on tree structure).
+ */
+ final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab,
+ boolean movable) {
+ int n;
+ if (tab == null || (n = tab.length) == 0)
+ return;
+ int index = (n - 1) & hash;
+ TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl;
+ TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev;
+ if (pred == null)
+ tab[index] = first = succ;
+ else
+ pred.next = succ;
+ if (succ != null)
+ succ.prev = pred;
+ if (first == null)
+ return;
+ if (root.parent != null)
+ root = root.root();
+ if (root == null
+ || (movable
+ && (root.right == null
+ || (rl = root.left) == null
+ || rl.left == null))) {
+ tab[index] = first.untreeify(map); // too small
+ return;
+ }
+ TreeNode<K,V> p = this, pl = left, pr = right, replacement;
+ if (pl != null && pr != null) {
+ TreeNode<K,V> s = pr, sl;
+ while ((sl = s.left) != null) // find successor
+ s = sl;
+ boolean c = s.red; s.red = p.red; p.red = c; // swap colors
+ TreeNode<K,V> sr = s.right;
+ TreeNode<K,V> pp = p.parent;
+ if (s == pr) { // p was s's direct parent
+ p.parent = s;
+ s.right = p;
+ }
+ else {
+ TreeNode<K,V> sp = s.parent;
+ if ((p.parent = sp) != null) {
+ if (s == sp.left)
+ sp.left = p;
+ else
+ sp.right = p;
+ }
+ if ((s.right = pr) != null)
+ pr.parent = s;
+ }
+ p.left = null;
+ if ((p.right = sr) != null)
+ sr.parent = p;
+ if ((s.left = pl) != null)
+ pl.parent = s;
+ if ((s.parent = pp) == null)
+ root = s;
+ else if (p == pp.left)
+ pp.left = s;
+ else
+ pp.right = s;
+ if (sr != null)
+ replacement = sr;
+ else
+ replacement = p;
+ }
+ else if (pl != null)
+ replacement = pl;
+ else if (pr != null)
+ replacement = pr;
+ else
+ replacement = p;
+ if (replacement != p) {
+ TreeNode<K,V> pp = replacement.parent = p.parent;
+ if (pp == null)
+ (root = replacement).red = false;
+ else if (p == pp.left)
+ pp.left = replacement;
+ else
+ pp.right = replacement;
+ p.left = p.right = p.parent = null;
+ }
+
+ TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement);
+
+ if (replacement == p) { // detach
+ TreeNode<K,V> pp = p.parent;
+ p.parent = null;
+ if (pp != null) {
+ if (p == pp.left)
+ pp.left = null;
+ else if (p == pp.right)
+ pp.right = null;
+ }
+ }
+ if (movable)
+ moveRootToFront(tab, r);
+ }
+
+ /**
+ * Splits nodes in a tree bin into lower and upper tree bins,
+ * or untreeifies if now too small. Called only from resize;
+ * see above discussion about split bits and indices.
+ *
+ * @param map the map
+ * @param tab the table for recording bin heads
+ * @param index the index of the table being split
+ * @param bit the bit of hash to split on
+ */
+ final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) {
+ TreeNode<K,V> b = this;
+ // Relink into lo and hi lists, preserving order
+ TreeNode<K,V> loHead = null, loTail = null;
+ TreeNode<K,V> hiHead = null, hiTail = null;
+ int lc = 0, hc = 0;
+ for (TreeNode<K,V> e = b, next; e != null; e = next) {
+ next = (TreeNode<K,V>)e.next;
+ e.next = null;
+ if ((e.hash & bit) == 0) {
+ if ((e.prev = loTail) == null)
+ loHead = e;
+ else
+ loTail.next = e;
+ loTail = e;
+ ++lc;
+ }
+ else {
+ if ((e.prev = hiTail) == null)
+ hiHead = e;
+ else
+ hiTail.next = e;
+ hiTail = e;
+ ++hc;
+ }
+ }
+
+ if (loHead != null) {
+ if (lc <= UNTREEIFY_THRESHOLD)
+ tab[index] = loHead.untreeify(map);
+ else {
+ tab[index] = loHead;
+ if (hiHead != null) // (else is already treeified)
+ loHead.treeify(tab);
+ }
+ }
+ if (hiHead != null) {
+ if (hc <= UNTREEIFY_THRESHOLD)
+ tab[index + bit] = hiHead.untreeify(map);
+ else {
+ tab[index + bit] = hiHead;
+ if (loHead != null)
+ hiHead.treeify(tab);
+ }
+ }
+ }
+
+ /* ------------------------------------------------------------ */
+ // Red-black tree methods, all adapted from CLR
+
+ static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
+ TreeNode<K,V> p) {
+ TreeNode<K,V> r, pp, rl;
+ if (p != null && (r = p.right) != null) {
+ if ((rl = p.right = r.left) != null)
+ rl.parent = p;
+ if ((pp = r.parent = p.parent) == null)
+ (root = r).red = false;
+ else if (pp.left == p)
+ pp.left = r;
+ else
+ pp.right = r;
+ r.left = p;
+ p.parent = r;
+ }
+ return root;
+ }
+
+ static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
+ TreeNode<K,V> p) {
+ TreeNode<K,V> l, pp, lr;
+ if (p != null && (l = p.left) != null) {
+ if ((lr = p.left = l.right) != null)
+ lr.parent = p;
+ if ((pp = l.parent = p.parent) == null)
+ (root = l).red = false;
+ else if (pp.right == p)
+ pp.right = l;
+ else
+ pp.left = l;
+ l.right = p;
+ p.parent = l;
+ }
+ return root;
+ }
+
+ static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
+ TreeNode<K,V> x) {
+ x.red = true;
+ for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
+ if ((xp = x.parent) == null) {
+ x.red = false;
+ return x;
+ }
+ else if (!xp.red || (xpp = xp.parent) == null)
+ return root;
+ if (xp == (xppl = xpp.left)) {
+ if ((xppr = xpp.right) != null && xppr.red) {
+ xppr.red = false;
+ xp.red = false;
+ xpp.red = true;
+ x = xpp;
+ }
+ else {
+ if (x == xp.right) {
+ root = rotateLeft(root, x = xp);
+ xpp = (xp = x.parent) == null ? null : xp.parent;
+ }
+ if (xp != null) {
+ xp.red = false;
+ if (xpp != null) {
+ xpp.red = true;
+ root = rotateRight(root, xpp);
+ }
+ }
+ }
+ }
+ else {
+ if (xppl != null && xppl.red) {
+ xppl.red = false;
+ xp.red = false;
+ xpp.red = true;
+ x = xpp;
+ }
+ else {
+ if (x == xp.left) {
+ root = rotateRight(root, x = xp);
+ xpp = (xp = x.parent) == null ? null : xp.parent;
+ }
+ if (xp != null) {
+ xp.red = false;
+ if (xpp != null) {
+ xpp.red = true;
+ root = rotateLeft(root, xpp);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
+ TreeNode<K,V> x) {
+ for (TreeNode<K,V> xp, xpl, xpr;;) {
+ if (x == null || x == root)
+ return root;
+ else if ((xp = x.parent) == null) {
+ x.red = false;
+ return x;
+ }
+ else if (x.red) {
+ x.red = false;
+ return root;
+ }
+ else if ((xpl = xp.left) == x) {
+ if ((xpr = xp.right) != null && xpr.red) {
+ xpr.red = false;
+ xp.red = true;
+ root = rotateLeft(root, xp);
+ xpr = (xp = x.parent) == null ? null : xp.right;
+ }
+ if (xpr == null)
+ x = xp;
+ else {
+ TreeNode<K,V> sl = xpr.left, sr = xpr.right;
+ if ((sr == null || !sr.red) &&
+ (sl == null || !sl.red)) {
+ xpr.red = true;
+ x = xp;
+ }
+ else {
+ if (sr == null || !sr.red) {
+ if (sl != null)
+ sl.red = false;
+ xpr.red = true;
+ root = rotateRight(root, xpr);
+ xpr = (xp = x.parent) == null ?
+ null : xp.right;
+ }
+ if (xpr != null) {
+ xpr.red = (xp == null) ? false : xp.red;
+ if ((sr = xpr.right) != null)
+ sr.red = false;
+ }
+ if (xp != null) {
+ xp.red = false;
+ root = rotateLeft(root, xp);
+ }
+ x = root;
+ }
+ }
+ }
+ else { // symmetric
+ if (xpl != null && xpl.red) {
+ xpl.red = false;
+ xp.red = true;
+ root = rotateRight(root, xp);
+ xpl = (xp = x.parent) == null ? null : xp.left;
+ }
+ if (xpl == null)
+ x = xp;
+ else {
+ TreeNode<K,V> sl = xpl.left, sr = xpl.right;
+ if ((sl == null || !sl.red) &&
+ (sr == null || !sr.red)) {
+ xpl.red = true;
+ x = xp;
+ }
+ else {
+ if (sl == null || !sl.red) {
+ if (sr != null)
+ sr.red = false;
+ xpl.red = true;
+ root = rotateLeft(root, xpl);
+ xpl = (xp = x.parent) == null ?
+ null : xp.left;
+ }
+ if (xpl != null) {
+ xpl.red = (xp == null) ? false : xp.red;
+ if ((sl = xpl.left) != null)
+ sl.red = false;
+ }
+ if (xp != null) {
+ xp.red = false;
+ root = rotateRight(root, xp);
+ }
+ x = root;
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Recursive invariant check
+ */
+ static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
+ TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
+ tb = t.prev, tn = (TreeNode<K,V>)t.next;
+ if (tb != null && tb.next != t)
+ return false;
+ if (tn != null && tn.prev != t)
+ return false;
+ if (tp != null && t != tp.left && t != tp.right)
+ return false;
+ if (tl != null && (tl.parent != t || tl.hash > t.hash))
+ return false;
+ if (tr != null && (tr.parent != t || tr.hash < t.hash))
+ return false;
+ if (t.red && tl != null && tl.red && tr != null && tr.red)
+ return false;
+ if (tl != null && !checkInvariants(tl))
+ return false;
+ if (tr != null && !checkInvariants(tr))
+ return false;
+ return true;
+ }
+ }
+
+ /* ------------------------------------------------------------ */
+ // Primitive long HashMap support
+
+ @SuppressWarnings("unchecked")
+ private K castKeyToGenericType(Long key) {
+ return (K)(key);
+ }
+
+ private void initUsingPrimHashMap() {
+ usingPrimHashMap = true;
+ primMapNullKeyValid = false;
+ primMapValOfNullKey = null;
+ }
+
+ /**
+ * Sets usingPrimHashMap to false to disable the primitive long hashmap.
+ */
+ protected final void disablePrimHashMap() {
+ usingPrimHashMap = false;
+ }
+
+ private boolean usePrimHashMap() {
+ return usingPrimHashMap;
+ }
+
+ private boolean usePrimHashMap(Object key) {
+ if (!usingPrimHashMap) {
+ return false;
+ }
+ if (key != null && !(key instanceof Long)) {
+ rollbackToGenericMap();
+ return false;
+ }
+ return true;
+ }
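+
+ // Sketch of the dispatch rule above: null and Long keys stay on the primitive
+ // path; the first key of any other type permanently rolls this instance back
+ // to the generic Node-based table. For example (rawtypes used for brevity):
+ //   m.putIfAbsent(2L, "a");          // primitive path
+ //   m.putIfAbsent(null, "n");        // primitive path (dedicated null-key slot)
+ //   ((Map) m).putIfAbsent("x", "b"); // rollbackToGenericMap(), then generic path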
+
+ /**
+ * Copies the prim hashmap's key-value pairs into the Node<K,V>[] table
+ * and then sets usingPrimHashMap to false.
+ */
+ private void rollbackToGenericMap() {
+ if (!usePrimHashMap()) {
+ return;
+ }
+ // take a lock to prevent multiple threads from executing the following rollback code:
+ synchronized (this) {
+ if (!usePrimHashMap()) {
+ return;
+ }
+ if (size > 0) {
+ // a null table uses threshold as its initial length;
+ // set threshold from size here to make sure the table length is a power of 2
+ threshold = tableSizeFor(size);
+ // take a snapshot to detect concurrent modification
+ int expectedSize = size;
+ int expectedCount = modCount;
+ int remaining = size;
+ // put existing key-value to GenericMap
+ if (primMapNullKeyValid) {
+ putVal(null, primMapValOfNullKey, false, true, false);
+ --remaining;
+ }
+ final boolean[] valids = primMapValids;
+ if (valids != null) {
+ final Long[] keys = primMapKeys;
+ final V[] values = primMapValues;
+ int cap = valids.length;
+ for (int i = 0; remaining > 0 && i < cap; ++i) {
+ if (valids[i]) {
+ putVal(castKeyToGenericType(keys[i]), values[i], false, true, false);
+ --remaining;
+ }
+ }
+ }
+
+ if (remaining != 0 || expectedSize != size || expectedCount != modCount) {
+ throw new ConcurrentModificationException();
+ }
+ // Don't set the arrays to null. Keep the long map's data to avoid an NPE on concurrent access.
+ }
+ disablePrimHashMap();
+ }
+ }
+
+ /**
+ * Computes hash to get index.
+ */
+ private static int primHashMapCalculateIndex(Object key, final int mask) {
+ return hash(key) & mask;
+ }
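+
+ // Example: with 16 slots the mask is 15 (0b1111), so a key whose spread hash
+ // is 0x2A lands in slot 0x2A & 0xF = 10; collisions are then resolved by
+ // linear probing, stepping index = (index + 1) & mask.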
+
+ private void primHashMapResize() {
+ final boolean[] oldValids = primMapValids;
+ int oldCap = (oldValids == null) ? 0 : oldValids.length;
+ int newCap = calNewCapAndUpdateThreshold(oldCap);
+ // 0 means oldCap has already reached MAXIMUM_CAPACITY
+ if (newCap == 0) {
+ throw new IllegalStateException("can't resize: primitive long map has reached the maximum capacity");
+ }
+
+ final boolean[] newValids = new boolean[newCap];
+ final Long[] newKeys = new Long[newCap];
+ @SuppressWarnings({"rawtypes","unchecked"})
+ final V[] newValues = (V[])new Object[newCap];
+
+ final int mask = newCap - 1;
+ if (oldValids != null) {
+ final Long[] oldKeys = primMapKeys;
+ final V[] oldValues = primMapValues;
+ int remaining = primMapNullKeyValid ? size - 1 : size;
+ for (int i = 0; remaining > 0 && i < oldCap; ++i) {
+ if (oldValids[i]) {
+ long key = oldKeys[i];
+ V value = oldValues[i];
+ int index = primHashMapCalculateIndex(key, mask);
+ while (newValids[index]) {
+ index = (++index) & mask;
+ }
+ newValids[index] = true;
+ newKeys[index] = key;
+ newValues[index] = value;
+ --remaining;
+ }
+ }
+ }
+ primMapValids = newValids;
+ primMapKeys = newKeys;
+ primMapValues = newValues;
+ }
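+
+ // Re-probe sketch: growing from 16 to 32 slots changes the mask from 15 to 31,
+ // so an entry whose spread hash is 21 moves from slot 21 & 15 = 5 to slot
+ // 21 & 31 = 21; every valid slot is re-probed into the new arrays this way.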
+
+ /**
+ * Implements Map.put and related methods.
+ *
+ * @param key the key
+ * @param value the value to put
+ * @param onlyIfAbsent if true, don't change existing value
+ * @return previous value, or null if none
+ */
+ private V primHashMapPutVal(Long key, V value, boolean onlyIfAbsent) {
+ if (key == null) {
+ if (primMapNullKeyValid) { // existing mapping for key
+ V oldValue = primMapValOfNullKey;
+ if (!onlyIfAbsent || oldValue == null)
+ primMapValOfNullKey = value;
+ return oldValue;
+ }
+ primMapNullKeyValid = true;
+ primMapValOfNullKey = value;
+ ++modCount;
+ ++size;
+ return null;
+ }
+ if (primMapValids == null || primMapValids.length == 0) {
+ primHashMapResize();
+ }
+
+ final boolean[] valids = primMapValids;
+ final Long[] keys = primMapKeys;
+ final V[] values = primMapValues;
+ int remainingLength = valids.length;
+ final int mask = remainingLength - 1;
+ int index = primHashMapCalculateIndex(key, mask);
+ // find an empty slot to insert into, or an existing mapping for the key
+ while (valids[index] && remainingLength > 0) {
+ if (Objects.equals(keys[index], key)) {
+ break;
+ }
+ index = (++index) & mask;
+ --remainingLength;
+ }
+ if (valids[index]) { // existing mapping for key
+ V oldValue = values[index];
+ if (!onlyIfAbsent || oldValue == null) {
+ values[index] = value;
+ }
+ return oldValue;
+ }
+ keys[index] = key;
+ values[index] = value;
+ valids[index] = true;
+ ++modCount;
+ if (++size > threshold)
+ primHashMapResize();
+ return null;
+ }
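+
+ // Probe sketch for the insert above, capacity 8 (mask 7): keys A and B with
+ // spread hashes 3 and 11 both map to slot 3; A takes slot 3, and B finds it
+ // occupied by a different key, so it settles in the free slot 4:
+ //   slot:  0 1 2 3 4 5 6 7
+ //   entry: . . . A B . . .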
+
+ /**
+ * Finds the key's index in the prim hashmap.
+ *
+ * @param key the key
+ * @return NULL_KEY_INDEX_FOR_RPIM_MAP if the key is null and the null key is valid,
+ * the key's index if found,
+ * or KEY_NO_EXIST_FOR_PRIM_MAP if not found
+ */
+ private int primHashGetIndexByKey(Object key) {
+ if (key == null) {
+ return primMapNullKeyValid ? NULL_KEY_INDEX_FOR_RPIM_MAP : KEY_NO_EXIST_FOR_PRIM_MAP;
+ }
+ if (!(key instanceof Long)) {
+ return KEY_NO_EXIST_FOR_PRIM_MAP;
+ }
+ final boolean[] valids = primMapValids;
+ if (valids == null || valids.length == 0) {
+ return KEY_NO_EXIST_FOR_PRIM_MAP;
+ }
+
+ final Long[] keys = primMapKeys;
+ int remainingLength = valids.length;
+ final int mask = remainingLength - 1;
+ int index = primHashMapCalculateIndex(key, mask);
+ while (valids[index] && remainingLength > 0) {
+ if (Objects.equals(keys[index], key)) {
+ return index;
+ }
+ index = (++index) & mask;
+ --remainingLength;
+ }
+ return KEY_NO_EXIST_FOR_PRIM_MAP;
+ }
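+
+ // Lookup mirrors the insert probe (continuing the sketch above): a get for B
+ // probes slots 3 then 4 and returns index 4, while a get for an absent key
+ // hashing to 3 probes slots 3 and 4, hits the empty slot 5, and reports
+ // KEY_NO_EXIST_FOR_PRIM_MAP.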
+
+ private V primHashMapGetValByIndex(int index) {
+ if (index == KEY_NO_EXIST_FOR_PRIM_MAP) {
+ return null;
+ }
+ return (index == NULL_KEY_INDEX_FOR_RPIM_MAP) ? primMapValOfNullKey : primMapValues[index];
+ }
+
+ private V primHashMapGet(Object key) {
+ int index = primHashGetIndexByKey(key);
+ return primHashMapGetValByIndex(index);
+ }
+
+ private boolean primHashMapContainsValue(Object value) {
+ int remaining = size;
+ if (primMapNullKeyValid) {
+ if (Objects.equals(value, primMapValOfNullKey)) {
+ return true;
+ }
+ --remaining;
+ }
+ final boolean[] valids = primMapValids;
+ if (valids == null || valids.length == 0) {
+ return false;
+ }
+ final V[] values = primMapValues;
+ final int length = valids.length;
+ for (int i = 0; remaining > 0 && i < length; ++i) {
+ if (valids[i]) {
+ if (Objects.equals(value, values[i])) {
+ return true;
+ }
+ --remaining;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Removes an element from the prim hashmap by index.
+ *
+ * @param index the index of the element to be removed
+ * @return the removed value, or null if none
+ */
+ private V primHashMapRemoveByIndex(int index) {
+ int removeIdx = index;
+ if (removeIdx == KEY_NO_EXIST_FOR_PRIM_MAP) {
+ return null;
+ }
+ V oldValue;
+ if (removeIdx == NULL_KEY_INDEX_FOR_RPIM_MAP) {
+ oldValue = primMapValOfNullKey;
+ primMapNullKeyValid = false;
+ primMapValOfNullKey = null;
+ } else {
+ oldValue = primMapValues[removeIdx];
+ final boolean[] valids = primMapValids;
+ final V[] values = primMapValues;
+ final Long[] keys = primMapKeys;
+ int mask = valids.length - 1;
+ // Move the slot whose expected index allows it to occupy the removed slot into the removed slot;
+ // repeat until an empty slot is reached (there is always one since load factor <= 0.8f).
+ int actualIdx = (removeIdx + 1) & mask;
+ while (valids[actualIdx]) {
+ int expectedIdx = primHashMapCalculateIndex(keys[actualIdx], mask);
+ // move actual to remove, then set actual as new remove
+ // | expectedIdx--->removeIdx--->actualIdx | or
+ // |--->actualIdx expectedIdx--->removeIdx--->| or
+ // |--->removeIdx--->actualIdx expectedIdx--->|
+ if ((expectedIdx <= removeIdx && removeIdx < actualIdx) ||
+ (expectedIdx > actualIdx && (expectedIdx <= removeIdx || removeIdx < actualIdx))) {
+ keys[removeIdx] = keys[actualIdx];
+ values[removeIdx] = values[actualIdx];
+ removeIdx = actualIdx;
+ }
+ actualIdx = (++actualIdx) & mask;
+ }
+ valids[removeIdx] = false;
+ values[removeIdx] = null;
+ }
+ ++modCount;
+ --size;
+ return oldValue;
+ }
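+
+ // Worked deletion, capacity 8 (mask 7): A, B and C all hash to slot 3 and
+ // occupy slots 3, 4 and 5. Removing A (removeIdx = 3) inspects slot 4: B's
+ // expected index 3 satisfies the move condition, so B shifts into slot 3 and
+ // removeIdx becomes 4; C then shifts into slot 4 the same way. The scan stops
+ // at the empty slot 6, and the last vacated slot, 5, is marked invalid.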
+
+ /**
+ * Removes an element from the prim hashmap by key.
+ *
+ * @param key the key
+ * @return the removed value, or null if none
+ */
+ private V primHashMapRemoveByKey(Object key) {
+ int index = primHashGetIndexByKey(key);
+ return primHashMapRemoveByIndex(index);
+ }
+
+ private V primHashMapComputeIfAbsent(K key,
+ Function<? super K, ? extends V> mappingFunction) {
+ int index = primHashGetIndexByKey(key);
+ V oldValue = primHashMapGetValByIndex(index);
+ if (oldValue != null) {
+ return oldValue;
+ }
+ int mc = modCount;
+ V v = mappingFunction.apply(key);
+ if (mc != modCount) { throw new ConcurrentModificationException(); }
+ if (v == null) {
+ return null;
+ } else if (index != KEY_NO_EXIST_FOR_PRIM_MAP) { // key exists but oldValue is null
+ if (index == NULL_KEY_INDEX_FOR_RPIM_MAP) {
+ primMapValOfNullKey = v;
+ } else {
+ primMapValues[index] = v;
+ }
+ return v;
+ } else { // key does not exist
+ primHashMapPutVal((Long)key, v, false);
+ return v;
+ }
+ }
+
+ private V primHashMapComputeIfPresent(K key,
+ BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
+ int index = primHashGetIndexByKey(key);
+ V oldValue = primHashMapGetValByIndex(index);
+ if (oldValue == null) {
+ return null;
+ }
+
+ int mc = modCount;
+ V v = remappingFunction.apply(key, oldValue);
+ if (mc != modCount) { throw new ConcurrentModificationException(); }
+ if (v == null) {
+ primHashMapRemoveByIndex(index);
+ return null;
+ }
+ if (index == NULL_KEY_INDEX_FOR_RPIM_MAP) {
+ primMapValOfNullKey = v;
+ } else {
+ primMapValues[index] = v;
+ }
+ return v;
+ }
+
+ private V primHashMapCompute(K key,
+ BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
+ int index = primHashGetIndexByKey(key);
+ V oldValue = primHashMapGetValByIndex(index);
+ int mc = modCount;
+ V v = remappingFunction.apply(key, oldValue);
+ if (mc != modCount) { throw new ConcurrentModificationException(); }
+ if (index != KEY_NO_EXIST_FOR_PRIM_MAP) {
+ if (v != null) {
+ if (index == NULL_KEY_INDEX_FOR_RPIM_MAP) {
+ primMapValOfNullKey = v;
+ } else {
+ primMapValues[index] = v;
+ }
+ } else {
+ primHashMapRemoveByIndex(index);
+ }
+ } else if (v != null) {
+ primHashMapPutVal((Long)key, v, false);
+ }
+ return v;
+ }
+
+ private V primHashMapMerge(K key, V value,
+ BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
+ int index = primHashGetIndexByKey(key);
+ V oldValue = primHashMapGetValByIndex(index);
+ if (index != KEY_NO_EXIST_FOR_PRIM_MAP) {
+ V v;
+ if (oldValue != null) {
+ int mc = modCount;
+ v = remappingFunction.apply(oldValue, value);
+ if (mc != modCount) {
+ throw new ConcurrentModificationException();
+ }
+ } else {
+ v = value;
+ }
+ if (v != null) {
+ if (index == NULL_KEY_INDEX_FOR_RPIM_MAP) {
+ primMapValOfNullKey = v;
+ } else {
+ primMapValues[index] = v;
+ }
+ } else {
+ primHashMapRemoveByIndex(index);
+ }
+ return v;
+ } else {
+ primHashMapPutVal((Long)key, value, false);
+ return value;
+ }
+ }
+
+ /**
+ * Prim hashmap node, used by the entry iterator.
+ */
+ class primHashMapNode implements Map.Entry<K,V> {
+ final K key;
+ V value;
+ final int nodeIdx;
+
+ primHashMapNode(int nodeIdx) {
+ if (nodeIdx == NULL_KEY_INDEX_FOR_RPIM_MAP) {
+ this.key = null;
+ this.value = HashMap.this.primMapValOfNullKey;
+ } else {
+ this.key = HashMap.this.castKeyToGenericType(HashMap.this.primMapKeys[nodeIdx]);
+ this.value = HashMap.this.primMapValues[nodeIdx];
+ }
+ this.nodeIdx = nodeIdx;
+ }
+
+ public final K getKey() { return key; }
+ public final V getValue() { return value; }
+ public final String toString() { return key + "=" + value; }
+
+ public final boolean equals(Object o) {
+ if (o == this)
+ return true;
+
+ return o instanceof Map.Entry<?, ?> e
+ && Objects.equals(key, e.getKey())
+ && Objects.equals(value, e.getValue());
+ }
+
+ public final int hashCode() {
+ return Objects.hashCode(key) ^ Objects.hashCode(value);
+ }
+
+ public final V setValue(V newValue) {
+ V oldValue = value;
+ value = newValue;
+ if (nodeIdx == NULL_KEY_INDEX_FOR_RPIM_MAP) {
+ HashMap.this.primMapValOfNullKey = newValue;
+ } else {
+ HashMap.this.primMapValues[nodeIdx] = newValue;
+ }
+ return oldValue;
+ }
+ }
+
+ /* ------------------------------------------------------------ */
+ // prim hashmap iterators
+
+ abstract class primHashMapIterator {
+ int nextIndex;
+ int curIndex; // current slot
+ int expectedModCount; // for fast-fail
+ int remaining;
+ int startIndex;
+ int stopIndex;
+ int mask;
+
+ primHashMapIterator() {
+ expectedModCount = modCount;
+ remaining = size;
+ curIndex = nextIndex = KEY_NO_EXIST_FOR_PRIM_MAP;
+ // init startIndex/stopIndex if there are elements in primMapValues
+ if (remaining > (primMapNullKeyValid ? 1 : 0)) {
+ final boolean[] valids = primMapValids;
+ mask = valids.length - 1;
+ // Use reverse traversal so that entries shifted by a deletion are never visited again.
+ // Set stopIndex to the index of the first empty slot (searching forward from 0), then set
+ // startIndex to the index of the first valid slot searching backwards from stopIndex.
+ stopIndex = 0;
+ while (valids[stopIndex]) {
+ stopIndex = (++stopIndex) & mask;
+ }
+ startIndex = stopIndex;
+ while (!valids[startIndex]) {
+ startIndex = (--startIndex) & mask;
+ }
+ } else {
+ startIndex = stopIndex = KEY_NO_EXIST_FOR_PRIM_MAP;
+ }
+
+ if (remaining > 0) { // advance to first entry
+ if (primMapNullKeyValid) { // always visit the null key first if it exists
+ nextIndex = NULL_KEY_INDEX_FOR_RPIM_MAP;
+ } else {
+ nextIndex = startIndex;
+ }
+ }
+ }
+
+ public final boolean hasNext() {
+ return remaining > 0;
+ }
+
+ final void findNext() {
+ if (modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ if (nextIndex == KEY_NO_EXIST_FOR_PRIM_MAP) {
+ throw new NoSuchElementException();
+ }
+ curIndex = nextIndex;
+ --remaining;
+ if (remaining > 0) {
+ // startIndex has been calculated if curIndex is null key index
+ if (curIndex != NULL_KEY_INDEX_FOR_RPIM_MAP) {
+ final boolean[] valids = primMapValids;
+ while (startIndex != stopIndex) {
+ startIndex = (--startIndex) & mask;
+ if (valids[startIndex]) {
+ break;
+ }
+ }
+ }
+ nextIndex = startIndex;
+ } else {
+ nextIndex = KEY_NO_EXIST_FOR_PRIM_MAP;
+ }
+ }
+
+ public final void remove() {
+ if (curIndex == KEY_NO_EXIST_FOR_PRIM_MAP) {
+ throw new IllegalStateException();
+ }
+ if (modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ primHashMapRemoveByIndex(curIndex);
+ curIndex = KEY_NO_EXIST_FOR_PRIM_MAP;
+ expectedModCount = modCount;
+ }
+ }
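+
+ // Traversal-order sketch: with valid slots {3, 4, 5} and slot 0 empty,
+ // stopIndex becomes 0 and startIndex walks back 0 -> 7 -> 6 -> 5, so entries
+ // are visited in the order 5, 4, 3 (after the null key, if any). A removal
+ // only shifts already-visited entries into slots at or after the removed slot
+ // in probe order, and never touches the unvisited lower slots, so nothing is
+ // visited twice or skipped.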
+
+ final class primHashMapKeyIterator extends primHashMapIterator
+ implements Iterator<K> {
+ public final K next() {
+ findNext();
+ return (curIndex == NULL_KEY_INDEX_FOR_RPIM_MAP) ? null : castKeyToGenericType(primMapKeys[curIndex]);
+ }
+ }
+
+ final class primHashMapValueIterator extends primHashMapIterator
+ implements Iterator<V> {
+ public final V next() {
+ findNext();
+ return (curIndex == NULL_KEY_INDEX_FOR_RPIM_MAP) ? primMapValOfNullKey : primMapValues[curIndex];
+ }
+ }
+
+ final class primHashMapEntryIterator extends primHashMapIterator
+ implements Iterator<Map.Entry<K,V>> {
+ public final Map.Entry<K,V> next() {
+ findNext();
+ return new primHashMapNode(curIndex);
+ }
+ }
+
+ /* ------------------------------------------------------------ */
+ // prim hashmap spliterators
+
+ abstract class primHashMapSpliterator<T> implements Spliterator<T> {
+ int index; // current index, modified on advance/split
+ int fence; // one past last index
+ int est; // size estimate
+ int expectedModCount; // for comodification checks
+
+ // indicates whether this spliterator needs to process the null key;
+ // always set to false if this spliterator came from trySplit()
+ boolean needToProcessNullKey;
+
+ primHashMapSpliterator(int origin, int fence, int est,
+ int expectedModCount, boolean needToProcessNullKey) {
+ this.index = origin;
+ this.fence = fence;
+ this.est = est;
+ this.expectedModCount = expectedModCount;
+ this.needToProcessNullKey = needToProcessNullKey;
+ }
+
+ final int getFence() { // initialize fence and size on first use
+ int hi;
+ if ((hi = fence) < 0) {
+ est = size;
+ expectedModCount = modCount;
+ boolean[] valids = primMapValids;
+ hi = fence = (valids == null) ? 0 : valids.length;
+ }
+ return hi;
+ }
+
+ public final long estimateSize() {
+ getFence(); // force init
+ return (long) est;
+ }
+ }
+
+ final class primHashMapKeySpliterator
+ extends primHashMapSpliterator<K> {
+ primHashMapKeySpliterator(int origin, int fence, int est,
+ int expectedModCount, boolean needToProcessNullKey) {
+ super(origin, fence, est, expectedModCount, needToProcessNullKey);
+ }
+
+ public primHashMapKeySpliterator trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid) ? null :
+ new primHashMapKeySpliterator(lo, index = mid, est >>>= 1,
+ expectedModCount, false);
+ }
+
+ public void forEachRemaining(Consumer<? super K> action) {
+ int i, hi, mc;
+ if (action == null)
+ throw new NullPointerException();
+ boolean[] valids = primMapValids;
+ Long[] keys = primMapKeys;
+ if ((hi = fence) < 0) {
+ mc = expectedModCount = modCount;
+ hi = fence = (valids == null) ? 0 : valids.length;
+ }
+ else
+ mc = expectedModCount;
+ if (valids != null && valids.length >= hi &&
+ (i = index) >= 0 && i < (index = hi)) {
+ do {
+ if (valids[i]) {
+ action.accept(castKeyToGenericType(keys[i]));
+ }
+ ++i;
+ } while (i < hi);
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ if (needToProcessNullKey && primMapNullKeyValid) {
+ action.accept(null);
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ needToProcessNullKey = false;
+ }
+ }
+
+ public boolean tryAdvance(Consumer<? super K> action) {
+ int hi = getFence(); // force init
+ if (action == null)
+ throw new NullPointerException();
+ boolean[] valids = primMapValids;
+ Long[] keys = primMapKeys;
+ if (valids != null && valids.length >= hi && index >= 0) {
+ while (index < hi) {
+ if (!valids[index]) {
+ ++index;
+ } else {
+ action.accept(castKeyToGenericType(keys[index]));
+ ++index;
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ return true;
+ }
+ }
+ }
+ if (needToProcessNullKey && primMapNullKeyValid) {
+ action.accept(null);
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ needToProcessNullKey = false;
+ return true;
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return (fence < 0 || est == size ? Spliterator.SIZED : 0) |
+ Spliterator.DISTINCT;
+ }
+ }
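
A consequence of the needToProcessNullKey handshake is that no matter how many times a spliterator tree is split, only the original spliterator still owns the null key, so it is emitted exactly once across the whole traversal. The sketch below checks that contract through the public API against a stock java.util.HashMap; under this patch the same calls would presumably be served by primHashMapKeySpliterator when the UsePrimHashMap flag is on and the keys are Long, which is an assumption about how the flag is wired, not something this snippet asserts.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Spliterator;

    public class NullKeyOnceDemo {
        static long total = 0, nullsSeen = 0;

        static void drain(Spliterator<Long> s) {
            if (s == null) return; // trySplit() may decline to split
            s.forEachRemaining(k -> {
                total++;
                if (k == null) nullsSeen++;
            });
        }

        public static void main(String[] args) {
            Map<Long, String> m = new HashMap<>();
            for (long i = 0; i < 8; i++) m.put(i, "v" + i);
            m.put(null, "nullValue");

            Spliterator<Long> right = m.keySet().spliterator();
            Spliterator<Long> left = right.trySplit(); // covers the lower half
            drain(left);
            drain(right);
            System.out.println(total == 9 && nullsSeen == 1); // true
        }
    }
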
+
+ final class primHashMapValueSpliterator
+ extends primHashMapSpliterator<V> {
+ primHashMapValueSpliterator(int origin, int fence, int est,
+ int expectedModCount, boolean needToProcessNullKey) {
+ super(origin, fence, est, expectedModCount, needToProcessNullKey);
+ }
+
+ public primHashMapValueSpliterator trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid) ? null :
+ new primHashMapValueSpliterator(lo, index = mid, est >>>= 1,
+ expectedModCount, false);
+ }
+
+ public void forEachRemaining(Consumer<? super V> action) {
+ int i, hi, mc;
+ if (action == null)
+ throw new NullPointerException();
+ boolean[] valids = primMapValids;
+ V[] values = primMapValues;
+ if ((hi = fence) < 0) {
+ mc = expectedModCount = modCount;
+ hi = fence = (valids == null) ? 0 : valids.length;
+ }
+ else
+ mc = expectedModCount;
+ if (valids != null && valids.length >= hi &&
+ (i = index) >= 0 && i < (index = hi)) {
+ do {
+ if (valids[i]) {
+ action.accept(values[i]);
+ }
+ ++i;
+ } while (i < hi);
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ if (needToProcessNullKey && primMapNullKeyValid) {
+ action.accept(primMapValOfNullKey);
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ needToProcessNullKey = false;
+ }
+ }
+
+ public boolean tryAdvance(Consumer<? super V> action) {
+ int hi = getFence(); // force init
+ if (action == null)
+ throw new NullPointerException();
+ boolean[] valids = primMapValids;
+ V[] values = primMapValues;
+ if (valids != null && valids.length >= hi && index >= 0) {
+ while (index < hi) {
+ if (!valids[index]) {
+ ++index;
+ } else {
+ action.accept(values[index]);
+ ++index;
+ if (modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ }
+ if (needToProcessNullKey && primMapNullKeyValid) {
+ action.accept(primMapValOfNullKey);
+ if (modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ needToProcessNullKey = false;
+ return true;
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return (fence < 0 || est == size ? Spliterator.SIZED : 0);
+ }
+ }
+
+ final class primHashMapEntrySpliterator
+ extends primHashMapSpliterator<Map.Entry<K,V>> {
+ primHashMapEntrySpliterator(int origin, int fence, int est,
+ int expectedModCount, boolean needToProcessNullKey) {
+ super(origin, fence, est, expectedModCount, needToProcessNullKey);
+ }
+
+ public primHashMapEntrySpliterator trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid) ? null :
+ new primHashMapEntrySpliterator(lo, index = mid, est >>>= 1,
+ expectedModCount, false);
+ }
+
+ public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
+ int i, hi, mc;
+ if (action == null)
+ throw new NullPointerException();
+ boolean[] valids = primMapValids;
+
+ if ((hi = fence) < 0) {
+ mc = expectedModCount = modCount;
+ hi = fence = (valids == null) ? 0 : valids.length;
+ }
+ else
+ mc = expectedModCount;
+ if (valids != null && valids.length >= hi &&
+ (i = index) >= 0 && i < (index = hi)) {
+ do {
+ if (valids[i]) {
+ action.accept(new primHashMapNode(i));
+ }
+ ++i;
+ } while (i < hi);
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ if (needToProcessNullKey && primMapNullKeyValid) {
+ action.accept(new primHashMapNode(NULL_KEY_INDEX_FOR_RPIM_MAP));
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ needToProcessNullKey = false;
+ }
+ }
+
+ public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
+ int hi = getFence(); // force init
+ if (action == null)
+ throw new NullPointerException();
+ boolean[] valids = primMapValids;
+ if (valids != null && valids.length >= hi && index >= 0) {
+ while (index < hi) {
+ if (!valids[index]) {
+ ++index;
+ }
+ else {
+ action.accept(new primHashMapNode(index));
+ ++index;
+ if (modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ }
+ if (needToProcessNullKey && primMapNullKeyValid) {
+ action.accept(new primHashMapNode(NULL_KEY_INDEX_FOR_RPIM_MAP));
+ if (modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ needToProcessNullKey = false;
+ return true;
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return (fence < 0 || est == size ? Spliterator.SIZED : 0) |
+ Spliterator.DISTINCT;
+ }
+ }
+
+}
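
Both the prim iterators and the prim spliterators above enforce the usual fail-fast contract through the modCount/expectedModCount handshake, and Iterator.remove() is the one structural modification that re-synchronizes the counters instead of throwing. A short sketch of the observable behavior, using only the public java.util API (whether these calls route through the prim paths depends on the UsePrimHashMap flag and the key type, which is an assumption about the runtime configuration):

    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;

    public class FailFastDemo {
        public static void main(String[] args) {
            Map<Long, String> m = new HashMap<>();
            for (long i = 0; i < 4; i++) m.put(i, "v" + i);

            // Iterator.remove() re-syncs expectedModCount with modCount.
            Iterator<Map.Entry<Long, String>> it = m.entrySet().iterator();
            while (it.hasNext()) {
                if (it.next().getKey() == 2L) it.remove();
            }
            System.out.println(m.containsKey(2L)); // false

            // Any other structural modification mid-iteration trips the check.
            try {
                for (Long k : m.keySet()) m.remove(k);
            } catch (java.util.ConcurrentModificationException expected) {
                System.out.println("fail-fast");
            }
        }
    }
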
diff --git a/test/jdk/java/util/HashMap/LinkedHashMap.java b/test/jdk/java/util/HashMap/LinkedHashMap.java
new file mode 100644
index 000000000..92c1a795a
--- /dev/null
+++ b/test/jdk/java/util/HashMap/LinkedHashMap.java
@@ -0,0 +1,798 @@
+/*
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package java.util;
+
+import java.util.function.Consumer;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.io.IOException;
+
+/**
+ * <p>Hash table and linked list implementation of the {@code Map} interface,
+ * with predictable iteration order. This implementation differs from
+ * {@code HashMap} in that it maintains a doubly-linked list running through
+ * all of its entries. This linked list defines the iteration ordering,
+ * which is normally the order in which keys were inserted into the map
+ * (<i>insertion-order</i>). Note that insertion order is not affected
+ * if a key is <i>re-inserted</i> into the map. (A key {@code k} is
+ * reinserted into a map {@code m} if {@code m.put(k, v)} is invoked when
+ * {@code m.containsKey(k)} would return {@code true} immediately prior to
+ * the invocation.)
+ *
+ * <p>This implementation spares its clients from the unspecified, generally
+ * chaotic ordering provided by {@link HashMap} (and {@link Hashtable}),
+ * without incurring the increased cost associated with {@link TreeMap}. It
+ * can be used to produce a copy of a map that has the same order as the
+ * original, regardless of the original map's implementation:
+ * <pre>{@code
+ * void foo(Map<String, Integer> m) {
+ * Map<String, Integer> copy = new LinkedHashMap<>(m);
+ * ...
+ * }
+ * }</pre>
+ * This technique is particularly useful if a module takes a map on input,
+ * copies it, and later returns results whose order is determined by that of
+ * the copy. (Clients generally appreciate having things returned in the same
+ * order they were presented.)
+ *
+ * <p>A special {@link #LinkedHashMap(int,float,boolean) constructor} is
+ * provided to create a linked hash map whose order of iteration is the order
+ * in which its entries were last accessed, from least-recently accessed to
+ * most-recently (<i>access-order</i>). This kind of map is well-suited to
+ * building LRU caches. Invoking the {@code put}, {@code putIfAbsent},
+ * {@code get}, {@code getOrDefault}, {@code compute}, {@code computeIfAbsent},
+ * {@code computeIfPresent}, or {@code merge} methods results
+ * in an access to the corresponding entry (assuming it exists after the
+ * invocation completes). The {@code replace} methods only result in an access
+ * of the entry if the value is replaced. The {@code putAll} method generates one
+ * entry access for each mapping in the specified map, in the order that
+ * key-value mappings are provided by the specified map's entry set iterator.
+ * <i>No other methods generate entry accesses.</i> In particular, operations
+ * on collection-views do <i>not</i> affect the order of iteration of the
+ * backing map.
+ *
+ * <p>The {@link #removeEldestEntry(Map.Entry)} method may be overridden to
+ * impose a policy for removing stale mappings automatically when new mappings
+ * are added to the map.
+ *
+ * <p>This class provides all of the optional {@code Map} operations, and
+ * permits null elements. Like {@code HashMap}, it provides constant-time
+ * performance for the basic operations ({@code add}, {@code contains} and
+ * {@code remove}), assuming the hash function disperses elements
+ * properly among the buckets. Performance is likely to be just slightly
+ * below that of {@code HashMap}, due to the added expense of maintaining the
+ * linked list, with one exception: Iteration over the collection-views
+ * of a {@code LinkedHashMap} requires time proportional to the <i>size</i>
+ * of the map, regardless of its capacity. Iteration over a {@code HashMap}
+ * is likely to be more expensive, requiring time proportional to its
+ * <i>capacity</i>.
+ *
+ * <p>A linked hash map has two parameters that affect its performance:
+ * <i>initial capacity</i> and <i>load factor</i>. They are defined precisely
+ * as for {@code HashMap}. Note, however, that the penalty for choosing an
+ * excessively high value for initial capacity is less severe for this class
+ * than for {@code HashMap}, as iteration times for this class are unaffected
+ * by capacity.
+ *
+ * <p><strong>Note that this implementation is not synchronized.</strong>
+ * If multiple threads access a linked hash map concurrently, and at least
+ * one of the threads modifies the map structurally, it <em>must</em> be
+ * synchronized externally. This is typically accomplished by
+ * synchronizing on some object that naturally encapsulates the map.
+ *
+ * If no such object exists, the map should be "wrapped" using the
+ * {@link Collections#synchronizedMap Collections.synchronizedMap}
+ * method. This is best done at creation time, to prevent accidental
+ * unsynchronized access to the map:<pre>
+ * Map m = Collections.synchronizedMap(new LinkedHashMap(...));</pre>
+ *
+ * A structural modification is any operation that adds or deletes one or more
+ * mappings or, in the case of access-ordered linked hash maps, affects
+ * iteration order. In insertion-ordered linked hash maps, merely changing
+ * the value associated with a key that is already contained in the map is not
+ * a structural modification. <strong>In access-ordered linked hash maps,
+ * merely querying the map with {@code get} is a structural modification.
+ * </strong>
+ *
+ * <p>The iterators returned by the {@code iterator} method of the collections
+ * returned by all of this class's collection view methods are
+ * <em>fail-fast</em>: if the map is structurally modified at any time after
+ * the iterator is created, in any way except through the iterator's own
+ * {@code remove} method, the iterator will throw a {@link
+ * ConcurrentModificationException}. Thus, in the face of concurrent
+ * modification, the iterator fails quickly and cleanly, rather than risking
+ * arbitrary, non-deterministic behavior at an undetermined time in the future.
+ *
+ * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
+ * as it is, generally speaking, impossible to make any hard guarantees in the
+ * presence of unsynchronized concurrent modification. Fail-fast iterators
+ * throw {@code ConcurrentModificationException} on a best-effort basis.
+ * Therefore, it would be wrong to write a program that depended on this
+ * exception for its correctness: <i>the fail-fast behavior of iterators
+ * should be used only to detect bugs.</i>
+ *
+ * <p>The spliterators returned by the spliterator method of the collections
+ * returned by all of this class's collection view methods are
+ * <em><a href="Spliterator.html#binding">late-binding</a></em>,
+ * <em>fail-fast</em>, and additionally report {@link Spliterator#ORDERED}.
+ *
+ * <p>This class is a member of the
+ * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
+ * Java Collections Framework</a>.
+ *
+ * @implNote
+ * The spliterators returned by the spliterator method of the collections
+ * returned by all of this class's collection view methods are created from
+ * the iterators of the corresponding collections.
+ *
+ * @param <K> the type of keys maintained by this map
+ * @param <V> the type of mapped values
+ *
+ * @author Josh Bloch
+ * @see Object#hashCode()
+ * @see Collection
+ * @see Map
+ * @see HashMap
+ * @see TreeMap
+ * @see Hashtable
+ * @since 1.4
+ */
+public class LinkedHashMap<K,V>
+ extends HashMap<K,V>
+ implements Map<K,V>
+{
+
+ /*
+ * Implementation note. A previous version of this class was
+ * internally structured a little differently. Because superclass
+ * HashMap now uses trees for some of its nodes, class
+ * LinkedHashMap.Entry is now treated as intermediary node class
+ * that can also be converted to tree form. The name of this
+ * class, LinkedHashMap.Entry, is confusing in several ways in its
+ * current context, but cannot be changed. Otherwise, even though
+ * it is not exported outside this package, some existing source
+ * code is known to have relied on a symbol resolution corner case
+ * rule in calls to removeEldestEntry that suppressed compilation
+ * errors due to ambiguous usages. So, we keep the name to
+ * preserve unmodified compilability.
+ *
+ * The changes in node classes also require using two fields
+ * (head, tail) rather than a pointer to a header node to maintain
+ * the doubly-linked before/after list. This class also
+ * previously used a different style of callback methods upon
+ * access, insertion, and removal.
+ */
+
+ /**
+ * HashMap.Node subclass for normal LinkedHashMap entries.
+ */
+ static class Entry<K,V> extends HashMap.Node<K,V> {
+ Entry<K,V> before, after;
+ Entry(int hash, K key, V value, Node<K,V> next) {
+ super(hash, key, value, next);
+ }
+ }
+
+ @java.io.Serial
+ private static final long serialVersionUID = 3801124242820219131L;
+
+ /**
+ * The head (eldest) of the doubly linked list.
+ */
+ transient LinkedHashMap.Entry<K,V> head;
+
+ /**
+ * The tail (youngest) of the doubly linked list.
+ */
+ transient LinkedHashMap.Entry<K,V> tail;
+
+ /**
+ * The iteration ordering method for this linked hash map: {@code true}
+ * for access-order, {@code false} for insertion-order.
+ *
+ * @serial
+ */
+ final boolean accessOrder;
+
+ // internal utilities
+
+ // link at the end of list
+ private void linkNodeLast(LinkedHashMap.Entry<K,V> p) {
+ LinkedHashMap.Entry<K,V> last = tail;
+ tail = p;
+ if (last == null)
+ head = p;
+ else {
+ p.before = last;
+ last.after = p;
+ }
+ }
+
+ // apply src's links to dst
+ private void transferLinks(LinkedHashMap.Entry<K,V> src,
+ LinkedHashMap.Entry<K,V> dst) {
+ LinkedHashMap.Entry<K,V> b = dst.before = src.before;
+ LinkedHashMap.Entry<K,V> a = dst.after = src.after;
+ if (b == null)
+ head = dst;
+ else
+ b.after = dst;
+ if (a == null)
+ tail = dst;
+ else
+ a.before = dst;
+ }
+
+ // overrides of HashMap hook methods
+
+ void reinitialize() {
+ super.reinitialize();
+ disablePrimHashMap();
+ head = tail = null;
+ }
+
+ Node<K,V> newNode(int hash, K key, V value, Node<K,V> e) {
+ LinkedHashMap.Entry<K,V> p =
+ new LinkedHashMap.Entry<>(hash, key, value, e);
+ linkNodeLast(p);
+ return p;
+ }
+
+ Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
+ LinkedHashMap.Entry<K,V> q = (LinkedHashMap.Entry<K,V>)p;
+ LinkedHashMap.Entry<K,V> t =
+ new LinkedHashMap.Entry<>(q.hash, q.key, q.value, next);
+ transferLinks(q, t);
+ return t;
+ }
+
+ TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
+ TreeNode<K,V> p = new TreeNode<>(hash, key, value, next);
+ linkNodeLast(p);
+ return p;
+ }
+
+ TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
+ LinkedHashMap.Entry<K,V> q = (LinkedHashMap.Entry<K,V>)p;
+ TreeNode<K,V> t = new TreeNode<>(q.hash, q.key, q.value, next);
+ transferLinks(q, t);
+ return t;
+ }
+
+ void afterNodeRemoval(Node<K,V> e) { // unlink
+ LinkedHashMap.Entry<K,V> p =
+ (LinkedHashMap.Entry<K,V>)e, b = p.before, a = p.after;
+ p.before = p.after = null;
+ if (b == null)
+ head = a;
+ else
+ b.after = a;
+ if (a == null)
+ tail = b;
+ else
+ a.before = b;
+ }
+
+ void afterNodeInsertion(boolean evict) { // possibly remove eldest
+ LinkedHashMap.Entry<K,V> first;
+ if (evict && (first = head) != null && removeEldestEntry(first)) {
+ K key = first.key;
+ removeNode(hash(key), key, null, false, true);
+ }
+ }
+
+ void afterNodeAccess(Node<K,V> e) { // move node to last
+ LinkedHashMap.Entry<K,V> last;
+ if (accessOrder && (last = tail) != e) {
+ LinkedHashMap.Entry<K,V> p =
+ (LinkedHashMap.Entry<K,V>)e, b = p.before, a = p.after;
+ p.after = null;
+ if (b == null)
+ head = a;
+ else
+ b.after = a;
+ if (a != null)
+ a.before = b;
+ else
+ last = b;
+ if (last == null)
+ head = p;
+ else {
+ p.before = last;
+ last.after = p;
+ }
+ tail = p;
+ ++modCount;
+ }
+ }
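
Note the ++modCount at the end of afterNodeAccess: in an access-ordered map a successful get() reorders the linked list, so it counts as a structural modification, exactly as the class javadoc warns. A small sketch of the consequence (illustrative class name):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class AccessOrderDemo {
        public static void main(String[] args) {
            Map<String, Integer> m = new LinkedHashMap<>(16, 0.75f, true); // access-order
            m.put("a", 1);
            m.put("b", 2);

            try {
                for (String k : m.keySet()) {
                    m.get("a"); // moves "a" to the tail and bumps modCount
                }
            } catch (java.util.ConcurrentModificationException expected) {
                System.out.println("get() is a structural modification here");
            }
        }
    }
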
+
+ void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
+ for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after) {
+ s.writeObject(e.key);
+ s.writeObject(e.value);
+ }
+ }
+
+ /**
+ * Constructs an empty insertion-ordered {@code LinkedHashMap} instance
+ * with the specified initial capacity and load factor.
+ *
+ * @param initialCapacity the initial capacity
+ * @param loadFactor the load factor
+ * @throws IllegalArgumentException if the initial capacity is negative
+ * or the load factor is nonpositive
+ */
+ public LinkedHashMap(int initialCapacity, float loadFactor) {
+ super(initialCapacity, loadFactor);
+ disablePrimHashMap();
+ accessOrder = false;
+ }
+
+ /**
+ * Constructs an empty insertion-ordered {@code LinkedHashMap} instance
+ * with the specified initial capacity and a default load factor (0.75).
+ *
+ * @param initialCapacity the initial capacity
+ * @throws IllegalArgumentException if the initial capacity is negative
+ */
+ public LinkedHashMap(int initialCapacity) {
+ super(initialCapacity);
+ disablePrimHashMap();
+ accessOrder = false;
+ }
+
+ /**
+ * Constructs an empty insertion-ordered {@code LinkedHashMap} instance
+ * with the default initial capacity (16) and load factor (0.75).
+ */
+ public LinkedHashMap() {
+ super();
+ disablePrimHashMap();
+ accessOrder = false;
+ }
+
+ /**
+ * Constructs an insertion-ordered {@code LinkedHashMap} instance with
+ * the same mappings as the specified map. The {@code LinkedHashMap}
+ * instance is created with a default load factor (0.75) and an initial
+ * capacity sufficient to hold the mappings in the specified map.
+ *
+ * @param m the map whose mappings are to be placed in this map
+ * @throws NullPointerException if the specified map is null
+ */
+ public LinkedHashMap(Map<? extends K, ? extends V> m) {
+ super();
+ disablePrimHashMap();
+ accessOrder = false;
+ putMapEntries(m, false);
+ }
+
+ /**
+ * Constructs an empty {@code LinkedHashMap} instance with the
+ * specified initial capacity, load factor and ordering mode.
+ *
+ * @param initialCapacity the initial capacity
+ * @param loadFactor the load factor
+ * @param accessOrder the ordering mode - {@code true} for
+ * access-order, {@code false} for insertion-order
+ * @throws IllegalArgumentException if the initial capacity is negative
+ * or the load factor is nonpositive
+ */
+ public LinkedHashMap(int initialCapacity,
+ float loadFactor,
+ boolean accessOrder) {
+ super(initialCapacity, loadFactor);
+ disablePrimHashMap();
+ this.accessOrder = accessOrder;
+ }
+
+
+ /**
+ * Returns {@code true} if this map maps one or more keys to the
+ * specified value.
+ *
+ * @param value value whose presence in this map is to be tested
+ * @return {@code true} if this map maps one or more keys to the
+ * specified value
+ */
+ public boolean containsValue(Object value) {
+ for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after) {
+ V v = e.value;
+ if (v == value || (value != null && value.equals(v)))
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Returns the value to which the specified key is mapped,
+ * or {@code null} if this map contains no mapping for the key.
+ *
+ * <p>More formally, if this map contains a mapping from a key
+ * {@code k} to a value {@code v} such that {@code (key==null ? k==null :
+ * key.equals(k))}, then this method returns {@code v}; otherwise
+ * it returns {@code null}. (There can be at most one such mapping.)
+ *
+ * <p>A return value of {@code null} does not <i>necessarily</i>
+ * indicate that the map contains no mapping for the key; it's also
+ * possible that the map explicitly maps the key to {@code null}.
+ * The {@link #containsKey containsKey} operation may be used to
+ * distinguish these two cases.
+ */
+ public V get(Object key) {
+ Node<K,V> e;
+ if ((e = getNode(key)) == null)
+ return null;
+ if (accessOrder)
+ afterNodeAccess(e);
+ return e.value;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public V getOrDefault(Object key, V defaultValue) {
+ Node<K,V> e;
+ if ((e = getNode(key)) == null)
+ return defaultValue;
+ if (accessOrder)
+ afterNodeAccess(e);
+ return e.value;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void clear() {
+ super.clear();
+ head = tail = null;
+ }
+
+ /**
+ * Returns {@code true} if this map should remove its eldest entry.
+ * This method is invoked by {@code put} and {@code putAll} after
+ * inserting a new entry into the map. It provides the implementor
+ * with the opportunity to remove the eldest entry each time a new one
+ * is added. This is useful if the map represents a cache: it allows
+ * the map to reduce memory consumption by deleting stale entries.
+ *
+ * <p>Sample use: this override will allow the map to grow up to 100
+ * entries and then delete the eldest entry each time a new entry is
+ * added, maintaining a steady state of 100 entries.
+ * <pre>
+ * private static final int MAX_ENTRIES = 100;
+ *
+ * protected boolean removeEldestEntry(Map.Entry eldest) {
+ * return size() > MAX_ENTRIES;
+ * }
+ * </pre>
+ *
+ * <p>This method typically does not modify the map in any way,
+ * instead allowing the map to modify itself as directed by its
+ * return value. It <i>is</i> permitted for this method to modify
+ * the map directly, but if it does so, it <i>must</i> return
+ * {@code false} (indicating that the map should not attempt any
+ * further modification). The effects of returning {@code true}
+ * after modifying the map from within this method are unspecified.
+ *
+ * <p>This implementation merely returns {@code false} (so that this
+ * map acts like a normal map - the eldest element is never removed).
+ *
+ * @param eldest The least recently inserted entry in the map, or if
+ * this is an access-ordered map, the least recently accessed
+ * entry. This is the entry that will be removed if this
+ * method returns {@code true}. If the map was empty prior
+ * to the {@code put} or {@code putAll} invocation resulting
+ * in this invocation, this will be the entry that was just
+ * inserted; in other words, if the map contains a single
+ * entry, the eldest entry is also the newest.
+ * @return {@code true} if the eldest entry should be removed
+ * from the map; {@code false} if it should be retained.
+ */
+ protected boolean removeEldestEntry(Map.Entry<K,V> eldest) {
+ return false;
+ }
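
Combining the override sketched in the javadoc above with the access-order constructor yields a compact LRU cache; this is a minimal illustrative example (the class name and capacity are made up for the demo):

    import java.util.LinkedHashMap;
    import java.util.Map;

    class LruCache<K, V> extends LinkedHashMap<K, V> {
        private final int maxEntries;

        LruCache(int maxEntries) {
            super(16, 0.75f, true); // true selects access-order iteration
            this.maxEntries = maxEntries;
        }

        @Override
        protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
            return size() > maxEntries;
        }

        public static void main(String[] args) {
            LruCache<Long, String> cache = new LruCache<>(2);
            cache.put(1L, "a");
            cache.put(2L, "b");
            cache.get(1L);      // touch 1, so 2 becomes the eldest
            cache.put(3L, "c"); // evicts 2
            System.out.println(cache.keySet()); // [1, 3]
        }
    }
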
+
+ /**
+ * Returns a {@link Set} view of the keys contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. If the map is modified
+ * while an iteration over the set is in progress (except through
+ * the iterator's own {@code remove} operation), the results of
+ * the iteration are undefined. The set supports element removal,
+ * which removes the corresponding mapping from the map, via the
+ * {@code Iterator.remove}, {@code Set.remove},
+ * {@code removeAll}, {@code retainAll}, and {@code clear}
+ * operations. It does not support the {@code add} or {@code addAll}
+ * operations.
+ * Its {@link Spliterator} typically provides faster sequential
+ * performance but much poorer parallel performance than that of
+ * {@code HashMap}.
+ *
+ * @return a set view of the keys contained in this map
+ */
+ public Set<K> keySet() {
+ Set<K> ks = keySet;
+ if (ks == null) {
+ ks = new LinkedKeySet();
+ keySet = ks;
+ }
+ return ks;
+ }
+
+ @Override
+ final <T> T[] keysToArray(T[] a) {
+ Object[] r = a;
+ int idx = 0;
+ for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after) {
+ r[idx++] = e.key;
+ }
+ return a;
+ }
+
+ @Override
+ final <T> T[] valuesToArray(T[] a) {
+ Object[] r = a;
+ int idx = 0;
+ for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after) {
+ r[idx++] = e.value;
+ }
+ return a;
+ }
+
+ final class LinkedKeySet extends AbstractSet<K> {
+ public final int size() { return size; }
+ public final void clear() { LinkedHashMap.this.clear(); }
+ public final Iterator<K> iterator() {
+ return new LinkedKeyIterator();
+ }
+ public final boolean contains(Object o) { return containsKey(o); }
+ public final boolean remove(Object key) {
+ return removeNode(hash(key), key, null, false, true) != null;
+ }
+ public final Spliterator<K> spliterator() {
+ return Spliterators.spliterator(this, Spliterator.SIZED |
+ Spliterator.ORDERED |
+ Spliterator.DISTINCT);
+ }
+
+ public Object[] toArray() {
+ return keysToArray(new Object[size]);
+ }
+
+ public <T> T[] toArray(T[] a) {
+ return keysToArray(prepareArray(a));
+ }
+
+ public final void forEach(Consumer<? super K> action) {
+ if (action == null)
+ throw new NullPointerException();
+ int mc = modCount;
+ for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
+ action.accept(e.key);
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ /**
+ * Returns a {@link Collection} view of the values contained in this map.
+ * The collection is backed by the map, so changes to the map are
+ * reflected in the collection, and vice-versa. If the map is
+ * modified while an iteration over the collection is in progress
+ * (except through the iterator's own {@code remove} operation),
+ * the results of the iteration are undefined. The collection
+ * supports element removal, which removes the corresponding
+ * mapping from the map, via the {@code Iterator.remove},
+ * {@code Collection.remove}, {@code removeAll},
+ * {@code retainAll} and {@code clear} operations. It does not
+ * support the {@code add} or {@code addAll} operations.
+ * Its {@link Spliterator} typically provides faster sequential
+ * performance but much poorer parallel performance than that of
+ * {@code HashMap}.
+ *
+ * @return a view of the values contained in this map
+ */
+ public Collection<V> values() {
+ Collection<V> vs = values;
+ if (vs == null) {
+ vs = new LinkedValues();
+ values = vs;
+ }
+ return vs;
+ }
+
+ final class LinkedValues extends AbstractCollection<V> {
+ public final int size() { return size; }
+ public final void clear() { LinkedHashMap.this.clear(); }
+ public final Iterator<V> iterator() {
+ return new LinkedValueIterator();
+ }
+ public final boolean contains(Object o) { return containsValue(o); }
+ public final Spliterator<V> spliterator() {
+ return Spliterators.spliterator(this, Spliterator.SIZED |
+ Spliterator.ORDERED);
+ }
+
+ public Object[] toArray() {
+ return valuesToArray(new Object[size]);
+ }
+
+ public <T> T[] toArray(T[] a) {
+ return valuesToArray(prepareArray(a));
+ }
+
+ public final void forEach(Consumer<? super V> action) {
+ if (action == null)
+ throw new NullPointerException();
+ int mc = modCount;
+ for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
+ action.accept(e.value);
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ /**
+ * Returns a {@link Set} view of the mappings contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. If the map is modified
+ * while an iteration over the set is in progress (except through
+ * the iterator's own {@code remove} operation, or through the
+ * {@code setValue} operation on a map entry returned by the
+ * iterator) the results of the iteration are undefined. The set
+ * supports element removal, which removes the corresponding
+ * mapping from the map, via the {@code Iterator.remove},
+ * {@code Set.remove}, {@code removeAll}, {@code retainAll} and
+ * {@code clear} operations. It does not support the
+ * {@code add} or {@code addAll} operations.
+ * Its {@link Spliterator} typically provides faster sequential
+ * performance but much poorer parallel performance than that of
+ * {@code HashMap}.
+ *
+ * @return a set view of the mappings contained in this map
+ */
+ public Set<Map.Entry<K,V>> entrySet() {
+ Set<Map.Entry<K,V>> es;
+ return (es = entrySet) == null ? (entrySet = new LinkedEntrySet()) : es;
+ }
+
+ final class LinkedEntrySet extends AbstractSet<Map.Entry<K,V>> {
+ public final int size() { return size; }
+ public final void clear() { LinkedHashMap.this.clear(); }
+ public final Iterator<Map.Entry<K,V>> iterator() {
+ return new LinkedEntryIterator();
+ }
+ public final boolean contains(Object o) {
+ if (!(o instanceof Map.Entry<?, ?> e))
+ return false;
+ Object key = e.getKey();
+ Node<K,V> candidate = getNode(key);
+ return candidate != null && candidate.equals(e);
+ }
+ public final boolean remove(Object o) {
+ if (o instanceof Map.Entry<?, ?> e) {
+ Object key = e.getKey();
+ Object value = e.getValue();
+ return removeNode(hash(key), key, value, true, true) != null;
+ }
+ return false;
+ }
+ public final Spliterator<Map.Entry<K,V>> spliterator() {
+ return Spliterators.spliterator(this, Spliterator.SIZED |
+ Spliterator.ORDERED |
+ Spliterator.DISTINCT);
+ }
+ public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
+ if (action == null)
+ throw new NullPointerException();
+ int mc = modCount;
+ for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
+ action.accept(e);
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ // Map overrides
+
+ public void forEach(BiConsumer<? super K, ? super V> action) {
+ if (action == null)
+ throw new NullPointerException();
+ int mc = modCount;
+ for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
+ action.accept(e.key, e.value);
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+
+ public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
+ if (function == null)
+ throw new NullPointerException();
+ int mc = modCount;
+ for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
+ e.value = function.apply(e.key, e.value);
+ if (modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+
+ // Iterators
+
+ abstract class LinkedHashIterator {
+ LinkedHashMap.Entry<K,V> next;
+ LinkedHashMap.Entry<K,V> current;
+ int expectedModCount;
+
+ LinkedHashIterator() {
+ next = head;
+ expectedModCount = modCount;
+ current = null;
+ }
+
+ public final boolean hasNext() {
+ return next != null;
+ }
+
+ final LinkedHashMap.Entry<K,V> nextNode() {
+ LinkedHashMap.Entry<K,V> e = next;
+ if (modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ if (e == null)
+ throw new NoSuchElementException();
+ current = e;
+ next = e.after;
+ return e;
+ }
+
+ public final void remove() {
+ Node<K,V> p = current;
+ if (p == null)
+ throw new IllegalStateException();
+ if (modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ current = null;
+ removeNode(p.hash, p.key, null, false, false);
+ expectedModCount = modCount;
+ }
+ }
+
+ final class LinkedKeyIterator extends LinkedHashIterator
+ implements Iterator<K> {
+ public final K next() { return nextNode().getKey(); }
+ }
+
+ final class LinkedValueIterator extends LinkedHashIterator
+ implements Iterator<V> {
+ public final V next() { return nextNode().value; }
+ }
+
+ final class LinkedEntryIterator extends LinkedHashIterator
+ implements Iterator<Map.Entry<K,V>> {
+ public final Map.Entry<K,V> next() { return nextNode(); }
+ }
+
+
+}
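
As the class javadoc promises, the copy constructor preserves whatever iteration order the source map presents, regardless of the source's implementation; a quick illustration:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class CopyOrderDemo {
        public static void main(String[] args) {
            Map<String, Integer> src = new TreeMap<>(); // sorted iteration order
            src.put("b", 2);
            src.put("a", 1);
            src.put("c", 3);

            Map<String, Integer> copy = new LinkedHashMap<>(src);
            System.out.println(copy.keySet()); // [a, b, c], TreeMap's order preserved
        }
    }
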
--
2.22.0