2 File: ConcurrentHashMap
4 Written by Doug Lea. Adapted and released, under explicit
5 permission, from JDK1.2 HashMap.java and Hashtable.java which
6 carry the following copyright:
8 * Copyright 1997 by Sun Microsystems, Inc.,
9 * 901 San Antonio Road, Palo Alto, California, 94303, U.S.A.
10 * All rights reserved.
12 * This software is the confidential and proprietary information
13 * of Sun Microsystems, Inc. ("Confidential Information"). You
14 * shall not disclose such Confidential Information and shall use
15 * it only in accordance with the terms of the license agreement
16 * you entered into with Sun.
20 26nov2000 dl Created, based on ConcurrentReaderHashMap
21 12jan2001 dl public release
22 17nov2001 dl Minor tunings
23 24oct2003 dl Segment implements Serializable
26 package EDU.oswego.cs.dl.util.concurrent;
29 import java.util.AbstractMap;
30 import java.util.AbstractSet;
31 import java.util.AbstractCollection;
32 import java.util.Collection;
34 import java.util.Iterator;
35 import java.util.Enumeration;
36 import java.util.NoSuchElementException;
38 import java.io.Serializable;
39 import java.io.IOException;
40 import java.io.ObjectInputStream;
41 import java.io.ObjectOutputStream;
45 * A version of Hashtable supporting
46 * concurrency for both retrievals and updates:
51 * <dd> Retrievals may overlap updates. (This is the same policy as
52 * ConcurrentReaderHashMap.) Successful retrievals using get(key) and
53 * containsKey(key) usually run without locking. Unsuccessful
54 * retrievals (i.e., when the key is not present) do involve brief
55 * synchronization (locking). Because retrieval operations can
56 * ordinarily overlap with update operations (i.e., put, remove, and
57 * their derivatives), retrievals can only be guaranteed to return the
58 * results of the most recently <em>completed</em> operations holding
59 * upon their onset. Retrieval operations may or may not return
60 * results reflecting in-progress writing operations. However, the
61 * retrieval operations do always return consistent results -- either
62 * those holding before any single modification or after it, but never
63 * a nonsense result. For aggregate operations such as putAll and
64 * clear, concurrent reads may reflect insertion or removal of only some entries.
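 * <p> A minimal sketch (not from the original docs) of what this means for
 * callers: a <tt>get</tt> racing with a <tt>put</tt> on the same key returns
 * either the previously visible value (or null if the key was absent) or the
 * newly stored value, never a partial or mixed-up result.
 * <pre>
 *   // thread A:  map.put("k", newValue);
 *   // thread B:  Object v = map.get("k");
 *   // v is the old mapping for "k" (null if none) or newValue, never garbage
 * </pre>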
68 * Iterators and Enumerations (i.e., those returned by
69 * keySet().iterator(), entrySet().iterator(), values().iterator(),
70 * keys(), and elements()) return elements reflecting the state of the
71 * hash table at some point at or since the creation of the
72 * iterator/enumeration. They will return at most one instance of
73 * each element (via next()/nextElement()), but might or might not
74 * reflect puts and removes that have been processed since they were
75 * created. They do <em>not</em> throw ConcurrentModificationException.
76 * However, these iterators are designed to be used by only one
77 * thread at a time. Passing an iterator across multiple threads may
78 * lead to unpredictable results if the table is being concurrently modified.
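 * <p> A minimal usage sketch (not from the original docs): give each thread
 * its own iterator rather than sharing one across threads.
 * <pre>
 *   for (Iterator it = map.keySet().iterator(); it.hasNext(); ) {
 *     Object key = it.next();  // never throws ConcurrentModificationException
 *   }
 * </pre>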
84 * <dd> This class supports a hard-wired preset <em>concurrency
85 * level</em> of 32. This allows a maximum of 32 put and/or remove
86 * operations to proceed concurrently. This level is an upper bound on
87 * concurrency, not a guarantee, since it interacts with how
88 * well-strewn elements are across bins of the table. (The preset
89 * value in part reflects the fact that even on large multiprocessors,
90 * factors other than synchronization tend to be bottlenecks when more
91 * than 32 threads concurrently attempt updates.)
92 * Additionally, operations triggering internal resizing and clearing
93 * do not execute concurrently with any operation.
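 * <p> Illustrative sketch (not from the original docs): the lock guarding a
 * given key is one of the 32 <tt>Segment</tt> objects, chosen from the low
 * five bits of the spread hash, so at most 32 updates can hold locks at once.
 * <pre>
 *   // inside put/remove (see the implementation below):
 *   Segment seg = segments[hash(key) & SEGMENT_MASK];  // SEGMENT_MASK == 31
 * </pre>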
96 * There is <em>NOT</em> any support for locking the entire table to
97 * prevent updates. This makes it impossible, for example, to
98 * add an element only if it is not already present, since another
99 * thread may be in the process of doing the same thing.
100 * If you need such capabilities, consider instead using the
101 * ConcurrentReaderHashMap class.
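 * <p> For example, the following check-then-act idiom is <em>not</em> atomic
 * here (a hedged sketch, not from the original docs):
 * <pre>
 *   if (!map.containsKey(key))     // another thread may insert the key
 *     map.put(key, value);         // right here, and this put overwrites it
 * </pre>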
105 * Because of how concurrency control is split up, the
106 * size() and isEmpty() methods require accumulations across 32
107 * control segments, and so might be slightly slower than you expect.
110 * This class may be used as a direct replacement for
111 * java.util.Hashtable in any application that does not rely
112 * on the ability to lock the entire table to prevent updates.
113 * As of this writing, it performs much faster than Hashtable in
114 * typical multi-threaded applications with multiple readers and writers.
115 * Like Hashtable but unlike java.util.HashMap,
116 * this class does NOT allow <tt>null</tt> to be used as a key or value.
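 * <p> A minimal drop-in usage sketch (illustrative, not from the original docs):
 * <pre>
 *   Map cache = new ConcurrentHashMap();
 *   cache.put("key", "value");      // neither key nor value may be null
 *   Object v = cache.get("key");    // usually completes without locking
 * </pre>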
120 * Implementation note: A slightly
121 * faster implementation of this class will be possible once planned
122 * Java Memory Model revisions are in place.
124 * <p>[<a href="http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/intro.html"> Introduction to this package. </a>]
129 public class ConcurrentHashMap
131 implements Map, Cloneable, Serializable {
134 The basic strategy is an optimistic-style scheme based on
135 the guarantee that the hash table and its lists are always
136 kept in a consistent enough state to be read without locking:
138 * Read operations first proceed without locking, by traversing the
139 apparently correct list of the apparently correct bin. If an
140 entry is found and has not been invalidated (i.e., its value field has not been nulled), it is
141 returned. If not found, operations must recheck (after a memory
142 barrier) to make sure they are using both the right list and
143 the right table (which can change under resizes). If
144 invalidated, reads must acquire main update lock to wait out
145 the update, and then re-traverse.
147 * All list additions are at the front of each bin, making it easy
148 to check changes, and also fast to traverse. Entry next
149 pointers are never assigned. Remove() builds new nodes when
150 necessary to preserve this.
152 * Remove() (also clear()) invalidates removed nodes to alert read
153 operations that they must wait out the full modifications.
155 * Locking for puts, removes (and, when necessary, gets, etc.)
156 is controlled by Segments, each covering a portion of the
157 table. During operations requiring global exclusivity (mainly
158 resize and clear), ALL of these locks are acquired at once.
159 Note that these segments are NOT contiguous -- they are based
160 on the least 5 bits of hashcodes. This ensures that the same
161 segment controls the same slots before and after resizing, which
162 is necessary for supporting concurrent retrievals. This
163 comes at the price of a mismatch of logical vs physical locality,
164 but this seems not to be a performance problem in practice.
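   Illustrative check (not part of the original comment): because the table
   capacity is always a power of two that is at least 32, a slot's low five
   bits -- and hence its owning segment -- never change when the table doubles:

     int oldIndex = hash & (oldCapacity - 1);
     int newIndex = hash & (2 * oldCapacity - 1);
     // (oldIndex & SEGMENT_MASK) == (newIndex & SEGMENT_MASK) == (hash & SEGMENT_MASK)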
169 * The hash table data.
171 protected transient Entry[] table;
175 * The number of concurrency control segments.
176 * The value can be at most 32 since ints are used
177 * as bitsets over segments. Empirically, it doesn't
178 * seem to pay to decrease it either, so the value should be at least 32.
179 * In other words, do not redefine this :-)
182 protected static final int CONCURRENCY_LEVEL = 32;
185 * Mask value for indexing into segments
188 protected static final int SEGMENT_MASK = CONCURRENCY_LEVEL - 1;
191 * Bookkeeping for each concurrency control segment.
192 * Each segment contains a local count of the number of
193 * elements in its region.
194 * However, the main use of a Segment is for its lock.
196 protected final static class Segment implements Serializable {
198 * The number of elements in this segment's region.
199 * It is always updated within synchronized blocks.
204 * Get the count under synch.
206 protected synchronized int getCount() { return count; }
209 * Force a synchronization
211 protected synchronized void synch() {}
215 * The array of concurrency control segments.
218 protected final Segment[] segments = new Segment[CONCURRENCY_LEVEL];
222 * The default initial number of table slots for this table (32).
223 * Used when not otherwise specified in constructor.
225 public static int DEFAULT_INITIAL_CAPACITY = 32;
229 * The minimum capacity, used if a lower value is implicitly specified
230 * by either of the constructors with arguments.
231 * MUST be a power of two.
233 private static final int MINIMUM_CAPACITY = 32;
236 * The maximum capacity, used if a higher value is implicitly specified
237 * by either of the constructors with arguments.
238 * MUST be a power of two <= 1<<30.
240 private static final int MAXIMUM_CAPACITY = 1 << 30;
243 * The default load factor for this table (0.75)
244 * Used when not otherwise specified in constructor.
247 public static final float DEFAULT_LOAD_FACTOR = 0.75f;
250 * The load factor for the hash table.
254 protected final float loadFactor;
257 * Per-segment resize threshold.
261 protected int threshold;
265 * Number of segments voting for resize. The table is
266 * doubled when 1/4 of the segments reach threshold.
267 * Volatile but updated without synch since this is just a heuristic.
270 protected transient volatile int votesForResize;
274 * Return the number of set bits in w.
275 * For a derivation of this algorithm, see
276 * "Algorithms and data structures with applications to
277 * graphics and geometry", by Jurg Nievergelt and Klaus Hinrichs,
278 * Prentice Hall, 1993.
279 * See also notes by Torsten Sillke at
280 * http://www.mathematik.uni-bielefeld.de/~sillke/PROBLEMS/bitcount
282 protected static int bitcount(int w) {
283 w -= (0xaaaaaaaa & w) >>> 1;
284 w = (w & 0x33333333) + ((w >>> 2) & 0x33333333);
285 w = (w + (w >>> 4)) & 0x0f0f0f0f;
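    // The remaining folding steps are elided in this excerpt; a standard
    // population-count completion (presumed, not verified against the original
    // source) sums the per-byte counts and returns the total:
    w += (w >>> 8);
    w += (w >>> 16);
    return w & 0xff;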
292 * Returns the appropriate capacity (power of two) for the specified
293 * initial capacity argument.
295 private int p2capacity(int initialCapacity) {
296 int cap = initialCapacity;
298 // Compute the appropriate capacity
300 if (cap > MAXIMUM_CAPACITY || cap < 0) {
301 result = MAXIMUM_CAPACITY;
303 result = MINIMUM_CAPACITY;
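            // Elided in this excerpt: presumably the capacity is then doubled
            // until it is at least the requested size, e.g.
            //     while (result < cap)
            //         result <<= 1;
            // and the method returns result.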
311 * Return hash code for Object x. Since we are using power-of-two
312 * tables, it is worth the effort to improve hashcode via
313 * the same multiplicative scheme as used in IdentityHashMap.
315 protected static int hash(Object x) {
316 int h = x.hashCode();
317 // Multiply by 127 (quickly, via shifts), and mix in some high
318 // bits to help guard against bunching of codes that are
319 // consecutive or equally spaced.
320 return ((h << 7) - h + (h >>> 9) + (h >>> 17));
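    // Illustrative note (not in the original): without this mixing, keys whose
    // hashCodes differ only in the upper bits (e.g. 0, 32, 64, ...) would all
    // select the same segment and cluster into a few bins, since both the bin
    // index and the segment are taken from the low-order bits of the hash.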
325 * Check for equality of non-null references x and y.
327 protected boolean eq(Object x, Object y) {
328 return x == y || x.equals(y);
331 /** Create table array and set the per-segment threshold **/
332 protected Entry[] newTable(int capacity) {
333 threshold = (int)(capacity * loadFactor / CONCURRENCY_LEVEL) + 1;
334 return new Entry[capacity];
338 * Constructs a new, empty map with the specified initial
339 * capacity and the specified load factor.
341 * @param initialCapacity the initial capacity.
342 * The actual initial capacity is rounded to the nearest power of two.
343 * @param loadFactor the load factor threshold, used to control resizing.
344 * This value is used in an approximate way: When at least
345 * a quarter of the segments of the table reach per-segment threshold, or
346 * one of the segments itself exceeds overall threshold,
347 * the table is doubled.
348 * This will on average cause resizing when the table-wide
349 * load factor is slightly less than the threshold. If you'd like
350 * to avoid resizing, you can set this to a ridiculously large value.
352 * @throws IllegalArgumentException if the load factor is nonpositive.
354 public ConcurrentHashMap(int initialCapacity,
356 if (!(loadFactor > 0))
357 throw new IllegalArgumentException("Illegal Load factor: "+
359 this.loadFactor = loadFactor;
360 for (int i = 0; i < segments.length; ++i)
361 segments[i] = new Segment();
362 int cap = p2capacity(initialCapacity);
363 table = newTable(cap);
367 * Constructs a new, empty map with the specified initial
368 * capacity and default load factor.
370 * @param initialCapacity the initial capacity of the hash table.
372 * @throws IllegalArgumentException if the initial maximum number
373 * of elements is less than zero.
376 public ConcurrentHashMap(int initialCapacity) {
377 this(initialCapacity, DEFAULT_LOAD_FACTOR);
381 * Constructs a new, empty map with a default initial capacity
382 * and default load factor.
384 public ConcurrentHashMap() {
385 this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
389 * Constructs a new map with the same mappings as the given map. The
390 * map is created with a capacity of twice the number of mappings in
391 * the given map or 32 (whichever is greater), and a default load factor.
393 public ConcurrentHashMap(Map t) {
394 this(Math.max((int) (t.size() / DEFAULT_LOAD_FACTOR) + 1,
396 DEFAULT_LOAD_FACTOR);
401 * Returns the number of key-value mappings in this map.
403 * @return the number of key-value mappings in this map.
407 for (int i = 0; i < segments.length; ++i)
408 c += segments[i].getCount();
413 * Returns <tt>true</tt> if this map contains no key-value mappings.
415 * @return <tt>true</tt> if this map contains no key-value mappings.
417 public boolean isEmpty() {
418 for (int i = 0; i < segments.length; ++i)
419 if (segments[i].getCount() != 0)
426 * Returns the value to which the specified key is mapped in this table.
428 * @param key a key in the table.
429 * @return the value to which the key is mapped in this table;
430 * <code>null</code> if the key is not mapped to any value in
432 * @exception NullPointerException if the key is <code>null</code>.
434 * @see #put(Object, Object)
436 public Object get(Object key) {
437 int hash = hash(key); // throws null pointer exception if key null
439 // Try first without locking...
441 int index = hash & (tab.length - 1);
442 Entry first = tab[index];
445 for (e = first; e != null; e = e.next) {
446 if (e.hash == hash && eq(key, e.key)) {
447 Object value = e.value;
455 // Recheck under synch if key apparently not there or interference
456 Segment seg = segments[hash & SEGMENT_MASK];
459 index = hash & (tab.length - 1);
460 Entry newFirst = tab[index];
461 if (e != null || first != newFirst) {
462 for (e = newFirst; e != null; e = e.next) {
463 if (e.hash == hash && eq(key, e.key))
472 * Tests if the specified object is a key in this table.
474 * @param key possible key.
475 * @return <code>true</code> if and only if the specified object
476 * is a key in this table, as determined by the
477 * <tt>equals</tt> method; <code>false</code> otherwise.
478 * @exception NullPointerException if the key is <code>null</code>.
480 * @see #contains(Object)
483 public boolean containsKey(Object key) {
484 return get(key) != null;
489 * Maps the specified <code>key</code> to the specified
490 * <code>value</code> in this table. Neither the key nor the
491 * value can be <code>null</code>. (Note that this policy is
492 * the same as for java.util.Hashtable, but unlike java.util.HashMap,
493 * which does accept nulls as valid keys and values.)<p>
495 * The value can be retrieved by calling the <code>get</code> method
496 * with a key that is equal to the original key.
498 * @param key the table key.
499 * @param value the value.
500 * @return the previous value of the specified key in this table,
501 * or <code>null</code> if it did not have one.
502 * @exception NullPointerException if the key or value is <code>null</code>.
504 * @see Object#equals(Object)
507 public Object put(Object key, Object value) {
509 throw new NullPointerException();
511 int hash = hash(key);
512 Segment seg = segments[hash & SEGMENT_MASK];
519 int index = hash & (tab.length-1);
520 Entry first = tab[index];
522 for (Entry e = first; e != null; e = e.next) {
523 if (e.hash == hash && eq(key, e.key)) {
524 Object oldValue = e.value;
530 // Add to front of list
531 Entry newEntry = new Entry(hash, key, value, first);
532 tab[index] = newEntry;
534 if ((segcount = ++seg.count) < threshold)
537 int bit = (1 << (hash & SEGMENT_MASK));
538 votes = votesForResize;
539 if ((votes & bit) == 0)
540 votes = votesForResize |= bit;
543 // Attempt resize if 1/4 segs vote,
544 // or if this seg itself reaches the overall threshold.
545 // (The latter check is just a safeguard to avoid pathological cases.)
546 if (bitcount(votes) >= CONCURRENCY_LEVEL / 4 ||
547 segcount > (threshold * CONCURRENCY_LEVEL))
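            // Illustrative arithmetic (not in the original): with CONCURRENCY_LEVEL
            // of 32 this attempts a resize once 8 distinct segments have voted, or
            // once this one segment alone holds more entries than the whole-table
            // threshold (threshold is per-segment, so threshold * 32 overall).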
554 * Gather all locks in order to call rehash, by
555 * recursing within synch blocks for each segment index.
556 * @param index the current segment. initially call value must be 0
557 * @param assumedTab the state of table on first call to resize. If
558 * this changes on any call, the attempt is aborted because the
559 * table has already been resized by another thread.
562 protected void resize(int index, Entry[] assumedTab) {
563 Segment seg = segments[index];
565 if (assumedTab == table) {
567 if (next < segments.length)
568 resize(next, assumedTab);
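                // Elided in this excerpt: presumably, once index reaches the last
                // segment all 32 locks are held and rehash() is invoked here; the
                // locks are then released as the recursive synchronized blocks unwind.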
576 * Rehashes the contents of this map into a new table
577 * with a larger capacity.
579 protected void rehash() {
580 votesForResize = 0; // reset
582 Entry[] oldTable = table;
583 int oldCapacity = oldTable.length;
585 if (oldCapacity >= MAXIMUM_CAPACITY) {
586 threshold = Integer.MAX_VALUE; // avoid retriggering
590 int newCapacity = oldCapacity << 1;
591 Entry[] newTable = newTable(newCapacity);
592 int mask = newCapacity - 1;
595 * Reclassify nodes in each list to new Map. Because we are
596 * using power-of-two expansion, the elements from each bin
597 * must either stay at same index, or move to
598 * oldCapacity+index. We also eliminate unnecessary node
599 * creation by catching cases where old nodes can be reused
600 * because their next fields won't change. Statistically, at
601 * the default threshold, only about one-sixth of them need
602 * cloning. (The nodes they replace will be garbage
603 * collectable as soon as they are no longer referenced by any
604 * reader thread that may be in the midst of traversing the table right now.)
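         * Illustrative example (not part of the original comment): with
         * oldCapacity == 64, an entry whose hash has bit 6 clear keeps its
         * index (hash & 63), while one with bit 6 set moves to
         * (hash & 63) + 64; in both cases the low five bits, and therefore
         * the owning segment, are unchanged.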
608 for (int i = 0; i < oldCapacity ; i++) {
609 // We need to guarantee that any existing reads of old Map can
610 // proceed. So we cannot yet null out each bin.
611 Entry e = oldTable[i];
614 int idx = e.hash & mask;
617 // Single node on list
622 // Reuse trailing consecutive sequence of all same bit
625 for (Entry last = next; last != null; last = last.next) {
626 int k = last.hash & mask;
632 newTable[lastIdx] = lastRun;
634 // Clone all remaining nodes
635 for (Entry p = e; p != lastRun; p = p.next) {
636 int k = p.hash & mask;
637 newTable[k] = new Entry(p.hash, p.key,
638 p.value, newTable[k]);
649 * Removes the key (and its corresponding value) from this
650 * table. This method does nothing if the key is not in the table.
652 * @param key the key that needs to be removed.
653 * @return the value to which the key had been mapped in this table,
654 * or <code>null</code> if the key did not have a mapping.
655 * @exception NullPointerException if the key is <code>null</code>.
658 public Object remove(Object key) {
659 return remove(key, null);
664 * Removes the (key, value) pair from this
665 * table. This method does nothing if the key is not in the table,
666 * or if the key is associated with a different value. This method
667 * is needed by EntrySet.
669 * @param key the key that needs to be removed.
670 * @param value the associated value. If the value is null,
671 * it means "any value".
672 * @return the value to which the key had been mapped in this table,
673 * or <code>null</code> if the key did not have a mapping.
674 * @exception NullPointerException if the key is <code>null</code>.
678 protected Object remove(Object key, Object value) {
681 1. Set value field to null, to force get() to retry
682 2. Rebuild the list without this entry.
683 All entries following removed node can stay in list, but
684 all preceding ones need to be cloned. Traversals rely
685 on this strategy to ensure that elements will not be
686 repeated during iteration.
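           Illustrative example (not part of the original comment): removing c
           from the bin list a -> b -> c -> d keeps d (and any later nodes) in
           place, drops c, and replaces a and b with fresh clones linked in
           front of d, so the new chain still reaches every surviving node.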
689 int hash = hash(key);
690 Segment seg = segments[hash & SEGMENT_MASK];
694 int index = hash & (tab.length-1);
695 Entry first = tab[index];
701 if (e.hash == hash && eq(key, e.key))
706 Object oldValue = e.value;
707 if (value != null && !value.equals(oldValue))
713 for (Entry p = first; p != e; p = p.next)
714 head = new Entry(p.hash, p.key, p.value, head);
723 * Returns <tt>true</tt> if this map maps one or more keys to the
724 * specified value. Note: This method requires a full internal
725 * traversal of the hash table, and so is much slower than
726 * method <tt>containsKey</tt>.
728 * @param value value whose presence in this map is to be tested.
729 * @return <tt>true</tt> if this map maps one or more keys to the
731 * @exception NullPointerException if the value is <code>null</code>.
733 public boolean containsValue(Object value) {
735 if (value == null) throw new NullPointerException();
737 for (int s = 0; s < segments.length; ++s) {
738 Segment seg = segments[s];
740 synchronized(seg) { tab = table; }
741 for (int i = s; i < tab.length; i+= segments.length) {
742 for (Entry e = tab[i]; e != null; e = e.next)
743 if (value.equals(e.value))
751 * Tests if some key maps into the specified value in this table.
752 * This operation is more expensive than the <code>containsKey</code>
755 * Note that this method is identical in functionality to containsValue,
756 * (which is part of the Map interface in the collections framework).
758 * @param value a value to search for.
759 * @return <code>true</code> if and only if some key maps to the
760 * <code>value</code> argument in this table as
761 * determined by the <tt>equals</tt> method;
762 * <code>false</code> otherwise.
763 * @exception NullPointerException if the value is <code>null</code>.
764 * @see #containsKey(Object)
765 * @see #containsValue(Object)
769 public boolean contains(Object value) {
770 return containsValue(value);
774 * Copies all of the mappings from the specified map to this one.
776 * These mappings replace any mappings that this map had for any of the
777 * keys currently in the specified Map.
779 * @param t Mappings to be stored in this map.
782 public void putAll(Map t) {
787 // Expand enough to hold at least n elements without resizing.
788 // We can only resize table by factor of two at a time.
789 // It is faster to rehash with fewer elements, so do it now.
793 synchronized(segments[0]) { // must synch on some segment. pick 0.
795 max = threshold * CONCURRENCY_LEVEL;
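                // Elided in this excerpt: presumably the surrounding loop compares the
                // incoming size against max (the whole-table threshold) and keeps
                // resizing until the table is large enough, before copying entries below.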
802 for (Iterator it = t.entrySet().iterator(); it.hasNext();) {
803 Map.Entry entry = (Map.Entry) it.next();
804 put(entry.getKey(), entry.getValue());
809 * Removes all mappings from this map.
812 public void clear() {
813 // We don't need all locks at once so long as locks
814 // are obtained in low to high order
815 for (int s = 0; s < segments.length; ++s) {
816 Segment seg = segments[s];
819 for (int i = s; i < tab.length; i+= segments.length) {
820 for (Entry e = tab[i]; e != null; e = e.next)
830 * Returns a shallow copy of this
831 * <tt>ConcurrentHashMap</tt> instance: the keys and
832 * values themselves are not cloned.
834 * @return a shallow copy of this map.
837 public Object clone() {
838 // We cannot call super.clone, since it would share final segments array,
839 // and there's no way to reassign finals.
840 return new ConcurrentHashMap(this);
845 protected transient Set keySet = null;
846 protected transient Set entrySet = null;
847 protected transient Collection values = null;
850 * Returns a set view of the keys contained in this map. The set is
851 * backed by the map, so changes to the map are reflected in the set, and
852 * vice-versa. The set supports element removal, which removes the
853 * corresponding mapping from this map, via the <tt>Iterator.remove</tt>,
854 * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
855 * <tt>clear</tt> operations. It does not support the <tt>add</tt> or
856 * <tt>addAll</tt> operations.
858 * @return a set view of the keys contained in this map.
861 public Set keySet() {
863 return (ks != null)? ks : (keySet = new KeySet());
866 private class KeySet extends AbstractSet {
867 public Iterator iterator() {
868 return new KeyIterator();
871 return ConcurrentHashMap.this.size();
873 public boolean contains(Object o) {
874 return ConcurrentHashMap.this.containsKey(o);
876 public boolean remove(Object o) {
877 return ConcurrentHashMap.this.remove(o) != null;
879 public void clear() {
880 ConcurrentHashMap.this.clear();
885 * Returns a collection view of the values contained in this map. The
886 * collection is backed by the map, so changes to the map are reflected in
887 * the collection, and vice-versa. The collection supports element
888 * removal, which removes the corresponding mapping from this map, via the
889 * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
890 * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
891 * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
893 * @return a collection view of the values contained in this map.
896 public Collection values() {
897 Collection vs = values;
898 return (vs != null)? vs : (values = new Values());
901 private class Values extends AbstractCollection {
902 public Iterator iterator() {
903 return new ValueIterator();
906 return ConcurrentHashMap.this.size();
908 public boolean contains(Object o) {
909 return ConcurrentHashMap.this.containsValue(o);
911 public void clear() {
912 ConcurrentHashMap.this.clear();
917 * Returns a collection view of the mappings contained in this map. Each
918 * element in the returned collection is a <tt>Map.Entry</tt>. The
919 * collection is backed by the map, so changes to the map are reflected in
920 * the collection, and vice-versa. The collection supports element
921 * removal, which removes the corresponding mapping from the map, via the
922 * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
923 * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
924 * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
926 * @return a collection view of the mappings contained in this map.
929 public Set entrySet() {
931 return (es != null) ? es : (entrySet = new EntrySet());
934 private class EntrySet extends AbstractSet {
935 public Iterator iterator() {
936 return new HashIterator();
938 public boolean contains(Object o) {
939 if (!(o instanceof Map.Entry))
941 Map.Entry entry = (Map.Entry)o;
942 Object v = ConcurrentHashMap.this.get(entry.getKey());
943 return v != null && v.equals(entry.getValue());
945 public boolean remove(Object o) {
946 if (!(o instanceof Map.Entry))
948 Map.Entry e = (Map.Entry)o;
949 return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()) != null;
952 return ConcurrentHashMap.this.size();
954 public void clear() {
955 ConcurrentHashMap.this.clear();
960 * Returns an enumeration of the keys in this table.
962 * @return an enumeration of the keys in this table.
968 public Enumeration keys() {
969 return new KeyIterator();
973 * Returns an enumeration of the values in this table.
974 * Use the Enumeration methods on the returned object to fetch the elements sequentially.
977 * @return an enumeration of the values in this table.
978 * @see java.util.Enumeration
984 public Enumeration elements() {
985 return new ValueIterator();
989 * ConcurrentHashMap collision list entry.
992 protected static class Entry implements Map.Entry {
994 The use of volatile for value field ensures that
995 we can detect status changes without synchronization.
996 The other fields are never changed, and are
1000 protected final Object key;
1001 protected volatile Object value;
1002 protected final int hash;
1003 protected final Entry next;
1005 Entry(int hash, Object key, Object value, Entry next) {
1014 public Object getKey() {
1019 * Get the value. Note: In an entrySet or entrySet.iterator,
1020 * unless you can guarantee lack of concurrent modification,
1021 * <tt>getValue</tt> <em>might</em> return null, reflecting the
1022 * fact that the entry has been concurrently removed. However,
1023 * there are no assurances that concurrent removals will be
1024 * reflected using this method.
1026 * @return the current value, or null if the entry has been
1027 * detectably removed.
1029 public Object getValue() {
1034 * Set the value of this entry. Note: In an entrySet or
1035 * entrySet.iterator(), unless you can guarantee lack of concurrent
1036 * modification, <tt>setValue</tt> is not strictly guaranteed to
1037 * actually replace the value field obtained via the <tt>get</tt>
1038 * operation of the underlying hash table in multithreaded
1039 * applications. If iterator-wide synchronization is not used,
1040 * and any other concurrent <tt>put</tt> or <tt>remove</tt>
1041 * operations occur, sometimes even to <em>other</em> entries,
1042 * then this change is not guaranteed to be reflected in the hash
1043 * table. (It might, or it might not. There are no assurances either way.)
1046 * @param value the new value.
1047 * @return the previous value, or null if the entry has been detectably removed.
1049 * @exception NullPointerException if the value is <code>null</code>.
1053 public Object setValue(Object value) {
1055 throw new NullPointerException();
1056 Object oldValue = this.value;
1061 public boolean equals(Object o) {
1062 if (!(o instanceof Map.Entry))
1064 Map.Entry e = (Map.Entry)o;
1065 return (key.equals(e.getKey()) && value.equals(e.getValue()));
1068 public int hashCode() {
1069 return key.hashCode() ^ value.hashCode();
1072 public String toString() {
1073 return key + "=" + value;
1078 protected class HashIterator implements Iterator, Enumeration {
1079 protected final Entry[] tab; // snapshot of table
1080 protected int index; // current slot
1081 protected Entry entry = null; // current node of slot
1082 protected Object currentKey; // key for current node
1083 protected Object currentValue; // value for current node
1084 protected Entry lastReturned = null; // last node returned by next
1086 protected HashIterator() {
1087 // force all segments to synch
1088 synchronized(segments[0]) { tab = table; }
1089 for (int i = 1; i < segments.length; ++i) segments[i].synch();
1090 index = tab.length - 1;
1093 public boolean hasMoreElements() { return hasNext(); }
1094 public Object nextElement() { return next(); }
1096 public boolean hasNext() {
1098 currentKey and currentValue are set here to ensure that next()
1099 returns normally if hasNext() returns true. This avoids
1100 surprises especially when the final element is removed during
1101 traversal -- instead, we just ignore the removal during the current traversal.
1106 if (entry != null) {
1107 Object v = entry.value;
1109 currentKey = entry.key;
1117 while (entry == null && index >= 0)
1118 entry = tab[index--];
1120 if (entry == null) {
1121 currentKey = currentValue = null;
1127 protected Object returnValueOfNext() { return entry; }
1129 public Object next() {
1130 if (currentKey == null && !hasNext())
1131 throw new NoSuchElementException();
1133 Object result = returnValueOfNext();
1134 lastReturned = entry;
1135 currentKey = currentValue = null;
1140 public void remove() {
1141 if (lastReturned == null)
1142 throw new IllegalStateException();
1143 ConcurrentHashMap.this.remove(lastReturned.key);
1144 lastReturned = null;
1149 protected class KeyIterator extends HashIterator {
1150 protected Object returnValueOfNext() { return currentKey; }
1153 protected class ValueIterator extends HashIterator {
1154 protected Object returnValueOfNext() { return currentValue; }
1158 * Save the state of the <tt>ConcurrentHashMap</tt>
1159 * instance to a stream (i.e., serialize it).
1163 * An estimate of the table size, followed by
1164 * the key (Object) and value (Object)
1165 * for each key-value mapping, followed by a null pair.
1166 * The key-value mappings are emitted in no particular order.
1169 private void writeObject(java.io.ObjectOutputStream s)
1170 throws IOException {
1171 // Write out the loadfactor, and any hidden stuff
1172 s.defaultWriteObject();
1174 // Write out capacity estimate. It is OK if this
1175 // changes during the write, since it is only used by
1176 // readObject to set initial capacity, to avoid needless resizings.
1179 synchronized(segments[0]) { cap = table.length; }
1182 // Write out keys and values (alternating)
1183 for (int k = 0; k < segments.length; ++k) {
1184 Segment seg = segments[k];
1186 synchronized(seg) { tab = table; }
1187 for (int i = k; i < tab.length; i+= segments.length) {
1188 for (Entry e = tab[i]; e != null; e = e.next) {
1189 s.writeObject(e.key);
1190 s.writeObject(e.value);
1195 s.writeObject(null);
1196 s.writeObject(null);
1200 * Reconstitute the <tt>ConcurrentHashMap</tt>
1201 * instance from a stream (i.e., deserialize it).
1204 private void readObject(java.io.ObjectInputStream s)
1205 throws IOException, ClassNotFoundException {
1206 // Read in the threshold, loadfactor, and any hidden stuff
1207 s.defaultReadObject();
1209 int cap = s.readInt();
1210 table = newTable(cap);
1211 for (int i = 0; i < segments.length; ++i)
1212 segments[i] = new Segment();
1215 // Read the keys and values, and put the mappings in the table
1217 Object key = s.readObject();
1218 Object value = s.readObject();
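            // Elided in this excerpt: presumably the read loop continues until it
            // encounters the terminating null pair written by writeObject, and
            // otherwise calls put(key, value) for each pair read from the stream.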