1 /*
2 * Copyright 2026 The Netty Project
3 *
4 * The Netty Project licenses this file to you under the Apache License,
5 * version 2.0 (the "License"); you may not use this file except in compliance
6 * with the License. You may obtain a copy of the License at:
7 *
8 * https://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 * License for the specific language governing permissions and limitations
14 * under the License.
15 */
16 /*
17 * Written by Doug Lea with assistance from members of JCP JSR-166
18 * Expert Group and released to the public domain, as explained at
19 * https://creativecommons.org/publicdomain/zero/1.0/
20 *
21 * With substantial modifications by The Netty Project team.
22 */
23 package io.netty.util.concurrent;
24
25 import io.netty.util.internal.PlatformDependent;
26
27 import java.lang.invoke.MethodHandle;
28 import java.lang.invoke.MethodHandles;
29 import java.lang.invoke.MethodType;
30 import java.lang.invoke.VarHandle;
31 import java.util.Iterator;
32 import java.util.NoSuchElementException;
33 import java.util.Objects;
34 import java.util.concurrent.ThreadLocalRandom;
35 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
36 import java.util.concurrent.atomic.LongAdder;
37 import java.util.function.BiConsumer;
38 import java.util.function.BiFunction;
39
40 import static java.util.Objects.requireNonNull;
41
42 /**
43 * A scalable concurrent multimap implementation.
44 * The map is sorted according to the natural ordering of its {@code int} keys.
45 *
46 * <p>This class implements a concurrent variant of <a
47 * href="https://en.wikipedia.org/wiki/Skip_list" target="_top">SkipLists</a>
48 * providing expected average <i>log(n)</i> time cost for the
49 * {@code containsKey}, {@code get}, {@code put} and
50 * {@code remove} operations and their variants. Insertion, removal,
51 * update, and access operations safely execute concurrently by
52 * multiple threads.
53 *
54 * <p>This class is a multimap, which means the same key can be associated with
55 * multiple values. Each such instance will be represented by a separate
56 * {@code IntEntry}. There is no defined ordering for the values mapped to
57 * the same key.
58 *
59 * <p>As a multimap, certain atomic operations such as {@code putIfAbsent},
60 * {@code compute}, or {@code computeIfPresent} cannot be supported.
61 * Likewise, get-like operations that assume a single value per key cannot be supported.
62 *
63 * <p>Iterators and spliterators are
64 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
65 *
66 * <p>All {@code IntEntry} pairs returned by methods in this class
67 * represent snapshots of mappings at the time they were
68 * produced. They do <em>not</em> support the {@code Entry.setValue}
69 * method. (Note however that it is possible to change mappings in the
70 * associated map using {@code put}, {@code remove}, or
71 * {@code replace}, depending on exactly which effect you need.)
72 *
73 * <p>Beware that bulk operations such as {@code forEach},
74 * {@code replaceAll}, {@code containsValue}, and {@code clear} are
75 * <em>not</em> guaranteed to be performed atomically. For example, an
76 * iterator operating concurrently with a series of {@code put} calls
77 * might view only some of the added elements.
78 *
79 * <p>This class does <em>not</em> permit the use of {@code null} values
80 * because some null return values cannot be reliably distinguished from
81 * the absence of elements.
82 *
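 * <p>A minimal usage sketch; the sentinel {@code -1} below is an arbitrary
 * choice for illustration and must never be used as a real key:
 * <pre>{@code
 * ConcurrentSkipListIntObjMultimap<String> map = new ConcurrentSkipListIntObjMultimap<>(-1);
 * map.put(42, "a");
 * map.put(42, "b"); // same key, a second independent entry
 * for (ConcurrentSkipListIntObjMultimap.IntEntry<String> e : map) {
 *     System.out.println(e.getKey() + " => " + e.getValue());
 * }
 * map.remove(42, "a"); // removes only the (42, "a") entry
 * }</pre>
 *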
83 * @param <V> the type of mapped values
84 */
85 public class ConcurrentSkipListIntObjMultimap<V> implements Iterable<ConcurrentSkipListIntObjMultimap.IntEntry<V>> {
86 /*
87 * This class implements a tree-like two-dimensionally linked skip
88 * list in which the index levels are represented in separate
89 * nodes from the base nodes holding data. There are two reasons
90 * for taking this approach instead of the usual array-based
91 * structure: 1) Array based implementations seem to encounter
92 * more complexity and overhead 2) We can use cheaper algorithms
93 * for the heavily-traversed index lists than can be used for the
94 * base lists. Here's a picture of some of the basics for a
95 * possible list with 2 levels of index:
96 *
97 * Head nodes Index nodes
98 * +-+ right +-+ +-+
99 * |2|---------------->| |--------------------->| |->null
100 * +-+ +-+ +-+
101 * | down | |
102 * v v v
103 * +-+ +-+ +-+ +-+ +-+ +-+
104 * |1|----------->| |->| |------>| |----------->| |------>| |->null
105 * +-+ +-+ +-+ +-+ +-+ +-+
106 * v | | | | |
107 * Nodes next v v v v v
108 * +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+
109 * | |->|A|->|B|->|C|->|D|->|E|->|F|->|G|->|H|->|I|->|J|->|K|->null
110 * +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+
111 *
112 * The base lists use a variant of the HM linked ordered set
113 * algorithm. See Tim Harris, "A pragmatic implementation of
114 * non-blocking linked lists"
115 * https://www.cl.cam.ac.uk/~tlh20/publications.html and Maged
116 * Michael "High Performance Dynamic Lock-Free Hash Tables and
117 * List-Based Sets"
118 * https://www.research.ibm.com/people/m/michael/pubs.htm. The
119 * basic idea in these lists is to mark the "next" pointers of
120 * deleted nodes when deleting to avoid conflicts with concurrent
121 * insertions, and when traversing to keep track of triples
122 * (predecessor, node, successor) in order to detect when and how
123 * to unlink these deleted nodes.
124 *
125 * Rather than using mark-bits to mark list deletions (which can
126 * be slow and space-intensive using AtomicMarkedReference), nodes
127 * use direct CAS'able next pointers. On deletion, instead of
128 * marking a pointer, they splice in another node that can be
129 * thought of as standing for a marked pointer (see method
130 * unlinkNode). Using plain nodes acts roughly like "boxed"
131 * implementations of marked pointers, but uses new nodes only
132 * when nodes are deleted, not for every link. This requires less
133 * space and supports faster traversal. Even if marked references
134 * were better supported by JVMs, traversal using this technique
135 * might still be faster because any search need only read ahead
136 * one more node than otherwise required (to check for trailing
137 * marker) rather than unmasking mark bits or whatever on each
138 * read.
139 *
140 * This approach maintains the essential property needed in the HM
141 * algorithm of changing the next-pointer of a deleted node so
142 * that any other CAS of it will fail, but implements the idea by
143 * changing the pointer to point to a different node (with
144 * otherwise illegal null fields), not by marking it. While it
145 * would be possible to further squeeze space by defining marker
146 * nodes not to have key/value fields, it isn't worth the extra
147 * type-testing overhead. The deletion markers are rarely
148 * encountered during traversal, are easily detected via null
149 * checks that are needed anyway, and are normally quickly garbage
150 * collected. (Note that this technique would not work well in
151 * systems without garbage collection.)
152 *
153 * In addition to using deletion markers, the lists also use
154 * nullness of value fields to indicate deletion, in a style
155 * similar to typical lazy-deletion schemes. If a node's value is
156 * null, then it is considered logically deleted and ignored even
157 * though it is still reachable.
158 *
159 * Here's the sequence of events for a deletion of node n with
160 * predecessor b and successor f, initially:
161 *
162 * +------+ +------+ +------+
163 * ... | b |------>| n |----->| f | ...
164 * +------+ +------+ +------+
165 *
166 * 1. CAS n's value field from non-null to null.
167 * Traversals encountering a node with null value ignore it.
168 * However, ongoing insertions and deletions might still modify
169 * n's next pointer.
170 *
171 * 2. CAS n's next pointer to point to a new marker node.
172 * From this point on, no other nodes can be appended to n,
173 * which avoids deletion errors in CAS-based linked lists.
174 *
175 * +------+ +------+ +------+ +------+
176 * ... | b |------>| n |----->|marker|------>| f | ...
177 * +------+ +------+ +------+ +------+
178 *
179 * 3. CAS b's next pointer over both n and its marker.
180 * From this point on, no new traversals will encounter n,
181 * and it can eventually be GCed.
182 * +------+ +------+
183 * ... | b |----------------------------------->| f | ...
184 * +------+ +------+
185 *
186 * A failure at step 1 leads to simple retry due to a lost race
187 * with another operation. Steps 2-3 can fail because some other
188 * thread noticed during a traversal a node with null value and
189 * helped out by marking and/or unlinking. This helping-out
190 * ensures that no thread can become stuck waiting for progress of
191 * the deleting thread.
192 *
193 * Skip lists add indexing to this scheme, so that the base-level
194 * traversals start close to the locations being found, inserted
195 * or deleted -- usually base level traversals only traverse a few
196 * nodes. This doesn't change the basic algorithm except for the
197 * need to make sure base traversals start at predecessors (here,
198 * b) that are not (structurally) deleted, otherwise retrying
199 * after processing the deletion.
200 *
201 * Index levels are maintained using CAS to link and unlink
202 * successors ("right" fields). Races are allowed in index-list
203 * operations that can (rarely) fail to link in a new index node.
204 * (We can't do this of course for data nodes.) However, even
205 * when this happens, the index lists correctly guide search.
206 * This can impact performance, but since skip lists are
207 * probabilistic anyway, the net result is that under contention,
208 * the effective "p" value may be lower than its nominal value.
209 *
210 * Index insertion and deletion sometimes require a separate
211 * traversal pass occurring after the base-level action, to add or
212 * remove index nodes. This adds to single-threaded overhead, but
213 * improves contended multithreaded performance by narrowing
214 * interference windows, and allows deletion to ensure that all
215 * index nodes will be made unreachable upon return from a public
216 * remove operation, thus avoiding unwanted garbage retention.
217 *
218 * Indexing uses skip list parameters that maintain good search
219 * performance while using sparser-than-usual indices: The
220 * hardwired parameters k=1, p=0.5 (see method doPut) mean that
221 * about one-quarter of the nodes have indices. Of those that do,
222 * half have one level, a quarter have two, and so on (see Pugh's
223 * Skip List Cookbook, sec 3.4), up to a maximum of 62 levels
224 * (appropriate for up to 2^63 elements). The expected total
225 * space requirement for a map is slightly less than for the
226 * current implementation of java.util.TreeMap.
227 *
228 * Changing the level of the index (i.e., the height of the
229 * tree-like structure) also uses CAS. Creation of an index with
230 * height greater than the current level adds a level to the head
231 * index by CAS'ing on a new top-most head. To maintain good
232 * performance after a lot of removals, deletion methods
233 * heuristically try to reduce the height if the topmost levels
234 * appear to be empty. This may encounter races in which it is
235 * possible (but rare) to reduce and "lose" a level just as it is
236 * about to contain an index (that will then never be
237 * encountered). This does no structural harm, and in practice
238 * appears to be a better option than allowing unrestrained growth
239 * of levels.
240 *
241 * This class provides concurrent-reader-style memory consistency,
242 * ensuring that read-only methods report status and/or values no
243 * staler than those holding at method entry. This is done by
244 * performing all publication and structural updates using
245 * (volatile) CAS, placing an acquireFence in a few access
246 * methods, and ensuring that linked objects are transitively
247 * acquired via dependent reads (normally once) unless performing
248 * a volatile-mode CAS operation (that also acts as an acquire and
249 * release). This form of fence-hoisting is similar to RCU and
250 * related techniques (see McKenney's online book
251 * https://www.kernel.org/pub/linux/kernel/people/paulmck/perfbook/perfbook.html).
252 * It minimizes overhead that may otherwise occur when using so
253 * many volatile-mode reads. Using explicit acquireFences is
254 * logistically easier than targeting particular fields to be read
255 * in acquire mode: fences are just hoisted up as far as possible,
256 * to the entry points or loop headers of a few methods. A
257 * potential disadvantage is that these few remaining fences are
258 * not easily optimized away by compilers under exclusively
259 * single-thread use. It requires some care to avoid volatile
260 * mode reads of other fields. (Note that the memory semantics of
261 * a reference dependently read in plain mode exactly once are
262 * equivalent to those for atomic opaque mode.) Iterators and
263 * other traversals encounter each node and value exactly once.
264 * Other operations locate an element (or position to insert an
265 * element) via a sequence of dereferences. This search is broken
266 * into two parts. Method findPredecessor (and its specialized
267 * embeddings) searches index nodes only, returning a base-level
268 * predecessor of the key. Callers carry out the base-level
269 * search, restarting if encountering a marker preventing link
270 * modification. In some cases, it is possible to encounter a
271 * node multiple times while descending levels. For mutative
272 * operations, the reported value is validated using CAS (else
273 * retrying), preserving linearizability with respect to each
274 * other. Others may return any (non-null) value holding in the
275 * course of the method call. (Search-based methods also include
276 * some useless-looking explicit null checks designed to allow
277 * more fields to be nulled out upon removal, to reduce floating
278 * garbage, but which is not currently done, pending discovery of
279 * a way to do this with less impact on other operations.)
280 *
281 * To produce random values without interference across threads,
282 * we use within-JDK thread local random support (via the
283 * "secondary seed", to avoid interference with user-level
284 * ThreadLocalRandom.)
285 *
286 * For explanation of algorithms sharing at least a couple of
287 * features with this one, see Mikhail Fomitchev's thesis
288 * (https://www.cs.yorku.ca/~mikhail/), Keir Fraser's thesis
289 * (https://www.cl.cam.ac.uk/users/kaf24/), and Hakan Sundell's
290 * thesis (https://www.cs.chalmers.se/~phs/).
291 *
292 * Notation guide for local variables
293 * Node: b, n, f, p for predecessor, node, successor, aux
294 * Index: q, r, d for index node, right, down.
295 * Head: h
296 * Keys: k, key
297 * Values: v, value
298 * Comparisons: c
299 */
300
301 /** No-key sentinel value */
302 private final int noKey;
303 /** Lazily initialized topmost index of the skiplist. */
304 private volatile /*XXX: Volatile only required for ARFU; remove if we can use VarHandle*/ Index<V> head;
305 /** Element count */
306 private final LongAdder adder;
307
308 /**
309 * Nodes hold keys and values, and are singly linked in sorted
310 * order, possibly with some intervening marker nodes. The list is
311 * headed by a header node accessible as head.node. Headers and
312 * marker nodes use the no-key sentinel as their key. The val field is
313 * nulled out upon deletion; the primitive key field is left unchanged.
314 */
315 static final class Node<V> {
316 final int key; // currently, never detached
317 volatile /*XXX: Volatile only required for ARFU; remove if we can use VarHandle*/ V val;
318 volatile /*XXX: Volatile only required for ARFU; remove if we can use VarHandle*/ Node<V> next;
319 Node(int key, V value, Node<V> next) {
320 this.key = key;
321 val = value;
322 this.next = next;
323 }
324 }
325
326 /**
327 * Index nodes represent the levels of the skip list.
328 */
329 static final class Index<V> {
330 final Node<V> node; // currently, never detached
331 final Index<V> down;
332 volatile /*XXX: Volatile only required for ARFU; remove if we can use VarHandle*/ Index<V> right;
333 Index(Node<V> node, Index<V> down, Index<V> right) {
334 this.node = node;
335 this.down = down;
336 this.right = right;
337 }
338 }
339
340 /**
341 * The multimap entry type with primitive {@code int} keys.
342 */
343 public static final class IntEntry<V> implements Comparable<IntEntry<V>> {
344 private final int key;
345 private final V value;
346
347 public IntEntry(int key, V value) {
348 this.key = key;
349 this.value = value;
350 }
351
352 /**
353 * Get the corresponding key.
354 */
355 public int getKey() {
356 return key;
357 }
358
359 /**
360 * Get the corresponding value.
361 */
362 public V getValue() {
363 return value;
364 }
365
366 @Override
367 public boolean equals(Object o) {
368 if (!(o instanceof IntEntry)) {
369 return false;
370 }
371
372 IntEntry<?> intEntry = (IntEntry<?>) o;
373 return key == intEntry.key && Objects.equals(value, intEntry.value);
374 }
375
376 @Override
377 public int hashCode() {
378 int result = key;
379 result = 31 * result + Objects.hashCode(value);
380 return result;
381 }
382
383 @Override
384 public String toString() {
385 return "IntEntry[" + key + " => " + value + ']';
386 }
387
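        /**
         * Compares entries by key only. Note that this ordering is inconsistent
         * with {@link #equals(Object)}, which also considers the value.
         */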
388 @Override
389 public int compareTo(IntEntry<V> o) {
390 return Integer.compare(key, o.key);
391 }
392 }
393
394 /* ---------------- Utilities -------------- */
395
396 /**
397 * Compares two {@code int} keys using their natural ordering.
398 */
400 static int cpr(int x, int y) {
401 return Integer.compare(x, y);
402 }
403
404 /**
405 * Returns the header for base node list, or null if uninitialized
406 */
407 final Node<V> baseHead() {
408 Index<V> h;
409 acquireFence();
410 return (h = head) == null ? null : h.node;
411 }
412
413 /**
414 * Tries to unlink deleted node n from predecessor b (if both
415 * exist), by first splicing in a marker if not already present.
416 * Upon return, node n is sure to be unlinked from b, possibly
417 * via the actions of some other thread.
418 *
419 * @param b if nonnull, predecessor
420 * @param n if nonnull, node known to be deleted
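 * @param noKey the no-key sentinel used to create and detect deletion marker nodes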
421 */
422 static <V> void unlinkNode(Node<V> b, Node<V> n, int noKey) {
423 if (b != null && n != null) {
424 Node<V> f, p;
425 for (;;) {
426 if ((f = n.next) != null && f.key == noKey) {
427 p = f.next; // already marked
428 break;
429 } else if (NEXT.compareAndSet(n, f,
430 new Node<V>(noKey, null, f))) {
431 p = f; // add marker
432 break;
433 }
434 }
435 NEXT.compareAndSet(b, n, p);
436 }
437 }
438
439 /**
440 * Adds to the element count.
441 *
442 * @param c count to add
443 */
444 private void addCount(long c) {
445 adder.add(c);
446 }
447
448 /**
449 * Returns the element count, treating transient negative values as zero.
450 */
451 final long getAdderCount() {
452 long c;
453 return (c = adder.sum()) <= 0L ? 0L : c; // ignore transient negatives
454 }
455
456 /* ---------------- Traversal -------------- */
457
458 /**
459 * Returns an index node with key strictly less than given key.
460 * Also unlinks indexes to deleted nodes found along the way.
461 * Callers rely on this side-effect of clearing indices to deleted
462 * nodes.
463 *
464 * @param key the key
465 * @return a base-level predecessor of key, or null if the map is uninitialized or key equals the no-key sentinel
466 */
467 private Node<V> findPredecessor(int key) {
468 Index<V> q;
469 acquireFence();
470 if ((q = head) == null || key == noKey) {
471 return null;
472 } else {
473 for (Index<V> r, d;;) {
474 while ((r = q.right) != null) {
475 Node<V> p; int k;
476 if ((p = r.node) == null || (k = p.key) == noKey ||
477 p.val == null) { // unlink index to deleted node
478 RIGHT.compareAndSet(q, r, r.right);
479 } else if (cpr(key, k) > 0) {
480 q = r;
481 } else {
482 break;
483 }
484 }
485 if ((d = q.down) != null) {
486 q = d;
487 } else {
488 return q.node;
489 }
490 }
491 }
492 }
493
494 /**
495 * Returns node holding key or null if no such, clearing out any
496 * deleted nodes seen along the way. Repeatedly traverses at
497 * base-level looking for key starting at predecessor returned
498 * from findPredecessor, processing base-level deletions as
499 * encountered. Restarts occur, at traversal step encountering
500 * node n, if n's key is the no-key sentinel, indicating it is a marker, so
501 * its predecessor is deleted before continuing, which we help do
502 * by re-finding a valid predecessor. The traversal loops in
503 * doPut, doRemove, and findNear all include the same checks.
504 *
505 * @param key the key
506 * @return node holding key, or null if no such
507 */
508 private Node<V> findNode(int key) {
509 if (key == noKey) {
510 throw new IllegalArgumentException(); // don't postpone errors
511 }
512 Node<V> b;
513 outer: while ((b = findPredecessor(key)) != null) {
514 for (;;) {
515 Node<V> n; int k; int c;
516 if ((n = b.next) == null) {
517 break outer; // empty
518 } else if ((k = n.key) == noKey) {
519 break; // b is deleted
520 } else if (n.val == null) {
521 unlinkNode(b, n, noKey); // n is deleted
522 } else if ((c = cpr(key, k)) > 0) {
523 b = n;
524 } else if (c == 0) {
525 return n;
526 } else {
527 break outer;
528 }
529 }
530 }
531 return null;
532 }
533
534 /**
535 * Gets value for key. Same idea as findNode, except skips over
536 * deletions and markers, and returns first encountered value to
537 * avoid possibly inconsistent rereads.
538 *
539 * @param key the key
540 * @return the value, or null if absent
541 */
542 private V doGet(int key) {
543 Index<V> q;
544 acquireFence();
545 if (key == noKey) {
546 throw new IllegalArgumentException();
547 }
548 V result = null;
549 if ((q = head) != null) {
550 outer: for (Index<V> r, d;;) {
551 while ((r = q.right) != null) {
552 Node<V> p; int k; V v; int c;
553 if ((p = r.node) == null || (k = p.key) == noKey ||
554 (v = p.val) == null) {
555 RIGHT.compareAndSet(q, r, r.right);
556 } else if ((c = cpr(key, k)) > 0) {
557 q = r;
558 } else if (c == 0) {
559 result = v;
560 break outer;
561 } else {
562 break;
563 }
564 }
565 if ((d = q.down) != null) {
566 q = d;
567 } else {
568 Node<V> b, n;
569 if ((b = q.node) != null) {
570 while ((n = b.next) != null) {
571 V v; int c;
572 int k = n.key;
573 if ((v = n.val) == null || k == noKey ||
574 (c = cpr(key, k)) > 0) {
575 b = n;
576 } else {
577 if (c == 0) {
578 result = v;
579 }
580 break;
581 }
582 }
583 }
584 break;
585 }
586 }
587 }
588 return result;
589 }
590
591 /* ---------------- Insertion -------------- */
592
593 /**
594 * Main insertion method. Always inserts a new entry, because this is a
595 * multimap; the replace-if-present path of the original map is disabled.
596 *
597 * @param key the key
598 * @param value the value to associate with the key
599 * @param onlyIfAbsent retained from the original map; has no effect in this multimap
 * @return always {@code null}
600 */
601 private V doPut(int key, V value, boolean onlyIfAbsent) {
602 if (key == noKey) {
603 throw new IllegalArgumentException();
604 }
605 for (;;) {
606 Index<V> h; Node<V> b;
607 acquireFence();
608 int levels = 0; // number of levels descended
609 if ((h = head) == null) { // try to initialize
610 Node<V> base = new Node<V>(noKey, null, null);
611 h = new Index<V>(base, null, null);
612 b = HEAD.compareAndSet(this, null, h) ? base : null;
613 } else {
614 for (Index<V> q = h, r, d;;) { // count while descending
615 while ((r = q.right) != null) {
616 Node<V> p; int k;
617 if ((p = r.node) == null || (k = p.key) == noKey ||
618 p.val == null) {
619 RIGHT.compareAndSet(q, r, r.right);
620 } else if (cpr(key, k) > 0) {
621 q = r;
622 } else {
623 break;
624 }
625 }
626 if ((d = q.down) != null) {
627 ++levels;
628 q = d;
629 } else {
630 b = q.node;
631 break;
632 }
633 }
634 }
635 if (b != null) {
636 Node<V> z = null; // new node, if inserted
637 for (;;) { // find insertion point
638 Node<V> n, p; int k; V v; int c;
639 if ((n = b.next) == null) {
640 if (b.key == noKey) { // if empty, type check key now TODO: remove?
641 cpr(key, key);
642 }
643 c = -1;
644 } else if ((k = n.key) == noKey) {
645 break; // can't append; restart
646 } else if ((v = n.val) == null) {
647 unlinkNode(b, n, noKey);
648 c = 1;
649 } else if ((c = cpr(key, k)) > 0) {
650 b = n; // Multimap: the equal-key replace branch below is intentionally disabled
651 // } else if (c == 0 &&
652 // (onlyIfAbsent || VAL.compareAndSet(n, v, value))) {
653 // return v;
654 }
655
656 if (c <= 0 &&
657 NEXT.compareAndSet(b, n,
658 p = new Node<V>(key, value, n))) {
659 z = p;
660 break;
661 }
662 }
663
664 if (z != null) {
665 int lr = ThreadLocalRandom.current().nextInt();
666 if ((lr & 0x3) == 0) { // add indices with 1/4 prob
667 int hr = ThreadLocalRandom.current().nextInt();
668 long rnd = ((long) hr << 32) | ((long) lr & 0xffffffffL);
669 int skips = levels; // levels to descend before add
670 Index<V> x = null;
671 for (;;) { // create at most 62 indices
672 x = new Index<V>(z, x, null);
673 if (rnd >= 0L || --skips < 0) {
674 break;
675 } else {
676 rnd <<= 1;
677 }
678 }
679 if (addIndices(h, skips, x, noKey) && skips < 0 &&
680 head == h) { // try to add new level
681 Index<V> hx = new Index<V>(z, x, null);
682 Index<V> nh = new Index<V>(h.node, h, hx);
683 HEAD.compareAndSet(this, h, nh);
684 }
685 if (z.val == null) { // deleted while adding indices
686 findPredecessor(key); // clean
687 }
688 }
689 addCount(1L);
690 return null;
691 }
692 }
693 }
694 }
695
696 /**
697 * Add indices after an insertion. Descends iteratively to the
698 * highest level of insertion, then recursively, to chain index
699 * nodes to lower ones. Returns false on (staleness) failure,
700 * disabling higher-level insertions. Recursion depths are
701 * exponentially less probable.
702 *
703 * @param q starting index for current level
704 * @param skips levels to skip before inserting
705 * @param x index for this insertion
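 * @param noKey the no-key sentinel used for header and marker nodes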
706 */
707 static <V> boolean addIndices(Index<V> q, int skips, Index<V> x, int noKey) {
708 Node<V> z; int key;
709 if (x != null && (z = x.node) != null && (key = z.key) != noKey &&
710 q != null) { // hoist checks
711 boolean retrying = false;
712 for (;;) { // find splice point
713 Index<V> r, d; int c;
714 if ((r = q.right) != null) {
715 Node<V> p; int k;
716 if ((p = r.node) == null || (k = p.key) == noKey ||
717 p.val == null) {
718 RIGHT.compareAndSet(q, r, r.right);
719 c = 0;
720 } else if ((c = cpr(key, k)) > 0) {
721 q = r;
722 } else if (c == 0) {
723 break; // stale
724 }
725 } else {
726 c = -1;
727 }
728
729 if (c < 0) {
730 if ((d = q.down) != null && skips > 0) {
731 --skips;
732 q = d;
733 } else if (d != null && !retrying &&
734 !addIndices(d, 0, x.down, noKey)) {
735 break;
736 } else {
737 x.right = r;
738 if (RIGHT.compareAndSet(q, r, x)) {
739 return true;
740 } else {
741 retrying = true; // re-find splice point
742 }
743 }
744 }
745 }
746 }
747 return false;
748 }
749
750 /* ---------------- Deletion -------------- */
751
752 /**
753 * Main deletion method. Locates node, nulls value, appends a
754 * deletion marker, unlinks predecessor, removes associated index
755 * nodes, and possibly reduces head index level.
756 *
757 * @param key the key
758 * @param value if non-null, the value that must be
759 * associated with key
760 * @return the removed value, or null if not found
761 */
762 final V doRemove(int key, Object value) {
763 if (key == noKey) {
764 throw new IllegalArgumentException();
765 }
766 V result = null;
767 Node<V> b;
768 outer: while ((b = findPredecessor(key)) != null &&
769 result == null) {
770 for (;;) {
771 Node<V> n; int k; V v; int c;
772 if ((n = b.next) == null) {
773 break outer;
774 } else if ((k = n.key) == noKey) {
775 break;
776 } else if ((v = n.val) == null) {
777 unlinkNode(b, n, noKey);
778 } else if ((c = cpr(key, k)) > 0) {
779 b = n;
780 } else if (c < 0) {
781 break outer;
782 } else if (value != null && !value.equals(v)) {
783 // break outer;
784 b = n; // Multimap: value mismatch; keep scanning, another entry with the same key may match
785 } else if (VAL.compareAndSet(n, v, null)) {
786 result = v;
787 unlinkNode(b, n, noKey);
788 break; // loop to clean up
789 }
790 }
791 }
792 if (result != null) {
793 tryReduceLevel();
794 addCount(-1L);
795 }
796 return result;
797 }
798
799 /**
800 * Possibly reduce head level if it has no nodes. This method can
801 * (rarely) make mistakes, in which case levels can disappear even
802 * though they are about to contain index nodes. This impacts
803 * performance, not correctness. To minimize mistakes as well as
804 * to reduce hysteresis, the level is reduced by one only if the
805 * topmost three levels look empty. Also, if the removed level
806 * looks non-empty after CAS, we try to change it back quickly
807 * before anyone notices our mistake! (This trick works pretty
808 * well because this method will practically never make mistakes
809 * unless current thread stalls immediately before first CAS, in
810 * which case it is very unlikely to stall again immediately
811 * afterwards, so will recover.)
812 * <p>
813 * We put up with all this rather than just let levels grow
814 * because otherwise, even a small map that has undergone a large
815 * number of insertions and removals will have a lot of levels,
816 * slowing down access more than would an occasional unwanted
817 * reduction.
818 */
819 private void tryReduceLevel() {
820 Index<V> h, d, e;
821 if ((h = head) != null && h.right == null &&
822 (d = h.down) != null && d.right == null &&
823 (e = d.down) != null && e.right == null &&
824 HEAD.compareAndSet(this, h, d) &&
825 h.right != null) { // recheck
826 HEAD.compareAndSet(this, d, h); // try to backout
827 }
828 }
829
830 /* ---------------- Finding and removing first element -------------- */
831
832 /**
833 * Gets first valid node, unlinking deleted nodes if encountered.
834 * @return first node or null if empty
835 */
836 final Node<V> findFirst() {
837 Node<V> b, n;
838 if ((b = baseHead()) != null) {
839 while ((n = b.next) != null) {
840 if (n.val == null) {
841 unlinkNode(b, n, noKey);
842 } else {
843 return n;
844 }
845 }
846 }
847 return null;
848 }
849
850 /**
851 * Entry snapshot version of findFirst
852 */
853 final IntEntry<V> findFirstEntry() {
854 Node<V> b, n; V v;
855 if ((b = baseHead()) != null) {
856 while ((n = b.next) != null) {
857 if ((v = n.val) == null) {
858 unlinkNode(b, n, noKey);
859 } else {
860 return new IntEntry<V>(n.key, v);
861 }
862 }
863 }
864 return null;
865 }
866
867 /**
868 * Removes first entry; returns its snapshot.
869 * @return null if empty, else snapshot of first entry
870 */
871 private IntEntry<V> doRemoveFirstEntry() {
872 Node<V> b, n; V v;
873 if ((b = baseHead()) != null) {
874 while ((n = b.next) != null) {
875 if ((v = n.val) == null || VAL.compareAndSet(n, v, null)) {
876 int k = n.key;
877 unlinkNode(b, n, noKey);
878 if (v != null) {
879 tryReduceLevel();
880 findPredecessor(k); // clean index
881 addCount(-1L);
882 return new IntEntry<V>(k, v);
883 }
884 }
885 }
886 }
887 return null;
888 }
889
890 /* ---------------- Finding and removing last element -------------- */
891
892 /**
893 * Specialized version of find to get last valid node.
894 * @return last node or null if empty
895 */
896 final Node<V> findLast() {
897 outer: for (;;) {
898 Index<V> q; Node<V> b;
899 acquireFence();
900 if ((q = head) == null) {
901 break;
902 }
903 for (Index<V> r, d;;) {
904 while ((r = q.right) != null) {
905 Node<V> p;
906 if ((p = r.node) == null || p.val == null) {
907 RIGHT.compareAndSet(q, r, r.right);
908 } else {
909 q = r;
910 }
911 }
912 if ((d = q.down) != null) {
913 q = d;
914 } else {
915 b = q.node;
916 break;
917 }
918 }
919 if (b != null) {
920 for (;;) {
921 Node<V> n;
922 if ((n = b.next) == null) {
923 if (b.key == noKey) { // empty
924 break outer;
925 } else {
926 return b;
927 }
928 } else if (n.key == noKey) {
929 break;
930 } else if (n.val == null) {
931 unlinkNode(b, n, noKey);
932 } else {
933 b = n;
934 }
935 }
936 }
937 }
938 return null;
939 }
940
941 /**
942 * Entry version of findLast
943 * @return Entry for last node or null if empty
944 */
945 final IntEntry<V> findLastEntry() {
946 for (;;) {
947 Node<V> n; V v;
948 if ((n = findLast()) == null) {
949 return null;
950 }
951 if ((v = n.val) != null) {
952 return new IntEntry<V>(n.key, v);
953 }
954 }
955 }
956
957 /**
958 * Removes last entry; returns its snapshot.
959 * Specialized variant of doRemove.
960 * @return null if empty, else snapshot of last entry
961 */
962 private IntEntry<V> doRemoveLastEntry() {
963 outer: for (;;) {
964 Index<V> q; Node<V> b;
965 acquireFence();
966 if ((q = head) == null) {
967 break;
968 }
969 for (;;) {
970 Index<V> d, r; Node<V> p;
971 while ((r = q.right) != null) {
972 if ((p = r.node) == null || p.val == null) {
973 RIGHT.compareAndSet(q, r, r.right);
974 } else if (p.next != null) {
975 q = r; // continue only if a successor
976 } else {
977 break;
978 }
979 }
980 if ((d = q.down) != null) {
981 q = d;
982 } else {
983 b = q.node;
984 break;
985 }
986 }
987 if (b != null) {
988 for (;;) {
989 Node<V> n; int k; V v;
990 if ((n = b.next) == null) {
991 if (b.key == noKey) { // empty
992 break outer;
993 } else {
994 break; // retry
995 }
996 } else if ((k = n.key) == noKey) {
997 break;
998 } else if ((v = n.val) == null) {
999 unlinkNode(b, n, noKey);
1000 } else if (n.next != null) {
1001 b = n;
1002 } else if (VAL.compareAndSet(n, v, null)) {
1003 unlinkNode(b, n, noKey);
1004 tryReduceLevel();
1005 findPredecessor(k); // clean index
1006 addCount(-1L);
1007 return new IntEntry<V>(k, v);
1008 }
1009 }
1010 }
1011 }
1012 return null;
1013 }
1014
1015 /* ---------------- Relational operations -------------- */
1016
1017 // Control values OR'ed as arguments to findNear
1018
1019 private static final int EQ = 1;
1020 private static final int LT = 2;
1021 private static final int GT = 0; // Actually checked as !LT
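    // Examples of combinations (matching the public relational methods below):
    //   LT      -> lower   (greatest key strictly less than the given key)
    //   LT | EQ -> floor   (greatest key less than or equal to the given key)
    //   GT | EQ -> ceiling (least key greater than or equal to the given key)
    //   GT      -> higher  (least key strictly greater than the given key)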
1022
1023 /**
1024 * Variant of findNear returning IntEntry
1025 * @param key the key
1026 * @param rel the relation -- OR'ed combination of EQ, LT, GT
1027 * @return Entry fitting relation, or null if no such
1028 */
1029 final IntEntry<V> findNearEntry(int key, int rel) {
1030 for (;;) {
1031 Node<V> n; V v;
1032 if ((n = findNear(key, rel)) == null) {
1033 return null;
1034 }
1035 if ((v = n.val) != null) {
1036 return new IntEntry<V>(n.key, v);
1037 }
1038 }
1039 }
1040
1041 /**
1042 * Utility for ceiling, floor, lower, higher methods.
1043 * @param key the key
1044 * @param rel the relation -- OR'ed combination of EQ, LT, GT
1045 * @return nearest node fitting relation, or null if no such
1046 */
1047 final Node<V> findNear(int key, int rel) {
1048 if (key == noKey) {
1049 throw new IllegalArgumentException();
1050 }
1051 Node<V> result;
1052 outer: for (Node<V> b;;) {
1053 if ((b = findPredecessor(key)) == null) {
1054 result = null;
1055 break; // empty
1056 }
1057 for (;;) {
1058 Node<V> n; int k; int c;
1059 if ((n = b.next) == null) {
1060 result = (rel & LT) != 0 && b.key != noKey ? b : null;
1061 break outer;
1062 } else if ((k = n.key) == noKey) {
1063 break;
1064 } else if (n.val == null) {
1065 unlinkNode(b, n, noKey);
1066 } else if (((c = cpr(key, k)) == 0 && (rel & EQ) != 0) ||
1067 (c < 0 && (rel & LT) == 0)) {
1068 result = n;
1069 break outer;
1070 } else if (c <= 0 && (rel & LT) != 0) {
1071 result = b.key != noKey ? b : null;
1072 break outer;
1073 } else {
1074 b = n;
1075 }
1076 }
1077 }
1078 return result;
1079 }
1080
1081 /* ---------------- Constructors -------------- */
1082
1083 /**
1084 * Constructs a new, empty multimap, sorted according to the natural
1085 * ordering of its {@code int} keys.
1086 * @param noKey the value to use as a sentinel for signaling the absence of a key; it must never be used as an actual key
1087 */
1088 public ConcurrentSkipListIntObjMultimap(int noKey) {
1089 this.noKey = noKey;
1090 adder = new LongAdder();
1091 }
1092
1093 /* ------ Map API methods ------ */
1094
1095 /**
1096 * Returns {@code true} if this map contains a mapping for the specified
1097 * key.
1098 *
1099 * @param key key whose presence in this map is to be tested
1100 * @return {@code true} if this map contains a mapping for the specified key
1101 * @throws IllegalArgumentException if the specified key equals the
1102 * no-key sentinel
1104 */
1105 public boolean containsKey(int key) {
1106 return doGet(key) != null;
1107 }
1108
1109 /**
1110 * Returns the value to which the specified key is mapped,
1111 * or {@code null} if this map contains no mapping for the key.
1112 *
1113 * <p>Because this is a multimap, the key may be associated with several
1114 * values; in that case this method returns one of them, with no guarantee
1115 * as to which.
1116 *
1117 * @throws IllegalArgumentException if the specified key equals the
1118 * no-key sentinel
1122 */
1123 public V get(int key) {
1124 return doGet(key);
1125 }
1126
1127 /**
1128 * Returns the value to which the specified key is mapped,
1129 * or the given defaultValue if this map contains no mapping for the key.
1130 *
1131 * @param key the key
1132 * @param defaultValue the value to return if this map contains
1133 * no mapping for the given key
1134 * @return the mapping for the key, if present; else the defaultValue
1135 * @throws IllegalArgumentException if the specified key equals the
1136 * no-key sentinel
1137 */
1138 public V getOrDefault(int key, V defaultValue) {
1139 V v;
1140 return (v = doGet(key)) == null ? defaultValue : v;
1141 }
1142
1143 /**
1144 * Associates the specified value with the specified key in this map.
1145 * Because this is a multimap, any existing mappings for the key are kept
1146 * and a new entry is always added.
1147 *
1148 * @param key key with which the specified value is to be associated
1149 * @param value value to be associated with the specified key
1150 * @throws IllegalArgumentException if the specified key equals the
1151 * no-key sentinel
1152 * @throws NullPointerException if the specified value is null
1153 */
1154 public void put(int key, V value) {
1155 requireNonNull(value);
1156 doPut(key, value, false);
1157 }
1158
1159 /**
1160 * Removes one mapping for the specified key from this map, if present.
1161 * Because this is a multimap, other mappings for the same key are left intact.
1162 *
1163 * @param key key for which a mapping should be removed
1164 * @return a value that was associated with the specified key, or
1165 * {@code null} if there was no mapping for the key
1166 * @throws IllegalArgumentException if the specified key equals the
1167 * no-key sentinel
1168 */
1169 public V remove(int key) {
1170 return doRemove(key, null);
1171 }
1172
1173 /**
1174 * Returns {@code true} if this map maps one or more keys to the
1175 * specified value. This operation requires time linear in the
1176 * map size. Additionally, it is possible for the map to change
1177 * during execution of this method, in which case the returned
1178 * result may be inaccurate.
1179 *
1180 * @param value value whose presence in this map is to be tested
1181 * @return {@code true} if a mapping to {@code value} exists;
1182 * {@code false} otherwise
1183 * @throws NullPointerException if the specified value is null
1184 */
1185 public boolean containsValue(Object value) {
1186 requireNonNull(value);
1187 Node<V> b, n; V v;
1188 if ((b = baseHead()) != null) {
1189 while ((n = b.next) != null) {
1190 if ((v = n.val) != null && value.equals(v)) {
1191 return true;
1192 } else {
1193 b = n;
1194 }
1195 }
1196 }
1197 return false;
1198 }
1199
1200 /**
1201 * Get the approximate size of the collection.
1202 */
1203 public int size() {
1204 long c;
1205 return baseHead() == null ? 0 :
1206 (c = getAdderCount()) >= Integer.MAX_VALUE ?
1207 Integer.MAX_VALUE : (int) c;
1208 }
1209
1210 /**
1211 * Check if the collection is empty.
1212 */
1213 public boolean isEmpty() {
1214 return findFirst() == null;
1215 }
1216
1217 /**
1218 * Removes all of the mappings from this map.
1219 */
1220 public void clear() {
1221 Index<V> h, r, d; Node<V> b;
1222 acquireFence();
1223 while ((h = head) != null) {
1224 if ((r = h.right) != null) { // remove indices
1225 RIGHT.compareAndSet(h, r, null);
1226 } else if ((d = h.down) != null) { // remove levels
1227 HEAD.compareAndSet(this, h, d);
1228 } else {
1229 long count = 0L;
1230 if ((b = h.node) != null) { // remove nodes
1231 Node<V> n; V v;
1232 while ((n = b.next) != null) {
1233 if ((v = n.val) != null &&
1234 VAL.compareAndSet(n, v, null)) {
1235 --count;
1236 v = null;
1237 }
1238 if (v == null) {
1239 unlinkNode(b, n, noKey);
1240 }
1241 }
1242 }
1243 if (count != 0L) {
1244 addCount(count);
1245 } else {
1246 break;
1247 }
1248 }
1249 }
1250 }
1251
1252 /* ------ ConcurrentMap API methods ------ */
1253
1254 /**
1255 * Removes the specific entry with the given key and value, if it exists.
1256 *
1257 * @return {@code true} if such an entry was found and removed
1258 * @throws IllegalArgumentException if the specified key equals the
1259 * no-key sentinel
1260 */
1261 public boolean remove(int key, Object value) {
1262 if (key == noKey) {
1263 throw new IllegalArgumentException();
1264 }
1265 return value != null && doRemove(key, value) != null;
1266 }
1267
1268 /**
1269 * Replaces the specific entry with the given key and old value with the
1270 * given replacement value, if such an entry exists.
1271 *
1272 * @return {@code true} if such an entry was found and replaced
1273 * @throws IllegalArgumentException if the specified key equals the no-key sentinel
1274 * @throws NullPointerException if {@code oldValue} or {@code newValue} is null
1275 */
1276 public boolean replace(int key, V oldValue, V newValue) {
1277 if (key == noKey) {
1278 throw new IllegalArgumentException();
1279 }
1280 requireNonNull(oldValue);
1281 requireNonNull(newValue);
1282 for (;;) {
1283 Node<V> n; V v;
1284 if ((n = findNode(key)) == null) {
1285 return false;
1286 }
1287 if ((v = n.val) != null) {
1288 if (!oldValue.equals(v)) {
1289 return false;
1290 }
1291 if (VAL.compareAndSet(n, v, newValue)) {
1292 return true;
1293 }
1294 }
1295 }
1296 }
1297
1298 /* ------ SortedMap API methods ------ */
1299
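    /**
     * Returns the lowest key in this map, or the no-key sentinel if the map is empty.
     */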
1300 public int firstKey() {
1301 Node<V> n = findFirst();
1302 if (n == null) {
1303 return noKey;
1304 }
1305 return n.key;
1306 }
1307
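    /**
     * Returns the greatest key in this map, or the no-key sentinel if the map is empty.
     */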
1308 public int lastKey() {
1309 Node<V> n = findLast();
1310 if (n == null) {
1311 return noKey;
1312 }
1313 return n.key;
1314 }
1315
1316 /* ---------------- Relational operations -------------- */
1317
1318 /**
1319 * Returns a key-value mapping associated with the greatest key
1320 * strictly less than the given key, or {@code null} if there is
1321 * no such key. The returned entry does <em>not</em> support the
1322 * {@code Entry.setValue} method.
1323 *
1324 * @throws IllegalArgumentException if the specified key equals the no-key sentinel
1325 */
1326 public IntEntry<V> lowerEntry(int key) {
1327 return findNearEntry(key, LT);
1328 }
1329
1330 /**
1331 * Returns the greatest key strictly less than the given key, or the no-key sentinel if there is no such key.
 * @throws IllegalArgumentException if the specified key equals the no-key sentinel
1332 */
1333 public int lowerKey(int key) {
1334 Node<V> n = findNear(key, LT);
1335 return n == null ? noKey : n.key;
1336 }
1337
1338 /**
1339 * Returns a key-value mapping associated with the greatest key
1340 * less than or equal to the given key, or {@code null} if there
1341 * is no such key. The returned entry does <em>not</em> support
1342 * the {@code Entry.setValue} method.
1343 *
1344 * @param key the key
1345 * @throws IllegalArgumentException if the specified key equals the no-key sentinel
1346 */
1347 public IntEntry<V> floorEntry(int key) {
1348 return findNearEntry(key, LT | EQ);
1349 }
1350
1351 /**
1352 * Returns the greatest key less than or equal to the given key, or the no-key sentinel if there is no such key.
1353 * @param key the key
 * @throws IllegalArgumentException if the specified key equals the no-key sentinel
1354 */
1355 public int floorKey(int key) {
1356 Node<V> n = findNear(key, LT | EQ);
1357 return n == null ? noKey : n.key;
1358 }
1359
1360 /**
1361 * Returns a key-value mapping associated with the least key
1362 * greater than or equal to the given key, or {@code null} if
1363 * there is no such entry. The returned entry does <em>not</em>
1364 * support the {@code Entry.setValue} method.
1365 *
1366 * @throws IllegalArgumentException if the specified key equals the no-key sentinel
1367 */
1368 public IntEntry<V> ceilingEntry(int key) {
1369 return findNearEntry(key, GT | EQ);
1370 }
1371
1372 /**
1373 * Returns the least key greater than or equal to the given key, or the no-key sentinel if there is no such key.
 * @throws IllegalArgumentException if the specified key equals the no-key sentinel
1374 */
1375 public int ceilingKey(int key) {
1376 Node<V> n = findNear(key, GT | EQ);
1377 return n == null ? noKey : n.key;
1378 }
1379
1380 /**
1381 * Returns a key-value mapping associated with the least key
1382 * strictly greater than the given key, or {@code null} if there
1383 * is no such key. The returned entry does <em>not</em> support
1384 * the {@code Entry.setValue} method.
1385 *
1386 * @param key the key
1387 * @throws IllegalArgumentException if the specified key equals the no-key sentinel
1388 */
1389 public IntEntry<V> higherEntry(int key) {
1390 return findNearEntry(key, GT);
1391 }
1392
1393 /**
1394 * Returns the least key strictly greater than the given key, or the no-key sentinel if there is no such key.
1395 * @param key the key
 * @throws IllegalArgumentException if the specified key equals the no-key sentinel
1396 */
1397 public int higherKey(int key) {
1398 Node<V> n = findNear(key, GT);
1399 return n == null ? noKey : n.key;
1400 }
1401
1402 /**
1403 * Returns a key-value mapping associated with the least
1404 * key in this map, or {@code null} if the map is empty.
1405 * The returned entry does <em>not</em> support
1406 * the {@code Entry.setValue} method.
1407 */
1408 public IntEntry<V> firstEntry() {
1409 return findFirstEntry();
1410 }
1411
1412 /**
1413 * Returns a key-value mapping associated with the greatest
1414 * key in this map, or {@code null} if the map is empty.
1415 * The returned entry does <em>not</em> support
1416 * the {@code Entry.setValue} method.
1417 */
1418 public IntEntry<V> lastEntry() {
1419 return findLastEntry();
1420 }
1421
1422 /**
1423 * Removes and returns a key-value mapping associated with
1424 * the least key in this map, or {@code null} if the map is empty.
1425 * The returned entry does <em>not</em> support
1426 * the {@code Entry.setValue} method.
1427 */
1428 public IntEntry<V> pollFirstEntry() {
1429 return doRemoveFirstEntry();
1430 }
1431
1432 /**
1433 * Removes and returns a key-value mapping associated with
1434 * the greatest key in this map, or {@code null} if the map is empty.
1435 * The returned entry does <em>not</em> support
1436 * the {@code Entry.setValue} method.
1437 */
1438 public IntEntry<V> pollLastEntry() {
1439 return doRemoveLastEntry();
1440 }
1441
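    /**
     * Removes and returns a key-value mapping associated with the least key
     * greater than or equal to the given key, or {@code null} if there is no such key.
     */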
1442 public IntEntry<V> pollCeilingEntry(int key) {
1443 // TODO optimize this
1444 Node<V> node;
1445 V val;
1446 do {
1447 node = findNear(key, GT | EQ);
1448 if (node == null) {
1449 return null;
1450 }
1451 val = node.val;
1452 } while (val == null || !remove(node.key, val));
1453 return new IntEntry<>(node.key, val);
1454 }
1455
1456 /* ---------------- Iterators -------------- */
1457
1458 /**
1459 * Base of iterator classes
1460 */
1461 abstract class Iter<T> implements Iterator<T> {
1462 /** the last node returned by next() */
1463 Node<V> lastReturned;
1464 /** the next node to return from next(); */
1465 Node<V> next;
1466 /** Cache of next value field to maintain weak consistency */
1467 V nextValue;
1468
1469 /** Initializes ascending iterator for entire range. */
1470 Iter() {
1471 advance(baseHead());
1472 }
1473
1474 @Override
1475 public final boolean hasNext() {
1476 return next != null;
1477 }
1478
1479 /** Advances next to higher entry. */
1480 final void advance(Node<V> b) {
1481 Node<V> n = null;
1482 V v = null;
1483 if ((lastReturned = b) != null) {
1484 while ((n = b.next) != null && (v = n.val) == null) {
1485 b = n;
1486 }
1487 }
1488 nextValue = v;
1489 next = n;
1490 }
1491
1492 @Override
1493 public final void remove() {
1494 Node<V> n; int k;
1495 if ((n = lastReturned) == null || (k = n.key) == noKey) {
1496 throw new IllegalStateException();
1497 }
1498 // It would not be worth all of the overhead to directly
1499 // unlink from here. Using remove is fast enough.
1500 ConcurrentSkipListIntObjMultimap.this.remove(k, n.val); // TODO: inline and optimize this
1501 lastReturned = null;
1502 }
1503 }
1504
1505 final class EntryIterator extends Iter<IntEntry<V>> {
1506 @Override
1507 public IntEntry<V> next() {
1508 Node<V> n;
1509 if ((n = next) == null) {
1510 throw new NoSuchElementException();
1511 }
1512 int k = n.key;
1513 V v = nextValue;
1514 advance(n);
1515 return new IntEntry<V>(k, v);
1516 }
1517 }
1518
1519 @Override
1520 public Iterator<IntEntry<V>> iterator() {
1521 return new EntryIterator();
1522 }
1523
1524 // default Map method overrides
1525
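    /**
     * Performs the given action for each entry, in key order, until all entries
     * have been processed or the action throws an exception.
     */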
1526 public void forEach(BiConsumer<Integer, ? super V> action) {
1527 requireNonNull(action);
1528 Node<V> b, n; V v;
1529 if ((b = baseHead()) != null) {
1530 while ((n = b.next) != null) {
1531 if ((v = n.val) != null) {
1532 action.accept(n.key, v);
1533 }
1534 b = n;
1535 }
1536 }
1537 }
1538
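    /**
     * Replaces each entry's value with the result of applying the given function
     * to that entry, retrying each entry until its CAS succeeds.
     */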
1539 public void replaceAll(BiFunction<Integer, ? super V, ? extends V> function) {
1540 requireNonNull(function);
1541 Node<V> b, n; V v;
1542 if ((b = baseHead()) != null) {
1543 while ((n = b.next) != null) {
1544 while ((v = n.val) != null) {
1545 V r = function.apply(n.key, v);
1546 requireNonNull(r);
1547 if (VAL.compareAndSet(n, v, r)) {
1548 break;
1549 }
1550 }
1551 b = n;
1552 }
1553 }
1554 }
1555
1556 // Fence and atomic field updater mechanics (VarHandle is used only when available)
1557 private static final MethodHandle ACQUIRE_FENCE;
1558 private static final AtomicReferenceFieldUpdater<ConcurrentSkipListIntObjMultimap<?>, Index<?>> HEAD;
1559 private static final AtomicReferenceFieldUpdater<Node<?>, Node<?>> NEXT;
1560 private static final AtomicReferenceFieldUpdater<Node<?>, Object> VAL;
1561 private static final AtomicReferenceFieldUpdater<Index<?>, Index<?>> RIGHT;
1562 private static volatile int acquireFenceVariable;
1563 static {
1564 try {
1565 MethodHandles.Lookup lookup = MethodHandles.lookup();
1566
1567 Class<ConcurrentSkipListIntObjMultimap<?>> mapCls = cls(ConcurrentSkipListIntObjMultimap.class);
1568 Class<Index<?>> indexCls = cls(Index.class);
1569 Class<Node<?>> nodeCls = cls(Node.class);
1570
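            // Prefer VarHandle.acquireFence() when the runtime supports it; otherwise
            // fall back to the volatile store/load pair in acquireFenceFallback().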
1571 if (PlatformDependent.hasVarHandle()) {
1572 Class<VarHandle> varHandleCls = cls(Class.forName("java.lang.invoke.VarHandle"));
1573 ACQUIRE_FENCE = lookup.findStatic(
1574 varHandleCls,
1575 "acquireFence",
1576 MethodType.methodType(Void.TYPE));
1577 } else {
1578 ACQUIRE_FENCE = lookup.findStatic(
1579 mapCls,
1580 "acquireFenceFallback",
1581 MethodType.methodType(Void.TYPE));
1582 }
1583
1584 HEAD = AtomicReferenceFieldUpdater.newUpdater(mapCls, indexCls, "head");
1585 NEXT = AtomicReferenceFieldUpdater.newUpdater(nodeCls, nodeCls, "next");
1586 VAL = AtomicReferenceFieldUpdater.newUpdater(nodeCls, Object.class, "val");
1587 RIGHT = AtomicReferenceFieldUpdater.newUpdater(indexCls, indexCls, "right");
1588 } catch (ReflectiveOperationException e) {
1589 throw new ExceptionInInitializerError(e);
1590 }
1591 }
1592
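    /** Unchecked-cast helper for obtaining {@code Class} references with wildcard type arguments. */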
1593 @SuppressWarnings("unchecked")
1594 private static <T> Class<T> cls(Class<?> cls) {
1595 return (Class<T>) cls;
1596 }
1597
1598 /**
1599 * Acquire fence: ensures that loads before the fence are not reordered with loads and stores after the fence.
1600 */
1601 private static void acquireFence() {
1602 try {
1603 ACQUIRE_FENCE.invokeExact();
1604 } catch (Throwable e) {
1605 LinkageError error = new LinkageError();
1606 error.initCause(e);
1607 throw error;
1608 }
1609 }
1610
1611 private static void acquireFenceFallback() {
1612 // The volatile store prevents prior loads from being reordered past it.
1613 acquireFenceVariable = 1;
1614 // The volatile load prevents following loads and stores from being reordered before it.
1615 int ignore = acquireFenceVariable;
1616 // Note: putting the volatile store before the volatile load ensures that
1617 // surrounding loads and stores cannot be reordered "into" the fence.
1618 }
1619 }