1 /*
2 * Copyright 2026 The Netty Project
3 *
4 * The Netty Project licenses this file to you under the Apache License,
5 * version 2.0 (the "License"); you may not use this file except in compliance
6 * with the License. You may obtain a copy of the License at:
7 *
8 * https://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 * License for the specific language governing permissions and limitations
14 * under the License.
15 */
16 /*
17 * Written by Doug Lea with assistance from members of JCP JSR-166
18 * Expert Group and released to the public domain, as explained at
19 * https://creativecommons.org/publicdomain/zero/1.0/
20 *
21 * With substantial modifications by The Netty Project team.
22 */
23 package io.netty.util.concurrent;
24
25 import io.netty.util.internal.LongCounter;
26 import io.netty.util.internal.PlatformDependent;
27 import io.netty.util.internal.ThreadLocalRandom;
28
29 import java.util.Iterator;
30 import java.util.NoSuchElementException;
31 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
32
33 import static io.netty.util.internal.ObjectUtil.checkNotNull;
34
35 /**
36 * A scalable concurrent multimap implementation.
37 * The map is sorted according to the natural ordering of its {@code int} keys.
38 *
39 * <p>This class implements a concurrent variant of <a
40 * href="https://en.wikipedia.org/wiki/Skip_list" target="_top">SkipLists</a>
41 * providing expected average <i>log(n)</i> time cost for the
42 * {@code containsKey}, {@code get}, {@code put} and
43 * {@code remove} operations and their variants. Insertion, removal,
44 * update, and access operations safely execute concurrently by
45 * multiple threads.
46 *
47 * <p>This class is a multimap, which means the same key can be associated with
48 * multiple values. Each such instance will be represented by a separate
49 * {@code IntEntry}. There is no defined ordering for the values mapped to
50 * the same key.
51 *
52 * <p>As a multimap, certain atomic operations like {@code putIfPresent},
53 * {@code compute}, or {@code computeIfPresent}, cannot be supported.
54 * Likewise, some get-like operations cannot be supported.
55 *
56 * <p>Iterators and spliterators are
57 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
58 *
59 * <p>All {@code IntEntry} pairs returned by methods in this class
60 * represent snapshots of mappings at the time they were
61 * produced. They do <em>not</em> support the {@code Entry.setValue}
62 * method. (Note however that it is possible to change mappings in the
63 * associated map using {@code put}, {@code putIfAbsent}, or
64 * {@code replace}, depending on exactly which effect you need.)
65 *
66 * <p>Beware that bulk operations {@code putAll}, {@code equals},
67 * {@code toArray}, {@code containsValue}, and {@code clear} are
68 * <em>not</em> guaranteed to be performed atomically. For example, an
69 * iterator operating concurrently with a {@code putAll} operation
70 * might view only some of the added elements.
71 *
72 * <p>This class does <em>not</em> permit the use of {@code null} values
73 * because some null return values cannot be reliably distinguished from
74 * the absence of elements.
75 *
76 * @param <V> the type of mapped values
77 */
78 public class ConcurrentSkipListIntObjMultimap<V> implements Iterable<ConcurrentSkipListIntObjMultimap.IntEntry<V>> {
79 /*
80 * This class implements a tree-like two-dimensionally linked skip
81 * list in which the index levels are represented in separate
82 * nodes from the base nodes holding data. There are two reasons
83 * for taking this approach instead of the usual array-based
84 * structure: 1) Array based implementations seem to encounter
85 * more complexity and overhead 2) We can use cheaper algorithms
86 * for the heavily-traversed index lists than can be used for the
87 * base lists. Here's a picture of some of the basics for a
88 * possible list with 2 levels of index:
89 *
90 * Head nodes Index nodes
91 * +-+ right +-+ +-+
92 * |2|---------------->| |--------------------->| |->null
93 * +-+ +-+ +-+
94 * | down | |
95 * v v v
96 * +-+ +-+ +-+ +-+ +-+ +-+
97 * |1|----------->| |->| |------>| |----------->| |------>| |->null
98 * +-+ +-+ +-+ +-+ +-+ +-+
99 * v | | | | |
100 * Nodes next v v v v v
101 * +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+
102 * | |->|A|->|B|->|C|->|D|->|E|->|F|->|G|->|H|->|I|->|J|->|K|->null
103 * +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+
104 *
105 * The base lists use a variant of the HM linked ordered set
106 * algorithm. See Tim Harris, "A pragmatic implementation of
107 * non-blocking linked lists"
108 * https://www.cl.cam.ac.uk/~tlh20/publications.html and Maged
109 * Michael "High Performance Dynamic Lock-Free Hash Tables and
110 * List-Based Sets"
111 * https://www.research.ibm.com/people/m/michael/pubs.htm. The
112 * basic idea in these lists is to mark the "next" pointers of
113 * deleted nodes when deleting to avoid conflicts with concurrent
114 * insertions, and when traversing to keep track of triples
115 * (predecessor, node, successor) in order to detect when and how
116 * to unlink these deleted nodes.
117 *
118 * Rather than using mark-bits to mark list deletions (which can
119 * be slow and space-intensive using AtomicMarkedReference), nodes
120 * use direct CAS'able next pointers. On deletion, instead of
121 * marking a pointer, they splice in another node that can be
122 * thought of as standing for a marked pointer (see method
123 * unlinkNode). Using plain nodes acts roughly like "boxed"
124 * implementations of marked pointers, but uses new nodes only
125 * when nodes are deleted, not for every link. This requires less
126 * space and supports faster traversal. Even if marked references
127 * were better supported by JVMs, traversal using this technique
128 * might still be faster because any search need only read ahead
129 * one more node than otherwise required (to check for trailing
130 * marker) rather than unmasking mark bits or whatever on each
131 * read.
132 *
133 * This approach maintains the essential property needed in the HM
134 * algorithm of changing the next-pointer of a deleted node so
135 * that any other CAS of it will fail, but implements the idea by
136 * changing the pointer to point to a different node (with
137 * otherwise illegal null fields), not by marking it. While it
138 * would be possible to further squeeze space by defining marker
139 * nodes not to have key/value fields, it isn't worth the extra
140 * type-testing overhead. The deletion markers are rarely
141 * encountered during traversal, are easily detected via null
142 * checks that are needed anyway, and are normally quickly garbage
143 * collected. (Note that this technique would not work well in
144 * systems without garbage collection.)
145 *
146 * In addition to using deletion markers, the lists also use
147 * nullness of value fields to indicate deletion, in a style
148 * similar to typical lazy-deletion schemes. If a node's value is
149 * null, then it is considered logically deleted and ignored even
150 * though it is still reachable.
151 *
152 * Here's the sequence of events for a deletion of node n with
153 * predecessor b and successor f, initially:
154 *
155 * +------+ +------+ +------+
156 * ... | b |------>| n |----->| f | ...
157 * +------+ +------+ +------+
158 *
159 * 1. CAS n's value field from non-null to null.
160 * Traversals encountering a node with null value ignore it.
161 * However, ongoing insertions and deletions might still modify
162 * n's next pointer.
163 *
     *    2. CAS n's next pointer to point to a new marker node.
     *       From this point on, no other nodes can be appended to n,
     *       which avoids deletion errors in CAS-based linked lists.
167 *
168 * +------+ +------+ +------+ +------+
169 * ... | b |------>| n |----->|marker|------>| f | ...
170 * +------+ +------+ +------+ +------+
171 *
172 * 3. CAS b's next pointer over both n and its marker.
173 * From this point on, no new traversals will encounter n,
174 * and it can eventually be GCed.
175 * +------+ +------+
176 * ... | b |----------------------------------->| f | ...
177 * +------+ +------+
178 *
179 * A failure at step 1 leads to simple retry due to a lost race
180 * with another operation. Steps 2-3 can fail because some other
181 * thread noticed during a traversal a node with null value and
182 * helped out by marking and/or unlinking. This helping-out
183 * ensures that no thread can become stuck waiting for progress of
184 * the deleting thread.
185 *
186 * Skip lists add indexing to this scheme, so that the base-level
187 * traversals start close to the locations being found, inserted
188 * or deleted -- usually base level traversals only traverse a few
189 * nodes. This doesn't change the basic algorithm except for the
190 * need to make sure base traversals start at predecessors (here,
191 * b) that are not (structurally) deleted, otherwise retrying
192 * after processing the deletion.
193 *
194 * Index levels are maintained using CAS to link and unlink
195 * successors ("right" fields). Races are allowed in index-list
196 * operations that can (rarely) fail to link in a new index node.
197 * (We can't do this of course for data nodes.) However, even
198 * when this happens, the index lists correctly guide search.
199 * This can impact performance, but since skip lists are
200 * probabilistic anyway, the net result is that under contention,
201 * the effective "p" value may be lower than its nominal value.
202 *
203 * Index insertion and deletion sometimes require a separate
204 * traversal pass occurring after the base-level action, to add or
205 * remove index nodes. This adds to single-threaded overhead, but
206 * improves contended multithreaded performance by narrowing
207 * interference windows, and allows deletion to ensure that all
208 * index nodes will be made unreachable upon return from a public
209 * remove operation, thus avoiding unwanted garbage retention.
210 *
211 * Indexing uses skip list parameters that maintain good search
212 * performance while using sparser-than-usual indices: The
213 * hardwired parameters k=1, p=0.5 (see method doPut) mean that
214 * about one-quarter of the nodes have indices. Of those that do,
215 * half have one level, a quarter have two, and so on (see Pugh's
216 * Skip List Cookbook, sec 3.4), up to a maximum of 62 levels
217 * (appropriate for up to 2^63 elements). The expected total
218 * space requirement for a map is slightly less than for the
219 * current implementation of java.util.TreeMap.
220 *
     * Changing the level of the index (i.e., the height of the
222 * tree-like structure) also uses CAS. Creation of an index with
223 * height greater than the current level adds a level to the head
224 * index by CAS'ing on a new top-most head. To maintain good
225 * performance after a lot of removals, deletion methods
226 * heuristically try to reduce the height if the topmost levels
227 * appear to be empty. This may encounter races in which it is
228 * possible (but rare) to reduce and "lose" a level just as it is
229 * about to contain an index (that will then never be
230 * encountered). This does no structural harm, and in practice
231 * appears to be a better option than allowing unrestrained growth
232 * of levels.
233 *
234 * This class provides concurrent-reader-style memory consistency,
235 * ensuring that read-only methods report status and/or values no
236 * staler than those holding at method entry. This is done by
237 * performing all publication and structural updates using
238 * (volatile) CAS, placing an acquireFence in a few access
239 * methods, and ensuring that linked objects are transitively
240 * acquired via dependent reads (normally once) unless performing
241 * a volatile-mode CAS operation (that also acts as an acquire and
242 * release). This form of fence-hoisting is similar to RCU and
243 * related techniques (see McKenney's online book
     * https://www.kernel.org/pub/linux/kernel/people/paulmck/perfbook/perfbook.html).
245 * It minimizes overhead that may otherwise occur when using so
246 * many volatile-mode reads. Using explicit acquireFences is
247 * logistically easier than targeting particular fields to be read
248 * in acquire mode: fences are just hoisted up as far as possible,
249 * to the entry points or loop headers of a few methods. A
250 * potential disadvantage is that these few remaining fences are
251 * not easily optimized away by compilers under exclusively
252 * single-thread use. It requires some care to avoid volatile
253 * mode reads of other fields. (Note that the memory semantics of
254 * a reference dependently read in plain mode exactly once are
255 * equivalent to those for atomic opaque mode.) Iterators and
256 * other traversals encounter each node and value exactly once.
257 * Other operations locate an element (or position to insert an
258 * element) via a sequence of dereferences. This search is broken
259 * into two parts. Method findPredecessor (and its specialized
260 * embeddings) searches index nodes only, returning a base-level
261 * predecessor of the key. Callers carry out the base-level
262 * search, restarting if encountering a marker preventing link
263 * modification. In some cases, it is possible to encounter a
264 * node multiple times while descending levels. For mutative
265 * operations, the reported value is validated using CAS (else
266 * retrying), preserving linearizability with respect to each
267 * other. Others may return any (non-null) value holding in the
268 * course of the method call. (Search-based methods also include
269 * some useless-looking explicit null checks designed to allow
270 * more fields to be nulled out upon removal, to reduce floating
271 * garbage, but which is not currently done, pending discovery of
272 * a way to do this with less impact on other operations.)
273 *
274 * To produce random values without interference across threads,
275 * we use within-JDK thread local random support (via the
276 * "secondary seed", to avoid interference with user-level
277 * ThreadLocalRandom.)
278 *
279 * For explanation of algorithms sharing at least a couple of
280 * features with this one, see Mikhail Fomitchev's thesis
281 * (https://www.cs.yorku.ca/~mikhail/), Keir Fraser's thesis
282 * (https://www.cl.cam.ac.uk/users/kaf24/), and Hakan Sundell's
283 * thesis (https://www.cs.chalmers.se/~phs/).
284 *
285 * Notation guide for local variables
286 * Node: b, n, f, p for predecessor, node, successor, aux
287 * Index: q, r, d for index node, right, down.
288 * Head: h
289 * Keys: k, key
290 * Values: v, value
291 * Comparisons: c
292 */
293
294 /** No-key sentinel value */
295 private final int noKey;
296 /** Lazily initialized topmost index of the skiplist. */
297 private volatile /*XXX: Volatile only required for ARFU; remove if we can use VarHandle*/ Index<V> head;
298 /** Element count */
299 private final LongCounter adder;
300
301 /**
302 * Nodes hold keys and values, and are singly linked in sorted
303 * order, possibly with some intervening marker nodes. The list is
304 * headed by a header node accessible as head.node. Headers and
305 * marker nodes have null keys. The val field (but currently not
306 * the key field) is nulled out upon deletion.
307 */
308 static final class Node<V> {
309 final int key; // currently, never detached
310 volatile /*XXX: Volatile only required for ARFU; remove if we can use VarHandle*/ V val;
311 volatile /*XXX: Volatile only required for ARFU; remove if we can use VarHandle*/ Node<V> next;
312 Node(int key, V value, Node<V> next) {
313 this.key = key;
314 val = value;
315 this.next = next;
316 }
317 }
318
319 /**
320 * Index nodes represent the levels of the skip list.
321 */
322 static final class Index<V> {
323 final Node<V> node; // currently, never detached
324 final Index<V> down;
325 volatile /*XXX: Volatile only required for ARFU; remove if we can use VarHandle*/ Index<V> right;
326 Index(Node<V> node, Index<V> down, Index<V> right) {
327 this.node = node;
328 this.down = down;
329 this.right = right;
330 }
331 }
332
333 /**
334 * The multimap entry type with primitive {@code int} keys.
335 */
336 public static final class IntEntry<V> implements Comparable<IntEntry<V>> {
337 private final int key;
338 private final V value;
339
340 public IntEntry(int key, V value) {
341 this.key = key;
342 this.value = value;
343 }
344
345 /**
346 * Get the corresponding key.
347 */
348 public int getKey() {
349 return key;
350 }
351
352 /**
353 * Get the corresponding value.
354 */
355 public V getValue() {
356 return value;
357 }
358
359 @Override
360 public boolean equals(Object o) {
361 if (!(o instanceof IntEntry)) {
362 return false;
363 }
364
365 IntEntry<?> intEntry = (IntEntry<?>) o;
366 return key == intEntry.key && (value == intEntry.value || (value != null && value.equals(intEntry.value)));
367 }
368
369 @Override
370 public int hashCode() {
371 int result = key;
372 result = 31 * result + (value == null ? 0 : value.hashCode());
373 return result;
374 }
375
376 @Override
377 public String toString() {
378 return "IntEntry[" + key + " => " + value + ']';
379 }
380
381 @Override
382 public int compareTo(IntEntry<V> o) {
383 return cpr(key, o.key);
384 }
385 }
386
387 /* ---------------- Utilities -------------- */
388
389 /**
390 * Compares using comparator or natural ordering if null.
391 * Called only by methods that have performed required type checks.
392 */
393 static int cpr(int x, int y) {
394 return (x < y) ? -1 : x == y ? 0 : 1;
395 }
396
397 /**
398 * Returns the header for base node list, or null if uninitialized
399 */
400 final Node<V> baseHead() {
401 Index<V> h;
402 acquireFence();
403 return (h = head) == null ? null : h.node;
404 }
405
406 /**
407 * Tries to unlink deleted node n from predecessor b (if both
408 * exist), by first splicing in a marker if not already present.
409 * Upon return, node n is sure to be unlinked from b, possibly
410 * via the actions of some other thread.
411 *
412 * @param b if nonnull, predecessor
413 * @param n if nonnull, node known to be deleted
414 */
415 static <V> void unlinkNode(Node<V> b, Node<V> n, int noKey) {
416 if (b != null && n != null) {
417 Node<V> f, p;
418 for (;;) {
419 if ((f = n.next) != null && f.key == noKey) {
420 p = f.next; // already marked
421 break;
422 } else if (NEXT.compareAndSet(n, f,
423 new Node<V>(noKey, null, f))) {
424 p = f; // add marker
425 break;
426 }
427 }
428 NEXT.compareAndSet(b, n, p);
429 }
430 }
431
432 /**
433 * Adds to element count, initializing adder if necessary
434 *
435 * @param c count to add
436 */
437 private void addCount(long c) {
438 adder.add(c);
439 }
440
441 /**
442 * Returns element count, initializing adder if necessary.
443 */
444 final long getAdderCount() {
445 long c;
446 return (c = adder.value()) <= 0L ? 0L : c; // ignore transient negatives
447 }
448
449 /* ---------------- Traversal -------------- */
450
451 /**
452 * Returns an index node with key strictly less than given key.
453 * Also unlinks indexes to deleted nodes found along the way.
454 * Callers rely on this side-effect of clearing indices to deleted
455 * nodes.
456 *
457 * @param key if nonnull the key
458 * @return a predecessor node of key, or null if uninitialized or null key
459 */
460 private Node<V> findPredecessor(int key) {
461 Index<V> q;
462 acquireFence();
463 if ((q = head) == null || key == noKey) {
464 return null;
465 } else {
466 for (Index<V> r, d;;) {
467 while ((r = q.right) != null) {
468 Node<V> p; int k;
469 if ((p = r.node) == null || (k = p.key) == noKey ||
470 p.val == null) { // unlink index to deleted node
471 RIGHT.compareAndSet(q, r, r.right);
472 } else if (cpr(key, k) > 0) {
473 q = r;
474 } else {
475 break;
476 }
477 }
478 if ((d = q.down) != null) {
479 q = d;
480 } else {
481 return q.node;
482 }
483 }
484 }
485 }
486
487 /**
488 * Returns node holding key or null if no such, clearing out any
489 * deleted nodes seen along the way. Repeatedly traverses at
490 * base-level looking for key starting at predecessor returned
491 * from findPredecessor, processing base-level deletions as
492 * encountered. Restarts occur, at traversal step encountering
493 * node n, if n's key field is null, indicating it is a marker, so
494 * its predecessor is deleted before continuing, which we help do
495 * by re-finding a valid predecessor. The traversal loops in
496 * doPut, doRemove, and findNear all include the same checks.
497 *
498 * @param key the key
499 * @return node holding key, or null if no such
500 */
501 private Node<V> findNode(int key) {
502 if (key == noKey) {
503 throw new IllegalArgumentException(); // don't postpone errors
504 }
505 Node<V> b;
506 outer: while ((b = findPredecessor(key)) != null) {
507 for (;;) {
508 Node<V> n; int k; int c;
509 if ((n = b.next) == null) {
510 break outer; // empty
511 } else if ((k = n.key) == noKey) {
512 break; // b is deleted
513 } else if (n.val == null) {
514 unlinkNode(b, n, noKey); // n is deleted
515 } else if ((c = cpr(key, k)) > 0) {
516 b = n;
517 } else if (c == 0) {
518 return n;
519 } else {
520 break outer;
521 }
522 }
523 }
524 return null;
525 }
526
527 /**
528 * Gets value for key. Same idea as findNode, except skips over
529 * deletions and markers, and returns first encountered value to
530 * avoid possibly inconsistent rereads.
531 *
532 * @param key the key
533 * @return the value, or null if absent
534 */
535 private V doGet(int key) {
536 Index<V> q;
537 acquireFence();
538 if (key == noKey) {
539 throw new IllegalArgumentException();
540 }
541 V result = null;
542 if ((q = head) != null) {
543 outer: for (Index<V> r, d;;) {
544 while ((r = q.right) != null) {
545 Node<V> p; int k; V v; int c;
546 if ((p = r.node) == null || (k = p.key) == noKey ||
547 (v = p.val) == null) {
548 RIGHT.compareAndSet(q, r, r.right);
549 } else if ((c = cpr(key, k)) > 0) {
550 q = r;
551 } else if (c == 0) {
552 result = v;
553 break outer;
554 } else {
555 break;
556 }
557 }
558 if ((d = q.down) != null) {
559 q = d;
560 } else {
561 Node<V> b, n;
562 if ((b = q.node) != null) {
563 while ((n = b.next) != null) {
564 V v; int c;
565 int k = n.key;
566 if ((v = n.val) == null || k == noKey ||
567 (c = cpr(key, k)) > 0) {
568 b = n;
569 } else {
570 if (c == 0) {
571 result = v;
572 }
573 break;
574 }
575 }
576 }
577 break;
578 }
579 }
580 }
581 return result;
582 }
583
584 /* ---------------- Insertion -------------- */
585
586 /**
587 * Main insertion method. Adds element if not present, or
588 * replaces value if present and onlyIfAbsent is false.
589 *
590 * @param key the key
591 * @param value the value that must be associated with key
592 * @param onlyIfAbsent if should not insert if already present
593 */
594 private V doPut(int key, V value, boolean onlyIfAbsent) {
595 if (key == noKey) {
596 throw new IllegalArgumentException();
597 }
598 for (;;) {
599 Index<V> h; Node<V> b;
600 acquireFence();
601 int levels = 0; // number of levels descended
602 if ((h = head) == null) { // try to initialize
603 Node<V> base = new Node<V>(noKey, null, null);
604 h = new Index<V>(base, null, null);
605 b = HEAD.compareAndSet(this, null, h) ? base : null;
606 } else {
607 for (Index<V> q = h, r, d;;) { // count while descending
608 while ((r = q.right) != null) {
609 Node<V> p; int k;
610 if ((p = r.node) == null || (k = p.key) == noKey ||
611 p.val == null) {
612 RIGHT.compareAndSet(q, r, r.right);
613 } else if (cpr(key, k) > 0) {
614 q = r;
615 } else {
616 break;
617 }
618 }
619 if ((d = q.down) != null) {
620 ++levels;
621 q = d;
622 } else {
623 b = q.node;
624 break;
625 }
626 }
627 }
628 if (b != null) {
629 Node<V> z = null; // new node, if inserted
630 for (;;) { // find insertion point
631 Node<V> n, p; int k; V v; int c;
632 if ((n = b.next) == null) {
633 if (b.key == noKey) { // if empty, type check key now TODO: remove?
634 cpr(key, key);
635 }
636 c = -1;
637 } else if ((k = n.key) == noKey) {
638 break; // can't append; restart
639 } else if ((v = n.val) == null) {
640 unlinkNode(b, n, noKey);
641 c = 1;
642 } else if ((c = cpr(key, k)) > 0) {
643 b = n; // Multimap
644 // } else if (c == 0 &&
645 // (onlyIfAbsent || VAL.compareAndSet(n, v, value))) {
646 // return v;
647 }
648
649 if (c <= 0 &&
650 NEXT.compareAndSet(b, n,
651 p = new Node<V>(key, value, n))) {
652 z = p;
653 break;
654 }
655 }
656
657 if (z != null) {
658 int lr = ThreadLocalRandom.current().nextInt();
659 if ((lr & 0x3) == 0) { // add indices with 1/4 prob
660 int hr = ThreadLocalRandom.current().nextInt();
661 long rnd = ((long) hr << 32) | ((long) lr & 0xffffffffL);
662 int skips = levels; // levels to descend before add
663 Index<V> x = null;
664 for (;;) { // create at most 62 indices
665 x = new Index<V>(z, x, null);
666 if (rnd >= 0L || --skips < 0) {
667 break;
668 } else {
669 rnd <<= 1;
670 }
671 }
672 if (addIndices(h, skips, x, noKey) && skips < 0 &&
673 head == h) { // try to add new level
674 Index<V> hx = new Index<V>(z, x, null);
675 Index<V> nh = new Index<V>(h.node, h, hx);
676 HEAD.compareAndSet(this, h, nh);
677 }
678 if (z.val == null) { // deleted while adding indices
679 findPredecessor(key); // clean
680 }
681 }
682 addCount(1L);
683 return null;
684 }
685 }
686 }
687 }
688
689 /**
690 * Add indices after an insertion. Descends iteratively to the
691 * highest level of insertion, then recursively, to chain index
692 * nodes to lower ones. Returns null on (staleness) failure,
693 * disabling higher-level insertions. Recursion depths are
694 * exponentially less probable.
695 *
696 * @param q starting index for current level
697 * @param skips levels to skip before inserting
698 * @param x index for this insertion
699 */
700 static <V> boolean addIndices(Index<V> q, int skips, Index<V> x, int noKey) {
701 Node<V> z; int key;
702 if (x != null && (z = x.node) != null && (key = z.key) != noKey &&
703 q != null) { // hoist checks
704 boolean retrying = false;
705 for (;;) { // find splice point
706 Index<V> r, d; int c;
707 if ((r = q.right) != null) {
708 Node<V> p; int k;
709 if ((p = r.node) == null || (k = p.key) == noKey ||
710 p.val == null) {
711 RIGHT.compareAndSet(q, r, r.right);
712 c = 0;
713 } else if ((c = cpr(key, k)) > 0) {
714 q = r;
715 } else if (c == 0) {
716 break; // stale
717 }
718 } else {
719 c = -1;
720 }
721
722 if (c < 0) {
723 if ((d = q.down) != null && skips > 0) {
724 --skips;
725 q = d;
726 } else if (d != null && !retrying &&
727 !addIndices(d, 0, x.down, noKey)) {
728 break;
729 } else {
730 x.right = r;
731 if (RIGHT.compareAndSet(q, r, x)) {
732 return true;
733 } else {
734 retrying = true; // re-find splice point
735 }
736 }
737 }
738 }
739 }
740 return false;
741 }
742
743 /* ---------------- Deletion -------------- */
744
745 /**
746 * Main deletion method. Locates node, nulls value, appends a
747 * deletion marker, unlinks predecessor, removes associated index
748 * nodes, and possibly reduces head index level.
749 *
750 * @param key the key
751 * @param value if non-null, the value that must be
752 * associated with key
753 * @return the node, or null if not found
754 */
755 final V doRemove(int key, Object value) {
756 if (key == noKey) {
757 throw new IllegalArgumentException();
758 }
759 V result = null;
760 Node<V> b;
761 outer: while ((b = findPredecessor(key)) != null &&
762 result == null) {
763 for (;;) {
764 Node<V> n; int k; V v; int c;
765 if ((n = b.next) == null) {
766 break outer;
767 } else if ((k = n.key) == noKey) {
768 break;
769 } else if ((v = n.val) == null) {
770 unlinkNode(b, n, noKey);
771 } else if ((c = cpr(key, k)) > 0) {
772 b = n;
773 } else if (c < 0) {
774 break outer;
775 } else if (value != null && !value.equals(v)) {
776 // break outer;
777 b = n; // Multimap.
778 } else if (VAL.compareAndSet(n, v, null)) {
779 result = v;
780 unlinkNode(b, n, noKey);
781 break; // loop to clean up
782 }
783 }
784 }
785 if (result != null) {
786 tryReduceLevel();
787 addCount(-1L);
788 }
789 return result;
790 }
791
792 /**
793 * Possibly reduce head level if it has no nodes. This method can
794 * (rarely) make mistakes, in which case levels can disappear even
795 * though they are about to contain index nodes. This impacts
796 * performance, not correctness. To minimize mistakes as well as
797 * to reduce hysteresis, the level is reduced by one only if the
798 * topmost three levels look empty. Also, if the removed level
799 * looks non-empty after CAS, we try to change it back quick
800 * before anyone notices our mistake! (This trick works pretty
801 * well because this method will practically never make mistakes
802 * unless current thread stalls immediately before first CAS, in
803 * which case it is very unlikely to stall again immediately
804 * afterwards, so will recover.)
805 * <p>
806 * We put up with all this rather than just let levels grow
807 * because otherwise, even a small map that has undergone a large
808 * number of insertions and removals will have a lot of levels,
809 * slowing down access more than would an occasional unwanted
810 * reduction.
811 */
812 private void tryReduceLevel() {
813 Index<V> h, d, e;
814 if ((h = head) != null && h.right == null &&
815 (d = h.down) != null && d.right == null &&
816 (e = d.down) != null && e.right == null &&
817 HEAD.compareAndSet(this, h, d) &&
818 h.right != null) { // recheck
819 HEAD.compareAndSet(this, d, h); // try to backout
820 }
821 }
822
823 /* ---------------- Finding and removing first element -------------- */
824
825 /**
826 * Gets first valid node, unlinking deleted nodes if encountered.
827 * @return first node or null if empty
828 */
829 final Node<V> findFirst() {
830 Node<V> b, n;
831 if ((b = baseHead()) != null) {
832 while ((n = b.next) != null) {
833 if (n.val == null) {
834 unlinkNode(b, n, noKey);
835 } else {
836 return n;
837 }
838 }
839 }
840 return null;
841 }
842
843 /**
844 * Entry snapshot version of findFirst
845 */
846 final IntEntry<V> findFirstEntry() {
847 Node<V> b, n; V v;
848 if ((b = baseHead()) != null) {
849 while ((n = b.next) != null) {
850 if ((v = n.val) == null) {
851 unlinkNode(b, n, noKey);
852 } else {
853 return new IntEntry<V>(n.key, v);
854 }
855 }
856 }
857 return null;
858 }
859
860 /**
861 * Removes first entry; returns its snapshot.
862 * @return null if empty, else snapshot of first entry
863 */
864 private IntEntry<V> doRemoveFirstEntry() {
865 Node<V> b, n; V v;
866 if ((b = baseHead()) != null) {
867 while ((n = b.next) != null) {
868 if ((v = n.val) == null || VAL.compareAndSet(n, v, null)) {
869 int k = n.key;
870 unlinkNode(b, n, noKey);
871 if (v != null) {
872 tryReduceLevel();
873 findPredecessor(k); // clean index
874 addCount(-1L);
875 return new IntEntry<V>(k, v);
876 }
877 }
878 }
879 }
880 return null;
881 }
882
883 /* ---------------- Finding and removing last element -------------- */
884
885 /**
886 * Specialized version of find to get last valid node.
887 * @return last node or null if empty
888 */
889 final Node<V> findLast() {
890 outer: for (;;) {
891 Index<V> q; Node<V> b;
892 acquireFence();
893 if ((q = head) == null) {
894 break;
895 }
896 for (Index<V> r, d;;) {
897 while ((r = q.right) != null) {
898 Node<V> p;
899 if ((p = r.node) == null || p.val == null) {
900 RIGHT.compareAndSet(q, r, r.right);
901 } else {
902 q = r;
903 }
904 }
905 if ((d = q.down) != null) {
906 q = d;
907 } else {
908 b = q.node;
909 break;
910 }
911 }
912 if (b != null) {
913 for (;;) {
914 Node<V> n;
915 if ((n = b.next) == null) {
916 if (b.key == noKey) { // empty
917 break outer;
918 } else {
919 return b;
920 }
921 } else if (n.key == noKey) {
922 break;
923 } else if (n.val == null) {
924 unlinkNode(b, n, noKey);
925 } else {
926 b = n;
927 }
928 }
929 }
930 }
931 return null;
932 }
933
934 /**
935 * Entry version of findLast
936 * @return Entry for last node or null if empty
937 */
938 final IntEntry<V> findLastEntry() {
939 for (;;) {
940 Node<V> n; V v;
941 if ((n = findLast()) == null) {
942 return null;
943 }
944 if ((v = n.val) != null) {
945 return new IntEntry<V>(n.key, v);
946 }
947 }
948 }
949
950 /**
951 * Removes last entry; returns its snapshot.
952 * Specialized variant of doRemove.
953 * @return null if empty, else snapshot of last entry
954 */
    /**
     * Removes last entry; returns its snapshot.
     * Specialized variant of doRemove.
     *
     * <p>Descends the index moving right only while the candidate node
     * still has a successor, then walks the base list to the final node,
     * claims its deletion with a val CAS, unlinks it and cleans up the
     * index. Restarts from the top on interference.
     *
     * @return null if empty, else snapshot of last entry
     */
    private IntEntry<V> doRemoveLastEntry() {
        outer: for (;;) {
            Index<V> q; Node<V> b;
            acquireFence();
            if ((q = head) == null) {
                break;
            }
            for (;;) {
                Index<V> d, r; Node<V> p;
                while ((r = q.right) != null) {
                    // Prune index entries whose nodes are deleted.
                    if ((p = r.node) == null || p.val == null) {
                        RIGHT.compareAndSet(q, r, r.right);
                    } else if (p.next != null) {
                        q = r; // continue only if a successor
                    } else {
                        break;
                    }
                }
                if ((d = q.down) != null) {
                    q = d;
                } else {
                    b = q.node; // reached base level
                    break;
                }
            }
            if (b != null) {
                for (;;) {
                    Node<V> n; int k; V v;
                    if ((n = b.next) == null) {
                        if (b.key == noKey) { // empty
                            break outer;
                        } else {
                            break; // retry
                        }
                    } else if ((k = n.key) == noKey) {
                        break; // restart from the top
                    } else if ((v = n.val) == null) {
                        unlinkNode(b, n, noKey);
                    } else if (n.next != null) {
                        b = n; // not last any more; keep walking
                    } else if (VAL.compareAndSet(n, v, null)) {
                        // We claimed the deletion of the last node.
                        unlinkNode(b, n, noKey);
                        tryReduceLevel();
                        findPredecessor(k); // clean index
                        addCount(-1L);
                        return new IntEntry<V>(k, v);
                    }
                }
            }
        }
        return null;
    }
1007
1008 /* ---------------- Relational operations -------------- */
1009
    // Control values OR'ed as arguments to findNear

    private static final int EQ = 1; // accept an exact key match
    private static final int LT = 2; // search towards strictly lesser keys
    private static final int GT = 0; // Actually checked as !LT
1015
1016 /**
1017 * Variant of findNear returning IntEntry
1018 * @param key the key
1019 * @param rel the relation -- OR'ed combination of EQ, LT, GT
1020 * @return Entry fitting relation, or null if no such
1021 */
1022 final IntEntry<V> findNearEntry(int key, int rel) {
1023 for (;;) {
1024 Node<V> n; V v;
1025 if ((n = findNear(key, rel)) == null) {
1026 return null;
1027 }
1028 if ((v = n.val) != null) {
1029 return new IntEntry<V>(n.key, v);
1030 }
1031 }
1032 }
1033
1034 /**
1035 * Utility for ceiling, floor, lower, higher methods.
1036 * @param key the key
1037 * @param rel the relation -- OR'ed combination of EQ, LT, GT
1038 * @return nearest node fitting relation, or null if no such
1039 */
    /**
     * Utility for ceiling, floor, lower, higher methods.
     * @param key the key
     * @param rel the relation -- OR'ed combination of EQ, LT, GT
     * @return nearest node fitting relation, or null if no such
     * @throws IllegalArgumentException if key is the {@code noKey} sentinel
     */
    final Node<V> findNear(int key, int rel) {
        if (key == noKey) {
            throw new IllegalArgumentException();
        }
        Node<V> result;
        outer: for (Node<V> b;;) {
            if ((b = findPredecessor(key)) == null) {
                result = null;
                break; // empty
            }
            for (;;) {
                Node<V> n; int k; int c;
                if ((n = b.next) == null) {
                    // End of list: for LT relations the predecessor itself
                    // is the answer, unless it is the header node.
                    result = (rel & LT) != 0 && b.key != noKey ? b : null;
                    break outer;
                } else if ((k = n.key) == noKey) {
                    break; // restart: retraverse from findPredecessor
                } else if (n.val == null) {
                    unlinkNode(b, n, noKey); // help remove deleted node
                } else if (((c = cpr(key, k)) == 0 && (rel & EQ) != 0) ||
                    (c < 0 && (rel & LT) == 0)) {
                    // Exact match accepted, or first greater node for GT.
                    result = n;
                    break outer;
                } else if (c <= 0 && (rel & LT) != 0) {
                    // Passed the key going down: predecessor is the floor.
                    result = b.key != noKey ? b : null;
                    break outer;
                } else {
                    b = n;
                }
            }
        }
        return result;
    }
1073
1074 /* ---------------- Constructors -------------- */
1075
1076 /**
1077 * Constructs a new, empty map, sorted according to the
1078 * {@linkplain Comparable natural ordering} of the keys.
1079 * @param noKey The value to use as a sentinel for signaling the absence of a key.
1080 */
1081 public ConcurrentSkipListIntObjMultimap(int noKey) {
1082 this.noKey = noKey;
1083 adder = PlatformDependent.newLongCounter();
1084 }
1085
1086 /* ------ Map API methods ------ */
1087
1088 /**
1089 * Returns {@code true} if this map contains a mapping for the specified
1090 * key.
1091 *
1092 * @param key key whose presence in this map is to be tested
1093 * @return {@code true} if this map contains a mapping for the specified key
1094 * @throws ClassCastException if the specified key cannot be compared
1095 * with the keys currently in the map
1096 * @throws NullPointerException if the specified key is null
1097 */
1098 public boolean containsKey(int key) {
1099 return doGet(key) != null;
1100 }
1101
1102 /**
1103 * Returns the value to which the specified key is mapped,
1104 * or {@code null} if this map contains no mapping for the key.
1105 *
1106 * <p>More formally, if this map contains a mapping from a key
1107 * {@code k} to a value {@code v} such that {@code key} compares
1108 * equal to {@code k} according to the map's ordering, then this
1109 * method returns {@code v}; otherwise it returns {@code null}.
1110 * (There can be at most one such mapping.)
1111 *
1112 * @throws ClassCastException if the specified key cannot be compared
1113 * with the keys currently in the map
1114 * @throws NullPointerException if the specified key is null
1115 */
1116 public V get(int key) {
1117 return doGet(key);
1118 }
1119
1120 /**
1121 * Returns the value to which the specified key is mapped,
1122 * or the given defaultValue if this map contains no mapping for the key.
1123 *
1124 * @param key the key
1125 * @param defaultValue the value to return if this map contains
1126 * no mapping for the given key
1127 * @return the mapping for the key, if present; else the defaultValue
1128 * @throws NullPointerException if the specified key is null
1129 * @since 1.8
1130 */
1131 public V getOrDefault(int key, V defaultValue) {
1132 V v;
1133 return (v = doGet(key)) == null ? defaultValue : v;
1134 }
1135
1136 /**
1137 * Associates the specified value with the specified key in this map.
1138 * If the map previously contained a mapping for the key, the old
1139 * value is replaced.
1140 *
1141 * @param key key with which the specified value is to be associated
1142 * @param value value to be associated with the specified key
1143 * @throws ClassCastException if the specified key cannot be compared
1144 * with the keys currently in the map
1145 * @throws NullPointerException if the specified key or value is null
1146 */
1147 public void put(int key, V value) {
1148 checkNotNull(value, "value");
1149 doPut(key, value, false);
1150 }
1151
1152 /**
1153 * Removes the mapping for the specified key from this map if present.
1154 *
1155 * @param key key for which mapping should be removed
1156 * @return the previous value associated with the specified key, or
1157 * {@code null} if there was no mapping for the key
1158 * @throws ClassCastException if the specified key cannot be compared
1159 * with the keys currently in the map
1160 * @throws NullPointerException if the specified key is null
1161 */
1162 public V remove(int key) {
1163 return doRemove(key, null);
1164 }
1165
1166 /**
1167 * Returns {@code true} if this map maps one or more keys to the
1168 * specified value. This operation requires time linear in the
1169 * map size. Additionally, it is possible for the map to change
1170 * during execution of this method, in which case the returned
1171 * result may be inaccurate.
1172 *
1173 * @param value value whose presence in this map is to be tested
1174 * @return {@code true} if a mapping to {@code value} exists;
1175 * {@code false} otherwise
1176 * @throws NullPointerException if the specified value is null
1177 */
1178 public boolean containsValue(Object value) {
1179 checkNotNull(value, "value");
1180 Node<V> b, n; V v;
1181 if ((b = baseHead()) != null) {
1182 while ((n = b.next) != null) {
1183 if ((v = n.val) != null && value.equals(v)) {
1184 return true;
1185 } else {
1186 b = n;
1187 }
1188 }
1189 }
1190 return false;
1191 }
1192
1193 /**
1194 * Get the approximate size of the collection.
1195 */
1196 public int size() {
1197 long c;
1198 return baseHead() == null ? 0 :
1199 (c = getAdderCount()) >= Integer.MAX_VALUE ?
1200 Integer.MAX_VALUE : (int) c;
1201 }
1202
1203 /**
1204 * Check if the collection is empty.
1205 */
1206 public boolean isEmpty() {
1207 return findFirst() == null;
1208 }
1209
1210 /**
1211 * Removes all of the mappings from this map.
1212 */
1213 public void clear() {
1214 Index<V> h, r, d; Node<V> b;
1215 acquireFence();
1216 while ((h = head) != null) {
1217 if ((r = h.right) != null) { // remove indices
1218 RIGHT.compareAndSet(h, r, null);
1219 } else if ((d = h.down) != null) { // remove levels
1220 HEAD.compareAndSet(this, h, d);
1221 } else {
1222 long count = 0L;
1223 if ((b = h.node) != null) { // remove nodes
1224 Node<V> n; V v;
1225 while ((n = b.next) != null) {
1226 if ((v = n.val) != null &&
1227 VAL.compareAndSet(n, v, null)) {
1228 --count;
1229 v = null;
1230 }
1231 if (v == null) {
1232 unlinkNode(b, n, noKey);
1233 }
1234 }
1235 }
1236 if (count != 0L) {
1237 addCount(count);
1238 } else {
1239 break;
1240 }
1241 }
1242 }
1243 }
1244
1245 /* ------ ConcurrentMap API methods ------ */
1246
1247 /**
1248 * Remove the specific entry with the given key and value, if it exist.
1249 *
1250 * @throws ClassCastException if the specified key cannot be compared
1251 * with the keys currently in the map
1252 * @throws NullPointerException if the specified key is null
1253 */
1254 public boolean remove(int key, Object value) {
1255 if (key == noKey) {
1256 throw new IllegalArgumentException();
1257 }
1258 return value != null && doRemove(key, value) != null;
1259 }
1260
1261 /**
1262 * Replace the specific entry with the given key and value, with the given replacement value,
1263 * if such an entry exist.
1264 *
1265 * @throws ClassCastException if the specified key cannot be compared
1266 * with the keys currently in the map
1267 * @throws NullPointerException if any of the arguments are null
1268 */
1269 public boolean replace(int key, V oldValue, V newValue) {
1270 if (key == noKey) {
1271 throw new IllegalArgumentException();
1272 }
1273 checkNotNull(oldValue, "oldValue");
1274 checkNotNull(newValue, "newValue");
1275 for (;;) {
1276 Node<V> n; V v;
1277 if ((n = findNode(key)) == null) {
1278 return false;
1279 }
1280 if ((v = n.val) != null) {
1281 if (!oldValue.equals(v)) {
1282 return false;
1283 }
1284 if (VAL.compareAndSet(n, v, newValue)) {
1285 return true;
1286 }
1287 }
1288 }
1289 }
1290
1291 /* ------ SortedMap API methods ------ */
1292
1293 public int firstKey() {
1294 Node<V> n = findFirst();
1295 if (n == null) {
1296 return noKey;
1297 }
1298 return n.key;
1299 }
1300
1301 public int lastKey() {
1302 Node<V> n = findLast();
1303 if (n == null) {
1304 return noKey;
1305 }
1306 return n.key;
1307 }
1308
1309 /* ---------------- Relational operations -------------- */
1310
1311 /**
1312 * Returns a key-value mapping associated with the greatest key
1313 * strictly less than the given key, or {@code null} if there is
1314 * no such key. The returned entry does <em>not</em> support the
1315 * {@code Entry.setValue} method.
1316 *
1317 * @throws NullPointerException if the specified key is null
1318 */
1319 public IntEntry<V> lowerEntry(int key) {
1320 return findNearEntry(key, LT);
1321 }
1322
1323 /**
1324 * @throws NullPointerException if the specified key is null
1325 */
1326 public int lowerKey(int key) {
1327 Node<V> n = findNear(key, LT);
1328 return n == null ? noKey : n.key;
1329 }
1330
1331 /**
1332 * Returns a key-value mapping associated with the greatest key
1333 * less than or equal to the given key, or {@code null} if there
1334 * is no such key. The returned entry does <em>not</em> support
1335 * the {@code Entry.setValue} method.
1336 *
1337 * @param key the key
1338 * @throws NullPointerException if the specified key is null
1339 */
1340 public IntEntry<V> floorEntry(int key) {
1341 return findNearEntry(key, LT | EQ);
1342 }
1343
1344 /**
1345 * @param key the key
1346 * @throws NullPointerException if the specified key is null
1347 */
1348 public int floorKey(int key) {
1349 Node<V> n = findNear(key, LT | EQ);
1350 return n == null ? noKey : n.key;
1351 }
1352
1353 /**
1354 * Returns a key-value mapping associated with the least key
1355 * greater than or equal to the given key, or {@code null} if
1356 * there is no such entry. The returned entry does <em>not</em>
1357 * support the {@code Entry.setValue} method.
1358 *
1359 * @throws NullPointerException if the specified key is null
1360 */
1361 public IntEntry<V> ceilingEntry(int key) {
1362 return findNearEntry(key, GT | EQ);
1363 }
1364
1365 /**
1366 * @throws NullPointerException if the specified key is null
1367 */
1368 public int ceilingKey(int key) {
1369 Node<V> n = findNear(key, GT | EQ);
1370 return n == null ? noKey : n.key;
1371 }
1372
1373 /**
1374 * Returns a key-value mapping associated with the least key
1375 * strictly greater than the given key, or {@code null} if there
1376 * is no such key. The returned entry does <em>not</em> support
1377 * the {@code Entry.setValue} method.
1378 *
1379 * @param key the key
1380 * @throws NullPointerException if the specified key is null
1381 */
1382 public IntEntry<V> higherEntry(int key) {
1383 return findNearEntry(key, GT);
1384 }
1385
1386 /**
1387 * @param key the key
1388 * @throws NullPointerException if the specified key is null
1389 */
1390 public int higherKey(int key) {
1391 Node<V> n = findNear(key, GT);
1392 return n == null ? noKey : n.key;
1393 }
1394
1395 /**
1396 * Returns a key-value mapping associated with the least
1397 * key in this map, or {@code null} if the map is empty.
1398 * The returned entry does <em>not</em> support
1399 * the {@code Entry.setValue} method.
1400 */
1401 public IntEntry<V> firstEntry() {
1402 return findFirstEntry();
1403 }
1404
1405 /**
1406 * Returns a key-value mapping associated with the greatest
1407 * key in this map, or {@code null} if the map is empty.
1408 * The returned entry does <em>not</em> support
1409 * the {@code Entry.setValue} method.
1410 */
1411 public IntEntry<V> lastEntry() {
1412 return findLastEntry();
1413 }
1414
1415 /**
1416 * Removes and returns a key-value mapping associated with
1417 * the least key in this map, or {@code null} if the map is empty.
1418 * The returned entry does <em>not</em> support
1419 * the {@code Entry.setValue} method.
1420 */
1421 public IntEntry<V> pollFirstEntry() {
1422 return doRemoveFirstEntry();
1423 }
1424
1425 /**
1426 * Removes and returns a key-value mapping associated with
1427 * the greatest key in this map, or {@code null} if the map is empty.
1428 * The returned entry does <em>not</em> support
1429 * the {@code Entry.setValue} method.
1430 */
1431 public IntEntry<V> pollLastEntry() {
1432 return doRemoveLastEntry();
1433 }
1434
1435 public IntEntry<V> pollCeilingEntry(int key) {
1436 // TODO optimize this
1437 Node<V> node;
1438 V val;
1439 do {
1440 node = findNear(key, GT | EQ);
1441 if (node == null) {
1442 return null;
1443 }
1444 val = node.val;
1445 } while (val == null || !remove(node.key, val));
1446 return new IntEntry<V>(node.key, val);
1447 }
1448
1449 /* ---------------- Iterators -------------- */
1450
1451 /**
1452 * Base of iterator classes
1453 */
    /**
     * Base of iterator classes.
     *
     * <p>The next value is pre-read into {@code nextValue} when advancing,
     * so {@code next()} never observes a node whose value was nulled by a
     * concurrent deletion after it was selected.
     */
    abstract class Iter<T> implements Iterator<T> {
        /** the last node returned by next() */
        Node<V> lastReturned;
        /** the next node to return from next(); */
        Node<V> next;
        /** Cache of next value field to maintain weak consistency */
        V nextValue;

        /** Initializes ascending iterator for entire range. */
        Iter() {
            advance(baseHead());
        }

        @Override
        public final boolean hasNext() {
            return next != null;
        }

        /** Advances next to higher entry. */
        final void advance(Node<V> b) {
            Node<V> n = null;
            V v = null;
            if ((lastReturned = b) != null) {
                // Skip over logically deleted (null-val) nodes.
                while ((n = b.next) != null && (v = n.val) == null) {
                    b = n;
                }
            }
            nextValue = v;
            next = n;
        }

        @Override
        public final void remove() {
            Node<V> n; int k;
            if ((n = lastReturned) == null || (k = n.key) == noKey) {
                throw new IllegalStateException();
            }
            // It would not be worth all of the overhead to directly
            // unlink from here. Using remove is fast enough.
            // NOTE(review): n.val may already be null if the entry was
            // deleted concurrently; remove(k, null) is then a no-op.
            ConcurrentSkipListIntObjMultimap.this.remove(k, n.val); // TODO: inline and optimize this
            lastReturned = null;
        }
    }
1497
1498 final class EntryIterator extends Iter<IntEntry<V>> {
1499 @Override
1500 public IntEntry<V> next() {
1501 Node<V> n;
1502 if ((n = next) == null) {
1503 throw new NoSuchElementException();
1504 }
1505 int k = n.key;
1506 V v = nextValue;
1507 advance(n);
1508 return new IntEntry<V>(k, v);
1509 }
1510 }
1511
    /**
     * Returns a weakly consistent iterator over this map's entries in
     * ascending key order.
     */
    @Override
    public Iterator<IntEntry<V>> iterator() {
        return new EntryIterator();
    }
1516
    // VarHandle mechanics
    // AtomicReferenceFieldUpdaters stand in for the VarHandle-based CAS
    // operations of the JDK original so the class can run on older JVMs.
    private static final AtomicReferenceFieldUpdater<ConcurrentSkipListIntObjMultimap<?>, Index<?>> HEAD;
    private static final AtomicReferenceFieldUpdater<Node<?>, Node<?>> NEXT;
    private static final AtomicReferenceFieldUpdater<Node<?>, Object> VAL;
    private static final AtomicReferenceFieldUpdater<Index<?>, Index<?>> RIGHT;
    // Dummy volatile used only by acquireFence() below.
    private static volatile int acquireFenceVariable;
    static {
        // The cls() helper erases the generic types, which newUpdater's
        // signature cannot express directly for these wildcard classes.
        Class<ConcurrentSkipListIntObjMultimap<?>> mapCls = cls(ConcurrentSkipListIntObjMultimap.class);
        Class<Index<?>> indexCls = cls(Index.class);
        Class<Node<?>> nodeCls = cls(Node.class);

        HEAD = AtomicReferenceFieldUpdater.newUpdater(mapCls, indexCls, "head");
        NEXT = AtomicReferenceFieldUpdater.newUpdater(nodeCls, nodeCls, "next");
        VAL = AtomicReferenceFieldUpdater.newUpdater(nodeCls, Object.class, "val");
        RIGHT = AtomicReferenceFieldUpdater.newUpdater(indexCls, indexCls, "right");
    }
1533
    /**
     * Unchecked-cast helper that views a raw {@code Class} as a generic
     * one; used by the static initializer above to build the field
     * updaters for wildcard-typed classes.
     */
    @SuppressWarnings("unchecked")
    private static <T> Class<T> cls(Class<?> cls) {
        return (Class<T>) cls;
    }
1538
1539 /**
1540 * Orders LOADS before the fence, with LOADS and STORES after the fence.
1541 */
1542 private static void acquireFence() {
1543 // Volatile store prevent prior loads from ordering down.
1544 acquireFenceVariable = 1;
1545 // Volatile load prevent following loads and stores from ordering up.
1546 int ignore = acquireFenceVariable;
1547 // Note: Putting the volatile store before the volatile load ensures
1548 // surrounding loads and stores don't order "into" the fence.
1549 }
1550 }