1   /*
2    * Copyright 2022 The Netty Project
3    *
4    * The Netty Project licenses this file to you under the Apache License,
5    * version 2.0 (the "License"); you may not use this file except in compliance
6    * with the License. You may obtain a copy of the License at:
7    *
8    *   https://www.apache.org/licenses/LICENSE-2.0
9    *
10   * Unless required by applicable law or agreed to in writing, software
11   * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12   * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13   * License for the specific language governing permissions and limitations
14   * under the License.
15   */
16  package io.netty.buffer;
17  
18  import io.netty.util.ByteProcessor;
19  import io.netty.util.CharsetUtil;
20  import io.netty.util.IllegalReferenceCountException;
21  import io.netty.util.NettyRuntime;
22  import io.netty.util.Recycler;
23  import io.netty.util.Recycler.EnhancedHandle;
24  import io.netty.util.ReferenceCounted;
25  import io.netty.util.concurrent.FastThreadLocal;
26  import io.netty.util.concurrent.FastThreadLocalThread;
27  import io.netty.util.concurrent.MpscIntQueue;
28  import io.netty.util.internal.AtomicReferenceCountUpdater;
29  import io.netty.util.internal.ObjectPool;
30  import io.netty.util.internal.ObjectUtil;
31  import io.netty.util.internal.PlatformDependent;
32  import io.netty.util.internal.ReferenceCountUpdater;
33  import io.netty.util.internal.SystemPropertyUtil;
34  import io.netty.util.internal.ThreadExecutorMap;
35  import io.netty.util.internal.UnsafeReferenceCountUpdater;
36  import io.netty.util.internal.UnstableApi;
37  import io.netty.util.internal.VarHandleReferenceCountUpdater;
38  
39  import java.io.IOException;
40  import java.io.InputStream;
41  import java.io.OutputStream;
42  import java.lang.invoke.MethodHandles;
43  import java.lang.invoke.VarHandle;
44  import java.nio.ByteBuffer;
45  import java.nio.ByteOrder;
46  import java.nio.channels.ClosedChannelException;
47  import java.nio.channels.FileChannel;
48  import java.nio.channels.GatheringByteChannel;
49  import java.nio.channels.ScatteringByteChannel;
50  import java.nio.charset.Charset;
51  import java.util.Arrays;
52  import java.util.Queue;
53  import java.util.concurrent.ConcurrentLinkedQueue;
54  import java.util.concurrent.ThreadLocalRandom;
55  import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
56  import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
57  import java.util.concurrent.atomic.LongAdder;
58  import java.util.concurrent.locks.StampedLock;
59  import java.util.function.IntSupplier;
60  
61  import static io.netty.util.internal.ReferenceCountUpdater.getUnsafeOffset;
62  import static java.util.concurrent.atomic.AtomicIntegerFieldUpdater.newUpdater;
63  
64  /**
65   * An auto-tuning pooling allocator that follows an anti-generational hypothesis.
66   * <p>
67   * The allocator is organized into a list of Magazines, and each magazine has a chunk-buffer that it allocates
68   * buffers from.
69   * <p>
70   * The magazines hold the mutexes that ensure the thread-safety of the allocator, and each thread picks a magazine
71   * based on the id of the thread. This spreads the contention of multi-threaded access across the magazines.
72   * If contention is detected above a certain threshold, the number of magazines is increased in response to the
73   * contention.
74   * <p>
75   * The magazines maintain histograms of the sizes of the allocations they do. The histograms are used to compute the
76   * preferred chunk size. The preferred chunk size is one that is big enough to service 10 allocations of the
77   * 99-percentile size. This way, the chunk size is adapted to the allocation patterns.
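 * For example, if the 99-percentile allocation size is 64 KiB, the preferred chunk size becomes 640 KiB.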
78   * <p>
79   * Computing the preferred chunk size is a somewhat expensive operation. Therefore, the frequency with which this is
80   * done is also adapted to the allocation pattern. If a newly computed preferred chunk size is the same as the
81   * previous preferred chunk size, then the frequency is reduced. Otherwise, the frequency is increased.
82   * <p>
83   * This allows the allocator to quickly respond to changes in the application workload,
84   * without suffering undue overhead from maintaining its statistics.
85   * <p>
86   * Since magazines are "relatively thread-local", the allocator has a central queue that allows excess chunks from any
87   * magazine to be shared with other magazines.
88   * The {@link #createSharedChunkQueue()} method can be overridden to customize this queue.
89   */
90  @UnstableApi
91  final class AdaptivePoolingAllocator {
92      /**
93       * The 128 KiB minimum chunk size is chosen to encourage the system allocator to delegate to mmap for chunk
94       * allocations. For instance, glibc will do this.
95       * This pushes any fragmentation from chunk size deviations off physical memory, onto virtual memory,
96       * which is a much, much larger space. Chunks are also allocated in whole multiples of the minimum
97       * chunk size, which itself is a whole multiple of popular page sizes like 4 KiB, 16 KiB, and 64 KiB.
98       */
99      static final int MIN_CHUNK_SIZE = 128 * 1024;
100     private static final int EXPANSION_ATTEMPTS = 3;
101     private static final int INITIAL_MAGAZINES = 1;
102     private static final int RETIRE_CAPACITY = 256;
103     private static final int MAX_STRIPES = NettyRuntime.availableProcessors() * 2;
104     private static final int BUFS_PER_CHUNK = 10; // For large buffers, aim to have about this many buffers per chunk.
105 
106     /**
107      * The maximum size of a pooled chunk, in bytes. Allocations bigger than this will never be pooled.
108      * <p>
109      * This number is 8 MiB, and is derived from the limitations of internal histograms.
110      */
111     private static final int MAX_CHUNK_SIZE = 8 * 1024 * 1024; // 8 MiB.
112     private static final int MAX_POOLED_BUF_SIZE = MAX_CHUNK_SIZE / BUFS_PER_CHUNK;
113 
114     /**
115      * The capacity of the chunk reuse queues that allow chunks to be shared across magazines in a group.
116      * The default size is twice {@link NettyRuntime#availableProcessors()},
117      * same as the maximum number of magazines per magazine group.
118      */
119     private static final int CHUNK_REUSE_QUEUE = Math.max(2, SystemPropertyUtil.getInt(
120             "io.netty.allocator.chunkReuseQueueCapacity", NettyRuntime.availableProcessors() * 2));
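    // The capacity can be tuned via the system property named above, e.g. by passing
    // -Dio.netty.allocator.chunkReuseQueueCapacity=64 on the JVM command line.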
121 
122     /**
123      * The capacity of the magazine-local buffer queue. This queue only pools the outer ByteBuf instance, not
124      * the actual memory, and so helps to reduce GC pressure.
125      */
126     private static final int MAGAZINE_BUFFER_QUEUE_CAPACITY = SystemPropertyUtil.getInt(
127             "io.netty.allocator.magazineBufferQueueCapacity", 1024);
128 
129     /**
130      * The size classes are chosen based on the following observation:
131      * <p>
132      * Most allocations, particularly ones above 256 bytes, tend to be a power-of-2. However, many use cases, such
133      * as framing protocols, are themselves operating on or moving power-of-2 sized payloads, to which they add a
134      * small amount of overhead, such as headers or checksums.
135      * This means we seem to get a lot of mileage out of having both power-of-2 sizes, and power-of-2-plus-a-bit.
136      * <p>
137      * Given the conflicting requirements of having as few chunks as possible, and as little wasted
138      * memory within each chunk as possible, this seems to strike a surprisingly good balance for the use cases
139      * tested so far.
140      */
141     private static final int[] SIZE_CLASSES = {
142             32,
143             64,
144             128,
145             256,
146             512,
147             640, // 512 + 128
148             1024,
149             1152, // 1024 + 128
150             2048,
151             2304, // 2048 + 256
152             4096,
153             4352, // 4096 + 256
154             8192,
155             8704, // 8192 + 512
156             16384,
157             16896, // 16384 + 512
158     };
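    // For example, a framing protocol that wraps 1024-byte payloads with up to 128 bytes of headers fits the
    // 1152-byte class, instead of being rounded all the way up to the 2048-byte class.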
159 
160     private static final int SIZE_CLASSES_COUNT = SIZE_CLASSES.length;
161     private static final byte[] SIZE_INDEXES = new byte[(SIZE_CLASSES[SIZE_CLASSES_COUNT - 1] / 32) + 1];
162 
163     static {
164         if (MAGAZINE_BUFFER_QUEUE_CAPACITY < 2) {
165             throw new IllegalArgumentException("MAGAZINE_BUFFER_QUEUE_CAPACITY: " + MAGAZINE_BUFFER_QUEUE_CAPACITY
166                     + " (expected: >= " + 2 + ')');
167         }
168         int lastIndex = 0;
169         for (int i = 0; i < SIZE_CLASSES_COUNT; i++) {
170             int sizeClass = SIZE_CLASSES[i];
171             //noinspection ConstantValue
172             assert (sizeClass & 31) == 0 : "Size class must be a multiple of 32";
173             int sizeIndex = sizeIndexOf(sizeClass);
174             Arrays.fill(SIZE_INDEXES, lastIndex + 1, sizeIndex + 1, (byte) i);
175             lastIndex = sizeIndex;
176         }
177     }
178 
179     private final ChunkAllocator chunkAllocator;
180     private final ChunkRegistry chunkRegistry;
181     private final MagazineGroup[] sizeClassedMagazineGroups;
182     private final MagazineGroup largeBufferMagazineGroup;
183     private final FastThreadLocal<MagazineGroup[]> threadLocalGroup;
184 
185     AdaptivePoolingAllocator(ChunkAllocator chunkAllocator, boolean useCacheForNonEventLoopThreads) {
186         this.chunkAllocator = ObjectUtil.checkNotNull(chunkAllocator, "chunkAllocator");
187         chunkRegistry = new ChunkRegistry();
188         sizeClassedMagazineGroups = createMagazineGroupSizeClasses(this, false);
189         largeBufferMagazineGroup = new MagazineGroup(
190                 this, chunkAllocator, new HistogramChunkControllerFactory(true), false);
191 
192         threadLocalGroup = new FastThreadLocal<MagazineGroup[]>() {
193             @Override
194             protected MagazineGroup[] initialValue() {
195                 if (useCacheForNonEventLoopThreads || ThreadExecutorMap.currentExecutor() != null) {
196                     return createMagazineGroupSizeClasses(AdaptivePoolingAllocator.this, true);
197                 }
198                 return null;
199             }
200 
201             @Override
202             protected void onRemoval(final MagazineGroup[] groups) throws Exception {
203                 if (groups != null) {
204                     for (MagazineGroup group : groups) {
205                         group.free();
206                     }
207                 }
208             }
209         };
210     }
211 
212     private static MagazineGroup[] createMagazineGroupSizeClasses(
213             AdaptivePoolingAllocator allocator, boolean isThreadLocal) {
214         MagazineGroup[] groups = new MagazineGroup[SIZE_CLASSES.length];
215         for (int i = 0; i < SIZE_CLASSES.length; i++) {
216             int segmentSize = SIZE_CLASSES[i];
217             groups[i] = new MagazineGroup(allocator, allocator.chunkAllocator,
218                     new SizeClassChunkControllerFactory(segmentSize), isThreadLocal);
219         }
220         return groups;
221     }
222 
223     /**
224      * Create a thread-safe multi-producer, multi-consumer queue to hold chunks that spill over from the
225      * internal Magazines.
226      * <p>
227      * Each Magazine can only hold two chunks at any one time: the chunk it currently allocates from,
228      * and the next-in-line chunk which will be used for allocation once the current one has been used up.
229      * This queue will be used by magazines to share any excess chunks they allocate, so that they don't need to
230      * allocate new chunks when their current and next-in-line chunks have both been used up.
231      * <p>
232      * The simplest implementation of this method is to return a new {@link ConcurrentLinkedQueue}.
233      * However, the {@code CLQ} is unbounded, and this means there's no limit to how many chunks can be cached in this
234      * queue.
235      * <p>
236      * Each chunk in this queue can be up to {@link #MAX_CHUNK_SIZE} in size, so it is recommended to use a bounded
237      * queue to limit the maximum memory usage.
238      * <p>
239      * The default implementation will create a bounded queue with a capacity of {@link #CHUNK_REUSE_QUEUE}.
240      *
241      * @return A new multi-producer, multi-consumer queue.
242      */
243     private static Queue<Chunk> createSharedChunkQueue() {
244         return PlatformDependent.newFixedMpmcQueue(CHUNK_REUSE_QUEUE);
245     }
246 
247     ByteBuf allocate(int size, int maxCapacity) {
248         return allocate(size, maxCapacity, Thread.currentThread(), null);
249     }
250 
251     private AdaptiveByteBuf allocate(int size, int maxCapacity, Thread currentThread, AdaptiveByteBuf buf) {
252         AdaptiveByteBuf allocated = null;
253         if (size <= MAX_POOLED_BUF_SIZE) {
254             final int index = sizeClassIndexOf(size);
255             MagazineGroup[] magazineGroups;
256             if (!FastThreadLocalThread.currentThreadWillCleanupFastThreadLocals() ||
257                     (magazineGroups = threadLocalGroup.get()) == null) {
258                 magazineGroups = sizeClassedMagazineGroups;
259             }
260             if (index < magazineGroups.length) {
261                 allocated = magazineGroups[index].allocate(size, maxCapacity, currentThread, buf);
262             } else {
263                 allocated = largeBufferMagazineGroup.allocate(size, maxCapacity, currentThread, buf);
264             }
265         }
266         if (allocated == null) {
267             allocated = allocateFallback(size, maxCapacity, currentThread, buf);
268         }
269         return allocated;
270     }
271 
272     private static int sizeIndexOf(final int size) {
273         // Align the size up to the next multiple of 32, then divide by 32 to get the size index.
274         return size + 31 >> 5;
275     }
276 
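    // Worked example: a request for 1000 bytes gives sizeIndexOf(1000) = (1000 + 31) >> 5 = 32,
    // SIZE_INDEXES[32] = 6, and SIZE_CLASSES[6] = 1024, so such a request is served from the 1024-byte size class.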
277     static int sizeClassIndexOf(int size) {
278         int sizeIndex = sizeIndexOf(size);
279         if (sizeIndex < SIZE_INDEXES.length) {
280             return SIZE_INDEXES[sizeIndex];
281         }
282         return SIZE_CLASSES_COUNT;
283     }
284 
285     static int[] getSizeClasses() {
286         return SIZE_CLASSES.clone();
287     }
288 
289     private AdaptiveByteBuf allocateFallback(int size, int maxCapacity, Thread currentThread,
290                                              AdaptiveByteBuf buf) {
291         // If we don't already have a buffer, obtain one from the most conveniently available magazine.
292         Magazine magazine;
293         if (buf != null) {
294             Chunk chunk = buf.chunk;
295             if (chunk == null || chunk == Magazine.MAGAZINE_FREED || (magazine = chunk.currentMagazine()) == null) {
296                 magazine = getFallbackMagazine(currentThread);
297             }
298         } else {
299             magazine = getFallbackMagazine(currentThread);
300             buf = magazine.newBuffer();
301         }
302         // Create a one-off chunk for this allocation.
303         AbstractByteBuf innerChunk = chunkAllocator.allocate(size, maxCapacity);
304         Chunk chunk = new Chunk(innerChunk, magazine, false, chunkSize -> true);
305         chunkRegistry.add(chunk);
306         try {
307             chunk.readInitInto(buf, size, size, maxCapacity);
308         } finally {
309             // As the chunk is a one-off, we always need to call release explicitly, as readInitInto(...)
310             // will take care of retaining once when successful. Once the AdaptiveByteBuf is released it will
311             // completely release the Chunk and thus the contained innerChunk.
312             chunk.release();
313         }
314         return buf;
315     }
316 
317     private Magazine getFallbackMagazine(Thread currentThread) {
318         Magazine[] mags = largeBufferMagazineGroup.magazines;
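        // The magazine array length is always a power of two, so masking the thread id with length - 1
        // always yields a valid index.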
319         return mags[(int) currentThread.getId() & mags.length - 1];
320     }
321 
322     /**
323      * Allocate into the given buffer. Used by {@link AdaptiveByteBuf#capacity(int)}.
324      */
325     void reallocate(int size, int maxCapacity, AdaptiveByteBuf into) {
326         AdaptiveByteBuf result = allocate(size, maxCapacity, Thread.currentThread(), into);
327         assert result == into: "Re-allocation created separate buffer instance";
328     }
329 
330     long usedMemory() {
331         return chunkRegistry.totalCapacity();
332     }
333 
334     // Ensure that we release all previously pooled resources when this object is finalized. This is needed as otherwise
335     // we might end up with leaks. While these leaks are usually harmless in reality, they would still at least be
336     // very confusing for users.
337     @SuppressWarnings({"FinalizeDeclaration", "deprecation"})
338     @Override
339     protected void finalize() throws Throwable {
340         try {
341             super.finalize();
342         } finally {
343             free();
344         }
345     }
346 
347     private void free() {
348         largeBufferMagazineGroup.free();
349     }
350 
351     static int sizeToBucket(int size) {
352         return HistogramChunkController.sizeToBucket(size);
353     }
354 
355     private static final class MagazineGroup {
356         private final AdaptivePoolingAllocator allocator;
357         private final ChunkAllocator chunkAllocator;
358         private final ChunkControllerFactory chunkControllerFactory;
359         private final Queue<Chunk> chunkReuseQueue;
360         private final StampedLock magazineExpandLock;
361         private final Magazine threadLocalMagazine;
362         private volatile Magazine[] magazines;
363         private volatile boolean freed;
364 
365         MagazineGroup(AdaptivePoolingAllocator allocator,
366                       ChunkAllocator chunkAllocator,
367                       ChunkControllerFactory chunkControllerFactory,
368                       boolean isThreadLocal) {
369             this.allocator = allocator;
370             this.chunkAllocator = chunkAllocator;
371             this.chunkControllerFactory = chunkControllerFactory;
372             chunkReuseQueue = createSharedChunkQueue();
373             if (isThreadLocal) {
374                 magazineExpandLock = null;
375                 threadLocalMagazine = new Magazine(this, false, chunkReuseQueue, chunkControllerFactory.create(this));
376             } else {
377                 magazineExpandLock = new StampedLock();
378                 threadLocalMagazine = null;
379                 Magazine[] mags = new Magazine[INITIAL_MAGAZINES];
380                 for (int i = 0; i < mags.length; i++) {
381                     mags[i] = new Magazine(this, true, chunkReuseQueue, chunkControllerFactory.create(this));
382                 }
383                 magazines = mags;
384             }
385         }
386 
387         public AdaptiveByteBuf allocate(int size, int maxCapacity, Thread currentThread, AdaptiveByteBuf buf) {
388             boolean reallocate = buf != null;
389 
390             // Path for thread-local allocation.
391             Magazine tlMag = threadLocalMagazine;
392             if (tlMag != null) {
393                 if (buf == null) {
394                     buf = tlMag.newBuffer();
395                 }
396                 boolean allocated = tlMag.tryAllocate(size, maxCapacity, buf, reallocate);
397                 assert allocated : "Allocation of threadLocalMagazine must always succeed";
398                 return buf;
399             }
400 
401             // Path for concurrent allocation.
402             long threadId = currentThread.getId();
403             Magazine[] mags;
404             int expansions = 0;
405             do {
406                 mags = magazines;
407                 int mask = mags.length - 1;
408                 int index = (int) (threadId & mask);
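                // Probe each magazine up to twice, starting at the slot derived from the thread id,
                // before considering an expansion of the magazine array.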
409                 for (int i = 0, m = mags.length << 1; i < m; i++) {
410                     Magazine mag = mags[index + i & mask];
411                     if (buf == null) {
412                         buf = mag.newBuffer();
413                     }
414                     if (mag.tryAllocate(size, maxCapacity, buf, reallocate)) {
415                         // Was able to allocate.
416                         return buf;
417                     }
418                 }
419                 expansions++;
420             } while (expansions <= EXPANSION_ATTEMPTS && tryExpandMagazines(mags.length));
421 
422             // The magazines failed us; contention too high and we don't want to spend more effort expanding the array.
423             if (!reallocate && buf != null) {
424                 buf.release(); // Release the previously claimed buffer before we return.
425             }
426             return null;
427         }
428 
429         private boolean tryExpandMagazines(int currentLength) {
430             if (currentLength >= MAX_STRIPES) {
431                 return true;
432             }
433             final Magazine[] mags;
434             long writeLock = magazineExpandLock.tryWriteLock();
435             if (writeLock != 0) {
436                 try {
437                     mags = magazines;
438                     if (mags.length >= MAX_STRIPES || mags.length > currentLength || freed) {
439                         return true;
440                     }
441                     Magazine firstMagazine = mags[0];
442                     Magazine[] expanded = new Magazine[mags.length * 2];
443                     for (int i = 0, l = expanded.length; i < l; i++) {
444                         Magazine m = new Magazine(this, true, chunkReuseQueue, chunkControllerFactory.create(this));
445                         firstMagazine.initializeSharedStateIn(m);
446                         expanded[i] = m;
447                     }
448                     magazines = expanded;
449                 } finally {
450                     magazineExpandLock.unlockWrite(writeLock);
451                 }
452                 for (Magazine magazine : mags) {
453                     magazine.free();
454                 }
455             }
456             return true;
457         }
458 
459         boolean offerToQueue(Chunk buffer) {
460             if (freed) {
461                 return false;
462             }
463 
464             boolean isAdded = chunkReuseQueue.offer(buffer);
465             if (freed && isAdded) {
466                 // Help to free the reuse queue.
467                 freeChunkReuseQueue();
468             }
469             return isAdded;
470         }
471 
472         private void free() {
473             freed = true;
474             if (threadLocalMagazine != null) {
475                 threadLocalMagazine.free();
476             } else {
477                 long stamp = magazineExpandLock.writeLock();
478                 try {
479                     Magazine[] mags = magazines;
480                     for (Magazine magazine : mags) {
481                         magazine.free();
482                     }
483                 } finally {
484                     magazineExpandLock.unlockWrite(stamp);
485                 }
486             }
487             freeChunkReuseQueue();
488         }
489 
490         private void freeChunkReuseQueue() {
491             for (;;) {
492                 Chunk chunk = chunkReuseQueue.poll();
493                 if (chunk == null) {
494                     break;
495                 }
496                 chunk.release();
497             }
498         }
499     }
500 
501     private interface ChunkControllerFactory {
502         ChunkController create(MagazineGroup group);
503     }
504 
505     private interface ChunkController {
506         /**
507          * Compute the "fast max capacity" value for the buffer.
508          */
509         int computeBufferCapacity(int requestedSize, int maxCapacity, boolean isReallocation);
510 
511         /**
512          * Initialize the given chunk controller with shared statistics state (if any) from this controller.
513          */
514         void initializeSharedStateIn(ChunkController chunkController);
515 
516         /**
517          * Allocate a new {@link Chunk} for the given {@link Magazine}.
518          */
519         Chunk newChunkAllocation(int promptingSize, Magazine magazine);
520     }
521 
522     private interface ChunkReleasePredicate {
523         boolean shouldReleaseChunk(int chunkSize);
524     }
525 
526     private static final class SizeClassChunkControllerFactory implements ChunkControllerFactory {
527         // To amortize activation/deactivation of chunks, we should have a minimum number of segments per chunk.
528         // We choose 32 because it seems neither too small nor too big.
529         // For segments of 16 KiB, the chunks will be half a megabyte.
530         private static final int MIN_SEGMENTS_PER_CHUNK = 32;
531         private final int segmentSize;
532         private final int chunkSize;
533         private final int[] segmentOffsets;
534 
535         private SizeClassChunkControllerFactory(int segmentSize) {
536             this.segmentSize = ObjectUtil.checkPositive(segmentSize, "segmentSize");
537             chunkSize = Math.max(MIN_CHUNK_SIZE, segmentSize * MIN_SEGMENTS_PER_CHUNK);
538             int segmentsCount = chunkSize / segmentSize;
539             segmentOffsets = new int[segmentsCount];
540             for (int i = 0; i < segmentsCount; i++) {
541                 segmentOffsets[i] = i * segmentSize;
542             }
543         }
544 
545         @Override
546         public ChunkController create(MagazineGroup group) {
547             return new SizeClassChunkController(group, segmentSize, chunkSize, segmentOffsets);
548         }
549     }
550 
551     private static final class SizeClassChunkController implements ChunkController {
552 
553         private final ChunkAllocator chunkAllocator;
554         private final int segmentSize;
555         private final int chunkSize;
556         private final ChunkRegistry chunkRegistry;
557         private final int[] segmentOffsets;
558 
559         private SizeClassChunkController(MagazineGroup group, int segmentSize, int chunkSize, int[] segmentOffsets) {
560             chunkAllocator = group.chunkAllocator;
561             this.segmentSize = segmentSize;
562             this.chunkSize = chunkSize;
563             chunkRegistry = group.allocator.chunkRegistry;
564             this.segmentOffsets = segmentOffsets;
565         }
566 
567         @Override
568         public int computeBufferCapacity(
569                 int requestedSize, int maxCapacity, boolean isReallocation) {
570             return Math.min(segmentSize, maxCapacity);
571         }
572 
573         @Override
574         public void initializeSharedStateIn(ChunkController chunkController) {
575             // NOOP
576         }
577 
578         @Override
579         public Chunk newChunkAllocation(int promptingSize, Magazine magazine) {
580             AbstractByteBuf chunkBuffer = chunkAllocator.allocate(chunkSize, chunkSize);
581             assert chunkBuffer.capacity() == chunkSize;
582             SizeClassedChunk chunk = new SizeClassedChunk(chunkBuffer, magazine, true,
583                     segmentSize, segmentOffsets, size -> false);
584             chunkRegistry.add(chunk);
585             return chunk;
586         }
587     }
588 
589     private static final class HistogramChunkControllerFactory implements ChunkControllerFactory {
590         private final boolean shareable;
591 
592         private HistogramChunkControllerFactory(boolean shareable) {
593             this.shareable = shareable;
594         }
595 
596         @Override
597         public ChunkController create(MagazineGroup group) {
598             return new HistogramChunkController(group, shareable);
599         }
600     }
601 
602     private static final class HistogramChunkController implements ChunkController, ChunkReleasePredicate {
603         private static final int MIN_DATUM_TARGET = 1024;
604         private static final int MAX_DATUM_TARGET = 65534;
605         private static final int INIT_DATUM_TARGET = 9;
606         private static final int HISTO_BUCKET_COUNT = 16;
607         private static final int[] HISTO_BUCKETS = {
608                 16 * 1024,
609                 24 * 1024,
610                 32 * 1024,
611                 48 * 1024,
612                 64 * 1024,
613                 96 * 1024,
614                 128 * 1024,
615                 192 * 1024,
616                 256 * 1024,
617                 384 * 1024,
618                 512 * 1024,
619                 768 * 1024,
620                 1024 * 1024,
621                 1792 * 1024,
622                 2048 * 1024,
623                 3072 * 1024
624         };
625 
626         private final MagazineGroup group;
627         private final boolean shareable;
628         private final short[][] histos = {
629                 new short[HISTO_BUCKET_COUNT], new short[HISTO_BUCKET_COUNT],
630                 new short[HISTO_BUCKET_COUNT], new short[HISTO_BUCKET_COUNT],
631         };
632         private final ChunkRegistry chunkRegistry;
633         private short[] histo = histos[0];
634         private final int[] sums = new int[HISTO_BUCKET_COUNT];
635 
636         private int histoIndex;
637         private int datumCount;
638         private int datumTarget = INIT_DATUM_TARGET;
639         private boolean hasHadRotation;
640         private volatile int sharedPrefChunkSize = MIN_CHUNK_SIZE;
641         private volatile int localPrefChunkSize = MIN_CHUNK_SIZE;
642         private volatile int localUpperBufSize;
643 
644         private HistogramChunkController(MagazineGroup group, boolean shareable) {
645             this.group = group;
646             this.shareable = shareable;
647             chunkRegistry = group.allocator.chunkRegistry;
648         }
649 
650         @Override
651         public int computeBufferCapacity(
652                 int requestedSize, int maxCapacity, boolean isReallocation) {
653             if (!isReallocation) {
654                 // Only record allocation size if it's not caused by a reallocation that was triggered by capacity
655                 // change of the buffer.
656                 recordAllocationSize(requestedSize);
657             }
658 
659             // Predict starting capacity from localUpperBufSize, but place limits on the max starting capacity
660             // based on the requested size, because localUpperBufSize can potentially be quite large.
661             int startCapLimits;
662             if (requestedSize <= 32768) { // Less than or equal to 32 KiB.
663                 startCapLimits = 65536; // Use at most 64 KiB, which is also the AdaptiveRecvByteBufAllocator max.
664             } else {
665                 startCapLimits = requestedSize * 2; // Otherwise use at most twice the requested memory.
666             }
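            // For example, an 8 KiB request with a localUpperBufSize of 96 KiB is capped to a 64 KiB starting
            // capacity (assuming maxCapacity allows it).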
667             int startingCapacity = Math.min(startCapLimits, localUpperBufSize);
668             startingCapacity = Math.max(requestedSize, Math.min(maxCapacity, startingCapacity));
669             return startingCapacity;
670         }
671 
672         private void recordAllocationSize(int bufferSizeToRecord) {
673             // Use the preserved size from the reused AdaptiveByteBuf, if available.
674             // Otherwise, use the requested buffer size.
675             // This way, we better take into account how big reused buffers actually end up being.
676             if (bufferSizeToRecord == 0) {
677                 return;
678             }
679             int bucket = sizeToBucket(bufferSizeToRecord);
680             histo[bucket]++;
681             if (datumCount++ == datumTarget) {
682                 rotateHistograms();
683             }
684         }
685 
686         static int sizeToBucket(int size) {
687             int index = binarySearchInsertionPoint(Arrays.binarySearch(HISTO_BUCKETS, size));
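            // For example, a 20 KiB size falls between the 16 KiB and 24 KiB buckets, yielding insertion point 1,
            // so it is counted in the 24 KiB bucket.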
688             return index >= HISTO_BUCKETS.length ? HISTO_BUCKETS.length - 1 : index;
689         }
690 
691         private static int binarySearchInsertionPoint(int index) {
692             if (index < 0) {
693                 index = -(index + 1);
694             }
695             return index;
696         }
697 
698         static int bucketToSize(int sizeBucket) {
699             return HISTO_BUCKETS[sizeBucket];
700         }
701 
702         private void rotateHistograms() {
703             short[][] hs = histos;
704             for (int i = 0; i < HISTO_BUCKET_COUNT; i++) {
705                 sums[i] = (hs[0][i] & 0xFFFF) + (hs[1][i] & 0xFFFF) + (hs[2][i] & 0xFFFF) + (hs[3][i] & 0xFFFF);
706             }
707             int sum = 0;
708             for (int count : sums) {
709                 sum += count;
710             }
711             int targetPercentile = (int) (sum * 0.99);
712             int sizeBucket = 0;
713             for (; sizeBucket < sums.length; sizeBucket++) {
714                 if (sums[sizeBucket] > targetPercentile) {
715                     break;
716                 }
717                 targetPercentile -= sums[sizeBucket];
718             }
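            // At this point, sizeBucket identifies the histogram bucket containing the 99th percentile of the
            // recorded allocation sizes.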
719             hasHadRotation = true;
720             int percentileSize = bucketToSize(sizeBucket);
721             int prefChunkSize = Math.max(percentileSize * BUFS_PER_CHUNK, MIN_CHUNK_SIZE);
722             localUpperBufSize = percentileSize;
723             localPrefChunkSize = prefChunkSize;
724             if (shareable) {
725                 for (Magazine mag : group.magazines) {
726                     HistogramChunkController statistics = (HistogramChunkController) mag.chunkController;
727                     prefChunkSize = Math.max(prefChunkSize, statistics.localPrefChunkSize);
728                 }
729             }
730             if (sharedPrefChunkSize != prefChunkSize) {
731                 // Preferred chunk size changed. Increase check frequency.
732                 datumTarget = Math.max(datumTarget >> 1, MIN_DATUM_TARGET);
733                 sharedPrefChunkSize = prefChunkSize;
734             } else {
735                 // Preferred chunk size did not change. Check less often.
736                 datumTarget = Math.min(datumTarget << 1, MAX_DATUM_TARGET);
737             }
738 
739             histoIndex = histoIndex + 1 & 3;
740             histo = histos[histoIndex];
741             datumCount = 0;
742             Arrays.fill(histo, (short) 0);
743         }
744 
745         /**
746          * Get the preferred chunk size, based on statistics from the {@linkplain #recordAllocationSize(int) recorded}
747          * allocation sizes.
748          * <p>
749          * This method must be thread-safe.
750          *
751          * @return The currently preferred chunk allocation size.
752          */
753         int preferredChunkSize() {
754             return sharedPrefChunkSize;
755         }
756 
757         @Override
758         public void initializeSharedStateIn(ChunkController chunkController) {
759             HistogramChunkController statistics = (HistogramChunkController) chunkController;
760             int sharedPrefChunkSize = this.sharedPrefChunkSize;
761             statistics.localPrefChunkSize = sharedPrefChunkSize;
762             statistics.sharedPrefChunkSize = sharedPrefChunkSize;
763         }
764 
765         @Override
766         public Chunk newChunkAllocation(int promptingSize, Magazine magazine) {
767             int size = Math.max(promptingSize * BUFS_PER_CHUNK, preferredChunkSize());
768             int minChunks = size / MIN_CHUNK_SIZE;
769             if (MIN_CHUNK_SIZE * minChunks < size) {
770                 // Round up to nearest whole MIN_CHUNK_SIZE unit. The MIN_CHUNK_SIZE is an even multiple of many
771                 // popular small page sizes, like 4k, 16k, and 64k, which makes it easier for the system allocator
772                 // to manage the memory in terms of whole pages. This reduces memory fragmentation,
773                 // but without the potentially high overhead that power-of-2 chunk sizes would bring.
774                 size = MIN_CHUNK_SIZE * (1 + minChunks);
775             }
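            // For example, a computed size of 400 KiB is rounded up to 512 KiB, i.e. 4 * MIN_CHUNK_SIZE.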
776 
777             // Limit chunks to the max size, even if the histogram suggests to go above it.
778             size = Math.min(size, MAX_CHUNK_SIZE);
779 
780             // If we haven't rotated the histogram yet, optimistically record this chunk size as our preferred.
781             if (!hasHadRotation && sharedPrefChunkSize == MIN_CHUNK_SIZE) {
782                 sharedPrefChunkSize = size;
783             }
784 
785             ChunkAllocator chunkAllocator = group.chunkAllocator;
786             Chunk chunk = new Chunk(chunkAllocator.allocate(size, size), magazine, true, this);
787             chunkRegistry.add(chunk);
788             return chunk;
789         }
790 
791         @Override
792         public boolean shouldReleaseChunk(int chunkSize) {
793             int preferredSize = preferredChunkSize();
794             int givenChunks = chunkSize / MIN_CHUNK_SIZE;
795             int preferredChunks = preferredSize / MIN_CHUNK_SIZE;
796             int deviation = Math.abs(givenChunks - preferredChunks);
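            // For example, a 512 KiB chunk with a 256 KiB preferred size deviates by 2 units and is
            // therefore released with probability 2 / 20 = 10%.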
797 
798             // Retire chunks with a 5% probability per unit of MIN_CHUNK_SIZE deviation from preference.
799             return deviation != 0 &&
800                     ThreadLocalRandom.current().nextDouble() * 20.0 < deviation;
801         }
802     }
803 
804     private static final class Magazine {
805         private static final AtomicReferenceFieldUpdater<Magazine, Chunk> NEXT_IN_LINE;
806         static {
807             NEXT_IN_LINE = AtomicReferenceFieldUpdater.newUpdater(Magazine.class, Chunk.class, "nextInLine");
808         }
809         private static final Chunk MAGAZINE_FREED = new Chunk();
810 
811         private static final Recycler<AdaptiveByteBuf> EVENT_LOOP_LOCAL_BUFFER_POOL = new Recycler<AdaptiveByteBuf>() {
812             @Override
813             protected AdaptiveByteBuf newObject(Handle<AdaptiveByteBuf> handle) {
814                 return new AdaptiveByteBuf(handle);
815             }
816         };
817 
818         private Chunk current;
819         @SuppressWarnings("unused") // updated via NEXT_IN_LINE
820         private volatile Chunk nextInLine;
821         private final MagazineGroup group;
822         private final ChunkController chunkController;
823         private final StampedLock allocationLock;
824         private final Queue<AdaptiveByteBuf> bufferQueue;
825         private final ObjectPool.Handle<AdaptiveByteBuf> handle;
826         private final Queue<Chunk> sharedChunkQueue;
827 
828         Magazine(MagazineGroup group, boolean shareable, Queue<Chunk> sharedChunkQueue,
829                  ChunkController chunkController) {
830             this.group = group;
831             this.chunkController = chunkController;
832 
833             if (shareable) {
834                 // We only need the StampedLock if this Magazine will be shared across threads.
835                 allocationLock = new StampedLock();
836                 bufferQueue = PlatformDependent.newFixedMpmcQueue(MAGAZINE_BUFFER_QUEUE_CAPACITY);
837                 handle = new ObjectPool.Handle<AdaptiveByteBuf>() {
838                     @Override
839                     public void recycle(AdaptiveByteBuf self) {
840                         bufferQueue.offer(self);
841                     }
842                 };
843             } else {
844                 allocationLock = null;
845                 bufferQueue = null;
846                 handle = null;
847             }
848             this.sharedChunkQueue = sharedChunkQueue;
849         }
850 
851         public boolean tryAllocate(int size, int maxCapacity, AdaptiveByteBuf buf, boolean reallocate) {
852             if (allocationLock == null) {
853                 // This magazine is not shared across threads, just allocate directly.
854                 return allocate(size, maxCapacity, buf, reallocate);
855             }
856 
857             // Try to retrieve the lock and if successful allocate.
858             long writeLock = allocationLock.tryWriteLock();
859             if (writeLock != 0) {
860                 try {
861                     return allocate(size, maxCapacity, buf, reallocate);
862                 } finally {
863                     allocationLock.unlockWrite(writeLock);
864                 }
865             }
866             return allocateWithoutLock(size, maxCapacity, buf);
867         }
868 
869         private boolean allocateWithoutLock(int size, int maxCapacity, AdaptiveByteBuf buf) {
870             Chunk curr = NEXT_IN_LINE.getAndSet(this, null);
871             if (curr == MAGAZINE_FREED) {
872                 // Allocation raced with a stripe-resize that freed this magazine.
873                 restoreMagazineFreed();
874                 return false;
875             }
876             if (curr == null) {
877                 curr = sharedChunkQueue.poll();
878                 if (curr == null) {
879                     return false;
880                 }
881                 curr.attachToMagazine(this);
882             }
883             boolean allocated = false;
884             int remainingCapacity = curr.remainingCapacity();
885             int startingCapacity = chunkController.computeBufferCapacity(
886                     size, maxCapacity, true /* never update stats as we don't hold the magazine lock */);
887             if (remainingCapacity >= size) {
888                 curr.readInitInto(buf, size, Math.min(remainingCapacity, startingCapacity), maxCapacity);
889                 allocated = true;
890             }
891             try {
892                 if (remainingCapacity >= RETIRE_CAPACITY) {
893                     transferToNextInLineOrRelease(curr);
894                     curr = null;
895                 }
896             } finally {
897                 if (curr != null) {
898                     curr.releaseFromMagazine();
899                 }
900             }
901             return allocated;
902         }
903 
904         private boolean allocate(int size, int maxCapacity, AdaptiveByteBuf buf, boolean reallocate) {
905             int startingCapacity = chunkController.computeBufferCapacity(size, maxCapacity, reallocate);
906             Chunk curr = current;
907             if (curr != null) {
908                 // We have a Chunk that has some space left.
909                 int remainingCapacity = curr.remainingCapacity();
910                 if (remainingCapacity > startingCapacity) {
911                     curr.readInitInto(buf, size, startingCapacity, maxCapacity);
912                     // We still have some bytes left that we can use for the next allocation, just early return.
913                     return true;
914                 }
915 
916                 // At this point we know that this will be the last time current will be used, so directly set it to
917                 // null and release it once we are done.
918                 current = null;
919                 if (remainingCapacity >= size) {
920                     try {
921                         curr.readInitInto(buf, size, remainingCapacity, maxCapacity);
922                         return true;
923                     } finally {
924                         curr.releaseFromMagazine();
925                     }
926                 }
927 
928                 // Check whether we should retain the chunk in the nextInLine cache or release it.
929                 if (remainingCapacity < RETIRE_CAPACITY) {
930                     curr.releaseFromMagazine();
931                 } else {
932                     // See if it makes sense to transfer the Chunk to the nextInLine cache for later usage.
933                     // This method will release curr if this is not the case.
934                     transferToNextInLineOrRelease(curr);
935                 }
936             }
937 
938             assert current == null;
939             // The fast-path for allocations did not work.
940             //
941             // Try to fetch the next "Magazine local" Chunk first, if this fails because we don't have a
942             // next-in-line chunk available, we will poll our centralQueue.
943             // If this fails as well we will just allocate a new Chunk.
944             //
945             // In any case we will store the Chunk as the current so it will be used again for the next allocation and
946             // thus be "reserved" by this Magazine for exclusive usage.
947             curr = NEXT_IN_LINE.getAndSet(this, null);
948             if (curr != null) {
949                 if (curr == MAGAZINE_FREED) {
950                     // Allocation raced with a stripe-resize that freed this magazine.
951                     restoreMagazineFreed();
952                     return false;
953                 }
954 
955                 int remainingCapacity = curr.remainingCapacity();
956                 if (remainingCapacity > startingCapacity) {
957                     // We have a Chunk that has some space left.
958                     curr.readInitInto(buf, size, startingCapacity, maxCapacity);
959                     current = curr;
960                     return true;
961                 }
962 
963                 if (remainingCapacity >= size) {
964                     // At this point we know that this will be the last time curr will be used, so directly set it to
965                     // null and release it once we are done.
966                     try {
967                         curr.readInitInto(buf, size, remainingCapacity, maxCapacity);
968                         return true;
969                     } finally {
970                         // Release in a finally block so even if readInitInto(...) would throw we would still correctly
971                         // release the current chunk before nulling it out.
972                         curr.releaseFromMagazine();
973                     }
974                 } else {
975                     // Release it as it's too small.
976                     curr.releaseFromMagazine();
977                 }
978             }
979 
980             // Now try to poll from the central queue first
981             curr = sharedChunkQueue.poll();
982             if (curr == null) {
983                 curr = chunkController.newChunkAllocation(size, this);
984             } else {
985                 curr.attachToMagazine(this);
986 
987                 int remainingCapacity = curr.remainingCapacity();
988                 if (remainingCapacity == 0 || remainingCapacity < size) {
989                     // Check whether we should retain the chunk in the nextInLine cache or release it.
990                     if (remainingCapacity < RETIRE_CAPACITY) {
991                         curr.releaseFromMagazine();
992                     } else {
993                         // See if it makes sense to transfer the Chunk to the nextInLine cache for later usage.
994                         // This method will release curr if this is not the case.
995                         transferToNextInLineOrRelease(curr);
996                     }
997                     curr = chunkController.newChunkAllocation(size, this);
998                 }
999             }
1000 
1001             current = curr;
1002             try {
1003                 int remainingCapacity = curr.remainingCapacity();
1004                 assert remainingCapacity >= size;
1005                 if (remainingCapacity > startingCapacity) {
1006                     curr.readInitInto(buf, size, startingCapacity, maxCapacity);
1007                     curr = null;
1008                 } else {
1009                     curr.readInitInto(buf, size, remainingCapacity, maxCapacity);
1010                 }
1011             } finally {
1012                 if (curr != null) {
1013                     // Release in a finally block so even if readInitInto(...) would throw we would still correctly
1014                    // release the current chunk before nulling it out.
1015                     curr.releaseFromMagazine();
1016                     current = null;
1017                 }
1018             }
1019             return true;
1020         }
1021 
1022         private void restoreMagazineFreed() {
1023             Chunk next = NEXT_IN_LINE.getAndSet(this, MAGAZINE_FREED);
1024             if (next != null && next != MAGAZINE_FREED) {
1025                 // A chunk snuck in through a race. Release it after restoring MAGAZINE_FREED state.
1026                 next.releaseFromMagazine();
1027             }
1028         }
1029 
1030         private void transferToNextInLineOrRelease(Chunk chunk) {
1031             if (NEXT_IN_LINE.compareAndSet(this, null, chunk)) {
1032                 return;
1033             }
1034 
1035             Chunk nextChunk = NEXT_IN_LINE.get(this);
1036             if (nextChunk != null && nextChunk != MAGAZINE_FREED
1037                     && chunk.remainingCapacity() > nextChunk.remainingCapacity()) {
1038                 if (NEXT_IN_LINE.compareAndSet(this, nextChunk, chunk)) {
1039                     nextChunk.releaseFromMagazine();
1040                     return;
1041                 }
1042             }
1043             // Next-in-line is occupied. We don't try to add it to the central queue yet as it might still be used
1044             // by some buffers and so is attached to a Magazine.
1045             // Once a Chunk is completely released by Chunk.release() it will try to move itself to the queue
1046             // as last resort.
1047             chunk.releaseFromMagazine();
1048         }
1049 
1050         boolean trySetNextInLine(Chunk chunk) {
1051             return NEXT_IN_LINE.compareAndSet(this, null, chunk);
1052         }
1053 
1054         void free() {
1055             // Release the current Chunk and the next that was stored for later usage.
1056             restoreMagazineFreed();
1057             long stamp = allocationLock != null ? allocationLock.writeLock() : 0;
1058             try {
1059                 if (current != null) {
1060                     current.releaseFromMagazine();
1061                     current = null;
1062                 }
1063             } finally {
1064                 if (allocationLock != null) {
1065                     allocationLock.unlockWrite(stamp);
1066                 }
1067             }
1068         }
1069 
1070         public AdaptiveByteBuf newBuffer() {
1071             AdaptiveByteBuf buf;
1072             if (handle == null) {
1073                 buf = EVENT_LOOP_LOCAL_BUFFER_POOL.get();
1074             } else {
1075                 buf = bufferQueue.poll();
1076                 if (buf == null) {
1077                     buf = new AdaptiveByteBuf(handle);
1078                 }
1079             }
1080             buf.resetRefCnt();
1081             buf.discardMarks();
1082             return buf;
1083         }
1084 
1085         boolean offerToQueue(Chunk chunk) {
1086             return group.offerToQueue(chunk);
1087         }
1088 
1089         public void initializeSharedStateIn(Magazine other) {
1090             chunkController.initializeSharedStateIn(other.chunkController);
1091         }
1092     }
1093 
1094     private static final class ChunkRegistry {
1095         private final LongAdder totalCapacity = new LongAdder();
1096 
1097         public long totalCapacity() {
1098             return totalCapacity.sum();
1099         }
1100 
1101         public void add(Chunk chunk) {
1102             totalCapacity.add(chunk.capacity());
1103         }
1104 
1105         public void remove(Chunk chunk) {
1106             totalCapacity.add(-chunk.capacity());
1107         }
1108     }
1109 
1110     private static class Chunk implements ReferenceCounted, ChunkInfo {
1111         private static final long REFCNT_FIELD_OFFSET;
1112         private static final AtomicIntegerFieldUpdater<Chunk> AIF_UPDATER;
1113         private static final Object REFCNT_FIELD_VH;
1114         private static final ReferenceCountUpdater<Chunk> updater;
1115 
1116         static {
1117             ReferenceCountUpdater.UpdaterType updaterType = ReferenceCountUpdater.updaterTypeOf(Chunk.class, "refCnt");
1118             switch (updaterType) {
1119                 case Atomic:
1120                     AIF_UPDATER = newUpdater(Chunk.class, "refCnt");
1121                     REFCNT_FIELD_OFFSET = -1;
1122                     REFCNT_FIELD_VH = null;
1123                     updater = new AtomicReferenceCountUpdater<Chunk>() {
1124                         @Override
1125                         protected AtomicIntegerFieldUpdater<Chunk> updater() {
1126                             return AIF_UPDATER;
1127                         }
1128                     };
1129                     break;
1130                 case Unsafe:
1131                     AIF_UPDATER = null;
1132                     REFCNT_FIELD_OFFSET = getUnsafeOffset(Chunk.class, "refCnt");
1133                     REFCNT_FIELD_VH = null;
1134                     updater = new UnsafeReferenceCountUpdater<Chunk>() {
1135                         @Override
1136                         protected long refCntFieldOffset() {
1137                             return REFCNT_FIELD_OFFSET;
1138                         }
1139                     };
1140                     break;
1141                 case VarHandle:
1142                     AIF_UPDATER = null;
1143                     REFCNT_FIELD_OFFSET = -1;
1144                     REFCNT_FIELD_VH = PlatformDependent.findVarHandleOfIntField(MethodHandles.lookup(),
1145                             Chunk.class, "refCnt");
1146                     updater = new VarHandleReferenceCountUpdater<Chunk>() {
1147                         @Override
1148                         protected VarHandle varHandle() {
1149                             return (VarHandle) REFCNT_FIELD_VH;
1150                         }
1151                     };
1152                     break;
1153                 default:
1154                     throw new Error("Unexpected updater type for Chunk: " + updaterType);
1155             }
1156         }
1157 
1158         protected final AbstractByteBuf delegate;
1159         protected Magazine magazine;
1160         private final AdaptivePoolingAllocator allocator;
1161         private final ChunkReleasePredicate chunkReleasePredicate;
1162         private final int capacity;
1163         private final boolean pooled;
1164         protected int allocatedBytes;
1165 
1166         // Value might not equal "real" reference count, all access should be via the updater
1167         @SuppressWarnings({"unused", "FieldMayBeFinal"})
1168         private volatile int refCnt;
1169 
1170         Chunk() {
1171             // Constructor only used by the MAGAZINE_FREED sentinel.
1172             delegate = null;
1173             magazine = null;
1174             allocator = null;
1175             chunkReleasePredicate = null;
1176             capacity = 0;
1177             pooled = false;
1178         }
1179 
1180         Chunk(AbstractByteBuf delegate, Magazine magazine, boolean pooled,
1181               ChunkReleasePredicate chunkReleasePredicate) {
1182             this.delegate = delegate;
1183             this.pooled = pooled;
1184             capacity = delegate.capacity();
1185             updater.setInitialValue(this);
1186             attachToMagazine(magazine);
1187 
1188             // We need the top-level allocator so ByteBuf.capacity(int) can call reallocate()
1189             allocator = magazine.group.allocator;
1190 
1191             this.chunkReleasePredicate = chunkReleasePredicate;
1192 
1193             if (PlatformDependent.isJfrEnabled() && AllocateChunkEvent.isEventEnabled()) {
1194                 AllocateChunkEvent event = new AllocateChunkEvent();
1195                 if (event.shouldCommit()) {
1196                     event.fill(this, AdaptiveByteBufAllocator.class);
1197                     event.pooled = pooled;
1198                     event.threadLocal = magazine.allocationLock == null;
1199                     event.commit();
1200                 }
1201             }
1202         }
1203 
1204         Magazine currentMagazine()  {
1205             return magazine;
1206         }
1207 
1208         void detachFromMagazine() {
1209             if (magazine != null) {
1210                 magazine = null;
1211             }
1212         }
1213 
1214         void attachToMagazine(Magazine magazine) {
1215             assert this.magazine == null;
1216             this.magazine = magazine;
1217         }
1218 
1219         @Override
1220         public Chunk touch(Object hint) {
1221             return this;
1222         }
1223 
1224         @Override
1225         public int refCnt() {
1226             return updater.refCnt(this);
1227         }
1228 
1229         @Override
1230         public Chunk retain() {
1231             return updater.retain(this);
1232         }
1233 
1234         @Override
1235         public Chunk retain(int increment) {
1236             return updater.retain(this, increment);
1237         }
1238 
1239         @Override
1240         public Chunk touch() {
1241             return this;
1242         }
1243 
1244         @Override
1245         public boolean release() {
1246             if (updater.release(this)) {
1247                 deallocate();
1248                 return true;
1249             }
1250             return false;
1251         }
1252 
1253         @Override
1254         public boolean release(int decrement) {
1255             if (updater.release(this, decrement)) {
1256                 deallocate();
1257                 return true;
1258             }
1259             return false;
1260         }
1261 
1262         /**
1263          * Called when a magazine is done using this chunk, typically because it has been emptied.
1264          */
1265         boolean releaseFromMagazine() {
1266             return release();
1267         }
1268 
1269         /**
1270          * Called when a ByteBuf is done using its allocation in this chunk.
1271          */
1272         boolean releaseSegment(int ignoredSegmentId) {
1273             return release();
1274         }
1275 
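             // Called once the reference count drops to zero: either return the chunk's memory for reuse
             // (preferably as the magazine's next-in-line chunk, otherwise via the shared queue), or
             // release the delegate buffer entirely if the chunk should not be kept around.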
1276         private void deallocate() {
1277             Magazine mag = magazine;
1278             int chunkSize = delegate.capacity();
1279             if (!pooled || chunkReleasePredicate.shouldReleaseChunk(chunkSize) || mag == null) {
1280                 // Drop the chunk if it is unpooled, no longer attached to a magazine, or if the release
1281                 // predicate says so (e.g. the chunk deviates too much from the preferred chunk size).
1282                 detachFromMagazine();
1283                 onRelease();
1284                 allocator.chunkRegistry.remove(this);
1285                 delegate.release();
1286             } else {
1287                 updater.resetRefCnt(this);
1288                 delegate.setIndex(0, 0);
1289                 allocatedBytes = 0;
1290                 if (!mag.trySetNextInLine(this)) {
1291                     // As this Chunk does not belong to the magazine anymore, we need to decrease the used memory.
1292                     detachFromMagazine();
1293                     if (!mag.offerToQueue(this)) {
1294                         // The central queue is full. Release one more time, because the earlier
1295                         // resetRefCnt() call set the reference count back to 1.
1296                         boolean released = updater.release(this);
1297                         onRelease();
1298                         allocator.chunkRegistry.remove(this);
1299                         delegate.release();
1300                         assert released;
1301                     } else {
1302                         onReturn(false);
1303                     }
1304                 } else {
1305                     onReturn(true);
1306                 }
1307             }
1308         }
1309 
1310         private void onReturn(boolean returnedToMagazine) {
1311             if (PlatformDependent.isJfrEnabled() && ReturnChunkEvent.isEventEnabled()) {
1312                 ReturnChunkEvent event = new ReturnChunkEvent();
1313                 if (event.shouldCommit()) {
1314                     event.fill(this, AdaptiveByteBufAllocator.class);
1315                     event.returnedToMagazine = returnedToMagazine;
1316                     event.commit();
1317                 }
1318             }
1319         }
1320 
1321         private void onRelease() {
1322             if (PlatformDependent.isJfrEnabled() && FreeChunkEvent.isEventEnabled()) {
1323                 FreeChunkEvent event = new FreeChunkEvent();
1324                 if (event.shouldCommit()) {
1325                     event.fill(this, AdaptiveByteBufAllocator.class);
1326                     event.pooled = pooled;
1327                     event.commit();
1328                 }
1329             }
1330         }
1331 
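             // Bump-allocates 'startingCapacity' bytes at the current offset and initializes the given
             // buffer over that region. The chunk is retained up front so that a failing buf.init(...)
             // can roll back both the retain and the allocatedBytes bump.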
1332         public void readInitInto(AdaptiveByteBuf buf, int size, int startingCapacity, int maxCapacity) {
1333             int startIndex = allocatedBytes;
1334             allocatedBytes = startIndex + startingCapacity;
1335             Chunk chunk = this;
1336             chunk.retain();
1337             try {
1338                 buf.init(delegate, chunk, 0, 0, startIndex, size, startingCapacity, maxCapacity);
1339                 chunk = null;
1340             } finally {
1341                 if (chunk != null) {
1342                     // If chunk is not null, buf.init(...) failed, so we need to manually release
1343                     // the chunk that we retained before calling buf.init(...). Besides that, we also
1344                     // need to restore the old allocatedBytes value.
1345                     allocatedBytes = startIndex;
1346                     chunk.release();
1347                 }
1348             }
1349         }
1350 
1351         public int remainingCapacity() {
1352             return capacity - allocatedBytes;
1353         }
1354 
1355         @Override
1356         public int capacity() {
1357             return capacity;
1358         }
1359 
1360         @Override
1361         public boolean isDirect() {
1362             return delegate.isDirect();
1363         }
1364 
1365         @Override
1366         public long memoryAddress() {
1367             return delegate._memoryAddress();
1368         }
1369     }
1370 
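         // A chunk that is split into fixed-size segments whose offsets are tracked in an MPSC free
         // list. Segments can be returned and handed out again before the whole chunk becomes empty.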
1371     private static final class SizeClassedChunk extends Chunk {
1372         private static final int FREE_LIST_EMPTY = -1;
1373         private final int segmentSize;
1374         private final MpscIntQueue freeList;
1375 
1376         SizeClassedChunk(AbstractByteBuf delegate, Magazine magazine, boolean pooled, int segmentSize,
1377                          int[] segmentOffsets, ChunkReleasePredicate shouldReleaseChunk) {
1378             super(delegate, magazine, pooled, shouldReleaseChunk);
1379             this.segmentSize = segmentSize;
1380             int segmentCount = segmentOffsets.length;
1381             assert delegate.capacity() / segmentSize == segmentCount;
1382             assert segmentCount > 0: "Chunk must have a positive number of segments";
1383             freeList = MpscIntQueue.create(segmentCount, FREE_LIST_EMPTY);
1384             freeList.fill(segmentCount, new IntSupplier() {
1385                 int counter;
1386                 @Override
1387                 public int getAsInt() {
1388                     return segmentOffsets[counter++];
1389                 }
1390             });
1391         }
1392 
1393         @Override
1394         public void readInitInto(AdaptiveByteBuf buf, int size, int startingCapacity, int maxCapacity) {
1395             int startIndex = freeList.poll();
1396             if (startIndex == FREE_LIST_EMPTY) {
1397                 throw new IllegalStateException("Free list is empty");
1398             }
1399             allocatedBytes += segmentSize;
1400             Chunk chunk = this;
1401             chunk.retain();
1402             try {
1403                 buf.init(delegate, chunk, 0, 0, startIndex, size, startingCapacity, maxCapacity);
1404                 chunk = null;
1405             } finally {
1406                 if (chunk != null) {
1407                     // If chunk is not null, buf.init(...) failed, so we need to manually release
1408                     // the chunk that we retained before calling buf.init(...). Besides that, we also
1409                     // need to restore the old allocatedBytes value.
1410                     allocatedBytes -= segmentSize;
1411                     chunk.releaseSegment(startIndex);
1412                 }
1413             }
1414         }
1415 
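             // The bump counter in allocatedBytes is not decremented when individual segments are freed,
             // so once the chunk looks (nearly) full, recompute the remaining capacity from the free list
             // and resynchronize allocatedBytes with it.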
1416         @Override
1417         public int remainingCapacity() {
1418             int remainingCapacity = super.remainingCapacity();
1419             if (remainingCapacity > segmentSize) {
1420                 return remainingCapacity;
1421             }
1422             int updatedRemainingCapacity = freeList.size() * segmentSize;
1423             if (updatedRemainingCapacity == remainingCapacity) {
1424                 return remainingCapacity;
1425             }
1426             // update allocatedBytes based on what's available in the free list
1427             allocatedBytes = capacity() - updatedRemainingCapacity;
1428             return updatedRemainingCapacity;
1429         }
1430 
1431         @Override
1432         boolean releaseFromMagazine() {
1433             // Size-classed chunks can be reused before they become empty.
1434             // We can therefore put them in the shared queue as soon as the magazine is done with this chunk.
1435             Magazine mag = magazine;
1436             detachFromMagazine();
1437             if (!mag.offerToQueue(this)) {
1438                 return super.releaseFromMagazine();
1439             }
1440             return false;
1441         }
1442 
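             // Drops this buffer's reference on the chunk and returns the segment's offset to the
             // free list, so it can be handed out again by readInitInto(...).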
1443         @Override
1444         boolean releaseSegment(int startIndex) {
1445             boolean released = release();
1446             boolean segmentReturned = freeList.offer(startIndex);
1447             assert segmentReturned: "Unable to return segment " + startIndex + " to free list";
1448             return released;
1449         }
1450     }
1451 
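     // The buffer type handed out by this allocator: a recycled, reference-counted view onto a region
     // of a Chunk's delegate buffer. Indices are translated via idx(...) into the root parent's index
     // space, and deallocate() returns the segment to the chunk and recycles the wrapper object.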
1452     static final class AdaptiveByteBuf extends AbstractReferenceCountedByteBuf {
1453 
1454         private final ObjectPool.Handle<AdaptiveByteBuf> handle;
1455 
1456         // This acts both as the adjustment (offset into the root parent) and as the start index for a free-list segment allocation.
1457         private int startIndex;
1458         private AbstractByteBuf rootParent;
1459         Chunk chunk;
1460         private int length;
1461         private int maxFastCapacity;
1462         private ByteBuffer tmpNioBuf;
1463         private boolean hasArray;
1464         private boolean hasMemoryAddress;
1465 
1466         AdaptiveByteBuf(ObjectPool.Handle<AdaptiveByteBuf> recyclerHandle) {
1467             super(0);
1468             handle = ObjectUtil.checkNotNull(recyclerHandle, "recyclerHandle");
1469         }
1470 
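             // (Re)initializes this recycled buffer over the chunk's delegate starting at startIndex.
             // 'size' is the initial capacity reported to the user, while 'capacity' bounds how far the
             // buffer can grow before a reallocation into a new chunk region is needed.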
1471         void init(AbstractByteBuf unwrapped, Chunk wrapped, int readerIndex, int writerIndex,
1472                   int startIndex, int size, int capacity, int maxCapacity) {
1473             this.startIndex = startIndex;
1474             chunk = wrapped;
1475             length = size;
1476             maxFastCapacity = capacity;
1477             maxCapacity(maxCapacity);
1478             setIndex0(readerIndex, writerIndex);
1479             hasArray = unwrapped.hasArray();
1480             hasMemoryAddress = unwrapped.hasMemoryAddress();
1481             rootParent = unwrapped;
1482             tmpNioBuf = null;
1483 
1484             if (PlatformDependent.isJfrEnabled() && AllocateBufferEvent.isEventEnabled()) {
1485                 AllocateBufferEvent event = new AllocateBufferEvent();
1486                 if (event.shouldCommit()) {
1487                     event.fill(this, AdaptiveByteBufAllocator.class);
1488                     event.chunkPooled = wrapped.pooled;
1489                     Magazine m = wrapped.magazine;
1490                     event.chunkThreadLocal = m != null && m.allocationLock == null;
1491                     event.commit();
1492                 }
1493             }
1494         }
1495 
1496         private AbstractByteBuf rootParent() {
1497             final AbstractByteBuf rootParent = this.rootParent;
1498             if (rootParent != null) {
1499                 return rootParent;
1500             }
1501             throw new IllegalReferenceCountException();
1502         }
1503 
1504         @Override
1505         public int capacity() {
1506             return length;
1507         }
1508 
1509         @Override
1510         public int maxFastWritableBytes() {
1511             return Math.min(maxFastCapacity, maxCapacity()) - writerIndex;
1512         }
1513 
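             // Growing within maxFastCapacity just extends the logical length, shrinking trims the
             // indices, and anything beyond maxFastCapacity requires allocating a new region and
             // copying the old contents over.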
1514         @Override
1515         public ByteBuf capacity(int newCapacity) {
1516             if (length <= newCapacity && newCapacity <= maxFastCapacity) {
1517                 ensureAccessible();
1518                 length = newCapacity;
1519                 return this;
1520             }
1521             checkNewCapacity(newCapacity);
1522             if (newCapacity < capacity()) {
1523                 length = newCapacity;
1524                 trimIndicesToCapacity(newCapacity);
1525                 return this;
1526             }
1527 
1528             if (PlatformDependent.isJfrEnabled() && ReallocateBufferEvent.isEventEnabled()) {
1529                 ReallocateBufferEvent event = new ReallocateBufferEvent();
1530                 if (event.shouldCommit()) {
1531                     event.fill(this, AdaptiveByteBufAllocator.class);
1532                     event.newCapacity = newCapacity;
1533                     event.commit();
1534                 }
1535             }
1536 
1537             // Reallocation required.
1538             Chunk chunk = this.chunk;
1539             AdaptivePoolingAllocator allocator = chunk.allocator;
1540             int readerIndex = this.readerIndex;
1541             int writerIndex = this.writerIndex;
1542             int baseOldRootIndex = startIndex;
1543             int oldCapacity = length;
1544             AbstractByteBuf oldRoot = rootParent();
1545             allocator.reallocate(newCapacity, maxCapacity(), this);
1546             oldRoot.getBytes(baseOldRootIndex, this, 0, oldCapacity);
1547             chunk.releaseSegment(baseOldRootIndex);
1548             this.readerIndex = readerIndex;
1549             this.writerIndex = writerIndex;
1550             return this;
1551         }
1552 
1553         @Override
1554         public ByteBufAllocator alloc() {
1555             return rootParent().alloc();
1556         }
1557 
1558         @Override
1559         public ByteOrder order() {
1560             return rootParent().order();
1561         }
1562 
1563         @Override
1564         public ByteBuf unwrap() {
1565             return null;
1566         }
1567 
1568         @Override
1569         public boolean isDirect() {
1570             return rootParent().isDirect();
1571         }
1572 
1573         @Override
1574         public int arrayOffset() {
1575             return idx(rootParent().arrayOffset());
1576         }
1577 
1578         @Override
1579         public boolean hasMemoryAddress() {
1580             return hasMemoryAddress;
1581         }
1582 
1583         @Override
1584         public long memoryAddress() {
1585             ensureAccessible();
1586             return _memoryAddress();
1587         }
1588 
1589         @Override
1590         long _memoryAddress() {
1591             AbstractByteBuf root = rootParent;
1592             return root != null ? root._memoryAddress() + startIndex : 0L;
1593         }
1594 
1595         @Override
1596         public ByteBuffer nioBuffer(int index, int length) {
1597             checkIndex(index, length);
1598             return rootParent().nioBuffer(idx(index), length);
1599         }
1600 
1601         @Override
1602         public ByteBuffer internalNioBuffer(int index, int length) {
1603             checkIndex(index, length);
1604             return (ByteBuffer) internalNioBuffer().position(index).limit(index + length);
1605         }
1606 
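             // Lazily creates and caches a ByteBuffer view over this buffer's slice of the root parent,
             // spanning [startIndex, startIndex + maxFastCapacity).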
1607         private ByteBuffer internalNioBuffer() {
1608             if (tmpNioBuf == null) {
1609                 tmpNioBuf = rootParent().nioBuffer(startIndex, maxFastCapacity);
1610             }
1611             return (ByteBuffer) tmpNioBuf.clear();
1612         }
1613 
1614         @Override
1615         public ByteBuffer[] nioBuffers(int index, int length) {
1616             checkIndex(index, length);
1617             return rootParent().nioBuffers(idx(index), length);
1618         }
1619 
1620         @Override
1621         public boolean hasArray() {
1622             return hasArray;
1623         }
1624 
1625         @Override
1626         public byte[] array() {
1627             ensureAccessible();
1628             return rootParent().array();
1629         }
1630 
1631         @Override
1632         public ByteBuf copy(int index, int length) {
1633             checkIndex(index, length);
1634             return rootParent().copy(idx(index), length);
1635         }
1636 
1637         @Override
1638         public int nioBufferCount() {
1639             return rootParent().nioBufferCount();
1640         }
1641 
1642         @Override
1643         protected byte _getByte(int index) {
1644             return rootParent()._getByte(idx(index));
1645         }
1646 
1647         @Override
1648         protected short _getShort(int index) {
1649             return rootParent()._getShort(idx(index));
1650         }
1651 
1652         @Override
1653         protected short _getShortLE(int index) {
1654             return rootParent()._getShortLE(idx(index));
1655         }
1656 
1657         @Override
1658         protected int _getUnsignedMedium(int index) {
1659             return rootParent()._getUnsignedMedium(idx(index));
1660         }
1661 
1662         @Override
1663         protected int _getUnsignedMediumLE(int index) {
1664             return rootParent()._getUnsignedMediumLE(idx(index));
1665         }
1666 
1667         @Override
1668         protected int _getInt(int index) {
1669             return rootParent()._getInt(idx(index));
1670         }
1671 
1672         @Override
1673         protected int _getIntLE(int index) {
1674             return rootParent()._getIntLE(idx(index));
1675         }
1676 
1677         @Override
1678         protected long _getLong(int index) {
1679             return rootParent()._getLong(idx(index));
1680         }
1681 
1682         @Override
1683         protected long _getLongLE(int index) {
1684             return rootParent()._getLongLE(idx(index));
1685         }
1686 
1687         @Override
1688         public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) {
1689             checkIndex(index, length);
1690             rootParent().getBytes(idx(index), dst, dstIndex, length);
1691             return this;
1692         }
1693 
1694         @Override
1695         public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) {
1696             checkIndex(index, length);
1697             rootParent().getBytes(idx(index), dst, dstIndex, length);
1698             return this;
1699         }
1700 
1701         @Override
1702         public ByteBuf getBytes(int index, ByteBuffer dst) {
1703             checkIndex(index, dst.remaining());
1704             rootParent().getBytes(idx(index), dst);
1705             return this;
1706         }
1707 
1708         @Override
1709         protected void _setByte(int index, int value) {
1710             rootParent()._setByte(idx(index), value);
1711         }
1712 
1713         @Override
1714         protected void _setShort(int index, int value) {
1715             rootParent()._setShort(idx(index), value);
1716         }
1717 
1718         @Override
1719         protected void _setShortLE(int index, int value) {
1720             rootParent()._setShortLE(idx(index), value);
1721         }
1722 
1723         @Override
1724         protected void _setMedium(int index, int value) {
1725             rootParent()._setMedium(idx(index), value);
1726         }
1727 
1728         @Override
1729         protected void _setMediumLE(int index, int value) {
1730             rootParent()._setMediumLE(idx(index), value);
1731         }
1732 
1733         @Override
1734         protected void _setInt(int index, int value) {
1735             rootParent()._setInt(idx(index), value);
1736         }
1737 
1738         @Override
1739         protected void _setIntLE(int index, int value) {
1740             rootParent()._setIntLE(idx(index), value);
1741         }
1742 
1743         @Override
1744         protected void _setLong(int index, long value) {
1745             rootParent()._setLong(idx(index), value);
1746         }
1747 
1748         @Override
1749         protected void _setLongLE(int index, long value) {
1750             rootParent()._setLongLE(idx(index), value);
1751         }
1752 
1753         @Override
1754         public ByteBuf setBytes(int index, byte[] src, int srcIndex, int length) {
1755             checkIndex(index, length);
1756             ByteBuffer tmp = (ByteBuffer) internalNioBuffer().clear().position(index);
1757             tmp.put(src, srcIndex, length);
1758             return this;
1759         }
1760 
1761         @Override
1762         public ByteBuf setBytes(int index, ByteBuf src, int srcIndex, int length) {
1763             checkIndex(index, length);
1764             ByteBuffer tmp = (ByteBuffer) internalNioBuffer().clear().position(index);
1765             tmp.put(src.nioBuffer(srcIndex, length));
1766             return this;
1767         }
1768 
1769         @Override
1770         public ByteBuf setBytes(int index, ByteBuffer src) {
1771             checkIndex(index, src.remaining());
1772             ByteBuffer tmp = (ByteBuffer) internalNioBuffer().clear().position(index);
1773             tmp.put(src);
1774             return this;
1775         }
1776 
1777         @Override
1778         public ByteBuf getBytes(int index, OutputStream out, int length)
1779                 throws IOException {
1780             checkIndex(index, length);
1781             if (length != 0) {
1782                 ByteBufUtil.readBytes(alloc(), internalNioBuffer().duplicate(), index, length, out);
1783             }
1784             return this;
1785         }
1786 
1787         @Override
1788         public int getBytes(int index, GatheringByteChannel out, int length)
1789                 throws IOException {
1790             ByteBuffer buf = internalNioBuffer().duplicate();
1791             buf.clear().position(index).limit(index + length);
1792             return out.write(buf);
1793         }
1794 
1795         @Override
1796         public int getBytes(int index, FileChannel out, long position, int length)
1797                 throws IOException {
1798             ByteBuffer buf = internalNioBuffer().duplicate();
1799             buf.clear().position(index).limit(index + length);
1800             return out.write(buf, position);
1801         }
1802 
1803         @Override
1804         public int setBytes(int index, InputStream in, int length)
1805                 throws IOException {
1806             checkIndex(index, length);
1807             final AbstractByteBuf rootParent = rootParent();
1808             if (rootParent.hasArray()) {
1809                 return rootParent.setBytes(idx(index), in, length);
1810             }
1811             byte[] tmp = ByteBufUtil.threadLocalTempArray(length);
1812             int readBytes = in.read(tmp, 0, length);
1813             if (readBytes <= 0) {
1814                 return readBytes;
1815             }
1816             setBytes(index, tmp, 0, readBytes);
1817             return readBytes;
1818         }
1819 
1820         @Override
1821         public int setBytes(int index, ScatteringByteChannel in, int length)
1822                 throws IOException {
1823             try {
1824                 return in.read(internalNioBuffer(index, length));
1825             } catch (ClosedChannelException ignored) {
1826                 return -1;
1827             }
1828         }
1829 
1830         @Override
1831         public int setBytes(int index, FileChannel in, long position, int length)
1832                 throws IOException {
1833             try {
1834                 return in.read(internalNioBuffer(index, length), position);
1835             } catch (ClosedChannelException ignored) {
1836                 return -1;
1837             }
1838         }
1839 
1840         @Override
1841         public int setCharSequence(int index, CharSequence sequence, Charset charset) {
1842             return setCharSequence0(index, sequence, charset, false);
1843         }
1844 
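             // UTF-8 and ASCII/Latin-1 take fast paths via ByteBufUtil; any other charset falls back to
             // encoding into a byte[] first. When 'expand' is true the buffer is grown to fit before
             // the bounds check.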
1845         private int setCharSequence0(int index, CharSequence sequence, Charset charset, boolean expand) {
1846             if (charset.equals(CharsetUtil.UTF_8)) {
1847                 int length = ByteBufUtil.utf8MaxBytes(sequence);
1848                 if (expand) {
1849                     ensureWritable0(length);
1850                     checkIndex0(index, length);
1851                 } else {
1852                     checkIndex(index, length);
1853                 }
1854                 return ByteBufUtil.writeUtf8(this, index, length, sequence, sequence.length());
1855             }
1856             if (charset.equals(CharsetUtil.US_ASCII) || charset.equals(CharsetUtil.ISO_8859_1)) {
1857                 int length = sequence.length();
1858                 if (expand) {
1859                     ensureWritable0(length);
1860                     checkIndex0(index, length);
1861                 } else {
1862                     checkIndex(index, length);
1863                 }
1864                 return ByteBufUtil.writeAscii(this, index, sequence, length);
1865             }
1866             byte[] bytes = sequence.toString().getBytes(charset);
1867             if (expand) {
1868                 ensureWritable0(bytes.length);
1869                 // setBytes(...) will take care of checking the indices.
1870             }
1871             setBytes(index, bytes);
1872             return bytes.length;
1873         }
1874 
1875         @Override
1876         public int writeCharSequence(CharSequence sequence, Charset charset) {
1877             int written = setCharSequence0(writerIndex, sequence, charset, true);
1878             writerIndex += written;
1879             return written;
1880         }
1881 
1882         @Override
1883         public int forEachByte(int index, int length, ByteProcessor processor) {
1884             checkIndex(index, length);
1885             int ret = rootParent().forEachByte(idx(index), length, processor);
1886             return forEachResult(ret);
1887         }
1888 
1889         @Override
1890         public int forEachByteDesc(int index, int length, ByteProcessor processor) {
1891             checkIndex(index, length);
1892             int ret = rootParent().forEachByteDesc(idx(index), length, processor);
1893             return forEachResult(ret);
1894         }
1895 
1896         @Override
1897         public ByteBuf setZero(int index, int length) {
1898             checkIndex(index, length);
1899             rootParent().setZero(idx(index), length);
1900             return this;
1901         }
1902 
1903         @Override
1904         public ByteBuf writeZero(int length) {
1905             ensureWritable(length);
1906             rootParent().setZero(idx(writerIndex), length);
1907             writerIndex += length;
1908             return this;
1909         }
1910 
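             // forEachByte/forEachByteDesc run against the root parent, so translate the returned
             // root-parent index back into this buffer's index space (or -1 if nothing was found).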
1911         private int forEachResult(int ret) {
1912             if (ret < startIndex) {
1913                 return -1;
1914             }
1915             return ret - startIndex;
1916         }
1917 
1918         @Override
1919         public boolean isContiguous() {
1920             return rootParent().isContiguous();
1921         }
1922 
1923         private int idx(int index) {
1924             return index + startIndex;
1925         }
1926 
1927         @Override
1928         protected void deallocate() {
1929             if (PlatformDependent.isJfrEnabled() && FreeBufferEvent.isEventEnabled()) {
1930                 FreeBufferEvent event = new FreeBufferEvent();
1931                 if (event.shouldCommit()) {
1932                     event.fill(this, AdaptiveByteBufAllocator.class);
1933                     event.commit();
1934                 }
1935             }
1936 
1937             if (chunk != null) {
1938                 chunk.releaseSegment(startIndex);
1939             }
1940             tmpNioBuf = null;
1941             chunk = null;
1942             rootParent = null;
1943             if (handle instanceof EnhancedHandle) {
1944                 EnhancedHandle<AdaptiveByteBuf> enhancedHandle = (EnhancedHandle<AdaptiveByteBuf>) handle;
1945                 enhancedHandle.unguardedRecycle(this);
1946             } else {
1947                 handle.recycle(this);
1948             }
1949         }
1950     }
1951 
1952     /**
1953      * The strategy for how {@link AdaptivePoolingAllocator} should allocate chunk buffers.
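          * <p>
          * For illustration only, a minimal heap-backed allocator could be written as the following
          * sketch (the use of {@link UnpooledHeapByteBuf} here is just an assumption for the example;
          * implementations may return any {@link AbstractByteBuf}):
          * <pre>{@code
          * ChunkAllocator heapChunks = (initialCapacity, maxCapacity) ->
          *         new UnpooledHeapByteBuf(ByteBufAllocator.DEFAULT, initialCapacity, maxCapacity);
          * }</pre>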
1954      */
1955     interface ChunkAllocator {
1956         /**
1957          * Allocate a buffer for a chunk. This can be any kind of {@link AbstractByteBuf} implementation.
1958          * @param initialCapacity The initial capacity of the returned {@link AbstractByteBuf}.
1959          * @param maxCapacity The maximum capacity of the returned {@link AbstractByteBuf}.
1960          * @return The buffer that represents the chunk memory.
1961          */
1962         AbstractByteBuf allocate(int initialCapacity, int maxCapacity);
1963     }
1964 }