1   /*
2    * Copyright 2022 The Netty Project
3    *
4    * The Netty Project licenses this file to you under the Apache License,
5    * version 2.0 (the "License"); you may not use this file except in compliance
6    * with the License. You may obtain a copy of the License at:
7    *
8    *   https://www.apache.org/licenses/LICENSE-2.0
9    *
10   * Unless required by applicable law or agreed to in writing, software
11   * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12   * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13   * License for the specific language governing permissions and limitations
14   * under the License.
15   */
16  package io.netty.buffer;
17  
18  import io.netty.util.ByteProcessor;
19  import io.netty.util.CharsetUtil;
20  import io.netty.util.IllegalReferenceCountException;
21  import io.netty.util.NettyRuntime;
22  import io.netty.util.Recycler;
23  import io.netty.util.Recycler.EnhancedHandle;
24  import io.netty.util.ReferenceCounted;
25  import io.netty.util.concurrent.FastThreadLocal;
26  import io.netty.util.concurrent.FastThreadLocalThread;
27  import io.netty.util.concurrent.MpscIntQueue;
28  import io.netty.util.internal.AtomicReferenceCountUpdater;
29  import io.netty.util.internal.ObjectPool;
30  import io.netty.util.internal.ObjectUtil;
31  import io.netty.util.internal.PlatformDependent;
32  import io.netty.util.internal.ReferenceCountUpdater;
33  import io.netty.util.internal.SystemPropertyUtil;
34  import io.netty.util.internal.ThreadExecutorMap;
35  import io.netty.util.internal.UnsafeReferenceCountUpdater;
36  import io.netty.util.internal.UnstableApi;
37  import io.netty.util.internal.VarHandleReferenceCountUpdater;
38  
39  import java.io.IOException;
40  import java.io.InputStream;
41  import java.io.OutputStream;
42  import java.lang.invoke.MethodHandles;
43  import java.lang.invoke.VarHandle;
44  import java.nio.ByteBuffer;
45  import java.nio.ByteOrder;
46  import java.nio.channels.ClosedChannelException;
47  import java.nio.channels.FileChannel;
48  import java.nio.channels.GatheringByteChannel;
49  import java.nio.channels.ScatteringByteChannel;
50  import java.nio.charset.Charset;
51  import java.util.Arrays;
52  import java.util.Queue;
53  import java.util.Set;
54  import java.util.concurrent.ConcurrentHashMap;
55  import java.util.concurrent.ConcurrentLinkedQueue;
56  import java.util.concurrent.ThreadLocalRandom;
57  import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
58  import java.util.concurrent.atomic.AtomicLong;
59  import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
60  import java.util.concurrent.atomic.LongAdder;
61  import java.util.concurrent.locks.StampedLock;
62  import java.util.function.IntSupplier;
63  
64  import static io.netty.util.internal.ReferenceCountUpdater.getUnsafeOffset;
65  import static java.util.concurrent.atomic.AtomicIntegerFieldUpdater.newUpdater;
66  
67  /**
68   * An auto-tuning pooling allocator that follows an anti-generational hypothesis.
69   * <p>
70   * The allocator is organized into a list of Magazines, and each magazine has a chunk buffer that it allocates
71   * buffers from.
72   * <p>
73   * The magazines hold the mutexes that ensure the thread-safety of the allocator, and each thread picks a magazine
74   * based on the id of the thread. This spreads the contention of multi-threaded access across the magazines.
75   * If contention is detected above a certain threshold, the number of magazines is increased in response to the
76   * contention.
77   * <p>
78   * The magazines maintain histograms of the sizes of the allocations they do. The histograms are used to compute the
79   * preferred chunk size. The preferred chunk size is one that is big enough to service 10 allocations of the
80   * 99-percentile size. This way, the chunk size is adapted to the allocation patterns.
81   * <p>
82   * Computing the preferred chunk size is a somewhat expensive operation. Therefore, the frequency with which this is
83   * done is also adapted to the allocation pattern. If a newly computed preferred chunk size is the same as the previous
84   * preferred chunk size, then the frequency is reduced. Otherwise, the frequency is increased.
85   * <p>
86   * This allows the allocator to quickly respond to changes in the application workload,
87   * without suffering undue overhead from maintaining its statistics.
88   * <p>
89   * Since magazines are "relatively thread-local", the allocator has a central queue that allows excess chunks from any
90   * magazine to be shared with other magazines.
91   * The {@link #createSharedChunkQueue()} method can be overridden to customize this queue.
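     * <p>
     * This class is internal. As a minimal usage sketch, assuming access through the public
     * {@link AdaptiveByteBufAllocator} wrapper (the calls below are plain {@link ByteBufAllocator} API,
     * nothing specific to this class):
     * <pre>{@code
     * ByteBufAllocator alloc = new AdaptiveByteBufAllocator();
     * ByteBuf buf = alloc.directBuffer(256); // typically served from a pooled chunk segment
     * try {
     *     buf.writeInt(42);
     * } finally {
     *     buf.release(); // hands the segment, and eventually the chunk, back to the pool
     * }
     * }</pre>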
92   */
93  @UnstableApi
94  final class AdaptivePoolingAllocator {
95      /**
96       * The 128 KiB minimum chunk size is chosen to encourage the system allocator to delegate to mmap for chunk
97       * allocations. For instance, glibc will do this.
98       * This pushes any fragmentation from chunk size deviations off physical memory, onto virtual memory,
99       * which is a much, much larger space. Chunks are also allocated in whole multiples of the minimum
100      * chunk size, which itself is a whole multiple of popular page sizes like 4 KiB, 16 KiB, and 64 KiB.
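         * For example, a 384 KiB chunk is 3 whole units of 128 KiB, and is therefore also 96 pages of 4 KiB,
         * 24 pages of 16 KiB, or 6 pages of 64 KiB.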
101      */
102     static final int MIN_CHUNK_SIZE = 128 * 1024;
103     private static final int EXPANSION_ATTEMPTS = 3;
104     private static final int INITIAL_MAGAZINES = 1;
105     private static final int RETIRE_CAPACITY = 256;
106     private static final int MAX_STRIPES = NettyRuntime.availableProcessors() * 2;
107     private static final int BUFS_PER_CHUNK = 8; // For large buffers, aim to have about this many buffers per chunk.
108 
109     /**
110      * The maximum size of a pooled chunk, in bytes. Allocations bigger than this will never be pooled.
111      * <p>
112      * This number is 8 MiB, and is derived from the limitations of internal histograms.
113      */
114     private static final int MAX_CHUNK_SIZE = 8 * 1024 * 1024; // 8 MiB.
115     private static final int MAX_POOLED_BUF_SIZE = MAX_CHUNK_SIZE / BUFS_PER_CHUNK;
116 
117     /**
118      * The capacity of the chunk reuse queues that allow chunks to be shared across magazines in a group.
119      * The default size is twice {@link NettyRuntime#availableProcessors()},
120      * same as the maximum number of magazines per magazine group.
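         * <p>
         * The capacity can be tuned with the {@code io.netty.allocator.chunkReuseQueueCapacity} system property,
         * for example {@code -Dio.netty.allocator.chunkReuseQueueCapacity=64}.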
121      */
122     private static final int CHUNK_REUSE_QUEUE = Math.max(2, SystemPropertyUtil.getInt(
123             "io.netty.allocator.chunkReuseQueueCapacity", NettyRuntime.availableProcessors() * 2));
124 
125     /**
126      * The capacity of the magazine-local buffer queue. This queue just pools the outer ByteBuf instance and not
127      * the actual memory and so helps to reduce GC pressure.
128      */
129     private static final int MAGAZINE_BUFFER_QUEUE_CAPACITY = SystemPropertyUtil.getInt(
130             "io.netty.allocator.magazineBufferQueueCapacity", 1024);
131 
132     /**
133      * The size classes are chosen based on the following observation:
134      * <p>
135      * Most allocations, particularly ones above 256 bytes, aim to be a power-of-2. However, many use cases, such
136      * as framing protocols, operate on or move power-of-2 sized payloads, to which they add a
137      * small amount of overhead, such as headers or checksums.
138      * This means we seem to get a lot of mileage out of having both power-of-2 sizes, and power-of-2-plus-a-bit.
139      * <p>
140      * Given the conflicting requirements of having as few chunks as possible, and having as little wasted
141      * memory within each chunk as possible, this set of size classes seems to strike a surprisingly good
142      * balance for the use cases tested so far.
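         * <p>
         * As a worked example of the resulting mapping (see {@link #sizeClassIndexOf(int)}): a 1000 byte request
         * falls into the 1024 size class, while a 1025 byte request falls into the 1152 (1024 + 128) class rather
         * than being rounded all the way up to 2048.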
143      */
144     private static final int[] SIZE_CLASSES = {
145             32,
146             64,
147             128,
148             256,
149             512,
150             640, // 512 + 128
151             1024,
152             1152, // 1024 + 128
153             2048,
154             2304, // 2048 + 256
155             4096,
156             4352, // 4096 + 256
157             8192,
158             8704, // 8192 + 512
159             16384,
160             16896, // 16384 + 512
161     };
162 
163     private static final int SIZE_CLASSES_COUNT = SIZE_CLASSES.length;
164     private static final byte[] SIZE_INDEXES = new byte[(SIZE_CLASSES[SIZE_CLASSES_COUNT - 1] / 32) + 1];
165 
166     static {
167         if (MAGAZINE_BUFFER_QUEUE_CAPACITY < 2) {
168             throw new IllegalArgumentException("MAGAZINE_BUFFER_QUEUE_CAPACITY: " + MAGAZINE_BUFFER_QUEUE_CAPACITY
169                     + " (expected: >= " + 2 + ')');
170         }
171         int lastIndex = 0;
172         for (int i = 0; i < SIZE_CLASSES_COUNT; i++) {
173             int sizeClass = SIZE_CLASSES[i];
174             //noinspection ConstantValue
175             assert (sizeClass & 31) == 0 : "Size class must be a multiple of 32";
176             int sizeIndex = sizeIndexOf(sizeClass);
177             Arrays.fill(SIZE_INDEXES, lastIndex + 1, sizeIndex + 1, (byte) i);
178             lastIndex = sizeIndex;
179         }
180     }
181 
182     private final ChunkAllocator chunkAllocator;
183     private final ChunkRegistry chunkRegistry;
184     private final MagazineGroup[] sizeClassedMagazineGroups;
185     private final MagazineGroup largeBufferMagazineGroup;
186     private final FastThreadLocal<MagazineGroup[]> threadLocalGroup;
187 
188     AdaptivePoolingAllocator(ChunkAllocator chunkAllocator, boolean useCacheForNonEventLoopThreads) {
189         this.chunkAllocator = ObjectUtil.checkNotNull(chunkAllocator, "chunkAllocator");
190         chunkRegistry = new ChunkRegistry();
191         sizeClassedMagazineGroups = createMagazineGroupSizeClasses(this, false);
192         largeBufferMagazineGroup = new MagazineGroup(
193                 this, chunkAllocator, new HistogramChunkControllerFactory(true), false);
194 
195         threadLocalGroup = new FastThreadLocal<MagazineGroup[]>() {
196             @Override
197             protected MagazineGroup[] initialValue() {
198                 if (useCacheForNonEventLoopThreads || ThreadExecutorMap.currentExecutor() != null) {
199                     return createMagazineGroupSizeClasses(AdaptivePoolingAllocator.this, true);
200                 }
201                 return null;
202             }
203 
204             @Override
205             protected void onRemoval(final MagazineGroup[] groups) throws Exception {
206                 if (groups != null) {
207                     for (MagazineGroup group : groups) {
208                         group.free();
209                     }
210                 }
211             }
212         };
213     }
214 
215     private static MagazineGroup[] createMagazineGroupSizeClasses(
216             AdaptivePoolingAllocator allocator, boolean isThreadLocal) {
217         MagazineGroup[] groups = new MagazineGroup[SIZE_CLASSES.length];
218         for (int i = 0; i < SIZE_CLASSES.length; i++) {
219             int segmentSize = SIZE_CLASSES[i];
220             groups[i] = new MagazineGroup(allocator, allocator.chunkAllocator,
221                     new SizeClassChunkControllerFactory(segmentSize), isThreadLocal);
222         }
223         return groups;
224     }
225 
226     /**
227      * Create a thread-safe multi-producer, multi-consumer queue to hold chunks that spill over from the
228      * internal Magazines.
229      * <p>
230      * Each Magazine can only hold two chunks at any one time: the chunk it currently allocates from,
231      * and the next-in-line chunk which will be used for allocation once the current one has been used up.
232      * This queue will be used by magazines to share any excess chunks they allocate, so that they don't need to
233      * allocate new chunks when their current and next-in-line chunks have both been used up.
234      * <p>
235      * The simplest implementation of this method is to return a new {@link ConcurrentLinkedQueue}.
236      * However, the {@code CLQ} is unbounded, and this means there's no limit to how many chunks can be cached in this
237      * queue.
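         * For illustration, an unbounded variant versus the bounded form that the default implementation below uses:
         * <pre>{@code
         * // Unbounded: simplest, but puts no limit on how much chunk memory can be retained.
         * Queue<Chunk> unbounded = new ConcurrentLinkedQueue<Chunk>();
         * // Bounded: chunks that don't fit are released instead of cached.
         * Queue<Chunk> bounded = PlatformDependent.newFixedMpmcQueue(CHUNK_REUSE_QUEUE);
         * }</pre>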
238      * <p>
239      * Each chunk in this queue can be up to {@link #MAX_CHUNK_SIZE} in size, so it is recommended to use a bounded
240      * queue to limit the maximum memory usage.
241      * <p>
242      * The default implementation will create a bounded queue with a capacity of {@link #CHUNK_REUSE_QUEUE}.
243      *
244      * @return A new multi-producer, multi-consumer queue.
245      */
246     private static Queue<Chunk> createSharedChunkQueue() {
247         return PlatformDependent.newFixedMpmcQueue(CHUNK_REUSE_QUEUE);
248     }
249 
250     ByteBuf allocate(int size, int maxCapacity) {
251         return allocate(size, maxCapacity, Thread.currentThread(), null);
252     }
253 
254     private AdaptiveByteBuf allocate(int size, int maxCapacity, Thread currentThread, AdaptiveByteBuf buf) {
255         AdaptiveByteBuf allocated = null;
256         if (size <= MAX_POOLED_BUF_SIZE) {
257             final int index = sizeClassIndexOf(size);
258             MagazineGroup[] magazineGroups;
259             if (!FastThreadLocalThread.currentThreadWillCleanupFastThreadLocals() ||
260                     (magazineGroups = threadLocalGroup.get()) == null) {
261                 magazineGroups = sizeClassedMagazineGroups;
262             }
263             if (index < magazineGroups.length) {
264                 allocated = magazineGroups[index].allocate(size, maxCapacity, currentThread, buf);
265             } else {
266                 allocated = largeBufferMagazineGroup.allocate(size, maxCapacity, currentThread, buf);
267             }
268         }
269         if (allocated == null) {
270             allocated = allocateFallback(size, maxCapacity, currentThread, buf);
271         }
272         return allocated;
273     }
274 
275     private static int sizeIndexOf(final int size) {
276         // Align the size up to the next multiple of 32, then divide by 32 to get the size index.
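            // For example, sizeIndexOf(32) == (32 + 31) >> 5 == 1, while sizeIndexOf(33) == 64 >> 5 == 2,
            // i.e. 33 bytes rounds up to the second 32-byte step.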
277         return size + 31 >> 5;
278     }
279 
280     static int sizeClassIndexOf(int size) {
281         int sizeIndex = sizeIndexOf(size);
282         if (sizeIndex < SIZE_INDEXES.length) {
283             return SIZE_INDEXES[sizeIndex];
284         }
285         return SIZE_CLASSES_COUNT;
286     }
287 
288     static int[] getSizeClasses() {
289         return SIZE_CLASSES.clone();
290     }
291 
292     private AdaptiveByteBuf allocateFallback(int size, int maxCapacity, Thread currentThread,
293                                              AdaptiveByteBuf buf) {
294         // If we don't already have a buffer, obtain one from the most conveniently available magazine.
295         Magazine magazine;
296         if (buf != null) {
297             Chunk chunk = buf.chunk;
298             if (chunk == null || chunk == Magazine.MAGAZINE_FREED || (magazine = chunk.currentMagazine()) == null) {
299                 magazine = getFallbackMagazine(currentThread);
300             }
301         } else {
302             magazine = getFallbackMagazine(currentThread);
303             buf = magazine.newBuffer();
304         }
305         // Create a one-off chunk for this allocation.
306         AbstractByteBuf innerChunk = chunkAllocator.allocate(size, maxCapacity);
307         Chunk chunk = new Chunk(innerChunk, magazine, false, chunkSize -> true);
308         chunkRegistry.add(chunk);
309         try {
310             chunk.readInitInto(buf, size, size, maxCapacity);
311         } finally {
312             // As the chunk is a one-off we always need to call release explicitly, as readInitInto(...)
313             // takes care of a single retain when successful. Once the AdaptiveByteBuf is released it will
314             // completely release the Chunk and thus the contained innerChunk.
315             chunk.release();
316         }
317         return buf;
318     }
319 
320     private Magazine getFallbackMagazine(Thread currentThread) {
321         Magazine[] mags = largeBufferMagazineGroup.magazines;
322         return mags[(int) currentThread.getId() & mags.length - 1];
323     }
324 
325     /**
326      * Allocate into the given buffer. Used by {@link AdaptiveByteBuf#capacity(int)}.
327      */
328     void reallocate(int size, int maxCapacity, AdaptiveByteBuf into) {
329         AdaptiveByteBuf result = allocate(size, maxCapacity, Thread.currentThread(), into);
330         assert result == into: "Re-allocation created separate buffer instance";
331     }
332 
333     long usedMemory() {
334         return chunkRegistry.totalCapacity();
335     }
336 
337     // Ensure that we release all previous pooled resources when this object is finalized. This is needed as otherwise
338     // we might end up with leaks. While these leaks are usually harmless in reality it would still at least be
339     // very confusing for users.
340     @SuppressWarnings({"FinalizeDeclaration", "deprecation"})
341     @Override
342     protected void finalize() throws Throwable {
343         try {
344             super.finalize();
345         } finally {
346             free();
347         }
348     }
349 
350     private void free() {
351         largeBufferMagazineGroup.free();
352     }
353 
354     static int sizeToBucket(int size) {
355         return HistogramChunkController.sizeToBucket(size);
356     }
357 
358     private static final class MagazineGroup {
359         private final AdaptivePoolingAllocator allocator;
360         private final ChunkAllocator chunkAllocator;
361         private final ChunkControllerFactory chunkControllerFactory;
362         private final Queue<Chunk> chunkReuseQueue;
363         private final StampedLock magazineExpandLock;
364         private final Magazine threadLocalMagazine;
365         private volatile Magazine[] magazines;
366         private volatile boolean freed;
367 
368         MagazineGroup(AdaptivePoolingAllocator allocator,
369                       ChunkAllocator chunkAllocator,
370                       ChunkControllerFactory chunkControllerFactory,
371                       boolean isThreadLocal) {
372             this.allocator = allocator;
373             this.chunkAllocator = chunkAllocator;
374             this.chunkControllerFactory = chunkControllerFactory;
375             chunkReuseQueue = createSharedChunkQueue();
376             if (isThreadLocal) {
377                 magazineExpandLock = null;
378                 threadLocalMagazine = new Magazine(this, false, chunkReuseQueue, chunkControllerFactory.create(this));
379             } else {
380                 magazineExpandLock = new StampedLock();
381                 threadLocalMagazine = null;
382                 Magazine[] mags = new Magazine[INITIAL_MAGAZINES];
383                 for (int i = 0; i < mags.length; i++) {
384                     mags[i] = new Magazine(this, true, chunkReuseQueue, chunkControllerFactory.create(this));
385                 }
386                 magazines = mags;
387             }
388         }
389 
390         public AdaptiveByteBuf allocate(int size, int maxCapacity, Thread currentThread, AdaptiveByteBuf buf) {
391             boolean reallocate = buf != null;
392 
393             // Path for thread-local allocation.
394             Magazine tlMag = threadLocalMagazine;
395             if (tlMag != null) {
396                 if (buf == null) {
397                     buf = tlMag.newBuffer();
398                 }
399                 boolean allocated = tlMag.tryAllocate(size, maxCapacity, buf, reallocate);
400                 assert allocated : "Allocation of threadLocalMagazine must always succeed";
401                 return buf;
402             }
403 
404             // Path for concurrent allocation.
405             long threadId = currentThread.getId();
406             Magazine[] mags;
407             int expansions = 0;
408             do {
409                 mags = magazines;
410                 int mask = mags.length - 1;
411                 int index = (int) (threadId & mask);
412                 for (int i = 0, m = mags.length << 1; i < m; i++) {
413                     Magazine mag = mags[index + i & mask];
414                     if (buf == null) {
415                         buf = mag.newBuffer();
416                     }
417                     if (mag.tryAllocate(size, maxCapacity, buf, reallocate)) {
418                         // Was able to allocate.
419                         return buf;
420                     }
421                 }
422                 expansions++;
423             } while (expansions <= EXPANSION_ATTEMPTS && tryExpandMagazines(mags.length));
424 
425             // The magazines failed us; contention is too high and we don't want to spend more effort expanding the array.
426             if (!reallocate && buf != null) {
427                 buf.release(); // Release the previously claimed buffer before we return.
428             }
429             return null;
430         }
431 
432         private boolean tryExpandMagazines(int currentLength) {
433             if (currentLength >= MAX_STRIPES) {
434                 return true;
435             }
436             final Magazine[] mags;
437             long writeLock = magazineExpandLock.tryWriteLock();
438             if (writeLock != 0) {
439                 try {
440                     mags = magazines;
441                     if (mags.length >= MAX_STRIPES || mags.length > currentLength || freed) {
442                         return true;
443                     }
444                     Magazine firstMagazine = mags[0];
445                     Magazine[] expanded = new Magazine[mags.length * 2];
446                     for (int i = 0, l = expanded.length; i < l; i++) {
447                         Magazine m = new Magazine(this, true, chunkReuseQueue, chunkControllerFactory.create(this));
448                         firstMagazine.initializeSharedStateIn(m);
449                         expanded[i] = m;
450                     }
451                     magazines = expanded;
452                 } finally {
453                     magazineExpandLock.unlockWrite(writeLock);
454                 }
455                 for (Magazine magazine : mags) {
456                     magazine.free();
457                 }
458             }
459             return true;
460         }
461 
462         boolean offerToQueue(Chunk buffer) {
463             if (freed) {
464                 return false;
465             }
466 
467             boolean isAdded = chunkReuseQueue.offer(buffer);
468             if (freed && isAdded) {
469                 // Help to free the reuse queue.
470                 freeChunkReuseQueue();
471             }
472             return isAdded;
473         }
474 
475         private void free() {
476             freed = true;
477             if (threadLocalMagazine != null) {
478                 threadLocalMagazine.free();
479             } else {
480                 long stamp = magazineExpandLock.writeLock();
481                 try {
482                     Magazine[] mags = magazines;
483                     for (Magazine magazine : mags) {
484                         magazine.free();
485                     }
486                 } finally {
487                     magazineExpandLock.unlockWrite(stamp);
488                 }
489             }
490             freeChunkReuseQueue();
491         }
492 
493         private void freeChunkReuseQueue() {
494             for (;;) {
495                 Chunk chunk = chunkReuseQueue.poll();
496                 if (chunk == null) {
497                     break;
498                 }
499                 chunk.release();
500             }
501         }
502     }
503 
504     private interface ChunkControllerFactory {
505         ChunkController create(MagazineGroup group);
506     }
507 
508     private interface ChunkController {
509         /**
510          * Compute the "fast max capacity" value for the buffer.
511          */
512         int computeBufferCapacity(int requestedSize, int maxCapacity, boolean isReallocation);
513 
514         /**
515          * Initialize the given chunk controller with shared statistics state (if any) from this controller.
516          */
517         void initializeSharedStateIn(ChunkController chunkController);
518 
519         /**
520          * Allocate a new {@link Chunk} for the given {@link Magazine}.
521          */
522         Chunk newChunkAllocation(int promptingSize, Magazine magazine);
523     }
524 
525     private interface ChunkReleasePredicate {
526         boolean shouldReleaseChunk(int chunkSize);
527     }
528 
529     private static final class SizeClassChunkControllerFactory implements ChunkControllerFactory {
530         // To amortize activation/deactivation of chunks, we should have a minimum number of segments per chunk.
531         // We choose 32 because it seems neither too small nor too big.
532         // For segments of 16 KiB, the chunks will be half a megabyte.
533         private static final int MIN_SEGMENTS_PER_CHUNK = 32;
534         private final int segmentSize;
535         private final int chunkSize;
536         private final int[] segmentOffsets;
537 
538         private SizeClassChunkControllerFactory(int segmentSize) {
539             this.segmentSize = ObjectUtil.checkPositive(segmentSize, "segmentSize");
540             this.chunkSize = Math.max(MIN_CHUNK_SIZE, segmentSize * MIN_SEGMENTS_PER_CHUNK);
541             int segmentsCount = chunkSize / segmentSize;
542             this.segmentOffsets = new int[segmentsCount];
543             for (int i = 0; i < segmentsCount; i++) {
544                 segmentOffsets[i] = i * segmentSize;
545             }
546         }
547 
548         @Override
549         public ChunkController create(MagazineGroup group) {
550             return new SizeClassChunkController(group, segmentSize, chunkSize, segmentOffsets);
551         }
552     }
553 
554     private static final class SizeClassChunkController implements ChunkController {
555 
556         private final ChunkAllocator chunkAllocator;
557         private final int segmentSize;
558         private final int chunkSize;
559         private final ChunkRegistry chunkRegistry;
560         private final int[] segmentOffsets;
561 
562         private SizeClassChunkController(MagazineGroup group, int segmentSize, int chunkSize, int[] segmentOffsets) {
563             chunkAllocator = group.chunkAllocator;
564             this.segmentSize = segmentSize;
565             this.chunkSize = chunkSize;
566             chunkRegistry = group.allocator.chunkRegistry;
567             this.segmentOffsets = segmentOffsets;
568         }
569 
570         @Override
571         public int computeBufferCapacity(
572                 int requestedSize, int maxCapacity, boolean isReallocation) {
573             return Math.min(segmentSize, maxCapacity);
574         }
575 
576         @Override
577         public void initializeSharedStateIn(ChunkController chunkController) {
578             // NOOP
579         }
580 
581         @Override
582         public Chunk newChunkAllocation(int promptingSize, Magazine magazine) {
583             AbstractByteBuf chunkBuffer = chunkAllocator.allocate(chunkSize, chunkSize);
584             assert chunkBuffer.capacity() == chunkSize;
585             SizeClassedChunk chunk = new SizeClassedChunk(chunkBuffer, magazine, true,
586                     segmentSize, segmentOffsets, size -> false);
587             chunkRegistry.add(chunk);
588             return chunk;
589         }
590     }
591 
592     private static final class HistogramChunkControllerFactory implements ChunkControllerFactory {
593         private final boolean shareable;
594 
595         private HistogramChunkControllerFactory(boolean shareable) {
596             this.shareable = shareable;
597         }
598 
599         @Override
600         public ChunkController create(MagazineGroup group) {
601             return new HistogramChunkController(group, shareable);
602         }
603     }
604 
605     private static final class HistogramChunkController implements ChunkController, ChunkReleasePredicate {
606         private static final int MIN_DATUM_TARGET = 1024;
607         private static final int MAX_DATUM_TARGET = 65534;
608         private static final int INIT_DATUM_TARGET = 9;
609         private static final int HISTO_BUCKET_COUNT = 16;
610         private static final int[] HISTO_BUCKETS = {
611                 16 * 1024,
612                 24 * 1024,
613                 32 * 1024,
614                 48 * 1024,
615                 64 * 1024,
616                 96 * 1024,
617                 128 * 1024,
618                 192 * 1024,
619                 256 * 1024,
620                 384 * 1024,
621                 512 * 1024,
622                 768 * 1024,
623                 1024 * 1024,
624                 1792 * 1024,
625                 2048 * 1024,
626                 3072 * 1024
627         };
628 
629         private final MagazineGroup group;
630         private final boolean shareable;
631         private final short[][] histos = {
632                 new short[HISTO_BUCKET_COUNT], new short[HISTO_BUCKET_COUNT],
633                 new short[HISTO_BUCKET_COUNT], new short[HISTO_BUCKET_COUNT],
634         };
635         private final ChunkRegistry chunkRegistry;
636         private short[] histo = histos[0];
637         private final int[] sums = new int[HISTO_BUCKET_COUNT];
638 
639         private int histoIndex;
640         private int datumCount;
641         private int datumTarget = INIT_DATUM_TARGET;
642         private boolean hasHadRotation;
643         private volatile int sharedPrefChunkSize = MIN_CHUNK_SIZE;
644         private volatile int localPrefChunkSize = MIN_CHUNK_SIZE;
645         private volatile int localUpperBufSize;
646 
647         private HistogramChunkController(MagazineGroup group, boolean shareable) {
648             this.group = group;
649             this.shareable = shareable;
650             chunkRegistry = group.allocator.chunkRegistry;
651         }
652 
653         @Override
654         public int computeBufferCapacity(
655                 int requestedSize, int maxCapacity, boolean isReallocation) {
656             if (!isReallocation) {
657                 // Only record allocation size if it's not caused by a reallocation that was triggered by capacity
658                 // change of the buffer.
659                 recordAllocationSize(requestedSize);
660             }
661 
662             // Predict starting capacity from localUpperBufSize, but place limits on the max starting capacity
663             // based on the requested size, because localUpperBufSize can potentially be quite large.
664             int startCapLimits;
665             if (requestedSize <= 32768) { // Less than or equal to 32 KiB.
666                 startCapLimits = 65536; // Use at most 64 KiB, which is also the AdaptiveRecvByteBufAllocator max.
667             } else {
668                 startCapLimits = requestedSize * 2; // Otherwise use at most twice the requested memory.
669             }
670             int startingCapacity = Math.min(startCapLimits, localUpperBufSize);
671             startingCapacity = Math.max(requestedSize, Math.min(maxCapacity, startingCapacity));
672             return startingCapacity;
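                // For example, a 1 KiB request with a recorded localUpperBufSize of 8 KiB (and a sufficiently
                // large maxCapacity) starts out with an 8 KiB capacity.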
673         }
674 
675         private void recordAllocationSize(int bufferSizeToRecord) {
676             // Use the preserved size from the reused AdaptiveByteBuf, if available.
677             // Otherwise, use the requested buffer size.
678             // This way, we better take into account how much capacity the buffers actually end up using.
679             if (bufferSizeToRecord == 0) {
680                 return;
681             }
682             int bucket = sizeToBucket(bufferSizeToRecord);
683             histo[bucket]++;
684             if (datumCount++ == datumTarget) {
685                 rotateHistograms();
686             }
687         }
688 
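            // Maps a recorded allocation size to the index of the smallest histogram bucket that can hold it.
            // For example, a 20 KiB allocation falls between the 16 KiB and 24 KiB buckets and is counted in the
            // 24 KiB bucket (index 1), while anything at or below 16 KiB lands in bucket 0.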
689         static int sizeToBucket(int size) {
690             int index = binarySearchInsertionPoint(Arrays.binarySearch(HISTO_BUCKETS, size));
691             return index >= HISTO_BUCKETS.length ? HISTO_BUCKETS.length - 1 : index;
692         }
693 
694         private static int binarySearchInsertionPoint(int index) {
695             if (index < 0) {
696                 index = -(index + 1);
697             }
698             return index;
699         }
700 
701         static int bucketToSize(int sizeBucket) {
702             return HISTO_BUCKETS[sizeBucket];
703         }
704 
705         private void rotateHistograms() {
706             short[][] hs = histos;
707             for (int i = 0; i < HISTO_BUCKET_COUNT; i++) {
708                 sums[i] = (hs[0][i] & 0xFFFF) + (hs[1][i] & 0xFFFF) + (hs[2][i] & 0xFFFF) + (hs[3][i] & 0xFFFF);
709             }
710             int sum = 0;
711             for (int count : sums) {
712                 sum += count;
713             }
714             int targetPercentile = (int) (sum * 0.99);
715             int sizeBucket = 0;
716             for (; sizeBucket < sums.length; sizeBucket++) {
717                 if (sums[sizeBucket] > targetPercentile) {
718                     break;
719                 }
720                 targetPercentile -= sums[sizeBucket];
721             }
722             hasHadRotation = true;
723             int percentileSize = bucketToSize(sizeBucket);
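                // Aim for a chunk that can hold about BUFS_PER_CHUNK buffers of the percentile size; for example,
                // a 99th percentile of 64 KiB yields max(64 KiB * 8, 128 KiB) = 512 KiB.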
724             int prefChunkSize = Math.max(percentileSize * BUFS_PER_CHUNK, MIN_CHUNK_SIZE);
725             localUpperBufSize = percentileSize;
726             localPrefChunkSize = prefChunkSize;
727             if (shareable) {
728                 for (Magazine mag : group.magazines) {
729                     HistogramChunkController statistics = (HistogramChunkController) mag.chunkController;
730                     prefChunkSize = Math.max(prefChunkSize, statistics.localPrefChunkSize);
731                 }
732             }
733             if (sharedPrefChunkSize != prefChunkSize) {
734                 // Preferred chunk size changed. Increase check frequency.
735                 datumTarget = Math.max(datumTarget >> 1, MIN_DATUM_TARGET);
736                 sharedPrefChunkSize = prefChunkSize;
737             } else {
738                 // Preferred chunk size did not change. Check less often.
739                 datumTarget = Math.min(datumTarget << 1, MAX_DATUM_TARGET);
740             }
741 
742             histoIndex = histoIndex + 1 & 3;
743             histo = histos[histoIndex];
744             datumCount = 0;
745             Arrays.fill(histo, (short) 0);
746         }
747 
748         /**
749          * Get the preferred chunk size, based on statistics from the {@linkplain #recordAllocationSize(int) recorded}
750          * allocation sizes.
751          * <p>
752          * This method must be thread-safe.
753          *
754          * @return The currently preferred chunk allocation size.
755          */
756         int preferredChunkSize() {
757             return sharedPrefChunkSize;
758         }
759 
760         @Override
761         public void initializeSharedStateIn(ChunkController chunkController) {
762             HistogramChunkController statistics = (HistogramChunkController) chunkController;
763             int sharedPrefChunkSize = this.sharedPrefChunkSize;
764             statistics.localPrefChunkSize = sharedPrefChunkSize;
765             statistics.sharedPrefChunkSize = sharedPrefChunkSize;
766         }
767 
768         @Override
769         public Chunk newChunkAllocation(int promptingSize, Magazine magazine) {
770             int size = Math.max(promptingSize * BUFS_PER_CHUNK, preferredChunkSize());
771             int minChunks = size / MIN_CHUNK_SIZE;
772             if (MIN_CHUNK_SIZE * minChunks < size) {
773                 // Round up to nearest whole MIN_CHUNK_SIZE unit. The MIN_CHUNK_SIZE is an even multiple of many
774                 // popular small page sizes, like 4k, 16k, and 64k, which makes it easier for the system allocator
775                 // to manage the memory in terms of whole pages. This reduces memory fragmentation,
776                 // but without the potentially high overhead that power-of-2 chunk sizes would bring.
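                    // For example, a computed size of 300 KiB is rounded up to 3 * 128 KiB = 384 KiB.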
777                 size = MIN_CHUNK_SIZE * (1 + minChunks);
778             }
779 
780             // Limit chunks to the max size, even if the histogram suggests to go above it.
781             size = Math.min(size, MAX_CHUNK_SIZE);
782 
783             // If we haven't rotated the histogram yet, optimistically record this chunk size as our preferred size.
784             if (!hasHadRotation && sharedPrefChunkSize == MIN_CHUNK_SIZE) {
785                 sharedPrefChunkSize = size;
786             }
787 
788             ChunkAllocator chunkAllocator = group.chunkAllocator;
789             Chunk chunk = new Chunk(chunkAllocator.allocate(size, size), magazine, true, this);
790             chunkRegistry.add(chunk);
791             return chunk;
792         }
793 
794         @Override
795         public boolean shouldReleaseChunk(int chunkSize) {
796             int preferredSize = preferredChunkSize();
797             int givenChunks = chunkSize / MIN_CHUNK_SIZE;
798             int preferredChunks = preferredSize / MIN_CHUNK_SIZE;
799             int deviation = Math.abs(givenChunks - preferredChunks);
800 
801             // Retire chunks with a 5% probability per unit of MIN_CHUNK_SIZE deviation from preference.
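                // For example, a chunk two MIN_CHUNK_SIZE units away from the preferred size is released with
                // probability 2 / 20 = 10%.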
802             return deviation != 0 &&
803                     ThreadLocalRandom.current().nextDouble() * 20.0 < deviation;
804         }
805     }
806 
807     private static final class Magazine {
808         private static final AtomicReferenceFieldUpdater<Magazine, Chunk> NEXT_IN_LINE;
809         static {
810             NEXT_IN_LINE = AtomicReferenceFieldUpdater.newUpdater(Magazine.class, Chunk.class, "nextInLine");
811         }
812         private static final Chunk MAGAZINE_FREED = new Chunk();
813 
814         private static final Recycler<AdaptiveByteBuf> EVENT_LOOP_LOCAL_BUFFER_POOL = new Recycler<AdaptiveByteBuf>() {
815             @Override
816             protected AdaptiveByteBuf newObject(Handle<AdaptiveByteBuf> handle) {
817                 return new AdaptiveByteBuf(handle);
818             }
819         };
820 
821         private Chunk current;
822         @SuppressWarnings("unused") // updated via NEXT_IN_LINE
823         private volatile Chunk nextInLine;
824         private final MagazineGroup group;
825         private final ChunkController chunkController;
826         private final AtomicLong usedMemory;
827         private final StampedLock allocationLock;
828         private final Queue<AdaptiveByteBuf> bufferQueue;
829         private final ObjectPool.Handle<AdaptiveByteBuf> handle;
830         private final Queue<Chunk> sharedChunkQueue;
831 
832         Magazine(MagazineGroup group, boolean shareable, Queue<Chunk> sharedChunkQueue,
833                  ChunkController chunkController) {
834             this.group = group;
835             this.chunkController = chunkController;
836 
837             if (shareable) {
838                 // We only need the StampedLock if this Magazine will be shared across threads.
839                 allocationLock = new StampedLock();
840                 bufferQueue = PlatformDependent.newFixedMpmcQueue(MAGAZINE_BUFFER_QUEUE_CAPACITY);
841                 handle = new ObjectPool.Handle<AdaptiveByteBuf>() {
842                     @Override
843                     public void recycle(AdaptiveByteBuf self) {
844                         bufferQueue.offer(self);
845                     }
846                 };
847             } else {
848                 allocationLock = null;
849                 bufferQueue = null;
850                 handle = null;
851             }
852             usedMemory = new AtomicLong();
853             this.sharedChunkQueue = sharedChunkQueue;
854         }
855 
856         public boolean tryAllocate(int size, int maxCapacity, AdaptiveByteBuf buf, boolean reallocate) {
857             if (allocationLock == null) {
858                 // This magazine is not shared across threads, just allocate directly.
859                 return allocate(size, maxCapacity, buf, reallocate);
860             }
861 
862             // Try to retrieve the lock and if successful allocate.
863             long writeLock = allocationLock.tryWriteLock();
864             if (writeLock != 0) {
865                 try {
866                     return allocate(size, maxCapacity, buf, reallocate);
867                 } finally {
868                     allocationLock.unlockWrite(writeLock);
869                 }
870             }
871             return allocateWithoutLock(size, maxCapacity, buf);
872         }
873 
874         private boolean allocateWithoutLock(int size, int maxCapacity, AdaptiveByteBuf buf) {
875             Chunk curr = NEXT_IN_LINE.getAndSet(this, null);
876             if (curr == MAGAZINE_FREED) {
877                 // Allocation raced with a stripe-resize that freed this magazine.
878                 restoreMagazineFreed();
879                 return false;
880             }
881             if (curr == null) {
882                 curr = sharedChunkQueue.poll();
883                 if (curr == null) {
884                     return false;
885                 }
886                 curr.attachToMagazine(this);
887             }
888             boolean allocated = false;
889             int remainingCapacity = curr.remainingCapacity();
890             int startingCapacity = chunkController.computeBufferCapacity(
891                     size, maxCapacity, true /* never update stats as we don't hold the magazine lock */);
892             if (remainingCapacity >= size) {
893                 curr.readInitInto(buf, size, Math.min(remainingCapacity, startingCapacity), maxCapacity);
894                 allocated = true;
895             }
896             try {
897                 if (remainingCapacity >= RETIRE_CAPACITY) {
898                     transferToNextInLineOrRelease(curr);
899                     curr = null;
900                 }
901             } finally {
902                 if (curr != null) {
903                     curr.releaseFromMagazine();
904                 }
905             }
906             return allocated;
907         }
908 
909         private boolean allocate(int size, int maxCapacity, AdaptiveByteBuf buf, boolean reallocate) {
910             int startingCapacity = chunkController.computeBufferCapacity(size, maxCapacity, reallocate);
911             Chunk curr = current;
912             if (curr != null) {
913                 // We have a Chunk that has some space left.
914                 int remainingCapacity = curr.remainingCapacity();
915                 if (remainingCapacity > startingCapacity) {
916                     curr.readInitInto(buf, size, startingCapacity, maxCapacity);
917                     // We still have some bytes left that we can use for the next allocation, just early return.
918                     return true;
919                 }
920 
921                 // At this point we know that this will be the last time current will be used, so directly set it to
922                 // null and release it once we are done.
923                 current = null;
924                 if (remainingCapacity >= size) {
925                     try {
926                         curr.readInitInto(buf, size, remainingCapacity, maxCapacity);
927                         return true;
928                     } finally {
929                         curr.releaseFromMagazine();
930                     }
931                 }
932 
933                 // Check if we should either retain the chunk in the nextInLine cache or release it.
934                 if (remainingCapacity < RETIRE_CAPACITY) {
935                     curr.releaseFromMagazine();
936                 } else {
937                     // See if it makes sense to transfer the Chunk to the nextInLine cache for later usage.
938                     // This method will release curr if this is not the case.
939                     transferToNextInLineOrRelease(curr);
940                 }
941             }
942 
943             assert current == null;
944             // The fast-path for allocations did not work.
945             //
946             // Try to fetch the next "Magazine local" Chunk first, if this fails because we don't have a
947             // next-in-line chunk available, we will poll our centralQueue.
948             // If this fails as well we will just allocate a new Chunk.
949             //
950             // In any case we will store the Chunk as the current so it will be used again for the next allocation and
951             // thus be "reserved" by this Magazine for exclusive usage.
952             curr = NEXT_IN_LINE.getAndSet(this, null);
953             if (curr != null) {
954                 if (curr == MAGAZINE_FREED) {
955                     // Allocation raced with a stripe-resize that freed this magazine.
956                     restoreMagazineFreed();
957                     return false;
958                 }
959 
960                 int remainingCapacity = curr.remainingCapacity();
961                 if (remainingCapacity > startingCapacity) {
962                     // We have a Chunk that has some space left.
963                     curr.readInitInto(buf, size, startingCapacity, maxCapacity);
964                     current = curr;
965                     return true;
966                 }
967 
968                 if (remainingCapacity >= size) {
969                     // At this point we know that this will be the last time curr will be used, so directly set it to
970                     // null and release it once we are done.
971                     try {
972                         curr.readInitInto(buf, size, remainingCapacity, maxCapacity);
973                         return true;
974                     } finally {
975                         // Release in a finally block so that even if readInitInto(...) throws we still correctly
976                         // release the current chunk before nulling it out.
977                         curr.releaseFromMagazine();
978                     }
979                 } else {
980                     // Release it as it's too small.
981                     curr.releaseFromMagazine();
982                 }
983             }
984 
985             // Now try to poll from the central queue first
986             curr = sharedChunkQueue.poll();
987             if (curr == null) {
988                 curr = chunkController.newChunkAllocation(size, this);
989             } else {
990                 curr.attachToMagazine(this);
991 
992                 int remainingCapacity = curr.remainingCapacity();
993                 if (remainingCapacity == 0 || remainingCapacity < size) {
994                     // Check if we should either retain the chunk in the nextInLine cache or release it.
995                     if (remainingCapacity < RETIRE_CAPACITY) {
996                         curr.releaseFromMagazine();
997                     } else {
998                         // See if it makes sense to transfer the Chunk to the nextInLine cache for later usage.
999                         // This method will release curr if this is not the case.
1000                         transferToNextInLineOrRelease(curr);
1001                     }
1002                     curr = chunkController.newChunkAllocation(size, this);
1003                 }
1004             }
1005 
1006             current = curr;
1007             try {
1008                 int remainingCapacity = curr.remainingCapacity();
1009                 assert remainingCapacity >= size;
1010                 if (remainingCapacity > startingCapacity) {
1011                     curr.readInitInto(buf, size, startingCapacity, maxCapacity);
1012                     curr = null;
1013                 } else {
1014                     curr.readInitInto(buf, size, remainingCapacity, maxCapacity);
1015                 }
1016             } finally {
1017                 if (curr != null) {
1018                     // Release in a finally block so that even if readInitInto(...) throws we still correctly
1019                     // release the current chunk before nulling it out.
1020                     curr.releaseFromMagazine();
1021                     current = null;
1022                 }
1023             }
1024             return true;
1025         }
1026 
1027         private void restoreMagazineFreed() {
1028             Chunk next = NEXT_IN_LINE.getAndSet(this, MAGAZINE_FREED);
1029             if (next != null && next != MAGAZINE_FREED) {
1030                 // A chunk snuck in through a race. Release it after restoring MAGAZINE_FREED state.
1031                 next.releaseFromMagazine();
1032             }
1033         }
1034 
1035         private void transferToNextInLineOrRelease(Chunk chunk) {
1036             if (NEXT_IN_LINE.compareAndSet(this, null, chunk)) {
1037                 return;
1038             }
1039 
1040             Chunk nextChunk = NEXT_IN_LINE.get(this);
1041             if (nextChunk != null && nextChunk != MAGAZINE_FREED
1042                     && chunk.remainingCapacity() > nextChunk.remainingCapacity()) {
1043                 if (NEXT_IN_LINE.compareAndSet(this, nextChunk, chunk)) {
1044                     nextChunk.releaseFromMagazine();
1045                     return;
1046                 }
1047             }
1048             // Next-in-line is occupied. We don't try to add it to the central queue yet as it might still be used
1049             // by some buffers and so is attached to a Magazine.
1050             // Once a Chunk is completely released by Chunk.release() it will try to move itself to the queue
1051             // as a last resort.
1052             chunk.releaseFromMagazine();
1053         }
1054 
1055         boolean trySetNextInLine(Chunk chunk) {
1056             return NEXT_IN_LINE.compareAndSet(this, null, chunk);
1057         }
1058 
1059         void free() {
1060             // Release the current Chunk and the next that was stored for later usage.
1061             restoreMagazineFreed();
1062             long stamp = allocationLock != null ? allocationLock.writeLock() : 0;
1063             try {
1064                 if (current != null) {
1065                     current.releaseFromMagazine();
1066                     current = null;
1067                 }
1068             } finally {
1069                 if (allocationLock != null) {
1070                     allocationLock.unlockWrite(stamp);
1071                 }
1072             }
1073         }
1074 
1075         public AdaptiveByteBuf newBuffer() {
1076             AdaptiveByteBuf buf;
1077             if (handle == null) {
1078                 buf = EVENT_LOOP_LOCAL_BUFFER_POOL.get();
1079             } else {
1080                 buf = bufferQueue.poll();
1081                 if (buf == null) {
1082                     buf = new AdaptiveByteBuf(handle);
1083                 }
1084             }
1085             buf.resetRefCnt();
1086             buf.discardMarks();
1087             return buf;
1088         }
1089 
1090         boolean offerToQueue(Chunk chunk) {
1091             return group.offerToQueue(chunk);
1092         }
1093 
1094         public void initializeSharedStateIn(Magazine other) {
1095             chunkController.initializeSharedStateIn(other.chunkController);
1096         }
1097     }
1098 
1099     private static final class ChunkRegistry {
1100         private final LongAdder totalCapacity = new LongAdder();
1101 
1102         public long totalCapacity() {
1103             return totalCapacity.sum();
1104         }
1105 
1106         public void add(Chunk chunk) {
1107             totalCapacity.add(chunk.capacity());
1108         }
1109 
1110         public void remove(Chunk chunk) {
1111             totalCapacity.add(-chunk.capacity());
1112         }
1113     }
1114 
1115     private static class Chunk implements ReferenceCounted, ChunkInfo {
1116         private static final long REFCNT_FIELD_OFFSET;
1117         private static final AtomicIntegerFieldUpdater<Chunk> AIF_UPDATER;
1118         private static final Object REFCNT_FIELD_VH;
1119         private static final ReferenceCountUpdater<Chunk> updater;
1120 
1121         static {
1122             ReferenceCountUpdater.UpdaterType updaterType = ReferenceCountUpdater.updaterTypeOf(Chunk.class, "refCnt");
1123             switch (updaterType) {
1124                 case Atomic:
1125                     AIF_UPDATER = newUpdater(Chunk.class, "refCnt");
1126                     REFCNT_FIELD_OFFSET = -1;
1127                     REFCNT_FIELD_VH = null;
1128                     updater = new AtomicReferenceCountUpdater<Chunk>() {
1129                         @Override
1130                         protected AtomicIntegerFieldUpdater<Chunk> updater() {
1131                             return AIF_UPDATER;
1132                         }
1133                     };
1134                     break;
1135                 case Unsafe:
1136                     AIF_UPDATER = null;
1137                     REFCNT_FIELD_OFFSET = getUnsafeOffset(Chunk.class, "refCnt");
1138                     REFCNT_FIELD_VH = null;
1139                     updater = new UnsafeReferenceCountUpdater<Chunk>() {
1140                         @Override
1141                         protected long refCntFieldOffset() {
1142                             return REFCNT_FIELD_OFFSET;
1143                         }
1144                     };
1145                     break;
1146                 case VarHandle:
1147                     AIF_UPDATER = null;
1148                     REFCNT_FIELD_OFFSET = -1;
1149                     REFCNT_FIELD_VH = PlatformDependent.findVarHandleOfIntField(MethodHandles.lookup(),
1150                             Chunk.class, "refCnt");
1151                     updater = new VarHandleReferenceCountUpdater<Chunk>() {
1152                         @Override
1153                         protected VarHandle varHandle() {
1154                             return (VarHandle) REFCNT_FIELD_VH;
1155                         }
1156                     };
1157                     break;
1158                 default:
1159                     throw new Error("Unexpected updater type for Chunk: " + updaterType);
1160             }
1161         }
1162 
1163         protected final AbstractByteBuf delegate;
1164         protected Magazine magazine;
1165         private final AdaptivePoolingAllocator allocator;
1166         private final ChunkReleasePredicate chunkReleasePredicate;
1167         private final int capacity;
1168         private final boolean pooled;
1169         protected int allocatedBytes;
1170 
1171         // Value might not equal "real" reference count, all access should be via the updater
1172         @SuppressWarnings({"unused", "FieldMayBeFinal"})
1173         private volatile int refCnt;
1174 
1175         Chunk() {
1176             // Constructor only used by the MAGAZINE_FREED sentinel.
1177             delegate = null;
1178             magazine = null;
1179             allocator = null;
1180             chunkReleasePredicate = null;
1181             capacity = 0;
1182             pooled = false;
1183         }
1184 
1185         Chunk(AbstractByteBuf delegate, Magazine magazine, boolean pooled,
1186               ChunkReleasePredicate chunkReleasePredicate) {
1187             this.delegate = delegate;
1188             this.pooled = pooled;
1189             capacity = delegate.capacity();
1190             updater.setInitialValue(this);
1191             attachToMagazine(magazine);
1192 
1193             // We need the top-level allocator so ByteBuf.capacity(int) can call reallocate()
1194             allocator = magazine.group.allocator;
1195 
1196             this.chunkReleasePredicate = chunkReleasePredicate;
1197 
1198             if (PlatformDependent.isJfrEnabled() && AllocateChunkEvent.isEventEnabled()) {
1199                 AllocateChunkEvent event = new AllocateChunkEvent();
1200                 if (event.shouldCommit()) {
1201                     event.fill(this, AdaptiveByteBufAllocator.class);
1202                     event.pooled = pooled;
1203                     event.threadLocal = magazine.allocationLock == null;
1204                     event.commit();
1205                 }
1206             }
1207         }
1208 
1209         Magazine currentMagazine() {
1210             return magazine;
1211         }
1212 
1213         void detachFromMagazine() {
1214             if (magazine != null) {
1215                 magazine.usedMemory.getAndAdd(-capacity);
1216                 magazine = null;
1217             }
1218         }
1219 
1220         void attachToMagazine(Magazine magazine) {
1221             assert this.magazine == null;
1222             this.magazine = magazine;
1223             magazine.usedMemory.getAndAdd(capacity);
1224         }
1225 
1226         @Override
1227         public Chunk touch(Object hint) {
1228             return this;
1229         }
1230 
1231         @Override
1232         public int refCnt() {
1233             return updater.refCnt(this);
1234         }
1235 
1236         @Override
1237         public Chunk retain() {
1238             return updater.retain(this);
1239         }
1240 
1241         @Override
1242         public Chunk retain(int increment) {
1243             return updater.retain(this, increment);
1244         }
1245 
1246         @Override
1247         public Chunk touch() {
1248             return this;
1249         }
1250 
1251         @Override
1252         public boolean release() {
1253             if (updater.release(this)) {
1254                 deallocate();
1255                 return true;
1256             }
1257             return false;
1258         }
1259 
1260         @Override
1261         public boolean release(int decrement) {
1262             if (updater.release(this, decrement)) {
1263                 deallocate();
1264                 return true;
1265             }
1266             return false;
1267         }
1268 
1269         /**
1270          * Called when a magazine is done using this chunk, typically because it has been emptied.
1271          */
1272         boolean releaseFromMagazine() {
1273             return release();
1274         }
1275 
1276         /**
1277          * Called when a ByteBuf is done using its allocation in this chunk.
1278          */
1279         boolean releaseSegment(int ignoredSegmentId) {
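             // The base Chunk bump-allocates its segments, so an individual segment cannot be handed out again
             // on its own; its memory only becomes reusable once the whole chunk's reference count drops to zero.
             // SizeClassedChunk overrides this to additionally return the segment to its free list.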
1280             return release();
1281         }
1282 
1283         private void deallocate() {
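             // Reached when the reference count hits zero: the chunk is either dropped entirely, handed back to
             // its magazine as the next-in-line chunk, or offered to the shared queue for another magazine to reuse.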
1284             Magazine mag = magazine;
1285             int chunkSize = delegate.capacity();
1286             if (!pooled || chunkReleasePredicate.shouldReleaseChunk(chunkSize) || mag == null) {
1287                 // Drop the chunk if it is not pooled or no longer attached to a magazine, if the parent
1288                 // allocator is closed, or if the chunk deviates too much from the preferred chunk size.
1289                 detachFromMagazine();
1290                 onRelease();
1291                 allocator.chunkRegistry.remove(this);
1292                 delegate.release();
1293             } else {
1294                 updater.resetRefCnt(this);
1295                 delegate.setIndex(0, 0);
1296                 allocatedBytes = 0;
1297                 if (!mag.trySetNextInLine(this)) {
1298                     // As this Chunk does not belong to the magazine anymore, we need to decrease the used memory.
1299                     detachFromMagazine();
1300                     if (!mag.offerToQueue(this)) {
1301                         // The central queue is full. Release again, as the earlier resetRefCnt() call
1302                         // increased the reference count by 1.
1303                         boolean released = updater.release(this);
1304                         onRelease();
1305                         allocator.chunkRegistry.remove(this);
1306                         delegate.release();
1307                         assert released;
1308                     } else {
1309                         onReturn(false);
1310                     }
1311                 } else {
1312                     onReturn(true);
1313                 }
1314             }
1315         }
1316 
1317         private void onReturn(boolean returnedToMagazine) {
1318             if (PlatformDependent.isJfrEnabled() && ReturnChunkEvent.isEventEnabled()) {
1319                 ReturnChunkEvent event = new ReturnChunkEvent();
1320                 if (event.shouldCommit()) {
1321                     event.fill(this, AdaptiveByteBufAllocator.class);
1322                     event.returnedToMagazine = returnedToMagazine;
1323                     event.commit();
1324                 }
1325             }
1326         }
1327 
1328         private void onRelease() {
1329             if (PlatformDependent.isJfrEnabled() && FreeChunkEvent.isEventEnabled()) {
1330                 FreeChunkEvent event = new FreeChunkEvent();
1331                 if (event.shouldCommit()) {
1332                     event.fill(this, AdaptiveByteBufAllocator.class);
1333                     event.pooled = pooled;
1334                     event.commit();
1335                 }
1336             }
1337         }
1338 
1339         public void readInitInto(AdaptiveByteBuf buf, int size, int startingCapacity, int maxCapacity) {
1340             int startIndex = allocatedBytes;
1341             allocatedBytes = startIndex + startingCapacity;
1342             Chunk chunk = this;
1343             chunk.retain();
1344             try {
1345                 buf.init(delegate, chunk, 0, 0, startIndex, size, startingCapacity, maxCapacity);
1346                 chunk = null;
1347             } finally {
1348                 if (chunk != null) {
1349                     // If chunk is non-null, we know that buf.init(...) failed, so we need to manually release
1350                     // the chunk again, as we retained it before calling buf.init(...). Besides this, we also
1351                     // need to restore the old allocatedBytes value.
1352                     allocatedBytes = startIndex;
1353                     chunk.release();
1354                 }
1355             }
1356         }
1357 
1358         public int remainingCapacity() {
1359             return capacity - allocatedBytes;
1360         }
1361 
1362         @Override
1363         public int capacity() {
1364             return capacity;
1365         }
1366 
1367         @Override
1368         public boolean isDirect() {
1369             return delegate.isDirect();
1370         }
1371 
1372         @Override
1373         public long memoryAddress() {
1374             return delegate._memoryAddress();
1375         }
1376     }
1377 
1378     private static final class SizeClassedChunk extends Chunk {
1379         private static final int FREE_LIST_EMPTY = -1;
1380         private final int segmentSize;
1381         private final MpscIntQueue freeList;
1382 
1383         SizeClassedChunk(AbstractByteBuf delegate, Magazine magazine, boolean pooled, int segmentSize,
1384                          int[] segmentOffsets, ChunkReleasePredicate shouldReleaseChunk) {
1385             super(delegate, magazine, pooled, shouldReleaseChunk);
1386             this.segmentSize = segmentSize;
1387             int segmentCount = segmentOffsets.length;
1388             assert delegate.capacity() / segmentSize == segmentCount;
1389             assert segmentCount > 0: "Chunk must have a positive number of segments";
1390             freeList = MpscIntQueue.create(segmentCount, FREE_LIST_EMPTY);
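             // Pre-populate the free list with the start offset of every segment in the chunk.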
1391             freeList.fill(segmentCount, new IntSupplier() {
1392                 int counter;
1393                 @Override
1394                 public int getAsInt() {
1395                     return segmentOffsets[counter++];
1396                 }
1397             });
1398         }
1399 
1400         @Override
1401         public void readInitInto(AdaptiveByteBuf buf, int size, int startingCapacity, int maxCapacity) {
1402             int startIndex = freeList.poll();
1403             if (startIndex == FREE_LIST_EMPTY) {
1404                 throw new IllegalStateException("Free list is empty");
1405             }
1406             allocatedBytes += segmentSize;
1407             Chunk chunk = this;
1408             chunk.retain();
1409             try {
1410                 buf.init(delegate, chunk, 0, 0, startIndex, size, startingCapacity, maxCapacity);
1411                 chunk = null;
1412             } finally {
1413                 if (chunk != null) {
1414                     // If chunk is non-null, we know that buf.init(...) failed, so we need to manually release
1415                     // the chunk again, as we retained it before calling buf.init(...). Besides this, we also
1416                     // need to restore the old allocatedBytes value.
1417                     allocatedBytes -= segmentSize;
1418                     chunk.releaseSegment(startIndex);
1419                 }
1420             }
1421         }
1422 
1423         @Override
1424         public int remainingCapacity() {
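             // allocatedBytes is only incremented in readInitInto, while segments may be returned to the free
             // list concurrently, so the bump-pointer estimate can grow stale. Once the estimate leaves no more
             // than one segment, recompute the remaining capacity from the free list and resynchronise
             // allocatedBytes with it.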
1425             int remainingCapacity = super.remainingCapacity();
1426             if (remainingCapacity > segmentSize) {
1427                 return remainingCapacity;
1428             }
1429             int updatedRemainingCapacity = freeList.size() * segmentSize;
1430             if (updatedRemainingCapacity == remainingCapacity) {
1431                 return remainingCapacity;
1432             }
1433             // update allocatedBytes based on what's available in the free list
1434             allocatedBytes = capacity() - updatedRemainingCapacity;
1435             return updatedRemainingCapacity;
1436         }
1437 
1438         @Override
1439         boolean releaseFromMagazine() {
1440             // Size-classed chunks can be reused before they become empty.
1441             // We can therefore put them in the shared queue as soon as the magazine is done with this chunk.
1442             Magazine mag = magazine;
1443             detachFromMagazine();
1444             if (!mag.offerToQueue(this)) {
1445                 return super.releaseFromMagazine();
1446             }
1447             return false;
1448         }
1449 
1450         @Override
1451         boolean releaseSegment(int startIndex) {
1452             boolean released = release();
1453             boolean segmentReturned = freeList.offer(startIndex);
1454             assert segmentReturned: "Unable to return segment " + startIndex + " to free list";
1455             return released;
1456         }
1457     }
1458 
1459     static final class AdaptiveByteBuf extends AbstractReferenceCountedByteBuf {
1460 
1461         private final ObjectPool.Handle<AdaptiveByteBuf> handle;
1462 
1463         // This acts both as the index adjustment into the root buffer and as the start index for a free-list segment allocation.
1464         private int startIndex;
1465         private AbstractByteBuf rootParent;
1466         Chunk chunk;
1467         private int length;
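         // Number of bytes actually reserved for this buffer in its chunk; the buffer can grow to this size in place.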
1468         private int maxFastCapacity;
1469         private ByteBuffer tmpNioBuf;
1470         private boolean hasArray;
1471         private boolean hasMemoryAddress;
1472 
1473         AdaptiveByteBuf(ObjectPool.Handle<AdaptiveByteBuf> recyclerHandle) {
1474             super(0);
1475             handle = ObjectUtil.checkNotNull(recyclerHandle, "recyclerHandle");
1476         }
1477 
1478         void init(AbstractByteBuf unwrapped, Chunk wrapped, int readerIndex, int writerIndex,
1479                   int startIndex, int size, int capacity, int maxCapacity) {
1480             this.startIndex = startIndex;
1481             chunk = wrapped;
1482             length = size;
1483             maxFastCapacity = capacity;
1484             maxCapacity(maxCapacity);
1485             setIndex0(readerIndex, writerIndex);
1486             hasArray = unwrapped.hasArray();
1487             hasMemoryAddress = unwrapped.hasMemoryAddress();
1488             rootParent = unwrapped;
1489             tmpNioBuf = null;
1490 
1491             if (PlatformDependent.isJfrEnabled() && AllocateBufferEvent.isEventEnabled()) {
1492                 AllocateBufferEvent event = new AllocateBufferEvent();
1493                 if (event.shouldCommit()) {
1494                     event.fill(this, AdaptiveByteBufAllocator.class);
1495                     event.chunkPooled = wrapped.pooled;
1496                     Magazine m = wrapped.magazine;
1497                     event.chunkThreadLocal = m != null && m.allocationLock == null;
1498                     event.commit();
1499                 }
1500             }
1501         }
1502 
1503         private AbstractByteBuf rootParent() {
1504             final AbstractByteBuf rootParent = this.rootParent;
1505             if (rootParent != null) {
1506                 return rootParent;
1507             }
1508             throw new IllegalReferenceCountException();
1509         }
1510 
1511         @Override
1512         public int capacity() {
1513             return length;
1514         }
1515 
1516         @Override
1517         public int maxFastWritableBytes() {
1518             return Math.min(maxFastCapacity, maxCapacity()) - writerIndex;
1519         }
1520 
1521         @Override
1522         public ByteBuf capacity(int newCapacity) {
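             // Growing within the reserved segment (up to maxFastCapacity) and shrinking are both done in place;
             // only growing past maxFastCapacity requires allocating a new segment and copying.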
1523             if (length <= newCapacity && newCapacity <= maxFastCapacity) {
1524                 ensureAccessible();
1525                 length = newCapacity;
1526                 return this;
1527             }
1528             checkNewCapacity(newCapacity);
1529             if (newCapacity < capacity()) {
1530                 length = newCapacity;
1531                 trimIndicesToCapacity(newCapacity);
1532                 return this;
1533             }
1534 
1535             if (PlatformDependent.isJfrEnabled() && ReallocateBufferEvent.isEventEnabled()) {
1536                 ReallocateBufferEvent event = new ReallocateBufferEvent();
1537                 if (event.shouldCommit()) {
1538                     event.fill(this, AdaptiveByteBufAllocator.class);
1539                     event.newCapacity = newCapacity;
1540                     event.commit();
1541                 }
1542             }
1543 
1544             // Reallocation required.
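             // Allocate a replacement segment through the top-level allocator, copy the old content over,
             // release the old segment back to its chunk, and then restore the reader/writer indices.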
1545             Chunk chunk = this.chunk;
1546             AdaptivePoolingAllocator allocator = chunk.allocator;
1547             int readerIndex = this.readerIndex;
1548             int writerIndex = this.writerIndex;
1549             int baseOldRootIndex = startIndex;
1550             int oldCapacity = length;
1551             AbstractByteBuf oldRoot = rootParent();
1552             allocator.reallocate(newCapacity, maxCapacity(), this);
1553             oldRoot.getBytes(baseOldRootIndex, this, 0, oldCapacity);
1554             chunk.releaseSegment(baseOldRootIndex);
1555             this.readerIndex = readerIndex;
1556             this.writerIndex = writerIndex;
1557             return this;
1558         }
1559 
1560         @Override
1561         public ByteBufAllocator alloc() {
1562             return rootParent().alloc();
1563         }
1564 
1565         @Override
1566         public ByteOrder order() {
1567             return rootParent().order();
1568         }
1569 
1570         @Override
1571         public ByteBuf unwrap() {
1572             return null;
1573         }
1574 
1575         @Override
1576         public boolean isDirect() {
1577             return rootParent().isDirect();
1578         }
1579 
1580         @Override
1581         public int arrayOffset() {
1582             return idx(rootParent().arrayOffset());
1583         }
1584 
1585         @Override
1586         public boolean hasMemoryAddress() {
1587             return hasMemoryAddress;
1588         }
1589 
1590         @Override
1591         public long memoryAddress() {
1592             ensureAccessible();
1593             return _memoryAddress();
1594         }
1595 
1596         @Override
1597         long _memoryAddress() {
1598             AbstractByteBuf root = rootParent;
1599             return root != null ? root._memoryAddress() + startIndex : 0L;
1600         }
1601 
1602         @Override
1603         public ByteBuffer nioBuffer(int index, int length) {
1604             checkIndex(index, length);
1605             return rootParent().nioBuffer(idx(index), length);
1606         }
1607 
1608         @Override
1609         public ByteBuffer internalNioBuffer(int index, int length) {
1610             checkIndex(index, length);
1611             return (ByteBuffer) internalNioBuffer().position(index).limit(index + length);
1612         }
1613 
1614         private ByteBuffer internalNioBuffer() {
1615             if (tmpNioBuf == null) {
1616                 tmpNioBuf = rootParent().nioBuffer(startIndex, maxFastCapacity);
1617             }
1618             return (ByteBuffer) tmpNioBuf.clear();
1619         }
1620 
1621         @Override
1622         public ByteBuffer[] nioBuffers(int index, int length) {
1623             checkIndex(index, length);
1624             return rootParent().nioBuffers(idx(index), length);
1625         }
1626 
1627         @Override
1628         public boolean hasArray() {
1629             return hasArray;
1630         }
1631 
1632         @Override
1633         public byte[] array() {
1634             ensureAccessible();
1635             return rootParent().array();
1636         }
1637 
1638         @Override
1639         public ByteBuf copy(int index, int length) {
1640             checkIndex(index, length);
1641             return rootParent().copy(idx(index), length);
1642         }
1643 
1644         @Override
1645         public int nioBufferCount() {
1646             return rootParent().nioBufferCount();
1647         }
1648 
1649         @Override
1650         protected byte _getByte(int index) {
1651             return rootParent()._getByte(idx(index));
1652         }
1653 
1654         @Override
1655         protected short _getShort(int index) {
1656             return rootParent()._getShort(idx(index));
1657         }
1658 
1659         @Override
1660         protected short _getShortLE(int index) {
1661             return rootParent()._getShortLE(idx(index));
1662         }
1663 
1664         @Override
1665         protected int _getUnsignedMedium(int index) {
1666             return rootParent()._getUnsignedMedium(idx(index));
1667         }
1668 
1669         @Override
1670         protected int _getUnsignedMediumLE(int index) {
1671             return rootParent()._getUnsignedMediumLE(idx(index));
1672         }
1673 
1674         @Override
1675         protected int _getInt(int index) {
1676             return rootParent()._getInt(idx(index));
1677         }
1678 
1679         @Override
1680         protected int _getIntLE(int index) {
1681             return rootParent()._getIntLE(idx(index));
1682         }
1683 
1684         @Override
1685         protected long _getLong(int index) {
1686             return rootParent()._getLong(idx(index));
1687         }
1688 
1689         @Override
1690         protected long _getLongLE(int index) {
1691             return rootParent()._getLongLE(idx(index));
1692         }
1693 
1694         @Override
1695         public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) {
1696             checkIndex(index, length);
1697             rootParent().getBytes(idx(index), dst, dstIndex, length);
1698             return this;
1699         }
1700 
1701         @Override
1702         public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) {
1703             checkIndex(index, length);
1704             rootParent().getBytes(idx(index), dst, dstIndex, length);
1705             return this;
1706         }
1707 
1708         @Override
1709         public ByteBuf getBytes(int index, ByteBuffer dst) {
1710             checkIndex(index, dst.remaining());
1711             rootParent().getBytes(idx(index), dst);
1712             return this;
1713         }
1714 
1715         @Override
1716         protected void _setByte(int index, int value) {
1717             rootParent()._setByte(idx(index), value);
1718         }
1719 
1720         @Override
1721         protected void _setShort(int index, int value) {
1722             rootParent()._setShort(idx(index), value);
1723         }
1724 
1725         @Override
1726         protected void _setShortLE(int index, int value) {
1727             rootParent()._setShortLE(idx(index), value);
1728         }
1729 
1730         @Override
1731         protected void _setMedium(int index, int value) {
1732             rootParent()._setMedium(idx(index), value);
1733         }
1734 
1735         @Override
1736         protected void _setMediumLE(int index, int value) {
1737             rootParent()._setMediumLE(idx(index), value);
1738         }
1739 
1740         @Override
1741         protected void _setInt(int index, int value) {
1742             rootParent()._setInt(idx(index), value);
1743         }
1744 
1745         @Override
1746         protected void _setIntLE(int index, int value) {
1747             rootParent()._setIntLE(idx(index), value);
1748         }
1749 
1750         @Override
1751         protected void _setLong(int index, long value) {
1752             rootParent()._setLong(idx(index), value);
1753         }
1754 
1755         @Override
1756         protected void _setLongLE(int index, long value) {
1757             rootParent()._setLongLE(idx(index), value);
1758         }
1759 
1760         @Override
1761         public ByteBuf setBytes(int index, byte[] src, int srcIndex, int length) {
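             // internalNioBuffer() is already a view of the chunk that starts at startIndex, so the raw index
             // (rather than idx(index)) is the correct offset into it.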
1762             checkIndex(index, length);
1763             ByteBuffer tmp = (ByteBuffer) internalNioBuffer().clear().position(index);
1764             tmp.put(src, srcIndex, length);
1765             return this;
1766         }
1767 
1768         @Override
1769         public ByteBuf setBytes(int index, ByteBuf src, int srcIndex, int length) {
1770             checkIndex(index, length);
1771             ByteBuffer tmp = (ByteBuffer) internalNioBuffer().clear().position(index);
1772             tmp.put(src.nioBuffer(srcIndex, length));
1773             return this;
1774         }
1775 
1776         @Override
1777         public ByteBuf setBytes(int index, ByteBuffer src) {
1778             checkIndex(index, src.remaining());
1779             ByteBuffer tmp = (ByteBuffer) internalNioBuffer().clear().position(index);
1780             tmp.put(src);
1781             return this;
1782         }
1783 
1784         @Override
1785         public ByteBuf getBytes(int index, OutputStream out, int length)
1786                 throws IOException {
1787             checkIndex(index, length);
1788             if (length != 0) {
1789                 ByteBufUtil.readBytes(alloc(), internalNioBuffer().duplicate(), index, length, out);
1790             }
1791             return this;
1792         }
1793 
1794         @Override
1795         public int getBytes(int index, GatheringByteChannel out, int length)
1796                 throws IOException {
1797             ByteBuffer buf = internalNioBuffer().duplicate();
1798             buf.clear().position(index).limit(index + length);
1799             return out.write(buf);
1800         }
1801 
1802         @Override
1803         public int getBytes(int index, FileChannel out, long position, int length)
1804                 throws IOException {
1805             ByteBuffer buf = internalNioBuffer().duplicate();
1806             buf.clear().position(index).limit(index + length);
1807             return out.write(buf, position);
1808         }
1809 
1810         @Override
1811         public int setBytes(int index, InputStream in, int length)
1812                 throws IOException {
1813             checkIndex(index, length);
1814             final AbstractByteBuf rootParent = rootParent();
1815             if (rootParent.hasArray()) {
1816                 return rootParent.setBytes(idx(index), in, length);
1817             }
1818             byte[] tmp = ByteBufUtil.threadLocalTempArray(length);
1819             int readBytes = in.read(tmp, 0, length);
1820             if (readBytes <= 0) {
1821                 return readBytes;
1822             }
1823             setBytes(index, tmp, 0, readBytes);
1824             return readBytes;
1825         }
1826 
1827         @Override
1828         public int setBytes(int index, ScatteringByteChannel in, int length)
1829                 throws IOException {
1830             try {
1831                 return in.read(internalNioBuffer(index, length).duplicate());
1832             } catch (ClosedChannelException ignored) {
1833                 return -1;
1834             }
1835         }
1836 
1837         @Override
1838         public int setBytes(int index, FileChannel in, long position, int length)
1839                 throws IOException {
1840             try {
1841                 return in.read(internalNioBuffer(index, length).duplicate(), position);
1842             } catch (ClosedChannelException ignored) {
1843                 return -1;
1844             }
1845         }
1846 
1847         @Override
1848         public int setCharSequence(int index, CharSequence sequence, Charset charset) {
1849             return setCharSequence0(index, sequence, charset, false);
1850         }
1851 
1852         private int setCharSequence0(int index, CharSequence sequence, Charset charset, boolean expand) {
1853             if (charset.equals(CharsetUtil.UTF_8)) {
1854                 int length = ByteBufUtil.utf8MaxBytes(sequence);
1855                 if (expand) {
1856                     ensureWritable0(length);
1857                     checkIndex0(index, length);
1858                 } else {
1859                     checkIndex(index, length);
1860                 }
1861                 // Directly pass in the rootParent() with the adjusted index
1862                 return ByteBufUtil.writeUtf8(rootParent(), idx(index), length, sequence, sequence.length());
1863             }
1864             if (charset.equals(CharsetUtil.US_ASCII) || charset.equals(CharsetUtil.ISO_8859_1)) {
1865                 int length = sequence.length();
1866                 if (expand) {
1867                     ensureWritable0(length);
1868                     checkIndex0(index, length);
1869                 } else {
1870                     checkIndex(index, length);
1871                 }
1872                 // Directly pass in the rootParent() with the adjusted index
1873                 return ByteBufUtil.writeAscii(rootParent(), idx(index), sequence, length);
1874             }
1875             byte[] bytes = sequence.toString().getBytes(charset);
1876             if (expand) {
1877                 ensureWritable0(bytes.length);
1878                 // setBytes(...) will take care of checking the indices.
1879             }
1880             setBytes(index, bytes);
1881             return bytes.length;
1882         }
1883 
1884         @Override
1885         public int writeCharSequence(CharSequence sequence, Charset charset) {
1886             int written = setCharSequence0(writerIndex, sequence, charset, true);
1887             writerIndex += written;
1888             return written;
1889         }
1890 
1891         @Override
1892         public int forEachByte(int index, int length, ByteProcessor processor) {
1893             checkIndex(index, length);
1894             int ret = rootParent().forEachByte(idx(index), length, processor);
1895             return forEachResult(ret);
1896         }
1897 
1898         @Override
1899         public int forEachByteDesc(int index, int length, ByteProcessor processor) {
1900             checkIndex(index, length);
1901             int ret = rootParent().forEachByteDesc(idx(index), length, processor);
1902             return forEachResult(ret);
1903         }
1904 
1905         @Override
1906         public ByteBuf setZero(int index, int length) {
1907             checkIndex(index, length);
1908             rootParent().setZero(idx(index), length);
1909             return this;
1910         }
1911 
1912         @Override
1913         public ByteBuf writeZero(int length) {
1914             ensureWritable(length);
1915             rootParent().setZero(idx(writerIndex), length);
1916             writerIndex += length;
1917             return this;
1918         }
1919 
1920         private int forEachResult(int ret) {
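             // The processor ran against the root buffer, so translate the absolute index back into this buffer's
             // index space; a result below startIndex (including the -1 "not found" value) maps to -1.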
1921             if (ret < startIndex) {
1922                 return -1;
1923             }
1924             return ret - startIndex;
1925         }
1926 
1927         @Override
1928         public boolean isContiguous() {
1929             return rootParent().isContiguous();
1930         }
1931 
1932         private int idx(int index) {
1933             return index + startIndex;
1934         }
1935 
1936         @Override
1937         protected void deallocate() {
1938             if (PlatformDependent.isJfrEnabled() && FreeBufferEvent.isEventEnabled()) {
1939                 FreeBufferEvent event = new FreeBufferEvent();
1940                 if (event.shouldCommit()) {
1941                     event.fill(this, AdaptiveByteBufAllocator.class);
1942                     event.commit();
1943                 }
1944             }
1945 
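             // Hand the segment back to the owning chunk (releasing the reference taken when this buffer was
             // initialised) and recycle this wrapper through its handle.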
1946             if (chunk != null) {
1947                 chunk.releaseSegment(startIndex);
1948             }
1949             tmpNioBuf = null;
1950             chunk = null;
1951             rootParent = null;
1952             if (handle instanceof EnhancedHandle) {
1953                 EnhancedHandle<AdaptiveByteBuf> enhancedHandle = (EnhancedHandle<AdaptiveByteBuf>) handle;
1954                 enhancedHandle.unguardedRecycle(this);
1955             } else {
1956                 handle.recycle(this);
1957             }
1958         }
1959     }
1960 
1961     /**
1962      * The strategy for how {@link AdaptivePoolingAllocator} should allocate chunk buffers.
1963      */
1964     interface ChunkAllocator {
1965         /**
1966          * Allocate a buffer for a chunk. This can be any kind of {@link AbstractByteBuf} implementation.
1967          * @param initialCapacity The initial capacity of the returned {@link AbstractByteBuf}.
1968          * @param maxCapacity The maximum capacity of the returned {@link AbstractByteBuf}.
1969          * @return The buffer that represents the chunk memory.
1970          */
1971         AbstractByteBuf allocate(int initialCapacity, int maxCapacity);
1972     }
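     // A minimal sketch of what a ChunkAllocator implementation could look like (illustrative only, not part of
     // this class). It assumes an unpooled heap buffer is an acceptable chunk backing:
     //
     //     ChunkAllocator heapChunks = new ChunkAllocator() {
     //         @Override
     //         public AbstractByteBuf allocate(int initialCapacity, int maxCapacity) {
     //             // Any AbstractByteBuf works here; a plain unpooled heap buffer keeps the example simple.
     //             return new UnpooledHeapByteBuf(UnpooledByteBufAllocator.DEFAULT, initialCapacity, maxCapacity);
     //         }
     //     };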
1973 }