1   /*
2    * Copyright 2012 The Netty Project
3    *
4    * The Netty Project licenses this file to you under the Apache License,
5    * version 2.0 (the "License"); you may not use this file except in compliance
6    * with the License. You may obtain a copy of the License at:
7    *
8    *   https://www.apache.org/licenses/LICENSE-2.0
9    *
10   * Unless required by applicable law or agreed to in writing, software
11   * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12   * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13   * License for the specific language governing permissions and limitations
14   * under the License.
15   */
16  
17  package io.netty.buffer;
18  
19  import io.netty.util.internal.LongCounter;
20  import io.netty.util.internal.PlatformDependent;
21  import io.netty.util.internal.StringUtil;
22  
23  import java.nio.ByteBuffer;
24  import java.util.ArrayList;
25  import java.util.Collections;
26  import java.util.List;
27  import java.util.concurrent.atomic.AtomicInteger;
28  import java.util.concurrent.atomic.AtomicReference;
29  import java.util.concurrent.locks.ReentrantLock;
30  
31  import static io.netty.buffer.PoolChunk.isSubpage;
32  import static java.lang.Math.max;
33  
34  abstract class PoolArena<T> implements PoolArenaMetric {
35      private static final boolean HAS_UNSAFE = PlatformDependent.hasUnsafe();
36  
37      enum SizeClass {
38          Small,
39          Normal
40      }
41  
42      final PooledByteBufAllocator parent;
43  
44      final PoolSubpage<T>[] smallSubpagePools;
45  
46      private final PoolChunkList<T> q050;
47      private final PoolChunkList<T> q025;
48      private final PoolChunkList<T> q000;
49      private final PoolChunkList<T> qInit;
50      private final PoolChunkList<T> q075;
51      private final PoolChunkList<T> q100;
52  
53      private final List<PoolChunkListMetric> chunkListMetrics;
54  
55      // Metrics for allocations and deallocations
56      private long allocationsNormal;
57      // We need to use the LongCounter here as this is not guarded by a synchronized block.
58      private final LongCounter allocationsSmall = PlatformDependent.newLongCounter();
59      private final LongCounter allocationsHuge = PlatformDependent.newLongCounter();
60      private final LongCounter activeBytesHuge = PlatformDependent.newLongCounter();
61  
62      private long deallocationsSmall;
63      private long deallocationsNormal;
64  
65      // We need to use the LongCounter here as this is not guarded by a synchronized block.
66      private final LongCounter deallocationsHuge = PlatformDependent.newLongCounter();
67  
68      // Number of thread caches backed by this arena.
69      final AtomicInteger numThreadCaches = new AtomicInteger();
70  
71      // TODO: Test if adding padding helps under contention
72      //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
73  
74      private final ReentrantLock lock = new ReentrantLock();
75  
76      final SizeClasses sizeClass;
77  
78      protected PoolArena(PooledByteBufAllocator parent, SizeClasses sizeClass) {
79          assert null != sizeClass;
80          this.parent = parent;
81          this.sizeClass = sizeClass;
82          smallSubpagePools = newSubpagePoolArray(sizeClass.nSubpages);
83          for (int i = 0; i < smallSubpagePools.length; i ++) {
84              smallSubpagePools[i] = newSubpagePoolHead(i);
85          }
86  
87          q100 = new PoolChunkList<T>(this, null, 100, Integer.MAX_VALUE, sizeClass.chunkSize);
88          q075 = new PoolChunkList<T>(this, q100, 75, 100, sizeClass.chunkSize);
89          q050 = new PoolChunkList<T>(this, q100, 50, 100, sizeClass.chunkSize);
90          q025 = new PoolChunkList<T>(this, q050, 25, 75, sizeClass.chunkSize);
91          q000 = new PoolChunkList<T>(this, q025, 1, 50, sizeClass.chunkSize);
92          qInit = new PoolChunkList<T>(this, q000, Integer.MIN_VALUE, 25, sizeClass.chunkSize);
93  
94          q100.prevList(q075);
95          q075.prevList(q050);
96          q050.prevList(q025);
97          q025.prevList(q000);
98          q000.prevList(null);
99          qInit.prevList(qInit);
100 
101         List<PoolChunkListMetric> metrics = new ArrayList<PoolChunkListMetric>(6);
102         metrics.add(qInit);
103         metrics.add(q000);
104         metrics.add(q025);
105         metrics.add(q050);
106         metrics.add(q075);
107         metrics.add(q100);
108         chunkListMetrics = Collections.unmodifiableList(metrics);
109     }
110 
111     private PoolSubpage<T> newSubpagePoolHead(int index) {
112         PoolSubpage<T> head = new PoolSubpage<T>(index);
113         head.prev = head;
114         head.next = head;
115         return head;
116     }
117 
118     @SuppressWarnings("unchecked")
119     private PoolSubpage<T>[] newSubpagePoolArray(int size) {
120         return new PoolSubpage[size];
121     }
122 
123     abstract boolean isDirect();
124 
125     PooledByteBuf<T> allocate(PoolThreadCache cache, int reqCapacity, int maxCapacity) {
126         PooledByteBuf<T> buf = newByteBuf(maxCapacity);
127         allocate(cache, buf, reqCapacity);
128         return buf;
129     }
130 
131     private void allocate(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity) {
132         final int sizeIdx = sizeClass.size2SizeIdx(reqCapacity);
133 
134         if (sizeIdx <= sizeClass.smallMaxSizeIdx) {
135             tcacheAllocateSmall(cache, buf, reqCapacity, sizeIdx);
136         } else if (sizeIdx < sizeClass.nSizes) {
137             tcacheAllocateNormal(cache, buf, reqCapacity, sizeIdx);
138         } else {
139             int normCapacity = sizeClass.directMemoryCacheAlignment > 0
140                     ? sizeClass.normalizeSize(reqCapacity) : reqCapacity;
141             // Huge allocations are never served via the cache so just call allocateHuge
142             allocateHuge(buf, normCapacity);
143         }
144     }
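    // Illustrative note, not part of the original file: the three branches above are the small, normal and
    // huge allocation paths. A hedged caller-side sketch of how a request reaches this method, assuming the
    // default allocator configuration (the exact size boundaries depend on the SizeClasses setup and are not
    // asserted here):
    //
    //   ByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;
    //   ByteBuf small  = alloc.directBuffer(512);               // likely routed to tcacheAllocateSmall(...)
    //   ByteBuf normal = alloc.directBuffer(64 * 1024);         // likely routed to tcacheAllocateNormal(...)
    //   ByteBuf huge   = alloc.directBuffer(64 * 1024 * 1024);  // larger than the chunk size -> allocateHuge(...)
    //   small.release();
    //   normal.release();
    //   huge.release();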
145 
146     private void tcacheAllocateSmall(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity,
147                                      final int sizeIdx) {
148 
149         if (cache.allocateSmall(this, buf, reqCapacity, sizeIdx)) {
150             // was able to allocate out of the cache so move on
151             return;
152         }
153 
154         /*
155          * Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and
156          * {@link PoolChunk#free(long)} may modify the doubly linked list as well.
157          */
158         final PoolSubpage<T> head = smallSubpagePools[sizeIdx];
159         final boolean needsNormalAllocation;
160         head.lock();
161         try {
162             final PoolSubpage<T> s = head.next;
163             needsNormalAllocation = s == head;
164             if (!needsNormalAllocation) {
165                 assert s.doNotDestroy && s.elemSize == sizeClass.sizeIdx2size(sizeIdx) : "doNotDestroy=" +
166                         s.doNotDestroy + ", elemSize=" + s.elemSize + ", sizeIdx=" + sizeIdx;
167                 long handle = s.allocate();
168                 assert handle >= 0;
169                 s.chunk.initBufWithSubpage(buf, null, handle, reqCapacity, cache);
170             }
171         } finally {
172             head.unlock();
173         }
174 
175         if (needsNormalAllocation) {
176             lock();
177             try {
178                 allocateNormal(buf, reqCapacity, sizeIdx, cache);
179             } finally {
180                 unlock();
181             }
182         }
183 
184         incSmallAllocation();
185     }
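    // Illustrative note, not part of the original file: each entry of smallSubpagePools is the head of a
    // circular doubly linked list, so "head.next == head" means the pool for that size index is empty and a
    // fresh subpage has to be carved out of a chunk (the needsNormalAllocation branch above). A minimal,
    // generic sketch of that circular-list idiom for readers unfamiliar with it:
    //
    //   final class Node {
    //       Node prev = this;                       // a head that links to itself represents an empty list
    //       Node next = this;
    //   }
    //   static boolean isEmpty(Node head) {
    //       return head.next == head;
    //   }
    //   static void insertAfter(Node head, Node n) {
    //       n.prev = head;
    //       n.next = head.next;
    //       head.next.prev = n;
    //       head.next = n;
    //   }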
186 
187     private void tcacheAllocateNormal(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity,
188                                       final int sizeIdx) {
189         if (cache.allocateNormal(this, buf, reqCapacity, sizeIdx)) {
190             // was able to allocate out of the cache so move on
191             return;
192         }
193         lock();
194         try {
195             allocateNormal(buf, reqCapacity, sizeIdx, cache);
196             ++allocationsNormal;
197         } finally {
198             unlock();
199         }
200     }
201 
202     private void allocateNormal(PooledByteBuf<T> buf, int reqCapacity, int sizeIdx, PoolThreadCache threadCache) {
203         assert lock.isHeldByCurrentThread();
204         if (q050.allocate(buf, reqCapacity, sizeIdx, threadCache) ||
205             q025.allocate(buf, reqCapacity, sizeIdx, threadCache) ||
206             q000.allocate(buf, reqCapacity, sizeIdx, threadCache) ||
207             qInit.allocate(buf, reqCapacity, sizeIdx, threadCache) ||
208             q075.allocate(buf, reqCapacity, sizeIdx, threadCache)) {
209             return;
210         }
211 
212         // Add a new chunk.
213         PoolChunk<T> c = newChunk(sizeClass.pageSize, sizeClass.nPSizes, sizeClass.pageShifts, sizeClass.chunkSize);
214         boolean success = c.allocate(buf, reqCapacity, sizeIdx, threadCache);
215         assert success;
216         qInit.add(c);
217     }
218 
219     private void incSmallAllocation() {
220         allocationsSmall.increment();
221     }
222 
223     private void allocateHuge(PooledByteBuf<T> buf, int reqCapacity) {
224         PoolChunk<T> chunk = newUnpooledChunk(reqCapacity);
225         activeBytesHuge.add(chunk.chunkSize());
226         buf.initUnpooled(chunk, reqCapacity);
227         allocationsHuge.increment();
228     }
229 
230     void free(PoolChunk<T> chunk, ByteBuffer nioBuffer, long handle, int normCapacity, PoolThreadCache cache) {
231         chunk.decrementPinnedMemory(normCapacity);
232         if (chunk.unpooled) {
233             int size = chunk.chunkSize();
234             destroyChunk(chunk);
235             activeBytesHuge.add(-size);
236             deallocationsHuge.increment();
237         } else {
238             SizeClass sizeClass = sizeClass(handle);
239             if (cache != null && cache.add(this, chunk, nioBuffer, handle, normCapacity, sizeClass)) {
240                 // cached, so do not free it.
241                 return;
242             }
243 
244             freeChunk(chunk, handle, normCapacity, sizeClass, nioBuffer, false);
245         }
246     }
247 
248     private static SizeClass sizeClass(long handle) {
249         return isSubpage(handle) ? SizeClass.Small : SizeClass.Normal;
250     }
251 
252     void freeChunk(PoolChunk<T> chunk, long handle, int normCapacity, SizeClass sizeClass, ByteBuffer nioBuffer,
253                    boolean finalizer) {
254         final boolean destroyChunk;
255         lock();
256         try {
257             // We only update these counters if freeChunk is not being called from the PoolThreadCache finalizer, as
258             // otherwise this may fail due to lazy class-loading in, for example, Tomcat.
259             if (!finalizer) {
260                 switch (sizeClass) {
261                     case Normal:
262                         ++deallocationsNormal;
263                         break;
264                     case Small:
265                         ++deallocationsSmall;
266                         break;
267                     default:
268                         throw new Error();
269                 }
270             }
271             destroyChunk = !chunk.parent.free(chunk, handle, normCapacity, nioBuffer);
272         } finally {
273             unlock();
274         }
275         if (destroyChunk) {
276             // destroyChunk does not need to be called while holding the lock.
277             destroyChunk(chunk);
278         }
279     }
280 
281     void reallocate(PooledByteBuf<T> buf, int newCapacity) {
282         assert newCapacity >= 0 && newCapacity <= buf.maxCapacity();
283 
284         final int oldCapacity;
285         final PoolChunk<T> oldChunk;
286         final ByteBuffer oldNioBuffer;
287         final long oldHandle;
288         final T oldMemory;
289         final int oldOffset;
290         final int oldMaxLength;
291         final PoolThreadCache oldCache;
292 
293         // We synchronize on the ByteBuf itself to ensure there are no "concurrent" reallocations of the same buffer.
294         // We do this to ensure the ByteBuf internal fields that are used to allocate / free are not accessed
295         // concurrently. This is important, as otherwise we might end up corrupting the internal state of our data
296         // structures.
297         //
298         // Also note we don't use a Lock here but plain synchronized, even though this might seem like a bad choice for Loom.
299         // This is done to minimize the per-ByteBuf overhead. The time this would block another thread should be
300         // relatively small and so should not be a problem for Loom.
301         // See https://github.com/netty/netty/issues/13467
302         synchronized (buf) {
303             oldCapacity = buf.length;
304             if (oldCapacity == newCapacity) {
305                 return;
306             }
307 
308             oldChunk = buf.chunk;
309             oldNioBuffer = buf.tmpNioBuf;
310             oldHandle = buf.handle;
311             oldMemory = buf.memory;
312             oldOffset = buf.offset;
313             oldMaxLength = buf.maxLength;
314             oldCache = buf.cache;
315 
316             // This does not touch buf's reader/writer indices
317             allocate(parent.threadCache(), buf, newCapacity);
318         }
319         int bytesToCopy;
320         if (newCapacity > oldCapacity) {
321             bytesToCopy = oldCapacity;
322         } else {
323             buf.trimIndicesToCapacity(newCapacity);
324             bytesToCopy = newCapacity;
325         }
326         memoryCopy(oldMemory, oldOffset, buf, bytesToCopy);
327         free(oldChunk, oldNioBuffer, oldHandle, oldMaxLength, oldCache);
328     }
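    // Illustrative note, not part of the original file: reallocate(...) is reached indirectly when the
    // capacity of a pooled buffer is changed. A hedged caller-side sketch (the call chain through
    // PooledByteBuf is not spelled out here):
    //
    //   ByteBuf buf = PooledByteBufAllocator.DEFAULT.heapBuffer(256, 8192);
    //   buf.writeBytes(new byte[256]);
    //   buf.capacity(1024);   // grows the backing memory; existing bytes are copied via memoryCopy(...)
    //   buf.capacity(128);    // shrinks; reader/writer indices are trimmed via trimIndicesToCapacity(...)
    //   buf.release();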
329 
330     @Override
331     public int numThreadCaches() {
332         return numThreadCaches.get();
333     }
334 
335     @Override
336     public int numTinySubpages() {
337         return 0;
338     }
339 
340     @Override
341     public int numSmallSubpages() {
342         return smallSubpagePools.length;
343     }
344 
345     @Override
346     public int numChunkLists() {
347         return chunkListMetrics.size();
348     }
349 
350     @Override
351     public List<PoolSubpageMetric> tinySubpages() {
352         return Collections.emptyList();
353     }
354 
355     @Override
356     public List<PoolSubpageMetric> smallSubpages() {
357         return subPageMetricList(smallSubpagePools);
358     }
359 
360     @Override
361     public List<PoolChunkListMetric> chunkLists() {
362         return chunkListMetrics;
363     }
364 
365     private static List<PoolSubpageMetric> subPageMetricList(PoolSubpage<?>[] pages) {
366         List<PoolSubpageMetric> metrics = new ArrayList<PoolSubpageMetric>();
367         for (PoolSubpage<?> head : pages) {
368             if (head.next == head) {
369                 continue;
370             }
371             PoolSubpage<?> s = head.next;
372             for (;;) {
373                 metrics.add(s);
374                 s = s.next;
375                 if (s == head) {
376                     break;
377                 }
378             }
379         }
380         return metrics;
381     }
382 
383     @Override
384     public long numAllocations() {
385         final long allocsNormal;
386         lock();
387         try {
388             allocsNormal = allocationsNormal;
389         } finally {
390             unlock();
391         }
392         return allocationsSmall.value() + allocsNormal + allocationsHuge.value();
393     }
394 
395     @Override
396     public long numTinyAllocations() {
397         return 0;
398     }
399 
400     @Override
401     public long numSmallAllocations() {
402         return allocationsSmall.value();
403     }
404 
405     @Override
406     public long numNormalAllocations() {
407         lock();
408         try {
409             return allocationsNormal;
410         } finally {
411             unlock();
412         }
413     }
414 
415     @Override
416     public long numDeallocations() {
417         final long deallocs;
418         lock();
419         try {
420             deallocs = deallocationsSmall + deallocationsNormal;
421         } finally {
422             unlock();
423         }
424         return deallocs + deallocationsHuge.value();
425     }
426 
427     @Override
428     public long numTinyDeallocations() {
429         return 0;
430     }
431 
432     @Override
433     public long numSmallDeallocations() {
434         lock();
435         try {
436             return deallocationsSmall;
437         } finally {
438             unlock();
439         }
440     }
441 
442     @Override
443     public long numNormalDeallocations() {
444         lock();
445         try {
446             return deallocationsNormal;
447         } finally {
448             unlock();
449         }
450     }
451 
452     @Override
453     public long numHugeAllocations() {
454         return allocationsHuge.value();
455     }
456 
457     @Override
458     public long numHugeDeallocations() {
459         return deallocationsHuge.value();
460     }
461 
462     @Override
463     public long numActiveAllocations() {
464         long val = allocationsSmall.value() + allocationsHuge.value()
465                 - deallocationsHuge.value();
466         lock();
467         try {
468             val += allocationsNormal - (deallocationsSmall + deallocationsNormal);
469         } finally {
470             unlock();
471         }
472         return max(val, 0);
473     }
474 
475     @Override
476     public long numActiveTinyAllocations() {
477         return 0;
478     }
479 
480     @Override
481     public long numActiveSmallAllocations() {
482         return max(numSmallAllocations() - numSmallDeallocations(), 0);
483     }
484 
485     @Override
486     public long numActiveNormalAllocations() {
487         final long val;
488         lock();
489         try {
490             val = allocationsNormal - deallocationsNormal;
491         } finally {
492             unlock();
493         }
494         return max(val, 0);
495     }
496 
497     @Override
498     public long numActiveHugeAllocations() {
499         return max(numHugeAllocations() - numHugeDeallocations(), 0);
500     }
501 
502     @Override
503     public long numActiveBytes() {
504         long val = activeBytesHuge.value();
505         lock();
506         try {
507             for (int i = 0; i < chunkListMetrics.size(); i++) {
508                 for (PoolChunkMetric m: chunkListMetrics.get(i)) {
509                     val += m.chunkSize();
510                 }
511             }
512         } finally {
513             unlock();
514         }
515         return max(0, val);
516     }
517 
518     /**
519      * Returns an estimate of the number of bytes that are currently pinned to buffer instances by the arena. The
520      * pinned memory is not accessible for use by any other allocation until the buffers using it have all been released.
521      */
522     public long numPinnedBytes() {
523         long val = activeBytesHuge.value(); // Huge chunks are exact-sized for the buffers they were allocated to.
524         for (int i = 0; i < chunkListMetrics.size(); i++) {
525             for (PoolChunkMetric m: chunkListMetrics.get(i)) {
526                 val += ((PoolChunk<?>) m).pinnedBytes();
527             }
528         }
529         return max(0, val);
530     }
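    // Illustrative note, not part of the original file: arena statistics such as numActiveBytes() and
    // numAllocations() are exposed through the PoolArenaMetric interface. A hedged sketch of reading them
    // from the default pooled allocator:
    //
    //   PooledByteBufAllocatorMetric metric = PooledByteBufAllocator.DEFAULT.metric();
    //   for (PoolArenaMetric arena : metric.directArenas()) {
    //       System.out.println("active bytes: " + arena.numActiveBytes()
    //               + ", allocations: " + arena.numAllocations());
    //   }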
531 
532     protected abstract PoolChunk<T> newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize);
533     protected abstract PoolChunk<T> newUnpooledChunk(int capacity);
534     protected abstract PooledByteBuf<T> newByteBuf(int maxCapacity);
535     protected abstract void memoryCopy(T src, int srcOffset, PooledByteBuf<T> dst, int length);
536     protected abstract void destroyChunk(PoolChunk<T> chunk);
537 
538     @Override
539     public String toString() {
540         lock();
541         try {
542             StringBuilder buf = new StringBuilder()
543                     .append("Chunk(s) at 0~25%:")
544                     .append(StringUtil.NEWLINE)
545                     .append(qInit)
546                     .append(StringUtil.NEWLINE)
547                     .append("Chunk(s) at 0~50%:")
548                     .append(StringUtil.NEWLINE)
549                     .append(q000)
550                     .append(StringUtil.NEWLINE)
551                     .append("Chunk(s) at 25~75%:")
552                     .append(StringUtil.NEWLINE)
553                     .append(q025)
554                     .append(StringUtil.NEWLINE)
555                     .append("Chunk(s) at 50~100%:")
556                     .append(StringUtil.NEWLINE)
557                     .append(q050)
558                     .append(StringUtil.NEWLINE)
559                     .append("Chunk(s) at 75~100%:")
560                     .append(StringUtil.NEWLINE)
561                     .append(q075)
562                     .append(StringUtil.NEWLINE)
563                     .append("Chunk(s) at 100%:")
564                     .append(StringUtil.NEWLINE)
565                     .append(q100)
566                     .append(StringUtil.NEWLINE)
567                     .append("small subpages:");
568             appendPoolSubPages(buf, smallSubpagePools);
569             buf.append(StringUtil.NEWLINE);
570             return buf.toString();
571         } finally {
572             unlock();
573         }
574     }
575 
576     private static void appendPoolSubPages(StringBuilder buf, PoolSubpage<?>[] subpages) {
577         for (int i = 0; i < subpages.length; i ++) {
578             PoolSubpage<?> head = subpages[i];
579             if (head.next == head || head.next == null) {
580                 continue;
581             }
582 
583             buf.append(StringUtil.NEWLINE)
584                     .append(i)
585                     .append(": ");
586             PoolSubpage<?> s = head.next;
587             while (s != null) {
588                 buf.append(s);
589                 s = s.next;
590                 if (s == head) {
591                     break;
592                 }
593             }
594         }
595     }
596 
597     @Override
598     protected final void finalize() throws Throwable {
599         try {
600             super.finalize();
601         } finally {
602             destroyPoolSubPages(smallSubpagePools);
603             destroyPoolChunkLists(qInit, q000, q025, q050, q075, q100);
604         }
605     }
606 
607     private static void destroyPoolSubPages(PoolSubpage<?>[] pages) {
608         for (PoolSubpage<?> page : pages) {
609             page.destroy();
610         }
611     }
612 
613     private void destroyPoolChunkLists(PoolChunkList<T>... chunkLists) {
614         for (PoolChunkList<T> chunkList: chunkLists) {
615             chunkList.destroy(this);
616         }
617     }
618 
619     static final class HeapArena extends PoolArena<byte[]> {
620         private final AtomicReference<PoolChunk<byte[]>> lastDestroyedChunk;
621 
622         HeapArena(PooledByteBufAllocator parent, SizeClasses sizeClass) {
623             super(parent, sizeClass);
624             lastDestroyedChunk = new AtomicReference<PoolChunk<byte[]>>();
625         }
626 
627         private static byte[] newByteArray(int size) {
628             return PlatformDependent.allocateUninitializedArray(size);
629         }
630 
631         @Override
632         boolean isDirect() {
633             return false;
634         }
635 
636         @Override
637         protected PoolChunk<byte[]> newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize) {
638             PoolChunk<byte[]> chunk = lastDestroyedChunk.getAndSet(null);
639             if (chunk != null) {
640                 assert chunk.chunkSize == chunkSize &&
641                         chunk.pageSize == pageSize &&
642                         chunk.maxPageIdx == maxPageIdx &&
643                         chunk.pageShifts == pageShifts;
644                 return chunk; // The parameters are always the same, so it's fine to reuse a previously allocated chunk.
645             }
646             return new PoolChunk<byte[]>(
647                     this, null, newByteArray(chunkSize), pageSize, pageShifts, chunkSize, maxPageIdx);
648         }
649 
650         @Override
651         protected PoolChunk<byte[]> newUnpooledChunk(int capacity) {
652             return new PoolChunk<byte[]>(this, null, newByteArray(capacity), capacity);
653         }
654 
655         @Override
656         protected void destroyChunk(PoolChunk<byte[]> chunk) {
657             // Rely on GC. But keep one chunk for reuse.
658             if (!chunk.unpooled && lastDestroyedChunk.get() == null) {
659                 lastDestroyedChunk.set(chunk); // The check-and-set does not need to be atomic.
660             }
661         }
662 
663         @Override
664         protected PooledByteBuf<byte[]> newByteBuf(int maxCapacity) {
665             return HAS_UNSAFE ? PooledUnsafeHeapByteBuf.newUnsafeInstance(maxCapacity)
666                     : PooledHeapByteBuf.newInstance(maxCapacity);
667         }
668 
669         @Override
670         protected void memoryCopy(byte[] src, int srcOffset, PooledByteBuf<byte[]> dst, int length) {
671             if (length == 0) {
672                 return;
673             }
674 
675             System.arraycopy(src, srcOffset, dst.memory, dst.offset, length);
676         }
677     }
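    // Illustrative note, not part of the original file: HeapArena.newChunk(...) and destroyChunk(...) above
    // form a single-slot cache: at most one discarded chunk is parked in an AtomicReference and handed back
    // out by the next newChunk(...) call, saving one byte[] allocation. A minimal sketch of the same pattern
    // in isolation (assumes every caller requests the same size):
    //
    //   final AtomicReference<byte[]> spare = new AtomicReference<byte[]>();
    //
    //   byte[] acquire(int size) {
    //       byte[] b = spare.getAndSet(null);       // atomically take the parked array, if any
    //       return b != null ? b : new byte[size];
    //   }
    //
    //   void release(byte[] b) {
    //       spare.compareAndSet(null, b);           // park at most one array; otherwise let the GC collect it
    //   }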
678 
679     static final class DirectArena extends PoolArena<ByteBuffer> {
680 
681         DirectArena(PooledByteBufAllocator parent, SizeClasses sizeClass) {
682             super(parent, sizeClass);
683         }
684 
685         @Override
686         boolean isDirect() {
687             return true;
688         }
689 
690         @Override
691         protected PoolChunk<ByteBuffer> newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize) {
692             if (sizeClass.directMemoryCacheAlignment == 0) {
693                 ByteBuffer memory = allocateDirect(chunkSize);
694                 return new PoolChunk<ByteBuffer>(this, memory, memory, pageSize, pageShifts,
695                         chunkSize, maxPageIdx);
696             }
697 
698             final ByteBuffer base = allocateDirect(chunkSize + sizeClass.directMemoryCacheAlignment);
699             final ByteBuffer memory = PlatformDependent.alignDirectBuffer(base, sizeClass.directMemoryCacheAlignment);
700             return new PoolChunk<ByteBuffer>(this, base, memory, pageSize,
701                     pageShifts, chunkSize, maxPageIdx);
702         }
703 
704         @Override
705         protected PoolChunk<ByteBuffer> newUnpooledChunk(int capacity) {
706             if (sizeClass.directMemoryCacheAlignment == 0) {
707                 ByteBuffer memory = allocateDirect(capacity);
708                 return new PoolChunk<ByteBuffer>(this, memory, memory, capacity);
709             }
710 
711             final ByteBuffer base = allocateDirect(capacity + sizeClass.directMemoryCacheAlignment);
712             final ByteBuffer memory = PlatformDependent.alignDirectBuffer(base, sizeClass.directMemoryCacheAlignment);
713             return new PoolChunk<ByteBuffer>(this, base, memory, capacity);
714         }
715 
716         private static ByteBuffer allocateDirect(int capacity) {
717             return PlatformDependent.useDirectBufferNoCleaner() ?
718                     PlatformDependent.allocateDirectNoCleaner(capacity) : ByteBuffer.allocateDirect(capacity);
719         }
720 
721         @Override
722         protected void destroyChunk(PoolChunk<ByteBuffer> chunk) {
723             if (PlatformDependent.useDirectBufferNoCleaner()) {
724                 PlatformDependent.freeDirectNoCleaner((ByteBuffer) chunk.base);
725             } else {
726                 PlatformDependent.freeDirectBuffer((ByteBuffer) chunk.base);
727             }
728         }
729 
730         @Override
731         protected PooledByteBuf<ByteBuffer> newByteBuf(int maxCapacity) {
732             if (HAS_UNSAFE) {
733                 return PooledUnsafeDirectByteBuf.newInstance(maxCapacity);
734             } else {
735                 return PooledDirectByteBuf.newInstance(maxCapacity);
736             }
737         }
738 
739         @Override
740         protected void memoryCopy(ByteBuffer src, int srcOffset, PooledByteBuf<ByteBuffer> dstBuf, int length) {
741             if (length == 0) {
742                 return;
743             }
744 
745             if (HAS_UNSAFE) {
746                 PlatformDependent.copyMemory(
747                         PlatformDependent.directBufferAddress(src) + srcOffset,
748                         PlatformDependent.directBufferAddress(dstBuf.memory) + dstBuf.offset, length);
749             } else {
750                 // We must duplicate the NIO buffers because they may be accessed by other Netty buffers.
751                 src = src.duplicate();
752                 ByteBuffer dst = dstBuf.internalNioBuffer();
753                 src.position(srcOffset).limit(srcOffset + length);
754                 dst.position(dstBuf.offset);
755                 dst.put(src);
756             }
757         }
758     }
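    // Illustrative note, not part of the original file: the non-Unsafe branch of memoryCopy(...) above
    // duplicates the source ByteBuffer before changing position/limit, because a ByteBuffer's indices are
    // mutable shared state. A minimal sketch of the same precaution, assuming only java.nio:
    //
    //   static void copyRegion(ByteBuffer src, int srcOff, ByteBuffer dst, int dstOff, int length) {
    //       ByteBuffer s = src.duplicate();         // independent position/limit, same backing memory
    //       ByteBuffer d = dst.duplicate();
    //       s.position(srcOff).limit(srcOff + length);
    //       d.position(dstOff);
    //       d.put(s);                               // bulk copy of exactly 'length' bytes
    //   }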
759 
760     void lock() {
761         lock.lock();
762     }
763 
764     void unlock() {
765         lock.unlock();
766     }
767 
768     @Override
769     public int sizeIdx2size(int sizeIdx) {
770         return sizeClass.sizeIdx2size(sizeIdx);
771     }
772 
773     @Override
774     public int sizeIdx2sizeCompute(int sizeIdx) {
775         return sizeClass.sizeIdx2sizeCompute(sizeIdx);
776     }
777 
778     @Override
779     public long pageIdx2size(int pageIdx) {
780         return sizeClass.pageIdx2size(pageIdx);
781     }
782 
783     @Override
784     public long pageIdx2sizeCompute(int pageIdx) {
785         return sizeClass.pageIdx2sizeCompute(pageIdx);
786     }
787 
788     @Override
789     public int size2SizeIdx(int size) {
790         return sizeClass.size2SizeIdx(size);
791     }
792 
793     @Override
794     public int pages2pageIdx(int pages) {
795         return sizeClass.pages2pageIdx(pages);
796     }
797 
798     @Override
799     public int pages2pageIdxFloor(int pages) {
800         return sizeClass.pages2pageIdxFloor(pages);
801     }
802 
803     @Override
804     public int normalizeSize(int size) {
805         return sizeClass.normalizeSize(size);
806     }
807 }