View Javadoc
1   /*
2    * Copyright 2012 The Netty Project
3    *
4    * The Netty Project licenses this file to you under the Apache License,
5    * version 2.0 (the "License"); you may not use this file except in compliance
6    * with the License. You may obtain a copy of the License at:
7    *
8    *   https://www.apache.org/licenses/LICENSE-2.0
9    *
10   * Unless required by applicable law or agreed to in writing, software
11   * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12   * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13   * License for the specific language governing permissions and limitations
14   * under the License.
15   */
16  
17  package io.netty.buffer;
18  
19  import io.netty.util.internal.LongCounter;
20  import io.netty.util.internal.PlatformDependent;
21  import io.netty.util.internal.StringUtil;
22  
23  import java.nio.ByteBuffer;
24  import java.util.ArrayList;
25  import java.util.Collections;
26  import java.util.List;
27  import java.util.concurrent.atomic.AtomicInteger;
28  import java.util.concurrent.atomic.AtomicReference;
29  import java.util.concurrent.locks.ReentrantLock;
30  
31  import static io.netty.buffer.PoolChunk.isSubpage;
32  import static java.lang.Math.max;
33  
34  abstract class PoolArena<T> implements PoolArenaMetric {
35      private static final boolean HAS_UNSAFE = PlatformDependent.hasUnsafe();
36  
37      enum SizeClass {
38          Small,
39          Normal
40      }
41  
42      final PooledByteBufAllocator parent;
43  
44      final PoolSubpage<T>[] smallSubpagePools;
45  
46      private final PoolChunkList<T> q050;
47      private final PoolChunkList<T> q025;
48      private final PoolChunkList<T> q000;
49      private final PoolChunkList<T> qInit;
50      private final PoolChunkList<T> q075;
51      private final PoolChunkList<T> q100;
52  
53      private final List<PoolChunkListMetric> chunkListMetrics;
54  
55      // Metrics for allocations and deallocations
56      private long allocationsNormal;
57      // We need to use the LongCounter here as this is not guarded via synchronized block.
58      private final LongCounter allocationsSmall = PlatformDependent.newLongCounter();
59      private final LongCounter allocationsHuge = PlatformDependent.newLongCounter();
60      private final LongCounter activeBytesHuge = PlatformDependent.newLongCounter();
61  
62      private long deallocationsSmall;
63      private long deallocationsNormal;
64  
65      private long pooledChunkAllocations;
66      private long pooledChunkDeallocations;
67  
68      // We need to use the LongCounter here as this is not guarded via synchronized block.
69      private final LongCounter deallocationsHuge = PlatformDependent.newLongCounter();
70  
71      // Number of thread caches backed by this arena.
72      final AtomicInteger numThreadCaches = new AtomicInteger();
73  
74      // TODO: Test if adding padding helps under contention
75      //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
76  
77      private final ReentrantLock lock = new ReentrantLock();
78  
79      final SizeClasses sizeClass;
80  
81      protected PoolArena(PooledByteBufAllocator parent, SizeClasses sizeClass) {
82          assert null != sizeClass;
83          this.parent = parent;
84          this.sizeClass = sizeClass;
85          smallSubpagePools = newSubpagePoolArray(sizeClass.nSubpages);
86          for (int i = 0; i < smallSubpagePools.length; i ++) {
87              smallSubpagePools[i] = newSubpagePoolHead(i);
88          }
89  
90          q100 = new PoolChunkList<T>(this, null, 100, Integer.MAX_VALUE, sizeClass.chunkSize);
91          q075 = new PoolChunkList<T>(this, q100, 75, 100, sizeClass.chunkSize);
92          q050 = new PoolChunkList<T>(this, q100, 50, 100, sizeClass.chunkSize);
93          q025 = new PoolChunkList<T>(this, q050, 25, 75, sizeClass.chunkSize);
94          q000 = new PoolChunkList<T>(this, q025, 1, 50, sizeClass.chunkSize);
95          qInit = new PoolChunkList<T>(this, q000, Integer.MIN_VALUE, 25, sizeClass.chunkSize);
96  
97          q100.prevList(q075);
98          q075.prevList(q050);
99          q050.prevList(q025);
100         q025.prevList(q000);
101         q000.prevList(null);
102         qInit.prevList(qInit);
103 
104         List<PoolChunkListMetric> metrics = new ArrayList<PoolChunkListMetric>(6);
105         metrics.add(qInit);
106         metrics.add(q000);
107         metrics.add(q025);
108         metrics.add(q050);
109         metrics.add(q075);
110         metrics.add(q100);
111         chunkListMetrics = Collections.unmodifiableList(metrics);
112     }
113 
114     private PoolSubpage<T> newSubpagePoolHead(int index) {
115         PoolSubpage<T> head = new PoolSubpage<T>(index);
116         head.prev = head;
117         head.next = head;
118         return head;
119     }
120 
121     @SuppressWarnings("unchecked")
122     private PoolSubpage<T>[] newSubpagePoolArray(int size) {
123         return new PoolSubpage[size];
124     }
125 
126     abstract boolean isDirect();
127 
128     PooledByteBuf<T> allocate(PoolThreadCache cache, int reqCapacity, int maxCapacity) {
129         PooledByteBuf<T> buf = newByteBuf(maxCapacity);
130         allocate(cache, buf, reqCapacity);
131         return buf;
132     }
133 
134     private void allocate(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity) {
135         final int sizeIdx = sizeClass.size2SizeIdx(reqCapacity);
136 
137         if (sizeIdx <= sizeClass.smallMaxSizeIdx) {
138             tcacheAllocateSmall(cache, buf, reqCapacity, sizeIdx);
139         } else if (sizeIdx < sizeClass.nSizes) {
140             tcacheAllocateNormal(cache, buf, reqCapacity, sizeIdx);
141         } else {
142             int normCapacity = sizeClass.directMemoryCacheAlignment > 0
143                     ? sizeClass.normalizeSize(reqCapacity) : reqCapacity;
144             // Huge allocations are never served via the cache so just call allocateHuge
145             allocateHuge(buf, normCapacity);
146         }
147     }
148 
149     private void tcacheAllocateSmall(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity,
150                                      final int sizeIdx) {
151 
152         if (cache.allocateSmall(this, buf, reqCapacity, sizeIdx)) {
153             // was able to allocate out of the cache so move on
154             return;
155         }
156 
157         /*
158          * Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and
159          * {@link PoolChunk#free(long)} may modify the doubly linked list as well.
160          */
161         final PoolSubpage<T> head = smallSubpagePools[sizeIdx];
162         final boolean needsNormalAllocation;
163         head.lock();
164         try {
165             final PoolSubpage<T> s = head.next;
166             needsNormalAllocation = s == head;
167             if (!needsNormalAllocation) {
168                 assert s.doNotDestroy && s.elemSize == sizeClass.sizeIdx2size(sizeIdx) : "doNotDestroy=" +
169                         s.doNotDestroy + ", elemSize=" + s.elemSize + ", sizeIdx=" + sizeIdx;
170                 long handle = s.allocate();
171                 assert handle >= 0;
172                 s.chunk.initBufWithSubpage(buf, null, handle, reqCapacity, cache);
173             }
174         } finally {
175             head.unlock();
176         }
177 
178         if (needsNormalAllocation) {
179             lock();
180             try {
181                 allocateNormal(buf, reqCapacity, sizeIdx, cache);
182             } finally {
183                 unlock();
184             }
185         }
186 
187         incSmallAllocation();
188     }
189 
190     private void tcacheAllocateNormal(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity,
191                                       final int sizeIdx) {
192         if (cache.allocateNormal(this, buf, reqCapacity, sizeIdx)) {
193             // was able to allocate out of the cache so move on
194             return;
195         }
196         lock();
197         try {
198             allocateNormal(buf, reqCapacity, sizeIdx, cache);
199             ++allocationsNormal;
200         } finally {
201             unlock();
202         }
203     }
204 
205     private void allocateNormal(PooledByteBuf<T> buf, int reqCapacity, int sizeIdx, PoolThreadCache threadCache) {
206         assert lock.isHeldByCurrentThread();
207         if (q050.allocate(buf, reqCapacity, sizeIdx, threadCache) ||
208             q025.allocate(buf, reqCapacity, sizeIdx, threadCache) ||
209             q000.allocate(buf, reqCapacity, sizeIdx, threadCache) ||
210             qInit.allocate(buf, reqCapacity, sizeIdx, threadCache) ||
211             q075.allocate(buf, reqCapacity, sizeIdx, threadCache)) {
212             return;
213         }
214 
215         // Add a new chunk.
216         PoolChunk<T> c = newChunk(sizeClass.pageSize, sizeClass.nPSizes, sizeClass.pageShifts, sizeClass.chunkSize);
217         boolean success = c.allocate(buf, reqCapacity, sizeIdx, threadCache);
218         assert success;
219         qInit.add(c);
220         ++pooledChunkAllocations;
221     }
222 
223     private void incSmallAllocation() {
224         allocationsSmall.increment();
225     }
226 
227     private void allocateHuge(PooledByteBuf<T> buf, int reqCapacity) {
228         PoolChunk<T> chunk = newUnpooledChunk(reqCapacity);
229         activeBytesHuge.add(chunk.chunkSize());
230         buf.initUnpooled(chunk, reqCapacity);
231         allocationsHuge.increment();
232     }
233 
234     void free(PoolChunk<T> chunk, ByteBuffer nioBuffer, long handle, int normCapacity, PoolThreadCache cache) {
235         chunk.decrementPinnedMemory(normCapacity);
236         if (chunk.unpooled) {
237             int size = chunk.chunkSize();
238             destroyChunk(chunk);
239             activeBytesHuge.add(-size);
240             deallocationsHuge.increment();
241         } else {
242             SizeClass sizeClass = sizeClass(handle);
243             if (cache != null && cache.add(this, chunk, nioBuffer, handle, normCapacity, sizeClass)) {
244                 // cached so not free it.
245                 return;
246             }
247 
248             freeChunk(chunk, handle, normCapacity, sizeClass, nioBuffer, false);
249         }
250     }
251 
252     private static SizeClass sizeClass(long handle) {
253         return isSubpage(handle) ? SizeClass.Small : SizeClass.Normal;
254     }
255 
256     void freeChunk(PoolChunk<T> chunk, long handle, int normCapacity, SizeClass sizeClass, ByteBuffer nioBuffer,
257                    boolean finalizer) {
258         final boolean destroyChunk;
259         lock();
260         try {
261             // We only call this if freeChunk is not called because of the PoolThreadCache finalizer as otherwise this
262             // may fail due lazy class-loading in for example tomcat.
263             if (!finalizer) {
264                 switch (sizeClass) {
265                     case Normal:
266                         ++deallocationsNormal;
267                         break;
268                     case Small:
269                         ++deallocationsSmall;
270                         break;
271                     default:
272                         throw new Error();
273                 }
274             }
275             destroyChunk = !chunk.parent.free(chunk, handle, normCapacity, nioBuffer);
276             if (destroyChunk) {
277                 // all other destroyChunk calls come from the arena itself being finalized, so don't need to be counted
278                 ++pooledChunkDeallocations;
279             }
280         } finally {
281             unlock();
282         }
283         if (destroyChunk) {
284             // destroyChunk not need to be called while holding the synchronized lock.
285             destroyChunk(chunk);
286         }
287     }
288 
289     void reallocate(PooledByteBuf<T> buf, int newCapacity) {
290         assert newCapacity >= 0 && newCapacity <= buf.maxCapacity();
291 
292         final int oldCapacity;
293         final PoolChunk<T> oldChunk;
294         final ByteBuffer oldNioBuffer;
295         final long oldHandle;
296         final T oldMemory;
297         final int oldOffset;
298         final int oldMaxLength;
299         final PoolThreadCache oldCache;
300 
301         // We synchronize on the ByteBuf itself to ensure there is no "concurrent" reallocations for the same buffer.
302         // We do this to ensure the ByteBuf internal fields that are used to allocate / free are not accessed
303         // concurrently. This is important as otherwise we might end up corrupting our internal state of our data
304         // structures.
305         //
306         // Also note we don't use a Lock here but just synchronized even tho this might seem like a bad choice for Loom.
307         // This is done to minimize the overhead per ByteBuf. The time this would block another thread should be
308         // relative small and so not be a problem for Loom.
309         // See https://github.com/netty/netty/issues/13467
310         synchronized (buf) {
311             oldCapacity = buf.length;
312             if (oldCapacity == newCapacity) {
313                 return;
314             }
315 
316             oldChunk = buf.chunk;
317             oldNioBuffer = buf.tmpNioBuf;
318             oldHandle = buf.handle;
319             oldMemory = buf.memory;
320             oldOffset = buf.offset;
321             oldMaxLength = buf.maxLength;
322             oldCache = buf.cache;
323 
324             // This does not touch buf's reader/writer indices
325             allocate(parent.threadCache(), buf, newCapacity);
326         }
327         int bytesToCopy;
328         if (newCapacity > oldCapacity) {
329             bytesToCopy = oldCapacity;
330         } else {
331             buf.trimIndicesToCapacity(newCapacity);
332             bytesToCopy = newCapacity;
333         }
334         memoryCopy(oldMemory, oldOffset, buf, bytesToCopy);
335         free(oldChunk, oldNioBuffer, oldHandle, oldMaxLength, oldCache);
336     }
337 
338     @Override
339     public int numThreadCaches() {
340         return numThreadCaches.get();
341     }
342 
343     @Override
344     public int numTinySubpages() {
345         return 0;
346     }
347 
348     @Override
349     public int numSmallSubpages() {
350         return smallSubpagePools.length;
351     }
352 
353     @Override
354     public int numChunkLists() {
355         return chunkListMetrics.size();
356     }
357 
358     @Override
359     public List<PoolSubpageMetric> tinySubpages() {
360         return Collections.emptyList();
361     }
362 
363     @Override
364     public List<PoolSubpageMetric> smallSubpages() {
365         return subPageMetricList(smallSubpagePools);
366     }
367 
368     @Override
369     public List<PoolChunkListMetric> chunkLists() {
370         return chunkListMetrics;
371     }
372 
373     private static List<PoolSubpageMetric> subPageMetricList(PoolSubpage<?>[] pages) {
374         List<PoolSubpageMetric> metrics = new ArrayList<PoolSubpageMetric>();
375         for (PoolSubpage<?> head : pages) {
376             if (head.next == head) {
377                 continue;
378             }
379             PoolSubpage<?> s = head.next;
380             for (;;) {
381                 metrics.add(s);
382                 s = s.next;
383                 if (s == head) {
384                     break;
385                 }
386             }
387         }
388         return metrics;
389     }
390 
391     @Override
392     public long numAllocations() {
393         final long allocsNormal;
394         lock();
395         try {
396             allocsNormal = allocationsNormal;
397         } finally {
398             unlock();
399         }
400         return allocationsSmall.value() + allocsNormal + allocationsHuge.value();
401     }
402 
403     @Override
404     public long numTinyAllocations() {
405         return 0;
406     }
407 
408     @Override
409     public long numSmallAllocations() {
410         return allocationsSmall.value();
411     }
412 
413     @Override
414     public long numNormalAllocations() {
415         lock();
416         try {
417             return allocationsNormal;
418         } finally {
419             unlock();
420         }
421     }
422 
423     @Override
424     public long numChunkAllocations() {
425         lock();
426         try {
427             return pooledChunkAllocations;
428         } finally {
429             unlock();
430         }
431     }
432 
433     @Override
434     public long numDeallocations() {
435         final long deallocs;
436         lock();
437         try {
438             deallocs = deallocationsSmall + deallocationsNormal;
439         } finally {
440             unlock();
441         }
442         return deallocs + deallocationsHuge.value();
443     }
444 
445     @Override
446     public long numTinyDeallocations() {
447         return 0;
448     }
449 
450     @Override
451     public long numSmallDeallocations() {
452         lock();
453         try {
454             return deallocationsSmall;
455         } finally {
456             unlock();
457         }
458     }
459 
460     @Override
461     public long numNormalDeallocations() {
462         lock();
463         try {
464             return deallocationsNormal;
465         } finally {
466             unlock();
467         }
468     }
469 
470     @Override
471     public long numChunkDeallocations() {
472         lock();
473         try {
474             return pooledChunkDeallocations;
475         } finally {
476             unlock();
477         }
478     }
479 
480     @Override
481     public long numHugeAllocations() {
482         return allocationsHuge.value();
483     }
484 
485     @Override
486     public long numHugeDeallocations() {
487         return deallocationsHuge.value();
488     }
489 
490     @Override
491     public  long numActiveAllocations() {
492         long val = allocationsSmall.value() + allocationsHuge.value()
493                 - deallocationsHuge.value();
494         lock();
495         try {
496             val += allocationsNormal - (deallocationsSmall + deallocationsNormal);
497         } finally {
498             unlock();
499         }
500         return max(val, 0);
501     }
502 
503     @Override
504     public long numActiveTinyAllocations() {
505         return 0;
506     }
507 
508     @Override
509     public long numActiveSmallAllocations() {
510         return max(numSmallAllocations() - numSmallDeallocations(), 0);
511     }
512 
513     @Override
514     public long numActiveNormalAllocations() {
515         final long val;
516         lock();
517         try {
518             val = allocationsNormal - deallocationsNormal;
519         } finally {
520             unlock();
521         }
522         return max(val, 0);
523     }
524 
525     @Override
526     public long numActiveChunks() {
527         final long val;
528         lock();
529         try {
530             val = pooledChunkAllocations - pooledChunkDeallocations;
531         } finally {
532             unlock();
533         }
534         return max(val, 0);
535     }
536 
537     @Override
538     public long numActiveHugeAllocations() {
539         return max(numHugeAllocations() - numHugeDeallocations(), 0);
540     }
541 
542     @Override
543     public long numActiveBytes() {
544         long val = activeBytesHuge.value();
545         lock();
546         try {
547             for (int i = 0; i < chunkListMetrics.size(); i++) {
548                 for (PoolChunkMetric m: chunkListMetrics.get(i)) {
549                     val += m.chunkSize();
550                 }
551             }
552         } finally {
553             unlock();
554         }
555         return max(0, val);
556     }
557 
558     /**
559      * Return an estimate of the number of bytes that are currently pinned to buffer instances, by the arena. The
560      * pinned memory is not accessible for use by any other allocation, until the buffers using have all been released.
561      */
562     public long numPinnedBytes() {
563         long val = activeBytesHuge.value(); // Huge chunks are exact-sized for the buffers they were allocated to.
564         for (int i = 0; i < chunkListMetrics.size(); i++) {
565             for (PoolChunkMetric m: chunkListMetrics.get(i)) {
566                 val += ((PoolChunk<?>) m).pinnedBytes();
567             }
568         }
569         return max(0, val);
570     }
571 
572     protected abstract PoolChunk<T> newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize);
573     protected abstract PoolChunk<T> newUnpooledChunk(int capacity);
574     protected abstract PooledByteBuf<T> newByteBuf(int maxCapacity);
575     protected abstract void memoryCopy(T src, int srcOffset, PooledByteBuf<T> dst, int length);
576     protected abstract void destroyChunk(PoolChunk<T> chunk);
577 
578     @Override
579     public String toString() {
580         lock();
581         try {
582             StringBuilder buf = new StringBuilder()
583                     .append("Chunk(s) at 0~25%:")
584                     .append(StringUtil.NEWLINE)
585                     .append(qInit)
586                     .append(StringUtil.NEWLINE)
587                     .append("Chunk(s) at 0~50%:")
588                     .append(StringUtil.NEWLINE)
589                     .append(q000)
590                     .append(StringUtil.NEWLINE)
591                     .append("Chunk(s) at 25~75%:")
592                     .append(StringUtil.NEWLINE)
593                     .append(q025)
594                     .append(StringUtil.NEWLINE)
595                     .append("Chunk(s) at 50~100%:")
596                     .append(StringUtil.NEWLINE)
597                     .append(q050)
598                     .append(StringUtil.NEWLINE)
599                     .append("Chunk(s) at 75~100%:")
600                     .append(StringUtil.NEWLINE)
601                     .append(q075)
602                     .append(StringUtil.NEWLINE)
603                     .append("Chunk(s) at 100%:")
604                     .append(StringUtil.NEWLINE)
605                     .append(q100)
606                     .append(StringUtil.NEWLINE)
607                     .append("small subpages:");
608             appendPoolSubPages(buf, smallSubpagePools);
609             buf.append(StringUtil.NEWLINE);
610             return buf.toString();
611         } finally {
612             unlock();
613         }
614     }
615 
616     private static void appendPoolSubPages(StringBuilder buf, PoolSubpage<?>[] subpages) {
617         for (int i = 0; i < subpages.length; i ++) {
618             PoolSubpage<?> head = subpages[i];
619             if (head.next == head || head.next == null) {
620                 continue;
621             }
622 
623             buf.append(StringUtil.NEWLINE)
624                     .append(i)
625                     .append(": ");
626             PoolSubpage<?> s = head.next;
627             while (s != null) {
628                 buf.append(s);
629                 s = s.next;
630                 if (s == head) {
631                     break;
632                 }
633             }
634         }
635     }
636 
637     @Override
638     protected final void finalize() throws Throwable {
639         try {
640             super.finalize();
641         } finally {
642             destroyPoolSubPages(smallSubpagePools);
643             destroyPoolChunkLists(qInit, q000, q025, q050, q075, q100);
644         }
645     }
646 
647     private static void destroyPoolSubPages(PoolSubpage<?>[] pages) {
648         for (PoolSubpage<?> page : pages) {
649             page.destroy();
650         }
651     }
652 
653     private void destroyPoolChunkLists(PoolChunkList<T>... chunkLists) {
654         for (PoolChunkList<T> chunkList: chunkLists) {
655             chunkList.destroy(this);
656         }
657     }
658 
659     static final class HeapArena extends PoolArena<byte[]> {
660         private final AtomicReference<PoolChunk<byte[]>> lastDestroyedChunk;
661 
662         HeapArena(PooledByteBufAllocator parent, SizeClasses sizeClass) {
663             super(parent, sizeClass);
664             lastDestroyedChunk = new AtomicReference<>();
665         }
666 
667         private static byte[] newByteArray(int size) {
668             return PlatformDependent.allocateUninitializedArray(size);
669         }
670 
671         @Override
672         boolean isDirect() {
673             return false;
674         }
675 
676         @Override
677         protected PoolChunk<byte[]> newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize) {
678             PoolChunk<byte[]> chunk = lastDestroyedChunk.getAndSet(null);
679             if (chunk != null) {
680                 assert chunk.chunkSize == chunkSize &&
681                         chunk.pageSize == pageSize &&
682                         chunk.maxPageIdx == maxPageIdx &&
683                         chunk.pageShifts == pageShifts;
684                 return chunk; // The parameters are always the same, so it's fine to reuse a previously allocated chunk.
685             }
686             return new PoolChunk<byte[]>(
687                     this, null, newByteArray(chunkSize), pageSize, pageShifts, chunkSize, maxPageIdx);
688         }
689 
690         @Override
691         protected PoolChunk<byte[]> newUnpooledChunk(int capacity) {
692             return new PoolChunk<byte[]>(this, null, newByteArray(capacity), capacity);
693         }
694 
695         @Override
696         protected void destroyChunk(PoolChunk<byte[]> chunk) {
697             // Rely on GC. But keep one chunk for reuse.
698             if (!chunk.unpooled && lastDestroyedChunk.get() == null) {
699                 lastDestroyedChunk.set(chunk); // The check-and-set does not need to be atomic.
700             }
701         }
702 
703         @Override
704         protected PooledByteBuf<byte[]> newByteBuf(int maxCapacity) {
705             return HAS_UNSAFE ? PooledUnsafeHeapByteBuf.newUnsafeInstance(maxCapacity)
706                     : PooledHeapByteBuf.newInstance(maxCapacity);
707         }
708 
709         @Override
710         protected void memoryCopy(byte[] src, int srcOffset, PooledByteBuf<byte[]> dst, int length) {
711             if (length == 0) {
712                 return;
713             }
714 
715             System.arraycopy(src, srcOffset, dst.memory, dst.offset, length);
716         }
717     }
718 
719     static final class DirectArena extends PoolArena<ByteBuffer> {
720 
721         DirectArena(PooledByteBufAllocator parent, SizeClasses sizeClass) {
722             super(parent, sizeClass);
723         }
724 
725         @Override
726         boolean isDirect() {
727             return true;
728         }
729 
730         @Override
731         protected PoolChunk<ByteBuffer> newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize) {
732             if (sizeClass.directMemoryCacheAlignment == 0) {
733                 ByteBuffer memory = allocateDirect(chunkSize);
734                 return new PoolChunk<ByteBuffer>(this, memory, memory, pageSize, pageShifts,
735                         chunkSize, maxPageIdx);
736             }
737 
738             final ByteBuffer base = allocateDirect(chunkSize + sizeClass.directMemoryCacheAlignment);
739             final ByteBuffer memory = PlatformDependent.alignDirectBuffer(base, sizeClass.directMemoryCacheAlignment);
740             return new PoolChunk<ByteBuffer>(this, base, memory, pageSize,
741                     pageShifts, chunkSize, maxPageIdx);
742         }
743 
744         @Override
745         protected PoolChunk<ByteBuffer> newUnpooledChunk(int capacity) {
746             if (sizeClass.directMemoryCacheAlignment == 0) {
747                 ByteBuffer memory = allocateDirect(capacity);
748                 return new PoolChunk<ByteBuffer>(this, memory, memory, capacity);
749             }
750 
751             final ByteBuffer base = allocateDirect(capacity + sizeClass.directMemoryCacheAlignment);
752             final ByteBuffer memory = PlatformDependent.alignDirectBuffer(base, sizeClass.directMemoryCacheAlignment);
753             return new PoolChunk<ByteBuffer>(this, base, memory, capacity);
754         }
755 
756         private static ByteBuffer allocateDirect(int capacity) {
757             return PlatformDependent.useDirectBufferNoCleaner() ?
758                     PlatformDependent.allocateDirectNoCleaner(capacity) : ByteBuffer.allocateDirect(capacity);
759         }
760 
761         @Override
762         protected void destroyChunk(PoolChunk<ByteBuffer> chunk) {
763             if (PlatformDependent.useDirectBufferNoCleaner()) {
764                 PlatformDependent.freeDirectNoCleaner((ByteBuffer) chunk.base);
765             } else {
766                 PlatformDependent.freeDirectBuffer((ByteBuffer) chunk.base);
767             }
768         }
769 
770         @Override
771         protected PooledByteBuf<ByteBuffer> newByteBuf(int maxCapacity) {
772             if (HAS_UNSAFE) {
773                 return PooledUnsafeDirectByteBuf.newInstance(maxCapacity);
774             } else {
775                 return PooledDirectByteBuf.newInstance(maxCapacity);
776             }
777         }
778 
779         @Override
780         protected void memoryCopy(ByteBuffer src, int srcOffset, PooledByteBuf<ByteBuffer> dstBuf, int length) {
781             if (length == 0) {
782                 return;
783             }
784 
785             if (HAS_UNSAFE) {
786                 PlatformDependent.copyMemory(
787                         PlatformDependent.directBufferAddress(src) + srcOffset,
788                         PlatformDependent.directBufferAddress(dstBuf.memory) + dstBuf.offset, length);
789             } else {
790                 // We must duplicate the NIO buffers because they may be accessed by other Netty buffers.
791                 src = src.duplicate();
792                 ByteBuffer dst = dstBuf.internalNioBuffer();
793                 src.position(srcOffset).limit(srcOffset + length);
794                 dst.position(dstBuf.offset);
795                 dst.put(src);
796             }
797         }
798     }
799 
800     void lock() {
801         lock.lock();
802     }
803 
804     void unlock() {
805         lock.unlock();
806     }
807 
808     @Override
809     public int sizeIdx2size(int sizeIdx) {
810         return sizeClass.sizeIdx2size(sizeIdx);
811     }
812 
813     @Override
814     public int sizeIdx2sizeCompute(int sizeIdx) {
815         return sizeClass.sizeIdx2sizeCompute(sizeIdx);
816     }
817 
818     @Override
819     public long pageIdx2size(int pageIdx) {
820         return sizeClass.pageIdx2size(pageIdx);
821     }
822 
823     @Override
824     public long pageIdx2sizeCompute(int pageIdx) {
825         return sizeClass.pageIdx2sizeCompute(pageIdx);
826     }
827 
828     @Override
829     public int size2SizeIdx(int size) {
830         return sizeClass.size2SizeIdx(size);
831     }
832 
833     @Override
834     public int pages2pageIdx(int pages) {
835         return sizeClass.pages2pageIdx(pages);
836     }
837 
838     @Override
839     public int pages2pageIdxFloor(int pages) {
840         return sizeClass.pages2pageIdxFloor(pages);
841     }
842 
843     @Override
844     public int normalizeSize(int size) {
845         return sizeClass.normalizeSize(size);
846     }
847 }