1   /*
2    * Copyright 2012 The Netty Project
3    *
4    * The Netty Project licenses this file to you under the Apache License,
5    * version 2.0 (the "License"); you may not use this file except in compliance
6    * with the License. You may obtain a copy of the License at:
7    *
8    *   http://www.apache.org/licenses/LICENSE-2.0
9    *
10   * Unless required by applicable law or agreed to in writing, software
11   * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12   * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13   * License for the specific language governing permissions and limitations
14   * under the License.
15   */
16  
17  package io.netty.buffer;
18  
19  import io.netty.util.internal.LongCounter;
20  import io.netty.util.internal.PlatformDependent;
21  import io.netty.util.internal.StringUtil;
22  
23  import java.nio.ByteBuffer;
24  import java.util.ArrayList;
25  import java.util.Collections;
26  import java.util.List;
27  import java.util.concurrent.atomic.AtomicInteger;
28  
29  import static java.lang.Math.max;
30  
31  abstract class PoolArena<T> implements PoolArenaMetric {
32      static final boolean HAS_UNSAFE = PlatformDependent.hasUnsafe();
33  
34      enum SizeClass {
35          Tiny,
36          Small,
37          Normal
38      }
39  
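        // 512 >>> 4 == 32 list heads: one per 16-byte step of tiny size (tiny capacities, i.e. those
        // below 512 bytes, are normalized to multiples of 16 and indexed by tinyIdx below).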
40      static final int numTinySubpagePools = 512 >>> 4;
41  
42      final PooledByteBufAllocator parent;
43  
44      private final int maxOrder;
45      final int pageSize;
46      final int pageShifts;
47      final int chunkSize;
48      final int subpageOverflowMask;
49      final int numSmallSubpagePools;
50      final int directMemoryCacheAlignment;
51      final int directMemoryCacheAlignmentMask;
52      private final PoolSubpage<T>[] tinySubpagePools;
53      private final PoolSubpage<T>[] smallSubpagePools;
54  
55      private final PoolChunkList<T> q050;
56      private final PoolChunkList<T> q025;
57      private final PoolChunkList<T> q000;
58      private final PoolChunkList<T> qInit;
59      private final PoolChunkList<T> q075;
60      private final PoolChunkList<T> q100;
61  
62      private final List<PoolChunkListMetric> chunkListMetrics;
63  
64      // Metrics for allocations and deallocations
65      private long allocationsNormal;
66      // We need to use the LongCounter here as this is not guarded by a synchronized block.
67      private final LongCounter allocationsTiny = PlatformDependent.newLongCounter();
68      private final LongCounter allocationsSmall = PlatformDependent.newLongCounter();
69      private final LongCounter allocationsHuge = PlatformDependent.newLongCounter();
70      private final LongCounter activeBytesHuge = PlatformDependent.newLongCounter();
71  
72      private long deallocationsTiny;
73      private long deallocationsSmall;
74      private long deallocationsNormal;
75  
76      // We need to use the LongCounter here as this is not guarded by a synchronized block.
77      private final LongCounter deallocationsHuge = PlatformDependent.newLongCounter();
78  
79      // Number of thread caches backed by this arena.
80      final AtomicInteger numThreadCaches = new AtomicInteger();
81  
82      // TODO: Test if adding padding helps under contention
83      //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
84  
85      protected PoolArena(PooledByteBufAllocator parent, int pageSize,
86            int maxOrder, int pageShifts, int chunkSize, int cacheAlignment) {
87          this.parent = parent;
88          this.pageSize = pageSize;
89          this.maxOrder = maxOrder;
90          this.pageShifts = pageShifts;
91          this.chunkSize = chunkSize;
92          directMemoryCacheAlignment = cacheAlignment;
93          directMemoryCacheAlignmentMask = cacheAlignment - 1;
94          subpageOverflowMask = ~(pageSize - 1);
95          tinySubpagePools = newSubpagePoolArray(numTinySubpagePools);
96          for (int i = 0; i < tinySubpagePools.length; i ++) {
97              tinySubpagePools[i] = newSubpagePoolHead(pageSize);
98          }
99  
100         numSmallSubpagePools = pageShifts - 9;
101         smallSubpagePools = newSubpagePoolArray(numSmallSubpagePools);
102         for (int i = 0; i < smallSubpagePools.length; i ++) {
103             smallSubpagePools[i] = newSubpagePoolHead(pageSize);
104         }
105 
106         q100 = new PoolChunkList<T>(this, null, 100, Integer.MAX_VALUE, chunkSize);
107         q075 = new PoolChunkList<T>(this, q100, 75, 100, chunkSize);
108         q050 = new PoolChunkList<T>(this, q075, 50, 100, chunkSize);
109         q025 = new PoolChunkList<T>(this, q050, 25, 75, chunkSize);
110         q000 = new PoolChunkList<T>(this, q025, 1, 50, chunkSize);
111         qInit = new PoolChunkList<T>(this, q000, Integer.MIN_VALUE, 25, chunkSize);
112 
113         q100.prevList(q075);
114         q075.prevList(q050);
115         q050.prevList(q025);
116         q025.prevList(q000);
117         q000.prevList(null);
118         qInit.prevList(qInit);
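            // Chunks migrate to a list's nextList as their usage grows and to its prevList as it shrinks.
            // q000 has no previous list, so a chunk there that becomes completely free drops out of the
            // pool and can be destroyed, while qInit is its own previous list (and has Integer.MIN_VALUE
            // as its minimum usage), so chunks still in qInit are never released that way.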
119 
120         List<PoolChunkListMetric> metrics = new ArrayList<PoolChunkListMetric>(6);
121         metrics.add(qInit);
122         metrics.add(q000);
123         metrics.add(q025);
124         metrics.add(q050);
125         metrics.add(q075);
126         metrics.add(q100);
127         chunkListMetrics = Collections.unmodifiableList(metrics);
128     }
129 
130     private PoolSubpage<T> newSubpagePoolHead(int pageSize) {
131         PoolSubpage<T> head = new PoolSubpage<T>(pageSize);
132         head.prev = head;
133         head.next = head;
134         return head;
135     }
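        // Each pool head is a self-linked sentinel of a circular doubly linked list;
        // head.next == head means the list for that size class is currently empty.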
136 
137     @SuppressWarnings("unchecked")
138     private PoolSubpage<T>[] newSubpagePoolArray(int size) {
139         return new PoolSubpage[size];
140     }
141 
142     abstract boolean isDirect();
143 
144     PooledByteBuf<T> allocate(PoolThreadCache cache, int reqCapacity, int maxCapacity) {
145         PooledByteBuf<T> buf = newByteBuf(maxCapacity);
146         allocate(cache, buf, reqCapacity);
147         return buf;
148     }
149 
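        // Index of the tiny/small subpage pool for a normalized capacity, e.g. tinyIdx(16) == 1 and
        // tinyIdx(496) == 31, while smallIdx(512) == 0, smallIdx(1024) == 1 and smallIdx(4096) == 3.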
150     static int tinyIdx(int normCapacity) {
151         return normCapacity >>> 4;
152     }
153 
154     static int smallIdx(int normCapacity) {
155         int tableIdx = 0;
156         int i = normCapacity >>> 10;
157         while (i != 0) {
158             i >>>= 1;
159             tableIdx ++;
160         }
161         return tableIdx;
162     }
163 
164     // capacity < pageSize
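        // (subpageOverflowMask == ~(pageSize - 1), so with a power-of-two page size this is
        // equivalent to normCapacity < pageSize.)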
165     boolean isTinyOrSmall(int normCapacity) {
166         return (normCapacity & subpageOverflowMask) == 0;
167     }
168 
169     // normCapacity < 512
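        // (0xFFFFFE00 == ~511, so the test passes exactly when all bits at or above 512 are clear.)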
170     static boolean isTiny(int normCapacity) {
171         return (normCapacity & 0xFFFFFE00) == 0;
172     }
173 
174     private void allocate(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity) {
175         final int normCapacity = normalizeCapacity(reqCapacity);
176         if (isTinyOrSmall(normCapacity)) { // capacity < pageSize
177             int tableIdx;
178             PoolSubpage<T>[] table;
179             boolean tiny = isTiny(normCapacity);
180             if (tiny) { // < 512
181                 if (cache.allocateTiny(this, buf, reqCapacity, normCapacity)) {
182                     // was able to allocate out of the cache so move on
183                     return;
184                 }
185                 tableIdx = tinyIdx(normCapacity);
186                 table = tinySubpagePools;
187             } else {
188                 if (cache.allocateSmall(this, buf, reqCapacity, normCapacity)) {
189                     // was able to allocate out of the cache so move on
190                     return;
191                 }
192                 tableIdx = smallIdx(normCapacity);
193                 table = smallSubpagePools;
194             }
195 
196             final PoolSubpage<T> head = table[tableIdx];
197 
198             /**
199              * Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and
200              * {@link PoolChunk#free(long)} may modify the doubly linked list as well.
201              */
202             synchronized (head) {
203                 final PoolSubpage<T> s = head.next;
204                 if (s != head) {
205                     assert s.doNotDestroy && s.elemSize == normCapacity;
206                     long handle = s.allocate();
207                     assert handle >= 0;
208                     s.chunk.initBufWithSubpage(buf, handle, reqCapacity);
209                     incTinySmallAllocation(tiny);
210                     return;
211                 }
212             }
213             synchronized (this) {
214                 allocateNormal(buf, reqCapacity, normCapacity);
215             }
216 
217             incTinySmallAllocation(tiny);
218             return;
219         }
220         if (normCapacity <= chunkSize) {
221             if (cache.allocateNormal(this, buf, reqCapacity, normCapacity)) {
222                 // was able to allocate out of the cache so move on
223                 return;
224             }
225             synchronized (this) {
226                 allocateNormal(buf, reqCapacity, normCapacity);
227                 ++allocationsNormal;
228             }
229         } else {
230             // Huge allocations are never served via the cache so just call allocateHuge
231             allocateHuge(buf, reqCapacity);
232         }
233     }
234 
235     // This method must be called inside a synchronized(this) { ... } block.
236     private void allocateNormal(PooledByteBuf<T> buf, int reqCapacity, int normCapacity) {
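            // The lists are tried in the order q050, q025, q000, qInit, q075, presumably to favor chunks
            // that are already moderately used and so keep overall chunk utilization high; a brand new
            // chunk is allocated only when none of the existing lists can satisfy the request.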
237         if (q050.allocate(buf, reqCapacity, normCapacity) || q025.allocate(buf, reqCapacity, normCapacity) ||
238             q000.allocate(buf, reqCapacity, normCapacity) || qInit.allocate(buf, reqCapacity, normCapacity) ||
239             q075.allocate(buf, reqCapacity, normCapacity)) {
240             return;
241         }
242 
243         // Add a new chunk.
244         PoolChunk<T> c = newChunk(pageSize, maxOrder, pageShifts, chunkSize);
245         long handle = c.allocate(normCapacity);
246         assert handle > 0;
247         c.initBuf(buf, handle, reqCapacity);
248         qInit.add(c);
249     }
250 
251     private void incTinySmallAllocation(boolean tiny) {
252         if (tiny) {
253             allocationsTiny.increment();
254         } else {
255             allocationsSmall.increment();
256         }
257     }
258 
259     private void allocateHuge(PooledByteBuf<T> buf, int reqCapacity) {
260         PoolChunk<T> chunk = newUnpooledChunk(reqCapacity);
261         activeBytesHuge.add(chunk.chunkSize());
262         buf.initUnpooled(chunk, reqCapacity);
263         allocationsHuge.increment();
264     }
265 
266     void free(PoolChunk<T> chunk, long handle, int normCapacity, PoolThreadCache cache) {
267         if (chunk.unpooled) {
268             int size = chunk.chunkSize();
269             destroyChunk(chunk);
270             activeBytesHuge.add(-size);
271             deallocationsHuge.increment();
272         } else {
273             SizeClass sizeClass = sizeClass(normCapacity);
274             if (cache != null && cache.add(this, chunk, handle, normCapacity, sizeClass)) {
275             // cached, so do not free it here.
276                 return;
277             }
278 
279             freeChunk(chunk, handle, sizeClass);
280         }
281     }
282 
283     private SizeClass sizeClass(int normCapacity) {
284         if (!isTinyOrSmall(normCapacity)) {
285             return SizeClass.Normal;
286         }
287         return isTiny(normCapacity) ? SizeClass.Tiny : SizeClass.Small;
288     }
289 
290     void freeChunk(PoolChunk<T> chunk, long handle, SizeClass sizeClass) {
291         final boolean destroyChunk;
292         synchronized (this) {
293             switch (sizeClass) {
294             case Normal:
295                 ++deallocationsNormal;
296                 break;
297             case Small:
298                 ++deallocationsSmall;
299                 break;
300             case Tiny:
301                 ++deallocationsTiny;
302                 break;
303             default:
304                 throw new Error();
305             }
306             destroyChunk = !chunk.parent.free(chunk, handle);
307         }
308         if (destroyChunk) {
309             // destroyChunk does not need to be called while holding the synchronized lock.
310             destroyChunk(chunk);
311         }
312     }
313 
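        // Locates the sentinel head for a normalized element size using the same indexing scheme as
        // tinyIdx/smallIdx above.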
314     PoolSubpage<T> findSubpagePoolHead(int elemSize) {
315         int tableIdx;
316         PoolSubpage<T>[] table;
317         if (isTiny(elemSize)) { // < 512
318             tableIdx = elemSize >>> 4;
319             table = tinySubpagePools;
320         } else {
321             tableIdx = 0;
322             elemSize >>>= 10;
323             while (elemSize != 0) {
324                 elemSize >>>= 1;
325                 tableIdx ++;
326             }
327             table = smallSubpagePools;
328         }
329 
330         return table[tableIdx];
331     }
332 
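        // Rounds a requested capacity up to the size that will actually be allocated. For example, with
        // directMemoryCacheAlignment == 0 and reqCapacity < chunkSize: normalizeCapacity(50) == 64,
        // normalizeCapacity(32) == 32 and normalizeCapacity(1000) == 1024.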
333     int normalizeCapacity(int reqCapacity) {
334         if (reqCapacity < 0) {
335             throw new IllegalArgumentException("capacity: " + reqCapacity + " (expected: 0+)");
336         }
337 
338         if (reqCapacity >= chunkSize) {
339             return directMemoryCacheAlignment == 0 ? reqCapacity : alignCapacity(reqCapacity);
340         }
341 
342         if (!isTiny(reqCapacity)) { // >= 512
343             // Round up to the next power of two.
344 
345             int normalizedCapacity = reqCapacity;
346             normalizedCapacity --;
347             normalizedCapacity |= normalizedCapacity >>>  1;
348             normalizedCapacity |= normalizedCapacity >>>  2;
349             normalizedCapacity |= normalizedCapacity >>>  4;
350             normalizedCapacity |= normalizedCapacity >>>  8;
351             normalizedCapacity |= normalizedCapacity >>> 16;
352             normalizedCapacity ++;
353 
354             if (normalizedCapacity < 0) {
355                 normalizedCapacity >>>= 1;
356             }
357             assert directMemoryCacheAlignment == 0 || (normalizedCapacity & directMemoryCacheAlignmentMask) == 0;
358 
359             return normalizedCapacity;
360         }
361 
362         if (directMemoryCacheAlignment > 0) {
363             return alignCapacity(reqCapacity);
364         }
365 
366         // Quantum-spaced: tiny sizes are multiples of 16 bytes.
367         if ((reqCapacity & 15) == 0) {
368             return reqCapacity;
369         }
370 
371         return (reqCapacity & ~15) + 16;
372     }
373 
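        // e.g. with directMemoryCacheAlignment == 64: alignCapacity(100) == 128 and alignCapacity(128) == 128.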
374     int alignCapacity(int reqCapacity) {
375         int delta = reqCapacity & directMemoryCacheAlignmentMask;
376         return delta == 0 ? reqCapacity : reqCapacity + directMemoryCacheAlignment - delta;
377     }
378 
379     void reallocate(PooledByteBuf<T> buf, int newCapacity, boolean freeOldMemory) {
380         if (newCapacity < 0 || newCapacity > buf.maxCapacity()) {
381             throw new IllegalArgumentException("newCapacity: " + newCapacity);
382         }
383 
384         int oldCapacity = buf.length;
385         if (oldCapacity == newCapacity) {
386             return;
387         }
388 
389         PoolChunk<T> oldChunk = buf.chunk;
390         long oldHandle = buf.handle;
391         T oldMemory = buf.memory;
392         int oldOffset = buf.offset;
393         int oldMaxLength = buf.maxLength;
394         int readerIndex = buf.readerIndex();
395         int writerIndex = buf.writerIndex();
396 
397         allocate(parent.threadCache(), buf, newCapacity);
398         if (newCapacity > oldCapacity) {
399             memoryCopy(
400                     oldMemory, oldOffset,
401                     buf.memory, buf.offset, oldCapacity);
402         } else if (newCapacity < oldCapacity) {
403             if (readerIndex < newCapacity) {
404                 if (writerIndex > newCapacity) {
405                     writerIndex = newCapacity;
406                 }
407                 memoryCopy(
408                         oldMemory, oldOffset + readerIndex,
409                         buf.memory, buf.offset + readerIndex, writerIndex - readerIndex);
410             } else {
411                 readerIndex = writerIndex = newCapacity;
412             }
413         }
414 
415         buf.setIndex(readerIndex, writerIndex);
416 
417         if (freeOldMemory) {
418             free(oldChunk, oldHandle, oldMaxLength, buf.cache);
419         }
420     }
421 
422     /**
423      * Returns the number of thread caches backed by this arena.
424      */
425     public int numThreadCaches() {
426         return numThreadCaches.get();
427     }
428 
429     @Override
430     public int numTinySubpages() {
431         return tinySubpagePools.length;
432     }
433 
434     @Override
435     public int numSmallSubpages() {
436         return smallSubpagePools.length;
437     }
438 
439     @Override
440     public int numChunkLists() {
441         return chunkListMetrics.size();
442     }
443 
444     @Override
445     public List<PoolSubpageMetric> tinySubpages() {
446         return subPageMetricList(tinySubpagePools);
447     }
448 
449     @Override
450     public List<PoolSubpageMetric> smallSubpages() {
451         return subPageMetricList(smallSubpagePools);
452     }
453 
454     @Override
455     public List<PoolChunkListMetric> chunkLists() {
456         return chunkListMetrics;
457     }
458 
459     private static List<PoolSubpageMetric> subPageMetricList(PoolSubpage<?>[] pages) {
460         List<PoolSubpageMetric> metrics = new ArrayList<PoolSubpageMetric>();
461         for (PoolSubpage<?> head : pages) {
462             if (head.next == head) {
463                 continue;
464             }
465             PoolSubpage<?> s = head.next;
466             for (;;) {
467                 metrics.add(s);
468                 s = s.next;
469                 if (s == head) {
470                     break;
471                 }
472             }
473         }
474         return metrics;
475     }
476 
477     @Override
478     public long numAllocations() {
479         final long allocsNormal;
480         synchronized (this) {
481             allocsNormal = allocationsNormal;
482         }
483         return allocationsTiny.value() + allocationsSmall.value() + allocsNormal + allocationsHuge.value();
484     }
485 
486     @Override
487     public long numTinyAllocations() {
488         return allocationsTiny.value();
489     }
490 
491     @Override
492     public long numSmallAllocations() {
493         return allocationsSmall.value();
494     }
495 
496     @Override
497     public synchronized long numNormalAllocations() {
498         return allocationsNormal;
499     }
500 
501     @Override
502     public long numDeallocations() {
503         final long deallocs;
504         synchronized (this) {
505             deallocs = deallocationsTiny + deallocationsSmall + deallocationsNormal;
506         }
507         return deallocs + deallocationsHuge.value();
508     }
509 
510     @Override
511     public synchronized long numTinyDeallocations() {
512         return deallocationsTiny;
513     }
514 
515     @Override
516     public synchronized long numSmallDeallocations() {
517         return deallocationsSmall;
518     }
519 
520     @Override
521     public synchronized long numNormalDeallocations() {
522         return deallocationsNormal;
523     }
524 
525     @Override
526     public long numHugeAllocations() {
527         return allocationsHuge.value();
528     }
529 
530     @Override
531     public long numHugeDeallocations() {
532         return deallocationsHuge.value();
533     }
534 
535     @Override
536     public long numActiveAllocations() {
537         long val = allocationsTiny.value() + allocationsSmall.value() + allocationsHuge.value()
538                 - deallocationsHuge.value();
539         synchronized (this) {
540             val += allocationsNormal - (deallocationsTiny + deallocationsSmall + deallocationsNormal);
541         }
542         return max(val, 0);
543     }
544 
545     @Override
546     public long numActiveTinyAllocations() {
547         return max(numTinyAllocations() - numTinyDeallocations(), 0);
548     }
549 
550     @Override
551     public long numActiveSmallAllocations() {
552         return max(numSmallAllocations() - numSmallDeallocations(), 0);
553     }
554 
555     @Override
556     public long numActiveNormalAllocations() {
557         final long val;
558         synchronized (this) {
559             val = allocationsNormal - deallocationsNormal;
560         }
561         return max(val, 0);
562     }
563 
564     @Override
565     public long numActiveHugeAllocations() {
566         return max(numHugeAllocations() - numHugeDeallocations(), 0);
567     }
568 
569     @Override
570     public long numActiveBytes() {
571         long val = activeBytesHuge.value();
572         synchronized (this) {
573             for (int i = 0; i < chunkListMetrics.size(); i++) {
574                 for (PoolChunkMetric m: chunkListMetrics.get(i)) {
575                     val += m.chunkSize();
576                 }
577             }
578         }
579         return max(0, val);
580     }
581 
582     protected abstract PoolChunk<T> newChunk(int pageSize, int maxOrder, int pageShifts, int chunkSize);
583     protected abstract PoolChunk<T> newUnpooledChunk(int capacity);
584     protected abstract PooledByteBuf<T> newByteBuf(int maxCapacity);
585     protected abstract void memoryCopy(T src, int srcOffset, T dst, int dstOffset, int length);
586     protected abstract void destroyChunk(PoolChunk<T> chunk);
587 
588     @Override
589     public synchronized String toString() {
590         StringBuilder buf = new StringBuilder()
591             .append("Chunk(s) at 0~25%:")
592             .append(StringUtil.NEWLINE)
593             .append(qInit)
594             .append(StringUtil.NEWLINE)
595             .append("Chunk(s) at 0~50%:")
596             .append(StringUtil.NEWLINE)
597             .append(q000)
598             .append(StringUtil.NEWLINE)
599             .append("Chunk(s) at 25~75%:")
600             .append(StringUtil.NEWLINE)
601             .append(q025)
602             .append(StringUtil.NEWLINE)
603             .append("Chunk(s) at 50~100%:")
604             .append(StringUtil.NEWLINE)
605             .append(q050)
606             .append(StringUtil.NEWLINE)
607             .append("Chunk(s) at 75~100%:")
608             .append(StringUtil.NEWLINE)
609             .append(q075)
610             .append(StringUtil.NEWLINE)
611             .append("Chunk(s) at 100%:")
612             .append(StringUtil.NEWLINE)
613             .append(q100)
614             .append(StringUtil.NEWLINE)
615             .append("tiny subpages:");
616         appendPoolSubPages(buf, tinySubpagePools);
617         buf.append(StringUtil.NEWLINE)
618            .append("small subpages:");
619         appendPoolSubPages(buf, smallSubpagePools);
620         buf.append(StringUtil.NEWLINE);
621 
622         return buf.toString();
623     }
624 
625     private static void appendPoolSubPages(StringBuilder buf, PoolSubpage<?>[] subpages) {
626         for (int i = 0; i < subpages.length; i ++) {
627             PoolSubpage<?> head = subpages[i];
628             if (head.next == head) {
629                 continue;
630             }
631 
632             buf.append(StringUtil.NEWLINE)
633                     .append(i)
634                     .append(": ");
635             PoolSubpage<?> s = head.next;
636             for (;;) {
637                 buf.append(s);
638                 s = s.next;
639                 if (s == head) {
640                     break;
641                 }
642             }
643         }
644     }
645 
646     @Override
647     protected final void finalize() throws Throwable {
648         try {
649             super.finalize();
650         } finally {
651             destroyPoolSubPages(smallSubpagePools);
652             destroyPoolSubPages(tinySubpagePools);
653             destroyPoolChunkLists(qInit, q000, q025, q050, q075, q100);
654         }
655     }
656 
657     private static void destroyPoolSubPages(PoolSubpage<?>[] pages) {
658         for (PoolSubpage<?> page : pages) {
659             page.destroy();
660         }
661     }
662 
663     private void destroyPoolChunkLists(PoolChunkList<T>... chunkLists) {
664         for (PoolChunkList<T> chunkList: chunkLists) {
665             chunkList.destroy(this);
666         }
667     }
668 
669     static final class HeapArena extends PoolArena<byte[]> {
670 
671         HeapArena(PooledByteBufAllocator parent, int pageSize, int maxOrder,
672                 int pageShifts, int chunkSize, int directMemoryCacheAlignment) {
673             super(parent, pageSize, maxOrder, pageShifts, chunkSize,
674                     directMemoryCacheAlignment);
675         }
676 
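            // allocateUninitializedArray may skip zero-filling the array on JVMs where Netty supports
            // that; otherwise it simply returns new byte[size].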
677         private static byte[] newByteArray(int size) {
678             return PlatformDependent.allocateUninitializedArray(size);
679         }
680 
681         @Override
682         boolean isDirect() {
683             return false;
684         }
685 
686         @Override
687         protected PoolChunk<byte[]> newChunk(int pageSize, int maxOrder, int pageShifts, int chunkSize) {
688             return new PoolChunk<byte[]>(this, newByteArray(chunkSize), pageSize, maxOrder, pageShifts, chunkSize, 0);
689         }
690 
691         @Override
692         protected PoolChunk<byte[]> newUnpooledChunk(int capacity) {
693             return new PoolChunk<byte[]>(this, newByteArray(capacity), capacity, 0);
694         }
695 
696         @Override
697         protected void destroyChunk(PoolChunk<byte[]> chunk) {
698             // Rely on GC.
699         }
700 
701         @Override
702         protected PooledByteBuf<byte[]> newByteBuf(int maxCapacity) {
703             return HAS_UNSAFE ? PooledUnsafeHeapByteBuf.newUnsafeInstance(maxCapacity)
704                     : PooledHeapByteBuf.newInstance(maxCapacity);
705         }
706 
707         @Override
708         protected void memoryCopy(byte[] src, int srcOffset, byte[] dst, int dstOffset, int length) {
709             if (length == 0) {
710                 return;
711             }
712 
713             System.arraycopy(src, srcOffset, dst, dstOffset, length);
714         }
715     }
716 
717     static final class DirectArena extends PoolArena<ByteBuffer> {
718 
719         DirectArena(PooledByteBufAllocator parent, int pageSize, int maxOrder,
720                 int pageShifts, int chunkSize, int directMemoryCacheAlignment) {
721             super(parent, pageSize, maxOrder, pageShifts, chunkSize,
722                     directMemoryCacheAlignment);
723         }
724 
725         @Override
726         boolean isDirect() {
727             return true;
728         }
729 
730         private int offsetCacheLine(ByteBuffer memory) {
731             // We can only calculate the offset if Unsafe is present as otherwise directBufferAddress(...) will
732             // throw an NPE.
733             return HAS_UNSAFE ?
734                     (int) (PlatformDependent.directBufferAddress(memory) & directMemoryCacheAlignmentMask) : 0;
735         }
736 
737         @Override
738         protected PoolChunk<ByteBuffer> newChunk(int pageSize, int maxOrder,
739                 int pageShifts, int chunkSize) {
740             if (directMemoryCacheAlignment == 0) {
741                 return new PoolChunk<ByteBuffer>(this,
742                         allocateDirect(chunkSize), pageSize, maxOrder,
743                         pageShifts, chunkSize, 0);
744             }
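                // Over-allocate by directMemoryCacheAlignment so the usable region can start on an aligned
                // address; the offset computed by offsetCacheLine(memory) is handed to PoolChunk as its base offset.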
745             final ByteBuffer memory = allocateDirect(chunkSize
746                     + directMemoryCacheAlignment);
747             return new PoolChunk<ByteBuffer>(this, memory, pageSize,
748                     maxOrder, pageShifts, chunkSize,
749                     offsetCacheLine(memory));
750         }
751 
752         @Override
753         protected PoolChunk<ByteBuffer> newUnpooledChunk(int capacity) {
754             if (directMemoryCacheAlignment == 0) {
755                 return new PoolChunk<ByteBuffer>(this,
756                         allocateDirect(capacity), capacity, 0);
757             }
758             final ByteBuffer memory = allocateDirect(capacity
759                     + directMemoryCacheAlignment);
760             return new PoolChunk<ByteBuffer>(this, memory, capacity,
761                     offsetCacheLine(memory));
762         }
763 
764         private static ByteBuffer allocateDirect(int capacity) {
765             return PlatformDependent.useDirectBufferNoCleaner() ?
766                     PlatformDependent.allocateDirectNoCleaner(capacity) : ByteBuffer.allocateDirect(capacity);
767         }
768 
769         @Override
770         protected void destroyChunk(PoolChunk<ByteBuffer> chunk) {
771             if (PlatformDependent.useDirectBufferNoCleaner()) {
772                 PlatformDependent.freeDirectNoCleaner(chunk.memory);
773             } else {
774                 PlatformDependent.freeDirectBuffer(chunk.memory);
775             }
776         }
777 
778         @Override
779         protected PooledByteBuf<ByteBuffer> newByteBuf(int maxCapacity) {
780             if (HAS_UNSAFE) {
781                 return PooledUnsafeDirectByteBuf.newInstance(maxCapacity);
782             } else {
783                 return PooledDirectByteBuf.newInstance(maxCapacity);
784             }
785         }
786 
787         @Override
788         protected void memoryCopy(ByteBuffer src, int srcOffset, ByteBuffer dst, int dstOffset, int length) {
789             if (length == 0) {
790                 return;
791             }
792 
793             if (HAS_UNSAFE) {
794                 PlatformDependent.copyMemory(
795                         PlatformDependent.directBufferAddress(src) + srcOffset,
796                         PlatformDependent.directBufferAddress(dst) + dstOffset, length);
797             } else {
798                 // We must duplicate the NIO buffers because they may be accessed by other Netty buffers.
799                 src = src.duplicate();
800                 dst = dst.duplicate();
801                 src.position(srcOffset).limit(srcOffset + length);
802                 dst.position(dstOffset);
803                 dst.put(src);
804             }
805         }
806     }
807 }