1 /*
2 * Copyright 2024 The Netty Project
3 *
4 * The Netty Project licenses this file to you under the Apache License,
5 * version 2.0 (the "License"); you may not use this file except in compliance
6 * with the License. You may obtain a copy of the License at:
7 *
8 * https://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 * License for the specific language governing permissions and limitations
14 * under the License.
15 */
16 package io.netty.channel;
17
18 import io.netty.util.concurrent.MultithreadEventExecutorGroup;
19 import io.netty.util.concurrent.ThreadAwareExecutor;
20
/**
 * The context for an {@link IoHandler} that is run by a {@link ThreadAwareExecutor}.
 * <p>
 * All methods <strong>MUST</strong> be executed on the {@link ThreadAwareExecutor} thread
 * (which means {@link ThreadAwareExecutor#isExecutorThread(Thread)} must return {@code true}).
 */
public interface IoHandlerContext {
    /**
     * Returns {@code true} if blocking for IO is allowed or if we should try to do a non-blocking request for IO to be
     * ready.
     *
     * @return {@code true} if allowed, {@code false} otherwise.
     */
    boolean canBlock();

    /**
     * Returns the amount of time left until the scheduled task with the closest deadline should run.
     *
     * @param currentTimeNanos the current time in nanoseconds (must use the same time source as
     *                         {@link #deadlineNanos()}).
     * @return the remaining time in nanoseconds until the closest scheduled task should run.
     */
    long delayNanos(long currentTimeNanos);

    /**
     * Returns the absolute point in time at which the next
     * closest scheduled task should run or {@code -1} if nothing is scheduled to run.
     *
     * @return the deadline in nanoseconds, or {@code -1} if nothing is scheduled.
     */
    long deadlineNanos();

    /**
     * Reports the amount of time in nanoseconds that was spent actively processing I/O events.
     * <p>
     * This metric is needed for the dynamic, utilization-based auto-scaling feature
     * in {@link MultithreadEventExecutorGroup}. The reported time
     * allows the auto-scaler to accurately measure the I/O workload of an event loop.
     * <p>
     * {@code IoHandler} implementations should measure the time spent in their event processing
     * logic and report the duration via this method. This should only include time spent
     * actively handling ready I/O events and should <strong>not</strong> include time spent blocking or
     * waiting for I/O (e.g., in an {@code epoll_wait} call).
     * <p>
     * The default implementation of this method is a no-op. Failing to override it in an
     * {@link IoHandlerContext} that supports auto-scaling will result in the I/O utilization
     * being perceived as zero.
     *
     * @param activeNanos The duration in nanoseconds of active, non-blocking I/O work.
     */
    default void reportActiveIoTime(long activeNanos) {
        // no-op
    }

    /**
     * Returns {@code true} if the I/O handler should measure and report its active I/O time.
     * This is used as a guard to avoid the overhead of calling {@link System#nanoTime()}
     * when the feature is not in use.
     *
     * @return {@code true} if active I/O time should be reported, {@code false} otherwise.
     */
    default boolean shouldReportActiveIoTime() {
        return false;
    }
}