The most frequently used ByteBuf pattern stores its data in the JVM's heap space. This pattern is referred to as a backing array, and it provides fast allocation and deallocation when pooling is not in use.
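As a minimal sketch (not taken from the original post; the string content and class name are placeholders), a heap buffer exposes its backing array via hasArray()/array(), so the data can be handled directly as a byte[]:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;

public class HeapBufExample {
    public static void main(String[] args) {
        ByteBuf heapBuf = Unpooled.copiedBuffer("hello world", CharsetUtil.UTF_8);
        if (heapBuf.hasArray()) {                 // true: this buffer has a backing array
            byte[] array = heapBuf.array();       // the backing array itself
            int offset = heapBuf.arrayOffset() + heapBuf.readerIndex();
            int length = heapBuf.readableBytes();
            System.out.println(new String(array, offset, length, CharsetUtil.UTF_8));
        }
        heapBuf.release();
    }
}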
The contents of a direct buffer reside outside the regular, garbage-collected heap. Direct buffers are the ideal choice for network data transfer, because if your data were held in a buffer allocated on the heap, the JVM would in fact copy it into a direct buffer internally before sending it over the socket.
The main drawback of direct buffers is that they are more expensive to allocate and release than heap-based buffers.
Experience shows that the best practice for ByteBuf is to use direct buffers (DirectByteBuf) for the read/write buffers on the I/O threads and heap buffers (HeapByteBuf) for back-end business logic.
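As a rough sketch (not from the original; the class name and string are placeholders), a direct buffer has no backing array, so its contents have to be copied out with getBytes() before business code can work on them as a byte[]:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class DirectBufExample {
    public static void main(String[] args) {
        ByteBuf directBuf = Unpooled.directBuffer(16);
        directBuf.writeBytes("hello world".getBytes());
        if (!directBuf.hasArray()) {                        // true for a direct buffer
            int length = directBuf.readableBytes();
            byte[] copy = new byte[length];
            directBuf.getBytes(directBuf.readerIndex(), copy); // explicit copy onto the heap
            System.out.println(new String(copy));
        }
        directBuf.release();
    }
}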
Consider a message composed of two parts, a header and a body, that will be transmitted via HTTP. The two parts are produced by different modules of the application and assembled when the message is sent. The application has the option of reusing the same message body for multiple messages; when that happens, a new header is created for each message. Because we don't want to reallocate both buffers for every message, CompositeByteBuf is a perfect fit.
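A minimal sketch of that header/body scenario (the HEADER/BODY contents and class name are placeholders, not part of the original):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;

public class CompositeBufExample {
    public static void main(String[] args) {
        ByteBuf header = Unpooled.copiedBuffer("HEADER", CharsetUtil.UTF_8);
        ByteBuf body   = Unpooled.copiedBuffer("BODY",   CharsetUtil.UTF_8);
        CompositeByteBuf message = Unpooled.compositeBuffer();
        // 'true' advances the writer index so the added components become readable;
        // neither header nor body is copied into the composite.
        message.addComponents(true, header, body);
        System.out.println(message.toString(CharsetUtil.UTF_8));
        message.release(); // also releases the underlying components
    }
}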
It's worth noting that Netty uses CompositeByteBuf internally to optimize socket I/O operations, eliminating wherever possible the performance and memory-usage penalties incurred by the JDK's buffer implementation. This optimization happens in Netty's core code and is therefore not exposed, but you should be aware of its impact.
You can obtain a reference to a ByteBufAllocator either from a Channel (each of which can have a distinct ByteBufAllocator instance) or through the ChannelHandlerContext that is bound to a ChannelHandler.
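As a sketch (the handler class name and the buffer size 256 are arbitrary placeholders), inside a handler the allocator can be reached either way:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

public class AllocDemoHandler extends ChannelInboundHandlerAdapter {
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        ByteBufAllocator byCtx = ctx.alloc();               // via the ChannelHandlerContext
        ByteBufAllocator byChannel = ctx.channel().alloc(); // via the Channel itself
        ByteBuf buf = byCtx.buffer(256); // heap or direct is decided by the allocator
        // ... use buf, then release it if it is not passed further down the pipeline
        buf.release();
        ctx.fireChannelRead(msg);
    }
}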
The pooled allocator, PooledByteBufAllocator, pools ByteBuf instances to improve performance and minimize memory fragmentation. This implementation uses an efficient approach to memory allocation known as jemalloc that has been adopted by a number of modern operating systems.
It is the default allocator in Netty.
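A sketch of making that choice explicit on the bootstrap (the class and method names are placeholders; the group/channel setup mirrors the server example further below). Setting ChannelOption.ALLOCATOR is not required when the pooled allocator is already the default, it merely makes the choice visible:

import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public class PooledAllocatorConfig {
    public static ServerBootstrap configure(EventLoopGroup boss, EventLoopGroup worker) {
        ServerBootstrap b = new ServerBootstrap();
        b.group(boss, worker)
         .channel(NioServerSocketChannel.class)
         // use the pooled allocator for the server channel and for every child channel
         .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
         .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
        return b;
    }
}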
In some situations you may not be able to obtain a reference to a ByteBufAllocator. For such cases Netty provides a simple utility class called Unpooled, which offers static helper methods to create unpooled ByteBuf instances.
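A quick sketch of the most common Unpooled helpers (the string contents and class name are placeholders):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;

public class UnpooledExamples {
    public static void main(String[] args) {
        ByteBuf heap    = Unpooled.buffer(16);                               // unpooled heap buffer
        ByteBuf direct  = Unpooled.directBuffer(16);                         // unpooled direct buffer
        ByteBuf wrapped = Unpooled.wrappedBuffer("data".getBytes());         // wraps the array, no copy
        ByteBuf copied  = Unpooled.copiedBuffer("data", CharsetUtil.UTF_8);  // copies the data
        heap.release();
        direct.release();
        wrapped.release();
        copied.release();
    }
}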
HttpServer.java
package http.server;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;

public class HttpServer {

    private static Log log = LogFactory.getLog(HttpServer.class);

    public void start(int port) throws Exception {
        EventLoopGroup bossGroup = new NioEventLoopGroup();
        EventLoopGroup workerGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup)
             .channel(NioServerSocketChannel.class)
             .childHandler(new ChannelInitializer<SocketChannel>() {
                 @Override
                 public void initChannel(SocketChannel ch) throws Exception {
                     // The server sends HttpResponse objects, so encode them with HttpResponseEncoder
                     ch.pipeline().addLast(new HttpResponseEncoder());
                     // The server receives HttpRequest objects, so decode them with HttpRequestDecoder
                     ch.pipeline().addLast(new HttpRequestDecoder());
                     ch.pipeline().addLast(new HttpServerInboundHandler());
                 }
             })
             .option(ChannelOption.SO_BACKLOG, 128)
             .childOption(ChannelOption.SO_KEEPALIVE, true);

            ChannelFuture f = b.bind(port).sync();
            f.channel().closeFuture().sync();
        } finally {
            workerGroup.shutdownGracefully();
            bossGroup.shutdownGracefully();
        }
    }

    public static void main(String[] args) throws Exception {
        HttpServer server = new HttpServer();
        log.info("Http Server listening on 5656 ...");
        server.start(5656);
    }
}
HttpServerInboundHandler.java
package http.server;

import static io.netty.handler.codec.http.HttpResponseStatus.OK;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpVersion;

public class HttpServerInboundHandler extends ChannelInboundHandlerAdapter {

    private static Log log = LogFactory.getLog(HttpServerInboundHandler.class);

    byte[] bs = "hello world".getBytes();

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        // The allocation strategies that were compared; only one is active at a time:
        // ByteBuf buf = Unpooled.wrappedBuffer(bs);    // wraps the existing array, no copy
        // ByteBuf buf = ctx.alloc().heapBuffer();      // pooled heap memory
        // ByteBuf buf = ctx.alloc().directBuffer();    // pooled direct memory
        // ByteBuf buf = Unpooled.buffer();             // unpooled heap memory
        ByteBuf buf = Unpooled.directBuffer();          // unpooled direct memory
        buf.writeBytes(bs);

        FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, OK, buf);
        // response.headers().set(HttpHeaderNames.CONTENT_TYPE, "text/plain");
        response.headers().set(HttpHeaderNames.CONTENT_LENGTH, response.content().readableBytes());
        ctx.write(response);
    }

    @Override
    public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
        ctx.flush();
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        log.error(cause.getMessage());
        ctx.close();
    }
}
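The benchmark output below is wrk output wrapped in time; given the reported 4 threads, 100 connections and 20 s duration, the invocation was presumably something like `time wrk -t4 -c100 -d20s http://127.0.0.1:5656/`, run once per allocation strategy commented above.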
Running 20s test @ http://127.0.0.1:5656/
  4 threads and 100 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency     3.76ms    9.31ms  180.34ms   92.96%
    Req/Sec   138.20k    43.16k   210.22k    66.50%
  10957832 requests in 20.09s, 522.51MB read
Requests/sec: 545332.08
Transfer/sec:     26.00MB

real    0m20.104s
user    0m10.441s
sys     0m44.703s
Running 20s test @ http://127.0.0.1:5656/
  4 threads and 100 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency     4.47ms    9.99ms  149.70ms   91.76%
    Req/Sec   138.51k    41.31k   209.94k    63.38%
  10981466 requests in 20.09s, 523.64MB read
Requests/sec: 546684.37
Transfer/sec:     26.07MB

real    0m20.098s
user    0m10.890s
sys     0m45.081s
Running 20s test @ http://127.0.0.1:5656/
  4 threads and 100 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency     4.00ms    8.72ms  150.05ms   91.52%
    Req/Sec   138.84k    42.05k   209.72k    63.81%
  11017442 requests in 20.09s, 525.35MB read
Requests/sec: 548379.99
Transfer/sec:     26.15MB

real    0m20.101s
user    0m10.639s
sys     0m45.191s
Running 20s test @ http://127.0.0.1:5656/
  4 threads and 100 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency     3.64ms    9.36ms  156.79ms   92.71%
    Req/Sec   124.55k    33.90k   191.90k    71.61%
  9890536 requests in 20.07s, 471.62MB read
Requests/sec: 492854.62
Transfer/sec:     23.50MB

real    0m20.076s
user    0m9.774s
sys     0m41.801s