/*
 * Copyright 2012 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.handler.codec.http;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.ByteToMessageDecoder;
import io.netty.handler.codec.DecoderResult;
import io.netty.handler.codec.PrematureChannelClosureException;
import io.netty.handler.codec.TooLongFrameException;
import io.netty.util.ByteProcessor;
import io.netty.util.internal.AppendableCharSequence;

import java.util.List;

/**
 * Decodes {@link ByteBuf}s into {@link HttpMessage}s and
 * {@link HttpContent}s.
 *
 * <h3>Parameters that prevent excessive memory consumption</h3>
 * <table border="1">
 * <tr>
 * <th>Name</th><th>Meaning</th>
 * </tr>
 * <tr>
 * <td>{@code maxInitialLineLength}</td>
 * <td>The maximum length of the initial line
 *     (e.g. {@code "GET / HTTP/1.0"} or {@code "HTTP/1.0 200 OK"})
 *     If the length of the initial line exceeds this value, a
 *     {@link TooLongFrameException} will be raised.</td>
 * </tr>
 * <tr>
 * <td>{@code maxHeaderSize}</td>
 * <td>The maximum length of all headers. If the sum of the length of each
 *     header exceeds this value, a {@link TooLongFrameException} will be raised.</td>
 * </tr>
 * <tr>
 * <td>{@code maxChunkSize}</td>
 * <td>The maximum length of the content or each chunk. If the content length
 *     (or the length of each chunk) exceeds this value, the content or chunk
 *     will be split into multiple {@link HttpContent}s whose length is
 *     {@code maxChunkSize} at maximum.</td>
 * </tr>
 * </table>
 *
 * <h3>Chunked Content</h3>
 *
 * If the content of an HTTP message is greater than {@code maxChunkSize} or
 * the transfer encoding of the HTTP message is 'chunked', this decoder
 * generates one {@link HttpMessage} instance and its following
 * {@link HttpContent}s per single HTTP message to avoid excessive memory
 * consumption. For example, the following HTTP message:
 * <pre>
 * GET / HTTP/1.1
 * Transfer-Encoding: chunked
 *
 * 1a
 * abcdefghijklmnopqrstuvwxyz
 * 10
 * 1234567890abcdef
 * 0
 * Content-MD5: ...
 * <i>[blank line]</i>
 * </pre>
 * triggers {@link HttpRequestDecoder} to generate 3 objects:
 * <ol>
 * <li>An {@link HttpRequest},</li>
 * <li>The first {@link HttpContent} whose content is {@code 'abcdefghijklmnopqrstuvwxyz'},</li>
 * <li>The second {@link LastHttpContent} whose content is {@code '1234567890abcdef'}, which marks
 *     the end of the content.</li>
 * </ol>
 *
 * If you prefer not to handle {@link HttpContent}s by yourself for your
 * convenience, insert {@link HttpObjectAggregator} after this decoder in the
 * {@link ChannelPipeline}. However, please note that your server might not
 * be as memory efficient as without the aggregator.
 *
 * <h3>Extensibility</h3>
 *
 * Please note that this decoder is designed to be extended to implement
 * a protocol derived from HTTP, such as
 * <a href="http://en.wikipedia.org/wiki/Real_Time_Streaming_Protocol">RTSP</a> and
 * <a href="http://en.wikipedia.org/wiki/Internet_Content_Adaptation_Protocol">ICAP</a>.
 * To implement the decoder of such a derived protocol, extend this class and
 * implement all abstract methods properly.
 */
public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
    private static final String EMPTY_VALUE = "";

    private final int maxChunkSize;
    private final boolean chunkedSupported;
    protected final boolean validateHeaders;
    private final HeaderParser headerParser;
    private final LineParser lineParser;

    private HttpMessage message;
    private long chunkSize;
    private long contentLength = Long.MIN_VALUE;
    private volatile boolean resetRequested;

    // These will be updated by splitHeader(...)
    private CharSequence name;
    private CharSequence value;

    private LastHttpContent trailer;
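
    // Usage sketch (illustrative only; it mirrors the class Javadoc above, and the application
    // handler name is hypothetical): a concrete subclass such as HttpRequestDecoder is typically
    // installed in front of an optional HttpObjectAggregator.
    //
    //   ChannelPipeline p = ch.pipeline();
    //   p.addLast(new HttpRequestDecoder());           // emits HttpRequest + HttpContents
    //   p.addLast(new HttpObjectAggregator(1048576));  // optional: merges them into one FullHttpRequest
    //   p.addLast(new MyHttpRequestHandler());         // hypothetical application handler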

    /**
     * The internal state of {@link HttpObjectDecoder}.
     * <em>Internal use only</em>.
     */
    private enum State {
        SKIP_CONTROL_CHARS,
        READ_INITIAL,
        READ_HEADER,
        READ_VARIABLE_LENGTH_CONTENT,
        READ_FIXED_LENGTH_CONTENT,
        READ_CHUNK_SIZE,
        READ_CHUNKED_CONTENT,
        READ_CHUNK_DELIMITER,
        READ_CHUNK_FOOTER,
        BAD_MESSAGE,
        UPGRADED
    }

    private State currentState = State.SKIP_CONTROL_CHARS;
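
    // For reference, a handler placed after this decoder (without an aggregator) receives the
    // decoded objects in the order produced by the states above; the callback below is purely
    // illustrative.
    //
    //   @Override
    //   public void channelRead(ChannelHandlerContext ctx, Object msg) {
    //       if (msg instanceof HttpRequest) {
    //           // initial line and headers
    //       } else if (msg instanceof LastHttpContent) {
    //           // last body chunk (possibly empty) and trailing headers; end of the message
    //       } else if (msg instanceof HttpContent) {
    //           // intermediate body chunk
    //       }
    //   }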

    /**
     * Creates a new instance with the default
     * {@code maxInitialLineLength (4096)}, {@code maxHeaderSize (8192)}, and
     * {@code maxChunkSize (8192)}.
     */
    protected HttpObjectDecoder() {
        this(4096, 8192, 8192, true);
    }

    /**
     * Creates a new instance with the specified parameters.
     */
    protected HttpObjectDecoder(
            int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean chunkedSupported) {
        this(maxInitialLineLength, maxHeaderSize, maxChunkSize, chunkedSupported, true);
    }

    /**
     * Creates a new instance with the specified parameters.
     */
    protected HttpObjectDecoder(
            int maxInitialLineLength, int maxHeaderSize, int maxChunkSize,
            boolean chunkedSupported, boolean validateHeaders) {
        this(maxInitialLineLength, maxHeaderSize, maxChunkSize, chunkedSupported, validateHeaders, 128);
    }

    protected HttpObjectDecoder(
            int maxInitialLineLength, int maxHeaderSize, int maxChunkSize,
            boolean chunkedSupported, boolean validateHeaders, int initialBufferSize) {
        if (maxInitialLineLength <= 0) {
            throw new IllegalArgumentException(
                    "maxInitialLineLength must be a positive integer: " + maxInitialLineLength);
        }
        if (maxHeaderSize <= 0) {
            throw new IllegalArgumentException(
                    "maxHeaderSize must be a positive integer: " + maxHeaderSize);
        }
        if (maxChunkSize <= 0) {
            throw new IllegalArgumentException(
                    "maxChunkSize must be a positive integer: " + maxChunkSize);
        }
        AppendableCharSequence seq = new AppendableCharSequence(initialBufferSize);
        lineParser = new LineParser(seq, maxInitialLineLength);
        headerParser = new HeaderParser(seq, maxHeaderSize);
        this.maxChunkSize = maxChunkSize;
        this.chunkedSupported = chunkedSupported;
        this.validateHeaders = validateHeaders;
    }

    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List<Object> out) throws Exception {
        if (resetRequested) {
            resetNow();
        }

        switch (currentState) {
        case SKIP_CONTROL_CHARS: {
            if (!skipControlCharacters(buffer)) {
                return;
            }
            currentState = State.READ_INITIAL;
        }
        case READ_INITIAL: try {
            AppendableCharSequence line = lineParser.parse(buffer);
            if (line == null) {
                return;
            }
            String[] initialLine = splitInitialLine(line);
            if (initialLine.length < 3) {
                // Invalid initial line - ignore.
                currentState = State.SKIP_CONTROL_CHARS;
                return;
            }

            message = createMessage(initialLine);
            currentState = State.READ_HEADER;
            // fall-through
        } catch (Exception e) {
            out.add(invalidMessage(buffer, e));
            return;
        }
        case READ_HEADER: try {
            State nextState = readHeaders(buffer);
            if (nextState == null) {
                return;
            }
            currentState = nextState;
            switch (nextState) {
            case SKIP_CONTROL_CHARS:
                // fast-path
                // No content is expected.
                out.add(message);
                out.add(LastHttpContent.EMPTY_LAST_CONTENT);
                resetNow();
                return;
            case READ_CHUNK_SIZE:
                if (!chunkedSupported) {
                    throw new IllegalArgumentException("Chunked messages not supported");
                }
                // Chunked encoding - generate HttpMessage first. HttpChunks will follow.
                out.add(message);
                return;
            default:
                /**
                 * <a href="https://tools.ietf.org/html/rfc7230#section-3.3.3">RFC 7230, 3.3.3</a> states that if a
                 * request does not have either a transfer-encoding or a content-length header then the message body
                 * length is 0. However for a response the body length is the number of octets received prior to the
                 * server closing the connection. So we treat this as variable length chunked encoding.
                 */
                long contentLength = contentLength();
                if (contentLength == 0 || contentLength == -1 && isDecodingRequest()) {
                    out.add(message);
                    out.add(LastHttpContent.EMPTY_LAST_CONTENT);
                    resetNow();
                    return;
                }

                assert nextState == State.READ_FIXED_LENGTH_CONTENT ||
                        nextState == State.READ_VARIABLE_LENGTH_CONTENT;

                out.add(message);

                if (nextState == State.READ_FIXED_LENGTH_CONTENT) {
                    // chunkSize will be decreased as the READ_FIXED_LENGTH_CONTENT state reads data chunk by chunk.
                    chunkSize = contentLength;
                }

                // We return here, this forces decode to be called again where we will decode the content
                return;
            }
        } catch (Exception e) {
            out.add(invalidMessage(buffer, e));
            return;
        }
        case READ_VARIABLE_LENGTH_CONTENT: {
            // Keep reading data as a chunk until the end of connection is reached.
            int toRead = Math.min(buffer.readableBytes(), maxChunkSize);
            if (toRead > 0) {
                ByteBuf content = buffer.readRetainedSlice(toRead);
                out.add(new DefaultHttpContent(content));
            }
            return;
        }
        case READ_FIXED_LENGTH_CONTENT: {
            int readLimit = buffer.readableBytes();

            // Check if the buffer is readable first as we use the readable byte count
            // to create the HttpChunk. This is needed as otherwise we may end up
            // creating an HttpChunk instance that contains an empty buffer and so is
            // handled like it is the last HttpChunk.
            //
            // See https://github.com/netty/netty/issues/433
            if (readLimit == 0) {
                return;
            }

            int toRead = Math.min(readLimit, maxChunkSize);
            if (toRead > chunkSize) {
                toRead = (int) chunkSize;
            }
            ByteBuf content = buffer.readRetainedSlice(toRead);
            chunkSize -= toRead;

            if (chunkSize == 0) {
                // Read all content.
                out.add(new DefaultLastHttpContent(content, validateHeaders));
                resetNow();
            } else {
                out.add(new DefaultHttpContent(content));
            }
            return;
        }
        /**
         * everything else after this point takes care of reading chunked content. basically, read chunk size,
         * read chunk, read and ignore the CRLF and repeat until 0
         */
        case READ_CHUNK_SIZE: try {
            AppendableCharSequence line = lineParser.parse(buffer);
            if (line == null) {
                return;
            }
            int chunkSize = getChunkSize(line.toString());
            this.chunkSize = chunkSize;
            if (chunkSize == 0) {
                currentState = State.READ_CHUNK_FOOTER;
                return;
            }
            currentState = State.READ_CHUNKED_CONTENT;
            // fall-through
        } catch (Exception e) {
            out.add(invalidChunk(buffer, e));
            return;
        }
        case READ_CHUNKED_CONTENT: {
            assert chunkSize <= Integer.MAX_VALUE;
            int toRead = Math.min((int) chunkSize, maxChunkSize);
            toRead = Math.min(toRead, buffer.readableBytes());
            if (toRead == 0) {
                return;
            }
            HttpContent chunk = new DefaultHttpContent(buffer.readRetainedSlice(toRead));
            chunkSize -= toRead;

            out.add(chunk);

            if (chunkSize != 0) {
                return;
            }
            currentState = State.READ_CHUNK_DELIMITER;
            // fall-through
        }
        case READ_CHUNK_DELIMITER: {
            final int wIdx = buffer.writerIndex();
            int rIdx = buffer.readerIndex();
            while (wIdx > rIdx) {
                byte next = buffer.getByte(rIdx++);
                if (next == HttpConstants.LF) {
                    currentState = State.READ_CHUNK_SIZE;
                    break;
                }
            }
            buffer.readerIndex(rIdx);
            return;
        }
        case READ_CHUNK_FOOTER: try {
            LastHttpContent trailer = readTrailingHeaders(buffer);
            if (trailer == null) {
                return;
            }
            out.add(trailer);
            resetNow();
            return;
        } catch (Exception e) {
            out.add(invalidChunk(buffer, e));
            return;
        }
        case BAD_MESSAGE: {
            // Keep discarding until disconnection.
            buffer.skipBytes(buffer.readableBytes());
            break;
        }
        case UPGRADED: {
            int readableBytes = buffer.readableBytes();
            if (readableBytes > 0) {
                // Keep on consuming as otherwise we may trigger a DecoderException;
                // another handler will replace this codec with the upgraded protocol codec
                // and take over the traffic at some point.
                // See https://github.com/netty/netty/issues/2173
                out.add(buffer.readBytes(readableBytes));
            }
            break;
        }
        }
    }

    @Override
    protected void decodeLast(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
        super.decodeLast(ctx, in, out);

        if (resetRequested) {
            // If a reset was requested by decodeLast() we need to do it now otherwise we may produce a
            // LastHttpContent while there was already one.
            resetNow();
        }
        // Handle the last unfinished message.
        if (message != null) {
            boolean chunked = HttpUtil.isTransferEncodingChunked(message);
            if (currentState == State.READ_VARIABLE_LENGTH_CONTENT && !in.isReadable() && !chunked) {
                // End of connection.
                out.add(LastHttpContent.EMPTY_LAST_CONTENT);
                resetNow();
                return;
            }

            if (currentState == State.READ_HEADER) {
                // If we are still in the state of reading headers we need to create a new invalid message that
                // signals that the connection was closed before we received the headers.
                out.add(invalidMessage(Unpooled.EMPTY_BUFFER,
                        new PrematureChannelClosureException("Connection closed before received headers")));
                resetNow();
                return;
            }

            // Check if the closure of the connection signifies the end of the content.
            boolean prematureClosure;
            if (isDecodingRequest() || chunked) {
                // The last request did not wait for a response.
                prematureClosure = true;
            } else {
                // Compare the length of the received content and the 'Content-Length' header.
                // If the 'Content-Length' header is absent, the length of the content is determined by the end of the
                // connection, so it is perfectly fine.
                prematureClosure = contentLength() > 0;
            }

            if (!prematureClosure) {
                out.add(LastHttpContent.EMPTY_LAST_CONTENT);
            }
            resetNow();
        }
    }

    @Override
    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
        if (evt instanceof HttpExpectationFailedEvent) {
            switch (currentState) {
            case READ_FIXED_LENGTH_CONTENT:
            case READ_VARIABLE_LENGTH_CONTENT:
            case READ_CHUNK_SIZE:
                reset();
                break;
            default:
                break;
            }
        }
        super.userEventTriggered(ctx, evt);
    }

    protected boolean isContentAlwaysEmpty(HttpMessage msg) {
        if (msg instanceof HttpResponse) {
            HttpResponse res = (HttpResponse) msg;
            int code = res.status().code();

            // Correctly handle return codes of 1xx.
            //
            // See:
            //     - http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html Section 4.4
            //     - https://github.com/netty/netty/issues/222
            if (code >= 100 && code < 200) {
                // One exception: Hixie 76 websocket handshake response
                return !(code == 101 && !res.headers().contains(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT)
                        && res.headers().contains(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET, true));
            }

            switch (code) {
            case 204: case 304:
                return true;
            }
        }
        return false;
    }

    /**
     * Returns true if the server switched to a different protocol than HTTP/1.0 or HTTP/1.1, e.g. HTTP/2 or Websocket.
     * Returns false if the upgrade happened in a different layer, e.g. upgrade from HTTP/1.1 to HTTP/1.1 over TLS.
     */
    protected boolean isSwitchingToNonHttp1Protocol(HttpResponse msg) {
        if (msg.status().code() != HttpResponseStatus.SWITCHING_PROTOCOLS.code()) {
            return false;
        }
        String newProtocol = msg.headers().get(HttpHeaderNames.UPGRADE);
        return newProtocol == null ||
                !newProtocol.contains(HttpVersion.HTTP_1_0.text()) &&
                !newProtocol.contains(HttpVersion.HTTP_1_1.text());
    }
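
    // Sketch of how a rejected 'Expect: 100-continue' request reaches reset() through the
    // HttpExpectationFailedEvent handled in userEventTriggered(...) above; the rejection
    // criterion (tooLarge) is a hypothetical application check.
    //
    //   if (HttpUtil.is100ContinueExpected(request) && tooLarge(request)) {
    //       ctx.writeAndFlush(new DefaultFullHttpResponse(
    //               HttpVersion.HTTP_1_1, HttpResponseStatus.EXPECTATION_FAILED));
    //       // fire from the head of the pipeline so that this decoder, which sits earlier
    //       // in the pipeline, sees the event and resets its state
    //       ctx.pipeline().fireUserEventTriggered(HttpExpectationFailedEvent.INSTANCE);
    //   }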

    /**
     * Resets the state of the decoder so that it is ready to decode a new message.
     * This method is useful for handling a rejected request with {@code Expect: 100-continue} header.
     */
    public void reset() {
        resetRequested = true;
    }

    private void resetNow() {
        HttpMessage message = this.message;
        this.message = null;
        name = null;
        value = null;
        contentLength = Long.MIN_VALUE;
        lineParser.reset();
        headerParser.reset();
        trailer = null;
        if (!isDecodingRequest()) {
            HttpResponse res = (HttpResponse) message;
            if (res != null && isSwitchingToNonHttp1Protocol(res)) {
                currentState = State.UPGRADED;
                return;
            }
        }

        resetRequested = false;
        currentState = State.SKIP_CONTROL_CHARS;
    }

    private HttpMessage invalidMessage(ByteBuf in, Exception cause) {
        currentState = State.BAD_MESSAGE;

        // Advance the readerIndex so that ByteToMessageDecoder does not complain
        // when we produced an invalid message without consuming anything.
        in.skipBytes(in.readableBytes());

        if (message == null) {
            message = createInvalidMessage();
        }
        message.setDecoderResult(DecoderResult.failure(cause));

        HttpMessage ret = message;
        message = null;
        return ret;
    }

    private HttpContent invalidChunk(ByteBuf in, Exception cause) {
        currentState = State.BAD_MESSAGE;

        // Advance the readerIndex so that ByteToMessageDecoder does not complain
        // when we produced an invalid message without consuming anything.
        in.skipBytes(in.readableBytes());

        HttpContent chunk = new DefaultLastHttpContent(Unpooled.EMPTY_BUFFER);
        chunk.setDecoderResult(DecoderResult.failure(cause));
        message = null;
        trailer = null;
        return chunk;
    }

    private static boolean skipControlCharacters(ByteBuf buffer) {
        boolean skipped = false;
        final int wIdx = buffer.writerIndex();
        int rIdx = buffer.readerIndex();
        while (wIdx > rIdx) {
            int c = buffer.getUnsignedByte(rIdx++);
            if (!Character.isISOControl(c) && !Character.isWhitespace(c)) {
                rIdx--;
                skipped = true;
                break;
            }
        }
        buffer.readerIndex(rIdx);
        return skipped;
    }

    private State readHeaders(ByteBuf buffer) {
        final HttpMessage message = this.message;
        final HttpHeaders headers = message.headers();

        AppendableCharSequence line = headerParser.parse(buffer);
        if (line == null) {
            return null;
        }
        if (line.length() > 0) {
            do {
                char firstChar = line.charAt(0);
                if (name != null && (firstChar == ' ' || firstChar == '\t')) {
                    // please do not make one line from below code
                    // as it breaks -XX:+OptimizeStringConcat optimization
                    String trimmedLine = line.toString().trim();
                    String valueStr = String.valueOf(value);
                    value = valueStr + ' ' + trimmedLine;
                } else {
                    if (name != null) {
                        headers.add(name, value);
                    }
                    splitHeader(line);
                }

                line = headerParser.parse(buffer);
                if (line == null) {
                    return null;
                }
            } while (line.length() > 0);
        }

        // Add the last header.
        if (name != null) {
            headers.add(name, value);
        }
        // reset name and value fields
        name = null;
        value = null;

        State nextState;

        if (isContentAlwaysEmpty(message)) {
            HttpUtil.setTransferEncodingChunked(message, false);
            nextState = State.SKIP_CONTROL_CHARS;
        } else if (HttpUtil.isTransferEncodingChunked(message)) {
            nextState = State.READ_CHUNK_SIZE;
        } else if (contentLength() >= 0) {
            nextState = State.READ_FIXED_LENGTH_CONTENT;
        } else {
            nextState = State.READ_VARIABLE_LENGTH_CONTENT;
        }
        return nextState;
    }

    private long contentLength() {
        if (contentLength == Long.MIN_VALUE) {
            contentLength = HttpUtil.getContentLength(message, -1L);
        }
        return contentLength;
    }

    private LastHttpContent readTrailingHeaders(ByteBuf buffer) {
        AppendableCharSequence line = headerParser.parse(buffer);
        if (line == null) {
            return null;
        }
        CharSequence lastHeader = null;
        if (line.length() > 0) {
            LastHttpContent trailer = this.trailer;
            if (trailer == null) {
                trailer = this.trailer = new DefaultLastHttpContent(Unpooled.EMPTY_BUFFER, validateHeaders);
            }
            do {
                char firstChar = line.charAt(0);
                if (lastHeader != null && (firstChar == ' ' || firstChar == '\t')) {
                    List<String> current = trailer.trailingHeaders().getAll(lastHeader);
                    if (!current.isEmpty()) {
                        int lastPos = current.size() - 1;
                        // please do not make one line from below code
                        // as it breaks -XX:+OptimizeStringConcat optimization
                        String lineTrimmed = line.toString().trim();
                        String currentLastPos = current.get(lastPos);
                        current.set(lastPos, currentLastPos + lineTrimmed);
                    }
                } else {
                    splitHeader(line);
                    CharSequence headerName = name;
                    if (!HttpHeaderNames.CONTENT_LENGTH.contentEqualsIgnoreCase(headerName) &&
                            !HttpHeaderNames.TRANSFER_ENCODING.contentEqualsIgnoreCase(headerName) &&
                            !HttpHeaderNames.TRAILER.contentEqualsIgnoreCase(headerName)) {
                        trailer.trailingHeaders().add(headerName, value);
                    }
                    lastHeader = name;
                    // reset name and value fields
                    name = null;
                    value = null;
                }

                line = headerParser.parse(buffer);
                if (line == null) {
                    return null;
                }
            } while (line.length() > 0);

            this.trailer = null;
            return trailer;
        }

        return LastHttpContent.EMPTY_LAST_CONTENT;
    }

    protected abstract boolean isDecodingRequest();
    protected abstract HttpMessage createMessage(String[] initialLine) throws Exception;
    protected abstract HttpMessage createInvalidMessage();

    private static int getChunkSize(String hex) {
        hex = hex.trim();
        for (int i = 0; i < hex.length(); i ++) {
            char c = hex.charAt(i);
            if (c == ';' || Character.isWhitespace(c) || Character.isISOControl(c)) {
                hex = hex.substring(0, i);
                break;
            }
        }

        return Integer.parseInt(hex, 16);
    }

    private static String[] splitInitialLine(AppendableCharSequence sb) {
        int aStart;
        int aEnd;
        int bStart;
        int bEnd;
        int cStart;
        int cEnd;

        aStart = findNonWhitespace(sb, 0);
        aEnd = findWhitespace(sb, aStart);

        bStart = findNonWhitespace(sb, aEnd);
        bEnd = findWhitespace(sb, bStart);

        cStart = findNonWhitespace(sb, bEnd);
        cEnd = findEndOfString(sb);

        return new String[] {
                sb.subStringUnsafe(aStart, aEnd),
                sb.subStringUnsafe(bStart, bEnd),
                cStart < cEnd?
                        sb.subStringUnsafe(cStart, cEnd) : "" };
    }

    private void splitHeader(AppendableCharSequence sb) {
        final int length = sb.length();
        int nameStart;
        int nameEnd;
        int colonEnd;
        int valueStart;
        int valueEnd;

        nameStart = findNonWhitespace(sb, 0);
        for (nameEnd = nameStart; nameEnd < length; nameEnd ++) {
            char ch = sb.charAt(nameEnd);
            if (ch == ':' || Character.isWhitespace(ch)) {
                break;
            }
        }

        for (colonEnd = nameEnd; colonEnd < length; colonEnd ++) {
            if (sb.charAt(colonEnd) == ':') {
                colonEnd ++;
                break;
            }
        }

        name = sb.subStringUnsafe(nameStart, nameEnd);
        valueStart = findNonWhitespace(sb, colonEnd);
        if (valueStart == length) {
            value = EMPTY_VALUE;
        } else {
            valueEnd = findEndOfString(sb);
            value = sb.subStringUnsafe(valueStart, valueEnd);
        }
    }

    private static int findNonWhitespace(AppendableCharSequence sb, int offset) {
        for (int result = offset; result < sb.length(); ++result) {
            if (!Character.isWhitespace(sb.charAtUnsafe(result))) {
                return result;
            }
        }
        return sb.length();
    }

    private static int findWhitespace(AppendableCharSequence sb, int offset) {
        for (int result = offset; result < sb.length(); ++result) {
            if (Character.isWhitespace(sb.charAtUnsafe(result))) {
                return result;
            }
        }
        return sb.length();
    }

    private static int findEndOfString(AppendableCharSequence sb) {
        for (int result = sb.length() - 1; result > 0; --result) {
            if (!Character.isWhitespace(sb.charAtUnsafe(result))) {
                return result + 1;
            }
        }
        return 0;
    }

    private static class HeaderParser implements ByteProcessor {
        private final AppendableCharSequence seq;
        private final int maxLength;
        private int size;

        HeaderParser(AppendableCharSequence seq, int maxLength) {
            this.seq = seq;
            this.maxLength = maxLength;
        }

        public AppendableCharSequence parse(ByteBuf buffer) {
            final int oldSize = size;
            seq.reset();
            int i = buffer.forEachByte(this);
            if (i == -1) {
                size = oldSize;
                return null;
            }
            buffer.readerIndex(i + 1);
            return seq;
        }

        public void reset() {
            size = 0;
        }

        @Override
        public boolean process(byte value) throws Exception {
            char nextByte = (char) (value & 0xFF);
            if (nextByte == HttpConstants.CR) {
                return true;
            }
            if (nextByte == HttpConstants.LF) {
                return false;
            }

            if (++ size > maxLength) {
                // TODO: Respond with Bad Request and discard the traffic
                //       or close the connection.
                //       No need to notify the upstream handlers - just log.
                //       If decoding a response, just throw an exception.
                throw newException(maxLength);
            }

            seq.append(nextByte);
            return true;
        }

        protected TooLongFrameException newException(int maxLength) {
            return new TooLongFrameException("HTTP header is larger than " + maxLength + " bytes.");
        }
    }

    private static final class LineParser extends HeaderParser {

        LineParser(AppendableCharSequence seq, int maxLength) {
            super(seq, maxLength);
        }

        @Override
        public AppendableCharSequence parse(ByteBuf buffer) {
            reset();
            return super.parse(buffer);
        }

        @Override
        protected TooLongFrameException newException(int maxLength) {
            return new TooLongFrameException("An HTTP line is larger than " + maxLength + " bytes.");
        }
    }
}
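
// Extensibility sketch (hypothetical subclass, not part of this file): a decoder for an
// HTTP-derived protocol only needs to implement the three abstract methods declared above,
// along the same lines as HttpRequestDecoder does for plain HTTP requests.
//
//   public class MyDerivedProtocolDecoder extends HttpObjectDecoder {
//       @Override
//       protected boolean isDecodingRequest() {
//           return true; // decoding request-style messages
//       }
//
//       @Override
//       protected HttpMessage createMessage(String[] initialLine) {
//           // initialLine holds the three parts of the initial line, e.g. { method, uri, version }
//           return new DefaultHttpRequest(
//                   HttpVersion.valueOf(initialLine[2]), HttpMethod.valueOf(initialLine[0]), initialLine[1]);
//       }
//
//       @Override
//       protected HttpMessage createInvalidMessage() {
//           return new DefaultFullHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.GET, "/bad-request");
//       }
//   }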