
Commit

… Go1.18
archanaravindar authored and dbenoit17 committed Dec 12, 2023
1 parent 0c8b1fc commit c43ce40
Showing 2 changed files with 88 additions and 5 deletions.
34 changes: 29 additions & 5 deletions src/net/http/internal/chunked.go
@@ -40,6 +40,7 @@ type chunkedReader struct {
	err      error
	buf      [2]byte
	checkEnd bool  // whether need to check for \r\n chunk footer
	excess   int64 // "excessive" chunk overhead, for malicious sender detection
}

func (cr *chunkedReader) beginChunk() {
@@ -49,10 +50,38 @@ func (cr *chunkedReader) beginChunk() {
	if cr.err != nil {
		return
	}
	cr.excess += int64(len(line)) + 2 // header, plus \r\n after the chunk data
	line = trimTrailingWhitespace(line)
	line, cr.err = removeChunkExtension(line)
	if cr.err != nil {
		return
	}
	cr.n, cr.err = parseHexUint(line)
	if cr.err != nil {
		return
	}
	// A sender who sends one byte per chunk will send 5 bytes of overhead
	// for every byte of data. ("1\r\nX\r\n" to send "X".)
	// We want to allow this, since streaming a byte at a time can be legitimate.
	//
	// A sender can use chunk extensions to add arbitrary amounts of additional
	// data per byte read. ("1;very long extension\r\nX\r\n" to send "X".)
	// We don't want to disallow extensions (although we discard them),
	// but we also don't want to allow a sender to reduce the signal/noise ratio
	// arbitrarily.
	//
	// We track the amount of excess overhead read,
	// and produce an error if it grows too large.
	//
	// Currently, we say that we're willing to accept 16 bytes of overhead per chunk,
	// plus twice the amount of real data in the chunk.
	cr.excess -= 16 + (2 * int64(cr.n))
	if cr.excess < 0 {
		cr.excess = 0
	}
	if cr.excess > 16*1024 {
		cr.err = errors.New("chunked encoding contains too much non-data")
	}
	if cr.n == 0 {
		cr.err = io.EOF
	}
@@ -140,11 +169,6 @@ func readChunkLine(b *bufio.Reader) ([]byte, error) {
	if len(p) >= maxLineLength {
		return nil, ErrLineTooLong
	}
	p = trimTrailingWhitespace(p)
	p, err = removeChunkExtension(p)
	if err != nil {
		return nil, err
	}
	return p, nil
}

59 changes: 59 additions & 0 deletions src/net/http/internal/chunked_test.go
@@ -239,3 +239,62 @@ func TestChunkEndReadError(t *testing.T) {
t.Errorf("expected %v, got %v", readErr, err)
}
}

func TestChunkReaderTooMuchOverhead(t *testing.T) {
	// If the sender is sending 100x as many chunk header bytes as chunk data,
	// we should reject the stream at some point.
	chunk := []byte("1;")
	for i := 0; i < 100; i++ {
		chunk = append(chunk, 'a') // chunk extension
	}
	chunk = append(chunk, "\r\nX\r\n"...)
	const bodylen = 1 << 20
	r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
		if i < bodylen {
			return chunk, nil
		}
		return []byte("0\r\n"), nil
	}})
	_, err := io.ReadAll(r)
	if err == nil {
		t.Fatalf("successfully read body with excessive overhead; want error")
	}
}
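Each chunk in this test has a 104-byte header line ("1;", 100 extension bytes, and CRLF), so the reader records 106 bytes of overhead against an allowance of 18 (16 plus twice the single data byte), a net 88 bytes of excess per chunk; the 16 KiB cap is exceeded after roughly 187 chunks, long before the 1 MiB body completes.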

func TestChunkReaderByteAtATime(t *testing.T) {
	// Sending one byte per chunk should not trip the excess-overhead detection.
	const bodylen = 1 << 20
	r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
		if i < bodylen {
			return []byte("1\r\nX\r\n"), nil
		}
		return []byte("0\r\n"), nil
	}})
	got, err := io.ReadAll(r)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(got) != bodylen {
		t.Errorf("read %v bytes, want %v", len(got), bodylen)
	}
}
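Here each header line is just "1\r\n", so a chunk adds only 5 bytes of overhead (the 3-byte header line plus the CRLF after the data), which stays under the 18-byte allowance; the excess counter is clamped to zero on every chunk and the full 1 MiB body is read without error.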

type funcReader struct {
	f   func(iteration int) ([]byte, error)
	i   int
	b   []byte
	err error
}

func (r *funcReader) Read(p []byte) (n int, err error) {
	if len(r.b) == 0 && r.err == nil {
		r.b, r.err = r.f(r.i)
		r.i++
	}
	n = copy(p, r.b)
	r.b = r.b[n:]
	if len(r.b) > 0 {
		return n, nil
	}
	return n, r.err
}
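The funcReader helper builds the body incrementally: whenever Read finds its buffer empty, it asks f for the next piece (passing an incrementing iteration count), and any error f returns is surfaced only after that piece has been fully consumed. This lets the tests synthesize megabyte-scale chunked streams without allocating them up front.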

