Backport of patch for CVE-2023-39326 to Go 1.15
archanaravindar committed Dec 8, 2023
1 parent c18fcd3 commit 898b45f
Showing 2 changed files with 89 additions and 5 deletions.
34 changes: 29 additions & 5 deletions src/net/http/internal/chunked.go
@@ -40,6 +40,7 @@ type chunkedReader struct {
	err      error
	buf      [2]byte
	checkEnd bool  // whether need to check for \r\n chunk footer
	excess   int64 // "excessive" chunk overhead, for malicious sender detection
}

func (cr *chunkedReader) beginChunk() {
@@ -49,10 +50,38 @@ func (cr *chunkedReader) beginChunk() {
	if cr.err != nil {
		return
	}
	cr.excess += int64(len(line)) + 2 // header, plus \r\n after the chunk data
	line = trimTrailingWhitespace(line)
	line, cr.err = removeChunkExtension(line)
	if cr.err != nil {
		return
	}
	cr.n, cr.err = parseHexUint(line)
	if cr.err != nil {
		return
	}
	// A sender who sends one byte per chunk will send 5 bytes of overhead
	// for every byte of data. ("1\r\nX\r\n" to send "X".)
	// We want to allow this, since streaming a byte at a time can be legitimate.
	//
	// A sender can use chunk extensions to add arbitrary amounts of additional
	// data per byte read. ("1;very long extension\r\nX\r\n" to send "X".)
	// We don't want to disallow extensions (although we discard them),
	// but we also don't want to allow a sender to reduce the signal/noise ratio
	// arbitrarily.
	//
	// We track the amount of excess overhead read,
	// and produce an error if it grows too large.
	//
	// Currently, we say that we're willing to accept 16 bytes of overhead per chunk,
	// plus twice the amount of real data in the chunk.
	cr.excess -= 16 + (2 * int64(cr.n))
	if cr.excess < 0 {
		cr.excess = 0
	}
	if cr.excess > 16*1024 {
		cr.err = errors.New("chunked encoding contains too much non-data")
	}
	if cr.n == 0 {
		cr.err = io.EOF
	}
@@ -133,11 +162,6 @@ func readChunkLine(b *bufio.Reader) ([]byte, error) {
	if len(p) >= maxLineLength {
		return nil, ErrLineTooLong
	}
	p = trimTrailingWhitespace(p)
	p, err = removeChunkExtension(p)
	if err != nil {
		return nil, err
	}
	return p, nil
}

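To make the new accounting concrete, here is a small standalone sketch (an editorial illustration, not part of this commit) that applies the same rule as beginChunk to a stream of identical chunks: each chunk is charged its header length plus the two bytes of CRLF after the data, credited 16 bytes plus twice its data length, the running total is clamped at zero, and the read fails once the total passes 16 KiB. The helper name excessAfter and its parameters are invented for the example.

package main

import (
	"errors"
	"fmt"
)

// excessAfter mirrors the accounting added to beginChunk. headerLen is the
// length of the chunk-size line including its trailing \r\n, dataLen the
// number of data bytes in each chunk. It returns how many chunks were
// accepted before the 16 KiB excess budget (if any) was exceeded.
func excessAfter(chunks int, headerLen, dataLen int64) (int, error) {
	var excess int64
	for i := 0; i < chunks; i++ {
		excess += headerLen + 2 // header, plus \r\n after the chunk data
		excess -= 16 + 2*dataLen
		if excess < 0 {
			excess = 0
		}
		if excess > 16*1024 {
			return i + 1, errors.New("chunked encoding contains too much non-data")
		}
	}
	return chunks, nil
}

func main() {
	// "1\r\nX\r\n": 5 bytes of overhead per data byte, but the per-chunk
	// allowance of 16+2 bytes absorbs it, so excess never accumulates.
	fmt.Println(excessAfter(1<<20, 3, 1)) // 1048576 <nil>

	// "1;<100-byte extension>\r\nX\r\n": about 88 bytes of excess per chunk,
	// so the 16 KiB budget is exhausted after fewer than 200 chunks.
	fmt.Println(excessAfter(1<<20, 104, 1)) // 187 chunked encoding contains too much non-data
}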
60 changes: 60 additions & 0 deletions src/net/http/internal/chunked_test.go
@@ -212,3 +212,63 @@ func TestChunkReadPartial(t *testing.T) {
	}

}


func TestChunkReaderTooMuchOverhead(t *testing.T) {
	// If the sender is sending 100x as many chunk header bytes as chunk data,
	// we should reject the stream at some point.
	chunk := []byte("1;")
	for i := 0; i < 100; i++ {
		chunk = append(chunk, 'a') // chunk extension
	}
	chunk = append(chunk, "\r\nX\r\n"...)
	const bodylen = 1 << 20
	r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
		if i < bodylen {
			return chunk, nil
		}
		return []byte("0\r\n"), nil
	}})
	_, err := io.ReadAll(r)
	if err == nil {
		t.Fatalf("successfully read body with excessive overhead; want error")
	}
}

func TestChunkReaderByteAtATime(t *testing.T) {
	// Sending one byte per chunk should not trip the excess-overhead detection.
	const bodylen = 1 << 20
	r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
		if i < bodylen {
			return []byte("1\r\nX\r\n"), nil
		}
		return []byte("0\r\n"), nil
	}})
	got, err := io.ReadAll(r)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(got) != bodylen {
		t.Errorf("read %v bytes, want %v", len(got), bodylen)
	}
}

type funcReader struct {
	f   func(iteration int) ([]byte, error)
	i   int
	b   []byte
	err error
}

func (r *funcReader) Read(p []byte) (n int, err error) {
	if len(r.b) == 0 && r.err == nil {
		r.b, r.err = r.f(r.i)
		r.i++
	}
	n = copy(p, r.b)
	r.b = r.b[n:]
	if len(r.b) > 0 {
		return n, nil
	}
	return n, r.err
}
