* reverseproxy: Don't buffer chunked requests (fix #5366)

Mostly reverts 845bc4d50b (#5289).

Adds a warning for unsafe config. Deprecates the unsafe properties in favor of simpler, safer ones.
* Update modules/caddyhttp/reverseproxy/caddyfile.go
Co-authored-by: Y.Horie <u5.horie@gmail.com>
* Update modules/caddyhttp/reverseproxy/reverseproxy.go
Co-authored-by: Y.Horie <u5.horie@gmail.com>
* Update modules/caddyhttp/reverseproxy/reverseproxy.go
Co-authored-by: Y.Horie <u5.horie@gmail.com>
* Remove unused code
---------
Co-authored-by: Y.Horie <u5.horie@gmail.com>
commit 4b119a475f (parent 90798f3eea)
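For context, a usage sketch of the new Caddyfile directives introduced by this change (not part of the diff below; the site address, upstream, and sizes are illustrative):

example.com {
	reverse_proxy localhost:9000 {
		# replaces the deprecated buffer_requests, buffer_responses,
		# and max_buffer_size properties; values are byte sizes
		request_buffers 4KiB
		response_buffers 4KiB
	}
}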
modules/caddyhttp/reverseproxy/caddyfile.go

@@ -521,19 +521,8 @@ func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 					h.FlushInterval = caddy.Duration(dur)
 				}
 
-		case "buffer_requests":
-			if d.NextArg() {
-				return d.ArgErr()
-			}
-			h.BufferRequests = true
-
-		case "buffer_responses":
-			if d.NextArg() {
-				return d.ArgErr()
-			}
-			h.BufferResponses = true
-
-		case "max_buffer_size":
+		case "request_buffers", "response_buffers":
+			subdir := d.Val()
 			if !d.NextArg() {
 				return d.ArgErr()
 			}
@@ -544,7 +533,39 @@ func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
 			if d.NextArg() {
 				return d.ArgErr()
 			}
-			h.MaxBufferSize = int64(size)
+			if subdir == "request_buffers" {
+				h.RequestBuffers = int64(size)
+			} else if subdir == "response_buffers" {
+				h.ResponseBuffers = int64(size)
+			}
+
+		// TODO: These three properties are deprecated; remove them sometime after v2.6.4
+		case "buffer_requests": // TODO: deprecated
+			if d.NextArg() {
+				return d.ArgErr()
+			}
+			caddy.Log().Named("config.adapter.caddyfile").Warn("DEPRECATED: buffer_requests: use request_buffers instead (with a maximum buffer size)")
+			h.DeprecatedBufferRequests = true
+		case "buffer_responses": // TODO: deprecated
+			if d.NextArg() {
+				return d.ArgErr()
+			}
+			caddy.Log().Named("config.adapter.caddyfile").Warn("DEPRECATED: buffer_responses: use response_buffers instead (with a maximum buffer size)")
+			h.DeprecatedBufferResponses = true
+		case "max_buffer_size": // TODO: deprecated
+			if !d.NextArg() {
+				return d.ArgErr()
+			}
+			size, err := humanize.ParseBytes(d.Val())
+			if err != nil {
+				return d.Errf("invalid byte size '%s': %v", d.Val(), err)
+			}
+			if d.NextArg() {
+				return d.ArgErr()
+			}
+			caddy.Log().Named("config.adapter.caddyfile").Warn("DEPRECATED: max_buffer_size: use request_buffers and/or response_buffers instead (with maximum buffer sizes)")
+			h.DeprecatedMaxBufferSize = int64(size)
 
 		case "trusted_proxies":
 			for d.NextArg() {
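The size argument for these directives is parsed with humanize.ParseBytes from github.com/dustin/go-humanize, so human-readable values work as well as raw byte counts. A small standalone sketch of that parsing step (the sample inputs are arbitrary):

package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	// ParseBytes understands plain numbers as well as SI ("1MB") and
	// IEC ("4KiB") suffixes, mirroring how the Caddyfile adapter turns
	// the directive argument into the int64 stored on the handler.
	for _, s := range []string{"4096", "4KiB", "1MB"} {
		n, err := humanize.ParseBytes(s)
		if err != nil {
			fmt.Printf("invalid byte size %q: %v\n", s, err)
			continue
		}
		fmt.Printf("%-6s -> %d bytes\n", s, int64(n))
	}
}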
modules/caddyhttp/reverseproxy/reverseproxy.go

@@ -136,21 +136,27 @@ type Handler struct {
 	// are also set implicitly.
 	Headers *headers.Handler `json:"headers,omitempty"`
 
-	// If true, the entire request body will be read and buffered
-	// in memory before being proxied to the backend. This should
-	// be avoided if at all possible for performance reasons, but
-	// could be useful if the backend is intolerant of read latency.
-	BufferRequests bool `json:"buffer_requests,omitempty"`
+	// DEPRECATED: Do not use; will be removed. See request_buffers instead.
+	DeprecatedBufferRequests bool `json:"buffer_requests,omitempty"`
 
-	// If true, the entire response body will be read and buffered
-	// in memory before being proxied to the client. This should
-	// be avoided if at all possible for performance reasons, but
+	// DEPRECATED: Do not use; will be removed. See response_buffers instead.
+	DeprecatedBufferResponses bool `json:"buffer_responses,omitempty"`
+
+	// DEPRECATED: Do not use; will be removed. See request_buffers and response_buffers instead.
+	DeprecatedMaxBufferSize int64 `json:"max_buffer_size,omitempty"`
+
+	// If nonzero, the entire request body up to this size will be read
+	// and buffered in memory before being proxied to the backend. This
+	// should be avoided if at all possible for performance reasons, but
+	// could be useful if the backend is intolerant of read latency or
+	// chunked encodings.
+	RequestBuffers int64 `json:"request_buffers,omitempty"`
+
+	// If nonzero, the entire response body up to this size will be read
+	// and buffered in memory before being proxied to the client. This
+	// should be avoided if at all possible for performance reasons, but
 	// could be useful if the backend has tighter memory constraints.
-	BufferResponses bool `json:"buffer_responses,omitempty"`
-
-	// If body buffering is enabled, the maximum size of the buffers
-	// used for the requests and responses (in bytes).
-	MaxBufferSize int64 `json:"max_buffer_size,omitempty"`
+	ResponseBuffers int64 `json:"response_buffers,omitempty"`
 
 	// If configured, rewrites the copy of the upstream request.
 	// Allows changing the request method and URI (path and query).
@@ -221,11 +227,28 @@ func (h *Handler) Provision(ctx caddy.Context) error {
 	h.connections = make(map[io.ReadWriteCloser]openConnection)
 	h.connectionsMu = new(sync.Mutex)
 
+	// TODO: remove deprecated fields sometime after v2.6.4
+	if h.DeprecatedBufferRequests {
+		h.logger.Warn("DEPRECATED: buffer_requests: this property will be removed soon; use request_buffers instead (and set a maximum buffer size)")
+	}
+	if h.DeprecatedBufferResponses {
+		h.logger.Warn("DEPRECATED: buffer_responses: this property will be removed soon; use response_buffers instead (and set a maximum buffer size)")
+	}
+	if h.DeprecatedMaxBufferSize != 0 {
+		h.logger.Warn("DEPRECATED: max_buffer_size: this property will be removed soon; use request_buffers and/or response_buffers instead (and set maximum buffer sizes)")
+	}
+
+	// warn about unsafe buffering config
+	if h.RequestBuffers == -1 || h.ResponseBuffers == -1 {
+		h.logger.Warn("UNLIMITED BUFFERING: buffering is enabled without any cap on buffer size, which can result in OOM crashes")
+	}
+
 	// verify SRV compatibility - TODO: LookupSRV deprecated; will be removed
 	for i, v := range h.Upstreams {
 		if v.LookupSRV == "" {
 			continue
 		}
 		h.logger.Warn("DEPRECATED: lookup_srv: will be removed in a near-future version of Caddy; use the http.reverse_proxy.upstreams.srv module instead")
 		if h.HealthChecks != nil && h.HealthChecks.Active != nil {
 			return fmt.Errorf(`upstream: lookup_srv is incompatible with active health checks: %d: {"dial": %q, "lookup_srv": %q}`, i, v.Dial, v.LookupSRV)
 		}
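Note that Provision only warns about unlimited buffering; it does not reject the config. A hedged sketch of how the three possible buffer-limit values behave (helper name and messages below are illustrative):

package main

import "fmt"

// describeBufferLimit explains how a configured limit is interpreted:
// 0 means no buffering, a negative value (-1 in practice) means unlimited
// buffering, which Caddy logs a warning about, and any positive value caps
// the in-memory buffer at that many bytes.
func describeBufferLimit(limit int64) string {
	switch {
	case limit == 0:
		return "buffering disabled"
	case limit < 0:
		return "UNLIMITED buffering: no cap on buffer size (can cause OOM)"
	default:
		return fmt.Sprintf("buffer up to %d bytes, then stream the rest", limit)
	}
}

func main() {
	for _, limit := range []int64{0, -1, 4096} {
		fmt.Printf("%6d: %s\n", limit, describeBufferLimit(limit))
	}
}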
@@ -622,9 +645,8 @@ func (h Handler) prepareRequest(req *http.Request, repl *caddy.Replacer) (*http.
 	// attacks, so it is strongly recommended to only use this
 	// feature if absolutely required, if read timeouts are
 	// set, and if body size is limited
-	if (h.BufferRequests || isChunkedRequest(req)) && req.Body != nil {
-		req.Body, req.ContentLength = h.bufferedBody(req.Body)
-		req.Header.Set("Content-Length", strconv.FormatInt(req.ContentLength, 10))
+	if h.RequestBuffers != 0 && req.Body != nil {
+		req.Body, _ = h.bufferedBody(req.Body, h.RequestBuffers)
 	}
 
 	if req.ContentLength == 0 {
@@ -852,8 +874,8 @@ func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, origRe
 	}
 
 	// if enabled, buffer the response body
-	if h.BufferResponses {
-		res.Body, _ = h.bufferedBody(res.Body)
+	if h.ResponseBuffers != 0 {
+		res.Body, _ = h.bufferedBody(res.Body, h.ResponseBuffers)
 	}
 
 	// see if any response handler is configured for this response from the backend
@@ -1122,15 +1144,20 @@ func (h Handler) provisionUpstream(upstream *Upstream) {
 	}
 }
 
-// bufferedBody reads originalBody into a buffer, then returns a reader for the buffer.
-// Always close the return value when done with it, just like if it was the original body!
-func (h Handler) bufferedBody(originalBody io.ReadCloser) (io.ReadCloser, int64) {
+// bufferedBody reads originalBody into a buffer with maximum size of limit (-1 for unlimited),
+// then returns a reader for the buffer along with how many bytes were buffered. Always close
+// the return value when done with it, just like if it was the original body! If limit is 0
+// (which it shouldn't be), this function returns its input; i.e. is a no-op, for safety.
+func (h Handler) bufferedBody(originalBody io.ReadCloser, limit int64) (io.ReadCloser, int64) {
+	if limit == 0 {
+		return originalBody, 0
+	}
 	var written int64
 	buf := bufPool.Get().(*bytes.Buffer)
 	buf.Reset()
-	if h.MaxBufferSize > 0 {
-		n, err := io.CopyN(buf, originalBody, h.MaxBufferSize)
-		if err != nil || n == h.MaxBufferSize {
+	if limit > 0 {
+		n, err := io.CopyN(buf, originalBody, limit)
+		if err != nil || n == limit {
 			return bodyReadCloser{
 				Reader: io.MultiReader(buf, originalBody),
 				buf:    buf,
@@ -1147,15 +1174,6 @@ func (h Handler) bufferedBody(originalBody io.ReadCloser) (io.ReadCloser, int64)
 	}, written
 }
 
-func isChunkedRequest(req *http.Request) bool {
-	for _, transferEncoding := range req.TransferEncoding {
-		if transferEncoding == "chunked" {
-			return true
-		}
-	}
-	return false
-}
-
 // cloneRequest makes a semi-deep clone of origReq.
 //
 // Most of this code is borrowed from the Go stdlib reverse proxy,
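To make the core technique concrete: the updated bufferedBody copies at most limit bytes into a pooled buffer and, when the body is larger than the limit, stitches the buffered prefix and the unread remainder back together with io.MultiReader so nothing is lost. A simplified, self-contained sketch of that idea, without Caddy's buffer pool or bodyReadCloser type (names below are illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// bufferUpTo reads at most limit bytes of body into memory. If the body is
// larger than limit, the returned reader yields the buffered prefix followed
// by the rest of the original stream, so nothing is lost. limit <= 0 buffers
// the entire body (the "unlimited" case that Provision warns about).
func bufferUpTo(body io.Reader, limit int64) (io.Reader, int64) {
	buf := new(bytes.Buffer)
	if limit > 0 {
		n, err := io.CopyN(buf, body, limit)
		if err != nil || n == limit {
			// Either the body ended early (err == io.EOF) or we hit the cap;
			// in the latter case the unread remainder is still in body.
			return io.MultiReader(buf, body), n
		}
		return buf, n
	}
	n, _ := io.Copy(buf, body)
	return buf, n
}

func main() {
	body := strings.NewReader("hello, world")
	r, buffered := bufferUpTo(body, 5) // cap the in-memory prefix at 5 bytes
	out, _ := io.ReadAll(r)
	fmt.Printf("buffered %d bytes, full body: %q\n", buffered, out)
}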