reverse_proxy: Allow buffering of client requests

This is a bad idea, but some backends apparently require it. See
discussion in #176.
Matthew Holt 2019-11-15 17:15:33 -07:00
parent af26a03da1
commit 1228dd7d93
2 changed files with 39 additions and 13 deletions
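
For context, here is a minimal sketch (not part of this commit) of what switching the new option on could look like from the config side. The buffer_requests field name comes from the JSON tag added to the Handler struct below; the handlerConfig type and the "handler": "reverse_proxy" fragment are illustrative stand-ins, not Caddy's actual decoding path.

package main

import (
	"encoding/json"
	"fmt"
)

// handlerConfig mirrors only the field this commit adds; it is a
// stand-in for the real reverseproxy.Handler struct shown below.
type handlerConfig struct {
	BufferRequests bool `json:"buffer_requests,omitempty"`
}

func main() {
	// hypothetical handler config fragment with request buffering enabled
	raw := []byte(`{"handler": "reverse_proxy", "buffer_requests": true}`)

	var cfg handlerConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println("buffer_requests:", cfg.BufferRequests) // prints: buffer_requests: true
}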

@@ -15,14 +15,18 @@
 package reverseproxy

 import (
+	"bytes"
 	"context"
 	"encoding/json"
 	"fmt"
+	"io"
+	"io/ioutil"
 	"net"
 	"net/http"
 	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 	"time"

 	"github.com/caddyserver/caddy/v2"
@@ -38,13 +42,14 @@ func init() {

 // Handler implements a highly configurable and production-ready reverse proxy.
 type Handler struct {
-	TransportRaw  json.RawMessage  `json:"transport,omitempty"`
-	CBRaw         json.RawMessage  `json:"circuit_breaker,omitempty"`
-	LoadBalancing *LoadBalancing   `json:"load_balancing,omitempty"`
-	HealthChecks  *HealthChecks    `json:"health_checks,omitempty"`
-	Upstreams     UpstreamPool     `json:"upstreams,omitempty"`
-	FlushInterval caddy.Duration   `json:"flush_interval,omitempty"`
-	Headers       *headers.Handler `json:"headers,omitempty"`
+	TransportRaw   json.RawMessage  `json:"transport,omitempty"`
+	CBRaw          json.RawMessage  `json:"circuit_breaker,omitempty"`
+	LoadBalancing  *LoadBalancing   `json:"load_balancing,omitempty"`
+	HealthChecks   *HealthChecks    `json:"health_checks,omitempty"`
+	Upstreams      UpstreamPool     `json:"upstreams,omitempty"`
+	FlushInterval  caddy.Duration   `json:"flush_interval,omitempty"`
+	Headers        *headers.Handler `json:"headers,omitempty"`
+	BufferRequests bool             `json:"buffer_requests,omitempty"`

 	Transport http.RoundTripper `json:"-"`
 	CB        CircuitBreaker    `json:"-"`
@@ -222,6 +227,27 @@ func (h *Handler) Cleanup() error {
 func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
 	repl := r.Context().Value(caddy.ReplacerCtxKey).(caddy.Replacer)

+	// if enabled, buffer client request;
+	// this should only be enabled if the
+	// upstream requires it and does not
+	// work with "slow clients" (gunicorn,
+	// etc.) - this obviously has a perf
+	// overhead and makes the proxy at
+	// risk of exhausting memory and more
+	// susceptible to slowloris attacks,
+	// so it is strongly recommended to
+	// only use this feature if absolutely
+	// required, if read timeouts are set,
+	// and if body size is limited
+	if h.BufferRequests {
+		buf := bufPool.Get().(*bytes.Buffer)
+		buf.Reset()
+		defer bufPool.Put(buf)
+		io.Copy(buf, r.Body)
+		r.Body.Close()
+		r.Body = ioutil.NopCloser(buf)
+	}
+
 	// prepare the request for proxying; this is needed only once
 	err := h.prepareRequest(r)
 	if err != nil {
@@ -662,12 +688,11 @@ type DialError struct {
 	error
 }

-// TODO: see if we can use this
-// var bufPool = sync.Pool{
-// 	New: func() interface{} {
-// 		return new(bytes.Buffer)
-// 	},
-// }
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		return new(bytes.Buffer)
+	},
+}

 // Interface guards
 var (
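
The comment added in ServeHTTP above stresses that buffering should only be combined with read timeouts and a limited body size. As a rough illustration of the body-size half of that advice, here is a hedged sketch, not part of this commit, using the standard library's http.MaxBytesReader; the proxyutil package, bufferBody helper, and maxBufferedBody limit are all hypothetical.

package proxyutil

import (
	"bytes"
	"io"
	"io/ioutil"
	"net/http"
)

// maxBufferedBody is an arbitrary illustrative cap, not a Caddy setting.
const maxBufferedBody = 1 << 20 // 1 MiB

// bufferBody buffers the client body much like the hunk above does, but
// refuses to buffer more than maxBufferedBody bytes.
func bufferBody(w http.ResponseWriter, r *http.Request) error {
	buf := new(bytes.Buffer)

	// Reads past the limit return an error, so the buffer cannot grow without bound.
	limited := http.MaxBytesReader(w, r.Body, maxBufferedBody)
	defer limited.Close()

	if _, err := io.Copy(buf, limited); err != nil {
		return err // body too large, or the client failed mid-read
	}
	r.Body = ioutil.NopCloser(buf)
	return nil
}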

@@ -110,6 +110,7 @@ func (h Handler) copyResponse(dst io.Writer, src io.Reader, flushInterval time.D
 	// 	buf = h.BufferPool.Get()
 	// 	defer h.BufferPool.Put(buf)
 	// }
+	// // we could also see about a pool that returns values like this: make([]byte, 32*1024)
 	_, err := h.copyBuffer(dst, src, buf)
 	return err
 }
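
The new commented-out line above floats the idea of a pool that returns make([]byte, 32*1024). Below is a minimal sketch of that idea, assuming a hypothetical proxyutil package and helper names; it is not what this commit implements, which pools *bytes.Buffer values instead.

package proxyutil

import (
	"io"
	"sync"
)

// copyBufPool hands out reusable 32 KiB copy buffers, in the spirit of the
// make([]byte, 32*1024) idea in the comment above.
var copyBufPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, 32*1024)
	},
}

// copyWithPooledBuffer shows how such a pool would typically wrap io.CopyBuffer.
func copyWithPooledBuffer(dst io.Writer, src io.Reader) (int64, error) {
	buf := copyBufPool.Get().([]byte)
	defer copyBufPool.Put(buf)
	return io.CopyBuffer(dst, src, buf)
}

One caveat with pooling plain []byte values: each Put boxes the slice header into an interface{}, which itself allocates; storing a pointer such as *[]byte avoids that extra allocation.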