From b02b6019639fd310b969bd6b46dfaab5060b4a2a Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 3 Sep 2021 12:00:29 +0800 Subject: [PATCH 01/99] [tcpproxy] first commit --- pkg/object/tcpserver/context.go | 15 +++++ pkg/object/tcpserver/spec.go | 47 ++++++++++++++++ pkg/object/tcpserver/tcpserver.go | 93 +++++++++++++++++++++++++++++++ pkg/util/tcpstat/tcpstat.go | 73 ++++++++++++++++++++++++ 4 files changed, 228 insertions(+) create mode 100644 pkg/object/tcpserver/context.go create mode 100644 pkg/object/tcpserver/spec.go create mode 100644 pkg/object/tcpserver/tcpserver.go create mode 100644 pkg/util/tcpstat/tcpstat.go diff --git a/pkg/object/tcpserver/context.go b/pkg/object/tcpserver/context.go new file mode 100644 index 0000000000..3735d0d5d1 --- /dev/null +++ b/pkg/object/tcpserver/context.go @@ -0,0 +1,15 @@ +package tcpserver + +import ( + stdcontext "context" + "time" +) + +const ( + serverShutdownTimeout = 30 * time.Second +) + +func serverShutdownContext() (stdcontext.Context, stdcontext.CancelFunc) { + ctx, cancelFunc := stdcontext.WithTimeout(stdcontext.Background(), serverShutdownTimeout) + return ctx, cancelFunc +} diff --git a/pkg/object/tcpserver/spec.go b/pkg/object/tcpserver/spec.go new file mode 100644 index 0000000000..5defc15510 --- /dev/null +++ b/pkg/object/tcpserver/spec.go @@ -0,0 +1,47 @@ +package tcpserver + +import ( + "fmt" + + "github.com/megaease/easegress/pkg/tracing" + "github.com/megaease/easegress/pkg/util/ipfilter" +) + +type ( + // Spec describes the TcpServer. + Spec struct { + Port uint16 `yaml:"port" jsonschema:"required,minimum=1"` + MaxConnections uint32 `yaml:"maxConnections" jsonschema:"omitempty,minimum=1"` + + // By default, backlog is set to -1 on FreeBSD, DragonFly BSD, and macOS, and to 511 on other platforms. + Backlog int32 `yaml:"backlog" jsonschema:"omitempty,minimum=-1"` + SendBuf uint32 `yaml:"sendBuf" jsonschema:"omitempty"` + RecvBuf uint32 `yaml:"recvBuf" jsonschema:"omitempty"` + Reuseport bool `yaml:"reuseport" jsonschema:"omitempty"` + KeepAlive bool `yaml:"keepAlive" jsonschema:"required"` + TcpNodelay bool `yaml:"tcpNodelay" jsonschema:"omitempty"` + ProxyConnectTimeout uint32 `yaml:"proxyConnectTimeout" jsonschema:"omitempty"` + ProxyTimeout uint32 `yaml:"proxyTimeout" jsonschema:"omitempty"` + + // Support multiple certs, preserve the certbase64 and keybase64 + // for backward compatibility + SSL bool `yaml:"ssl" jsonschema:"omitempty"` + CertBase64 string `yaml:"certBase64" jsonschema:"omitempty,format=base64"` + KeyBase64 string `yaml:"keyBase64" jsonschema:"omitempty,format=base64"` + + IPFilter *ipfilter.Spec `yaml:"ipFilter,omitempty" jsonschema:"omitempty"` + + Tracing *tracing.Spec `yaml:"tracing" jsonschema:"omitempty"` + } +) + +// Validate validates TcpServerSpec. +func (spec *Spec) Validate() error { + if spec.SSL { + if spec.CertBase64 == "" || spec.KeyBase64 == "" { + return fmt.Errorf("tcp proxy ssl parameters is incomplete") + } + // TODO need check ssl parameters + } + return nil +} diff --git a/pkg/object/tcpserver/tcpserver.go b/pkg/object/tcpserver/tcpserver.go new file mode 100644 index 0000000000..e86d8f061f --- /dev/null +++ b/pkg/object/tcpserver/tcpserver.go @@ -0,0 +1,93 @@ +package tcpserver + +import ( + "github.com/megaease/easegress/pkg/protocol" + "sync/atomic" + + "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/limitlistener" + "github.com/megaease/easegress/pkg/util/tcpstat" +) + +const ( + // Category is the category of HTTPServer. 
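+ // An illustrative object spec for this kind, assuming the usual Easegress object
+ // layout of kind/name plus the yaml keys from spec.go (values are examples only):
+ //
+ //   kind: TCPServer
+ //   name: tcp-server-demo
+ //   port: 10080
+ //   maxConnections: 10240
+ //   keepAlive: true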
+ Category = supervisor.CategoryTrafficGate + + // Kind is the kind of HTTPServer. + Kind = "TCPServer" +) + +func init() { + supervisor.Register(&TCPServer{}) +} + +type ( + stateType string + + eventCheckFailed struct{} + eventServeFailed struct { + startNum uint64 + err error + } + eventReload struct { + nextSuperSpec *supervisor.Spec + } + eventClose struct{ done chan struct{} } + + TCPServer struct { + superSpec *supervisor.Spec + spec *Spec + startNum uint64 + eventChan chan interface{} + + // status + state atomic.Value // stateType + err atomic.Value // error + + tcpstat *tcpstat.TcpStat + limitListener *limitlistener.LimitListener + } + + // Status contains all status generated by runtime, for displaying to users. + Status struct { + Health string `yaml:"health"` + + State stateType `yaml:"state"` + Error string `yaml:"error,omitempty"` + + *tcpstat.Status + } +) + +func (T *TCPServer) Category() supervisor.ObjectCategory { + return Category +} + +func (T *TCPServer) Kind() string { + return Kind +} + +func (T *TCPServer) DefaultSpec() interface{} { + return &Spec{ + KeepAlive: true, + MaxConnections: 10240, + } +} + +func (T *TCPServer) Status() *supervisor.Status { + panic("implement me") +} + +func (T *TCPServer) Close() { + panic("implement me") +} + +// Init initializes HTTPServer. +func (T *TCPServer) Init(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { + panic("implement me") +} + +// Inherit inherits previous generation of HTTPServer. +func (T *TCPServer) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper protocol.MuxMapper) { + panic("implement me") +} diff --git a/pkg/util/tcpstat/tcpstat.go b/pkg/util/tcpstat/tcpstat.go new file mode 100644 index 0000000000..9d05af0f03 --- /dev/null +++ b/pkg/util/tcpstat/tcpstat.go @@ -0,0 +1,73 @@ +package tcpstat + +import ( + "sync" + + "github.com/megaease/easegress/pkg/util/sampler" + "github.com/rcrowley/go-metrics" +) + +type ( + // TcpStat is the statistics tool for TCP traffic. + TcpStat struct { + mutex sync.Mutex + + count uint64 // for tcp connection + rate1 metrics.EWMA + rate5 metrics.EWMA + rate15 metrics.EWMA + + errCount uint64 + errRate1 metrics.EWMA + errRate5 metrics.EWMA + errRate15 metrics.EWMA + + m1ErrPercent float64 + m5ErrPercent float64 + m15ErrPercent float64 + + total uint64 + min uint64 + mean uint64 + max uint64 + + durationSampler *sampler.DurationSampler + + reqSize uint64 + respSize uint64 + } + + // Status contains all status generated by HTTPStat. 
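+ // It exposes connection counts and EWMA rates, error counts and percentages,
+ // duration percentiles from the duration sampler, and request/response byte sizes.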
+ Status struct { + Count uint64 `yaml:"count"` + M1 float64 `yaml:"m1"` + M5 float64 `yaml:"m5"` + M15 float64 `yaml:"m15"` + + ErrCount uint64 `yaml:"errCount"` + M1Err float64 `yaml:"m1Err"` + M5Err float64 `yaml:"m5Err"` + M15Err float64 `yaml:"m15Err"` + + M1ErrPercent float64 `yaml:"m1ErrPercent"` + M5ErrPercent float64 `yaml:"m5ErrPercent"` + M15ErrPercent float64 `yaml:"m15ErrPercent"` + + Min uint64 `yaml:"min"` + Max uint64 `yaml:"max"` + Mean uint64 `yaml:"mean"` + + P25 float64 `yaml:"p25"` + P50 float64 `yaml:"p50"` + P75 float64 `yaml:"p75"` + P95 float64 `yaml:"p95"` + P98 float64 `yaml:"p98"` + P99 float64 `yaml:"p99"` + P999 float64 `yaml:"p999"` + + ReqSize uint64 `yaml:"reqSize"` + RespSize uint64 `yaml:"respSize"` + + Codes map[int]uint64 `yaml:"codes"` + } +) From 34fe9db7fd4bd3a46ebd89cf1ec0f8b09fcdad3a Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 3 Sep 2021 17:47:12 +0800 Subject: [PATCH 02/99] [tcpproxy] add layer4pipeline and layer4proxy(incomplete) --- pkg/context/httpcontext.go | 29 ++ pkg/filter/layer4proxy/masterslavereader.go | 103 +++++ pkg/filter/layer4proxy/pool.go | 143 ++++++ pkg/filter/layer4proxy/proxy.go | 99 +++++ pkg/filter/layer4proxy/server.go | 275 ++++++++++++ pkg/filter/proxy/masterslavereader.go | 2 +- pkg/object/httpserver/httpserver.go | 2 +- pkg/object/layer4pipeline/layer4pipeline.go | 420 ++++++++++++++++++ pkg/object/layer4pipeline/registry.go | 101 +++++ pkg/object/layer4pipeline/spec.go | 124 ++++++ pkg/object/tcpserver/pool.go | 1 + pkg/object/tcpserver/runtime.go | 128 ++++++ pkg/object/tcpserver/tcpserver.go | 42 +- pkg/supervisor/registry.go | 2 +- .../tcpstat.go => layer4stat/layer4stat.go} | 14 +- 15 files changed, 1462 insertions(+), 23 deletions(-) create mode 100644 pkg/filter/layer4proxy/masterslavereader.go create mode 100644 pkg/filter/layer4proxy/pool.go create mode 100644 pkg/filter/layer4proxy/proxy.go create mode 100644 pkg/filter/layer4proxy/server.go create mode 100644 pkg/object/layer4pipeline/layer4pipeline.go create mode 100644 pkg/object/layer4pipeline/registry.go create mode 100644 pkg/object/layer4pipeline/spec.go create mode 100644 pkg/object/tcpserver/pool.go create mode 100644 pkg/object/tcpserver/runtime.go rename pkg/util/{tcpstat/tcpstat.go => layer4stat/layer4stat.go} (86%) diff --git a/pkg/context/httpcontext.go b/pkg/context/httpcontext.go index 0190bcb7b4..f6aabe0be0 100644 --- a/pkg/context/httpcontext.go +++ b/pkg/context/httpcontext.go @@ -42,6 +42,35 @@ type ( // HandlerCaller is a helper function to call the handler HandlerCaller func(lastResult string) string + // Layer4Context is all context of an TCP processing. + // It is not goroutine-safe, callers must use Lock/Unlock + // to protect it by themselves. + Layer4Context interface { + Lock() + Unlock() + + stdcontext.Context + Cancel(err error) + Cancelled() bool + ClientDisconnected() bool + + Duration() time.Duration // For log, sample, etc. + OnFinish(func()) // For setting final client statistics, etc. + AddTag(tag string) // For debug, log, etc. + + Finish() + + Host() string + SetHost(host string) + Port() uint16 + SetPort(port uint16) + + ClientIP() string + + CallNextHandler(lastResult string) string + SetHandlerCaller(caller HandlerCaller) + } + // HTTPContext is all context of an HTTP processing. // It is not goroutine-safe, callers must use Lock/Unlock // to protect it by themselves. 
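The Layer4Context above is what layer-4 filters will be handed. Below is a minimal sketch of how a filter's Handle could use it, assuming the easegress context package import; the package name, filter type, tag text and callback body are hypothetical, only the interface methods come from this patch:

package layer4demo

import "github.com/megaease/easegress/pkg/context"

// demoFilter exists only to illustrate the Layer4Context contract.
type demoFilter struct{}

func (f *demoFilter) Handle(ctx context.Layer4Context) string {
	// The context is not goroutine-safe, so take its lock before tagging it.
	ctx.Lock()
	ctx.AddTag("client: " + ctx.ClientIP())
	ctx.Unlock()

	// Register a callback for final per-connection statistics.
	ctx.OnFinish(func() {
		_ = ctx.Duration()
	})

	// Hand over to the next filter in the pipeline flow; "" is the normal result.
	return ctx.CallNextHandler("")
}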
diff --git a/pkg/filter/layer4proxy/masterslavereader.go b/pkg/filter/layer4proxy/masterslavereader.go new file mode 100644 index 0000000000..bfa6c9fc76 --- /dev/null +++ b/pkg/filter/layer4proxy/masterslavereader.go @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package layer4proxy + +import ( + "bytes" + "io" +) + +type ( + // masterSlaveReader reads bytes to master, + // and synchronize them to slave. + // Currently, only support one slave. + masterSlaveReader struct { + masterReader io.Reader + slaveReader io.Reader + } + + masterReader struct { + r io.Reader + buffChan chan []byte + } + + slaveReader struct { + unreadBuff *bytes.Buffer + buffChan chan []byte + } +) + +func newMasterSlaveReader(r io.Reader) (io.ReadCloser, io.Reader) { + buffChan := make(chan []byte, 10) + mr := &masterReader{ + r: r, + buffChan: buffChan, + } + sr := &slaveReader{ + unreadBuff: bytes.NewBuffer(nil), + buffChan: buffChan, + } + + return mr, sr +} + +func (mr *masterReader) Read(p []byte) (n int, err error) { + buff := bytes.NewBuffer(nil) + tee := io.TeeReader(mr.r, buff) + n, err = tee.Read(p) + + if n != 0 { + mr.buffChan <- buff.Bytes() + } + + if err == io.EOF { + close(mr.buffChan) + } + + return n, err +} + +func (mr *masterReader) Close() error { + if closer, ok := mr.r.(io.ReadCloser); ok { + return closer.Close() + } + + return nil +} + +func (sr *slaveReader) Read(p []byte) (int, error) { + buff, ok := <-sr.buffChan + + if !ok { + return 0, io.EOF + } + + var n int + // NOTE: This if-branch is defensive programming, + // Because the callers of Read of both master and slave + // are the same, so it never happens that len(p) < len(buff). + // else-branch is faster because it is one less copy operation than if-branch. + if sr.unreadBuff.Len() > 0 || len(p) < len(buff) { + sr.unreadBuff.Write(buff) + n, _ = sr.unreadBuff.Read(p) + } else { + n = copy(p, buff) + } + + return n, nil +} diff --git a/pkg/filter/layer4proxy/pool.go b/pkg/filter/layer4proxy/pool.go new file mode 100644 index 0000000000..9937b81d55 --- /dev/null +++ b/pkg/filter/layer4proxy/pool.go @@ -0,0 +1,143 @@ +package layer4proxy + +import ( + "fmt" + "github.com/google/martian/log" + "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/layer4stat" + "github.com/megaease/easegress/pkg/util/memorycache" + "github.com/megaease/easegress/pkg/util/stringtool" + "io" + "net" +) + +type ( + pool struct { + spec *PoolSpec + + tagPrefix string + writeResponse bool + + servers *servers + layer4stat *layer4stat.Layer4Stat + memoryCache *memorycache.MemoryCache + } + + // PoolSpec describes a pool of servers. 
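+ // A pool picks one backend through its load balancer (servers.next in server.go)
+ // and dials it from handle() below.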
+ PoolSpec struct { + SpanName string `yaml:"spanName" jsonschema:"omitempty"` + ServersTags []string `yaml:"serversTags" jsonschema:"omitempty,uniqueItems=true"` + Servers []*Server `yaml:"servers" jsonschema:"omitempty"` + ServiceRegistry string `yaml:"serviceRegistry" jsonschema:"omitempty"` + ServiceName string `yaml:"serviceName" jsonschema:"omitempty"` + LoadBalance *LoadBalance `yaml:"loadBalance" jsonschema:"required"` + MemoryCache *memorycache.Spec `yaml:"memoryCache,omitempty" jsonschema:"omitempty"` + } + + // PoolStatus is the status of Pool. + PoolStatus struct { + Stat *layer4stat.Status `yaml:"stat"` + } +) + +// Validate validates poolSpec. +func (s PoolSpec) Validate() error { + if s.ServiceName == "" && len(s.Servers) == 0 { + return fmt.Errorf("both serviceName and servers are empty") + } + + serversGotWeight := 0 + for _, server := range s.Servers { + if server.Weight > 0 { + serversGotWeight++ + } + } + if serversGotWeight > 0 && serversGotWeight < len(s.Servers) { + return fmt.Errorf("not all servers have weight(%d/%d)", + serversGotWeight, len(s.Servers)) + } + + if s.ServiceName == "" { + servers := newStaticServers(s.Servers, s.ServersTags, s.LoadBalance) + if servers.len() == 0 { + return fmt.Errorf("serversTags picks none of servers") + } + } + + return nil +} + +func newPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string, writeResponse bool) *pool { + + var memoryCache *memorycache.MemoryCache + if spec.MemoryCache != nil { + memoryCache = memorycache.New(spec.MemoryCache) + } + + return &pool{ + spec: spec, + + tagPrefix: tagPrefix, + writeResponse: writeResponse, + + servers: newServers(super, spec), + layer4stat: layer4stat.New(), + memoryCache: memoryCache, + } +} + +func (p *pool) status() *PoolStatus { + s := &PoolStatus{Stat: p.layer4stat.Status()} + return s +} + +func (p *pool) handle(ctx context.Layer4Context, reqBody io.Reader) string { + addTag := func(subPrefix, msg string) { + tag := stringtool.Cat(p.tagPrefix, "#", subPrefix, ": ", msg) + ctx.Lock() + ctx.AddTag(tag) + ctx.Unlock() + } + + server, err := p.servers.next(ctx) + if err != nil { + addTag("serverErr", err.Error()) + return resultInternalError + } + addTag("addr", server.Address) + + addr, err := net.ResolveTCPAddr("tcp", server.Address) + if err != nil { + log.Errorf("%v", err) + } + conn, err := net.DialTCP("tcp", nil, addr) + if err != nil { + log.Errorf("%v", err) + } + + conn.SetKeepAlive(true) // TODO need to export tcp server config in layer4Context + + ctx.Lock() + defer ctx.Unlock() + // NOTE: The code below can't use addTag and setStatusCode in case of deadlock. + + go func() { + // TODO do tcp proxy + }() + + //go func() { + // // NOTE: Need to be read to completion and closed. + // // Reference: https://golang.org/pkg/net/http/#Response + // // And we do NOT do statistics of duration and respSize + // // for it, because we can't wait for it to finish. + // defer resp.Body.Close() + // io.Copy(ioutil.Discard, resp.Body) + //}() + + return "" +} + +func (p *pool) close() { + p.servers.close() +} diff --git a/pkg/filter/layer4proxy/proxy.go b/pkg/filter/layer4proxy/proxy.go new file mode 100644 index 0000000000..c6e4416812 --- /dev/null +++ b/pkg/filter/layer4proxy/proxy.go @@ -0,0 +1,99 @@ +package layer4proxy + +import ( + "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/object/layer4pipeline" +) + +const ( + // Kind is the kind of Proxy. 
+ Kind = "Proxy" + + resultFallback = "fallback" + resultInternalError = "internalError" + resultClientError = "clientError" + resultServerError = "serverError" +) + +var results = []string{ + resultFallback, + resultInternalError, + resultClientError, + resultServerError, +} + +func init() { + layer4pipeline.Register(&Proxy{}) +} + +type ( + // Proxy is the filter Proxy. + Proxy struct { + filterSpec *layer4pipeline.FilterSpec + spec *Spec + + mainPool *pool + candidatePools []*pool + mirrorPool *pool + } + + // Spec describes the Proxy. + Spec struct { + MainPool *PoolSpec `yaml:"mainPool" jsonschema:"required"` + CandidatePools []*PoolSpec `yaml:"candidatePools,omitempty" jsonschema:"omitempty"` + MirrorPool *PoolSpec `yaml:"mirrorPool,omitempty" jsonschema:"omitempty"` + } + + // Status is the status of Proxy. + Status struct { + MainPool *PoolStatus `yaml:"mainPool"` + CandidatePools []*PoolStatus `yaml:"candidatePools,omitempty"` + MirrorPool *PoolStatus `yaml:"mirrorPool,omitempty"` + } +) + +func (p *Proxy) Kind() string { + return Kind +} + +func (p *Proxy) DefaultSpec() interface{} { + return &Spec{} +} + +func (p *Proxy) Description() string { + return "Proxy sets the proxy of proxy servers" +} + +func (p *Proxy) Results() []string { + panic("implement me") +} + +func (p *Proxy) Init(filterSpec *layer4pipeline.FilterSpec) { + panic("implement me") +} + +func (p *Proxy) Inherit(filterSpec *layer4pipeline.FilterSpec, previousGeneration layer4pipeline.Filter) { + panic("implement me") +} + +func (p *Proxy) Handle(layer4Context context.Layer4Context) (result string) { + panic("implement me") +} + +func (p *Proxy) Status() interface{} { + panic("implement me") +} + +func (p *Proxy) Close() { + p.mainPool.close() + + if p.candidatePools != nil { + for _, v := range p.candidatePools { + v.close() + } + } + + if p.mirrorPool != nil { + p.mirrorPool.close() + } +} diff --git a/pkg/filter/layer4proxy/server.go b/pkg/filter/layer4proxy/server.go new file mode 100644 index 0000000000..46bae040cd --- /dev/null +++ b/pkg/filter/layer4proxy/server.go @@ -0,0 +1,275 @@ +package layer4proxy + +import ( + "fmt" + "math/rand" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/object/serviceregistry" + "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/hashtool" + "github.com/megaease/easegress/pkg/util/stringtool" +) + +const ( + // PolicyRoundRobin is the policy of round-robin. + PolicyRoundRobin = "roundRobin" + // PolicyRandom is the policy of random. + PolicyRandom = "random" + // PolicyWeightedRandom is the policy of weighted random. + PolicyWeightedRandom = "weightedRandom" + // PolicyIPHash is the policy of ip hash. + PolicyIPHash = "ipHash" + + retryTimeout = 3 * time.Second +) + +type ( + servers struct { + poolSpec *PoolSpec + super *supervisor.Supervisor + + mutex sync.Mutex + serviceRegistry *serviceregistry.ServiceRegistry + serviceWatcher serviceregistry.ServiceWatcher + static *staticServers + done chan struct{} + } + + staticServers struct { + count uint64 + weightsSum int + servers []*Server + lb LoadBalance + } + + // Server is proxy server. 
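+ // Its address is a plain host:port pair (yaml key "url", jsonschema format
+ // hostport) which the pool resolves and dials directly over TCP.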
+ Server struct { + Address string `yaml:"url" jsonschema:"required,format=hostport"` + Tags []string `yaml:"tags" jsonschema:"omitempty,uniqueItems=true"` + Weight int `yaml:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` + } + + // LoadBalance is load balance for multiple servers. + LoadBalance struct { + Policy string `yaml:"policy" jsonschema:"required,enum=roundRobin,enum=random,enum=weightedRandom,enum=ipHash"` + } +) + +func (s *Server) String() string { + return fmt.Sprintf("%s,%v,%d", s.Address, s.Tags, s.Weight) +} + +func newServers(super *supervisor.Supervisor, poolSpec *PoolSpec) *servers { + s := &servers{ + poolSpec: poolSpec, + super: super, + done: make(chan struct{}), + } + + s.useStaticServers() + + if poolSpec.ServiceRegistry == "" || poolSpec.ServiceName == "" { + return s + } + + s.serviceRegistry = s.super.MustGetSystemController(serviceregistry.Kind). + Instance().(*serviceregistry.ServiceRegistry) + + s.tryUseService() + s.serviceWatcher = s.serviceRegistry.NewServiceWatcher(s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName) + + go s.watchService() + + return s +} + +func (s *servers) watchService() { + for { + select { + case <-s.done: + return + case event := <-s.serviceWatcher.Watch(): + s.handleEvent(event) + } + } +} + +func (s *servers) handleEvent(event *serviceregistry.ServiceEvent) { + s.useService(event.Instances) +} + +func (s *servers) tryUseService() { + serviceInstanceSpecs, err := s.serviceRegistry.ListServiceInstances(s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName) + + if err != nil { + logger.Errorf("get service %s/%s failed: %v", + s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName, err) + s.useStaticServers() + return + } + + s.useService(serviceInstanceSpecs) +} + +func (s *servers) useService(serviceInstanceSpecs map[string]*serviceregistry.ServiceInstanceSpec) { + var servers []*Server + for _, instance := range serviceInstanceSpecs { + servers = append(servers, &Server{ + Address: instance.Address + ":" + strconv.Itoa(int(instance.Port)), + Tags: instance.Tags, + Weight: instance.Weight, + }) + } + if len(servers) == 0 { + logger.Errorf("%s/%s: empty service instance", + s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName) + s.useStaticServers() + return + } + + dynamicServers := newStaticServers(servers, s.poolSpec.ServersTags, s.poolSpec.LoadBalance) + if dynamicServers.len() == 0 { + logger.Errorf("%s/%s: no service instance satisfy tags: %v", + s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName, s.poolSpec.ServersTags) + s.useStaticServers() + } + + logger.Infof("use dynamic service: %s/%s", s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName) + + s.mutex.Lock() + defer s.mutex.Unlock() + s.static = dynamicServers +} + +func (s *servers) useStaticServers() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.static = newStaticServers(s.poolSpec.Servers, s.poolSpec.ServersTags, s.poolSpec.LoadBalance) +} + +func (s *servers) snapshot() *staticServers { + s.mutex.Lock() + defer s.mutex.Unlock() + + return s.static +} + +func (s *servers) len() int { + static := s.snapshot() + + return static.len() +} + +func (s *servers) next(ctx context.Layer4Context) (*Server, error) { + static := s.snapshot() + + if static.len() == 0 { + return nil, fmt.Errorf("no server available") + } + + return static.next(ctx), nil +} + +func (s *servers) close() { + close(s.done) + + if s.serviceWatcher != nil { + s.serviceWatcher.Stop() + } +} + +func newStaticServers(servers []*Server, tags []string, lb *LoadBalance) *staticServers { + if servers == nil { + 
servers = make([]*Server, 0) + } + + ss := &staticServers{} + if lb == nil { + ss.lb.Policy = PolicyRoundRobin + } else { + ss.lb = *lb + } + + defer ss.prepare() + + if len(tags) == 0 { + ss.servers = servers + return ss + } + + chosenServers := make([]*Server, 0) + for _, server := range servers { + for _, tag := range tags { + if stringtool.StrInSlice(tag, server.Tags) { + chosenServers = append(chosenServers, server) + break + } + } + } + ss.servers = chosenServers + + return ss +} + +func (ss *staticServers) prepare() { + for _, server := range ss.servers { + ss.weightsSum += server.Weight + } +} + +func (ss *staticServers) len() int { + return len(ss.servers) +} + +func (ss *staticServers) next(ctx context.Layer4Context) *Server { + switch ss.lb.Policy { + case PolicyRoundRobin: + return ss.roundRobin() + case PolicyRandom: + return ss.random() + case PolicyWeightedRandom: + return ss.weightedRandom() + case PolicyIPHash: + return ss.ipHash(ctx) + } + + logger.Errorf("BUG: unknown load balance policy: %s", ss.lb.Policy) + return ss.roundRobin() +} + +func (ss *staticServers) roundRobin() *Server { + count := atomic.AddUint64(&ss.count, 1) + // NOTE: start from 0. + count-- + return ss.servers[int(count)%len(ss.servers)] +} + +func (ss *staticServers) random() *Server { + return ss.servers[rand.Intn(len(ss.servers))] +} + +func (ss *staticServers) weightedRandom() *Server { + randomWeight := rand.Intn(ss.weightsSum) + for _, server := range ss.servers { + randomWeight -= server.Weight + if randomWeight < 0 { + return server + } + } + + logger.Errorf("BUG: weighted random can't pick a server: sum(%d) servers(%+v)", + ss.weightsSum, ss.servers) + + return ss.random() +} + +func (ss *staticServers) ipHash(ctx context.Layer4Context) *Server { + sum32 := int(hashtool.Hash32(ctx.ClientIP())) + return ss.servers[sum32%len(ss.servers)] +} diff --git a/pkg/filter/proxy/masterslavereader.go b/pkg/filter/proxy/masterslavereader.go index 06d235df4f..f958030a45 100644 --- a/pkg/filter/proxy/masterslavereader.go +++ b/pkg/filter/proxy/masterslavereader.go @@ -25,7 +25,7 @@ import ( type ( // masterSlaveReader reads bytes to master, // and synchronize them to slave. - // Currently only support one slave. + // Currently, only support one slave. masterSlaveReader struct { masterReader io.Reader slaveReader io.Reader diff --git a/pkg/object/httpserver/httpserver.go b/pkg/object/httpserver/httpserver.go index 5ec34619de..8637a42f1e 100644 --- a/pkg/object/httpserver/httpserver.go +++ b/pkg/object/httpserver/httpserver.go @@ -81,7 +81,7 @@ func (hs *HTTPServer) Inherit(superSpec *supervisor.Spec, previousGeneration sup } } -// Status is the wrapper of runtime's Status. +// Status is the wrapper of runtimes Status. func (hs *HTTPServer) Status() *supervisor.Status { return &supervisor.Status{ ObjectStatus: hs.runtime.Status(), diff --git a/pkg/object/layer4pipeline/layer4pipeline.go b/pkg/object/layer4pipeline/layer4pipeline.go new file mode 100644 index 0000000000..e4a0f56592 --- /dev/null +++ b/pkg/object/layer4pipeline/layer4pipeline.go @@ -0,0 +1,420 @@ +package layer4pipeline + +import ( + "fmt" + "reflect" + "sync" + "time" + + "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/protocol" + "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/stringtool" + "github.com/megaease/easegress/pkg/util/yamltool" +) + +const ( + // Category is the category of Layer4Pipeline. 
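+ // An illustrative Layer4Pipeline object spec (keys follow the Spec struct below;
+ // the Proxy filter and its pool layout live in pkg/filter/layer4proxy; values are
+ // examples only):
+ //
+ //   kind: Layer4Pipeline
+ //   name: tcp-pipeline-demo
+ //   flow:
+ //   - filter: proxy
+ //   filters:
+ //   - name: proxy
+ //     kind: Proxy
+ //     mainPool:
+ //       servers:
+ //       - url: 127.0.0.1:9095
+ //       loadBalance:
+ //         policy: roundRobin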
+ Category = supervisor.CategoryPipeline + + // Kind is the kind of Layer4Pipeline. + Kind = "Layer4Pipeline" + + // LabelEND is the built-in label for jumping of flow. + LabelEND = "END" +) + +func init() { + supervisor.Register(&Layer4Pipeline{}) +} + +type ( + // Layer4Pipeline is Object Layer4Pipeline. + Layer4Pipeline struct { + superSpec *supervisor.Spec + spec *Spec + + muxMapper protocol.MuxMapper + runningFilters []*runningFilter + } + + runningFilter struct { + spec *FilterSpec + jumpIf map[string]string + rootFilter Filter + filter Filter + } + + // Spec describes the Layer4Pipeline. + Spec struct { + Flow []Flow `yaml:"flow" jsonschema:"omitempty"` + Filters []map[string]interface{} `yaml:"filters" jsonschema:"required"` + } + + // Flow controls the flow of pipeline. + Flow struct { + Filter string `yaml:"filter" jsonschema:"required,format=urlname"` + JumpIf map[string]string `yaml:"jumpIf" jsonschema:"omitempty"` + } + + // Status is the status of Layer4Pipeline. + Status struct { + Health string `yaml:"health"` + + Filters map[string]interface{} `yaml:"filters"` + } + + // PipelineContext contains the context of the Layer4Pipeline. + PipelineContext struct { + FilterStats *FilterStat + } + + // FilterStat records the statistics of the running filter. + FilterStat struct { + Name string + Kind string + Result string + Duration time.Duration + Next []*FilterStat + } +) + +func (fs *FilterStat) selfDuration() time.Duration { + d := fs.Duration + for _, s := range fs.Next { + d -= s.Duration + } + return d +} + +// context.TCPContext: *PipelineContext +var runningContexts = sync.Map{} + +func newAndSetPipelineContext(ctx context.Layer4Context) *PipelineContext { + pipeCtx := &PipelineContext{} + runningContexts.Store(ctx, pipeCtx) + return pipeCtx +} + +// GetPipelineContext returns the corresponding PipelineContext of the TCPContext, +// and a bool flag to represent it succeed or not. +func GetPipelineContext(ctx context.Layer4Context) (*PipelineContext, bool) { + value, ok := runningContexts.Load(ctx) + if !ok { + return nil, false + } + + pipeCtx, ok := value.(*PipelineContext) + if !ok { + logger.Errorf("BUG: want *PipelineContext, got %T", value) + return nil, false + } + + return pipeCtx, true +} + +func deletePipelineContext(ctx context.Layer4Context) { + runningContexts.Delete(ctx) +} + +func extractFiltersData(config []byte) interface{} { + var whole map[string]interface{} + yamltool.Unmarshal(config, &whole) + return whole["filters"] +} + +// Validate validates the meta information +func (meta *FilterMetaSpec) Validate() error { + if len(meta.Name) == 0 { + return fmt.Errorf("filter name is required") + } + if len(meta.Kind) == 0 { + return fmt.Errorf("filter kind is required") + } + + if meta.Name == LabelEND { + return fmt.Errorf("can't use %s(built-in label) for filter name", LabelEND) + } + + return nil +} + +// Validate validates Spec. +func (s Spec) Validate() (err error) { + errPrefix := "filters" + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%s: %s", errPrefix, r) + } + }() + + config := yamltool.Marshal(s) + + filtersData := extractFiltersData(config) + if filtersData == nil { + return fmt.Errorf("filters is required") + } + + filterSpecs := make(map[string]*FilterSpec) + for _, filterSpec := range s.Filters { + // NOTE: Nil supervisor is fine in spec validating phrase. 
+ spec, err := NewFilterSpec(filterSpec, nil) + if err != nil { + panic(err) + } + + if _, exists := filterSpecs[spec.Name()]; exists { + panic(fmt.Errorf("conflict name: %s", spec.Name())) + } + filterSpecs[spec.Name()] = spec + } + + errPrefix = "flow" + + filters := make(map[string]struct{}) + for _, f := range s.Flow { + if _, exists := filters[f.Filter]; exists { + panic(fmt.Errorf("repeated filter %s", f.Filter)) + } + } + + labelsValid := map[string]struct{}{LabelEND: {}} + for i := len(s.Flow) - 1; i >= 0; i-- { + f := s.Flow[i] + spec, exists := filterSpecs[f.Filter] + if !exists { + panic(fmt.Errorf("filter %s not found", f.Filter)) + } + expectedResults := spec.RootFilter().Results() + for result, label := range f.JumpIf { + if !stringtool.StrInSlice(result, expectedResults) { + panic(fmt.Errorf("filter %s: result %s is not in %v", + f.Filter, result, expectedResults)) + } + if _, exists := labelsValid[label]; !exists { + panic(fmt.Errorf("filter %s: label %s not found", + f.Filter, label)) + } + } + labelsValid[f.Filter] = struct{}{} + } + return nil +} + +// Category returns the category of Layer4Pipeline. +func (hp *Layer4Pipeline) Category() supervisor.ObjectCategory { + return Category +} + +// Kind returns the kind of Layer4Pipeline. +func (hp *Layer4Pipeline) Kind() string { + return Kind +} + +// DefaultSpec returns the default spec of Layer4Pipeline. +func (hp *Layer4Pipeline) DefaultSpec() interface{} { + return &Spec{} +} + +// Init initializes Layer4Pipeline. +func (hp *Layer4Pipeline) Init(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { + hp.superSpec, hp.spec, hp.muxMapper = superSpec, superSpec.ObjectSpec().(*Spec), muxMapper + + hp.reload(nil /*no previous generation*/) +} + +// Inherit inherits previous generation of Layer4Pipeline. +func (hp *Layer4Pipeline) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper protocol.MuxMapper) { + hp.superSpec, hp.spec, hp.muxMapper = superSpec, superSpec.ObjectSpec().(*Spec), muxMapper + + hp.reload(previousGeneration.(*Layer4Pipeline)) + + // NOTE: It's filters' responsibility to inherit and clean their resources. 
+ // previousGeneration.Close() +} + +func (hp *Layer4Pipeline) reload(previousGeneration *Layer4Pipeline) { + runningFilters := make([]*runningFilter, 0) + if len(hp.spec.Flow) == 0 { + for _, filterSpec := range hp.spec.Filters { + spec, err := NewFilterSpec(filterSpec, hp.superSpec.Super()) + if err != nil { + panic(err) + } + + runningFilters = append(runningFilters, &runningFilter{ + spec: spec, + }) + } + } else { + for _, f := range hp.spec.Flow { + var spec *FilterSpec + for _, filterSpec := range hp.spec.Filters { + var err error + spec, err = NewFilterSpec(filterSpec, hp.superSpec.Super()) + if err != nil { + panic(err) + } + if spec.Name() == f.Filter { + break + } + } + if spec == nil { + panic(fmt.Errorf("flow filter %s not found in filters", f.Filter)) + } + + runningFilters = append(runningFilters, &runningFilter{ + spec: spec, + jumpIf: f.JumpIf, + }) + } + } + + pipelineName := hp.superSpec.Name() + var filterBuffs []context.FilterBuff + for _, runningFilter := range runningFilters { + name, kind := runningFilter.spec.Name(), runningFilter.spec.Kind() + rootFilter, exists := filterRegistry[kind] + if !exists { + panic(fmt.Errorf("kind %s not found", kind)) + } + + var prevInstance Filter + if previousGeneration != nil { + runningFilter := previousGeneration.getRunningFilter(name) + if runningFilter != nil { + prevInstance = runningFilter.filter + } + } + + filter := reflect.New(reflect.TypeOf(rootFilter).Elem()).Interface().(Filter) + runningFilter.spec.meta.Pipeline = pipelineName + if prevInstance == nil { + filter.Init(runningFilter.spec) + } else { + filter.Inherit(runningFilter.spec, prevInstance) + } + + runningFilter.filter, runningFilter.rootFilter = filter, rootFilter + + filterBuffs = append(filterBuffs, context.FilterBuff{ + Name: name, + Buff: []byte(runningFilter.spec.YAMLConfig()), + }) + } + + hp.runningFilters = runningFilters +} + +func (hp *Layer4Pipeline) getNextFilterIndex(index int, result string) int { + // return index + 1 if last filter succeeded + if result == "" { + return index + 1 + } + + // check the jumpIf table of current filter, return its index if the jump + // target is valid and -1 otherwise + filter := hp.runningFilters[index] + if !stringtool.StrInSlice(result, filter.rootFilter.Results()) { + format := "BUG: invalid result %s not in %v" + logger.Errorf(format, result, filter.rootFilter.Results()) + } + + if len(filter.jumpIf) == 0 { + return -1 + } + name, ok := filter.jumpIf[result] + if !ok { + return -1 + } + if name == LabelEND { + return len(hp.runningFilters) + } + + for index++; index < len(hp.runningFilters); index++ { + if hp.runningFilters[index].spec.Name() == name { + return index + } + } + + return -1 +} + +// Handle is the handler to deal with layer4 +func (hp *Layer4Pipeline) Handle(ctx context.Layer4Context) { + pipeCtx := newAndSetPipelineContext(ctx) + defer deletePipelineContext(ctx) + + filterIndex := -1 + filterStat := &FilterStat{} + + handle := func(lastResult string) string { + // Filters are called recursively as a stack, so we need to save current + // state and restore it before return + lastIndex := filterIndex + lastStat := filterStat + defer func() { + filterIndex = lastIndex + filterStat = lastStat + }() + + filterIndex = hp.getNextFilterIndex(filterIndex, lastResult) + if filterIndex == len(hp.runningFilters) { + return "" // reach the end of pipeline + } else if filterIndex == -1 { + return lastResult // an error occurs but no filter can handle it + } + + filter := hp.runningFilters[filterIndex] + name 
:= filter.spec.Name() + filterStat = &FilterStat{Name: name, Kind: filter.spec.Kind()} + + startTime := time.Now() + result := filter.filter.Handle(ctx) + filterStat.Duration = time.Since(startTime) + filterStat.Result = result + + lastStat.Next = append(lastStat.Next, filterStat) + return result + } + + ctx.SetHandlerCaller(handle) + handle("") + + if len(filterStat.Next) > 0 { + pipeCtx.FilterStats = filterStat.Next[0] + } +} + +func (hp *Layer4Pipeline) getRunningFilter(name string) *runningFilter { + for _, filter := range hp.runningFilters { + if filter.spec.Name() == name { + return filter + } + } + + return nil +} + +// Status returns Status generated by Runtime. +func (hp *Layer4Pipeline) Status() *supervisor.Status { + s := &Status{ + Filters: make(map[string]interface{}), + } + + for _, runningFilter := range hp.runningFilters { + s.Filters[runningFilter.spec.Name()] = runningFilter.filter.Status() + } + + return &supervisor.Status{ + ObjectStatus: s, + } +} + +// Close closes Layer4Pipeline. +func (hp *Layer4Pipeline) Close() { + for _, runningFilter := range hp.runningFilters { + runningFilter.filter.Close() + } +} diff --git a/pkg/object/layer4pipeline/registry.go b/pkg/object/layer4pipeline/registry.go new file mode 100644 index 0000000000..883c5aeab1 --- /dev/null +++ b/pkg/object/layer4pipeline/registry.go @@ -0,0 +1,101 @@ +package layer4pipeline + +import ( + "fmt" + "reflect" + + "github.com/megaease/easegress/pkg/context" +) + +type ( + // Filter is the common interface for filters handling HTTP traffic. + Filter interface { + // Kind returns the unique kind name to represent itself. + Kind() string + + // DefaultSpec returns the default spec. + DefaultSpec() interface{} + + // Description returns the description of the filter. + Description() string + + // Results returns all possible results, the normal result + // (i.e. empty string) could not be in it. + Results() []string + + // Init initializes the Filter. + Init(filterSpec *FilterSpec) + + // Inherit also initializes the Filter. + // But it needs to handle the lifecycle of the previous generation. + // So its own responsibility for the filter to inherit and clean the previous generation stuff. + // The http pipeline won't call Close for the previous generation. + Inherit(filterSpec *FilterSpec, previousGeneration Filter) + + // Handle handles one tcp request, all possible results + // need be registered in Results. + Handle(tcpContext context.Layer4Context) (result string) + + // Status returns its runtime status. + // It could return nil. + Status() interface{} + + // Close closes itself. + Close() + } +) + +var filterRegistry = map[string]Filter{} + +// Register registers filter. +func Register(f Filter) { + if f.Kind() == "" { + panic(fmt.Errorf("%T: empty kind", f)) + } + + existedFilter, existed := filterRegistry[f.Kind()] + if existed { + panic(fmt.Errorf("%T and %T got same kind: %s", f, existedFilter, f.Kind())) + } + + // Checking filter type. + filterType := reflect.TypeOf(f) + if filterType.Kind() != reflect.Ptr { + panic(fmt.Errorf("%s: want a pointer, got %s", f.Kind(), filterType.Kind())) + } + if filterType.Elem().Kind() != reflect.Struct { + panic(fmt.Errorf("%s elem: want a struct, got %s", f.Kind(), filterType.Kind())) + } + + // Checking spec type. 
+ specType := reflect.TypeOf(f.DefaultSpec()) + if specType.Kind() != reflect.Ptr { + panic(fmt.Errorf("%s spec: want a pointer, got %s", f.Kind(), specType.Kind())) + } + if specType.Elem().Kind() != reflect.Struct { + panic(fmt.Errorf("%s spec elem: want a struct, got %s", f.Kind(), specType.Elem().Kind())) + } + + // Checking results. + results := make(map[string]struct{}) + for _, result := range f.Results() { + _, exists := results[result] + if exists { + panic(fmt.Errorf("repeated result: %s", result)) + } + results[result] = struct{}{} + } + + filterRegistry[f.Kind()] = f +} + +// GetFilterRegistry get the filter registry. +func GetFilterRegistry() map[string]Filter { + result := map[string]Filter{} + + for kind, f := range filterRegistry { + result[kind] = f + } + + return result +} diff --git a/pkg/object/layer4pipeline/spec.go b/pkg/object/layer4pipeline/spec.go new file mode 100644 index 0000000000..82de417e88 --- /dev/null +++ b/pkg/object/layer4pipeline/spec.go @@ -0,0 +1,124 @@ +package layer4pipeline + +import ( + "fmt" + "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/yamltool" + "github.com/megaease/easegress/pkg/v" +) + +type ( + // FilterSpec is the universal spec for all filters. + FilterSpec struct { + super *supervisor.Supervisor + + rawSpec map[string]interface{} + yamlConfig string + meta *FilterMetaSpec + filterSpec interface{} + rootFilter Filter + } + + // FilterMetaSpec is metadata for all specs. + FilterMetaSpec struct { + Name string `yaml:"name" jsonschema:"required,format=urlname"` + Kind string `yaml:"kind" jsonschema:"required"` + Pipeline string `yaml:"-" jsonschema:"-"` + } +) + +// NewFilterSpec creates a filter spec and validates it. +func NewFilterSpec(originalRawSpec map[string]interface{}, super *supervisor.Supervisor) ( + s *FilterSpec, err error) { + + s = &FilterSpec{super: super} + + defer func() { + if r := recover(); r != nil { + s = nil + err = fmt.Errorf("%v", r) + } else { + err = nil + } + }() + + yamlBuff := yamltool.Marshal(originalRawSpec) + + // Meta part. + meta := &FilterMetaSpec{} + yamltool.Unmarshal(yamlBuff, meta) + verr := v.Validate(meta) + if !verr.Valid() { + panic(verr) + } + + // Filter self part. + rootFilter, exists := filterRegistry[meta.Kind] + if !exists { + panic(fmt.Errorf("kind %s not found", meta.Kind)) + } + filterSpec := rootFilter.DefaultSpec() + yamltool.Unmarshal(yamlBuff, filterSpec) + verr = v.Validate(filterSpec) + if !verr.Valid() { + // TODO: Make the invalid part more accurate. e,g: + // filters: jsonschemaErrs: + // - 'policies.0: name is required' + // to + // filters: jsonschemaErrs: + // - 'rateLimiter.policies.0: name is required' + panic(verr) + } + + // Build final yaml config and raw spec. + var rawSpec map[string]interface{} + filterBuff := yamltool.Marshal(filterSpec) + yamltool.Unmarshal(filterBuff, &rawSpec) + + metaBuff := yamltool.Marshal(meta) + yamltool.Unmarshal(metaBuff, &rawSpec) + + yamlConfig := string(yamltool.Marshal(rawSpec)) + + s.meta = meta + s.filterSpec = filterSpec + s.rawSpec = rawSpec + s.yamlConfig = yamlConfig + s.rootFilter = rootFilter + + return +} + +// Super returns +func (s *FilterSpec) Super() *supervisor.Supervisor { + return s.super +} + +// Name returns name. +func (s *FilterSpec) Name() string { return s.meta.Name } + +// Kind returns kind. +func (s *FilterSpec) Kind() string { return s.meta.Kind } + +// Pipeline returns the name of the pipeline this filter belongs to. 
+func (s *FilterSpec) Pipeline() string { return s.meta.Pipeline } + +// YAMLConfig returns the config in yaml format. +func (s *FilterSpec) YAMLConfig() string { + return s.yamlConfig +} + +// RawSpec returns raw spec in type map[string]interface{}. +func (s *FilterSpec) RawSpec() map[string]interface{} { + return s.rawSpec +} + +// FilterSpec returns the filter spec in its own type. +func (s *FilterSpec) FilterSpec() interface{} { + return s.filterSpec +} + +// RootFilter returns the root filter of the filter spec. +func (s *FilterSpec) RootFilter() Filter { + return s.rootFilter +} diff --git a/pkg/object/tcpserver/pool.go b/pkg/object/tcpserver/pool.go new file mode 100644 index 0000000000..1e129e5661 --- /dev/null +++ b/pkg/object/tcpserver/pool.go @@ -0,0 +1 @@ +package tcpserver diff --git a/pkg/object/tcpserver/runtime.go b/pkg/object/tcpserver/runtime.go new file mode 100644 index 0000000000..057a7973fe --- /dev/null +++ b/pkg/object/tcpserver/runtime.go @@ -0,0 +1,128 @@ +package tcpserver + +import ( + "fmt" + "sync/atomic" + "time" + + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/protocol" + "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/layer4stat" + "github.com/megaease/easegress/pkg/util/limitlistener" +) + +type runtime struct { + superSpec *supervisor.Spec + spec *Spec + startNum uint64 + eventChan chan interface{} + + // status + state atomic.Value // stateType + err atomic.Value // error + + tcpstat *layer4stat.Layer4Stat + limitListener *limitlistener.LimitListener +} + +func (r *runtime) Close() { + done := make(chan struct{}) + r.eventChan <- &eventClose{done: done} + <-done +} + +func newRuntime(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) *runtime { + r := &runtime{ + superSpec: superSpec, + eventChan: make(chan interface{}, 10), + } + + r.setState(stateNil) + r.setError(errNil) + + go r.fsm() + // go r.checkFailed() + + return r +} + +func (r *runtime) setState(state stateType) { + r.state.Store(state) +} + +func (r *runtime) getState() stateType { + return r.state.Load().(stateType) +} + +func (r *runtime) setError(err error) { + if err == nil { + r.err.Store(errNil) + } else { + // NOTE: For type safe. + r.err.Store(fmt.Errorf("%v", err)) + } +} + +func (r *runtime) getError() error { + err := r.err.Load() + if err == nil { + return nil + } + return err.(error) +} + +// FSM is the finite-state-machine for the runtime. +func (r *runtime) fsm() { + for e := range r.eventChan { + switch e := e.(type) { + case *eventCheckFailed: + r.handleEventCheckFailed(e) + case *eventServeFailed: + r.handleEventServeFailed(e) + case *eventReload: + r.handleEventReload(e) + case *eventClose: + r.handleEventClose(e) + // NOTE: We don't close hs.eventChan, + // in case of panic of any other goroutines + // to send event to it later. 
+ return + default: + logger.Errorf("BUG: unknown event: %T\n", e) + } + } +} + +func (r *runtime) handleEventCheckFailed(e *eventCheckFailed) { + +} + +func (r *runtime) handleEventServeFailed(e *eventServeFailed) { + if r.startNum > e.startNum { + return + } + r.setState(stateFailed) + r.setError(e.err) +} + +func (r *runtime) handleEventReload(e *eventReload) { + +} + +func (r *runtime) handleEventClose(e *eventClose) { + +} + +func (r *runtime) checkFailed() { + ticker := time.NewTicker(checkFailedTimeout) + for range ticker.C { + state := r.getState() + if state == stateFailed { + r.eventChan <- &eventCheckFailed{} + } else if state == stateClosed { + ticker.Stop() + return + } + } +} diff --git a/pkg/object/tcpserver/tcpserver.go b/pkg/object/tcpserver/tcpserver.go index e86d8f061f..0b6e1ea03e 100644 --- a/pkg/object/tcpserver/tcpserver.go +++ b/pkg/object/tcpserver/tcpserver.go @@ -1,26 +1,41 @@ package tcpserver import ( + "fmt" + "github.com/megaease/easegress/pkg/graceupdate" "github.com/megaease/easegress/pkg/protocol" - "sync/atomic" + "time" "github.com/megaease/easegress/pkg/supervisor" - "github.com/megaease/easegress/pkg/util/limitlistener" - "github.com/megaease/easegress/pkg/util/tcpstat" + "github.com/megaease/easegress/pkg/util/layer4stat" ) const ( - // Category is the category of HTTPServer. + // Category is the category of TCPServer. Category = supervisor.CategoryTrafficGate // Kind is the kind of HTTPServer. Kind = "TCPServer" + + checkFailedTimeout = 10 * time.Second + + topNum = 10 + + stateNil stateType = "nil" + stateFailed stateType = "failed" + stateRunning stateType = "running" + stateClosed stateType = "closed" ) func init() { supervisor.Register(&TCPServer{}) } +var ( + errNil = fmt.Errorf("") + gnet = graceupdate.Global +) + type ( stateType string @@ -35,17 +50,7 @@ type ( eventClose struct{ done chan struct{} } TCPServer struct { - superSpec *supervisor.Spec - spec *Spec - startNum uint64 - eventChan chan interface{} - - // status - state atomic.Value // stateType - err atomic.Value // error - - tcpstat *tcpstat.TcpStat - limitListener *limitlistener.LimitListener + runtime *runtime } // Status contains all status generated by runtime, for displaying to users. @@ -55,14 +60,16 @@ type ( State stateType `yaml:"state"` Error string `yaml:"error,omitempty"` - *tcpstat.Status + *layer4stat.Status } ) +// Category get object category: supervisor.CategoryTrafficGate func (T *TCPServer) Category() supervisor.ObjectCategory { return Category } +// Kind get object kind: http server func (T *TCPServer) Kind() string { return Kind } @@ -78,8 +85,9 @@ func (T *TCPServer) Status() *supervisor.Status { panic("implement me") } +// Close http server func (T *TCPServer) Close() { - panic("implement me") + T.runtime.Close() } // Init initializes HTTPServer. diff --git a/pkg/supervisor/registry.go b/pkg/supervisor/registry.go index 5d08eaf037..db5deffd48 100644 --- a/pkg/supervisor/registry.go +++ b/pkg/supervisor/registry.go @@ -66,7 +66,7 @@ type ( // Inherit also initializes the Object. // But it needs to handle the lifecycle of the previous generation. - // So it's own responsibility for the object to inherit and clean the previous generation stuff. + // So its own responsibility for the object to inherit and clean the previous generation stuff. // The supervisor won't call Close for the previous generation. 
Inherit(superSpec *Spec, previousGeneration Object, muxMapper protocol.MuxMapper) } diff --git a/pkg/util/tcpstat/tcpstat.go b/pkg/util/layer4stat/layer4stat.go similarity index 86% rename from pkg/util/tcpstat/tcpstat.go rename to pkg/util/layer4stat/layer4stat.go index 9d05af0f03..c33da5fc11 100644 --- a/pkg/util/tcpstat/tcpstat.go +++ b/pkg/util/layer4stat/layer4stat.go @@ -1,4 +1,4 @@ -package tcpstat +package layer4stat import ( "sync" @@ -8,8 +8,8 @@ import ( ) type ( - // TcpStat is the statistics tool for TCP traffic. - TcpStat struct { + // Layer4Stat is the statistics tool for TCP traffic. + Layer4Stat struct { mutex sync.Mutex count uint64 // for tcp connection @@ -71,3 +71,11 @@ type ( Codes map[int]uint64 `yaml:"codes"` } ) + +func (s *Layer4Stat) Status() *Status { + panic("implement me") +} + +func New() *Layer4Stat { + panic("implement me") +} From b689c5e15390986611d5e5feabccecef562094fc Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 3 Sep 2021 23:29:09 +0800 Subject: [PATCH 03/99] [tcpproxy] raw tcp proxy model --- pkg/filter/layer4proxy/pool.go | 55 ++++++++++++++++++++------------ pkg/filter/layer4proxy/server.go | 32 +++++++++++++++---- pkg/option/option.go | 2 +- 3 files changed, 61 insertions(+), 28 deletions(-) diff --git a/pkg/filter/layer4proxy/pool.go b/pkg/filter/layer4proxy/pool.go index 9937b81d55..ed9745f1b4 100644 --- a/pkg/filter/layer4proxy/pool.go +++ b/pkg/filter/layer4proxy/pool.go @@ -4,15 +4,19 @@ import ( "fmt" "github.com/google/martian/log" "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/supervisor" "github.com/megaease/easegress/pkg/util/layer4stat" "github.com/megaease/easegress/pkg/util/memorycache" "github.com/megaease/easegress/pkg/util/stringtool" "io" "net" + "time" ) type ( + protocol string + pool struct { spec *PoolSpec @@ -26,6 +30,7 @@ type ( // PoolSpec describes a pool of servers. 
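+ // The pool now also carries the transport protocol it dials (e.g. `protocol: tcp`);
+ // only tcp is wired up so far, udp address checking is left as a TODO in server.go.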
PoolSpec struct { + Protocol protocol `yaml:"protocol" jsonschema:"required" ` SpanName string `yaml:"spanName" jsonschema:"omitempty"` ServersTags []string `yaml:"serversTags" jsonschema:"omitempty,uniqueItems=true"` Servers []*Server `yaml:"servers" jsonschema:"omitempty"` @@ -92,7 +97,7 @@ func (p *pool) status() *PoolStatus { return s } -func (p *pool) handle(ctx context.Layer4Context, reqBody io.Reader) string { +func (p *pool) handle(ctx context.Layer4Context, clientConn *net.TCPConn) string { addTag := func(subPrefix, msg string) { tag := stringtool.Cat(p.tagPrefix, "#", subPrefix, ": ", msg) ctx.Lock() @@ -105,39 +110,49 @@ func (p *pool) handle(ctx context.Layer4Context, reqBody io.Reader) string { addTag("serverErr", err.Error()) return resultInternalError } - addTag("addr", server.Address) + addTag("addr", server.HostPort) - addr, err := net.ResolveTCPAddr("tcp", server.Address) + rawConn, err := net.DialTimeout("tcp", server.HostPort, 1000*time.Millisecond) if err != nil { - log.Errorf("%v", err) + log.Errorf("dial tcp for addr: % failed, err: %v", server.HostPort, err) } - conn, err := net.DialTCP("tcp", nil, addr) + backendConn := rawConn.(*net.TCPConn) + + defer func(backendConn *net.TCPConn) { + closeErr := backendConn.Close() + if closeErr != nil { + logger.Warnf("close backend conn for %v failed, err: %v", server.HostPort, err) + } + }(backendConn) + + errChan := make(chan error) + go p.connCopy(backendConn, clientConn, errChan) + go p.connCopy(clientConn, backendConn, errChan) + + err = <-errChan if err != nil { - log.Errorf("%v", err) + logger.Errorf("Error during connection: %v", err) } - conn.SetKeepAlive(true) // TODO need to export tcp server config in layer4Context + err = <-errChan // TODO export tcp config for backend conn, watch client/backend error ctx.Lock() defer ctx.Unlock() // NOTE: The code below can't use addTag and setStatusCode in case of deadlock. - go func() { - // TODO do tcp proxy - }() - - //go func() { - // // NOTE: Need to be read to completion and closed. - // // Reference: https://golang.org/pkg/net/http/#Response - // // And we do NOT do statistics of duration and respSize - // // for it, because we can't wait for it to finish. - // defer resp.Body.Close() - // io.Copy(ioutil.Discard, resp.Body) - //}() - return "" } func (p *pool) close() { p.servers.close() } + +func (p *pool) connCopy(dst *net.TCPConn, src *net.TCPConn, errCh chan error) { + _, err := io.Copy(dst, src) + errCh <- err + + errClose := dst.CloseWrite() + if errClose != nil { + logger.Debugf("Error while terminating connection: %v", errClose) + } +} diff --git a/pkg/filter/layer4proxy/server.go b/pkg/filter/layer4proxy/server.go index 46bae040cd..983fdc4043 100644 --- a/pkg/filter/layer4proxy/server.go +++ b/pkg/filter/layer4proxy/server.go @@ -3,6 +3,7 @@ package layer4proxy import ( "fmt" "math/rand" + "net" "strconv" "sync" "sync/atomic" @@ -50,9 +51,10 @@ type ( // Server is proxy server. Server struct { - Address string `yaml:"url" jsonschema:"required,format=hostport"` - Tags []string `yaml:"tags" jsonschema:"omitempty,uniqueItems=true"` - Weight int `yaml:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` + Address *net.TCPAddr + HostPort string `yaml:"HostPort" jsonschema:"required,format=hostport"` + Tags []string `yaml:"tags" jsonschema:"omitempty,uniqueItems=true"` + Weight int `yaml:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` } // LoadBalance is load balance for multiple servers. 
@@ -61,8 +63,24 @@ type ( } ) +func (s *servers) Validated() error { + if s.poolSpec.Protocol == "tcp" { + for _, server := range s.static.servers { + if addr, err := net.ResolveTCPAddr("tcp", server.HostPort); err != nil { + logger.Errorf("resolve tcp addr failed, host port: %v, %v", server.HostPort, err) + return err + } else { + server.Address = addr + } + } + } + + // TODO check udp address + return nil +} + func (s *Server) String() string { - return fmt.Sprintf("%s,%v,%d", s.Address, s.Tags, s.Weight) + return fmt.Sprintf("%s,%v,%d", s.HostPort, s.Tags, s.Weight) } func newServers(super *supervisor.Supervisor, poolSpec *PoolSpec) *servers { @@ -121,9 +139,9 @@ func (s *servers) useService(serviceInstanceSpecs map[string]*serviceregistry.Se var servers []*Server for _, instance := range serviceInstanceSpecs { servers = append(servers, &Server{ - Address: instance.Address + ":" + strconv.Itoa(int(instance.Port)), - Tags: instance.Tags, - Weight: instance.Weight, + HostPort: instance.Address + ":" + strconv.Itoa(int(instance.Port)), + Tags: instance.Tags, + Weight: instance.Weight, }) } if len(servers) == 0 { diff --git a/pkg/option/option.go b/pkg/option/option.go index 26997f728a..854de0b0e5 100644 --- a/pkg/option/option.go +++ b/pkg/option/option.go @@ -108,7 +108,7 @@ func New() *Options { opt.flags.StringSliceVar(&opt.ClusterAdvertiseClientURLs, "cluster-advertise-client-urls", []string{"http://localhost:2379"}, "List of this member’s client URLs to advertise to the rest of the cluster.") opt.flags.StringSliceVar(&opt.ClusterInitialAdvertisePeerURLs, "cluster-initial-advertise-peer-urls", []string{"http://localhost:2380"}, "List of this member’s peer URLs to advertise to the rest of the cluster.") opt.flags.StringSliceVar(&opt.ClusterJoinURLs, "cluster-join-urls", nil, "List of URLs to join, when the first url is the same with any one of cluster-initial-advertise-peer-urls, it means to join itself, and this config will be treated empty.") - opt.flags.StringVar(&opt.APIAddr, "api-addr", "localhost:2381", "Address([host]:port) to listen on for administration traffic.") + opt.flags.StringVar(&opt.APIAddr, "api-addr", "localhost:2381", "HostPort([host]:port) to listen on for administration traffic.") opt.flags.BoolVar(&opt.Debug, "debug", false, "Flag to set lowest log level from INFO downgrade DEBUG.") opt.flags.StringSliceVar(&opt.InitialObjectConfigFiles, "initial-object-config-files", nil, "List of configuration files for initial objects, these objects will be created at startup if not already exist.") From 1885b163edc8b1da29c7f8f159deaf355f5d6099 Mon Sep 17 00:00:00 2001 From: "jinxiaodong@cmii.chinamobile.com" <1990ziyou> Date: Sat, 4 Sep 2021 20:39:15 +0800 Subject: [PATCH 04/99] [tcpproxy] add some function for tcp proxy model --- pkg/context/httpcontext.go | 33 ++++++++++ pkg/filter/layer4proxy/pool.go | 13 ++-- pkg/object/httpserver/runtime.go | 2 +- pkg/object/tcpserver/{pool.go => mux.go} | 0 pkg/object/tcpserver/runtime.go | 79 +++++++++++++++++++++++- pkg/object/tcpserver/spec.go | 5 +- 6 files changed, 123 insertions(+), 9 deletions(-) rename pkg/object/tcpserver/{pool.go => mux.go} (100%) diff --git a/pkg/context/httpcontext.go b/pkg/context/httpcontext.go index f6aabe0be0..45e8cda519 100644 --- a/pkg/context/httpcontext.go +++ b/pkg/context/httpcontext.go @@ -21,6 +21,7 @@ import ( stdcontext "context" "fmt" "io" + "net" "net/http" "runtime/debug" "strings" @@ -54,6 +55,8 @@ type ( Cancelled() bool ClientDisconnected() bool + ClientConn() *net.TCPConn + Duration() 
time.Duration // For log, sample, etc. OnFinish(func()) // For setting final client statistics, etc. AddTag(tag string) // For debug, log, etc. @@ -180,6 +183,32 @@ type ( cancelFunc stdcontext.CancelFunc err error } + + tcpContext struct { + mutex sync.Mutex + + startTime *time.Time + endTime *time.Time + + tags []string + + clientConn *net.TCPConn + backendConn *net.TCPConn + stdctx stdcontext.Context + err error + } + + udpContext struct { + mutex sync.Mutex + + startTime *time.Time + endTime *time.Time + + tags []string + + stdctx stdcontext.Context + err error + } ) // New creates an HTTPContext. @@ -365,3 +394,7 @@ func (ctx *httpContext) SaveReqToTemplate(filterName string) error { func (ctx *httpContext) SaveRspToTemplate(filterName string) error { return ctx.ht.SaveResponse(filterName, ctx) } + +func (ctx *tcpContext) saveBackendConn(conn *net.TCPConn) { + ctx.backendConn = conn +} diff --git a/pkg/filter/layer4proxy/pool.go b/pkg/filter/layer4proxy/pool.go index ed9745f1b4..810dfb3735 100644 --- a/pkg/filter/layer4proxy/pool.go +++ b/pkg/filter/layer4proxy/pool.go @@ -1,6 +1,7 @@ package layer4proxy import ( + "bufio" "fmt" "github.com/google/martian/log" "github.com/megaease/easegress/pkg/context" @@ -97,7 +98,8 @@ func (p *pool) status() *PoolStatus { return s } -func (p *pool) handle(ctx context.Layer4Context, clientConn *net.TCPConn) string { +func (p *pool) handle(ctx context.Layer4Context) string { + addTag := func(subPrefix, msg string) { tag := stringtool.Cat(p.tagPrefix, "#", subPrefix, ": ", msg) ctx.Lock() @@ -126,8 +128,8 @@ func (p *pool) handle(ctx context.Layer4Context, clientConn *net.TCPConn) string }(backendConn) errChan := make(chan error) - go p.connCopy(backendConn, clientConn, errChan) - go p.connCopy(clientConn, backendConn, errChan) + go p.connCopy(backendConn, ctx.ClientConn(), errChan) + go p.connCopy(ctx.ClientConn(), backendConn, errChan) err = <-errChan if err != nil { @@ -148,7 +150,10 @@ func (p *pool) close() { } func (p *pool) connCopy(dst *net.TCPConn, src *net.TCPConn, errCh chan error) { - _, err := io.Copy(dst, src) + writer := bufio.NewWriter(dst) + reader := bufio.NewReader(src) + _, err := io.Copy(writer, reader) + _ = writer.Flush() // need flush bytes in buffer errCh <- err errClose := dst.CloseWrite() diff --git a/pkg/object/httpserver/runtime.go b/pkg/object/httpserver/runtime.go index 45bb39b3b2..cf75595f1a 100644 --- a/pkg/object/httpserver/runtime.go +++ b/pkg/object/httpserver/runtime.go @@ -163,7 +163,7 @@ func (r *runtime) reload(nextSuperSpec *supervisor.Spec, muxMapper protocol.MuxM nextSpec := nextSuperSpec.ObjectSpec().(*Spec) - // r.limitListener does not created just after the process started and the config load for the first time. + // r.limitListener does not create just after the process started and the config load for the first time. 
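+	// In other words, it is still nil on the first reload, before the server has been started,
+	// so check for nil before applying the new connection limit.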
if nextSpec != nil && r.limitListener != nil { r.limitListener.SetMaxConnection(nextSpec.MaxConnections) } diff --git a/pkg/object/tcpserver/pool.go b/pkg/object/tcpserver/mux.go similarity index 100% rename from pkg/object/tcpserver/pool.go rename to pkg/object/tcpserver/mux.go diff --git a/pkg/object/tcpserver/runtime.go b/pkg/object/tcpserver/runtime.go index 057a7973fe..3795d50e99 100644 --- a/pkg/object/tcpserver/runtime.go +++ b/pkg/object/tcpserver/runtime.go @@ -2,6 +2,8 @@ package tcpserver import ( "fmt" + "net" + "runtime/debug" "sync/atomic" "time" @@ -16,7 +18,7 @@ type runtime struct { superSpec *supervisor.Spec spec *Spec startNum uint64 - eventChan chan interface{} + eventChan chan interface{} // receive traffic controller event // status state atomic.Value // stateType @@ -111,7 +113,8 @@ func (r *runtime) handleEventReload(e *eventReload) { } func (r *runtime) handleEventClose(e *eventClose) { - + r.closeServer() + close(e.done) } func (r *runtime) checkFailed() { @@ -126,3 +129,75 @@ func (r *runtime) checkFailed() { } } } + +func (r *runtime) startServer() { + + listener, err := gnet.Listen("tcp", fmt.Sprintf("%s:%d", r.spec.IP, r.spec.Port)) + if err != nil { + r.setState(stateFailed) + r.setError(err) + logger.Errorf("listen tcp conn for %s:%d failed, err: %v", r.spec.IP, r.spec.Port, err) + + _ = listener.Close() + r.eventChan <- &eventServeFailed{ + err: err, + startNum: r.startNum, + } + return + } + + r.startNum++ + r.setState(stateRunning) + r.setError(nil) + + limitListener := limitlistener.NewLimitListener(listener, r.spec.MaxConnections) + r.limitListener = limitListener + go r.runTCPProxyServer() +} + +// runTCPProxyServer bind to specific address, accept tcp conn +func (r *runtime) runTCPProxyServer() { + + go func() { + defer func() { + if e := recover(); e != nil { + logger.Errorf("listen tcp for %s:%d crashed, trace: %s", r.spec.IP, r.spec.Port, string(debug.Stack())) + } + }() + + for { + var netConn, err = (*r.limitListener).Accept() + conn := netConn.(*net.TCPConn) + if err == nil { + go func() { + defer func() { + if e := recover(); e != nil { + logger.Errorf("tcp conn handler for %s:%d crashed, trace: %s", r.spec.IP, + r.spec.Port, string(debug.Stack())) + } + }() + + r.setTcpConf(conn) + //fn(conn) + }() + } else { + // only record accept error, didn't close listener + logger.Errorf("tcp conn handler for %s:%d crashed, trace: %s", r.spec.IP, + r.spec.Port, string(debug.Stack())) + break + } + } + }() +} + +func (r *runtime) setTcpConf(conn *net.TCPConn) { + _ = conn.SetKeepAlive(r.spec.KeepAlive) + _ = conn.SetNoDelay(r.spec.TcpNodelay) + _ = conn.SetReadBuffer(r.spec.RecvBuf) + _ = conn.SetWriteBuffer(r.spec.SendBuf) + // TODO set deadline for tpc connection +} + +func (r *runtime) closeServer() { + _ = r.limitListener.Close() +} diff --git a/pkg/object/tcpserver/spec.go b/pkg/object/tcpserver/spec.go index 5defc15510..7719b21b22 100644 --- a/pkg/object/tcpserver/spec.go +++ b/pkg/object/tcpserver/spec.go @@ -10,13 +10,14 @@ import ( type ( // Spec describes the TcpServer. Spec struct { + IP string `yaml:"ip" jsonschema:"required,minimum=1"` Port uint16 `yaml:"port" jsonschema:"required,minimum=1"` MaxConnections uint32 `yaml:"maxConnections" jsonschema:"omitempty,minimum=1"` // By default, backlog is set to -1 on FreeBSD, DragonFly BSD, and macOS, and to 511 on other platforms. 
Backlog int32 `yaml:"backlog" jsonschema:"omitempty,minimum=-1"` - SendBuf uint32 `yaml:"sendBuf" jsonschema:"omitempty"` - RecvBuf uint32 `yaml:"recvBuf" jsonschema:"omitempty"` + SendBuf int `yaml:"sendBuf" jsonschema:"omitempty"` + RecvBuf int `yaml:"recvBuf" jsonschema:"omitempty"` Reuseport bool `yaml:"reuseport" jsonschema:"omitempty"` KeepAlive bool `yaml:"keepAlive" jsonschema:"required"` TcpNodelay bool `yaml:"tcpNodelay" jsonschema:"omitempty"` From 05ebe9e17852ca1416bb4f0b276e0e098d183032 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 7 Sep 2021 14:53:46 +0800 Subject: [PATCH 05/99] [util] add simple io buffer pool --- pkg/util/iobufferpool/iobufferpool.go | 60 +++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 pkg/util/iobufferpool/iobufferpool.go diff --git a/pkg/util/iobufferpool/iobufferpool.go b/pkg/util/iobufferpool/iobufferpool.go new file mode 100644 index 0000000000..bd7d3d2602 --- /dev/null +++ b/pkg/util/iobufferpool/iobufferpool.go @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package iobufferpool + +import ( + "bytes" + "sync" +) + +var ioBufferPool IOBufferPool + +// IOBufferPool io buffer pool, especially use for udp packet +type IOBufferPool struct { + pool sync.Pool +} + +func (p *IOBufferPool) take() (buf *bytes.Buffer) { + v := p.pool.Get() + if v == nil { + buf = bytes.NewBuffer(nil) + } else { + buf = v.(*bytes.Buffer) + } + return +} + +func (p *IOBufferPool) give(buf *bytes.Buffer) { + buf.Truncate(0) + p.pool.Put(buf) +} + +// GetIoBuffer returns IoBuffer from pool +func GetIoBuffer() *bytes.Buffer { + return ioBufferPool.take() +} + +func NewIOBuffer() *bytes.Buffer { + return GetIoBuffer() +} + +// PutIoBuffer returns IoBuffer to pool +func PutIoBuffer(buf *bytes.Buffer) error { + ioBufferPool.give(buf) + return nil +} From 831d246a89bad80c697fca2e285187327efc22ca Mon Sep 17 00:00:00 2001 From: jxd134 Date: Wed, 8 Sep 2021 14:15:11 +0800 Subject: [PATCH 06/99] [util] add timerpool(copy from nats-io/nats) --- pkg/util/timerpool/timerpool.go | 58 +++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 pkg/util/timerpool/timerpool.go diff --git a/pkg/util/timerpool/timerpool.go b/pkg/util/timerpool/timerpool.go new file mode 100644 index 0000000000..d2e07f5554 --- /dev/null +++ b/pkg/util/timerpool/timerpool.go @@ -0,0 +1,58 @@ +// Copyright 2017-2018 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package timerpool + +import ( + "sync" + "time" +) + +// copy from https://github.com/nats-io/nats.go/blob/main/timer.go + +// global pool of *time.Timer's. can be used by multiple goroutines concurrently. +var globalTimerPool timerPool + +// timerPool provides GC-able pooling of *time.Timer's. +// can be used by multiple goroutines concurrently. +type timerPool struct { + p sync.Pool +} + +// Get returns a timer that completes after the given duration. +func (tp *timerPool) Get(d time.Duration) *time.Timer { + if t, _ := tp.p.Get().(*time.Timer); t != nil { + t.Reset(d) + return t + } + + return time.NewTimer(d) +} + +// Put pools the given timer. +// +// There is no need to call t.Stop() before calling Put. +// +// Put will try to stop the timer before pooling. If the +// given timer already expired, Put will read the unreceived +// value if there is one. +func (tp *timerPool) Put(t *time.Timer) { + if !t.Stop() { + select { + case <-t.C: + default: + } + } + + tp.p.Put(t) +} From 98ea2f53f821dde27196d565a7206b2ac90976f3 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 9 Sep 2021 10:03:56 +0800 Subject: [PATCH 07/99] [pipeline] layer4pipeline add license --- pkg/object/layer4pipeline/layer4pipeline.go | 40 +++++++++++++++++++++ pkg/object/layer4pipeline/registry.go | 17 +++++++++ pkg/object/layer4pipeline/spec.go | 18 ++++++++++ 3 files changed, 75 insertions(+) diff --git a/pkg/object/layer4pipeline/layer4pipeline.go b/pkg/object/layer4pipeline/layer4pipeline.go index e4a0f56592..522e82fced 100644 --- a/pkg/object/layer4pipeline/layer4pipeline.go +++ b/pkg/object/layer4pipeline/layer4pipeline.go @@ -1,6 +1,7 @@ package layer4pipeline import ( + "bytes" "fmt" "reflect" "sync" @@ -88,6 +89,45 @@ func (fs *FilterStat) selfDuration() time.Duration { return d } +func (ctx *PipelineContext) log() string { + if ctx.FilterStats == nil { + return "" + } + + var buf bytes.Buffer + var fn func(stat *FilterStat) + + fn = func(stat *FilterStat) { + buf.WriteString(stat.Name) + buf.WriteByte('(') + buf.WriteString(stat.Result) + if stat.Result != "" { + buf.WriteByte(',') + } + buf.WriteString(stat.selfDuration().String()) + buf.WriteByte(')') + if len(stat.Next) == 0 { + return + } + buf.WriteString("->") + if len(stat.Next) > 1 { + buf.WriteByte('[') + } + for i, s := range stat.Next { + if i > 0 { + buf.WriteByte(',') + } + fn(s) + } + if len(stat.Next) > 1 { + buf.WriteByte(']') + } + } + + fn(ctx.FilterStats) + return buf.String() +} + // context.TCPContext: *PipelineContext var runningContexts = sync.Map{} diff --git a/pkg/object/layer4pipeline/registry.go b/pkg/object/layer4pipeline/registry.go index 883c5aeab1..3af009acd9 100644 --- a/pkg/object/layer4pipeline/registry.go +++ b/pkg/object/layer4pipeline/registry.go @@ -1,3 +1,20 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package layer4pipeline import ( diff --git a/pkg/object/layer4pipeline/spec.go b/pkg/object/layer4pipeline/spec.go index 82de417e88..681b9c3c6d 100644 --- a/pkg/object/layer4pipeline/spec.go +++ b/pkg/object/layer4pipeline/spec.go @@ -1,7 +1,25 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package layer4pipeline import ( "fmt" + "github.com/megaease/easegress/pkg/supervisor" "github.com/megaease/easegress/pkg/util/yamltool" "github.com/megaease/easegress/pkg/v" From b5de9a0ad6b7e6266bc7a9430e9a18467c845818 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 9 Sep 2021 17:56:05 +0800 Subject: [PATCH 08/99] [layer4proxy] add read loop and write loop --- pkg/context/handlercaller.go | 6 + pkg/context/httpcontext.go | 64 -- pkg/context/layer4context.go | 220 +++++++ pkg/filter/layer4proxy/masterslavereader.go | 103 --- pkg/filter/layer4proxy/proxy.go | 99 --- pkg/object/layer4rawserver/connection.go | 466 ++++++++++++++ pkg/object/layer4rawserver/constants.go | 47 ++ pkg/object/layer4rawserver/layer4server.go | 94 +++ pkg/object/layer4rawserver/listener.go | 185 ++++++ pkg/object/layer4rawserver/mux.go | 134 ++++ pkg/object/layer4rawserver/runtime.go | 258 ++++++++ pkg/object/layer4rawserver/spec.go | 49 ++ .../layer4rawserver/upstream}/pool.go | 105 +--- pkg/object/layer4rawserver/upstream/proxy.go | 35 ++ .../layer4rawserver/upstream}/server.go | 29 +- pkg/object/tcpserver/context.go | 15 - pkg/object/tcpserver/mux.go | 1 - pkg/object/tcpserver/runtime.go | 203 ------ pkg/object/tcpserver/spec.go | 48 -- pkg/object/tcpserver/tcpserver.go | 101 --- pkg/protocol/layer4.go | 33 + pkg/supervisor/registry.go | 9 + pkg/util/connectionwrapper/connection.go | 466 ++++++++++++++ pkg/util/connectionwrapper/constant.go | 35 ++ pkg/util/gracenet/gracenet.go | 5 + pkg/util/iobufferpool/buffer.go | 134 ++++ pkg/util/iobufferpool/bytebuffer_pool.go | 153 +++++ pkg/util/iobufferpool/iobuffer.go | 591 ++++++++++++++++++ pkg/util/iobufferpool/iobuffer_pool.go | 76 +++ pkg/util/iobufferpool/iobufferpool.go | 60 -- pkg/util/limitlistener/limitlistener.go | 4 +- pkg/util/timerpool/timerpool.go | 8 +- 32 files changed, 3058 insertions(+), 778 deletions(-) create mode 100644 pkg/context/handlercaller.go create mode 100644 pkg/context/layer4context.go delete mode 100644 pkg/filter/layer4proxy/masterslavereader.go delete mode 100644 pkg/filter/layer4proxy/proxy.go create mode 100644 pkg/object/layer4rawserver/connection.go create mode 100644 pkg/object/layer4rawserver/constants.go create mode 100644 pkg/object/layer4rawserver/layer4server.go create mode 100644 pkg/object/layer4rawserver/listener.go create mode 100644 pkg/object/layer4rawserver/mux.go create mode 100644 pkg/object/layer4rawserver/runtime.go create mode 100644 pkg/object/layer4rawserver/spec.go rename pkg/{filter/layer4proxy => object/layer4rawserver/upstream}/pool.go (52%) create mode 100644 pkg/object/layer4rawserver/upstream/proxy.go rename pkg/{filter/layer4proxy 
=> object/layer4rawserver/upstream}/server.go (88%) delete mode 100644 pkg/object/tcpserver/context.go delete mode 100644 pkg/object/tcpserver/mux.go delete mode 100644 pkg/object/tcpserver/runtime.go delete mode 100644 pkg/object/tcpserver/spec.go delete mode 100644 pkg/object/tcpserver/tcpserver.go create mode 100644 pkg/protocol/layer4.go create mode 100644 pkg/util/connectionwrapper/connection.go create mode 100644 pkg/util/connectionwrapper/constant.go create mode 100644 pkg/util/gracenet/gracenet.go create mode 100644 pkg/util/iobufferpool/buffer.go create mode 100644 pkg/util/iobufferpool/bytebuffer_pool.go create mode 100644 pkg/util/iobufferpool/iobuffer.go create mode 100644 pkg/util/iobufferpool/iobuffer_pool.go delete mode 100644 pkg/util/iobufferpool/iobufferpool.go diff --git a/pkg/context/handlercaller.go b/pkg/context/handlercaller.go new file mode 100644 index 0000000000..ae78256b0d --- /dev/null +++ b/pkg/context/handlercaller.go @@ -0,0 +1,6 @@ +package context + +type ( + // HandlerCaller is a helper function to call the handler + HandlerCaller func(lastResult string) string +) diff --git a/pkg/context/httpcontext.go b/pkg/context/httpcontext.go index 45e8cda519..79e5286785 100644 --- a/pkg/context/httpcontext.go +++ b/pkg/context/httpcontext.go @@ -21,7 +21,6 @@ import ( stdcontext "context" "fmt" "io" - "net" "net/http" "runtime/debug" "strings" @@ -40,39 +39,6 @@ import ( ) type ( - // HandlerCaller is a helper function to call the handler - HandlerCaller func(lastResult string) string - - // Layer4Context is all context of an TCP processing. - // It is not goroutine-safe, callers must use Lock/Unlock - // to protect it by themselves. - Layer4Context interface { - Lock() - Unlock() - - stdcontext.Context - Cancel(err error) - Cancelled() bool - ClientDisconnected() bool - - ClientConn() *net.TCPConn - - Duration() time.Duration // For log, sample, etc. - OnFinish(func()) // For setting final client statistics, etc. - AddTag(tag string) // For debug, log, etc. - - Finish() - - Host() string - SetHost(host string) - Port() uint16 - SetPort(port uint16) - - ClientIP() string - - CallNextHandler(lastResult string) string - SetHandlerCaller(caller HandlerCaller) - } // HTTPContext is all context of an HTTP processing. // It is not goroutine-safe, callers must use Lock/Unlock @@ -183,32 +149,6 @@ type ( cancelFunc stdcontext.CancelFunc err error } - - tcpContext struct { - mutex sync.Mutex - - startTime *time.Time - endTime *time.Time - - tags []string - - clientConn *net.TCPConn - backendConn *net.TCPConn - stdctx stdcontext.Context - err error - } - - udpContext struct { - mutex sync.Mutex - - startTime *time.Time - endTime *time.Time - - tags []string - - stdctx stdcontext.Context - err error - } ) // New creates an HTTPContext. @@ -394,7 +334,3 @@ func (ctx *httpContext) SaveReqToTemplate(filterName string) error { func (ctx *httpContext) SaveRspToTemplate(filterName string) error { return ctx.ht.SaveResponse(filterName, ctx) } - -func (ctx *tcpContext) saveBackendConn(conn *net.TCPConn) { - ctx.backendConn = conn -} diff --git a/pkg/context/layer4context.go b/pkg/context/layer4context.go new file mode 100644 index 0000000000..505c938e02 --- /dev/null +++ b/pkg/context/layer4context.go @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package context + +import ( + "bytes" + stdcontext "context" + "github.com/megaease/easegress/pkg/object/layer4rawserver" + "github.com/megaease/easegress/pkg/util/connectionwrapper" + "net" + "sync" + "time" +) + +type ( + // Layer4Context is all context of an TCP processing. + // It is not goroutine-safe, callers must use Lock/Unlock + // to protect it by themselves. + Layer4Context interface { + Lock() + Unlock() + + Protocol() string + ConnectionArgs() *ConnectionArgs + SetConnectionArgs(args *ConnectionArgs) + LocalAddr() net.Addr + SetLocalAddr(addr net.Addr) + RemoteAddr() net.Addr + SetRemoteAddr(addr net.Addr) + + Stop() + + stdcontext.Context + Cancel(err error) + Cancelled() bool + ClientDisconnected() bool + + ClientConn() *connectionwrapper.Connection + + Duration() time.Duration // For log, sample, etc. + OnFinish(func()) // For setting final client statistics, etc. + AddTag(tag string) // For debug, log, etc. + + Finish() + + CallNextHandler(lastResult string) string + SetHandlerCaller(caller HandlerCaller) + } + + ConnectionArgs struct { + TCPNodelay bool + Linger bool + SendBufSize uint32 + RecvBufSize uint32 + ProxyTimeout uint32 + ProxyReadTimeout int64 // connection read timeout(milliseconds) + ProxyWriteTimeout int64 // connection write timeout(milliseconds) + + startOnce sync.Once // make sure read loop and write loop start only once + } + + layer4Context struct { + mutex sync.Mutex + + protocol string + localAddr net.Addr + remoteAddr net.Addr + clientConn *connectionwrapper.Connection + + connectionArgs *ConnectionArgs + + readBuffer bytes.Buffer + writeBuffers net.Buffers + ioBuffers []bytes.Buffer + writeBufferChan chan *[]bytes.Buffer + stopChan chan struct{} // notify quit read loop and write loop + + startTime *time.Time // connection accept time + endTime *time.Time // connection close time + + caller HandlerCaller + } +) + +// NewLayer4Context creates an Layer4Context. 
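+// It wraps the accepted client connection, records the local/remote addresses and the accept time,
+// and expects Stop to be called when the connection ends.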
+func NewLayer4Context(protocol string, conn *connectionwrapper.Connection, mux *layer4rawserver.Mux) *layer4Context { + + // TODO add mux for mux mapper + + startTime := time.Now() + res := layer4Context{ + protocol: protocol, + clientConn: conn, + localAddr: conn.Conn.LocalAddr(), + remoteAddr: conn.Conn.RemoteAddr(), + + startTime: &startTime, + stopChan: make(chan struct{}), + mutex: sync.Mutex{}, + } + return &res +} + +func (ctx *layer4Context) Protocol() string { + return ctx.protocol +} + +func (ctx *layer4Context) ConnectionArgs() *ConnectionArgs { + return ctx.connectionArgs +} + +func (ctx *layer4Context) SetConnectionArgs(args *ConnectionArgs) { + ctx.connectionArgs = args +} + +func (ctx *layer4Context) LocalAddr() net.Addr { + return ctx.localAddr +} + +func (ctx *layer4Context) SetLocalAddr(localAddr net.Addr) { + ctx.localAddr = localAddr +} + +func (ctx *layer4Context) RemoteAddr() net.Addr { + return ctx.remoteAddr +} + +func (ctx *layer4Context) SetRemoteAddr(addr net.Addr) { + ctx.remoteAddr = addr +} + +func (ctx *layer4Context) Stop() { + endTime := time.Now() + ctx.endTime = &endTime + + // TODO add stat for context +} + +func (ctx *layer4Context) Deadline() (deadline time.Time, ok bool) { + panic("implement me") +} + +func (ctx *layer4Context) Done() <-chan struct{} { + panic("implement me") +} + +func (ctx *layer4Context) Err() error { + panic("implement me") +} + +func (ctx *layer4Context) Value(key interface{}) interface{} { + panic("implement me") +} + +func (ctx *layer4Context) Cancel(err error) { + panic("implement me") +} + +func (ctx *layer4Context) Cancelled() bool { + panic("implement me") +} + +func (ctx *layer4Context) ClientDisconnected() bool { + panic("implement me") +} + +func (ctx *layer4Context) ClientConn() *connectionwrapper.Connection { + return ctx.clientConn +} + +func (ctx *layer4Context) OnFinish(f func()) { + panic("implement me") +} + +func (ctx *layer4Context) AddTag(tag string) { + panic("implement me") +} + +func (ctx *layer4Context) Finish() { + panic("implement me") +} + +func (ctx *layer4Context) Lock() { + ctx.mutex.Lock() +} + +func (ctx *layer4Context) Unlock() { + ctx.mutex.Unlock() +} + +func (ctx *layer4Context) CallNextHandler(lastResult string) string { + return ctx.caller(lastResult) +} + +func (ctx *layer4Context) SetHandlerCaller(caller HandlerCaller) { + ctx.caller = caller +} + +func (ctx *layer4Context) Duration() time.Duration { + if ctx.endTime != nil { + return ctx.endTime.Sub(*ctx.startTime) + } + + return time.Now().Sub(*ctx.startTime) +} diff --git a/pkg/filter/layer4proxy/masterslavereader.go b/pkg/filter/layer4proxy/masterslavereader.go deleted file mode 100644 index bfa6c9fc76..0000000000 --- a/pkg/filter/layer4proxy/masterslavereader.go +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package layer4proxy - -import ( - "bytes" - "io" -) - -type ( - // masterSlaveReader reads bytes to master, - // and synchronize them to slave. - // Currently, only support one slave. - masterSlaveReader struct { - masterReader io.Reader - slaveReader io.Reader - } - - masterReader struct { - r io.Reader - buffChan chan []byte - } - - slaveReader struct { - unreadBuff *bytes.Buffer - buffChan chan []byte - } -) - -func newMasterSlaveReader(r io.Reader) (io.ReadCloser, io.Reader) { - buffChan := make(chan []byte, 10) - mr := &masterReader{ - r: r, - buffChan: buffChan, - } - sr := &slaveReader{ - unreadBuff: bytes.NewBuffer(nil), - buffChan: buffChan, - } - - return mr, sr -} - -func (mr *masterReader) Read(p []byte) (n int, err error) { - buff := bytes.NewBuffer(nil) - tee := io.TeeReader(mr.r, buff) - n, err = tee.Read(p) - - if n != 0 { - mr.buffChan <- buff.Bytes() - } - - if err == io.EOF { - close(mr.buffChan) - } - - return n, err -} - -func (mr *masterReader) Close() error { - if closer, ok := mr.r.(io.ReadCloser); ok { - return closer.Close() - } - - return nil -} - -func (sr *slaveReader) Read(p []byte) (int, error) { - buff, ok := <-sr.buffChan - - if !ok { - return 0, io.EOF - } - - var n int - // NOTE: This if-branch is defensive programming, - // Because the callers of Read of both master and slave - // are the same, so it never happens that len(p) < len(buff). - // else-branch is faster because it is one less copy operation than if-branch. - if sr.unreadBuff.Len() > 0 || len(p) < len(buff) { - sr.unreadBuff.Write(buff) - n, _ = sr.unreadBuff.Read(p) - } else { - n = copy(p, buff) - } - - return n, nil -} diff --git a/pkg/filter/layer4proxy/proxy.go b/pkg/filter/layer4proxy/proxy.go deleted file mode 100644 index c6e4416812..0000000000 --- a/pkg/filter/layer4proxy/proxy.go +++ /dev/null @@ -1,99 +0,0 @@ -package layer4proxy - -import ( - "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/object/layer4pipeline" -) - -const ( - // Kind is the kind of Proxy. - Kind = "Proxy" - - resultFallback = "fallback" - resultInternalError = "internalError" - resultClientError = "clientError" - resultServerError = "serverError" -) - -var results = []string{ - resultFallback, - resultInternalError, - resultClientError, - resultServerError, -} - -func init() { - layer4pipeline.Register(&Proxy{}) -} - -type ( - // Proxy is the filter Proxy. - Proxy struct { - filterSpec *layer4pipeline.FilterSpec - spec *Spec - - mainPool *pool - candidatePools []*pool - mirrorPool *pool - } - - // Spec describes the Proxy. - Spec struct { - MainPool *PoolSpec `yaml:"mainPool" jsonschema:"required"` - CandidatePools []*PoolSpec `yaml:"candidatePools,omitempty" jsonschema:"omitempty"` - MirrorPool *PoolSpec `yaml:"mirrorPool,omitempty" jsonschema:"omitempty"` - } - - // Status is the status of Proxy. 
- Status struct { - MainPool *PoolStatus `yaml:"mainPool"` - CandidatePools []*PoolStatus `yaml:"candidatePools,omitempty"` - MirrorPool *PoolStatus `yaml:"mirrorPool,omitempty"` - } -) - -func (p *Proxy) Kind() string { - return Kind -} - -func (p *Proxy) DefaultSpec() interface{} { - return &Spec{} -} - -func (p *Proxy) Description() string { - return "Proxy sets the proxy of proxy servers" -} - -func (p *Proxy) Results() []string { - panic("implement me") -} - -func (p *Proxy) Init(filterSpec *layer4pipeline.FilterSpec) { - panic("implement me") -} - -func (p *Proxy) Inherit(filterSpec *layer4pipeline.FilterSpec, previousGeneration layer4pipeline.Filter) { - panic("implement me") -} - -func (p *Proxy) Handle(layer4Context context.Layer4Context) (result string) { - panic("implement me") -} - -func (p *Proxy) Status() interface{} { - panic("implement me") -} - -func (p *Proxy) Close() { - p.mainPool.close() - - if p.candidatePools != nil { - for _, v := range p.candidatePools { - v.close() - } - } - - if p.mirrorPool != nil { - p.mirrorPool.close() - } -} diff --git a/pkg/object/layer4rawserver/connection.go b/pkg/object/layer4rawserver/connection.go new file mode 100644 index 0000000000..443c80f79a --- /dev/null +++ b/pkg/object/layer4rawserver/connection.go @@ -0,0 +1,466 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package layer4rawserver + +import ( + "io" + "net" + "reflect" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/util/iobufferpool" + "github.com/megaease/easegress/pkg/util/timerpool" +) + +// ConnectionCloseType represent connection close type +type ConnectionCloseType string + +//Connection close types +const ( + // FlushWrite means write buffer to underlying io then close connection + FlushWrite ConnectionCloseType = "FlushWrite" + // NoFlush means close connection without flushing buffer + NoFlush ConnectionCloseType = "NoFlush" +) + +// ConnectionEvent type +type ConnectionEvent string + +// ConnectionEvent types +const ( + RemoteClose ConnectionEvent = "RemoteClose" + LocalClose ConnectionEvent = "LocalClose" + OnReadErrClose ConnectionEvent = "OnReadErrClose" + OnWriteErrClose ConnectionEvent = "OnWriteErrClose" + OnConnect ConnectionEvent = "OnConnect" + Connected ConnectionEvent = "ConnectedFlag" + ConnectTimeout ConnectionEvent = "ConnectTimeout" + ConnectFailed ConnectionEvent = "ConnectFailed" + OnReadTimeout ConnectionEvent = "OnReadTimeout" + OnWriteTimeout ConnectionEvent = "OnWriteTimeout" +) + +type Connection struct { + net.Conn + + closed uint32 + connected uint32 + startOnce sync.Once + + // readLoop/writeLoop goroutine fields: + internalStopChan chan struct{} + readEnabled bool + readEnabledChan chan bool // if we need to reload read filters, it's better to stop read data before reload filters + + lastBytesSizeRead int64 + lastWriteSizeWrite int64 + + curWriteBufferData []iobufferpool.IoBuffer + readBuffer iobufferpool.IoBuffer + writeBuffers net.Buffers + ioBuffers []iobufferpool.IoBuffer + writeBufferChan chan *[]iobufferpool.IoBuffer +} + +func New(conn net.Conn) *Connection { + return &Connection{ + Conn: conn, + } +} + +func (c *Connection) StartRWLoop(ctx context.Layer4Context) { + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection read loop crashed, local addr: %s, remote addr: %s, err: %+v", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) + } + + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection close due to read loop crashed failed, local addr: %s, remote addr: %s, err: %+v", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) + } + }() + _ = c.Close(NoFlush, LocalClose, ctx) + }() + }() + c.startReadLoop(ctx) + }() + + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection write loop crashed, local addr: %s, remote addr: %s, err: %+v", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) + } + + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection close due to write loop crashed failed, local addr: %s, remote addr: %s, err: %+v", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) + } + }() + _ = c.Close(NoFlush, LocalClose, ctx) + }() + }() + c.startWriteLoop(ctx) + }() +} + +func (c *Connection) startReadLoop(ctx context.Layer4Context) { + for { + select { + case <-c.internalStopChan: + return + case <-c.readEnabledChan: + default: + if c.readEnabled { + err := c.doRead(ctx) + if err != nil { + if te, ok := err.(net.Error); ok && te.Timeout() { + if ctx.Protocol() == "tcp" && c.readBuffer != nil && + c.readBuffer.Len() == 0 && c.readBuffer.Cap() > DefaultBufferReadCapacity { + 
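+					// Read timed out on an idle connection: release the oversized, empty read buffer
+					// and fall back to the default capacity to bound memory held by idle connections.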
c.readBuffer.Free() + c.readBuffer.Alloc(DefaultBufferReadCapacity) + } + continue + } + + if c.lastBytesSizeRead == 0 || err == io.EOF { + logger.Debugf("%s connection write loop closed, local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + } else { + logger.Errorf("%s connection write loop closed, local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + } + + if err == io.EOF { + _ = c.Close(NoFlush, RemoteClose, ctx) + } else { + _ = c.Close(NoFlush, OnReadErrClose, ctx) + } + return + } + } else { + select { + case <-c.readEnabledChan: + case <-time.After(100 * time.Millisecond): + } + } + } + + } +} + +func (c *Connection) startWriteLoop(ctx context.Layer4Context) { + defer func() { + close(c.writeBufferChan) + }() + + var err error + for { + select { + case <-c.internalStopChan: + return + case buf, ok := <-c.writeBufferChan: + if !ok { + return + } + c.appendBuffer(buf) + + QUIT: + for i := 0; i < 10; i++ { + select { + case buf, ok := <-c.writeBufferChan: + if !ok { + return + } + c.appendBuffer(buf) + default: + break QUIT + } + + c.setWriteDeadline(ctx) + _, err = c.doWrite(ctx) + } + } + + if err != nil { + + if err == iobufferpool.EOF { + logger.Debugf("%s connection write loop occur error, local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + c.Close(NoFlush, LocalClose, ctx) + } else { + logger.Errorf("%s connection write loop occur error, local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + } + + if te, ok := err.(net.Error); ok && te.Timeout() { + c.Close(NoFlush, OnWriteTimeout, ctx) + } + + if ctx.Protocol() == "udp" && strings.Contains(err.Error(), "connection refused") { + c.Close(NoFlush, RemoteClose, ctx) + } + //other write errs not close connection, because readbuffer may have unread data, wait for readloop close connection, + + return + } + } +} + +func (c *Connection) appendBuffer(buffers *[]iobufferpool.IoBuffer) { + if buffers == nil { + return + } + for _, buf := range *buffers { + if buf == nil { + continue + } + c.ioBuffers = append(c.ioBuffers, buf) + c.writeBuffers = append(c.writeBuffers, buf.Bytes()) + } +} + +func (c *Connection) doRead(ctx context.Layer4Context) (err error) { + if c.readBuffer == nil { + switch ctx.Protocol() { + case "udp": + // A UDP socket will Read up to the size of the receiving buffer and will discard the rest + c.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) + default: + c.readBuffer = iobufferpool.GetIoBuffer(DefaultBufferReadCapacity) + } + } + + var bytesRead int64 + c.setReadDeadline(ctx) + bytesRead, err = c.readBuffer.ReadOnce(c.Conn) + + if err != nil { + if atomic.LoadUint32(&c.closed) == 1 { + return err + } + + if te, ok := err.(net.Error); ok && te.Timeout() { + // TODO add timeout handle(such as send keepalive msg to active connection) + + if bytesRead == 0 { + return err + } + } else if err != io.EOF { + return err + } + } + + //todo: ReadOnce maybe always return (0, nil) and causes dead loop (hack) + if bytesRead == 0 && err == nil { + err = io.EOF + logger.Errorf("%s connection read maybe always return (0, nil), local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + } + c.lastBytesSizeRead = int64(c.readBuffer.Len()) + return +} + +// Write send recv data(batch mode) to upstream +func (c *Connection) Write(ctx context.Layer4Context, 
buffers ...iobufferpool.IoBuffer) (err error) { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) + err = ErrConnectionHasClosed + } + }() + + // TODO get filters from layer4 pipeline, transform buffers via filters + + select { + case c.writeBufferChan <- &buffers: + return + default: + } + + t := timerpool.Get(DefaultConnTryTimeout) + select { + case c.writeBufferChan <- &buffers: + case <-t.C: + err = ErrWriteBufferChanTimeout + } + timerpool.Put(t) + return +} + +func (c *Connection) setWriteDeadline(ctx context.Layer4Context) { + args := ctx.ConnectionArgs() + if args.ProxyWriteTimeout > 0 { + _ = c.Conn.SetWriteDeadline(time.Now().Add(time.Duration(args.ProxyWriteTimeout) * time.Millisecond)) + } else { + switch ctx.Protocol() { + case "udp": + _ = c.Conn.SetWriteDeadline(time.Now().Add(DefaultUDPIdleTimeout)) + case "tcp": + _ = c.Conn.SetWriteDeadline(time.Now().Add(DefaultConnWriteTimeout)) + } + } +} + +func (c *Connection) setReadDeadline(ctx context.Layer4Context) { + args := ctx.ConnectionArgs() + if args.ProxyWriteTimeout > 0 { + _ = c.Conn.SetReadDeadline(time.Now().Add(time.Duration(args.ProxyReadTimeout) * time.Millisecond)) + } else { + switch ctx.Protocol() { + case "udp": + _ = c.Conn.SetReadDeadline(time.Now().Add(DefaultUDPReadTimeout)) + case "tcp": + _ = c.Conn.SetReadDeadline(time.Now().Add(ConnReadTimeout)) + } + } +} + +// Close handle connection close event +func (c *Connection) Close(ccType ConnectionCloseType, eventType ConnectionEvent, ctx context.Layer4Context) (err error) { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) + err = ErrConnectionHasClosed + } + }() + + if ccType == FlushWrite { + _ = c.Write(ctx, iobufferpool.NewIoBufferEOF()) + return nil + } + + if !atomic.CompareAndSwapUint32(&c.closed, 0, 1) { + return nil + } + + // connection failed in client mode + if c.Conn == nil || reflect.ValueOf(c.Conn).IsNil() { + return nil + } + + // close tcp conn read first + if tconn, ok := c.Conn.(*net.TCPConn); ok { + logger.Errorf("%s connection close read, local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + _ = tconn.CloseRead() + } + + // close conn recv, then notify read/write loop to exit + close(c.internalStopChan) + _ = c.Conn.Close() + + logger.Errorf("%s connection closed, local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + return nil +} + +func (c *Connection) writeBufLen() (bufLen int) { + for _, buf := range c.writeBuffers { + bufLen += len(buf) + } + return +} + +func (c *Connection) doWrite(ctx context.Layer4Context) (interface{}, error) { + bytesSent, err := c.doWriteIO(ctx) + if err != nil && atomic.LoadUint32(&c.closed) == 1 { + return 0, nil + } + + c.lastWriteSizeWrite = int64(c.writeBufLen()) + return bytesSent, err +} + +// +func (c *Connection) doWriteIO(ctx context.Layer4Context) (bytesSent int64, err error) { + buffers := c.writeBuffers + switch ctx.Protocol() { + case "udp": + addr := ctx.RemoteAddr().(*net.UDPAddr) + n := 0 + bytesSent = 0 + for _, buf := range c.ioBuffers { + if c.Conn.RemoteAddr() == nil { + n, err = c.Conn.(*net.UDPConn).WriteToUDP(buf.Bytes(), addr) + } else { + n, err = 
c.Conn.Write(buf.Bytes()) + } + if err != nil { + break + } + bytesSent += int64(n) + } + case "tcp": + bytesSent, err = buffers.WriteTo(c.Conn) + } + + if err != nil { + return bytesSent, err + } + + for i, buf := range c.ioBuffers { + c.ioBuffers[i] = nil + c.writeBuffers[i] = nil + if buf.EOF() { + err = iobufferpool.EOF + } + if e := iobufferpool.PutIoBuffer(buf); e != nil { + logger.Errorf("%s connection give io buffer failed, local addr: %s, remote addr: %s, err: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), err.Error()) + } + } + c.ioBuffers = c.ioBuffers[:0] + c.writeBuffers = c.writeBuffers[:0] + return +} + +func (c *Connection) SetNoDelay(enable bool) { + if c.Conn != nil { + if tconn, ok := c.Conn.(*net.TCPConn); ok { + _ = tconn.SetNoDelay(enable) + } + } +} + +func (c *Connection) ReadEnabled() bool { + return c.readEnabled +} + +func (c *Connection) State() ConnState { + if atomic.LoadUint32(&c.closed) == 1 { + return ConnClosed + } + if atomic.LoadUint32(&c.connected) == 1 { + return ConnActive + } + return ConnInit +} diff --git a/pkg/object/layer4rawserver/constants.go b/pkg/object/layer4rawserver/constants.go new file mode 100644 index 0000000000..5d09208b12 --- /dev/null +++ b/pkg/object/layer4rawserver/constants.go @@ -0,0 +1,47 @@ +package layer4rawserver + +import ( + "errors" + "time" +) + +var ( + ErrConnectionHasClosed = errors.New("connection has closed") + ErrWriteTryLockTimeout = errors.New("write trylock has timeout") + ErrWriteBufferChanTimeout = errors.New("writeBufferChan has timeout") +) + +// Default connection arguments +const ( + DefaultBufferReadCapacity = 1 << 7 + + DefaultConnReadTimeout = 15 * time.Second + DefaultConnWriteTimeout = 15 * time.Second + DefaultConnTryTimeout = 60 * time.Second + DefaultIdleTimeout = 90 * time.Second + DefaultUDPIdleTimeout = 5 * time.Second + DefaultUDPReadTimeout = 1 * time.Second + ConnReadTimeout = 15 * time.Second +) + +// ConnState Connection status +type ConnState int + +// Connection statuses +const ( + ConnInit ConnState = iota + ConnActive + ConnClosed +) + +type ListenerState int + +// listener state +// ListenerActivated means listener is activated, an activated listener can be started or stopped +// ListenerRunning means listener is running, start a running listener will be ignored. +// ListenerStopped means listener is stopped, start a stopped listener without restart flag will be ignored. +const ( + ListenerActivated ListenerState = iota + ListenerRunning + ListenerStopped +) diff --git a/pkg/object/layer4rawserver/layer4server.go b/pkg/object/layer4rawserver/layer4server.go new file mode 100644 index 0000000000..e8ea8628dd --- /dev/null +++ b/pkg/object/layer4rawserver/layer4server.go @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package layer4rawserver + +import ( + "github.com/megaease/easegress/pkg/protocol" + "github.com/megaease/easegress/pkg/supervisor" +) + +const ( + // Category is the category of HTTPServer. + Category = supervisor.CategoryTrafficGate + + // Kind is the kind of HTTPServer. + Kind = "Layer4Server" +) + +func init() { + supervisor.Register(&Layer4Server{}) +} + +type ( + // Layer4Server is Object of tpc/udp server. + Layer4Server struct { + runtime *runtime + } +) + +// Category returns the category of Layer4Server. +func (l4 *Layer4Server) Category() supervisor.ObjectCategory { + return Category +} + +// Kind returns the kind of Layer4Server. +func (l4 *Layer4Server) Kind() string { + return Kind +} + +// DefaultSpec returns the default spec of Layer4Server. +func (l4 *Layer4Server) DefaultSpec() interface{} { + return &Spec{ + BindPort: true, + MaxConnections: 10240, + ProxyConnectTimeout: 15 * 1000, + } +} + +// Init initializes Layer4Server. +func (l4 *Layer4Server) Init(superSpec *supervisor.Spec, muxMapper protocol.Layer4MuxMapper) { + + l4.runtime = newRuntime(superSpec, muxMapper) + + l4.runtime.eventChan <- &eventReload{ + nextSuperSpec: superSpec, + muxMapper: muxMapper, + } +} + +// Inherit inherits previous generation of Layer4Server. +func (l4 *Layer4Server) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper protocol.Layer4MuxMapper) { + l4.runtime = previousGeneration.(*Layer4Server).runtime + + l4.runtime.eventChan <- &eventReload{ + nextSuperSpec: superSpec, + muxMapper: muxMapper, + } +} + +// Status is the wrapper of runtimes Status. +func (l4 *Layer4Server) Status() *supervisor.Status { + return &supervisor.Status{ + ObjectStatus: l4.runtime.Status(), + } +} + +// Close closes Layer4Server. +func (l4 *Layer4Server) Close() { + l4.runtime.Close() +} diff --git a/pkg/object/layer4rawserver/listener.go b/pkg/object/layer4rawserver/listener.go new file mode 100644 index 0000000000..19517ad5ea --- /dev/null +++ b/pkg/object/layer4rawserver/listener.go @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package layer4rawserver + +import ( + "context" + "fmt" + "net" + "runtime/debug" + "sync" + "time" + + context2 "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/util/connectionwrapper" + "github.com/megaease/easegress/pkg/util/limitlistener" +) + +type listener struct { + m *Mux + packetConn net.PacketConn // udp connection + limitListener *limitlistener.LimitListener // tcp connection listener with connection limit + + state stateType + listenAddr string + protocol string // enum:udp/tcp + keepalive bool + reuseport bool + maxConnections uint32 + + mutex *sync.Mutex +} + +func NewListener(spec *Spec, m *Mux) *listener { + listen := &listener{ + m: m, + protocol: spec.Protocol, + keepalive: spec.KeepAlive, + reuseport: spec.Reuseport, + maxConnections: spec.MaxConnections, + mutex: &sync.Mutex{}, + } + if spec.LocalAddr == "" { + listen.listenAddr = fmt.Sprintf(":%d", spec.Port) + } else { + listen.listenAddr = fmt.Sprintf("%s:%d", spec.LocalAddr, spec.Port) + } + return listen +} + +func (l *listener) setMaxConnection(maxConn uint32) { + l.limitListener.SetMaxConnection(maxConn) +} + +func (l *listener) start() { + +} + +func (l *listener) listen() error { + switch l.protocol { + case "udp": + c := net.ListenConfig{} + if ul, err := c.ListenPacket(context.Background(), l.protocol, l.listenAddr); err != nil { + return err + } else { + l.packetConn = ul + } + case "tcp": + if tl, err := net.Listen(l.protocol, l.listenAddr); err != nil { + return err + } else { + l.limitListener = limitlistener.NewLimitListener(tl, l.maxConnections) + } + } + return nil +} + +func (l *listener) accept(ctx context2.Layer4Context) error { + rl, err := l.limitListener.Accept() + if err != nil { + return err + } + + go func(ctx context2.Layer4Context) { + if r := recover(); r != nil { + logger.Errorf("failed tp accept conn for %s %s\n, stack trace: \n", + l.protocol, l.listenAddr, debug.Stack()) + } + + ctx.SetRemoteAddr(rl.RemoteAddr()) // fix it + }(ctx) + return nil +} + +func (l *listener) readUpdPacket(ctx context2.Layer4Context) { + go func(ctx context2.Layer4Context) { + if r := recover(); r != nil { + logger.Errorf("failed tp accept conn for %s %s\n, stack trace: \n", + l.protocol, l.listenAddr, debug.Stack()) + } + + }(ctx) +} + +func (l *listener) acceptEventLoop() { + + for { + if tconn, err := l.limitListener.Accept(); err != nil { + if nerr, ok := err.(net.Error); ok && nerr.Timeout() { + logger.Infof("tcp listener(%s) stop accept connection due to deadline, err: %s", + l.listenAddr, nerr) + return + } + + if ope, ok := err.(*net.OpError); ok { + // not timeout error and not temporary, which means the error is non-recoverable + if !(ope.Timeout() && ope.Temporary()) { + // accept error raised by sockets closing + if ope.Op == "accept" { + logger.Errorf("tcp listener(%s) stop accept connection due to listener closed", + l.listenAddr) + } else { + logger.Errorf("tcp listener(%s) stop accept connection due to non-recoverable error: %s", + l.listenAddr, err.Error()) + } + return + } + } else { + logger.Errorf("tcp listener(%s) stop accept connection with unknown error: %s.", + l.listenAddr, err.Error()) + } + } else { + host, _, splitErr := net.SplitHostPort(tconn.RemoteAddr().String()) + if splitErr != nil || !l.m.AllowIP(host) { + logger.Debugf("reject remote connection from: %s", tconn.RemoteAddr().String()) + _ = tconn.Close() + } else { + go func() { + conn := connectionwrapper.New(tconn) + ctx := 
context2.NewLayer4Context("tcp", conn, l.m) + conn.StartRWLoop(ctx) + }() + } + } + } +} + +func (l *listener) Stop() error { + var err error + switch l.protocol { + case "udp": + err = l.packetConn.SetDeadline(time.Now()) + case "tcp": + err = l.limitListener.Listener.(*net.TCPListener).SetDeadline(time.Now()) + } + return err +} + +func (l *listener) Close() error { + l.mutex.Lock() + defer l.mutex.Unlock() + + if l.limitListener != nil { + return l.limitListener.Close() + } + if l.packetConn != nil { + return l.packetConn.Close() + } + return nil +} diff --git a/pkg/object/layer4rawserver/mux.go b/pkg/object/layer4rawserver/mux.go new file mode 100644 index 0000000000..9d64f41c45 --- /dev/null +++ b/pkg/object/layer4rawserver/mux.go @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package layer4rawserver + +import ( + "net" + "sync/atomic" + + "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/protocol" + "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/ipfilter" + "github.com/megaease/easegress/pkg/util/stringtool" +) + +type ( + Mux struct { + rules atomic.Value // *MuxRules + } + + MuxRules struct { + superSpec *supervisor.Spec + spec *Spec + + muxMapper protocol.Layer4MuxMapper + + ipFilter *ipfilter.IPFilter + ipFilterChan *ipfilter.IPFilters + } +) + +// newIPFilterChain returns nil if the number of final filters is zero. +func newIPFilterChain(parentIPFilters *ipfilter.IPFilters, childSpec *ipfilter.Spec) *ipfilter.IPFilters { + var ipFilters *ipfilter.IPFilters + if parentIPFilters != nil { + ipFilters = ipfilter.NewIPFilters(parentIPFilters.Filters()...) 
+ } else { + ipFilters = ipfilter.NewIPFilters() + } + + if childSpec != nil { + ipFilters.Append(ipfilter.New(childSpec)) + } + + if len(ipFilters.Filters()) == 0 { + return nil + } + + return ipFilters +} + +func newIPFilter(spec *ipfilter.Spec) *ipfilter.IPFilter { + if spec == nil { + return nil + } + + return ipfilter.New(spec) +} + +func (mr *MuxRules) pass(ctx context.Layer4Context) bool { + if mr.ipFilter == nil { + return true + } + + switch addr := ctx.RemoteAddr().(type) { + case *net.UDPAddr: + return mr.ipFilter.Allow(addr.IP.String()) + case *net.TCPAddr: + return mr.ipFilter.Allow(addr.IP.String()) + default: + logger.Warnf("invalid remote addr type") + } + return false +} + +func newMux(mapper protocol.Layer4MuxMapper) *Mux { + m := &Mux{} + + m.rules.Store(&MuxRules{ + spec: &Spec{}, + muxMapper: mapper, + }) + + return m +} + +func (m *Mux) reloadRules(superSpec *supervisor.Spec, muxMapper protocol.Layer4MuxMapper) { + spec := superSpec.ObjectSpec().(*Spec) + + rules := &MuxRules{ + superSpec: superSpec, + spec: spec, + muxMapper: muxMapper, + ipFilter: newIPFilter(spec.IPFilter), + ipFilterChan: newIPFilterChain(nil, spec.IPFilter), + } + m.rules.Store(rules) +} + +func (m *Mux) handleIPNotAllow(ctx context.Layer4Context) { + ctx.AddTag(stringtool.Cat("ip ", ctx.RemoteAddr().String(), " not allow")) +} + +func (m *Mux) AllowIP(ipStr string) bool { + rules := m.rules.Load().(*MuxRules) + if rules == nil { + return true + } + return rules.ipFilter.Allow(ipStr) +} + +func (m *Mux) GetHandler(name string) (protocol.Layer4Handler, bool) { + rules := m.rules.Load().(*MuxRules) + if rules == nil { + return nil, false + } + return rules.muxMapper.GetHandler(name) +} diff --git a/pkg/object/layer4rawserver/runtime.go b/pkg/object/layer4rawserver/runtime.go new file mode 100644 index 0000000000..8a1cc6de42 --- /dev/null +++ b/pkg/object/layer4rawserver/runtime.go @@ -0,0 +1,258 @@ +package layer4rawserver + +import ( + "fmt" + "reflect" + "sync/atomic" + "time" + + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/protocol" + "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/layer4stat" +) + +const ( + checkFailedTimeout = 10 * time.Second + + stateNil stateType = "nil" + stateFailed stateType = "failed" + stateRunning stateType = "running" + stateClosed stateType = "closed" +) + +var ( + errNil = fmt.Errorf("") +) + +type ( + stateType string + + eventCheckFailed struct{} + eventServeFailed struct { + startNum uint64 + err error + } + eventReload struct { + nextSuperSpec *supervisor.Spec + muxMapper protocol.Layer4MuxMapper + } + eventClose struct{ done chan struct{} } + + runtime struct { + superSpec *supervisor.Spec + spec *Spec + + startNum uint64 + mux *Mux + eventChan chan interface{} // receive traffic controller event + + // status + state atomic.Value // stateType + err atomic.Value // error + + tcpstat *layer4stat.Layer4Stat + listener *listener + } + + // Status contains all status generated by runtime, for displaying to users. 
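+	// Health mirrors Error: an empty string means no error has been recorded for the server.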
+ Status struct { + Health string `yaml:"health"` + + State stateType `yaml:"state"` + Error string `yaml:"error,omitempty"` + + // TODO add stat info + } +) + +func newRuntime(superSpec *supervisor.Spec, muxMapper protocol.Layer4MuxMapper) *runtime { + r := &runtime{ + superSpec: superSpec, + eventChan: make(chan interface{}, 10), + } + + r.mux = newMux(muxMapper) + r.setState(stateNil) + r.setError(errNil) + + go r.fsm() + go r.checkFailed() + + return r +} + +func (r *runtime) Close() { + done := make(chan struct{}) + r.eventChan <- &eventClose{done: done} + <-done +} + +// Status returns HTTPServer Status. +func (r *runtime) Status() *Status { + health := r.getError().Error() + + return &Status{ + Health: health, + State: r.getState(), + Error: r.getError().Error(), + } +} + +// FSM is the finite-state-machine for the runtime. +func (r *runtime) fsm() { + for e := range r.eventChan { + switch e := e.(type) { + case *eventCheckFailed: + r.handleEventCheckFailed(e) + case *eventServeFailed: + r.handleEventServeFailed(e) + case *eventReload: + r.handleEventReload(e) + case *eventClose: + r.handleEventClose(e) + // NOTE: We don't close hs.eventChan, + // in case of panic of any other goroutines + // to send event to it later. + return + default: + logger.Errorf("BUG: unknown event: %T\n", e) + } + } +} + +func (r *runtime) reload(nextSuperSpec *supervisor.Spec, muxMapper protocol.Layer4MuxMapper) { + r.superSpec = nextSuperSpec + r.mux.reloadRules(nextSuperSpec, muxMapper) + + nextSpec := nextSuperSpec.ObjectSpec().(*Spec) + + // r.listener does not create just after the process started and the config load for the first time. + if nextSpec != nil && r.listener != nil { + r.listener.setMaxConnection(nextSpec.MaxConnections) + } + + // NOTE: Due to the mechanism of supervisor, + // nextSpec must not be nil, just defensive programming here. + switch { + case r.spec == nil && nextSpec == nil: + logger.Errorf("BUG: nextSpec is nil") + // Nothing to do. + case r.spec == nil && nextSpec != nil: + r.spec = nextSpec + r.startServer() + case r.spec != nil && nextSpec == nil: + logger.Errorf("BUG: nextSpec is nil") + r.spec = nil + r.closeServer() + case r.spec != nil && nextSpec != nil: + if r.needRestartServer(nextSpec) { + r.spec = nextSpec + r.closeServer() + r.startServer() + } else { + r.spec = nextSpec + } + } +} + +func (r *runtime) setState(state stateType) { + r.state.Store(state) +} + +func (r *runtime) getState() stateType { + return r.state.Load().(stateType) +} + +func (r *runtime) setError(err error) { + if err == nil { + r.err.Store(errNil) + } else { + // NOTE: For type safe. 
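+		// atomic.Value panics if successive stores use different concrete types,
+		// so always store a value produced by fmt.Errorf here.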
+ r.err.Store(fmt.Errorf("%v", err)) + } +} + +func (r *runtime) getError() error { + err := r.err.Load() + if err == nil { + return nil + } + return err.(error) +} + +func (r *runtime) handleEventCheckFailed(e *eventCheckFailed) { + +} + +func (r *runtime) handleEventServeFailed(e *eventServeFailed) { + if r.startNum > e.startNum { + return + } + r.setState(stateFailed) + r.setError(e.err) +} + +func (r *runtime) handleEventReload(e *eventReload) { + +} + +func (r *runtime) handleEventClose(e *eventClose) { + r.closeServer() + close(e.done) +} + +func (r *runtime) startServer() { + l := NewListener(r.spec, r.mux) + err := l.listen() + if err != nil { + r.setState(stateFailed) + r.setError(err) + logger.Errorf("listen tcp conn for %s:%d failed, err: %v", r.spec.LocalAddr, r.spec.Port, err) + + _ = l.Close() + r.eventChan <- &eventServeFailed{ + err: err, + startNum: r.startNum, + } + return + } + + r.startNum++ + r.setState(stateRunning) + r.setError(nil) + + r.listener = l + go r.listener.start() +} + +func (r *runtime) closeServer() { + _ = r.listener.Close() +} + +func (r *runtime) checkFailed() { + ticker := time.NewTicker(checkFailedTimeout) + for range ticker.C { + state := r.getState() + if state == stateFailed { + r.eventChan <- &eventCheckFailed{} + } else if state == stateClosed { + ticker.Stop() + return + } + } +} + +func (r *runtime) needRestartServer(nextSpec *Spec) bool { + x := *r.spec + y := *nextSpec + + // The change of options below need not restart the HTTP server. + x.MaxConnections, y.MaxConnections = 0, 0 + x.IPFilter, y.IPFilter = nil, nil + x.ProxyConnectTimeout, y.ProxyTimeout = 0, 0 + x.ProxyTimeout, y.ProxyTimeout = 0, 0 + + // The update of rules need not to shutdown server. + return !reflect.DeepEqual(x, y) +} diff --git a/pkg/object/layer4rawserver/spec.go b/pkg/object/layer4rawserver/spec.go new file mode 100644 index 0000000000..7c58cf567c --- /dev/null +++ b/pkg/object/layer4rawserver/spec.go @@ -0,0 +1,49 @@ +package layer4rawserver + +import ( + "github.com/megaease/easegress/pkg/util/ipfilter" +) + +type ( + // Spec describes the TcpServer. + Spec struct { + Protocol string `yaml:"protocol" jsonschema:"required,enum=tcp,enum=udp"` + LocalAddr string `yaml:"localAddr" jsonschema:"omitempty"` + Port uint16 `yaml:"port" json:"port" jsonschema:"required"` + BindPort bool `yaml:"bindPort" jsonschema:"omitempty"` + MaxConnections uint32 `yaml:"maxConnections" jsonschema:"omitempty,minimum=1"` + + // By default, backlog is set to -1 on FreeBSD, DragonFly BSD, and macOS, and to 511 on other platforms. + Backlog int32 `yaml:"backlog" jsonschema:"omitempty,minimum=-1"` + SendBuf int `yaml:"sendBuf" jsonschema:"omitempty"` + RecvBuf int `yaml:"recvBuf" jsonschema:"omitempty"` + Reuseport bool `yaml:"reuseport" jsonschema:"omitempty"` + KeepAlive bool `yaml:"keepAlive" jsonschema:"required"` + TcpNodelay bool `yaml:"tcpNodelay" jsonschema:"omitempty"` + ProxyConnectTimeout uint32 `yaml:"proxyConnectTimeout" jsonschema:"omitempty"` + ProxyTimeout uint32 `yaml:"proxyTimeout" jsonschema:"omitempty"` + + IPFilter *ipfilter.Spec `yaml:"ipFilter,omitempty" jsonschema:"omitempty"` + } + + ListenerConfig struct { + Protocol string `yaml:"protocol" jsonschema:"required,enum=tcp,enum=udp"` + LocalAddr string `yaml:"localAddr" jsonschema:"omitempty"` + Port uint16 `yaml:"port" json:"port" jsonschema:"required"` + BindPort bool `yaml:"bindPort" jsonschema:"omitempty"` + // By default, backlog is set to -1 on FreeBSD, DragonFly BSD, and macOS, and to 511 on other platforms. 
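// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: a minimal Spec value for the
// layer4 raw server, equivalent to what a user would express in YAML. All
// values are examples, not defaults from this change; the timeout unit is
// assumed to be milliseconds.
// ---------------------------------------------------------------------------
package layer4rawserver

func exampleSpec() *Spec {
	return &Spec{
		Protocol:            "tcp",
		LocalAddr:           "0.0.0.0",
		Port:                10080,
		MaxConnections:      1024,
		KeepAlive:           true,
		TcpNodelay:          true,
		ProxyConnectTimeout: 1000,
		ProxyTimeout:        5000,
	}
}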
+ Backlog int32 `yaml:"backlog" jsonschema:"omitempty,minimum=-1"` + SendBuf int `yaml:"sendBuf" jsonschema:"omitempty"` + RecvBuf int `yaml:"recvBuf" jsonschema:"omitempty"` + Reuseport bool `yaml:"reuseport" jsonschema:"omitempty"` + KeepAlive bool `yaml:"keepAlive" jsonschema:"required"` + TcpNodelay bool `yaml:"tcpNodelay" jsonschema:"omitempty"` + ProxyConnectTimeout uint32 `yaml:"proxyConnectTimeout" jsonschema:"omitempty"` + ProxyTimeout uint32 `yaml:"proxyTimeout" jsonschema:"omitempty"` + } +) + +// Validate validates TcpServerSpec. +func (spec *Spec) Validate() error { + return nil +} diff --git a/pkg/filter/layer4proxy/pool.go b/pkg/object/layer4rawserver/upstream/pool.go similarity index 52% rename from pkg/filter/layer4proxy/pool.go rename to pkg/object/layer4rawserver/upstream/pool.go index 810dfb3735..17462175ca 100644 --- a/pkg/filter/layer4proxy/pool.go +++ b/pkg/object/layer4rawserver/upstream/pool.go @@ -1,18 +1,32 @@ -package layer4proxy +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package upstream import ( - "bufio" "fmt" - "github.com/google/martian/log" + "net" + "time" + "github.com/megaease/easegress/pkg/context" "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/supervisor" "github.com/megaease/easegress/pkg/util/layer4stat" "github.com/megaease/easegress/pkg/util/memorycache" - "github.com/megaease/easegress/pkg/util/stringtool" - "io" - "net" - "time" ) type ( @@ -21,12 +35,10 @@ type ( pool struct { spec *PoolSpec - tagPrefix string - writeResponse bool + tagPrefix string - servers *servers - layer4stat *layer4stat.Layer4Stat - memoryCache *memorycache.MemoryCache + servers *servers + layer4stat *layer4stat.Layer4Stat } // PoolSpec describes a pool of servers. 
@@ -74,22 +86,13 @@ func (s PoolSpec) Validate() error { return nil } -func newPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string, writeResponse bool) *pool { - - var memoryCache *memorycache.MemoryCache - if spec.MemoryCache != nil { - memoryCache = memorycache.New(spec.MemoryCache) - } +func newPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) *pool { return &pool{ - spec: spec, - - tagPrefix: tagPrefix, - writeResponse: writeResponse, - - servers: newServers(super, spec), - layer4stat: layer4stat.New(), - memoryCache: memoryCache, + spec: spec, + tagPrefix: tagPrefix, + servers: newServers(super, spec), + layer4stat: layer4stat.New(), } } @@ -100,47 +103,18 @@ func (p *pool) status() *PoolStatus { func (p *pool) handle(ctx context.Layer4Context) string { - addTag := func(subPrefix, msg string) { - tag := stringtool.Cat(p.tagPrefix, "#", subPrefix, ": ", msg) - ctx.Lock() - ctx.AddTag(tag) - ctx.Unlock() - } - server, err := p.servers.next(ctx) if err != nil { - addTag("serverErr", err.Error()) return resultInternalError } - addTag("addr", server.HostPort) - rawConn, err := net.DialTimeout("tcp", server.HostPort, 1000*time.Millisecond) + upstreamConn, err := net.DialTimeout("tcp", server.HostPort, 1000*time.Millisecond) if err != nil { - log.Errorf("dial tcp for addr: % failed, err: %v", server.HostPort, err) + logger.Errorf("dial tcp for addr: % failed, err: %v", server.HostPort, err) } - backendConn := rawConn.(*net.TCPConn) + _ = upstreamConn.Close() - defer func(backendConn *net.TCPConn) { - closeErr := backendConn.Close() - if closeErr != nil { - logger.Warnf("close backend conn for %v failed, err: %v", server.HostPort, err) - } - }(backendConn) - - errChan := make(chan error) - go p.connCopy(backendConn, ctx.ClientConn(), errChan) - go p.connCopy(ctx.ClientConn(), backendConn, errChan) - - err = <-errChan - if err != nil { - logger.Errorf("Error during connection: %v", err) - } - - err = <-errChan // TODO export tcp config for backend conn, watch client/backend error - - ctx.Lock() - defer ctx.Unlock() - // NOTE: The code below can't use addTag and setStatusCode in case of deadlock. + // TODO do layer4 proxy return "" } @@ -148,16 +122,3 @@ func (p *pool) handle(ctx context.Layer4Context) string { func (p *pool) close() { p.servers.close() } - -func (p *pool) connCopy(dst *net.TCPConn, src *net.TCPConn, errCh chan error) { - writer := bufio.NewWriter(dst) - reader := bufio.NewReader(src) - _, err := io.Copy(writer, reader) - _ = writer.Flush() // need flush bytes in buffer - errCh <- err - - errClose := dst.CloseWrite() - if errClose != nil { - logger.Debugf("Error while terminating connection: %v", errClose) - } -} diff --git a/pkg/object/layer4rawserver/upstream/proxy.go b/pkg/object/layer4rawserver/upstream/proxy.go new file mode 100644 index 0000000000..594f7a066d --- /dev/null +++ b/pkg/object/layer4rawserver/upstream/proxy.go @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
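// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: one way the "TODO do layer4
// proxy" in pool.handle above could be filled in, along the lines of the
// removed connCopy helper: copy bytes in both directions and wait for both
// halves to finish. proxyTCP and its arguments are hypothetical names.
// ---------------------------------------------------------------------------
package upstream

import (
	"io"
	"net"
)

func proxyTCP(clientConn, upstreamConn *net.TCPConn) error {
	errCh := make(chan error, 2)
	copyHalf := func(dst, src *net.TCPConn) {
		_, err := io.Copy(dst, src)
		_ = dst.CloseWrite() // signal EOF to the peer once one direction drains
		errCh <- err
	}
	go copyHalf(upstreamConn, clientConn)
	go copyHalf(clientConn, upstreamConn)

	var firstErr error
	for i := 0; i < 2; i++ {
		if err := <-errCh; err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}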
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package upstream + +const ( + // Kind is the kind of Proxy. + Kind = "Proxy" + + resultFallback = "fallback" + resultInternalError = "internalError" + resultClientError = "clientError" + resultServerError = "serverError" +) + +var results = []string{ + resultFallback, + resultInternalError, + resultClientError, + resultServerError, +} diff --git a/pkg/filter/layer4proxy/server.go b/pkg/object/layer4rawserver/upstream/server.go similarity index 88% rename from pkg/filter/layer4proxy/server.go rename to pkg/object/layer4rawserver/upstream/server.go index 983fdc4043..14a8f63b40 100644 --- a/pkg/filter/layer4proxy/server.go +++ b/pkg/object/layer4rawserver/upstream/server.go @@ -1,4 +1,21 @@ -package layer4proxy +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package upstream import ( "fmt" @@ -51,7 +68,6 @@ type ( // Server is proxy server. Server struct { - Address *net.TCPAddr HostPort string `yaml:"HostPort" jsonschema:"required,format=hostport"` Tags []string `yaml:"tags" jsonschema:"omitempty,uniqueItems=true"` Weight int `yaml:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` @@ -66,11 +82,9 @@ type ( func (s *servers) Validated() error { if s.poolSpec.Protocol == "tcp" { for _, server := range s.static.servers { - if addr, err := net.ResolveTCPAddr("tcp", server.HostPort); err != nil { + if _, err := net.ResolveTCPAddr("tcp", server.HostPort); err != nil { logger.Errorf("resolve tcp addr failed, host port: %v, %v", server.HostPort, err) return err - } else { - server.Address = addr } } } @@ -288,6 +302,9 @@ func (ss *staticServers) weightedRandom() *Server { } func (ss *staticServers) ipHash(ctx context.Layer4Context) *Server { - sum32 := int(hashtool.Hash32(ctx.ClientIP())) + remoteAddr := ctx.RemoteAddr().String() + host, _, _ := net.SplitHostPort(remoteAddr) + + sum32 := int(hashtool.Hash32(host)) return ss.servers[sum32%len(ss.servers)] } diff --git a/pkg/object/tcpserver/context.go b/pkg/object/tcpserver/context.go deleted file mode 100644 index 3735d0d5d1..0000000000 --- a/pkg/object/tcpserver/context.go +++ /dev/null @@ -1,15 +0,0 @@ -package tcpserver - -import ( - stdcontext "context" - "time" -) - -const ( - serverShutdownTimeout = 30 * time.Second -) - -func serverShutdownContext() (stdcontext.Context, stdcontext.CancelFunc) { - ctx, cancelFunc := stdcontext.WithTimeout(stdcontext.Background(), serverShutdownTimeout) - return ctx, cancelFunc -} diff --git a/pkg/object/tcpserver/mux.go b/pkg/object/tcpserver/mux.go deleted file mode 100644 index 1e129e5661..0000000000 --- a/pkg/object/tcpserver/mux.go +++ /dev/null @@ -1 +0,0 @@ -package tcpserver diff --git a/pkg/object/tcpserver/runtime.go b/pkg/object/tcpserver/runtime.go deleted file mode 100644 index 3795d50e99..0000000000 --- a/pkg/object/tcpserver/runtime.go +++ /dev/null @@ -1,203 +0,0 @@ -package tcpserver - -import ( - 
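// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: the ipHash change above now
// keys on the host part of ctx.RemoteAddr() rather than a ClientIP helper.
// backendIndex is a hypothetical standalone version of that computation; the
// hashtool import path is assumed from the Hash32 call in server.go.
// ---------------------------------------------------------------------------
package upstream

import (
	"net"

	"github.com/megaease/easegress/pkg/util/hashtool"
)

func backendIndex(remoteAddr string, backends int) int {
	host, _, _ := net.SplitHostPort(remoteAddr) // e.g. "10.0.0.7:52114" -> "10.0.0.7"
	return int(hashtool.Hash32(host)) % backends
}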
"fmt" - "net" - "runtime/debug" - "sync/atomic" - "time" - - "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/protocol" - "github.com/megaease/easegress/pkg/supervisor" - "github.com/megaease/easegress/pkg/util/layer4stat" - "github.com/megaease/easegress/pkg/util/limitlistener" -) - -type runtime struct { - superSpec *supervisor.Spec - spec *Spec - startNum uint64 - eventChan chan interface{} // receive traffic controller event - - // status - state atomic.Value // stateType - err atomic.Value // error - - tcpstat *layer4stat.Layer4Stat - limitListener *limitlistener.LimitListener -} - -func (r *runtime) Close() { - done := make(chan struct{}) - r.eventChan <- &eventClose{done: done} - <-done -} - -func newRuntime(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) *runtime { - r := &runtime{ - superSpec: superSpec, - eventChan: make(chan interface{}, 10), - } - - r.setState(stateNil) - r.setError(errNil) - - go r.fsm() - // go r.checkFailed() - - return r -} - -func (r *runtime) setState(state stateType) { - r.state.Store(state) -} - -func (r *runtime) getState() stateType { - return r.state.Load().(stateType) -} - -func (r *runtime) setError(err error) { - if err == nil { - r.err.Store(errNil) - } else { - // NOTE: For type safe. - r.err.Store(fmt.Errorf("%v", err)) - } -} - -func (r *runtime) getError() error { - err := r.err.Load() - if err == nil { - return nil - } - return err.(error) -} - -// FSM is the finite-state-machine for the runtime. -func (r *runtime) fsm() { - for e := range r.eventChan { - switch e := e.(type) { - case *eventCheckFailed: - r.handleEventCheckFailed(e) - case *eventServeFailed: - r.handleEventServeFailed(e) - case *eventReload: - r.handleEventReload(e) - case *eventClose: - r.handleEventClose(e) - // NOTE: We don't close hs.eventChan, - // in case of panic of any other goroutines - // to send event to it later. 
- return - default: - logger.Errorf("BUG: unknown event: %T\n", e) - } - } -} - -func (r *runtime) handleEventCheckFailed(e *eventCheckFailed) { - -} - -func (r *runtime) handleEventServeFailed(e *eventServeFailed) { - if r.startNum > e.startNum { - return - } - r.setState(stateFailed) - r.setError(e.err) -} - -func (r *runtime) handleEventReload(e *eventReload) { - -} - -func (r *runtime) handleEventClose(e *eventClose) { - r.closeServer() - close(e.done) -} - -func (r *runtime) checkFailed() { - ticker := time.NewTicker(checkFailedTimeout) - for range ticker.C { - state := r.getState() - if state == stateFailed { - r.eventChan <- &eventCheckFailed{} - } else if state == stateClosed { - ticker.Stop() - return - } - } -} - -func (r *runtime) startServer() { - - listener, err := gnet.Listen("tcp", fmt.Sprintf("%s:%d", r.spec.IP, r.spec.Port)) - if err != nil { - r.setState(stateFailed) - r.setError(err) - logger.Errorf("listen tcp conn for %s:%d failed, err: %v", r.spec.IP, r.spec.Port, err) - - _ = listener.Close() - r.eventChan <- &eventServeFailed{ - err: err, - startNum: r.startNum, - } - return - } - - r.startNum++ - r.setState(stateRunning) - r.setError(nil) - - limitListener := limitlistener.NewLimitListener(listener, r.spec.MaxConnections) - r.limitListener = limitListener - go r.runTCPProxyServer() -} - -// runTCPProxyServer bind to specific address, accept tcp conn -func (r *runtime) runTCPProxyServer() { - - go func() { - defer func() { - if e := recover(); e != nil { - logger.Errorf("listen tcp for %s:%d crashed, trace: %s", r.spec.IP, r.spec.Port, string(debug.Stack())) - } - }() - - for { - var netConn, err = (*r.limitListener).Accept() - conn := netConn.(*net.TCPConn) - if err == nil { - go func() { - defer func() { - if e := recover(); e != nil { - logger.Errorf("tcp conn handler for %s:%d crashed, trace: %s", r.spec.IP, - r.spec.Port, string(debug.Stack())) - } - }() - - r.setTcpConf(conn) - //fn(conn) - }() - } else { - // only record accept error, didn't close listener - logger.Errorf("tcp conn handler for %s:%d crashed, trace: %s", r.spec.IP, - r.spec.Port, string(debug.Stack())) - break - } - } - }() -} - -func (r *runtime) setTcpConf(conn *net.TCPConn) { - _ = conn.SetKeepAlive(r.spec.KeepAlive) - _ = conn.SetNoDelay(r.spec.TcpNodelay) - _ = conn.SetReadBuffer(r.spec.RecvBuf) - _ = conn.SetWriteBuffer(r.spec.SendBuf) - // TODO set deadline for tpc connection -} - -func (r *runtime) closeServer() { - _ = r.limitListener.Close() -} diff --git a/pkg/object/tcpserver/spec.go b/pkg/object/tcpserver/spec.go deleted file mode 100644 index 7719b21b22..0000000000 --- a/pkg/object/tcpserver/spec.go +++ /dev/null @@ -1,48 +0,0 @@ -package tcpserver - -import ( - "fmt" - - "github.com/megaease/easegress/pkg/tracing" - "github.com/megaease/easegress/pkg/util/ipfilter" -) - -type ( - // Spec describes the TcpServer. - Spec struct { - IP string `yaml:"ip" jsonschema:"required,minimum=1"` - Port uint16 `yaml:"port" jsonschema:"required,minimum=1"` - MaxConnections uint32 `yaml:"maxConnections" jsonschema:"omitempty,minimum=1"` - - // By default, backlog is set to -1 on FreeBSD, DragonFly BSD, and macOS, and to 511 on other platforms. 
- Backlog int32 `yaml:"backlog" jsonschema:"omitempty,minimum=-1"` - SendBuf int `yaml:"sendBuf" jsonschema:"omitempty"` - RecvBuf int `yaml:"recvBuf" jsonschema:"omitempty"` - Reuseport bool `yaml:"reuseport" jsonschema:"omitempty"` - KeepAlive bool `yaml:"keepAlive" jsonschema:"required"` - TcpNodelay bool `yaml:"tcpNodelay" jsonschema:"omitempty"` - ProxyConnectTimeout uint32 `yaml:"proxyConnectTimeout" jsonschema:"omitempty"` - ProxyTimeout uint32 `yaml:"proxyTimeout" jsonschema:"omitempty"` - - // Support multiple certs, preserve the certbase64 and keybase64 - // for backward compatibility - SSL bool `yaml:"ssl" jsonschema:"omitempty"` - CertBase64 string `yaml:"certBase64" jsonschema:"omitempty,format=base64"` - KeyBase64 string `yaml:"keyBase64" jsonschema:"omitempty,format=base64"` - - IPFilter *ipfilter.Spec `yaml:"ipFilter,omitempty" jsonschema:"omitempty"` - - Tracing *tracing.Spec `yaml:"tracing" jsonschema:"omitempty"` - } -) - -// Validate validates TcpServerSpec. -func (spec *Spec) Validate() error { - if spec.SSL { - if spec.CertBase64 == "" || spec.KeyBase64 == "" { - return fmt.Errorf("tcp proxy ssl parameters is incomplete") - } - // TODO need check ssl parameters - } - return nil -} diff --git a/pkg/object/tcpserver/tcpserver.go b/pkg/object/tcpserver/tcpserver.go deleted file mode 100644 index 0b6e1ea03e..0000000000 --- a/pkg/object/tcpserver/tcpserver.go +++ /dev/null @@ -1,101 +0,0 @@ -package tcpserver - -import ( - "fmt" - "github.com/megaease/easegress/pkg/graceupdate" - "github.com/megaease/easegress/pkg/protocol" - "time" - - "github.com/megaease/easegress/pkg/supervisor" - "github.com/megaease/easegress/pkg/util/layer4stat" -) - -const ( - // Category is the category of TCPServer. - Category = supervisor.CategoryTrafficGate - - // Kind is the kind of HTTPServer. - Kind = "TCPServer" - - checkFailedTimeout = 10 * time.Second - - topNum = 10 - - stateNil stateType = "nil" - stateFailed stateType = "failed" - stateRunning stateType = "running" - stateClosed stateType = "closed" -) - -func init() { - supervisor.Register(&TCPServer{}) -} - -var ( - errNil = fmt.Errorf("") - gnet = graceupdate.Global -) - -type ( - stateType string - - eventCheckFailed struct{} - eventServeFailed struct { - startNum uint64 - err error - } - eventReload struct { - nextSuperSpec *supervisor.Spec - } - eventClose struct{ done chan struct{} } - - TCPServer struct { - runtime *runtime - } - - // Status contains all status generated by runtime, for displaying to users. - Status struct { - Health string `yaml:"health"` - - State stateType `yaml:"state"` - Error string `yaml:"error,omitempty"` - - *layer4stat.Status - } -) - -// Category get object category: supervisor.CategoryTrafficGate -func (T *TCPServer) Category() supervisor.ObjectCategory { - return Category -} - -// Kind get object kind: http server -func (T *TCPServer) Kind() string { - return Kind -} - -func (T *TCPServer) DefaultSpec() interface{} { - return &Spec{ - KeepAlive: true, - MaxConnections: 10240, - } -} - -func (T *TCPServer) Status() *supervisor.Status { - panic("implement me") -} - -// Close http server -func (T *TCPServer) Close() { - T.runtime.Close() -} - -// Init initializes HTTPServer. -func (T *TCPServer) Init(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { - panic("implement me") -} - -// Inherit inherits previous generation of HTTPServer. 
-func (T *TCPServer) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper protocol.MuxMapper) { - panic("implement me") -} diff --git a/pkg/protocol/layer4.go b/pkg/protocol/layer4.go new file mode 100644 index 0000000000..a1937f37cb --- /dev/null +++ b/pkg/protocol/layer4.go @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package protocol + +import "github.com/megaease/easegress/pkg/context" + +type ( + // Layer4Handler is the common handler for the all backends + // which handle the traffic from layer4(tcp/udp) server. + Layer4Handler interface { + Handle(ctx context.Layer4Context) + } + + // Layer4MuxMapper gets layer4 handler pipeline with mutex + Layer4MuxMapper interface { + GetHandler(name string) (Layer4Handler, bool) + } +) diff --git a/pkg/supervisor/registry.go b/pkg/supervisor/registry.go index db5deffd48..83241c32bd 100644 --- a/pkg/supervisor/registry.go +++ b/pkg/supervisor/registry.go @@ -69,6 +69,15 @@ type ( // So its own responsibility for the object to inherit and clean the previous generation stuff. // The supervisor won't call Close for the previous generation. Inherit(superSpec *Spec, previousGeneration Object, muxMapper protocol.MuxMapper) + + // InitLayer4 initializes the Object. + InitLayer4(superSpec *Spec, muxMapper protocol.Layer4MuxMapper) + + // InheritLayer4 also initializes the Object. + // But it needs to handle the lifecycle of the previous generation. + // So its own responsibility for the object to inherit and clean the previous generation stuff. + // The supervisor won't call Close for the previous generation. + InheritLayer4(superSpec *Spec, previousGeneration Object, muxMapper protocol.Layer4MuxMapper) } // TrafficGate is the object in category of TrafficGate. diff --git a/pkg/util/connectionwrapper/connection.go b/pkg/util/connectionwrapper/connection.go new file mode 100644 index 0000000000..a6a78ee114 --- /dev/null +++ b/pkg/util/connectionwrapper/connection.go @@ -0,0 +1,466 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package connectionwrapper + +import ( + "io" + "net" + "reflect" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/util/iobufferpool" + "github.com/megaease/easegress/pkg/util/timerpool" +) + +// ConnectionCloseType represent connection close type +type ConnectionCloseType string + +//Connection close types +const ( + // FlushWrite means write buffer to underlying io then close connection + FlushWrite ConnectionCloseType = "FlushWrite" + // NoFlush means close connection without flushing buffer + NoFlush ConnectionCloseType = "NoFlush" +) + +// ConnectionEvent type +type ConnectionEvent string + +// ConnectionEvent types +const ( + RemoteClose ConnectionEvent = "RemoteClose" + LocalClose ConnectionEvent = "LocalClose" + OnReadErrClose ConnectionEvent = "OnReadErrClose" + OnWriteErrClose ConnectionEvent = "OnWriteErrClose" + OnConnect ConnectionEvent = "OnConnect" + Connected ConnectionEvent = "ConnectedFlag" + ConnectTimeout ConnectionEvent = "ConnectTimeout" + ConnectFailed ConnectionEvent = "ConnectFailed" + OnReadTimeout ConnectionEvent = "OnReadTimeout" + OnWriteTimeout ConnectionEvent = "OnWriteTimeout" +) + +type Connection struct { + net.Conn + + closed uint32 + connected uint32 + startOnce sync.Once + + // readLoop/writeLoop goroutine fields: + internalStopChan chan struct{} + readEnabled bool + readEnabledChan chan bool // if we need to reload read filters, it's better to stop read data before reload filters + + lastBytesSizeRead int64 + lastWriteSizeWrite int64 + + curWriteBufferData []iobufferpool.IoBuffer + readBuffer iobufferpool.IoBuffer + writeBuffers net.Buffers + ioBuffers []iobufferpool.IoBuffer + writeBufferChan chan *[]iobufferpool.IoBuffer +} + +func New(conn net.Conn) *Connection { + return &Connection{ + Conn: conn, + } +} + +func (c *Connection) StartRWLoop(ctx context.Layer4Context) { + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection read loop crashed, local addr: %s, remote addr: %s, err: %+v", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) + } + + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection close due to read loop crashed failed, local addr: %s, remote addr: %s, err: %+v", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) + } + }() + _ = c.Close(NoFlush, LocalClose, ctx) + }() + }() + c.startReadLoop(ctx) + }() + + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection write loop crashed, local addr: %s, remote addr: %s, err: %+v", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) + } + + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection close due to write loop crashed failed, local addr: %s, remote addr: %s, err: %+v", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) + } + }() + _ = c.Close(NoFlush, LocalClose, ctx) + }() + }() + c.startWriteLoop(ctx) + }() +} + +func (c *Connection) startReadLoop(ctx context.Layer4Context) { + for { + select { + case <-c.internalStopChan: + return + case <-c.readEnabledChan: + default: + if c.readEnabled { + err := c.doRead(ctx) + if err != nil { + if te, ok := err.(net.Error); ok && te.Timeout() { + if ctx.Protocol() == "tcp" && c.readBuffer != nil && + c.readBuffer.Len() == 0 && c.readBuffer.Cap() > DefaultBufferReadCapacity { + 
c.readBuffer.Free() + c.readBuffer.Alloc(DefaultBufferReadCapacity) + } + continue + } + + if c.lastBytesSizeRead == 0 || err == io.EOF { + logger.Debugf("%s connection write loop closed, local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + } else { + logger.Errorf("%s connection write loop closed, local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + } + + if err == io.EOF { + _ = c.Close(NoFlush, RemoteClose, ctx) + } else { + _ = c.Close(NoFlush, OnReadErrClose, ctx) + } + return + } + } else { + select { + case <-c.readEnabledChan: + case <-time.After(100 * time.Millisecond): + } + } + } + + } +} + +func (c *Connection) startWriteLoop(ctx context.Layer4Context) { + defer func() { + close(c.writeBufferChan) + }() + + var err error + for { + select { + case <-c.internalStopChan: + return + case buf, ok := <-c.writeBufferChan: + if !ok { + return + } + c.appendBuffer(buf) + + QUIT: + for i := 0; i < 10; i++ { + select { + case buf, ok := <-c.writeBufferChan: + if !ok { + return + } + c.appendBuffer(buf) + default: + break QUIT + } + + c.setWriteDeadline(ctx) + _, err = c.doWrite(ctx) + } + } + + if err != nil { + + if err == iobufferpool.EOF { + logger.Debugf("%s connection write loop occur error, local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + c.Close(NoFlush, LocalClose, ctx) + } else { + logger.Errorf("%s connection write loop occur error, local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + } + + if te, ok := err.(net.Error); ok && te.Timeout() { + c.Close(NoFlush, OnWriteTimeout, ctx) + } + + if ctx.Protocol() == "udp" && strings.Contains(err.Error(), "connection refused") { + c.Close(NoFlush, RemoteClose, ctx) + } + //other write errs not close connection, because readbuffer may have unread data, wait for readloop close connection, + + return + } + } +} + +func (c *Connection) appendBuffer(buffers *[]iobufferpool.IoBuffer) { + if buffers == nil { + return + } + for _, buf := range *buffers { + if buf == nil { + continue + } + c.ioBuffers = append(c.ioBuffers, buf) + c.writeBuffers = append(c.writeBuffers, buf.Bytes()) + } +} + +func (c *Connection) doRead(ctx context.Layer4Context) (err error) { + if c.readBuffer == nil { + switch ctx.Protocol() { + case "udp": + // A UDP socket will Read up to the size of the receiving buffer and will discard the rest + c.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) + default: + c.readBuffer = iobufferpool.GetIoBuffer(DefaultBufferReadCapacity) + } + } + + var bytesRead int64 + c.setReadDeadline(ctx) + bytesRead, err = c.readBuffer.ReadOnce(c.Conn) + + if err != nil { + if atomic.LoadUint32(&c.closed) == 1 { + return err + } + + if te, ok := err.(net.Error); ok && te.Timeout() { + // TODO add timeout handle(such as send keepalive msg to active connection) + + if bytesRead == 0 { + return err + } + } else if err != io.EOF { + return err + } + } + + //todo: ReadOnce maybe always return (0, nil) and causes dead loop (hack) + if bytesRead == 0 && err == nil { + err = io.EOF + logger.Errorf("%s connection read maybe always return (0, nil), local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + } + c.lastBytesSizeRead = int64(c.readBuffer.Len()) + return +} + +// Write send recv data(batch mode) to upstream +func (c *Connection) Write(ctx context.Layer4Context, 
buffers ...iobufferpool.IoBuffer) (err error) { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) + err = ErrConnectionHasClosed + } + }() + + // TODO get filters from layer4 pipeline, transform buffers via filters + + select { + case c.writeBufferChan <- &buffers: + return + default: + } + + t := timerpool.Get(DefaultConnTryTimeout) + select { + case c.writeBufferChan <- &buffers: + case <-t.C: + err = ErrWriteBufferChanTimeout + } + timerpool.Put(t) + return +} + +func (c *Connection) setWriteDeadline(ctx context.Layer4Context) { + args := ctx.ConnectionArgs() + if args.ProxyWriteTimeout > 0 { + _ = c.Conn.SetWriteDeadline(time.Now().Add(time.Duration(args.ProxyWriteTimeout) * time.Millisecond)) + } else { + switch ctx.Protocol() { + case "udp": + _ = c.Conn.SetWriteDeadline(time.Now().Add(DefaultUDPIdleTimeout)) + case "tcp": + _ = c.Conn.SetWriteDeadline(time.Now().Add(DefaultConnWriteTimeout)) + } + } +} + +func (c *Connection) setReadDeadline(ctx context.Layer4Context) { + args := ctx.ConnectionArgs() + if args.ProxyWriteTimeout > 0 { + _ = c.Conn.SetReadDeadline(time.Now().Add(time.Duration(args.ProxyReadTimeout) * time.Millisecond)) + } else { + switch ctx.Protocol() { + case "udp": + _ = c.Conn.SetReadDeadline(time.Now().Add(DefaultUDPReadTimeout)) + case "tcp": + _ = c.Conn.SetReadDeadline(time.Now().Add(ConnReadTimeout)) + } + } +} + +// Close handle connection close event +func (c *Connection) Close(ccType ConnectionCloseType, eventType ConnectionEvent, ctx context.Layer4Context) (err error) { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) + err = ErrConnectionHasClosed + } + }() + + if ccType == FlushWrite { + _ = c.Write(ctx, iobufferpool.NewIoBufferEOF()) + return nil + } + + if !atomic.CompareAndSwapUint32(&c.closed, 0, 1) { + return nil + } + + // connection failed in client mode + if c.Conn == nil || reflect.ValueOf(c.Conn).IsNil() { + return nil + } + + // close tcp conn read first + if tconn, ok := c.Conn.(*net.TCPConn); ok { + logger.Errorf("%s connection close read, local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + _ = tconn.CloseRead() + } + + // close conn recv, then notify read/write loop to exit + close(c.internalStopChan) + _ = c.Conn.Close() + + logger.Errorf("%s connection closed, local addr: %s, remote addr: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) + return nil +} + +func (c *Connection) writeBufLen() (bufLen int) { + for _, buf := range c.writeBuffers { + bufLen += len(buf) + } + return +} + +func (c *Connection) doWrite(ctx context.Layer4Context) (interface{}, error) { + bytesSent, err := c.doWriteIO(ctx) + if err != nil && atomic.LoadUint32(&c.closed) == 1 { + return 0, nil + } + + c.lastWriteSizeWrite = int64(c.writeBufLen()) + return bytesSent, err +} + +// +func (c *Connection) doWriteIO(ctx context.Layer4Context) (bytesSent int64, err error) { + buffers := c.writeBuffers + switch ctx.Protocol() { + case "udp": + addr := ctx.RemoteAddr().(*net.UDPAddr) + n := 0 + bytesSent = 0 + for _, buf := range c.ioBuffers { + if c.Conn.RemoteAddr() == nil { + n, err = c.Conn.(*net.UDPConn).WriteToUDP(buf.Bytes(), addr) + } else { + n, err = 
c.Conn.Write(buf.Bytes()) + } + if err != nil { + break + } + bytesSent += int64(n) + } + case "tcp": + bytesSent, err = buffers.WriteTo(c.Conn) + } + + if err != nil { + return bytesSent, err + } + + for i, buf := range c.ioBuffers { + c.ioBuffers[i] = nil + c.writeBuffers[i] = nil + if buf.EOF() { + err = iobufferpool.EOF + } + if e := iobufferpool.PutIoBuffer(buf); e != nil { + logger.Errorf("%s connection give io buffer failed, local addr: %s, remote addr: %s, err: %s", + ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), err.Error()) + } + } + c.ioBuffers = c.ioBuffers[:0] + c.writeBuffers = c.writeBuffers[:0] + return +} + +func (c *Connection) SetNoDelay(enable bool) { + if c.Conn != nil { + if tconn, ok := c.Conn.(*net.TCPConn); ok { + _ = tconn.SetNoDelay(enable) + } + } +} + +func (c *Connection) ReadEnabled() bool { + return c.readEnabled +} + +func (c *Connection) State() ConnState { + if atomic.LoadUint32(&c.closed) == 1 { + return ConnClosed + } + if atomic.LoadUint32(&c.connected) == 1 { + return ConnActive + } + return ConnInit +} diff --git a/pkg/util/connectionwrapper/constant.go b/pkg/util/connectionwrapper/constant.go new file mode 100644 index 0000000000..c6e122f5da --- /dev/null +++ b/pkg/util/connectionwrapper/constant.go @@ -0,0 +1,35 @@ +package connectionwrapper + +import ( + "errors" + "time" +) + +var ( + ErrConnectionHasClosed = errors.New("connection has closed") + ErrWriteTryLockTimeout = errors.New("write trylock has timeout") + ErrWriteBufferChanTimeout = errors.New("writeBufferChan has timeout") +) + +// Default connection arguments +const ( + DefaultBufferReadCapacity = 1 << 7 + + DefaultConnReadTimeout = 15 * time.Second + DefaultConnWriteTimeout = 15 * time.Second + DefaultConnTryTimeout = 60 * time.Second + DefaultIdleTimeout = 90 * time.Second + DefaultUDPIdleTimeout = 5 * time.Second + DefaultUDPReadTimeout = 1 * time.Second + ConnReadTimeout = 15 * time.Second +) + +// ConnState Connection status +type ConnState int + +// Connection statuses +const ( + ConnInit ConnState = iota + ConnActive + ConnClosed +) diff --git a/pkg/util/gracenet/gracenet.go b/pkg/util/gracenet/gracenet.go new file mode 100644 index 0000000000..1de891d3bd --- /dev/null +++ b/pkg/util/gracenet/gracenet.go @@ -0,0 +1,5 @@ +package gracenet + +import "github.com/megaease/easegress/pkg/graceupdate" + +var GNet = graceupdate.Global diff --git a/pkg/util/iobufferpool/buffer.go b/pkg/util/iobufferpool/buffer.go new file mode 100644 index 0000000000..a7e134cc05 --- /dev/null +++ b/pkg/util/iobufferpool/buffer.go @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
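// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: wiring an accepted net.Conn
// into the Connection wrapper above. The ctx value is assumed to come from
// the layer4 server that accepted the connection, and the wrapper is assumed
// to initialize its internal channels (New in this commit leaves them nil).
// ---------------------------------------------------------------------------
package connectionwrapper

import (
	"net"

	"github.com/megaease/easegress/pkg/context"
	"github.com/megaease/easegress/pkg/util/iobufferpool"
)

func serveConn(raw net.Conn, ctx context.Layer4Context) error {
	conn := New(raw)      // wrap the raw connection
	conn.StartRWLoop(ctx) // spawn the read/write goroutines

	buf := iobufferpool.GetIoBuffer(8)
	_, _ = buf.Write([]byte("example"))

	// Write only enqueues the buffer for the write loop; the loop performs the I/O.
	return conn.Write(ctx, buf)
}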
+ */ + +package iobufferpool + +import "io" + +type IoBuffer interface { + // Read reads the next len(p) bytes from the buffer or until the buffer + // is drained. The return value n is the number of bytes read. If the + // buffer has no data to return, err is io.EOF (unless len(p) is zero); + // otherwise it is nil. + Read(p []byte) (n int, err error) + + // ReadOnce make a one-shot read and appends it to the buffer, growing + // the buffer as needed. The return value n is the number of bytes read. Any + // error except io.EOF encountered during the read is also returned. If the + // buffer becomes too large, ReadFrom will panic with ErrTooLarge. + ReadOnce(r io.Reader) (n int64, err error) + + // ReadFrom reads data from r until EOF and appends it to the buffer, growing + // the buffer as needed. The return value n is the number of bytes read. Any + // error except io.EOF encountered during the read is also returned. If the + // buffer becomes too large, ReadFrom will panic with ErrTooLarge. + ReadFrom(r io.Reader) (n int64, err error) + + // Grow updates the length of the buffer by n, growing the buffer as + // needed. The return value n is the length of p; err is always nil. If the + // buffer becomes too large, Write will panic with ErrTooLarge. + Grow(n int) error + + // Write appends the contents of p to the buffer, growing the buffer as + // needed. The return value n is the length of p; err is always nil. If the + // buffer becomes too large, Write will panic with ErrTooLarge. + Write(p []byte) (n int, err error) + + // WriteString appends the string to the buffer, growing the buffer as + // needed. The return value n is the length of s; err is always nil. If the + // buffer becomes too large, Write will panic with ErrTooLarge. + WriteString(s string) (n int, err error) + + // WriteByte appends the byte to the buffer, growing the buffer as + // needed. The return value n is the length of s; err is always nil. If the + // buffer becomes too large, Write will panic with ErrTooLarge. + WriteByte(p byte) error + + // WriteUint16 appends the uint16 to the buffer, growing the buffer as + // needed. The return value n is the length of s; err is always nil. If the + // buffer becomes too large, Write will panic with ErrTooLarge. + WriteUint16(p uint16) error + + // WriteUint32 appends the uint32 to the buffer, growing the buffer as + // needed. The return value n is the length of s; err is always nil. If the + // buffer becomes too large, Write will panic with ErrTooLarge. + WriteUint32(p uint32) error + + // WriteUint64 appends the uint64 to the buffer, growing the buffer as + // needed. The return value n is the length of s; err is always nil. If the + // buffer becomes too large, Write will panic with ErrTooLarge. + WriteUint64(p uint64) error + + // WriteTo writes data to w until the buffer is drained or an error occurs. + // The return value n is the number of bytes written; it always fits into an + // int, but it is int64 to match the io.WriterTo interface. Any error + // encountered during the write is also returned. + WriteTo(w io.Writer) (n int64, err error) + + // Peek returns n bytes from buffer, without draining any buffered data. + // If n > readable buffer, nil will be returned. + // It can be used in codec to check first-n-bytes magic bytes + // Note: do not change content in return bytes, use write instead + Peek(n int) []byte + + // Bytes returns all bytes from buffer, without draining any buffered data. 
+ // It can be used to get fixed-length content, such as headers, body. + // Note: do not change content in return bytes, use write instead + Bytes() []byte + + // Drain drains a offset length of bytes in buffer. + // It can be used with Bytes(), after consuming a fixed-length of data + Drain(offset int) + + // Len returns the number of bytes of the unread portion of the buffer; + // b.Len() == len(b.Bytes()). + Len() int + + // Cap returns the capacity of the buffer's underlying byte slice, that is, the + // total space allocated for the buffer's data. + Cap() int + + // Reset resets the buffer to be empty, + // but it retains the underlying storage for use by future writes. + Reset() + + // Clone makes a copy of IoBuffer struct + Clone() IoBuffer + + // String returns the contents of the unread portion of the buffer + // as a string. If the Buffer is a nil pointer, it returns "". + String() string + + // Alloc alloc bytes from BytePoolBuffer + Alloc(int) + + // Free free bytes to BytePoolBuffer + Free() + + // Count sets and returns reference count + Count(int32) int32 + + // EOF returns whether Io is EOF on the connection + EOF() bool + + //SetEOF sets the IoBuffer EOF + SetEOF(eof bool) + + Append(data []byte) error + + CloseWithError(err error) +} diff --git a/pkg/util/iobufferpool/bytebuffer_pool.go b/pkg/util/iobufferpool/bytebuffer_pool.go new file mode 100644 index 0000000000..5529a1f14a --- /dev/null +++ b/pkg/util/iobufferpool/bytebuffer_pool.go @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package iobufferpool + +import ( + "sync" +) + +const minShift = 6 +const maxShift = 18 +const errSlot = -1 + +var bbPool *byteBufferPool + +func init() { + bbPool = newByteBufferPool() +} + +// byteBufferPool is []byte pools +type byteBufferPool struct { + minShift int + minSize int + maxSize int + + pool []*bufferSlot +} + +type bufferSlot struct { + defaultSize int + pool sync.Pool +} + +// newByteBufferPool returns byteBufferPool +func newByteBufferPool() *byteBufferPool { + p := &byteBufferPool{ + minShift: minShift, + minSize: 1 << minShift, + maxSize: 1 << maxShift, + } + for i := 0; i <= maxShift-minShift; i++ { + slab := &bufferSlot{ + defaultSize: 1 << (uint)(i+minShift), + } + p.pool = append(p.pool, slab) + } + + return p +} + +func (p *byteBufferPool) slot(size int) int { + if size > p.maxSize { + return errSlot + } + slot := 0 + shift := 0 + if size > p.minSize { + size-- + for size > 0 { + size = size >> 1 + shift++ + } + slot = shift - p.minShift + } + + return slot +} + +func newBytes(size int) []byte { + return make([]byte, size) +} + +// take returns *[]byte from byteBufferPool +func (p *byteBufferPool) take(size int) *[]byte { + slot := p.slot(size) + if slot == errSlot { + b := newBytes(size) + return &b + } + v := p.pool[slot].pool.Get() + if v == nil { + b := newBytes(p.pool[slot].defaultSize) + b = b[0:size] + return &b + } + b := v.(*[]byte) + *b = (*b)[0:size] + return b +} + +// give returns *[]byte to byteBufferPool +func (p *byteBufferPool) give(buf *[]byte) { + if buf == nil { + return + } + size := cap(*buf) + slot := p.slot(size) + if slot == errSlot { + return + } + if size != int(p.pool[slot].defaultSize) { + return + } + p.pool[slot].pool.Put(buf) +} + +type ByteBufferPoolContainer struct { + bytes []*[]byte + *byteBufferPool +} + +func NewByteBufferPoolContainer() *ByteBufferPoolContainer { + return &ByteBufferPoolContainer{ + byteBufferPool: bbPool, + } +} + +func (c *ByteBufferPoolContainer) Reset() { + for _, buf := range c.bytes { + c.give(buf) + } + c.bytes = c.bytes[:0] +} + +func (c *ByteBufferPoolContainer) Take(size int) *[]byte { + buf := c.take(size) + c.bytes = append(c.bytes, buf) + return buf +} + +// GetBytes returns *[]byte from byteBufferPool +func GetBytes(size int) *[]byte { + return bbPool.take(size) +} + +// PutBytes Put *[]byte to byteBufferPool +func PutBytes(buf *[]byte) { + bbPool.give(buf) +} diff --git a/pkg/util/iobufferpool/iobuffer.go b/pkg/util/iobufferpool/iobuffer.go new file mode 100644 index 0000000000..8072f9b339 --- /dev/null +++ b/pkg/util/iobufferpool/iobuffer.go @@ -0,0 +1,591 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
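// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: borrowing a scratch slice from
// the size-classed byte pool above. Requests are rounded up to a power-of-two
// slot between 1<<minShift and 1<<maxShift; larger requests bypass the pool.
// ---------------------------------------------------------------------------
package iobufferpool

func withScratch(n int, fn func(scratch []byte)) {
	bufp := GetBytes(n)  // *[]byte of length n, backed by a pooled slab
	defer PutBytes(bufp) // hand the slab back to its size class
	fn(*bufp)
}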
+ */ + +package iobufferpool + +import ( + "encoding/binary" + "errors" + "io" + "sync" + "sync/atomic" + "time" +) + +const ( + AutoExpand = -1 + MinRead = 1 << 9 + MaxRead = 1 << 17 + ResetOffMark = -1 + DefaultSize = 1 << 4 + MaxBufferLength = 1 << 20 + MaxThreshold = 1 << 22 +) + +var nullByte []byte + +var ( + EOF = errors.New("EOF") + ErrTooLarge = errors.New("io buffer: too large") + ErrNegativeCount = errors.New("io buffer: negative count") + ErrInvalidWriteCount = errors.New("io buffer: invalid write count") + ConnReadTimeout = 15 * time.Second +) + +type pipe struct { + IoBuffer + mu sync.Mutex + c sync.Cond + + err error +} + +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + if p.IoBuffer == nil { + return 0 + } + return p.IoBuffer.Len() +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.IoBuffer != nil && p.IoBuffer.Len() > 0 { + return p.IoBuffer.Read(d) + } + if p.err != nil { + return 0, p.err + } + p.c.Wait() + } +} + +var errClosedPipeWrite = errors.New("write on closed buffer") + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. +func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + return len(d), p.IoBuffer.Append(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { + if err == nil { + err = io.EOF + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + p.err = err + defer p.c.Signal() +} + +func NewPipeBuffer(capacity int) IoBuffer { + return &pipe{ + IoBuffer: newIoBuffer(capacity), + } +} + +// ioBuffer is an implementation of IoBuffer +type ioBuffer struct { + buf []byte // contents: buf[off : len(buf)] + off int // read from &buf[off], write to &buf[len(buf)] + offMark int + count int32 + eof bool + + b *[]byte +} + +func newIoBuffer(capacity int) IoBuffer { + buffer := &ioBuffer{ + offMark: ResetOffMark, + count: 1, + } + if capacity <= 0 { + capacity = DefaultSize + } + buffer.b = GetBytes(capacity) + buffer.buf = (*buffer.b)[:0] + return buffer +} + +func NewIoBufferString(s string) IoBuffer { + if s == "" { + return newIoBuffer(0) + } + return &ioBuffer{ + buf: []byte(s), + offMark: ResetOffMark, + count: 1, + } +} + +func NewIoBufferBytes(bytes []byte) IoBuffer { + if bytes == nil { + return NewIoBuffer(0) + } + return &ioBuffer{ + buf: bytes, + offMark: ResetOffMark, + count: 1, + } +} + +func NewIoBufferEOF() IoBuffer { + buf := newIoBuffer(0) + buf.SetEOF(true) + return buf +} + +func (b *ioBuffer) Read(p []byte) (n int, err error) { + if b.off >= len(b.buf) { + b.Reset() + + if len(p) == 0 { + return + } + + return 0, io.EOF + } + + n = copy(p, b.buf[b.off:]) + b.off += n + + return +} + +func (b *ioBuffer) Grow(n int) error { + _, ok := b.tryGrowByReslice(n) + + if !ok { + b.grow(n) + } + + return nil +} + +func (b *ioBuffer) ReadOnce(r io.Reader) (n int64, err error) { + var m int + + if b.off > 0 && b.off >= len(b.buf) { + b.Reset() + } + + if b.off >= (cap(b.buf) - len(b.buf)) { + b.copy(0) + } + + // free max buffers avoid 
memleak + if b.off == len(b.buf) && cap(b.buf) > MaxBufferLength { + b.Free() + b.Alloc(MaxRead) + } + + l := cap(b.buf) - len(b.buf) + + m, err = r.Read(b.buf[len(b.buf):cap(b.buf)]) + + b.buf = b.buf[0 : len(b.buf)+m] + n = int64(m) + + // Not enough space anywhere, we need to allocate. + if l == m { + b.copy(AutoExpand) + } + + return n, err +} + +func (b *ioBuffer) ReadFrom(r io.Reader) (n int64, err error) { + if b.off >= len(b.buf) { + b.Reset() + } + + for { + if free := cap(b.buf) - len(b.buf); free < MinRead { + // not enough space at end + if b.off+free < MinRead { + // not enough space using beginning of buffer; + // double buffer capacity + b.copy(MinRead) + } else { + b.copy(0) + } + } + + m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) + + b.buf = b.buf[0 : len(b.buf)+m] + n += int64(m) + + if e == io.EOF { + break + } + + if m == 0 { + break + } + + if e != nil { + return n, e + } + } + + return +} + +func (b *ioBuffer) Write(p []byte) (n int, err error) { + m, ok := b.tryGrowByReslice(len(p)) + + if !ok { + m = b.grow(len(p)) + } + + return copy(b.buf[m:], p), nil +} + +func (b *ioBuffer) WriteString(s string) (n int, err error) { + m, ok := b.tryGrowByReslice(len(s)) + + if !ok { + m = b.grow(len(s)) + } + + return copy(b.buf[m:], s), nil +} + +func (b *ioBuffer) tryGrowByReslice(n int) (int, bool) { + if l := len(b.buf); l+n <= cap(b.buf) { + b.buf = b.buf[:l+n] + + return l, true + } + + return 0, false +} + +func (b *ioBuffer) grow(n int) int { + m := b.Len() + + // If buffer is empty, reset to recover space. + if m == 0 && b.off != 0 { + b.Reset() + } + + // Try to grow by means of a reslice. + if i, ok := b.tryGrowByReslice(n); ok { + return i + } + + if m+n <= cap(b.buf)/2 { + // We can slide things down instead of allocating a new + // slice. We only need m+n <= cap(b.buf) to slide, but + // we instead let capacity get twice as large so we + // don't spend all our time copying. + b.copy(0) + } else { + // Not enough space anywhere, we need to allocate. + b.copy(n) + } + + // Restore b.off and len(b.buf). 
+ b.off = 0 + b.buf = b.buf[:m+n] + + return m +} + +func (b *ioBuffer) WriteTo(w io.Writer) (n int64, err error) { + for b.off < len(b.buf) { + nBytes := b.Len() + m, e := w.Write(b.buf[b.off:]) + + if m > nBytes { + panic(ErrInvalidWriteCount) + } + + b.off += m + n += int64(m) + + if e != nil { + return n, e + } + + if m == 0 || m == nBytes { + return n, nil + } + } + + return +} + +func (b *ioBuffer) WriteByte(p byte) error { + m, ok := b.tryGrowByReslice(1) + + if !ok { + m = b.grow(1) + } + + b.buf[m] = p + return nil +} + +func (b *ioBuffer) WriteUint16(p uint16) error { + m, ok := b.tryGrowByReslice(2) + + if !ok { + m = b.grow(2) + } + + binary.BigEndian.PutUint16(b.buf[m:], p) + return nil +} + +func (b *ioBuffer) WriteUint32(p uint32) error { + m, ok := b.tryGrowByReslice(4) + + if !ok { + m = b.grow(4) + } + + binary.BigEndian.PutUint32(b.buf[m:], p) + return nil +} + +func (b *ioBuffer) WriteUint64(p uint64) error { + m, ok := b.tryGrowByReslice(8) + + if !ok { + m = b.grow(8) + } + + binary.BigEndian.PutUint64(b.buf[m:], p) + return nil +} + +func (b *ioBuffer) Append(data []byte) error { + if b.off >= len(b.buf) { + b.Reset() + } + + dataLen := len(data) + + if free := cap(b.buf) - len(b.buf); free < dataLen { + // not enough space at end + if b.off+free < dataLen { + // not enough space using beginning of buffer; + // double buffer capacity + b.copy(dataLen) + } else { + b.copy(0) + } + } + + m := copy(b.buf[len(b.buf):len(b.buf)+dataLen], data) + b.buf = b.buf[0 : len(b.buf)+m] + + return nil +} + +func (b *ioBuffer) AppendByte(data byte) error { + return b.Append([]byte{data}) +} + +func (b *ioBuffer) Peek(n int) []byte { + if len(b.buf)-b.off < n { + return nil + } + + return b.buf[b.off : b.off+n] +} + +func (b *ioBuffer) Mark() { + b.offMark = b.off +} + +func (b *ioBuffer) Restore() { + if b.offMark != ResetOffMark { + b.off = b.offMark + b.offMark = ResetOffMark + } +} + +func (b *ioBuffer) Bytes() []byte { + return b.buf[b.off:] +} + +func (b *ioBuffer) Cut(offset int) IoBuffer { + if b.off+offset > len(b.buf) { + return nil + } + + buf := make([]byte, offset) + + copy(buf, b.buf[b.off:b.off+offset]) + b.off += offset + b.offMark = ResetOffMark + + return &ioBuffer{ + buf: buf, + off: 0, + } +} + +func (b *ioBuffer) Drain(offset int) { + if b.off+offset > len(b.buf) { + return + } + + b.off += offset + b.offMark = ResetOffMark +} + +func (b *ioBuffer) String() string { + return string(b.buf[b.off:]) +} + +func (b *ioBuffer) Len() int { + return len(b.buf) - b.off +} + +func (b *ioBuffer) Cap() int { + return cap(b.buf) +} + +func (b *ioBuffer) Reset() { + b.buf = b.buf[:0] + b.off = 0 + b.offMark = ResetOffMark + b.eof = false +} + +func (b *ioBuffer) available() int { + return len(b.buf) - b.off +} + +func (b *ioBuffer) Clone() IoBuffer { + buf := GetIoBuffer(b.Len()) + buf.Write(b.Bytes()) + + buf.SetEOF(b.EOF()) + + return buf +} + +func (b *ioBuffer) Free() { + b.Reset() + b.giveSlice() +} + +func (b *ioBuffer) Alloc(size int) { + if b.buf != nil { + b.Free() + } + if size <= 0 { + size = DefaultSize + } + b.b = b.makeSlice(size) + b.buf = *b.b + b.buf = b.buf[:0] +} + +func (b *ioBuffer) Count(count int32) int32 { + return atomic.AddInt32(&b.count, count) +} + +func (b *ioBuffer) EOF() bool { + return b.eof +} + +func (b *ioBuffer) SetEOF(eof bool) { + b.eof = eof +} + +//The expand parameter means the following: +//A, if expand > 0, cap(newbuf) is calculated according to cap(oldbuf) and expand. 
+//B, if expand == AutoExpand, cap(newbuf) is calculated only according to cap(oldbuf). +//C, if expand == 0, only copy, buf not be expanded. +func (b *ioBuffer) copy(expand int) { + var newBuf []byte + var bufp *[]byte + + if expand > 0 || expand == AutoExpand { + cap := cap(b.buf) + // when buf cap greater than MaxThreshold, start Slow Grow. + if cap < 2*MinRead { + cap = 2 * MinRead + } else if cap < MaxThreshold { + cap = 2 * cap + } else { + cap = cap + cap/4 + } + + if expand == AutoExpand { + expand = 0 + } + + bufp = b.makeSlice(cap + expand) + newBuf = *bufp + copy(newBuf, b.buf[b.off:]) + PutBytes(b.b) + b.b = bufp + } else { + newBuf = b.buf + copy(newBuf, b.buf[b.off:]) + } + b.buf = newBuf[:len(b.buf)-b.off] + b.off = 0 +} + +func (b *ioBuffer) makeSlice(n int) *[]byte { + return GetBytes(n) +} + +func (b *ioBuffer) giveSlice() { + if b.b != nil { + PutBytes(b.b) + b.b = nil + b.buf = nullByte + } +} + +func (b *ioBuffer) CloseWithError(err error) { +} diff --git a/pkg/util/iobufferpool/iobuffer_pool.go b/pkg/util/iobufferpool/iobuffer_pool.go new file mode 100644 index 0000000000..2ff77cf5ef --- /dev/null +++ b/pkg/util/iobufferpool/iobuffer_pool.go @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package iobufferpool + +import ( + "errors" + "sync" +) + +const UdpPacketMaxSize = 64 * 1024 + +var ibPool IoBufferPool + +// IoBufferPool is Iobuffer Pool +type IoBufferPool struct { + pool sync.Pool +} + +// take returns IoBuffer from IoBufferPool +func (p *IoBufferPool) take(size int) (buf IoBuffer) { + v := p.pool.Get() + if v == nil { + buf = newIoBuffer(size) + } else { + buf = v.(IoBuffer) + buf.Alloc(size) + buf.Count(1) + } + return +} + +// give returns IoBuffer to IoBufferPool +func (p *IoBufferPool) give(buf IoBuffer) { + buf.Free() + p.pool.Put(buf) +} + +// GetIoBuffer returns IoBuffer from pool +func GetIoBuffer(size int) IoBuffer { + return ibPool.take(size) +} + +// NewIoBuffer is an alias for GetIoBuffer +func NewIoBuffer(size int) IoBuffer { + return GetIoBuffer(size) +} + +// PutIoBuffer returns IoBuffer to pool +func PutIoBuffer(buf IoBuffer) error { + count := buf.Count(-1) + if count > 0 { + return nil + } else if count < 0 { + return errors.New("PutIoBuffer duplicate") + } + if p, _ := buf.(*pipe); p != nil { + buf = p.IoBuffer + } + ibPool.give(buf) + return nil +} diff --git a/pkg/util/iobufferpool/iobufferpool.go b/pkg/util/iobufferpool/iobufferpool.go deleted file mode 100644 index bd7d3d2602..0000000000 --- a/pkg/util/iobufferpool/iobufferpool.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iobufferpool - -import ( - "bytes" - "sync" -) - -var ioBufferPool IOBufferPool - -// IOBufferPool io buffer pool, especially use for udp packet -type IOBufferPool struct { - pool sync.Pool -} - -func (p *IOBufferPool) take() (buf *bytes.Buffer) { - v := p.pool.Get() - if v == nil { - buf = bytes.NewBuffer(nil) - } else { - buf = v.(*bytes.Buffer) - } - return -} - -func (p *IOBufferPool) give(buf *bytes.Buffer) { - buf.Truncate(0) - p.pool.Put(buf) -} - -// GetIoBuffer returns IoBuffer from pool -func GetIoBuffer() *bytes.Buffer { - return ioBufferPool.take() -} - -func NewIOBuffer() *bytes.Buffer { - return GetIoBuffer() -} - -// PutIoBuffer returns IoBuffer to pool -func PutIoBuffer(buf *bytes.Buffer) error { - ioBufferPool.give(buf) - return nil -} diff --git a/pkg/util/limitlistener/limitlistener.go b/pkg/util/limitlistener/limitlistener.go index b91aad1c6e..ee3af26763 100644 --- a/pkg/util/limitlistener/limitlistener.go +++ b/pkg/util/limitlistener/limitlistener.go @@ -47,8 +47,8 @@ type LimitListener struct { closeOnce sync.Once // ensures the done chan is only closed once } -// acquire acquires the limiting semaphore. Returns true if successfully -// accquired, false if the listener is closed and the semaphore is not +// acquire the limiting semaphore. Returns true if successfully +// acquired, false if the listener is closed and the semaphore is not // acquired. func (l *LimitListener) acquire() bool { return l.sem.AcquireWithContext(l.ctx) == nil diff --git a/pkg/util/timerpool/timerpool.go b/pkg/util/timerpool/timerpool.go index d2e07f5554..a444c41c6a 100644 --- a/pkg/util/timerpool/timerpool.go +++ b/pkg/util/timerpool/timerpool.go @@ -30,8 +30,8 @@ type timerPool struct { } // Get returns a timer that completes after the given duration. -func (tp *timerPool) Get(d time.Duration) *time.Timer { - if t, _ := tp.p.Get().(*time.Timer); t != nil { +func Get(d time.Duration) *time.Timer { + if t, _ := globalTimerPool.p.Get().(*time.Timer); t != nil { t.Reset(d) return t } @@ -46,7 +46,7 @@ func (tp *timerPool) Get(d time.Duration) *time.Timer { // Put will try to stop the timer before pooling. If the // given timer already expired, Put will read the unreceived // value if there is one. 
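+// Typical usage of the package-level pool (an illustrative sketch;
+// workDone below stands for any caller-side channel):
+//
+//	t := timerpool.Get(5 * time.Second)
+//	defer timerpool.Put(t)
+//	select {
+//	case <-workDone:
+//	case <-t.C:
+//	}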
-func (tp *timerPool) Put(t *time.Timer) { +func Put(t *time.Timer) { if !t.Stop() { select { case <-t.C: @@ -54,5 +54,5 @@ func (tp *timerPool) Put(t *time.Timer) { } } - tp.p.Put(t) + globalTimerPool.p.Put(t) } From 391be9956e6f39a3b31f8cb1f4211e9b1d0c7ad0 Mon Sep 17 00:00:00 2001 From: "jinxiaodong@cmii.chinamobile.com" <1990ziyou> Date: Fri, 10 Sep 2021 00:08:29 +0800 Subject: [PATCH 09/99] [tcpproxy] create upstream conn --- pkg/context/layer4context.go | 3 + pkg/filter/layer4proxy/pool.go | 176 +++++++++++++ pkg/filter/layer4proxy/proxy.go | 236 ++++++++++++++++++ .../upstream => filter/layer4proxy}/server.go | 43 ++-- pkg/object/layer4rawserver/upstream/pool.go | 124 --------- pkg/object/layer4rawserver/upstream/proxy.go | 35 --- pkg/util/connectionwrapper/connection.go | 4 + pkg/util/layer4filter/layer4filter.go | 86 +++++++ 8 files changed, 522 insertions(+), 185 deletions(-) create mode 100644 pkg/filter/layer4proxy/pool.go create mode 100644 pkg/filter/layer4proxy/proxy.go rename pkg/{object/layer4rawserver/upstream => filter/layer4proxy}/server.go (86%) delete mode 100644 pkg/object/layer4rawserver/upstream/pool.go delete mode 100644 pkg/object/layer4rawserver/upstream/proxy.go create mode 100644 pkg/util/layer4filter/layer4filter.go diff --git a/pkg/context/layer4context.go b/pkg/context/layer4context.go index 505c938e02..8c93cddc90 100644 --- a/pkg/context/layer4context.go +++ b/pkg/context/layer4context.go @@ -20,6 +20,7 @@ package context import ( "bytes" stdcontext "context" + "github.com/megaease/easegress/pkg/filter/layer4proxy" "github.com/megaease/easegress/pkg/object/layer4rawserver" "github.com/megaease/easegress/pkg/util/connectionwrapper" "net" @@ -51,6 +52,8 @@ type ( ClientDisconnected() bool ClientConn() *connectionwrapper.Connection + UpStreamConn() *layer4proxy.UpStreamConn + SetUpStreamConn(conn *layer4proxy.UpStreamConn) Duration() time.Duration // For log, sample, etc. OnFinish(func()) // For setting final client statistics, etc. diff --git a/pkg/filter/layer4proxy/pool.go b/pkg/filter/layer4proxy/pool.go new file mode 100644 index 0000000000..36f8911735 --- /dev/null +++ b/pkg/filter/layer4proxy/pool.go @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package layer4proxy + +import ( + "fmt" + "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/iobufferpool" + "github.com/megaease/easegress/pkg/util/layer4filter" + "github.com/megaease/easegress/pkg/util/layer4stat" + "net" +) + +type ( + pool struct { + spec *PoolSpec + + tagPrefix string + filter *layer4filter.Layer4filter + + servers *servers + layer4Stat *layer4stat.Layer4Stat + } + + // PoolSpec describes a pool of servers. 
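+	// A hypothetical YAML snippet for this spec (values are examples only):
+	//
+	//	loadBalance:
+	//	  policy: roundRobin
+	//	servers:
+	//	  - url: 127.0.0.1:9095
+	//	  - url: 127.0.0.1:9096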
+ PoolSpec struct { + SpanName string `yaml:"spanName" jsonschema:"omitempty"` + ServersTags []string `yaml:"serversTags" jsonschema:"omitempty,uniqueItems=true"` + Filter *layer4filter.Spec `yaml:"filter" jsonschema:"omitempty"` + Servers []*Server `yaml:"servers" jsonschema:"omitempty"` + ServiceRegistry string `yaml:"serviceRegistry" jsonschema:"omitempty"` + ServiceName string `yaml:"serviceName" jsonschema:"omitempty"` + LoadBalance *LoadBalance `yaml:"loadBalance" jsonschema:"required"` + } + + // PoolStatus is the status of Pool. + PoolStatus struct { + Stat *layer4stat.Status `yaml:"stat"` + } + + UpStreamConn struct { + conn net.Conn + done chan struct{} + writeBufferChan chan iobufferpool.IoBuffer + } +) + +func NewUpStreamConn(conn net.Conn) *UpStreamConn { + return &UpStreamConn{ + conn: conn, + writeBufferChan: make(chan iobufferpool.IoBuffer, 8), + } +} + +// Validate validates poolSpec. +func (s PoolSpec) Validate() error { + if s.ServiceName == "" && len(s.Servers) == 0 { + return fmt.Errorf("both serviceName and servers are empty") + } + + serversGotWeight := 0 + for _, server := range s.Servers { + if server.Weight > 0 { + serversGotWeight++ + } + } + if serversGotWeight > 0 && serversGotWeight < len(s.Servers) { + return fmt.Errorf("not all servers have weight(%d/%d)", + serversGotWeight, len(s.Servers)) + } + + if s.ServiceName == "" { + servers := newStaticServers(s.Servers, s.ServersTags, s.LoadBalance) + if servers.len() == 0 { + return fmt.Errorf("serversTags picks none of servers") + } + } + + return nil +} + +func newPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) *pool { + + return &pool{ + spec: spec, + tagPrefix: tagPrefix, + + servers: newServers(super, spec), + layer4Stat: layer4stat.New(), + } +} + +func (p *pool) status() *PoolStatus { + s := &PoolStatus{Stat: p.layer4Stat.Status()} + return s +} + +func (u *UpStreamConn) Write(source iobufferpool.IoBuffer) { + buf := source.Clone() + source.Drain(buf.Len()) + u.writeBufferChan <- buf +} + +func (u *UpStreamConn) WriteLoop() { + for { + select { + case buf, ok := <-u.writeBufferChan: + if !ok { + return + } + + iobuf := buf.(iobufferpool.IoBuffer) + for { + n, err := u.conn.Write(iobuf.Bytes()) + if n == 0 || err != nil { + return + } + iobuf.Drain(n) + } + case <-u.done: + return + } + } +} + +func (p *pool) handle(ctx context.Layer4Context) string { + + conn := ctx.UpStreamConn() + if conn == nil { + server, err := p.servers.next(ctx) + if err != nil { + return resultInternalError + } + + switch ctx.Protocol() { + case "tcp": + if tconn, dialErr := net.Dial("tcp", server.Addr); dialErr != nil { + logger.Errorf("dial tcp to %s failed, err: %s", server.Addr, dialErr.Error()) + return resultServerError + } else { + upstreamConn := NewUpStreamConn(tconn) + ctx.SetUpStreamConn(upstreamConn) + go upstreamConn.WriteLoop() + + go func() { + // TODO do upstream connection read + }() + } + case "udp": + + } + } + + return "" +} + +func (p *pool) close() { + p.servers.close() +} diff --git a/pkg/filter/layer4proxy/proxy.go b/pkg/filter/layer4proxy/proxy.go new file mode 100644 index 0000000000..0bf5b4db62 --- /dev/null +++ b/pkg/filter/layer4proxy/proxy.go @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package layer4proxy + +import ( + "fmt" + "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/object/layer4pipeline" + "github.com/megaease/easegress/pkg/util/fallback" +) + +const ( + // Kind is the kind of Proxy. + Kind = "Proxy" + + resultInternalError = "internalError" + resultClientError = "clientError" + resultServerError = "serverError" +) + +var results = []string{ + resultInternalError, + resultClientError, + resultServerError, +} + +func init() { + layer4pipeline.Register(&Proxy{}) +} + +type ( + // Proxy is the filter Proxy. + Proxy struct { + filterSpec *layer4pipeline.FilterSpec + spec *Spec + + fallback *fallback.Fallback + + mainPool *pool + candidatePools []*pool + mirrorPool *pool + } + + // Spec describes the Proxy. + Spec struct { + Fallback *FallbackSpec `yaml:"fallback,omitempty" jsonschema:"omitempty"` + MainPool *PoolSpec `yaml:"mainPool" jsonschema:"required"` + CandidatePools []*PoolSpec `yaml:"candidatePools,omitempty" jsonschema:"omitempty"` + MirrorPool *PoolSpec `yaml:"mirrorPool,omitempty" jsonschema:"omitempty"` + FailureCodes []int `yaml:"failureCodes" jsonschema:"omitempty,uniqueItems=true,format=httpcode-array"` + } + + // FallbackSpec describes the fallback policy. + FallbackSpec struct { + ForCodes bool `yaml:"forCodes"` + fallback.Spec `yaml:",inline"` + } + + // Status is the status of Proxy. + Status struct { + MainPool *PoolStatus `yaml:"mainPool"` + CandidatePools []*PoolStatus `yaml:"candidatePools,omitempty"` + MirrorPool *PoolStatus `yaml:"mirrorPool,omitempty"` + } +) + +// Validate validates Spec. +func (s Spec) Validate() error { + // NOTE: The tag of v parent may be behind mainPool. + if s.MainPool == nil { + return fmt.Errorf("mainPool is required") + } + + if s.MainPool.Filter != nil { + return fmt.Errorf("filter must be empty in mainPool") + } + + if len(s.CandidatePools) > 0 { + for _, v := range s.CandidatePools { + if v.Filter == nil { + return fmt.Errorf("filter of candidatePool is required") + } + } + } + + if s.MirrorPool != nil { + if s.MirrorPool.Filter == nil { + return fmt.Errorf("filter of mirrorPool is required") + } + } + + if len(s.FailureCodes) == 0 { + if s.Fallback != nil { + return fmt.Errorf("fallback needs failureCodes") + } + } + + return nil +} + +// Kind returns the kind of Proxy. +func (b *Proxy) Kind() string { + return Kind +} + +// DefaultSpec returns the default spec of Proxy. +func (b *Proxy) DefaultSpec() interface{} { + return &Spec{} +} + +// Description returns the description of Proxy. +func (b *Proxy) Description() string { + return "Proxy sets the proxy of proxy servers" +} + +// Results returns the results of Proxy. +func (b *Proxy) Results() []string { + return results +} + +// Init initializes Proxy. +func (b *Proxy) Init(filterSpec *layer4pipeline.FilterSpec) { + b.filterSpec, b.spec = filterSpec, filterSpec.FilterSpec().(*Spec) + b.reload() +} + +// Inherit inherits previous generation of Proxy. 
+func (b *Proxy) Inherit(filterSpec *layer4pipeline.FilterSpec, previousGeneration layer4pipeline.Filter) { + previousGeneration.Close() + b.Init(filterSpec) +} + +func (b *Proxy) reload() { + super := b.filterSpec.Super() + + b.mainPool = newPool(super, b.spec.MainPool, "proxy#main") + + if b.spec.Fallback != nil { + b.fallback = fallback.New(&b.spec.Fallback.Spec) + } + + if len(b.spec.CandidatePools) > 0 { + var candidatePools []*pool + for k := range b.spec.CandidatePools { + candidatePools = append(candidatePools, + newPool(super, b.spec.CandidatePools[k], fmt.Sprintf("proxy#candidate#%d", k))) + } + b.candidatePools = candidatePools + } + if b.spec.MirrorPool != nil { + b.mirrorPool = newPool(super, b.spec.MirrorPool, "proxy#mirror") + } +} + +// Status returns Proxy status. +func (b *Proxy) Status() interface{} { + s := &Status{ + MainPool: b.mainPool.status(), + } + if b.candidatePools != nil { + for k := range b.candidatePools { + s.CandidatePools = append(s.CandidatePools, b.candidatePools[k].status()) + } + } + if b.mirrorPool != nil { + s.MirrorPool = b.mirrorPool.status() + } + return s +} + +// Close closes Proxy. +func (b *Proxy) Close() { + b.mainPool.close() + + if b.candidatePools != nil { + for _, v := range b.candidatePools { + v.close() + } + } + + if b.mirrorPool != nil { + b.mirrorPool.close() + } +} + +func (b *Proxy) fallbackForCodes(ctx context.HTTPContext) bool { + if b.fallback != nil && b.spec.Fallback.ForCodes { + for _, code := range b.spec.FailureCodes { + if ctx.Response().StatusCode() == code { + b.fallback.Fallback(ctx) + return true + } + } + } + return false +} + +// Handle handles HTTPContext. +func (b *Proxy) Handle(ctx context.Layer4Context) (result string) { + result = b.handle(ctx) + return ctx.CallNextHandler(result) +} + +func (b *Proxy) handle(ctx context.Layer4Context) (result string) { + // TODO add mirror pool proxy + + var p *pool + if len(b.candidatePools) > 0 { + for k, v := range b.candidatePools { + if v.filter.Filter(ctx) { + p = b.candidatePools[k] + break + } + } + } + + if p == nil { + p = b.mainPool + } + return p.handle(ctx) +} diff --git a/pkg/object/layer4rawserver/upstream/server.go b/pkg/filter/layer4proxy/server.go similarity index 86% rename from pkg/object/layer4rawserver/upstream/server.go rename to pkg/filter/layer4proxy/server.go index 14a8f63b40..2cf96bc27b 100644 --- a/pkg/object/layer4rawserver/upstream/server.go +++ b/pkg/filter/layer4proxy/server.go @@ -15,13 +15,12 @@ * limitations under the License. */ -package upstream +package layer4proxy import ( "fmt" "math/rand" "net" - "strconv" "sync" "sync/atomic" "time" @@ -68,33 +67,25 @@ type ( // Server is proxy server. Server struct { - HostPort string `yaml:"HostPort" jsonschema:"required,format=hostport"` - Tags []string `yaml:"tags" jsonschema:"omitempty,uniqueItems=true"` - Weight int `yaml:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` + Addr string `yaml:"url" jsonschema:"required,format=hostport"` + Tags []string `yaml:"tags" jsonschema:"omitempty,uniqueItems=true"` + Weight int `yaml:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` } // LoadBalance is load balance for multiple servers. 
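+	// For example, with policy ipHash the backend is picked as
+	// hashtool.Hash32(clientIP) % len(servers) (see ipHash below), so a
+	// given client IP consistently lands on the same server.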
LoadBalance struct { - Policy string `yaml:"policy" jsonschema:"required,enum=roundRobin,enum=random,enum=weightedRandom,enum=ipHash"` + Policy string `yaml:"policy" jsonschema:"required,enum=roundRobin,enum=random,enum=weightedRandom,enum=ipHash"` + HeaderHashKey string `yaml:"headerHashKey" jsonschema:"omitempty"` } ) -func (s *servers) Validated() error { - if s.poolSpec.Protocol == "tcp" { - for _, server := range s.static.servers { - if _, err := net.ResolveTCPAddr("tcp", server.HostPort); err != nil { - logger.Errorf("resolve tcp addr failed, host port: %v, %v", server.HostPort, err) - return err - } - } - } - - // TODO check udp address - return nil +func (s *Server) String() string { + return fmt.Sprintf("%s,%v,%d", s.Addr, s.Tags, s.Weight) } -func (s *Server) String() string { - return fmt.Sprintf("%s,%v,%d", s.HostPort, s.Tags, s.Weight) +// Validate validates LoadBalance. +func (lb LoadBalance) Validate() error { + return nil } func newServers(super *supervisor.Supervisor, poolSpec *PoolSpec) *servers { @@ -153,9 +144,9 @@ func (s *servers) useService(serviceInstanceSpecs map[string]*serviceregistry.Se var servers []*Server for _, instance := range serviceInstanceSpecs { servers = append(servers, &Server{ - HostPort: instance.Address + ":" + strconv.Itoa(int(instance.Port)), - Tags: instance.Tags, - Weight: instance.Weight, + Addr: instance.URL(), + Tags: instance.Tags, + Weight: instance.Weight, }) } if len(servers) == 0 { @@ -272,6 +263,7 @@ func (ss *staticServers) next(ctx context.Layer4Context) *Server { } logger.Errorf("BUG: unknown load balance policy: %s", ss.lb.Policy) + return ss.roundRobin() } @@ -302,9 +294,8 @@ func (ss *staticServers) weightedRandom() *Server { } func (ss *staticServers) ipHash(ctx context.Layer4Context) *Server { - remoteAddr := ctx.RemoteAddr().String() - host, _, _ := net.SplitHostPort(remoteAddr) - + addr := ctx.RemoteAddr().String() + host, _, _ := net.SplitHostPort(addr) sum32 := int(hashtool.Hash32(host)) return ss.servers[sum32%len(ss.servers)] } diff --git a/pkg/object/layer4rawserver/upstream/pool.go b/pkg/object/layer4rawserver/upstream/pool.go deleted file mode 100644 index 17462175ca..0000000000 --- a/pkg/object/layer4rawserver/upstream/pool.go +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package upstream - -import ( - "fmt" - "net" - "time" - - "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/supervisor" - "github.com/megaease/easegress/pkg/util/layer4stat" - "github.com/megaease/easegress/pkg/util/memorycache" -) - -type ( - protocol string - - pool struct { - spec *PoolSpec - - tagPrefix string - - servers *servers - layer4stat *layer4stat.Layer4Stat - } - - // PoolSpec describes a pool of servers. 
- PoolSpec struct { - Protocol protocol `yaml:"protocol" jsonschema:"required" ` - SpanName string `yaml:"spanName" jsonschema:"omitempty"` - ServersTags []string `yaml:"serversTags" jsonschema:"omitempty,uniqueItems=true"` - Servers []*Server `yaml:"servers" jsonschema:"omitempty"` - ServiceRegistry string `yaml:"serviceRegistry" jsonschema:"omitempty"` - ServiceName string `yaml:"serviceName" jsonschema:"omitempty"` - LoadBalance *LoadBalance `yaml:"loadBalance" jsonschema:"required"` - MemoryCache *memorycache.Spec `yaml:"memoryCache,omitempty" jsonschema:"omitempty"` - } - - // PoolStatus is the status of Pool. - PoolStatus struct { - Stat *layer4stat.Status `yaml:"stat"` - } -) - -// Validate validates poolSpec. -func (s PoolSpec) Validate() error { - if s.ServiceName == "" && len(s.Servers) == 0 { - return fmt.Errorf("both serviceName and servers are empty") - } - - serversGotWeight := 0 - for _, server := range s.Servers { - if server.Weight > 0 { - serversGotWeight++ - } - } - if serversGotWeight > 0 && serversGotWeight < len(s.Servers) { - return fmt.Errorf("not all servers have weight(%d/%d)", - serversGotWeight, len(s.Servers)) - } - - if s.ServiceName == "" { - servers := newStaticServers(s.Servers, s.ServersTags, s.LoadBalance) - if servers.len() == 0 { - return fmt.Errorf("serversTags picks none of servers") - } - } - - return nil -} - -func newPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) *pool { - - return &pool{ - spec: spec, - tagPrefix: tagPrefix, - servers: newServers(super, spec), - layer4stat: layer4stat.New(), - } -} - -func (p *pool) status() *PoolStatus { - s := &PoolStatus{Stat: p.layer4stat.Status()} - return s -} - -func (p *pool) handle(ctx context.Layer4Context) string { - - server, err := p.servers.next(ctx) - if err != nil { - return resultInternalError - } - - upstreamConn, err := net.DialTimeout("tcp", server.HostPort, 1000*time.Millisecond) - if err != nil { - logger.Errorf("dial tcp for addr: % failed, err: %v", server.HostPort, err) - } - _ = upstreamConn.Close() - - // TODO do layer4 proxy - - return "" -} - -func (p *pool) close() { - p.servers.close() -} diff --git a/pkg/object/layer4rawserver/upstream/proxy.go b/pkg/object/layer4rawserver/upstream/proxy.go deleted file mode 100644 index 594f7a066d..0000000000 --- a/pkg/object/layer4rawserver/upstream/proxy.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package upstream - -const ( - // Kind is the kind of Proxy. 
- Kind = "Proxy" - - resultFallback = "fallback" - resultInternalError = "internalError" - resultClientError = "clientError" - resultServerError = "serverError" -) - -var results = []string{ - resultFallback, - resultInternalError, - resultClientError, - resultServerError, -} diff --git a/pkg/util/connectionwrapper/connection.go b/pkg/util/connectionwrapper/connection.go index a6a78ee114..abc802ec1a 100644 --- a/pkg/util/connectionwrapper/connection.go +++ b/pkg/util/connectionwrapper/connection.go @@ -464,3 +464,7 @@ func (c *Connection) State() ConnState { } return ConnInit } + +func (c *Connection) GetReadBuffer() iobufferpool.IoBuffer { + return c.readBuffer +} diff --git a/pkg/util/layer4filter/layer4filter.go b/pkg/util/layer4filter/layer4filter.go new file mode 100644 index 0000000000..93a701148b --- /dev/null +++ b/pkg/util/layer4filter/layer4filter.go @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package layer4filter + +import ( + "math/rand" + "net" + "time" + + "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/util/hashtool" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +const ( + policyIPHash string = "ipHash" + policyRandom = "random" +) + +type ( + // Spec describes Layer4filter. + Spec struct { + Probability *Probability `yaml:"probability,omitempty" jsonschema:"omitempty"` + } + + // Layer4filter filters layer4 traffic. + Layer4filter struct { + spec *Spec + } + + // Probability filters layer4 traffic by probability. + Probability struct { + PerMill uint32 `yaml:"perMill" jsonschema:"required,minimum=1,maximum=1000"` + Policy string `yaml:"policy" jsonschema:"required,enum=ipHash,enum=headerHash,enum=random"` + } +) + +// New creates an HTTPFilter. +func New(spec *Spec) *Layer4filter { + hf := &Layer4filter{ + spec: spec, + } + return hf +} + +// Filter filters Layer4Context. 
+func (hf *Layer4filter) Filter(ctx context.Layer4Context) bool { + return hf.filterProbability(ctx) +} + +func (hf *Layer4filter) filterProbability(ctx context.Layer4Context) bool { + prob := hf.spec.Probability + + var result uint32 + switch prob.Policy { + case policyRandom: + result = uint32(rand.Int31n(1000)) + case policyIPHash: + default: + host, _, _ := net.SplitHostPort(ctx.RemoteAddr().String()) + result = hashtool.Hash32(host) + } + + if result%1000 < prob.PerMill { + return true + } + return false +} From 7431d908a94c68b70bc2fc0e3570ff2c51bd7a25 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 10 Sep 2021 11:50:19 +0800 Subject: [PATCH 10/99] [layer4proxy] resolve cycle dependence --- pkg/context/layer4context.go | 21 ++++++++------------- pkg/filter/layer4proxy/pool.go | 2 +- pkg/util/connection/connection.go | 18 ++++++++++++++++++ 3 files changed, 27 insertions(+), 14 deletions(-) create mode 100644 pkg/util/connection/connection.go diff --git a/pkg/context/layer4context.go b/pkg/context/layer4context.go index 8c93cddc90..6a720f82ea 100644 --- a/pkg/context/layer4context.go +++ b/pkg/context/layer4context.go @@ -20,9 +20,6 @@ package context import ( "bytes" stdcontext "context" - "github.com/megaease/easegress/pkg/filter/layer4proxy" - "github.com/megaease/easegress/pkg/object/layer4rawserver" - "github.com/megaease/easegress/pkg/util/connectionwrapper" "net" "sync" "time" @@ -51,9 +48,9 @@ type ( Cancelled() bool ClientDisconnected() bool - ClientConn() *connectionwrapper.Connection - UpStreamConn() *layer4proxy.UpStreamConn - SetUpStreamConn(conn *layer4proxy.UpStreamConn) + ClientConn() net.Conn + UpStreamConn() net.Conn + SetUpStreamConn(conn net.Conn) Duration() time.Duration // For log, sample, etc. OnFinish(func()) // For setting final client statistics, etc. @@ -83,7 +80,7 @@ type ( protocol string localAddr net.Addr remoteAddr net.Addr - clientConn *connectionwrapper.Connection + clientConn net.Conn connectionArgs *ConnectionArgs @@ -101,16 +98,14 @@ type ( ) // NewLayer4Context creates an Layer4Context. 
-func NewLayer4Context(protocol string, conn *connectionwrapper.Connection, mux *layer4rawserver.Mux) *layer4Context { - - // TODO add mux for mux mapper +func NewLayer4Context(protocol string, conn net.Conn) *layer4Context { startTime := time.Now() res := layer4Context{ protocol: protocol, clientConn: conn, - localAddr: conn.Conn.LocalAddr(), - remoteAddr: conn.Conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + remoteAddr: conn.RemoteAddr(), startTime: &startTime, stopChan: make(chan struct{}), @@ -182,7 +177,7 @@ func (ctx *layer4Context) ClientDisconnected() bool { panic("implement me") } -func (ctx *layer4Context) ClientConn() *connectionwrapper.Connection { +func (ctx *layer4Context) ClientConn() net.Conn { return ctx.clientConn } diff --git a/pkg/filter/layer4proxy/pool.go b/pkg/filter/layer4proxy/pool.go index 36f8911735..d7a75be959 100644 --- a/pkg/filter/layer4proxy/pool.go +++ b/pkg/filter/layer4proxy/pool.go @@ -155,8 +155,8 @@ func (p *pool) handle(ctx context.Layer4Context) string { logger.Errorf("dial tcp to %s failed, err: %s", server.Addr, dialErr.Error()) return resultServerError } else { + ctx.SetUpStreamConn(tconn) upstreamConn := NewUpStreamConn(tconn) - ctx.SetUpStreamConn(upstreamConn) go upstreamConn.WriteLoop() go func() { diff --git a/pkg/util/connection/connection.go b/pkg/util/connection/connection.go new file mode 100644 index 0000000000..e1aada762a --- /dev/null +++ b/pkg/util/connection/connection.go @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package connection From 0d33c87ce51757e9bbd36ac65aaf7c06afaa026b Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 10 Sep 2021 16:34:02 +0800 Subject: [PATCH 11/99] [layer4proxy] extract connection(30%) --- pkg/util/connection/connection.go | 251 ++++++++++++++++++++++++++++++ pkg/util/connection/constant.go | 67 ++++++++ 2 files changed, 318 insertions(+) create mode 100644 pkg/util/connection/constant.go diff --git a/pkg/util/connection/connection.go b/pkg/util/connection/connection.go index e1aada762a..1f0ef44cc9 100644 --- a/pkg/util/connection/connection.go +++ b/pkg/util/connection/connection.go @@ -16,3 +16,254 @@ */ package connection + +import ( + "github.com/casbin/casbin/v2/log" + "io" + "net" + "reflect" + "sync" + "sync/atomic" + "time" + + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/util/iobufferpool" +) + +type Connection struct { + conn net.Conn + closed uint32 + protocol string + localAddr net.Addr + remoteAddr net.Addr + + readEnabled bool + readEnabledChan chan bool // if we need to reload read filters, it's better to stop read data before reload filters + + lastBytesSizeRead int64 + lastWriteSizeWrite int64 + readBuffer iobufferpool.IoBuffer + writeBuffers net.Buffers + ioBuffers []iobufferpool.IoBuffer + writeBufferChan chan *[]iobufferpool.IoBuffer + + mu sync.Mutex + startOnce sync.Once + stopChan chan struct{} + + onRead func(buffer iobufferpool.IoBuffer) + onWrite func(src iobufferpool.IoBuffer) iobufferpool.IoBuffer +} + +func New(conn net.Conn, stopChan chan struct{}, remoteAddr net.Addr) *Connection { + res := &Connection{ + conn: conn, + protocol: conn.LocalAddr().Network(), + localAddr: conn.LocalAddr(), + + mu: sync.Mutex{}, + stopChan: stopChan, + readEnabledChan: make(chan bool, 1), + } + + if remoteAddr != nil { + res.remoteAddr = remoteAddr + } else { + res.remoteAddr = conn.RemoteAddr() + } + return res +} + +func (c *Connection) Start() { + if c.protocol == "udp" && c.conn.RemoteAddr() == nil { + return + } + + c.startOnce.Do(func() { + c.startRWLoop() + }) +} + +func (c *Connection) startRWLoop() { + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection read loop crashed, local addr: %s, remote addr: %s, err: %+v", + c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) + } + + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection close due to read loop crashed failed, local addr: %s, remote addr: %s, err: %+v", + c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) + } + }() + _ = c.Close(NoFlush, LocalClose) + }() + }() + c.startReadLoop() + }() + + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection write loop crashed, local addr: %s, remote addr: %s, err: %+v", + c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) + } + + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection close due to write loop crashed failed, local addr: %s, remote addr: %s, err: %+v", + c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) + } + }() + _ = c.Close(NoFlush, LocalClose) + }() + }() + c.startWriteLoop() + }() +} + +func (c *Connection) startReadLoop() { + for { + select { + case <-c.stopChan: + return + case <-c.readEnabledChan: + default: + if c.readEnabled { + err := c.doReadIO() + if err != nil { + if te, ok := err.(net.Error); ok && te.Timeout() { + if c.protocol == "tcp" && c.readBuffer != nil && c.readBuffer.Len() == 0 && 
c.readBuffer.Cap() > DefaultBufferReadCapacity { + c.readBuffer.Free() + c.readBuffer.Alloc(DefaultBufferReadCapacity) + } + continue + } + + // normal close or health check, modify log level + if c.lastBytesSizeRead == 0 || err == io.EOF { + logger.Debugf("%s connection error on read, local addr: %s, remote addr: %s, err: %s", + c.protocol, c.localAddr.String(), c.remoteAddr.String(), err.Error()) + } else { + logger.Errorf("%s connection error on read, local addr: %s, remote addr: %s, err: %s", + c.protocol, c.localAddr.String(), c.remoteAddr.String(), err.Error()) + } + + if err == io.EOF { + _ = c.Close(NoFlush, RemoteClose) + } else { + _ = c.Close(NoFlush, OnReadErrClose) + } + return + } + } else { + select { + case <-c.readEnabledChan: + case <-time.After(100 * time.Millisecond): + } + } + } + } +} + +func (c *Connection) setReadDeadline() { + switch c.protocol { + case "udp": + _ = c.conn.SetReadDeadline(time.Now().Add(1 * time.Second)) + case "tcp": + _ = c.conn.SetReadDeadline(time.Now().Add(15 * time.Second)) + } +} + +func (c *Connection) startWriteLoop() { + +} + +func (c *Connection) Close(ccType CloseType, eventType Event) (err error) { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", + c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) + err = ErrConnectionHasClosed + } + }() + + if ccType == FlushWrite { + _ = c.Write(iobufferpool.NewIoBufferEOF()) + return nil + } + + if !atomic.CompareAndSwapUint32(&c.closed, 0, 1) { + return nil + } + + // connection failed in client mode + if c.conn == nil || reflect.ValueOf(c.conn).IsNil() { + return nil + } + + // close tcp conn read first + if tconn, ok := c.conn.(*net.TCPConn); ok { + logger.Errorf("%s connection close read, local addr: %s, remote addr: %s", + c.protocol, c.localAddr.String(), c.remoteAddr.String()) + _ = tconn.CloseRead() + } + + // close conn recv, then notify read/write loop to exit + close(c.stopChan) + _ = c.conn.Close() + + logger.Errorf("%s connection closed, local addr: %s, remote addr: %s", + c.protocol, c.localAddr.String(), c.remoteAddr.String()) + return nil +} + +func (c *Connection) doReadIO() (err error) { + if c.readBuffer == nil { + switch c.protocol { + case "udp": + // A UDP socket will Read up to the size of the receiving buffer and will discard the rest + c.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) + default: // unix or tcp + c.readBuffer = iobufferpool.GetIoBuffer(DefaultBufferReadCapacity) + } + } + + var bytesRead int64 + c.setReadDeadline() + bytesRead, err = c.readBuffer.ReadOnce(c.conn) + + if err != nil { + if atomic.LoadUint32(&c.closed) == 1 { + return err + } + if te, ok := err.(net.Error); ok && te.Timeout() { + if bytesRead == 0 { + return err + } + } else if err != io.EOF { + return err + } + } + + if bytesRead == 0 && err == nil { + err = io.EOF + log.DefaultLogger.Errorf("[network] ReadOnce maybe always return (0, nil) and causes dead loop, Connection = %d, Local Address = %+v, Remote Address = %+v", + c.id, c.rawConnection.LocalAddr(), c.RemoteAddr()) + } + + c.onRead(bytesRead) + return +} + +func (c *Connection) doWriteIO() { + +} + +func (c *Connection) Write(buffers ...iobufferpool.IoBuffer) interface{} { + +} diff --git a/pkg/util/connection/constant.go b/pkg/util/connection/constant.go new file mode 100644 index 0000000000..450c2fb41a --- /dev/null +++ b/pkg/util/connection/constant.go @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2017, MegaEase + * All 
rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package connection + +import ( + "errors" + "time" +) + +// CloseType represent connection close type +type CloseType string + +//Connection close types +const ( + // FlushWrite means write buffer to underlying io then close connection + FlushWrite CloseType = "FlushWrite" + // NoFlush means close connection without flushing buffer + NoFlush CloseType = "NoFlush" +) + +// Event type +type Event string + +// ConnectionEvent types +const ( + RemoteClose Event = "RemoteClose" + LocalClose Event = "LocalClose" + OnReadErrClose Event = "OnReadErrClose" + OnWriteErrClose Event = "OnWriteErrClose" + OnConnect Event = "OnConnect" + Connected Event = "ConnectedFlag" + ConnectTimeout Event = "ConnectTimeout" + ConnectFailed Event = "ConnectFailed" + OnReadTimeout Event = "OnReadTimeout" + OnWriteTimeout Event = "OnWriteTimeout" +) + +var ( + ErrConnectionHasClosed = errors.New("connection has closed") + ErrWriteTryLockTimeout = errors.New("write trylock has timeout") + ErrWriteBufferChanTimeout = errors.New("writeBufferChan has timeout") +) + +// Network related const +const ( + DefaultBufferReadCapacity = 1 << 7 + + NetBufferDefaultSize = 0 + NetBufferDefaultCapacity = 1 << 4 + + DefaultConnectTimeout = 10 * time.Second +) From b0623c48b36f25da91f1a7db58b0726ff32343bf Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 10 Sep 2021 18:17:31 +0800 Subject: [PATCH 12/99] [layer4proxy] abstract connection operation --- pkg/util/connection/connection.go | 208 +++++++++- pkg/util/connection/constant.go | 10 + pkg/util/connectionwrapper/connection.go | 470 ----------------------- pkg/util/connectionwrapper/constant.go | 35 -- 4 files changed, 209 insertions(+), 514 deletions(-) delete mode 100644 pkg/util/connectionwrapper/connection.go delete mode 100644 pkg/util/connectionwrapper/constant.go diff --git a/pkg/util/connection/connection.go b/pkg/util/connection/connection.go index 1f0ef44cc9..5feb6b77d5 100644 --- a/pkg/util/connection/connection.go +++ b/pkg/util/connection/connection.go @@ -18,20 +18,22 @@ package connection import ( - "github.com/casbin/casbin/v2/log" "io" "net" "reflect" + "strings" "sync" "sync/atomic" "time" "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/util/iobufferpool" + "github.com/megaease/easegress/pkg/util/timerpool" ) type Connection struct { conn net.Conn + connected uint32 closed uint32 protocol string localAddr net.Addr @@ -51,13 +53,15 @@ type Connection struct { startOnce sync.Once stopChan chan struct{} - onRead func(buffer iobufferpool.IoBuffer) - onWrite func(src iobufferpool.IoBuffer) iobufferpool.IoBuffer + onRead func(buffer iobufferpool.IoBuffer) // execute read filters + onWrite func(src []iobufferpool.IoBuffer) []iobufferpool.IoBuffer // execute write filters } -func New(conn net.Conn, stopChan chan struct{}, remoteAddr net.Addr) *Connection { +// NewClientConnection wrap connection create from client +func NewClientConnection(conn net.Conn, stopChan 
chan struct{}, remoteAddr net.Addr) *Connection { res := &Connection{ conn: conn, + connected: 1, protocol: conn.LocalAddr().Network(), localAddr: conn.LocalAddr(), @@ -84,6 +88,16 @@ func (c *Connection) Start() { }) } +func (c *Connection) State() ConnState { + if atomic.LoadUint32(&c.closed) == 1 { + return ConnClosed + } + if atomic.LoadUint32(&c.connected) == 1 { + return ConnActive + } + return ConnInit +} + func (c *Connection) startRWLoop() { go func() { defer func() { @@ -180,7 +194,66 @@ func (c *Connection) setReadDeadline() { } func (c *Connection) startWriteLoop() { + var err error + for { + select { + case <-c.stopChan: + return + case buf, ok := <-c.writeBufferChan: + if !ok { + return + } + c.appendBuffer(buf) + OUTER: + for i := 0; i < 10; i++ { + select { + case buf, ok := <-c.writeBufferChan: + if !ok { + return + } + c.appendBuffer(buf) + default: + break OUTER + } + } + + c.setWriteDeadline() + _, err = c.doWrite() + } + if err != nil { + if err == iobufferpool.EOF { + logger.Debugf("%s connection error on write, local addr: %s, remote addr: %s, err: %+v", + c.protocol, c.localAddr.String(), c.remoteAddr.String(), err) + _ = c.Close(NoFlush, LocalClose) + } else { + logger.Errorf("%s connection error on write, local addr: %s, remote addr: %s, err: %+v", + c.protocol, c.localAddr.String(), c.remoteAddr.String(), err) + } + + if te, ok := err.(net.Error); ok && te.Timeout() { + _ = c.Close(NoFlush, OnWriteTimeout) + } + if c.protocol == "udp" && strings.Contains(err.Error(), "connection refused") { + _ = c.Close(NoFlush, RemoteClose) + } + //other write errs not close connection, because readbuffer may have unread data, wait for readloop close connection, + return + } + } +} + +func (c *Connection) appendBuffer(ioBuffers *[]iobufferpool.IoBuffer) { + if ioBuffers == nil { + return + } + for _, buf := range *ioBuffers { + if buf == nil { + continue + } + c.ioBuffers = append(c.ioBuffers, buf) + c.writeBuffers = append(c.writeBuffers, buf.Bytes()) + } } func (c *Connection) Close(ccType CloseType, eventType Event) (err error) { @@ -252,18 +325,135 @@ func (c *Connection) doReadIO() (err error) { if bytesRead == 0 && err == nil { err = io.EOF - log.DefaultLogger.Errorf("[network] ReadOnce maybe always return (0, nil) and causes dead loop, Connection = %d, Local Address = %+v, Remote Address = %+v", - c.id, c.rawConnection.LocalAddr(), c.RemoteAddr()) + logger.Errorf("%s connection ReadOnce maybe always return (0, nil) and causes dead loop, local addr: %s, remote addr: %s", + c.protocol, c.localAddr.String(), c.remoteAddr.String()) + } + + if !c.readEnabled { + return + } + + if bufLen := c.readBuffer.Len(); bufLen == 0 { + return + } else { + buf := c.readBuffer.Clone() + c.readBuffer.Drain(bufLen) + c.onRead(buf) + + if int64(bufLen) != c.lastBytesSizeRead { + c.lastBytesSizeRead = int64(bufLen) + } + } + return +} + +func (c *Connection) doWrite() (int64, error) { + bytesSent, err := c.doWriteIO() + if err != nil && atomic.LoadUint32(&c.closed) == 1 { + return 0, nil + } + + if bytesSent > 0 { + bytesBufSize := int64(c.writeBufLen()) + if int64(c.writeBufLen()) != c.lastWriteSizeWrite { + c.lastWriteSizeWrite = bytesBufSize + } } + return bytesSent, err +} - c.onRead(bytesRead) +func (c *Connection) writeBufLen() (bufLen int) { + for _, buf := range c.writeBuffers { + bufLen += len(buf) + } return } -func (c *Connection) doWriteIO() { +func (c *Connection) doWriteIO() (bytesSent int64, err error) { + buffers := c.writeBuffers + switch c.protocol { + case "tcp": + 
bytesSent, err = buffers.WriteTo(c.conn) + case "udp": + addr := c.remoteAddr.(*net.UDPAddr) + n := 0 + bytesSent = 0 + for _, buf := range c.ioBuffers { + if c.conn.RemoteAddr() == nil { + n, err = c.conn.(*net.UDPConn).WriteToUDP(buf.Bytes(), addr) + } else { + n, err = c.conn.Write(buf.Bytes()) + } + if err != nil { + break + } + bytesSent += int64(n) + } + } + if err != nil { + return bytesSent, err + } + for i, buf := range c.ioBuffers { + c.ioBuffers[i] = nil + c.writeBuffers[i] = nil + if buf.EOF() { + err = iobufferpool.EOF + } + if e := iobufferpool.PutIoBuffer(buf); e != nil { + logger.Errorf("%s connection PutIoBuffer error, local addr: %s, remote addr: %s, err: %+v", + c.protocol, c.localAddr.String(), c.remoteAddr.String(), err) + } + } + c.ioBuffers = c.ioBuffers[:0] + c.writeBuffers = c.writeBuffers[:0] + return } -func (c *Connection) Write(buffers ...iobufferpool.IoBuffer) interface{} { +// Write receive other connection data +func (c *Connection) Write(buffers ...iobufferpool.IoBuffer) (err error) { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", + c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) + err = ErrConnectionHasClosed + } + }() + + bufs := c.onWrite(buffers) + if bufs == nil { + return + } + + select { + case c.writeBufferChan <- &buffers: + return + default: + } + + // fail after 60s + t := timerpool.Get(60 * time.Second) + select { + case c.writeBufferChan <- &buffers: + case <-t.C: + err = ErrWriteBufferChanTimeout + } + timerpool.Put(t) + return +} + +func (c *Connection) setWriteDeadline() { + switch c.protocol { + case "udp": + _ = c.conn.SetWriteDeadline(time.Now().Add(5 * time.Second)) + case "tcp": + _ = c.conn.SetWriteDeadline(time.Now().Add(15 * time.Second)) + } +} +// UpstreamConnection wrap connection to upstream +type UpstreamConnection struct { + Connection + connectTimeout time.Duration + connectOnce sync.Once } diff --git a/pkg/util/connection/constant.go b/pkg/util/connection/constant.go index 450c2fb41a..ae4c7fd512 100644 --- a/pkg/util/connection/constant.go +++ b/pkg/util/connection/constant.go @@ -65,3 +65,13 @@ const ( DefaultConnectTimeout = 10 * time.Second ) + +// ConnState status +type ConnState int + +// Connection statuses +const ( + ConnInit ConnState = iota + ConnActive + ConnClosed +) diff --git a/pkg/util/connectionwrapper/connection.go b/pkg/util/connectionwrapper/connection.go deleted file mode 100644 index abc802ec1a..0000000000 --- a/pkg/util/connectionwrapper/connection.go +++ /dev/null @@ -1,470 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package connectionwrapper - -import ( - "io" - "net" - "reflect" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/util/iobufferpool" - "github.com/megaease/easegress/pkg/util/timerpool" -) - -// ConnectionCloseType represent connection close type -type ConnectionCloseType string - -//Connection close types -const ( - // FlushWrite means write buffer to underlying io then close connection - FlushWrite ConnectionCloseType = "FlushWrite" - // NoFlush means close connection without flushing buffer - NoFlush ConnectionCloseType = "NoFlush" -) - -// ConnectionEvent type -type ConnectionEvent string - -// ConnectionEvent types -const ( - RemoteClose ConnectionEvent = "RemoteClose" - LocalClose ConnectionEvent = "LocalClose" - OnReadErrClose ConnectionEvent = "OnReadErrClose" - OnWriteErrClose ConnectionEvent = "OnWriteErrClose" - OnConnect ConnectionEvent = "OnConnect" - Connected ConnectionEvent = "ConnectedFlag" - ConnectTimeout ConnectionEvent = "ConnectTimeout" - ConnectFailed ConnectionEvent = "ConnectFailed" - OnReadTimeout ConnectionEvent = "OnReadTimeout" - OnWriteTimeout ConnectionEvent = "OnWriteTimeout" -) - -type Connection struct { - net.Conn - - closed uint32 - connected uint32 - startOnce sync.Once - - // readLoop/writeLoop goroutine fields: - internalStopChan chan struct{} - readEnabled bool - readEnabledChan chan bool // if we need to reload read filters, it's better to stop read data before reload filters - - lastBytesSizeRead int64 - lastWriteSizeWrite int64 - - curWriteBufferData []iobufferpool.IoBuffer - readBuffer iobufferpool.IoBuffer - writeBuffers net.Buffers - ioBuffers []iobufferpool.IoBuffer - writeBufferChan chan *[]iobufferpool.IoBuffer -} - -func New(conn net.Conn) *Connection { - return &Connection{ - Conn: conn, - } -} - -func (c *Connection) StartRWLoop(ctx context.Layer4Context) { - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection read loop crashed, local addr: %s, remote addr: %s, err: %+v", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) - } - - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection close due to read loop crashed failed, local addr: %s, remote addr: %s, err: %+v", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) - } - }() - _ = c.Close(NoFlush, LocalClose, ctx) - }() - }() - c.startReadLoop(ctx) - }() - - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection write loop crashed, local addr: %s, remote addr: %s, err: %+v", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) - } - - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection close due to write loop crashed failed, local addr: %s, remote addr: %s, err: %+v", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) - } - }() - _ = c.Close(NoFlush, LocalClose, ctx) - }() - }() - c.startWriteLoop(ctx) - }() -} - -func (c *Connection) startReadLoop(ctx context.Layer4Context) { - for { - select { - case <-c.internalStopChan: - return - case <-c.readEnabledChan: - default: - if c.readEnabled { - err := c.doRead(ctx) - if err != nil { - if te, ok := err.(net.Error); ok && te.Timeout() { - if ctx.Protocol() == "tcp" && c.readBuffer != nil && - c.readBuffer.Len() == 0 && c.readBuffer.Cap() > DefaultBufferReadCapacity { - 
c.readBuffer.Free() - c.readBuffer.Alloc(DefaultBufferReadCapacity) - } - continue - } - - if c.lastBytesSizeRead == 0 || err == io.EOF { - logger.Debugf("%s connection write loop closed, local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - } else { - logger.Errorf("%s connection write loop closed, local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - } - - if err == io.EOF { - _ = c.Close(NoFlush, RemoteClose, ctx) - } else { - _ = c.Close(NoFlush, OnReadErrClose, ctx) - } - return - } - } else { - select { - case <-c.readEnabledChan: - case <-time.After(100 * time.Millisecond): - } - } - } - - } -} - -func (c *Connection) startWriteLoop(ctx context.Layer4Context) { - defer func() { - close(c.writeBufferChan) - }() - - var err error - for { - select { - case <-c.internalStopChan: - return - case buf, ok := <-c.writeBufferChan: - if !ok { - return - } - c.appendBuffer(buf) - - QUIT: - for i := 0; i < 10; i++ { - select { - case buf, ok := <-c.writeBufferChan: - if !ok { - return - } - c.appendBuffer(buf) - default: - break QUIT - } - - c.setWriteDeadline(ctx) - _, err = c.doWrite(ctx) - } - } - - if err != nil { - - if err == iobufferpool.EOF { - logger.Debugf("%s connection write loop occur error, local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - c.Close(NoFlush, LocalClose, ctx) - } else { - logger.Errorf("%s connection write loop occur error, local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - } - - if te, ok := err.(net.Error); ok && te.Timeout() { - c.Close(NoFlush, OnWriteTimeout, ctx) - } - - if ctx.Protocol() == "udp" && strings.Contains(err.Error(), "connection refused") { - c.Close(NoFlush, RemoteClose, ctx) - } - //other write errs not close connection, because readbuffer may have unread data, wait for readloop close connection, - - return - } - } -} - -func (c *Connection) appendBuffer(buffers *[]iobufferpool.IoBuffer) { - if buffers == nil { - return - } - for _, buf := range *buffers { - if buf == nil { - continue - } - c.ioBuffers = append(c.ioBuffers, buf) - c.writeBuffers = append(c.writeBuffers, buf.Bytes()) - } -} - -func (c *Connection) doRead(ctx context.Layer4Context) (err error) { - if c.readBuffer == nil { - switch ctx.Protocol() { - case "udp": - // A UDP socket will Read up to the size of the receiving buffer and will discard the rest - c.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) - default: - c.readBuffer = iobufferpool.GetIoBuffer(DefaultBufferReadCapacity) - } - } - - var bytesRead int64 - c.setReadDeadline(ctx) - bytesRead, err = c.readBuffer.ReadOnce(c.Conn) - - if err != nil { - if atomic.LoadUint32(&c.closed) == 1 { - return err - } - - if te, ok := err.(net.Error); ok && te.Timeout() { - // TODO add timeout handle(such as send keepalive msg to active connection) - - if bytesRead == 0 { - return err - } - } else if err != io.EOF { - return err - } - } - - //todo: ReadOnce maybe always return (0, nil) and causes dead loop (hack) - if bytesRead == 0 && err == nil { - err = io.EOF - logger.Errorf("%s connection read maybe always return (0, nil), local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - } - c.lastBytesSizeRead = int64(c.readBuffer.Len()) - return -} - -// Write send recv data(batch mode) to upstream -func (c *Connection) Write(ctx context.Layer4Context, 
buffers ...iobufferpool.IoBuffer) (err error) { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) - err = ErrConnectionHasClosed - } - }() - - // TODO get filters from layer4 pipeline, transform buffers via filters - - select { - case c.writeBufferChan <- &buffers: - return - default: - } - - t := timerpool.Get(DefaultConnTryTimeout) - select { - case c.writeBufferChan <- &buffers: - case <-t.C: - err = ErrWriteBufferChanTimeout - } - timerpool.Put(t) - return -} - -func (c *Connection) setWriteDeadline(ctx context.Layer4Context) { - args := ctx.ConnectionArgs() - if args.ProxyWriteTimeout > 0 { - _ = c.Conn.SetWriteDeadline(time.Now().Add(time.Duration(args.ProxyWriteTimeout) * time.Millisecond)) - } else { - switch ctx.Protocol() { - case "udp": - _ = c.Conn.SetWriteDeadline(time.Now().Add(DefaultUDPIdleTimeout)) - case "tcp": - _ = c.Conn.SetWriteDeadline(time.Now().Add(DefaultConnWriteTimeout)) - } - } -} - -func (c *Connection) setReadDeadline(ctx context.Layer4Context) { - args := ctx.ConnectionArgs() - if args.ProxyWriteTimeout > 0 { - _ = c.Conn.SetReadDeadline(time.Now().Add(time.Duration(args.ProxyReadTimeout) * time.Millisecond)) - } else { - switch ctx.Protocol() { - case "udp": - _ = c.Conn.SetReadDeadline(time.Now().Add(DefaultUDPReadTimeout)) - case "tcp": - _ = c.Conn.SetReadDeadline(time.Now().Add(ConnReadTimeout)) - } - } -} - -// Close handle connection close event -func (c *Connection) Close(ccType ConnectionCloseType, eventType ConnectionEvent, ctx context.Layer4Context) (err error) { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) - err = ErrConnectionHasClosed - } - }() - - if ccType == FlushWrite { - _ = c.Write(ctx, iobufferpool.NewIoBufferEOF()) - return nil - } - - if !atomic.CompareAndSwapUint32(&c.closed, 0, 1) { - return nil - } - - // connection failed in client mode - if c.Conn == nil || reflect.ValueOf(c.Conn).IsNil() { - return nil - } - - // close tcp conn read first - if tconn, ok := c.Conn.(*net.TCPConn); ok { - logger.Errorf("%s connection close read, local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - _ = tconn.CloseRead() - } - - // close conn recv, then notify read/write loop to exit - close(c.internalStopChan) - _ = c.Conn.Close() - - logger.Errorf("%s connection closed, local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - return nil -} - -func (c *Connection) writeBufLen() (bufLen int) { - for _, buf := range c.writeBuffers { - bufLen += len(buf) - } - return -} - -func (c *Connection) doWrite(ctx context.Layer4Context) (interface{}, error) { - bytesSent, err := c.doWriteIO(ctx) - if err != nil && atomic.LoadUint32(&c.closed) == 1 { - return 0, nil - } - - c.lastWriteSizeWrite = int64(c.writeBufLen()) - return bytesSent, err -} - -// -func (c *Connection) doWriteIO(ctx context.Layer4Context) (bytesSent int64, err error) { - buffers := c.writeBuffers - switch ctx.Protocol() { - case "udp": - addr := ctx.RemoteAddr().(*net.UDPAddr) - n := 0 - bytesSent = 0 - for _, buf := range c.ioBuffers { - if c.Conn.RemoteAddr() == nil { - n, err = c.Conn.(*net.UDPConn).WriteToUDP(buf.Bytes(), addr) - } else { - n, err = 
c.Conn.Write(buf.Bytes()) - } - if err != nil { - break - } - bytesSent += int64(n) - } - case "tcp": - bytesSent, err = buffers.WriteTo(c.Conn) - } - - if err != nil { - return bytesSent, err - } - - for i, buf := range c.ioBuffers { - c.ioBuffers[i] = nil - c.writeBuffers[i] = nil - if buf.EOF() { - err = iobufferpool.EOF - } - if e := iobufferpool.PutIoBuffer(buf); e != nil { - logger.Errorf("%s connection give io buffer failed, local addr: %s, remote addr: %s, err: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), err.Error()) - } - } - c.ioBuffers = c.ioBuffers[:0] - c.writeBuffers = c.writeBuffers[:0] - return -} - -func (c *Connection) SetNoDelay(enable bool) { - if c.Conn != nil { - if tconn, ok := c.Conn.(*net.TCPConn); ok { - _ = tconn.SetNoDelay(enable) - } - } -} - -func (c *Connection) ReadEnabled() bool { - return c.readEnabled -} - -func (c *Connection) State() ConnState { - if atomic.LoadUint32(&c.closed) == 1 { - return ConnClosed - } - if atomic.LoadUint32(&c.connected) == 1 { - return ConnActive - } - return ConnInit -} - -func (c *Connection) GetReadBuffer() iobufferpool.IoBuffer { - return c.readBuffer -} diff --git a/pkg/util/connectionwrapper/constant.go b/pkg/util/connectionwrapper/constant.go deleted file mode 100644 index c6e122f5da..0000000000 --- a/pkg/util/connectionwrapper/constant.go +++ /dev/null @@ -1,35 +0,0 @@ -package connectionwrapper - -import ( - "errors" - "time" -) - -var ( - ErrConnectionHasClosed = errors.New("connection has closed") - ErrWriteTryLockTimeout = errors.New("write trylock has timeout") - ErrWriteBufferChanTimeout = errors.New("writeBufferChan has timeout") -) - -// Default connection arguments -const ( - DefaultBufferReadCapacity = 1 << 7 - - DefaultConnReadTimeout = 15 * time.Second - DefaultConnWriteTimeout = 15 * time.Second - DefaultConnTryTimeout = 60 * time.Second - DefaultIdleTimeout = 90 * time.Second - DefaultUDPIdleTimeout = 5 * time.Second - DefaultUDPReadTimeout = 1 * time.Second - ConnReadTimeout = 15 * time.Second -) - -// ConnState Connection status -type ConnState int - -// Connection statuses -const ( - ConnInit ConnState = iota - ConnActive - ConnClosed -) From c3147597ba94c96b4a20f0f14fb40647ef4b0c3e Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sun, 12 Sep 2021 12:17:34 +0800 Subject: [PATCH 13/99] [layer4proxy] extract connection to client connection and upstream connection --- pkg/util/connection/connection.go | 75 +++++++++++++++++++++++++++++-- 1 file changed, 72 insertions(+), 3 deletions(-) diff --git a/pkg/util/connection/connection.go b/pkg/util/connection/connection.go index 5feb6b77d5..16cc4f75dd 100644 --- a/pkg/util/connection/connection.go +++ b/pkg/util/connection/connection.go @@ -18,6 +18,7 @@ package connection import ( + "errors" "io" "net" "reflect" @@ -58,16 +59,19 @@ type Connection struct { } // NewClientConnection wrap connection create from client -func NewClientConnection(conn net.Conn, stopChan chan struct{}, remoteAddr net.Addr) *Connection { +func NewClientConnection(conn net.Conn, remoteAddr net.Addr, stopChan chan struct{}) *Connection { res := &Connection{ conn: conn, connected: 1, protocol: conn.LocalAddr().Network(), localAddr: conn.LocalAddr(), - mu: sync.Mutex{}, - stopChan: stopChan, + readEnabled: true, readEnabledChan: make(chan bool, 1), + writeBufferChan: make(chan *[]iobufferpool.IoBuffer, 8), + + mu: sync.Mutex{}, + stopChan: stopChan, } if remoteAddr != nil { @@ -457,3 +461,68 @@ type UpstreamConnection struct { connectTimeout time.Duration 
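// connectOnce guarantees that Connect dials the upstream at most once for this connection.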
connectOnce sync.Once } + +func NewUpstreamConnection(connectTimeout time.Duration, remoteAddr net.Addr, stopChan chan struct{}, + onRead func(buffer iobufferpool.IoBuffer), onWrite func(src []iobufferpool.IoBuffer) []iobufferpool.IoBuffer) *UpstreamConnection { + res := &UpstreamConnection{ + Connection: Connection{ + connected: 1, + remoteAddr: remoteAddr, + + readEnabled: true, + readEnabledChan: make(chan bool, 1), + writeBufferChan: make(chan *[]iobufferpool.IoBuffer, 8), + + mu: sync.Mutex{}, + stopChan: stopChan, + onRead: onRead, + onWrite: onWrite, + }, + connectTimeout: connectTimeout, + } + if res.remoteAddr != nil { + res.Connection.protocol = res.remoteAddr.Network() + } + return res +} + +func (u *UpstreamConnection) connect() (event Event, err error) { + timeout := u.connectTimeout + if timeout == 0 { + timeout = 10 * time.Second + } + addr := u.remoteAddr + if addr == nil { + return ConnectFailed, errors.New("upstream addr is nil") + } + u.conn, err = net.DialTimeout(u.protocol, addr.String(), timeout) + if err != nil { + if err == io.EOF { + event = RemoteClose + } else if err, ok := err.(net.Error); ok && err.Timeout() { + event = ConnectTimeout + } else { + event = ConnectFailed + } + return + } + atomic.StoreUint32(&u.connected, 1) + event = Connected + u.localAddr = u.conn.LocalAddr() + return +} + +func (u *UpstreamConnection) Connect() (err error) { + u.connectOnce.Do(func() { + var event Event + event, err = u.connect() + if err == nil { + u.Start() + } + logger.Debugf("connect upstream, upstream addr: %s, event: %+v, err: %+v", u.remoteAddr, event, err) + if event != Connected { + close(u.stopChan) // if upstream connection failed, close client connection + } + }) + return +} From 534956fdab242d460aa99e1dcfc92bb10eb61df7 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sun, 12 Sep 2021 22:45:57 +0800 Subject: [PATCH 14/99] [layer4proxy] simplify context methods --- pkg/context/layer4context.go | 158 +++++++++++++---------------------- 1 file changed, 57 insertions(+), 101 deletions(-) diff --git a/pkg/context/layer4context.go b/pkg/context/layer4context.go index 6a720f82ea..23d6e05d3e 100644 --- a/pkg/context/layer4context.go +++ b/pkg/context/layer4context.go @@ -18,8 +18,7 @@ package context import ( - "bytes" - stdcontext "context" + "github.com/megaease/easegress/pkg/util/iobufferpool" "net" "sync" "time" @@ -34,29 +33,19 @@ type ( Unlock() Protocol() string - ConnectionArgs() *ConnectionArgs - SetConnectionArgs(args *ConnectionArgs) LocalAddr() net.Addr - SetLocalAddr(addr net.Addr) - RemoteAddr() net.Addr - SetRemoteAddr(addr net.Addr) + ClientAddr() net.Addr + UpstreamAddr() net.Addr + SetUpstreamAddr(addr net.Addr) - Stop() - - stdcontext.Context - Cancel(err error) - Cancelled() bool - ClientDisconnected() bool - - ClientConn() net.Conn - UpStreamConn() net.Conn - SetUpStreamConn(conn net.Conn) - - Duration() time.Duration // For log, sample, etc. - OnFinish(func()) // For setting final client statistics, etc. - AddTag(tag string) // For debug, log, etc. 
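// The buffer accessors below let layer4 filters inspect and stage payload data
// flowing in either direction; the Append helpers ignore nil or empty buffers.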
+ GetReadBuffer() iobufferpool.IoBuffer + AppendReadBuffer(buffer iobufferpool.IoBuffer) + GetWriteBuffer() iobufferpool.IoBuffer + AppendWriteBuffer(buffer iobufferpool.IoBuffer) Finish() + Duration() time.Duration + StopChan() chan struct{} // client connection and upstream connection stop by this chan CallNextHandler(lastResult string) string SetHandlerCaller(caller HandlerCaller) @@ -77,19 +66,16 @@ type ( layer4Context struct { mutex sync.Mutex - protocol string - localAddr net.Addr - remoteAddr net.Addr - clientConn net.Conn + protocol string + localAddr net.Addr + clientAddr net.Addr + upstreamAddr net.Addr + stopChan chan struct{} // notify quit read loop and write loop + readBuffer iobufferpool.IoBuffer + writeBuffer iobufferpool.IoBuffer connectionArgs *ConnectionArgs - readBuffer bytes.Buffer - writeBuffers net.Buffers - ioBuffers []bytes.Buffer - writeBufferChan chan *[]bytes.Buffer - stopChan chan struct{} // notify quit read loop and write loop - startTime *time.Time // connection accept time endTime *time.Time // connection close time @@ -98,107 +84,73 @@ type ( ) // NewLayer4Context creates an Layer4Context. -func NewLayer4Context(protocol string, conn net.Conn) *layer4Context { +func NewLayer4Context(protocol string, localAddr net.Addr, clientAddr net.Addr, stopChan chan struct{}) *layer4Context { startTime := time.Now() res := layer4Context{ + mutex: sync.Mutex{}, protocol: protocol, - clientConn: conn, - localAddr: conn.LocalAddr(), - remoteAddr: conn.RemoteAddr(), - - startTime: &startTime, - stopChan: make(chan struct{}), - mutex: sync.Mutex{}, + localAddr: localAddr, + clientAddr: clientAddr, + stopChan: stopChan, + startTime: &startTime, } return &res } -func (ctx *layer4Context) Protocol() string { - return ctx.protocol +func (ctx *layer4Context) Lock() { + ctx.mutex.Lock() } -func (ctx *layer4Context) ConnectionArgs() *ConnectionArgs { - return ctx.connectionArgs +func (ctx *layer4Context) Unlock() { + ctx.mutex.Unlock() } -func (ctx *layer4Context) SetConnectionArgs(args *ConnectionArgs) { - ctx.connectionArgs = args +// Protocol get proxy protocol +func (ctx *layer4Context) Protocol() string { + return ctx.protocol } func (ctx *layer4Context) LocalAddr() net.Addr { return ctx.localAddr } -func (ctx *layer4Context) SetLocalAddr(localAddr net.Addr) { - ctx.localAddr = localAddr -} - -func (ctx *layer4Context) RemoteAddr() net.Addr { - return ctx.remoteAddr -} - -func (ctx *layer4Context) SetRemoteAddr(addr net.Addr) { - ctx.remoteAddr = addr -} - -func (ctx *layer4Context) Stop() { - endTime := time.Now() - ctx.endTime = &endTime - - // TODO add stat for context -} - -func (ctx *layer4Context) Deadline() (deadline time.Time, ok bool) { - panic("implement me") +func (ctx *layer4Context) ClientAddr() net.Addr { + return ctx.ClientAddr() } -func (ctx *layer4Context) Done() <-chan struct{} { - panic("implement me") +func (ctx *layer4Context) UpstreamAddr() net.Addr { + return ctx.upstreamAddr } -func (ctx *layer4Context) Err() error { - panic("implement me") +func (ctx *layer4Context) SetUpstreamAddr(addr net.Addr) { + ctx.upstreamAddr = addr } -func (ctx *layer4Context) Value(key interface{}) interface{} { - panic("implement me") +func (ctx *layer4Context) StopChan() chan struct{} { + return ctx.stopChan } -func (ctx *layer4Context) Cancel(err error) { - panic("implement me") +func (ctx *layer4Context) GetReadBuffer() iobufferpool.IoBuffer { + return ctx.readBuffer } -func (ctx *layer4Context) Cancelled() bool { - panic("implement me") -} - -func (ctx *layer4Context) 
ClientDisconnected() bool { - panic("implement me") -} - -func (ctx *layer4Context) ClientConn() net.Conn { - return ctx.clientConn -} - -func (ctx *layer4Context) OnFinish(f func()) { - panic("implement me") -} - -func (ctx *layer4Context) AddTag(tag string) { - panic("implement me") -} - -func (ctx *layer4Context) Finish() { - panic("implement me") +func (ctx *layer4Context) AppendReadBuffer(buffer iobufferpool.IoBuffer) { + if buffer == nil || buffer.Len() == 0 { + return + } + _ = ctx.readBuffer.Append(buffer.Bytes()) } -func (ctx *layer4Context) Lock() { - ctx.mutex.Lock() +func (ctx *layer4Context) GetWriteBuffer() iobufferpool.IoBuffer { + return ctx.writeBuffer } -func (ctx *layer4Context) Unlock() { - ctx.mutex.Unlock() +func (ctx *layer4Context) AppendWriteBuffer(buffer iobufferpool.IoBuffer) { + if buffer == nil || buffer.Len() == 0 { + return + } + _ = ctx.writeBuffer.Append(buffer.Bytes()) } func (ctx *layer4Context) CallNextHandler(lastResult string) string { @@ -209,10 +161,14 @@ func (ctx *layer4Context) SetHandlerCaller(caller HandlerCaller) { ctx.caller = caller } +func (ctx *layer4Context) Finish() { + finish := time.Now() + ctx.endTime = &finish +} + func (ctx *layer4Context) Duration() time.Duration { if ctx.endTime != nil { return ctx.endTime.Sub(*ctx.startTime) } - return time.Now().Sub(*ctx.startTime) } From 9395ff3edcff93f7cedeaa6b5dd57460cce9405c Mon Sep 17 00:00:00 2001 From: jxd134 Date: Mon, 13 Sep 2021 15:22:39 +0800 Subject: [PATCH 15/99] [layer4proxy]almost finish layer4pipeline and layer4context --- pkg/context/handlercaller.go | 17 +++ pkg/context/layer4context.go | 37 +++++- pkg/object/httppipeline/httppipeline.go | 2 +- pkg/object/layer4pipeline/layer4pipeline.go | 120 ++++++++++++-------- pkg/object/layer4pipeline/registry.go | 8 +- pkg/protocol/layer4.go | 13 ++- pkg/util/connection/connection.go | 12 ++ 7 files changed, 150 insertions(+), 59 deletions(-) diff --git a/pkg/context/handlercaller.go b/pkg/context/handlercaller.go index ae78256b0d..dc04ffbf19 100644 --- a/pkg/context/handlercaller.go +++ b/pkg/context/handlercaller.go @@ -1,3 +1,20 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package context type ( diff --git a/pkg/context/layer4context.go b/pkg/context/layer4context.go index 23d6e05d3e..20c845a7fe 100644 --- a/pkg/context/layer4context.go +++ b/pkg/context/layer4context.go @@ -18,6 +18,7 @@ package context import ( + "github.com/megaease/easegress/pkg/util/connection" "github.com/megaease/easegress/pkg/util/iobufferpool" "net" "sync" @@ -43,6 +44,9 @@ type ( GetWriteBuffer() iobufferpool.IoBuffer AppendWriteBuffer(buffer iobufferpool.IoBuffer) + WriteToClient(buffer iobufferpool.IoBuffer) + WriteToUpstream(buffer iobufferpool.IoBuffer) + Finish() Duration() time.Duration StopChan() chan struct{} // client connection and upstream connection stop by this chan @@ -72,6 +76,9 @@ type ( upstreamAddr net.Addr stopChan chan struct{} // notify quit read loop and write loop + clientConn connection.Connection + upstreamConn connection.UpstreamConnection + readBuffer iobufferpool.IoBuffer writeBuffer iobufferpool.IoBuffer connectionArgs *ConnectionArgs @@ -84,14 +91,14 @@ type ( ) // NewLayer4Context creates an Layer4Context. -func NewLayer4Context(protocol string, localAddr net.Addr, clientAddr net.Addr, stopChan chan struct{}) *layer4Context { +func NewLayer4Context(clientConn *connection.Connection, stopChan chan struct{}) *layer4Context { startTime := time.Now() res := layer4Context{ mutex: sync.Mutex{}, - protocol: protocol, - localAddr: localAddr, - clientAddr: clientAddr, + protocol: clientConn.Protocol(), + localAddr: clientConn.LocalAddr(), + clientAddr: clientConn.RemoteAddr(), stopChan: stopChan, startTime: &startTime, } @@ -119,6 +126,7 @@ func (ctx *layer4Context) ClientAddr() net.Addr { return ctx.ClientAddr() } +// UpstreamAddr get upstream addr func (ctx *layer4Context) UpstreamAddr() net.Addr { return ctx.upstreamAddr } @@ -131,10 +139,12 @@ func (ctx *layer4Context) StopChan() chan struct{} { return ctx.stopChan } +// GetReadBuffer get read buffer func (ctx *layer4Context) GetReadBuffer() iobufferpool.IoBuffer { return ctx.readBuffer } +// AppendReadBuffer filter receive client data, append data to ctx read buffer for other filters handle func (ctx *layer4Context) AppendReadBuffer(buffer iobufferpool.IoBuffer) { if buffer == nil || buffer.Len() == 0 { return @@ -142,10 +152,12 @@ func (ctx *layer4Context) AppendReadBuffer(buffer iobufferpool.IoBuffer) { _ = ctx.readBuffer.Append(buffer.Bytes()) } +// GetWriteBuffer get write buffer func (ctx *layer4Context) GetWriteBuffer() iobufferpool.IoBuffer { return ctx.writeBuffer } +// AppendWriteBuffer filter receive upstream data, append data to ctx write buffer for other filters handle func (ctx *layer4Context) AppendWriteBuffer(buffer iobufferpool.IoBuffer) { if buffer == nil || buffer.Len() == 0 { return @@ -153,6 +165,22 @@ func (ctx *layer4Context) AppendWriteBuffer(buffer iobufferpool.IoBuffer) { _ = ctx.writeBuffer.Append(buffer.Bytes()) } +// WriteToClient filter handle client upload data, send result to upstream connection +func (ctx *layer4Context) WriteToClient(buffer iobufferpool.IoBuffer) error { + if buffer == nil || buffer.Len() == 0 { + return nil + } + return ctx.upstreamConn.Write(buffer) +} + +// WriteToUpstream filter handle client upload data, send result to upstream connection +func (ctx *layer4Context) WriteToUpstream(buffer iobufferpool.IoBuffer) error { + if buffer == nil || buffer.Len() == 0 { + return nil + } + return ctx.clientConn.Write(buffer) +} + func (ctx *layer4Context) CallNextHandler(lastResult string) string { return ctx.caller(lastResult) } @@ -166,6 +194,7 @@ func 
(ctx *layer4Context) Finish() { ctx.endTime = &finish } +// Duration get context execute duration func (ctx *layer4Context) Duration() time.Duration { if ctx.endTime != nil { return ctx.endTime.Sub(*ctx.startTime) diff --git a/pkg/object/httppipeline/httppipeline.go b/pkg/object/httppipeline/httppipeline.go index bd9ac0b93e..92c1c903c3 100644 --- a/pkg/object/httppipeline/httppipeline.go +++ b/pkg/object/httppipeline/httppipeline.go @@ -305,7 +305,7 @@ func (hp *HTTPPipeline) DefaultSpec() interface{} { return &Spec{} } -// Init initilizes HTTPPipeline. +// Init initializes HTTPPipeline. func (hp *HTTPPipeline) Init(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { hp.superSpec, hp.spec, hp.muxMapper = superSpec, superSpec.ObjectSpec().(*Spec), muxMapper diff --git a/pkg/object/layer4pipeline/layer4pipeline.go b/pkg/object/layer4pipeline/layer4pipeline.go index 522e82fced..5bdd564369 100644 --- a/pkg/object/layer4pipeline/layer4pipeline.go +++ b/pkg/object/layer4pipeline/layer4pipeline.go @@ -1,3 +1,20 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package layer4pipeline import ( @@ -36,7 +53,7 @@ type ( superSpec *supervisor.Spec spec *Spec - muxMapper protocol.MuxMapper + muxMapper protocol.Layer4MuxMapper runningFilters []*runningFilter } @@ -128,7 +145,7 @@ func (ctx *PipelineContext) log() string { return buf.String() } -// context.TCPContext: *PipelineContext +// context.Layer4Pipeline: *PipelineContext var runningContexts = sync.Map{} func newAndSetPipelineContext(ctx context.Layer4Context) *PipelineContext { @@ -137,7 +154,7 @@ func newAndSetPipelineContext(ctx context.Layer4Context) *PipelineContext { return pipeCtx } -// GetPipelineContext returns the corresponding PipelineContext of the TCPContext, +// GetPipelineContext returns the corresponding PipelineContext of the Layer4Context, // and a bool flag to represent it succeed or not. func GetPipelineContext(ctx context.Layer4Context) (*PipelineContext, bool) { value, ok := runningContexts.Load(ctx) @@ -176,7 +193,6 @@ func (meta *FilterMetaSpec) Validate() error { if meta.Name == LabelEND { return fmt.Errorf("can't use %s(built-in label) for filter name", LabelEND) } - return nil } @@ -211,7 +227,6 @@ func (s Spec) Validate() (err error) { } errPrefix = "flow" - filters := make(map[string]struct{}) for _, f := range s.Flow { if _, exists := filters[f.Filter]; exists { @@ -243,42 +258,42 @@ func (s Spec) Validate() (err error) { } // Category returns the category of Layer4Pipeline. -func (hp *Layer4Pipeline) Category() supervisor.ObjectCategory { +func (l *Layer4Pipeline) Category() supervisor.ObjectCategory { return Category } // Kind returns the kind of Layer4Pipeline. -func (hp *Layer4Pipeline) Kind() string { +func (l *Layer4Pipeline) Kind() string { return Kind } // DefaultSpec returns the default spec of Layer4Pipeline. 
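// The default is an empty Spec; flow and filters are expected to be
// populated from the pipeline's YAML configuration.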
-func (hp *Layer4Pipeline) DefaultSpec() interface{} { +func (l *Layer4Pipeline) DefaultSpec() interface{} { return &Spec{} } // Init initializes Layer4Pipeline. -func (hp *Layer4Pipeline) Init(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { - hp.superSpec, hp.spec, hp.muxMapper = superSpec, superSpec.ObjectSpec().(*Spec), muxMapper +func (l *Layer4Pipeline) Init(superSpec *supervisor.Spec, muxMapper protocol.Layer4MuxMapper) { + l.superSpec, l.spec, l.muxMapper = superSpec, superSpec.ObjectSpec().(*Spec), muxMapper - hp.reload(nil /*no previous generation*/) + l.reload(nil /*no previous generation*/) } // Inherit inherits previous generation of Layer4Pipeline. -func (hp *Layer4Pipeline) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper protocol.MuxMapper) { - hp.superSpec, hp.spec, hp.muxMapper = superSpec, superSpec.ObjectSpec().(*Spec), muxMapper +func (l *Layer4Pipeline) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper protocol.Layer4MuxMapper) { + l.superSpec, l.spec, l.muxMapper = superSpec, superSpec.ObjectSpec().(*Spec), muxMapper - hp.reload(previousGeneration.(*Layer4Pipeline)) + l.reload(previousGeneration.(*Layer4Pipeline)) // NOTE: It's filters' responsibility to inherit and clean their resources. // previousGeneration.Close() } -func (hp *Layer4Pipeline) reload(previousGeneration *Layer4Pipeline) { +func (l *Layer4Pipeline) reload(previousGeneration *Layer4Pipeline) { runningFilters := make([]*runningFilter, 0) - if len(hp.spec.Flow) == 0 { - for _, filterSpec := range hp.spec.Filters { - spec, err := NewFilterSpec(filterSpec, hp.superSpec.Super()) + if len(l.spec.Flow) == 0 { + for _, filterSpec := range l.spec.Filters { + spec, err := NewFilterSpec(filterSpec, l.superSpec.Super()) if err != nil { panic(err) } @@ -288,11 +303,11 @@ func (hp *Layer4Pipeline) reload(previousGeneration *Layer4Pipeline) { }) } } else { - for _, f := range hp.spec.Flow { + for _, f := range l.spec.Flow { var spec *FilterSpec - for _, filterSpec := range hp.spec.Filters { + for _, filterSpec := range l.spec.Filters { var err error - spec, err = NewFilterSpec(filterSpec, hp.superSpec.Super()) + spec, err = NewFilterSpec(filterSpec, l.superSpec.Super()) if err != nil { panic(err) } @@ -311,8 +326,7 @@ func (hp *Layer4Pipeline) reload(previousGeneration *Layer4Pipeline) { } } - pipelineName := hp.superSpec.Name() - var filterBuffs []context.FilterBuff + pipelineName := l.superSpec.Name() for _, runningFilter := range runningFilters { name, kind := runningFilter.spec.Name(), runningFilter.spec.Kind() rootFilter, exists := filterRegistry[kind] @@ -337,17 +351,12 @@ func (hp *Layer4Pipeline) reload(previousGeneration *Layer4Pipeline) { } runningFilter.filter, runningFilter.rootFilter = filter, rootFilter - - filterBuffs = append(filterBuffs, context.FilterBuff{ - Name: name, - Buff: []byte(runningFilter.spec.YAMLConfig()), - }) } - hp.runningFilters = runningFilters + l.runningFilters = runningFilters } -func (hp *Layer4Pipeline) getNextFilterIndex(index int, result string) int { +func (l *Layer4Pipeline) getNextFilterIndex(index int, result string) int { // return index + 1 if last filter succeeded if result == "" { return index + 1 @@ -355,7 +364,7 @@ func (hp *Layer4Pipeline) getNextFilterIndex(index int, result string) int { // check the jumpIf table of current filter, return its index if the jump // target is valid and -1 otherwise - filter := hp.runningFilters[index] + filter := l.runningFilters[index] if 
!stringtool.StrInSlice(result, filter.rootFilter.Results()) { format := "BUG: invalid result %s not in %v" logger.Errorf(format, result, filter.rootFilter.Results()) @@ -369,20 +378,28 @@ func (hp *Layer4Pipeline) getNextFilterIndex(index int, result string) int { return -1 } if name == LabelEND { - return len(hp.runningFilters) + return len(l.runningFilters) } - for index++; index < len(hp.runningFilters); index++ { - if hp.runningFilters[index].spec.Name() == name { + for index++; index < len(l.runningFilters); index++ { + if l.runningFilters[index].spec.Name() == name { return index } } - return -1 } -// Handle is the handler to deal with layer4 -func (hp *Layer4Pipeline) Handle(ctx context.Layer4Context) { +// InboundHandle is the handler to deal with layer4 inbound data +func (l *Layer4Pipeline) InboundHandle(ctx context.Layer4Context) { + l.innerHandle(ctx, true) +} + +// OutboundHandle is the handler to deal with layer4 outbound data +func (l *Layer4Pipeline) OutboundHandle(ctx context.Layer4Context) { + l.innerHandle(ctx, false) +} + +func (l *Layer4Pipeline) innerHandle(ctx context.Layer4Context, isInbound bool) { pipeCtx := newAndSetPipelineContext(ctx) defer deletePipelineContext(ctx) @@ -390,6 +407,7 @@ func (hp *Layer4Pipeline) Handle(ctx context.Layer4Context) { filterStat := &FilterStat{} handle := func(lastResult string) string { + // Filters are called recursively as a stack, so we need to save current // state and restore it before return lastIndex := filterIndex @@ -399,19 +417,24 @@ func (hp *Layer4Pipeline) Handle(ctx context.Layer4Context) { filterStat = lastStat }() - filterIndex = hp.getNextFilterIndex(filterIndex, lastResult) - if filterIndex == len(hp.runningFilters) { + filterIndex = l.getNextFilterIndex(filterIndex, lastResult) + if filterIndex == len(l.runningFilters) { return "" // reach the end of pipeline } else if filterIndex == -1 { return lastResult // an error occurs but no filter can handle it } - filter := hp.runningFilters[filterIndex] + filter := l.runningFilters[filterIndex] name := filter.spec.Name() - filterStat = &FilterStat{Name: name, Kind: filter.spec.Kind()} + filterStat = &FilterStat{Name: name, Kind: filter.spec.Kind()} startTime := time.Now() - result := filter.filter.Handle(ctx) + var result string + if isInbound { + result = filter.filter.InboundHandle(ctx) + } else { + result = filter.filter.OutboundHandle(ctx) + } filterStat.Duration = time.Since(startTime) filterStat.Result = result @@ -427,23 +450,22 @@ func (hp *Layer4Pipeline) Handle(ctx context.Layer4Context) { } } -func (hp *Layer4Pipeline) getRunningFilter(name string) *runningFilter { - for _, filter := range hp.runningFilters { +func (l *Layer4Pipeline) getRunningFilter(name string) *runningFilter { + for _, filter := range l.runningFilters { if filter.spec.Name() == name { return filter } } - return nil } // Status returns Status generated by Runtime. -func (hp *Layer4Pipeline) Status() *supervisor.Status { +func (l *Layer4Pipeline) Status() *supervisor.Status { s := &Status{ Filters: make(map[string]interface{}), } - for _, runningFilter := range hp.runningFilters { + for _, runningFilter := range l.runningFilters { s.Filters[runningFilter.spec.Name()] = runningFilter.filter.Status() } @@ -453,8 +475,8 @@ func (hp *Layer4Pipeline) Status() *supervisor.Status { } // Close closes Layer4Pipeline. 
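// Closing the pipeline closes every running filter in order; each filter
// remains responsible for cleaning up its own resources.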
-func (hp *Layer4Pipeline) Close() { - for _, runningFilter := range hp.runningFilters { +func (l *Layer4Pipeline) Close() { + for _, runningFilter := range l.runningFilters { runningFilter.filter.Close() } } diff --git a/pkg/object/layer4pipeline/registry.go b/pkg/object/layer4pipeline/registry.go index 3af009acd9..02ab67d3a2 100644 --- a/pkg/object/layer4pipeline/registry.go +++ b/pkg/object/layer4pipeline/registry.go @@ -49,9 +49,11 @@ type ( // The http pipeline won't call Close for the previous generation. Inherit(filterSpec *FilterSpec, previousGeneration Filter) - // Handle handles one tcp request, all possible results - // need be registered in Results. - Handle(tcpContext context.Layer4Context) (result string) + // InboundHandle handle layer4 inbound data + InboundHandle(tcpContext context.Layer4Context) (result string) + + // OutboundHandle handle layer4 outbound data + OutboundHandle(tcpContext context.Layer4Context) (result string) // Status returns its runtime status. // It could return nil. diff --git a/pkg/protocol/layer4.go b/pkg/protocol/layer4.go index a1937f37cb..f6e1dbbcfe 100644 --- a/pkg/protocol/layer4.go +++ b/pkg/protocol/layer4.go @@ -17,13 +17,22 @@ package protocol -import "github.com/megaease/easegress/pkg/context" +import ( + "github.com/megaease/easegress/pkg/context" +) type ( // Layer4Handler is the common handler for the all backends // which handle the traffic from layer4(tcp/udp) server. Layer4Handler interface { - Handle(ctx context.Layer4Context) + + // InboundHandler filter handle inbound stream from client via ctx + // put handle result to object and pass to next filter + InboundHandler(ctx context.Layer4Context, object interface{}) + + // OutboundHandler filter handle inbound stream from upstream via ctx + // put handle result to object and pass to next filter + OutboundHandler(ctx context.Layer4Context, object interface{}) } // Layer4MuxMapper gets layer4 handler pipeline with mutex diff --git a/pkg/util/connection/connection.go b/pkg/util/connection/connection.go index 16cc4f75dd..8ed43daa91 100644 --- a/pkg/util/connection/connection.go +++ b/pkg/util/connection/connection.go @@ -82,6 +82,18 @@ func NewClientConnection(conn net.Conn, remoteAddr net.Addr, stopChan chan struc return res } +func (c *Connection) Protocol() string { + return c.conn.LocalAddr().Network() +} + +func (c *Connection) LocalAddr() net.Addr { + return c.localAddr +} + +func (c *Connection) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + func (c *Connection) Start() { if c.protocol == "udp" && c.conn.RemoteAddr() == nil { return From c61b7c5c6eb1cc13f7c93cad4c54c3e8edbfab4b Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 16 Sep 2021 11:55:34 +0800 Subject: [PATCH 16/99] [util] add license for iobuffer(copy from mosn) --- pkg/util/iobufferpool/buffer.go | 12 ++++++------ pkg/util/iobufferpool/bytebuffer_pool.go | 12 ++++++------ pkg/util/iobufferpool/iobuffer.go | 12 ++++++------ pkg/util/iobufferpool/iobuffer_pool.go | 14 +++++++------- 4 files changed, 25 insertions(+), 25 deletions(-) diff --git a/pkg/util/iobufferpool/buffer.go b/pkg/util/iobufferpool/buffer.go index a7e134cc05..5f6c52631a 100644 --- a/pkg/util/iobufferpool/buffer.go +++ b/pkg/util/iobufferpool/buffer.go @@ -1,10 +1,10 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/pkg/util/iobufferpool/bytebuffer_pool.go b/pkg/util/iobufferpool/bytebuffer_pool.go index 5529a1f14a..c158053382 100644 --- a/pkg/util/iobufferpool/bytebuffer_pool.go +++ b/pkg/util/iobufferpool/bytebuffer_pool.go @@ -1,10 +1,10 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/pkg/util/iobufferpool/iobuffer.go b/pkg/util/iobufferpool/iobuffer.go index 8072f9b339..8f9aa0f479 100644 --- a/pkg/util/iobufferpool/iobuffer.go +++ b/pkg/util/iobufferpool/iobuffer.go @@ -1,10 +1,10 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/pkg/util/iobufferpool/iobuffer_pool.go b/pkg/util/iobufferpool/iobuffer_pool.go index 2ff77cf5ef..8faa161c72 100644 --- a/pkg/util/iobufferpool/iobuffer_pool.go +++ b/pkg/util/iobufferpool/iobuffer_pool.go @@ -1,10 +1,10 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -26,7 +26,7 @@ const UdpPacketMaxSize = 64 * 1024 var ibPool IoBufferPool -// IoBufferPool is Iobuffer Pool +// IoBufferPool is IoBuffer Pool type IoBufferPool struct { pool sync.Pool } From e78a5126ded08013b6becde1eb4a7543ecc36eb9 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 16 Sep 2021 17:32:44 +0800 Subject: [PATCH 17/99] [layer4proxy] add udp proxy(70%) --- pkg/context/layer4context.go | 12 +- pkg/filter/layer4proxy/pool.go | 176 ------- pkg/filter/layer4proxy/proxy.go | 236 --------- .../layer4rawserver/backendserver.go} | 33 +- pkg/object/layer4rawserver/connection.go | 466 ------------------ pkg/object/layer4rawserver/constants.go | 47 -- pkg/object/layer4rawserver/layer4server.go | 1 - pkg/object/layer4rawserver/listener.go | 194 +++++--- pkg/object/layer4rawserver/mux.go | 38 +- pkg/object/layer4rawserver/pool.go | 53 ++ pkg/object/layer4rawserver/runtime.go | 128 ++++- pkg/object/layer4rawserver/spec.go | 97 +++- pkg/object/layer4rawserver/udpreceiver.go | 28 ++ pkg/registry/registry.go | 1 + pkg/util/connection/connection.go | 148 +++--- pkg/util/layer4stat/layer4stat.go | 19 + pkg/util/timerpool/timerpool.go | 2 +- 17 files changed, 513 insertions(+), 1166 deletions(-) delete mode 100644 pkg/filter/layer4proxy/pool.go delete mode 100644 pkg/filter/layer4proxy/proxy.go rename pkg/{filter/layer4proxy/server.go => object/layer4rawserver/backendserver.go} (96%) delete mode 100644 pkg/object/layer4rawserver/connection.go delete mode 100644 pkg/object/layer4rawserver/constants.go create mode 100644 pkg/object/layer4rawserver/pool.go create mode 100644 pkg/object/layer4rawserver/udpreceiver.go diff --git a/pkg/context/layer4context.go b/pkg/context/layer4context.go index 20c845a7fe..724e55f25a 100644 --- a/pkg/context/layer4context.go +++ b/pkg/context/layer4context.go @@ -166,19 +166,19 @@ func (ctx *layer4Context) AppendWriteBuffer(buffer iobufferpool.IoBuffer) { } // WriteToClient filter handle client upload data, send result to upstream connection -func (ctx *layer4Context) WriteToClient(buffer iobufferpool.IoBuffer) error { +func (ctx *layer4Context) WriteToClient(buffer iobufferpool.IoBuffer) { if buffer == nil || buffer.Len() == 0 { - return nil + return } - return ctx.upstreamConn.Write(buffer) + _ = ctx.upstreamConn.Write(buffer) } // WriteToUpstream filter handle client upload data, send result to upstream connection -func (ctx *layer4Context) WriteToUpstream(buffer iobufferpool.IoBuffer) error { +func (ctx *layer4Context) WriteToUpstream(buffer iobufferpool.IoBuffer) { if buffer == nil || buffer.Len() == 0 { - return nil + return } - return ctx.clientConn.Write(buffer) + _ = ctx.clientConn.Write(buffer) } func (ctx *layer4Context) CallNextHandler(lastResult string) string { diff --git a/pkg/filter/layer4proxy/pool.go b/pkg/filter/layer4proxy/pool.go deleted file mode 100644 index d7a75be959..0000000000 --- a/pkg/filter/layer4proxy/pool.go +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package layer4proxy - -import ( - "fmt" - "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/supervisor" - "github.com/megaease/easegress/pkg/util/iobufferpool" - "github.com/megaease/easegress/pkg/util/layer4filter" - "github.com/megaease/easegress/pkg/util/layer4stat" - "net" -) - -type ( - pool struct { - spec *PoolSpec - - tagPrefix string - filter *layer4filter.Layer4filter - - servers *servers - layer4Stat *layer4stat.Layer4Stat - } - - // PoolSpec describes a pool of servers. - PoolSpec struct { - SpanName string `yaml:"spanName" jsonschema:"omitempty"` - ServersTags []string `yaml:"serversTags" jsonschema:"omitempty,uniqueItems=true"` - Filter *layer4filter.Spec `yaml:"filter" jsonschema:"omitempty"` - Servers []*Server `yaml:"servers" jsonschema:"omitempty"` - ServiceRegistry string `yaml:"serviceRegistry" jsonschema:"omitempty"` - ServiceName string `yaml:"serviceName" jsonschema:"omitempty"` - LoadBalance *LoadBalance `yaml:"loadBalance" jsonschema:"required"` - } - - // PoolStatus is the status of Pool. - PoolStatus struct { - Stat *layer4stat.Status `yaml:"stat"` - } - - UpStreamConn struct { - conn net.Conn - done chan struct{} - writeBufferChan chan iobufferpool.IoBuffer - } -) - -func NewUpStreamConn(conn net.Conn) *UpStreamConn { - return &UpStreamConn{ - conn: conn, - writeBufferChan: make(chan iobufferpool.IoBuffer, 8), - } -} - -// Validate validates poolSpec. 
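// Validation requires either a serviceName or at least one static server, and
// either all or none of the static servers to carry a weight. A minimal sketch
// of a weighted static pool (the addresses are illustrative):
//
//	spec := PoolSpec{
//		Servers: []*Server{
//			{Addr: "127.0.0.1:9090", Weight: 1},
//			{Addr: "127.0.0.1:9091", Weight: 2},
//		},
//		LoadBalance: &LoadBalance{Policy: PolicyWeightedRandom},
//	}
//	// spec.Validate() returns nil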
-func (s PoolSpec) Validate() error { - if s.ServiceName == "" && len(s.Servers) == 0 { - return fmt.Errorf("both serviceName and servers are empty") - } - - serversGotWeight := 0 - for _, server := range s.Servers { - if server.Weight > 0 { - serversGotWeight++ - } - } - if serversGotWeight > 0 && serversGotWeight < len(s.Servers) { - return fmt.Errorf("not all servers have weight(%d/%d)", - serversGotWeight, len(s.Servers)) - } - - if s.ServiceName == "" { - servers := newStaticServers(s.Servers, s.ServersTags, s.LoadBalance) - if servers.len() == 0 { - return fmt.Errorf("serversTags picks none of servers") - } - } - - return nil -} - -func newPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) *pool { - - return &pool{ - spec: spec, - tagPrefix: tagPrefix, - - servers: newServers(super, spec), - layer4Stat: layer4stat.New(), - } -} - -func (p *pool) status() *PoolStatus { - s := &PoolStatus{Stat: p.layer4Stat.Status()} - return s -} - -func (u *UpStreamConn) Write(source iobufferpool.IoBuffer) { - buf := source.Clone() - source.Drain(buf.Len()) - u.writeBufferChan <- buf -} - -func (u *UpStreamConn) WriteLoop() { - for { - select { - case buf, ok := <-u.writeBufferChan: - if !ok { - return - } - - iobuf := buf.(iobufferpool.IoBuffer) - for { - n, err := u.conn.Write(iobuf.Bytes()) - if n == 0 || err != nil { - return - } - iobuf.Drain(n) - } - case <-u.done: - return - } - } -} - -func (p *pool) handle(ctx context.Layer4Context) string { - - conn := ctx.UpStreamConn() - if conn == nil { - server, err := p.servers.next(ctx) - if err != nil { - return resultInternalError - } - - switch ctx.Protocol() { - case "tcp": - if tconn, dialErr := net.Dial("tcp", server.Addr); dialErr != nil { - logger.Errorf("dial tcp to %s failed, err: %s", server.Addr, dialErr.Error()) - return resultServerError - } else { - ctx.SetUpStreamConn(tconn) - upstreamConn := NewUpStreamConn(tconn) - go upstreamConn.WriteLoop() - - go func() { - // TODO do upstream connection read - }() - } - case "udp": - - } - } - - return "" -} - -func (p *pool) close() { - p.servers.close() -} diff --git a/pkg/filter/layer4proxy/proxy.go b/pkg/filter/layer4proxy/proxy.go deleted file mode 100644 index 0bf5b4db62..0000000000 --- a/pkg/filter/layer4proxy/proxy.go +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package layer4proxy - -import ( - "fmt" - "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/object/layer4pipeline" - "github.com/megaease/easegress/pkg/util/fallback" -) - -const ( - // Kind is the kind of Proxy. - Kind = "Proxy" - - resultInternalError = "internalError" - resultClientError = "clientError" - resultServerError = "serverError" -) - -var results = []string{ - resultInternalError, - resultClientError, - resultServerError, -} - -func init() { - layer4pipeline.Register(&Proxy{}) -} - -type ( - // Proxy is the filter Proxy. 
- Proxy struct { - filterSpec *layer4pipeline.FilterSpec - spec *Spec - - fallback *fallback.Fallback - - mainPool *pool - candidatePools []*pool - mirrorPool *pool - } - - // Spec describes the Proxy. - Spec struct { - Fallback *FallbackSpec `yaml:"fallback,omitempty" jsonschema:"omitempty"` - MainPool *PoolSpec `yaml:"mainPool" jsonschema:"required"` - CandidatePools []*PoolSpec `yaml:"candidatePools,omitempty" jsonschema:"omitempty"` - MirrorPool *PoolSpec `yaml:"mirrorPool,omitempty" jsonschema:"omitempty"` - FailureCodes []int `yaml:"failureCodes" jsonschema:"omitempty,uniqueItems=true,format=httpcode-array"` - } - - // FallbackSpec describes the fallback policy. - FallbackSpec struct { - ForCodes bool `yaml:"forCodes"` - fallback.Spec `yaml:",inline"` - } - - // Status is the status of Proxy. - Status struct { - MainPool *PoolStatus `yaml:"mainPool"` - CandidatePools []*PoolStatus `yaml:"candidatePools,omitempty"` - MirrorPool *PoolStatus `yaml:"mirrorPool,omitempty"` - } -) - -// Validate validates Spec. -func (s Spec) Validate() error { - // NOTE: The tag of v parent may be behind mainPool. - if s.MainPool == nil { - return fmt.Errorf("mainPool is required") - } - - if s.MainPool.Filter != nil { - return fmt.Errorf("filter must be empty in mainPool") - } - - if len(s.CandidatePools) > 0 { - for _, v := range s.CandidatePools { - if v.Filter == nil { - return fmt.Errorf("filter of candidatePool is required") - } - } - } - - if s.MirrorPool != nil { - if s.MirrorPool.Filter == nil { - return fmt.Errorf("filter of mirrorPool is required") - } - } - - if len(s.FailureCodes) == 0 { - if s.Fallback != nil { - return fmt.Errorf("fallback needs failureCodes") - } - } - - return nil -} - -// Kind returns the kind of Proxy. -func (b *Proxy) Kind() string { - return Kind -} - -// DefaultSpec returns the default spec of Proxy. -func (b *Proxy) DefaultSpec() interface{} { - return &Spec{} -} - -// Description returns the description of Proxy. -func (b *Proxy) Description() string { - return "Proxy sets the proxy of proxy servers" -} - -// Results returns the results of Proxy. -func (b *Proxy) Results() []string { - return results -} - -// Init initializes Proxy. -func (b *Proxy) Init(filterSpec *layer4pipeline.FilterSpec) { - b.filterSpec, b.spec = filterSpec, filterSpec.FilterSpec().(*Spec) - b.reload() -} - -// Inherit inherits previous generation of Proxy. -func (b *Proxy) Inherit(filterSpec *layer4pipeline.FilterSpec, previousGeneration layer4pipeline.Filter) { - previousGeneration.Close() - b.Init(filterSpec) -} - -func (b *Proxy) reload() { - super := b.filterSpec.Super() - - b.mainPool = newPool(super, b.spec.MainPool, "proxy#main") - - if b.spec.Fallback != nil { - b.fallback = fallback.New(&b.spec.Fallback.Spec) - } - - if len(b.spec.CandidatePools) > 0 { - var candidatePools []*pool - for k := range b.spec.CandidatePools { - candidatePools = append(candidatePools, - newPool(super, b.spec.CandidatePools[k], fmt.Sprintf("proxy#candidate#%d", k))) - } - b.candidatePools = candidatePools - } - if b.spec.MirrorPool != nil { - b.mirrorPool = newPool(super, b.spec.MirrorPool, "proxy#mirror") - } -} - -// Status returns Proxy status. -func (b *Proxy) Status() interface{} { - s := &Status{ - MainPool: b.mainPool.status(), - } - if b.candidatePools != nil { - for k := range b.candidatePools { - s.CandidatePools = append(s.CandidatePools, b.candidatePools[k].status()) - } - } - if b.mirrorPool != nil { - s.MirrorPool = b.mirrorPool.status() - } - return s -} - -// Close closes Proxy. 
-func (b *Proxy) Close() { - b.mainPool.close() - - if b.candidatePools != nil { - for _, v := range b.candidatePools { - v.close() - } - } - - if b.mirrorPool != nil { - b.mirrorPool.close() - } -} - -func (b *Proxy) fallbackForCodes(ctx context.HTTPContext) bool { - if b.fallback != nil && b.spec.Fallback.ForCodes { - for _, code := range b.spec.FailureCodes { - if ctx.Response().StatusCode() == code { - b.fallback.Fallback(ctx) - return true - } - } - } - return false -} - -// Handle handles HTTPContext. -func (b *Proxy) Handle(ctx context.Layer4Context) (result string) { - result = b.handle(ctx) - return ctx.CallNextHandler(result) -} - -func (b *Proxy) handle(ctx context.Layer4Context) (result string) { - // TODO add mirror pool proxy - - var p *pool - if len(b.candidatePools) > 0 { - for k, v := range b.candidatePools { - if v.filter.Filter(ctx) { - p = b.candidatePools[k] - break - } - } - } - - if p == nil { - p = b.mainPool - } - return p.handle(ctx) -} diff --git a/pkg/filter/layer4proxy/server.go b/pkg/object/layer4rawserver/backendserver.go similarity index 96% rename from pkg/filter/layer4proxy/server.go rename to pkg/object/layer4rawserver/backendserver.go index 2cf96bc27b..d52f1f72ea 100644 --- a/pkg/filter/layer4proxy/server.go +++ b/pkg/object/layer4rawserver/backendserver.go @@ -15,15 +15,13 @@ * limitations under the License. */ -package layer4proxy +package layer4rawserver import ( "fmt" "math/rand" - "net" "sync" "sync/atomic" - "time" "github.com/megaease/easegress/pkg/context" "github.com/megaease/easegress/pkg/logger" @@ -42,8 +40,6 @@ const ( PolicyWeightedRandom = "weightedRandom" // PolicyIPHash is the policy of ip hash. PolicyIPHash = "ipHash" - - retryTimeout = 3 * time.Second ) type ( @@ -79,15 +75,6 @@ type ( } ) -func (s *Server) String() string { - return fmt.Sprintf("%s,%v,%d", s.Addr, s.Tags, s.Weight) -} - -// Validate validates LoadBalance. -func (lb LoadBalance) Validate() error { - return nil -} - func newServers(super *supervisor.Supervisor, poolSpec *PoolSpec) *servers { s := &servers{ poolSpec: poolSpec, @@ -96,22 +83,23 @@ func newServers(super *supervisor.Supervisor, poolSpec *PoolSpec) *servers { } s.useStaticServers() - if poolSpec.ServiceRegistry == "" || poolSpec.ServiceName == "" { return s } s.serviceRegistry = s.super.MustGetSystemController(serviceregistry.Kind). 
Instance().(*serviceregistry.ServiceRegistry) - s.tryUseService() s.serviceWatcher = s.serviceRegistry.NewServiceWatcher(s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName) go s.watchService() - return s } +func (s *Server) String() string { + return fmt.Sprintf("%s,%v,%d", s.Addr, s.Tags, s.Weight) +} + func (s *servers) watchService() { for { select { @@ -136,7 +124,6 @@ func (s *servers) tryUseService() { s.useStaticServers() return } - s.useService(serviceInstanceSpecs) } @@ -185,17 +172,14 @@ func (s *servers) snapshot() *staticServers { func (s *servers) len() int { static := s.snapshot() - return static.len() } func (s *servers) next(ctx context.Layer4Context) (*Server, error) { static := s.snapshot() - if static.len() == 0 { return nil, fmt.Errorf("no server available") } - return static.next(ctx), nil } @@ -236,7 +220,6 @@ func newStaticServers(servers []*Server, tags []string, lb *LoadBalance) *static } } ss.servers = chosenServers - return ss } @@ -261,9 +244,7 @@ func (ss *staticServers) next(ctx context.Layer4Context) *Server { case PolicyIPHash: return ss.ipHash(ctx) } - logger.Errorf("BUG: unknown load balance policy: %s", ss.lb.Policy) - return ss.roundRobin() } @@ -294,8 +275,6 @@ func (ss *staticServers) weightedRandom() *Server { } func (ss *staticServers) ipHash(ctx context.Layer4Context) *Server { - addr := ctx.RemoteAddr().String() - host, _, _ := net.SplitHostPort(addr) - sum32 := int(hashtool.Hash32(host)) + sum32 := int(hashtool.Hash32(ctx.ClientAddr().String())) return ss.servers[sum32%len(ss.servers)] } diff --git a/pkg/object/layer4rawserver/connection.go b/pkg/object/layer4rawserver/connection.go deleted file mode 100644 index 443c80f79a..0000000000 --- a/pkg/object/layer4rawserver/connection.go +++ /dev/null @@ -1,466 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package layer4rawserver - -import ( - "io" - "net" - "reflect" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/util/iobufferpool" - "github.com/megaease/easegress/pkg/util/timerpool" -) - -// ConnectionCloseType represent connection close type -type ConnectionCloseType string - -//Connection close types -const ( - // FlushWrite means write buffer to underlying io then close connection - FlushWrite ConnectionCloseType = "FlushWrite" - // NoFlush means close connection without flushing buffer - NoFlush ConnectionCloseType = "NoFlush" -) - -// ConnectionEvent type -type ConnectionEvent string - -// ConnectionEvent types -const ( - RemoteClose ConnectionEvent = "RemoteClose" - LocalClose ConnectionEvent = "LocalClose" - OnReadErrClose ConnectionEvent = "OnReadErrClose" - OnWriteErrClose ConnectionEvent = "OnWriteErrClose" - OnConnect ConnectionEvent = "OnConnect" - Connected ConnectionEvent = "ConnectedFlag" - ConnectTimeout ConnectionEvent = "ConnectTimeout" - ConnectFailed ConnectionEvent = "ConnectFailed" - OnReadTimeout ConnectionEvent = "OnReadTimeout" - OnWriteTimeout ConnectionEvent = "OnWriteTimeout" -) - -type Connection struct { - net.Conn - - closed uint32 - connected uint32 - startOnce sync.Once - - // readLoop/writeLoop goroutine fields: - internalStopChan chan struct{} - readEnabled bool - readEnabledChan chan bool // if we need to reload read filters, it's better to stop read data before reload filters - - lastBytesSizeRead int64 - lastWriteSizeWrite int64 - - curWriteBufferData []iobufferpool.IoBuffer - readBuffer iobufferpool.IoBuffer - writeBuffers net.Buffers - ioBuffers []iobufferpool.IoBuffer - writeBufferChan chan *[]iobufferpool.IoBuffer -} - -func New(conn net.Conn) *Connection { - return &Connection{ - Conn: conn, - } -} - -func (c *Connection) StartRWLoop(ctx context.Layer4Context) { - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection read loop crashed, local addr: %s, remote addr: %s, err: %+v", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) - } - - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection close due to read loop crashed failed, local addr: %s, remote addr: %s, err: %+v", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) - } - }() - _ = c.Close(NoFlush, LocalClose, ctx) - }() - }() - c.startReadLoop(ctx) - }() - - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection write loop crashed, local addr: %s, remote addr: %s, err: %+v", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) - } - - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection close due to write loop crashed failed, local addr: %s, remote addr: %s, err: %+v", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) - } - }() - _ = c.Close(NoFlush, LocalClose, ctx) - }() - }() - c.startWriteLoop(ctx) - }() -} - -func (c *Connection) startReadLoop(ctx context.Layer4Context) { - for { - select { - case <-c.internalStopChan: - return - case <-c.readEnabledChan: - default: - if c.readEnabled { - err := c.doRead(ctx) - if err != nil { - if te, ok := err.(net.Error); ok && te.Timeout() { - if ctx.Protocol() == "tcp" && c.readBuffer != nil && - c.readBuffer.Len() == 0 && c.readBuffer.Cap() > DefaultBufferReadCapacity { - 
c.readBuffer.Free() - c.readBuffer.Alloc(DefaultBufferReadCapacity) - } - continue - } - - if c.lastBytesSizeRead == 0 || err == io.EOF { - logger.Debugf("%s connection write loop closed, local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - } else { - logger.Errorf("%s connection write loop closed, local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - } - - if err == io.EOF { - _ = c.Close(NoFlush, RemoteClose, ctx) - } else { - _ = c.Close(NoFlush, OnReadErrClose, ctx) - } - return - } - } else { - select { - case <-c.readEnabledChan: - case <-time.After(100 * time.Millisecond): - } - } - } - - } -} - -func (c *Connection) startWriteLoop(ctx context.Layer4Context) { - defer func() { - close(c.writeBufferChan) - }() - - var err error - for { - select { - case <-c.internalStopChan: - return - case buf, ok := <-c.writeBufferChan: - if !ok { - return - } - c.appendBuffer(buf) - - QUIT: - for i := 0; i < 10; i++ { - select { - case buf, ok := <-c.writeBufferChan: - if !ok { - return - } - c.appendBuffer(buf) - default: - break QUIT - } - - c.setWriteDeadline(ctx) - _, err = c.doWrite(ctx) - } - } - - if err != nil { - - if err == iobufferpool.EOF { - logger.Debugf("%s connection write loop occur error, local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - c.Close(NoFlush, LocalClose, ctx) - } else { - logger.Errorf("%s connection write loop occur error, local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - } - - if te, ok := err.(net.Error); ok && te.Timeout() { - c.Close(NoFlush, OnWriteTimeout, ctx) - } - - if ctx.Protocol() == "udp" && strings.Contains(err.Error(), "connection refused") { - c.Close(NoFlush, RemoteClose, ctx) - } - //other write errs not close connection, because readbuffer may have unread data, wait for readloop close connection, - - return - } - } -} - -func (c *Connection) appendBuffer(buffers *[]iobufferpool.IoBuffer) { - if buffers == nil { - return - } - for _, buf := range *buffers { - if buf == nil { - continue - } - c.ioBuffers = append(c.ioBuffers, buf) - c.writeBuffers = append(c.writeBuffers, buf.Bytes()) - } -} - -func (c *Connection) doRead(ctx context.Layer4Context) (err error) { - if c.readBuffer == nil { - switch ctx.Protocol() { - case "udp": - // A UDP socket will Read up to the size of the receiving buffer and will discard the rest - c.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) - default: - c.readBuffer = iobufferpool.GetIoBuffer(DefaultBufferReadCapacity) - } - } - - var bytesRead int64 - c.setReadDeadline(ctx) - bytesRead, err = c.readBuffer.ReadOnce(c.Conn) - - if err != nil { - if atomic.LoadUint32(&c.closed) == 1 { - return err - } - - if te, ok := err.(net.Error); ok && te.Timeout() { - // TODO add timeout handle(such as send keepalive msg to active connection) - - if bytesRead == 0 { - return err - } - } else if err != io.EOF { - return err - } - } - - //todo: ReadOnce maybe always return (0, nil) and causes dead loop (hack) - if bytesRead == 0 && err == nil { - err = io.EOF - logger.Errorf("%s connection read maybe always return (0, nil), local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - } - c.lastBytesSizeRead = int64(c.readBuffer.Len()) - return -} - -// Write send recv data(batch mode) to upstream -func (c *Connection) Write(ctx context.Layer4Context, 
buffers ...iobufferpool.IoBuffer) (err error) { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) - err = ErrConnectionHasClosed - } - }() - - // TODO get filters from layer4 pipeline, transform buffers via filters - - select { - case c.writeBufferChan <- &buffers: - return - default: - } - - t := timerpool.Get(DefaultConnTryTimeout) - select { - case c.writeBufferChan <- &buffers: - case <-t.C: - err = ErrWriteBufferChanTimeout - } - timerpool.Put(t) - return -} - -func (c *Connection) setWriteDeadline(ctx context.Layer4Context) { - args := ctx.ConnectionArgs() - if args.ProxyWriteTimeout > 0 { - _ = c.Conn.SetWriteDeadline(time.Now().Add(time.Duration(args.ProxyWriteTimeout) * time.Millisecond)) - } else { - switch ctx.Protocol() { - case "udp": - _ = c.Conn.SetWriteDeadline(time.Now().Add(DefaultUDPIdleTimeout)) - case "tcp": - _ = c.Conn.SetWriteDeadline(time.Now().Add(DefaultConnWriteTimeout)) - } - } -} - -func (c *Connection) setReadDeadline(ctx context.Layer4Context) { - args := ctx.ConnectionArgs() - if args.ProxyWriteTimeout > 0 { - _ = c.Conn.SetReadDeadline(time.Now().Add(time.Duration(args.ProxyReadTimeout) * time.Millisecond)) - } else { - switch ctx.Protocol() { - case "udp": - _ = c.Conn.SetReadDeadline(time.Now().Add(DefaultUDPReadTimeout)) - case "tcp": - _ = c.Conn.SetReadDeadline(time.Now().Add(ConnReadTimeout)) - } - } -} - -// Close handle connection close event -func (c *Connection) Close(ccType ConnectionCloseType, eventType ConnectionEvent, ctx context.Layer4Context) (err error) { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), r) - err = ErrConnectionHasClosed - } - }() - - if ccType == FlushWrite { - _ = c.Write(ctx, iobufferpool.NewIoBufferEOF()) - return nil - } - - if !atomic.CompareAndSwapUint32(&c.closed, 0, 1) { - return nil - } - - // connection failed in client mode - if c.Conn == nil || reflect.ValueOf(c.Conn).IsNil() { - return nil - } - - // close tcp conn read first - if tconn, ok := c.Conn.(*net.TCPConn); ok { - logger.Errorf("%s connection close read, local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - _ = tconn.CloseRead() - } - - // close conn recv, then notify read/write loop to exit - close(c.internalStopChan) - _ = c.Conn.Close() - - logger.Errorf("%s connection closed, local addr: %s, remote addr: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String()) - return nil -} - -func (c *Connection) writeBufLen() (bufLen int) { - for _, buf := range c.writeBuffers { - bufLen += len(buf) - } - return -} - -func (c *Connection) doWrite(ctx context.Layer4Context) (interface{}, error) { - bytesSent, err := c.doWriteIO(ctx) - if err != nil && atomic.LoadUint32(&c.closed) == 1 { - return 0, nil - } - - c.lastWriteSizeWrite = int64(c.writeBufLen()) - return bytesSent, err -} - -// -func (c *Connection) doWriteIO(ctx context.Layer4Context) (bytesSent int64, err error) { - buffers := c.writeBuffers - switch ctx.Protocol() { - case "udp": - addr := ctx.RemoteAddr().(*net.UDPAddr) - n := 0 - bytesSent = 0 - for _, buf := range c.ioBuffers { - if c.Conn.RemoteAddr() == nil { - n, err = c.Conn.(*net.UDPConn).WriteToUDP(buf.Bytes(), addr) - } else { - n, err = 
c.Conn.Write(buf.Bytes()) - } - if err != nil { - break - } - bytesSent += int64(n) - } - case "tcp": - bytesSent, err = buffers.WriteTo(c.Conn) - } - - if err != nil { - return bytesSent, err - } - - for i, buf := range c.ioBuffers { - c.ioBuffers[i] = nil - c.writeBuffers[i] = nil - if buf.EOF() { - err = iobufferpool.EOF - } - if e := iobufferpool.PutIoBuffer(buf); e != nil { - logger.Errorf("%s connection give io buffer failed, local addr: %s, remote addr: %s, err: %s", - ctx.Protocol(), ctx.LocalAddr().String(), ctx.RemoteAddr().String(), err.Error()) - } - } - c.ioBuffers = c.ioBuffers[:0] - c.writeBuffers = c.writeBuffers[:0] - return -} - -func (c *Connection) SetNoDelay(enable bool) { - if c.Conn != nil { - if tconn, ok := c.Conn.(*net.TCPConn); ok { - _ = tconn.SetNoDelay(enable) - } - } -} - -func (c *Connection) ReadEnabled() bool { - return c.readEnabled -} - -func (c *Connection) State() ConnState { - if atomic.LoadUint32(&c.closed) == 1 { - return ConnClosed - } - if atomic.LoadUint32(&c.connected) == 1 { - return ConnActive - } - return ConnInit -} diff --git a/pkg/object/layer4rawserver/constants.go b/pkg/object/layer4rawserver/constants.go deleted file mode 100644 index 5d09208b12..0000000000 --- a/pkg/object/layer4rawserver/constants.go +++ /dev/null @@ -1,47 +0,0 @@ -package layer4rawserver - -import ( - "errors" - "time" -) - -var ( - ErrConnectionHasClosed = errors.New("connection has closed") - ErrWriteTryLockTimeout = errors.New("write trylock has timeout") - ErrWriteBufferChanTimeout = errors.New("writeBufferChan has timeout") -) - -// Default connection arguments -const ( - DefaultBufferReadCapacity = 1 << 7 - - DefaultConnReadTimeout = 15 * time.Second - DefaultConnWriteTimeout = 15 * time.Second - DefaultConnTryTimeout = 60 * time.Second - DefaultIdleTimeout = 90 * time.Second - DefaultUDPIdleTimeout = 5 * time.Second - DefaultUDPReadTimeout = 1 * time.Second - ConnReadTimeout = 15 * time.Second -) - -// ConnState Connection status -type ConnState int - -// Connection statuses -const ( - ConnInit ConnState = iota - ConnActive - ConnClosed -) - -type ListenerState int - -// listener state -// ListenerActivated means listener is activated, an activated listener can be started or stopped -// ListenerRunning means listener is running, start a running listener will be ignored. -// ListenerStopped means listener is stopped, start a stopped listener without restart flag will be ignored. -const ( - ListenerActivated ListenerState = iota - ListenerRunning - ListenerStopped -) diff --git a/pkg/object/layer4rawserver/layer4server.go b/pkg/object/layer4rawserver/layer4server.go index e8ea8628dd..79e88cee3d 100644 --- a/pkg/object/layer4rawserver/layer4server.go +++ b/pkg/object/layer4rawserver/layer4server.go @@ -54,7 +54,6 @@ func (l4 *Layer4Server) Kind() string { // DefaultSpec returns the default spec of Layer4Server. 
func (l4 *Layer4Server) DefaultSpec() interface{} { return &Spec{ - BindPort: true, MaxConnections: 10240, ProxyConnectTimeout: 15 * 1000, } diff --git a/pkg/object/layer4rawserver/listener.go b/pkg/object/layer4rawserver/listener.go index 19517ad5ea..c9facca9d4 100644 --- a/pkg/object/layer4rawserver/listener.go +++ b/pkg/object/layer4rawserver/listener.go @@ -18,109 +18,123 @@ package layer4rawserver import ( - "context" + stdcontext "context" "fmt" + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/util/iobufferpool" + "github.com/megaease/easegress/pkg/util/limitlistener" "net" "runtime/debug" "sync" - "time" +) - context2 "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/util/connectionwrapper" - "github.com/megaease/easegress/pkg/util/limitlistener" +type ListenerState int + +// listener state +// ListenerInited means listener is inited, an inited listener can be started or stopped +// ListenerRunning means listener is running, start a running listener will be ignored. +// ListenerStopped means listener is stopped, start a stopped listener without restart flag will be ignored. +const ( + ListenerInited ListenerState = iota + ListenerRunning + ListenerStopped ) type listener struct { - m *Mux - packetConn net.PacketConn // udp connection - limitListener *limitlistener.LimitListener // tcp connection listener with connection limit + m *mux + name string + udpListener net.PacketConn // udp connection listener + tcpListener *limitlistener.LimitListener // tcp connection listener with connection limit - state stateType + state ListenerState listenAddr string protocol string // enum:udp/tcp keepalive bool reuseport bool maxConnections uint32 - mutex *sync.Mutex + mutex *sync.Mutex + stopChan chan struct{} // connection listen to this stopChan + + onTcpAccept func(conn net.Conn, listenerStopChan chan struct{}) + onUdpAccept func(clientAddr net.Addr, buffer iobufferpool.IoBuffer) } -func NewListener(spec *Spec, m *Mux) *listener { +func newListener(spec *Spec, onAccept func(conn net.Conn, listenerStopChan chan struct{})) *listener { listen := &listener{ - m: m, + state: ListenerInited, + listenAddr: fmt.Sprintf(":%d", spec.Port), protocol: spec.Protocol, keepalive: spec.KeepAlive, - reuseport: spec.Reuseport, maxConnections: spec.MaxConnections, - mutex: &sync.Mutex{}, - } - if spec.LocalAddr == "" { - listen.listenAddr = fmt.Sprintf(":%d", spec.Port) - } else { - listen.listenAddr = fmt.Sprintf("%s:%d", spec.LocalAddr, spec.Port) + onTcpAccept: onAccept, + + mutex: &sync.Mutex{}, } return listen } -func (l *listener) setMaxConnection(maxConn uint32) { - l.limitListener.SetMaxConnection(maxConn) -} - func (l *listener) start() { + ignored := func() bool { + l.mutex.Lock() + defer l.mutex.Unlock() + + switch l.state { + case ListenerRunning: + logger.Debugf("listener %s %s is already running", l.protocol, l.listenAddr) + return true + case ListenerStopped: + logger.Debugf("listener %s %s restart", l.protocol, l.listenAddr) + if err := l.listen(); err != nil { + logger.Errorf("listener %s %s restart failed, err: %+v", l.protocol, l.listenAddr, err) + return true + } + default: + if l.udpListener == nil && l.tcpListener == nil { + if err := l.listen(); err != nil { + logger.Errorf("listener %s %s start failed, err: %+v", l.protocol, l.listenAddr, err) + } + } + } + l.state = ListenerRunning + return false + }() + + if ignored { + return + } + switch l.protocol { + case "udp": + 
l.readMsgEventLoop() + case "tcp": + l.acceptEventLoop() + } } func (l *listener) listen() error { switch l.protocol { case "udp": c := net.ListenConfig{} - if ul, err := c.ListenPacket(context.Background(), l.protocol, l.listenAddr); err != nil { + if ul, err := c.ListenPacket(stdcontext.Background(), l.protocol, l.listenAddr); err != nil { return err } else { - l.packetConn = ul + l.udpListener = ul } case "tcp": if tl, err := net.Listen(l.protocol, l.listenAddr); err != nil { return err } else { - l.limitListener = limitlistener.NewLimitListener(tl, l.maxConnections) + l.tcpListener = limitlistener.NewLimitListener(tl, l.maxConnections) } } return nil } -func (l *listener) accept(ctx context2.Layer4Context) error { - rl, err := l.limitListener.Accept() - if err != nil { - return err - } - - go func(ctx context2.Layer4Context) { - if r := recover(); r != nil { - logger.Errorf("failed tp accept conn for %s %s\n, stack trace: \n", - l.protocol, l.listenAddr, debug.Stack()) - } - - ctx.SetRemoteAddr(rl.RemoteAddr()) // fix it - }(ctx) - return nil -} - -func (l *listener) readUpdPacket(ctx context2.Layer4Context) { - go func(ctx context2.Layer4Context) { - if r := recover(); r != nil { - logger.Errorf("failed tp accept conn for %s %s\n, stack trace: \n", - l.protocol, l.listenAddr, debug.Stack()) - } - - }(ctx) -} - func (l *listener) acceptEventLoop() { for { - if tconn, err := l.limitListener.Accept(); err != nil { + if tconn, err := l.tcpListener.Accept(); err != nil { if nerr, ok := err.(net.Error); ok && nerr.Timeout() { logger.Infof("tcp listener(%s) stop accept connection due to deadline, err: %s", l.listenAddr, nerr) @@ -149,37 +163,69 @@ func (l *listener) acceptEventLoop() { if splitErr != nil || !l.m.AllowIP(host) { logger.Debugf("reject remote connection from: %s", tconn.RemoteAddr().String()) _ = tconn.Close() - } else { - go func() { - conn := connectionwrapper.New(tconn) - ctx := context2.NewLayer4Context("tcp", conn, l.m) - conn.StartRWLoop(ctx) - }() + continue } + go l.onTcpAccept(tconn, l.stopChan) } } } -func (l *listener) Stop() error { - var err error - switch l.protocol { - case "udp": - err = l.packetConn.SetDeadline(time.Now()) - case "tcp": - err = l.limitListener.Listener.(*net.TCPListener).SetDeadline(time.Now()) +func (l *listener) setMaxConnection(maxConn uint32) { + l.tcpListener.SetMaxConnection(maxConn) +} + +func (l *listener) readMsgEventLoop() { + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("failed to read udp msg for %s\n, stack trace: \n", l.listenAddr, debug.Stack()) + l.readMsgEventLoop() + } + }() + + l.readMsgLoop() + }() +} + +func (l *listener) readMsgLoop() { + conn := l.udpListener.(*net.UDPConn) + buf := iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) + defer iobufferpool.PutIoBuffer(buf) + + for { + buf.Reset() + n, rAddr, err := conn.ReadFromUDP(buf.Bytes()[:buf.Cap()]) + _ = buf.Grow(n) + + if err != nil { + if nerr, ok := err.(net.Error); ok && nerr.Timeout() { + logger.Infof("udp listener %s stop receiving packet by deadline", l.listenAddr) + return + } + if ope, ok := err.(*net.OpError); ok { + if !(ope.Timeout() && ope.Temporary()) { + logger.Errorf("udp listener %s occurs non-recoverable error, stop listening and receiving", l.listenAddr) + return + } + } + logger.Errorf("udp listener %s receiving packet occur error: %+v", l.listenAddr, err) + continue + } + + l.onUdpAccept(rAddr, buf) } - return err } -func (l *listener) Close() error { +func (l *listener) close() error { l.mutex.Lock() defer 
l.mutex.Unlock()
-	if l.limitListener != nil {
-		return l.limitListener.Close()
+	if l.tcpListener != nil {
+		return l.tcpListener.Close()
 	}
-	if l.packetConn != nil {
-		return l.packetConn.Close()
+	if l.udpListener != nil {
+		return l.udpListener.Close()
 	}
+	close(l.stopChan) // TODO: should established connections also be closed when the listener is closed?
 	return nil
 }
diff --git a/pkg/object/layer4rawserver/mux.go b/pkg/object/layer4rawserver/mux.go
index 9d64f41c45..66b12caeae 100644
--- a/pkg/object/layer4rawserver/mux.go
+++ b/pkg/object/layer4rawserver/mux.go
@@ -26,15 +26,14 @@ import (
 	"github.com/megaease/easegress/pkg/protocol"
 	"github.com/megaease/easegress/pkg/supervisor"
 	"github.com/megaease/easegress/pkg/util/ipfilter"
-	"github.com/megaease/easegress/pkg/util/stringtool"
 )
 
 type (
-	Mux struct {
-		rules atomic.Value // *MuxRules
+	mux struct {
+		rules atomic.Value // *muxRules
 	}
 
-	MuxRules struct {
+	muxRules struct {
 		superSpec *supervisor.Spec
 		spec      *Spec
 
@@ -61,7 +60,6 @@ func newIPFilterChain(parentIPFilters *ipfilter.IPFilters, childSpec *ipfilter.S
 	if len(ipFilters.Filters()) == 0 {
 		return nil
 	}
-
 	return ipFilters
 }
 
@@ -73,12 +71,12 @@ func newIPFilter(spec *ipfilter.Spec) *ipfilter.IPFilter {
 	return ipfilter.New(spec)
 }
 
-func (mr *MuxRules) pass(ctx context.Layer4Context) bool {
+func (mr *muxRules) pass(ctx context.Layer4Context) bool {
 	if mr.ipFilter == nil {
 		return true
 	}
 
-	switch addr := ctx.RemoteAddr().(type) {
+	switch addr := ctx.ClientAddr().(type) {
 	case *net.UDPAddr:
 		return mr.ipFilter.Allow(addr.IP.String())
 	case *net.TCPAddr:
@@ -89,21 +87,20 @@
 	return false
 }
 
-func newMux(mapper protocol.Layer4MuxMapper) *Mux {
-	m := &Mux{}
+func newMux(mapper protocol.Layer4MuxMapper) *mux {
+	m := &mux{}
 
-	m.rules.Store(&MuxRules{
+	m.rules.Store(&muxRules{
 		spec:      &Spec{},
 		muxMapper: mapper,
 	})
-
 	return m
 }
 
-func (m *Mux) reloadRules(superSpec *supervisor.Spec, muxMapper protocol.Layer4MuxMapper) {
+func (m *mux) reloadRules(superSpec *supervisor.Spec, muxMapper protocol.Layer4MuxMapper) {
 	spec := superSpec.ObjectSpec().(*Spec)
 
-	rules := &MuxRules{
+	rules := &muxRules{
 		superSpec: superSpec,
 		spec:      spec,
 		muxMapper: muxMapper,
@@ -113,22 +110,21 @@ func (m *Mux) reloadRules(superSpec *supervisor.Spec, muxMapper protocol.Layer4M
 	m.rules.Store(rules)
 }
 
-func (m *Mux) handleIPNotAllow(ctx context.Layer4Context) {
-	ctx.AddTag(stringtool.Cat("ip ", ctx.RemoteAddr().String(), " not allow"))
-}
-
-func (m *Mux) AllowIP(ipStr string) bool {
-	rules := m.rules.Load().(*MuxRules)
+func (m *mux) AllowIP(ipStr string) bool {
+	rules := m.rules.Load().(*muxRules)
 	if rules == nil {
 		return true
 	}
 	return rules.ipFilter.Allow(ipStr)
 }
 
-func (m *Mux) GetHandler(name string) (protocol.Layer4Handler, bool) {
-	rules := m.rules.Load().(*MuxRules)
+func (m *mux) GetHandler(name string) (protocol.Layer4Handler, bool) {
+	rules := m.rules.Load().(*muxRules)
 	if rules == nil {
 		return nil, false
 	}
 	return rules.muxMapper.GetHandler(name)
 }
+
+func (m *mux) close() {
+}
diff --git a/pkg/object/layer4rawserver/pool.go b/pkg/object/layer4rawserver/pool.go
new file mode 100644
index 0000000000..dcbffb6713
--- /dev/null
+++ b/pkg/object/layer4rawserver/pool.go
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2017, MegaEase
+ * All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package layer4rawserver + +import ( + "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/layer4stat" +) + +type ( + // pool backend server pool + pool struct { + spec *PoolSpec + + tagPrefix string + layer4Stat *layer4stat.Layer4Stat + servers *servers + } +) + +func newPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) *pool { + return &pool{ + spec: spec, + + tagPrefix: tagPrefix, + layer4Stat: layer4stat.New(), + servers: newServers(super, spec), + } +} + +func (p *pool) status() *PoolStatus { + s := &PoolStatus{Stat: p.layer4Stat.Status()} + return s +} + +func (p *pool) close() { + p.servers.close() +} diff --git a/pkg/object/layer4rawserver/runtime.go b/pkg/object/layer4rawserver/runtime.go index 8a1cc6de42..f695126e32 100644 --- a/pkg/object/layer4rawserver/runtime.go +++ b/pkg/object/layer4rawserver/runtime.go @@ -1,7 +1,28 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package layer4rawserver import ( "fmt" + "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/util/connection" + "github.com/megaease/easegress/pkg/util/iobufferpool" + "net" "reflect" "sync/atomic" "time" @@ -33,6 +54,7 @@ type ( startNum uint64 err error } + eventReload struct { nextSuperSpec *supervisor.Spec muxMapper protocol.Layer4MuxMapper @@ -43,16 +65,17 @@ type ( superSpec *supervisor.Spec spec *Spec - startNum uint64 - mux *Mux + state atomic.Value // runtime running state + err atomic.Value // runtime running error + + startNum uint64 // runtime start num eventChan chan interface{} // receive traffic controller event - // status - state atomic.Value // stateType - err atomic.Value // error + mux *mux // mux for layer4 pipeline + pool *pool - tcpstat *layer4stat.Layer4Stat listener *listener + tcpstat *layer4stat.Layer4Stat } // Status contains all status generated by runtime, for displaying to users. @@ -61,8 +84,6 @@ type ( State stateType `yaml:"state"` Error string `yaml:"error,omitempty"` - - // TODO add stat info } ) @@ -126,6 +147,7 @@ func (r *runtime) reload(nextSuperSpec *supervisor.Spec, muxMapper protocol.Laye r.mux.reloadRules(nextSuperSpec, muxMapper) nextSpec := nextSuperSpec.ObjectSpec().(*Spec) + r.pool = newPool(nextSuperSpec.Super(), nextSpec.Pool, "") // r.listener does not create just after the process started and the config load for the first time. 
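The pool wired in above is configured through the PoolSpec added later in this patch (spec.go). A rough in-package sketch of how such a spec is expected to be filled in and validated; field names follow the PoolSpec, Server, and LoadBalance definitions in this series, while the addresses, tags, and policy choice are only placeholders:

// examplePoolSpec is an illustrative helper, not part of the patch: it builds
// a static two-backend pool selected by client-IP hash and validates it.
func examplePoolSpec() (*PoolSpec, error) {
	spec := &PoolSpec{
		Servers: []*Server{
			{Addr: "10.0.0.1:9090", Tags: []string{"v1"}, Weight: 1},
			{Addr: "10.0.0.2:9090", Tags: []string{"v1"}, Weight: 1},
		},
		ServersTags: []string{"v1"},
		LoadBalance: &LoadBalance{Policy: PolicyIPHash},
	}
	// Validate rejects pools with neither serviceName nor servers, partially
	// weighted server lists, and serversTags that select none of the servers.
	if err := spec.Validate(); err != nil {
		return nil, err
	}
	return spec, nil
}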
if nextSpec != nil && r.listener != nil { @@ -203,14 +225,94 @@ func (r *runtime) handleEventClose(e *eventClose) { } func (r *runtime) startServer() { - l := NewListener(r.spec, r.mux) + onAccept := func(conn net.Conn, listenerStopChan chan struct{}) { + internalStopChan := make(chan struct{}, 1) + switch r.spec.Protocol { + case "tcp": + clientConn := connection.NewClientConnection(conn, conn.RemoteAddr(), listenerStopChan, internalStopChan) + ctx := context.NewLayer4Context(clientConn, listenerStopChan) + if server, err := r.pool.servers.next(ctx); err != nil { + logger.Errorf("Get layer4 proxy upstream server failed, err: %+v", err) + _ = clientConn.Close(connection.NoFlush, connection.OnConnect) + } else { + upstreamAddr, _ := net.ResolveTCPAddr("tcp", server.Addr) + upstreamConn := connection.NewUpstreamConnection(time.Duration(r.spec.ProxyTimeout)*time.Millisecond, upstreamAddr, listenerStopChan) + if err := upstreamConn.Connect(); err == nil { + if handle, ok := r.mux.GetHandler(r.spec.Protocol); ok { + clientConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { + handle.InboundHandler(ctx, buffer) + buf := ctx.GetReadBuffer().Clone() + ctx.GetReadBuffer().Drain(buf.Len()) + ctx.WriteToUpstream(buf) + }) + upstreamConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { + handle.OutboundHandler(ctx, buffer) + buf := ctx.GetReadBuffer().Clone() + ctx.GetReadBuffer().Drain(buf.Len()) + ctx.WriteToClient(buf) + }) + } else { + clientConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { + buf := buffer.Clone() + buffer.Drain(buf.Len()) + ctx.WriteToUpstream(buf) + }) + upstreamConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { + buf := buffer.Clone() + buffer.Drain(buf.Len()) + ctx.WriteToClient(buf) + }) + } + } + } + case "udp": + clientConn := connection.NewClientConnection(conn, conn.RemoteAddr(), listenerStopChan, internalStopChan) + ctx := context.NewLayer4Context(clientConn, listenerStopChan) + if server, err := r.pool.servers.next(ctx); err != nil { + logger.Errorf("Get layer4 proxy upstream server failed, err: %+v", err) + _ = clientConn.Close(connection.NoFlush, connection.OnConnect) + } else { + upstreamAddr, _ := net.ResolveTCPAddr("udp", server.Addr) + upstreamConn := connection.NewUpstreamConnection(time.Duration(r.spec.ProxyTimeout)*time.Millisecond, upstreamAddr, listenerStopChan) + if err := upstreamConn.Connect(); err == nil { + if handle, ok := r.mux.GetHandler(r.spec.Protocol); ok { + clientConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { + handle.InboundHandler(ctx, buffer) + buf := ctx.GetReadBuffer().Clone() + ctx.GetReadBuffer().Reset() + ctx.WriteToUpstream(buf) + }) + upstreamConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { + handle.OutboundHandler(ctx, buffer) + buf := ctx.GetReadBuffer().Clone() + ctx.GetReadBuffer().Reset() + ctx.WriteToClient(buf) + }) + } else { + clientConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { + buf := buffer.Clone() + buffer.Drain(buf.Len()) + ctx.WriteToUpstream(buf) + }) + upstreamConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { + buf := buffer.Clone() + buffer.Drain(buf.Len()) + ctx.WriteToClient(buf) + }) + } + } + } + } + } + + l := newListener(r.spec, onAccept) err := l.listen() if err != nil { r.setState(stateFailed) r.setError(err) - logger.Errorf("listen tcp conn for %s:%d failed, err: %v", r.spec.LocalAddr, r.spec.Port, err) + logger.Errorf("listen tcp conn for :%d failed, err: %v", r.spec.Port, err) - _ = l.Close() + _ = l.close() r.eventChan <- &eventServeFailed{ err: err, startNum: r.startNum, @@ -227,7 +329,8 @@ 
func (r *runtime) startServer() {
 }
 
 func (r *runtime) closeServer() {
-	_ = r.listener.Close()
+	_ = r.listener.close() // TODO close established connection when listener closed?
+	logger.Infof("listener %s :%d closed", r.spec.Protocol, r.spec.Port)
 }
 
 func (r *runtime) checkFailed() {
@@ -250,6 +353,7 @@ func (r *runtime) needRestartServer(nextSpec *Spec) bool {
 	// The change of options below need not restart the HTTP server.
 	x.MaxConnections, y.MaxConnections = 0, 0
 	x.IPFilter, y.IPFilter = nil, nil
+	x.Pool, y.Pool = nil, nil
 
 	x.ProxyConnectTimeout, y.ProxyTimeout = 0, 0
 	x.ProxyTimeout, y.ProxyTimeout = 0, 0
diff --git a/pkg/object/layer4rawserver/spec.go b/pkg/object/layer4rawserver/spec.go
index 7c58cf567c..d8dbf27588 100644
--- a/pkg/object/layer4rawserver/spec.go
+++ b/pkg/object/layer4rawserver/spec.go
@@ -1,49 +1,92 @@
+/*
+ * Copyright (c) 2017, MegaEase
+ * All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package layer4rawserver
 
 import (
+	"fmt"
+
 	"github.com/megaease/easegress/pkg/util/ipfilter"
+	"github.com/megaease/easegress/pkg/util/layer4stat"
 )
 
 type (
-	// Spec describes the TcpServer.
+	// Spec describes the Layer4 Server.
 	Spec struct {
-		Protocol string `yaml:"protocol" jsonschema:"required,enum=tcp,enum=udp"`
-		LocalAddr string `yaml:"localAddr" jsonschema:"omitempty"`
-		Port uint16 `yaml:"port" json:"port" jsonschema:"required"`
-		BindPort bool `yaml:"bindPort" jsonschema:"omitempty"`
-		MaxConnections uint32 `yaml:"maxConnections" jsonschema:"omitempty,minimum=1"`
-
-		// By default, backlog is set to -1 on FreeBSD, DragonFly BSD, and macOS, and to 511 on other platforms.
-		Backlog int32 `yaml:"backlog" jsonschema:"omitempty,minimum=-1"`
-		SendBuf int `yaml:"sendBuf" jsonschema:"omitempty"`
-		RecvBuf int `yaml:"recvBuf" jsonschema:"omitempty"`
-		Reuseport bool `yaml:"reuseport" jsonschema:"omitempty"`
+		Protocol string `yaml:"protocol" jsonschema:"required,enum=tcp,enum=udp"`
+		Port uint16 `yaml:"port" json:"port" jsonschema:"required"`
+
+		// tcp stream config params
 		KeepAlive bool `yaml:"keepAlive" jsonschema:"required"`
 		TcpNodelay bool `yaml:"tcpNodelay" jsonschema:"omitempty"`
+		MaxConnections uint32 `yaml:"maxConnections" jsonschema:"omitempty,minimum=1"`
 		ProxyConnectTimeout uint32 `yaml:"proxyConnectTimeout" jsonschema:"omitempty"`
 		ProxyTimeout uint32 `yaml:"proxyTimeout" jsonschema:"omitempty"`
 
+		Pool *PoolSpec `yaml:"pool" jsonschema:"required"`
 		IPFilter *ipfilter.Spec `yaml:"ipFilter,omitempty" jsonschema:"omitempty"`
 	}
 
-	ListenerConfig struct {
-		Protocol string `yaml:"protocol" jsonschema:"required,enum=tcp,enum=udp"`
-		LocalAddr string `yaml:"localAddr" jsonschema:"omitempty"`
-		Port uint16 `yaml:"port" json:"port" jsonschema:"required"`
-		BindPort bool `yaml:"bindPort" jsonschema:"omitempty"`
-		// By default, backlog is set to -1 on FreeBSD, DragonFly BSD, and macOS, and to 511 on other platforms.
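needRestartServer above decides whether a spec change can be applied without restarting the listener: it copies both specs, zeroes every hot-reloadable field, and DeepEquals the rest. A minimal standalone sketch of that pattern with a made-up stand-in struct (not the real Spec):

package main

import (
	"fmt"
	"reflect"
)

type miniSpec struct {
	Port           uint16
	MaxConnections uint32
}

// needRestart zeroes the fields that can be hot-updated and compares what is left;
// any remaining difference means the listener has to be rebuilt.
func needRestart(old, next miniSpec) bool {
	old.MaxConnections, next.MaxConnections = 0, 0
	return !reflect.DeepEqual(old, next)
}

func main() {
	a := miniSpec{Port: 8080, MaxConnections: 1024}
	b := miniSpec{Port: 8080, MaxConnections: 4096}
	fmt.Println(needRestart(a, b)) // false: only MaxConnections changed
}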
- Backlog int32 `yaml:"backlog" jsonschema:"omitempty,minimum=-1"` - SendBuf int `yaml:"sendBuf" jsonschema:"omitempty"` - RecvBuf int `yaml:"recvBuf" jsonschema:"omitempty"` - Reuseport bool `yaml:"reuseport" jsonschema:"omitempty"` - KeepAlive bool `yaml:"keepAlive" jsonschema:"required"` - TcpNodelay bool `yaml:"tcpNodelay" jsonschema:"omitempty"` - ProxyConnectTimeout uint32 `yaml:"proxyConnectTimeout" jsonschema:"omitempty"` - ProxyTimeout uint32 `yaml:"proxyTimeout" jsonschema:"omitempty"` + // PoolSpec describes a pool of servers. + PoolSpec struct { + ServiceRegistry string `yaml:"serviceRegistry" jsonschema:"omitempty"` + ServiceName string `yaml:"serviceName" jsonschema:"omitempty"` + Servers []*Server `yaml:"servers" jsonschema:"omitempty"` + ServersTags []string `yaml:"serversTags" jsonschema:"omitempty,uniqueItems=true"` + LoadBalance *LoadBalance `yaml:"loadBalance" jsonschema:"required"` + } + + // PoolStatus is the status of Pool. + PoolStatus struct { + Stat *layer4stat.Status `yaml:"stat"` } ) -// Validate validates TcpServerSpec. +// Validate validates Layer4 Server. func (spec *Spec) Validate() error { + if poolErr := spec.Pool.Validate(); poolErr != nil { + return poolErr + } + + return nil +} + +// Validate validates poolSpec. +func (s PoolSpec) Validate() error { + if s.ServiceName == "" && len(s.Servers) == 0 { + return fmt.Errorf("both serviceName and servers are empty") + } + + serversGotWeight := 0 + for _, server := range s.Servers { + if server.Weight > 0 { + serversGotWeight++ + } + } + if serversGotWeight > 0 && serversGotWeight < len(s.Servers) { + return fmt.Errorf("not all servers have weight(%d/%d)", + serversGotWeight, len(s.Servers)) + } + + if s.ServiceName == "" { + servers := newStaticServers(s.Servers, s.ServersTags, s.LoadBalance) + if servers.len() == 0 { + return fmt.Errorf("serversTags picks none of servers") + } + } return nil } diff --git a/pkg/object/layer4rawserver/udpreceiver.go b/pkg/object/layer4rawserver/udpreceiver.go new file mode 100644 index 0000000000..9a03249b2b --- /dev/null +++ b/pkg/object/layer4rawserver/udpreceiver.go @@ -0,0 +1,28 @@ +package layer4rawserver + +import ( + "strings" + "sync" + + "github.com/megaease/easegress/pkg/context" +) + +var ( + ProxyMap = sync.Map{} +) + +func GetProxyMapKey(raddr, laddr string) string { + var builder strings.Builder + builder.WriteString(raddr) + builder.WriteString(":") + builder.WriteString(laddr) + return builder.String() +} + +func SetUDPProxyMap(key string, layer4Context context.Layer4Context) { + ProxyMap.Store(key, layer4Context) +} + +func DelUDPProxyMap(key string) { + ProxyMap.Delete(key) +} diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go index 6b6f61f630..236b3ae469 100644 --- a/pkg/registry/registry.go +++ b/pkg/registry/registry.go @@ -45,6 +45,7 @@ import ( _ "github.com/megaease/easegress/pkg/object/httppipeline" _ "github.com/megaease/easegress/pkg/object/httpserver" _ "github.com/megaease/easegress/pkg/object/ingresscontroller" + _ "github.com/megaease/easegress/pkg/object/layer4rawserver" _ "github.com/megaease/easegress/pkg/object/meshcontroller" _ "github.com/megaease/easegress/pkg/object/nacosserviceregistry" _ "github.com/megaease/easegress/pkg/object/rawconfigtrafficcontroller" diff --git a/pkg/util/connection/connection.go b/pkg/util/connection/connection.go index 8ed43daa91..f708add49a 100644 --- a/pkg/util/connection/connection.go +++ b/pkg/util/connection/connection.go @@ -38,29 +38,29 @@ type Connection struct { closed uint32 protocol 
string localAddr net.Addr - remoteAddr net.Addr - - readEnabled bool - readEnabledChan chan bool // if we need to reload read filters, it's better to stop read data before reload filters + remoteAddr net.Addr // just for udp proxy use lastBytesSizeRead int64 lastWriteSizeWrite int64 - readBuffer iobufferpool.IoBuffer - writeBuffers net.Buffers - ioBuffers []iobufferpool.IoBuffer - writeBufferChan chan *[]iobufferpool.IoBuffer + readEnabled bool + readEnabledChan chan bool // if we need to reload read filters, it's better to stop read data before reload filters + + readBuffer iobufferpool.IoBuffer + writeBuffers net.Buffers + ioBuffers []iobufferpool.IoBuffer + writeBufferChan chan *[]iobufferpool.IoBuffer - mu sync.Mutex - startOnce sync.Once - stopChan chan struct{} + mu sync.Mutex + startOnce sync.Once + connStopChan chan struct{} // use for connection close + listenerStopChan chan struct{} // use for listener close - onRead func(buffer iobufferpool.IoBuffer) // execute read filters - onWrite func(src []iobufferpool.IoBuffer) []iobufferpool.IoBuffer // execute write filters + onReadBuffer func(buffer iobufferpool.IoBuffer) // execute read filters } // NewClientConnection wrap connection create from client -func NewClientConnection(conn net.Conn, remoteAddr net.Addr, stopChan chan struct{}) *Connection { - res := &Connection{ +func NewClientConnection(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan struct{}, connStopChan chan struct{}) *Connection { + clientConn := &Connection{ conn: conn, connected: 1, protocol: conn.LocalAddr().Network(), @@ -70,33 +70,43 @@ func NewClientConnection(conn net.Conn, remoteAddr net.Addr, stopChan chan struc readEnabledChan: make(chan bool, 1), writeBufferChan: make(chan *[]iobufferpool.IoBuffer, 8), - mu: sync.Mutex{}, - stopChan: stopChan, + mu: sync.Mutex{}, + connStopChan: connStopChan, + listenerStopChan: listenerStopChan, } if remoteAddr != nil { - res.remoteAddr = remoteAddr + clientConn.remoteAddr = remoteAddr } else { - res.remoteAddr = conn.RemoteAddr() + clientConn.remoteAddr = conn.RemoteAddr() // udp server conn can not get remote address } - return res + return clientConn } +// Protocol get connection protocol func (c *Connection) Protocol() string { - return c.conn.LocalAddr().Network() + return c.protocol } +// LocalAddr get connection local addr func (c *Connection) LocalAddr() net.Addr { return c.localAddr } +// RemoteAddr get connection remote addr(it's nil for udp server conn) func (c *Connection) RemoteAddr() net.Addr { return c.conn.RemoteAddr() } +// SetOnRead set connection read handle +func (c *Connection) SetOnRead(onRead func(buffer iobufferpool.IoBuffer)) { + c.onReadBuffer = onRead +} + +// Start running connection read/write loop func (c *Connection) Start() { if c.protocol == "udp" && c.conn.RemoteAddr() == nil { - return + return // udp server connection no need to start read/write loop } c.startOnce.Do(func() { @@ -104,6 +114,7 @@ func (c *Connection) Start() { }) } +// State get connection running state func (c *Connection) State() ConnState { if atomic.LoadUint32(&c.closed) == 1 { return ConnClosed @@ -118,19 +129,19 @@ func (c *Connection) startRWLoop() { go func() { defer func() { if r := recover(); r != nil { - logger.Errorf("%s connection read loop crashed, local addr: %s, remote addr: %s, err: %+v", - c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) - } - - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection close due to read loop crashed failed, local addr: 
%s, remote addr: %s, err: %+v", - c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) - } + logger.Errorf("tcp connection read loop crashed, local addr: %s, remote addr: %s, err: %+v", + c.localAddr.String(), c.remoteAddr.String(), r) + + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("tcp connection close failed, local addr: %s, remote addr: %s, err: %+v", + c.localAddr.String(), c.remoteAddr.String(), r) + } + }() + _ = c.Close(NoFlush, LocalClose) }() - _ = c.Close(NoFlush, LocalClose) - }() + } }() c.startReadLoop() }() @@ -138,19 +149,19 @@ func (c *Connection) startRWLoop() { go func() { defer func() { if r := recover(); r != nil { - logger.Errorf("%s connection write loop crashed, local addr: %s, remote addr: %s, err: %+v", - c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) - } - - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection close due to write loop crashed failed, local addr: %s, remote addr: %s, err: %+v", - c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) - } + logger.Errorf("tcp connection write loop crashed, local addr: %s, remote addr: %s, err: %+v", + c.localAddr.String(), c.remoteAddr.String(), r) + + go func() { + defer func() { + if r := recover(); r != nil { + logger.Errorf("tcp connection close failed, local addr: %s, remote addr: %s, err: %+v", + c.localAddr.String(), c.remoteAddr.String(), r) + } + }() + _ = c.Close(NoFlush, LocalClose) }() - _ = c.Close(NoFlush, LocalClose) - }() + } }() c.startWriteLoop() }() @@ -159,7 +170,9 @@ func (c *Connection) startRWLoop() { func (c *Connection) startReadLoop() { for { select { - case <-c.stopChan: + case <-c.connStopChan: + return + case <-c.listenerStopChan: return case <-c.readEnabledChan: default: @@ -174,13 +187,13 @@ func (c *Connection) startReadLoop() { continue } - // normal close or health check, modify log level + // normal close or health check if c.lastBytesSizeRead == 0 || err == io.EOF { - logger.Debugf("%s connection error on read, local addr: %s, remote addr: %s, err: %s", - c.protocol, c.localAddr.String(), c.remoteAddr.String(), err.Error()) + logger.Debugf("tcp connection error on read, local addr: %s, remote addr: %s, err: %s", + c.localAddr.String(), c.remoteAddr.String(), err.Error()) } else { - logger.Errorf("%s connection error on read, local addr: %s, remote addr: %s, err: %s", - c.protocol, c.localAddr.String(), c.remoteAddr.String(), err.Error()) + logger.Errorf("tcp connection error on read, local addr: %s, remote addr: %s, err: %s", + c.localAddr.String(), c.remoteAddr.String(), err.Error()) } if err == io.EOF { @@ -213,7 +226,7 @@ func (c *Connection) startWriteLoop() { var err error for { select { - case <-c.stopChan: + case <-c.listenerStopChan: return case buf, ok := <-c.writeBufferChan: if !ok { @@ -303,7 +316,7 @@ func (c *Connection) Close(ccType CloseType, eventType Event) (err error) { } // close conn recv, then notify read/write loop to exit - close(c.stopChan) + close(c.listenerStopChan) _ = c.conn.Close() logger.Errorf("%s connection closed, local addr: %s, remote addr: %s", @@ -349,15 +362,14 @@ func (c *Connection) doReadIO() (err error) { return } - if bufLen := c.readBuffer.Len(); bufLen == 0 { + if prevLen := c.readBuffer.Len(); prevLen == 0 { return } else { - buf := c.readBuffer.Clone() - c.readBuffer.Drain(bufLen) - c.onRead(buf) + c.onReadBuffer(c.readBuffer) + readBufLen := int64(prevLen - c.readBuffer.Len()) // calculate read buffer len - if int64(bufLen) != 
c.lastBytesSizeRead { - c.lastBytesSizeRead = int64(bufLen) + if readBufLen != c.lastBytesSizeRead { + c.lastBytesSizeRead = int64(prevLen) } } return @@ -436,11 +448,6 @@ func (c *Connection) Write(buffers ...iobufferpool.IoBuffer) (err error) { } }() - bufs := c.onWrite(buffers) - if bufs == nil { - return - } - select { case c.writeBufferChan <- &buffers: return @@ -474,8 +481,7 @@ type UpstreamConnection struct { connectOnce sync.Once } -func NewUpstreamConnection(connectTimeout time.Duration, remoteAddr net.Addr, stopChan chan struct{}, - onRead func(buffer iobufferpool.IoBuffer), onWrite func(src []iobufferpool.IoBuffer) []iobufferpool.IoBuffer) *UpstreamConnection { +func NewUpstreamConnection(connectTimeout time.Duration, remoteAddr net.Addr, stopChan chan struct{}) *UpstreamConnection { res := &UpstreamConnection{ Connection: Connection{ connected: 1, @@ -485,10 +491,8 @@ func NewUpstreamConnection(connectTimeout time.Duration, remoteAddr net.Addr, st readEnabledChan: make(chan bool, 1), writeBufferChan: make(chan *[]iobufferpool.IoBuffer, 8), - mu: sync.Mutex{}, - stopChan: stopChan, - onRead: onRead, - onWrite: onWrite, + mu: sync.Mutex{}, + listenerStopChan: stopChan, }, connectTimeout: connectTimeout, } @@ -533,7 +537,7 @@ func (u *UpstreamConnection) Connect() (err error) { } logger.Debugf("connect upstream, upstream addr: %s, event: %+v, err: %+v", u.remoteAddr, event, err) if event != Connected { - close(u.stopChan) // if upstream connection failed, close client connection + close(u.listenerStopChan) // if upstream connection failed, close client connection } }) return diff --git a/pkg/util/layer4stat/layer4stat.go b/pkg/util/layer4stat/layer4stat.go index c33da5fc11..842f315e37 100644 --- a/pkg/util/layer4stat/layer4stat.go +++ b/pkg/util/layer4stat/layer4stat.go @@ -1,3 +1,20 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package layer4stat import ( @@ -72,10 +89,12 @@ type ( } ) +// Status get layer4 proxy status func (s *Layer4Stat) Status() *Status { panic("implement me") } +// New get new layer4 stat func New() *Layer4Stat { panic("implement me") } diff --git a/pkg/util/timerpool/timerpool.go b/pkg/util/timerpool/timerpool.go index a444c41c6a..ae620a7007 100644 --- a/pkg/util/timerpool/timerpool.go +++ b/pkg/util/timerpool/timerpool.go @@ -41,7 +41,7 @@ func Get(d time.Duration) *time.Timer { // Put pools the given timer. // -// There is no need to call t.Stop() before calling Put. +// There is no need to call t.stop() before calling Put. // // Put will try to stop the timer before pooling. 
If the // given timer already expired, Put will read the unreceived From 796e8f41f8384161fce4d2828828c37697c8c531 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 17 Sep 2021 11:35:44 +0800 Subject: [PATCH 18/99] [connection] fix bug in connection util & add udp session --- pkg/util/connection/connection.go | 129 ++++++++++++++++------------- pkg/util/connection/udpreceiver.go | 48 +++++++++++ 2 files changed, 119 insertions(+), 58 deletions(-) create mode 100644 pkg/util/connection/udpreceiver.go diff --git a/pkg/util/connection/connection.go b/pkg/util/connection/connection.go index f708add49a..89a0b5bd0f 100644 --- a/pkg/util/connection/connection.go +++ b/pkg/util/connection/connection.go @@ -58,8 +58,8 @@ type Connection struct { onReadBuffer func(buffer iobufferpool.IoBuffer) // execute read filters } -// NewClientConnection wrap connection create from client -func NewClientConnection(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan struct{}, connStopChan chan struct{}) *Connection { +// NewClientConn wrap connection create from client +func NewClientConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan struct{}, connStopChan chan struct{}) *Connection { clientConn := &Connection{ conn: conn, connected: 1, @@ -98,6 +98,11 @@ func (c *Connection) RemoteAddr() net.Addr { return c.conn.RemoteAddr() } +// ReadEnabled get connection read enable status +func (c *Connection) ReadEnabled() bool { + return c.readEnabled +} + // SetOnRead set connection read handle func (c *Connection) SetOnRead(onRead func(buffer iobufferpool.IoBuffer)) { c.onReadBuffer = onRead @@ -167,6 +172,33 @@ func (c *Connection) startRWLoop() { }() } +// Write receive other connection data +func (c *Connection) Write(buffers ...iobufferpool.IoBuffer) (err error) { + defer func() { + if r := recover(); r != nil { + logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", + c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) + err = ErrConnectionHasClosed + } + }() + + select { + case c.writeBufferChan <- &buffers: + return + default: + } + + // try to send data again in 60 seconds + t := timerpool.Get(60 * time.Second) + select { + case c.writeBufferChan <- &buffers: + case <-t.C: + err = ErrWriteBufferChanTimeout + } + timerpool.Put(t) + return +} + func (c *Connection) startReadLoop() { for { select { @@ -234,7 +266,7 @@ func (c *Connection) startWriteLoop() { } c.appendBuffer(buf) OUTER: - for i := 0; i < 10; i++ { + for i := 0; i < 8; i++ { select { case buf, ok := <-c.writeBufferChan: if !ok { @@ -288,9 +320,8 @@ func (c *Connection) appendBuffer(ioBuffers *[]iobufferpool.IoBuffer) { func (c *Connection) Close(ccType CloseType, eventType Event) (err error) { defer func() { if r := recover(); r != nil { - logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", + logger.Errorf("%s connection close occur panic, local addr: %s, remote addr: %s, err: %+v", c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) - err = ErrConnectionHasClosed } }() @@ -310,20 +341,39 @@ func (c *Connection) Close(ccType CloseType, eventType Event) (err error) { // close tcp conn read first if tconn, ok := c.conn.(*net.TCPConn); ok { - logger.Errorf("%s connection close read, local addr: %s, remote addr: %s", - c.protocol, c.localAddr.String(), c.remoteAddr.String()) + logger.Debugf("tcp connection close read, local addr: %s, remote addr: %s", + c.localAddr.String(), c.remoteAddr.String()) _ = tconn.CloseRead() } + if c.protocol == "udp" && 
c.conn.RemoteAddr() == nil { + key := GetProxyMapKey(c.localAddr.String(), c.remoteAddr.String()) + DelUDPProxyMap(key) + } + // close conn recv, then notify read/write loop to exit close(c.listenerStopChan) _ = c.conn.Close() + c.lastBytesSizeRead = 0 + c.lastWriteSizeWrite = 0 logger.Errorf("%s connection closed, local addr: %s, remote addr: %s", c.protocol, c.localAddr.String(), c.remoteAddr.String()) return nil } +func (c *Connection) SetReadDisable(disable bool) { + if disable { + if c.readEnabled { + c.readEnabled = false + } + } else { + c.readEnabled = true + // only on read disable status, we need to trigger chan to wake read loop up + c.readEnabledChan <- true + } +} + func (c *Connection) doReadIO() (err error) { if c.readBuffer == nil { switch c.protocol { @@ -354,23 +404,17 @@ func (c *Connection) doReadIO() (err error) { if bytesRead == 0 && err == nil { err = io.EOF - logger.Errorf("%s connection ReadOnce maybe always return (0, nil) and causes dead loop, local addr: %s, remote addr: %s", - c.protocol, c.localAddr.String(), c.remoteAddr.String()) + logger.Errorf("tcp connection ReadOnce maybe always return (0, nil) and causes dead loop, local addr: %s, remote addr: %s", + c.localAddr.String(), c.remoteAddr.String()) } - if !c.readEnabled { + if !c.readEnabled || c.readBuffer.Len() == 0 { return } - if prevLen := c.readBuffer.Len(); prevLen == 0 { - return - } else { - c.onReadBuffer(c.readBuffer) - readBufLen := int64(prevLen - c.readBuffer.Len()) // calculate read buffer len - - if readBufLen != c.lastBytesSizeRead { - c.lastBytesSizeRead = int64(prevLen) - } + c.onReadBuffer(c.readBuffer) + if currLen := int64(c.readBuffer.Len()); c.lastBytesSizeRead != currLen { + c.lastBytesSizeRead = currLen } return } @@ -381,11 +425,8 @@ func (c *Connection) doWrite() (int64, error) { return 0, nil } - if bytesSent > 0 { - bytesBufSize := int64(c.writeBufLen()) - if int64(c.writeBufLen()) != c.lastWriteSizeWrite { - c.lastWriteSizeWrite = bytesBufSize - } + if bytesBufSize := int64(c.writeBufLen()); bytesBufSize != c.lastWriteSizeWrite { + c.lastWriteSizeWrite = bytesBufSize } return bytesSent, err } @@ -403,9 +444,9 @@ func (c *Connection) doWriteIO() (bytesSent int64, err error) { case "tcp": bytesSent, err = buffers.WriteTo(c.conn) case "udp": - addr := c.remoteAddr.(*net.UDPAddr) n := 0 bytesSent = 0 + addr := c.remoteAddr.(*net.UDPAddr) for _, buf := range c.ioBuffers { if c.conn.RemoteAddr() == nil { n, err = c.conn.(*net.UDPConn).WriteToUDP(buf.Bytes(), addr) @@ -438,33 +479,6 @@ func (c *Connection) doWriteIO() (bytesSent int64, err error) { return } -// Write receive other connection data -func (c *Connection) Write(buffers ...iobufferpool.IoBuffer) (err error) { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", - c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) - err = ErrConnectionHasClosed - } - }() - - select { - case c.writeBufferChan <- &buffers: - return - default: - } - - // fail after 60s - t := timerpool.Get(60 * time.Second) - select { - case c.writeBufferChan <- &buffers: - case <-t.C: - err = ErrWriteBufferChanTimeout - } - timerpool.Put(t) - return -} - func (c *Connection) setWriteDeadline() { switch c.protocol { case "udp": @@ -481,10 +495,11 @@ type UpstreamConnection struct { connectOnce sync.Once } -func NewUpstreamConnection(connectTimeout time.Duration, remoteAddr net.Addr, stopChan chan struct{}) *UpstreamConnection { - res := &UpstreamConnection{ +func 
NewUpstreamConn(connectTimeout time.Duration, remoteAddr net.Addr, stopChan chan struct{}, connStopChan chan struct{}) *UpstreamConnection { + conn := &UpstreamConnection{ Connection: Connection{ connected: 1, + protocol: remoteAddr.Network(), remoteAddr: remoteAddr, readEnabled: true, @@ -492,14 +507,12 @@ func NewUpstreamConnection(connectTimeout time.Duration, remoteAddr net.Addr, st writeBufferChan: make(chan *[]iobufferpool.IoBuffer, 8), mu: sync.Mutex{}, + connStopChan: connStopChan, listenerStopChan: stopChan, }, connectTimeout: connectTimeout, } - if res.remoteAddr != nil { - res.Connection.protocol = res.remoteAddr.Network() - } - return res + return conn } func (u *UpstreamConnection) connect() (event Event, err error) { diff --git a/pkg/util/connection/udpreceiver.go b/pkg/util/connection/udpreceiver.go new file mode 100644 index 0000000000..03a3daac58 --- /dev/null +++ b/pkg/util/connection/udpreceiver.go @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package connection + +import ( + "strings" + "sync" + + "github.com/megaease/easegress/pkg/context" +) + +var ( + ProxyMap = sync.Map{} +) + +// GetProxyMapKey construct udp session key +func GetProxyMapKey(raddr, laddr string) string { + var builder strings.Builder + builder.WriteString(raddr) + builder.WriteString(":") + builder.WriteString(laddr) + return builder.String() +} + +// SetUDPProxyMap set udp session by udp server listener +func SetUDPProxyMap(key string, layer4Context context.Layer4Context) { + ProxyMap.Store(key, layer4Context) +} + +// DelUDPProxyMap delete udp session +func DelUDPProxyMap(key string) { + ProxyMap.Delete(key) +} From 01bfd85761c93165e8dc3ee0f8ca2ad4f7593f6c Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 17 Sep 2021 11:36:13 +0800 Subject: [PATCH 19/99] [iobufferpool] fix comment --- pkg/util/iobufferpool/iobuffer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/util/iobufferpool/iobuffer.go b/pkg/util/iobufferpool/iobuffer.go index 8f9aa0f479..f02f00ea3a 100644 --- a/pkg/util/iobufferpool/iobuffer.go +++ b/pkg/util/iobufferpool/iobuffer.go @@ -213,7 +213,7 @@ func (b *ioBuffer) ReadOnce(r io.Reader) (n int64, err error) { b.copy(0) } - // free max buffers avoid memleak + // free max buffers avoid memory leak if b.off == len(b.buf) && cap(b.buf) > MaxBufferLength { b.Free() b.Alloc(MaxRead) From 3037b13bb528a3544e267fc69aeee64b581623c6 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 17 Sep 2021 16:47:42 +0800 Subject: [PATCH 20/99] [layer4proxy] complete layer4 proxy method --- pkg/context/layer4context.go | 67 ++++--- pkg/object/layer4rawserver/backendserver.go | 15 +- pkg/object/layer4rawserver/listener.go | 25 +-- pkg/object/layer4rawserver/runtime.go | 195 ++++++++++++-------- pkg/object/layer4rawserver/spec.go | 1 + pkg/object/layer4rawserver/udpreceiver.go | 28 --- pkg/util/connection/connection.go | 8 +- pkg/util/connection/constant.go | 2 - 
pkg/util/iobufferpool/iobuffer_pool.go | 5 +- 9 files changed, 180 insertions(+), 166 deletions(-) delete mode 100644 pkg/object/layer4rawserver/udpreceiver.go diff --git a/pkg/context/layer4context.go b/pkg/context/layer4context.go index 724e55f25a..2b6c0a41d7 100644 --- a/pkg/context/layer4context.go +++ b/pkg/context/layer4context.go @@ -37,7 +37,6 @@ type ( LocalAddr() net.Addr ClientAddr() net.Addr UpstreamAddr() net.Addr - SetUpstreamAddr(addr net.Addr) GetReadBuffer() iobufferpool.IoBuffer AppendReadBuffer(buffer iobufferpool.IoBuffer) @@ -49,7 +48,6 @@ type ( Finish() Duration() time.Duration - StopChan() chan struct{} // client connection and upstream connection stop by this chan CallNextHandler(lastResult string) string SetHandlerCaller(caller HandlerCaller) @@ -68,16 +66,15 @@ type ( } layer4Context struct { - mutex sync.Mutex + mu sync.Mutex protocol string localAddr net.Addr clientAddr net.Addr upstreamAddr net.Addr - stopChan chan struct{} // notify quit read loop and write loop - clientConn connection.Connection - upstreamConn connection.UpstreamConnection + clientConn *connection.Connection + upstreamConn *connection.UpstreamConnection readBuffer iobufferpool.IoBuffer writeBuffer iobufferpool.IoBuffer @@ -91,26 +88,44 @@ type ( ) // NewLayer4Context creates an Layer4Context. -func NewLayer4Context(clientConn *connection.Connection, stopChan chan struct{}) *layer4Context { - +func NewLayer4Context(cliConn *connection.Connection, upstreamConn *connection.UpstreamConnection, cliAddr net.Addr) *layer4Context { startTime := time.Now() - res := layer4Context{ - mutex: sync.Mutex{}, - protocol: clientConn.Protocol(), - localAddr: clientConn.LocalAddr(), - clientAddr: clientConn.RemoteAddr(), - stopChan: stopChan, - startTime: &startTime, + ctx := &layer4Context{ + protocol: cliConn.Protocol(), + localAddr: cliConn.LocalAddr(), + upstreamAddr: upstreamConn.RemoteAddr(), + clientConn: cliConn, + upstreamConn: upstreamConn, + startTime: &startTime, + + mu: sync.Mutex{}, + } + + if cliAddr != nil { + ctx.clientAddr = cliAddr + } else { + ctx.clientAddr = cliConn.RemoteAddr() // nil for udp server conn + } + + switch ctx.protocol { + case "udp": + ctx.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) + ctx.writeBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) + case "tcp": + ctx.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) + ctx.writeBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) } - return &res + return ctx } +// Lock acquire context lock func (ctx *layer4Context) Lock() { - ctx.mutex.Lock() + ctx.mu.Lock() } +// Unlock release lock func (ctx *layer4Context) Unlock() { - ctx.mutex.Unlock() + ctx.mu.Unlock() } // Protocol get proxy protocol @@ -131,14 +146,6 @@ func (ctx *layer4Context) UpstreamAddr() net.Addr { return ctx.upstreamAddr } -func (ctx *layer4Context) SetUpstreamAddr(addr net.Addr) { - ctx.upstreamAddr = addr -} - -func (ctx *layer4Context) StopChan() chan struct{} { - return ctx.stopChan -} - // GetReadBuffer get read buffer func (ctx *layer4Context) GetReadBuffer() iobufferpool.IoBuffer { return ctx.readBuffer @@ -181,15 +188,23 @@ func (ctx *layer4Context) WriteToUpstream(buffer iobufferpool.IoBuffer) { _ = ctx.clientConn.Write(buffer) } +// CallNextHandler call handler caller func (ctx *layer4Context) CallNextHandler(lastResult string) string { return ctx.caller(lastResult) } +// SetHandlerCaller set handler caller func (ctx *layer4Context) SetHandlerCaller(caller 
HandlerCaller) { ctx.caller = caller } +// Finish context finish handler func (ctx *layer4Context) Finish() { + _ = iobufferpool.PutIoBuffer(ctx.readBuffer) + _ = iobufferpool.PutIoBuffer(ctx.writeBuffer) + ctx.readBuffer = nil + ctx.writeBuffer = nil + finish := time.Now() ctx.endTime = &finish } diff --git a/pkg/object/layer4rawserver/backendserver.go b/pkg/object/layer4rawserver/backendserver.go index d52f1f72ea..6eaad30450 100644 --- a/pkg/object/layer4rawserver/backendserver.go +++ b/pkg/object/layer4rawserver/backendserver.go @@ -23,7 +23,6 @@ import ( "sync" "sync/atomic" - "github.com/megaease/easegress/pkg/context" "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/object/serviceregistry" "github.com/megaease/easegress/pkg/supervisor" @@ -131,7 +130,7 @@ func (s *servers) useService(serviceInstanceSpecs map[string]*serviceregistry.Se var servers []*Server for _, instance := range serviceInstanceSpecs { servers = append(servers, &Server{ - Addr: instance.URL(), + Addr: fmt.Sprintf("%s:%d", instance.Address, instance.Port), Tags: instance.Tags, Weight: instance.Weight, }) @@ -175,12 +174,12 @@ func (s *servers) len() int { return static.len() } -func (s *servers) next(ctx context.Layer4Context) (*Server, error) { +func (s *servers) next(cliAddr string) (*Server, error) { static := s.snapshot() if static.len() == 0 { return nil, fmt.Errorf("no server available") } - return static.next(ctx), nil + return static.next(cliAddr), nil } func (s *servers) close() { @@ -233,7 +232,7 @@ func (ss *staticServers) len() int { return len(ss.servers) } -func (ss *staticServers) next(ctx context.Layer4Context) *Server { +func (ss *staticServers) next(cliAddr string) *Server { switch ss.lb.Policy { case PolicyRoundRobin: return ss.roundRobin() @@ -242,7 +241,7 @@ func (ss *staticServers) next(ctx context.Layer4Context) *Server { case PolicyWeightedRandom: return ss.weightedRandom() case PolicyIPHash: - return ss.ipHash(ctx) + return ss.ipHash(cliAddr) } logger.Errorf("BUG: unknown load balance policy: %s", ss.lb.Policy) return ss.roundRobin() @@ -274,7 +273,7 @@ func (ss *staticServers) weightedRandom() *Server { return ss.random() } -func (ss *staticServers) ipHash(ctx context.Layer4Context) *Server { - sum32 := int(hashtool.Hash32(ctx.ClientAddr().String())) +func (ss *staticServers) ipHash(cliAddr string) *Server { + sum32 := int(hashtool.Hash32(cliAddr)) return ss.servers[sum32%len(ss.servers)] } diff --git a/pkg/object/layer4rawserver/listener.go b/pkg/object/layer4rawserver/listener.go index c9facca9d4..5aad46c602 100644 --- a/pkg/object/layer4rawserver/listener.go +++ b/pkg/object/layer4rawserver/listener.go @@ -41,35 +41,35 @@ const ( ) type listener struct { - m *mux name string udpListener net.PacketConn // udp connection listener tcpListener *limitlistener.LimitListener // tcp connection listener with connection limit state ListenerState - listenAddr string protocol string // enum:udp/tcp + listenAddr string keepalive bool - reuseport bool maxConnections uint32 mutex *sync.Mutex stopChan chan struct{} // connection listen to this stopChan - onTcpAccept func(conn net.Conn, listenerStopChan chan struct{}) - onUdpAccept func(clientAddr net.Addr, buffer iobufferpool.IoBuffer) + onTcpAccept func(conn net.Conn, listenerStopChan chan struct{}) // tcp accept handle + onUdpAccept func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, buffer iobufferpool.IoBuffer) // udp accept handle } -func newListener(spec *Spec, onAccept func(conn net.Conn, listenerStopChan 
chan struct{})) *listener { +func newListener(spec *Spec, onAccept func(conn net.Conn, listenerStopChan chan struct{}), + onUdpAccept func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, buffer iobufferpool.IoBuffer)) *listener { listen := &listener{ state: ListenerInited, listenAddr: fmt.Sprintf(":%d", spec.Port), protocol: spec.Protocol, keepalive: spec.KeepAlive, maxConnections: spec.MaxConnections, - onTcpAccept: onAccept, - mutex: &sync.Mutex{}, + onTcpAccept: onAccept, + onUdpAccept: onUdpAccept, + mutex: &sync.Mutex{}, } return listen } @@ -159,12 +159,6 @@ func (l *listener) acceptEventLoop() { l.listenAddr, err.Error()) } } else { - host, _, splitErr := net.SplitHostPort(tconn.RemoteAddr().String()) - if splitErr != nil || !l.m.AllowIP(host) { - logger.Debugf("reject remote connection from: %s", tconn.RemoteAddr().String()) - _ = tconn.Close() - continue - } go l.onTcpAccept(tconn, l.stopChan) } } @@ -211,8 +205,7 @@ func (l *listener) readMsgLoop() { logger.Errorf("udp listener %s receiving packet occur error: %+v", l.listenAddr, err) continue } - - l.onUdpAccept(rAddr, buf) + l.onUdpAccept(rAddr, conn, l.stopChan, buf) } } diff --git a/pkg/object/layer4rawserver/runtime.go b/pkg/object/layer4rawserver/runtime.go index f695126e32..01655597bd 100644 --- a/pkg/object/layer4rawserver/runtime.go +++ b/pkg/object/layer4rawserver/runtime.go @@ -19,17 +19,17 @@ package layer4rawserver import ( "fmt" - "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/util/connection" - "github.com/megaease/easegress/pkg/util/iobufferpool" "net" "reflect" "sync/atomic" "time" + "github.com/megaease/easegress/pkg/context" "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/protocol" "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/connection" + "github.com/megaease/easegress/pkg/util/iobufferpool" "github.com/megaease/easegress/pkg/util/layer4stat" ) @@ -224,88 +224,121 @@ func (r *runtime) handleEventClose(e *eventClose) { close(e.done) } -func (r *runtime) startServer() { - onAccept := func(conn net.Conn, listenerStopChan chan struct{}) { - internalStopChan := make(chan struct{}, 1) - switch r.spec.Protocol { - case "tcp": - clientConn := connection.NewClientConnection(conn, conn.RemoteAddr(), listenerStopChan, internalStopChan) - ctx := context.NewLayer4Context(clientConn, listenerStopChan) - if server, err := r.pool.servers.next(ctx); err != nil { - logger.Errorf("Get layer4 proxy upstream server failed, err: %+v", err) - _ = clientConn.Close(connection.NoFlush, connection.OnConnect) - } else { - upstreamAddr, _ := net.ResolveTCPAddr("tcp", server.Addr) - upstreamConn := connection.NewUpstreamConnection(time.Duration(r.spec.ProxyTimeout)*time.Millisecond, upstreamAddr, listenerStopChan) - if err := upstreamConn.Connect(); err == nil { - if handle, ok := r.mux.GetHandler(r.spec.Protocol); ok { - clientConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - handle.InboundHandler(ctx, buffer) - buf := ctx.GetReadBuffer().Clone() - ctx.GetReadBuffer().Drain(buf.Len()) - ctx.WriteToUpstream(buf) - }) - upstreamConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - handle.OutboundHandler(ctx, buffer) - buf := ctx.GetReadBuffer().Clone() - ctx.GetReadBuffer().Drain(buf.Len()) - ctx.WriteToClient(buf) - }) - } else { - clientConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - buf := buffer.Clone() - buffer.Drain(buf.Len()) - ctx.WriteToUpstream(buf) - }) - upstreamConn.SetOnRead(func(buffer 
iobufferpool.IoBuffer) { - buf := buffer.Clone() - buffer.Drain(buf.Len()) - ctx.WriteToClient(buf) - }) - } - } +func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) { + if r.spec.Protocol != "tcp" { + return nil + } + + return func(conn net.Conn, listenerStop chan struct{}) { + remote := conn.RemoteAddr().(*net.TCPAddr).IP.String() + if r.mux.AllowIP(remote) { + _ = conn.Close() + logger.Infof("close tcp connection from %s to %s which ip is not allowed", + conn.RemoteAddr().String(), conn.LocalAddr().String()) + return + } + + server, err := r.pool.servers.next(remote) + if err != nil { + _ = conn.Close() + logger.Errorf("close tcp connection due to can not find upstream server, local addr: %s, err: %+v", + conn.LocalAddr(), err) + return + } + + stopChan := make(chan struct{}) + + serverAddr, _ := net.ResolveTCPAddr("tcp", server.Addr) + connTimeout := time.Duration(r.spec.ProxyConnectTimeout) * time.Millisecond + upstreamConn := connection.NewUpstreamConn(connTimeout, serverAddr, listenerStop, stopChan) + if err := upstreamConn.Connect(); err != nil { + logger.Errorf("close tcp connection due to upstream conn connect failed, local addr: %s, err: %+v", + conn.LocalAddr().String(), err) + _ = conn.Close() + close(stopChan) + return + } + + cliConn := connection.NewClientConn(conn, conn.RemoteAddr(), listenerStop, stopChan) + ctx := context.NewLayer4Context(cliConn, upstreamConn, cliConn.RemoteAddr()) + r.setConnectionReadHandler(cliConn, upstreamConn, ctx) + } +} + +func (r *runtime) onUdpAccept() func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, buffer iobufferpool.IoBuffer) { + if r.spec.Protocol != "udp" { + return nil + } + + return func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, buffer iobufferpool.IoBuffer) { + localAddr := conn.LocalAddr() + remote := cliAddr.(*net.UDPAddr).IP.String() + if r.mux.AllowIP(remote) { + logger.Infof("discard udp packet from %s to %s which ip is not allowed", cliAddr.String(), localAddr.String()) + return + } + + key := connection.GetProxyMapKey(localAddr.String(), cliAddr.String()) + if rawCtx, ok := connection.ProxyMap.Load(key); ok { + ctx := rawCtx.(context.Layer4Context) + ctx.WriteToUpstream(buffer.Clone()) // there is no need to reset buffer + return + } + + server, err := r.pool.servers.next(remote) + if err != nil { + logger.Infof("discard udp packet from %s to %s due to can not find upstream server, err: %+v", + cliAddr.String(), localAddr.String()) + return + } + + upstreamAddr, _ := net.ResolveUDPAddr("udp", server.Addr) + connTimeout := time.Duration(r.spec.ProxyConnectTimeout) * time.Millisecond + upstreamConn := connection.NewUpstreamConn(connTimeout, upstreamAddr, listenerStop, nil) + + if err := upstreamConn.Connect(); err != nil { + logger.Errorf("discard udp packet due to upstream connect failed, local addr: %s, err: %+v", localAddr, err) + return + } + + cliConn := connection.NewClientConn(conn, conn.RemoteAddr(), listenerStop, nil) + ctx := context.NewLayer4Context(cliConn, upstreamConn, cliAddr) + r.setConnectionReadHandler(cliConn, upstreamConn, ctx) + } +} + +func (r *runtime) setConnectionReadHandler(cliConn *connection.Connection, upstreamConn *connection.UpstreamConnection, ctx context.Layer4Context) { + if handle, ok := r.mux.GetHandler(r.spec.Name); ok { + cliConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { + handle.InboundHandler(ctx, buffer) + if ctx.GetReadBuffer().Len() > 0 { + buf := ctx.GetReadBuffer().Clone() + ctx.GetReadBuffer().Reset() + 
ctx.WriteToUpstream(buf) } - case "udp": - clientConn := connection.NewClientConnection(conn, conn.RemoteAddr(), listenerStopChan, internalStopChan) - ctx := context.NewLayer4Context(clientConn, listenerStopChan) - if server, err := r.pool.servers.next(ctx); err != nil { - logger.Errorf("Get layer4 proxy upstream server failed, err: %+v", err) - _ = clientConn.Close(connection.NoFlush, connection.OnConnect) - } else { - upstreamAddr, _ := net.ResolveTCPAddr("udp", server.Addr) - upstreamConn := connection.NewUpstreamConnection(time.Duration(r.spec.ProxyTimeout)*time.Millisecond, upstreamAddr, listenerStopChan) - if err := upstreamConn.Connect(); err == nil { - if handle, ok := r.mux.GetHandler(r.spec.Protocol); ok { - clientConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - handle.InboundHandler(ctx, buffer) - buf := ctx.GetReadBuffer().Clone() - ctx.GetReadBuffer().Reset() - ctx.WriteToUpstream(buf) - }) - upstreamConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - handle.OutboundHandler(ctx, buffer) - buf := ctx.GetReadBuffer().Clone() - ctx.GetReadBuffer().Reset() - ctx.WriteToClient(buf) - }) - } else { - clientConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - buf := buffer.Clone() - buffer.Drain(buf.Len()) - ctx.WriteToUpstream(buf) - }) - upstreamConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - buf := buffer.Clone() - buffer.Drain(buf.Len()) - ctx.WriteToClient(buf) - }) - } - } + }) + upstreamConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { + handle.OutboundHandler(ctx, buffer) + if ctx.GetReadBuffer().Len() > 0 { + buf := ctx.GetReadBuffer().Clone() + ctx.GetReadBuffer().Reset() + ctx.WriteToClient(buf) } - } + }) + } else { + cliConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { + ctx.WriteToUpstream(buffer.Clone()) + buffer.Reset() + }) + upstreamConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { + ctx.WriteToClient(buffer.Clone()) + buffer.Reset() + }) } +} - l := newListener(r.spec, onAccept) +func (r *runtime) startServer() { + l := newListener(r.spec, r.onTcpAccept(), r.onUdpAccept()) err := l.listen() if err != nil { r.setState(stateFailed) diff --git a/pkg/object/layer4rawserver/spec.go b/pkg/object/layer4rawserver/spec.go index d8dbf27588..42a2774fcc 100644 --- a/pkg/object/layer4rawserver/spec.go +++ b/pkg/object/layer4rawserver/spec.go @@ -27,6 +27,7 @@ import ( type ( // Spec describes the Layer4 Server. 
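Note on the handler wiring above: when no pipeline handler is registered for the server, each side's `SetOnRead` callback simply forwards the buffer to the other side, so the proxy degenerates to a bidirectional byte pipe. A standalone sketch of that behaviour with plain `net.Conn` and `io.Copy`, independent of the `connection` package; the listen port and upstream address are examples only:

```go
package main

import (
	"io"
	"log"
	"net"
)

// proxy forwards bytes in both directions until either side closes,
// which is what the no-handler branch above amounts to.
func proxy(client net.Conn, upstreamAddr string) {
	upstream, err := net.Dial("tcp", upstreamAddr)
	if err != nil {
		log.Printf("connect upstream %s failed: %v", upstreamAddr, err)
		client.Close()
		return
	}
	done := make(chan struct{}, 2)
	go func() { io.Copy(upstream, client); done <- struct{}{} }() // client -> upstream
	go func() { io.Copy(client, upstream); done <- struct{}{} }() // upstream -> client
	<-done // first direction to finish tears the pair down
	client.Close()
	upstream.Close()
}

func main() {
	ln, err := net.Listen("tcp", ":9000") // example listen port
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go proxy(conn, "127.0.0.1:9001") // example upstream address
	}
}
```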
Spec struct { + Name string `yaml:"name" json:"name" jsonschema:"required"` Protocol string `yaml:"protocol" jsonschema:"required,enum=tcp,enum=udp"` Port uint16 `yaml:"port" json:"port" jsonschema:"required"` diff --git a/pkg/object/layer4rawserver/udpreceiver.go b/pkg/object/layer4rawserver/udpreceiver.go deleted file mode 100644 index 9a03249b2b..0000000000 --- a/pkg/object/layer4rawserver/udpreceiver.go +++ /dev/null @@ -1,28 +0,0 @@ -package layer4rawserver - -import ( - "strings" - "sync" - - "github.com/megaease/easegress/pkg/context" -) - -var ( - ProxyMap = sync.Map{} -) - -func GetProxyMapKey(raddr, laddr string) string { - var builder strings.Builder - builder.WriteString(raddr) - builder.WriteString(":") - builder.WriteString(laddr) - return builder.String() -} - -func SetUDPProxyMap(key string, layer4Context context.Layer4Context) { - ProxyMap.Store(key, layer4Context) -} - -func DelUDPProxyMap(key string) { - ProxyMap.Delete(key) -} diff --git a/pkg/util/connection/connection.go b/pkg/util/connection/connection.go index 89a0b5bd0f..fc246517c5 100644 --- a/pkg/util/connection/connection.go +++ b/pkg/util/connection/connection.go @@ -212,9 +212,9 @@ func (c *Connection) startReadLoop() { err := c.doReadIO() if err != nil { if te, ok := err.(net.Error); ok && te.Timeout() { - if c.protocol == "tcp" && c.readBuffer != nil && c.readBuffer.Len() == 0 && c.readBuffer.Cap() > DefaultBufferReadCapacity { + if c.protocol == "tcp" && c.readBuffer != nil && c.readBuffer.Len() == 0 && c.readBuffer.Cap() > iobufferpool.DefaultBufferReadCapacity { c.readBuffer.Free() - c.readBuffer.Alloc(DefaultBufferReadCapacity) + c.readBuffer.Alloc(iobufferpool.DefaultBufferReadCapacity) } continue } @@ -381,7 +381,7 @@ func (c *Connection) doReadIO() (err error) { // A UDP socket will Read up to the size of the receiving buffer and will discard the rest c.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) default: // unix or tcp - c.readBuffer = iobufferpool.GetIoBuffer(DefaultBufferReadCapacity) + c.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) } } @@ -495,7 +495,7 @@ type UpstreamConnection struct { connectOnce sync.Once } -func NewUpstreamConn(connectTimeout time.Duration, remoteAddr net.Addr, stopChan chan struct{}, connStopChan chan struct{}) *UpstreamConnection { +func NewUpstreamConn(connectTimeout time.Duration, remoteAddr net.Addr, stopChan, connStopChan chan struct{}) *UpstreamConnection { conn := &UpstreamConnection{ Connection: Connection{ connected: 1, diff --git a/pkg/util/connection/constant.go b/pkg/util/connection/constant.go index ae4c7fd512..ccef6babba 100644 --- a/pkg/util/connection/constant.go +++ b/pkg/util/connection/constant.go @@ -58,8 +58,6 @@ var ( // Network related const const ( - DefaultBufferReadCapacity = 1 << 7 - NetBufferDefaultSize = 0 NetBufferDefaultCapacity = 1 << 4 diff --git a/pkg/util/iobufferpool/iobuffer_pool.go b/pkg/util/iobufferpool/iobuffer_pool.go index 8faa161c72..01318bcb07 100644 --- a/pkg/util/iobufferpool/iobuffer_pool.go +++ b/pkg/util/iobufferpool/iobuffer_pool.go @@ -22,7 +22,10 @@ import ( "sync" ) -const UdpPacketMaxSize = 64 * 1024 +const ( + UdpPacketMaxSize = 64 * 1024 + DefaultBufferReadCapacity = 1 << 7 +) var ibPool IoBufferPool From d8908d20967ad365a95e4f11ea802dbc50cb3948 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 17 Sep 2021 17:35:22 +0800 Subject: [PATCH 21/99] [layer4proxy] resolve import cycle --- pkg/context/layer4context.go | 5 +++-- pkg/object/layer4rawserver/layer4server.go 
| 2 +- pkg/object/layer4rawserver/runtime.go | 2 +- pkg/supervisor/registry.go | 16 ++++++++-------- pkg/util/connection/udpreceiver.go | 4 +--- pkg/util/gracenet/gracenet.go | 17 +++++++++++++++++ 6 files changed, 31 insertions(+), 15 deletions(-) diff --git a/pkg/context/layer4context.go b/pkg/context/layer4context.go index 2b6c0a41d7..fc646e0a0b 100644 --- a/pkg/context/layer4context.go +++ b/pkg/context/layer4context.go @@ -18,11 +18,12 @@ package context import ( - "github.com/megaease/easegress/pkg/util/connection" - "github.com/megaease/easegress/pkg/util/iobufferpool" "net" "sync" "time" + + "github.com/megaease/easegress/pkg/util/connection" + "github.com/megaease/easegress/pkg/util/iobufferpool" ) type ( diff --git a/pkg/object/layer4rawserver/layer4server.go b/pkg/object/layer4rawserver/layer4server.go index 79e88cee3d..e8078dcbd5 100644 --- a/pkg/object/layer4rawserver/layer4server.go +++ b/pkg/object/layer4rawserver/layer4server.go @@ -31,7 +31,7 @@ const ( ) func init() { - supervisor.Register(&Layer4Server{}) + //supervisor.Register(&Layer4Server{}) } type ( diff --git a/pkg/object/layer4rawserver/runtime.go b/pkg/object/layer4rawserver/runtime.go index 01655597bd..a934540c6b 100644 --- a/pkg/object/layer4rawserver/runtime.go +++ b/pkg/object/layer4rawserver/runtime.go @@ -295,7 +295,6 @@ func (r *runtime) onUdpAccept() func(cliAddr net.Addr, conn net.Conn, listenerSt upstreamAddr, _ := net.ResolveUDPAddr("udp", server.Addr) connTimeout := time.Duration(r.spec.ProxyConnectTimeout) * time.Millisecond upstreamConn := connection.NewUpstreamConn(connTimeout, upstreamAddr, listenerStop, nil) - if err := upstreamConn.Connect(); err != nil { logger.Errorf("discard udp packet due to upstream connect failed, local addr: %s, err: %+v", localAddr, err) return @@ -303,6 +302,7 @@ func (r *runtime) onUdpAccept() func(cliAddr net.Addr, conn net.Conn, listenerSt cliConn := connection.NewClientConn(conn, conn.RemoteAddr(), listenerStop, nil) ctx := context.NewLayer4Context(cliConn, upstreamConn, cliAddr) + connection.SetUDPProxyMap(connection.GetProxyMapKey(localAddr.String(), cliAddr.String()), ctx) r.setConnectionReadHandler(cliConn, upstreamConn, ctx) } } diff --git a/pkg/supervisor/registry.go b/pkg/supervisor/registry.go index 83241c32bd..856e0f5b58 100644 --- a/pkg/supervisor/registry.go +++ b/pkg/supervisor/registry.go @@ -70,14 +70,14 @@ type ( // The supervisor won't call Close for the previous generation. Inherit(superSpec *Spec, previousGeneration Object, muxMapper protocol.MuxMapper) - // InitLayer4 initializes the Object. - InitLayer4(superSpec *Spec, muxMapper protocol.Layer4MuxMapper) - - // InheritLayer4 also initializes the Object. - // But it needs to handle the lifecycle of the previous generation. - // So its own responsibility for the object to inherit and clean the previous generation stuff. - // The supervisor won't call Close for the previous generation. - InheritLayer4(superSpec *Spec, previousGeneration Object, muxMapper protocol.Layer4MuxMapper) + //// InitLayer4 initializes the Object. + //InitLayer4(superSpec *Spec, muxMapper protocol.Layer4MuxMapper) + // + //// InheritLayer4 also initializes the Object. + //// But it needs to handle the lifecycle of the previous generation. + //// So its own responsibility for the object to inherit and clean the previous generation stuff. + //// The supervisor won't call Close for the previous generation. 
+ //InheritLayer4(superSpec *Spec, previousGeneration Object, muxMapper protocol.Layer4MuxMapper) } // TrafficGate is the object in category of TrafficGate. diff --git a/pkg/util/connection/udpreceiver.go b/pkg/util/connection/udpreceiver.go index 03a3daac58..9431850fef 100644 --- a/pkg/util/connection/udpreceiver.go +++ b/pkg/util/connection/udpreceiver.go @@ -20,8 +20,6 @@ package connection import ( "strings" "sync" - - "github.com/megaease/easegress/pkg/context" ) var ( @@ -38,7 +36,7 @@ func GetProxyMapKey(raddr, laddr string) string { } // SetUDPProxyMap set udp session by udp server listener -func SetUDPProxyMap(key string, layer4Context context.Layer4Context) { +func SetUDPProxyMap(key string, layer4Context interface{}) { ProxyMap.Store(key, layer4Context) } diff --git a/pkg/util/gracenet/gracenet.go b/pkg/util/gracenet/gracenet.go index 1de891d3bd..55268427cd 100644 --- a/pkg/util/gracenet/gracenet.go +++ b/pkg/util/gracenet/gracenet.go @@ -1,3 +1,20 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package gracenet import "github.com/megaease/easegress/pkg/graceupdate" From 068a7049559ed1b86a9f03f2615bda487bb684e8 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sun, 3 Oct 2021 21:42:22 +0800 Subject: [PATCH 22/99] [layer4proxy] finish majority layer4 proxy function --- pkg/context/layer4context.go | 152 +++++++---- pkg/filter/bridge/bridge.go | 2 +- pkg/object/httpserver/mux.go | 2 +- pkg/object/layer4pipeline/layer4pipeline.go | 6 +- pkg/object/layer4rawserver/backendserver.go | 2 +- pkg/object/layer4rawserver/layer4server.go | 4 +- pkg/object/layer4rawserver/listener.go | 217 +++++++--------- pkg/object/layer4rawserver/mux.go | 9 +- pkg/object/layer4rawserver/runtime.go | 240 ++++++++++-------- pkg/object/layer4rawserver/spec.go | 2 +- .../trafficcontroller/trafficcontroller.go | 58 ++++- pkg/protocol/layer4.go | 42 --- pkg/protocol/{http.go => protocol.go} | 15 +- pkg/registry/registry.go | 1 + pkg/supervisor/supervisor.go | 2 +- pkg/util/connection/connection.go | 13 +- pkg/util/connection/udpreceiver.go | 4 +- pkg/util/gracenet/gracenet.go | 22 -- pkg/util/httpstat/httpstat.go | 2 +- pkg/util/layer4stat/layer4stat.go | 127 ++++++++- 20 files changed, 540 insertions(+), 382 deletions(-) delete mode 100644 pkg/protocol/layer4.go rename pkg/protocol/{http.go => protocol.go} (63%) delete mode 100644 pkg/util/gracenet/gracenet.go diff --git a/pkg/context/layer4context.go b/pkg/context/layer4context.go index fc646e0a0b..71b19dc962 100644 --- a/pkg/context/layer4context.go +++ b/pkg/context/layer4context.go @@ -18,6 +18,7 @@ package context import ( + "github.com/megaease/easegress/pkg/util/layer4stat" "net" "sync" "time" @@ -34,19 +35,37 @@ type ( Lock() Unlock() + // Protocol current support tcp/udp, future maybe support unix Protocol() string + // LocalAddr listen addr for layer4 server LocalAddr() net.Addr + // ClientAddr client addr ClientAddr() net.Addr + // UpstreamAddr addr for upstream server 
UpstreamAddr() net.Addr - GetReadBuffer() iobufferpool.IoBuffer - AppendReadBuffer(buffer iobufferpool.IoBuffer) - GetWriteBuffer() iobufferpool.IoBuffer - AppendWriteBuffer(buffer iobufferpool.IoBuffer) - - WriteToClient(buffer iobufferpool.IoBuffer) - WriteToUpstream(buffer iobufferpool.IoBuffer) - + // GetClientReadBuffer get io buffer read from client + GetClientReadBuffer() iobufferpool.IoBuffer + // GetWriteToClientBuffer get io buffer write to client + GetWriteToClientBuffer() iobufferpool.IoBuffer + // GetUpstreamReadBuffer get io buffer read from upstream + GetUpstreamReadBuffer() iobufferpool.IoBuffer + // GetWriteToUpstreamBuffer get io buffer write to upstream + GetWriteToUpstreamBuffer() iobufferpool.IoBuffer + + // WriteToClient get write to client buffer and send to client + WriteToClient() + // DirectWriteToClient directly write to client + DirectWriteToClient(buffer iobufferpool.IoBuffer) + // WriteToUpstream get write to upstream buffer and send to upstream + WriteToUpstream() + // DirectWriteToUpstream directly write to upstream + DirectWriteToUpstream(buffer iobufferpool.IoBuffer) + + // StatMetric get + StatMetric() *layer4stat.Metric + + // Finish close context and release buffer resource Finish() Duration() time.Duration @@ -54,20 +73,10 @@ type ( SetHandlerCaller(caller HandlerCaller) } - ConnectionArgs struct { - TCPNodelay bool - Linger bool - SendBufSize uint32 - RecvBufSize uint32 - ProxyTimeout uint32 - ProxyReadTimeout int64 // connection read timeout(milliseconds) - ProxyWriteTimeout int64 // connection write timeout(milliseconds) - - startOnce sync.Once // make sure read loop and write loop start only once - } - layer4Context struct { - mu sync.Mutex + mu sync.Mutex + reqSize uint64 + respSize uint64 protocol string localAddr net.Addr @@ -77,9 +86,10 @@ type ( clientConn *connection.Connection upstreamConn *connection.UpstreamConnection - readBuffer iobufferpool.IoBuffer - writeBuffer iobufferpool.IoBuffer - connectionArgs *ConnectionArgs + clientReadBuffer iobufferpool.IoBuffer + clientWriteBuffer iobufferpool.IoBuffer + upstreamReadBuffer iobufferpool.IoBuffer + upstreamWriteBuffer iobufferpool.IoBuffer startTime *time.Time // connection accept time endTime *time.Time // connection close time @@ -89,6 +99,7 @@ type ( ) // NewLayer4Context creates an Layer4Context. 
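Note on the buffer getters and write helpers declared in this hunk: a pipeline handler is expected to consume from the read buffer of one side, drain what it consumed, stage output in the opposite side's write buffer, and then flush it. A sketch of a pass-through handler following that contract; the `example` package and `passThrough` type are hypothetical, and the sketch assumes the `Layer4Context` and `IoBuffer` methods exactly as declared in this patch:

```go
package example // illustrative only, not part of the patch

import (
	"github.com/megaease/easegress/pkg/context"
)

// passThrough shows the buffer contract a layer4 handler is expected to follow:
// whatever it consumes from a read buffer it drains itself, and data meant for
// the peer goes into the corresponding write buffer before flushing.
type passThrough struct{}

// Handle is invoked after either side appended newly read bytes to its read buffer.
func (p passThrough) Handle(ctx context.Layer4Context) {
	// client -> upstream direction
	if rb := ctx.GetClientReadBuffer(); rb != nil && rb.Len() > 0 {
		_ = ctx.GetWriteToUpstreamBuffer().Append(rb.Bytes())
		rb.Reset()            // the handler owns draining the read buffer
		ctx.WriteToUpstream() // flush staged bytes toward the upstream side
	}

	// upstream -> client direction
	if rb := ctx.GetUpstreamReadBuffer(); rb != nil && rb.Len() > 0 {
		_ = ctx.GetWriteToClientBuffer().Append(rb.Bytes())
		rb.Reset()
		ctx.WriteToClient() // flush staged bytes toward the client side
	}
}
```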
+// @param cliAddr udp client addr(client addr can not get from udp listen server) func NewLayer4Context(cliConn *connection.Connection, upstreamConn *connection.UpstreamConnection, cliAddr net.Addr) *layer4Context { startTime := time.Now() ctx := &layer4Context{ @@ -110,11 +121,15 @@ func NewLayer4Context(cliConn *connection.Connection, upstreamConn *connection.U switch ctx.protocol { case "udp": - ctx.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) - ctx.writeBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) + ctx.clientReadBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) + ctx.clientWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) + ctx.upstreamReadBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) + ctx.upstreamWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) case "tcp": - ctx.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) - ctx.writeBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) + ctx.clientReadBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) + ctx.clientWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) + ctx.upstreamReadBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) + ctx.upstreamWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) } return ctx } @@ -139,7 +154,7 @@ func (ctx *layer4Context) LocalAddr() net.Addr { } func (ctx *layer4Context) ClientAddr() net.Addr { - return ctx.ClientAddr() + return ctx.clientAddr } // UpstreamAddr get upstream addr @@ -147,46 +162,71 @@ func (ctx *layer4Context) UpstreamAddr() net.Addr { return ctx.upstreamAddr } -// GetReadBuffer get read buffer -func (ctx *layer4Context) GetReadBuffer() iobufferpool.IoBuffer { - return ctx.readBuffer +func (ctx *layer4Context) StatMetric() *layer4stat.Metric { + return &layer4stat.Metric{ + Err: false, + Duration: ctx.Duration(), + ReqSize: ctx.reqSize, + RespSize: ctx.respSize, + } } -// AppendReadBuffer filter receive client data, append data to ctx read buffer for other filters handle -func (ctx *layer4Context) AppendReadBuffer(buffer iobufferpool.IoBuffer) { - if buffer == nil || buffer.Len() == 0 { - return - } - _ = ctx.readBuffer.Append(buffer.Bytes()) +// GetClientReadBuffer get read buffer for client conn +func (ctx *layer4Context) GetClientReadBuffer() iobufferpool.IoBuffer { + return ctx.clientReadBuffer } -// GetWriteBuffer get write buffer -func (ctx *layer4Context) GetWriteBuffer() iobufferpool.IoBuffer { - return ctx.writeBuffer +// GetWriteToClientBuffer get write buffer sync to client +func (ctx *layer4Context) GetWriteToClientBuffer() iobufferpool.IoBuffer { + return ctx.clientWriteBuffer } -// AppendWriteBuffer filter receive upstream data, append data to ctx write buffer for other filters handle -func (ctx *layer4Context) AppendWriteBuffer(buffer iobufferpool.IoBuffer) { - if buffer == nil || buffer.Len() == 0 { +// GetUpstreamReadBuffer get read buffer for upstream conn +func (ctx *layer4Context) GetUpstreamReadBuffer() iobufferpool.IoBuffer { + return ctx.upstreamReadBuffer +} + +// GetWriteToUpstreamBuffer get write buffer sync to upstream +func (ctx *layer4Context) GetWriteToUpstreamBuffer() iobufferpool.IoBuffer { + return ctx.upstreamWriteBuffer +} + +// WriteToClient filter handle client upload data, send result to upstream connection +func (ctx *layer4Context) WriteToClient() { + if ctx.clientWriteBuffer == nil || 
ctx.upstreamWriteBuffer.Len() == 0 { return } - _ = ctx.writeBuffer.Append(buffer.Bytes()) + buf := ctx.clientWriteBuffer.Clone() + ctx.respSize += uint64(buf.Len()) + ctx.clientWriteBuffer.Reset() + _ = ctx.upstreamConn.Write(buf) } -// WriteToClient filter handle client upload data, send result to upstream connection -func (ctx *layer4Context) WriteToClient(buffer iobufferpool.IoBuffer) { +func (ctx *layer4Context) DirectWriteToClient(buffer iobufferpool.IoBuffer) { if buffer == nil || buffer.Len() == 0 { return } - _ = ctx.upstreamConn.Write(buffer) + ctx.respSize += uint64(buffer.Len()) + _ = ctx.upstreamConn.Write(buffer.Clone()) } // WriteToUpstream filter handle client upload data, send result to upstream connection -func (ctx *layer4Context) WriteToUpstream(buffer iobufferpool.IoBuffer) { +func (ctx *layer4Context) WriteToUpstream() { + if ctx.upstreamWriteBuffer == nil || ctx.upstreamWriteBuffer.Len() == 0 { + return + } + buf := ctx.upstreamWriteBuffer.Clone() + ctx.reqSize += uint64(buf.Len()) + _ = ctx.clientConn.Write(buf) + ctx.upstreamWriteBuffer.Reset() +} + +func (ctx *layer4Context) DirectWriteToUpstream(buffer iobufferpool.IoBuffer) { if buffer == nil || buffer.Len() == 0 { return } - _ = ctx.clientConn.Write(buffer) + ctx.reqSize += uint64(buffer.Len()) + _ = ctx.clientConn.Write(buffer.Clone()) } // CallNextHandler call handler caller @@ -201,10 +241,14 @@ func (ctx *layer4Context) SetHandlerCaller(caller HandlerCaller) { // Finish context finish handler func (ctx *layer4Context) Finish() { - _ = iobufferpool.PutIoBuffer(ctx.readBuffer) - _ = iobufferpool.PutIoBuffer(ctx.writeBuffer) - ctx.readBuffer = nil - ctx.writeBuffer = nil + _ = iobufferpool.PutIoBuffer(ctx.clientReadBuffer) + _ = iobufferpool.PutIoBuffer(ctx.clientWriteBuffer) + _ = iobufferpool.PutIoBuffer(ctx.upstreamReadBuffer) + _ = iobufferpool.PutIoBuffer(ctx.upstreamWriteBuffer) + ctx.clientReadBuffer = nil + ctx.clientWriteBuffer = nil + ctx.upstreamReadBuffer = nil + ctx.upstreamWriteBuffer = nil finish := time.Now() ctx.endTime = &finish diff --git a/pkg/filter/bridge/bridge.go b/pkg/filter/bridge/bridge.go index 3548911f11..ad19ed623d 100644 --- a/pkg/filter/bridge/bridge.go +++ b/pkg/filter/bridge/bridge.go @@ -147,7 +147,7 @@ func (b *Bridge) handle(ctx context.HTTPContext) (result string) { return resultDestinationNotFound } - handler, exists := b.muxMapper.GetHandler(dest) + handler, exists := b.muxMapper.GetHTTPHandler(dest) if !exists { logger.Errorf("failed to get running object %s", b.spec.Destinations[0]) diff --git a/pkg/object/httpserver/mux.go b/pkg/object/httpserver/mux.go index 4229559bb8..6b058d76c2 100644 --- a/pkg/object/httpserver/mux.go +++ b/pkg/object/httpserver/mux.go @@ -434,7 +434,7 @@ func (m *mux) handleRequestWithCache(rules *muxRules, ctx context.HTTPContext, c case ci.methodNotAllowed: ctx.Response().SetStatusCode(http.StatusMethodNotAllowed) case ci.path != nil: - handler, exists := rules.muxMapper.GetHandler(ci.path.backend) + handler, exists := rules.muxMapper.GetHTTPHandler(ci.path.backend) if !exists { ctx.AddTag(stringtool.Cat("backend ", ci.path.backend, " not found")) ctx.Response().SetStatusCode(http.StatusServiceUnavailable) diff --git a/pkg/object/layer4pipeline/layer4pipeline.go b/pkg/object/layer4pipeline/layer4pipeline.go index 5bdd564369..dd32ee0c38 100644 --- a/pkg/object/layer4pipeline/layer4pipeline.go +++ b/pkg/object/layer4pipeline/layer4pipeline.go @@ -53,7 +53,7 @@ type ( superSpec *supervisor.Spec spec *Spec - muxMapper protocol.Layer4MuxMapper + 
muxMapper protocol.MuxMapper runningFilters []*runningFilter } @@ -273,14 +273,14 @@ func (l *Layer4Pipeline) DefaultSpec() interface{} { } // Init initializes Layer4Pipeline. -func (l *Layer4Pipeline) Init(superSpec *supervisor.Spec, muxMapper protocol.Layer4MuxMapper) { +func (l *Layer4Pipeline) Init(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { l.superSpec, l.spec, l.muxMapper = superSpec, superSpec.ObjectSpec().(*Spec), muxMapper l.reload(nil /*no previous generation*/) } // Inherit inherits previous generation of Layer4Pipeline. -func (l *Layer4Pipeline) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper protocol.Layer4MuxMapper) { +func (l *Layer4Pipeline) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper protocol.MuxMapper) { l.superSpec, l.spec, l.muxMapper = superSpec, superSpec.ObjectSpec().(*Spec), muxMapper l.reload(previousGeneration.(*Layer4Pipeline)) diff --git a/pkg/object/layer4rawserver/backendserver.go b/pkg/object/layer4rawserver/backendserver.go index 6eaad30450..6357eac399 100644 --- a/pkg/object/layer4rawserver/backendserver.go +++ b/pkg/object/layer4rawserver/backendserver.go @@ -249,7 +249,7 @@ func (ss *staticServers) next(cliAddr string) *Server { func (ss *staticServers) roundRobin() *Server { count := atomic.AddUint64(&ss.count, 1) - // NOTE: start from 0. + // NOTE: startEventLoop from 0. count-- return ss.servers[int(count)%len(ss.servers)] } diff --git a/pkg/object/layer4rawserver/layer4server.go b/pkg/object/layer4rawserver/layer4server.go index e8078dcbd5..1e98b6b140 100644 --- a/pkg/object/layer4rawserver/layer4server.go +++ b/pkg/object/layer4rawserver/layer4server.go @@ -60,7 +60,7 @@ func (l4 *Layer4Server) DefaultSpec() interface{} { } // Init initializes Layer4Server. -func (l4 *Layer4Server) Init(superSpec *supervisor.Spec, muxMapper protocol.Layer4MuxMapper) { +func (l4 *Layer4Server) Init(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { l4.runtime = newRuntime(superSpec, muxMapper) @@ -71,7 +71,7 @@ func (l4 *Layer4Server) Init(superSpec *supervisor.Spec, muxMapper protocol.Laye } // Inherit inherits previous generation of Layer4Server. -func (l4 *Layer4Server) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper protocol.Layer4MuxMapper) { +func (l4 *Layer4Server) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper protocol.MuxMapper) { l4.runtime = previousGeneration.(*Layer4Server).runtime l4.runtime.eventChan <- &eventReload{ diff --git a/pkg/object/layer4rawserver/listener.go b/pkg/object/layer4rawserver/listener.go index 5aad46c602..788a292f9e 100644 --- a/pkg/object/layer4rawserver/listener.go +++ b/pkg/object/layer4rawserver/listener.go @@ -19,160 +19,93 @@ package layer4rawserver import ( stdcontext "context" + "errors" "fmt" - "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/util/iobufferpool" - "github.com/megaease/easegress/pkg/util/limitlistener" "net" "runtime/debug" "sync" + + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/util/iobufferpool" + "github.com/megaease/easegress/pkg/util/limitlistener" ) type ListenerState int -// listener state -// ListenerInited means listener is inited, an inited listener can be started or stopped -// ListenerRunning means listener is running, start a running listener will be ignored. 
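Note on the `backendserver.go` changes in this patch: `next()` now takes only the client address string, so IP-hash selection no longer needs the full context, and round robin keeps using an atomic counter. A standalone sketch of those two policies, with the standard library's `hash/fnv` standing in for the internal `hashtool` package:

```go
package main

import (
	"fmt"
	"hash/fnv"
	"sync/atomic"
)

type server struct{ addr string }

type balancer struct {
	servers []server
	count   uint64
}

// roundRobin mirrors staticServers.roundRobin: an atomic counter modulo the pool size.
func (b *balancer) roundRobin() server {
	n := atomic.AddUint64(&b.count, 1) - 1 // start from 0
	return b.servers[int(n)%len(b.servers)]
}

// ipHash mirrors staticServers.ipHash, but with hash/fnv instead of hashtool:
// the same client address always maps to the same upstream server.
func (b *balancer) ipHash(clientAddr string) server {
	h := fnv.New32a()
	h.Write([]byte(clientAddr))
	return b.servers[h.Sum32()%uint32(len(b.servers))]
}

func main() {
	b := &balancer{servers: []server{{"10.0.0.1:80"}, {"10.0.0.2:80"}, {"10.0.0.3:80"}}}
	fmt.Println(b.roundRobin().addr, b.roundRobin().addr)              // two different backends
	fmt.Println(b.ipHash("192.168.1.7").addr, b.ipHash("192.168.1.7").addr) // same backend twice
}
```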
-// ListenerStopped means listener is stopped, start a stopped listener without restart flag will be ignored. -const ( - ListenerInited ListenerState = iota - ListenerRunning - ListenerStopped -) - type listener struct { - name string - udpListener net.PacketConn // udp connection listener - tcpListener *limitlistener.LimitListener // tcp connection listener with connection limit - - state ListenerState - protocol string // enum:udp/tcp - listenAddr string - keepalive bool - maxConnections uint32 + name string + state ListenerState + protocol string // enum:udp/tcp + localAddr string mutex *sync.Mutex - stopChan chan struct{} // connection listen to this stopChan + stopChan chan struct{} - onTcpAccept func(conn net.Conn, listenerStopChan chan struct{}) // tcp accept handle - onUdpAccept func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, buffer iobufferpool.IoBuffer) // udp accept handle + udpListener net.PacketConn // udp listener + + keepalive bool // keepalive for tcp + maxConns uint32 // maxConn for tcp listener + tcpListener *limitlistener.LimitListener // tcp listener with accept limit + + onTcpAccept func(conn net.Conn, listenerStop chan struct{}) // tcp accept handle + onUdpAccept func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) // udp accept handle } -func newListener(spec *Spec, onAccept func(conn net.Conn, listenerStopChan chan struct{}), - onUdpAccept func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, buffer iobufferpool.IoBuffer)) *listener { +func newListener(spec *Spec, onAccept func(conn net.Conn, listenerStop chan struct{}), + onUdpAccept func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer)) *listener { listen := &listener{ - state: ListenerInited, - listenAddr: fmt.Sprintf(":%d", spec.Port), - protocol: spec.Protocol, - keepalive: spec.KeepAlive, - maxConnections: spec.MaxConnections, + protocol: spec.Protocol, + localAddr: fmt.Sprintf(":%d", spec.Port), + mutex: &sync.Mutex{}, + stopChan: make(chan struct{}), onTcpAccept: onAccept, onUdpAccept: onUdpAccept, - mutex: &sync.Mutex{}, } - return listen -} - -func (l *listener) start() { - ignored := func() bool { - l.mutex.Lock() - defer l.mutex.Unlock() - - switch l.state { - case ListenerRunning: - logger.Debugf("listener %s %s is already running", l.protocol, l.listenAddr) - return true - case ListenerStopped: - logger.Debugf("listener %s %s restart", l.protocol, l.listenAddr) - if err := l.listen(); err != nil { - logger.Errorf("listener %s %s restart failed, err: %+v", l.protocol, l.listenAddr, err) - return true - } - default: - if l.udpListener == nil && l.tcpListener == nil { - if err := l.listen(); err != nil { - logger.Errorf("listener %s %s start failed, err: %+v", l.protocol, l.listenAddr, err) - } - } - } - l.state = ListenerRunning - return false - }() - if ignored { - return - } - - switch l.protocol { - case "udp": - l.readMsgEventLoop() - case "tcp": - l.acceptEventLoop() + if listen.protocol == "tcp" { + listen.keepalive = spec.KeepAlive + listen.maxConns = spec.MaxConnections } + return listen } func (l *listener) listen() error { switch l.protocol { case "udp": c := net.ListenConfig{} - if ul, err := c.ListenPacket(stdcontext.Background(), l.protocol, l.listenAddr); err != nil { + if ul, err := c.ListenPacket(stdcontext.Background(), l.protocol, l.localAddr); err != nil { return err } else { l.udpListener = ul } case "tcp": - if tl, err := net.Listen(l.protocol, l.listenAddr); err != nil 
{ + if tl, err := net.Listen(l.protocol, l.localAddr); err != nil { return err } else { - l.tcpListener = limitlistener.NewLimitListener(tl, l.maxConnections) + // wrap tcp listener with accept limit + l.tcpListener = limitlistener.NewLimitListener(tl, l.maxConns) } + default: + return errors.New("invalid protocol for layer4 server listener") } return nil } -func (l *listener) acceptEventLoop() { - - for { - if tconn, err := l.tcpListener.Accept(); err != nil { - if nerr, ok := err.(net.Error); ok && nerr.Timeout() { - logger.Infof("tcp listener(%s) stop accept connection due to deadline, err: %s", - l.listenAddr, nerr) - return - } - - if ope, ok := err.(*net.OpError); ok { - // not timeout error and not temporary, which means the error is non-recoverable - if !(ope.Timeout() && ope.Temporary()) { - // accept error raised by sockets closing - if ope.Op == "accept" { - logger.Errorf("tcp listener(%s) stop accept connection due to listener closed", - l.listenAddr) - } else { - logger.Errorf("tcp listener(%s) stop accept connection due to non-recoverable error: %s", - l.listenAddr, err.Error()) - } - return - } - } else { - logger.Errorf("tcp listener(%s) stop accept connection with unknown error: %s.", - l.listenAddr, err.Error()) - } - } else { - go l.onTcpAccept(tconn, l.stopChan) - } +func (l *listener) startEventLoop() { + switch l.protocol { + case "udp": + l.readMsgEventLoop() + case "tcp": + l.acceptEventLoop() } } -func (l *listener) setMaxConnection(maxConn uint32) { - l.tcpListener.SetMaxConnection(maxConn) -} - func (l *listener) readMsgEventLoop() { go func() { defer func() { if r := recover(); r != nil { - logger.Errorf("failed to read udp msg for %s\n, stack trace: \n", l.listenAddr, debug.Stack()) + logger.Errorf("failed to read udp msg for %s\n, stack trace: \n", l.localAddr, debug.Stack()) l.readMsgEventLoop() } }() @@ -184,7 +117,9 @@ func (l *listener) readMsgEventLoop() { func (l *listener) readMsgLoop() { conn := l.udpListener.(*net.UDPConn) buf := iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) - defer iobufferpool.PutIoBuffer(buf) + defer func(buf iobufferpool.IoBuffer) { + _ = iobufferpool.PutIoBuffer(buf) + }(buf) for { buf.Reset() @@ -193,32 +128,74 @@ func (l *listener) readMsgLoop() { if err != nil { if nerr, ok := err.(net.Error); ok && nerr.Timeout() { - logger.Infof("udp listener %s stop receiving packet by deadline", l.listenAddr) + logger.Infof("udp listener %s stop receiving packet by deadline", l.localAddr) return } if ope, ok := err.(*net.OpError); ok { if !(ope.Timeout() && ope.Temporary()) { - logger.Errorf("udp listener %s occurs non-recoverable error, stop listening and receiving", l.listenAddr) + logger.Errorf("udp listener %s occurs non-recoverable error, stop listening and receiving", l.localAddr) return } } - logger.Errorf("udp listener %s receiving packet occur error: %+v", l.listenAddr, err) + logger.Errorf("udp listener %s receiving packet occur error: %+v", l.localAddr, err) continue } - l.onUdpAccept(rAddr, conn, l.stopChan, buf) + l.onUdpAccept(rAddr, conn, l.stopChan, buf.Clone()) + } +} + +func (l *listener) acceptEventLoop() { + + for { + if tconn, err := l.tcpListener.Accept(); err != nil { + if nerr, ok := err.(net.Error); ok && nerr.Timeout() { + logger.Infof("tcp listener(%s) stop accept connection due to timeout, err: %s", + l.localAddr, nerr) + return + } + + if ope, ok := err.(*net.OpError); ok { + // not timeout error and not temporary, which means the error is non-recoverable + if !(ope.Timeout() && ope.Temporary()) { + 
// accept error raised by sockets closing + if ope.Op == "accept" { + logger.Errorf("tcp listener(%s) stop accept connection due to listener closed", + l.localAddr) + } else { + logger.Errorf("tcp listener(%s) stop accept connection due to non-recoverable error: %s", + l.localAddr, err.Error()) + } + return + } + } else { + logger.Errorf("tcp listener(%s) stop accept connection with unknown error: %s.", + l.localAddr, err.Error()) + } + } else { + go l.onTcpAccept(tconn, l.stopChan) + } } } +func (l *listener) setMaxConnection(maxConn uint32) { + l.tcpListener.SetMaxConnection(maxConn) +} + func (l *listener) close() error { l.mutex.Lock() defer l.mutex.Unlock() - if l.tcpListener != nil { - return l.tcpListener.Close() - } - if l.udpListener != nil { - return l.udpListener.Close() + var err error + switch l.protocol { + case "tcp": + if l.tcpListener != nil { + err = l.tcpListener.Close() + } + case "udp": + if l.udpListener != nil { + err = l.udpListener.Close() + } } - close(l.stopChan) // TODO listener关闭时,需要关闭已建立的连接吗 - return nil + close(l.stopChan) + return err } diff --git a/pkg/object/layer4rawserver/mux.go b/pkg/object/layer4rawserver/mux.go index 66b12caeae..417b423ac4 100644 --- a/pkg/object/layer4rawserver/mux.go +++ b/pkg/object/layer4rawserver/mux.go @@ -37,7 +37,7 @@ type ( superSpec *supervisor.Spec spec *Spec - muxMapper protocol.Layer4MuxMapper + muxMapper protocol.MuxMapper ipFilter *ipfilter.IPFilter ipFilterChan *ipfilter.IPFilters @@ -87,7 +87,7 @@ func (mr *muxRules) pass(ctx context.Layer4Context) bool { return false } -func newMux(mapper protocol.Layer4MuxMapper) *mux { +func newMux(mapper protocol.MuxMapper) *mux { m := &mux{} m.rules.Store(&muxRules{ @@ -97,7 +97,7 @@ func newMux(mapper protocol.Layer4MuxMapper) *mux { return m } -func (m *mux) reloadRules(superSpec *supervisor.Spec, muxMapper protocol.Layer4MuxMapper) { +func (m *mux) reloadRules(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { spec := superSpec.ObjectSpec().(*Spec) rules := &muxRules{ @@ -123,8 +123,9 @@ func (m *mux) GetHandler(name string) (protocol.Layer4Handler, bool) { if rules == nil { return nil, false } - return rules.muxMapper.GetHandler(name) + return rules.muxMapper.GetLayer4Handler(name) } func (m *mux) close() { + // TODO add close tracing } diff --git a/pkg/object/layer4rawserver/runtime.go b/pkg/object/layer4rawserver/runtime.go index a934540c6b..cf4bf772b3 100644 --- a/pkg/object/layer4rawserver/runtime.go +++ b/pkg/object/layer4rawserver/runtime.go @@ -57,7 +57,7 @@ type ( eventReload struct { nextSuperSpec *supervisor.Spec - muxMapper protocol.Layer4MuxMapper + muxMapper protocol.MuxMapper } eventClose struct{ done chan struct{} } @@ -68,14 +68,13 @@ type ( state atomic.Value // runtime running state err atomic.Value // runtime running error - startNum uint64 // runtime start num + startNum uint64 // runtime startEventLoop num eventChan chan interface{} // receive traffic controller event - mux *mux // mux for layer4 pipeline - pool *pool - - listener *listener - tcpstat *layer4stat.Layer4Stat + mux *mux + pool *pool + listener *listener + layer4Stat *layer4stat.Layer4Stat } // Status contains all status generated by runtime, for displaying to users. 
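Note on the runtime changes that follow: the runtime now owns a `layer4stat.Layer4Stat` and exposes its `Status` alongside the server state. The collector itself is not shown in full in this series; a minimal sketch of the kind of EWMA-backed counter it implies, assuming the `github.com/rcrowley/go-metrics` package, with illustrative field and method names:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

// stat is a stripped-down traffic collector: a total counter plus
// 1/5/15-minute EWMAs, the same building blocks go-metrics meters use.
type stat struct {
	count  uint64
	rate1  metrics.EWMA
	rate5  metrics.EWMA
	rate15 metrics.EWMA
}

func newStat() *stat {
	s := &stat{
		rate1:  metrics.NewEWMA1(),
		rate5:  metrics.NewEWMA5(),
		rate15: metrics.NewEWMA15(),
	}
	// EWMAs only decay when ticked; go-metrics expects a tick every 5 seconds.
	go func() {
		for range time.Tick(5 * time.Second) {
			s.rate1.Tick()
			s.rate5.Tick()
			s.rate15.Tick()
		}
	}()
	return s
}

// record is roughly what one finished connection contributes to the stat.
func (s *stat) record() {
	atomic.AddUint64(&s.count, 1)
	s.rate1.Update(1)
	s.rate5.Update(1)
	s.rate15.Update(1)
}

func (s *stat) status() string {
	return fmt.Sprintf("count=%d m1=%.2f m5=%.2f m15=%.2f",
		atomic.LoadUint64(&s.count), s.rate1.Rate(), s.rate5.Rate(), s.rate15.Rate())
}

func main() {
	s := newStat()
	for i := 0; i < 100; i++ {
		s.record()
	}
	fmt.Println(s.status())
}
```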
@@ -84,13 +83,16 @@ type ( State stateType `yaml:"state"` Error string `yaml:"error,omitempty"` + + *layer4stat.Status } ) -func newRuntime(superSpec *supervisor.Spec, muxMapper protocol.Layer4MuxMapper) *runtime { +func newRuntime(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) *runtime { r := &runtime{ - superSpec: superSpec, - eventChan: make(chan interface{}, 10), + superSpec: superSpec, + eventChan: make(chan interface{}, 10), + layer4Stat: layer4stat.New(), } r.mux = newMux(muxMapper) @@ -103,6 +105,7 @@ func newRuntime(superSpec *supervisor.Spec, muxMapper protocol.Layer4MuxMapper) return r } +// Close notify runtime close func (r *runtime) Close() { done := make(chan struct{}) r.eventChan <- &eventClose{done: done} @@ -117,6 +120,7 @@ func (r *runtime) Status() *Status { Health: health, State: r.getState(), Error: r.getError().Error(), + Status: r.layer4Stat.Status(), } } @@ -142,10 +146,11 @@ func (r *runtime) fsm() { } } -func (r *runtime) reload(nextSuperSpec *supervisor.Spec, muxMapper protocol.Layer4MuxMapper) { +func (r *runtime) reload(nextSuperSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { r.superSpec = nextSuperSpec r.mux.reloadRules(nextSuperSpec, muxMapper) + // TODO reload pool with guard? nextSpec := nextSuperSpec.ObjectSpec().(*Spec) r.pool = newPool(nextSuperSpec.Super(), nextSpec.Pool, "") @@ -203,8 +208,74 @@ func (r *runtime) getError() error { return err.(error) } -func (r *runtime) handleEventCheckFailed(e *eventCheckFailed) { +func (r *runtime) needRestartServer(nextSpec *Spec) bool { + x := *r.spec + y := *nextSpec + + // The change of options below need not restart the layer4 server. + x.KeepAlive, y.KeepAlive = true, true + x.TcpNodelay, y.TcpNodelay = true, true + x.MaxConnections, y.MaxConnections = 0, 0 + x.ProxyConnectTimeout, y.ProxyTimeout = 0, 0 + x.ProxyTimeout, y.ProxyTimeout = 0, 0 + + x.Pool, y.Pool = nil, nil + x.IPFilter, y.IPFilter = nil, nil + + // The update of rules need not to shutdown server. 
+ return !reflect.DeepEqual(x, y) +} + +func (r *runtime) startServer() { + l := newListener(r.spec, r.onTcpAccept(), r.onUdpAccept()) + + r.listener = l + r.startNum++ + r.setState(stateRunning) + r.setError(nil) + + if err := l.listen(); err != nil { + r.setState(stateFailed) + r.setError(err) + logger.Errorf("listen for %s :%d failed, err: %+v", r.spec.Protocol, r.spec.Port, err) + + _ = l.close() + r.eventChan <- &eventServeFailed{ + err: err, + startNum: r.startNum, + } + return + } + + go r.listener.startEventLoop() +} + +func (r *runtime) closeServer() { + if r.listener == nil { + return + } + + _ = r.listener.close() + logger.Infof("listener for %s :%d closed", r.listener.protocol, r.listener.localAddr) +} + +func (r *runtime) checkFailed() { + ticker := time.NewTicker(checkFailedTimeout) + for range ticker.C { + state := r.getState() + if state == stateFailed { + r.eventChan <- &eventCheckFailed{} + } else if state == stateClosed { + ticker.Stop() + return + } + } +} +func (r *runtime) handleEventCheckFailed(e *eventCheckFailed) { + if r.getState() == stateFailed { + r.startServer() + } } func (r *runtime) handleEventServeFailed(e *eventServeFailed) { @@ -216,18 +287,16 @@ func (r *runtime) handleEventServeFailed(e *eventServeFailed) { } func (r *runtime) handleEventReload(e *eventReload) { - + r.reload(e.nextSuperSpec, e.muxMapper) } func (r *runtime) handleEventClose(e *eventClose) { r.closeServer() + r.mux.close() close(e.done) } func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) { - if r.spec.Protocol != "tcp" { - return nil - } return func(conn net.Conn, listenerStop chan struct{}) { remote := conn.RemoteAddr().(*net.TCPAddr).IP.String() @@ -241,47 +310,50 @@ func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) server, err := r.pool.servers.next(remote) if err != nil { _ = conn.Close() - logger.Errorf("close tcp connection due to can not find upstream server, local addr: %s, err: %+v", + logger.Errorf("close tcp connection due to no available upstream server, local addr: %s, err: %+v", conn.LocalAddr(), err) return } - stopChan := make(chan struct{}) - serverAddr, _ := net.ResolveTCPAddr("tcp", server.Addr) connTimeout := time.Duration(r.spec.ProxyConnectTimeout) * time.Millisecond - upstreamConn := connection.NewUpstreamConn(connTimeout, serverAddr, listenerStop, stopChan) + upstreamStopChan := make(chan struct{}) + upstreamConn := connection.NewUpstreamConn(connTimeout, serverAddr, listenerStop, upstreamStopChan) if err := upstreamConn.Connect(); err != nil { logger.Errorf("close tcp connection due to upstream conn connect failed, local addr: %s, err: %+v", conn.LocalAddr().String(), err) _ = conn.Close() - close(stopChan) - return + } else { + clientStopChan := make(chan struct{}) + cliConn := connection.NewClientConn(conn, conn.RemoteAddr(), listenerStop, clientStopChan) + ctx := context.NewLayer4Context(cliConn, upstreamConn, conn.RemoteAddr()) + r.setOnReadHandler(cliConn, upstreamConn, ctx) + + // finish context lifecycle + go func(chans ...chan struct{}) { + for _, v := range chans { + <-v + ctx.Finish() + } + }(upstreamStopChan, clientStopChan) } - - cliConn := connection.NewClientConn(conn, conn.RemoteAddr(), listenerStop, stopChan) - ctx := context.NewLayer4Context(cliConn, upstreamConn, cliConn.RemoteAddr()) - r.setConnectionReadHandler(cliConn, upstreamConn, ctx) } } -func (r *runtime) onUdpAccept() func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, buffer iobufferpool.IoBuffer) { - if 
r.spec.Protocol != "udp" { - return nil - } - - return func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, buffer iobufferpool.IoBuffer) { - localAddr := conn.LocalAddr() +func (r *runtime) onUdpAccept() func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) { + return func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) { remote := cliAddr.(*net.UDPAddr).IP.String() if r.mux.AllowIP(remote) { - logger.Infof("discard udp packet from %s to %s which ip is not allowed", cliAddr.String(), localAddr.String()) + logger.Infof("discard udp packet from %s to %s which ip is not allowed", cliAddr.String(), + conn.LocalAddr().String()) return } + localAddr := conn.LocalAddr() key := connection.GetProxyMapKey(localAddr.String(), cliAddr.String()) if rawCtx, ok := connection.ProxyMap.Load(key); ok { - ctx := rawCtx.(context.Layer4Context) - ctx.WriteToUpstream(buffer.Clone()) // there is no need to reset buffer + ctx := rawCtx.(*connection.Connection) + ctx.OnRead(packet) return } @@ -294,7 +366,8 @@ func (r *runtime) onUdpAccept() func(cliAddr net.Addr, conn net.Conn, listenerSt upstreamAddr, _ := net.ResolveUDPAddr("udp", server.Addr) connTimeout := time.Duration(r.spec.ProxyConnectTimeout) * time.Millisecond - upstreamConn := connection.NewUpstreamConn(connTimeout, upstreamAddr, listenerStop, nil) + upstreamStopChan := make(chan struct{}) + upstreamConn := connection.NewUpstreamConn(connTimeout, upstreamAddr, listenerStop, upstreamStopChan) if err := upstreamConn.Connect(); err != nil { logger.Errorf("discard udp packet due to upstream connect failed, local addr: %s, err: %+v", localAddr, err) return @@ -302,94 +375,39 @@ func (r *runtime) onUdpAccept() func(cliAddr net.Addr, conn net.Conn, listenerSt cliConn := connection.NewClientConn(conn, conn.RemoteAddr(), listenerStop, nil) ctx := context.NewLayer4Context(cliConn, upstreamConn, cliAddr) - connection.SetUDPProxyMap(connection.GetProxyMapKey(localAddr.String(), cliAddr.String()), ctx) - r.setConnectionReadHandler(cliConn, upstreamConn, ctx) + connection.SetUDPProxyMap(connection.GetProxyMapKey(localAddr.String(), cliAddr.String()), &cliConn) + r.setOnReadHandler(cliConn, upstreamConn, ctx) + + // finish context lifecycle + go func(chans ...chan struct{}) { + for _, v := range chans { + <-v + ctx.Finish() + } + }(upstreamStopChan) } } -func (r *runtime) setConnectionReadHandler(cliConn *connection.Connection, upstreamConn *connection.UpstreamConnection, ctx context.Layer4Context) { +func (r *runtime) setOnReadHandler(cliConn *connection.Connection, upstreamConn *connection.UpstreamConnection, ctx context.Layer4Context) { if handle, ok := r.mux.GetHandler(r.spec.Name); ok { cliConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - handle.InboundHandler(ctx, buffer) - if ctx.GetReadBuffer().Len() > 0 { - buf := ctx.GetReadBuffer().Clone() - ctx.GetReadBuffer().Reset() - ctx.WriteToUpstream(buf) - } + _ = ctx.GetClientReadBuffer().Append(buffer.Bytes()) + handle.Handle(ctx) // its filter's response to clear context client read buffer + buffer.Reset() }) upstreamConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - handle.OutboundHandler(ctx, buffer) - if ctx.GetReadBuffer().Len() > 0 { - buf := ctx.GetReadBuffer().Clone() - ctx.GetReadBuffer().Reset() - ctx.WriteToClient(buf) - } + _ = ctx.GetUpstreamReadBuffer().Append(buffer.Bytes()) + handle.Handle(ctx) // its filter's response to clear context upstream read buffer + buffer.Reset() }) } else { 
cliConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - ctx.WriteToUpstream(buffer.Clone()) + ctx.DirectWriteToUpstream(buffer) buffer.Reset() }) upstreamConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - ctx.WriteToClient(buffer.Clone()) + ctx.DirectWriteToClient(buffer) buffer.Reset() }) } } - -func (r *runtime) startServer() { - l := newListener(r.spec, r.onTcpAccept(), r.onUdpAccept()) - err := l.listen() - if err != nil { - r.setState(stateFailed) - r.setError(err) - logger.Errorf("listen tcp conn for :%d failed, err: %v", r.spec.Port, err) - - _ = l.close() - r.eventChan <- &eventServeFailed{ - err: err, - startNum: r.startNum, - } - return - } - - r.startNum++ - r.setState(stateRunning) - r.setError(nil) - - r.listener = l - go r.listener.start() -} - -func (r *runtime) closeServer() { - _ = r.listener.close() // TODO close established connection when listener closed? - logger.Infof("listener for %s :%d closed,") -} - -func (r *runtime) checkFailed() { - ticker := time.NewTicker(checkFailedTimeout) - for range ticker.C { - state := r.getState() - if state == stateFailed { - r.eventChan <- &eventCheckFailed{} - } else if state == stateClosed { - ticker.Stop() - return - } - } -} - -func (r *runtime) needRestartServer(nextSpec *Spec) bool { - x := *r.spec - y := *nextSpec - - // The change of options below need not restart the HTTP server. - x.MaxConnections, y.MaxConnections = 0, 0 - x.IPFilter, y.IPFilter = nil, nil - x.Pool, y.Pool = nil, nil - x.ProxyConnectTimeout, y.ProxyTimeout = 0, 0 - x.ProxyTimeout, y.ProxyTimeout = 0, 0 - - // The update of rules need not to shutdown server. - return !reflect.DeepEqual(x, y) -} diff --git a/pkg/object/layer4rawserver/spec.go b/pkg/object/layer4rawserver/spec.go index 42a2774fcc..4722dfad1b 100644 --- a/pkg/object/layer4rawserver/spec.go +++ b/pkg/object/layer4rawserver/spec.go @@ -34,7 +34,7 @@ type ( // tcp stream config params KeepAlive bool `yaml:"keepAlive" jsonschema:"required"` TcpNodelay bool `yaml:"tcpNodelay" jsonschema:"omitempty"` - MaxConnections uint32 `yaml:"maxConnections" jsonschema:"omitempty,minimum=1"` + MaxConnections uint32 `yaml:"maxConns" jsonschema:"omitempty,minimum=1"` ProxyConnectTimeout uint32 `yaml:"proxyConnectTimeout" jsonschema:"omitempty"` ProxyTimeout uint32 `yaml:"proxyTimeout" jsonschema:"omitempty"` diff --git a/pkg/object/trafficcontroller/trafficcontroller.go b/pkg/object/trafficcontroller/trafficcontroller.go index 30bb1e1a04..13b16e0e1c 100644 --- a/pkg/object/trafficcontroller/trafficcontroller.go +++ b/pkg/object/trafficcontroller/trafficcontroller.go @@ -57,8 +57,10 @@ type ( // When the entry for a given key is only ever written once but read many times. 
// Reference: https://golang.org/pkg/sync/#Map // types of both: map[string]*supervisor.ObjectEntity - httpservers sync.Map - httppipelines sync.Map + httpservers sync.Map + httppipelines sync.Map + layer4servers sync.Map + layer4Pipelines sync.Map } // WalkFunc is the type of the function called for @@ -105,8 +107,8 @@ func newNamespace(namespace string) *Namespace { } } -// GetHandler gets handler within the namespace -func (ns *Namespace) GetHandler(name string) (protocol.HTTPHandler, bool) { +// GetHTTPHandler gets handler within the namespace +func (ns *Namespace) GetHTTPHandler(name string) (protocol.HTTPHandler, bool) { entity, exists := ns.httppipelines.Load(name) if !exists { return nil, false @@ -116,6 +118,16 @@ func (ns *Namespace) GetHandler(name string) (protocol.HTTPHandler, bool) { return handler, true } +func (ns *Namespace) GetLayer4Handler(name string) (protocol.Layer4Handler, bool) { + entity, exists := ns.layer4Pipelines.Load(name) + if !exists { + return nil, false + } + + handler := entity.(*supervisor.ObjectEntity).Instance().(protocol.Layer4Handler) + return handler, true +} + // Category returns the category of TrafficController. func (tc *TrafficController) Category() supervisor.ObjectCategory { return Category @@ -153,6 +165,42 @@ func (tc *TrafficController) reload(previousGeneration *TrafficController) { } } +// CreateLayer4ServerForSpec creates layer4 server with a spec +func (tc *TrafficController) CreateLayer4ServerForSpec(namespace string, superSpec *supervisor.Spec) ( + *supervisor.ObjectEntity, error) { + entity, err := tc.super.NewObjectEntityFromSpec(superSpec) + if err != nil { + return nil, err + } + return tc.CreateLayer4Server(namespace, entity) +} + +// CreateLayer4Server creates Layer4 server +func (tc *TrafficController) CreateLayer4Server(namespace string, entity *supervisor.ObjectEntity) (*supervisor.ObjectEntity, error) { + if namespace == "" { + return nil, fmt.Errorf("empty namespace") + } + + tc.mutex.Lock() + defer tc.mutex.Unlock() + + space, exists := tc.namespaces[namespace] + if !exists { + space = newNamespace(namespace) + tc.namespaces[namespace] = space + logger.Infof("create namespace %s", namespace) + } + + name := entity.Spec().Name() + + entity.InitWithRecovery(space) + space.layer4servers.Store(name, entity) + + logger.Infof("create layer4 server %s/%s", namespace, name) + + return entity, nil +} + // CreateHTTPServerForSpec creates HTTP server with a spec func (tc *TrafficController) CreateHTTPServerForSpec(namespace string, superSpec *supervisor.Spec) ( *supervisor.ObjectEntity, error) { @@ -306,7 +354,7 @@ func (tc *TrafficController) DeleteHTTPServer(namespace, name string) error { return nil } -// GetHTTPServer gets HTTP servers by it's namespace and name +// GetHTTPServer gets HTTP servers by its namespace and name func (tc *TrafficController) GetHTTPServer(namespace, name string) (*supervisor.ObjectEntity, bool) { tc.mutex.Lock() defer tc.mutex.Unlock() diff --git a/pkg/protocol/layer4.go b/pkg/protocol/layer4.go deleted file mode 100644 index f6e1dbbcfe..0000000000 --- a/pkg/protocol/layer4.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package protocol - -import ( - "github.com/megaease/easegress/pkg/context" -) - -type ( - // Layer4Handler is the common handler for the all backends - // which handle the traffic from layer4(tcp/udp) server. - Layer4Handler interface { - - // InboundHandler filter handle inbound stream from client via ctx - // put handle result to object and pass to next filter - InboundHandler(ctx context.Layer4Context, object interface{}) - - // OutboundHandler filter handle inbound stream from upstream via ctx - // put handle result to object and pass to next filter - OutboundHandler(ctx context.Layer4Context, object interface{}) - } - - // Layer4MuxMapper gets layer4 handler pipeline with mutex - Layer4MuxMapper interface { - GetHandler(name string) (Layer4Handler, bool) - } -) diff --git a/pkg/protocol/http.go b/pkg/protocol/protocol.go similarity index 63% rename from pkg/protocol/http.go rename to pkg/protocol/protocol.go index 3fcb49a3ce..14e7e368da 100644 --- a/pkg/protocol/http.go +++ b/pkg/protocol/protocol.go @@ -30,8 +30,21 @@ type ( Handle(ctx context.HTTPContext) } + // Layer4Handler is the common handler for the all backends + // which handle the traffic from layer4(tcp/udp) server. + Layer4Handler interface { + // Handle read buffer from context, and set write buffer to context, + // its filter's response to release read buffer in context + // and its filter's response to determine which time to flush buffer to client or upstream + Handle(ctx context.Layer4Context) + } + // MuxMapper gets HTTP handler pipeline with mutex MuxMapper interface { - GetHandler(name string) (HTTPHandler, bool) + // GetHTTPHandler get http handler from mux + GetHTTPHandler(name string) (HTTPHandler, bool) + + // GetLayer4Handler get layer4 handler from mux + GetLayer4Handler(name string) (Layer4Handler, bool) } ) diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go index 236b3ae469..eb40966db3 100644 --- a/pkg/registry/registry.go +++ b/pkg/registry/registry.go @@ -45,6 +45,7 @@ import ( _ "github.com/megaease/easegress/pkg/object/httppipeline" _ "github.com/megaease/easegress/pkg/object/httpserver" _ "github.com/megaease/easegress/pkg/object/ingresscontroller" + _ "github.com/megaease/easegress/pkg/object/layer4pipeline" _ "github.com/megaease/easegress/pkg/object/layer4rawserver" _ "github.com/megaease/easegress/pkg/object/meshcontroller" _ "github.com/megaease/easegress/pkg/object/nacosserviceregistry" diff --git a/pkg/supervisor/supervisor.go b/pkg/supervisor/supervisor.go index b697a05667..37ea8bb821 100644 --- a/pkg/supervisor/supervisor.go +++ b/pkg/supervisor/supervisor.go @@ -202,7 +202,7 @@ func (s *Supervisor) ObjectRegistry() *ObjectRegistry { return s.objectRegistry } -// WalkControllers walks every controllers until walkFn returns false. +// WalkControllers walks every controller until walkFn returns false. 
func (s *Supervisor) WalkControllers(walkFn WalkFunc) { defer func() { if err := recover(); err != nil { diff --git a/pkg/util/connection/connection.go b/pkg/util/connection/connection.go index fc246517c5..f9a763b33a 100644 --- a/pkg/util/connection/connection.go +++ b/pkg/util/connection/connection.go @@ -59,7 +59,8 @@ type Connection struct { } // NewClientConn wrap connection create from client -func NewClientConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan struct{}, connStopChan chan struct{}) *Connection { +// @param remoteAddr client addr for udp proxy use +func NewClientConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan, connStopChan chan struct{}) *Connection { clientConn := &Connection{ conn: conn, connected: 1, @@ -108,6 +109,10 @@ func (c *Connection) SetOnRead(onRead func(buffer iobufferpool.IoBuffer)) { c.onReadBuffer = onRead } +func (c *Connection) OnRead(buffer iobufferpool.IoBuffer) { + c.onReadBuffer(buffer) +} + // Start running connection read/write loop func (c *Connection) Start() { if c.protocol == "udp" && c.conn.RemoteAddr() == nil { @@ -495,7 +500,7 @@ type UpstreamConnection struct { connectOnce sync.Once } -func NewUpstreamConn(connectTimeout time.Duration, remoteAddr net.Addr, stopChan, connStopChan chan struct{}) *UpstreamConnection { +func NewUpstreamConn(connectTimeout time.Duration, remoteAddr net.Addr, listenerStopChan, ConnStopChan chan struct{}) *UpstreamConnection { conn := &UpstreamConnection{ Connection: Connection{ connected: 1, @@ -507,8 +512,8 @@ func NewUpstreamConn(connectTimeout time.Duration, remoteAddr net.Addr, stopChan writeBufferChan: make(chan *[]iobufferpool.IoBuffer, 8), mu: sync.Mutex{}, - connStopChan: connStopChan, - listenerStopChan: stopChan, + connStopChan: ConnStopChan, + listenerStopChan: listenerStopChan, }, connectTimeout: connectTimeout, } diff --git a/pkg/util/connection/udpreceiver.go b/pkg/util/connection/udpreceiver.go index 9431850fef..9b15d57a90 100644 --- a/pkg/util/connection/udpreceiver.go +++ b/pkg/util/connection/udpreceiver.go @@ -36,8 +36,8 @@ func GetProxyMapKey(raddr, laddr string) string { } // SetUDPProxyMap set udp session by udp server listener -func SetUDPProxyMap(key string, layer4Context interface{}) { - ProxyMap.Store(key, layer4Context) +func SetUDPProxyMap(key string, clientConn interface{}) { + ProxyMap.Store(key, clientConn) } // DelUDPProxyMap delete udp session diff --git a/pkg/util/gracenet/gracenet.go b/pkg/util/gracenet/gracenet.go deleted file mode 100644 index 55268427cd..0000000000 --- a/pkg/util/gracenet/gracenet.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package gracenet - -import "github.com/megaease/easegress/pkg/graceupdate" - -var GNet = graceupdate.Global diff --git a/pkg/util/httpstat/httpstat.go b/pkg/util/httpstat/httpstat.go index 310f7327cb..94bc1a1391 100644 --- a/pkg/util/httpstat/httpstat.go +++ b/pkg/util/httpstat/httpstat.go @@ -127,7 +127,7 @@ func New() *HTTPStat { // NOTE: The methods of HTTPStats use Mutex to protect themselves. // It does not hurt affect performance , because all statistics -// are called after finishing all other stuff in HTTPContext. +// are called after finishing all others stuff in HTTPContext. // Stat stats the ctx. func (hs *HTTPStat) Stat(m *Metric) { diff --git a/pkg/util/layer4stat/layer4stat.go b/pkg/util/layer4stat/layer4stat.go index 842f315e37..752e1c58a0 100644 --- a/pkg/util/layer4stat/layer4stat.go +++ b/pkg/util/layer4stat/layer4stat.go @@ -19,6 +19,7 @@ package layer4stat import ( "sync" + "time" "github.com/megaease/easegress/pkg/util/sampler" "github.com/rcrowley/go-metrics" @@ -54,6 +55,14 @@ type ( respSize uint64 } + // Metric is the package of statistics at once. + Metric struct { + Err bool // client/upstream connection send/receive data failed + Duration time.Duration + ReqSize uint64 + RespSize uint64 + } + // Status contains all status generated by HTTPStat. Status struct { Count uint64 `yaml:"count"` @@ -89,12 +98,118 @@ type ( } ) -// Status get layer4 proxy status -func (s *Layer4Stat) Status() *Status { - panic("implement me") -} - // New get new layer4 stat func New() *Layer4Stat { - panic("implement me") + hs := &Layer4Stat{ + rate1: metrics.NewEWMA1(), + rate5: metrics.NewEWMA5(), + rate15: metrics.NewEWMA15(), + + errRate1: metrics.NewEWMA1(), + errRate5: metrics.NewEWMA5(), + errRate15: metrics.NewEWMA15(), + + durationSampler: sampler.NewDurationSampler(), + } + + return hs +} + +// Stat stats the ctx. +func (l *Layer4Stat) Stat(m *Metric) { + l.mutex.Lock() + defer l.mutex.Unlock() + + l.count++ + l.rate1.Update(1) + l.rate5.Update(1) + l.rate15.Update(1) + + if m.Err { + l.errCount++ + l.errRate1.Update(1) + l.errRate5.Update(1) + l.errRate15.Update(1) + } + + duration := uint64(m.Duration.Milliseconds()) + l.total += duration + if l.count == 1 { + l.min, l.mean, l.max = duration, duration, duration + } else { + if duration < l.min { + l.min = duration + } + if duration > l.max { + l.max = duration + } + l.mean = l.total / l.count + } + + l.durationSampler.Update(m.Duration) + + l.reqSize += m.ReqSize + l.respSize += m.RespSize +} + +// Status returns Layer4Stat Status, It assumes it is called every five seconds. 
+// https://github.com/rcrowley/go-metrics/blob/3113b8401b8a98917cde58f8bbd42a1b1c03b1fd/ewma.go#L98-L99 +func (l *Layer4Stat) Status() *Status { + l.mutex.Lock() + defer l.mutex.Unlock() + + l.rate1.Tick() + l.rate5.Tick() + l.rate15.Tick() + l.errRate1.Tick() + l.errRate5.Tick() + l.errRate15.Tick() + + m1, m5, m15 := l.rate1.Rate(), l.rate5.Rate(), l.rate15.Rate() + m1Err, m5Err, m15Err := l.errRate1.Rate(), l.errRate5.Rate(), l.errRate15.Rate() + m1ErrPercent, m5ErrPercent, m15ErrPercent := 0.0, 0.0, 0.0 + if m1 > 0 { + m1ErrPercent = m1Err / m1 + } + if m5 > 0 { + m1ErrPercent = m5Err / m5 + } + if m15 > 0 { + m1ErrPercent = m15Err / m15 + } + + percentiles := l.durationSampler.Percentiles() + + status := &Status{ + Count: l.count, + M1: m1, + M5: m5, + M15: m15, + + ErrCount: l.errCount, + M1Err: m1Err, + M5Err: m5Err, + M15Err: m15Err, + + M1ErrPercent: m1ErrPercent, + M5ErrPercent: m5ErrPercent, + M15ErrPercent: m15ErrPercent, + + Min: l.min, + Mean: l.mean, + Max: l.max, + + P25: percentiles[0], + P50: percentiles[1], + P75: percentiles[2], + P95: percentiles[3], + P98: percentiles[4], + P99: percentiles[5], + P999: percentiles[6], + + ReqSize: l.reqSize, + RespSize: l.respSize, + } + + return status } From de51f3182caaa44ee3621ae5b20af85c200e21f9 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 12 Oct 2021 16:59:47 +0800 Subject: [PATCH 23/99] [layer4proxy] simplify layer4 server --- pkg/context/layer4context.go | 219 ++++---------------- pkg/object/layer4rawserver/backendserver.go | 3 +- pkg/object/layer4rawserver/layer4server.go | 22 +- pkg/object/layer4rawserver/listener.go | 15 +- pkg/object/layer4rawserver/mux.go | 36 +--- pkg/object/layer4rawserver/pool.go | 16 +- pkg/object/layer4rawserver/runtime.go | 167 ++++++--------- pkg/object/layer4rawserver/spec.go | 15 +- pkg/protocol/protocol.go | 3 +- pkg/util/connection/connection.go | 37 +++- pkg/util/layer4stat/layer4stat.go | 215 ------------------- 11 files changed, 170 insertions(+), 578 deletions(-) delete mode 100644 pkg/util/layer4stat/layer4stat.go diff --git a/pkg/context/layer4context.go b/pkg/context/layer4context.go index 71b19dc962..0ace6dbb64 100644 --- a/pkg/context/layer4context.go +++ b/pkg/context/layer4context.go @@ -18,13 +18,16 @@ package context import ( - "github.com/megaease/easegress/pkg/util/layer4stat" "net" "sync" "time" +) + +type ConnectionType uint16 - "github.com/megaease/easegress/pkg/util/connection" - "github.com/megaease/easegress/pkg/util/iobufferpool" +const ( + DownstreamConnection ConnectionType = iota + UpstreamConnection ) type ( @@ -35,38 +38,15 @@ type ( Lock() Unlock() - // Protocol current support tcp/udp, future maybe support unix Protocol() string - // LocalAddr listen addr for layer4 server LocalAddr() net.Addr - // ClientAddr client addr - ClientAddr() net.Addr - // UpstreamAddr addr for upstream server UpstreamAddr() net.Addr - - // GetClientReadBuffer get io buffer read from client - GetClientReadBuffer() iobufferpool.IoBuffer - // GetWriteToClientBuffer get io buffer write to client - GetWriteToClientBuffer() iobufferpool.IoBuffer - // GetUpstreamReadBuffer get io buffer read from upstream - GetUpstreamReadBuffer() iobufferpool.IoBuffer - // GetWriteToUpstreamBuffer get io buffer write to upstream - GetWriteToUpstreamBuffer() iobufferpool.IoBuffer - - // WriteToClient get write to client buffer and send to client - WriteToClient() - // DirectWriteToClient directly write to client - DirectWriteToClient(buffer iobufferpool.IoBuffer) - // WriteToUpstream get write to upstream 
buffer and send to upstream - WriteToUpstream() - // DirectWriteToUpstream directly write to upstream - DirectWriteToUpstream(buffer iobufferpool.IoBuffer) - - // StatMetric get - StatMetric() *layer4stat.Metric - - // Finish close context and release buffer resource - Finish() + DownstreamAddr() net.Addr + // SetDownstreamAddr use for udp downstream addr + SetDownstreamAddr(addr net.Addr) + // Finish close by downstream connection and upstream connection + Finish(t ConnectionType) + // Duration context alive duration Duration() time.Duration CallNextHandler(lastResult string) string @@ -74,74 +54,40 @@ type ( } layer4Context struct { - mu sync.Mutex - reqSize uint64 - respSize uint64 - - protocol string - localAddr net.Addr - clientAddr net.Addr - upstreamAddr net.Addr + mutex sync.Mutex - clientConn *connection.Connection - upstreamConn *connection.UpstreamConnection - - clientReadBuffer iobufferpool.IoBuffer - clientWriteBuffer iobufferpool.IoBuffer - upstreamReadBuffer iobufferpool.IoBuffer - upstreamWriteBuffer iobufferpool.IoBuffer - - startTime *time.Time // connection accept time - endTime *time.Time // connection close time + protocol string // tcp/udp + localAddr net.Addr + downstreamAddr net.Addr + upstreamAddr net.Addr + startTime *time.Time // connection accept time + endTime *time.Time // connection close time caller HandlerCaller } ) // NewLayer4Context creates an Layer4Context. -// @param cliAddr udp client addr(client addr can not get from udp listen server) -func NewLayer4Context(cliConn *connection.Connection, upstreamConn *connection.UpstreamConnection, cliAddr net.Addr) *layer4Context { - startTime := time.Now() - ctx := &layer4Context{ - protocol: cliConn.Protocol(), - localAddr: cliConn.LocalAddr(), - upstreamAddr: upstreamConn.RemoteAddr(), - clientConn: cliConn, - upstreamConn: upstreamConn, - startTime: &startTime, - - mu: sync.Mutex{}, - } - - if cliAddr != nil { - ctx.clientAddr = cliAddr - } else { - ctx.clientAddr = cliConn.RemoteAddr() // nil for udp server conn - } +func NewLayer4Context(protocol string, localAddr net.Addr, downstreamAddr, upstreamAddr net.Addr) *layer4Context { - switch ctx.protocol { - case "udp": - ctx.clientReadBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) - ctx.clientWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) - ctx.upstreamReadBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) - ctx.upstreamWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) - case "tcp": - ctx.clientReadBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) - ctx.clientWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) - ctx.upstreamReadBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) - ctx.upstreamWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) + startTime := time.Now() + res := layer4Context{ + mutex: sync.Mutex{}, + protocol: protocol, + startTime: &startTime, + localAddr: localAddr, + downstreamAddr: downstreamAddr, + upstreamAddr: upstreamAddr, } - return ctx + return &res } -// Lock acquire context lock func (ctx *layer4Context) Lock() { - ctx.mu.Lock() + ctx.mutex.Lock() } -// Unlock release lock func (ctx *layer4Context) Unlock() { - ctx.mu.Unlock() + ctx.mutex.Unlock() } // Protocol get proxy protocol @@ -153,111 +99,34 @@ func (ctx *layer4Context) LocalAddr() net.Addr { return ctx.localAddr } -func (ctx *layer4Context) ClientAddr() net.Addr { - return ctx.clientAddr +func (ctx 
*layer4Context) DownstreamAddr() net.Addr { + return ctx.downstreamAddr } -// UpstreamAddr get upstream addr -func (ctx *layer4Context) UpstreamAddr() net.Addr { - return ctx.upstreamAddr +func (ctx *layer4Context) SetDownstreamAddr(addr net.Addr) { + ctx.downstreamAddr = addr } -func (ctx *layer4Context) StatMetric() *layer4stat.Metric { - return &layer4stat.Metric{ - Err: false, - Duration: ctx.Duration(), - ReqSize: ctx.reqSize, - RespSize: ctx.respSize, - } -} - -// GetClientReadBuffer get read buffer for client conn -func (ctx *layer4Context) GetClientReadBuffer() iobufferpool.IoBuffer { - return ctx.clientReadBuffer -} - -// GetWriteToClientBuffer get write buffer sync to client -func (ctx *layer4Context) GetWriteToClientBuffer() iobufferpool.IoBuffer { - return ctx.clientWriteBuffer -} - -// GetUpstreamReadBuffer get read buffer for upstream conn -func (ctx *layer4Context) GetUpstreamReadBuffer() iobufferpool.IoBuffer { - return ctx.upstreamReadBuffer -} - -// GetWriteToUpstreamBuffer get write buffer sync to upstream -func (ctx *layer4Context) GetWriteToUpstreamBuffer() iobufferpool.IoBuffer { - return ctx.upstreamWriteBuffer -} - -// WriteToClient filter handle client upload data, send result to upstream connection -func (ctx *layer4Context) WriteToClient() { - if ctx.clientWriteBuffer == nil || ctx.upstreamWriteBuffer.Len() == 0 { - return - } - buf := ctx.clientWriteBuffer.Clone() - ctx.respSize += uint64(buf.Len()) - ctx.clientWriteBuffer.Reset() - _ = ctx.upstreamConn.Write(buf) -} - -func (ctx *layer4Context) DirectWriteToClient(buffer iobufferpool.IoBuffer) { - if buffer == nil || buffer.Len() == 0 { - return - } - ctx.respSize += uint64(buffer.Len()) - _ = ctx.upstreamConn.Write(buffer.Clone()) +func (ctx *layer4Context) UpstreamAddr() net.Addr { + return ctx.upstreamAddr } -// WriteToUpstream filter handle client upload data, send result to upstream connection -func (ctx *layer4Context) WriteToUpstream() { - if ctx.upstreamWriteBuffer == nil || ctx.upstreamWriteBuffer.Len() == 0 { - return - } - buf := ctx.upstreamWriteBuffer.Clone() - ctx.reqSize += uint64(buf.Len()) - _ = ctx.clientConn.Write(buf) - ctx.upstreamWriteBuffer.Reset() +func (ctx *layer4Context) Finish(t ConnectionType) { + finish := time.Now() + ctx.endTime = &finish } -func (ctx *layer4Context) DirectWriteToUpstream(buffer iobufferpool.IoBuffer) { - if buffer == nil || buffer.Len() == 0 { - return +func (ctx *layer4Context) Duration() time.Duration { + if ctx.endTime != nil { + return ctx.endTime.Sub(*ctx.startTime) } - ctx.reqSize += uint64(buffer.Len()) - _ = ctx.clientConn.Write(buffer.Clone()) + return time.Now().Sub(*ctx.startTime) } -// CallNextHandler call handler caller func (ctx *layer4Context) CallNextHandler(lastResult string) string { return ctx.caller(lastResult) } -// SetHandlerCaller set handler caller func (ctx *layer4Context) SetHandlerCaller(caller HandlerCaller) { ctx.caller = caller } - -// Finish context finish handler -func (ctx *layer4Context) Finish() { - _ = iobufferpool.PutIoBuffer(ctx.clientReadBuffer) - _ = iobufferpool.PutIoBuffer(ctx.clientWriteBuffer) - _ = iobufferpool.PutIoBuffer(ctx.upstreamReadBuffer) - _ = iobufferpool.PutIoBuffer(ctx.upstreamWriteBuffer) - ctx.clientReadBuffer = nil - ctx.clientWriteBuffer = nil - ctx.upstreamReadBuffer = nil - ctx.upstreamWriteBuffer = nil - - finish := time.Now() - ctx.endTime = &finish -} - -// Duration get context execute duration -func (ctx *layer4Context) Duration() time.Duration { - if ctx.endTime != nil { - return 
ctx.endTime.Sub(*ctx.startTime) - } - return time.Now().Sub(*ctx.startTime) -} diff --git a/pkg/object/layer4rawserver/backendserver.go b/pkg/object/layer4rawserver/backendserver.go index 6357eac399..79cb7d85bb 100644 --- a/pkg/object/layer4rawserver/backendserver.go +++ b/pkg/object/layer4rawserver/backendserver.go @@ -50,7 +50,8 @@ type ( serviceRegistry *serviceregistry.ServiceRegistry serviceWatcher serviceregistry.ServiceWatcher static *staticServers - done chan struct{} + + done chan struct{} } staticServers struct { diff --git a/pkg/object/layer4rawserver/layer4server.go b/pkg/object/layer4rawserver/layer4server.go index 1e98b6b140..1759a41b9a 100644 --- a/pkg/object/layer4rawserver/layer4server.go +++ b/pkg/object/layer4rawserver/layer4server.go @@ -23,15 +23,15 @@ import ( ) const ( - // Category is the category of HTTPServer. + // Category is the category of Layer4Server. Category = supervisor.CategoryTrafficGate - // Kind is the kind of HTTPServer. + // Kind is the kind of Layer4Server. Kind = "Layer4Server" ) func init() { - //supervisor.Register(&Layer4Server{}) + supervisor.Register(&Layer4Server{}) } type ( @@ -54,16 +54,20 @@ func (l4 *Layer4Server) Kind() string { // DefaultSpec returns the default spec of Layer4Server. func (l4 *Layer4Server) DefaultSpec() interface{} { return &Spec{ - MaxConnections: 10240, - ProxyConnectTimeout: 15 * 1000, + MaxConnections: 10240, + ConnectTimeout: 5 * 1000, } } +// Validate validates the layer4 server structure. +func (l4 *Layer4Server) Validate() error { + return nil +} + // Init initializes Layer4Server. func (l4 *Layer4Server) Init(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { l4.runtime = newRuntime(superSpec, muxMapper) - l4.runtime.eventChan <- &eventReload{ nextSuperSpec: superSpec, muxMapper: muxMapper, @@ -72,8 +76,8 @@ func (l4 *Layer4Server) Init(superSpec *supervisor.Spec, muxMapper protocol.MuxM // Inherit inherits previous generation of Layer4Server. func (l4 *Layer4Server) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper protocol.MuxMapper) { - l4.runtime = previousGeneration.(*Layer4Server).runtime + l4.runtime = previousGeneration.(*Layer4Server).runtime l4.runtime.eventChan <- &eventReload{ nextSuperSpec: superSpec, muxMapper: muxMapper, @@ -82,9 +86,7 @@ func (l4 *Layer4Server) Inherit(superSpec *supervisor.Spec, previousGeneration s // Status is the wrapper of runtimes Status. func (l4 *Layer4Server) Status() *supervisor.Status { - return &supervisor.Status{ - ObjectStatus: l4.runtime.Status(), - } + return &supervisor.Status{} } // Close closes Layer4Server. 
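A note on the runtime wiring above: Init and Inherit never start the listener themselves; they only push an eventReload onto the runtime's eventChan, and the runtime's fsm goroutine applies it. Below is a minimal sketch of that dispatch loop. Only handleEventReload and handleEventClose appear in this patch, so the handlers for eventCheckFailed and eventServeFailed (and their exact behavior) are assumptions for illustration, not the actual implementation.

    // fsm serializes all state changes of the runtime by draining eventChan.
    // Sketch only: the eventCheckFailed/eventServeFailed handlers are assumed.
    func (r *runtime) fsm() {
    	for e := range r.eventChan {
    		switch e := e.(type) {
    		case *eventCheckFailed:
    			r.handleEventCheckFailed(e) // assumed: retry startServer while state is stateFailed
    		case *eventServeFailed:
    			r.handleEventServeFailed(e) // assumed: drop stale events by comparing e.startNum with r.startNum
    		case *eventReload:
    			r.handleEventReload(e) // shown in this patch: reload mux rules, pool and, if needed, the listener
    		case *eventClose:
    			r.handleEventClose(e) // shown in this patch: close listener, mux and pool, then close(e.done)
    			return
    		default:
    			logger.Errorf("BUG: unknown event type %T", e)
    		}
    	}
    }

Because every transition funnels through this single goroutine, Close only needs to send an eventClose carrying a done channel and block on <-done until handleEventClose closes it.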
diff --git a/pkg/object/layer4rawserver/listener.go b/pkg/object/layer4rawserver/listener.go index 788a292f9e..3f67598169 100644 --- a/pkg/object/layer4rawserver/listener.go +++ b/pkg/object/layer4rawserver/listener.go @@ -38,17 +38,16 @@ type listener struct { protocol string // enum:udp/tcp localAddr string - mutex *sync.Mutex - stopChan chan struct{} + mutex *sync.Mutex + stopChan chan struct{} + keepalive bool // keepalive for tcp + maxConns uint32 // maxConn for tcp listener - udpListener net.PacketConn // udp listener - - keepalive bool // keepalive for tcp - maxConns uint32 // maxConn for tcp listener tcpListener *limitlistener.LimitListener // tcp listener with accept limit - onTcpAccept func(conn net.Conn, listenerStop chan struct{}) // tcp accept handle - onUdpAccept func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) // udp accept handle + udpListener net.PacketConn // udp listener + onTcpAccept func(conn net.Conn, listenerStop chan struct{}) // tcp accept handle + onUdpAccept func(downstreamAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) // udp accept handle } func newListener(spec *Spec, onAccept func(conn net.Conn, listenerStop chan struct{}), diff --git a/pkg/object/layer4rawserver/mux.go b/pkg/object/layer4rawserver/mux.go index 417b423ac4..72de0608d5 100644 --- a/pkg/object/layer4rawserver/mux.go +++ b/pkg/object/layer4rawserver/mux.go @@ -37,32 +37,11 @@ type ( superSpec *supervisor.Spec spec *Spec + ipFilter *ipfilter.IPFilter muxMapper protocol.MuxMapper - - ipFilter *ipfilter.IPFilter - ipFilterChan *ipfilter.IPFilters } ) -// newIPFilterChain returns nil if the number of final filters is zero. -func newIPFilterChain(parentIPFilters *ipfilter.IPFilters, childSpec *ipfilter.Spec) *ipfilter.IPFilters { - var ipFilters *ipfilter.IPFilters - if parentIPFilters != nil { - ipFilters = ipfilter.NewIPFilters(parentIPFilters.Filters()...) 
- } else { - ipFilters = ipfilter.NewIPFilters() - } - - if childSpec != nil { - ipFilters.Append(ipfilter.New(childSpec)) - } - - if len(ipFilters.Filters()) == 0 { - return nil - } - return ipFilters -} - func newIPFilter(spec *ipfilter.Spec) *ipfilter.IPFilter { if spec == nil { return nil @@ -76,7 +55,7 @@ func (mr *muxRules) pass(ctx context.Layer4Context) bool { return true } - switch addr := ctx.ClientAddr().(type) { + switch addr := ctx.DownstreamAddr().(type) { case *net.UDPAddr: return mr.ipFilter.Allow(addr.IP.String()) case *net.TCPAddr: @@ -101,11 +80,10 @@ func (m *mux) reloadRules(superSpec *supervisor.Spec, muxMapper protocol.MuxMapp spec := superSpec.ObjectSpec().(*Spec) rules := &muxRules{ - superSpec: superSpec, - spec: spec, - muxMapper: muxMapper, - ipFilter: newIPFilter(spec.IPFilter), - ipFilterChan: newIPFilterChain(nil, spec.IPFilter), + superSpec: superSpec, + spec: spec, + muxMapper: muxMapper, + ipFilter: newIPFilter(spec.IPFilter), } m.rules.Store(rules) } @@ -127,5 +105,5 @@ func (m *mux) GetHandler(name string) (protocol.Layer4Handler, bool) { } func (m *mux) close() { - // TODO add close tracing + // may be close tracing in future } diff --git a/pkg/object/layer4rawserver/pool.go b/pkg/object/layer4rawserver/pool.go index dcbffb6713..a27d096836 100644 --- a/pkg/object/layer4rawserver/pool.go +++ b/pkg/object/layer4rawserver/pool.go @@ -19,7 +19,6 @@ package layer4rawserver import ( "github.com/megaease/easegress/pkg/supervisor" - "github.com/megaease/easegress/pkg/util/layer4stat" ) type ( @@ -27,9 +26,8 @@ type ( pool struct { spec *PoolSpec - tagPrefix string - layer4Stat *layer4stat.Layer4Stat - servers *servers + tagPrefix string + servers *servers } ) @@ -37,17 +35,11 @@ func newPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) *po return &pool{ spec: spec, - tagPrefix: tagPrefix, - layer4Stat: layer4stat.New(), - servers: newServers(super, spec), + tagPrefix: tagPrefix, + servers: newServers(super, spec), } } -func (p *pool) status() *PoolStatus { - s := &PoolStatus{Stat: p.layer4Stat.Status()} - return s -} - func (p *pool) close() { p.servers.close() } diff --git a/pkg/object/layer4rawserver/runtime.go b/pkg/object/layer4rawserver/runtime.go index cf4bf772b3..4ae7261cba 100644 --- a/pkg/object/layer4rawserver/runtime.go +++ b/pkg/object/layer4rawserver/runtime.go @@ -30,7 +30,6 @@ import ( "github.com/megaease/easegress/pkg/supervisor" "github.com/megaease/easegress/pkg/util/connection" "github.com/megaease/easegress/pkg/util/iobufferpool" - "github.com/megaease/easegress/pkg/util/layer4stat" ) const ( @@ -64,35 +63,21 @@ type ( runtime struct { superSpec *supervisor.Spec spec *Spec + mux *mux + pool *pool // backend servers + listener *listener // layer4 server + startNum uint64 + eventChan chan interface{} // receive traffic controller event state atomic.Value // runtime running state err atomic.Value // runtime running error - - startNum uint64 // runtime startEventLoop num - eventChan chan interface{} // receive traffic controller event - - mux *mux - pool *pool - listener *listener - layer4Stat *layer4stat.Layer4Stat - } - - // Status contains all status generated by runtime, for displaying to users. 
- Status struct { - Health string `yaml:"health"` - - State stateType `yaml:"state"` - Error string `yaml:"error,omitempty"` - - *layer4stat.Status } ) func newRuntime(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) *runtime { r := &runtime{ - superSpec: superSpec, - eventChan: make(chan interface{}, 10), - layer4Stat: layer4stat.New(), + superSpec: superSpec, + eventChan: make(chan interface{}, 10), } r.mux = newMux(muxMapper) @@ -101,7 +86,6 @@ func newRuntime(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) *runti go r.fsm() go r.checkFailed() - return r } @@ -112,18 +96,6 @@ func (r *runtime) Close() { <-done } -// Status returns HTTPServer Status. -func (r *runtime) Status() *Status { - health := r.getError().Error() - - return &Status{ - Health: health, - State: r.getState(), - Error: r.getError().Error(), - Status: r.layer4Stat.Status(), - } -} - // FSM is the finite-state-machine for the runtime. func (r *runtime) fsm() { for e := range r.eventChan { @@ -150,7 +122,6 @@ func (r *runtime) reload(nextSuperSpec *supervisor.Spec, muxMapper protocol.MuxM r.superSpec = nextSuperSpec r.mux.reloadRules(nextSuperSpec, muxMapper) - // TODO reload pool with guard? nextSpec := nextSuperSpec.ObjectSpec().(*Spec) r.pool = newPool(nextSuperSpec.Super(), nextSpec.Pool, "") @@ -214,9 +185,8 @@ func (r *runtime) needRestartServer(nextSpec *Spec) bool { // The change of options below need not restart the layer4 server. x.KeepAlive, y.KeepAlive = true, true - x.TcpNodelay, y.TcpNodelay = true, true x.MaxConnections, y.MaxConnections = 0, 0 - x.ProxyConnectTimeout, y.ProxyTimeout = 0, 0 + x.ConnectTimeout, y.ProxyTimeout = 0, 0 x.ProxyTimeout, y.ProxyTimeout = 0, 0 x.Pool, y.Pool = nil, nil @@ -237,7 +207,7 @@ func (r *runtime) startServer() { if err := l.listen(); err != nil { r.setState(stateFailed) r.setError(err) - logger.Errorf("listen for %s :%d failed, err: %+v", r.spec.Protocol, r.spec.Port, err) + logger.Errorf("listen for %s %s failed, err: %+v", l.protocol, l.localAddr, err) _ = l.close() r.eventChan <- &eventServeFailed{ @@ -293,71 +263,61 @@ func (r *runtime) handleEventReload(e *eventReload) { func (r *runtime) handleEventClose(e *eventClose) { r.closeServer() r.mux.close() + r.pool.close() close(e.done) } func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) { - return func(conn net.Conn, listenerStop chan struct{}) { - remote := conn.RemoteAddr().(*net.TCPAddr).IP.String() - if r.mux.AllowIP(remote) { - _ = conn.Close() + return func(rawConn net.Conn, listenerStop chan struct{}) { + downstream := rawConn.RemoteAddr().(*net.TCPAddr).IP.String() + if r.mux.AllowIP(downstream) { + _ = rawConn.Close() logger.Infof("close tcp connection from %s to %s which ip is not allowed", - conn.RemoteAddr().String(), conn.LocalAddr().String()) + rawConn.RemoteAddr().String(), rawConn.LocalAddr().String()) return } - server, err := r.pool.servers.next(remote) + server, err := r.pool.servers.next(downstream) if err != nil { - _ = conn.Close() + _ = rawConn.Close() logger.Errorf("close tcp connection due to no available upstream server, local addr: %s, err: %+v", - conn.LocalAddr(), err) + rawConn.LocalAddr(), err) return } - serverAddr, _ := net.ResolveTCPAddr("tcp", server.Addr) - connTimeout := time.Duration(r.spec.ProxyConnectTimeout) * time.Millisecond - upstreamStopChan := make(chan struct{}) - upstreamConn := connection.NewUpstreamConn(connTimeout, serverAddr, listenerStop, upstreamStopChan) + upstreamAddr, _ := net.ResolveTCPAddr("tcp", server.Addr) + 
upstreamConn := connection.NewUpstreamConn(time.Duration(r.spec.ConnectTimeout)*time.Millisecond, upstreamAddr, listenerStop) if err := upstreamConn.Connect(); err != nil { logger.Errorf("close tcp connection due to upstream conn connect failed, local addr: %s, err: %+v", - conn.LocalAddr().String(), err) - _ = conn.Close() + rawConn.LocalAddr().String(), err) + _ = rawConn.Close() } else { - clientStopChan := make(chan struct{}) - cliConn := connection.NewClientConn(conn, conn.RemoteAddr(), listenerStop, clientStopChan) - ctx := context.NewLayer4Context(cliConn, upstreamConn, conn.RemoteAddr()) - r.setOnReadHandler(cliConn, upstreamConn, ctx) - - // finish context lifecycle - go func(chans ...chan struct{}) { - for _, v := range chans { - <-v - ctx.Finish() - } - }(upstreamStopChan, clientStopChan) + downstreamConn := connection.NewDownstreamConn(rawConn, rawConn.RemoteAddr(), listenerStop) + ctx := context.NewLayer4Context("tcp", rawConn.LocalAddr(), rawConn.RemoteAddr(), upstreamAddr) + r.setOnReadHandler(downstreamConn, upstreamConn, ctx) } } } -func (r *runtime) onUdpAccept() func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) { - return func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) { - remote := cliAddr.(*net.UDPAddr).IP.String() - if r.mux.AllowIP(remote) { +func (r *runtime) onUdpAccept() func(cliAddr net.Addr, rawConn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) { + return func(cliAddr net.Addr, rawConn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) { + downstream := cliAddr.(*net.UDPAddr).IP.String() + if r.mux.AllowIP(downstream) { logger.Infof("discard udp packet from %s to %s which ip is not allowed", cliAddr.String(), - conn.LocalAddr().String()) + rawConn.LocalAddr().String()) return } - localAddr := conn.LocalAddr() + localAddr := rawConn.LocalAddr() key := connection.GetProxyMapKey(localAddr.String(), cliAddr.String()) - if rawCtx, ok := connection.ProxyMap.Load(key); ok { - ctx := rawCtx.(*connection.Connection) - ctx.OnRead(packet) + if rawDownstreamConn, ok := connection.ProxyMap.Load(key); ok { + downstreamConn := rawDownstreamConn.(*connection.Connection) + downstreamConn.OnRead(packet) return } - server, err := r.pool.servers.next(remote) + server, err := r.pool.servers.next(downstream) if err != nil { logger.Infof("discard udp packet from %s to %s due to can not find upstream server, err: %+v", cliAddr.String(), localAddr.String()) @@ -365,49 +325,46 @@ func (r *runtime) onUdpAccept() func(cliAddr net.Addr, conn net.Conn, listenerSt } upstreamAddr, _ := net.ResolveUDPAddr("udp", server.Addr) - connTimeout := time.Duration(r.spec.ProxyConnectTimeout) * time.Millisecond - upstreamStopChan := make(chan struct{}) - upstreamConn := connection.NewUpstreamConn(connTimeout, upstreamAddr, listenerStop, upstreamStopChan) + upstreamConn := connection.NewUpstreamConn(time.Duration(r.spec.ConnectTimeout)*time.Millisecond, upstreamAddr, listenerStop) if err := upstreamConn.Connect(); err != nil { logger.Errorf("discard udp packet due to upstream connect failed, local addr: %s, err: %+v", localAddr, err) return } - cliConn := connection.NewClientConn(conn, conn.RemoteAddr(), listenerStop, nil) - ctx := context.NewLayer4Context(cliConn, upstreamConn, cliAddr) - connection.SetUDPProxyMap(connection.GetProxyMapKey(localAddr.String(), cliAddr.String()), &cliConn) - r.setOnReadHandler(cliConn, upstreamConn, ctx) - - // finish context lifecycle - go 
func(chans ...chan struct{}) { - for _, v := range chans { - <-v - ctx.Finish() - } - }(upstreamStopChan) + downstreamConn := connection.NewDownstreamConn(rawConn, rawConn.RemoteAddr(), listenerStop) + ctx := context.NewLayer4Context("udp", localAddr, upstreamAddr, upstreamAddr) + connection.SetUDPProxyMap(connection.GetProxyMapKey(localAddr.String(), cliAddr.String()), &downstreamConn) + r.setOnReadHandler(downstreamConn, upstreamConn, ctx) + downstreamConn.OnRead(packet) } } -func (r *runtime) setOnReadHandler(cliConn *connection.Connection, upstreamConn *connection.UpstreamConnection, ctx context.Layer4Context) { +func (r *runtime) setOnReadHandler(downstreamConn *connection.Connection, upstreamConn *connection.UpstreamConnection, ctx context.Layer4Context) { if handle, ok := r.mux.GetHandler(r.spec.Name); ok { - cliConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - _ = ctx.GetClientReadBuffer().Append(buffer.Bytes()) - handle.Handle(ctx) // its filter's response to clear context client read buffer - buffer.Reset() + downstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { + writeBuf := handle.Handle(ctx, readBuf) + if writeBuf != nil && writeBuf.Len() > 0 { + _ = upstreamConn.Write(writeBuf) + } }) - upstreamConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - _ = ctx.GetUpstreamReadBuffer().Append(buffer.Bytes()) - handle.Handle(ctx) // its filter's response to clear context upstream read buffer - buffer.Reset() + upstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { + writeBuf := handle.Handle(ctx, readBuf) + if writeBuf != nil && writeBuf.Len() > 0 { + _ = downstreamConn.Write(writeBuf) + } }) } else { - cliConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - ctx.DirectWriteToUpstream(buffer) - buffer.Reset() + downstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { + if readBuf != nil && readBuf.Len() > 0 { + _ = upstreamConn.Write(readBuf.Clone()) + readBuf.Drain(readBuf.Len()) + } }) - upstreamConn.SetOnRead(func(buffer iobufferpool.IoBuffer) { - ctx.DirectWriteToClient(buffer) - buffer.Reset() + upstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { + if readBuf != nil && readBuf.Len() > 0 { + _ = downstreamConn.Write(readBuf.Clone()) + readBuf.Drain(readBuf.Len()) + } }) } } diff --git a/pkg/object/layer4rawserver/spec.go b/pkg/object/layer4rawserver/spec.go index 4722dfad1b..c213a72735 100644 --- a/pkg/object/layer4rawserver/spec.go +++ b/pkg/object/layer4rawserver/spec.go @@ -21,7 +21,6 @@ import ( "fmt" "github.com/megaease/easegress/pkg/util/ipfilter" - "github.com/megaease/easegress/pkg/util/layer4stat" ) type ( @@ -32,11 +31,10 @@ type ( Port uint16 `yaml:"port" json:"port" jsonschema:"required"` // tcp stream config params - KeepAlive bool `yaml:"keepAlive" jsonschema:"required"` - TcpNodelay bool `yaml:"tcpNodelay" jsonschema:"omitempty"` - MaxConnections uint32 `yaml:"maxConns" jsonschema:"omitempty,minimum=1"` - ProxyConnectTimeout uint32 `yaml:"proxyConnectTimeout" jsonschema:"omitempty"` - ProxyTimeout uint32 `yaml:"proxyTimeout" jsonschema:"omitempty"` + KeepAlive bool `yaml:"keepAlive" jsonschema:"required"` + MaxConnections uint32 `yaml:"maxConns" jsonschema:"omitempty,minimum=1"` + ConnectTimeout uint32 `yaml:"connectTimeout" jsonschema:"omitempty"` + ProxyTimeout uint32 `yaml:"proxyTimeout" jsonschema:"omitempty"` Pool *PoolSpec `yaml:"pool" jsonschema:"required"` IPFilter *ipfilter.Spec `yaml:"ipFilter,omitempty" jsonschema:"omitempty"` @@ -50,11 +48,6 @@ type ( ServersTags []string `yaml:"serversTags" 
jsonschema:"omitempty,uniqueItems=true"` LoadBalance *LoadBalance `yaml:"loadBalance" jsonschema:"required"` } - - // PoolStatus is the status of Pool. - PoolStatus struct { - Stat *layer4stat.Status `yaml:"stat"` - } ) // Validate validates Layer4 Server. diff --git a/pkg/protocol/protocol.go b/pkg/protocol/protocol.go index 14e7e368da..08ed02dfc8 100644 --- a/pkg/protocol/protocol.go +++ b/pkg/protocol/protocol.go @@ -21,6 +21,7 @@ package protocol import ( "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/util/iobufferpool" ) type ( @@ -36,7 +37,7 @@ type ( // Handle read buffer from context, and set write buffer to context, // its filter's response to release read buffer in context // and its filter's response to determine which time to flush buffer to client or upstream - Handle(ctx context.Layer4Context) + Handle(ctx context.Layer4Context, readBuf iobufferpool.IoBuffer) (writeBuf iobufferpool.IoBuffer) } // MuxMapper gets HTTP handler pipeline with mutex diff --git a/pkg/util/connection/connection.go b/pkg/util/connection/connection.go index f9a763b33a..1dc95cc34c 100644 --- a/pkg/util/connection/connection.go +++ b/pkg/util/connection/connection.go @@ -30,6 +30,7 @@ import ( "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/util/iobufferpool" "github.com/megaease/easegress/pkg/util/timerpool" + "github.com/rcrowley/go-metrics" ) type Connection struct { @@ -55,12 +56,16 @@ type Connection struct { connStopChan chan struct{} // use for connection close listenerStopChan chan struct{} // use for listener close - onReadBuffer func(buffer iobufferpool.IoBuffer) // execute read filters + readCollector metrics.Counter + writeCollector metrics.Counter + + onRead func(buffer iobufferpool.IoBuffer) // execute read filters + onClose func() } -// NewClientConn wrap connection create from client +// NewDownstreamConn wrap connection create from client // @param remoteAddr client addr for udp proxy use -func NewClientConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan, connStopChan chan struct{}) *Connection { +func NewDownstreamConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan struct{}) *Connection { clientConn := &Connection{ conn: conn, connected: 1, @@ -72,7 +77,7 @@ func NewClientConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan, connSto writeBufferChan: make(chan *[]iobufferpool.IoBuffer, 8), mu: sync.Mutex{}, - connStopChan: connStopChan, + connStopChan: make(chan struct{}), listenerStopChan: listenerStopChan, } @@ -104,13 +109,23 @@ func (c *Connection) ReadEnabled() bool { return c.readEnabled } +// SetCollector set read/write metrics collectors +func (c *Connection) SetCollector(read, write metrics.Counter) { + c.readCollector = read + c.writeCollector = write +} + // SetOnRead set connection read handle func (c *Connection) SetOnRead(onRead func(buffer iobufferpool.IoBuffer)) { - c.onReadBuffer = onRead + c.onRead = onRead } func (c *Connection) OnRead(buffer iobufferpool.IoBuffer) { - c.onReadBuffer(buffer) + c.onRead(buffer) +} + +func (c *Connection) GetReadBuffer() iobufferpool.IoBuffer { + return c.readBuffer } // Start running connection read/write loop @@ -417,7 +432,7 @@ func (c *Connection) doReadIO() (err error) { return } - c.onReadBuffer(c.readBuffer) + c.onRead(c.readBuffer) if currLen := int64(c.readBuffer.Len()); c.lastBytesSizeRead != currLen { c.lastBytesSizeRead = currLen } @@ -500,19 +515,19 @@ type UpstreamConnection struct { connectOnce sync.Once } -func 
NewUpstreamConn(connectTimeout time.Duration, remoteAddr net.Addr, listenerStopChan, ConnStopChan chan struct{}) *UpstreamConnection { +func NewUpstreamConn(connectTimeout time.Duration, upstreamAddr net.Addr, listenerStopChan chan struct{}) *UpstreamConnection { conn := &UpstreamConnection{ Connection: Connection{ connected: 1, - protocol: remoteAddr.Network(), - remoteAddr: remoteAddr, + protocol: upstreamAddr.Network(), + remoteAddr: upstreamAddr, readEnabled: true, readEnabledChan: make(chan bool, 1), writeBufferChan: make(chan *[]iobufferpool.IoBuffer, 8), mu: sync.Mutex{}, - connStopChan: ConnStopChan, + connStopChan: make(chan struct{}), listenerStopChan: listenerStopChan, }, connectTimeout: connectTimeout, diff --git a/pkg/util/layer4stat/layer4stat.go b/pkg/util/layer4stat/layer4stat.go deleted file mode 100644 index 752e1c58a0..0000000000 --- a/pkg/util/layer4stat/layer4stat.go +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package layer4stat - -import ( - "sync" - "time" - - "github.com/megaease/easegress/pkg/util/sampler" - "github.com/rcrowley/go-metrics" -) - -type ( - // Layer4Stat is the statistics tool for TCP traffic. - Layer4Stat struct { - mutex sync.Mutex - - count uint64 // for tcp connection - rate1 metrics.EWMA - rate5 metrics.EWMA - rate15 metrics.EWMA - - errCount uint64 - errRate1 metrics.EWMA - errRate5 metrics.EWMA - errRate15 metrics.EWMA - - m1ErrPercent float64 - m5ErrPercent float64 - m15ErrPercent float64 - - total uint64 - min uint64 - mean uint64 - max uint64 - - durationSampler *sampler.DurationSampler - - reqSize uint64 - respSize uint64 - } - - // Metric is the package of statistics at once. - Metric struct { - Err bool // client/upstream connection send/receive data failed - Duration time.Duration - ReqSize uint64 - RespSize uint64 - } - - // Status contains all status generated by HTTPStat. 
- Status struct { - Count uint64 `yaml:"count"` - M1 float64 `yaml:"m1"` - M5 float64 `yaml:"m5"` - M15 float64 `yaml:"m15"` - - ErrCount uint64 `yaml:"errCount"` - M1Err float64 `yaml:"m1Err"` - M5Err float64 `yaml:"m5Err"` - M15Err float64 `yaml:"m15Err"` - - M1ErrPercent float64 `yaml:"m1ErrPercent"` - M5ErrPercent float64 `yaml:"m5ErrPercent"` - M15ErrPercent float64 `yaml:"m15ErrPercent"` - - Min uint64 `yaml:"min"` - Max uint64 `yaml:"max"` - Mean uint64 `yaml:"mean"` - - P25 float64 `yaml:"p25"` - P50 float64 `yaml:"p50"` - P75 float64 `yaml:"p75"` - P95 float64 `yaml:"p95"` - P98 float64 `yaml:"p98"` - P99 float64 `yaml:"p99"` - P999 float64 `yaml:"p999"` - - ReqSize uint64 `yaml:"reqSize"` - RespSize uint64 `yaml:"respSize"` - - Codes map[int]uint64 `yaml:"codes"` - } -) - -// New get new layer4 stat -func New() *Layer4Stat { - hs := &Layer4Stat{ - rate1: metrics.NewEWMA1(), - rate5: metrics.NewEWMA5(), - rate15: metrics.NewEWMA15(), - - errRate1: metrics.NewEWMA1(), - errRate5: metrics.NewEWMA5(), - errRate15: metrics.NewEWMA15(), - - durationSampler: sampler.NewDurationSampler(), - } - - return hs -} - -// Stat stats the ctx. -func (l *Layer4Stat) Stat(m *Metric) { - l.mutex.Lock() - defer l.mutex.Unlock() - - l.count++ - l.rate1.Update(1) - l.rate5.Update(1) - l.rate15.Update(1) - - if m.Err { - l.errCount++ - l.errRate1.Update(1) - l.errRate5.Update(1) - l.errRate15.Update(1) - } - - duration := uint64(m.Duration.Milliseconds()) - l.total += duration - if l.count == 1 { - l.min, l.mean, l.max = duration, duration, duration - } else { - if duration < l.min { - l.min = duration - } - if duration > l.max { - l.max = duration - } - l.mean = l.total / l.count - } - - l.durationSampler.Update(m.Duration) - - l.reqSize += m.ReqSize - l.respSize += m.RespSize -} - -// Status returns Layer4Stat Status, It assumes it is called every five seconds. 
-// https://github.com/rcrowley/go-metrics/blob/3113b8401b8a98917cde58f8bbd42a1b1c03b1fd/ewma.go#L98-L99 -func (l *Layer4Stat) Status() *Status { - l.mutex.Lock() - defer l.mutex.Unlock() - - l.rate1.Tick() - l.rate5.Tick() - l.rate15.Tick() - l.errRate1.Tick() - l.errRate5.Tick() - l.errRate15.Tick() - - m1, m5, m15 := l.rate1.Rate(), l.rate5.Rate(), l.rate15.Rate() - m1Err, m5Err, m15Err := l.errRate1.Rate(), l.errRate5.Rate(), l.errRate15.Rate() - m1ErrPercent, m5ErrPercent, m15ErrPercent := 0.0, 0.0, 0.0 - if m1 > 0 { - m1ErrPercent = m1Err / m1 - } - if m5 > 0 { - m1ErrPercent = m5Err / m5 - } - if m15 > 0 { - m1ErrPercent = m15Err / m15 - } - - percentiles := l.durationSampler.Percentiles() - - status := &Status{ - Count: l.count, - M1: m1, - M5: m5, - M15: m15, - - ErrCount: l.errCount, - M1Err: m1Err, - M5Err: m5Err, - M15Err: m15Err, - - M1ErrPercent: m1ErrPercent, - M5ErrPercent: m5ErrPercent, - M15ErrPercent: m15ErrPercent, - - Min: l.min, - Mean: l.mean, - Max: l.max, - - P25: percentiles[0], - P50: percentiles[1], - P75: percentiles[2], - P95: percentiles[3], - P98: percentiles[4], - P99: percentiles[5], - P999: percentiles[6], - - ReqSize: l.reqSize, - RespSize: l.respSize, - } - - return status -} From e6b8d434e9a5f180ce7824f4ae65307d4c0e380d Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 12 Oct 2021 19:53:31 +0800 Subject: [PATCH 24/99] [layer4proxy] extract downstream/upstream write buffer to layer4 context --- pkg/context/layer4context.go | 49 +++++++++++++++++++++++++++ pkg/object/layer4rawserver/runtime.go | 12 +++---- pkg/protocol/protocol.go | 2 +- 3 files changed, 56 insertions(+), 7 deletions(-) diff --git a/pkg/context/layer4context.go b/pkg/context/layer4context.go index 0ace6dbb64..95ae887c74 100644 --- a/pkg/context/layer4context.go +++ b/pkg/context/layer4context.go @@ -18,6 +18,7 @@ package context import ( + "github.com/megaease/easegress/pkg/util/iobufferpool" "net" "sync" "time" @@ -49,6 +50,13 @@ type ( // Duration context alive duration Duration() time.Duration + GetUpstreamWriteBuffer() iobufferpool.IoBuffer + // AppendUpstreamWriteBuffer append upstream write buffer(notice: buffer will put into buffer pool) + AppendUpstreamWriteBuffer(buffer iobufferpool.IoBuffer) + GetDownstreamWriteBuffer() iobufferpool.IoBuffer + // AppendDownstreamWriteBuffer append downstream write buffer(notice: buffer will put into buffer pool) + AppendDownstreamWriteBuffer(buffer iobufferpool.IoBuffer) + CallNextHandler(lastResult string) string SetHandlerCaller(caller HandlerCaller) } @@ -63,6 +71,9 @@ type ( startTime *time.Time // connection accept time endTime *time.Time // connection close time + upstreamWriteBuffer iobufferpool.IoBuffer // init when AppendUpstreamWriteBuffer called + downstreamWriteBuffer iobufferpool.IoBuffer // init when AppendDownstreamWriteBuffer called + caller HandlerCaller } ) @@ -123,6 +134,44 @@ func (ctx *layer4Context) Duration() time.Duration { return time.Now().Sub(*ctx.startTime) } +func (ctx *layer4Context) GetUpstreamWriteBuffer() iobufferpool.IoBuffer { + if ctx.upstreamWriteBuffer == nil || ctx.upstreamWriteBuffer.Len() == 0 { + return nil + } + return ctx.upstreamWriteBuffer.Clone() +} + +func (ctx *layer4Context) AppendUpstreamWriteBuffer(buffer iobufferpool.IoBuffer) { + if ctx.upstreamWriteBuffer == nil { + if ctx.protocol == "tcp" { + ctx.upstreamWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) + } else { + ctx.upstreamWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) + } + } + _ = 
ctx.upstreamWriteBuffer.Append(buffer.Bytes()) + _ = iobufferpool.PutIoBuffer(buffer) +} + +func (ctx *layer4Context) GetDownstreamWriteBuffer() iobufferpool.IoBuffer { + if ctx.downstreamWriteBuffer == nil || ctx.downstreamWriteBuffer.Len() == 0 { + return nil + } + return ctx.downstreamWriteBuffer.Clone() +} + +func (ctx *layer4Context) AppendDownstreamWriteBuffer(buffer iobufferpool.IoBuffer) { + if ctx.downstreamWriteBuffer == nil { + if ctx.protocol == "tcp" { + ctx.downstreamWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) + } else { + ctx.downstreamWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) + } + } + _ = ctx.downstreamWriteBuffer.Append(buffer.Bytes()) + _ = iobufferpool.PutIoBuffer(buffer) +} + func (ctx *layer4Context) CallNextHandler(lastResult string) string { return ctx.caller(lastResult) } diff --git a/pkg/object/layer4rawserver/runtime.go b/pkg/object/layer4rawserver/runtime.go index 4ae7261cba..51a5439751 100644 --- a/pkg/object/layer4rawserver/runtime.go +++ b/pkg/object/layer4rawserver/runtime.go @@ -342,15 +342,15 @@ func (r *runtime) onUdpAccept() func(cliAddr net.Addr, rawConn net.Conn, listene func (r *runtime) setOnReadHandler(downstreamConn *connection.Connection, upstreamConn *connection.UpstreamConnection, ctx context.Layer4Context) { if handle, ok := r.mux.GetHandler(r.spec.Name); ok { downstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { - writeBuf := handle.Handle(ctx, readBuf) - if writeBuf != nil && writeBuf.Len() > 0 { - _ = upstreamConn.Write(writeBuf) + handle.Handle(ctx, readBuf, nil) + if buf := ctx.GetDownstreamWriteBuffer(); buf != nil { + _ = upstreamConn.Write(buf) } }) upstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { - writeBuf := handle.Handle(ctx, readBuf) - if writeBuf != nil && writeBuf.Len() > 0 { - _ = downstreamConn.Write(writeBuf) + handle.Handle(ctx, readBuf, nil) + if buf := ctx.GetUpstreamWriteBuffer(); buf != nil { + _ = downstreamConn.Write(buf) } }) } else { diff --git a/pkg/protocol/protocol.go b/pkg/protocol/protocol.go index 08ed02dfc8..4b5bb82513 100644 --- a/pkg/protocol/protocol.go +++ b/pkg/protocol/protocol.go @@ -37,7 +37,7 @@ type ( // Handle read buffer from context, and set write buffer to context, // its filter's response to release read buffer in context // and its filter's response to determine which time to flush buffer to client or upstream - Handle(ctx context.Layer4Context, readBuf iobufferpool.IoBuffer) (writeBuf iobufferpool.IoBuffer) + Handle(ctx context.Layer4Context, readBuf iobufferpool.IoBuffer, v interface{}) } // MuxMapper gets HTTP handler pipeline with mutex From f2c10173454f60b02cf105276a8141c04680c923 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Wed, 13 Oct 2021 17:27:33 +0800 Subject: [PATCH 25/99] [layer4proxy] rollback supervisor modify --- pkg/supervisor/object.go | 6 +++--- pkg/supervisor/registry.go | 15 +++------------ 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/pkg/supervisor/object.go b/pkg/supervisor/object.go index 6bd3967c92..dd88d21c42 100644 --- a/pkg/supervisor/object.go +++ b/pkg/supervisor/object.go @@ -87,7 +87,7 @@ const ( configFileName = "running_objects.yaml" ) -// FilterCategory returns a bool function to check if the object entity is filter by category or not +// FilterCategory returns a bool function to check if the object entity is filtered by category or not func FilterCategory(categories ...ObjectCategory) ObjectEntityWatcherFilter { allCategory := false for _, category := range 
categories { @@ -327,7 +327,7 @@ func (w *ObjectEntityWatcher) Entities() map[string]*ObjectEntity { return entities } -// NewObjectEntityFromConfig creates a object entity from configuration +// NewObjectEntityFromConfig creates an object entity from configuration func (s *Supervisor) NewObjectEntityFromConfig(config string) (*ObjectEntity, error) { spec, err := s.NewSpec(config) if err != nil { @@ -337,7 +337,7 @@ func (s *Supervisor) NewObjectEntityFromConfig(config string) (*ObjectEntity, er return s.NewObjectEntityFromSpec(spec) } -// NewObjectEntityFromSpec creates a object entity from a spec +// NewObjectEntityFromSpec creates an object entity from a spec func (s *Supervisor) NewObjectEntityFromSpec(spec *Spec) (*ObjectEntity, error) { registerObject, exists := objectRegistry[spec.Kind()] if !exists { diff --git a/pkg/supervisor/registry.go b/pkg/supervisor/registry.go index 856e0f5b58..402455bfc5 100644 --- a/pkg/supervisor/registry.go +++ b/pkg/supervisor/registry.go @@ -66,18 +66,9 @@ type ( // Inherit also initializes the Object. // But it needs to handle the lifecycle of the previous generation. - // So its own responsibility for the object to inherit and clean the previous generation stuff. + // So it's own responsibility for the object to inherit and clean the previous generation stuff. // The supervisor won't call Close for the previous generation. Inherit(superSpec *Spec, previousGeneration Object, muxMapper protocol.MuxMapper) - - //// InitLayer4 initializes the Object. - //InitLayer4(superSpec *Spec, muxMapper protocol.Layer4MuxMapper) - // - //// InheritLayer4 also initializes the Object. - //// But it needs to handle the lifecycle of the previous generation. - //// So its own responsibility for the object to inherit and clean the previous generation stuff. - //// The supervisor won't call Close for the previous generation. - //InheritLayer4(superSpec *Spec, previousGeneration Object, muxMapper protocol.Layer4MuxMapper) } // TrafficGate is the object in category of TrafficGate. @@ -167,12 +158,12 @@ func Register(o Object) { case CategoryBusinessController, CategorySystemController: _, ok := o.(Controller) if !ok { - panic(fmt.Errorf("%s: not satisfies interface Controller", o.Kind())) + panic(fmt.Errorf("%s: doesn't implement interface Controller", o.Kind())) } case CategoryPipeline, CategoryTrafficGate: _, ok := o.(TrafficObject) if !ok { - panic(fmt.Errorf("%s: not satisfies interface TrafficObject", o.Kind())) + panic(fmt.Errorf("%s: doesn't implement interface TrafficObject", o.Kind())) } } From ee0b146535f4efc6109335003f163e6226e3facf Mon Sep 17 00:00:00 2001 From: jxd134 Date: Wed, 13 Oct 2021 17:28:20 +0800 Subject: [PATCH 26/99] [layer4proxy] rollback traffic controller and protocol package modify --- .../trafficcontroller/trafficcontroller.go | 58 ++----------------- pkg/protocol/{protocol.go => http.go} | 16 +---- 2 files changed, 6 insertions(+), 68 deletions(-) rename pkg/protocol/{protocol.go => http.go} (60%) diff --git a/pkg/object/trafficcontroller/trafficcontroller.go b/pkg/object/trafficcontroller/trafficcontroller.go index 13b16e0e1c..e96d69a165 100644 --- a/pkg/object/trafficcontroller/trafficcontroller.go +++ b/pkg/object/trafficcontroller/trafficcontroller.go @@ -57,10 +57,8 @@ type ( // When the entry for a given key is only ever written once but read many times. 
// Reference: https://golang.org/pkg/sync/#Map // types of both: map[string]*supervisor.ObjectEntity - httpservers sync.Map - httppipelines sync.Map - layer4servers sync.Map - layer4Pipelines sync.Map + httpservers sync.Map + httppipelines sync.Map } // WalkFunc is the type of the function called for @@ -107,8 +105,8 @@ func newNamespace(namespace string) *Namespace { } } -// GetHTTPHandler gets handler within the namespace -func (ns *Namespace) GetHTTPHandler(name string) (protocol.HTTPHandler, bool) { +// GetHandler gets handler within the namespace +func (ns *Namespace) GetHandler(name string) (protocol.HTTPHandler, bool) { entity, exists := ns.httppipelines.Load(name) if !exists { return nil, false @@ -118,16 +116,6 @@ func (ns *Namespace) GetHTTPHandler(name string) (protocol.HTTPHandler, bool) { return handler, true } -func (ns *Namespace) GetLayer4Handler(name string) (protocol.Layer4Handler, bool) { - entity, exists := ns.layer4Pipelines.Load(name) - if !exists { - return nil, false - } - - handler := entity.(*supervisor.ObjectEntity).Instance().(protocol.Layer4Handler) - return handler, true -} - // Category returns the category of TrafficController. func (tc *TrafficController) Category() supervisor.ObjectCategory { return Category @@ -165,42 +153,6 @@ func (tc *TrafficController) reload(previousGeneration *TrafficController) { } } -// CreateLayer4ServerForSpec creates layer4 server with a spec -func (tc *TrafficController) CreateLayer4ServerForSpec(namespace string, superSpec *supervisor.Spec) ( - *supervisor.ObjectEntity, error) { - entity, err := tc.super.NewObjectEntityFromSpec(superSpec) - if err != nil { - return nil, err - } - return tc.CreateLayer4Server(namespace, entity) -} - -// CreateLayer4Server creates Layer4 server -func (tc *TrafficController) CreateLayer4Server(namespace string, entity *supervisor.ObjectEntity) (*supervisor.ObjectEntity, error) { - if namespace == "" { - return nil, fmt.Errorf("empty namespace") - } - - tc.mutex.Lock() - defer tc.mutex.Unlock() - - space, exists := tc.namespaces[namespace] - if !exists { - space = newNamespace(namespace) - tc.namespaces[namespace] = space - logger.Infof("create namespace %s", namespace) - } - - name := entity.Spec().Name() - - entity.InitWithRecovery(space) - space.layer4servers.Store(name, entity) - - logger.Infof("create layer4 server %s/%s", namespace, name) - - return entity, nil -} - // CreateHTTPServerForSpec creates HTTP server with a spec func (tc *TrafficController) CreateHTTPServerForSpec(namespace string, superSpec *supervisor.Spec) ( *supervisor.ObjectEntity, error) { @@ -565,7 +517,7 @@ func (tc *TrafficController) ApplyHTTPPipeline(namespace string, entity *supervi return entity, nil } -// DeleteHTTPPipeline deletes the HTTP pipeline by it's namespace and name +// DeleteHTTPPipeline deletes the HTTP pipeline by its namespace and name func (tc *TrafficController) DeleteHTTPPipeline(namespace, name string) error { tc.mutex.Lock() defer tc.mutex.Unlock() diff --git a/pkg/protocol/protocol.go b/pkg/protocol/http.go similarity index 60% rename from pkg/protocol/protocol.go rename to pkg/protocol/http.go index 4b5bb82513..3fcb49a3ce 100644 --- a/pkg/protocol/protocol.go +++ b/pkg/protocol/http.go @@ -21,7 +21,6 @@ package protocol import ( "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/util/iobufferpool" ) type ( @@ -31,21 +30,8 @@ type ( Handle(ctx context.HTTPContext) } - // Layer4Handler is the common handler for the all backends - // which handle the traffic from 
layer4(tcp/udp) server. - Layer4Handler interface { - // Handle read buffer from context, and set write buffer to context, - // its filter's response to release read buffer in context - // and its filter's response to determine which time to flush buffer to client or upstream - Handle(ctx context.Layer4Context, readBuf iobufferpool.IoBuffer, v interface{}) - } - // MuxMapper gets HTTP handler pipeline with mutex MuxMapper interface { - // GetHTTPHandler get http handler from mux - GetHTTPHandler(name string) (HTTPHandler, bool) - - // GetLayer4Handler get layer4 handler from mux - GetLayer4Handler(name string) (Layer4Handler, bool) + GetHandler(name string) (HTTPHandler, bool) } ) From 87027b10e8092dc7a7a6ed4d0300779853938f48 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Wed, 13 Oct 2021 22:03:57 +0800 Subject: [PATCH 27/99] [layer4proxy] cleanup code --- pkg/context/handlercaller.go | 23 - pkg/context/httpcontext.go | 4 +- pkg/context/layer4context.go | 181 ------- pkg/filter/bridge/bridge.go | 2 +- pkg/object/httpserver/mux.go | 2 +- pkg/object/layer4pipeline/layer4pipeline.go | 482 ------------------ pkg/object/layer4pipeline/registry.go | 120 ----- pkg/object/layer4pipeline/spec.go | 142 ------ pkg/object/layer4rawserver/mux.go | 109 ---- .../layer4server}/connection.go | 4 +- .../layer4server}/constant.go | 11 +- pkg/object/layer4server/ipfilters.go | 64 +++ .../layer4server.go | 6 +- .../listener.go | 31 +- .../{layer4rawserver => layer4server}/pool.go | 42 +- .../runtime.go | 112 ++-- .../server.go} | 2 +- .../{layer4rawserver => layer4server}/spec.go | 6 +- .../layer4server}/udpreceiver.go | 2 +- pkg/registry/registry.go | 3 +- 20 files changed, 183 insertions(+), 1165 deletions(-) delete mode 100644 pkg/context/handlercaller.go delete mode 100644 pkg/context/layer4context.go delete mode 100644 pkg/object/layer4pipeline/layer4pipeline.go delete mode 100644 pkg/object/layer4pipeline/registry.go delete mode 100644 pkg/object/layer4pipeline/spec.go delete mode 100644 pkg/object/layer4rawserver/mux.go rename pkg/{util/connection => object/layer4server}/connection.go (99%) rename pkg/{util/connection => object/layer4server}/constant.go (91%) create mode 100644 pkg/object/layer4server/ipfilters.go rename pkg/object/{layer4rawserver => layer4server}/layer4server.go (96%) rename pkg/object/{layer4rawserver => layer4server}/listener.go (88%) rename pkg/object/{layer4rawserver => layer4server}/pool.go (59%) rename pkg/object/{layer4rawserver => layer4server}/runtime.go (71%) rename pkg/object/{layer4rawserver/backendserver.go => layer4server/server.go} (99%) rename pkg/object/{layer4rawserver => layer4server}/spec.go (91%) rename pkg/{util/connection => object/layer4server}/udpreceiver.go (98%) diff --git a/pkg/context/handlercaller.go b/pkg/context/handlercaller.go deleted file mode 100644 index dc04ffbf19..0000000000 --- a/pkg/context/handlercaller.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package context - -type ( - // HandlerCaller is a helper function to call the handler - HandlerCaller func(lastResult string) string -) diff --git a/pkg/context/httpcontext.go b/pkg/context/httpcontext.go index 79e5286785..e2361911c4 100644 --- a/pkg/context/httpcontext.go +++ b/pkg/context/httpcontext.go @@ -39,6 +39,8 @@ import ( ) type ( + // HandlerCaller is a helper function to call the handler + HandlerCaller func(lastResult string) string // HTTPContext is all context of an HTTP processing. // It is not goroutine-safe, callers must use Lock/Unlock @@ -82,7 +84,7 @@ type ( Method() string SetMethod(method string) - // URL + // Scheme URL Scheme() string Host() string SetHost(host string) diff --git a/pkg/context/layer4context.go b/pkg/context/layer4context.go deleted file mode 100644 index 95ae887c74..0000000000 --- a/pkg/context/layer4context.go +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package context - -import ( - "github.com/megaease/easegress/pkg/util/iobufferpool" - "net" - "sync" - "time" -) - -type ConnectionType uint16 - -const ( - DownstreamConnection ConnectionType = iota - UpstreamConnection -) - -type ( - // Layer4Context is all context of an TCP processing. - // It is not goroutine-safe, callers must use Lock/Unlock - // to protect it by themselves. - Layer4Context interface { - Lock() - Unlock() - - Protocol() string - LocalAddr() net.Addr - UpstreamAddr() net.Addr - DownstreamAddr() net.Addr - // SetDownstreamAddr use for udp downstream addr - SetDownstreamAddr(addr net.Addr) - // Finish close by downstream connection and upstream connection - Finish(t ConnectionType) - // Duration context alive duration - Duration() time.Duration - - GetUpstreamWriteBuffer() iobufferpool.IoBuffer - // AppendUpstreamWriteBuffer append upstream write buffer(notice: buffer will put into buffer pool) - AppendUpstreamWriteBuffer(buffer iobufferpool.IoBuffer) - GetDownstreamWriteBuffer() iobufferpool.IoBuffer - // AppendDownstreamWriteBuffer append downstream write buffer(notice: buffer will put into buffer pool) - AppendDownstreamWriteBuffer(buffer iobufferpool.IoBuffer) - - CallNextHandler(lastResult string) string - SetHandlerCaller(caller HandlerCaller) - } - - layer4Context struct { - mutex sync.Mutex - - protocol string // tcp/udp - localAddr net.Addr - downstreamAddr net.Addr - upstreamAddr net.Addr - startTime *time.Time // connection accept time - endTime *time.Time // connection close time - - upstreamWriteBuffer iobufferpool.IoBuffer // init when AppendUpstreamWriteBuffer called - downstreamWriteBuffer iobufferpool.IoBuffer // init when AppendDownstreamWriteBuffer called - - caller HandlerCaller - } -) - -// NewLayer4Context creates an Layer4Context. 
-func NewLayer4Context(protocol string, localAddr net.Addr, downstreamAddr, upstreamAddr net.Addr) *layer4Context { - - startTime := time.Now() - res := layer4Context{ - mutex: sync.Mutex{}, - protocol: protocol, - startTime: &startTime, - localAddr: localAddr, - downstreamAddr: downstreamAddr, - upstreamAddr: upstreamAddr, - } - return &res -} - -func (ctx *layer4Context) Lock() { - ctx.mutex.Lock() -} - -func (ctx *layer4Context) Unlock() { - ctx.mutex.Unlock() -} - -// Protocol get proxy protocol -func (ctx *layer4Context) Protocol() string { - return ctx.protocol -} - -func (ctx *layer4Context) LocalAddr() net.Addr { - return ctx.localAddr -} - -func (ctx *layer4Context) DownstreamAddr() net.Addr { - return ctx.downstreamAddr -} - -func (ctx *layer4Context) SetDownstreamAddr(addr net.Addr) { - ctx.downstreamAddr = addr -} - -func (ctx *layer4Context) UpstreamAddr() net.Addr { - return ctx.upstreamAddr -} - -func (ctx *layer4Context) Finish(t ConnectionType) { - finish := time.Now() - ctx.endTime = &finish -} - -func (ctx *layer4Context) Duration() time.Duration { - if ctx.endTime != nil { - return ctx.endTime.Sub(*ctx.startTime) - } - return time.Now().Sub(*ctx.startTime) -} - -func (ctx *layer4Context) GetUpstreamWriteBuffer() iobufferpool.IoBuffer { - if ctx.upstreamWriteBuffer == nil || ctx.upstreamWriteBuffer.Len() == 0 { - return nil - } - return ctx.upstreamWriteBuffer.Clone() -} - -func (ctx *layer4Context) AppendUpstreamWriteBuffer(buffer iobufferpool.IoBuffer) { - if ctx.upstreamWriteBuffer == nil { - if ctx.protocol == "tcp" { - ctx.upstreamWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) - } else { - ctx.upstreamWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) - } - } - _ = ctx.upstreamWriteBuffer.Append(buffer.Bytes()) - _ = iobufferpool.PutIoBuffer(buffer) -} - -func (ctx *layer4Context) GetDownstreamWriteBuffer() iobufferpool.IoBuffer { - if ctx.downstreamWriteBuffer == nil || ctx.downstreamWriteBuffer.Len() == 0 { - return nil - } - return ctx.downstreamWriteBuffer.Clone() -} - -func (ctx *layer4Context) AppendDownstreamWriteBuffer(buffer iobufferpool.IoBuffer) { - if ctx.downstreamWriteBuffer == nil { - if ctx.protocol == "tcp" { - ctx.downstreamWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) - } else { - ctx.downstreamWriteBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) - } - } - _ = ctx.downstreamWriteBuffer.Append(buffer.Bytes()) - _ = iobufferpool.PutIoBuffer(buffer) -} - -func (ctx *layer4Context) CallNextHandler(lastResult string) string { - return ctx.caller(lastResult) -} - -func (ctx *layer4Context) SetHandlerCaller(caller HandlerCaller) { - ctx.caller = caller -} diff --git a/pkg/filter/bridge/bridge.go b/pkg/filter/bridge/bridge.go index ad19ed623d..3548911f11 100644 --- a/pkg/filter/bridge/bridge.go +++ b/pkg/filter/bridge/bridge.go @@ -147,7 +147,7 @@ func (b *Bridge) handle(ctx context.HTTPContext) (result string) { return resultDestinationNotFound } - handler, exists := b.muxMapper.GetHTTPHandler(dest) + handler, exists := b.muxMapper.GetHandler(dest) if !exists { logger.Errorf("failed to get running object %s", b.spec.Destinations[0]) diff --git a/pkg/object/httpserver/mux.go b/pkg/object/httpserver/mux.go index 6b058d76c2..4229559bb8 100644 --- a/pkg/object/httpserver/mux.go +++ b/pkg/object/httpserver/mux.go @@ -434,7 +434,7 @@ func (m *mux) handleRequestWithCache(rules *muxRules, ctx context.HTTPContext, c case ci.methodNotAllowed: 
ctx.Response().SetStatusCode(http.StatusMethodNotAllowed) case ci.path != nil: - handler, exists := rules.muxMapper.GetHTTPHandler(ci.path.backend) + handler, exists := rules.muxMapper.GetHandler(ci.path.backend) if !exists { ctx.AddTag(stringtool.Cat("backend ", ci.path.backend, " not found")) ctx.Response().SetStatusCode(http.StatusServiceUnavailable) diff --git a/pkg/object/layer4pipeline/layer4pipeline.go b/pkg/object/layer4pipeline/layer4pipeline.go deleted file mode 100644 index dd32ee0c38..0000000000 --- a/pkg/object/layer4pipeline/layer4pipeline.go +++ /dev/null @@ -1,482 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package layer4pipeline - -import ( - "bytes" - "fmt" - "reflect" - "sync" - "time" - - "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/protocol" - "github.com/megaease/easegress/pkg/supervisor" - "github.com/megaease/easegress/pkg/util/stringtool" - "github.com/megaease/easegress/pkg/util/yamltool" -) - -const ( - // Category is the category of Layer4Pipeline. - Category = supervisor.CategoryPipeline - - // Kind is the kind of Layer4Pipeline. - Kind = "Layer4Pipeline" - - // LabelEND is the built-in label for jumping of flow. - LabelEND = "END" -) - -func init() { - supervisor.Register(&Layer4Pipeline{}) -} - -type ( - // Layer4Pipeline is Object Layer4Pipeline. - Layer4Pipeline struct { - superSpec *supervisor.Spec - spec *Spec - - muxMapper protocol.MuxMapper - runningFilters []*runningFilter - } - - runningFilter struct { - spec *FilterSpec - jumpIf map[string]string - rootFilter Filter - filter Filter - } - - // Spec describes the Layer4Pipeline. - Spec struct { - Flow []Flow `yaml:"flow" jsonschema:"omitempty"` - Filters []map[string]interface{} `yaml:"filters" jsonschema:"required"` - } - - // Flow controls the flow of pipeline. - Flow struct { - Filter string `yaml:"filter" jsonschema:"required,format=urlname"` - JumpIf map[string]string `yaml:"jumpIf" jsonschema:"omitempty"` - } - - // Status is the status of Layer4Pipeline. - Status struct { - Health string `yaml:"health"` - - Filters map[string]interface{} `yaml:"filters"` - } - - // PipelineContext contains the context of the Layer4Pipeline. - PipelineContext struct { - FilterStats *FilterStat - } - - // FilterStat records the statistics of the running filter. 
- FilterStat struct { - Name string - Kind string - Result string - Duration time.Duration - Next []*FilterStat - } -) - -func (fs *FilterStat) selfDuration() time.Duration { - d := fs.Duration - for _, s := range fs.Next { - d -= s.Duration - } - return d -} - -func (ctx *PipelineContext) log() string { - if ctx.FilterStats == nil { - return "" - } - - var buf bytes.Buffer - var fn func(stat *FilterStat) - - fn = func(stat *FilterStat) { - buf.WriteString(stat.Name) - buf.WriteByte('(') - buf.WriteString(stat.Result) - if stat.Result != "" { - buf.WriteByte(',') - } - buf.WriteString(stat.selfDuration().String()) - buf.WriteByte(')') - if len(stat.Next) == 0 { - return - } - buf.WriteString("->") - if len(stat.Next) > 1 { - buf.WriteByte('[') - } - for i, s := range stat.Next { - if i > 0 { - buf.WriteByte(',') - } - fn(s) - } - if len(stat.Next) > 1 { - buf.WriteByte(']') - } - } - - fn(ctx.FilterStats) - return buf.String() -} - -// context.Layer4Pipeline: *PipelineContext -var runningContexts = sync.Map{} - -func newAndSetPipelineContext(ctx context.Layer4Context) *PipelineContext { - pipeCtx := &PipelineContext{} - runningContexts.Store(ctx, pipeCtx) - return pipeCtx -} - -// GetPipelineContext returns the corresponding PipelineContext of the Layer4Context, -// and a bool flag to represent it succeed or not. -func GetPipelineContext(ctx context.Layer4Context) (*PipelineContext, bool) { - value, ok := runningContexts.Load(ctx) - if !ok { - return nil, false - } - - pipeCtx, ok := value.(*PipelineContext) - if !ok { - logger.Errorf("BUG: want *PipelineContext, got %T", value) - return nil, false - } - - return pipeCtx, true -} - -func deletePipelineContext(ctx context.Layer4Context) { - runningContexts.Delete(ctx) -} - -func extractFiltersData(config []byte) interface{} { - var whole map[string]interface{} - yamltool.Unmarshal(config, &whole) - return whole["filters"] -} - -// Validate validates the meta information -func (meta *FilterMetaSpec) Validate() error { - if len(meta.Name) == 0 { - return fmt.Errorf("filter name is required") - } - if len(meta.Kind) == 0 { - return fmt.Errorf("filter kind is required") - } - - if meta.Name == LabelEND { - return fmt.Errorf("can't use %s(built-in label) for filter name", LabelEND) - } - return nil -} - -// Validate validates Spec. -func (s Spec) Validate() (err error) { - errPrefix := "filters" - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("%s: %s", errPrefix, r) - } - }() - - config := yamltool.Marshal(s) - - filtersData := extractFiltersData(config) - if filtersData == nil { - return fmt.Errorf("filters is required") - } - - filterSpecs := make(map[string]*FilterSpec) - for _, filterSpec := range s.Filters { - // NOTE: Nil supervisor is fine in spec validating phrase. 
- spec, err := NewFilterSpec(filterSpec, nil) - if err != nil { - panic(err) - } - - if _, exists := filterSpecs[spec.Name()]; exists { - panic(fmt.Errorf("conflict name: %s", spec.Name())) - } - filterSpecs[spec.Name()] = spec - } - - errPrefix = "flow" - filters := make(map[string]struct{}) - for _, f := range s.Flow { - if _, exists := filters[f.Filter]; exists { - panic(fmt.Errorf("repeated filter %s", f.Filter)) - } - } - - labelsValid := map[string]struct{}{LabelEND: {}} - for i := len(s.Flow) - 1; i >= 0; i-- { - f := s.Flow[i] - spec, exists := filterSpecs[f.Filter] - if !exists { - panic(fmt.Errorf("filter %s not found", f.Filter)) - } - expectedResults := spec.RootFilter().Results() - for result, label := range f.JumpIf { - if !stringtool.StrInSlice(result, expectedResults) { - panic(fmt.Errorf("filter %s: result %s is not in %v", - f.Filter, result, expectedResults)) - } - if _, exists := labelsValid[label]; !exists { - panic(fmt.Errorf("filter %s: label %s not found", - f.Filter, label)) - } - } - labelsValid[f.Filter] = struct{}{} - } - return nil -} - -// Category returns the category of Layer4Pipeline. -func (l *Layer4Pipeline) Category() supervisor.ObjectCategory { - return Category -} - -// Kind returns the kind of Layer4Pipeline. -func (l *Layer4Pipeline) Kind() string { - return Kind -} - -// DefaultSpec returns the default spec of Layer4Pipeline. -func (l *Layer4Pipeline) DefaultSpec() interface{} { - return &Spec{} -} - -// Init initializes Layer4Pipeline. -func (l *Layer4Pipeline) Init(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { - l.superSpec, l.spec, l.muxMapper = superSpec, superSpec.ObjectSpec().(*Spec), muxMapper - - l.reload(nil /*no previous generation*/) -} - -// Inherit inherits previous generation of Layer4Pipeline. -func (l *Layer4Pipeline) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper protocol.MuxMapper) { - l.superSpec, l.spec, l.muxMapper = superSpec, superSpec.ObjectSpec().(*Spec), muxMapper - - l.reload(previousGeneration.(*Layer4Pipeline)) - - // NOTE: It's filters' responsibility to inherit and clean their resources. 
- // previousGeneration.Close() -} - -func (l *Layer4Pipeline) reload(previousGeneration *Layer4Pipeline) { - runningFilters := make([]*runningFilter, 0) - if len(l.spec.Flow) == 0 { - for _, filterSpec := range l.spec.Filters { - spec, err := NewFilterSpec(filterSpec, l.superSpec.Super()) - if err != nil { - panic(err) - } - - runningFilters = append(runningFilters, &runningFilter{ - spec: spec, - }) - } - } else { - for _, f := range l.spec.Flow { - var spec *FilterSpec - for _, filterSpec := range l.spec.Filters { - var err error - spec, err = NewFilterSpec(filterSpec, l.superSpec.Super()) - if err != nil { - panic(err) - } - if spec.Name() == f.Filter { - break - } - } - if spec == nil { - panic(fmt.Errorf("flow filter %s not found in filters", f.Filter)) - } - - runningFilters = append(runningFilters, &runningFilter{ - spec: spec, - jumpIf: f.JumpIf, - }) - } - } - - pipelineName := l.superSpec.Name() - for _, runningFilter := range runningFilters { - name, kind := runningFilter.spec.Name(), runningFilter.spec.Kind() - rootFilter, exists := filterRegistry[kind] - if !exists { - panic(fmt.Errorf("kind %s not found", kind)) - } - - var prevInstance Filter - if previousGeneration != nil { - runningFilter := previousGeneration.getRunningFilter(name) - if runningFilter != nil { - prevInstance = runningFilter.filter - } - } - - filter := reflect.New(reflect.TypeOf(rootFilter).Elem()).Interface().(Filter) - runningFilter.spec.meta.Pipeline = pipelineName - if prevInstance == nil { - filter.Init(runningFilter.spec) - } else { - filter.Inherit(runningFilter.spec, prevInstance) - } - - runningFilter.filter, runningFilter.rootFilter = filter, rootFilter - } - - l.runningFilters = runningFilters -} - -func (l *Layer4Pipeline) getNextFilterIndex(index int, result string) int { - // return index + 1 if last filter succeeded - if result == "" { - return index + 1 - } - - // check the jumpIf table of current filter, return its index if the jump - // target is valid and -1 otherwise - filter := l.runningFilters[index] - if !stringtool.StrInSlice(result, filter.rootFilter.Results()) { - format := "BUG: invalid result %s not in %v" - logger.Errorf(format, result, filter.rootFilter.Results()) - } - - if len(filter.jumpIf) == 0 { - return -1 - } - name, ok := filter.jumpIf[result] - if !ok { - return -1 - } - if name == LabelEND { - return len(l.runningFilters) - } - - for index++; index < len(l.runningFilters); index++ { - if l.runningFilters[index].spec.Name() == name { - return index - } - } - return -1 -} - -// InboundHandle is the handler to deal with layer4 inbound data -func (l *Layer4Pipeline) InboundHandle(ctx context.Layer4Context) { - l.innerHandle(ctx, true) -} - -// OutboundHandle is the handler to deal with layer4 outbound data -func (l *Layer4Pipeline) OutboundHandle(ctx context.Layer4Context) { - l.innerHandle(ctx, false) -} - -func (l *Layer4Pipeline) innerHandle(ctx context.Layer4Context, isInbound bool) { - pipeCtx := newAndSetPipelineContext(ctx) - defer deletePipelineContext(ctx) - - filterIndex := -1 - filterStat := &FilterStat{} - - handle := func(lastResult string) string { - - // Filters are called recursively as a stack, so we need to save current - // state and restore it before return - lastIndex := filterIndex - lastStat := filterStat - defer func() { - filterIndex = lastIndex - filterStat = lastStat - }() - - filterIndex = l.getNextFilterIndex(filterIndex, lastResult) - if filterIndex == len(l.runningFilters) { - return "" // reach the end of pipeline - } else if filterIndex 
== -1 { - return lastResult // an error occurs but no filter can handle it - } - - filter := l.runningFilters[filterIndex] - name := filter.spec.Name() - - filterStat = &FilterStat{Name: name, Kind: filter.spec.Kind()} - startTime := time.Now() - var result string - if isInbound { - result = filter.filter.InboundHandle(ctx) - } else { - result = filter.filter.OutboundHandle(ctx) - } - filterStat.Duration = time.Since(startTime) - filterStat.Result = result - - lastStat.Next = append(lastStat.Next, filterStat) - return result - } - - ctx.SetHandlerCaller(handle) - handle("") - - if len(filterStat.Next) > 0 { - pipeCtx.FilterStats = filterStat.Next[0] - } -} - -func (l *Layer4Pipeline) getRunningFilter(name string) *runningFilter { - for _, filter := range l.runningFilters { - if filter.spec.Name() == name { - return filter - } - } - return nil -} - -// Status returns Status generated by Runtime. -func (l *Layer4Pipeline) Status() *supervisor.Status { - s := &Status{ - Filters: make(map[string]interface{}), - } - - for _, runningFilter := range l.runningFilters { - s.Filters[runningFilter.spec.Name()] = runningFilter.filter.Status() - } - - return &supervisor.Status{ - ObjectStatus: s, - } -} - -// Close closes Layer4Pipeline. -func (l *Layer4Pipeline) Close() { - for _, runningFilter := range l.runningFilters { - runningFilter.filter.Close() - } -} diff --git a/pkg/object/layer4pipeline/registry.go b/pkg/object/layer4pipeline/registry.go deleted file mode 100644 index 02ab67d3a2..0000000000 --- a/pkg/object/layer4pipeline/registry.go +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package layer4pipeline - -import ( - "fmt" - "reflect" - - "github.com/megaease/easegress/pkg/context" -) - -type ( - // Filter is the common interface for filters handling HTTP traffic. - Filter interface { - // Kind returns the unique kind name to represent itself. - Kind() string - - // DefaultSpec returns the default spec. - DefaultSpec() interface{} - - // Description returns the description of the filter. - Description() string - - // Results returns all possible results, the normal result - // (i.e. empty string) could not be in it. - Results() []string - - // Init initializes the Filter. - Init(filterSpec *FilterSpec) - - // Inherit also initializes the Filter. - // But it needs to handle the lifecycle of the previous generation. - // So its own responsibility for the filter to inherit and clean the previous generation stuff. - // The http pipeline won't call Close for the previous generation. - Inherit(filterSpec *FilterSpec, previousGeneration Filter) - - // InboundHandle handle layer4 inbound data - InboundHandle(tcpContext context.Layer4Context) (result string) - - // OutboundHandle handle layer4 outbound data - OutboundHandle(tcpContext context.Layer4Context) (result string) - - // Status returns its runtime status. - // It could return nil. - Status() interface{} - - // Close closes itself. 
- Close() - } -) - -var filterRegistry = map[string]Filter{} - -// Register registers filter. -func Register(f Filter) { - if f.Kind() == "" { - panic(fmt.Errorf("%T: empty kind", f)) - } - - existedFilter, existed := filterRegistry[f.Kind()] - if existed { - panic(fmt.Errorf("%T and %T got same kind: %s", f, existedFilter, f.Kind())) - } - - // Checking filter type. - filterType := reflect.TypeOf(f) - if filterType.Kind() != reflect.Ptr { - panic(fmt.Errorf("%s: want a pointer, got %s", f.Kind(), filterType.Kind())) - } - if filterType.Elem().Kind() != reflect.Struct { - panic(fmt.Errorf("%s elem: want a struct, got %s", f.Kind(), filterType.Kind())) - } - - // Checking spec type. - specType := reflect.TypeOf(f.DefaultSpec()) - if specType.Kind() != reflect.Ptr { - panic(fmt.Errorf("%s spec: want a pointer, got %s", f.Kind(), specType.Kind())) - } - if specType.Elem().Kind() != reflect.Struct { - panic(fmt.Errorf("%s spec elem: want a struct, got %s", f.Kind(), specType.Elem().Kind())) - } - - // Checking results. - results := make(map[string]struct{}) - for _, result := range f.Results() { - _, exists := results[result] - if exists { - panic(fmt.Errorf("repeated result: %s", result)) - } - results[result] = struct{}{} - } - - filterRegistry[f.Kind()] = f -} - -// GetFilterRegistry get the filter registry. -func GetFilterRegistry() map[string]Filter { - result := map[string]Filter{} - - for kind, f := range filterRegistry { - result[kind] = f - } - - return result -} diff --git a/pkg/object/layer4pipeline/spec.go b/pkg/object/layer4pipeline/spec.go deleted file mode 100644 index 681b9c3c6d..0000000000 --- a/pkg/object/layer4pipeline/spec.go +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package layer4pipeline - -import ( - "fmt" - - "github.com/megaease/easegress/pkg/supervisor" - "github.com/megaease/easegress/pkg/util/yamltool" - "github.com/megaease/easegress/pkg/v" -) - -type ( - // FilterSpec is the universal spec for all filters. - FilterSpec struct { - super *supervisor.Supervisor - - rawSpec map[string]interface{} - yamlConfig string - meta *FilterMetaSpec - filterSpec interface{} - rootFilter Filter - } - - // FilterMetaSpec is metadata for all specs. - FilterMetaSpec struct { - Name string `yaml:"name" jsonschema:"required,format=urlname"` - Kind string `yaml:"kind" jsonschema:"required"` - Pipeline string `yaml:"-" jsonschema:"-"` - } -) - -// NewFilterSpec creates a filter spec and validates it. -func NewFilterSpec(originalRawSpec map[string]interface{}, super *supervisor.Supervisor) ( - s *FilterSpec, err error) { - - s = &FilterSpec{super: super} - - defer func() { - if r := recover(); r != nil { - s = nil - err = fmt.Errorf("%v", r) - } else { - err = nil - } - }() - - yamlBuff := yamltool.Marshal(originalRawSpec) - - // Meta part. 
- meta := &FilterMetaSpec{} - yamltool.Unmarshal(yamlBuff, meta) - verr := v.Validate(meta) - if !verr.Valid() { - panic(verr) - } - - // Filter self part. - rootFilter, exists := filterRegistry[meta.Kind] - if !exists { - panic(fmt.Errorf("kind %s not found", meta.Kind)) - } - filterSpec := rootFilter.DefaultSpec() - yamltool.Unmarshal(yamlBuff, filterSpec) - verr = v.Validate(filterSpec) - if !verr.Valid() { - // TODO: Make the invalid part more accurate. e,g: - // filters: jsonschemaErrs: - // - 'policies.0: name is required' - // to - // filters: jsonschemaErrs: - // - 'rateLimiter.policies.0: name is required' - panic(verr) - } - - // Build final yaml config and raw spec. - var rawSpec map[string]interface{} - filterBuff := yamltool.Marshal(filterSpec) - yamltool.Unmarshal(filterBuff, &rawSpec) - - metaBuff := yamltool.Marshal(meta) - yamltool.Unmarshal(metaBuff, &rawSpec) - - yamlConfig := string(yamltool.Marshal(rawSpec)) - - s.meta = meta - s.filterSpec = filterSpec - s.rawSpec = rawSpec - s.yamlConfig = yamlConfig - s.rootFilter = rootFilter - - return -} - -// Super returns -func (s *FilterSpec) Super() *supervisor.Supervisor { - return s.super -} - -// Name returns name. -func (s *FilterSpec) Name() string { return s.meta.Name } - -// Kind returns kind. -func (s *FilterSpec) Kind() string { return s.meta.Kind } - -// Pipeline returns the name of the pipeline this filter belongs to. -func (s *FilterSpec) Pipeline() string { return s.meta.Pipeline } - -// YAMLConfig returns the config in yaml format. -func (s *FilterSpec) YAMLConfig() string { - return s.yamlConfig -} - -// RawSpec returns raw spec in type map[string]interface{}. -func (s *FilterSpec) RawSpec() map[string]interface{} { - return s.rawSpec -} - -// FilterSpec returns the filter spec in its own type. -func (s *FilterSpec) FilterSpec() interface{} { - return s.filterSpec -} - -// RootFilter returns the root filter of the filter spec. -func (s *FilterSpec) RootFilter() Filter { - return s.rootFilter -} diff --git a/pkg/object/layer4rawserver/mux.go b/pkg/object/layer4rawserver/mux.go deleted file mode 100644 index 72de0608d5..0000000000 --- a/pkg/object/layer4rawserver/mux.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package layer4rawserver - -import ( - "net" - "sync/atomic" - - "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/protocol" - "github.com/megaease/easegress/pkg/supervisor" - "github.com/megaease/easegress/pkg/util/ipfilter" -) - -type ( - mux struct { - rules atomic.Value // *muxRules - } - - muxRules struct { - superSpec *supervisor.Spec - spec *Spec - - ipFilter *ipfilter.IPFilter - muxMapper protocol.MuxMapper - } -) - -func newIPFilter(spec *ipfilter.Spec) *ipfilter.IPFilter { - if spec == nil { - return nil - } - - return ipfilter.New(spec) -} - -func (mr *muxRules) pass(ctx context.Layer4Context) bool { - if mr.ipFilter == nil { - return true - } - - switch addr := ctx.DownstreamAddr().(type) { - case *net.UDPAddr: - return mr.ipFilter.Allow(addr.IP.String()) - case *net.TCPAddr: - return mr.ipFilter.Allow(addr.IP.String()) - default: - logger.Warnf("invalid remote addr type") - } - return false -} - -func newMux(mapper protocol.MuxMapper) *mux { - m := &mux{} - - m.rules.Store(&muxRules{ - spec: &Spec{}, - muxMapper: mapper, - }) - return m -} - -func (m *mux) reloadRules(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { - spec := superSpec.ObjectSpec().(*Spec) - - rules := &muxRules{ - superSpec: superSpec, - spec: spec, - muxMapper: muxMapper, - ipFilter: newIPFilter(spec.IPFilter), - } - m.rules.Store(rules) -} - -func (m *mux) AllowIP(ipStr string) bool { - rules := m.rules.Load().(*muxRules) - if rules == nil { - return true - } - return rules.ipFilter.Allow(ipStr) -} - -func (m *mux) GetHandler(name string) (protocol.Layer4Handler, bool) { - rules := m.rules.Load().(*muxRules) - if rules == nil { - return nil, false - } - return rules.muxMapper.GetLayer4Handler(name) -} - -func (m *mux) close() { - // may be close tracing in future -} diff --git a/pkg/util/connection/connection.go b/pkg/object/layer4server/connection.go similarity index 99% rename from pkg/util/connection/connection.go rename to pkg/object/layer4server/connection.go index 1dc95cc34c..f69d22763c 100644 --- a/pkg/util/connection/connection.go +++ b/pkg/object/layer4server/connection.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package connection +package layer4server import ( "errors" @@ -372,7 +372,7 @@ func (c *Connection) Close(ccType CloseType, eventType Event) (err error) { } // close conn recv, then notify read/write loop to exit - close(c.listenerStopChan) + close(c.connStopChan) _ = c.conn.Close() c.lastBytesSizeRead = 0 c.lastWriteSizeWrite = 0 diff --git a/pkg/util/connection/constant.go b/pkg/object/layer4server/constant.go similarity index 91% rename from pkg/util/connection/constant.go rename to pkg/object/layer4server/constant.go index ccef6babba..b67af253d4 100644 --- a/pkg/util/connection/constant.go +++ b/pkg/object/layer4server/constant.go @@ -15,11 +15,10 @@ * limitations under the License. 
*/ -package connection +package layer4server import ( "errors" - "time" ) // CloseType represent connection close type @@ -56,14 +55,6 @@ var ( ErrWriteBufferChanTimeout = errors.New("writeBufferChan has timeout") ) -// Network related const -const ( - NetBufferDefaultSize = 0 - NetBufferDefaultCapacity = 1 << 4 - - DefaultConnectTimeout = 10 * time.Second -) - // ConnState status type ConnState int diff --git a/pkg/object/layer4server/ipfilters.go b/pkg/object/layer4server/ipfilters.go new file mode 100644 index 0000000000..1661c9eea6 --- /dev/null +++ b/pkg/object/layer4server/ipfilters.go @@ -0,0 +1,64 @@ +package layer4server + +import ( + "reflect" + "sync/atomic" + + "github.com/megaease/easegress/pkg/util/ipfilter" +) + +type ( + ipFilters struct { + rules atomic.Value + } + + ipFiltersRules struct { + spec *ipfilter.Spec + ipFilter *ipfilter.IPFilter + } +) + +func newIpFilters(spec *ipfilter.Spec) *ipFilters { + m := &ipFilters{} + + m.rules.Store(&ipFiltersRules{ + spec: spec, + ipFilter: newIPFilter(spec), + }) + return m +} + +func (i *ipFilters) AllowIP(ip string) bool { + rules := i.rules.Load().(*ipFiltersRules) + if rules == nil { + return true + } + return rules.ipFilter.Allow(ip) +} + +func (i *ipFilters) reloadRules(spec *ipfilter.Spec) { + old := i.rules.Load().(*ipFiltersRules) + if reflect.DeepEqual(old.spec, spec) { + return + } + + rules := &ipFiltersRules{ + spec: spec, + ipFilter: newIPFilter(spec), + } + i.rules.Store(rules) +} + +func newIPFilter(spec *ipfilter.Spec) *ipfilter.IPFilter { + if spec == nil { + return nil + } + return ipfilter.New(spec) +} + +func (r *ipFiltersRules) pass(downstreamIp string) bool { + if r.ipFilter == nil { + return true + } + return r.ipFilter.Allow(downstreamIp) +} diff --git a/pkg/object/layer4rawserver/layer4server.go b/pkg/object/layer4server/layer4server.go similarity index 96% rename from pkg/object/layer4rawserver/layer4server.go rename to pkg/object/layer4server/layer4server.go index 1759a41b9a..6f28197ec2 100644 --- a/pkg/object/layer4rawserver/layer4server.go +++ b/pkg/object/layer4server/layer4server.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package layer4rawserver +package layer4server import ( "github.com/megaease/easegress/pkg/protocol" @@ -54,7 +54,7 @@ func (l4 *Layer4Server) Kind() string { // DefaultSpec returns the default spec of Layer4Server. func (l4 *Layer4Server) DefaultSpec() interface{} { return &Spec{ - MaxConnections: 10240, + MaxConnections: 1024, ConnectTimeout: 5 * 1000, } } @@ -67,7 +67,7 @@ func (l4 *Layer4Server) Validate() error { // Init initializes Layer4Server. func (l4 *Layer4Server) Init(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { - l4.runtime = newRuntime(superSpec, muxMapper) + l4.runtime = newRuntime(superSpec) l4.runtime.eventChan <- &eventReload{ nextSuperSpec: superSpec, muxMapper: muxMapper, diff --git a/pkg/object/layer4rawserver/listener.go b/pkg/object/layer4server/listener.go similarity index 88% rename from pkg/object/layer4rawserver/listener.go rename to pkg/object/layer4server/listener.go index 3f67598169..546cde2aa5 100644 --- a/pkg/object/layer4rawserver/listener.go +++ b/pkg/object/layer4server/listener.go @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package layer4rawserver +package layer4server import ( stdcontext "context" @@ -34,37 +34,36 @@ type ListenerState int type listener struct { name string - state ListenerState - protocol string // enum:udp/tcp - localAddr string + protocol string // enum:udp/tcp + localAddr string // listen addr + state ListenerState // listener state - mutex *sync.Mutex - stopChan chan struct{} - keepalive bool // keepalive for tcp - maxConns uint32 // maxConn for tcp listener - - tcpListener *limitlistener.LimitListener // tcp listener with accept limit + mutex *sync.Mutex + stopChan chan struct{} + maxConns uint32 // maxConn for tcp listener udpListener net.PacketConn // udp listener + tcpListener *limitlistener.LimitListener // tcp listener with accept limit onTcpAccept func(conn net.Conn, listenerStop chan struct{}) // tcp accept handle onUdpAccept func(downstreamAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) // udp accept handle } -func newListener(spec *Spec, onAccept func(conn net.Conn, listenerStop chan struct{}), +func newListener(spec *Spec, onTcpAccept func(conn net.Conn, listenerStop chan struct{}), onUdpAccept func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer)) *listener { listen := &listener{ + name: spec.Name, protocol: spec.Protocol, localAddr: fmt.Sprintf(":%d", spec.Port), - mutex: &sync.Mutex{}, - stopChan: make(chan struct{}), - onTcpAccept: onAccept, - onUdpAccept: onUdpAccept, + mutex: &sync.Mutex{}, + stopChan: make(chan struct{}), } if listen.protocol == "tcp" { - listen.keepalive = spec.KeepAlive listen.maxConns = spec.MaxConnections + listen.onTcpAccept = onTcpAccept + } else { + listen.onUdpAccept = onUdpAccept } return listen } diff --git a/pkg/object/layer4rawserver/pool.go b/pkg/object/layer4server/pool.go similarity index 59% rename from pkg/object/layer4rawserver/pool.go rename to pkg/object/layer4server/pool.go index a27d096836..d171cf7364 100644 --- a/pkg/object/layer4rawserver/pool.go +++ b/pkg/object/layer4server/pool.go @@ -15,15 +15,22 @@ * limitations under the License. 
*/ -package layer4rawserver +package layer4server import ( + "reflect" + "sync/atomic" + "github.com/megaease/easegress/pkg/supervisor" ) type ( - // pool backend server pool pool struct { + rules atomic.Value + } + + // pool backend server pool + poolRules struct { spec *PoolSpec tagPrefix string @@ -32,14 +39,39 @@ type ( ) func newPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) *pool { - return &pool{ + p := &pool{} + + p.rules.Store(&poolRules{ spec: spec, tagPrefix: tagPrefix, servers: newServers(super, spec), - } + }) + return p +} + +func (p *pool) next(cliAddr string) (*Server, error) { + rules := p.rules.Load().(*poolRules) + return rules.servers.next(cliAddr) } func (p *pool) close() { - p.servers.close() + if old := p.rules.Load(); old != nil { + oldPool := old.(*poolRules) + oldPool.servers.close() + } +} + +func (p *pool) reloadRules(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) { + old := p.rules.Load().(*poolRules) + if reflect.DeepEqual(old.spec, spec) { + return + } + p.close() + p.rules.Store(&poolRules{ + spec: spec, + + tagPrefix: tagPrefix, + servers: newServers(super, spec), + }) } diff --git a/pkg/object/layer4rawserver/runtime.go b/pkg/object/layer4server/runtime.go similarity index 71% rename from pkg/object/layer4rawserver/runtime.go rename to pkg/object/layer4server/runtime.go index 51a5439751..81b36e2efb 100644 --- a/pkg/object/layer4rawserver/runtime.go +++ b/pkg/object/layer4server/runtime.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package layer4rawserver +package layer4server import ( "fmt" @@ -24,11 +24,9 @@ import ( "sync/atomic" "time" - "github.com/megaease/easegress/pkg/context" "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/protocol" "github.com/megaease/easegress/pkg/supervisor" - "github.com/megaease/easegress/pkg/util/connection" "github.com/megaease/easegress/pkg/util/iobufferpool" ) @@ -63,9 +61,11 @@ type ( runtime struct { superSpec *supervisor.Spec spec *Spec - mux *mux - pool *pool // backend servers - listener *listener // layer4 server + + pool *pool // backend servers pool + ipFilters *ipFilters // ip filters + listener *listener // layer4 listener + startNum uint64 eventChan chan interface{} // receive traffic controller event @@ -74,13 +74,17 @@ type ( } ) -func newRuntime(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) *runtime { +func newRuntime(superSpec *supervisor.Spec) *runtime { + spec := superSpec.ObjectSpec().(*Spec) r := &runtime{ superSpec: superSpec, + + pool: newPool(superSpec.Super(), spec.Pool, ""), + ipFilters: newIpFilters(spec.IPFilter), + eventChan: make(chan interface{}, 10), } - r.mux = newMux(muxMapper) r.setState(stateNil) r.setError(errNil) @@ -118,12 +122,11 @@ func (r *runtime) fsm() { } } -func (r *runtime) reload(nextSuperSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { +func (r *runtime) reload(nextSuperSpec *supervisor.Spec) { r.superSpec = nextSuperSpec - r.mux.reloadRules(nextSuperSpec, muxMapper) - nextSpec := nextSuperSpec.ObjectSpec().(*Spec) - r.pool = newPool(nextSuperSpec.Super(), nextSpec.Pool, "") + r.ipFilters.reloadRules(nextSpec.IPFilter) + r.pool.reloadRules(nextSuperSpec.Super(), nextSpec.Pool, "") // r.listener does not create just after the process started and the config load for the first time. 
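	// NOTE: ipFilters.reloadRules and pool.reloadRules above hot-swap an immutable
	// rules value through atomic.Value, so readers on the data path never take a
	// lock. A minimal, illustrative sketch of that pattern (type and field names are
	// assumptions, not part of the patch):
	//
	//	type holder struct{ rules atomic.Value } // stores *rulesSnapshot
	//
	//	func (h *holder) reload(next *rulesSnapshot) {
	//		if old, _ := h.rules.Load().(*rulesSnapshot); reflect.DeepEqual(old, next) {
	//			return // spec unchanged, keep the current snapshot
	//		}
	//		h.rules.Store(next) // readers pick up the new snapshot on their next Load
	//	}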
if nextSpec != nil && r.listener != nil { @@ -184,10 +187,8 @@ func (r *runtime) needRestartServer(nextSpec *Spec) bool { y := *nextSpec // The change of options below need not restart the layer4 server. - x.KeepAlive, y.KeepAlive = true, true x.MaxConnections, y.MaxConnections = 0, 0 - x.ConnectTimeout, y.ProxyTimeout = 0, 0 - x.ProxyTimeout, y.ProxyTimeout = 0, 0 + x.ConnectTimeout, y.ConnectTimeout = 0, 0 x.Pool, y.Pool = nil, nil x.IPFilter, y.IPFilter = nil, nil @@ -257,12 +258,11 @@ func (r *runtime) handleEventServeFailed(e *eventServeFailed) { } func (r *runtime) handleEventReload(e *eventReload) { - r.reload(e.nextSuperSpec, e.muxMapper) + r.reload(e.nextSuperSpec) } func (r *runtime) handleEventClose(e *eventClose) { r.closeServer() - r.mux.close() r.pool.close() close(e.done) } @@ -271,14 +271,14 @@ func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) return func(rawConn net.Conn, listenerStop chan struct{}) { downstream := rawConn.RemoteAddr().(*net.TCPAddr).IP.String() - if r.mux.AllowIP(downstream) { + if r.ipFilters.AllowIP(downstream) { _ = rawConn.Close() logger.Infof("close tcp connection from %s to %s which ip is not allowed", rawConn.RemoteAddr().String(), rawConn.LocalAddr().String()) return } - server, err := r.pool.servers.next(downstream) + server, err := r.pool.next(downstream) if err != nil { _ = rawConn.Close() logger.Errorf("close tcp connection due to no available upstream server, local addr: %s, err: %+v", @@ -287,15 +287,16 @@ func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) } upstreamAddr, _ := net.ResolveTCPAddr("tcp", server.Addr) - upstreamConn := connection.NewUpstreamConn(time.Duration(r.spec.ConnectTimeout)*time.Millisecond, upstreamAddr, listenerStop) + upstreamConn := NewUpstreamConn(time.Duration(r.spec.ConnectTimeout)*time.Millisecond, upstreamAddr, listenerStop) if err := upstreamConn.Connect(); err != nil { logger.Errorf("close tcp connection due to upstream conn connect failed, local addr: %s, err: %+v", rawConn.LocalAddr().String(), err) _ = rawConn.Close() } else { - downstreamConn := connection.NewDownstreamConn(rawConn, rawConn.RemoteAddr(), listenerStop) - ctx := context.NewLayer4Context("tcp", rawConn.LocalAddr(), rawConn.RemoteAddr(), upstreamAddr) - r.setOnReadHandler(downstreamConn, upstreamConn, ctx) + downstreamConn := NewDownstreamConn(rawConn, rawConn.RemoteAddr(), listenerStop) + r.setOnReadHandler(downstreamConn, upstreamConn) + upstreamConn.Start() + downstreamConn.Start() } } } @@ -303,21 +304,21 @@ func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) func (r *runtime) onUdpAccept() func(cliAddr net.Addr, rawConn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) { return func(cliAddr net.Addr, rawConn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) { downstream := cliAddr.(*net.UDPAddr).IP.String() - if r.mux.AllowIP(downstream) { + if r.ipFilters.AllowIP(downstream) { logger.Infof("discard udp packet from %s to %s which ip is not allowed", cliAddr.String(), rawConn.LocalAddr().String()) return } localAddr := rawConn.LocalAddr() - key := connection.GetProxyMapKey(localAddr.String(), cliAddr.String()) - if rawDownstreamConn, ok := connection.ProxyMap.Load(key); ok { - downstreamConn := rawDownstreamConn.(*connection.Connection) + key := GetProxyMapKey(localAddr.String(), cliAddr.String()) + if rawDownstreamConn, ok := ProxyMap.Load(key); ok { + downstreamConn := rawDownstreamConn.(*Connection) 
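			// This downstream *Connection was stored by an earlier packet from the same
			// client (the session is keyed on local plus client address, since UDP has no
			// accept/handshake), so the new payload is fed into that live session below
			// instead of dialing the upstream again.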
downstreamConn.OnRead(packet) return } - server, err := r.pool.servers.next(downstream) + server, err := r.pool.next(downstream) if err != nil { logger.Infof("discard udp packet from %s to %s due to can not find upstream server, err: %+v", cliAddr.String(), localAddr.String()) @@ -325,46 +326,35 @@ func (r *runtime) onUdpAccept() func(cliAddr net.Addr, rawConn net.Conn, listene } upstreamAddr, _ := net.ResolveUDPAddr("udp", server.Addr) - upstreamConn := connection.NewUpstreamConn(time.Duration(r.spec.ConnectTimeout)*time.Millisecond, upstreamAddr, listenerStop) + upstreamConn := NewUpstreamConn(time.Duration(r.spec.ConnectTimeout)*time.Millisecond, upstreamAddr, listenerStop) if err := upstreamConn.Connect(); err != nil { logger.Errorf("discard udp packet due to upstream connect failed, local addr: %s, err: %+v", localAddr, err) return } - downstreamConn := connection.NewDownstreamConn(rawConn, rawConn.RemoteAddr(), listenerStop) - ctx := context.NewLayer4Context("udp", localAddr, upstreamAddr, upstreamAddr) - connection.SetUDPProxyMap(connection.GetProxyMapKey(localAddr.String(), cliAddr.String()), &downstreamConn) - r.setOnReadHandler(downstreamConn, upstreamConn, ctx) + fd, _ := rawConn.(*net.UDPConn).File() + downstreamRawConn, _ := net.FilePacketConn(fd) + downstreamConn := NewDownstreamConn(downstreamRawConn.(*net.UDPConn), rawConn.RemoteAddr(), listenerStop) + SetUDPProxyMap(GetProxyMapKey(localAddr.String(), cliAddr.String()), &downstreamConn) + r.setOnReadHandler(downstreamConn, upstreamConn) + + downstreamConn.Start() + upstreamConn.Start() downstreamConn.OnRead(packet) } } -func (r *runtime) setOnReadHandler(downstreamConn *connection.Connection, upstreamConn *connection.UpstreamConnection, ctx context.Layer4Context) { - if handle, ok := r.mux.GetHandler(r.spec.Name); ok { - downstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { - handle.Handle(ctx, readBuf, nil) - if buf := ctx.GetDownstreamWriteBuffer(); buf != nil { - _ = upstreamConn.Write(buf) - } - }) - upstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { - handle.Handle(ctx, readBuf, nil) - if buf := ctx.GetUpstreamWriteBuffer(); buf != nil { - _ = downstreamConn.Write(buf) - } - }) - } else { - downstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { - if readBuf != nil && readBuf.Len() > 0 { - _ = upstreamConn.Write(readBuf.Clone()) - readBuf.Drain(readBuf.Len()) - } - }) - upstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { - if readBuf != nil && readBuf.Len() > 0 { - _ = downstreamConn.Write(readBuf.Clone()) - readBuf.Drain(readBuf.Len()) - } - }) - } +func (r *runtime) setOnReadHandler(downstreamConn *Connection, upstreamConn *UpstreamConnection) { + downstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { + if readBuf != nil && readBuf.Len() > 0 { + _ = upstreamConn.Write(readBuf.Clone()) + readBuf.Drain(readBuf.Len()) + } + }) + upstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { + if readBuf != nil && readBuf.Len() > 0 { + _ = downstreamConn.Write(readBuf.Clone()) + readBuf.Drain(readBuf.Len()) + } + }) } diff --git a/pkg/object/layer4rawserver/backendserver.go b/pkg/object/layer4server/server.go similarity index 99% rename from pkg/object/layer4rawserver/backendserver.go rename to pkg/object/layer4server/server.go index 79cb7d85bb..0a8ff2f6d1 100644 --- a/pkg/object/layer4rawserver/backendserver.go +++ b/pkg/object/layer4server/server.go @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package layer4rawserver +package layer4server import ( "fmt" diff --git a/pkg/object/layer4rawserver/spec.go b/pkg/object/layer4server/spec.go similarity index 91% rename from pkg/object/layer4rawserver/spec.go rename to pkg/object/layer4server/spec.go index c213a72735..94538a85f9 100644 --- a/pkg/object/layer4rawserver/spec.go +++ b/pkg/object/layer4server/spec.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package layer4rawserver +package layer4server import ( "fmt" @@ -31,13 +31,11 @@ type ( Port uint16 `yaml:"port" json:"port" jsonschema:"required"` // tcp stream config params - KeepAlive bool `yaml:"keepAlive" jsonschema:"required"` MaxConnections uint32 `yaml:"maxConns" jsonschema:"omitempty,minimum=1"` ConnectTimeout uint32 `yaml:"connectTimeout" jsonschema:"omitempty"` - ProxyTimeout uint32 `yaml:"proxyTimeout" jsonschema:"omitempty"` Pool *PoolSpec `yaml:"pool" jsonschema:"required"` - IPFilter *ipfilter.Spec `yaml:"ipFilter,omitempty" jsonschema:"omitempty"` + IPFilter *ipfilter.Spec `yaml:"ipFilters,omitempty" jsonschema:"omitempty"` } // PoolSpec describes a pool of servers. diff --git a/pkg/util/connection/udpreceiver.go b/pkg/object/layer4server/udpreceiver.go similarity index 98% rename from pkg/util/connection/udpreceiver.go rename to pkg/object/layer4server/udpreceiver.go index 9b15d57a90..c54061b868 100644 --- a/pkg/util/connection/udpreceiver.go +++ b/pkg/object/layer4server/udpreceiver.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package connection +package layer4server import ( "strings" diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go index eb40966db3..5eb0ab2288 100644 --- a/pkg/registry/registry.go +++ b/pkg/registry/registry.go @@ -45,8 +45,7 @@ import ( _ "github.com/megaease/easegress/pkg/object/httppipeline" _ "github.com/megaease/easegress/pkg/object/httpserver" _ "github.com/megaease/easegress/pkg/object/ingresscontroller" - _ "github.com/megaease/easegress/pkg/object/layer4pipeline" - _ "github.com/megaease/easegress/pkg/object/layer4rawserver" + _ "github.com/megaease/easegress/pkg/object/layer4server" _ "github.com/megaease/easegress/pkg/object/meshcontroller" _ "github.com/megaease/easegress/pkg/object/nacosserviceregistry" _ "github.com/megaease/easegress/pkg/object/rawconfigtrafficcontroller" From e2ea5bc55a2a60bb14efe774761bfed03ad3352a Mon Sep 17 00:00:00 2001 From: jxd134 Date: Wed, 13 Oct 2021 22:50:35 +0800 Subject: [PATCH 28/99] [layer4proxy] delete layer4 filter --- pkg/object/layer4server/ipfilters.go | 4 +- pkg/object/layer4server/listener.go | 8 +-- pkg/util/layer4filter/layer4filter.go | 86 --------------------------- 3 files changed, 6 insertions(+), 92 deletions(-) delete mode 100644 pkg/util/layer4filter/layer4filter.go diff --git a/pkg/object/layer4server/ipfilters.go b/pkg/object/layer4server/ipfilters.go index 1661c9eea6..43bf39d144 100644 --- a/pkg/object/layer4server/ipfilters.go +++ b/pkg/object/layer4server/ipfilters.go @@ -56,9 +56,9 @@ func newIPFilter(spec *ipfilter.Spec) *ipfilter.IPFilter { return ipfilter.New(spec) } -func (r *ipFiltersRules) pass(downstreamIp string) bool { +func (r *ipFiltersRules) pass(downstreamIP string) bool { if r.ipFilter == nil { return true } - return r.ipFilter.Allow(downstreamIp) + return r.ipFilter.Allow(downstreamIP) } diff --git a/pkg/object/layer4server/listener.go b/pkg/object/layer4server/listener.go index 546cde2aa5..65854c358d 100644 --- a/pkg/object/layer4server/listener.go +++ b/pkg/object/layer4server/listener.go @@ -48,8 +48,8 @@ type listener 
struct { onUdpAccept func(downstreamAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) // udp accept handle } -func newListener(spec *Spec, onTcpAccept func(conn net.Conn, listenerStop chan struct{}), - onUdpAccept func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer)) *listener { +func newListener(spec *Spec, onTCPAccept func(conn net.Conn, listenerStop chan struct{}), + onUDPAccept func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer)) *listener { listen := &listener{ name: spec.Name, protocol: spec.Protocol, @@ -61,9 +61,9 @@ func newListener(spec *Spec, onTcpAccept func(conn net.Conn, listenerStop chan s if listen.protocol == "tcp" { listen.maxConns = spec.MaxConnections - listen.onTcpAccept = onTcpAccept + listen.onTcpAccept = onTCPAccept } else { - listen.onUdpAccept = onUdpAccept + listen.onUdpAccept = onUDPAccept } return listen } diff --git a/pkg/util/layer4filter/layer4filter.go b/pkg/util/layer4filter/layer4filter.go deleted file mode 100644 index 93a701148b..0000000000 --- a/pkg/util/layer4filter/layer4filter.go +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package layer4filter - -import ( - "math/rand" - "net" - "time" - - "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/util/hashtool" -) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -const ( - policyIPHash string = "ipHash" - policyRandom = "random" -) - -type ( - // Spec describes Layer4filter. - Spec struct { - Probability *Probability `yaml:"probability,omitempty" jsonschema:"omitempty"` - } - - // Layer4filter filters layer4 traffic. - Layer4filter struct { - spec *Spec - } - - // Probability filters layer4 traffic by probability. - Probability struct { - PerMill uint32 `yaml:"perMill" jsonschema:"required,minimum=1,maximum=1000"` - Policy string `yaml:"policy" jsonschema:"required,enum=ipHash,enum=headerHash,enum=random"` - } -) - -// New creates an HTTPFilter. -func New(spec *Spec) *Layer4filter { - hf := &Layer4filter{ - spec: spec, - } - return hf -} - -// Filter filters Layer4Context. 
-func (hf *Layer4filter) Filter(ctx context.Layer4Context) bool { - return hf.filterProbability(ctx) -} - -func (hf *Layer4filter) filterProbability(ctx context.Layer4Context) bool { - prob := hf.spec.Probability - - var result uint32 - switch prob.Policy { - case policyRandom: - result = uint32(rand.Int31n(1000)) - case policyIPHash: - default: - host, _, _ := net.SplitHostPort(ctx.RemoteAddr().String()) - result = hashtool.Hash32(host) - } - - if result%1000 < prob.PerMill { - return true - } - return false -} From 48901c784455cd800d16b320578dc3a2efbe4d3b Mon Sep 17 00:00:00 2001 From: jxd134 Date: Wed, 13 Oct 2021 23:01:31 +0800 Subject: [PATCH 29/99] [layer4proxy] add missing license --- pkg/object/layer4server/ipfilters.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pkg/object/layer4server/ipfilters.go b/pkg/object/layer4server/ipfilters.go index 43bf39d144..a364cb8984 100644 --- a/pkg/object/layer4server/ipfilters.go +++ b/pkg/object/layer4server/ipfilters.go @@ -1,3 +1,20 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package layer4server import ( From d9293ca0454dc3128560d2f3f181a92212c78064 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 14 Oct 2021 17:26:12 +0800 Subject: [PATCH 30/99] [layer4proxy] change layer4 proxy Category --- pkg/object/layer4server/layer4server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/object/layer4server/layer4server.go b/pkg/object/layer4server/layer4server.go index 6f28197ec2..62aac725f7 100644 --- a/pkg/object/layer4server/layer4server.go +++ b/pkg/object/layer4server/layer4server.go @@ -24,7 +24,7 @@ import ( const ( // Category is the category of Layer4Server. - Category = supervisor.CategoryTrafficGate + Category = supervisor.CategoryBusinessController // Kind is the kind of Layer4Server. 
Kind = "Layer4Server" From b5f3325896022004fedce7e267453fa9a9ba67c9 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 14 Oct 2021 22:41:13 +0800 Subject: [PATCH 31/99] [layer4proxy] fix some minor problem --- pkg/object/layer4server/connection.go | 10 +++++++--- pkg/object/layer4server/ipfilters.go | 7 ++++++- pkg/object/layer4server/layer4server.go | 7 ++----- pkg/object/layer4server/listener.go | 3 +-- pkg/object/layer4server/runtime.go | 12 +++++------- pkg/object/layer4server/server.go | 3 +-- 6 files changed, 22 insertions(+), 20 deletions(-) diff --git a/pkg/object/layer4server/connection.go b/pkg/object/layer4server/connection.go index f69d22763c..36dace09a9 100644 --- a/pkg/object/layer4server/connection.go +++ b/pkg/object/layer4server/connection.go @@ -515,7 +515,7 @@ type UpstreamConnection struct { connectOnce sync.Once } -func NewUpstreamConn(connectTimeout time.Duration, upstreamAddr net.Addr, listenerStopChan chan struct{}) *UpstreamConnection { +func NewUpstreamConn(connectTimeout uint32, upstreamAddr net.Addr, listenerStopChan chan struct{}) *UpstreamConnection { conn := &UpstreamConnection{ Connection: Connection{ connected: 1, @@ -530,7 +530,7 @@ func NewUpstreamConn(connectTimeout time.Duration, upstreamAddr net.Addr, listen connStopChan: make(chan struct{}), listenerStopChan: listenerStopChan, }, - connectTimeout: connectTimeout, + connectTimeout: time.Duration(connectTimeout) * time.Millisecond, } return conn } @@ -556,8 +556,12 @@ func (u *UpstreamConnection) connect() (event Event, err error) { return } atomic.StoreUint32(&u.connected, 1) - event = Connected u.localAddr = u.conn.LocalAddr() + if u.protocol == "tcp" { + _ = u.conn.(*net.TCPConn).SetNoDelay(true) + _ = u.conn.(*net.TCPConn).SetKeepAlive(true) + } + event = Connected return } diff --git a/pkg/object/layer4server/ipfilters.go b/pkg/object/layer4server/ipfilters.go index a364cb8984..47eb249971 100644 --- a/pkg/object/layer4server/ipfilters.go +++ b/pkg/object/layer4server/ipfilters.go @@ -47,13 +47,18 @@ func newIpFilters(spec *ipfilter.Spec) *ipFilters { func (i *ipFilters) AllowIP(ip string) bool { rules := i.rules.Load().(*ipFiltersRules) - if rules == nil { + if rules == nil || rules.spec == nil { return true } return rules.ipFilter.Allow(ip) } func (i *ipFilters) reloadRules(spec *ipfilter.Spec) { + if spec == nil { + i.rules.Store(&ipFiltersRules{}) + return + } + old := i.rules.Load().(*ipFiltersRules) if reflect.DeepEqual(old.spec, spec) { return diff --git a/pkg/object/layer4server/layer4server.go b/pkg/object/layer4server/layer4server.go index 62aac725f7..04c2c9bbe7 100644 --- a/pkg/object/layer4server/layer4server.go +++ b/pkg/object/layer4server/layer4server.go @@ -18,7 +18,6 @@ package layer4server import ( - "github.com/megaease/easegress/pkg/protocol" "github.com/megaease/easegress/pkg/supervisor" ) @@ -65,22 +64,20 @@ func (l4 *Layer4Server) Validate() error { } // Init initializes Layer4Server. -func (l4 *Layer4Server) Init(superSpec *supervisor.Spec, muxMapper protocol.MuxMapper) { +func (l4 *Layer4Server) Init(superSpec *supervisor.Spec) { l4.runtime = newRuntime(superSpec) l4.runtime.eventChan <- &eventReload{ nextSuperSpec: superSpec, - muxMapper: muxMapper, } } // Inherit inherits previous generation of Layer4Server. 
-func (l4 *Layer4Server) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper protocol.MuxMapper) { +func (l4 *Layer4Server) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object) { l4.runtime = previousGeneration.(*Layer4Server).runtime l4.runtime.eventChan <- &eventReload{ nextSuperSpec: superSpec, - muxMapper: muxMapper, } } diff --git a/pkg/object/layer4server/listener.go b/pkg/object/layer4server/listener.go index 65854c358d..41c2667fbf 100644 --- a/pkg/object/layer4server/listener.go +++ b/pkg/object/layer4server/listener.go @@ -157,8 +157,7 @@ func (l *listener) acceptEventLoop() { if !(ope.Timeout() && ope.Temporary()) { // accept error raised by sockets closing if ope.Op == "accept" { - logger.Errorf("tcp listener(%s) stop accept connection due to listener closed", - l.localAddr) + logger.Debugf("tcp listener(%s) stop accept connection due to listener closed", l.localAddr) } else { logger.Errorf("tcp listener(%s) stop accept connection due to non-recoverable error: %s", l.localAddr, err.Error()) diff --git a/pkg/object/layer4server/runtime.go b/pkg/object/layer4server/runtime.go index 81b36e2efb..f8b66e8fd2 100644 --- a/pkg/object/layer4server/runtime.go +++ b/pkg/object/layer4server/runtime.go @@ -25,7 +25,6 @@ import ( "time" "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/protocol" "github.com/megaease/easegress/pkg/supervisor" "github.com/megaease/easegress/pkg/util/iobufferpool" ) @@ -54,7 +53,6 @@ type ( eventReload struct { nextSuperSpec *supervisor.Spec - muxMapper protocol.MuxMapper } eventClose struct{ done chan struct{} } @@ -227,7 +225,7 @@ func (r *runtime) closeServer() { } _ = r.listener.close() - logger.Infof("listener for %s :%d closed", r.listener.protocol, r.listener.localAddr) + logger.Infof("listener for %s(%s) closed", r.listener.protocol, r.listener.localAddr) } func (r *runtime) checkFailed() { @@ -271,7 +269,7 @@ func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) return func(rawConn net.Conn, listenerStop chan struct{}) { downstream := rawConn.RemoteAddr().(*net.TCPAddr).IP.String() - if r.ipFilters.AllowIP(downstream) { + if r.ipFilters != nil && !r.ipFilters.AllowIP(downstream) { _ = rawConn.Close() logger.Infof("close tcp connection from %s to %s which ip is not allowed", rawConn.RemoteAddr().String(), rawConn.LocalAddr().String()) @@ -287,7 +285,7 @@ func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) } upstreamAddr, _ := net.ResolveTCPAddr("tcp", server.Addr) - upstreamConn := NewUpstreamConn(time.Duration(r.spec.ConnectTimeout)*time.Millisecond, upstreamAddr, listenerStop) + upstreamConn := NewUpstreamConn(r.spec.ConnectTimeout, upstreamAddr, listenerStop) if err := upstreamConn.Connect(); err != nil { logger.Errorf("close tcp connection due to upstream conn connect failed, local addr: %s, err: %+v", rawConn.LocalAddr().String(), err) @@ -304,7 +302,7 @@ func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) func (r *runtime) onUdpAccept() func(cliAddr net.Addr, rawConn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) { return func(cliAddr net.Addr, rawConn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) { downstream := cliAddr.(*net.UDPAddr).IP.String() - if r.ipFilters.AllowIP(downstream) { + if r.ipFilters != nil && !r.ipFilters.AllowIP(downstream) { logger.Infof("discard udp packet from %s to %s which ip is not allowed", cliAddr.String(), 
rawConn.LocalAddr().String()) return @@ -326,7 +324,7 @@ func (r *runtime) onUdpAccept() func(cliAddr net.Addr, rawConn net.Conn, listene } upstreamAddr, _ := net.ResolveUDPAddr("udp", server.Addr) - upstreamConn := NewUpstreamConn(time.Duration(r.spec.ConnectTimeout)*time.Millisecond, upstreamAddr, listenerStop) + upstreamConn := NewUpstreamConn(r.spec.ConnectTimeout, upstreamAddr, listenerStop) if err := upstreamConn.Connect(); err != nil { logger.Errorf("discard udp packet due to upstream connect failed, local addr: %s, err: %+v", localAddr, err) return diff --git a/pkg/object/layer4server/server.go b/pkg/object/layer4server/server.go index 0a8ff2f6d1..947a4d5342 100644 --- a/pkg/object/layer4server/server.go +++ b/pkg/object/layer4server/server.go @@ -70,8 +70,7 @@ type ( // LoadBalance is load balance for multiple servers. LoadBalance struct { - Policy string `yaml:"policy" jsonschema:"required,enum=roundRobin,enum=random,enum=weightedRandom,enum=ipHash"` - HeaderHashKey string `yaml:"headerHashKey" jsonschema:"omitempty"` + Policy string `yaml:"policy" jsonschema:"required,enum=roundRobin,enum=random,enum=weightedRandom,enum=ipHash"` } ) From 6defc6b216d0c75d2ee2888d08bb542f0ea9f739 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 15 Oct 2021 14:25:44 +0800 Subject: [PATCH 32/99] [layer4proxy] fix tcp connection close bug --- pkg/object/layer4server/connection.go | 40 ++++++++++++++++----------- pkg/object/layer4server/constant.go | 24 ++++++++-------- pkg/object/layer4server/runtime.go | 18 ++++++++++-- 3 files changed, 52 insertions(+), 30 deletions(-) diff --git a/pkg/object/layer4server/connection.go b/pkg/object/layer4server/connection.go index 36dace09a9..328d2995dc 100644 --- a/pkg/object/layer4server/connection.go +++ b/pkg/object/layer4server/connection.go @@ -22,6 +22,7 @@ import ( "io" "net" "reflect" + "runtime/debug" "strings" "sync" "sync/atomic" @@ -60,7 +61,7 @@ type Connection struct { writeCollector metrics.Counter onRead func(buffer iobufferpool.IoBuffer) // execute read filters - onClose func() + onClose func(event ConnectionEvent) } // NewDownstreamConn wrap connection create from client @@ -124,6 +125,10 @@ func (c *Connection) OnRead(buffer iobufferpool.IoBuffer) { c.onRead(buffer) } +func (c *Connection) SetOnClose(onclose func(event ConnectionEvent)) { + c.onClose = onclose +} + func (c *Connection) GetReadBuffer() iobufferpool.IoBuffer { return c.readBuffer } @@ -223,6 +228,7 @@ func (c *Connection) startReadLoop() { for { select { case <-c.connStopChan: + logger.Infof("exit read loop") return case <-c.listenerStopChan: return @@ -241,7 +247,7 @@ func (c *Connection) startReadLoop() { // normal close or health check if c.lastBytesSizeRead == 0 || err == io.EOF { - logger.Debugf("tcp connection error on read, local addr: %s, remote addr: %s, err: %s", + logger.Infof("tcp connection error on read, local addr: %s, remote addr: %s, err: %s", c.localAddr.String(), c.remoteAddr.String(), err.Error()) } else { logger.Errorf("tcp connection error on read, local addr: %s, remote addr: %s, err: %s", @@ -304,8 +310,8 @@ func (c *Connection) startWriteLoop() { if err != nil { if err == iobufferpool.EOF { - logger.Debugf("%s connection error on write, local addr: %s, remote addr: %s, err: %+v", - c.protocol, c.localAddr.String(), c.remoteAddr.String(), err) + logger.Debugf("%s connection local close with eof, local addr: %s, remote addr: %s", + c.protocol, c.localAddr.String(), c.remoteAddr.String()) _ = c.Close(NoFlush, LocalClose) } else { logger.Errorf("%s connection 
error on write, local addr: %s, remote addr: %s, err: %+v", @@ -337,11 +343,10 @@ func (c *Connection) appendBuffer(ioBuffers *[]iobufferpool.IoBuffer) { } } -func (c *Connection) Close(ccType CloseType, eventType Event) (err error) { +func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) { defer func() { if r := recover(); r != nil { - logger.Errorf("%s connection close occur panic, local addr: %s, remote addr: %s, err: %+v", - c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) + logger.Errorf("%s connection close panic, err: %+v\n%s", c.protocol, r, string(debug.Stack())) } }() @@ -361,8 +366,8 @@ func (c *Connection) Close(ccType CloseType, eventType Event) (err error) { // close tcp conn read first if tconn, ok := c.conn.(*net.TCPConn); ok { - logger.Debugf("tcp connection close read, local addr: %s, remote addr: %s", - c.localAddr.String(), c.remoteAddr.String()) + logger.Debugf("tcp connection closed, local addr: %s, remote addr: %s, event: %s", + c.localAddr.String(), c.remoteAddr.String(), event) _ = tconn.CloseRead() } @@ -374,11 +379,14 @@ func (c *Connection) Close(ccType CloseType, eventType Event) (err error) { // close conn recv, then notify read/write loop to exit close(c.connStopChan) _ = c.conn.Close() - c.lastBytesSizeRead = 0 - c.lastWriteSizeWrite = 0 + c.lastBytesSizeRead, c.lastWriteSizeWrite = 0, 0 + + logger.Debugf("%s connection closed, local addr: %s, remote addr: %s, event: %s", + c.protocol, c.localAddr.String(), c.remoteAddr.String(), event) - logger.Errorf("%s connection closed, local addr: %s, remote addr: %s", - c.protocol, c.localAddr.String(), c.remoteAddr.String()) + if c.onClose != nil { + c.onClose(event) + } return nil } @@ -490,7 +498,7 @@ func (c *Connection) doWriteIO() (bytesSent int64, err error) { err = iobufferpool.EOF } if e := iobufferpool.PutIoBuffer(buf); e != nil { - logger.Errorf("%s connection PutIoBuffer error, local addr: %s, remote addr: %s, err: %+v", + logger.Errorf("%s connection give buffer error, local addr: %s, remote addr: %s, err: %+v", c.protocol, c.localAddr.String(), c.remoteAddr.String(), err) } } @@ -535,7 +543,7 @@ func NewUpstreamConn(connectTimeout uint32, upstreamAddr net.Addr, listenerStopC return conn } -func (u *UpstreamConnection) connect() (event Event, err error) { +func (u *UpstreamConnection) connect() (event ConnectionEvent, err error) { timeout := u.connectTimeout if timeout == 0 { timeout = 10 * time.Second @@ -567,7 +575,7 @@ func (u *UpstreamConnection) connect() (event Event, err error) { func (u *UpstreamConnection) Connect() (err error) { u.connectOnce.Do(func() { - var event Event + var event ConnectionEvent event, err = u.connect() if err == nil { u.Start() diff --git a/pkg/object/layer4server/constant.go b/pkg/object/layer4server/constant.go index b67af253d4..7299bc659c 100644 --- a/pkg/object/layer4server/constant.go +++ b/pkg/object/layer4server/constant.go @@ -32,21 +32,21 @@ const ( NoFlush CloseType = "NoFlush" ) -// Event type -type Event string +// ConnectionEvent type +type ConnectionEvent string // ConnectionEvent types const ( - RemoteClose Event = "RemoteClose" - LocalClose Event = "LocalClose" - OnReadErrClose Event = "OnReadErrClose" - OnWriteErrClose Event = "OnWriteErrClose" - OnConnect Event = "OnConnect" - Connected Event = "ConnectedFlag" - ConnectTimeout Event = "ConnectTimeout" - ConnectFailed Event = "ConnectFailed" - OnReadTimeout Event = "OnReadTimeout" - OnWriteTimeout Event = "OnWriteTimeout" + RemoteClose ConnectionEvent = "RemoteClose" + 
LocalClose ConnectionEvent = "LocalClose" + OnReadErrClose ConnectionEvent = "OnReadErrClose" + OnWriteErrClose ConnectionEvent = "OnWriteErrClose" + OnConnect ConnectionEvent = "OnConnect" + Connected ConnectionEvent = "ConnectedFlag" + ConnectTimeout ConnectionEvent = "ConnectTimeout" + ConnectFailed ConnectionEvent = "ConnectFailed" + OnReadTimeout ConnectionEvent = "OnReadTimeout" + OnWriteTimeout ConnectionEvent = "OnWriteTimeout" ) var ( diff --git a/pkg/object/layer4server/runtime.go b/pkg/object/layer4server/runtime.go index f8b66e8fd2..723dcbbb04 100644 --- a/pkg/object/layer4server/runtime.go +++ b/pkg/object/layer4server/runtime.go @@ -287,12 +287,26 @@ func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) upstreamAddr, _ := net.ResolveTCPAddr("tcp", server.Addr) upstreamConn := NewUpstreamConn(r.spec.ConnectTimeout, upstreamAddr, listenerStop) if err := upstreamConn.Connect(); err != nil { - logger.Errorf("close tcp connection due to upstream conn connect failed, local addr: %s, err: %+v", - rawConn.LocalAddr().String(), err) + logger.Errorf("upstream connect failed(name: %s, addr: %s), err: %+v", + r.spec.Name, rawConn.LocalAddr().String(), err) _ = rawConn.Close() } else { downstreamConn := NewDownstreamConn(rawConn, rawConn.RemoteAddr(), listenerStop) r.setOnReadHandler(downstreamConn, upstreamConn) + downstreamConn.SetOnClose(func(event ConnectionEvent) { + if event == RemoteClose { + _ = upstreamConn.Close(FlushWrite, LocalClose) + } else { + _ = upstreamConn.Close(NoFlush, LocalClose) + } + }) + upstreamConn.SetOnClose(func(event ConnectionEvent) { + if event == RemoteClose { + _ = downstreamConn.Close(FlushWrite, LocalClose) + } else { + _ = downstreamConn.Close(NoFlush, LocalClose) + } + }) upstreamConn.Start() downstreamConn.Start() } From ae9063fb364013d508b55bcddb6a9fabedf9befe Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 15 Oct 2021 22:45:45 +0800 Subject: [PATCH 33/99] [layer4proxy] fix upstream connect bug --- pkg/object/layer4server/connection.go | 73 +++++++++++---------------- pkg/object/layer4server/runtime.go | 62 +++++++++++------------ 2 files changed, 61 insertions(+), 74 deletions(-) diff --git a/pkg/object/layer4server/connection.go b/pkg/object/layer4server/connection.go index 328d2995dc..07727f29af 100644 --- a/pkg/object/layer4server/connection.go +++ b/pkg/object/layer4server/connection.go @@ -31,16 +31,16 @@ import ( "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/util/iobufferpool" "github.com/megaease/easegress/pkg/util/timerpool" - "github.com/rcrowley/go-metrics" ) type Connection struct { - conn net.Conn - connected uint32 - closed uint32 + rawConn net.Conn + connected uint32 + closed uint32 + protocol string localAddr net.Addr - remoteAddr net.Addr // just for udp proxy use + remoteAddr net.Addr // udp listener send response use remoteAddr lastBytesSizeRead int64 lastWriteSizeWrite int64 @@ -57,9 +57,6 @@ type Connection struct { connStopChan chan struct{} // use for connection close listenerStopChan chan struct{} // use for listener close - readCollector metrics.Counter - writeCollector metrics.Counter - onRead func(buffer iobufferpool.IoBuffer) // execute read filters onClose func(event ConnectionEvent) } @@ -68,7 +65,7 @@ type Connection struct { // @param remoteAddr client addr for udp proxy use func NewDownstreamConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan struct{}) *Connection { clientConn := &Connection{ - conn: conn, + rawConn: conn, connected: 1, protocol: 
conn.LocalAddr().Network(), localAddr: conn.LocalAddr(), @@ -85,7 +82,7 @@ func NewDownstreamConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan if remoteAddr != nil { clientConn.remoteAddr = remoteAddr } else { - clientConn.remoteAddr = conn.RemoteAddr() // udp server conn can not get remote address + clientConn.remoteAddr = conn.RemoteAddr() // udp server rawConn can not get remote address } return clientConn } @@ -100,9 +97,9 @@ func (c *Connection) LocalAddr() net.Addr { return c.localAddr } -// RemoteAddr get connection remote addr(it's nil for udp server conn) +// RemoteAddr get connection remote addr(it's nil for udp server rawConn) func (c *Connection) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() + return c.rawConn.RemoteAddr() } // ReadEnabled get connection read enable status @@ -110,12 +107,6 @@ func (c *Connection) ReadEnabled() bool { return c.readEnabled } -// SetCollector set read/write metrics collectors -func (c *Connection) SetCollector(read, write metrics.Counter) { - c.readCollector = read - c.writeCollector = write -} - // SetOnRead set connection read handle func (c *Connection) SetOnRead(onRead func(buffer iobufferpool.IoBuffer)) { c.onRead = onRead @@ -135,7 +126,7 @@ func (c *Connection) GetReadBuffer() iobufferpool.IoBuffer { // Start running connection read/write loop func (c *Connection) Start() { - if c.protocol == "udp" && c.conn.RemoteAddr() == nil { + if c.protocol == "udp" && c.rawConn.RemoteAddr() == nil { return // udp server connection no need to start read/write loop } @@ -228,7 +219,6 @@ func (c *Connection) startReadLoop() { for { select { case <-c.connStopChan: - logger.Infof("exit read loop") return case <-c.listenerStopChan: return @@ -274,9 +264,9 @@ func (c *Connection) startReadLoop() { func (c *Connection) setReadDeadline() { switch c.protocol { case "udp": - _ = c.conn.SetReadDeadline(time.Now().Add(1 * time.Second)) + _ = c.rawConn.SetReadDeadline(time.Now().Add(1 * time.Second)) case "tcp": - _ = c.conn.SetReadDeadline(time.Now().Add(15 * time.Second)) + _ = c.rawConn.SetReadDeadline(time.Now().Add(15 * time.Second)) } } @@ -360,25 +350,25 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) } // connection failed in client mode - if c.conn == nil || reflect.ValueOf(c.conn).IsNil() { + if c.rawConn == nil || reflect.ValueOf(c.rawConn).IsNil() { return nil } - // close tcp conn read first - if tconn, ok := c.conn.(*net.TCPConn); ok { + // close tcp rawConn read first + if tconn, ok := c.rawConn.(*net.TCPConn); ok { logger.Debugf("tcp connection closed, local addr: %s, remote addr: %s, event: %s", c.localAddr.String(), c.remoteAddr.String(), event) _ = tconn.CloseRead() } - if c.protocol == "udp" && c.conn.RemoteAddr() == nil { + if c.protocol == "udp" && c.rawConn.RemoteAddr() == nil { key := GetProxyMapKey(c.localAddr.String(), c.remoteAddr.String()) DelUDPProxyMap(key) } - // close conn recv, then notify read/write loop to exit + // close rawConn recv, then notify read/write loop to exit close(c.connStopChan) - _ = c.conn.Close() + _ = c.rawConn.Close() c.lastBytesSizeRead, c.lastWriteSizeWrite = 0, 0 logger.Debugf("%s connection closed, local addr: %s, remote addr: %s, event: %s", @@ -415,7 +405,7 @@ func (c *Connection) doReadIO() (err error) { var bytesRead int64 c.setReadDeadline() - bytesRead, err = c.readBuffer.ReadOnce(c.conn) + bytesRead, err = c.readBuffer.ReadOnce(c.rawConn) if err != nil { if atomic.LoadUint32(&c.closed) == 1 { @@ -470,16 +460,16 @@ func (c *Connection) doWriteIO() 
(bytesSent int64, err error) { buffers := c.writeBuffers switch c.protocol { case "tcp": - bytesSent, err = buffers.WriteTo(c.conn) + bytesSent, err = buffers.WriteTo(c.rawConn) case "udp": n := 0 bytesSent = 0 addr := c.remoteAddr.(*net.UDPAddr) for _, buf := range c.ioBuffers { - if c.conn.RemoteAddr() == nil { - n, err = c.conn.(*net.UDPConn).WriteToUDP(buf.Bytes(), addr) + if c.rawConn.RemoteAddr() == nil { + n, err = c.rawConn.(*net.UDPConn).WriteToUDP(buf.Bytes(), addr) } else { - n, err = c.conn.Write(buf.Bytes()) + n, err = c.rawConn.Write(buf.Bytes()) } if err != nil { break @@ -510,9 +500,9 @@ func (c *Connection) doWriteIO() (bytesSent int64, err error) { func (c *Connection) setWriteDeadline() { switch c.protocol { case "udp": - _ = c.conn.SetWriteDeadline(time.Now().Add(5 * time.Second)) + _ = c.rawConn.SetWriteDeadline(time.Now().Add(5 * time.Second)) case "tcp": - _ = c.conn.SetWriteDeadline(time.Now().Add(15 * time.Second)) + _ = c.rawConn.SetWriteDeadline(time.Now().Add(15 * time.Second)) } } @@ -552,7 +542,7 @@ func (u *UpstreamConnection) connect() (event ConnectionEvent, err error) { if addr == nil { return ConnectFailed, errors.New("upstream addr is nil") } - u.conn, err = net.DialTimeout(u.protocol, addr.String(), timeout) + u.rawConn, err = net.DialTimeout(u.protocol, addr.String(), timeout) if err != nil { if err == io.EOF { event = RemoteClose @@ -564,10 +554,10 @@ func (u *UpstreamConnection) connect() (event ConnectionEvent, err error) { return } atomic.StoreUint32(&u.connected, 1) - u.localAddr = u.conn.LocalAddr() + u.localAddr = u.rawConn.LocalAddr() if u.protocol == "tcp" { - _ = u.conn.(*net.TCPConn).SetNoDelay(true) - _ = u.conn.(*net.TCPConn).SetKeepAlive(true) + _ = u.rawConn.(*net.TCPConn).SetNoDelay(true) + _ = u.rawConn.(*net.TCPConn).SetKeepAlive(true) } event = Connected return @@ -580,10 +570,7 @@ func (u *UpstreamConnection) Connect() (err error) { if err == nil { u.Start() } - logger.Debugf("connect upstream, upstream addr: %s, event: %+v, err: %+v", u.remoteAddr, event, err) - if event != Connected { - close(u.listenerStopChan) // if upstream connection failed, close client connection - } + logger.Debugf("%s connect upstream(%s), event: %s, err: %+v", u.protocol, u.remoteAddr, event, err) }) return } diff --git a/pkg/object/layer4server/runtime.go b/pkg/object/layer4server/runtime.go index 723dcbbb04..1246b38952 100644 --- a/pkg/object/layer4server/runtime.go +++ b/pkg/object/layer4server/runtime.go @@ -290,26 +290,12 @@ func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) logger.Errorf("upstream connect failed(name: %s, addr: %s), err: %+v", r.spec.Name, rawConn.LocalAddr().String(), err) _ = rawConn.Close() - } else { - downstreamConn := NewDownstreamConn(rawConn, rawConn.RemoteAddr(), listenerStop) - r.setOnReadHandler(downstreamConn, upstreamConn) - downstreamConn.SetOnClose(func(event ConnectionEvent) { - if event == RemoteClose { - _ = upstreamConn.Close(FlushWrite, LocalClose) - } else { - _ = upstreamConn.Close(NoFlush, LocalClose) - } - }) - upstreamConn.SetOnClose(func(event ConnectionEvent) { - if event == RemoteClose { - _ = downstreamConn.Close(FlushWrite, LocalClose) - } else { - _ = downstreamConn.Close(NoFlush, LocalClose) - } - }) - upstreamConn.Start() - downstreamConn.Start() + return } + + downstreamConn := NewDownstreamConn(rawConn, rawConn.RemoteAddr(), listenerStop) + r.setCallbacks(downstreamConn, upstreamConn) + downstreamConn.Start() // upstream conn start read/write loop when connect is called 
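		// Proxy wiring recap: setCallbacks (below) clones every buffer read on
		// one side and writes it to the other, and a close on either side tears
		// down its peer (FlushWrite when the closing side saw RemoteClose,
		// NoFlush otherwise). The upstream read/write loops were already started
		// inside Connect; Start above launches the downstream loops.
		//
		//	client --> downstreamConn --> upstreamConn --> backend
		//	client <-- downstreamConn <-- upstreamConn <-- backend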
} } @@ -324,9 +310,9 @@ func (r *runtime) onUdpAccept() func(cliAddr net.Addr, rawConn net.Conn, listene localAddr := rawConn.LocalAddr() key := GetProxyMapKey(localAddr.String(), cliAddr.String()) - if rawDownstreamConn, ok := ProxyMap.Load(key); ok { - downstreamConn := rawDownstreamConn.(*Connection) - downstreamConn.OnRead(packet) + if rawDc, ok := ProxyMap.Load(key); ok { + dc := rawDc.(*Connection) + dc.OnRead(packet) return } @@ -338,25 +324,24 @@ func (r *runtime) onUdpAccept() func(cliAddr net.Addr, rawConn net.Conn, listene } upstreamAddr, _ := net.ResolveUDPAddr("udp", server.Addr) - upstreamConn := NewUpstreamConn(r.spec.ConnectTimeout, upstreamAddr, listenerStop) - if err := upstreamConn.Connect(); err != nil { + uc := NewUpstreamConn(r.spec.ConnectTimeout, upstreamAddr, listenerStop) + if err := uc.Connect(); err != nil { logger.Errorf("discard udp packet due to upstream connect failed, local addr: %s, err: %+v", localAddr, err) return } fd, _ := rawConn.(*net.UDPConn).File() downstreamRawConn, _ := net.FilePacketConn(fd) - downstreamConn := NewDownstreamConn(downstreamRawConn.(*net.UDPConn), rawConn.RemoteAddr(), listenerStop) - SetUDPProxyMap(GetProxyMapKey(localAddr.String(), cliAddr.String()), &downstreamConn) - r.setOnReadHandler(downstreamConn, upstreamConn) + dc := NewDownstreamConn(downstreamRawConn.(*net.UDPConn), rawConn.RemoteAddr(), listenerStop) + SetUDPProxyMap(GetProxyMapKey(localAddr.String(), cliAddr.String()), &dc) + r.setCallbacks(dc, uc) - downstreamConn.Start() - upstreamConn.Start() - downstreamConn.OnRead(packet) + dc.Start() + dc.OnRead(packet) } } -func (r *runtime) setOnReadHandler(downstreamConn *Connection, upstreamConn *UpstreamConnection) { +func (r *runtime) setCallbacks(downstreamConn *Connection, upstreamConn *UpstreamConnection) { downstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { if readBuf != nil && readBuf.Len() > 0 { _ = upstreamConn.Write(readBuf.Clone()) @@ -369,4 +354,19 @@ func (r *runtime) setOnReadHandler(downstreamConn *Connection, upstreamConn *Ups readBuf.Drain(readBuf.Len()) } }) + + downstreamConn.SetOnClose(func(event ConnectionEvent) { + if event == RemoteClose { + _ = upstreamConn.Close(FlushWrite, LocalClose) + } else { + _ = upstreamConn.Close(NoFlush, LocalClose) + } + }) + upstreamConn.SetOnClose(func(event ConnectionEvent) { + if event == RemoteClose { + _ = downstreamConn.Close(FlushWrite, LocalClose) + } else { + _ = downstreamConn.Close(NoFlush, LocalClose) + } + }) } From 6e8116b4926b7fa9fdd5ad9695efd46ca6c8edf2 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Mon, 18 Oct 2021 18:08:28 +0800 Subject: [PATCH 34/99] [tcpproxy] remove udp proxy(something wrong on windows platform) --- pkg/object/layer4server/listener.go | 198 ------------------ pkg/object/layer4server/udpreceiver.go | 46 ---- .../server.go => tcpproxy/backendserver.go} | 2 +- .../{layer4server => tcpproxy}/connection.go | 116 +++------- .../{layer4server => tcpproxy}/constant.go | 2 +- .../{layer4server => tcpproxy}/ipfilters.go | 2 +- pkg/object/tcpproxy/listener.go | 113 ++++++++++ pkg/object/{layer4server => tcpproxy}/pool.go | 2 +- .../{layer4server => tcpproxy}/runtime.go | 58 +---- pkg/object/{layer4server => tcpproxy}/spec.go | 7 +- .../layer4server.go => tcpproxy/tcpserver.go} | 46 ++-- pkg/registry/registry.go | 2 +- 12 files changed, 178 insertions(+), 416 deletions(-) delete mode 100644 pkg/object/layer4server/listener.go delete mode 100644 pkg/object/layer4server/udpreceiver.go rename pkg/object/{layer4server/server.go => 
tcpproxy/backendserver.go} (99%) rename pkg/object/{layer4server => tcpproxy}/connection.go (77%) rename pkg/object/{layer4server => tcpproxy}/constant.go (98%) rename pkg/object/{layer4server => tcpproxy}/ipfilters.go (98%) create mode 100644 pkg/object/tcpproxy/listener.go rename pkg/object/{layer4server => tcpproxy}/pool.go (98%) rename pkg/object/{layer4server => tcpproxy}/runtime.go (79%) rename pkg/object/{layer4server => tcpproxy}/spec.go (91%) rename pkg/object/{layer4server/layer4server.go => tcpproxy/tcpserver.go} (53%) diff --git a/pkg/object/layer4server/listener.go b/pkg/object/layer4server/listener.go deleted file mode 100644 index 41c2667fbf..0000000000 --- a/pkg/object/layer4server/listener.go +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package layer4server - -import ( - stdcontext "context" - "errors" - "fmt" - "net" - "runtime/debug" - "sync" - - "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/util/iobufferpool" - "github.com/megaease/easegress/pkg/util/limitlistener" -) - -type ListenerState int - -type listener struct { - name string - protocol string // enum:udp/tcp - localAddr string // listen addr - state ListenerState // listener state - - mutex *sync.Mutex - stopChan chan struct{} - maxConns uint32 // maxConn for tcp listener - - udpListener net.PacketConn // udp listener - tcpListener *limitlistener.LimitListener // tcp listener with accept limit - onTcpAccept func(conn net.Conn, listenerStop chan struct{}) // tcp accept handle - onUdpAccept func(downstreamAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) // udp accept handle -} - -func newListener(spec *Spec, onTCPAccept func(conn net.Conn, listenerStop chan struct{}), - onUDPAccept func(cliAddr net.Addr, conn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer)) *listener { - listen := &listener{ - name: spec.Name, - protocol: spec.Protocol, - localAddr: fmt.Sprintf(":%d", spec.Port), - - mutex: &sync.Mutex{}, - stopChan: make(chan struct{}), - } - - if listen.protocol == "tcp" { - listen.maxConns = spec.MaxConnections - listen.onTcpAccept = onTCPAccept - } else { - listen.onUdpAccept = onUDPAccept - } - return listen -} - -func (l *listener) listen() error { - switch l.protocol { - case "udp": - c := net.ListenConfig{} - if ul, err := c.ListenPacket(stdcontext.Background(), l.protocol, l.localAddr); err != nil { - return err - } else { - l.udpListener = ul - } - case "tcp": - if tl, err := net.Listen(l.protocol, l.localAddr); err != nil { - return err - } else { - // wrap tcp listener with accept limit - l.tcpListener = limitlistener.NewLimitListener(tl, l.maxConns) - } - default: - return errors.New("invalid protocol for layer4 server listener") - } - return nil -} - -func (l *listener) startEventLoop() { - switch l.protocol { - case "udp": - l.readMsgEventLoop() - case "tcp": - l.acceptEventLoop() - } -} - -func 
(l *listener) readMsgEventLoop() { - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("failed to read udp msg for %s\n, stack trace: \n", l.localAddr, debug.Stack()) - l.readMsgEventLoop() - } - }() - - l.readMsgLoop() - }() -} - -func (l *listener) readMsgLoop() { - conn := l.udpListener.(*net.UDPConn) - buf := iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) - defer func(buf iobufferpool.IoBuffer) { - _ = iobufferpool.PutIoBuffer(buf) - }(buf) - - for { - buf.Reset() - n, rAddr, err := conn.ReadFromUDP(buf.Bytes()[:buf.Cap()]) - _ = buf.Grow(n) - - if err != nil { - if nerr, ok := err.(net.Error); ok && nerr.Timeout() { - logger.Infof("udp listener %s stop receiving packet by deadline", l.localAddr) - return - } - if ope, ok := err.(*net.OpError); ok { - if !(ope.Timeout() && ope.Temporary()) { - logger.Errorf("udp listener %s occurs non-recoverable error, stop listening and receiving", l.localAddr) - return - } - } - logger.Errorf("udp listener %s receiving packet occur error: %+v", l.localAddr, err) - continue - } - l.onUdpAccept(rAddr, conn, l.stopChan, buf.Clone()) - } -} - -func (l *listener) acceptEventLoop() { - - for { - if tconn, err := l.tcpListener.Accept(); err != nil { - if nerr, ok := err.(net.Error); ok && nerr.Timeout() { - logger.Infof("tcp listener(%s) stop accept connection due to timeout, err: %s", - l.localAddr, nerr) - return - } - - if ope, ok := err.(*net.OpError); ok { - // not timeout error and not temporary, which means the error is non-recoverable - if !(ope.Timeout() && ope.Temporary()) { - // accept error raised by sockets closing - if ope.Op == "accept" { - logger.Debugf("tcp listener(%s) stop accept connection due to listener closed", l.localAddr) - } else { - logger.Errorf("tcp listener(%s) stop accept connection due to non-recoverable error: %s", - l.localAddr, err.Error()) - } - return - } - } else { - logger.Errorf("tcp listener(%s) stop accept connection with unknown error: %s.", - l.localAddr, err.Error()) - } - } else { - go l.onTcpAccept(tconn, l.stopChan) - } - } -} - -func (l *listener) setMaxConnection(maxConn uint32) { - l.tcpListener.SetMaxConnection(maxConn) -} - -func (l *listener) close() error { - l.mutex.Lock() - defer l.mutex.Unlock() - - var err error - switch l.protocol { - case "tcp": - if l.tcpListener != nil { - err = l.tcpListener.Close() - } - case "udp": - if l.udpListener != nil { - err = l.udpListener.Close() - } - } - close(l.stopChan) - return err -} diff --git a/pkg/object/layer4server/udpreceiver.go b/pkg/object/layer4server/udpreceiver.go deleted file mode 100644 index c54061b868..0000000000 --- a/pkg/object/layer4server/udpreceiver.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package layer4server - -import ( - "strings" - "sync" -) - -var ( - ProxyMap = sync.Map{} -) - -// GetProxyMapKey construct udp session key -func GetProxyMapKey(raddr, laddr string) string { - var builder strings.Builder - builder.WriteString(raddr) - builder.WriteString(":") - builder.WriteString(laddr) - return builder.String() -} - -// SetUDPProxyMap set udp session by udp server listener -func SetUDPProxyMap(key string, clientConn interface{}) { - ProxyMap.Store(key, clientConn) -} - -// DelUDPProxyMap delete udp session -func DelUDPProxyMap(key string) { - ProxyMap.Delete(key) -} diff --git a/pkg/object/layer4server/server.go b/pkg/object/tcpproxy/backendserver.go similarity index 99% rename from pkg/object/layer4server/server.go rename to pkg/object/tcpproxy/backendserver.go index 947a4d5342..c07d0250a0 100644 --- a/pkg/object/layer4server/server.go +++ b/pkg/object/tcpproxy/backendserver.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package layer4server +package tcpproxy import ( "fmt" diff --git a/pkg/object/layer4server/connection.go b/pkg/object/tcpproxy/connection.go similarity index 77% rename from pkg/object/layer4server/connection.go rename to pkg/object/tcpproxy/connection.go index 07727f29af..26e98cd65c 100644 --- a/pkg/object/layer4server/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package layer4server +package tcpproxy import ( "errors" @@ -23,7 +23,6 @@ import ( "net" "reflect" "runtime/debug" - "strings" "sync" "sync/atomic" "time" @@ -38,9 +37,8 @@ type Connection struct { connected uint32 closed uint32 - protocol string localAddr net.Addr - remoteAddr net.Addr // udp listener send response use remoteAddr + remoteAddr net.Addr lastBytesSizeRead int64 lastWriteSizeWrite int64 @@ -65,10 +63,10 @@ type Connection struct { // @param remoteAddr client addr for udp proxy use func NewDownstreamConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan struct{}) *Connection { clientConn := &Connection{ - rawConn: conn, - connected: 1, - protocol: conn.LocalAddr().Network(), - localAddr: conn.LocalAddr(), + connected: 1, + rawConn: conn, + localAddr: conn.LocalAddr(), + remoteAddr: conn.RemoteAddr(), readEnabled: true, readEnabledChan: make(chan bool, 1), @@ -87,11 +85,6 @@ func NewDownstreamConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan return clientConn } -// Protocol get connection protocol -func (c *Connection) Protocol() string { - return c.protocol -} - // LocalAddr get connection local addr func (c *Connection) LocalAddr() net.Addr { return c.localAddr @@ -126,10 +119,6 @@ func (c *Connection) GetReadBuffer() iobufferpool.IoBuffer { // Start running connection read/write loop func (c *Connection) Start() { - if c.protocol == "udp" && c.rawConn.RemoteAddr() == nil { - return // udp server connection no need to start read/write loop - } - c.startOnce.Do(func() { c.startRWLoop() }) @@ -192,8 +181,8 @@ func (c *Connection) startRWLoop() { func (c *Connection) Write(buffers ...iobufferpool.IoBuffer) (err error) { defer func() { if r := recover(); r != nil { - logger.Errorf("%s connection has closed, local addr: %s, remote addr: %s, err: %+v", - c.protocol, c.localAddr.String(), c.remoteAddr.String(), r) + logger.Errorf("tcp connection has closed, local addr: %s, remote addr: %s, err: %+v", + c.localAddr.String(), c.remoteAddr.String(), r) err = ErrConnectionHasClosed } }() @@ -228,7 +217,7 @@ func (c *Connection) startReadLoop() { err := c.doReadIO() if err != nil { if te, 
ok := err.(net.Error); ok && te.Timeout() { - if c.protocol == "tcp" && c.readBuffer != nil && c.readBuffer.Len() == 0 && c.readBuffer.Cap() > iobufferpool.DefaultBufferReadCapacity { + if c.readBuffer != nil && c.readBuffer.Len() == 0 && c.readBuffer.Cap() > iobufferpool.DefaultBufferReadCapacity { c.readBuffer.Free() c.readBuffer.Alloc(iobufferpool.DefaultBufferReadCapacity) } @@ -261,15 +250,6 @@ func (c *Connection) startReadLoop() { } } -func (c *Connection) setReadDeadline() { - switch c.protocol { - case "udp": - _ = c.rawConn.SetReadDeadline(time.Now().Add(1 * time.Second)) - case "tcp": - _ = c.rawConn.SetReadDeadline(time.Now().Add(15 * time.Second)) - } -} - func (c *Connection) startWriteLoop() { var err error for { @@ -294,26 +274,23 @@ func (c *Connection) startWriteLoop() { } } - c.setWriteDeadline() + _ = c.rawConn.SetWriteDeadline(time.Now().Add(15 * time.Second)) _, err = c.doWrite() } if err != nil { if err == iobufferpool.EOF { - logger.Debugf("%s connection local close with eof, local addr: %s, remote addr: %s", - c.protocol, c.localAddr.String(), c.remoteAddr.String()) + logger.Debugf("tcp connection local close with eof, local addr: %s, remote addr: %s", + c.localAddr.String(), c.remoteAddr.String()) _ = c.Close(NoFlush, LocalClose) } else { - logger.Errorf("%s connection error on write, local addr: %s, remote addr: %s, err: %+v", - c.protocol, c.localAddr.String(), c.remoteAddr.String(), err) + logger.Errorf("tcp connection error on write, local addr: %s, remote addr: %s, err: %+v", + c.localAddr.String(), c.remoteAddr.String(), err) } if te, ok := err.(net.Error); ok && te.Timeout() { _ = c.Close(NoFlush, OnWriteTimeout) } - if c.protocol == "udp" && strings.Contains(err.Error(), "connection refused") { - _ = c.Close(NoFlush, RemoteClose) - } //other write errs not close connection, because readbuffer may have unread data, wait for readloop close connection, return } @@ -336,7 +313,7 @@ func (c *Connection) appendBuffer(ioBuffers *[]iobufferpool.IoBuffer) { func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) { defer func() { if r := recover(); r != nil { - logger.Errorf("%s connection close panic, err: %+v\n%s", c.protocol, r, string(debug.Stack())) + logger.Errorf("tcp connection close panic, err: %+v\n%s", r, string(debug.Stack())) } }() @@ -361,18 +338,12 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) _ = tconn.CloseRead() } - if c.protocol == "udp" && c.rawConn.RemoteAddr() == nil { - key := GetProxyMapKey(c.localAddr.String(), c.remoteAddr.String()) - DelUDPProxyMap(key) - } - // close rawConn recv, then notify read/write loop to exit close(c.connStopChan) _ = c.rawConn.Close() c.lastBytesSizeRead, c.lastWriteSizeWrite = 0, 0 - logger.Debugf("%s connection closed, local addr: %s, remote addr: %s, event: %s", - c.protocol, c.localAddr.String(), c.remoteAddr.String(), event) + logger.Debugf("tcp connection closed, local addr: %s, remote addr: %s, event: %s", c.localAddr.String(), c.remoteAddr.String(), event) if c.onClose != nil { c.onClose(event) @@ -394,17 +365,11 @@ func (c *Connection) SetReadDisable(disable bool) { func (c *Connection) doReadIO() (err error) { if c.readBuffer == nil { - switch c.protocol { - case "udp": - // A UDP socket will Read up to the size of the receiving buffer and will discard the rest - c.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.UdpPacketMaxSize) - default: // unix or tcp - c.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) - } + 
c.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) } var bytesRead int64 - c.setReadDeadline() + _ = c.rawConn.SetReadDeadline(time.Now().Add(15 * time.Second)) bytesRead, err = c.readBuffer.ReadOnce(c.rawConn) if err != nil { @@ -458,25 +423,7 @@ func (c *Connection) writeBufLen() (bufLen int) { func (c *Connection) doWriteIO() (bytesSent int64, err error) { buffers := c.writeBuffers - switch c.protocol { - case "tcp": - bytesSent, err = buffers.WriteTo(c.rawConn) - case "udp": - n := 0 - bytesSent = 0 - addr := c.remoteAddr.(*net.UDPAddr) - for _, buf := range c.ioBuffers { - if c.rawConn.RemoteAddr() == nil { - n, err = c.rawConn.(*net.UDPConn).WriteToUDP(buf.Bytes(), addr) - } else { - n, err = c.rawConn.Write(buf.Bytes()) - } - if err != nil { - break - } - bytesSent += int64(n) - } - } + bytesSent, err = buffers.WriteTo(c.rawConn) if err != nil { return bytesSent, err @@ -488,8 +435,8 @@ func (c *Connection) doWriteIO() (bytesSent int64, err error) { err = iobufferpool.EOF } if e := iobufferpool.PutIoBuffer(buf); e != nil { - logger.Errorf("%s connection give buffer error, local addr: %s, remote addr: %s, err: %+v", - c.protocol, c.localAddr.String(), c.remoteAddr.String(), err) + logger.Errorf("tcp connection give buffer error, local addr: %s, remote addr: %s, err: %+v", + c.localAddr.String(), c.remoteAddr.String(), err) } } c.ioBuffers = c.ioBuffers[:0] @@ -497,15 +444,6 @@ func (c *Connection) doWriteIO() (bytesSent int64, err error) { return } -func (c *Connection) setWriteDeadline() { - switch c.protocol { - case "udp": - _ = c.rawConn.SetWriteDeadline(time.Now().Add(5 * time.Second)) - case "tcp": - _ = c.rawConn.SetWriteDeadline(time.Now().Add(15 * time.Second)) - } -} - // UpstreamConnection wrap connection to upstream type UpstreamConnection struct { Connection @@ -517,7 +455,6 @@ func NewUpstreamConn(connectTimeout uint32, upstreamAddr net.Addr, listenerStopC conn := &UpstreamConnection{ Connection: Connection{ connected: 1, - protocol: upstreamAddr.Network(), remoteAddr: upstreamAddr, readEnabled: true, @@ -542,7 +479,7 @@ func (u *UpstreamConnection) connect() (event ConnectionEvent, err error) { if addr == nil { return ConnectFailed, errors.New("upstream addr is nil") } - u.rawConn, err = net.DialTimeout(u.protocol, addr.String(), timeout) + u.rawConn, err = net.DialTimeout("tcp", addr.String(), timeout) if err != nil { if err == io.EOF { event = RemoteClose @@ -555,10 +492,9 @@ func (u *UpstreamConnection) connect() (event ConnectionEvent, err error) { } atomic.StoreUint32(&u.connected, 1) u.localAddr = u.rawConn.LocalAddr() - if u.protocol == "tcp" { - _ = u.rawConn.(*net.TCPConn).SetNoDelay(true) - _ = u.rawConn.(*net.TCPConn).SetKeepAlive(true) - } + + _ = u.rawConn.(*net.TCPConn).SetNoDelay(true) + _ = u.rawConn.(*net.TCPConn).SetKeepAlive(true) event = Connected return } @@ -570,7 +506,7 @@ func (u *UpstreamConnection) Connect() (err error) { if err == nil { u.Start() } - logger.Debugf("%s connect upstream(%s), event: %s, err: %+v", u.protocol, u.remoteAddr, event, err) + logger.Debugf("tcp connect upstream(%s), event: %s, err: %+v", u.remoteAddr, event, err) }) return } diff --git a/pkg/object/layer4server/constant.go b/pkg/object/tcpproxy/constant.go similarity index 98% rename from pkg/object/layer4server/constant.go rename to pkg/object/tcpproxy/constant.go index 7299bc659c..d74b9adae4 100644 --- a/pkg/object/layer4server/constant.go +++ b/pkg/object/tcpproxy/constant.go @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package layer4server +package tcpproxy import ( "errors" diff --git a/pkg/object/layer4server/ipfilters.go b/pkg/object/tcpproxy/ipfilters.go similarity index 98% rename from pkg/object/layer4server/ipfilters.go rename to pkg/object/tcpproxy/ipfilters.go index 47eb249971..3adc63460e 100644 --- a/pkg/object/layer4server/ipfilters.go +++ b/pkg/object/tcpproxy/ipfilters.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package layer4server +package tcpproxy import ( "reflect" diff --git a/pkg/object/tcpproxy/listener.go b/pkg/object/tcpproxy/listener.go new file mode 100644 index 0000000000..1116ae157c --- /dev/null +++ b/pkg/object/tcpproxy/listener.go @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tcpproxy + +import ( + "fmt" + "net" + "sync" + + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/util/limitlistener" +) + +type ListenerState int + +type listener struct { + name string + localAddr string // listen addr + state ListenerState // listener state + + mutex *sync.Mutex + stopChan chan struct{} + maxConns uint32 // maxConn for tcp listener + + tcpListener *limitlistener.LimitListener // tcp listener with accept limit + onAccept func(conn net.Conn, listenerStop chan struct{}) // tcp accept handle +} + +func newListener(spec *Spec, onAccept func(conn net.Conn, listenerStop chan struct{})) *listener { + listen := &listener{ + name: spec.Name, + localAddr: fmt.Sprintf(":%d", spec.Port), + + mutex: &sync.Mutex{}, + maxConns: spec.MaxConnections, + stopChan: make(chan struct{}), + + onAccept: onAccept, + } + return listen +} + +func (l *listener) listen() error { + if tl, err := net.Listen("tcp", l.localAddr); err != nil { + return err + } else { + // wrap tcp listener with accept limit + l.tcpListener = limitlistener.NewLimitListener(tl, l.maxConns) + } + return nil +} + +func (l *listener) acceptEventLoop() { + + for { + if tconn, err := l.tcpListener.Accept(); err != nil { + if nerr, ok := err.(net.Error); ok && nerr.Timeout() { + logger.Infof("tcp listener(%s) stop accept connection due to timeout, err: %s", + l.localAddr, nerr) + return + } + + if ope, ok := err.(*net.OpError); ok { + // not timeout error and not temporary, which means the error is non-recoverable + if !(ope.Timeout() && ope.Temporary()) { + // accept error raised by sockets closing + if ope.Op == "accept" { + logger.Debugf("tcp listener(%s) stop accept connection due to listener closed", l.localAddr) + } else { + logger.Errorf("tcp listener(%s) stop accept connection due to non-recoverable error: %s", + l.localAddr, err.Error()) + } + return + } + } else { + logger.Errorf("tcp listener(%s) stop accept connection with unknown error: %s.", + l.localAddr, err.Error()) + } + } else { + go l.onAccept(tconn, l.stopChan) + } + } +} + +func (l *listener) setMaxConnection(maxConn uint32) { + l.tcpListener.SetMaxConnection(maxConn) +} + +func (l *listener) close() (err error) { + 
l.mutex.Lock() + defer l.mutex.Unlock() + + if l.tcpListener != nil { + err = l.tcpListener.Close() + } + close(l.stopChan) + return err +} diff --git a/pkg/object/layer4server/pool.go b/pkg/object/tcpproxy/pool.go similarity index 98% rename from pkg/object/layer4server/pool.go rename to pkg/object/tcpproxy/pool.go index d171cf7364..adc6ec3354 100644 --- a/pkg/object/layer4server/pool.go +++ b/pkg/object/tcpproxy/pool.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package layer4server +package tcpproxy import ( "reflect" diff --git a/pkg/object/layer4server/runtime.go b/pkg/object/tcpproxy/runtime.go similarity index 79% rename from pkg/object/layer4server/runtime.go rename to pkg/object/tcpproxy/runtime.go index 1246b38952..1dc73f1df9 100644 --- a/pkg/object/layer4server/runtime.go +++ b/pkg/object/tcpproxy/runtime.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package layer4server +package tcpproxy import ( "fmt" @@ -62,7 +62,7 @@ type ( pool *pool // backend servers pool ipFilters *ipFilters // ip filters - listener *listener // layer4 listener + listener *listener // tcp listener startNum uint64 eventChan chan interface{} // receive traffic controller event @@ -184,7 +184,7 @@ func (r *runtime) needRestartServer(nextSpec *Spec) bool { x := *r.spec y := *nextSpec - // The change of options below need not restart the layer4 server. + // The change of options below need not restart the tcp server. x.MaxConnections, y.MaxConnections = 0, 0 x.ConnectTimeout, y.ConnectTimeout = 0, 0 @@ -196,7 +196,7 @@ func (r *runtime) needRestartServer(nextSpec *Spec) bool { } func (r *runtime) startServer() { - l := newListener(r.spec, r.onTcpAccept(), r.onUdpAccept()) + l := newListener(r.spec, r.onAccept()) r.listener = l r.startNum++ @@ -206,7 +206,7 @@ func (r *runtime) startServer() { if err := l.listen(); err != nil { r.setState(stateFailed) r.setError(err) - logger.Errorf("listen for %s %s failed, err: %+v", l.protocol, l.localAddr, err) + logger.Errorf("tcp listener for %s failed, err: %+v", l.localAddr, err) _ = l.close() r.eventChan <- &eventServeFailed{ @@ -216,7 +216,7 @@ func (r *runtime) startServer() { return } - go r.listener.startEventLoop() + go r.listener.acceptEventLoop() } func (r *runtime) closeServer() { @@ -225,7 +225,7 @@ func (r *runtime) closeServer() { } _ = r.listener.close() - logger.Infof("listener for %s(%s) closed", r.listener.protocol, r.listener.localAddr) + logger.Infof("listener for %s(%s) closed", r.listener.localAddr) } func (r *runtime) checkFailed() { @@ -265,7 +265,7 @@ func (r *runtime) handleEventClose(e *eventClose) { close(e.done) } -func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) { +func (r *runtime) onAccept() func(conn net.Conn, listenerStop chan struct{}) { return func(rawConn net.Conn, listenerStop chan struct{}) { downstream := rawConn.RemoteAddr().(*net.TCPAddr).IP.String() @@ -299,48 +299,6 @@ func (r *runtime) onTcpAccept() func(conn net.Conn, listenerStop chan struct{}) } } -func (r *runtime) onUdpAccept() func(cliAddr net.Addr, rawConn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) { - return func(cliAddr net.Addr, rawConn net.Conn, listenerStop chan struct{}, packet iobufferpool.IoBuffer) { - downstream := cliAddr.(*net.UDPAddr).IP.String() - if r.ipFilters != nil && !r.ipFilters.AllowIP(downstream) { - logger.Infof("discard udp packet from %s to %s which ip is not allowed", cliAddr.String(), - rawConn.LocalAddr().String()) - return - } - - localAddr := rawConn.LocalAddr() 
- key := GetProxyMapKey(localAddr.String(), cliAddr.String()) - if rawDc, ok := ProxyMap.Load(key); ok { - dc := rawDc.(*Connection) - dc.OnRead(packet) - return - } - - server, err := r.pool.next(downstream) - if err != nil { - logger.Infof("discard udp packet from %s to %s due to can not find upstream server, err: %+v", - cliAddr.String(), localAddr.String()) - return - } - - upstreamAddr, _ := net.ResolveUDPAddr("udp", server.Addr) - uc := NewUpstreamConn(r.spec.ConnectTimeout, upstreamAddr, listenerStop) - if err := uc.Connect(); err != nil { - logger.Errorf("discard udp packet due to upstream connect failed, local addr: %s, err: %+v", localAddr, err) - return - } - - fd, _ := rawConn.(*net.UDPConn).File() - downstreamRawConn, _ := net.FilePacketConn(fd) - dc := NewDownstreamConn(downstreamRawConn.(*net.UDPConn), rawConn.RemoteAddr(), listenerStop) - SetUDPProxyMap(GetProxyMapKey(localAddr.String(), cliAddr.String()), &dc) - r.setCallbacks(dc, uc) - - dc.Start() - dc.OnRead(packet) - } -} - func (r *runtime) setCallbacks(downstreamConn *Connection, upstreamConn *UpstreamConnection) { downstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { if readBuf != nil && readBuf.Len() > 0 { diff --git a/pkg/object/layer4server/spec.go b/pkg/object/tcpproxy/spec.go similarity index 91% rename from pkg/object/layer4server/spec.go rename to pkg/object/tcpproxy/spec.go index 94538a85f9..03f15e3afe 100644 --- a/pkg/object/layer4server/spec.go +++ b/pkg/object/tcpproxy/spec.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package layer4server +package tcpproxy import ( "fmt" @@ -26,9 +26,8 @@ import ( type ( // Spec describes the Layer4 Server. Spec struct { - Name string `yaml:"name" json:"name" jsonschema:"required"` - Protocol string `yaml:"protocol" jsonschema:"required,enum=tcp,enum=udp"` - Port uint16 `yaml:"port" json:"port" jsonschema:"required"` + Name string `yaml:"name" json:"name" jsonschema:"required"` + Port uint16 `yaml:"port" json:"port" jsonschema:"required"` // tcp stream config params MaxConnections uint32 `yaml:"maxConns" jsonschema:"omitempty,minimum=1"` diff --git a/pkg/object/layer4server/layer4server.go b/pkg/object/tcpproxy/tcpserver.go similarity index 53% rename from pkg/object/layer4server/layer4server.go rename to pkg/object/tcpproxy/tcpserver.go index 04c2c9bbe7..efa9899087 100644 --- a/pkg/object/layer4server/layer4server.go +++ b/pkg/object/tcpproxy/tcpserver.go @@ -15,56 +15,56 @@ * limitations under the License. */ -package layer4server +package tcpproxy import ( "github.com/megaease/easegress/pkg/supervisor" ) const ( - // Category is the category of Layer4Server. + // Category is the category of TcpServer. Category = supervisor.CategoryBusinessController - // Kind is the kind of Layer4Server. - Kind = "Layer4Server" + // Kind is the kind of TcpServer. + Kind = "TcpServer" ) func init() { - supervisor.Register(&Layer4Server{}) + supervisor.Register(&TcpServer{}) } type ( - // Layer4Server is Object of tpc/udp server. - Layer4Server struct { + // TcpServer is Object of tcp server. + TcpServer struct { runtime *runtime } ) -// Category returns the category of Layer4Server. -func (l4 *Layer4Server) Category() supervisor.ObjectCategory { +// Category returns the category of TcpServer. +func (l4 *TcpServer) Category() supervisor.ObjectCategory { return Category } -// Kind returns the kind of Layer4Server. -func (l4 *Layer4Server) Kind() string { +// Kind returns the kind of TcpServer. 
+func (l4 *TcpServer) Kind() string { return Kind } -// DefaultSpec returns the default spec of Layer4Server. -func (l4 *Layer4Server) DefaultSpec() interface{} { +// DefaultSpec returns the default spec of TcpServer. +func (l4 *TcpServer) DefaultSpec() interface{} { return &Spec{ MaxConnections: 1024, ConnectTimeout: 5 * 1000, } } -// Validate validates the layer4 server structure. -func (l4 *Layer4Server) Validate() error { +// Validate validates the tcp server structure. +func (l4 *TcpServer) Validate() error { return nil } -// Init initializes Layer4Server. -func (l4 *Layer4Server) Init(superSpec *supervisor.Spec) { +// Init initializes TcpServer. +func (l4 *TcpServer) Init(superSpec *supervisor.Spec) { l4.runtime = newRuntime(superSpec) l4.runtime.eventChan <- &eventReload{ @@ -72,21 +72,21 @@ func (l4 *Layer4Server) Init(superSpec *supervisor.Spec) { } } -// Inherit inherits previous generation of Layer4Server. -func (l4 *Layer4Server) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object) { +// Inherit inherits previous generation of TcpServer. +func (l4 *TcpServer) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object) { - l4.runtime = previousGeneration.(*Layer4Server).runtime + l4.runtime = previousGeneration.(*TcpServer).runtime l4.runtime.eventChan <- &eventReload{ nextSuperSpec: superSpec, } } // Status is the wrapper of runtimes Status. -func (l4 *Layer4Server) Status() *supervisor.Status { +func (l4 *TcpServer) Status() *supervisor.Status { return &supervisor.Status{} } -// Close closes Layer4Server. -func (l4 *Layer4Server) Close() { +// Close closes TcpServer. +func (l4 *TcpServer) Close() { l4.runtime.Close() } diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go index 5eb0ab2288..a0cbe97b39 100644 --- a/pkg/registry/registry.go +++ b/pkg/registry/registry.go @@ -45,10 +45,10 @@ import ( _ "github.com/megaease/easegress/pkg/object/httppipeline" _ "github.com/megaease/easegress/pkg/object/httpserver" _ "github.com/megaease/easegress/pkg/object/ingresscontroller" - _ "github.com/megaease/easegress/pkg/object/layer4server" _ "github.com/megaease/easegress/pkg/object/meshcontroller" _ "github.com/megaease/easegress/pkg/object/nacosserviceregistry" _ "github.com/megaease/easegress/pkg/object/rawconfigtrafficcontroller" + _ "github.com/megaease/easegress/pkg/object/tcpproxy" _ "github.com/megaease/easegress/pkg/object/trafficcontroller" _ "github.com/megaease/easegress/pkg/object/websocketserver" _ "github.com/megaease/easegress/pkg/object/zookeeperserviceregistry" From af4dc0e2f53e10b1bb9dd67afb4498162b57b579 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 19 Oct 2021 09:49:14 +0800 Subject: [PATCH 35/99] [tcpproxy] remove read enable & fix log --- pkg/object/tcpproxy/connection.go | 75 +++++++++---------------------- pkg/object/tcpproxy/listener.go | 6 +-- pkg/object/tcpproxy/runtime.go | 2 +- 3 files changed, 26 insertions(+), 57 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 26e98cd65c..43988461fc 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -42,8 +42,6 @@ type Connection struct { lastBytesSizeRead int64 lastWriteSizeWrite int64 - readEnabled bool - readEnabledChan chan bool // if we need to reload read filters, it's better to stop read data before reload filters readBuffer iobufferpool.IoBuffer writeBuffers net.Buffers @@ -68,8 +66,6 @@ func NewDownstreamConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan 
localAddr: conn.LocalAddr(), remoteAddr: conn.RemoteAddr(), - readEnabled: true, - readEnabledChan: make(chan bool, 1), writeBufferChan: make(chan *[]iobufferpool.IoBuffer, 8), mu: sync.Mutex{}, @@ -95,11 +91,6 @@ func (c *Connection) RemoteAddr() net.Addr { return c.rawConn.RemoteAddr() } -// ReadEnabled get connection read enable status -func (c *Connection) ReadEnabled() bool { - return c.readEnabled -} - // SetOnRead set connection read handle func (c *Connection) SetOnRead(onRead func(buffer iobufferpool.IoBuffer)) { c.onRead = onRead @@ -211,40 +202,32 @@ func (c *Connection) startReadLoop() { return case <-c.listenerStopChan: return - case <-c.readEnabledChan: default: - if c.readEnabled { - err := c.doReadIO() - if err != nil { - if te, ok := err.(net.Error); ok && te.Timeout() { - if c.readBuffer != nil && c.readBuffer.Len() == 0 && c.readBuffer.Cap() > iobufferpool.DefaultBufferReadCapacity { - c.readBuffer.Free() - c.readBuffer.Alloc(iobufferpool.DefaultBufferReadCapacity) - } - continue - } - - // normal close or health check - if c.lastBytesSizeRead == 0 || err == io.EOF { - logger.Infof("tcp connection error on read, local addr: %s, remote addr: %s, err: %s", - c.localAddr.String(), c.remoteAddr.String(), err.Error()) - } else { - logger.Errorf("tcp connection error on read, local addr: %s, remote addr: %s, err: %s", - c.localAddr.String(), c.remoteAddr.String(), err.Error()) + err := c.doReadIO() + if err != nil { + if te, ok := err.(net.Error); ok && te.Timeout() { + if c.readBuffer != nil && c.readBuffer.Len() == 0 && c.readBuffer.Cap() > iobufferpool.DefaultBufferReadCapacity { + c.readBuffer.Free() + c.readBuffer.Alloc(iobufferpool.DefaultBufferReadCapacity) } + continue + } - if err == io.EOF { - _ = c.Close(NoFlush, RemoteClose) - } else { - _ = c.Close(NoFlush, OnReadErrClose) - } - return + // normal close or health check + if c.lastBytesSizeRead == 0 || err == io.EOF { + logger.Infof("tcp connection error on read, local addr: %s, remote addr: %s, err: %s", + c.localAddr.String(), c.remoteAddr.String(), err.Error()) + } else { + logger.Errorf("tcp connection error on read, local addr: %s, remote addr: %s, err: %s", + c.localAddr.String(), c.remoteAddr.String(), err.Error()) } - } else { - select { - case <-c.readEnabledChan: - case <-time.After(100 * time.Millisecond): + + if err == io.EOF { + _ = c.Close(NoFlush, RemoteClose) + } else { + _ = c.Close(NoFlush, OnReadErrClose) } + return } } } @@ -351,18 +334,6 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) return nil } -func (c *Connection) SetReadDisable(disable bool) { - if disable { - if c.readEnabled { - c.readEnabled = false - } - } else { - c.readEnabled = true - // only on read disable status, we need to trigger chan to wake read loop up - c.readEnabledChan <- true - } -} - func (c *Connection) doReadIO() (err error) { if c.readBuffer == nil { c.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) @@ -391,7 +362,7 @@ func (c *Connection) doReadIO() (err error) { c.localAddr.String(), c.remoteAddr.String()) } - if !c.readEnabled || c.readBuffer.Len() == 0 { + if c.readBuffer.Len() == 0 { return } @@ -457,8 +428,6 @@ func NewUpstreamConn(connectTimeout uint32, upstreamAddr net.Addr, listenerStopC connected: 1, remoteAddr: upstreamAddr, - readEnabled: true, - readEnabledChan: make(chan bool, 1), writeBufferChan: make(chan *[]iobufferpool.IoBuffer, 8), mu: sync.Mutex{}, diff --git a/pkg/object/tcpproxy/listener.go b/pkg/object/tcpproxy/listener.go index 
1116ae157c..0f373af495 100644 --- a/pkg/object/tcpproxy/listener.go +++ b/pkg/object/tcpproxy/listener.go @@ -46,11 +46,11 @@ func newListener(spec *Spec, onAccept func(conn net.Conn, listenerStop chan stru name: spec.Name, localAddr: fmt.Sprintf(":%d", spec.Port), - mutex: &sync.Mutex{}, + onAccept: onAccept, maxConns: spec.MaxConnections, - stopChan: make(chan struct{}), - onAccept: onAccept, + mutex: &sync.Mutex{}, + stopChan: make(chan struct{}), } return listen } diff --git a/pkg/object/tcpproxy/runtime.go b/pkg/object/tcpproxy/runtime.go index 1dc73f1df9..9da9445b32 100644 --- a/pkg/object/tcpproxy/runtime.go +++ b/pkg/object/tcpproxy/runtime.go @@ -225,7 +225,7 @@ func (r *runtime) closeServer() { } _ = r.listener.close() - logger.Infof("listener for %s(%s) closed", r.listener.localAddr) + logger.Infof("listener for %s(%s) closed", r.listener.name, r.listener.localAddr) } func (r *runtime) checkFailed() { From 5f5147febcf3b9b28f69c217330654ad2649fae7 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 19 Oct 2021 10:59:54 +0800 Subject: [PATCH 36/99] [tcpproxy] extract connection goWithRecover method --- pkg/object/tcpproxy/connection.go | 56 ++++++++++++++----------------- 1 file changed, 25 insertions(+), 31 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 43988461fc..89e8613853 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -126,46 +126,40 @@ func (c *Connection) State() ConnState { return ConnInit } -func (c *Connection) startRWLoop() { +// GoWithRecover wraps a `go func()` with recover() +func (c *Connection) goWithRecover(handler func(), recoverHandler func(r interface{})) { go func() { defer func() { if r := recover(); r != nil { - logger.Errorf("tcp connection read loop crashed, local addr: %s, remote addr: %s, err: %+v", - c.localAddr.String(), c.remoteAddr.String(), r) - - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("tcp connection close failed, local addr: %s, remote addr: %s, err: %+v", - c.localAddr.String(), c.remoteAddr.String(), r) - } + logger.Errorf("tcp connection goroutine panic: %v\n%s\n", r, string(debug.Stack())) + if recoverHandler != nil { + go func() { + defer func() { + if p := recover(); p != nil { + logger.Errorf("tcp connection goroutine panic: %v\n%s\n", p, string(debug.Stack())) + } + }() + recoverHandler(r) }() - _ = c.Close(NoFlush, LocalClose) - }() + } } }() - c.startReadLoop() + handler() }() +} - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("tcp connection write loop crashed, local addr: %s, remote addr: %s, err: %+v", - c.localAddr.String(), c.remoteAddr.String(), r) - - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("tcp connection close failed, local addr: %s, remote addr: %s, err: %+v", - c.localAddr.String(), c.remoteAddr.String(), r) - } - }() - _ = c.Close(NoFlush, LocalClose) - }() - } - }() +func (c *Connection) startRWLoop() { + c.goWithRecover(func() { + c.startReadLoop() + }, func(r interface{}) { + _ = c.Close(NoFlush, LocalClose) + }) + + c.goWithRecover(func() { c.startWriteLoop() - }() + }, func(r interface{}) { + _ = c.Close(NoFlush, LocalClose) + }) } // Write receive other connection data From bfbaa4a6395ab4d527096af0a3382cb63711b57f Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 19 Oct 2021 11:27:19 +0800 Subject: [PATCH 37/99] [tcpproxy] fix code warning --- pkg/util/iobufferpool/buffer.go | 1 + pkg/util/iobufferpool/bytebuffer_pool.go | 
4 ++++ pkg/util/iobufferpool/iobuffer_pool.go | 3 ++- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/util/iobufferpool/buffer.go b/pkg/util/iobufferpool/buffer.go index 5f6c52631a..c01131a2a6 100644 --- a/pkg/util/iobufferpool/buffer.go +++ b/pkg/util/iobufferpool/buffer.go @@ -19,6 +19,7 @@ package iobufferpool import "io" +// IoBuffer io buffer for stream proxy type IoBuffer interface { // Read reads the next len(p) bytes from the buffer or until the buffer // is drained. The return value n is the number of bytes read. If the diff --git a/pkg/util/iobufferpool/bytebuffer_pool.go b/pkg/util/iobufferpool/bytebuffer_pool.go index c158053382..d337aeccbe 100644 --- a/pkg/util/iobufferpool/bytebuffer_pool.go +++ b/pkg/util/iobufferpool/bytebuffer_pool.go @@ -118,17 +118,20 @@ func (p *byteBufferPool) give(buf *[]byte) { p.pool[slot].pool.Put(buf) } +// ByteBufferPoolContainer byte buffer pool container type ByteBufferPoolContainer struct { bytes []*[]byte *byteBufferPool } +// NewByteBufferPoolContainer construct byte buffer pool container func NewByteBufferPoolContainer() *ByteBufferPoolContainer { return &ByteBufferPoolContainer{ byteBufferPool: bbPool, } } +// Reset clean byte buffer pool container resource func (c *ByteBufferPoolContainer) Reset() { for _, buf := range c.bytes { c.give(buf) @@ -136,6 +139,7 @@ func (c *ByteBufferPoolContainer) Reset() { c.bytes = c.bytes[:0] } +// Take append *[]byte with fixed size from byteBufferPool func (c *ByteBufferPoolContainer) Take(size int) *[]byte { buf := c.take(size) c.bytes = append(c.bytes, buf) diff --git a/pkg/util/iobufferpool/iobuffer_pool.go b/pkg/util/iobufferpool/iobuffer_pool.go index 01318bcb07..bacf333f9e 100644 --- a/pkg/util/iobufferpool/iobuffer_pool.go +++ b/pkg/util/iobufferpool/iobuffer_pool.go @@ -23,7 +23,8 @@ import ( ) const ( - UdpPacketMaxSize = 64 * 1024 + // UDPPacketMaxSize udp packet max size + UDPPacketMaxSize = 64 * 1024 DefaultBufferReadCapacity = 1 << 7 ) From b07d48e139bc51dd7070deb22766514fdef5a9d2 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 19 Oct 2021 14:26:37 +0800 Subject: [PATCH 38/99] [tcpproxy] fix code warning +1 --- pkg/object/tcpproxy/connection.go | 7 +++++ pkg/object/tcpproxy/constant.go | 19 +++++------- pkg/object/tcpproxy/ipfilters.go | 2 +- pkg/object/tcpproxy/runtime.go | 2 +- pkg/object/tcpproxy/tcpserver.go | 42 +++++++++++++------------- pkg/util/iobufferpool/iobuffer.go | 12 +++++--- pkg/util/iobufferpool/iobuffer_pool.go | 3 +- 7 files changed, 45 insertions(+), 42 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 89e8613853..75f2e24383 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -32,6 +32,7 @@ import ( "github.com/megaease/easegress/pkg/util/timerpool" ) +// Connection wrap tcp connection type Connection struct { rawConn net.Conn connected uint32 @@ -96,14 +97,17 @@ func (c *Connection) SetOnRead(onRead func(buffer iobufferpool.IoBuffer)) { c.onRead = onRead } +// OnRead set data read callback func (c *Connection) OnRead(buffer iobufferpool.IoBuffer) { c.onRead(buffer) } +// SetOnClose set close callback func (c *Connection) SetOnClose(onclose func(event ConnectionEvent)) { c.onClose = onclose } +// GetReadBuffer get connection red buffer func (c *Connection) GetReadBuffer() iobufferpool.IoBuffer { return c.readBuffer } @@ -287,6 +291,7 @@ func (c *Connection) appendBuffer(ioBuffers *[]iobufferpool.IoBuffer) { } } +// Close connection close function func (c 
*Connection) Close(ccType CloseType, event ConnectionEvent) (err error) { defer func() { if r := recover(); r != nil { @@ -416,6 +421,7 @@ type UpstreamConnection struct { connectOnce sync.Once } +// NewUpstreamConn construct tcp upstream connection func NewUpstreamConn(connectTimeout uint32, upstreamAddr net.Addr, listenerStopChan chan struct{}) *UpstreamConnection { conn := &UpstreamConnection{ Connection: Connection{ @@ -462,6 +468,7 @@ func (u *UpstreamConnection) connect() (event ConnectionEvent, err error) { return } +// Connect tcp upstream connect to backend server func (u *UpstreamConnection) Connect() (err error) { u.connectOnce.Do(func() { var event ConnectionEvent diff --git a/pkg/object/tcpproxy/constant.go b/pkg/object/tcpproxy/constant.go index d74b9adae4..49885a7d61 100644 --- a/pkg/object/tcpproxy/constant.go +++ b/pkg/object/tcpproxy/constant.go @@ -35,23 +35,18 @@ const ( // ConnectionEvent type type ConnectionEvent string -// ConnectionEvent types const ( - RemoteClose ConnectionEvent = "RemoteClose" - LocalClose ConnectionEvent = "LocalClose" - OnReadErrClose ConnectionEvent = "OnReadErrClose" - OnWriteErrClose ConnectionEvent = "OnWriteErrClose" - OnConnect ConnectionEvent = "OnConnect" - Connected ConnectionEvent = "ConnectedFlag" - ConnectTimeout ConnectionEvent = "ConnectTimeout" - ConnectFailed ConnectionEvent = "ConnectFailed" - OnReadTimeout ConnectionEvent = "OnReadTimeout" - OnWriteTimeout ConnectionEvent = "OnWriteTimeout" + RemoteClose ConnectionEvent = "RemoteClose" + LocalClose ConnectionEvent = "LocalClose" + OnReadErrClose ConnectionEvent = "OnReadErrClose" + Connected ConnectionEvent = "ConnectedFlag" + ConnectTimeout ConnectionEvent = "ConnectTimeout" + ConnectFailed ConnectionEvent = "ConnectFailed" + OnWriteTimeout ConnectionEvent = "OnWriteTimeout" ) var ( ErrConnectionHasClosed = errors.New("connection has closed") - ErrWriteTryLockTimeout = errors.New("write trylock has timeout") ErrWriteBufferChanTimeout = errors.New("writeBufferChan has timeout") ) diff --git a/pkg/object/tcpproxy/ipfilters.go b/pkg/object/tcpproxy/ipfilters.go index 3adc63460e..e219fb3154 100644 --- a/pkg/object/tcpproxy/ipfilters.go +++ b/pkg/object/tcpproxy/ipfilters.go @@ -35,7 +35,7 @@ type ( } ) -func newIpFilters(spec *ipfilter.Spec) *ipFilters { +func newIPFilters(spec *ipfilter.Spec) *ipFilters { m := &ipFilters{} m.rules.Store(&ipFiltersRules{ diff --git a/pkg/object/tcpproxy/runtime.go b/pkg/object/tcpproxy/runtime.go index 9da9445b32..8ce470953f 100644 --- a/pkg/object/tcpproxy/runtime.go +++ b/pkg/object/tcpproxy/runtime.go @@ -78,7 +78,7 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { superSpec: superSpec, pool: newPool(superSpec.Super(), spec.Pool, ""), - ipFilters: newIpFilters(spec.IPFilter), + ipFilters: newIPFilters(spec.IPFilter), eventChan: make(chan interface{}, 10), } diff --git a/pkg/object/tcpproxy/tcpserver.go b/pkg/object/tcpproxy/tcpserver.go index efa9899087..fe8052b8c1 100644 --- a/pkg/object/tcpproxy/tcpserver.go +++ b/pkg/object/tcpproxy/tcpserver.go @@ -22,36 +22,36 @@ import ( ) const ( - // Category is the category of TcpServer. + // Category is the category of TCPServer. Category = supervisor.CategoryBusinessController - // Kind is the kind of TcpServer. - Kind = "TcpServer" + // Kind is the kind of TCPServer. + Kind = "TCPServer" ) func init() { - supervisor.Register(&TcpServer{}) + supervisor.Register(&TCPServer{}) } type ( - // TcpServer is Object of tcp server. - TcpServer struct { + // TCPServer is Object of tcp server. 
+ TCPServer struct { runtime *runtime } ) -// Category returns the category of TcpServer. -func (l4 *TcpServer) Category() supervisor.ObjectCategory { +// Category returns the category of TCPServer. +func (l4 *TCPServer) Category() supervisor.ObjectCategory { return Category } -// Kind returns the kind of TcpServer. -func (l4 *TcpServer) Kind() string { +// Kind returns the kind of TCPServer. +func (l4 *TCPServer) Kind() string { return Kind } -// DefaultSpec returns the default spec of TcpServer. -func (l4 *TcpServer) DefaultSpec() interface{} { +// DefaultSpec returns the default spec of TCPServer. +func (l4 *TCPServer) DefaultSpec() interface{} { return &Spec{ MaxConnections: 1024, ConnectTimeout: 5 * 1000, @@ -59,12 +59,12 @@ func (l4 *TcpServer) DefaultSpec() interface{} { } // Validate validates the tcp server structure. -func (l4 *TcpServer) Validate() error { +func (l4 *TCPServer) Validate() error { return nil } -// Init initializes TcpServer. -func (l4 *TcpServer) Init(superSpec *supervisor.Spec) { +// Init initializes TCPServer. +func (l4 *TCPServer) Init(superSpec *supervisor.Spec) { l4.runtime = newRuntime(superSpec) l4.runtime.eventChan <- &eventReload{ @@ -72,21 +72,21 @@ func (l4 *TcpServer) Init(superSpec *supervisor.Spec) { } } -// Inherit inherits previous generation of TcpServer. -func (l4 *TcpServer) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object) { +// Inherit inherits previous generation of TCPServer. +func (l4 *TCPServer) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object) { - l4.runtime = previousGeneration.(*TcpServer).runtime + l4.runtime = previousGeneration.(*TCPServer).runtime l4.runtime.eventChan <- &eventReload{ nextSuperSpec: superSpec, } } // Status is the wrapper of runtimes Status. -func (l4 *TcpServer) Status() *supervisor.Status { +func (l4 *TCPServer) Status() *supervisor.Status { return &supervisor.Status{} } -// Close closes TcpServer. -func (l4 *TcpServer) Close() { +// Close closes TCPServer. 
+func (l4 *TCPServer) Close() { l4.runtime.Close() } diff --git a/pkg/util/iobufferpool/iobuffer.go b/pkg/util/iobufferpool/iobuffer.go index f02f00ea3a..5c1bcd88c9 100644 --- a/pkg/util/iobufferpool/iobuffer.go +++ b/pkg/util/iobufferpool/iobuffer.go @@ -23,7 +23,6 @@ import ( "io" "sync" "sync/atomic" - "time" ) const ( @@ -39,11 +38,10 @@ const ( var nullByte []byte var ( - EOF = errors.New("EOF") - ErrTooLarge = errors.New("io buffer: too large") - ErrNegativeCount = errors.New("io buffer: negative count") + // EOF io buffer eof sign + EOF = errors.New("EOF") + // ErrInvalidWriteCount io buffer: invalid write count ErrInvalidWriteCount = errors.New("io buffer: invalid write count") - ConnReadTimeout = 15 * time.Second ) type pipe struct { @@ -117,6 +115,7 @@ func (p *pipe) CloseWithError(err error) { defer p.c.Signal() } +// NewPipeBuffer create pipe buffer with fixed capacity func NewPipeBuffer(capacity int) IoBuffer { return &pipe{ IoBuffer: newIoBuffer(capacity), @@ -147,6 +146,7 @@ func newIoBuffer(capacity int) IoBuffer { return buffer } +// NewIoBufferString new io buffer with string func NewIoBufferString(s string) IoBuffer { if s == "" { return newIoBuffer(0) @@ -158,6 +158,7 @@ func NewIoBufferString(s string) IoBuffer { } } +// NewIoBufferBytes new io buffer with bytes array func NewIoBufferBytes(bytes []byte) IoBuffer { if bytes == nil { return NewIoBuffer(0) @@ -169,6 +170,7 @@ func NewIoBufferBytes(bytes []byte) IoBuffer { } } +// NewIoBufferEOF new io buffer with eof sign func NewIoBufferEOF() IoBuffer { buf := newIoBuffer(0) buf.SetEOF(true) diff --git a/pkg/util/iobufferpool/iobuffer_pool.go b/pkg/util/iobufferpool/iobuffer_pool.go index bacf333f9e..bb24e47698 100644 --- a/pkg/util/iobufferpool/iobuffer_pool.go +++ b/pkg/util/iobufferpool/iobuffer_pool.go @@ -23,8 +23,7 @@ import ( ) const ( - // UDPPacketMaxSize udp packet max size - UDPPacketMaxSize = 64 * 1024 + // DefaultBufferReadCapacity default buffer capacity for stream proxy such as tcp DefaultBufferReadCapacity = 1 << 7 ) From 23841b045db55c13cae666ac2d187c44e8a904af Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 19 Oct 2021 14:38:06 +0800 Subject: [PATCH 39/99] [tcpproxy] change err EOF variable to ErrEOF --- pkg/object/tcpproxy/connection.go | 4 ++-- pkg/object/tcpproxy/listener.go | 9 +++++---- pkg/util/iobufferpool/buffer.go | 4 ++-- pkg/util/iobufferpool/iobuffer.go | 4 ++-- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 75f2e24383..713b7a636c 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -260,7 +260,7 @@ func (c *Connection) startWriteLoop() { } if err != nil { - if err == iobufferpool.EOF { + if err == iobufferpool.ErrEOF { logger.Debugf("tcp connection local close with eof, local addr: %s, remote addr: %s", c.localAddr.String(), c.remoteAddr.String()) _ = c.Close(NoFlush, LocalClose) @@ -402,7 +402,7 @@ func (c *Connection) doWriteIO() (bytesSent int64, err error) { c.ioBuffers[i] = nil c.writeBuffers[i] = nil if buf.EOF() { - err = iobufferpool.EOF + err = iobufferpool.ErrEOF } if e := iobufferpool.PutIoBuffer(buf); e != nil { logger.Errorf("tcp connection give buffer error, local addr: %s, remote addr: %s, err: %+v", diff --git a/pkg/object/tcpproxy/listener.go b/pkg/object/tcpproxy/listener.go index 0f373af495..e6ef9bd5ec 100644 --- a/pkg/object/tcpproxy/listener.go +++ b/pkg/object/tcpproxy/listener.go @@ -26,6 +26,7 @@ import ( 
"github.com/megaease/easegress/pkg/util/limitlistener" ) +// ListenerState listener running state type ListenerState int type listener struct { @@ -56,12 +57,12 @@ func newListener(spec *Spec, onAccept func(conn net.Conn, listenerStop chan stru } func (l *listener) listen() error { - if tl, err := net.Listen("tcp", l.localAddr); err != nil { + tl, err := net.Listen("tcp", l.localAddr) + if err != nil { return err - } else { - // wrap tcp listener with accept limit - l.tcpListener = limitlistener.NewLimitListener(tl, l.maxConns) } + // wrap tcp listener with accept limit + l.tcpListener = limitlistener.NewLimitListener(tl, l.maxConns) return nil } diff --git a/pkg/util/iobufferpool/buffer.go b/pkg/util/iobufferpool/buffer.go index c01131a2a6..7092411526 100644 --- a/pkg/util/iobufferpool/buffer.go +++ b/pkg/util/iobufferpool/buffer.go @@ -33,7 +33,7 @@ type IoBuffer interface { // buffer becomes too large, ReadFrom will panic with ErrTooLarge. ReadOnce(r io.Reader) (n int64, err error) - // ReadFrom reads data from r until EOF and appends it to the buffer, growing + // ReadFrom reads data from r until ErrEOF and appends it to the buffer, growing // the buffer as needed. The return value n is the number of bytes read. Any // error except io.EOF encountered during the read is also returned. If the // buffer becomes too large, ReadFrom will panic with ErrTooLarge. @@ -126,7 +126,7 @@ type IoBuffer interface { // EOF returns whether Io is EOF on the connection EOF() bool - //SetEOF sets the IoBuffer EOF + //SetEOF sets the IoBuffer ErrEOF SetEOF(eof bool) Append(data []byte) error diff --git a/pkg/util/iobufferpool/iobuffer.go b/pkg/util/iobufferpool/iobuffer.go index 5c1bcd88c9..91656653bc 100644 --- a/pkg/util/iobufferpool/iobuffer.go +++ b/pkg/util/iobufferpool/iobuffer.go @@ -38,8 +38,8 @@ const ( var nullByte []byte var ( - // EOF io buffer eof sign - EOF = errors.New("EOF") + // ErrEOF io buffer eof sign + ErrEOF = errors.New("EOF") // ErrInvalidWriteCount io buffer: invalid write count ErrInvalidWriteCount = errors.New("io buffer: invalid write count") ) From d13352803a71c02a6b8a352d42a4b6500f79f171 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Wed, 20 Oct 2021 13:58:46 +0800 Subject: [PATCH 40/99] [tcpproxy] fix revive warning --- pkg/object/tcpproxy/constant.go | 19 ++++++++++++++----- pkg/util/iobufferpool/iobuffer.go | 1 + 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/pkg/object/tcpproxy/constant.go b/pkg/object/tcpproxy/constant.go index 49885a7d61..07a6a2daaf 100644 --- a/pkg/object/tcpproxy/constant.go +++ b/pkg/object/tcpproxy/constant.go @@ -36,17 +36,26 @@ const ( type ConnectionEvent string const ( - RemoteClose ConnectionEvent = "RemoteClose" - LocalClose ConnectionEvent = "LocalClose" + // RemoteClose connection closed by remote + RemoteClose ConnectionEvent = "RemoteClose" + // LocalClose connection closed by local + LocalClose ConnectionEvent = "LocalClose" + // OnReadErrClose connection closed by read error OnReadErrClose ConnectionEvent = "OnReadErrClose" - Connected ConnectionEvent = "ConnectedFlag" + // Connected connection has been connected + Connected ConnectionEvent = "ConnectedFlag" + // ConnectTimeout connect to remote failed due to timeout ConnectTimeout ConnectionEvent = "ConnectTimeout" - ConnectFailed ConnectionEvent = "ConnectFailed" + // ConnectFailed connect to remote failed + ConnectFailed ConnectionEvent = "ConnectFailed" + // OnWriteTimeout write data failed due to timeout OnWriteTimeout ConnectionEvent = "OnWriteTimeout" ) var ( - 
ErrConnectionHasClosed = errors.New("connection has closed") + // ErrConnectionHasClosed connection has been closed + ErrConnectionHasClosed = errors.New("connection has closed") + // ErrWriteBufferChanTimeout writeBufferChan has timeout ErrWriteBufferChanTimeout = errors.New("writeBufferChan has timeout") ) diff --git a/pkg/util/iobufferpool/iobuffer.go b/pkg/util/iobufferpool/iobuffer.go index 91656653bc..5b3504d73d 100644 --- a/pkg/util/iobufferpool/iobuffer.go +++ b/pkg/util/iobufferpool/iobuffer.go @@ -26,6 +26,7 @@ import ( ) const ( + // AutoExpand auto expand io buffer AutoExpand = -1 MinRead = 1 << 9 MaxRead = 1 << 17 From b04282efb1b99332770427f605d7f1dac38f6460 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 21 Oct 2021 15:32:32 +0800 Subject: [PATCH 41/99] [tcpproxy] finish first version of udp proxy --- pkg/object/udpproxy/backendserver.go | 279 +++++++++++++++++++ pkg/object/udpproxy/ipfilters.go | 86 ++++++ pkg/object/udpproxy/pool.go | 77 ++++++ pkg/object/udpproxy/runtime.go | 364 +++++++++++++++++++++++++ pkg/object/udpproxy/session.go | 173 ++++++++++++ pkg/object/udpproxy/spec.go | 67 +++++ pkg/object/udpproxy/udpserver.go | 115 ++++++++ pkg/util/iobufferpool/iobuffer_pool.go | 2 + 8 files changed, 1163 insertions(+) create mode 100644 pkg/object/udpproxy/backendserver.go create mode 100644 pkg/object/udpproxy/ipfilters.go create mode 100644 pkg/object/udpproxy/pool.go create mode 100644 pkg/object/udpproxy/runtime.go create mode 100644 pkg/object/udpproxy/session.go create mode 100644 pkg/object/udpproxy/spec.go create mode 100644 pkg/object/udpproxy/udpserver.go diff --git a/pkg/object/udpproxy/backendserver.go b/pkg/object/udpproxy/backendserver.go new file mode 100644 index 0000000000..d6b99aefc2 --- /dev/null +++ b/pkg/object/udpproxy/backendserver.go @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package udpproxy + +import ( + "fmt" + "math/rand" + "sync" + "sync/atomic" + + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/object/serviceregistry" + "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/hashtool" + "github.com/megaease/easegress/pkg/util/stringtool" +) + +const ( + // PolicyRoundRobin is the policy of round-robin. + PolicyRoundRobin = "roundRobin" + // PolicyRandom is the policy of random. + PolicyRandom = "random" + // PolicyWeightedRandom is the policy of weighted random. + PolicyWeightedRandom = "weightedRandom" + // PolicyIPHash is the policy of ip hash. + PolicyIPHash = "ipHash" +) + +type ( + servers struct { + poolSpec *PoolSpec + super *supervisor.Supervisor + + mutex sync.Mutex + serviceRegistry *serviceregistry.ServiceRegistry + serviceWatcher serviceregistry.ServiceWatcher + static *staticServers + + done chan struct{} + } + + staticServers struct { + count uint64 + weightsSum int + servers []*Server + lb LoadBalance + } + + // Server is proxy server. 
+ Server struct { + Addr string `yaml:"url" jsonschema:"required,format=hostport"` + Tags []string `yaml:"tags" jsonschema:"omitempty,uniqueItems=true"` + Weight int `yaml:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` + } + + // LoadBalance is load balance for multiple servers. + LoadBalance struct { + Policy string `yaml:"policy" jsonschema:"required,enum=roundRobin,enum=random,enum=weightedRandom,enum=ipHash"` + } +) + +func newServers(super *supervisor.Supervisor, poolSpec *PoolSpec) *servers { + s := &servers{ + poolSpec: poolSpec, + super: super, + done: make(chan struct{}), + } + + s.useStaticServers() + if poolSpec.ServiceRegistry == "" || poolSpec.ServiceName == "" { + return s + } + + s.serviceRegistry = s.super.MustGetSystemController(serviceregistry.Kind). + Instance().(*serviceregistry.ServiceRegistry) + s.tryUseService() + s.serviceWatcher = s.serviceRegistry.NewServiceWatcher(s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName) + + go s.watchService() + return s +} + +func (s *Server) String() string { + return fmt.Sprintf("%s,%v,%d", s.Addr, s.Tags, s.Weight) +} + +func (s *servers) watchService() { + for { + select { + case <-s.done: + return + case event := <-s.serviceWatcher.Watch(): + s.handleEvent(event) + } + } +} + +func (s *servers) handleEvent(event *serviceregistry.ServiceEvent) { + s.useService(event.Instances) +} + +func (s *servers) tryUseService() { + serviceInstanceSpecs, err := s.serviceRegistry.ListServiceInstances(s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName) + + if err != nil { + logger.Errorf("get service %s/%s failed: %v", + s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName, err) + s.useStaticServers() + return + } + s.useService(serviceInstanceSpecs) +} + +func (s *servers) useService(serviceInstanceSpecs map[string]*serviceregistry.ServiceInstanceSpec) { + var servers []*Server + for _, instance := range serviceInstanceSpecs { + servers = append(servers, &Server{ + Addr: fmt.Sprintf("%s:%d", instance.Address, instance.Port), + Tags: instance.Tags, + Weight: instance.Weight, + }) + } + if len(servers) == 0 { + logger.Errorf("%s/%s: empty service instance", + s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName) + s.useStaticServers() + return + } + + dynamicServers := newStaticServers(servers, s.poolSpec.ServersTags, s.poolSpec.LoadBalance) + if dynamicServers.len() == 0 { + logger.Errorf("%s/%s: no service instance satisfy tags: %v", + s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName, s.poolSpec.ServersTags) + s.useStaticServers() + } + + logger.Infof("use dynamic service: %s/%s", s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName) + + s.mutex.Lock() + defer s.mutex.Unlock() + s.static = dynamicServers +} + +func (s *servers) useStaticServers() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.static = newStaticServers(s.poolSpec.Servers, s.poolSpec.ServersTags, s.poolSpec.LoadBalance) +} + +func (s *servers) snapshot() *staticServers { + s.mutex.Lock() + defer s.mutex.Unlock() + + return s.static +} + +func (s *servers) len() int { + static := s.snapshot() + return static.len() +} + +func (s *servers) next(cliAddr string) (*Server, error) { + static := s.snapshot() + if static.len() == 0 { + return nil, fmt.Errorf("no server available") + } + return static.next(cliAddr), nil +} + +func (s *servers) close() { + close(s.done) + + if s.serviceWatcher != nil { + s.serviceWatcher.Stop() + } +} + +func newStaticServers(servers []*Server, tags []string, lb *LoadBalance) *staticServers { + if servers == nil { + servers = make([]*Server, 0) + } + 
+ ss := &staticServers{} + if lb == nil { + ss.lb.Policy = PolicyRoundRobin + } else { + ss.lb = *lb + } + + defer ss.prepare() + + if len(tags) == 0 { + ss.servers = servers + return ss + } + + chosenServers := make([]*Server, 0) + for _, server := range servers { + for _, tag := range tags { + if stringtool.StrInSlice(tag, server.Tags) { + chosenServers = append(chosenServers, server) + break + } + } + } + ss.servers = chosenServers + return ss +} + +func (ss *staticServers) prepare() { + for _, server := range ss.servers { + ss.weightsSum += server.Weight + } +} + +func (ss *staticServers) len() int { + return len(ss.servers) +} + +func (ss *staticServers) next(cliAddr string) *Server { + switch ss.lb.Policy { + case PolicyRoundRobin: + return ss.roundRobin() + case PolicyRandom: + return ss.random() + case PolicyWeightedRandom: + return ss.weightedRandom() + case PolicyIPHash: + return ss.ipHash(cliAddr) + } + logger.Errorf("BUG: unknown load balance policy: %s", ss.lb.Policy) + return ss.roundRobin() +} + +func (ss *staticServers) roundRobin() *Server { + count := atomic.AddUint64(&ss.count, 1) + // NOTE: count starts from 0. + count-- + return ss.servers[int(count)%len(ss.servers)] +} + +func (ss *staticServers) random() *Server { + return ss.servers[rand.Intn(len(ss.servers))] +} + +func (ss *staticServers) weightedRandom() *Server { + randomWeight := rand.Intn(ss.weightsSum) + for _, server := range ss.servers { + randomWeight -= server.Weight + if randomWeight < 0 { + return server + } + } + + logger.Errorf("BUG: weighted random can't pick a server: sum(%d) servers(%+v)", + ss.weightsSum, ss.servers) + + return ss.random() +} + +func (ss *staticServers) ipHash(cliAddr string) *Server { + sum32 := int(hashtool.Hash32(cliAddr)) + return ss.servers[sum32%len(ss.servers)] +} diff --git a/pkg/object/udpproxy/ipfilters.go b/pkg/object/udpproxy/ipfilters.go new file mode 100644 index 0000000000..4e5db51aff --- /dev/null +++ b/pkg/object/udpproxy/ipfilters.go @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package udpproxy + +import ( + "reflect" + "sync/atomic" + + "github.com/megaease/easegress/pkg/util/ipfilter" +) + +type ( + ipFilters struct { + rules atomic.Value + } + + ipFiltersRules struct { + spec *ipfilter.Spec + ipFilter *ipfilter.IPFilter + } +) + +func newIPFilters(spec *ipfilter.Spec) *ipFilters { + m := &ipFilters{} + + m.rules.Store(&ipFiltersRules{ + spec: spec, + ipFilter: newIPFilter(spec), + }) + return m +} + +func (i *ipFilters) AllowIP(ip string) bool { + rules := i.rules.Load().(*ipFiltersRules) + if rules == nil || rules.spec == nil { + return true + } + return rules.ipFilter.Allow(ip) +} + +func (i *ipFilters) reloadRules(spec *ipfilter.Spec) { + if spec == nil { + i.rules.Store(&ipFiltersRules{}) + return + } + + old := i.rules.Load().(*ipFiltersRules) + if reflect.DeepEqual(old.spec, spec) { + return + } + + rules := &ipFiltersRules{ + spec: spec, + ipFilter: newIPFilter(spec), + } + i.rules.Store(rules) +} + +func newIPFilter(spec *ipfilter.Spec) *ipfilter.IPFilter { + if spec == nil { + return nil + } + return ipfilter.New(spec) +} + +func (r *ipFiltersRules) pass(downstreamIP string) bool { + if r.ipFilter == nil { + return true + } + return r.ipFilter.Allow(downstreamIP) +} diff --git a/pkg/object/udpproxy/pool.go b/pkg/object/udpproxy/pool.go new file mode 100644 index 0000000000..a0df4787c5 --- /dev/null +++ b/pkg/object/udpproxy/pool.go @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package udpproxy + +import ( + "reflect" + "sync/atomic" + + "github.com/megaease/easegress/pkg/supervisor" +) + +type ( + pool struct { + rules atomic.Value + } + + // pool backend server pool + poolRules struct { + spec *PoolSpec + + tagPrefix string + servers *servers + } +) + +func newPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) *pool { + p := &pool{} + + p.rules.Store(&poolRules{ + spec: spec, + + tagPrefix: tagPrefix, + servers: newServers(super, spec), + }) + return p +} + +func (p *pool) next(cliAddr string) (*Server, error) { + rules := p.rules.Load().(*poolRules) + return rules.servers.next(cliAddr) +} + +func (p *pool) close() { + if old := p.rules.Load(); old != nil { + oldPool := old.(*poolRules) + oldPool.servers.close() + } +} + +func (p *pool) reloadRules(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) { + old := p.rules.Load().(*poolRules) + if reflect.DeepEqual(old.spec, spec) { + return + } + p.close() + p.rules.Store(&poolRules{ + spec: spec, + + tagPrefix: tagPrefix, + servers: newServers(super, spec), + }) +} diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go new file mode 100644 index 0000000000..6048d92cf4 --- /dev/null +++ b/pkg/object/udpproxy/runtime.go @@ -0,0 +1,364 @@ +package udpproxy + +import ( + "fmt" + "net" + "reflect" + "sync" + "sync/atomic" + "time" + + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/iobufferpool" +) + +const ( + checkFailedTimeout = 10 * time.Second + + stateNil stateType = "nil" + stateFailed stateType = "failed" + stateRunning stateType = "running" + stateClosed stateType = "closed" +) + +type ( + stateType string + + eventCheckFailed struct{} + eventServeFailed struct { + startNum uint64 + err error + } + + eventReload struct { + nextSuperSpec *supervisor.Spec + } + eventClose struct{ done chan struct{} } + + runtime struct { + superSpec *supervisor.Spec + spec *Spec + + startNum uint64 + pool *pool // backend servers pool + serverConn *net.UDPConn // listener + sessions map[string]*session + + stopped uint32 + state atomic.Value // runtime running state + eventChan chan interface{} // receive event + ipFilters *ipFilters + + mu sync.Mutex + } +) + +func newRuntime(superSpec *supervisor.Spec) *runtime { + spec := superSpec.ObjectSpec().(*Spec) + r := &runtime{ + superSpec: superSpec, + + pool: newPool(superSpec.Super(), spec.Pool, ""), + ipFilters: newIPFilters(spec.IPFilter), + + eventChan: make(chan interface{}, 10), + } + + r.setState(stateNil) + + go r.fsm() + go r.checkFailed() + return r +} + +// FSM is the finite-state-machine for the runtime. +func (r *runtime) fsm() { + ticker := time.NewTicker(2 * time.Second) + for { + select { + case <-ticker.C: + r.cleanup() + case e := <-r.eventChan: + switch e := e.(type) { + case *eventCheckFailed: + r.handleEventCheckFailed() + case *eventServeFailed: + r.handleEventServeFailed(e) + case *eventReload: + r.handleEventReload(e) + case *eventClose: + ticker.Stop() + r.handleEventClose(e) + // NOTE: We don't close hs.eventChan, + // in case of panic of any other goroutines + // to send event to it later. 
+ return + default: + logger.Errorf("BUG: unknown event: %T\n", e) + } + } + } +} + +func (r *runtime) setState(state stateType) { + r.state.Store(state) +} + +func (r *runtime) getState() stateType { + return r.state.Load().(stateType) +} + +// Close notify runtime close +func (r *runtime) Close() { + done := make(chan struct{}) + r.eventChan <- &eventClose{done: done} + <-done +} + +func (r *runtime) checkFailed() { + ticker := time.NewTicker(checkFailedTimeout) + for range ticker.C { + state := r.getState() + if state == stateFailed { + r.eventChan <- &eventCheckFailed{} + } else if state == stateClosed { + ticker.Stop() + return + } + } +} + +func (r *runtime) handleEventCheckFailed() { + if r.getState() == stateFailed { + r.startServer() + } +} + +func (r *runtime) handleEventServeFailed(e *eventServeFailed) { + if r.startNum > e.startNum { + return + } + r.setState(stateFailed) +} + +func (r *runtime) handleEventReload(e *eventReload) { + r.reload(e.nextSuperSpec) +} + +func (r *runtime) handleEventClose(e *eventClose) { + _ = r.serverConn.Close() + r.mu.Lock() + for k, s := range r.sessions { + delete(r.sessions, k) + s.Close() + } + r.mu.Unlock() + r.pool.close() + close(e.done) +} + +func (r *runtime) reload(nextSuperSpec *supervisor.Spec) { + r.superSpec = nextSuperSpec + nextSpec := nextSuperSpec.ObjectSpec().(*Spec) + + r.ipFilters.reloadRules(nextSpec.IPFilter) + r.pool.reloadRules(nextSuperSpec.Super(), nextSpec.Pool, "") + + // NOTE: Due to the mechanism of supervisor, + // nextSpec must not be nil, just defensive programming here. + switch { + case r.spec == nil && nextSpec == nil: + logger.Errorf("BUG: nextSpec is nil") + // Nothing to do. + case r.spec == nil && nextSpec != nil: + r.spec = nextSpec + r.startServer() + case r.spec != nil && nextSpec == nil: + logger.Errorf("BUG: nextSpec is nil") + r.spec = nil + r.closeServer() + case r.spec != nil && nextSpec != nil: + if r.needRestartServer(nextSpec) { + r.spec = nextSpec + r.closeServer() + r.startServer() + } else { + r.spec = nextSpec + } + } +} + +func (r *runtime) startServer() { + listenAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf(":%d", r.spec.Port)) + if err != nil { + r.setState(stateFailed) + logger.Errorf("parse udp listen addr(%s) failed, err: %+v", r.spec.Port, err) + return + } + + r.serverConn, err = net.ListenUDP("udp", listenAddr) + if err != nil { + r.setState(stateFailed) + logger.Errorf("create udp listener(%s) failed, err: %+v", r.spec.Port, err) + return + } + r.setState(stateRunning) + + var cp *connPool + if r.spec.ContinuousMode { + cp = newConnPool() + } + + go func() { + defer cp.close() + + buf := iobufferpool.GetIoBuffer(iobufferpool.UDPPacketMaxSize) + for { + buf.Reset() + n, downstreamAddr, err := r.serverConn.ReadFromUDP(buf.Bytes()[:buf.Cap()]) + _ = buf.Grow(n) + + if err != nil { + if r.getState() != stateRunning { + return + } + logger.Errorf("failed to read packet from udp connection(:%d), err: %+v", r.spec.Port, err) + continue + } + + if r.ipFilters != nil { + if !r.ipFilters.AllowIP(downstreamAddr.IP.String()) { + logger.Debugf("discard udp packet from %s send to udp server(:%d)", downstreamAddr.IP.String(), r.spec.Port) + continue + } + } + + if r.spec.ContinuousMode { + if err := r.sendOneShot(cp, downstreamAddr, &buf); err != nil { + logger.Errorf("%s", err.Error()) + } + continue + } + + data := buf.Clone() + r.proxy(downstreamAddr, &data) + } + }() +} + +func (r *runtime) getUpstreamConn(pool *connPool, downstreamAddr *net.UDPAddr) (net.Conn, string, error) { + server, 
err := r.pool.next(downstreamAddr.IP.String()) + if err != nil { + return nil, "", fmt.Errorf("cannot get upstream addr for udp connection(:%d)", r.spec.Port) + } + + var upstreamConn net.Conn + if pool != nil { + upstreamConn = pool.get(server.Addr) + if upstreamConn != nil { + return upstreamConn, server.Addr, nil + } + } + + addr, err := net.ResolveUDPAddr("udp", server.Addr) + if err != nil { + return nil, server.Addr, fmt.Errorf("parse upstream addr(%s) to udp addr failed, err: %+v", server.Addr, err) + } + + upstreamConn, err = net.DialUDP("udp", nil, addr) + if err != nil { + return nil, server.Addr, fmt.Errorf("dial to upstream addr(%s) failed, err: %+v", server.Addr, err) + } + if pool != nil { + pool.put(server.Addr, upstreamConn) + } + return upstreamConn, server.Addr, nil +} + +func (r *runtime) sendOneShot(pool *connPool, downstreamAddr *net.UDPAddr, buf *iobufferpool.IoBuffer) error { + upstreamConn, upstreamAddr, err := r.getUpstreamConn(pool, downstreamAddr) + if err != nil { + return err + } + + n, err := upstreamConn.Write((*buf).Bytes()) + if err != nil { + return fmt.Errorf("send data to %s failed, err: %+v", upstreamAddr, err) + } + + if n != (*buf).Len() { + return fmt.Errorf("failed to send full packet to %s, read %d but sent %d", upstreamAddr, (*buf).Len(), n) + } + return nil +} + +func (r *runtime) getSession(downstreamAddr *net.UDPAddr) (*session, error) { + key := downstreamAddr.String() + + r.mu.Lock() + defer r.mu.Unlock() + + s, ok := r.sessions[key] + if ok && !s.IsClosed() { + return s, nil + } + + if ok { + go func() { s.Close() }() + } + + upstreamConn, upstreamAddr, err := r.getUpstreamConn(nil, downstreamAddr) + if err != nil { + return nil, err + } + + s = newSession(downstreamAddr, upstreamAddr, upstreamConn, + time.Duration(r.spec.UpstreamIdleTimeout)*time.Millisecond, time.Duration(r.spec.DownstreamIdleTimeout)*time.Millisecond) + s.ListenResponse(r.serverConn) + + r.sessions[key] = s + return s, nil +} + +func (r *runtime) proxy(downstreamAddr *net.UDPAddr, buf *iobufferpool.IoBuffer) { + s, err := r.getSession(downstreamAddr) + if err != nil { + logger.Errorf("%s", err.Error()) + return + } + + err = s.Write(buf) + if err != nil { + logger.Errorf("write data to udp session(%s) failed, err: %v", downstreamAddr.IP.String(), err) + } +} + +func (r *runtime) cleanup() { + r.mu.Lock() + defer r.mu.Unlock() + + for k, s := range r.sessions { + if s.IsClosed() { + delete(r.sessions, k) + } + } +} + +func (r *runtime) closeServer() { + done := make(chan struct{}) + r.eventChan <- &eventClose{ + done: done, + } + <-done +} + +func (r *runtime) needRestartServer(nextSpec *Spec) bool { + x := *r.spec + y := *nextSpec + + x.Pool, y.Pool = nil, nil + x.IPFilter, y.IPFilter = nil, nil + + return !reflect.DeepEqual(x, y) +} diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go new file mode 100644 index 0000000000..f29cc10e6d --- /dev/null +++ b/pkg/object/udpproxy/session.go @@ -0,0 +1,173 @@ +package udpproxy + +import ( + "fmt" + "net" + "sync/atomic" + "time" + + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/util/iobufferpool" +) + +type session struct { + upstreamAddr string + downstreamAddr *net.UDPAddr + downstreamIdleTimeout time.Duration + upstreamIdleTimeout time.Duration + + upstreamConn net.Conn + writeBuf chan *iobufferpool.IoBuffer + stopChan chan struct{} + stopped uint32 +} + +func newSession(downstreamAddr *net.UDPAddr, upstreamAddr string, upstreamConn net.Conn, + downstreamIdleTimeout,
upstreamIdleTimeout time.Duration) *session { + s := session{ + upstreamAddr: upstreamAddr, + downstreamAddr: downstreamAddr, + upstreamConn: upstreamConn, + upstreamIdleTimeout: upstreamIdleTimeout, + downstreamIdleTimeout: downstreamIdleTimeout, + + writeBuf: make(chan *iobufferpool.IoBuffer, 512), + stopChan: make(chan struct{}, 1), + } + + go func() { + var t *time.Timer + var idleCheck <-chan time.Time + + if downstreamIdleTimeout > 0 { + t = time.NewTimer(downstreamIdleTimeout) + idleCheck = t.C + } + + for { + select { + case <-idleCheck: + s.Close() + case buf, ok := <-s.writeBuf: + if !ok { + s.Close() + continue + } + + if t != nil { + if !t.Stop() { + <-t.C + } + t.Reset(downstreamIdleTimeout) + } + + bufLen := (*buf).Len() + n, err := s.upstreamConn.Write((*buf).Bytes()) + _ = iobufferpool.PutIoBuffer(*buf) + + if err != nil { + logger.Errorf("udp connection flush data to upstream(%s) failed, err: %+v", upstreamAddr, err) + s.cleanWriteBuf() + break + } + + if bufLen != n { + logger.Errorf("udp connection flush data to upstream(%s) failed, should write %d but written %d", + upstreamAddr, bufLen, n) + s.cleanWriteBuf() + break + } + + case <-s.stopChan: + if !atomic.CompareAndSwapUint32(&s.stopped, 0, 1) { + break + } + if t != nil { + t.Stop() + } + _ = s.upstreamConn.Close() + s.cleanWriteBuf() + } + } + }() + + return &s +} + +// Write send data to buffer channel, wait flush to upstream +func (s *session) Write(buf *iobufferpool.IoBuffer) error { + if atomic.LoadUint32(&s.stopped) == 1 { + return fmt.Errorf("udp connection from %s to %s has closed", s.downstreamAddr.String(), s.upstreamAddr) + } + + select { + case s.writeBuf <- buf: + default: + _ = iobufferpool.PutIoBuffer(*buf) // if failed, may be try again? + } + return nil +} + +// ListenResponse session listen upstream connection response and send to downstream +func (s *session) ListenResponse(sendTo *net.UDPConn) { + go func() { + buf := iobufferpool.GetIoBuffer(iobufferpool.UDPPacketMaxSize) + defer s.Close() + + for { + if s.upstreamIdleTimeout > 0 { + _ = s.upstreamConn.SetReadDeadline(time.Now().Add(s.upstreamIdleTimeout)) + } + + nRead, err := buf.ReadOnce(s.upstreamConn) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + return + } + + if atomic.LoadUint32(&s.stopped) == 0 { + logger.Errorf("udp connection read data from upstream(%s) failed, err: %+v", s.upstreamAddr, err) + } + return + } + + nWrite, err := sendTo.WriteToUDP(buf.Bytes(), s.downstreamAddr) + if err != nil { + logger.Errorf("udp connection send data to downstream(%s) failed, err: %+v", s.downstreamAddr.String(), err) + return + } + + if nRead != int64(nWrite) { + logger.Errorf("udp connection send data to downstream(%s) failed, should write %d but written %d", + s.downstreamAddr.String(), nRead, nWrite) + return + } + } + }() +} + +func (s *session) cleanWriteBuf() { + for { + select { + case buf := <-s.writeBuf: + if buf != nil { + _ = iobufferpool.PutIoBuffer(*buf) + } + default: + return + } + } +} + +// IsClosed determine session if it is closed +func (s *session) IsClosed() bool { + return atomic.LoadUint32(&s.stopped) == 1 +} + +// Close send session close signal +func (s *session) Close() { + select { + case s.stopChan <- struct{}{}: + default: + } +} diff --git a/pkg/object/udpproxy/spec.go b/pkg/object/udpproxy/spec.go new file mode 100644 index 0000000000..39e0857ad9 --- /dev/null +++ b/pkg/object/udpproxy/spec.go @@ -0,0 +1,67 @@ +package udpproxy + +import ( + "fmt" + + 
"github.com/megaease/easegress/pkg/util/ipfilter" +) + +type ( + + // Spec describes the udp server + Spec struct { + Name string `yaml:"name" json:"name" jsonschema:"required"` + Port uint16 `yaml:"port" json:"port" jsonschema:"required"` + + ContinuousMode bool `yaml:"continuousMode" jsonschema:"omitempty"` + DownstreamIdleTimeout uint32 `yaml:"downstreamIdleTimeout" jsonschema:"omitempty,minimum=1"` + UpstreamIdleTimeout uint32 `yaml:"upstreamIdleTimeout" jsonschema:"omitempty,minimum=1"` + + Pool *PoolSpec `yaml:"pool" jsonschema:"required"` + IPFilter *ipfilter.Spec `yaml:"ipFilters,omitempty" jsonschema:"omitempty"` + } + + // PoolSpec describes a pool of servers. + PoolSpec struct { + ServiceRegistry string `yaml:"serviceRegistry" jsonschema:"omitempty"` + ServiceName string `yaml:"serviceName" jsonschema:"omitempty"` + Servers []*Server `yaml:"servers" jsonschema:"omitempty"` + ServersTags []string `yaml:"serversTags" jsonschema:"omitempty,uniqueItems=true"` + LoadBalance *LoadBalance `yaml:"loadBalance" jsonschema:"required"` + } +) + +// Validate validates Layer4 Server. +func (spec *Spec) Validate() error { + if poolErr := spec.Pool.Validate(); poolErr != nil { + return poolErr + } + + return nil +} + +// Validate validates poolSpec. +func (s PoolSpec) Validate() error { + if s.ServiceName == "" && len(s.Servers) == 0 { + return fmt.Errorf("both serviceName and servers are empty") + } + + serversGotWeight := 0 + for _, server := range s.Servers { + if server.Weight > 0 { + serversGotWeight++ + } + } + if serversGotWeight > 0 && serversGotWeight < len(s.Servers) { + return fmt.Errorf("not all servers have weight(%d/%d)", + serversGotWeight, len(s.Servers)) + } + + if s.ServiceName == "" { + servers := newStaticServers(s.Servers, s.ServersTags, s.LoadBalance) + if servers.len() == 0 { + return fmt.Errorf("serversTags picks none of servers") + } + } + return nil +} diff --git a/pkg/object/udpproxy/udpserver.go b/pkg/object/udpproxy/udpserver.go new file mode 100644 index 0000000000..5389db9a9e --- /dev/null +++ b/pkg/object/udpproxy/udpserver.go @@ -0,0 +1,115 @@ +package udpproxy + +import ( + "net" + "sync" + + "github.com/megaease/easegress/pkg/supervisor" +) + +const ( + // Category is the category of TCPServer. + Category = supervisor.CategoryBusinessController + + // Kind is the kind of TCPServer. + Kind = "UDPServer" +) + +func init() { + supervisor.Register(&UDPServer{}) +} + +type ( + // UDPServer is Object of udp server. + UDPServer struct { + runtime *runtime + } + + connPool struct { + pool map[string]net.Conn + mu sync.RWMutex + } +) + +// Category get object category +func (u *UDPServer) Category() supervisor.ObjectCategory { + return Category +} + +// Kind get object kind +func (u *UDPServer) Kind() string { + return Kind +} + +// DefaultSpec get default spec of UDPServer +func (u *UDPServer) DefaultSpec() interface{} { + return &Spec{} +} + +// Status get UDPServer status +func (u *UDPServer) Status() *supervisor.Status { + return &supervisor.Status{} +} + +// Close actually close runtime +func (u *UDPServer) Close() { + u.runtime.Close() +} + +// Init initializes UDPServer. +func (u *UDPServer) Init(superSpec *supervisor.Spec) { + + u.runtime = newRuntime(superSpec) + u.runtime.eventChan <- &eventReload{ + nextSuperSpec: superSpec, + } +} + +// Inherit inherits previous generation of UDPServer. 
+func (u *UDPServer) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object) { + + u.runtime = previousGeneration.(*UDPServer).runtime + u.runtime.eventChan <- &eventReload{ + nextSuperSpec: superSpec, + } +} + +func newConnPool() *connPool { + return &connPool{ + pool: make(map[string]net.Conn), + } +} + +func (c *connPool) get(addr string) net.Conn { + if c == nil { + return nil + } + + c.mu.RLock() + defer c.mu.RUnlock() + return c.pool[addr] +} + +func (c *connPool) put(addr string, conn net.Conn) { + if c == nil { + return + } + + c.mu.Lock() + defer c.mu.Unlock() + c.pool[addr] = conn +} + +func (c *connPool) close() { + if c == nil { + return + } + + c.mu.Lock() + defer c.mu.Unlock() + + for _, conn := range c.pool { + _ = conn.Close() + } + c.pool = nil +} diff --git a/pkg/util/iobufferpool/iobuffer_pool.go b/pkg/util/iobufferpool/iobuffer_pool.go index bb24e47698..9d9cb7a1b7 100644 --- a/pkg/util/iobufferpool/iobuffer_pool.go +++ b/pkg/util/iobufferpool/iobuffer_pool.go @@ -23,6 +23,8 @@ import ( ) const ( + // UDPPacketMaxSize max size of udp packet + UDPPacketMaxSize = 64 * 1024 // DefaultBufferReadCapacity default buffer capacity for stream proxy such as tcp DefaultBufferReadCapacity = 1 << 7 ) From 1cd74d8c0a8f37e246101e94af26ba10ceac70d3 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 21 Oct 2021 15:34:16 +0800 Subject: [PATCH 42/99] [tcpproxy] add missing license --- pkg/object/udpproxy/runtime.go | 17 +++++++++++++++++ pkg/object/udpproxy/session.go | 17 +++++++++++++++++ pkg/object/udpproxy/spec.go | 17 +++++++++++++++++ pkg/object/udpproxy/udpserver.go | 17 +++++++++++++++++ 4 files changed, 68 insertions(+) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 6048d92cf4..47a31549bd 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -1,3 +1,20 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package udpproxy import ( diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index f29cc10e6d..fbec1bb1dd 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -1,3 +1,20 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package udpproxy import ( diff --git a/pkg/object/udpproxy/spec.go b/pkg/object/udpproxy/spec.go index 39e0857ad9..5e58607c6a 100644 --- a/pkg/object/udpproxy/spec.go +++ b/pkg/object/udpproxy/spec.go @@ -1,3 +1,20 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package udpproxy import ( diff --git a/pkg/object/udpproxy/udpserver.go b/pkg/object/udpproxy/udpserver.go index 5389db9a9e..45c6c3a1aa 100644 --- a/pkg/object/udpproxy/udpserver.go +++ b/pkg/object/udpproxy/udpserver.go @@ -1,3 +1,20 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package udpproxy import ( From e15886774c8ffadc9dd993ee064ecaa23800359d Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 21 Oct 2021 15:45:07 +0800 Subject: [PATCH 43/99] [tcpproxy] optimization for no response udp proxy scenes --- pkg/object/udpproxy/runtime.go | 4 ++-- pkg/object/udpproxy/spec.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 47a31549bd..69bfb0d8e0 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -223,7 +223,7 @@ func (r *runtime) startServer() { r.setState(stateRunning) var cp *connPool - if r.spec.ContinuousMode { + if r.spec.HasResponse { cp = newConnPool() } @@ -251,7 +251,7 @@ func (r *runtime) startServer() { } } - if r.spec.ContinuousMode { + if !r.spec.HasResponse { if err := r.sendOneShot(cp, downstreamAddr, &buf); err != nil { logger.Errorf("%s", err.Error()) } diff --git a/pkg/object/udpproxy/spec.go b/pkg/object/udpproxy/spec.go index 5e58607c6a..f79e1f9ef2 100644 --- a/pkg/object/udpproxy/spec.go +++ b/pkg/object/udpproxy/spec.go @@ -30,7 +30,7 @@ type ( Name string `yaml:"name" json:"name" jsonschema:"required"` Port uint16 `yaml:"port" json:"port" jsonschema:"required"` - ContinuousMode bool `yaml:"continuousMode" jsonschema:"omitempty"` + HasResponse bool `yaml:"HasResponse" jsonschema:"required"` DownstreamIdleTimeout uint32 `yaml:"downstreamIdleTimeout" jsonschema:"omitempty,minimum=1"` UpstreamIdleTimeout uint32 `yaml:"upstreamIdleTimeout" jsonschema:"omitempty,minimum=1"` From b120e1af3f379f24c0bf4206b59931a244f55b4f Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 21 Oct 2021 17:30:27 +0800 Subject: [PATCH 44/99] [udpproxy] fix udp proxy bug --- pkg/object/udpproxy/runtime.go | 28 ++++++++++++++++------------ 
pkg/object/udpproxy/session.go | 1 + pkg/object/udpproxy/spec.go | 2 +- pkg/registry/registry.go | 1 + 4 files changed, 19 insertions(+), 13 deletions(-) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 69bfb0d8e0..64bd09d7ab 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -62,7 +62,6 @@ type ( serverConn *net.UDPConn // listener sessions map[string]*session - stopped uint32 state atomic.Value // runtime running state eventChan chan interface{} // receive event ipFilters *ipFilters @@ -80,6 +79,7 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { ipFilters: newIPFilters(spec.IPFilter), eventChan: make(chan interface{}, 10), + sessions: make(map[string]*session), } r.setState(stateNil) @@ -164,13 +164,7 @@ func (r *runtime) handleEventReload(e *eventReload) { } func (r *runtime) handleEventClose(e *eventClose) { - _ = r.serverConn.Close() - r.mu.Lock() - for k, s := range r.sessions { - delete(r.sessions, k) - s.Close() - } - r.mu.Unlock() + r.closeServer() r.pool.close() close(e.done) } @@ -240,6 +234,13 @@ func (r *runtime) startServer() { if r.getState() != stateRunning { return } + if ope, ok := err.(*net.OpError); ok { + // not timeout error and not temporary, which means the error is non-recoverable + if !(ope.Timeout() && ope.Temporary()) { + logger.Errorf("udp listener(%d) crashed due to non-recoverable error, err: %+v", r.spec.Port, err) + return + } + } logger.Errorf("failed to read packet from udp connection(:%d), err: %+v", r.spec.Port, err) continue } @@ -363,11 +364,14 @@ func (r *runtime) cleanup() { } func (r *runtime) closeServer() { - done := make(chan struct{}) - r.eventChan <- &eventClose{ - done: done, + r.setState(stateClosed) + _ = r.serverConn.Close() + r.mu.Lock() + for k, s := range r.sessions { + delete(r.sessions, k) + s.Close() } - <-done + r.mu.Unlock() } func (r *runtime) needRestartServer(nextSpec *Spec) bool { diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index fbec1bb1dd..ae152d64a2 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -132,6 +132,7 @@ func (s *session) ListenResponse(sendTo *net.UDPConn) { defer s.Close() for { + buf.Reset() if s.upstreamIdleTimeout > 0 { _ = s.upstreamConn.SetReadDeadline(time.Now().Add(s.upstreamIdleTimeout)) } diff --git a/pkg/object/udpproxy/spec.go b/pkg/object/udpproxy/spec.go index f79e1f9ef2..6be426cb35 100644 --- a/pkg/object/udpproxy/spec.go +++ b/pkg/object/udpproxy/spec.go @@ -30,7 +30,7 @@ type ( Name string `yaml:"name" json:"name" jsonschema:"required"` Port uint16 `yaml:"port" json:"port" jsonschema:"required"` - HasResponse bool `yaml:"HasResponse" jsonschema:"required"` + HasResponse bool `yaml:"hasResponse" jsonschema:"required"` DownstreamIdleTimeout uint32 `yaml:"downstreamIdleTimeout" jsonschema:"omitempty,minimum=1"` UpstreamIdleTimeout uint32 `yaml:"upstreamIdleTimeout" jsonschema:"omitempty,minimum=1"` diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go index a0cbe97b39..8921451db8 100644 --- a/pkg/registry/registry.go +++ b/pkg/registry/registry.go @@ -50,6 +50,7 @@ import ( _ "github.com/megaease/easegress/pkg/object/rawconfigtrafficcontroller" _ "github.com/megaease/easegress/pkg/object/tcpproxy" _ "github.com/megaease/easegress/pkg/object/trafficcontroller" + _ "github.com/megaease/easegress/pkg/object/udpproxy" _ "github.com/megaease/easegress/pkg/object/websocketserver" _ "github.com/megaease/easegress/pkg/object/zookeeperserviceregistry" ) 
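
For reference, a minimal standalone Go sketch of the non-recoverable-error check that the UDP read loop in the patch above relies on: a *net.OpError that is neither a timeout nor temporary is treated as fatal and ends the serving loop, while any other read error is logged and the loop retries. The isRecoverable helper and the example loop are illustrative only, not code from the repository.

package main

import (
	"fmt"
	"net"
)

// isRecoverable reports whether a read error on a packet connection
// should be retried (true) or should terminate the serving loop (false).
func isRecoverable(err error) bool {
	if ope, ok := err.(*net.OpError); ok {
		// Neither a timeout nor temporary: the listener is unusable.
		if !(ope.Timeout() && ope.Temporary()) {
			return false
		}
	}
	return true
}

func main() {
	conn, err := net.ListenUDP("udp", &net.UDPAddr{Port: 0})
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := make([]byte, 65535) // same size as iobufferpool.UDPPacketMaxSize
	for {
		n, addr, err := conn.ReadFromUDP(buf)
		if err != nil {
			if !isRecoverable(err) {
				fmt.Printf("udp listener crashed, err: %+v\n", err)
				return
			}
			fmt.Printf("transient read error: %+v\n", err)
			continue
		}
		fmt.Printf("read %d bytes from %s\n", n, addr)
	}
}
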
From 615b675d2f31f7c81db54f5b2cb1e079b1eb44a3 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sun, 24 Oct 2021 21:36:39 +0800 Subject: [PATCH 45/99] [layer4proxy] update timerpool license --- pkg/util/timerpool/timerpool.go | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/pkg/util/timerpool/timerpool.go b/pkg/util/timerpool/timerpool.go index ae620a7007..fa01a5a6bf 100644 --- a/pkg/util/timerpool/timerpool.go +++ b/pkg/util/timerpool/timerpool.go @@ -1,15 +1,19 @@ -// Copyright 2017-2018 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package timerpool From b14f06de1c62fac973b9e4d8f3bd6197b6a296fb Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sun, 24 Oct 2021 21:49:40 +0800 Subject: [PATCH 46/99] [layer4proxy] extract ipfilters file in tcpproxy and udpproxy package to util package --- pkg/object/tcpproxy/runtime.go | 11 +-- pkg/object/udpproxy/ipfilters.go | 86 ------------------- pkg/object/udpproxy/runtime.go | 7 +- .../layer4ipfilters/layer4ipfilters.go} | 29 ++----- 4 files changed, 16 insertions(+), 117 deletions(-) delete mode 100644 pkg/object/udpproxy/ipfilters.go rename pkg/{object/tcpproxy/ipfilters.go => util/layer4ipfilters/layer4ipfilters.go} (53%) diff --git a/pkg/object/tcpproxy/runtime.go b/pkg/object/tcpproxy/runtime.go index 8ce470953f..ba0d9b4b47 100644 --- a/pkg/object/tcpproxy/runtime.go +++ b/pkg/object/tcpproxy/runtime.go @@ -19,6 +19,7 @@ package tcpproxy import ( "fmt" + "github.com/megaease/easegress/pkg/util/layer4ipfilters" "net" "reflect" "sync/atomic" @@ -60,9 +61,9 @@ type ( superSpec *supervisor.Spec spec *Spec - pool *pool // backend servers pool - ipFilters *ipFilters // ip filters - listener *listener // tcp listener + pool *pool // backend servers pool + ipFilters *layer4ipfilters.Layer4IpFilters // ip filters + listener *listener // tcp listener startNum uint64 eventChan chan interface{} // receive traffic controller event @@ -78,7 +79,7 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { superSpec: superSpec, pool: newPool(superSpec.Super(), spec.Pool, ""), - ipFilters: newIPFilters(spec.IPFilter), + ipFilters: layer4ipfilters.NewLayer4IPFilters(spec.IPFilter), eventChan: make(chan interface{}, 10), } @@ -123,7 +124,7 @@ func (r *runtime) fsm() { func (r *runtime) reload(nextSuperSpec *supervisor.Spec) { r.superSpec = nextSuperSpec nextSpec := 
nextSuperSpec.ObjectSpec().(*Spec) - r.ipFilters.reloadRules(nextSpec.IPFilter) + r.ipFilters.ReloadRules(nextSpec.IPFilter) r.pool.reloadRules(nextSuperSpec.Super(), nextSpec.Pool, "") // r.listener does not create just after the process started and the config load for the first time. diff --git a/pkg/object/udpproxy/ipfilters.go b/pkg/object/udpproxy/ipfilters.go deleted file mode 100644 index 4e5db51aff..0000000000 --- a/pkg/object/udpproxy/ipfilters.go +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package udpproxy - -import ( - "reflect" - "sync/atomic" - - "github.com/megaease/easegress/pkg/util/ipfilter" -) - -type ( - ipFilters struct { - rules atomic.Value - } - - ipFiltersRules struct { - spec *ipfilter.Spec - ipFilter *ipfilter.IPFilter - } -) - -func newIPFilters(spec *ipfilter.Spec) *ipFilters { - m := &ipFilters{} - - m.rules.Store(&ipFiltersRules{ - spec: spec, - ipFilter: newIPFilter(spec), - }) - return m -} - -func (i *ipFilters) AllowIP(ip string) bool { - rules := i.rules.Load().(*ipFiltersRules) - if rules == nil || rules.spec == nil { - return true - } - return rules.ipFilter.Allow(ip) -} - -func (i *ipFilters) reloadRules(spec *ipfilter.Spec) { - if spec == nil { - i.rules.Store(&ipFiltersRules{}) - return - } - - old := i.rules.Load().(*ipFiltersRules) - if reflect.DeepEqual(old.spec, spec) { - return - } - - rules := &ipFiltersRules{ - spec: spec, - ipFilter: newIPFilter(spec), - } - i.rules.Store(rules) -} - -func newIPFilter(spec *ipfilter.Spec) *ipfilter.IPFilter { - if spec == nil { - return nil - } - return ipfilter.New(spec) -} - -func (r *ipFiltersRules) pass(downstreamIP string) bool { - if r.ipFilter == nil { - return true - } - return r.ipFilter.Allow(downstreamIP) -} diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 64bd09d7ab..b8ee5cf532 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -19,6 +19,7 @@ package udpproxy import ( "fmt" + "github.com/megaease/easegress/pkg/util/layer4ipfilters" "net" "reflect" "sync" @@ -64,7 +65,7 @@ type ( state atomic.Value // runtime running state eventChan chan interface{} // receive event - ipFilters *ipFilters + ipFilters *layer4ipfilters.Layer4IpFilters mu sync.Mutex } @@ -76,7 +77,7 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { superSpec: superSpec, pool: newPool(superSpec.Super(), spec.Pool, ""), - ipFilters: newIPFilters(spec.IPFilter), + ipFilters: layer4ipfilters.NewLayer4IPFilters(spec.IPFilter), eventChan: make(chan interface{}, 10), sessions: make(map[string]*session), @@ -173,7 +174,7 @@ func (r *runtime) reload(nextSuperSpec *supervisor.Spec) { r.superSpec = nextSuperSpec nextSpec := nextSuperSpec.ObjectSpec().(*Spec) - r.ipFilters.reloadRules(nextSpec.IPFilter) + r.ipFilters.ReloadRules(nextSpec.IPFilter) r.pool.reloadRules(nextSuperSpec.Super(), nextSpec.Pool, "") // NOTE: Due to the mechanism of supervisor, diff --git 
a/pkg/object/tcpproxy/ipfilters.go b/pkg/util/layer4ipfilters/layer4ipfilters.go similarity index 53% rename from pkg/object/tcpproxy/ipfilters.go rename to pkg/util/layer4ipfilters/layer4ipfilters.go index e219fb3154..271b3af160 100644 --- a/pkg/object/tcpproxy/ipfilters.go +++ b/pkg/util/layer4ipfilters/layer4ipfilters.go @@ -1,21 +1,4 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package tcpproxy +package layer4ipfilters import ( "reflect" @@ -25,7 +8,7 @@ import ( ) type ( - ipFilters struct { + Layer4IpFilters struct { rules atomic.Value } @@ -35,8 +18,8 @@ type ( } ) -func newIPFilters(spec *ipfilter.Spec) *ipFilters { - m := &ipFilters{} +func NewLayer4IPFilters(spec *ipfilter.Spec) *Layer4IpFilters { + m := &Layer4IpFilters{} m.rules.Store(&ipFiltersRules{ spec: spec, @@ -45,7 +28,7 @@ func newIPFilters(spec *ipfilter.Spec) *ipFilters { return m } -func (i *ipFilters) AllowIP(ip string) bool { +func (i *Layer4IpFilters) AllowIP(ip string) bool { rules := i.rules.Load().(*ipFiltersRules) if rules == nil || rules.spec == nil { return true @@ -53,7 +36,7 @@ func (i *ipFilters) AllowIP(ip string) bool { return rules.ipFilter.Allow(ip) } -func (i *ipFilters) reloadRules(spec *ipfilter.Spec) { +func (i *Layer4IpFilters) ReloadRules(spec *ipfilter.Spec) { if spec == nil { i.rules.Store(&ipFiltersRules{}) return From b5cbd6d8207c860fad6ec96b3500289c07d28c99 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sun, 24 Oct 2021 22:11:33 +0800 Subject: [PATCH 47/99] [layer4proxy] change tcpproxy listen function to make code more readable --- pkg/object/tcpproxy/listener.go | 46 ++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/pkg/object/tcpproxy/listener.go b/pkg/object/tcpproxy/listener.go index e6ef9bd5ec..f85418ffaf 100644 --- a/pkg/object/tcpproxy/listener.go +++ b/pkg/object/tcpproxy/listener.go @@ -69,31 +69,35 @@ func (l *listener) listen() error { func (l *listener) acceptEventLoop() { for { - if tconn, err := l.tcpListener.Accept(); err != nil { - if nerr, ok := err.(net.Error); ok && nerr.Timeout() { - logger.Infof("tcp listener(%s) stop accept connection due to timeout, err: %s", - l.localAddr, nerr) - return - } + tconn, err := l.tcpListener.Accept() + if err == nil { + go l.onAccept(tconn, l.stopChan) + continue + } + + if nerr, ok := err.(net.Error); ok && nerr.Timeout() { + logger.Infof("tcp listener(%s) stop accept connection due to timeout, err: %s", + l.localAddr, nerr) + return + } + + ope, ok := err.(*net.OpError) + if !ok { + logger.Errorf("tcp listener(%s) stop accept connection with unknown error: %s.", + l.localAddr, err.Error()) + return + } - if ope, ok := err.(*net.OpError); ok { - // not timeout error and not temporary, which means the error is non-recoverable - if !(ope.Timeout() && ope.Temporary()) { - // accept error raised by sockets closing - if ope.Op == "accept" { - logger.Debugf("tcp listener(%s) stop accept connection due to listener 
closed", l.localAddr) - } else { - logger.Errorf("tcp listener(%s) stop accept connection due to non-recoverable error: %s", - l.localAddr, err.Error()) - } - return - } + // not timeout error and not temporary, which means the error is non-recoverable + if !(ope.Timeout() && ope.Temporary()) { + // accept error raised by sockets closing + if ope.Op == "accept" { + logger.Debugf("tcp listener(%s) stop accept connection due to listener closed", l.localAddr) } else { - logger.Errorf("tcp listener(%s) stop accept connection with unknown error: %s.", + logger.Errorf("tcp listener(%s) stop accept connection due to non-recoverable error: %s", l.localAddr, err.Error()) } - } else { - go l.onAccept(tconn, l.stopChan) + return } } } From 086213764de7982c0bdd7b8a23d53ffb76a905f2 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sun, 24 Oct 2021 22:46:15 +0800 Subject: [PATCH 48/99] [layer4proxy] fix udp receive bug --- pkg/object/udpproxy/session.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index ae152d64a2..34954b2e91 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -84,17 +84,15 @@ func newSession(downstreamAddr *net.UDPAddr, upstreamAddr string, upstreamConn n if err != nil { logger.Errorf("udp connection flush data to upstream(%s) failed, err: %+v", upstreamAddr, err) - s.cleanWriteBuf() - break + s.Close() + continue } if bufLen != n { logger.Errorf("udp connection flush data to upstream(%s) failed, should write %d but written %d", upstreamAddr, bufLen, n) - s.cleanWriteBuf() - break + s.Close() } - case <-s.stopChan: if !atomic.CompareAndSwapUint32(&s.stopped, 0, 1) { break From 3e891e96688f0199b69ea5e1590b292aa3968abd Mon Sep 17 00:00:00 2001 From: jxd134 Date: Mon, 25 Oct 2021 09:35:16 +0800 Subject: [PATCH 49/99] [layer4proxy] move layer4 ipfilters to util/ipfilter --- pkg/object/httpserver/runtime.go | 2 +- pkg/object/tcpproxy/runtime.go | 10 +-- pkg/object/udpproxy/runtime.go | 6 +- pkg/util/ipfilter/layer4ipfilters.go | 80 +++++++++++++++++++++ pkg/util/layer4ipfilters/layer4ipfilters.go | 69 ------------------ 5 files changed, 89 insertions(+), 78 deletions(-) create mode 100644 pkg/util/ipfilter/layer4ipfilters.go delete mode 100644 pkg/util/layer4ipfilters/layer4ipfilters.go diff --git a/pkg/object/httpserver/runtime.go b/pkg/object/httpserver/runtime.go index f16e8f0253..7d3dc7db2c 100644 --- a/pkg/object/httpserver/runtime.go +++ b/pkg/object/httpserver/runtime.go @@ -162,7 +162,7 @@ func (r *runtime) reload(nextSuperSpec *supervisor.Spec, muxMapper protocol.MuxM r.mux.reloadRules(nextSuperSpec, muxMapper) nextSpec := nextSuperSpec.ObjectSpec().(*Spec) - + // r.limitListener is not created just after the process started and the config load for the first time. 
if nextSpec != nil && r.limitListener != nil { r.limitListener.SetMaxConnection(nextSpec.MaxConnections) diff --git a/pkg/object/tcpproxy/runtime.go b/pkg/object/tcpproxy/runtime.go index ba0d9b4b47..261f1c0d45 100644 --- a/pkg/object/tcpproxy/runtime.go +++ b/pkg/object/tcpproxy/runtime.go @@ -19,7 +19,6 @@ package tcpproxy import ( "fmt" - "github.com/megaease/easegress/pkg/util/layer4ipfilters" "net" "reflect" "sync/atomic" @@ -28,6 +27,7 @@ import ( "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/supervisor" "github.com/megaease/easegress/pkg/util/iobufferpool" + "github.com/megaease/easegress/pkg/util/ipfilter" ) const ( @@ -61,9 +61,9 @@ type ( superSpec *supervisor.Spec spec *Spec - pool *pool // backend servers pool - ipFilters *layer4ipfilters.Layer4IpFilters // ip filters - listener *listener // tcp listener + pool *pool // backend servers pool + ipFilters *ipfilter.Layer4IpFilters // ip filters + listener *listener // tcp listener startNum uint64 eventChan chan interface{} // receive traffic controller event @@ -79,7 +79,7 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { superSpec: superSpec, pool: newPool(superSpec.Super(), spec.Pool, ""), - ipFilters: layer4ipfilters.NewLayer4IPFilters(spec.IPFilter), + ipFilters: ipfilter.NewLayer4IPFilters(spec.IPFilter), eventChan: make(chan interface{}, 10), } diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index b8ee5cf532..1005faae3b 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -19,7 +19,6 @@ package udpproxy import ( "fmt" - "github.com/megaease/easegress/pkg/util/layer4ipfilters" "net" "reflect" "sync" @@ -29,6 +28,7 @@ import ( "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/supervisor" "github.com/megaease/easegress/pkg/util/iobufferpool" + "github.com/megaease/easegress/pkg/util/ipfilter" ) const ( @@ -65,7 +65,7 @@ type ( state atomic.Value // runtime running state eventChan chan interface{} // receive event - ipFilters *layer4ipfilters.Layer4IpFilters + ipFilters *ipfilter.Layer4IpFilters mu sync.Mutex } @@ -77,7 +77,7 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { superSpec: superSpec, pool: newPool(superSpec.Super(), spec.Pool, ""), - ipFilters: layer4ipfilters.NewLayer4IPFilters(spec.IPFilter), + ipFilters: ipfilter.NewLayer4IPFilters(spec.IPFilter), eventChan: make(chan interface{}, 10), sessions: make(map[string]*session), diff --git a/pkg/util/ipfilter/layer4ipfilters.go b/pkg/util/ipfilter/layer4ipfilters.go new file mode 100644 index 0000000000..7112b01799 --- /dev/null +++ b/pkg/util/ipfilter/layer4ipfilters.go @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package ipfilter + +import ( + "reflect" + "sync/atomic" +) + +type ( + Layer4IpFilters struct { + rules atomic.Value + } + + ipFiltersRules struct { + spec *Spec + ipFilter *IPFilter + } +) + +func NewLayer4IPFilters(spec *Spec) *Layer4IpFilters { + if spec == nil { + return &Layer4IpFilters{} + } + + m := &Layer4IpFilters{} + m.rules.Store(&ipFiltersRules{ + spec: spec, + ipFilter: New(spec), + }) + return m +} + +func (i *Layer4IpFilters) AllowIP(ip string) bool { + rules := i.rules.Load().(*ipFiltersRules) + if rules == nil || rules.spec == nil { + return true + } + return rules.ipFilter.Allow(ip) +} + +func (i *Layer4IpFilters) ReloadRules(spec *Spec) { + if spec == nil { + i.rules.Store(&ipFiltersRules{}) + return + } + + old := i.rules.Load().(*ipFiltersRules) + if reflect.DeepEqual(old.spec, spec) { + return + } + + rules := &ipFiltersRules{ + spec: spec, + ipFilter: New(spec), + } + i.rules.Store(rules) +} + +func (r *ipFiltersRules) pass(downstreamIP string) bool { + if r.ipFilter == nil { + return true + } + return r.ipFilter.Allow(downstreamIP) +} diff --git a/pkg/util/layer4ipfilters/layer4ipfilters.go b/pkg/util/layer4ipfilters/layer4ipfilters.go deleted file mode 100644 index 271b3af160..0000000000 --- a/pkg/util/layer4ipfilters/layer4ipfilters.go +++ /dev/null @@ -1,69 +0,0 @@ -package layer4ipfilters - -import ( - "reflect" - "sync/atomic" - - "github.com/megaease/easegress/pkg/util/ipfilter" -) - -type ( - Layer4IpFilters struct { - rules atomic.Value - } - - ipFiltersRules struct { - spec *ipfilter.Spec - ipFilter *ipfilter.IPFilter - } -) - -func NewLayer4IPFilters(spec *ipfilter.Spec) *Layer4IpFilters { - m := &Layer4IpFilters{} - - m.rules.Store(&ipFiltersRules{ - spec: spec, - ipFilter: newIPFilter(spec), - }) - return m -} - -func (i *Layer4IpFilters) AllowIP(ip string) bool { - rules := i.rules.Load().(*ipFiltersRules) - if rules == nil || rules.spec == nil { - return true - } - return rules.ipFilter.Allow(ip) -} - -func (i *Layer4IpFilters) ReloadRules(spec *ipfilter.Spec) { - if spec == nil { - i.rules.Store(&ipFiltersRules{}) - return - } - - old := i.rules.Load().(*ipFiltersRules) - if reflect.DeepEqual(old.spec, spec) { - return - } - - rules := &ipFiltersRules{ - spec: spec, - ipFilter: newIPFilter(spec), - } - i.rules.Store(rules) -} - -func newIPFilter(spec *ipfilter.Spec) *ipfilter.IPFilter { - if spec == nil { - return nil - } - return ipfilter.New(spec) -} - -func (r *ipFiltersRules) pass(downstreamIP string) bool { - if r.ipFilter == nil { - return true - } - return r.ipFilter.Allow(downstreamIP) -} From 20012399a412eeb7521ed5f4a09ed91294a41a85 Mon Sep 17 00:00:00 2001 From: jxd Date: Mon, 25 Oct 2021 09:41:58 +0800 Subject: [PATCH 50/99] Update GetReadBuffer function comment Co-authored-by: Bomin Zhang --- pkg/object/tcpproxy/connection.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 713b7a636c..bfa085a5c6 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -107,7 +107,7 @@ func (c *Connection) SetOnClose(onclose func(event ConnectionEvent)) { c.onClose = onclose } -// GetReadBuffer get connection red buffer +// GetReadBuffer get connection read buffer func (c *Connection) GetReadBuffer() iobufferpool.IoBuffer { return c.readBuffer } From 6911fcde40b1c995e72b890b2313b1f337644b0c Mon Sep 17 00:00:00 2001 From: jxd134 Date: Mon, 25 Oct 2021 10:08:59 +0800 Subject: [PATCH 51/99] [layer4proxy] fix error modify --- 
pkg/option/option.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/option/option.go b/pkg/option/option.go index 854de0b0e5..26997f728a 100644 --- a/pkg/option/option.go +++ b/pkg/option/option.go @@ -108,7 +108,7 @@ func New() *Options { opt.flags.StringSliceVar(&opt.ClusterAdvertiseClientURLs, "cluster-advertise-client-urls", []string{"http://localhost:2379"}, "List of this member’s client URLs to advertise to the rest of the cluster.") opt.flags.StringSliceVar(&opt.ClusterInitialAdvertisePeerURLs, "cluster-initial-advertise-peer-urls", []string{"http://localhost:2380"}, "List of this member’s peer URLs to advertise to the rest of the cluster.") opt.flags.StringSliceVar(&opt.ClusterJoinURLs, "cluster-join-urls", nil, "List of URLs to join, when the first url is the same with any one of cluster-initial-advertise-peer-urls, it means to join itself, and this config will be treated empty.") - opt.flags.StringVar(&opt.APIAddr, "api-addr", "localhost:2381", "HostPort([host]:port) to listen on for administration traffic.") + opt.flags.StringVar(&opt.APIAddr, "api-addr", "localhost:2381", "Address([host]:port) to listen on for administration traffic.") opt.flags.BoolVar(&opt.Debug, "debug", false, "Flag to set lowest log level from INFO downgrade DEBUG.") opt.flags.StringSliceVar(&opt.InitialObjectConfigFiles, "initial-object-config-files", nil, "List of configuration files for initial objects, these objects will be created at startup if not already exist.") From ceaaec12fa8a03263fe2cfc7c520cfb88837c1cc Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 26 Oct 2021 22:27:41 +0800 Subject: [PATCH 52/99] [layer4proxy] update io buffer for tcp/udp proxy --- go.mod | 1 + pkg/object/tcpproxy/connection.go | 124 ++-- pkg/object/tcpproxy/runtime.go | 10 +- pkg/object/udpproxy/runtime.go | 26 +- pkg/object/udpproxy/session.go | 28 +- pkg/util/iobufferpool/buffer.go | 135 ----- pkg/util/iobufferpool/bytebuffer_pool.go | 157 ------ pkg/util/iobufferpool/constants.go | 32 ++ pkg/util/iobufferpool/iobuffer.go | 594 -------------------- pkg/util/iobufferpool/iobuffer_pool.go | 81 --- pkg/util/iobufferpool/packet_pool.go | 52 ++ pkg/util/iobufferpool/stream_buffer_pool.go | 89 +++ 12 files changed, 250 insertions(+), 1079 deletions(-) delete mode 100644 pkg/util/iobufferpool/buffer.go delete mode 100644 pkg/util/iobufferpool/bytebuffer_pool.go create mode 100644 pkg/util/iobufferpool/constants.go delete mode 100644 pkg/util/iobufferpool/iobuffer.go delete mode 100644 pkg/util/iobufferpool/iobuffer_pool.go create mode 100644 pkg/util/iobufferpool/packet_pool.go create mode 100644 pkg/util/iobufferpool/stream_buffer_pool.go diff --git a/go.mod b/go.mod index de35bd2167..0566c95d04 100644 --- a/go.mod +++ b/go.mod @@ -41,6 +41,7 @@ require ( github.com/tcnksm/go-httpstat v0.2.1-0.20191008022543-e866bb274419 github.com/tidwall/gjson v1.8.0 github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce + github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.1 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonschema v1.2.1-0.20201027075954-b076d39a02e5 diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index bfa085a5c6..0e1b7f5048 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -41,20 +41,20 @@ type Connection struct { localAddr net.Addr remoteAddr net.Addr - lastBytesSizeRead int64 - lastWriteSizeWrite int64 + lastBytesSizeRead int 
+ lastWriteSizeWrite int - readBuffer iobufferpool.IoBuffer + readBuffer []byte writeBuffers net.Buffers - ioBuffers []iobufferpool.IoBuffer - writeBufferChan chan *[]iobufferpool.IoBuffer + ioBuffers []*iobufferpool.StreamBuffer + writeBufferChan chan *iobufferpool.StreamBuffer mu sync.Mutex startOnce sync.Once connStopChan chan struct{} // use for connection close listenerStopChan chan struct{} // use for listener close - onRead func(buffer iobufferpool.IoBuffer) // execute read filters + onRead func(buffer *iobufferpool.StreamBuffer) // execute read filters onClose func(event ConnectionEvent) } @@ -67,7 +67,7 @@ func NewDownstreamConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan localAddr: conn.LocalAddr(), remoteAddr: conn.RemoteAddr(), - writeBufferChan: make(chan *[]iobufferpool.IoBuffer, 8), + writeBufferChan: make(chan *iobufferpool.StreamBuffer, 8), mu: sync.Mutex{}, connStopChan: make(chan struct{}), @@ -93,12 +93,12 @@ func (c *Connection) RemoteAddr() net.Addr { } // SetOnRead set connection read handle -func (c *Connection) SetOnRead(onRead func(buffer iobufferpool.IoBuffer)) { +func (c *Connection) SetOnRead(onRead func(buffer *iobufferpool.StreamBuffer)) { c.onRead = onRead } // OnRead set data read callback -func (c *Connection) OnRead(buffer iobufferpool.IoBuffer) { +func (c *Connection) OnRead(buffer *iobufferpool.StreamBuffer) { c.onRead(buffer) } @@ -107,11 +107,6 @@ func (c *Connection) SetOnClose(onclose func(event ConnectionEvent)) { c.onClose = onclose } -// GetReadBuffer get connection read buffer -func (c *Connection) GetReadBuffer() iobufferpool.IoBuffer { - return c.readBuffer -} - // Start running connection read/write loop func (c *Connection) Start() { c.startOnce.Do(func() { @@ -167,7 +162,7 @@ func (c *Connection) startRWLoop() { } // Write receive other connection data -func (c *Connection) Write(buffers ...iobufferpool.IoBuffer) (err error) { +func (c *Connection) Write(buf *iobufferpool.StreamBuffer) (err error) { defer func() { if r := recover(); r != nil { logger.Errorf("tcp connection has closed, local addr: %s, remote addr: %s, err: %+v", @@ -177,7 +172,7 @@ func (c *Connection) Write(buffers ...iobufferpool.IoBuffer) (err error) { }() select { - case c.writeBufferChan <- &buffers: + case c.writeBufferChan <- buf: return default: } @@ -185,8 +180,9 @@ func (c *Connection) Write(buffers ...iobufferpool.IoBuffer) (err error) { // try to send data again in 60 seconds t := timerpool.Get(60 * time.Second) select { - case c.writeBufferChan <- &buffers: + case c.writeBufferChan <- buf: case <-t.C: + buf.Release() err = ErrWriteBufferChanTimeout } timerpool.Put(t) @@ -201,28 +197,34 @@ func (c *Connection) startReadLoop() { case <-c.listenerStopChan: return default: - err := c.doReadIO() + bufLen, err := c.doReadIO() if err != nil { + if atomic.LoadUint32(&c.closed) == 1 { + logger.Infof("tcp connection exit read loop for connection has closed, local addr: %s, "+ + "remote addr: %s, err: %s", c.localAddr.String(), c.remoteAddr.String(), err.Error()) + return + } + if te, ok := err.(net.Error); ok && te.Timeout() { - if c.readBuffer != nil && c.readBuffer.Len() == 0 && c.readBuffer.Cap() > iobufferpool.DefaultBufferReadCapacity { - c.readBuffer.Free() - c.readBuffer.Alloc(iobufferpool.DefaultBufferReadCapacity) + if bufLen == 0 { + continue // continue read data, ignore timeout error } - continue } + } - // normal close or health check - if c.lastBytesSizeRead == 0 || err == io.EOF { - logger.Infof("tcp connection error on read, local addr: %s, 
remote addr: %s, err: %s", - c.localAddr.String(), c.remoteAddr.String(), err.Error()) - } else { - logger.Errorf("tcp connection error on read, local addr: %s, remote addr: %s, err: %s", - c.localAddr.String(), c.remoteAddr.String(), err.Error()) - } + if bufLen != 0 && (err == nil || err == io.EOF) { + c.onRead(iobufferpool.NewStreamBuffer(c.readBuffer)) + c.readBuffer = c.readBuffer[:0] + } + if err != nil { if err == io.EOF { + logger.Infof("tcp connection read error, local addr: %s, remote addr: %s, err: %s", + c.localAddr.String(), c.remoteAddr.String(), err.Error()) _ = c.Close(NoFlush, RemoteClose) } else { + logger.Errorf("tcp connection read error, local addr: %s, remote addr: %s, err: %s", + c.localAddr.String(), c.remoteAddr.String(), err.Error()) _ = c.Close(NoFlush, OnReadErrClose) } return @@ -278,17 +280,12 @@ func (c *Connection) startWriteLoop() { } } -func (c *Connection) appendBuffer(ioBuffers *[]iobufferpool.IoBuffer) { - if ioBuffers == nil { +func (c *Connection) appendBuffer(buf *iobufferpool.StreamBuffer) { + if buf == nil { return } - for _, buf := range *ioBuffers { - if buf == nil { - continue - } - c.ioBuffers = append(c.ioBuffers, buf) - c.writeBuffers = append(c.writeBuffers, buf.Bytes()) - } + c.ioBuffers = append(c.ioBuffers, buf) + c.writeBuffers = append(c.writeBuffers, buf.Bytes()) } // Close connection close function @@ -300,7 +297,7 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) }() if ccType == FlushWrite { - _ = c.Write(iobufferpool.NewIoBufferEOF()) + _ = c.Write(iobufferpool.NewEOFStreamBuffer()) return nil } @@ -333,43 +330,15 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) return nil } -func (c *Connection) doReadIO() (err error) { +func (c *Connection) doReadIO() (bufLen int, err error) { if c.readBuffer == nil { - c.readBuffer = iobufferpool.GetIoBuffer(iobufferpool.DefaultBufferReadCapacity) + c.readBuffer = iobufferpool.TCPBufferPool.Get().([]byte) } - var bytesRead int64 + // add read deadline setting optimization? 
+ // https://github.com/golang/go/issues/15133 _ = c.rawConn.SetReadDeadline(time.Now().Add(15 * time.Second)) - bytesRead, err = c.readBuffer.ReadOnce(c.rawConn) - - if err != nil { - if atomic.LoadUint32(&c.closed) == 1 { - return err - } - if te, ok := err.(net.Error); ok && te.Timeout() { - if bytesRead == 0 { - return err - } - } else if err != io.EOF { - return err - } - } - - if bytesRead == 0 && err == nil { - err = io.EOF - logger.Errorf("tcp connection ReadOnce maybe always return (0, nil) and causes dead loop, local addr: %s, remote addr: %s", - c.localAddr.String(), c.remoteAddr.String()) - } - - if c.readBuffer.Len() == 0 { - return - } - - c.onRead(c.readBuffer) - if currLen := int64(c.readBuffer.Len()); c.lastBytesSizeRead != currLen { - c.lastBytesSizeRead = currLen - } - return + return c.rawConn.(io.Reader).Read(c.readBuffer) } func (c *Connection) doWrite() (int64, error) { @@ -378,7 +347,7 @@ func (c *Connection) doWrite() (int64, error) { return 0, nil } - if bytesBufSize := int64(c.writeBufLen()); bytesBufSize != c.lastWriteSizeWrite { + if bytesBufSize := c.writeBufLen(); bytesBufSize != c.lastWriteSizeWrite { c.lastWriteSizeWrite = bytesBufSize } return bytesSent, err @@ -404,10 +373,7 @@ func (c *Connection) doWriteIO() (bytesSent int64, err error) { if buf.EOF() { err = iobufferpool.ErrEOF } - if e := iobufferpool.PutIoBuffer(buf); e != nil { - logger.Errorf("tcp connection give buffer error, local addr: %s, remote addr: %s, err: %+v", - c.localAddr.String(), c.remoteAddr.String(), err) - } + buf.Release() } c.ioBuffers = c.ioBuffers[:0] c.writeBuffers = c.writeBuffers[:0] @@ -428,7 +394,7 @@ func NewUpstreamConn(connectTimeout uint32, upstreamAddr net.Addr, listenerStopC connected: 1, remoteAddr: upstreamAddr, - writeBufferChan: make(chan *[]iobufferpool.IoBuffer, 8), + writeBufferChan: make(chan *iobufferpool.StreamBuffer, 8), mu: sync.Mutex{}, connStopChan: make(chan struct{}), diff --git a/pkg/object/tcpproxy/runtime.go b/pkg/object/tcpproxy/runtime.go index 261f1c0d45..71a3730bc2 100644 --- a/pkg/object/tcpproxy/runtime.go +++ b/pkg/object/tcpproxy/runtime.go @@ -301,16 +301,14 @@ func (r *runtime) onAccept() func(conn net.Conn, listenerStop chan struct{}) { } func (r *runtime) setCallbacks(downstreamConn *Connection, upstreamConn *UpstreamConnection) { - downstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { + downstreamConn.SetOnRead(func(readBuf *iobufferpool.StreamBuffer) { if readBuf != nil && readBuf.Len() > 0 { - _ = upstreamConn.Write(readBuf.Clone()) - readBuf.Drain(readBuf.Len()) + _ = upstreamConn.Write(readBuf) } }) - upstreamConn.SetOnRead(func(readBuf iobufferpool.IoBuffer) { + upstreamConn.SetOnRead(func(readBuf *iobufferpool.StreamBuffer) { if readBuf != nil && readBuf.Len() > 0 { - _ = downstreamConn.Write(readBuf.Clone()) - readBuf.Drain(readBuf.Len()) + _ = downstreamConn.Write(readBuf) } }) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 1005faae3b..290f9ab12c 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -225,11 +225,10 @@ func (r *runtime) startServer() { go func() { defer cp.close() - buf := iobufferpool.GetIoBuffer(iobufferpool.UDPPacketMaxSize) + buf := make([]byte, iobufferpool.UDPPacketMaxSize) for { - buf.Reset() - n, downstreamAddr, err := r.serverConn.ReadFromUDP(buf.Bytes()[:buf.Cap()]) - _ = buf.Grow(n) + buf = buf[:0] + n, downstreamAddr, err := r.serverConn.ReadFromUDP(buf) if err != nil { if r.getState() != stateRunning { @@ -254,14 +253,13 @@ 
func (r *runtime) startServer() { } if !r.spec.HasResponse { - if err := r.sendOneShot(cp, downstreamAddr, &buf); err != nil { + if err := r.sendOneShot(cp, downstreamAddr, buf[0:n]); err != nil { logger.Errorf("%s", err.Error()) } continue } - data := buf.Clone() - r.proxy(downstreamAddr, &data) + r.proxy(downstreamAddr, buf[0:n]) } }() } @@ -295,19 +293,19 @@ func (r *runtime) getUpstreamConn(pool *connPool, downstreamAddr *net.UDPAddr) ( return upstreamConn, server.Addr, nil } -func (r *runtime) sendOneShot(pool *connPool, downstreamAddr *net.UDPAddr, buf *iobufferpool.IoBuffer) error { +func (r *runtime) sendOneShot(pool *connPool, downstreamAddr *net.UDPAddr, buf []byte) error { upstreamConn, upstreamAddr, err := r.getUpstreamConn(pool, downstreamAddr) if err != nil { return err } - n, err := upstreamConn.Write((*buf).Bytes()) + n, err := upstreamConn.Write(buf) if err != nil { return fmt.Errorf("sned data to %s failed, err: %+v", upstreamAddr, err) } - if n != (*buf).Len() { - return fmt.Errorf("failed to send full packet to %s, read %d but send %d", upstreamAddr, (*buf).Len(), n) + if n != len(buf) { + return fmt.Errorf("failed to send full packet to %s, read %d but send %d", upstreamAddr, len(buf), n) } return nil } @@ -340,14 +338,16 @@ func (r *runtime) getSession(downstreamAddr *net.UDPAddr) (*session, error) { return s, nil } -func (r *runtime) proxy(downstreamAddr *net.UDPAddr, buf *iobufferpool.IoBuffer) { +func (r *runtime) proxy(downstreamAddr *net.UDPAddr, buf []byte) { s, err := r.getSession(downstreamAddr) if err != nil { logger.Errorf("%s", err.Error()) return } - err = s.Write(buf) + dup := iobufferpool.UDPBufferPool.Get().([]byte) + n := copy(dup, buf) + err = s.Write(&iobufferpool.Packet{Payload: dup, Len: n}) if err != nil { logger.Errorf("write data to udp session(%s) failed, err: %v", downstreamAddr.IP.String(), err) } diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index 34954b2e91..2f056dfe73 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -34,7 +34,7 @@ type session struct { upstreamIdleTimeout time.Duration upstreamConn net.Conn - writeBuf chan *iobufferpool.IoBuffer + writeBuf chan *iobufferpool.Packet stopChan chan struct{} stopped uint32 } @@ -48,7 +48,7 @@ func newSession(downstreamAddr *net.UDPAddr, upstreamAddr string, upstreamConn n upstreamIdleTimeout: upstreamIdleTimeout, downstreamIdleTimeout: downstreamIdleTimeout, - writeBuf: make(chan *iobufferpool.IoBuffer, 512), + writeBuf: make(chan *iobufferpool.Packet, 512), stopChan: make(chan struct{}, 1), } @@ -78,9 +78,9 @@ func newSession(downstreamAddr *net.UDPAddr, upstreamAddr string, upstreamConn n t.Reset(downstreamIdleTimeout) } - bufLen := (*buf).Len() - n, err := s.upstreamConn.Write((*buf).Bytes()) - _ = iobufferpool.PutIoBuffer(*buf) + bufLen := len(buf.Payload) + n, err := s.upstreamConn.Write(buf.Bytes()) + buf.Release() if err != nil { logger.Errorf("udp connection flush data to upstream(%s) failed, err: %+v", upstreamAddr, err) @@ -110,7 +110,7 @@ func newSession(downstreamAddr *net.UDPAddr, upstreamAddr string, upstreamConn n } // Write send data to buffer channel, wait flush to upstream -func (s *session) Write(buf *iobufferpool.IoBuffer) error { +func (s *session) Write(buf *iobufferpool.Packet) error { if atomic.LoadUint32(&s.stopped) == 1 { return fmt.Errorf("udp connection from %s to %s has closed", s.downstreamAddr.String(), s.upstreamAddr) } @@ -118,7 +118,7 @@ func (s *session) Write(buf *iobufferpool.IoBuffer) error 
{ select { case s.writeBuf <- buf: default: - _ = iobufferpool.PutIoBuffer(*buf) // if failed, may be try again? + buf.Release() // if failed, may be try again? } return nil } @@ -126,19 +126,19 @@ func (s *session) Write(buf *iobufferpool.IoBuffer) error { // ListenResponse session listen upstream connection response and send to downstream func (s *session) ListenResponse(sendTo *net.UDPConn) { go func() { - buf := iobufferpool.GetIoBuffer(iobufferpool.UDPPacketMaxSize) + buf := iobufferpool.UDPBufferPool.Get().([]byte) defer s.Close() for { - buf.Reset() + buf = buf[:0] if s.upstreamIdleTimeout > 0 { _ = s.upstreamConn.SetReadDeadline(time.Now().Add(s.upstreamIdleTimeout)) } - nRead, err := buf.ReadOnce(s.upstreamConn) + nRead, err := s.upstreamConn.Read(buf) if err != nil { if err, ok := err.(net.Error); ok && err.Timeout() { - return + return // return or continue to read? } if atomic.LoadUint32(&s.stopped) == 0 { @@ -147,13 +147,13 @@ func (s *session) ListenResponse(sendTo *net.UDPConn) { return } - nWrite, err := sendTo.WriteToUDP(buf.Bytes(), s.downstreamAddr) + nWrite, err := sendTo.WriteToUDP(buf[0:nRead], s.downstreamAddr) if err != nil { logger.Errorf("udp connection send data to downstream(%s) failed, err: %+v", s.downstreamAddr.String(), err) return } - if nRead != int64(nWrite) { + if nRead != nWrite { logger.Errorf("udp connection send data to downstream(%s) failed, should write %d but written %d", s.downstreamAddr.String(), nRead, nWrite) return @@ -167,7 +167,7 @@ func (s *session) cleanWriteBuf() { select { case buf := <-s.writeBuf: if buf != nil { - _ = iobufferpool.PutIoBuffer(*buf) + buf.Release() } default: return diff --git a/pkg/util/iobufferpool/buffer.go b/pkg/util/iobufferpool/buffer.go deleted file mode 100644 index 7092411526..0000000000 --- a/pkg/util/iobufferpool/buffer.go +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iobufferpool - -import "io" - -// IoBuffer io buffer for stream proxy -type IoBuffer interface { - // Read reads the next len(p) bytes from the buffer or until the buffer - // is drained. The return value n is the number of bytes read. If the - // buffer has no data to return, err is io.EOF (unless len(p) is zero); - // otherwise it is nil. - Read(p []byte) (n int, err error) - - // ReadOnce make a one-shot read and appends it to the buffer, growing - // the buffer as needed. The return value n is the number of bytes read. Any - // error except io.EOF encountered during the read is also returned. If the - // buffer becomes too large, ReadFrom will panic with ErrTooLarge. - ReadOnce(r io.Reader) (n int64, err error) - - // ReadFrom reads data from r until ErrEOF and appends it to the buffer, growing - // the buffer as needed. The return value n is the number of bytes read. Any - // error except io.EOF encountered during the read is also returned. If the - // buffer becomes too large, ReadFrom will panic with ErrTooLarge. 
- ReadFrom(r io.Reader) (n int64, err error) - - // Grow updates the length of the buffer by n, growing the buffer as - // needed. The return value n is the length of p; err is always nil. If the - // buffer becomes too large, Write will panic with ErrTooLarge. - Grow(n int) error - - // Write appends the contents of p to the buffer, growing the buffer as - // needed. The return value n is the length of p; err is always nil. If the - // buffer becomes too large, Write will panic with ErrTooLarge. - Write(p []byte) (n int, err error) - - // WriteString appends the string to the buffer, growing the buffer as - // needed. The return value n is the length of s; err is always nil. If the - // buffer becomes too large, Write will panic with ErrTooLarge. - WriteString(s string) (n int, err error) - - // WriteByte appends the byte to the buffer, growing the buffer as - // needed. The return value n is the length of s; err is always nil. If the - // buffer becomes too large, Write will panic with ErrTooLarge. - WriteByte(p byte) error - - // WriteUint16 appends the uint16 to the buffer, growing the buffer as - // needed. The return value n is the length of s; err is always nil. If the - // buffer becomes too large, Write will panic with ErrTooLarge. - WriteUint16(p uint16) error - - // WriteUint32 appends the uint32 to the buffer, growing the buffer as - // needed. The return value n is the length of s; err is always nil. If the - // buffer becomes too large, Write will panic with ErrTooLarge. - WriteUint32(p uint32) error - - // WriteUint64 appends the uint64 to the buffer, growing the buffer as - // needed. The return value n is the length of s; err is always nil. If the - // buffer becomes too large, Write will panic with ErrTooLarge. - WriteUint64(p uint64) error - - // WriteTo writes data to w until the buffer is drained or an error occurs. - // The return value n is the number of bytes written; it always fits into an - // int, but it is int64 to match the io.WriterTo interface. Any error - // encountered during the write is also returned. - WriteTo(w io.Writer) (n int64, err error) - - // Peek returns n bytes from buffer, without draining any buffered data. - // If n > readable buffer, nil will be returned. - // It can be used in codec to check first-n-bytes magic bytes - // Note: do not change content in return bytes, use write instead - Peek(n int) []byte - - // Bytes returns all bytes from buffer, without draining any buffered data. - // It can be used to get fixed-length content, such as headers, body. - // Note: do not change content in return bytes, use write instead - Bytes() []byte - - // Drain drains a offset length of bytes in buffer. - // It can be used with Bytes(), after consuming a fixed-length of data - Drain(offset int) - - // Len returns the number of bytes of the unread portion of the buffer; - // b.Len() == len(b.Bytes()). - Len() int - - // Cap returns the capacity of the buffer's underlying byte slice, that is, the - // total space allocated for the buffer's data. - Cap() int - - // Reset resets the buffer to be empty, - // but it retains the underlying storage for use by future writes. - Reset() - - // Clone makes a copy of IoBuffer struct - Clone() IoBuffer - - // String returns the contents of the unread portion of the buffer - // as a string. If the Buffer is a nil pointer, it returns "". 
- String() string - - // Alloc alloc bytes from BytePoolBuffer - Alloc(int) - - // Free free bytes to BytePoolBuffer - Free() - - // Count sets and returns reference count - Count(int32) int32 - - // EOF returns whether Io is EOF on the connection - EOF() bool - - //SetEOF sets the IoBuffer ErrEOF - SetEOF(eof bool) - - Append(data []byte) error - - CloseWithError(err error) -} diff --git a/pkg/util/iobufferpool/bytebuffer_pool.go b/pkg/util/iobufferpool/bytebuffer_pool.go deleted file mode 100644 index d337aeccbe..0000000000 --- a/pkg/util/iobufferpool/bytebuffer_pool.go +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iobufferpool - -import ( - "sync" -) - -const minShift = 6 -const maxShift = 18 -const errSlot = -1 - -var bbPool *byteBufferPool - -func init() { - bbPool = newByteBufferPool() -} - -// byteBufferPool is []byte pools -type byteBufferPool struct { - minShift int - minSize int - maxSize int - - pool []*bufferSlot -} - -type bufferSlot struct { - defaultSize int - pool sync.Pool -} - -// newByteBufferPool returns byteBufferPool -func newByteBufferPool() *byteBufferPool { - p := &byteBufferPool{ - minShift: minShift, - minSize: 1 << minShift, - maxSize: 1 << maxShift, - } - for i := 0; i <= maxShift-minShift; i++ { - slab := &bufferSlot{ - defaultSize: 1 << (uint)(i+minShift), - } - p.pool = append(p.pool, slab) - } - - return p -} - -func (p *byteBufferPool) slot(size int) int { - if size > p.maxSize { - return errSlot - } - slot := 0 - shift := 0 - if size > p.minSize { - size-- - for size > 0 { - size = size >> 1 - shift++ - } - slot = shift - p.minShift - } - - return slot -} - -func newBytes(size int) []byte { - return make([]byte, size) -} - -// take returns *[]byte from byteBufferPool -func (p *byteBufferPool) take(size int) *[]byte { - slot := p.slot(size) - if slot == errSlot { - b := newBytes(size) - return &b - } - v := p.pool[slot].pool.Get() - if v == nil { - b := newBytes(p.pool[slot].defaultSize) - b = b[0:size] - return &b - } - b := v.(*[]byte) - *b = (*b)[0:size] - return b -} - -// give returns *[]byte to byteBufferPool -func (p *byteBufferPool) give(buf *[]byte) { - if buf == nil { - return - } - size := cap(*buf) - slot := p.slot(size) - if slot == errSlot { - return - } - if size != int(p.pool[slot].defaultSize) { - return - } - p.pool[slot].pool.Put(buf) -} - -// ByteBufferPoolContainer byte buffer pool container -type ByteBufferPoolContainer struct { - bytes []*[]byte - *byteBufferPool -} - -// NewByteBufferPoolContainer construct byte buffer pool container -func NewByteBufferPoolContainer() *ByteBufferPoolContainer { - return &ByteBufferPoolContainer{ - byteBufferPool: bbPool, - } -} - -// Reset clean byte buffer pool container resource -func (c *ByteBufferPoolContainer) Reset() { - for _, buf := range c.bytes { - c.give(buf) - } - c.bytes = c.bytes[:0] -} - -// Take append *[]byte with fixed size from byteBufferPool -func (c 
*ByteBufferPoolContainer) Take(size int) *[]byte { - buf := c.take(size) - c.bytes = append(c.bytes, buf) - return buf -} - -// GetBytes returns *[]byte from byteBufferPool -func GetBytes(size int) *[]byte { - return bbPool.take(size) -} - -// PutBytes Put *[]byte to byteBufferPool -func PutBytes(buf *[]byte) { - bbPool.give(buf) -} diff --git a/pkg/util/iobufferpool/constants.go b/pkg/util/iobufferpool/constants.go new file mode 100644 index 0000000000..32048fe153 --- /dev/null +++ b/pkg/util/iobufferpool/constants.go @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package iobufferpool + +import "errors" + +const ( + // UDPPacketMaxSize max size of udp packet + UDPPacketMaxSize = 65535 + // DefaultBufferReadCapacity default buffer capacity for stream proxy such as tcp + DefaultBufferReadCapacity = 1 << 7 +) + +var ( + // ErrEOF io buffer eof sign + ErrEOF = errors.New("EOF") +) diff --git a/pkg/util/iobufferpool/iobuffer.go b/pkg/util/iobufferpool/iobuffer.go deleted file mode 100644 index 5b3504d73d..0000000000 --- a/pkg/util/iobufferpool/iobuffer.go +++ /dev/null @@ -1,594 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iobufferpool - -import ( - "encoding/binary" - "errors" - "io" - "sync" - "sync/atomic" -) - -const ( - // AutoExpand auto expand io buffer - AutoExpand = -1 - MinRead = 1 << 9 - MaxRead = 1 << 17 - ResetOffMark = -1 - DefaultSize = 1 << 4 - MaxBufferLength = 1 << 20 - MaxThreshold = 1 << 22 -) - -var nullByte []byte - -var ( - // ErrEOF io buffer eof sign - ErrEOF = errors.New("EOF") - // ErrInvalidWriteCount io buffer: invalid write count - ErrInvalidWriteCount = errors.New("io buffer: invalid write count") -) - -type pipe struct { - IoBuffer - mu sync.Mutex - c sync.Cond - - err error -} - -func (p *pipe) Len() int { - p.mu.Lock() - defer p.mu.Unlock() - if p.IoBuffer == nil { - return 0 - } - return p.IoBuffer.Len() -} - -// Read waits until data is available and copies bytes -// from the buffer into p. 
-func (p *pipe) Read(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - for { - if p.IoBuffer != nil && p.IoBuffer.Len() > 0 { - return p.IoBuffer.Read(d) - } - if p.err != nil { - return 0, p.err - } - p.c.Wait() - } -} - -var errClosedPipeWrite = errors.New("write on closed buffer") - -// Write copies bytes from p into the buffer and wakes a reader. -// It is an error to write more data than the buffer can hold. -func (p *pipe) Write(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if p.err != nil { - return 0, errClosedPipeWrite - } - return len(d), p.IoBuffer.Append(d) -} - -// CloseWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err after all data has been -// read. -// -// The error must be non-nil. -func (p *pipe) CloseWithError(err error) { - if err == nil { - err = io.EOF - } - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - p.err = err - defer p.c.Signal() -} - -// NewPipeBuffer create pipe buffer with fixed capacity -func NewPipeBuffer(capacity int) IoBuffer { - return &pipe{ - IoBuffer: newIoBuffer(capacity), - } -} - -// ioBuffer is an implementation of IoBuffer -type ioBuffer struct { - buf []byte // contents: buf[off : len(buf)] - off int // read from &buf[off], write to &buf[len(buf)] - offMark int - count int32 - eof bool - - b *[]byte -} - -func newIoBuffer(capacity int) IoBuffer { - buffer := &ioBuffer{ - offMark: ResetOffMark, - count: 1, - } - if capacity <= 0 { - capacity = DefaultSize - } - buffer.b = GetBytes(capacity) - buffer.buf = (*buffer.b)[:0] - return buffer -} - -// NewIoBufferString new io buffer with string -func NewIoBufferString(s string) IoBuffer { - if s == "" { - return newIoBuffer(0) - } - return &ioBuffer{ - buf: []byte(s), - offMark: ResetOffMark, - count: 1, - } -} - -// NewIoBufferBytes new io buffer with bytes array -func NewIoBufferBytes(bytes []byte) IoBuffer { - if bytes == nil { - return NewIoBuffer(0) - } - return &ioBuffer{ - buf: bytes, - offMark: ResetOffMark, - count: 1, - } -} - -// NewIoBufferEOF new io buffer with eof sign -func NewIoBufferEOF() IoBuffer { - buf := newIoBuffer(0) - buf.SetEOF(true) - return buf -} - -func (b *ioBuffer) Read(p []byte) (n int, err error) { - if b.off >= len(b.buf) { - b.Reset() - - if len(p) == 0 { - return - } - - return 0, io.EOF - } - - n = copy(p, b.buf[b.off:]) - b.off += n - - return -} - -func (b *ioBuffer) Grow(n int) error { - _, ok := b.tryGrowByReslice(n) - - if !ok { - b.grow(n) - } - - return nil -} - -func (b *ioBuffer) ReadOnce(r io.Reader) (n int64, err error) { - var m int - - if b.off > 0 && b.off >= len(b.buf) { - b.Reset() - } - - if b.off >= (cap(b.buf) - len(b.buf)) { - b.copy(0) - } - - // free max buffers avoid memory leak - if b.off == len(b.buf) && cap(b.buf) > MaxBufferLength { - b.Free() - b.Alloc(MaxRead) - } - - l := cap(b.buf) - len(b.buf) - - m, err = r.Read(b.buf[len(b.buf):cap(b.buf)]) - - b.buf = b.buf[0 : len(b.buf)+m] - n = int64(m) - - // Not enough space anywhere, we need to allocate. 
- if l == m { - b.copy(AutoExpand) - } - - return n, err -} - -func (b *ioBuffer) ReadFrom(r io.Reader) (n int64, err error) { - if b.off >= len(b.buf) { - b.Reset() - } - - for { - if free := cap(b.buf) - len(b.buf); free < MinRead { - // not enough space at end - if b.off+free < MinRead { - // not enough space using beginning of buffer; - // double buffer capacity - b.copy(MinRead) - } else { - b.copy(0) - } - } - - m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) - - b.buf = b.buf[0 : len(b.buf)+m] - n += int64(m) - - if e == io.EOF { - break - } - - if m == 0 { - break - } - - if e != nil { - return n, e - } - } - - return -} - -func (b *ioBuffer) Write(p []byte) (n int, err error) { - m, ok := b.tryGrowByReslice(len(p)) - - if !ok { - m = b.grow(len(p)) - } - - return copy(b.buf[m:], p), nil -} - -func (b *ioBuffer) WriteString(s string) (n int, err error) { - m, ok := b.tryGrowByReslice(len(s)) - - if !ok { - m = b.grow(len(s)) - } - - return copy(b.buf[m:], s), nil -} - -func (b *ioBuffer) tryGrowByReslice(n int) (int, bool) { - if l := len(b.buf); l+n <= cap(b.buf) { - b.buf = b.buf[:l+n] - - return l, true - } - - return 0, false -} - -func (b *ioBuffer) grow(n int) int { - m := b.Len() - - // If buffer is empty, reset to recover space. - if m == 0 && b.off != 0 { - b.Reset() - } - - // Try to grow by means of a reslice. - if i, ok := b.tryGrowByReslice(n); ok { - return i - } - - if m+n <= cap(b.buf)/2 { - // We can slide things down instead of allocating a new - // slice. We only need m+n <= cap(b.buf) to slide, but - // we instead let capacity get twice as large so we - // don't spend all our time copying. - b.copy(0) - } else { - // Not enough space anywhere, we need to allocate. - b.copy(n) - } - - // Restore b.off and len(b.buf). - b.off = 0 - b.buf = b.buf[:m+n] - - return m -} - -func (b *ioBuffer) WriteTo(w io.Writer) (n int64, err error) { - for b.off < len(b.buf) { - nBytes := b.Len() - m, e := w.Write(b.buf[b.off:]) - - if m > nBytes { - panic(ErrInvalidWriteCount) - } - - b.off += m - n += int64(m) - - if e != nil { - return n, e - } - - if m == 0 || m == nBytes { - return n, nil - } - } - - return -} - -func (b *ioBuffer) WriteByte(p byte) error { - m, ok := b.tryGrowByReslice(1) - - if !ok { - m = b.grow(1) - } - - b.buf[m] = p - return nil -} - -func (b *ioBuffer) WriteUint16(p uint16) error { - m, ok := b.tryGrowByReslice(2) - - if !ok { - m = b.grow(2) - } - - binary.BigEndian.PutUint16(b.buf[m:], p) - return nil -} - -func (b *ioBuffer) WriteUint32(p uint32) error { - m, ok := b.tryGrowByReslice(4) - - if !ok { - m = b.grow(4) - } - - binary.BigEndian.PutUint32(b.buf[m:], p) - return nil -} - -func (b *ioBuffer) WriteUint64(p uint64) error { - m, ok := b.tryGrowByReslice(8) - - if !ok { - m = b.grow(8) - } - - binary.BigEndian.PutUint64(b.buf[m:], p) - return nil -} - -func (b *ioBuffer) Append(data []byte) error { - if b.off >= len(b.buf) { - b.Reset() - } - - dataLen := len(data) - - if free := cap(b.buf) - len(b.buf); free < dataLen { - // not enough space at end - if b.off+free < dataLen { - // not enough space using beginning of buffer; - // double buffer capacity - b.copy(dataLen) - } else { - b.copy(0) - } - } - - m := copy(b.buf[len(b.buf):len(b.buf)+dataLen], data) - b.buf = b.buf[0 : len(b.buf)+m] - - return nil -} - -func (b *ioBuffer) AppendByte(data byte) error { - return b.Append([]byte{data}) -} - -func (b *ioBuffer) Peek(n int) []byte { - if len(b.buf)-b.off < n { - return nil - } - - return b.buf[b.off : b.off+n] -} - -func (b *ioBuffer) Mark() { - 
b.offMark = b.off -} - -func (b *ioBuffer) Restore() { - if b.offMark != ResetOffMark { - b.off = b.offMark - b.offMark = ResetOffMark - } -} - -func (b *ioBuffer) Bytes() []byte { - return b.buf[b.off:] -} - -func (b *ioBuffer) Cut(offset int) IoBuffer { - if b.off+offset > len(b.buf) { - return nil - } - - buf := make([]byte, offset) - - copy(buf, b.buf[b.off:b.off+offset]) - b.off += offset - b.offMark = ResetOffMark - - return &ioBuffer{ - buf: buf, - off: 0, - } -} - -func (b *ioBuffer) Drain(offset int) { - if b.off+offset > len(b.buf) { - return - } - - b.off += offset - b.offMark = ResetOffMark -} - -func (b *ioBuffer) String() string { - return string(b.buf[b.off:]) -} - -func (b *ioBuffer) Len() int { - return len(b.buf) - b.off -} - -func (b *ioBuffer) Cap() int { - return cap(b.buf) -} - -func (b *ioBuffer) Reset() { - b.buf = b.buf[:0] - b.off = 0 - b.offMark = ResetOffMark - b.eof = false -} - -func (b *ioBuffer) available() int { - return len(b.buf) - b.off -} - -func (b *ioBuffer) Clone() IoBuffer { - buf := GetIoBuffer(b.Len()) - buf.Write(b.Bytes()) - - buf.SetEOF(b.EOF()) - - return buf -} - -func (b *ioBuffer) Free() { - b.Reset() - b.giveSlice() -} - -func (b *ioBuffer) Alloc(size int) { - if b.buf != nil { - b.Free() - } - if size <= 0 { - size = DefaultSize - } - b.b = b.makeSlice(size) - b.buf = *b.b - b.buf = b.buf[:0] -} - -func (b *ioBuffer) Count(count int32) int32 { - return atomic.AddInt32(&b.count, count) -} - -func (b *ioBuffer) EOF() bool { - return b.eof -} - -func (b *ioBuffer) SetEOF(eof bool) { - b.eof = eof -} - -//The expand parameter means the following: -//A, if expand > 0, cap(newbuf) is calculated according to cap(oldbuf) and expand. -//B, if expand == AutoExpand, cap(newbuf) is calculated only according to cap(oldbuf). -//C, if expand == 0, only copy, buf not be expanded. -func (b *ioBuffer) copy(expand int) { - var newBuf []byte - var bufp *[]byte - - if expand > 0 || expand == AutoExpand { - cap := cap(b.buf) - // when buf cap greater than MaxThreshold, start Slow Grow. - if cap < 2*MinRead { - cap = 2 * MinRead - } else if cap < MaxThreshold { - cap = 2 * cap - } else { - cap = cap + cap/4 - } - - if expand == AutoExpand { - expand = 0 - } - - bufp = b.makeSlice(cap + expand) - newBuf = *bufp - copy(newBuf, b.buf[b.off:]) - PutBytes(b.b) - b.b = bufp - } else { - newBuf = b.buf - copy(newBuf, b.buf[b.off:]) - } - b.buf = newBuf[:len(b.buf)-b.off] - b.off = 0 -} - -func (b *ioBuffer) makeSlice(n int) *[]byte { - return GetBytes(n) -} - -func (b *ioBuffer) giveSlice() { - if b.b != nil { - PutBytes(b.b) - b.b = nil - b.buf = nullByte - } -} - -func (b *ioBuffer) CloseWithError(err error) { -} diff --git a/pkg/util/iobufferpool/iobuffer_pool.go b/pkg/util/iobufferpool/iobuffer_pool.go deleted file mode 100644 index 9d9cb7a1b7..0000000000 --- a/pkg/util/iobufferpool/iobuffer_pool.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package iobufferpool - -import ( - "errors" - "sync" -) - -const ( - // UDPPacketMaxSize max size of udp packet - UDPPacketMaxSize = 64 * 1024 - // DefaultBufferReadCapacity default buffer capacity for stream proxy such as tcp - DefaultBufferReadCapacity = 1 << 7 -) - -var ibPool IoBufferPool - -// IoBufferPool is IoBuffer Pool -type IoBufferPool struct { - pool sync.Pool -} - -// take returns IoBuffer from IoBufferPool -func (p *IoBufferPool) take(size int) (buf IoBuffer) { - v := p.pool.Get() - if v == nil { - buf = newIoBuffer(size) - } else { - buf = v.(IoBuffer) - buf.Alloc(size) - buf.Count(1) - } - return -} - -// give returns IoBuffer to IoBufferPool -func (p *IoBufferPool) give(buf IoBuffer) { - buf.Free() - p.pool.Put(buf) -} - -// GetIoBuffer returns IoBuffer from pool -func GetIoBuffer(size int) IoBuffer { - return ibPool.take(size) -} - -// NewIoBuffer is an alias for GetIoBuffer -func NewIoBuffer(size int) IoBuffer { - return GetIoBuffer(size) -} - -// PutIoBuffer returns IoBuffer to pool -func PutIoBuffer(buf IoBuffer) error { - count := buf.Count(-1) - if count > 0 { - return nil - } else if count < 0 { - return errors.New("PutIoBuffer duplicate") - } - if p, _ := buf.(*pipe); p != nil { - buf = p.IoBuffer - } - ibPool.give(buf) - return nil -} diff --git a/pkg/util/iobufferpool/packet_pool.go b/pkg/util/iobufferpool/packet_pool.go new file mode 100644 index 0000000000..f617f717bd --- /dev/null +++ b/pkg/util/iobufferpool/packet_pool.go @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package iobufferpool + +import ( + "sync" +) + +// UDPBufferPool udp buffer pool +var UDPBufferPool = sync.Pool{ + New: func() interface{} { + return make([]byte, UDPPacketMaxSize) + }, +} + +// Packet udp connection msg +type Packet struct { + Payload []byte + Len int +} + +// Bytes return underlying bytes for io buffer +func (p *Packet) Bytes() []byte { + if p.Payload == nil { + return nil + } + + return p.Payload[0:p.Len] +} + +// Release return io buffer resource to pool +func (p *Packet) Release() { + if p.Payload == nil { + return + } + UDPBufferPool.Put(p.Payload) +} diff --git a/pkg/util/iobufferpool/stream_buffer_pool.go b/pkg/util/iobufferpool/stream_buffer_pool.go new file mode 100644 index 0000000000..3b4808077b --- /dev/null +++ b/pkg/util/iobufferpool/stream_buffer_pool.go @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package iobufferpool + +import ( + "sync" + + "github.com/valyala/bytebufferpool" +) + +// TCPBufferPool tcp buffer pool for tcp connection +var TCPBufferPool = sync.Pool{ + New: func() interface{} { + return make([]byte, DefaultBufferReadCapacity) + }, +} + +// StreamBuffer io buffer for stream scene +type StreamBuffer struct { + payload *bytebufferpool.ByteBuffer + eof bool +} + +func NewStreamBuffer(buf []byte) *StreamBuffer { + res := &StreamBuffer{ + payload: bytebufferpool.Get(), + eof: false, + } + _, _ = res.payload.Write(buf) + return res +} + +// NewEOFStreamBuffer create stream buffer with eof sign +func NewEOFStreamBuffer() *StreamBuffer { + return &StreamBuffer{ + payload: bytebufferpool.Get(), + eof: true, + } +} + +// Bytes return underlying bytes +func (s *StreamBuffer) Bytes() []byte { + return s.payload.B +} + +// Len get buffer len +func (s *StreamBuffer) Len() int { + return len(s.payload.B) +} + +// Write implements io.Writer +func (s *StreamBuffer) Write(p []byte) (int, error) { + s.payload.B = append(s.payload.B, p...) + return len(p), nil +} + +// Release put buffer resource to pool +func (s *StreamBuffer) Release() { + if s.payload == nil { + return + } + bytebufferpool.Put(s.payload) + s.payload = nil +} + +// EOF return eof sign +func (s *StreamBuffer) EOF() bool { + return s.eof +} + +// SetEOF set eof sign +func (s *StreamBuffer) SetEOF(eof bool) { + s.eof = eof +} From 6124cff97fc6efed7894e0002a6a39e066fd7ada Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 26 Oct 2021 22:32:26 +0800 Subject: [PATCH 53/99] [layer4proxy] fix go.mod import --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 0566c95d04..e3398c582e 100644 --- a/go.mod +++ b/go.mod @@ -41,7 +41,7 @@ require ( github.com/tcnksm/go-httpstat v0.2.1-0.20191008022543-e866bb274419 github.com/tidwall/gjson v1.8.0 github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce - github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/bytebufferpool v1.0.0 github.com/valyala/fasttemplate v1.2.1 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonschema v1.2.1-0.20201027075954-b076d39a02e5 From baf7e54392be4b7b1d6d8e0d4bdb635da2f64ade Mon Sep 17 00:00:00 2001 From: jxd134 Date: Wed, 27 Oct 2021 10:26:16 +0800 Subject: [PATCH 54/99] [layer4proxy] extract backend servers pool related code to util/layer4backend --- pkg/object/tcpproxy/connection.go | 11 +- pkg/object/tcpproxy/runtime.go | 11 +- pkg/object/tcpproxy/spec.go | 42 +-- pkg/object/udpproxy/backendserver.go | 279 ------------------ pkg/object/udpproxy/pool.go | 77 ----- pkg/object/udpproxy/runtime.go | 13 +- pkg/object/udpproxy/spec.go | 42 +-- .../layer4backend}/backendserver.go | 2 +- .../tcpproxy => util/layer4backend}/pool.go | 21 +- pkg/util/layer4backend/spec.go | 55 ++++ 10 files changed, 90 insertions(+), 463 deletions(-) delete mode 100644 pkg/object/udpproxy/backendserver.go delete mode 100644 pkg/object/udpproxy/pool.go rename pkg/{object/tcpproxy => util/layer4backend}/backendserver.go (99%) rename pkg/{object/tcpproxy => util/layer4backend}/pool.go (73%) create mode 100644 pkg/util/layer4backend/spec.go diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 0e1b7f5048..d39d66a609 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -125,21 
+125,14 @@ func (c *Connection) State() ConnState { return ConnInit } -// GoWithRecover wraps a `go func()` with recover() func (c *Connection) goWithRecover(handler func(), recoverHandler func(r interface{})) { go func() { defer func() { if r := recover(); r != nil { logger.Errorf("tcp connection goroutine panic: %v\n%s\n", r, string(debug.Stack())) if recoverHandler != nil { - go func() { - defer func() { - if p := recover(); p != nil { - logger.Errorf("tcp connection goroutine panic: %v\n%s\n", p, string(debug.Stack())) - } - }() - recoverHandler(r) - }() + // it is not needed to wrap recoverHandler with go func in the current scenario + recoverHandler(r) } } }() diff --git a/pkg/object/tcpproxy/runtime.go b/pkg/object/tcpproxy/runtime.go index 71a3730bc2..244b591777 100644 --- a/pkg/object/tcpproxy/runtime.go +++ b/pkg/object/tcpproxy/runtime.go @@ -19,6 +19,7 @@ package tcpproxy import ( "fmt" + "github.com/megaease/easegress/pkg/util/layer4backend" "net" "reflect" "sync/atomic" @@ -61,7 +62,7 @@ type ( superSpec *supervisor.Spec spec *Spec - pool *pool // backend servers pool + pool *layer4backend.Pool // backend servers pool ipFilters *ipfilter.Layer4IpFilters // ip filters listener *listener // tcp listener @@ -78,7 +79,7 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { r := &runtime{ superSpec: superSpec, - pool: newPool(superSpec.Super(), spec.Pool, ""), + pool: layer4backend.NewPool(superSpec.Super(), spec.Pool, ""), ipFilters: ipfilter.NewLayer4IPFilters(spec.IPFilter), eventChan: make(chan interface{}, 10), @@ -125,7 +126,7 @@ func (r *runtime) reload(nextSuperSpec *supervisor.Spec) { r.superSpec = nextSuperSpec nextSpec := nextSuperSpec.ObjectSpec().(*Spec) r.ipFilters.ReloadRules(nextSpec.IPFilter) - r.pool.reloadRules(nextSuperSpec.Super(), nextSpec.Pool, "") + r.pool.ReloadRules(nextSuperSpec.Super(), nextSpec.Pool, "") // r.listener does not create just after the process started and the config load for the first time. if nextSpec != nil && r.listener != nil { @@ -262,7 +263,7 @@ func (r *runtime) handleEventReload(e *eventReload) { func (r *runtime) handleEventClose(e *eventClose) { r.closeServer() - r.pool.close() + r.pool.Close() close(e.done) } @@ -277,7 +278,7 @@ func (r *runtime) onAccept() func(conn net.Conn, listenerStop chan struct{}) { return } - server, err := r.pool.next(downstream) + server, err := r.pool.Next(downstream) if err != nil { _ = rawConn.Close() logger.Errorf("close tcp connection due to no available upstream server, local addr: %s, err: %+v", diff --git a/pkg/object/tcpproxy/spec.go b/pkg/object/tcpproxy/spec.go index 03f15e3afe..13446ac854 100644 --- a/pkg/object/tcpproxy/spec.go +++ b/pkg/object/tcpproxy/spec.go @@ -18,9 +18,8 @@ package tcpproxy import ( - "fmt" - "github.com/megaease/easegress/pkg/util/ipfilter" + "github.com/megaease/easegress/pkg/util/layer4backend" ) type ( @@ -33,17 +32,8 @@ type ( MaxConnections uint32 `yaml:"maxConns" jsonschema:"omitempty,minimum=1"` ConnectTimeout uint32 `yaml:"connectTimeout" jsonschema:"omitempty"` - Pool *PoolSpec `yaml:"pool" jsonschema:"required"` - IPFilter *ipfilter.Spec `yaml:"ipFilters,omitempty" jsonschema:"omitempty"` - } - - // PoolSpec describes a pool of servers. 
- PoolSpec struct { - ServiceRegistry string `yaml:"serviceRegistry" jsonschema:"omitempty"` - ServiceName string `yaml:"serviceName" jsonschema:"omitempty"` - Servers []*Server `yaml:"servers" jsonschema:"omitempty"` - ServersTags []string `yaml:"serversTags" jsonschema:"omitempty,uniqueItems=true"` - LoadBalance *LoadBalance `yaml:"loadBalance" jsonschema:"required"` + Pool *layer4backend.PoolSpec `yaml:"pool" jsonschema:"required"` + IPFilter *ipfilter.Spec `yaml:"ipFilters,omitempty" jsonschema:"omitempty"` } ) @@ -55,29 +45,3 @@ func (spec *Spec) Validate() error { return nil } - -// Validate validates poolSpec. -func (s PoolSpec) Validate() error { - if s.ServiceName == "" && len(s.Servers) == 0 { - return fmt.Errorf("both serviceName and servers are empty") - } - - serversGotWeight := 0 - for _, server := range s.Servers { - if server.Weight > 0 { - serversGotWeight++ - } - } - if serversGotWeight > 0 && serversGotWeight < len(s.Servers) { - return fmt.Errorf("not all servers have weight(%d/%d)", - serversGotWeight, len(s.Servers)) - } - - if s.ServiceName == "" { - servers := newStaticServers(s.Servers, s.ServersTags, s.LoadBalance) - if servers.len() == 0 { - return fmt.Errorf("serversTags picks none of servers") - } - } - return nil -} diff --git a/pkg/object/udpproxy/backendserver.go b/pkg/object/udpproxy/backendserver.go deleted file mode 100644 index d6b99aefc2..0000000000 --- a/pkg/object/udpproxy/backendserver.go +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package udpproxy - -import ( - "fmt" - "math/rand" - "sync" - "sync/atomic" - - "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/object/serviceregistry" - "github.com/megaease/easegress/pkg/supervisor" - "github.com/megaease/easegress/pkg/util/hashtool" - "github.com/megaease/easegress/pkg/util/stringtool" -) - -const ( - // PolicyRoundRobin is the policy of round-robin. - PolicyRoundRobin = "roundRobin" - // PolicyRandom is the policy of random. - PolicyRandom = "random" - // PolicyWeightedRandom is the policy of weighted random. - PolicyWeightedRandom = "weightedRandom" - // PolicyIPHash is the policy of ip hash. - PolicyIPHash = "ipHash" -) - -type ( - servers struct { - poolSpec *PoolSpec - super *supervisor.Supervisor - - mutex sync.Mutex - serviceRegistry *serviceregistry.ServiceRegistry - serviceWatcher serviceregistry.ServiceWatcher - static *staticServers - - done chan struct{} - } - - staticServers struct { - count uint64 - weightsSum int - servers []*Server - lb LoadBalance - } - - // Server is proxy server. - Server struct { - Addr string `yaml:"url" jsonschema:"required,format=hostport"` - Tags []string `yaml:"tags" jsonschema:"omitempty,uniqueItems=true"` - Weight int `yaml:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` - } - - // LoadBalance is load balance for multiple servers. 
- LoadBalance struct { - Policy string `yaml:"policy" jsonschema:"required,enum=roundRobin,enum=random,enum=weightedRandom,enum=ipHash"` - } -) - -func newServers(super *supervisor.Supervisor, poolSpec *PoolSpec) *servers { - s := &servers{ - poolSpec: poolSpec, - super: super, - done: make(chan struct{}), - } - - s.useStaticServers() - if poolSpec.ServiceRegistry == "" || poolSpec.ServiceName == "" { - return s - } - - s.serviceRegistry = s.super.MustGetSystemController(serviceregistry.Kind). - Instance().(*serviceregistry.ServiceRegistry) - s.tryUseService() - s.serviceWatcher = s.serviceRegistry.NewServiceWatcher(s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName) - - go s.watchService() - return s -} - -func (s *Server) String() string { - return fmt.Sprintf("%s,%v,%d", s.Addr, s.Tags, s.Weight) -} - -func (s *servers) watchService() { - for { - select { - case <-s.done: - return - case event := <-s.serviceWatcher.Watch(): - s.handleEvent(event) - } - } -} - -func (s *servers) handleEvent(event *serviceregistry.ServiceEvent) { - s.useService(event.Instances) -} - -func (s *servers) tryUseService() { - serviceInstanceSpecs, err := s.serviceRegistry.ListServiceInstances(s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName) - - if err != nil { - logger.Errorf("get service %s/%s failed: %v", - s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName, err) - s.useStaticServers() - return - } - s.useService(serviceInstanceSpecs) -} - -func (s *servers) useService(serviceInstanceSpecs map[string]*serviceregistry.ServiceInstanceSpec) { - var servers []*Server - for _, instance := range serviceInstanceSpecs { - servers = append(servers, &Server{ - Addr: fmt.Sprintf("%s:%d", instance.Address, instance.Port), - Tags: instance.Tags, - Weight: instance.Weight, - }) - } - if len(servers) == 0 { - logger.Errorf("%s/%s: empty service instance", - s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName) - s.useStaticServers() - return - } - - dynamicServers := newStaticServers(servers, s.poolSpec.ServersTags, s.poolSpec.LoadBalance) - if dynamicServers.len() == 0 { - logger.Errorf("%s/%s: no service instance satisfy tags: %v", - s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName, s.poolSpec.ServersTags) - s.useStaticServers() - } - - logger.Infof("use dynamic service: %s/%s", s.poolSpec.ServiceRegistry, s.poolSpec.ServiceName) - - s.mutex.Lock() - defer s.mutex.Unlock() - s.static = dynamicServers -} - -func (s *servers) useStaticServers() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.static = newStaticServers(s.poolSpec.Servers, s.poolSpec.ServersTags, s.poolSpec.LoadBalance) -} - -func (s *servers) snapshot() *staticServers { - s.mutex.Lock() - defer s.mutex.Unlock() - - return s.static -} - -func (s *servers) len() int { - static := s.snapshot() - return static.len() -} - -func (s *servers) next(cliAddr string) (*Server, error) { - static := s.snapshot() - if static.len() == 0 { - return nil, fmt.Errorf("no server available") - } - return static.next(cliAddr), nil -} - -func (s *servers) close() { - close(s.done) - - if s.serviceWatcher != nil { - s.serviceWatcher.Stop() - } -} - -func newStaticServers(servers []*Server, tags []string, lb *LoadBalance) *staticServers { - if servers == nil { - servers = make([]*Server, 0) - } - - ss := &staticServers{} - if lb == nil { - ss.lb.Policy = PolicyRoundRobin - } else { - ss.lb = *lb - } - - defer ss.prepare() - - if len(tags) == 0 { - ss.servers = servers - return ss - } - - chosenServers := make([]*Server, 0) - for _, server := range servers { - for _, tag := range 
tags { - if stringtool.StrInSlice(tag, server.Tags) { - chosenServers = append(chosenServers, server) - break - } - } - } - ss.servers = chosenServers - return ss -} - -func (ss *staticServers) prepare() { - for _, server := range ss.servers { - ss.weightsSum += server.Weight - } -} - -func (ss *staticServers) len() int { - return len(ss.servers) -} - -func (ss *staticServers) next(cliAddr string) *Server { - switch ss.lb.Policy { - case PolicyRoundRobin: - return ss.roundRobin() - case PolicyRandom: - return ss.random() - case PolicyWeightedRandom: - return ss.weightedRandom() - case PolicyIPHash: - return ss.ipHash(cliAddr) - } - logger.Errorf("BUG: unknown load balance policy: %s", ss.lb.Policy) - return ss.roundRobin() -} - -func (ss *staticServers) roundRobin() *Server { - count := atomic.AddUint64(&ss.count, 1) - // NOTE: startEventLoop from 0. - count-- - return ss.servers[int(count)%len(ss.servers)] -} - -func (ss *staticServers) random() *Server { - return ss.servers[rand.Intn(len(ss.servers))] -} - -func (ss *staticServers) weightedRandom() *Server { - randomWeight := rand.Intn(ss.weightsSum) - for _, server := range ss.servers { - randomWeight -= server.Weight - if randomWeight < 0 { - return server - } - } - - logger.Errorf("BUG: weighted random can't pick a server: sum(%d) servers(%+v)", - ss.weightsSum, ss.servers) - - return ss.random() -} - -func (ss *staticServers) ipHash(cliAddr string) *Server { - sum32 := int(hashtool.Hash32(cliAddr)) - return ss.servers[sum32%len(ss.servers)] -} diff --git a/pkg/object/udpproxy/pool.go b/pkg/object/udpproxy/pool.go deleted file mode 100644 index a0df4787c5..0000000000 --- a/pkg/object/udpproxy/pool.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package udpproxy - -import ( - "reflect" - "sync/atomic" - - "github.com/megaease/easegress/pkg/supervisor" -) - -type ( - pool struct { - rules atomic.Value - } - - // pool backend server pool - poolRules struct { - spec *PoolSpec - - tagPrefix string - servers *servers - } -) - -func newPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) *pool { - p := &pool{} - - p.rules.Store(&poolRules{ - spec: spec, - - tagPrefix: tagPrefix, - servers: newServers(super, spec), - }) - return p -} - -func (p *pool) next(cliAddr string) (*Server, error) { - rules := p.rules.Load().(*poolRules) - return rules.servers.next(cliAddr) -} - -func (p *pool) close() { - if old := p.rules.Load(); old != nil { - oldPool := old.(*poolRules) - oldPool.servers.close() - } -} - -func (p *pool) reloadRules(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) { - old := p.rules.Load().(*poolRules) - if reflect.DeepEqual(old.spec, spec) { - return - } - p.close() - p.rules.Store(&poolRules{ - spec: spec, - - tagPrefix: tagPrefix, - servers: newServers(super, spec), - }) -} diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 290f9ab12c..306382eba2 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -29,6 +29,7 @@ import ( "github.com/megaease/easegress/pkg/supervisor" "github.com/megaease/easegress/pkg/util/iobufferpool" "github.com/megaease/easegress/pkg/util/ipfilter" + "github.com/megaease/easegress/pkg/util/layer4backend" ) const ( @@ -59,8 +60,8 @@ type ( spec *Spec startNum uint64 - pool *pool // backend servers pool - serverConn *net.UDPConn // listener + pool *layer4backend.Pool // backend servers pool + serverConn *net.UDPConn // listener sessions map[string]*session state atomic.Value // runtime running state @@ -76,7 +77,7 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { r := &runtime{ superSpec: superSpec, - pool: newPool(superSpec.Super(), spec.Pool, ""), + pool: layer4backend.NewPool(superSpec.Super(), spec.Pool, ""), ipFilters: ipfilter.NewLayer4IPFilters(spec.IPFilter), eventChan: make(chan interface{}, 10), @@ -166,7 +167,7 @@ func (r *runtime) handleEventReload(e *eventReload) { func (r *runtime) handleEventClose(e *eventClose) { r.closeServer() - r.pool.close() + r.pool.Close() close(e.done) } @@ -175,7 +176,7 @@ func (r *runtime) reload(nextSuperSpec *supervisor.Spec) { nextSpec := nextSuperSpec.ObjectSpec().(*Spec) r.ipFilters.ReloadRules(nextSpec.IPFilter) - r.pool.reloadRules(nextSuperSpec.Super(), nextSpec.Pool, "") + r.pool.ReloadRules(nextSuperSpec.Super(), nextSpec.Pool, "") // NOTE: Due to the mechanism of supervisor, // nextSpec must not be nil, just defensive programming here. 
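(Illustrative sketch, not part of the patch series: one way a caller could consume the relocated layer4backend pool shown in the hunks above. The helper name pickBackend is hypothetical; the pool is assumed to have been built once per runtime with layer4backend.NewPool(super, spec, "") and closed on shutdown, as the tcp/udp proxy runtimes do.)

package example

import (
	"net"

	"github.com/megaease/easegress/pkg/util/layer4backend"
)

// pickBackend resolves one backend address for a client connection through the
// shared layer-4 pool, mirroring r.pool.Next(...) in the proxy runtimes above.
func pickBackend(pool *layer4backend.Pool, clientAddr net.Addr) (string, error) {
	host, _, err := net.SplitHostPort(clientAddr.String())
	if err != nil {
		return "", err
	}
	// Next applies the configured load-balance policy (roundRobin, random,
	// weightedRandom, ipHash) and returns an error when no server is available.
	server, err := pool.Next(host)
	if err != nil {
		return "", err
	}
	return server.Addr, nil
}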
@@ -265,7 +266,7 @@ func (r *runtime) startServer() { } func (r *runtime) getUpstreamConn(pool *connPool, downstreamAddr *net.UDPAddr) (net.Conn, string, error) { - server, err := r.pool.next(downstreamAddr.IP.String()) + server, err := r.pool.Next(downstreamAddr.IP.String()) if err != nil { return nil, "", fmt.Errorf("can not get upstream addr for udp connection(:%d)", r.spec.Port) } diff --git a/pkg/object/udpproxy/spec.go b/pkg/object/udpproxy/spec.go index 6be426cb35..ac5d3cfa9f 100644 --- a/pkg/object/udpproxy/spec.go +++ b/pkg/object/udpproxy/spec.go @@ -18,9 +18,8 @@ package udpproxy import ( - "fmt" - "github.com/megaease/easegress/pkg/util/ipfilter" + "github.com/megaease/easegress/pkg/util/layer4backend" ) type ( @@ -34,17 +33,8 @@ type ( DownstreamIdleTimeout uint32 `yaml:"downstreamIdleTimeout" jsonschema:"omitempty,minimum=1"` UpstreamIdleTimeout uint32 `yaml:"upstreamIdleTimeout" jsonschema:"omitempty,minimum=1"` - Pool *PoolSpec `yaml:"pool" jsonschema:"required"` - IPFilter *ipfilter.Spec `yaml:"ipFilters,omitempty" jsonschema:"omitempty"` - } - - // PoolSpec describes a pool of servers. - PoolSpec struct { - ServiceRegistry string `yaml:"serviceRegistry" jsonschema:"omitempty"` - ServiceName string `yaml:"serviceName" jsonschema:"omitempty"` - Servers []*Server `yaml:"servers" jsonschema:"omitempty"` - ServersTags []string `yaml:"serversTags" jsonschema:"omitempty,uniqueItems=true"` - LoadBalance *LoadBalance `yaml:"loadBalance" jsonschema:"required"` + Pool *layer4backend.PoolSpec `yaml:"pool" jsonschema:"required"` + IPFilter *ipfilter.Spec `yaml:"ipFilters,omitempty" jsonschema:"omitempty"` } ) @@ -56,29 +46,3 @@ func (spec *Spec) Validate() error { return nil } - -// Validate validates poolSpec. -func (s PoolSpec) Validate() error { - if s.ServiceName == "" && len(s.Servers) == 0 { - return fmt.Errorf("both serviceName and servers are empty") - } - - serversGotWeight := 0 - for _, server := range s.Servers { - if server.Weight > 0 { - serversGotWeight++ - } - } - if serversGotWeight > 0 && serversGotWeight < len(s.Servers) { - return fmt.Errorf("not all servers have weight(%d/%d)", - serversGotWeight, len(s.Servers)) - } - - if s.ServiceName == "" { - servers := newStaticServers(s.Servers, s.ServersTags, s.LoadBalance) - if servers.len() == 0 { - return fmt.Errorf("serversTags picks none of servers") - } - } - return nil -} diff --git a/pkg/object/tcpproxy/backendserver.go b/pkg/util/layer4backend/backendserver.go similarity index 99% rename from pkg/object/tcpproxy/backendserver.go rename to pkg/util/layer4backend/backendserver.go index c07d0250a0..db80db8f96 100644 --- a/pkg/object/tcpproxy/backendserver.go +++ b/pkg/util/layer4backend/backendserver.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package tcpproxy +package layer4backend import ( "fmt" diff --git a/pkg/object/tcpproxy/pool.go b/pkg/util/layer4backend/pool.go similarity index 73% rename from pkg/object/tcpproxy/pool.go rename to pkg/util/layer4backend/pool.go index adc6ec3354..c9aea988a1 100644 --- a/pkg/object/tcpproxy/pool.go +++ b/pkg/util/layer4backend/pool.go @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package tcpproxy +package layer4backend import ( "reflect" @@ -25,7 +25,8 @@ import ( ) type ( - pool struct { + // Pool backend servers pool + Pool struct { rules atomic.Value } @@ -38,8 +39,9 @@ type ( } ) -func newPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) *pool { - p := &pool{} +// NewPool create backend server pool +func NewPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) *Pool { + p := &Pool{} p.rules.Store(&poolRules{ spec: spec, @@ -50,24 +52,27 @@ func newPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) *po return p } -func (p *pool) next(cliAddr string) (*Server, error) { +// Next choose one backend for proxy +func (p *Pool) Next(cliAddr string) (*Server, error) { rules := p.rules.Load().(*poolRules) return rules.servers.next(cliAddr) } -func (p *pool) close() { +// Close shutdown backend servers watcher +func (p *Pool) Close() { if old := p.rules.Load(); old != nil { oldPool := old.(*poolRules) oldPool.servers.close() } } -func (p *pool) reloadRules(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) { +// ReloadRules reload backend servers pool rule +func (p *Pool) ReloadRules(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) { old := p.rules.Load().(*poolRules) if reflect.DeepEqual(old.spec, spec) { return } - p.close() + p.Close() p.rules.Store(&poolRules{ spec: spec, diff --git a/pkg/util/layer4backend/spec.go b/pkg/util/layer4backend/spec.go new file mode 100644 index 0000000000..73e28d3083 --- /dev/null +++ b/pkg/util/layer4backend/spec.go @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package layer4backend + +import "fmt" + +// PoolSpec describes a pool of servers. +type PoolSpec struct { + ServiceRegistry string `yaml:"serviceRegistry" jsonschema:"omitempty"` + ServiceName string `yaml:"serviceName" jsonschema:"omitempty"` + Servers []*Server `yaml:"servers" jsonschema:"omitempty"` + ServersTags []string `yaml:"serversTags" jsonschema:"omitempty,uniqueItems=true"` + LoadBalance *LoadBalance `yaml:"loadBalance" jsonschema:"required"` +} + +// Validate validates poolSpec. 
+func (s *PoolSpec) Validate() error { + if s.ServiceName == "" && len(s.Servers) == 0 { + return fmt.Errorf("both serviceName and servers are empty") + } + + serversGotWeight := 0 + for _, server := range s.Servers { + if server.Weight > 0 { + serversGotWeight++ + } + } + if serversGotWeight > 0 && serversGotWeight < len(s.Servers) { + return fmt.Errorf("not all servers have weight(%d/%d)", + serversGotWeight, len(s.Servers)) + } + + if s.ServiceName == "" { + servers := newStaticServers(s.Servers, s.ServersTags, s.LoadBalance) + if servers.len() == 0 { + return fmt.Errorf("serversTags picks none of servers") + } + } + return nil +} From 99ad2f57c7ed4edae5233d95fb74ed2bd5dabc92 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Wed, 27 Oct 2021 14:58:06 +0800 Subject: [PATCH 55/99] [layer4proxy] simplify udp server lifecycle --- pkg/object/udpproxy/runtime.go | 192 +++++-------------------------- pkg/object/udpproxy/udpserver.go | 9 +- 2 files changed, 32 insertions(+), 169 deletions(-) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 306382eba2..1249cadb6d 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -20,9 +20,7 @@ package udpproxy import ( "fmt" "net" - "reflect" "sync" - "sync/atomic" "time" "github.com/megaease/easegress/pkg/logger" @@ -32,43 +30,19 @@ import ( "github.com/megaease/easegress/pkg/util/layer4backend" ) -const ( - checkFailedTimeout = 10 * time.Second - - stateNil stateType = "nil" - stateFailed stateType = "failed" - stateRunning stateType = "running" - stateClosed stateType = "closed" -) - type ( - stateType string - - eventCheckFailed struct{} - eventServeFailed struct { - startNum uint64 - err error - } - - eventReload struct { - nextSuperSpec *supervisor.Spec - } - eventClose struct{ done chan struct{} } - runtime struct { superSpec *supervisor.Spec spec *Spec - startNum uint64 pool *layer4backend.Pool // backend servers pool serverConn *net.UDPConn // listener sessions map[string]*session - state atomic.Value // runtime running state - eventChan chan interface{} // receive event ipFilters *ipfilter.Layer4IpFilters - mu sync.Mutex + mu sync.Mutex + done chan struct{} } ) @@ -80,143 +54,42 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { pool: layer4backend.NewPool(superSpec.Super(), spec.Pool, ""), ipFilters: ipfilter.NewLayer4IPFilters(spec.IPFilter), - eventChan: make(chan interface{}, 10), - sessions: make(map[string]*session), + sessions: make(map[string]*session), } - r.setState(stateNil) - - go r.fsm() - go r.checkFailed() + r.startServer() return r } -// FSM is the finite-state-machine for the runtime. -func (r *runtime) fsm() { - ticker := time.NewTicker(2 * time.Second) - for { - select { - case <-ticker.C: - r.cleanup() - case e := <-r.eventChan: - switch e := e.(type) { - case *eventCheckFailed: - r.handleEventCheckFailed() - case *eventServeFailed: - r.handleEventServeFailed(e) - case *eventReload: - r.handleEventReload(e) - case *eventClose: - ticker.Stop() - r.handleEventClose(e) - // NOTE: We don't close hs.eventChan, - // in case of panic of any other goroutines - // to send event to it later. 
- return - default: - logger.Errorf("BUG: unknown event: %T\n", e) - } - } - } -} - -func (r *runtime) setState(state stateType) { - r.state.Store(state) -} - -func (r *runtime) getState() stateType { - return r.state.Load().(stateType) -} - // Close notify runtime close func (r *runtime) Close() { - done := make(chan struct{}) - r.eventChan <- &eventClose{done: done} - <-done -} - -func (r *runtime) checkFailed() { - ticker := time.NewTicker(checkFailedTimeout) - for range ticker.C { - state := r.getState() - if state == stateFailed { - r.eventChan <- &eventCheckFailed{} - } else if state == stateClosed { - ticker.Stop() - return - } - } -} -func (r *runtime) handleEventCheckFailed() { - if r.getState() == stateFailed { - r.startServer() - } -} + close(r.done) + _ = r.serverConn.Close() -func (r *runtime) handleEventServeFailed(e *eventServeFailed) { - if r.startNum > e.startNum { - return + r.mu.Lock() + for k, s := range r.sessions { + delete(r.sessions, k) + s.Close() } - r.setState(stateFailed) -} - -func (r *runtime) handleEventReload(e *eventReload) { - r.reload(e.nextSuperSpec) -} + r.sessions = nil + r.mu.Unlock() -func (r *runtime) handleEventClose(e *eventClose) { - r.closeServer() r.pool.Close() - close(e.done) -} - -func (r *runtime) reload(nextSuperSpec *supervisor.Spec) { - r.superSpec = nextSuperSpec - nextSpec := nextSuperSpec.ObjectSpec().(*Spec) - - r.ipFilters.ReloadRules(nextSpec.IPFilter) - r.pool.ReloadRules(nextSuperSpec.Super(), nextSpec.Pool, "") - - // NOTE: Due to the mechanism of supervisor, - // nextSpec must not be nil, just defensive programming here. - switch { - case r.spec == nil && nextSpec == nil: - logger.Errorf("BUG: nextSpec is nil") - // Nothing to do. - case r.spec == nil && nextSpec != nil: - r.spec = nextSpec - r.startServer() - case r.spec != nil && nextSpec == nil: - logger.Errorf("BUG: nextSpec is nil") - r.spec = nil - r.closeServer() - case r.spec != nil && nextSpec != nil: - if r.needRestartServer(nextSpec) { - r.spec = nextSpec - r.closeServer() - r.startServer() - } else { - r.spec = nextSpec - } - } } func (r *runtime) startServer() { listenAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf(":%d", r.spec.Port)) if err != nil { - r.setState(stateFailed) logger.Errorf("parse udp listen addr(%s) failed, err: %+v", r.spec.Port, err) return } r.serverConn, err = net.ListenUDP("udp", listenAddr) if err != nil { - r.setState(stateFailed) logger.Errorf("create udp listener(%s) failed, err: %+v", r.spec.Port, err) return } - r.setState(stateRunning) var cp *connPool if r.spec.HasResponse { @@ -232,9 +105,12 @@ func (r *runtime) startServer() { n, downstreamAddr, err := r.serverConn.ReadFromUDP(buf) if err != nil { - if r.getState() != stateRunning { - return + select { + case <-r.done: + return // detect weather udp server is closed + default: } + if ope, ok := err.(*net.OpError); ok { // not timeout error and not temporary, which means the error is non-recoverable if !(ope.Timeout() && ope.Temporary()) { @@ -263,6 +139,19 @@ func (r *runtime) startServer() { r.proxy(downstreamAddr, buf[0:n]) } }() + + go func() { + ticker := time.NewTicker(5 * time.Second) + for { + select { + case <-ticker.C: + r.cleanup() + case <-r.done: + ticker.Stop() + return + } + } + }() } func (r *runtime) getUpstreamConn(pool *connPool, downstreamAddr *net.UDPAddr) (net.Conn, string, error) { @@ -364,24 +253,3 @@ func (r *runtime) cleanup() { } } } - -func (r *runtime) closeServer() { - r.setState(stateClosed) - _ = r.serverConn.Close() - r.mu.Lock() - for k, s := 
range r.sessions { - delete(r.sessions, k) - s.Close() - } - r.mu.Unlock() -} - -func (r *runtime) needRestartServer(nextSpec *Spec) bool { - x := *r.spec - y := *nextSpec - - x.Pool, y.Pool = nil, nil - x.IPFilter, y.IPFilter = nil, nil - - return !reflect.DeepEqual(x, y) -} diff --git a/pkg/object/udpproxy/udpserver.go b/pkg/object/udpproxy/udpserver.go index 45c6c3a1aa..d567cc558a 100644 --- a/pkg/object/udpproxy/udpserver.go +++ b/pkg/object/udpproxy/udpserver.go @@ -75,20 +75,15 @@ func (u *UDPServer) Close() { // Init initializes UDPServer. func (u *UDPServer) Init(superSpec *supervisor.Spec) { - u.runtime = newRuntime(superSpec) - u.runtime.eventChan <- &eventReload{ - nextSuperSpec: superSpec, - } } // Inherit inherits previous generation of UDPServer. func (u *UDPServer) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object) { u.runtime = previousGeneration.(*UDPServer).runtime - u.runtime.eventChan <- &eventReload{ - nextSuperSpec: superSpec, - } + u.runtime.Close() + u.Init(superSpec) } func newConnPool() *connPool { From 6a857085c56909b5abd5757d638014c9947b941c Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 28 Oct 2021 10:58:30 +0800 Subject: [PATCH 56/99] [layer4proxy] fix udp session close data race bug --- pkg/object/udpproxy/runtime.go | 4 +- pkg/object/udpproxy/session.go | 72 +++++++++++++++++++++------------- 2 files changed, 46 insertions(+), 30 deletions(-) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 1249cadb6d..8700dc033a 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -207,7 +207,7 @@ func (r *runtime) getSession(downstreamAddr *net.UDPAddr) (*session, error) { defer r.mu.Unlock() s, ok := r.sessions[key] - if ok && !s.IsClosed() { + if ok && !s.isClosed() { return s, nil } @@ -248,7 +248,7 @@ func (r *runtime) cleanup() { defer r.mu.Unlock() for k, s := range r.sessions { - if s.IsClosed() { + if s.isClosed() { delete(r.sessions, k) } } diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index 2f056dfe73..65acbe9f4a 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -25,19 +25,22 @@ import ( "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/util/iobufferpool" + "github.com/megaease/easegress/pkg/util/timerpool" ) -type session struct { - upstreamAddr string - downstreamAddr *net.UDPAddr - downstreamIdleTimeout time.Duration - upstreamIdleTimeout time.Duration - - upstreamConn net.Conn - writeBuf chan *iobufferpool.Packet - stopChan chan struct{} - stopped uint32 -} +type ( + session struct { + upstreamAddr string + downstreamAddr *net.UDPAddr + downstreamIdleTimeout time.Duration + upstreamIdleTimeout time.Duration + + upstreamConn net.Conn + writeBuf chan *iobufferpool.Packet + stopChan chan struct{} + stopped uint32 + } +) func newSession(downstreamAddr *net.UDPAddr, upstreamAddr string, upstreamConn net.Conn, downstreamIdleTimeout, upstreamIdleTimeout time.Duration) *session { @@ -94,14 +97,12 @@ func newSession(downstreamAddr *net.UDPAddr, upstreamAddr string, upstreamConn n s.Close() } case <-s.stopChan: - if !atomic.CompareAndSwapUint32(&s.stopped, 0, 1) { - break - } if t != nil { t.Stop() } _ = s.upstreamConn.Close() s.cleanWriteBuf() + return } } }() @@ -111,16 +112,30 @@ func newSession(downstreamAddr *net.UDPAddr, upstreamAddr string, upstreamConn n // Write send data to buffer channel, wait flush to upstream func (s *session) Write(buf *iobufferpool.Packet) error { - if 
atomic.LoadUint32(&s.stopped) == 1 { - return fmt.Errorf("udp connection from %s to %s has closed", s.downstreamAddr.String(), s.upstreamAddr) + select { + case s.writeBuf <- buf: + return nil // try to send data with no check + default: } + var t *time.Timer + if s.upstreamIdleTimeout != 0 { + t = timerpool.Get(s.upstreamIdleTimeout * time.Millisecond) + } else { + t = timerpool.Get(60 * time.Second) + } + defer timerpool.Put(t) + select { case s.writeBuf <- buf: - default: - buf.Release() // if failed, may be try again? + return nil + case <-s.stopChan: + buf.Release() + return nil + case <-t.C: + buf.Release() + return fmt.Errorf("write data to channel timeout") } - return nil } // ListenResponse session listen upstream connection response and send to downstream @@ -137,12 +152,14 @@ func (s *session) ListenResponse(sendTo *net.UDPConn) { nRead, err := s.upstreamConn.Read(buf) if err != nil { - if err, ok := err.(net.Error); ok && err.Timeout() { - return // return or continue to read? + select { + case <-s.stopChan: + return // if session has closed, exit + default: } - if atomic.LoadUint32(&s.stopped) == 0 { - logger.Errorf("udp connection read data from upstream(%s) failed, err: %+v", s.upstreamAddr, err) + if err, ok := err.(net.Error); ok && err.Timeout() { + continue } return } @@ -175,15 +192,14 @@ func (s *session) cleanWriteBuf() { } } -// IsClosed determine session if it is closed -func (s *session) IsClosed() bool { +// isClosed determine session if it is closed, used only for clean sessionMap +func (s *session) isClosed() bool { return atomic.LoadUint32(&s.stopped) == 1 } // Close send session close signal func (s *session) Close() { - select { - case s.stopChan <- struct{}{}: - default: + if atomic.CompareAndSwapUint32(&s.stopped, 0, 1) { + close(s.stopChan) } } From b6f9ea973018b8c9b1cce7764c1a98ce08a95a7d Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 28 Oct 2021 11:27:53 +0800 Subject: [PATCH 57/99] [layer4proxy] no need to protect check when connPool close function been called --- pkg/object/udpproxy/udpserver.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/object/udpproxy/udpserver.go b/pkg/object/udpproxy/udpserver.go index d567cc558a..feff9164e2 100644 --- a/pkg/object/udpproxy/udpserver.go +++ b/pkg/object/udpproxy/udpserver.go @@ -117,9 +117,6 @@ func (c *connPool) close() { return } - c.mu.Lock() - defer c.mu.Unlock() - for _, conn := range c.pool { _ = conn.Close() } From 2da5a57d4aaae240502dc42716697acf7f365ff1 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 28 Oct 2021 16:22:19 +0800 Subject: [PATCH 58/99] [layer4proxy] fix udp proxy buffer bug --- pkg/object/udpproxy/runtime.go | 8 ++++---- pkg/object/udpproxy/session.go | 9 ++++----- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 8700dc033a..b4182dfa90 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -50,6 +50,7 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { spec := superSpec.ObjectSpec().(*Spec) r := &runtime{ superSpec: superSpec, + spec: spec, pool: layer4backend.NewPool(superSpec.Super(), spec.Pool, ""), ipFilters: ipfilter.NewLayer4IPFilters(spec.IPFilter), @@ -101,8 +102,7 @@ func (r *runtime) startServer() { buf := make([]byte, iobufferpool.UDPPacketMaxSize) for { - buf = buf[:0] - n, downstreamAddr, err := r.serverConn.ReadFromUDP(buf) + n, downstreamAddr, err := r.serverConn.ReadFromUDP(buf[:]) if err != nil { select { @@ -235,9 +235,9 @@ func (r *runtime) 
proxy(downstreamAddr *net.UDPAddr, buf []byte) { return } - dup := iobufferpool.UDPBufferPool.Get().([]byte) + dup := iobufferpool.UDPBufferPool.Get().([]byte)[:iobufferpool.UDPPacketMaxSize] n := copy(dup, buf) - err = s.Write(&iobufferpool.Packet{Payload: dup, Len: n}) + err = s.Write(&iobufferpool.Packet{Payload: dup[:n], Len: n}) if err != nil { logger.Errorf("write data to udp session(%s) failed, err: %v", downstreamAddr.IP.String(), err) } diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index 65acbe9f4a..86d7bec55e 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -145,12 +145,11 @@ func (s *session) ListenResponse(sendTo *net.UDPConn) { defer s.Close() for { - buf = buf[:0] if s.upstreamIdleTimeout > 0 { _ = s.upstreamConn.SetReadDeadline(time.Now().Add(s.upstreamIdleTimeout)) } - nRead, err := s.upstreamConn.Read(buf) + n, err := s.upstreamConn.Read(buf) if err != nil { select { case <-s.stopChan: @@ -164,15 +163,15 @@ func (s *session) ListenResponse(sendTo *net.UDPConn) { return } - nWrite, err := sendTo.WriteToUDP(buf[0:nRead], s.downstreamAddr) + nWrite, err := sendTo.WriteToUDP(buf[0:n], s.downstreamAddr) if err != nil { logger.Errorf("udp connection send data to downstream(%s) failed, err: %+v", s.downstreamAddr.String(), err) return } - if nRead != nWrite { + if n != nWrite { logger.Errorf("udp connection send data to downstream(%s) failed, should write %d but written %d", - s.downstreamAddr.String(), nRead, nWrite) + s.downstreamAddr.String(), n, nWrite) return } } From 9a6803a815c1322b82ab78914b1d29a82139e112 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 28 Oct 2021 22:01:25 +0800 Subject: [PATCH 59/99] [layer4proxy] optimization udp proxy buffer len setting --- pkg/object/udpproxy/runtime.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index b4182dfa90..e544661b11 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -235,9 +235,9 @@ func (r *runtime) proxy(downstreamAddr *net.UDPAddr, buf []byte) { return } - dup := iobufferpool.UDPBufferPool.Get().([]byte)[:iobufferpool.UDPPacketMaxSize] + dup := iobufferpool.UDPBufferPool.Get().([]byte)[:len(buf)] n := copy(dup, buf) - err = s.Write(&iobufferpool.Packet{Payload: dup[:n], Len: n}) + err = s.Write(&iobufferpool.Packet{Payload: dup, Len: n}) if err != nil { logger.Errorf("write data to udp session(%s) failed, err: %v", downstreamAddr.IP.String(), err) } From 51a0d43216e8da8ede6b538e901be7c279a0ac96 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 28 Oct 2021 22:26:02 +0800 Subject: [PATCH 60/99] [layer4proxy] simplify backend server spec name --- pkg/object/tcpproxy/spec.go | 4 ++-- pkg/object/udpproxy/session.go | 2 +- pkg/object/udpproxy/spec.go | 4 ++-- pkg/util/layer4backend/backendserver.go | 4 ++-- pkg/util/layer4backend/pool.go | 7 ++++--- pkg/util/layer4backend/spec.go | 6 +++--- 6 files changed, 14 insertions(+), 13 deletions(-) diff --git a/pkg/object/tcpproxy/spec.go b/pkg/object/tcpproxy/spec.go index 13446ac854..32c979d379 100644 --- a/pkg/object/tcpproxy/spec.go +++ b/pkg/object/tcpproxy/spec.go @@ -32,8 +32,8 @@ type ( MaxConnections uint32 `yaml:"maxConns" jsonschema:"omitempty,minimum=1"` ConnectTimeout uint32 `yaml:"connectTimeout" jsonschema:"omitempty"` - Pool *layer4backend.PoolSpec `yaml:"pool" jsonschema:"required"` - IPFilter *ipfilter.Spec `yaml:"ipFilters,omitempty" jsonschema:"omitempty"` + Pool 
*layer4backend.Spec `yaml:"pool" jsonschema:"required"` + IPFilter *ipfilter.Spec `yaml:"ipFilters,omitempty" jsonschema:"omitempty"` } ) diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index 86d7bec55e..ef86f22eea 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -52,7 +52,7 @@ func newSession(downstreamAddr *net.UDPAddr, upstreamAddr string, upstreamConn n downstreamIdleTimeout: downstreamIdleTimeout, writeBuf: make(chan *iobufferpool.Packet, 512), - stopChan: make(chan struct{}, 1), + stopChan: make(chan struct{}), } go func() { diff --git a/pkg/object/udpproxy/spec.go b/pkg/object/udpproxy/spec.go index ac5d3cfa9f..3a094b615e 100644 --- a/pkg/object/udpproxy/spec.go +++ b/pkg/object/udpproxy/spec.go @@ -33,8 +33,8 @@ type ( DownstreamIdleTimeout uint32 `yaml:"downstreamIdleTimeout" jsonschema:"omitempty,minimum=1"` UpstreamIdleTimeout uint32 `yaml:"upstreamIdleTimeout" jsonschema:"omitempty,minimum=1"` - Pool *layer4backend.PoolSpec `yaml:"pool" jsonschema:"required"` - IPFilter *ipfilter.Spec `yaml:"ipFilters,omitempty" jsonschema:"omitempty"` + Pool *layer4backend.Spec `yaml:"pool" jsonschema:"required"` + IPFilter *ipfilter.Spec `yaml:"ipFilters,omitempty" jsonschema:"omitempty"` } ) diff --git a/pkg/util/layer4backend/backendserver.go b/pkg/util/layer4backend/backendserver.go index db80db8f96..440052bed1 100644 --- a/pkg/util/layer4backend/backendserver.go +++ b/pkg/util/layer4backend/backendserver.go @@ -43,7 +43,7 @@ const ( type ( servers struct { - poolSpec *PoolSpec + poolSpec *Spec super *supervisor.Supervisor mutex sync.Mutex @@ -74,7 +74,7 @@ type ( } ) -func newServers(super *supervisor.Supervisor, poolSpec *PoolSpec) *servers { +func newServers(super *supervisor.Supervisor, poolSpec *Spec) *servers { s := &servers{ poolSpec: poolSpec, super: super, diff --git a/pkg/util/layer4backend/pool.go b/pkg/util/layer4backend/pool.go index c9aea988a1..99be9d0ac9 100644 --- a/pkg/util/layer4backend/pool.go +++ b/pkg/util/layer4backend/pool.go @@ -32,7 +32,7 @@ type ( // pool backend server pool poolRules struct { - spec *PoolSpec + spec *Spec tagPrefix string servers *servers @@ -40,7 +40,7 @@ type ( ) // NewPool create backend server pool -func NewPool(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) *Pool { +func NewPool(super *supervisor.Supervisor, spec *Spec, tagPrefix string) *Pool { p := &Pool{} p.rules.Store(&poolRules{ @@ -67,12 +67,13 @@ func (p *Pool) Close() { } // ReloadRules reload backend servers pool rule -func (p *Pool) ReloadRules(super *supervisor.Supervisor, spec *PoolSpec, tagPrefix string) { +func (p *Pool) ReloadRules(super *supervisor.Supervisor, spec *Spec, tagPrefix string) { old := p.rules.Load().(*poolRules) if reflect.DeepEqual(old.spec, spec) { return } p.Close() + p.rules.Store(&poolRules{ spec: spec, diff --git a/pkg/util/layer4backend/spec.go b/pkg/util/layer4backend/spec.go index 73e28d3083..0186b37f0e 100644 --- a/pkg/util/layer4backend/spec.go +++ b/pkg/util/layer4backend/spec.go @@ -19,8 +19,8 @@ package layer4backend import "fmt" -// PoolSpec describes a pool of servers. -type PoolSpec struct { +// Spec describes a pool of servers. +type Spec struct { ServiceRegistry string `yaml:"serviceRegistry" jsonschema:"omitempty"` ServiceName string `yaml:"serviceName" jsonschema:"omitempty"` Servers []*Server `yaml:"servers" jsonschema:"omitempty"` @@ -29,7 +29,7 @@ type PoolSpec struct { } // Validate validates poolSpec. 
-func (s *PoolSpec) Validate() error { +func (s *Spec) Validate() error { if s.ServiceName == "" && len(s.Servers) == 0 { return fmt.Errorf("both serviceName and servers are empty") } From a0c9290a5a442889a3e5a26a9e43c4ae753ef375 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sat, 30 Oct 2021 19:22:24 +0800 Subject: [PATCH 61/99] [udpproxy] rename (downstream/upstream to client/server, oldpool to oldRules) --- pkg/object/udpproxy/runtime.go | 60 ++++++++++++------------ pkg/object/udpproxy/session.go | 70 ++++++++++++++-------------- pkg/object/udpproxy/spec.go | 6 +-- pkg/util/ipfilter/layer4ipfilters.go | 4 +- pkg/util/layer4backend/pool.go | 4 +- 5 files changed, 72 insertions(+), 72 deletions(-) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index e544661b11..916944d4c3 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -102,7 +102,7 @@ func (r *runtime) startServer() { buf := make([]byte, iobufferpool.UDPPacketMaxSize) for { - n, downstreamAddr, err := r.serverConn.ReadFromUDP(buf[:]) + n, clientAddr, err := r.serverConn.ReadFromUDP(buf[:]) if err != nil { select { @@ -123,20 +123,20 @@ func (r *runtime) startServer() { } if r.ipFilters != nil { - if !r.ipFilters.AllowIP(downstreamAddr.IP.String()) { - logger.Debugf("discard udp packet from %s send to udp server(:%d)", downstreamAddr.IP.String(), r.spec.Port) + if !r.ipFilters.AllowIP(clientAddr.IP.String()) { + logger.Debugf("discard udp packet from %s send to udp server(:%d)", clientAddr.IP.String(), r.spec.Port) continue } } if !r.spec.HasResponse { - if err := r.sendOneShot(cp, downstreamAddr, buf[0:n]); err != nil { + if err := r.sendOneShot(cp, clientAddr, buf[0:n]); err != nil { logger.Errorf("%s", err.Error()) } continue } - r.proxy(downstreamAddr, buf[0:n]) + r.proxy(clientAddr, buf[0:n]) } }() @@ -154,54 +154,54 @@ func (r *runtime) startServer() { }() } -func (r *runtime) getUpstreamConn(pool *connPool, downstreamAddr *net.UDPAddr) (net.Conn, string, error) { - server, err := r.pool.Next(downstreamAddr.IP.String()) +func (r *runtime) getServerConn(pool *connPool, clientAddr *net.UDPAddr) (net.Conn, string, error) { + server, err := r.pool.Next(clientAddr.IP.String()) if err != nil { - return nil, "", fmt.Errorf("can not get upstream addr for udp connection(:%d)", r.spec.Port) + return nil, "", fmt.Errorf("can not get server addr for udp connection(:%d)", r.spec.Port) } - var upstreamConn net.Conn + var serverConn net.Conn if pool != nil { - upstreamConn = pool.get(server.Addr) - if upstreamConn != nil { - return upstreamConn, server.Addr, nil + serverConn = pool.get(server.Addr) + if serverConn != nil { + return serverConn, server.Addr, nil } } addr, err := net.ResolveUDPAddr("udp", server.Addr) if err != nil { - return nil, server.Addr, fmt.Errorf("parse upstream addr(%s) to udp addr failed, err: %+v", server.Addr, err) + return nil, server.Addr, fmt.Errorf("parse server addr(%s) to udp addr failed, err: %+v", server.Addr, err) } - upstreamConn, err = net.DialUDP("udp", nil, addr) + serverConn, err = net.DialUDP("udp", nil, addr) if err != nil { - return nil, server.Addr, fmt.Errorf("dial to upstream addr(%s) failed, err: %+v", server.Addr, err) + return nil, server.Addr, fmt.Errorf("dial to server addr(%s) failed, err: %+v", server.Addr, err) } if pool != nil { - pool.put(server.Addr, upstreamConn) + pool.put(server.Addr, serverConn) } - return upstreamConn, server.Addr, nil + return serverConn, server.Addr, nil } -func (r *runtime) sendOneShot(pool *connPool, 
downstreamAddr *net.UDPAddr, buf []byte) error { - upstreamConn, upstreamAddr, err := r.getUpstreamConn(pool, downstreamAddr) +func (r *runtime) sendOneShot(pool *connPool, clientAddr *net.UDPAddr, buf []byte) error { + serverConn, serverAddr, err := r.getServerConn(pool, clientAddr) if err != nil { return err } - n, err := upstreamConn.Write(buf) + n, err := serverConn.Write(buf) if err != nil { - return fmt.Errorf("sned data to %s failed, err: %+v", upstreamAddr, err) + return fmt.Errorf("sned data to %s failed, err: %+v", serverAddr, err) } if n != len(buf) { - return fmt.Errorf("failed to send full packet to %s, read %d but send %d", upstreamAddr, len(buf), n) + return fmt.Errorf("failed to send full packet to %s, read %d but send %d", serverAddr, len(buf), n) } return nil } -func (r *runtime) getSession(downstreamAddr *net.UDPAddr) (*session, error) { - key := downstreamAddr.String() +func (r *runtime) getSession(clientAddr *net.UDPAddr) (*session, error) { + key := clientAddr.String() r.mu.Lock() defer r.mu.Unlock() @@ -215,21 +215,21 @@ func (r *runtime) getSession(downstreamAddr *net.UDPAddr) (*session, error) { go func() { s.Close() }() } - upstreamConn, upstreamAddr, err := r.getUpstreamConn(nil, downstreamAddr) + serverConn, serverAddr, err := r.getServerConn(nil, clientAddr) if err != nil { return nil, err } - s = newSession(downstreamAddr, upstreamAddr, upstreamConn, - time.Duration(r.spec.UpstreamIdleTimeout)*time.Millisecond, time.Duration(r.spec.DownstreamIdleTimeout)*time.Millisecond) + s = newSession(clientAddr, serverAddr, serverConn, + time.Duration(r.spec.ServerIdleTimeout)*time.Millisecond, time.Duration(r.spec.ClientIdleTimeout)*time.Millisecond) s.ListenResponse(r.serverConn) r.sessions[key] = s return s, nil } -func (r *runtime) proxy(downstreamAddr *net.UDPAddr, buf []byte) { - s, err := r.getSession(downstreamAddr) +func (r *runtime) proxy(clientAddr *net.UDPAddr, buf []byte) { + s, err := r.getSession(clientAddr) if err != nil { logger.Errorf("%s", err.Error()) return @@ -239,7 +239,7 @@ func (r *runtime) proxy(downstreamAddr *net.UDPAddr, buf []byte) { n := copy(dup, buf) err = s.Write(&iobufferpool.Packet{Payload: dup, Len: n}) if err != nil { - logger.Errorf("write data to udp session(%s) failed, err: %v", downstreamAddr.IP.String(), err) + logger.Errorf("write data to udp session(%s) failed, err: %v", clientAddr.IP.String(), err) } } diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index ef86f22eea..258d44c761 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -30,26 +30,26 @@ import ( type ( session struct { - upstreamAddr string - downstreamAddr *net.UDPAddr - downstreamIdleTimeout time.Duration - upstreamIdleTimeout time.Duration - - upstreamConn net.Conn - writeBuf chan *iobufferpool.Packet - stopChan chan struct{} - stopped uint32 + clientAddr *net.UDPAddr + serverAddr string + clientIdleTimeout time.Duration + serverIdleTimeout time.Duration + + serverConn net.Conn + writeBuf chan *iobufferpool.Packet + stopChan chan struct{} + stopped uint32 } ) -func newSession(downstreamAddr *net.UDPAddr, upstreamAddr string, upstreamConn net.Conn, - downstreamIdleTimeout, upstreamIdleTimeout time.Duration) *session { +func newSession(clientAddr *net.UDPAddr, serverAddr string, serverConn net.Conn, + clientIdleTimeout, serverIdleTimeout time.Duration) *session { s := session{ - upstreamAddr: upstreamAddr, - downstreamAddr: downstreamAddr, - upstreamConn: upstreamConn, - upstreamIdleTimeout: 
upstreamIdleTimeout, - downstreamIdleTimeout: downstreamIdleTimeout, + serverAddr: serverAddr, + clientAddr: clientAddr, + serverConn: serverConn, + serverIdleTimeout: serverIdleTimeout, + clientIdleTimeout: clientIdleTimeout, writeBuf: make(chan *iobufferpool.Packet, 512), stopChan: make(chan struct{}), @@ -59,8 +59,8 @@ func newSession(downstreamAddr *net.UDPAddr, upstreamAddr string, upstreamConn n var t *time.Timer var idleCheck <-chan time.Time - if downstreamIdleTimeout > 0 { - t = time.NewTimer(downstreamIdleTimeout) + if clientIdleTimeout > 0 { + t = time.NewTimer(clientIdleTimeout) idleCheck = t.C } @@ -78,29 +78,29 @@ func newSession(downstreamAddr *net.UDPAddr, upstreamAddr string, upstreamConn n if !t.Stop() { <-t.C } - t.Reset(downstreamIdleTimeout) + t.Reset(clientIdleTimeout) } bufLen := len(buf.Payload) - n, err := s.upstreamConn.Write(buf.Bytes()) + n, err := s.serverConn.Write(buf.Bytes()) buf.Release() if err != nil { - logger.Errorf("udp connection flush data to upstream(%s) failed, err: %+v", upstreamAddr, err) + logger.Errorf("udp connection flush data to server(%s) failed, err: %+v", serverAddr, err) s.Close() continue } if bufLen != n { - logger.Errorf("udp connection flush data to upstream(%s) failed, should write %d but written %d", - upstreamAddr, bufLen, n) + logger.Errorf("udp connection flush data to server(%s) failed, should write %d but written %d", + serverAddr, bufLen, n) s.Close() } case <-s.stopChan: if t != nil { t.Stop() } - _ = s.upstreamConn.Close() + _ = s.serverConn.Close() s.cleanWriteBuf() return } @@ -110,7 +110,7 @@ func newSession(downstreamAddr *net.UDPAddr, upstreamAddr string, upstreamConn n return &s } -// Write send data to buffer channel, wait flush to upstream +// Write send data to buffer channel, wait flush to server func (s *session) Write(buf *iobufferpool.Packet) error { select { case s.writeBuf <- buf: @@ -119,8 +119,8 @@ func (s *session) Write(buf *iobufferpool.Packet) error { } var t *time.Timer - if s.upstreamIdleTimeout != 0 { - t = timerpool.Get(s.upstreamIdleTimeout * time.Millisecond) + if s.serverIdleTimeout != 0 { + t = timerpool.Get(s.serverIdleTimeout * time.Millisecond) } else { t = timerpool.Get(60 * time.Second) } @@ -138,18 +138,18 @@ func (s *session) Write(buf *iobufferpool.Packet) error { } } -// ListenResponse session listen upstream connection response and send to downstream +// ListenResponse session listen server connection response and send to client func (s *session) ListenResponse(sendTo *net.UDPConn) { go func() { buf := iobufferpool.UDPBufferPool.Get().([]byte) defer s.Close() for { - if s.upstreamIdleTimeout > 0 { - _ = s.upstreamConn.SetReadDeadline(time.Now().Add(s.upstreamIdleTimeout)) + if s.serverIdleTimeout > 0 { + _ = s.serverConn.SetReadDeadline(time.Now().Add(s.serverIdleTimeout)) } - n, err := s.upstreamConn.Read(buf) + n, err := s.serverConn.Read(buf) if err != nil { select { case <-s.stopChan: @@ -163,15 +163,15 @@ func (s *session) ListenResponse(sendTo *net.UDPConn) { return } - nWrite, err := sendTo.WriteToUDP(buf[0:n], s.downstreamAddr) + nWrite, err := sendTo.WriteToUDP(buf[0:n], s.clientAddr) if err != nil { - logger.Errorf("udp connection send data to downstream(%s) failed, err: %+v", s.downstreamAddr.String(), err) + logger.Errorf("udp connection send data to client(%s) failed, err: %+v", s.clientAddr.String(), err) return } if n != nWrite { - logger.Errorf("udp connection send data to downstream(%s) failed, should write %d but written %d", - s.downstreamAddr.String(), n, nWrite) + 
logger.Errorf("udp connection send data to client(%s) failed, should write %d but written %d", + s.clientAddr.String(), n, nWrite) return } } diff --git a/pkg/object/udpproxy/spec.go b/pkg/object/udpproxy/spec.go index 3a094b615e..03c8392d0a 100644 --- a/pkg/object/udpproxy/spec.go +++ b/pkg/object/udpproxy/spec.go @@ -29,9 +29,9 @@ type ( Name string `yaml:"name" json:"name" jsonschema:"required"` Port uint16 `yaml:"port" json:"port" jsonschema:"required"` - HasResponse bool `yaml:"hasResponse" jsonschema:"required"` - DownstreamIdleTimeout uint32 `yaml:"downstreamIdleTimeout" jsonschema:"omitempty,minimum=1"` - UpstreamIdleTimeout uint32 `yaml:"upstreamIdleTimeout" jsonschema:"omitempty,minimum=1"` + HasResponse bool `yaml:"hasResponse" jsonschema:"required"` + ClientIdleTimeout uint32 `yaml:"clientIdleTimeout" jsonschema:"omitempty,minimum=1"` + ServerIdleTimeout uint32 `yaml:"serverIdleTimeout" jsonschema:"omitempty,minimum=1"` Pool *layer4backend.Spec `yaml:"pool" jsonschema:"required"` IPFilter *ipfilter.Spec `yaml:"ipFilters,omitempty" jsonschema:"omitempty"` diff --git a/pkg/util/ipfilter/layer4ipfilters.go b/pkg/util/ipfilter/layer4ipfilters.go index 7112b01799..e0e42f4a0a 100644 --- a/pkg/util/ipfilter/layer4ipfilters.go +++ b/pkg/util/ipfilter/layer4ipfilters.go @@ -72,9 +72,9 @@ func (i *Layer4IpFilters) ReloadRules(spec *Spec) { i.rules.Store(rules) } -func (r *ipFiltersRules) pass(downstreamIP string) bool { +func (r *ipFiltersRules) pass(clientIP string) bool { if r.ipFilter == nil { return true } - return r.ipFilter.Allow(downstreamIP) + return r.ipFilter.Allow(clientIP) } diff --git a/pkg/util/layer4backend/pool.go b/pkg/util/layer4backend/pool.go index 99be9d0ac9..a3ec41a18c 100644 --- a/pkg/util/layer4backend/pool.go +++ b/pkg/util/layer4backend/pool.go @@ -61,8 +61,8 @@ func (p *Pool) Next(cliAddr string) (*Server, error) { // Close shutdown backend servers watcher func (p *Pool) Close() { if old := p.rules.Load(); old != nil { - oldPool := old.(*poolRules) - oldPool.servers.close() + oldRules := old.(*poolRules) + oldRules.servers.close() } } From 3fb339d52babf6c4ca9cf7e31044b833f1449996 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sat, 30 Oct 2021 19:24:28 +0800 Subject: [PATCH 62/99] [udpproxy] fix bug(closing old rules should happen after new rules are stored) --- pkg/util/layer4backend/pool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/util/layer4backend/pool.go b/pkg/util/layer4backend/pool.go index a3ec41a18c..9379506a21 100644 --- a/pkg/util/layer4backend/pool.go +++ b/pkg/util/layer4backend/pool.go @@ -72,7 +72,6 @@ func (p *Pool) ReloadRules(super *supervisor.Supervisor, spec *Spec, tagPrefix s if reflect.DeepEqual(old.spec, spec) { return } - p.Close() p.rules.Store(&poolRules{ spec: spec, @@ -80,4 +79,5 @@ func (p *Pool) ReloadRules(super *supervisor.Supervisor, spec *Spec, tagPrefix s tagPrefix: tagPrefix, servers: newServers(super, spec), }) + p.Close() } From 4670fad3b3972523e67c3699c8f2dab1d396deb8 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sat, 30 Oct 2021 19:26:52 +0800 Subject: [PATCH 63/99] [udpproxy] fix bug(check pool rules before get next server) --- pkg/util/layer4backend/pool.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/util/layer4backend/pool.go b/pkg/util/layer4backend/pool.go index 9379506a21..bec1f03161 100644 --- a/pkg/util/layer4backend/pool.go +++ b/pkg/util/layer4backend/pool.go @@ -18,6 +18,7 @@ package layer4backend import ( + "fmt" "reflect" "sync/atomic" @@ -55,6 +56,9 @@ func NewPool(super 
*supervisor.Supervisor, spec *Spec, tagPrefix string) *Pool { // Next choose one backend for proxy func (p *Pool) Next(cliAddr string) (*Server, error) { rules := p.rules.Load().(*poolRules) + if rules == nil { + return nil, fmt.Errorf("no server available") + } return rules.servers.next(cliAddr) } From f69f29a21d1d78db483f107ffe6cbf7a449e1c3f Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sat, 30 Oct 2021 19:30:25 +0800 Subject: [PATCH 64/99] [udpproxy] add comment for Layer4IPFilters --- pkg/util/ipfilter/layer4ipfilters.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/util/ipfilter/layer4ipfilters.go b/pkg/util/ipfilter/layer4ipfilters.go index e0e42f4a0a..160c47b773 100644 --- a/pkg/util/ipfilter/layer4ipfilters.go +++ b/pkg/util/ipfilter/layer4ipfilters.go @@ -23,6 +23,7 @@ import ( ) type ( + // Layer4IpFilters layer4 ip filters Layer4IpFilters struct { rules atomic.Value } @@ -33,6 +34,7 @@ type ( } ) +// NewLayer4IPFilters create layer4 ip filters func NewLayer4IPFilters(spec *Spec) *Layer4IpFilters { if spec == nil { return &Layer4IpFilters{} @@ -46,6 +48,7 @@ func NewLayer4IPFilters(spec *Spec) *Layer4IpFilters { return m } +// AllowIP check whether the IP is allowed to pass func (i *Layer4IpFilters) AllowIP(ip string) bool { rules := i.rules.Load().(*ipFiltersRules) if rules == nil || rules.spec == nil { @@ -54,6 +57,7 @@ func (i *Layer4IpFilters) AllowIP(ip string) bool { return rules.ipFilter.Allow(ip) } +// ReloadRules reload layer4 ip filters rules func (i *Layer4IpFilters) ReloadRules(spec *Spec) { if spec == nil { i.rules.Store(&ipFiltersRules{}) From f8132bbc0541ea59c99f1bfbb0a3059766b80e9c Mon Sep 17 00:00:00 2001 From: jxd Date: Sat, 30 Oct 2021 19:38:20 +0800 Subject: [PATCH 65/99] fix wrong comment Co-authored-by: Bomin Zhang --- pkg/object/tcpproxy/connection.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index d39d66a609..f118c742b2 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -59,7 +59,7 @@ type Connection struct { } // NewDownstreamConn wrap connection create from client -// @param remoteAddr client addr for udp proxy use +// @param remoteAddr client addr for tcp proxy use func NewDownstreamConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan struct{}) *Connection { clientConn := &Connection{ connected: 1, From c00185f9d85f3264988ff0bf7f8706569d6056b0 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sat, 30 Oct 2021 22:45:15 +0800 Subject: [PATCH 66/99] [tcpproxy] rename downstrean/upstream to client/server in tcpproxy module --- pkg/object/tcpproxy/connection.go | 46 ++++++++++--------------------- pkg/object/tcpproxy/listener.go | 17 +++++------- pkg/object/tcpproxy/runtime.go | 46 +++++++++++++++---------------- 3 files changed, 44 insertions(+), 65 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index d39d66a609..8b22fa530a 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -58,10 +58,9 @@ type Connection struct { onClose func(event ConnectionEvent) } -// NewDownstreamConn wrap connection create from client -// @param remoteAddr client addr for udp proxy use -func NewDownstreamConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan struct{}) *Connection { - clientConn := &Connection{ +// NewClientConn wrap connection create from client +func NewClientConn(conn net.Conn, listenerStopChan chan struct{}) *Connection { + 
return &Connection{ connected: 1, rawConn: conn, localAddr: conn.LocalAddr(), @@ -73,23 +72,6 @@ func NewDownstreamConn(conn net.Conn, remoteAddr net.Addr, listenerStopChan chan connStopChan: make(chan struct{}), listenerStopChan: listenerStopChan, } - - if remoteAddr != nil { - clientConn.remoteAddr = remoteAddr - } else { - clientConn.remoteAddr = conn.RemoteAddr() // udp server rawConn can not get remote address - } - return clientConn -} - -// LocalAddr get connection local addr -func (c *Connection) LocalAddr() net.Addr { - return c.localAddr -} - -// RemoteAddr get connection remote addr(it's nil for udp server rawConn) -func (c *Connection) RemoteAddr() net.Addr { - return c.rawConn.RemoteAddr() } // SetOnRead set connection read handle @@ -373,19 +355,19 @@ func (c *Connection) doWriteIO() (bytesSent int64, err error) { return } -// UpstreamConnection wrap connection to upstream -type UpstreamConnection struct { +// ServerConnection wrap tcp connection to backend server +type ServerConnection struct { Connection connectTimeout time.Duration connectOnce sync.Once } -// NewUpstreamConn construct tcp upstream connection -func NewUpstreamConn(connectTimeout uint32, upstreamAddr net.Addr, listenerStopChan chan struct{}) *UpstreamConnection { - conn := &UpstreamConnection{ +// NewServerConn construct tcp server connection +func NewServerConn(connectTimeout uint32, serverAddr net.Addr, listenerStopChan chan struct{}) *ServerConnection { + conn := &ServerConnection{ Connection: Connection{ connected: 1, - remoteAddr: upstreamAddr, + remoteAddr: serverAddr, writeBufferChan: make(chan *iobufferpool.StreamBuffer, 8), @@ -398,14 +380,14 @@ func NewUpstreamConn(connectTimeout uint32, upstreamAddr net.Addr, listenerStopC return conn } -func (u *UpstreamConnection) connect() (event ConnectionEvent, err error) { +func (u *ServerConnection) connect() (event ConnectionEvent, err error) { timeout := u.connectTimeout if timeout == 0 { timeout = 10 * time.Second } addr := u.remoteAddr if addr == nil { - return ConnectFailed, errors.New("upstream addr is nil") + return ConnectFailed, errors.New("server addr is nil") } u.rawConn, err = net.DialTimeout("tcp", addr.String(), timeout) if err != nil { @@ -427,15 +409,15 @@ func (u *UpstreamConnection) connect() (event ConnectionEvent, err error) { return } -// Connect tcp upstream connect to backend server -func (u *UpstreamConnection) Connect() (err error) { +// Connect create backend server tcp connection +func (u *ServerConnection) Connect() (err error) { u.connectOnce.Do(func() { var event ConnectionEvent event, err = u.connect() if err == nil { u.Start() } - logger.Debugf("tcp connect upstream(%s), event: %s, err: %+v", u.remoteAddr, event, err) + logger.Debugf("tcp connect server(%s), event: %s, err: %+v", u.remoteAddr, event, err) }) return } diff --git a/pkg/object/tcpproxy/listener.go b/pkg/object/tcpproxy/listener.go index f85418ffaf..75113222f2 100644 --- a/pkg/object/tcpproxy/listener.go +++ b/pkg/object/tcpproxy/listener.go @@ -38,8 +38,8 @@ type listener struct { stopChan chan struct{} maxConns uint32 // maxConn for tcp listener - tcpListener *limitlistener.LimitListener // tcp listener with accept limit - onAccept func(conn net.Conn, listenerStop chan struct{}) // tcp accept handle + listener *limitlistener.LimitListener // tcp listener with accept limit + onAccept func(conn net.Conn, listenerStop chan struct{}) // tcp accept handle } func newListener(spec *Spec, onAccept func(conn net.Conn, listenerStop chan struct{})) *listener { @@ -62,14 
+62,14 @@ func (l *listener) listen() error { return err } // wrap tcp listener with accept limit - l.tcpListener = limitlistener.NewLimitListener(tl, l.maxConns) + l.listener = limitlistener.NewLimitListener(tl, l.maxConns) return nil } func (l *listener) acceptEventLoop() { for { - tconn, err := l.tcpListener.Accept() + tconn, err := l.listener.Accept() if err == nil { go l.onAccept(tconn, l.stopChan) continue @@ -103,15 +103,12 @@ func (l *listener) acceptEventLoop() { } func (l *listener) setMaxConnection(maxConn uint32) { - l.tcpListener.SetMaxConnection(maxConn) + l.listener.SetMaxConnection(maxConn) } func (l *listener) close() (err error) { - l.mutex.Lock() - defer l.mutex.Unlock() - - if l.tcpListener != nil { - err = l.tcpListener.Close() + if l.listener != nil { + err = l.listener.Close() } close(l.stopChan) return err diff --git a/pkg/object/tcpproxy/runtime.go b/pkg/object/tcpproxy/runtime.go index 244b591777..be7ce0d154 100644 --- a/pkg/object/tcpproxy/runtime.go +++ b/pkg/object/tcpproxy/runtime.go @@ -19,7 +19,6 @@ package tcpproxy import ( "fmt" - "github.com/megaease/easegress/pkg/util/layer4backend" "net" "reflect" "sync/atomic" @@ -29,6 +28,7 @@ import ( "github.com/megaease/easegress/pkg/supervisor" "github.com/megaease/easegress/pkg/util/iobufferpool" "github.com/megaease/easegress/pkg/util/ipfilter" + "github.com/megaease/easegress/pkg/util/layer4backend" ) const ( @@ -270,61 +270,61 @@ func (r *runtime) handleEventClose(e *eventClose) { func (r *runtime) onAccept() func(conn net.Conn, listenerStop chan struct{}) { return func(rawConn net.Conn, listenerStop chan struct{}) { - downstream := rawConn.RemoteAddr().(*net.TCPAddr).IP.String() - if r.ipFilters != nil && !r.ipFilters.AllowIP(downstream) { + clientIP := rawConn.RemoteAddr().(*net.TCPAddr).IP.String() + if r.ipFilters != nil && !r.ipFilters.AllowIP(clientIP) { _ = rawConn.Close() logger.Infof("close tcp connection from %s to %s which ip is not allowed", rawConn.RemoteAddr().String(), rawConn.LocalAddr().String()) return } - server, err := r.pool.Next(downstream) + server, err := r.pool.Next(clientIP) if err != nil { _ = rawConn.Close() - logger.Errorf("close tcp connection due to no available upstream server, local addr: %s, err: %+v", + logger.Errorf("close tcp connection due to no available server, local addr: %s, err: %+v", rawConn.LocalAddr(), err) return } - upstreamAddr, _ := net.ResolveTCPAddr("tcp", server.Addr) - upstreamConn := NewUpstreamConn(r.spec.ConnectTimeout, upstreamAddr, listenerStop) - if err := upstreamConn.Connect(); err != nil { - logger.Errorf("upstream connect failed(name: %s, addr: %s), err: %+v", + serverAddr, _ := net.ResolveTCPAddr("tcp", server.Addr) + serverConn := NewServerConn(r.spec.ConnectTimeout, serverAddr, listenerStop) + if err := serverConn.Connect(); err != nil { + logger.Errorf("connect to server failed(name: %s, addr: %s), err: %+v", r.spec.Name, rawConn.LocalAddr().String(), err) _ = rawConn.Close() return } - downstreamConn := NewDownstreamConn(rawConn, rawConn.RemoteAddr(), listenerStop) - r.setCallbacks(downstreamConn, upstreamConn) - downstreamConn.Start() // upstream conn start read/write loop when connect is called + clientConn := NewClientConn(rawConn, listenerStop) + r.setCallbacks(clientConn, serverConn) + clientConn.Start() // server conn start read/write loop when connect is called } } -func (r *runtime) setCallbacks(downstreamConn *Connection, upstreamConn *UpstreamConnection) { - downstreamConn.SetOnRead(func(readBuf *iobufferpool.StreamBuffer) { +func 
(r *runtime) setCallbacks(clientConn *Connection, serverConn *ServerConnection) { + clientConn.SetOnRead(func(readBuf *iobufferpool.StreamBuffer) { if readBuf != nil && readBuf.Len() > 0 { - _ = upstreamConn.Write(readBuf) + _ = serverConn.Write(readBuf) } }) - upstreamConn.SetOnRead(func(readBuf *iobufferpool.StreamBuffer) { + serverConn.SetOnRead(func(readBuf *iobufferpool.StreamBuffer) { if readBuf != nil && readBuf.Len() > 0 { - _ = downstreamConn.Write(readBuf) + _ = clientConn.Write(readBuf) } }) - downstreamConn.SetOnClose(func(event ConnectionEvent) { + clientConn.SetOnClose(func(event ConnectionEvent) { if event == RemoteClose { - _ = upstreamConn.Close(FlushWrite, LocalClose) + _ = serverConn.Close(FlushWrite, LocalClose) } else { - _ = upstreamConn.Close(NoFlush, LocalClose) + _ = serverConn.Close(NoFlush, LocalClose) } }) - upstreamConn.SetOnClose(func(event ConnectionEvent) { + serverConn.SetOnClose(func(event ConnectionEvent) { if event == RemoteClose { - _ = downstreamConn.Close(FlushWrite, LocalClose) + _ = clientConn.Close(FlushWrite, LocalClose) } else { - _ = downstreamConn.Close(NoFlush, LocalClose) + _ = clientConn.Close(NoFlush, LocalClose) } }) } From e1002b17cd0f22ef82330fcbb795f7553f1eb238 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Mon, 1 Nov 2021 15:24:16 +0800 Subject: [PATCH 67/99] [tcpproxy] remove connected param & simplify connect method --- pkg/object/tcpproxy/connection.go | 123 ++++++++------------ pkg/util/iobufferpool/stream_buffer_pool.go | 16 +-- 2 files changed, 56 insertions(+), 83 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 8b22fa530a..29807b4091 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -32,12 +32,16 @@ import ( "github.com/megaease/easegress/pkg/util/timerpool" ) +var tcpBufferPool = sync.Pool{ + New: func() interface{} { + return make([]byte, iobufferpool.DefaultBufferReadCapacity) + }, +} + // Connection wrap tcp connection type Connection struct { - rawConn net.Conn - connected uint32 - closed uint32 - + closed uint32 + rawConn net.Conn localAddr net.Addr remoteAddr net.Addr @@ -61,7 +65,6 @@ type Connection struct { // NewClientConn wrap connection create from client func NewClientConn(conn net.Conn, listenerStopChan chan struct{}) *Connection { return &Connection{ - connected: 1, rawConn: conn, localAddr: conn.LocalAddr(), remoteAddr: conn.RemoteAddr(), @@ -92,21 +95,20 @@ func (c *Connection) SetOnClose(onclose func(event ConnectionEvent)) { // Start running connection read/write loop func (c *Connection) Start() { c.startOnce.Do(func() { - c.startRWLoop() + c.goWithRecover(func() { + c.startReadLoop() + }, func(r interface{}) { + _ = c.Close(NoFlush, LocalClose) + }) + + c.goWithRecover(func() { + c.startWriteLoop() + }, func(r interface{}) { + _ = c.Close(NoFlush, LocalClose) + }) }) } -// State get connection running state -func (c *Connection) State() ConnState { - if atomic.LoadUint32(&c.closed) == 1 { - return ConnClosed - } - if atomic.LoadUint32(&c.connected) == 1 { - return ConnActive - } - return ConnInit -} - func (c *Connection) goWithRecover(handler func(), recoverHandler func(r interface{})) { go func() { defer func() { @@ -122,20 +124,6 @@ func (c *Connection) goWithRecover(handler func(), recoverHandler func(r interfa }() } -func (c *Connection) startRWLoop() { - c.goWithRecover(func() { - c.startReadLoop() - }, func(r interface{}) { - _ = c.Close(NoFlush, LocalClose) - }) - - c.goWithRecover(func() { - 
c.startWriteLoop() - }, func(r interface{}) { - _ = c.Close(NoFlush, LocalClose) - }) -} - // Write receive other connection data func (c *Connection) Write(buf *iobufferpool.StreamBuffer) (err error) { defer func() { @@ -172,24 +160,24 @@ func (c *Connection) startReadLoop() { case <-c.listenerStopChan: return default: - bufLen, err := c.doReadIO() + n, err := c.doReadIO() if err != nil { if atomic.LoadUint32(&c.closed) == 1 { logger.Infof("tcp connection exit read loop for connection has closed, local addr: %s, "+ "remote addr: %s, err: %s", c.localAddr.String(), c.remoteAddr.String(), err.Error()) + tcpBufferPool.Put(c.readBuffer) return } if te, ok := err.(net.Error); ok && te.Timeout() { - if bufLen == 0 { + if n == 0 { continue // continue read data, ignore timeout error } } } - if bufLen != 0 && (err == nil || err == io.EOF) { - c.onRead(iobufferpool.NewStreamBuffer(c.readBuffer)) - c.readBuffer = c.readBuffer[:0] + if n != 0 && (err == nil || err == io.EOF) { + c.onRead(iobufferpool.NewStreamBuffer(c.readBuffer[:n])) } if err != nil { @@ -307,7 +295,7 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) func (c *Connection) doReadIO() (bufLen int, err error) { if c.readBuffer == nil { - c.readBuffer = iobufferpool.TCPBufferPool.Get().([]byte) + c.readBuffer = tcpBufferPool.Get().([]byte)[:iobufferpool.DefaultBufferReadCapacity] } // add read deadline setting optimization? @@ -366,7 +354,6 @@ type ServerConnection struct { func NewServerConn(connectTimeout uint32, serverAddr net.Addr, listenerStopChan chan struct{}) *ServerConnection { conn := &ServerConnection{ Connection: Connection{ - connected: 1, remoteAddr: serverAddr, writeBufferChan: make(chan *iobufferpool.StreamBuffer, 8), @@ -380,44 +367,36 @@ func NewServerConn(connectTimeout uint32, serverAddr net.Addr, listenerStopChan return conn } -func (u *ServerConnection) connect() (event ConnectionEvent, err error) { - timeout := u.connectTimeout - if timeout == 0 { - timeout = 10 * time.Second - } - addr := u.remoteAddr - if addr == nil { - return ConnectFailed, errors.New("server addr is nil") - } - u.rawConn, err = net.DialTimeout("tcp", addr.String(), timeout) - if err != nil { - if err == io.EOF { - event = RemoteClose - } else if err, ok := err.(net.Error); ok && err.Timeout() { - event = ConnectTimeout - } else { - event = ConnectFailed - } - return - } - atomic.StoreUint32(&u.connected, 1) - u.localAddr = u.rawConn.LocalAddr() - - _ = u.rawConn.(*net.TCPConn).SetNoDelay(true) - _ = u.rawConn.(*net.TCPConn).SetKeepAlive(true) - event = Connected - return -} - // Connect create backend server tcp connection func (u *ServerConnection) Connect() (err error) { u.connectOnce.Do(func() { - var event ConnectionEvent - event, err = u.connect() - if err == nil { - u.Start() + addr := u.remoteAddr + if addr == nil { + err = errors.New("server addr is nil") + return + } + + timeout := u.connectTimeout + if timeout == 0 { + timeout = 10 * time.Second + } + + u.rawConn, err = net.DialTimeout("tcp", addr.String(), timeout) + if err != nil { + if err == io.EOF { + err = errors.New("server has been closed") + } else if te, ok := err.(net.Error); ok && te.Timeout() { + err = errors.New("connect to server timeout") + } else { + err = errors.New("connect to server failed") + } + return } - logger.Debugf("tcp connect server(%s), event: %s, err: %+v", u.remoteAddr, event, err) + + u.localAddr = u.rawConn.LocalAddr() + _ = u.rawConn.(*net.TCPConn).SetNoDelay(true) + _ = u.rawConn.(*net.TCPConn).SetKeepAlive(true) + 
u.Start() }) return } diff --git a/pkg/util/iobufferpool/stream_buffer_pool.go b/pkg/util/iobufferpool/stream_buffer_pool.go index 3b4808077b..582fa17e10 100644 --- a/pkg/util/iobufferpool/stream_buffer_pool.go +++ b/pkg/util/iobufferpool/stream_buffer_pool.go @@ -18,18 +18,9 @@ package iobufferpool import ( - "sync" - "github.com/valyala/bytebufferpool" ) -// TCPBufferPool tcp buffer pool for tcp connection -var TCPBufferPool = sync.Pool{ - New: func() interface{} { - return make([]byte, DefaultBufferReadCapacity) - }, -} - // StreamBuffer io buffer for stream scene type StreamBuffer struct { payload *bytebufferpool.ByteBuffer @@ -41,16 +32,19 @@ func NewStreamBuffer(buf []byte) *StreamBuffer { payload: bytebufferpool.Get(), eof: false, } + res.payload.Reset() _, _ = res.payload.Write(buf) return res } // NewEOFStreamBuffer create stream buffer with eof sign func NewEOFStreamBuffer() *StreamBuffer { - return &StreamBuffer{ + res := &StreamBuffer{ payload: bytebufferpool.Get(), eof: true, } + res.payload.Reset() + return res } // Bytes return underlying bytes @@ -74,8 +68,8 @@ func (s *StreamBuffer) Release() { if s.payload == nil { return } + s.payload.Reset() bytebufferpool.Put(s.payload) - s.payload = nil } // EOF return eof sign From 802ce1f823e357e3623584574f5479b28091ee4a Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 2 Nov 2021 10:19:34 +0800 Subject: [PATCH 68/99] [tcpproxy] simplify tcp connection --- pkg/object/tcpproxy/connection.go | 54 ++++++++++++------------------- 1 file changed, 21 insertions(+), 33 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 29807b4091..97ca8dcc1e 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -21,7 +21,6 @@ import ( "errors" "io" "net" - "reflect" "runtime/debug" "sync" "sync/atomic" @@ -32,6 +31,8 @@ import ( "github.com/megaease/easegress/pkg/util/timerpool" ) +const writeBufSize = 8 + var tcpBufferPool = sync.Pool{ New: func() interface{} { return make([]byte, iobufferpool.DefaultBufferReadCapacity) @@ -65,15 +66,14 @@ type Connection struct { // NewClientConn wrap connection create from client func NewClientConn(conn net.Conn, listenerStopChan chan struct{}) *Connection { return &Connection{ - rawConn: conn, - localAddr: conn.LocalAddr(), - remoteAddr: conn.RemoteAddr(), - - writeBufferChan: make(chan *iobufferpool.StreamBuffer, 8), - - mu: sync.Mutex{}, - connStopChan: make(chan struct{}), + rawConn: conn, + localAddr: conn.LocalAddr(), + remoteAddr: conn.RemoteAddr(), listenerStopChan: listenerStopChan, + + mu: sync.Mutex{}, + connStopChan: make(chan struct{}), + writeBufferChan: make(chan *iobufferpool.StreamBuffer, writeBufSize), } } @@ -82,11 +82,6 @@ func (c *Connection) SetOnRead(onRead func(buffer *iobufferpool.StreamBuffer)) { c.onRead = onRead } -// OnRead set data read callback -func (c *Connection) OnRead(buffer *iobufferpool.StreamBuffer) { - c.onRead(buffer) -} - // SetOnClose set close callback func (c *Connection) SetOnClose(onclose func(event ConnectionEvent)) { c.onClose = onclose @@ -161,6 +156,10 @@ func (c *Connection) startReadLoop() { return default: n, err := c.doReadIO() + if n > 0 { + c.onRead(iobufferpool.NewStreamBuffer(c.readBuffer[:n])) + } + if err != nil { if atomic.LoadUint32(&c.closed) == 1 { logger.Infof("tcp connection exit read loop for connection has closed, local addr: %s, "+ @@ -176,10 +175,6 @@ func (c *Connection) startReadLoop() { } } - if n != 0 && (err == nil || err == io.EOF) { - 
c.onRead(iobufferpool.NewStreamBuffer(c.readBuffer[:n])) - } - if err != nil { if err == io.EOF { logger.Infof("tcp connection read error, local addr: %s, remote addr: %s, err: %s", @@ -208,7 +203,8 @@ func (c *Connection) startWriteLoop() { } c.appendBuffer(buf) OUTER: - for i := 0; i < 8; i++ { + // Keep reading until write buffer channel is full(write buffer channel size is writeBufSize) + for i := 0; i < writeBufSize-1; i++ { select { case buf, ok := <-c.writeBufferChan: if !ok { @@ -219,8 +215,6 @@ func (c *Connection) startWriteLoop() { break OUTER } } - - _ = c.rawConn.SetWriteDeadline(time.Now().Add(15 * time.Second)) _, err = c.doWrite() } @@ -268,17 +262,10 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) return nil } - // connection failed in client mode - if c.rawConn == nil || reflect.ValueOf(c.rawConn).IsNil() { - return nil - } - // close tcp rawConn read first - if tconn, ok := c.rawConn.(*net.TCPConn); ok { - logger.Debugf("tcp connection closed, local addr: %s, remote addr: %s, event: %s", - c.localAddr.String(), c.remoteAddr.String(), event) - _ = tconn.CloseRead() - } + logger.Debugf("tcp connection closed, local addr: %s, remote addr: %s, event: %s", + c.localAddr.String(), c.remoteAddr.String(), event) + _ = c.rawConn.(*net.TCPConn).CloseRead() // close rawConn recv, then notify read/write loop to exit close(c.connStopChan) @@ -305,6 +292,7 @@ func (c *Connection) doReadIO() (bufLen int, err error) { } func (c *Connection) doWrite() (int64, error) { + _ = c.rawConn.SetWriteDeadline(time.Now().Add(15 * time.Second)) bytesSent, err := c.doWriteIO() if err != nil && atomic.LoadUint32(&c.closed) == 1 { return 0, nil @@ -326,10 +314,10 @@ func (c *Connection) writeBufLen() (bufLen int) { func (c *Connection) doWriteIO() (bytesSent int64, err error) { buffers := c.writeBuffers bytesSent, err = buffers.WriteTo(c.rawConn) - if err != nil { return bytesSent, err } + for i, buf := range c.ioBuffers { c.ioBuffers[i] = nil c.writeBuffers[i] = nil @@ -356,7 +344,7 @@ func NewServerConn(connectTimeout uint32, serverAddr net.Addr, listenerStopChan Connection: Connection{ remoteAddr: serverAddr, - writeBufferChan: make(chan *iobufferpool.StreamBuffer, 8), + writeBufferChan: make(chan *iobufferpool.StreamBuffer, writeBufSize), mu: sync.Mutex{}, connStopChan: make(chan struct{}), From 24cebfd291f8a45c1975c2b020ffc5ace99cc843 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 2 Nov 2021 14:00:10 +0800 Subject: [PATCH 69/99] [udpproxy] get udp session by client ip --- pkg/object/udpproxy/runtime.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 916944d4c3..c367433f32 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -162,7 +162,7 @@ func (r *runtime) getServerConn(pool *connPool, clientAddr *net.UDPAddr) (net.Co var serverConn net.Conn if pool != nil { - serverConn = pool.get(server.Addr) + serverConn = pool.get(clientAddr.String()) if serverConn != nil { return serverConn, server.Addr, nil } From 964515318f53d44d8520e3eaabf7cb1ee26ff013 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 2 Nov 2021 14:36:16 +0800 Subject: [PATCH 70/99] [udpproxy] remove udp server runtime cleanup method(session cleanup itself when close) --- pkg/object/udpproxy/runtime.go | 47 ++++++++-------------------------- pkg/object/udpproxy/session.go | 20 ++++++++++----- 2 files changed, 24 insertions(+), 43 deletions(-) diff --git 
a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index c367433f32..0f951d88fe 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -64,18 +64,8 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { // Close notify runtime close func (r *runtime) Close() { - close(r.done) _ = r.serverConn.Close() - - r.mu.Lock() - for k, s := range r.sessions { - delete(r.sessions, k) - s.Close() - } - r.sessions = nil - r.mu.Unlock() - r.pool.Close() } @@ -139,19 +129,6 @@ func (r *runtime) startServer() { r.proxy(clientAddr, buf[0:n]) } }() - - go func() { - ticker := time.NewTicker(5 * time.Second) - for { - select { - case <-ticker.C: - r.cleanup() - case <-r.done: - ticker.Stop() - return - } - } - }() } func (r *runtime) getServerConn(pool *connPool, clientAddr *net.UDPAddr) (net.Conn, string, error) { @@ -162,7 +139,7 @@ func (r *runtime) getServerConn(pool *connPool, clientAddr *net.UDPAddr) (net.Co var serverConn net.Conn if pool != nil { - serverConn = pool.get(clientAddr.String()) + serverConn = pool.get(server.Addr) if serverConn != nil { return serverConn, server.Addr, nil } @@ -177,6 +154,7 @@ func (r *runtime) getServerConn(pool *connPool, clientAddr *net.UDPAddr) (net.Co if err != nil { return nil, server.Addr, fmt.Errorf("dial to server addr(%s) failed, err: %+v", server.Addr, err) } + if pool != nil { pool.put(server.Addr, serverConn) } @@ -220,8 +198,14 @@ func (r *runtime) getSession(clientAddr *net.UDPAddr) (*session, error) { return nil, err } - s = newSession(clientAddr, serverAddr, serverConn, - time.Duration(r.spec.ServerIdleTimeout)*time.Millisecond, time.Duration(r.spec.ClientIdleTimeout)*time.Millisecond) + onClose := func() { + r.mu.Lock() + delete(r.sessions, key) + r.mu.Unlock() + } + s = newSession(clientAddr, serverAddr, serverConn, r.done, onClose, + time.Duration(r.spec.ServerIdleTimeout)*time.Millisecond, + time.Duration(r.spec.ClientIdleTimeout)*time.Millisecond) s.ListenResponse(r.serverConn) r.sessions[key] = s @@ -242,14 +226,3 @@ func (r *runtime) proxy(clientAddr *net.UDPAddr, buf []byte) { logger.Errorf("write data to udp session(%s) failed, err: %v", clientAddr.IP.String(), err) } } - -func (r *runtime) cleanup() { - r.mu.Lock() - defer r.mu.Unlock() - - for k, s := range r.sessions { - if s.isClosed() { - delete(r.sessions, k) - } - } -} diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index 258d44c761..1cbb020e28 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -35,14 +35,17 @@ type ( clientIdleTimeout time.Duration serverIdleTimeout time.Duration - serverConn net.Conn - writeBuf chan *iobufferpool.Packet - stopChan chan struct{} - stopped uint32 + serverConn net.Conn + writeBuf chan *iobufferpool.Packet + stopped uint32 + stopChan chan struct{} + listenerStop chan struct{} + onClose func() } ) func newSession(clientAddr *net.UDPAddr, serverAddr string, serverConn net.Conn, + listenerStop chan struct{}, onClose func(), clientIdleTimeout, serverIdleTimeout time.Duration) *session { s := session{ serverAddr: serverAddr, @@ -51,8 +54,10 @@ func newSession(clientAddr *net.UDPAddr, serverAddr string, serverConn net.Conn, serverIdleTimeout: serverIdleTimeout, clientIdleTimeout: clientIdleTimeout, - writeBuf: make(chan *iobufferpool.Packet, 512), - stopChan: make(chan struct{}), + writeBuf: make(chan *iobufferpool.Packet, 512), + stopChan: make(chan struct{}), + listenerStop: listenerStop, + onClose: onClose, } go func() { @@ -66,6 +71,8 @@ func 
newSession(clientAddr *net.UDPAddr, serverAddr string, serverConn net.Conn, for { select { + case <-s.listenerStop: + s.Close() case <-idleCheck: s.Close() case buf, ok := <-s.writeBuf: @@ -102,6 +109,7 @@ func newSession(clientAddr *net.UDPAddr, serverAddr string, serverConn net.Conn, } _ = s.serverConn.Close() s.cleanWriteBuf() + s.onClose() return } } From 3cd652468198af2fa5f1391c5ae15242dafd92c4 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 2 Nov 2021 15:21:50 +0800 Subject: [PATCH 71/99] [udpproxy] replace atomic with mutex in session close function --- pkg/object/udpproxy/runtime.go | 4 ---- pkg/object/udpproxy/session.go | 42 +++++++++++++++++++++------------- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 0f951d88fe..b5e8bb11ab 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -189,10 +189,6 @@ func (r *runtime) getSession(clientAddr *net.UDPAddr) (*session, error) { return s, nil } - if ok { - go func() { s.Close() }() - } - serverConn, serverAddr, err := r.getServerConn(nil, clientAddr) if err != nil { return nil, err diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index 1cbb020e28..3ed38c43f3 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -20,7 +20,7 @@ package udpproxy import ( "fmt" "net" - "sync/atomic" + "sync" "time" "github.com/megaease/easegress/pkg/logger" @@ -32,15 +32,17 @@ type ( session struct { clientAddr *net.UDPAddr serverAddr string + serverConn net.Conn clientIdleTimeout time.Duration serverIdleTimeout time.Duration + writeBuf chan *iobufferpool.Packet - serverConn net.Conn - writeBuf chan *iobufferpool.Packet - stopped uint32 + stopped bool stopChan chan struct{} listenerStop chan struct{} onClose func() + + mu sync.Mutex } ) @@ -53,8 +55,9 @@ func newSession(clientAddr *net.UDPAddr, serverAddr string, serverConn net.Conn, serverConn: serverConn, serverIdleTimeout: serverIdleTimeout, clientIdleTimeout: clientIdleTimeout, + writeBuf: make(chan *iobufferpool.Packet, 512), - writeBuf: make(chan *iobufferpool.Packet, 512), + stopped: false, stopChan: make(chan struct{}), listenerStop: listenerStop, onClose: onClose, @@ -72,12 +75,12 @@ func newSession(clientAddr *net.UDPAddr, serverAddr string, serverConn net.Conn, for { select { case <-s.listenerStop: - s.Close() + s.close() case <-idleCheck: - s.Close() + s.close() case buf, ok := <-s.writeBuf: if !ok { - s.Close() + s.close() continue } @@ -94,14 +97,14 @@ func newSession(clientAddr *net.UDPAddr, serverAddr string, serverConn net.Conn, if err != nil { logger.Errorf("udp connection flush data to server(%s) failed, err: %+v", serverAddr, err) - s.Close() + s.close() continue } if bufLen != n { logger.Errorf("udp connection flush data to server(%s) failed, should write %d but written %d", serverAddr, bufLen, n) - s.Close() + s.close() } case <-s.stopChan: if t != nil { @@ -150,7 +153,7 @@ func (s *session) Write(buf *iobufferpool.Packet) error { func (s *session) ListenResponse(sendTo *net.UDPConn) { go func() { buf := iobufferpool.UDPBufferPool.Get().([]byte) - defer s.Close() + defer s.close() for { if s.serverIdleTimeout > 0 { @@ -201,12 +204,19 @@ func (s *session) cleanWriteBuf() { // isClosed determine session if it is closed, used only for clean sessionMap func (s *session) isClosed() bool { - return atomic.LoadUint32(&s.stopped) == 1 + s.mu.Lock() + defer s.mu.Unlock() + return s.stopped } -// Close send session close 
signal -func (s *session) Close() { - if atomic.CompareAndSwapUint32(&s.stopped, 0, 1) { - close(s.stopChan) +// close send session close signal +func (s *session) close() { + s.mu.Lock() + defer s.mu.Unlock() + if s.stopped == true { + return } + + s.onClose() + close(s.stopChan) } From e3abd99338a79cfd1127658fbe68a38f4a111951 Mon Sep 17 00:00:00 2001 From: "jinxiaodong@cmii.chinamobile.com" <1990ziyou> Date: Tue, 2 Nov 2021 22:44:48 +0800 Subject: [PATCH 72/99] [layer4proxy] bug fix for udp connection pool --- pkg/object/udpproxy/runtime.go | 7 ++++--- pkg/object/udpproxy/spec.go | 1 + pkg/object/udpproxy/udpserver.go | 4 ++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index b5e8bb11ab..44d998fdcd 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -62,8 +62,8 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { return r } -// Close notify runtime close -func (r *runtime) Close() { +// close notify runtime close +func (r *runtime) close() { close(r.done) _ = r.serverConn.Close() r.pool.Close() @@ -83,7 +83,8 @@ func (r *runtime) startServer() { } var cp *connPool - if r.spec.HasResponse { + if !r.spec.HasResponse { + // if client udp request doesn't have response, use connection pool to save server connections pool cp = newConnPool() } diff --git a/pkg/object/udpproxy/spec.go b/pkg/object/udpproxy/spec.go index 03c8392d0a..0f6a3bb4a5 100644 --- a/pkg/object/udpproxy/spec.go +++ b/pkg/object/udpproxy/spec.go @@ -29,6 +29,7 @@ type ( Name string `yaml:"name" json:"name" jsonschema:"required"` Port uint16 `yaml:"port" json:"port" jsonschema:"required"` + // HasResponse client udp request has response? HasResponse bool `yaml:"hasResponse" jsonschema:"required"` ClientIdleTimeout uint32 `yaml:"clientIdleTimeout" jsonschema:"omitempty,minimum=1"` ServerIdleTimeout uint32 `yaml:"serverIdleTimeout" jsonschema:"omitempty,minimum=1"` diff --git a/pkg/object/udpproxy/udpserver.go b/pkg/object/udpproxy/udpserver.go index feff9164e2..b5ff94d510 100644 --- a/pkg/object/udpproxy/udpserver.go +++ b/pkg/object/udpproxy/udpserver.go @@ -70,7 +70,7 @@ func (u *UDPServer) Status() *supervisor.Status { // Close actually close runtime func (u *UDPServer) Close() { - u.runtime.Close() + u.runtime.close() } // Init initializes UDPServer. 
@@ -82,7 +82,7 @@ func (u *UDPServer) Init(superSpec *supervisor.Spec) { func (u *UDPServer) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object) { u.runtime = previousGeneration.(*UDPServer).runtime - u.runtime.Close() + u.runtime.close() u.Init(superSpec) } From 49df67ed26edbf3c1d260086bf225ed31b13413e Mon Sep 17 00:00:00 2001 From: jxd134 Date: Wed, 3 Nov 2021 09:18:08 +0800 Subject: [PATCH 73/99] [udpproxy] fix udp connection pool judge bug --- pkg/object/udpproxy/runtime.go | 7 ++++--- pkg/object/udpproxy/spec.go | 1 + pkg/object/udpproxy/udpserver.go | 4 ++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index b5e8bb11ab..44d998fdcd 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -62,8 +62,8 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { return r } -// Close notify runtime close -func (r *runtime) Close() { +// close notify runtime close +func (r *runtime) close() { close(r.done) _ = r.serverConn.Close() r.pool.Close() @@ -83,7 +83,8 @@ func (r *runtime) startServer() { } var cp *connPool - if r.spec.HasResponse { + if !r.spec.HasResponse { + // if client udp request doesn't have response, use connection pool to save server connections pool cp = newConnPool() } diff --git a/pkg/object/udpproxy/spec.go b/pkg/object/udpproxy/spec.go index 03c8392d0a..0f6a3bb4a5 100644 --- a/pkg/object/udpproxy/spec.go +++ b/pkg/object/udpproxy/spec.go @@ -29,6 +29,7 @@ type ( Name string `yaml:"name" json:"name" jsonschema:"required"` Port uint16 `yaml:"port" json:"port" jsonschema:"required"` + // HasResponse client udp request has response? HasResponse bool `yaml:"hasResponse" jsonschema:"required"` ClientIdleTimeout uint32 `yaml:"clientIdleTimeout" jsonschema:"omitempty,minimum=1"` ServerIdleTimeout uint32 `yaml:"serverIdleTimeout" jsonschema:"omitempty,minimum=1"` diff --git a/pkg/object/udpproxy/udpserver.go b/pkg/object/udpproxy/udpserver.go index feff9164e2..b5ff94d510 100644 --- a/pkg/object/udpproxy/udpserver.go +++ b/pkg/object/udpproxy/udpserver.go @@ -70,7 +70,7 @@ func (u *UDPServer) Status() *supervisor.Status { // Close actually close runtime func (u *UDPServer) Close() { - u.runtime.Close() + u.runtime.close() } // Init initializes UDPServer. 
@@ -82,7 +82,7 @@ func (u *UDPServer) Init(superSpec *supervisor.Spec) { func (u *UDPServer) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object) { u.runtime = previousGeneration.(*UDPServer).runtime - u.runtime.Close() + u.runtime.close() u.Init(superSpec) } From e72fb2f0516ffcbf7fee0a0a6bb49fbb16ab22da Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 5 Nov 2021 17:02:20 +0800 Subject: [PATCH 74/99] [tcpproxy] fix tcp client close bug & add comment --- pkg/object/tcpproxy/connection.go | 25 ++++++++++++--------- pkg/util/iobufferpool/constants.go | 2 +- pkg/util/iobufferpool/stream_buffer_pool.go | 1 + pkg/util/layer4backend/backendserver.go | 1 + pkg/util/limitlistener/limitlistener.go | 6 ++--- 5 files changed, 21 insertions(+), 14 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 97ca8dcc1e..7269ca44a7 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -28,6 +28,7 @@ import ( "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/util/iobufferpool" + "github.com/megaease/easegress/pkg/util/limitlistener" "github.com/megaease/easegress/pkg/util/timerpool" ) @@ -162,8 +163,8 @@ func (c *Connection) startReadLoop() { if err != nil { if atomic.LoadUint32(&c.closed) == 1 { - logger.Infof("tcp connection exit read loop for connection has closed, local addr: %s, "+ - "remote addr: %s, err: %s", c.localAddr.String(), c.remoteAddr.String(), err.Error()) + logger.Debugf("tcp connection has closed, exit read loop, local addr: %s, remote addr: %s", + c.localAddr.String(), c.remoteAddr.String()) tcpBufferPool.Put(c.readBuffer) return } @@ -177,7 +178,7 @@ func (c *Connection) startReadLoop() { if err != nil { if err == io.EOF { - logger.Infof("tcp connection read error, local addr: %s, remote addr: %s, err: %s", + logger.Debugf("tcp connection remote close, local addr: %s, remote addr: %s, err: %s", c.localAddr.String(), c.remoteAddr.String(), err.Error()) _ = c.Close(NoFlush, RemoteClose) } else { @@ -195,7 +196,9 @@ func (c *Connection) startWriteLoop() { var err error for { select { - case <-c.listenerStopChan: + case <-c.connStopChan: + logger.Debugf("connection exit write loop, local addr: %s, remote addr: %s", + c.localAddr.String(), c.remoteAddr.String()) return case buf, ok := <-c.writeBufferChan: if !ok { @@ -220,7 +223,7 @@ func (c *Connection) startWriteLoop() { if err != nil { if err == iobufferpool.ErrEOF { - logger.Debugf("tcp connection local close with eof, local addr: %s, remote addr: %s", + logger.Debugf("tcp connection close, local addr: %s, remote addr: %s", c.localAddr.String(), c.remoteAddr.String()) _ = c.Close(NoFlush, LocalClose) } else { @@ -263,17 +266,19 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) } // close tcp rawConn read first - logger.Debugf("tcp connection closed, local addr: %s, remote addr: %s, event: %s", - c.localAddr.String(), c.remoteAddr.String(), event) - _ = c.rawConn.(*net.TCPConn).CloseRead() + logger.Debugf("tcp connection closed(%s), local addr: %s, remote addr: %s", + event, c.localAddr.String(), c.remoteAddr.String()) + if conn, ok := c.rawConn.(*limitlistener.Conn); ok { + _ = conn.Conn.(*net.TCPConn).CloseRead() // client connection is wrapped by limitlistener.Conn + } else { + _ = c.rawConn.(*net.TCPConn).CloseRead() + } // close rawConn recv, then notify read/write loop to exit close(c.connStopChan) _ = c.rawConn.Close() c.lastBytesSizeRead, c.lastWriteSizeWrite = 0, 0 - 
logger.Debugf("tcp connection closed, local addr: %s, remote addr: %s, event: %s", c.localAddr.String(), c.remoteAddr.String(), event) - if c.onClose != nil { c.onClose(event) } diff --git a/pkg/util/iobufferpool/constants.go b/pkg/util/iobufferpool/constants.go index 32048fe153..6487a6cd4c 100644 --- a/pkg/util/iobufferpool/constants.go +++ b/pkg/util/iobufferpool/constants.go @@ -23,7 +23,7 @@ const ( // UDPPacketMaxSize max size of udp packet UDPPacketMaxSize = 65535 // DefaultBufferReadCapacity default buffer capacity for stream proxy such as tcp - DefaultBufferReadCapacity = 1 << 7 + DefaultBufferReadCapacity = 1 << 13 ) var ( diff --git a/pkg/util/iobufferpool/stream_buffer_pool.go b/pkg/util/iobufferpool/stream_buffer_pool.go index 582fa17e10..eec2229e2d 100644 --- a/pkg/util/iobufferpool/stream_buffer_pool.go +++ b/pkg/util/iobufferpool/stream_buffer_pool.go @@ -27,6 +27,7 @@ type StreamBuffer struct { eof bool } +// NewStreamBuffer create stream buffer with specific payload func NewStreamBuffer(buf []byte) *StreamBuffer { res := &StreamBuffer{ payload: bytebufferpool.Get(), diff --git a/pkg/util/layer4backend/backendserver.go b/pkg/util/layer4backend/backendserver.go index 440052bed1..2f3dcfd1a9 100644 --- a/pkg/util/layer4backend/backendserver.go +++ b/pkg/util/layer4backend/backendserver.go @@ -95,6 +95,7 @@ func newServers(super *supervisor.Supervisor, poolSpec *Spec) *servers { return s } +// String backend server info func (s *Server) String() string { return fmt.Sprintf("%s,%v,%d", s.Addr, s.Tags, s.Weight) } diff --git a/pkg/util/limitlistener/limitlistener.go b/pkg/util/limitlistener/limitlistener.go index ee3af26763..8810b70b33 100644 --- a/pkg/util/limitlistener/limitlistener.go +++ b/pkg/util/limitlistener/limitlistener.go @@ -73,7 +73,7 @@ func (l *LimitListener) Accept() (net.Conn, error) { l.release() return nil, err } - return &limitListenerConn{Conn: c, release: l.release}, nil + return &Conn{Conn: c, release: l.release}, nil } // SetMaxConnection sets max connection. 
@@ -88,13 +88,13 @@ func (l *LimitListener) Close() error { return err } -type limitListenerConn struct { +type Conn struct { net.Conn releaseOnce sync.Once release func() } -func (l *limitListenerConn) Close() error { +func (l *Conn) Close() error { err := l.Conn.Close() l.releaseOnce.Do(l.release) return err From 9149981312f127e7d486d841d76f81b3a4a83346 Mon Sep 17 00:00:00 2001 From: jxd Date: Sat, 6 Nov 2021 12:37:05 +0800 Subject: [PATCH 75/99] simplify connection start function Co-authored-by: Bomin Zhang --- pkg/object/tcpproxy/connection.go | 36 +++++++++++-------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 7269ca44a7..e962a5aa73 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -90,34 +90,24 @@ func (c *Connection) SetOnClose(onclose func(event ConnectionEvent)) { // Start running connection read/write loop func (c *Connection) Start() { + fnRecover := func() { + if r := recover(); r != nil { + logger.Errorf("tcp connection goroutine panic: %v\n%s\n", r, string(debug.Stack())) + c.Close(NoFlush, LocalClose) + } + } + c.startOnce.Do(func() { - c.goWithRecover(func() { + go func() { + defer fnRecover() c.startReadLoop() - }, func(r interface{}) { - _ = c.Close(NoFlush, LocalClose) - }) + }() - c.goWithRecover(func() { + go func() { + defer fnRecover() c.startWriteLoop() - }, func(r interface{}) { - _ = c.Close(NoFlush, LocalClose) - }) - }) -} - -func (c *Connection) goWithRecover(handler func(), recoverHandler func(r interface{})) { - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("tcp connection goroutine panic: %v\n%s\n", r, string(debug.Stack())) - if recoverHandler != nil { - // it is not needed to wrap recoverHandler with go func in the current scenario - recoverHandler(r) - } - } }() - handler() - }() + }) } // Write receive other connection data From 51e77d2c6c71220559c4d8eaee9f674b05b4dd1f Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sat, 6 Nov 2021 12:42:27 +0800 Subject: [PATCH 76/99] [tcpproxy] remove unnecessary parameters `startOnce` --- pkg/object/tcpproxy/connection.go | 76 ++++++++++++++----------------- 1 file changed, 35 insertions(+), 41 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index e962a5aa73..e9c04a9bfd 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -56,7 +56,6 @@ type Connection struct { writeBufferChan chan *iobufferpool.StreamBuffer mu sync.Mutex - startOnce sync.Once connStopChan chan struct{} // use for connection close listenerStopChan chan struct{} // use for listener close @@ -93,21 +92,19 @@ func (c *Connection) Start() { fnRecover := func() { if r := recover(); r != nil { logger.Errorf("tcp connection goroutine panic: %v\n%s\n", r, string(debug.Stack())) - c.Close(NoFlush, LocalClose) + _ = c.Close(NoFlush, LocalClose) } } - - c.startOnce.Do(func() { - go func() { - defer fnRecover() - c.startReadLoop() - }() - - go func() { - defer fnRecover() - c.startWriteLoop() - }() - }) + + go func() { + defer fnRecover() + c.startReadLoop() + }() + + go func() { + defer fnRecover() + c.startWriteLoop() + }() } // Write receive other connection data @@ -330,7 +327,6 @@ func (c *Connection) doWriteIO() (bytesSent int64, err error) { type ServerConnection struct { Connection connectTimeout time.Duration - connectOnce sync.Once } // NewServerConn construct tcp server connection @@ -352,34 
+348,32 @@ func NewServerConn(connectTimeout uint32, serverAddr net.Addr, listenerStopChan // Connect create backend server tcp connection func (u *ServerConnection) Connect() (err error) { - u.connectOnce.Do(func() { - addr := u.remoteAddr - if addr == nil { - err = errors.New("server addr is nil") - return - } + addr := u.remoteAddr + if addr == nil { + err = errors.New("server addr is nil") + return + } - timeout := u.connectTimeout - if timeout == 0 { - timeout = 10 * time.Second - } + timeout := u.connectTimeout + if timeout == 0 { + timeout = 10 * time.Second + } - u.rawConn, err = net.DialTimeout("tcp", addr.String(), timeout) - if err != nil { - if err == io.EOF { - err = errors.New("server has been closed") - } else if te, ok := err.(net.Error); ok && te.Timeout() { - err = errors.New("connect to server timeout") - } else { - err = errors.New("connect to server failed") - } - return + u.rawConn, err = net.DialTimeout("tcp", addr.String(), timeout) + if err != nil { + if err == io.EOF { + err = errors.New("server has been closed") + } else if te, ok := err.(net.Error); ok && te.Timeout() { + err = errors.New("connect to server timeout") + } else { + err = errors.New("connect to server failed") } + return + } - u.localAddr = u.rawConn.LocalAddr() - _ = u.rawConn.(*net.TCPConn).SetNoDelay(true) - _ = u.rawConn.(*net.TCPConn).SetKeepAlive(true) - u.Start() - }) - return + u.localAddr = u.rawConn.LocalAddr() + _ = u.rawConn.(*net.TCPConn).SetNoDelay(true) + _ = u.rawConn.(*net.TCPConn).SetKeepAlive(true) + u.Start() + return nil } From 3dff75226152411cae45cf65780ea1ff8e50a2b8 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sat, 6 Nov 2021 13:12:24 +0800 Subject: [PATCH 77/99] [tcpproxy] early continue to reduce nesting --- pkg/object/tcpproxy/connection.go | 44 +++++++++++++++---------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index e9c04a9bfd..ebb4716d64 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -136,6 +136,7 @@ func (c *Connection) Write(buf *iobufferpool.StreamBuffer) (err error) { } func (c *Connection) startReadLoop() { + defer tcpBufferPool.Put(c.readBuffer) for { select { case <-c.connStopChan: @@ -148,33 +149,32 @@ func (c *Connection) startReadLoop() { c.onRead(iobufferpool.NewStreamBuffer(c.readBuffer[:n])) } - if err != nil { - if atomic.LoadUint32(&c.closed) == 1 { - logger.Debugf("tcp connection has closed, exit read loop, local addr: %s, remote addr: %s", - c.localAddr.String(), c.remoteAddr.String()) - tcpBufferPool.Put(c.readBuffer) - return - } + if err == nil { + continue + } - if te, ok := err.(net.Error); ok && te.Timeout() { - if n == 0 { - continue // continue read data, ignore timeout error - } - } + if atomic.LoadUint32(&c.closed) == 1 { + logger.Debugf("tcp connection has closed, exit read loop, local addr: %s, remote addr: %s", + c.localAddr.String(), c.remoteAddr.String()) + return } - if err != nil { - if err == io.EOF { - logger.Debugf("tcp connection remote close, local addr: %s, remote addr: %s, err: %s", - c.localAddr.String(), c.remoteAddr.String(), err.Error()) - _ = c.Close(NoFlush, RemoteClose) - } else { - logger.Errorf("tcp connection read error, local addr: %s, remote addr: %s, err: %s", - c.localAddr.String(), c.remoteAddr.String(), err.Error()) - _ = c.Close(NoFlush, OnReadErrClose) + if te, ok := err.(net.Error); ok && te.Timeout() { + if n == 0 { + continue // continue read data, ignore timeout error } - 
return } + + if err == io.EOF { + logger.Debugf("tcp connection remote close, local addr: %s, remote addr: %s, err: %s", + c.localAddr.String(), c.remoteAddr.String(), err.Error()) + _ = c.Close(NoFlush, RemoteClose) + } else { + logger.Errorf("tcp connection read error, local addr: %s, remote addr: %s, err: %s", + c.localAddr.String(), c.remoteAddr.String(), err.Error()) + _ = c.Close(NoFlush, OnReadErrClose) + } + return } } } From 5b7d97b8336b2e91a1911ace676e7a8288951547 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sat, 6 Nov 2021 13:24:15 +0800 Subject: [PATCH 78/99] [tcpproxy] add comment for limit listener connection --- pkg/util/limitlistener/limitlistener.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/util/limitlistener/limitlistener.go b/pkg/util/limitlistener/limitlistener.go index 8810b70b33..e16e4885bd 100644 --- a/pkg/util/limitlistener/limitlistener.go +++ b/pkg/util/limitlistener/limitlistener.go @@ -88,12 +88,14 @@ func (l *LimitListener) Close() error { return err } +// Conn limit listener connection type Conn struct { net.Conn releaseOnce sync.Once release func() } +// Close release semaphore and close connection func (l *Conn) Close() error { err := l.Conn.Close() l.releaseOnce.Do(l.release) From f0d1a61fd60813d13be23dfdb8adbb3e99d75708 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sat, 6 Nov 2021 15:55:03 +0800 Subject: [PATCH 79/99] [tcpproxy] no need to call `atmoic.LoadUint32` for `closed` param when something wrong with read/write --- pkg/object/tcpproxy/connection.go | 65 ++++++++++++++---------------- pkg/object/tcpproxy/runtime.go | 8 ++-- pkg/util/iobufferpool/constants.go | 2 +- 3 files changed, 35 insertions(+), 40 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index ebb4716d64..f06a322812 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -18,7 +18,6 @@ package tcpproxy import ( - "errors" "io" "net" "runtime/debug" @@ -153,12 +152,6 @@ func (c *Connection) startReadLoop() { continue } - if atomic.LoadUint32(&c.closed) == 1 { - logger.Debugf("tcp connection has closed, exit read loop, local addr: %s, remote addr: %s", - c.localAddr.String(), c.remoteAddr.String()) - return - } - if te, ok := err.(net.Error); ok && te.Timeout() { if n == 0 { continue // continue read data, ignore timeout error @@ -166,11 +159,11 @@ func (c *Connection) startReadLoop() { } if err == io.EOF { - logger.Debugf("tcp connection remote close, local addr: %s, remote addr: %s, err: %s", + logger.Debugf("remote close connection, local addr: %s, remote addr: %s, err: %s", c.localAddr.String(), c.remoteAddr.String(), err.Error()) _ = c.Close(NoFlush, RemoteClose) } else { - logger.Errorf("tcp connection read error, local addr: %s, remote addr: %s, err: %s", + logger.Errorf("error on read, local addr: %s, remote addr: %s, err: %s", c.localAddr.String(), c.remoteAddr.String(), err.Error()) _ = c.Close(NoFlush, OnReadErrClose) } @@ -208,22 +201,25 @@ func (c *Connection) startWriteLoop() { _, err = c.doWrite() } - if err != nil { - if err == iobufferpool.ErrEOF { - logger.Debugf("tcp connection close, local addr: %s, remote addr: %s", - c.localAddr.String(), c.remoteAddr.String()) - _ = c.Close(NoFlush, LocalClose) - } else { - logger.Errorf("tcp connection error on write, local addr: %s, remote addr: %s, err: %+v", - c.localAddr.String(), c.remoteAddr.String(), err) - } + if err == nil { + continue + } - if te, ok := err.(net.Error); ok && te.Timeout() { - _ = c.Close(NoFlush, OnWriteTimeout) - 
} - //other write errs not close connection, because readbuffer may have unread data, wait for readloop close connection, + if te, ok := err.(net.Error); ok && te.Timeout() { + _ = c.Close(NoFlush, OnWriteTimeout) return } + + if err == iobufferpool.ErrEOF { + logger.Debugf("finish write with eof, local addr: %s, remote addr: %s", + c.localAddr.String(), c.remoteAddr.String()) + _ = c.Close(NoFlush, LocalClose) + } else { + // remote call CloseRead, so just exit write loop, wait read loop exit + logger.Errorf("error on write, local addr: %s, remote addr: %s, err: %+v", + c.localAddr.String(), c.remoteAddr.String(), err) + } + return } } @@ -239,7 +235,7 @@ func (c *Connection) appendBuffer(buf *iobufferpool.StreamBuffer) { func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) { defer func() { if r := recover(); r != nil { - logger.Errorf("tcp connection close panic, err: %+v\n%s", r, string(debug.Stack())) + logger.Errorf("connection close panic, err: %+v\n%s", r, string(debug.Stack())) } }() @@ -253,7 +249,7 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) } // close tcp rawConn read first - logger.Debugf("tcp connection closed(%s), local addr: %s, remote addr: %s", + logger.Debugf("enter connection close func(%s), local addr: %s, remote addr: %s", event, c.localAddr.String(), c.remoteAddr.String()) if conn, ok := c.rawConn.(*limitlistener.Conn); ok { _ = conn.Conn.(*net.TCPConn).CloseRead() // client connection is wrapped by limitlistener.Conn @@ -286,8 +282,8 @@ func (c *Connection) doReadIO() (bufLen int, err error) { func (c *Connection) doWrite() (int64, error) { _ = c.rawConn.SetWriteDeadline(time.Now().Add(15 * time.Second)) bytesSent, err := c.doWriteIO() - if err != nil && atomic.LoadUint32(&c.closed) == 1 { - return 0, nil + if err != nil { + return 0, err } if bytesBufSize := c.writeBufLen(); bytesBufSize != c.lastWriteSizeWrite { @@ -347,11 +343,11 @@ func NewServerConn(connectTimeout uint32, serverAddr net.Addr, listenerStopChan } // Connect create backend server tcp connection -func (u *ServerConnection) Connect() (err error) { +func (u *ServerConnection) Connect() bool { addr := u.remoteAddr if addr == nil { - err = errors.New("server addr is nil") - return + logger.Errorf("cannot connect because the server has been closed, server addr: %s", addr.String()) + return false } timeout := u.connectTimeout @@ -359,21 +355,22 @@ func (u *ServerConnection) Connect() (err error) { timeout = 10 * time.Second } + var err error u.rawConn, err = net.DialTimeout("tcp", addr.String(), timeout) if err != nil { if err == io.EOF { - err = errors.New("server has been closed") + logger.Errorf("cannot connect because the server has been closed, server addr: %s", addr.String()) } else if te, ok := err.(net.Error); ok && te.Timeout() { - err = errors.New("connect to server timeout") + logger.Errorf("connect to server timeout, server addr: %s", addr.String()) } else { - err = errors.New("connect to server failed") + logger.Errorf("connect to server failed, server addr: %s, err: %s", addr.String(), err.Error()) } - return + return false } u.localAddr = u.rawConn.LocalAddr() _ = u.rawConn.(*net.TCPConn).SetNoDelay(true) _ = u.rawConn.(*net.TCPConn).SetKeepAlive(true) u.Start() - return nil + return true } diff --git a/pkg/object/tcpproxy/runtime.go b/pkg/object/tcpproxy/runtime.go index be7ce0d154..2fce478798 100644 --- a/pkg/object/tcpproxy/runtime.go +++ b/pkg/object/tcpproxy/runtime.go @@ -105,7 +105,7 @@ func (r *runtime) fsm() { for e 
:= range r.eventChan { switch e := e.(type) { case *eventCheckFailed: - r.handleEventCheckFailed(e) + r.handleEventCheckFailed() case *eventServeFailed: r.handleEventServeFailed(e) case *eventReload: @@ -243,7 +243,7 @@ func (r *runtime) checkFailed() { } } -func (r *runtime) handleEventCheckFailed(e *eventCheckFailed) { +func (r *runtime) handleEventCheckFailed() { if r.getState() == stateFailed { r.startServer() } @@ -288,9 +288,7 @@ func (r *runtime) onAccept() func(conn net.Conn, listenerStop chan struct{}) { serverAddr, _ := net.ResolveTCPAddr("tcp", server.Addr) serverConn := NewServerConn(r.spec.ConnectTimeout, serverAddr, listenerStop) - if err := serverConn.Connect(); err != nil { - logger.Errorf("connect to server failed(name: %s, addr: %s), err: %+v", - r.spec.Name, rawConn.LocalAddr().String(), err) + if !serverConn.Connect() { _ = rawConn.Close() return } diff --git a/pkg/util/iobufferpool/constants.go b/pkg/util/iobufferpool/constants.go index 6487a6cd4c..a514d8674f 100644 --- a/pkg/util/iobufferpool/constants.go +++ b/pkg/util/iobufferpool/constants.go @@ -23,7 +23,7 @@ const ( // UDPPacketMaxSize max size of udp packet UDPPacketMaxSize = 65535 // DefaultBufferReadCapacity default buffer capacity for stream proxy such as tcp - DefaultBufferReadCapacity = 1 << 13 + DefaultBufferReadCapacity = 1 << 16 ) var ( From 97d36db8ec58cc7f189f0729f28a5acc9e3243fd Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sat, 6 Nov 2021 19:16:07 +0800 Subject: [PATCH 80/99] [tcpproxy] refactor `startWriteLoop` function --- pkg/object/tcpproxy/connection.go | 62 ++++++++++++++++--------------- pkg/object/tcpproxy/runtime.go | 8 ++-- 2 files changed, 36 insertions(+), 34 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index f06a322812..4fd6fc5206 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -91,7 +91,7 @@ func (c *Connection) Start() { fnRecover := func() { if r := recover(); r != nil { logger.Errorf("tcp connection goroutine panic: %v\n%s\n", r, string(debug.Stack())) - _ = c.Close(NoFlush, LocalClose) + c.Close(NoFlush, LocalClose) } } @@ -152,6 +152,13 @@ func (c *Connection) startReadLoop() { continue } + if atomic.LoadUint32(&c.closed) == 1 { + // connection has closed, so there is no need to record error log(error may create by CloseRead) + logger.Debugf("connection has closed, exit read loop, local addr: %s, remote addr: %s", + c.localAddr.String(), c.remoteAddr.String()) + return + } + if te, ok := err.(net.Error); ok && te.Timeout() { if n == 0 { continue // continue read data, ignore timeout error @@ -161,11 +168,11 @@ func (c *Connection) startReadLoop() { if err == io.EOF { logger.Debugf("remote close connection, local addr: %s, remote addr: %s, err: %s", c.localAddr.String(), c.remoteAddr.String(), err.Error()) - _ = c.Close(NoFlush, RemoteClose) + c.Close(NoFlush, RemoteClose) } else { logger.Errorf("error on read, local addr: %s, remote addr: %s, err: %s", c.localAddr.String(), c.remoteAddr.String(), err.Error()) - _ = c.Close(NoFlush, OnReadErrClose) + c.Close(NoFlush, OnReadErrClose) } return } @@ -180,40 +187,36 @@ func (c *Connection) startWriteLoop() { logger.Debugf("connection exit write loop, local addr: %s, remote addr: %s", c.localAddr.String(), c.remoteAddr.String()) return - case buf, ok := <-c.writeBufferChan: - if !ok { - return - } - c.appendBuffer(buf) - OUTER: - // Keep reading until write buffer channel is full(write buffer channel size is writeBufSize) - for i := 0; i < 
writeBufSize-1; i++ { - select { - case buf, ok := <-c.writeBufferChan: - if !ok { - return - } - c.appendBuffer(buf) - default: - break OUTER + default: + } + + OUTER: + // Keep reading until write buffer channel is full(write buffer channel size is writeBufSize) + for i := 0; i < writeBufSize; i++ { + select { + case buf, ok := <-c.writeBufferChan: + if !ok { + return } + c.appendBuffer(buf) + default: + break OUTER } - _, err = c.doWrite() } - if err == nil { + if _, err = c.doWrite(); err == nil { continue } if te, ok := err.(net.Error); ok && te.Timeout() { - _ = c.Close(NoFlush, OnWriteTimeout) + c.Close(NoFlush, OnWriteTimeout) return } if err == iobufferpool.ErrEOF { logger.Debugf("finish write with eof, local addr: %s, remote addr: %s", c.localAddr.String(), c.remoteAddr.String()) - _ = c.Close(NoFlush, LocalClose) + c.Close(NoFlush, LocalClose) } else { // remote call CloseRead, so just exit write loop, wait read loop exit logger.Errorf("error on write, local addr: %s, remote addr: %s, err: %+v", @@ -232,7 +235,7 @@ func (c *Connection) appendBuffer(buf *iobufferpool.StreamBuffer) { } // Close connection close function -func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) { +func (c *Connection) Close(ccType CloseType, event ConnectionEvent) { defer func() { if r := recover(); r != nil { logger.Errorf("connection close panic, err: %+v\n%s", r, string(debug.Stack())) @@ -241,31 +244,30 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) (err error) if ccType == FlushWrite { _ = c.Write(iobufferpool.NewEOFStreamBuffer()) - return nil + return } if !atomic.CompareAndSwapUint32(&c.closed, 0, 1) { - return nil + return } - // close tcp rawConn read first logger.Debugf("enter connection close func(%s), local addr: %s, remote addr: %s", event, c.localAddr.String(), c.remoteAddr.String()) + + // close tcp rawConn read first, make sure exit read loop if conn, ok := c.rawConn.(*limitlistener.Conn); ok { _ = conn.Conn.(*net.TCPConn).CloseRead() // client connection is wrapped by limitlistener.Conn } else { _ = c.rawConn.(*net.TCPConn).CloseRead() } - // close rawConn recv, then notify read/write loop to exit close(c.connStopChan) _ = c.rawConn.Close() - c.lastBytesSizeRead, c.lastWriteSizeWrite = 0, 0 + c.lastBytesSizeRead, c.lastWriteSizeWrite = 0, 0 if c.onClose != nil { c.onClose(event) } - return nil } func (c *Connection) doReadIO() (bufLen int, err error) { diff --git a/pkg/object/tcpproxy/runtime.go b/pkg/object/tcpproxy/runtime.go index 2fce478798..8eb639f3a0 100644 --- a/pkg/object/tcpproxy/runtime.go +++ b/pkg/object/tcpproxy/runtime.go @@ -313,16 +313,16 @@ func (r *runtime) setCallbacks(clientConn *Connection, serverConn *ServerConnect clientConn.SetOnClose(func(event ConnectionEvent) { if event == RemoteClose { - _ = serverConn.Close(FlushWrite, LocalClose) + serverConn.Close(FlushWrite, LocalClose) } else { - _ = serverConn.Close(NoFlush, LocalClose) + serverConn.Close(NoFlush, LocalClose) } }) serverConn.SetOnClose(func(event ConnectionEvent) { if event == RemoteClose { - _ = clientConn.Close(FlushWrite, LocalClose) + clientConn.Close(FlushWrite, LocalClose) } else { - _ = clientConn.Close(NoFlush, LocalClose) + clientConn.Close(NoFlush, LocalClose) } }) } From ca3340036eee85b32ea33c15edc85071540c3e8d Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sat, 6 Nov 2021 19:39:19 +0800 Subject: [PATCH 81/99] [layer4backend] pool rules is never empty, no need to check it --- pkg/util/layer4backend/pool.go | 10 ++-------- 1 file changed, 2 
insertions(+), 8 deletions(-) diff --git a/pkg/util/layer4backend/pool.go b/pkg/util/layer4backend/pool.go index bec1f03161..20d802e109 100644 --- a/pkg/util/layer4backend/pool.go +++ b/pkg/util/layer4backend/pool.go @@ -18,7 +18,6 @@ package layer4backend import ( - "fmt" "reflect" "sync/atomic" @@ -56,18 +55,13 @@ func NewPool(super *supervisor.Supervisor, spec *Spec, tagPrefix string) *Pool { // Next choose one backend for proxy func (p *Pool) Next(cliAddr string) (*Server, error) { rules := p.rules.Load().(*poolRules) - if rules == nil { - return nil, fmt.Errorf("no server available") - } return rules.servers.next(cliAddr) } // Close shutdown backend servers watcher func (p *Pool) Close() { - if old := p.rules.Load(); old != nil { - oldRules := old.(*poolRules) - oldRules.servers.close() - } + rules := p.rules.Load().(*poolRules) + rules.servers.close() } // ReloadRules reload backend servers pool rule From 4f494c816d1cbeea2c511444cc0f58c6daa055b8 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sat, 6 Nov 2021 20:13:10 +0800 Subject: [PATCH 82/99] [layer4backend] fix listener stop bug(can not close exist connection) --- pkg/object/tcpproxy/connection.go | 3 +++ pkg/object/tcpproxy/listener.go | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 4fd6fc5206..d9fcf0d16a 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -141,6 +141,9 @@ func (c *Connection) startReadLoop() { case <-c.connStopChan: return case <-c.listenerStopChan: + logger.Debugf("connection close due to listener stopped, local addr: %s, remote addr: %s", + c.localAddr.String(), c.remoteAddr.String()) + c.Close(NoFlush, LocalClose) return default: n, err := c.doReadIO() diff --git a/pkg/object/tcpproxy/listener.go b/pkg/object/tcpproxy/listener.go index 75113222f2..1ab5ac5664 100644 --- a/pkg/object/tcpproxy/listener.go +++ b/pkg/object/tcpproxy/listener.go @@ -92,7 +92,7 @@ func (l *listener) acceptEventLoop() { if !(ope.Timeout() && ope.Temporary()) { // accept error raised by sockets closing if ope.Op == "accept" { - logger.Debugf("tcp listener(%s) stop accept connection due to listener closed", l.localAddr) + logger.Debugf("tcp listener(%s) closed, stop accept connection", l.localAddr) } else { logger.Errorf("tcp listener(%s) stop accept connection due to non-recoverable error: %s", l.localAddr, err.Error()) From a05d0a86caff35a77a70f38a93f051360794923d Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sat, 6 Nov 2021 21:11:31 +0800 Subject: [PATCH 83/99] [layer4backend] fix timeout exception check bug --- pkg/object/tcpproxy/connection.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index d9fcf0d16a..1ce83f30a3 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -163,9 +163,7 @@ func (c *Connection) startReadLoop() { } if te, ok := err.(net.Error); ok && te.Timeout() { - if n == 0 { - continue // continue read data, ignore timeout error - } + continue // ignore timeout error, read more stream data } if err == io.EOF { From d311dc731014f357ef59aa66a4ebbee578bebd44 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sun, 7 Nov 2021 16:26:43 +0800 Subject: [PATCH 84/99] [tcpproxy] fix tcp bufferpool capacity bug(ref: https://github.com/golang/go/issues/23199#issuecomment-406967375) --- pkg/object/tcpproxy/connection.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 
deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 1ce83f30a3..bc8f6294fa 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -35,7 +35,8 @@ const writeBufSize = 8 var tcpBufferPool = sync.Pool{ New: func() interface{} { - return make([]byte, iobufferpool.DefaultBufferReadCapacity) + buf := make([]byte, iobufferpool.DefaultBufferReadCapacity) + return buf }, } @@ -135,7 +136,12 @@ func (c *Connection) Write(buf *iobufferpool.StreamBuffer) (err error) { } func (c *Connection) startReadLoop() { - defer tcpBufferPool.Put(c.readBuffer) + defer func() { + if cap(c.readBuffer) < iobufferpool.DefaultBufferReadCapacity { + logger.Errorf("1") + } + tcpBufferPool.Put(c.readBuffer[:iobufferpool.DefaultBufferReadCapacity]) + }() for { select { case <-c.connStopChan: @@ -273,7 +279,7 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) { func (c *Connection) doReadIO() (bufLen int, err error) { if c.readBuffer == nil { - c.readBuffer = tcpBufferPool.Get().([]byte)[:iobufferpool.DefaultBufferReadCapacity] + c.readBuffer = tcpBufferPool.Get().([]byte) } // add read deadline setting optimization? From d5004a36c55c3206d1792840ceb3317e540b216e Mon Sep 17 00:00:00 2001 From: jxd134 Date: Sun, 7 Nov 2021 16:38:09 +0800 Subject: [PATCH 85/99] [tcpproxy] remove debug log --- pkg/object/tcpproxy/connection.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index bc8f6294fa..913762f61a 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -137,11 +137,11 @@ func (c *Connection) Write(buf *iobufferpool.StreamBuffer) (err error) { func (c *Connection) startReadLoop() { defer func() { - if cap(c.readBuffer) < iobufferpool.DefaultBufferReadCapacity { - logger.Errorf("1") + if c.readBuffer != nil { + tcpBufferPool.Put(c.readBuffer[:iobufferpool.DefaultBufferReadCapacity]) } - tcpBufferPool.Put(c.readBuffer[:iobufferpool.DefaultBufferReadCapacity]) }() + for { select { case <-c.connStopChan: From 8be02cf8c973ded2396739512e5c36ebddc60e45 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Mon, 8 Nov 2021 19:10:00 +0800 Subject: [PATCH 86/99] [tcpproxy] fix write loop busy loop bug --- pkg/object/tcpproxy/connection.go | 48 +++++++++++++++++++++---------- pkg/object/udpproxy/session.go | 4 ++- 2 files changed, 36 insertions(+), 16 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 913762f61a..f1a5aabe32 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -26,6 +26,7 @@ import ( "time" "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/util/fasttime" "github.com/megaease/easegress/pkg/util/iobufferpool" "github.com/megaease/easegress/pkg/util/limitlistener" "github.com/megaease/easegress/pkg/util/timerpool" @@ -59,6 +60,9 @@ type Connection struct { connStopChan chan struct{} // use for connection close listenerStopChan chan struct{} // use for listener close + lastReadDeadlineTime time.Time + lastWriteDeadlineTime time.Time + onRead func(buffer *iobufferpool.StreamBuffer) // execute read filters onClose func(event ConnectionEvent) } @@ -194,20 +198,24 @@ func (c *Connection) startWriteLoop() { logger.Debugf("connection exit write loop, local addr: %s, remote addr: %s", c.localAddr.String(), c.remoteAddr.String()) return - default: - } - - OUTER: - // Keep reading until write 
buffer channel is full(write buffer channel size is writeBufSize) - for i := 0; i < writeBufSize; i++ { - select { - case buf, ok := <-c.writeBufferChan: - if !ok { - return + case buf, ok := <-c.writeBufferChan: + if !ok { + return + } + c.appendBuffer(buf) + OUTER: + // Keep reading until writeBufferChan is empty + // writeBufferChan may be full when writeLoop call doWrite + for i := 0; i < writeBufSize-1; i++ { + select { + case buf, ok := <-c.writeBufferChan: + if !ok { + return + } + c.appendBuffer(buf) + default: + break OUTER } - c.appendBuffer(buf) - default: - break OUTER } } @@ -284,12 +292,22 @@ func (c *Connection) doReadIO() (bufLen int, err error) { // add read deadline setting optimization? // https://github.com/golang/go/issues/15133 - _ = c.rawConn.SetReadDeadline(time.Now().Add(15 * time.Second)) + curr := fasttime.Now().Add(15 * time.Second) + // there is no need to set readDeadline in too short time duration + if diff := curr.Sub(c.lastReadDeadlineTime).Milliseconds(); diff > 0 { + _ = c.rawConn.SetReadDeadline(curr) + c.lastReadDeadlineTime = curr + } return c.rawConn.(io.Reader).Read(c.readBuffer) } func (c *Connection) doWrite() (int64, error) { - _ = c.rawConn.SetWriteDeadline(time.Now().Add(15 * time.Second)) + curr := fasttime.Now().Add(15 * time.Second) + // there is no need to set writeDeadline in too short time duration + if diff := curr.Sub(c.lastWriteDeadlineTime).Milliseconds(); diff > 0 { + _ = c.rawConn.SetWriteDeadline(curr) + c.lastWriteDeadlineTime = curr + } bytesSent, err := c.doWriteIO() if err != nil { return 0, err diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index 3ed38c43f3..8443e70c73 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -24,6 +24,7 @@ import ( "time" "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/util/fasttime" "github.com/megaease/easegress/pkg/util/iobufferpool" "github.com/megaease/easegress/pkg/util/timerpool" ) @@ -49,6 +50,7 @@ type ( func newSession(clientAddr *net.UDPAddr, serverAddr string, serverConn net.Conn, listenerStop chan struct{}, onClose func(), clientIdleTimeout, serverIdleTimeout time.Duration) *session { + time.Now() s := session{ serverAddr: serverAddr, clientAddr: clientAddr, @@ -157,7 +159,7 @@ func (s *session) ListenResponse(sendTo *net.UDPConn) { for { if s.serverIdleTimeout > 0 { - _ = s.serverConn.SetReadDeadline(time.Now().Add(s.serverIdleTimeout)) + _ = s.serverConn.SetReadDeadline(fasttime.Now().Add(s.serverIdleTimeout)) } n, err := s.serverConn.Read(buf) From c93b12bdaa76136ef503f5f397f5c8467c9c26a4 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Mon, 8 Nov 2021 19:11:33 +0800 Subject: [PATCH 87/99] [tcpproxy] remove unused param --- pkg/object/udpproxy/session.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index 8443e70c73..9520dd9d54 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -50,7 +50,6 @@ type ( func newSession(clientAddr *net.UDPAddr, serverAddr string, serverConn net.Conn, listenerStop chan struct{}, onClose func(), clientIdleTimeout, serverIdleTimeout time.Duration) *session { - time.Now() s := session{ serverAddr: serverAddr, clientAddr: clientAddr, From 1785884649354e9629d0e55eb7aff0dcf8e40b23 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 9 Nov 2021 18:08:16 +0800 Subject: [PATCH 88/99] [udpproxy] fix udp runtime not initialized bug --- pkg/object/udpproxy/runtime.go | 1 + 1 file 
changed, 1 insertion(+) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 44d998fdcd..e1d9718498 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -55,6 +55,7 @@ func newRuntime(superSpec *supervisor.Spec) *runtime { pool: layer4backend.NewPool(superSpec.Super(), spec.Pool, ""), ipFilters: ipfilter.NewLayer4IPFilters(spec.IPFilter), + done: make(chan struct{}), sessions: make(map[string]*session), } From 9b192819352a5b5a7543288742f4f8f746fb0d1d Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 9 Nov 2021 18:51:08 +0800 Subject: [PATCH 89/99] [udpproxy] fix udp session close bug & optimization udp byte buffer --- pkg/object/udpproxy/runtime.go | 4 ++-- pkg/object/udpproxy/session.go | 1 + pkg/util/iobufferpool/packet_pool.go | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index e1d9718498..649266299f 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -217,9 +217,9 @@ func (r *runtime) proxy(clientAddr *net.UDPAddr, buf []byte) { return } - dup := iobufferpool.UDPBufferPool.Get().([]byte)[:len(buf)] + dup := iobufferpool.UDPBufferPool.Get().([]byte) n := copy(dup, buf) - err = s.Write(&iobufferpool.Packet{Payload: dup, Len: n}) + err = s.Write(&iobufferpool.Packet{Payload: dup[:n], Len: n}) if err != nil { logger.Errorf("write data to udp session(%s) failed, err: %v", clientAddr.IP.String(), err) } diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index 9520dd9d54..d8b0df4832 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -218,6 +218,7 @@ func (s *session) close() { return } + s.stopped = true s.onClose() close(s.stopChan) } diff --git a/pkg/util/iobufferpool/packet_pool.go b/pkg/util/iobufferpool/packet_pool.go index f617f717bd..bc0b6daab5 100644 --- a/pkg/util/iobufferpool/packet_pool.go +++ b/pkg/util/iobufferpool/packet_pool.go @@ -48,5 +48,5 @@ func (p *Packet) Release() { if p.Payload == nil { return } - UDPBufferPool.Put(p.Payload) + UDPBufferPool.Put(p.Payload[:UDPPacketMaxSize]) } From a995b2db44dd5ccb0570b9d5fa51e51635a7761e Mon Sep 17 00:00:00 2001 From: jxd134 Date: Thu, 18 Nov 2021 20:24:05 +0800 Subject: [PATCH 90/99] [tcpproxy] remove unused param in tcp connection and checking --- pkg/object/tcpproxy/connection.go | 30 +++++++++--------------------- 1 file changed, 9 insertions(+), 21 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index f1a5aabe32..ea0b8c6a83 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -48,9 +48,6 @@ type Connection struct { localAddr net.Addr remoteAddr net.Addr - lastBytesSizeRead int - lastWriteSizeWrite int - readBuffer []byte writeBuffers net.Buffers ioBuffers []*iobufferpool.StreamBuffer @@ -166,14 +163,15 @@ func (c *Connection) startReadLoop() { } if atomic.LoadUint32(&c.closed) == 1 { - // connection has closed, so there is no need to record error log(error may create by CloseRead) + // connection has closed, so there is no need to record error log + // error may be created by CloseRead function logger.Debugf("connection has closed, exit read loop, local addr: %s, remote addr: %s", c.localAddr.String(), c.remoteAddr.String()) return } if te, ok := err.(net.Error); ok && te.Timeout() { - continue // ignore timeout error, read more stream data + continue // ignore timeout error, continue read data } if err == io.EOF 
{ @@ -203,7 +201,7 @@ func (c *Connection) startWriteLoop() { return } c.appendBuffer(buf) - OUTER: + NoMoreData: // Keep reading until writeBufferChan is empty // writeBufferChan may be full when writeLoop call doWrite for i := 0; i < writeBufSize-1; i++ { @@ -214,7 +212,7 @@ func (c *Connection) startWriteLoop() { } c.appendBuffer(buf) default: - break OUTER + break NoMoreData } } } @@ -263,8 +261,10 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) { } if !atomic.CompareAndSwapUint32(&c.closed, 0, 1) { + // connection has already closed, so there is no need to execute below code return } + // close tcp rawConn read first logger.Debugf("enter connection close func(%s), local addr: %s, remote addr: %s", event, c.localAddr.String(), c.remoteAddr.String()) @@ -278,11 +278,7 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) { close(c.connStopChan) _ = c.rawConn.Close() - - c.lastBytesSizeRead, c.lastWriteSizeWrite = 0, 0 - if c.onClose != nil { - c.onClose(event) - } + c.onClose(event) } func (c *Connection) doReadIO() (bufLen int, err error) { @@ -308,15 +304,7 @@ func (c *Connection) doWrite() (int64, error) { _ = c.rawConn.SetWriteDeadline(curr) c.lastWriteDeadlineTime = curr } - bytesSent, err := c.doWriteIO() - if err != nil { - return 0, err - } - - if bytesBufSize := c.writeBufLen(); bytesBufSize != c.lastWriteSizeWrite { - c.lastWriteSizeWrite = bytesBufSize - } - return bytesSent, err + return c.doWriteIO() } func (c *Connection) writeBufLen() (bufLen int) { From 4c08d077db4a57f9345984debc64d10c69dcf391 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Wed, 24 Nov 2021 10:14:02 +0800 Subject: [PATCH 91/99] [tcpproxy] notify read/write loop to exit by connection timeout --- pkg/object/tcpproxy/connection.go | 70 ++++++++++++++++--------------- 1 file changed, 36 insertions(+), 34 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index ea0b8c6a83..f69355a6f9 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -28,7 +28,6 @@ import ( "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/util/fasttime" "github.com/megaease/easegress/pkg/util/iobufferpool" - "github.com/megaease/easegress/pkg/util/limitlistener" "github.com/megaease/easegress/pkg/util/timerpool" ) @@ -92,7 +91,7 @@ func (c *Connection) SetOnClose(onclose func(event ConnectionEvent)) { func (c *Connection) Start() { fnRecover := func() { if r := recover(); r != nil { - logger.Errorf("tcp connection goroutine panic: %v\n%s\n", r, string(debug.Stack())) + logger.Errorf("tcp read/write loop panic: %v\n%s\n", r, string(debug.Stack())) c.Close(NoFlush, LocalClose) } } @@ -153,38 +152,38 @@ func (c *Connection) startReadLoop() { c.Close(NoFlush, LocalClose) return default: - n, err := c.doReadIO() - if n > 0 { - c.onRead(iobufferpool.NewStreamBuffer(c.readBuffer[:n])) - } + } - if err == nil { - continue - } + n, err := c.doReadIO() + if n > 0 { + c.onRead(iobufferpool.NewStreamBuffer(c.readBuffer[:n])) + } + + if err == nil { + continue + } - if atomic.LoadUint32(&c.closed) == 1 { - // connection has closed, so there is no need to record error log - // error may be created by CloseRead function + if te, ok := err.(net.Error); ok && te.Timeout() { + select { + case <-c.connStopChan: logger.Debugf("connection has closed, exit read loop, local addr: %s, remote addr: %s", c.localAddr.String(), c.remoteAddr.String()) return + default: } + continue // ignore timeout error, continue read data 
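// A standalone sketch of the shutdown technique this hunk relies on: Close
// sets an immediate deadline on the raw connection, which forces a blocked
// Read to return a timeout error, and the read loop consults a done channel
// to tell that forced timeout apart from an ordinary idle timeout. All names
// below are hypothetical stand-ins, not the actual Connection fields.
package main

import (
	"net"
	"time"
)

type sketchConn struct {
	raw  net.Conn
	done chan struct{}
}

// close unblocks any pending Read/Write by expiring the deadline, then closes.
func (c *sketchConn) close() {
	close(c.done)
	_ = c.raw.SetDeadline(time.Now())
	_ = c.raw.Close()
}

func (c *sketchConn) readLoop(buf []byte) {
	for {
		_, err := c.raw.Read(buf)
		if err == nil {
			continue
		}
		if ne, ok := err.(net.Error); ok && ne.Timeout() {
			select {
			case <-c.done:
				return // timeout was provoked by close(), exit quietly
			default:
				continue // genuine idle timeout, keep reading
			}
		}
		return // EOF or a real error ends the loop
	}
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	cl, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	c := &sketchConn{raw: cl, done: make(chan struct{})}
	go c.readLoop(make([]byte, 4096))

	time.Sleep(10 * time.Millisecond)
	c.close() // read loop exits via the deadline plus done-channel check
}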
+ } - if te, ok := err.(net.Error); ok && te.Timeout() { - continue // ignore timeout error, continue read data - } - - if err == io.EOF { - logger.Debugf("remote close connection, local addr: %s, remote addr: %s, err: %s", - c.localAddr.String(), c.remoteAddr.String(), err.Error()) - c.Close(NoFlush, RemoteClose) - } else { - logger.Errorf("error on read, local addr: %s, remote addr: %s, err: %s", - c.localAddr.String(), c.remoteAddr.String(), err.Error()) - c.Close(NoFlush, OnReadErrClose) - } - return + if err == io.EOF { + logger.Debugf("remote close connection, local addr: %s, remote addr: %s, err: %s", + c.localAddr.String(), c.remoteAddr.String(), err.Error()) + c.Close(NoFlush, RemoteClose) + } else { + logger.Errorf("error on read, local addr: %s, remote addr: %s, err: %s", + c.localAddr.String(), c.remoteAddr.String(), err.Error()) + c.Close(NoFlush, OnReadErrClose) } + return } } @@ -222,6 +221,14 @@ func (c *Connection) startWriteLoop() { } if te, ok := err.(net.Error); ok && te.Timeout() { + select { + case <-c.connStopChan: + logger.Debugf("connection has closed, exit write loop, local addr: %s, remote addr: %s", + c.localAddr.String(), c.remoteAddr.String()) + return + default: + } + c.Close(NoFlush, OnWriteTimeout) return } @@ -269,16 +276,11 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) { logger.Debugf("enter connection close func(%s), local addr: %s, remote addr: %s", event, c.localAddr.String(), c.remoteAddr.String()) - // close tcp rawConn read first, make sure exit read loop - if conn, ok := c.rawConn.(*limitlistener.Conn); ok { - _ = conn.Conn.(*net.TCPConn).CloseRead() // client connection is wrapped by limitlistener.Conn - } else { - _ = c.rawConn.(*net.TCPConn).CloseRead() - } - close(c.connStopChan) - _ = c.rawConn.Close() + _ = c.rawConn.SetDeadline(time.Now()) // notify break read/write loop + c.onClose(event) + _ = c.rawConn.Close() } func (c *Connection) doReadIO() (bufLen int, err error) { From 7fb34e1e886822ce5dc89bc44c6e0fb984eb7cd8 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Wed, 24 Nov 2021 15:19:33 +0800 Subject: [PATCH 92/99] [tcpproxy] simplify io eof --- pkg/object/tcpproxy/connection.go | 4 ++-- pkg/util/iobufferpool/constants.go | 7 ------- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index f69355a6f9..41d22a30b8 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -233,7 +233,7 @@ func (c *Connection) startWriteLoop() { return } - if err == iobufferpool.ErrEOF { + if err == io.EOF { logger.Debugf("finish write with eof, local addr: %s, remote addr: %s", c.localAddr.String(), c.remoteAddr.String()) c.Close(NoFlush, LocalClose) @@ -327,7 +327,7 @@ func (c *Connection) doWriteIO() (bytesSent int64, err error) { c.ioBuffers[i] = nil c.writeBuffers[i] = nil if buf.EOF() { - err = iobufferpool.ErrEOF + err = io.EOF } buf.Release() } diff --git a/pkg/util/iobufferpool/constants.go b/pkg/util/iobufferpool/constants.go index a514d8674f..99ae7e25c6 100644 --- a/pkg/util/iobufferpool/constants.go +++ b/pkg/util/iobufferpool/constants.go @@ -17,16 +17,9 @@ package iobufferpool -import "errors" - const ( // UDPPacketMaxSize max size of udp packet UDPPacketMaxSize = 65535 // DefaultBufferReadCapacity default buffer capacity for stream proxy such as tcp DefaultBufferReadCapacity = 1 << 16 ) - -var ( - // ErrEOF io buffer eof sign - ErrEOF = errors.New("EOF") -) From 98b638a414b30355410ab3bd7bb5b0a6a69dfcfe Mon Sep 17 
00:00:00 2001 From: jxd134 Date: Thu, 25 Nov 2021 17:20:57 +0800 Subject: [PATCH 93/99] [tcpproxy] Optimize the read/write exit mechanism --- pkg/object/tcpproxy/connection.go | 44 +++++++++++++------------------ pkg/object/tcpproxy/tcpserver.go | 2 +- 2 files changed, 19 insertions(+), 27 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 41d22a30b8..4848f7e796 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -54,7 +54,7 @@ type Connection struct { mu sync.Mutex connStopChan chan struct{} // use for connection close - listenerStopChan chan struct{} // use for listener close + listenerStopChan chan struct{} // notify tcp listener has been closed, just use in read loop lastReadDeadlineTime time.Time lastWriteDeadlineTime time.Time @@ -142,36 +142,33 @@ func (c *Connection) startReadLoop() { } }() + var n int + var err error for { - select { - case <-c.connStopChan: + if atomic.LoadUint32(&c.closed) == 1 { + logger.Debugf("connection has been closed, exit read loop, local addr: %s, remote addr: %s", + c.localAddr.String(), c.remoteAddr.String()) return + } + + select { case <-c.listenerStopChan: - logger.Debugf("connection close due to listener stopped, local addr: %s, remote addr: %s", + logger.Debugf("listener stopped, exit read loop,local addr: %s, remote addr: %s", c.localAddr.String(), c.remoteAddr.String()) c.Close(NoFlush, LocalClose) return default: } - n, err := c.doReadIO() - if n > 0 { + if n, err = c.doReadIO(); n > 0 { c.onRead(iobufferpool.NewStreamBuffer(c.readBuffer[:n])) } if err == nil { continue } - if te, ok := err.(net.Error); ok && te.Timeout() { - select { - case <-c.connStopChan: - logger.Debugf("connection has closed, exit read loop, local addr: %s, remote addr: %s", - c.localAddr.String(), c.remoteAddr.String()) - return - default: - } - continue // ignore timeout error, continue read data + continue // c.closed will be check in the front of read loop } if err == io.EOF { @@ -221,15 +218,12 @@ func (c *Connection) startWriteLoop() { } if te, ok := err.(net.Error); ok && te.Timeout() { - select { - case <-c.connStopChan: - logger.Debugf("connection has closed, exit write loop, local addr: %s, remote addr: %s", + if atomic.LoadUint32(&c.closed) == 1 { + logger.Debugf("connection has been close, exit write loop, local addr: %s, remote addr: %s", c.localAddr.String(), c.remoteAddr.String()) - return - default: + } else { + c.Close(NoFlush, OnWriteTimeout) } - - c.Close(NoFlush, OnWriteTimeout) return } @@ -263,7 +257,7 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) { }() if ccType == FlushWrite { - _ = c.Write(iobufferpool.NewEOFStreamBuffer()) + _ = c.Write(iobufferpool.NewEOFStreamBuffer()) // wait for write loop to call close function again return } @@ -271,13 +265,11 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) { // connection has already closed, so there is no need to execute below code return } - - // close tcp rawConn read first logger.Debugf("enter connection close func(%s), local addr: %s, remote addr: %s", event, c.localAddr.String(), c.remoteAddr.String()) close(c.connStopChan) - _ = c.rawConn.SetDeadline(time.Now()) // notify break read/write loop + _ = c.rawConn.SetDeadline(time.Now()) // notify read/write loop to break c.onClose(event) _ = c.rawConn.Close() diff --git a/pkg/object/tcpproxy/tcpserver.go b/pkg/object/tcpproxy/tcpserver.go index fe8052b8c1..87787c91d0 100644 --- a/pkg/object/tcpproxy/tcpserver.go +++ 
b/pkg/object/tcpproxy/tcpserver.go @@ -86,7 +86,7 @@ func (l4 *TCPServer) Status() *supervisor.Status { return &supervisor.Status{} } -// Close closes TCPServer. +// Close actually close tcp server runtime func (l4 *TCPServer) Close() { l4.runtime.Close() } From 0e07f18e131ff60fe56a51503a9d528630667a18 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 31 Dec 2021 11:47:33 +0800 Subject: [PATCH 94/99] [udpproxy] refactor start session method --- pkg/object/udpproxy/session.go | 95 +++++++++++++++++----------------- 1 file changed, 48 insertions(+), 47 deletions(-) diff --git a/pkg/object/udpproxy/session.go b/pkg/object/udpproxy/session.go index d8b0df4832..a99d7336d1 100644 --- a/pkg/object/udpproxy/session.go +++ b/pkg/object/udpproxy/session.go @@ -56,7 +56,7 @@ func newSession(clientAddr *net.UDPAddr, serverAddr string, serverConn net.Conn, serverConn: serverConn, serverIdleTimeout: serverIdleTimeout, clientIdleTimeout: clientIdleTimeout, - writeBuf: make(chan *iobufferpool.Packet, 512), + writeBuf: make(chan *iobufferpool.Packet, 256), stopped: false, stopChan: make(chan struct{}), @@ -64,62 +64,63 @@ func newSession(clientAddr *net.UDPAddr, serverAddr string, serverConn net.Conn, onClose: onClose, } - go func() { - var t *time.Timer - var idleCheck <-chan time.Time + go s.startSession(serverAddr, clientIdleTimeout) + return &s +} - if clientIdleTimeout > 0 { - t = time.NewTimer(clientIdleTimeout) - idleCheck = t.C - } +func (s *session) startSession(serverAddr string, clientIdleTimeout time.Duration) { + var t *time.Timer + var idleCheck <-chan time.Time - for { - select { - case <-s.listenerStop: - s.close() - case <-idleCheck: + if clientIdleTimeout > 0 { + t = time.NewTimer(clientIdleTimeout) + idleCheck = t.C + } + + for { + select { + case <-s.listenerStop: + s.close() + case <-idleCheck: + s.close() + case buf, ok := <-s.writeBuf: + if !ok { s.close() - case buf, ok := <-s.writeBuf: - if !ok { - s.close() - continue - } + continue + } - if t != nil { - if !t.Stop() { - <-t.C - } - t.Reset(clientIdleTimeout) + if t != nil { + if !t.Stop() { + <-t.C } + t.Reset(clientIdleTimeout) + } - bufLen := len(buf.Payload) - n, err := s.serverConn.Write(buf.Bytes()) - buf.Release() + bufLen := len(buf.Payload) + n, err := s.serverConn.Write(buf.Bytes()) + buf.Release() - if err != nil { - logger.Errorf("udp connection flush data to server(%s) failed, err: %+v", serverAddr, err) - s.close() - continue - } + if err != nil { + logger.Errorf("udp connection flush data to server(%s) failed, err: %+v", serverAddr, err) + s.close() + continue + } - if bufLen != n { - logger.Errorf("udp connection flush data to server(%s) failed, should write %d but written %d", - serverAddr, bufLen, n) - s.close() - } - case <-s.stopChan: - if t != nil { - t.Stop() - } - _ = s.serverConn.Close() - s.cleanWriteBuf() - s.onClose() - return + if bufLen != n { + logger.Errorf("udp connection flush data to server(%s) failed, should write %d but written %d", + serverAddr, bufLen, n) + s.close() + } + case <-s.stopChan: + if t != nil { + t.Stop() } + _ = s.serverConn.Close() + s.cleanWriteBuf() + s.onClose() + return } - }() - - return &s + } } // Write send data to buffer channel, wait flush to server From af9a795ef79cae3608f635743fca527c97153e4a Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 31 Dec 2021 14:33:18 +0800 Subject: [PATCH 95/99] [util/layer4ipfilters] fix ipfilters create return value --- pkg/object/udpproxy/runtime.go | 8 +++----- pkg/util/ipfilter/layer4ipfilters.go | 2 +- 2 files changed, 4 
insertions(+), 6 deletions(-) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 649266299f..97828ad93d 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -114,11 +114,9 @@ func (r *runtime) startServer() { continue } - if r.ipFilters != nil { - if !r.ipFilters.AllowIP(clientAddr.IP.String()) { - logger.Debugf("discard udp packet from %s send to udp server(:%d)", clientAddr.IP.String(), r.spec.Port) - continue - } + if r.ipFilters != nil && !r.ipFilters.AllowIP(clientAddr.IP.String()) { + logger.Debugf("discard udp packet from %s send to udp server(:%d)", clientAddr.IP.String(), r.spec.Port) + continue } if !r.spec.HasResponse { diff --git a/pkg/util/ipfilter/layer4ipfilters.go b/pkg/util/ipfilter/layer4ipfilters.go index 160c47b773..d24fcd06c7 100644 --- a/pkg/util/ipfilter/layer4ipfilters.go +++ b/pkg/util/ipfilter/layer4ipfilters.go @@ -37,7 +37,7 @@ type ( // NewLayer4IPFilters create layer4 ip filters func NewLayer4IPFilters(spec *Spec) *Layer4IpFilters { if spec == nil { - return &Layer4IpFilters{} + return nil } m := &Layer4IpFilters{} From 8edcc7a8b2fb34ab63fa740800c6254cd6fec749 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 4 Jan 2022 22:09:45 +0800 Subject: [PATCH 96/99] [util/layer4ipfilters] fix ipfilters nil check problem --- pkg/object/tcpproxy/runtime.go | 2 +- pkg/object/udpproxy/runtime.go | 2 +- pkg/util/ipfilter/layer4ipfilters.go | 16 ++++++++-------- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/object/tcpproxy/runtime.go b/pkg/object/tcpproxy/runtime.go index 8eb639f3a0..42b04c3670 100644 --- a/pkg/object/tcpproxy/runtime.go +++ b/pkg/object/tcpproxy/runtime.go @@ -271,7 +271,7 @@ func (r *runtime) onAccept() func(conn net.Conn, listenerStop chan struct{}) { return func(rawConn net.Conn, listenerStop chan struct{}) { clientIP := rawConn.RemoteAddr().(*net.TCPAddr).IP.String() - if r.ipFilters != nil && !r.ipFilters.AllowIP(clientIP) { + if !r.ipFilters.AllowIP(clientIP) { _ = rawConn.Close() logger.Infof("close tcp connection from %s to %s which ip is not allowed", rawConn.RemoteAddr().String(), rawConn.LocalAddr().String()) diff --git a/pkg/object/udpproxy/runtime.go b/pkg/object/udpproxy/runtime.go index 97828ad93d..d1a034ce92 100644 --- a/pkg/object/udpproxy/runtime.go +++ b/pkg/object/udpproxy/runtime.go @@ -114,7 +114,7 @@ func (r *runtime) startServer() { continue } - if r.ipFilters != nil && !r.ipFilters.AllowIP(clientAddr.IP.String()) { + if !r.ipFilters.AllowIP(clientAddr.IP.String()) { logger.Debugf("discard udp packet from %s send to udp server(:%d)", clientAddr.IP.String(), r.spec.Port) continue } diff --git a/pkg/util/ipfilter/layer4ipfilters.go b/pkg/util/ipfilter/layer4ipfilters.go index d24fcd06c7..7e90321f3c 100644 --- a/pkg/util/ipfilter/layer4ipfilters.go +++ b/pkg/util/ipfilter/layer4ipfilters.go @@ -36,22 +36,22 @@ type ( // NewLayer4IPFilters create layer4 ip filters func NewLayer4IPFilters(spec *Spec) *Layer4IpFilters { + m := &Layer4IpFilters{} if spec == nil { - return nil + m.rules.Store(&ipFiltersRules{}) + } else { + m.rules.Store(&ipFiltersRules{ + spec: spec, + ipFilter: New(spec), + }) } - - m := &Layer4IpFilters{} - m.rules.Store(&ipFiltersRules{ - spec: spec, - ipFilter: New(spec), - }) return m } // AllowIP check whether the IP is allowed to pass func (i *Layer4IpFilters) AllowIP(ip string) bool { rules := i.rules.Load().(*ipFiltersRules) - if rules == nil || rules.spec == nil { + if rules.spec == nil { return true } return 
rules.ipFilter.Allow(ip) From 59dc6e6571ffe67ed8835ce77e6651acb29085a6 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 4 Jan 2022 22:10:45 +0800 Subject: [PATCH 97/99] [tcpproxy] fix tcp connection close check log --- pkg/object/tcpproxy/connection.go | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 4848f7e796..7949dd1920 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -21,6 +21,7 @@ import ( "io" "net" "runtime/debug" + "strings" "sync" "sync/atomic" "time" @@ -176,9 +177,16 @@ func (c *Connection) startReadLoop() { c.localAddr.String(), c.remoteAddr.String(), err.Error()) c.Close(NoFlush, RemoteClose) } else { - logger.Errorf("error on read, local addr: %s, remote addr: %s, err: %s", - c.localAddr.String(), c.remoteAddr.String(), err.Error()) - c.Close(NoFlush, OnReadErrClose) + // it's hard to distinguish errors caused by c.rawConn.Close() + if atomic.LoadUint32(&c.closed) == 1 && + strings.Contains(err.Error(), "use of closed network connection") { + logger.Debugf("stop read due to close connection, local addr: %s, remote addr: %s, err: %s", + c.localAddr.String(), c.remoteAddr.String(), err.Error()) + } else { + logger.Errorf("error on read, local addr: %s, remote addr: %s, err: %s", + c.localAddr.String(), c.remoteAddr.String(), err.Error()) + c.Close(NoFlush, OnReadErrClose) + } } return } @@ -265,11 +273,11 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) { // connection has already closed, so there is no need to execute below code return } - logger.Debugf("enter connection close func(%s), local addr: %s, remote addr: %s", - event, c.localAddr.String(), c.remoteAddr.String()) close(c.connStopChan) _ = c.rawConn.SetDeadline(time.Now()) // notify read/write loop to break + logger.Debugf("enter connection close func(%s), local addr: %s, remote addr: %s", + event, c.localAddr.String(), c.remoteAddr.String()) c.onClose(event) _ = c.rawConn.Close() From 97b8f1a446cfad6d90fa86778e912988e3b17631 Mon Sep 17 00:00:00 2001 From: jxd134 Date: Tue, 4 Jan 2022 22:19:51 +0800 Subject: [PATCH 98/99] [tcpproxy/udpproxy] Add dummy TCP and UDP clients to facilitate reviewing and testing (thanks to @ samutamm) --- example/backend-service/echo/echo.go | 83 +++++++++++++++++++++++++++- example/client/tcp_udp.go | 83 ++++++++++++++++++++++++++++ 2 files changed, 164 insertions(+), 2 deletions(-) create mode 100644 example/client/tcp_udp.go diff --git a/example/backend-service/echo/echo.go b/example/backend-service/echo/echo.go index 2e31e22db6..7321327179 100644 --- a/example/backend-service/echo/echo.go +++ b/example/backend-service/echo/echo.go @@ -1,14 +1,17 @@ package main import ( + "bufio" "fmt" "io" + "log" + "net" "net/http" "os" "time" ) -// TeeWriter is an io.Writer wapper. +// TeeWriter is an io.Writer wrapper. 
type TeeWriter struct { writers []io.Writer } @@ -26,7 +29,7 @@ func (tw *TeeWriter) Write(p []byte) (n int, err error) { return len(p), nil } -func main() { +func httpServer() { echoHandler := func(w http.ResponseWriter, req *http.Request) { time.Sleep(10 * time.Millisecond) body, err := io.ReadAll(req.Body) @@ -60,3 +63,79 @@ func main() { http.ListenAndServe(":9095", nil) fmt.Println("listen and serve failed") } + +func tcpServer() { + echoHandler := func(conn net.Conn) { + time.Sleep(10 * time.Millisecond) + reader := bufio.NewReader(conn) + for { + message, err := reader.ReadString('\n') + if err != nil { + conn.Close() + return + } + fmt.Println("Message incoming: ", string(message)) + responseMsg := []byte( + "\nYour Message \n" + + "============== \n" + + "Message incoming: " + string(message) + "\n", + ) + conn.Write(responseMsg) + } + } + + listener, err := net.Listen("tcp", "127.0.0.1:9095") + if err != nil { + log.Fatal(err) + } + defer listener.Close() + + for { + conn, err := listener.Accept() + if err != nil { + log.Fatal(err) + } + go echoHandler(conn) + } +} + +func udpServer() { + echoHandler := func(pc net.PacketConn, addr net.Addr, buf []byte) { + time.Sleep(10 * time.Millisecond) + + fmt.Println("Your Message") + fmt.Println("==============") + fmt.Printf("Message incoming: %s \n", string(buf)) + + pc.WriteTo(buf, addr) + } + pc, err := net.ListenPacket("udp", ":9095") + if err != nil { + log.Fatal(err) + } + defer pc.Close() + + for { + buf := make([]byte, 1024) + n, addr, err := pc.ReadFrom(buf) + if err != nil { + continue + } + go echoHandler(pc, addr, buf[:n]) + } +} + +func main() { + protocol := "http" + if len(os.Args) > 1 { + protocol = os.Args[1] + } + switch protocol { + case "tcp": + tcpServer() + case "udp": + udpServer() + default: + httpServer() + } +} diff --git a/example/client/tcp_udp.go b/example/client/tcp_udp.go new file mode 100644 index 0000000000..c751ec2d14 --- /dev/null +++ b/example/client/tcp_udp.go @@ -0,0 +1,83 @@ +package main + +import ( + "bufio" + "fmt" + "net" + "os" +) + +func tcpClient() { + strEcho := "Hello from client! 
\n" + servAddr := "127.0.0.1:10080" + if len(os.Args) > 2 { + servAddr = os.Args[2] + } + tcpAddr, err := net.ResolveTCPAddr("tcp", servAddr) + if err != nil { + fmt.Println("ResolveTCPAddr failed:", err.Error()) + os.Exit(1) + } + + conn, err := net.DialTCP("tcp", nil, tcpAddr) + if err != nil { + fmt.Println("Dial failed:", err.Error()) + os.Exit(1) + } + + _, err = conn.Write([]byte(strEcho)) + if err != nil { + fmt.Println("Write to server failed:", err.Error()) + os.Exit(1) + } + + fmt.Println("write to server = ", strEcho) + + reply := make([]byte, 1024) + + _, err = conn.Read(reply) + if err != nil { + fmt.Println("Write to server failed:", err.Error()) + os.Exit(1) + } + + fmt.Println("reply from server=", string(reply)) + + _ = conn.Close() +} + +func udpClient() { + p := make([]byte, 2048) + servAddr := "127.0.0.1:10070" + if len(os.Args) > 2 { + servAddr = os.Args[2] + } + conn, err := net.Dial("udp", servAddr) + if err != nil { + fmt.Printf("Some error %v", err) + return + } + _, _ = fmt.Fprintf(conn, "Ping from client") + _, err = bufio.NewReader(conn).Read(p) + if err == nil { + fmt.Printf("%s\n", p) + } else { + fmt.Printf("Some error %v\n", err) + } + _ = conn.Close() +} + +func main() { + protocol := "" + if len(os.Args) > 1 { + protocol = os.Args[1] + } + switch protocol { + case "tcp": + tcpClient() + case "udp": + udpClient() + default: + fmt.Println("Please provide udp or tcp flag.") + } +} From fc145ece780fd963d698e2d345ac2f6f29d37a5e Mon Sep 17 00:00:00 2001 From: jxd134 Date: Fri, 7 Jan 2022 23:01:32 +0800 Subject: [PATCH 99/99] [tcpproxy/udpproxy] add read/write loop channel to make sure close socket gracefully --- pkg/object/tcpproxy/connection.go | 71 +++++++++++++++++-------------- 1 file changed, 39 insertions(+), 32 deletions(-) diff --git a/pkg/object/tcpproxy/connection.go b/pkg/object/tcpproxy/connection.go index 7949dd1920..1d8f45fc7d 100644 --- a/pkg/object/tcpproxy/connection.go +++ b/pkg/object/tcpproxy/connection.go @@ -21,7 +21,6 @@ import ( "io" "net" "runtime/debug" - "strings" "sync" "sync/atomic" "time" @@ -54,7 +53,9 @@ type Connection struct { writeBufferChan chan *iobufferpool.StreamBuffer mu sync.Mutex - connStopChan chan struct{} // use for connection close + readLoopExit chan struct{} + writeLoopExit chan struct{} + connStopChan chan struct{} // notify write loop doesn't block on read buffer channel listenerStopChan chan struct{} // notify tcp listener has been closed, just use in read loop lastReadDeadlineTime time.Time @@ -70,10 +71,12 @@ func NewClientConn(conn net.Conn, listenerStopChan chan struct{}) *Connection { rawConn: conn, localAddr: conn.LocalAddr(), remoteAddr: conn.RemoteAddr(), + readLoopExit: make(chan struct{}), + writeLoopExit: make(chan struct{}), + connStopChan: make(chan struct{}), listenerStopChan: listenerStopChan, mu: sync.Mutex{}, - connStopChan: make(chan struct{}), writeBufferChan: make(chan *iobufferpool.StreamBuffer, writeBufSize), } } @@ -138,6 +141,7 @@ func (c *Connection) Write(buf *iobufferpool.StreamBuffer) (err error) { func (c *Connection) startReadLoop() { defer func() { + close(c.readLoopExit) if c.readBuffer != nil { tcpBufferPool.Put(c.readBuffer[:iobufferpool.DefaultBufferReadCapacity]) } @@ -177,27 +181,30 @@ func (c *Connection) startReadLoop() { c.localAddr.String(), c.remoteAddr.String(), err.Error()) c.Close(NoFlush, RemoteClose) } else { - // it's hard to distinguish errors caused by c.rawConn.Close() - if atomic.LoadUint32(&c.closed) == 1 && - strings.Contains(err.Error(), "use of closed 
network connection") { - logger.Debugf("stop read due to close connection, local addr: %s, remote addr: %s, err: %s", - c.localAddr.String(), c.remoteAddr.String(), err.Error()) - } else { - logger.Errorf("error on read, local addr: %s, remote addr: %s, err: %s", - c.localAddr.String(), c.remoteAddr.String(), err.Error()) - c.Close(NoFlush, OnReadErrClose) - } + logger.Errorf("error on read, local addr: %s, remote addr: %s, err: %s", + c.localAddr.String(), c.remoteAddr.String(), err.Error()) + c.Close(NoFlush, OnReadErrClose) } return } } func (c *Connection) startWriteLoop() { - var err error + + defer func() { + close(c.writeLoopExit) + }() + for { + if atomic.LoadUint32(&c.closed) == 1 { + logger.Debugf("connection has been closed, exit write loop, local addr: %s, remote addr: %s", + c.localAddr.String(), c.remoteAddr.String()) + return + } + select { case <-c.connStopChan: - logger.Debugf("connection exit write loop, local addr: %s, remote addr: %s", + logger.Debugf("connection has been closed, exit write loop, local addr: %s, remote addr: %s", c.localAddr.String(), c.remoteAddr.String()) return case buf, ok := <-c.writeBufferChan: @@ -221,18 +228,12 @@ func (c *Connection) startWriteLoop() { } } - if _, err = c.doWrite(); err == nil { + _, err := c.doWrite() + if err == nil { continue } - if te, ok := err.(net.Error); ok && te.Timeout() { - if atomic.LoadUint32(&c.closed) == 1 { - logger.Debugf("connection has been close, exit write loop, local addr: %s, remote addr: %s", - c.localAddr.String(), c.remoteAddr.String()) - } else { - c.Close(NoFlush, OnWriteTimeout) - } - return + continue } if err == io.EOF { @@ -273,14 +274,19 @@ func (c *Connection) Close(ccType CloseType, event ConnectionEvent) { // connection has already closed, so there is no need to execute below code return } - - close(c.connStopChan) - _ = c.rawConn.SetDeadline(time.Now()) // notify read/write loop to break - logger.Debugf("enter connection close func(%s), local addr: %s, remote addr: %s", + logger.Infof("enter connection close func(%s), local addr: %s, remote addr: %s", event, c.localAddr.String(), c.remoteAddr.String()) + _ = c.rawConn.SetDeadline(time.Now()) // notify read/write loop to exit + close(c.connStopChan) c.onClose(event) - _ = c.rawConn.Close() + + go func() { + <-c.readLoopExit + <-c.writeLoopExit + // wait for read/write loop exit, then close socket(avoid exceptions caused by close operations) + _ = c.rawConn.Close() + }() } func (c *Connection) doReadIO() (bufLen int, err error) { @@ -346,12 +352,13 @@ type ServerConnection struct { func NewServerConn(connectTimeout uint32, serverAddr net.Addr, listenerStopChan chan struct{}) *ServerConnection { conn := &ServerConnection{ Connection: Connection{ - remoteAddr: serverAddr, - + remoteAddr: serverAddr, + readLoopExit: make(chan struct{}), + writeLoopExit: make(chan struct{}), + connStopChan: make(chan struct{}), writeBufferChan: make(chan *iobufferpool.StreamBuffer, writeBufSize), mu: sync.Mutex{}, - connStopChan: make(chan struct{}), listenerStopChan: listenerStopChan, }, connectTimeout: time.Duration(connectTimeout) * time.Millisecond,