fix CVE-2023-44487 for containerized-data-importer

Henry Li committed Jan 24, 2025
1 parent 25d8212 · commit de85f4e

The new patch backports the upstream grpc-go mitigation for HTTP/2 Rapid Reset (CVE-2023-44487), which enforces the MaxConcurrentStreams limit on the server side so that rapidly reset streams cannot fan out into an unbounded number of handler goroutines, together with a k8s.io/apimachinery change that stops the HandleError backoff from sleeping while holding its lock.

Showing 2 changed files with 260 additions and 1 deletion.
@@ -0,0 +1,258 @@
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 3dd1564..9d9a3fd 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -165,15 +165,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 		ID:  http2.SettingMaxFrameSize,
 		Val: http2MaxFrameLen,
 	}}
-	// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
-	// permitted in the HTTP2 spec.
-	maxStreams := config.MaxStreams
-	if maxStreams == 0 {
-		maxStreams = math.MaxUint32
-	} else {
+	if config.MaxStreams != math.MaxUint32 {
 		isettings = append(isettings, http2.Setting{
 			ID:  http2.SettingMaxConcurrentStreams,
-			Val: maxStreams,
+			Val: config.MaxStreams,
 		})
 	}
 	dynamicWindow := true
@@ -252,7 +247,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 		framer:            framer,
 		readerDone:        make(chan struct{}),
 		writerDone:        make(chan struct{}),
-		maxStreams:        maxStreams,
+		maxStreams:        config.MaxStreams,
 		inTapHandle:       config.InTapHandle,
 		fc:                &trInFlow{limit: uint32(icwz)},
 		state:             reachable,
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index f4dde72..98839ad 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -43,7 +43,6 @@ import (
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/channelz"
-	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
@@ -74,10 +73,10 @@ func init() {
 		srv.drainServerTransports(addr)
 	}
 	internal.AddGlobalServerOptions = func(opt ...ServerOption) {
-		extraServerOptions = append(extraServerOptions, opt...)
+		globalServerOptions = append(globalServerOptions, opt...)
 	}
 	internal.ClearGlobalServerOptions = func() {
-		extraServerOptions = nil
+		globalServerOptions = nil
 	}
 	internal.BinaryLogger = binaryLogger
 	internal.JoinServerOptions = newJoinServerOption
@@ -115,12 +114,6 @@ type serviceInfo struct {
 	mdata    interface{}
 }
 
-type serverWorkerData struct {
-	st     transport.ServerTransport
-	wg     *sync.WaitGroup
-	stream *transport.Stream
-}
-
 // Server is a gRPC server to serve RPC requests.
 type Server struct {
 	opts serverOptions
@@ -145,7 +138,7 @@ type Server struct {
 	channelzID *channelz.Identifier
 	czData     *channelzData
 
-	serverWorkerChannels []chan *serverWorkerData
+	serverWorkerChannel chan func()
 }
 
 type serverOptions struct {
@@ -177,13 +170,14 @@ type serverOptions struct {
 }
 
 var defaultServerOptions = serverOptions{
+	maxConcurrentStreams:  math.MaxUint32,
 	maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
 	maxSendMessageSize:    defaultServerMaxSendMessageSize,
 	connectionTimeout:     120 * time.Second,
 	writeBufferSize:       defaultWriteBufSize,
 	readBufferSize:        defaultReadBufSize,
 }
-var extraServerOptions []ServerOption
+var globalServerOptions []ServerOption
 
 // A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
 type ServerOption interface {
@@ -387,6 +381,9 @@ func MaxSendMsgSize(m int) ServerOption {
 // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
 // of concurrent streams to each ServerTransport.
 func MaxConcurrentStreams(n uint32) ServerOption {
+	if n == 0 {
+		n = math.MaxUint32
+	}
 	return newFuncServerOption(func(o *serverOptions) {
 		o.maxConcurrentStreams = n
 	})
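With this hunk applied, MaxConcurrentStreams(0) is normalized to math.MaxUint32 and the limit is enforced server-side rather than only advertised in HTTP/2 SETTINGS. A minimal sketch of how a server built against the patched vendored gRPC might set a finite cap (the listener address and the value 100 are illustrative, not from the patch):

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	// Cap each connection at 100 concurrent streams. After this patch the
	// cap is enforced even if a client ignores the SETTINGS frame, which
	// is what blunts Rapid Reset floods.
	srv := grpc.NewServer(grpc.MaxConcurrentStreams(100))
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}
```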
@@ -565,42 +562,35 @@ const serverWorkerResetThreshold = 1 << 16
 // re-allocations (see the runtime.morestack problem [1]).
 //
 // [1] https://github.com/golang/go/issues/18138
-func (s *Server) serverWorker(ch chan *serverWorkerData) {
-	// To make sure all server workers don't reset at the same time, choose a
-	// random number of iterations before resetting.
-	threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
-	for completed := 0; completed < threshold; completed++ {
-		data, ok := <-ch
+func (s *Server) serverWorker() {
+	for completed := 0; completed < serverWorkerResetThreshold; completed++ {
+		f, ok := <-s.serverWorkerChannel
 		if !ok {
 			return
 		}
-		s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
-		data.wg.Done()
+		f()
 	}
-	go s.serverWorker(ch)
+	go s.serverWorker()
 }
 
 // initServerWorkers creates worker goroutines and channels to process incoming
 // connections to reduce the time spent overall on runtime.morestack.
 func (s *Server) initServerWorkers() {
-	s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
+	s.serverWorkerChannel = make(chan func())
 	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
-		s.serverWorkerChannels[i] = make(chan *serverWorkerData)
-		go s.serverWorker(s.serverWorkerChannels[i])
+		go s.serverWorker()
 	}
 }
 
 func (s *Server) stopServerWorkers() {
-	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
-		close(s.serverWorkerChannels[i])
-	}
+	close(s.serverWorkerChannel)
 }
 
 // NewServer creates a gRPC server which has no service registered and has not
 // started to accept requests yet.
 func NewServer(opt ...ServerOption) *Server {
 	opts := defaultServerOptions
-	for _, o := range extraServerOptions {
+	for _, o := range globalServerOptions {
 		o.apply(&opts)
 	}
 	for _, o := range opt {
@@ -945,25 +935,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
 	defer st.Close()
 	var wg sync.WaitGroup
 
-	var roundRobinCounter uint32
+	streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
 	st.HandleStreams(func(stream *transport.Stream) {
 		wg.Add(1)
+
+		streamQuota.acquire()
+		f := func() {
+			defer streamQuota.release()
+			defer wg.Done()
+			s.handleStream(st, stream, s.traceInfo(st, stream))
+		}
+
 		if s.opts.numServerWorkers > 0 {
-			data := &serverWorkerData{st: st, wg: &wg, stream: stream}
 			select {
-			case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
+			case s.serverWorkerChannel <- f:
+				return
 			default:
 				// If all stream workers are busy, fallback to the default code path.
-				go func() {
-					s.handleStream(st, stream, s.traceInfo(st, stream))
-					wg.Done()
-				}()
 			}
 		} else {
-			go f()
-			go func() {
-				defer wg.Done()
-				s.handleStream(st, stream, s.traceInfo(st, stream))
-			}()
+			go f()
 		}
 	}, func(ctx context.Context, method string) context.Context {
 		if !EnableTracing {
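The reworked serveStreams wraps each handler in a closure f, offers it to an idle worker over a single shared channel, and only falls back to a fresh goroutine when every worker is busy. A self-contained sketch of that select-with-default dispatch (worker count and job bodies are invented for illustration):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	work := make(chan func()) // unbuffered, like serverWorkerChannel
	for i := 0; i < 2; i++ {  // two workers, for illustration
		go func() {
			for f := range work {
				f()
			}
		}()
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		i := i // capture per iteration (pre-Go 1.22 loop semantics)
		wg.Add(1)
		f := func() {
			defer wg.Done()
			fmt.Println("handled job", i)
		}
		select {
		case work <- f: // an idle worker took it
		default:
			go f() // all workers busy: fall back to a new goroutine
		}
	}
	wg.Wait()
	close(work)
}
```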
@@ -1978,3 +1969,34 @@ type channelzServer struct {
 func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
 	return c.s.channelzMetric()
 }
+
+// atomicSemaphore implements a blocking, counting semaphore. acquire should be
+// called synchronously; release may be called asynchronously.
+type atomicSemaphore struct {
+	n    atomic.Int64
+	wait chan struct{}
+}
+
+func (q *atomicSemaphore) acquire() {
+	if q.n.Add(-1) < 0 {
+		// We ran out of quota. Block until a release happens.
+		<-q.wait
+	}
+}
+
+func (q *atomicSemaphore) release() {
+	// N.B. the "<= 0" check below should allow for this to work with multiple
+	// concurrent calls to acquire, but also note that with synchronous calls to
+	// acquire, as our system does, n will never be less than -1. There are
+	// fairness issues (queuing) to consider if this was to be generalized.
+	if q.n.Add(1) <= 0 {
+		// An acquire was waiting on us. Unblock it.
+		q.wait <- struct{}{}
+	}
+}
+
+func newHandlerQuota(n uint32) *atomicSemaphore {
+	a := &atomicSemaphore{wait: make(chan struct{}, 1)}
+	a.n.Store(int64(n))
+	return a
+}
\ No newline at end of file
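This quota is the heart of the CVE-2023-44487 fix: serveStreams calls acquire synchronously before scheduling a handler, so a client that opens and resets streams as fast as it can still never has more than maxConcurrentStreams handlers in flight. A runnable sketch of the same pattern (type and function names mirror the patch; the "stream handlers" are stand-ins):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type atomicSemaphore struct {
	n    atomic.Int64
	wait chan struct{}
}

func newHandlerQuota(n uint32) *atomicSemaphore {
	a := &atomicSemaphore{wait: make(chan struct{}, 1)}
	a.n.Store(int64(n))
	return a
}

func (q *atomicSemaphore) acquire() {
	if q.n.Add(-1) < 0 {
		<-q.wait // out of quota: block until a release hands us a slot
	}
}

func (q *atomicSemaphore) release() {
	if q.n.Add(1) <= 0 {
		q.wait <- struct{}{} // an acquire is waiting: unblock it
	}
}

func main() {
	quota := newHandlerQuota(2) // at most two handlers in flight
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		quota.acquire() // synchronous, as in serveStreams
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			defer quota.release()
			fmt.Println("handling stream", id)
		}(i)
	}
	wg.Wait()
}
```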
diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
index d738725..3674914 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
@@ -126,14 +126,17 @@ type rudimentaryErrorBackoff struct {
 // OnError will block if it is called more often than the embedded period time.
 // This will prevent overly tight hot error loops.
 func (r *rudimentaryErrorBackoff) OnError(error) {
+	now := time.Now() // start the timer before acquiring the lock
 	r.lastErrorTimeLock.Lock()
-	defer r.lastErrorTimeLock.Unlock()
-	d := time.Since(r.lastErrorTime)
-	if d < r.minPeriod {
-		// If the time moves backwards for any reason, do nothing
-		time.Sleep(r.minPeriod - d)
-	}
+	d := now.Sub(r.lastErrorTime)
 	r.lastErrorTime = time.Now()
+	r.lastErrorTimeLock.Unlock()
+
+	// Do not sleep with the lock held because that causes all callers of HandleError to block.
+	// We only want the current goroutine to block.
+	// A negative or zero duration causes time.Sleep to return immediately.
+	// If the time moves backwards for any reason, do nothing.
+	time.Sleep(r.minPeriod - d)
 }
 
 // GetCaller returns the caller of the function that calls it.
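The apimachinery change moves the sleep out of the critical section, so only the goroutine that reported the error pauses instead of every HandleError caller serializing behind one mutex. A rough standalone sketch of the fixed pattern (field names mirror the patch; the demo period and goroutine count are arbitrary):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type rudimentaryErrorBackoff struct {
	minPeriod         time.Duration
	lastErrorTimeLock sync.Mutex
	lastErrorTime     time.Time
}

func (r *rudimentaryErrorBackoff) OnError(_ error) {
	now := time.Now() // start the timer before acquiring the lock
	r.lastErrorTimeLock.Lock()
	d := now.Sub(r.lastErrorTime)
	r.lastErrorTime = time.Now()
	r.lastErrorTimeLock.Unlock()

	// A negative or zero duration returns immediately from time.Sleep,
	// so a clock that moves backwards is harmless.
	time.Sleep(r.minPeriod - d)
}

func main() {
	b := &rudimentaryErrorBackoff{minPeriod: 100 * time.Millisecond}
	start := time.Now()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); b.OnError(nil) }()
	}
	wg.Wait()
	// The goroutines sleep concurrently rather than serially behind the lock.
	fmt.Println("elapsed:", time.Since(start).Round(10*time.Millisecond))
}
```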
@@ -30,6 +30,7 @@ Patch1:         CVE-2022-2879.patch
 Patch2:         CVE-2024-24786.patch
 Patch3:         CVE-2024-45338.patch
 Patch4:         CVE-2023-39325.patch
+Patch5:         CVE-2023-44487.patch
 BuildRequires:  golang
 BuildRequires:  golang-packaging
 BuildRequires:  libnbd-devel
@@ -225,7 +226,7 @@ install -m 0644 _out/manifests/release/cdi-cr.yaml %{buildroot}%{_datadir}/cdi/m
 
 %changelog
 * Fri Jan 24 2025 Henry Li <[email protected]> - 1.57.0-8
-- Add patch for CVE-2023-39325
+- Add patch for CVE-2023-39325 and CVE-2023-44487
 
 * Tue Dec 31 2024 Rohit Rawat <[email protected]> - 1.57.0-7
 - Add patch for CVE-2024-45338