From ffa762d824addf536d87677f498ded99ac27e6b3 Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Thu, 5 Dec 2024 17:39:34 -0500 Subject: [PATCH] Migrates various components in lib/srv to use slog (#49840) * Convert srv.AuthHandlers to use slog * Convert srv.Heartbeat and srv.HeartbeatV2 to use slog * Convert srv.KeepAlive to use slog * Convert srv.Monitor to use slog * Convert srv.SessionControl to use slog * Convert srv.TermManager to use slog --- lib/kube/proxy/forwarder.go | 9 ++-- lib/service/db.go | 2 +- lib/service/service.go | 12 ++--- lib/srv/authhandlers.go | 74 ++++++++++++++++++------------- lib/srv/ctx.go | 9 ++-- lib/srv/db/access_test.go | 4 +- lib/srv/desktop/windows_server.go | 2 +- lib/srv/heartbeat.go | 33 +++++++------- lib/srv/heartbeatv2.go | 34 +++++++------- lib/srv/keepalive.go | 11 ++--- lib/srv/monitor.go | 38 ++++++++-------- lib/srv/monitor_test.go | 6 +-- lib/srv/session_control.go | 8 ++-- lib/srv/termmanager.go | 8 ++-- lib/web/apiserver_test.go | 2 +- 15 files changed, 134 insertions(+), 118 deletions(-) diff --git a/lib/kube/proxy/forwarder.go b/lib/kube/proxy/forwarder.go index 1cfd6351fddce..fcf571661dcdc 100644 --- a/lib/kube/proxy/forwarder.go +++ b/lib/kube/proxy/forwarder.go @@ -25,6 +25,7 @@ import ( "errors" "fmt" "io" + "log/slog" "maps" "net" "net/http" @@ -2348,9 +2349,11 @@ func (s *clusterSession) monitorConn(conn net.Conn, err error, hostID string) (n Context: s.connCtx, TeleportUser: s.User.GetName(), ServerID: s.parent.cfg.HostID, - Entry: s.parent.log, - Emitter: s.parent.cfg.AuthClient, - EmitterContext: s.parent.ctx, + // TODO(tross) update this to use the child logger + // once Forwarder is converted to use a slog.Logger + Logger: slog.Default(), + Emitter: s.parent.cfg.AuthClient, + EmitterContext: s.parent.ctx, }) if err != nil { tc.CloseWithCause(err) diff --git a/lib/service/db.go b/lib/service/db.go index 80258e66582e1..3bb3938b9321b 100644 --- 
a/lib/service/db.go +++ b/lib/service/db.go @@ -137,7 +137,7 @@ func (process *TeleportProcess) initDatabaseService() (retErr error) { ServerID: process.Config.HostUUID, Emitter: asyncEmitter, EmitterContext: process.ExitContext(), - Logger: process.log, + Logger: process.logger, }) if err != nil { return trace.Wrap(err) diff --git a/lib/service/service.go b/lib/service/service.go index 9f39b825d05cc..97fbd4a03e370 100644 --- a/lib/service/service.go +++ b/lib/service/service.go @@ -2967,7 +2967,7 @@ func (process *TeleportProcess) initSSH() error { LockEnforcer: lockWatcher, Emitter: &events.StreamerAndEmitter{Emitter: asyncEmitter, Streamer: conn.Client}, Component: teleport.ComponentNode, - Logger: process.log.WithField(teleport.ComponentKey, teleport.Component(teleport.ComponentNode, process.id)).WithField(teleport.ComponentKey, "sessionctrl"), + Logger: process.logger.With(teleport.ComponentKey, teleport.Component(teleport.ComponentNode, process.id, "sessionctrl")), TracerProvider: process.TracingProvider, ServerID: serverID, }) @@ -4468,7 +4468,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error { LockEnforcer: lockWatcher, Emitter: asyncEmitter, Component: teleport.ComponentProxy, - Logger: process.log.WithField(teleport.ComponentKey, "sessionctrl"), + Logger: process.logger.With(teleport.ComponentKey, "sessionctrl"), TracerProvider: process.TracingProvider, ServerID: serverID, }) @@ -4561,7 +4561,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error { ServerID: cfg.HostUUID, Emitter: asyncEmitter, EmitterContext: process.GracefulExitContext(), - Logger: process.log, + Logger: process.logger, }) if err != nil { return trace.Wrap(err) @@ -4924,7 +4924,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error { ServerID: serverID, Emitter: asyncEmitter, EmitterContext: process.ExitContext(), - Logger: process.log, + Logger: process.logger, }) if err != nil { return trace.Wrap(err) @@ -5142,7 
+5142,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error { ServerID: process.Config.HostUUID, Emitter: asyncEmitter, EmitterContext: process.ExitContext(), - Logger: process.log, + Logger: process.logger, }) if err != nil { return trace.Wrap(err) @@ -6024,7 +6024,7 @@ func (process *TeleportProcess) initApps() { ServerID: process.Config.HostUUID, Emitter: asyncEmitter, EmitterContext: process.ExitContext(), - Logger: process.log, + Logger: process.logger, MonitorCloseChannel: process.Config.Apps.MonitorCloseChannel, }) if err != nil { diff --git a/lib/srv/authhandlers.go b/lib/srv/authhandlers.go index d80f5704acfe4..0947c6a975709 100644 --- a/lib/srv/authhandlers.go +++ b/lib/srv/authhandlers.go @@ -22,6 +22,7 @@ import ( "context" "encoding/json" "fmt" + "log/slog" "net" "strconv" "time" @@ -30,7 +31,6 @@ import ( "github.com/gravitational/trace" "github.com/jonboulle/clockwork" "github.com/prometheus/client_golang/prometheus" - log "github.com/sirupsen/logrus" "golang.org/x/crypto/ssh" "github.com/gravitational/teleport" @@ -123,7 +123,7 @@ func (c *AuthHandlerConfig) CheckAndSetDefaults() error { type AuthHandlers struct { loginChecker - log *log.Entry + log *slog.Logger c *AuthHandlerConfig } @@ -140,7 +140,7 @@ func NewAuthHandlers(config *AuthHandlerConfig) (*AuthHandlers, error) { ah := &AuthHandlers{ c: config, - log: log.WithField(teleport.ComponentKey, config.Component), + log: slog.With(teleport.ComponentKey, config.Component), } ah.loginChecker = &ahLoginChecker{ log: ah.log, @@ -284,10 +284,10 @@ func (h *AuthHandlers) CheckPortForward(addr string, ctx *ServerContext) error { Error: systemErrorMessage, }, }); err != nil { - h.log.WithError(err).Warn("Failed to emit port forward deny audit event.") + h.log.WarnContext(h.c.Server.Context(), "Failed to emit port forward deny audit event", "error", err) } - h.log.Warnf("Port forwarding request denied: %v.", systemErrorMessage) + h.log.WarnContext(h.c.Server.Context(), "Port 
forwarding request denied", "error", systemErrorMessage) return trace.AccessDenied(userErrorMessage) } @@ -303,28 +303,38 @@ func (h *AuthHandlers) UserKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*s fingerprint := fmt.Sprintf("%v %v", key.Type(), sshutils.Fingerprint(key)) // create a new logging entry with info specific to this login attempt - log := h.log.WithField(teleport.ComponentFields, log.Fields{ - "local": conn.LocalAddr(), - "remote": conn.RemoteAddr(), - "user": conn.User(), - "fingerprint": fingerprint, - }) - - cid := fmt.Sprintf("conn(%v->%v, user=%v)", conn.RemoteAddr(), conn.LocalAddr(), conn.User()) - log.Debugf("%v auth attempt", cid) + log := h.log.With( + "local_addr", conn.LocalAddr(), + "remote_addr", conn.RemoteAddr(), + "user", conn.User(), + "fingerprint", fingerprint, + ) cert, ok := key.(*ssh.Certificate) - log.Debugf("%v auth attempt with key %v, %#v", cid, fingerprint, cert) if !ok { - log.Debugf("auth attempt, unsupported key type") + log.DebugContext(ctx, "rejecting auth attempt, unsupported key type") return nil, trace.BadParameter("unsupported key type: %v", fingerprint) } + + log.DebugContext(ctx, "processing auth attempt with key", + slog.Group("cert", + "serial", cert.Serial, + "type", cert.CertType, + "key_id", cert.KeyId, + "valid_principals", cert.ValidPrincipals, + "valid_after", cert.ValidAfter, + "valid_before", cert.ValidBefore, + "permissions", cert.Permissions, + "reserved", cert.Reserved, + ), + ) + if len(cert.ValidPrincipals) == 0 { - log.Debugf("need a valid principal for key") + log.DebugContext(ctx, "rejecting auth attempt without valid principals") return nil, trace.BadParameter("need a valid principal for key %v", fingerprint) } if len(cert.KeyId) == 0 { - log.Debugf("need a valid key ID for key") + log.DebugContext(ctx, "rejecting auth attempt without valid key ID") return nil, trace.BadParameter("need a valid key for key %v", fingerprint) } teleportUser := cert.KeyId @@ -380,7 +390,7 @@ func (h 
*AuthHandlers) UserKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*s message, err, ); err != nil { - h.log.WithError(err).Warn("Failed to append Trace to ConnectionDiagnostic.") + h.log.WarnContext(ctx, "Failed to append Trace to ConnectionDiagnostic", "error", err) } if err := h.c.Emitter.EmitAuditEvent(h.c.Server.Context(), &apievents.AuthAttempt{ @@ -402,7 +412,7 @@ func (h *AuthHandlers) UserKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*s Error: err.Error(), }, }); err != nil { - h.log.WithError(err).Warn("Failed to emit failed login audit event.") + h.log.WarnContext(ctx, "Failed to emit failed login audit event", "error", err) } auditdMsg := auditd.Message{ @@ -412,7 +422,7 @@ func (h *AuthHandlers) UserKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*s } if err := auditd.SendEvent(auditd.AuditUserErr, auditd.Failed, auditdMsg); err != nil { - log.Warnf("Failed to send an event to auditd: %v", err) + log.WarnContext(ctx, "Failed to send an event to auditd", "error", err) } } @@ -433,7 +443,7 @@ func (h *AuthHandlers) UserKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*s recordFailedLogin(err) return nil, trace.Wrap(err) } - log.Debugf("Successfully authenticated") + log.DebugContext(ctx, "Successfully authenticated") clusterName, err := h.c.AccessPoint.GetClusterName() if err != nil { @@ -452,7 +462,7 @@ func (h *AuthHandlers) UserKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*s case ssh.HostCert: permissions.Extensions[utils.ExtIntCertType] = utils.ExtIntCertTypeHost default: - log.Warnf("Unexpected cert type: %v", cert.CertType) + log.WarnContext(ctx, "Received unexpected cert type", "cert_type", cert.CertType) } if h.isProxy() { @@ -464,7 +474,7 @@ func (h *AuthHandlers) UserKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*s // client's certificate ca, err := h.authorityForCert(types.UserCA, cert.SignatureKey) if err != nil { - log.Errorf("Permission denied: %v", err) + log.ErrorContext(ctx, "Permission denied", "error", err) 
recordFailedLogin(err) return nil, trace.Wrap(err) } @@ -483,7 +493,7 @@ func (h *AuthHandlers) UserKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*s err = h.canLoginWithRBAC(cert, ca, clusterName.GetClusterName(), h.c.Server.GetInfo(), teleportUser, conn.User()) } if err != nil { - log.Errorf("Permission denied: %v", err) + log.ErrorContext(ctx, "Permission denied", "error", err) recordFailedLogin(err) return nil, trace.Wrap(err) } @@ -572,7 +582,7 @@ func (h *AuthHandlers) hostKeyCallback(hostname string, remote net.Addr, key ssh // If strict host key checking is not enabled, log that Teleport trusted an // insecure key, but allow the request to go through. - h.log.Warnf("Insecure configuration! Strict host key checking disabled, allowing login without checking host key of type %v.", key.Type()) + h.log.WarnContext(ctx, "Insecure configuration! Strict host key checking disabled, allowing login without checking host key", "key_type", key.Type()) return nil } @@ -591,7 +601,7 @@ func (h *AuthHandlers) IsUserAuthority(cert ssh.PublicKey) bool { // Teleport CA. func (h *AuthHandlers) IsHostAuthority(cert ssh.PublicKey, address string) bool { if _, err := h.authorityForCert(types.HostCA, cert); err != nil { - h.log.Debugf("Unable to find SSH host CA: %v.", err) + h.log.DebugContext(h.c.Server.Context(), "Unable to find SSH host CA", "error", err) return false } return true @@ -607,7 +617,7 @@ type loginChecker interface { } type ahLoginChecker struct { - log *log.Entry + log *slog.Logger c *AuthHandlerConfig } @@ -618,7 +628,7 @@ func (a *ahLoginChecker) canLoginWithRBAC(cert *ssh.Certificate, ca types.CertAu // Use the server's shutdown context. 
ctx := a.c.Server.Context() - a.log.Debugf("Checking permissions for (%v,%v) to login to node with RBAC checks.", teleportUser, osUser) + a.log.DebugContext(ctx, "Checking permissions to login to node with RBAC checks", "teleport_user", teleportUser, "os_user", osUser) // get roles assigned to this user accessInfo, err := fetchAccessInfo(cert, ca, teleportUser, clusterName) @@ -694,9 +704,9 @@ func fetchAccessInfo(cert *ssh.Certificate, ca types.CertAuthority, teleportUser // Certificate Authority and returns it. func (h *AuthHandlers) authorityForCert(caType types.CertAuthType, key ssh.PublicKey) (types.CertAuthority, error) { // get all certificate authorities for given type - cas, err := h.c.AccessPoint.GetCertAuthorities(context.TODO(), caType, false) + cas, err := h.c.AccessPoint.GetCertAuthorities(h.c.Server.Context(), caType, false) if err != nil { - h.log.Warnf("%v", trace.DebugReport(err)) + h.log.WarnContext(h.c.Server.Context(), "failed retrieving cert authority", "error", err) return nil, trace.Wrap(err) } @@ -705,7 +715,7 @@ func (h *AuthHandlers) authorityForCert(caType types.CertAuthType, key ssh.Publi for i := range cas { checkers, err := sshutils.GetCheckers(cas[i]) if err != nil { - h.log.Warnf("%v", err) + h.log.WarnContext(h.c.Server.Context(), "unable to get cert checker for ca", "ca", cas[i].GetName(), "error", err) return nil, trace.Wrap(err) } for _, checker := range checkers { diff --git a/lib/srv/ctx.go b/lib/srv/ctx.go index 802851d6c7b68..342566dee7a54 100644 --- a/lib/srv/ctx.go +++ b/lib/srv/ctx.go @@ -22,6 +22,7 @@ import ( "context" "fmt" "io" + "log/slog" "net" "os" "strconv" @@ -490,9 +491,11 @@ func NewServerContext(ctx context.Context, parent *sshutils.ConnectionContext, s TeleportUser: child.Identity.TeleportUser, Login: child.Identity.Login, ServerID: child.srv.ID(), - Entry: child.Entry, - Emitter: child.srv, - EmitterContext: ctx, + // TODO(tross) update this to use the child logger + // once ServerContext is converted to use a slog.Logger 
+ Logger: slog.Default(), + Emitter: child.srv, + EmitterContext: ctx, } for _, opt := range monitorOpts { opt(&monitorConfig) diff --git a/lib/srv/db/access_test.go b/lib/srv/db/access_test.go index b53da925e0669..0adc9a8b28796 100644 --- a/lib/srv/db/access_test.go +++ b/lib/srv/db/access_test.go @@ -2389,7 +2389,7 @@ func setupTestContext(ctx context.Context, t testing.TB, withDatabases ...withDa ServerID: testCtx.hostID, Emitter: testCtx.emitter, EmitterContext: ctx, - Logger: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) require.NoError(t, err) @@ -2542,7 +2542,7 @@ func (c *testContext) setupDatabaseServer(ctx context.Context, t testing.TB, p a ServerID: p.HostID, Emitter: c.emitter, EmitterContext: context.Background(), - Logger: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) require.NoError(t, err) diff --git a/lib/srv/desktop/windows_server.go b/lib/srv/desktop/windows_server.go index 7196425b79bff..9d42bdcf74333 100644 --- a/lib/srv/desktop/windows_server.go +++ b/lib/srv/desktop/windows_server.go @@ -1000,7 +1000,7 @@ func (s *WindowsService) connectRDP(ctx context.Context, log *slog.Logger, tdpCo Clock: s.cfg.Clock, ClientIdleTimeout: authCtx.Checker.AdjustClientIdleTimeout(netConfig.GetClientIdleTimeout()), DisconnectExpiredCert: authCtx.GetDisconnectCertExpiry(authPref), - Entry: logrus.NewEntry(logrus.StandardLogger()), + Logger: s.cfg.Logger, Emitter: s.cfg.Emitter, EmitterContext: s.closeCtx, LockWatcher: s.cfg.LockWatcher, diff --git a/lib/srv/heartbeat.go b/lib/srv/heartbeat.go index 639f6581e1e31..82ea1eafacca2 100644 --- a/lib/srv/heartbeat.go +++ b/lib/srv/heartbeat.go @@ -21,11 +21,11 @@ package srv import ( "context" "fmt" + "log/slog" "time" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" @@ -155,14 +155,17 @@ func NewHeartbeat(cfg HeartbeatConfig) 
(*Heartbeat, error) { cancelCtx: ctx, cancel: cancel, HeartbeatConfig: cfg, - Entry: log.WithFields(log.Fields{ - teleport.ComponentKey: teleport.Component(cfg.Component, "beat"), - }), - checkTicker: cfg.Clock.NewTicker(cfg.CheckPeriod), - announceC: make(chan struct{}, 1), - sendC: make(chan struct{}, 1), + logger: slog.With(teleport.ComponentKey, teleport.Component(cfg.Component, "beat")), + checkTicker: cfg.Clock.NewTicker(cfg.CheckPeriod), + announceC: make(chan struct{}, 1), + sendC: make(chan struct{}, 1), } - h.Debugf("Starting %v heartbeat with announce period: %v, keep-alive period %v, poll period: %v", cfg.Mode, cfg.KeepAlivePeriod, cfg.AnnouncePeriod, cfg.CheckPeriod) + h.logger.DebugContext(ctx, "Starting heartbeat with announce period", + "mode", cfg.Mode, + "keep_alive_period", cfg.KeepAlivePeriod, + "announce_period", cfg.AnnouncePeriod, + "check_period", cfg.CheckPeriod, + ) return h, nil } @@ -249,7 +252,7 @@ type Heartbeat struct { HeartbeatConfig cancelCtx context.Context cancel context.CancelFunc - *log.Entry + logger *slog.Logger state KeepAliveState current types.Resource keepAlive *types.KeepAlive @@ -280,15 +283,15 @@ func (h *Heartbeat) Run() error { for { err := h.fetchAndAnnounce() if err != nil { - h.Warningf("Heartbeat failed %v.", err) + h.logger.WarnContext(h.Context, "Heartbeat failed", "error", err) } h.OnHeartbeat(err) select { case <-h.checkTicker.Chan(): case <-h.sendC: - h.Debugf("Asked check out of cycle") + h.logger.DebugContext(h.Context, "Asked check out of cycle") case <-h.cancelCtx.Done(): - h.Debugf("Heartbeat exited.") + h.logger.DebugContext(h.Context, "Heartbeat exited") return nil } } @@ -324,7 +327,7 @@ func (h *Heartbeat) reset(state KeepAliveState) { h.keepAlive = nil if h.keepAliver != nil { if err := h.keepAliver.Close(); err != nil { - h.Warningf("Failed to close keep aliver: %v", err) + h.logger.WarnContext(h.Context, "Failed to close keep aliver", "error", err) } h.keepAliver = nil } @@ -590,7 +593,7 @@ func 
(h *Heartbeat) announce() error { case <-h.cancelCtx.Done(): return nil case <-timeout.C: - h.Warningf("Blocked on keep alive send, going to reset.") + h.logger.WarnContext(h.Context, "Blocked on keep alive send, going to reset.") h.reset(HeartbeatStateInit) return trace.ConnectionProblem(nil, "timeout sending keep alive") case h.keepAliver.KeepAlives() <- keepAlive: @@ -599,7 +602,7 @@ func (h *Heartbeat) announce() error { h.setState(HeartbeatStateKeepAliveWait) return nil case <-h.keepAliver.Done(): - h.Warningf("Keep alive has failed: %v.", h.keepAliver.Error()) + h.logger.WarnContext(h.Context, "Keep alive has failed", "error", h.keepAliver.Error()) err := h.keepAliver.Error() h.reset(HeartbeatStateInit) return trace.ConnectionProblem(err, "keep alive channel closed") diff --git a/lib/srv/heartbeatv2.go b/lib/srv/heartbeatv2.go index 7a95020e7a31d..299c1b274e09c 100644 --- a/lib/srv/heartbeatv2.go +++ b/lib/srv/heartbeatv2.go @@ -21,10 +21,10 @@ package srv import ( "context" "errors" + "log/slog" "time" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -559,12 +559,12 @@ func (h *sshServerHeartbeatV2) FallbackAnnounce(ctx context.Context) (ok bool) { } server, err := h.getServer(ctx) if err != nil { - log.Warnf("Failed to perform fallback heartbeat for ssh server: %v", err) + slog.WarnContext(ctx, "Failed to perform fallback heartbeat for ssh server", "error", err) return false } if _, err := h.announcer.UpsertNode(ctx, server); err != nil { - log.Warnf("Failed to perform fallback heartbeat for ssh server: %v", err) + slog.WarnContext(ctx, "Failed to perform fallback heartbeat for ssh server", "error", err) return false } @@ -575,12 +575,12 @@ func (h *sshServerHeartbeatV2) FallbackAnnounce(ctx context.Context) (ok bool) { func (h *sshServerHeartbeatV2) Announce(ctx context.Context, sender inventory.DownstreamSender) (ok bool) { server, err := h.getServer(ctx) if err != nil { 
- log.Warnf("Failed to perform inventory heartbeat for ssh server: %v", err) + slog.WarnContext(ctx, "Failed to perform inventory heartbeat for ssh server", "error", err) return false } if err := sender.Send(ctx, proto.InventoryHeartbeat{SSHServer: apiutils.CloneProtoMsg(server)}); err != nil { - log.Warnf("Failed to perform inventory heartbeat for ssh server: %v", err) + slog.WarnContext(ctx, "Failed to perform inventory heartbeat for ssh server", "error", err) return false } h.prev = server @@ -617,13 +617,13 @@ func (h *appServerHeartbeatV2) FallbackAnnounce(ctx context.Context) (ok bool) { } server, err := h.getServer(ctx) if err != nil { - log.Warnf("Failed to perform fallback heartbeat for app server: %v", err) + slog.WarnContext(ctx, "Failed to perform fallback heartbeat for app server", "error", err) return false } if _, err := h.announcer.UpsertApplicationServer(ctx, server); err != nil { if !errors.Is(err, context.Canceled) && status.Code(err) != codes.Canceled { - log.Warnf("Failed to perform fallback heartbeat for app server: %v", err) + slog.WarnContext(ctx, "Failed to perform fallback heartbeat for app server", "error", err) } return false } @@ -645,13 +645,13 @@ func (h *appServerHeartbeatV2) Announce(ctx context.Context, sender inventory.Do server, err := h.getServer(ctx) if err != nil { - log.Warnf("Failed to perform inventory heartbeat for app server: %v", err) + slog.WarnContext(ctx, "Failed to perform inventory heartbeat for app server", "error", err) return false } if err := sender.Send(ctx, proto.InventoryHeartbeat{AppServer: apiutils.CloneProtoMsg(server)}); err != nil { if !errors.Is(err, context.Canceled) && status.Code(err) != codes.Canceled { - log.Warnf("Failed to perform inventory heartbeat for app server: %v", err) + slog.WarnContext(ctx, "Failed to perform inventory heartbeat for app server", "error", err) } return false } @@ -690,12 +690,12 @@ func (h *dbServerHeartbeatV2) FallbackAnnounce(ctx context.Context) (ok bool) { } server, 
err := h.getServer(ctx) if err != nil { - log.Warnf("Failed to perform fallback heartbeat for database server: %v", err) + slog.WarnContext(ctx, "Failed to perform fallback heartbeat for database server", "error", err) return false } if _, err := h.announcer.UpsertDatabaseServer(ctx, server); err != nil { if !errors.Is(err, context.Canceled) && status.Code(err) != codes.Canceled { - log.Warnf("Failed to perform fallback heartbeat for database server: %v", err) + slog.WarnContext(ctx, "Failed to perform fallback heartbeat for database server", "error", err) } return false } @@ -717,12 +717,12 @@ func (h *dbServerHeartbeatV2) Announce(ctx context.Context, sender inventory.Dow server, err := h.getServer(ctx) if err != nil { - log.Warnf("Failed to perform inventory heartbeat for database server: %v", err) + slog.WarnContext(ctx, "Failed to perform inventory heartbeat for database server", "error", err) return false } if err := sender.Send(ctx, proto.InventoryHeartbeat{DatabaseServer: apiutils.CloneProtoMsg(server)}); err != nil { if !errors.Is(err, context.Canceled) && status.Code(err) != codes.Canceled { - log.Warnf("Failed to perform inventory heartbeat for database server: %v", err) + slog.WarnContext(ctx, "Failed to perform inventory heartbeat for database server", "error", err) } return false } @@ -761,13 +761,13 @@ func (h *kubeServerHeartbeatV2) FallbackAnnounce(ctx context.Context) (ok bool) } server, err := h.getServer(ctx) if err != nil { - log.Warnf("Failed to perform fallback heartbeat for kubernetes server: %v", err) + slog.WarnContext(ctx, "Failed to perform fallback heartbeat for kubernetes server", "error", err) return false } if _, err := h.announcer.UpsertKubernetesServer(ctx, apiutils.CloneProtoMsg(server)); err != nil { if !errors.Is(err, context.Canceled) && status.Code(err) != codes.Canceled { - log.Warnf("Failed to perform fallback heartbeat for kubernetes server: %v", err) + slog.WarnContext(ctx, "Failed to perform fallback heartbeat for 
kubernetes server", "error", err) } return false } @@ -789,12 +789,12 @@ func (h *kubeServerHeartbeatV2) Announce(ctx context.Context, sender inventory.D server, err := h.getServer(ctx) if err != nil { - log.Warnf("Failed to perform inventory heartbeat for kubernetes server: %v", err) + slog.WarnContext(ctx, "Failed to perform inventory heartbeat for kubernetes server", "error", err) return false } if err := sender.Send(ctx, proto.InventoryHeartbeat{KubernetesServer: apiutils.CloneProtoMsg(server)}); err != nil { if !errors.Is(err, context.Canceled) && status.Code(err) != codes.Canceled { - log.Warnf("Failed to perform inventory heartbeat for kubernetes server: %v", err) + slog.WarnContext(ctx, "Failed to perform inventory heartbeat for kubernetes server", "error", err) } return false } diff --git a/lib/srv/keepalive.go b/lib/srv/keepalive.go index 18d3418041f33..1f11bc1847248 100644 --- a/lib/srv/keepalive.go +++ b/lib/srv/keepalive.go @@ -20,10 +20,9 @@ package srv import ( "context" + "log/slog" "time" - "github.com/sirupsen/logrus" - "github.com/gravitational/teleport" "github.com/gravitational/teleport/lib/defaults" ) @@ -59,10 +58,8 @@ type KeepAliveParams struct { func StartKeepAliveLoop(p KeepAliveParams) { var missedCount int64 - log := logrus.WithFields(logrus.Fields{ - teleport.ComponentKey: teleport.ComponentKeepAlive, - }) - log.Debugf("Starting keep-alive loop with interval %v and max count %v.", p.Interval, p.MaxCount) + log := slog.With(teleport.ComponentKey, teleport.ComponentKeepAlive) + log.DebugContext(p.CloseContext, "Starting keep-alive loop", "interval", p.Interval, "max_count", p.MaxCount) tickerCh := time.NewTicker(p.Interval) defer tickerCh.Stop() @@ -89,7 +86,7 @@ func StartKeepAliveLoop(p KeepAliveParams) { // and notify the server to disconnect and cleanup. 
missedCount = missedCount + 1 if missedCount > p.MaxCount { - log.Infof("Missed %v keep-alive messages, closing connection.", missedCount) + log.InfoContext(p.CloseContext, "Missed too many keep-alive messages, closing connection", "missed_count", missedCount) p.CloseCancel() return } } diff --git a/lib/srv/monitor.go b/lib/srv/monitor.go index 183aeba78b7e8..8381c6e8bde4a 100644 --- a/lib/srv/monitor.go +++ b/lib/srv/monitor.go @@ -22,13 +22,13 @@ import ( "context" "fmt" "io" + "log/slog" "net" "sync" "time" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - log "github.com/sirupsen/logrus" "golang.org/x/crypto/ssh" "github.com/gravitational/teleport" @@ -75,8 +75,8 @@ type ConnectionMonitorConfig struct { Emitter apievents.Emitter // EmitterContext is long-lived context suitable to be used with Emitter EmitterContext context.Context - // Logger is a logging entry. - Logger log.FieldLogger + // Logger emits log messages. + Logger *slog.Logger // MonitorCloseChannel will be signaled when the monitor closes a connection. // Used only for testing. Optional. MonitorCloseChannel chan struct{} @@ -191,7 +191,7 @@ func (c *ConnectionMonitor) MonitorConn(ctx context.Context, authzCtx *authz.Con TeleportUser: identity.Username, Emitter: c.cfg.Emitter, EmitterContext: c.cfg.EmitterContext, - Entry: c.cfg.Logger, + Logger: c.cfg.Logger, IdleTimeoutMessage: netConfig.GetClientIdleTimeoutMessage(), MonitorCloseChannel: c.cfg.MonitorCloseChannel, }); err != nil { @@ -234,8 +234,8 @@ type MonitorConfig struct { Emitter apievents.Emitter // EmitterContext is long-lived context suitable to be used with Emitter. Typically, a server exit context will be used here. EmitterContext context.Context - // Entry is a logging entry - Entry log.FieldLogger + // Logger emits log messages. + Logger *slog.Logger // IdleTimeoutMessage is sent to the client when the idle timeout expires. IdleTimeoutMessage string // MessageWriter wraps a channel to send text messages to the client. 
Use @@ -260,8 +260,8 @@ func (m *MonitorConfig) CheckAndSetDefaults() error { if m.Conn == nil { return trace.BadParameter("missing parameter Conn") } - if m.Entry == nil { - return trace.BadParameter("missing parameter Entry") + if m.Logger == nil { + return trace.BadParameter("missing parameter Logger") } if m.Tracker == nil { return trace.BadParameter("missing parameter Tracker") @@ -321,7 +321,7 @@ func (w *Monitor) start(lockWatch types.Watcher) { lockWatchDoneC := lockWatch.Done() defer func() { if err := lockWatch.Close(); err != nil { - w.Entry.WithError(err).Warn("Failed to close lock watcher subscription.") + w.Logger.WarnContext(w.Context, "Failed to close lock watcher subscription", "error", err) } }() @@ -366,14 +366,14 @@ func (w *Monitor) start(lockWatch types.Watcher) { msg = reason } if _, err := w.MessageWriter.WriteString(msg); err != nil { - w.Entry.WithError(err).Warn("Failed to send idle timeout message.") + w.Logger.WarnContext(w.Context, "Failed to send idle timeout message", "error", err) } } w.disconnectClient(reason) return } next := w.ClientIdleTimeout - since - w.Entry.Debugf("Client activity detected %v ago; next check in %v", since, next) + w.Logger.DebugContext(w.Context, "Client activity detected", "last_active", since, "next_check", next) idleTime = w.Clock.After(next) // Lock in force. 
@@ -383,7 +383,7 @@ func (w *Monitor) start(lockWatch types.Watcher) { case types.OpPut: lock, ok := lockEvent.Resource.(types.Lock) if !ok { - w.Entry.Warnf("Skipping unexpected lock event resource type %T.", lockEvent.Resource) + w.Logger.WarnContext(w.Context, "Skipping unexpected lock event resource type", "resource_kind", lockEvent.Resource.GetKind()) } else { lockErr = services.LockInForceAccessDenied(lock) } @@ -394,7 +394,7 @@ func (w *Monitor) start(lockWatch types.Watcher) { lockErr = services.StrictLockingModeAccessDenied } default: - w.Entry.Warnf("Skipping unexpected lock event type %q.", lockEvent.Type) + w.Logger.WarnContext(w.Context, "Skipping unexpected lock event type", "event_type", lockEvent.Type) } if lockErr != nil { w.handleLockInForce(lockErr) @@ -402,7 +402,7 @@ func (w *Monitor) start(lockWatch types.Watcher) { } case <-lockWatchDoneC: - w.Entry.WithError(lockWatch.Error()).Warn("Lock watcher subscription was closed.") + w.Logger.WarnContext(w.Context, "Lock watcher subscription was closed", "error", lockWatch.Error()) if w.DisconnectExpiredCert.IsZero() && w.ClientIdleTimeout == 0 { return } @@ -425,21 +425,21 @@ type withCauseCloser interface { } func (w *Monitor) disconnectClient(reason string) { - w.Entry.Debugf("Disconnecting client: %v", reason) + w.Logger.DebugContext(w.Context, "Disconnecting client", "reason", reason) if connWithCauseCloser, ok := w.Conn.(withCauseCloser); ok { if err := connWithCauseCloser.CloseWithCause(trace.AccessDenied(reason)); err != nil { - w.Entry.WithError(err).Error("Failed to close connection.") + w.Logger.ErrorContext(w.Context, "Failed to close connection", "error", err) } } else { if err := w.Conn.Close(); err != nil { - w.Entry.WithError(err).Error("Failed to close connection.") + w.Logger.ErrorContext(w.Context, "Failed to close connection", "error", err) } } // emit audit event after client has been disconnected. 
if err := w.emitDisconnectEvent(reason); err != nil { - w.Entry.WithError(err).Warn("Failed to emit audit event.") + w.Logger.WarnContext(w.Context, "Failed to emit audit event", "error", err) } } @@ -470,7 +470,7 @@ func (w *Monitor) handleLockInForce(lockErr error) { reason := lockErr.Error() if w.MessageWriter != nil { if _, err := w.MessageWriter.WriteString(reason); err != nil { - w.Entry.WithError(err).Warn("Failed to send lock-in-force message.") + w.Logger.WarnContext(w.Context, "Failed to send lock-in-force message", "error", err) } } w.disconnectClient(reason) diff --git a/lib/srv/monitor_test.go b/lib/srv/monitor_test.go index ad95adefef345..db39dbef2c3d3 100644 --- a/lib/srv/monitor_test.go +++ b/lib/srv/monitor_test.go @@ -28,7 +28,6 @@ import ( "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/constants" @@ -39,6 +38,7 @@ import ( "github.com/gravitational/teleport/lib/events/eventstest" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/tlsca" + "github.com/gravitational/teleport/lib/utils" ) func newTestMonitor(ctx context.Context, t *testing.T, asrv *auth.TestAuthServer, mut ...func(*MonitorConfig)) (*mockTrackingConn, *eventstest.ChannelEmitter, MonitorConfig) { @@ -54,7 +54,7 @@ func newTestMonitor(ctx context.Context, t *testing.T, asrv *auth.TestAuthServer EmitterContext: context.Background(), Clock: asrv.Clock(), Tracker: &mockActivityTracker{asrv.Clock()}, - Entry: logrus.StandardLogger(), + Logger: utils.NewSlogLoggerForTests(), LockWatcher: asrv.LockWatcher, LockTargets: []types.LockTarget{{User: "test-user"}}, LockingMode: constants.LockingModeBestEffort, @@ -86,7 +86,7 @@ func TestConnectionMonitorLockInForce(t *testing.T) { Emitter: emitter, EmitterContext: ctx, Clock: asrv.Clock(), - Logger: logrus.StandardLogger(), + Logger: utils.NewSlogLoggerForTests(), LockWatcher: 
asrv.LockWatcher, ServerID: "test", }) diff --git a/lib/srv/session_control.go b/lib/srv/session_control.go index bd340e53279cc..748aa111062eb 100644 --- a/lib/srv/session_control.go +++ b/lib/srv/session_control.go @@ -21,12 +21,12 @@ package srv import ( "context" "io" + "log/slog" "strings" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" "github.com/prometheus/client_golang/prometheus" - "github.com/sirupsen/logrus" oteltrace "go.opentelemetry.io/otel/trace" "golang.org/x/crypto/ssh" @@ -75,7 +75,7 @@ type SessionControllerConfig struct { // have different flows Component string // Logger is used to emit log entries - Logger *logrus.Entry + Logger *slog.Logger // TracerProvider creates a tracer so that spans may be emitted TracerProvider oteltrace.TracerProvider // ServerID is the UUID of the server @@ -114,7 +114,7 @@ func (c *SessionControllerConfig) CheckAndSetDefaults() error { } if c.Logger == nil { - c.Logger = logrus.WithField(teleport.ComponentKey, "SessionCtrl") + c.Logger = slog.With(teleport.ComponentKey, "SessionCtrl") } if c.Clock == nil { @@ -342,6 +342,6 @@ func (s *SessionController) emitRejection(ctx context.Context, userMetadata apie Reason: reason, Maximum: max, }); err != nil { - s.cfg.Logger.WithError(err).Warn("Failed to emit session reject event.") + s.cfg.Logger.WarnContext(ctx, "Failed to emit session reject event", "error", err) } } diff --git a/lib/srv/termmanager.go b/lib/srv/termmanager.go index 3c47011672f7f..ddb0d1f3bbc88 100644 --- a/lib/srv/termmanager.go +++ b/lib/srv/termmanager.go @@ -19,12 +19,12 @@ package srv import ( + "context" "errors" "io" + "log/slog" "sync" "sync/atomic" - - log "github.com/sirupsen/logrus" ) // maxHistoryBytes is the maximum bytes that are retained as history and broadcasted to new clients. 
@@ -107,7 +107,7 @@ func (g *TermManager) writeToClients(p []byte) { _, err := w.Write(p) if err != nil { if !errors.Is(err, io.EOF) { - log.Warnf("Failed to write to remote terminal: %v", err) + slog.WarnContext(context.Background(), "Failed to write to remote terminal", "error", err) } toDelete = append( toDelete, struct { @@ -244,7 +244,7 @@ func (g *TermManager) AddReader(name string, r io.Reader) { n, err := r.Read(buf) if err != nil { if !errors.Is(err, io.EOF) { - log.Warnf("Failed to read from remote terminal: %v", err) + slog.WarnContext(context.Background(), "Failed to read from remote terminal", "error", err) } // Let term manager decide how to handle broken party readers. if g.OnReadError != nil { diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index b94cbf091b5cf..3a701fe65a26a 100644 --- a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -8302,7 +8302,7 @@ func createProxy(ctx context.Context, t *testing.T, proxyID string, node *regula ServerID: proxyID, Emitter: client, EmitterContext: ctx, - Logger: log, + Logger: utils.NewSlogLoggerForTests(), }) require.NoError(t, err)