diff --git a/e2e/aws/fixtures_test.go b/e2e/aws/fixtures_test.go index c196af60b663d..a7f682d799005 100644 --- a/e2e/aws/fixtures_test.go +++ b/e2e/aws/fixtures_test.go @@ -190,7 +190,7 @@ func newInstanceConfig(t *testing.T) helpers.InstanceConfig { NodeName: host, Priv: priv, Pub: pub, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), } } diff --git a/integration/appaccess/fixtures.go b/integration/appaccess/fixtures.go index 0f5543efb4ff1..eee6390c471f5 100644 --- a/integration/appaccess/fixtures.go +++ b/integration/appaccess/fixtures.go @@ -65,7 +65,7 @@ func SetupWithOptions(t *testing.T, opts AppTestOptions) *Pack { tr := utils.NewTracer(utils.ThisFunction()).Start() defer tr.Stop() - log := utils.NewLoggerForTests() + log := utils.NewSlogLoggerForTests() // Insecure development mode needs to be set because the web proxy uses a // self-signed certificate during tests. @@ -323,7 +323,7 @@ func SetupWithOptions(t *testing.T, opts AppTestOptions) *Pack { NodeName: helpers.Host, Priv: privateKey, Pub: publicKey, - Log: log, + Logger: log, } if opts.RootClusterListeners != nil { rootCfg.Listeners = opts.RootClusterListeners(t, &rootCfg.Fds) @@ -338,7 +338,7 @@ func SetupWithOptions(t *testing.T, opts AppTestOptions) *Pack { NodeName: helpers.Host, Priv: privateKey, Pub: publicKey, - Log: log, + Logger: log, } if opts.LeafClusterListeners != nil { leafCfg.Listeners = opts.LeafClusterListeners(t, &leafCfg.Fds) @@ -347,7 +347,7 @@ func SetupWithOptions(t *testing.T, opts AppTestOptions) *Pack { rcConf := servicecfg.MakeDefaultConfig() rcConf.Console = nil - rcConf.Log = log + rcConf.Logger = log rcConf.DataDir = t.TempDir() rcConf.Auth.Enabled = true rcConf.Auth.Preference.SetSecondFactor("off") @@ -365,7 +365,7 @@ func SetupWithOptions(t *testing.T, opts AppTestOptions) *Pack { lcConf := servicecfg.MakeDefaultConfig() lcConf.Console = nil - lcConf.Log = log + lcConf.Logger = log lcConf.DataDir = t.TempDir() lcConf.Auth.Enabled = true lcConf.Auth.Preference.SetSecondFactor("off") diff --git a/integration/appaccess/pack.go b/integration/appaccess/pack.go index cf7afae964fe6..5deabac208c4c 100644 --- a/integration/appaccess/pack.go +++ b/integration/appaccess/pack.go @@ -698,15 +698,13 @@ func (p *Pack) waitForLogout(appCookies []*http.Cookie) (int, error) { } func (p *Pack) startRootAppServers(t *testing.T, count int, opts AppTestOptions) []*service.TeleportProcess { - log := utils.NewLoggerForTests() - configs := make([]*servicecfg.Config, count) for i := 0; i < count; i++ { raConf := servicecfg.MakeDefaultConfig() raConf.Clock = opts.Clock raConf.Console = nil - raConf.Log = log + raConf.Logger = utils.NewSlogLoggerForTests() raConf.DataDir = t.TempDir() raConf.SetToken("static-token-value") raConf.SetAuthServerAddress(utils.NetAddr{ @@ -870,14 +868,13 @@ func waitForAppServer(t *testing.T, tunnel reversetunnelclient.Server, name stri } func (p *Pack) startLeafAppServers(t *testing.T, count int, opts AppTestOptions) []*service.TeleportProcess { - log := utils.NewLoggerForTests() configs := make([]*servicecfg.Config, count) for i := 0; i < count; i++ { laConf := servicecfg.MakeDefaultConfig() laConf.Clock = opts.Clock laConf.Console = nil - laConf.Log = log + laConf.Logger = utils.NewSlogLoggerForTests() laConf.DataDir = t.TempDir() laConf.SetToken("static-token-value") laConf.SetAuthServerAddress(utils.NetAddr{ diff --git a/integration/client_test.go b/integration/client_test.go index e6c1e799419a7..c9ae4d4129abc 100644 --- a/integration/client_test.go +++ 
b/integration/client_test.go @@ -41,7 +41,7 @@ func TestClientWithExpiredCredentialsAndDetailedErrorMessage(t *testing.T) { ClusterName: "root.example.com", HostID: uuid.New().String(), NodeName: Loopback, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), } cfg.Listeners = helpers.SingleProxyPortSetup(t, &cfg.Fds) rc := helpers.NewInstance(t, cfg) diff --git a/integration/db/fixture.go b/integration/db/fixture.go index 66ea7fcad90cd..855252e8352fa 100644 --- a/integration/db/fixture.go +++ b/integration/db/fixture.go @@ -253,7 +253,7 @@ func SetupDatabaseTest(t *testing.T, options ...TestOptionFunc) *DatabasePack { tracer := utils.NewTracer(utils.ThisFunction()).Start() t.Cleanup(func() { tracer.Stop() }) lib.SetInsecureDevMode(true) - log := utils.NewLoggerForTests() + log := utils.NewSlogLoggerForTests() // Generate keypair. privateKey, publicKey, err := testauthority.New().GenerateKeyPair() @@ -272,7 +272,7 @@ func SetupDatabaseTest(t *testing.T, options ...TestOptionFunc) *DatabasePack { NodeName: opts.nodeName, Priv: privateKey, Pub: publicKey, - Log: log, + Logger: log, } rootCfg.Listeners = opts.listenerSetup(t, &rootCfg.Fds) p.Root.Cluster = helpers.NewInstance(t, rootCfg) @@ -284,7 +284,7 @@ func SetupDatabaseTest(t *testing.T, options ...TestOptionFunc) *DatabasePack { NodeName: opts.nodeName, Priv: privateKey, Pub: publicKey, - Log: log, + Logger: log, } leafCfg.Listeners = opts.listenerSetup(t, &leafCfg.Fds) p.Leaf.Cluster = helpers.NewInstance(t, leafCfg) @@ -395,15 +395,15 @@ func (p *DatabasePack) WaitForLeaf(t *testing.T) { servers, err := accessPoint.GetDatabaseServers(ctx, apidefaults.Namespace) if err != nil { // Use root logger as we need a configured logger instance and the root cluster have one. - p.Root.Cluster.Log.WithError(err).Debugf("Leaf cluster access point is unavailable.") + p.Root.Cluster.Log.DebugContext(ctx, "Leaf cluster access point is unavailable", "error", err) continue } if !containsDB(servers, p.Leaf.MysqlService.Name) { - p.Root.Cluster.Log.WithError(err).Debugf("Leaf db service %q is unavailable.", p.Leaf.MysqlService.Name) + p.Root.Cluster.Log.DebugContext(ctx, "Leaf db service is unavailable", "error", err, "db_service", p.Leaf.MysqlService.Name) continue } if !containsDB(servers, p.Leaf.PostgresService.Name) { - p.Root.Cluster.Log.WithError(err).Debugf("Leaf db service %q is unavailable.", p.Leaf.PostgresService.Name) + p.Root.Cluster.Log.DebugContext(ctx, "Leaf db service is unavailable", "error", err, "db_service", p.Leaf.PostgresService.Name) continue } return diff --git a/integration/ec2_test.go b/integration/ec2_test.go index 4c685be1a55db..a151d169d25a0 100644 --- a/integration/ec2_test.go +++ b/integration/ec2_test.go @@ -21,7 +21,7 @@ package integration import ( "context" "fmt" - "io" + "log/slog" "os" "testing" "time" @@ -31,7 +31,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -52,15 +51,9 @@ import ( "github.com/gravitational/teleport/lib/service/servicecfg" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" ) -func newSilentLogger() utils.Logger { - logger := utils.NewLoggerForTests() - logger.SetLevel(logrus.PanicLevel) - logger.SetOutput(io.Discard) - return logger -} - func newNodeConfig(t *testing.T, tokenName string, 
joinMethod types.JoinMethod) *servicecfg.Config { config := servicecfg.MakeDefaultConfig() config.Version = defaults.TeleportConfigVersionV3 @@ -71,7 +64,7 @@ func newNodeConfig(t *testing.T, tokenName string, joinMethod types.JoinMethod) config.Auth.Enabled = false config.Proxy.Enabled = false config.DataDir = t.TempDir() - config.Log = newSilentLogger() + config.Logger = slog.New(logutils.DiscardHandler{}) config.CircuitBreakerConfig = breaker.NoopBreakerConfig() config.InstanceMetadataClient = cloudimds.NewDisabledIMDSClient() return config @@ -92,7 +85,7 @@ func newProxyConfig(t *testing.T, authAddr utils.NetAddr, tokenName string, join config.DataDir = t.TempDir() config.SetAuthServerAddress(authAddr) - config.Log = newSilentLogger() + config.Logger = slog.New(logutils.DiscardHandler{}) config.CircuitBreakerConfig = breaker.NoopBreakerConfig() config.InstanceMetadataClient = cloudimds.NewDisabledIMDSClient() return config @@ -126,7 +119,7 @@ func newAuthConfig(t *testing.T, clock clockwork.Clock) *servicecfg.Config { config.Proxy.Enabled = false config.SSH.Enabled = false config.Clock = clock - config.Log = newSilentLogger() + config.Logger = slog.New(logutils.DiscardHandler{}) config.CircuitBreakerConfig = breaker.NoopBreakerConfig() config.InstanceMetadataClient = cloudimds.NewDisabledIMDSClient() return config @@ -348,7 +341,7 @@ func TestEC2Labels(t *testing.T) { }, } tconf := servicecfg.MakeDefaultConfig() - tconf.Log = newSilentLogger() + tconf.Logger = slog.New(logutils.DiscardHandler{}) tconf.DataDir = t.TempDir() tconf.Auth.Enabled = true tconf.Proxy.Enabled = true @@ -473,7 +466,7 @@ func TestEC2Hostname(t *testing.T) { }, } tconf := servicecfg.MakeDefaultConfig() - tconf.Log = newSilentLogger() + tconf.Logger = slog.New(logutils.DiscardHandler{}) tconf.DataDir = t.TempDir() tconf.Auth.Enabled = true tconf.Proxy.Enabled = true diff --git a/integration/helpers/fixture.go b/integration/helpers/fixture.go index 635951758aa17..f56807e15a8be 100644 --- a/integration/helpers/fixture.go +++ b/integration/helpers/fixture.go @@ -19,6 +19,7 @@ package helpers import ( + "log/slog" "os" "os/user" "testing" @@ -28,7 +29,6 @@ import ( "github.com/gravitational/teleport/lib/auth/testauthority" "github.com/gravitational/teleport/lib/service/servicecfg" - "github.com/gravitational/teleport/lib/utils" ) const ( @@ -44,7 +44,7 @@ type Fixture struct { Pub []byte // Log defines the test-specific logger - Log utils.Logger + Log *slog.Logger } func NewFixture(t *testing.T) *Fixture { @@ -112,7 +112,7 @@ func (s *Fixture) DefaultInstanceConfig(t *testing.T) InstanceConfig { NodeName: Host, Priv: s.Priv, Pub: s.Pub, - Log: s.Log, + Logger: s.Log, } cfg.Listeners = StandardListenerSetup(t, &cfg.Fds) return cfg diff --git a/integration/helpers/helpers.go b/integration/helpers/helpers.go index 0896621e5fb35..54a35b0a32084 100644 --- a/integration/helpers/helpers.go +++ b/integration/helpers/helpers.go @@ -379,7 +379,7 @@ func MakeTestServers(t *testing.T) (auth *service.TeleportProcess, proxy *servic cfg.SSH.Enabled = false cfg.Auth.Enabled = true cfg.Proxy.Enabled = false - cfg.Log = utils.NewLoggerForTests() + cfg.Logger = utils.NewSlogLoggerForTests() auth, err = service.NewTeleport(cfg) require.NoError(t, err) @@ -417,7 +417,7 @@ func MakeTestServers(t *testing.T) (auth *service.TeleportProcess, proxy *servic cfg.Proxy.WebAddr, } cfg.Proxy.DisableWebInterface = true - cfg.Log = utils.NewLoggerForTests() + cfg.Logger = utils.NewSlogLoggerForTests() proxy, err = service.NewTeleport(cfg) 
require.NoError(t, err) @@ -454,7 +454,7 @@ func MakeTestDatabaseServer(t *testing.T, proxyAddr utils.NetAddr, token string, cfg.Databases.Enabled = true cfg.Databases.Databases = dbs cfg.Databases.ResourceMatchers = resMatchers - cfg.Log = utils.NewLoggerForTests() + cfg.Logger = utils.NewSlogLoggerForTests() db, err := service.NewTeleport(cfg) require.NoError(t, err) @@ -487,7 +487,7 @@ func MakeAgentServer(t *testing.T, cfg *servicecfg.Config, proxyAddr utils.NetAd cfg.Auth.Enabled = false cfg.Proxy.Enabled = false cfg.Databases.Enabled = false - cfg.Log = utils.NewLoggerForTests() + cfg.Logger = utils.NewSlogLoggerForTests() agent, err := service.NewTeleport(cfg) require.NoError(t, err) diff --git a/integration/helpers/instance.go b/integration/helpers/instance.go index f6b25459ea275..5f652c77b4eea 100644 --- a/integration/helpers/instance.go +++ b/integration/helpers/instance.go @@ -26,6 +26,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "net/url" @@ -39,7 +40,7 @@ import ( "github.com/gorilla/websocket" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - log "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "golang.org/x/crypto/ssh" @@ -69,6 +70,7 @@ import ( "github.com/gravitational/teleport/lib/sshutils" "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" "github.com/gravitational/teleport/lib/web" websession "github.com/gravitational/teleport/lib/web/session" "github.com/gravitational/teleport/lib/web/terminal" @@ -81,7 +83,11 @@ const ( func fatalIf(err error) { if err != nil { - log.Fatalf("%v at %v", string(debug.Stack()), err) + slog.ErrorContext(context.Background(), "Fatal error", + "stack", string(debug.Stack()), + "error", err, + ) + os.Exit(1) } } @@ -295,7 +301,7 @@ type TeleInstance struct { tempDirs []string // Log specifies the instance logger - Log utils.Logger + Log *slog.Logger InstanceListeners Fds []*servicecfg.FileDescriptor // ProcessProvider creates a Teleport process (OSS or Enterprise) @@ -322,7 +328,11 @@ type InstanceConfig struct { // Pub is SSH public key of the instance Pub []byte // Log specifies the logger + // Deprecated: Use Logger instead + // TODO(tross): Delete when e is updated Log utils.Logger + // Logger specifies the logger + Logger *slog.Logger // Ports is a collection of instance ports. 
Listeners *InstanceListeners @@ -344,6 +354,14 @@ func NewInstance(t *testing.T, cfg InstanceConfig) *TeleInstance { cfg.Listeners = StandardListenerSetup(t, &cfg.Fds) } + if cfg.Log == nil { + cfg.Log = logrus.New() + } + + if cfg.Logger == nil { + cfg.Logger = slog.New(logutils.DiscardHandler{}) + } + // generate instance secrets (keys): if cfg.Priv == nil || cfg.Pub == nil { privateKey, err := cryptosuites.GeneratePrivateKeyWithAlgorithm(cryptosuites.ECDSAP256) @@ -399,7 +417,7 @@ func NewInstance(t *testing.T, cfg InstanceConfig) *TeleInstance { i := &TeleInstance{ Hostname: cfg.NodeName, UploadEventsC: make(chan events.UploadEvent, 100), - Log: cfg.Log, + Log: cfg.Logger, InstanceListeners: *cfg.Listeners, Fds: cfg.Fds, } @@ -425,12 +443,12 @@ func NewInstance(t *testing.T, cfg InstanceConfig) *TeleInstance { func (i *TeleInstance) GetSiteAPI(siteName string) authclient.ClientI { siteTunnel, err := i.Tunnel.GetSite(siteName) if err != nil { - log.Warn(err) + i.Log.WarnContext(context.Background(), "failed to get site", "error", err, "site", siteName) return nil } siteAPI, err := siteTunnel.GetClient() if err != nil { - log.Warn(err) + i.Log.WarnContext(context.Background(), "failed to get site client", "error", err, "site", siteName) return nil } return siteAPI @@ -442,7 +460,7 @@ func (i *TeleInstance) Create(t *testing.T, trustedSecrets []*InstanceSecrets, e tconf := servicecfg.MakeDefaultConfig() tconf.SSH.Enabled = enableSSH tconf.Console = console - tconf.Log = i.Log + tconf.Logger = i.Log tconf.Proxy.DisableWebService = true tconf.Proxy.DisableWebInterface = true tconf.CircuitBreakerConfig = breaker.NoopBreakerConfig() @@ -466,7 +484,7 @@ func (i *TeleInstance) GenerateConfig(t *testing.T, trustedSecrets []*InstanceSe if tconf.InstanceMetadataClient == nil { tconf.InstanceMetadataClient = imds.NewDisabledIMDSClient() } - tconf.Log = i.Log + tconf.Logger = i.Log tconf.DataDir = dataDir tconf.Testing.UploadEventsC = i.UploadEventsC tconf.CachePolicy.Enabled = true @@ -764,8 +782,12 @@ func (i *TeleInstance) StartNodeWithTargetPort(tconf *servicecfg.Config, authPor return nil, trace.Wrap(err) } - log.Debugf("Teleport node %s (in instance %s) started: %v/%v expected events received.", - process.Config.Hostname, i.Secrets.SiteName, len(expectedEvents), len(receivedEvents)) + i.Log.DebugContext(context.Background(), "Teleport node started", + "node_name", process.Config.Hostname, + "instance", i.Secrets.SiteName, + "expected_events_count", len(expectedEvents), + "received_events_count", len(receivedEvents), + ) return process, nil } @@ -807,8 +829,11 @@ func (i *TeleInstance) StartApp(conf *servicecfg.Config) (*service.TeleportProce return nil, trace.Wrap(err) } - log.Debugf("Teleport Application Server (in instance %v) started: %v/%v events received.", - i.Secrets.SiteName, len(expectedEvents), len(receivedEvents)) + i.Log.DebugContext(context.Background(), "Teleport Application Server started", + "instance", i.Secrets.SiteName, + "expected_events_count", len(expectedEvents), + "received_events_count", len(receivedEvents), + ) return process, nil } @@ -857,8 +882,11 @@ func (i *TeleInstance) StartApps(configs []*servicecfg.Config) ([]*service.Telep results <- result{err: err, tmpDir: dataDir} } - log.Debugf("Teleport Application Server (in instance %v) started: %v/%v events received.", - i.Secrets.SiteName, len(expectedEvents), len(receivedEvents)) + i.Log.DebugContext(context.Background(), "Teleport Application Server started", + "instance", i.Secrets.SiteName, + 
"expected_events_count", len(expectedEvents), + "received_events_count", len(receivedEvents), + ) results <- result{err: err, tmpDir: dataDir, process: process} }(conf) @@ -941,8 +969,12 @@ func (i *TeleInstance) StartDatabase(conf *servicecfg.Config) (*service.Teleport return nil, nil, trace.BadParameter("failed to retrieve auth client") } - log.Debugf("Teleport Database Server (in instance %v) started: %v/%v events received.", - i.Secrets.SiteName, len(expectedEvents), len(receivedEvents)) + i.Log.DebugContext(context.Background(), "Teleport Database Server started", + "instance", i.Secrets.SiteName, + "expected_events_count", len(expectedEvents), + "received_events_count", len(receivedEvents), + ) + return process, client, nil } @@ -989,8 +1021,13 @@ func (i *TeleInstance) StartKube(t *testing.T, conf *servicecfg.Config, clusterN if err != nil { return nil, trace.Wrap(err) } - log.Debugf("Teleport Kube Server (in instance %v) started: %v/%v events received.", - i.Secrets.SiteName, len(expectedEvents), len(receivedEvents)) + + i.Log.DebugContext(context.Background(), "Teleport Kube Server started", + "instance", i.Secrets.SiteName, + "expected_events_count", len(expectedEvents), + "received_events_count", len(receivedEvents), + ) + return process, nil } @@ -1004,7 +1041,7 @@ func (i *TeleInstance) StartNodeAndProxy(t *testing.T, name string) (sshPort, we tconf := servicecfg.MakeDefaultConfig() - tconf.Log = i.Log + tconf.Logger = i.Log authServer := utils.MustParseAddr(i.Auth) tconf.SetAuthServerAddress(*authServer) tconf.SetToken("token") @@ -1060,8 +1097,11 @@ func (i *TeleInstance) StartNodeAndProxy(t *testing.T, name string) (sshPort, we receivedEvents, err := StartAndWait(process, expectedEvents) require.NoError(t, err) - log.Debugf("Teleport node and proxy (in instance %v) started: %v/%v events received.", - i.Secrets.SiteName, len(expectedEvents), len(receivedEvents)) + i.Log.DebugContext(context.Background(), "Teleport node and proxy started", + "instance", i.Secrets.SiteName, + "expected_events_count", len(expectedEvents), + "received_events_count", len(receivedEvents), + ) return } @@ -1099,7 +1139,7 @@ func (i *TeleInstance) StartProxy(cfg ProxyConfig, opts ...Option) (reversetunne tconf := servicecfg.MakeDefaultConfig() tconf.Console = nil - tconf.Log = i.Log + tconf.Logger = i.Log authServer := utils.MustParseAddr(i.Auth) tconf.SetAuthServerAddress(*authServer) tconf.CachePolicy = servicecfg.CachePolicy{Enabled: true} @@ -1162,8 +1202,11 @@ func (i *TeleInstance) StartProxy(cfg ProxyConfig, opts ...Option) (reversetunne return nil, nil, trace.Wrap(err) } - log.Debugf("Teleport proxy (in instance %v) started: %v/%v events received.", - i.Secrets.SiteName, len(expectedEvents), len(receivedEvents)) + i.Log.DebugContext(context.Background(), "Teleport proxy started", + "instance", i.Secrets.SiteName, + "expected_events_count", len(expectedEvents), + "received_events_count", len(receivedEvents), + ) // Extract and set reversetunnelclient.Server and reversetunnel.AgentPool upon // receipt of a ProxyReverseTunnelReady event @@ -1228,7 +1271,7 @@ func (i *TeleInstance) AddUserWithRole(username string, roles ...types.Role) *Us // Adds a new user into i Teleport instance. 
'mappings' is a comma-separated // list of OS users func (i *TeleInstance) AddUser(username string, mappings []string) *User { - log.Infof("teleInstance.AddUser(%v) mapped to %v", username, mappings) + i.Log.InfoContext(context.Background(), "Adding user to teleInstance", "user", username, "mappings", mappings) if mappings == nil { mappings = make([]string, 0) } @@ -1299,8 +1342,12 @@ func (i *TeleInstance) Start() error { } } - log.Debugf("Teleport instance %v started: %v/%v events received.", - i.Secrets.SiteName, len(receivedEvents), len(expectedEvents)) + i.Log.DebugContext(context.Background(), "Teleport instance started", + "instance", i.Secrets.SiteName, + "expected_events_count", len(expectedEvents), + "received_events_count", len(receivedEvents), + ) + return nil } @@ -1669,11 +1716,11 @@ func (i *TeleInstance) StopProxy() error { if p.Config.Proxy.Enabled { if err := p.Close(); err != nil { errors = append(errors, err) - i.Log.Errorf("Failed closing extra proxy: %v.", err) + i.Log.ErrorContext(context.Background(), "Failed closing extra proxy", "error", err) } if err := p.Wait(); err != nil { errors = append(errors, err) - i.Log.Errorf("Failed to stop extra proxy: %v.", err) + i.Log.ErrorContext(context.Background(), "Failed to stop extra proxy", "error", err) } } } @@ -1687,11 +1734,11 @@ func (i *TeleInstance) StopNodes() error { for _, node := range i.Nodes { if err := node.Close(); err != nil { errors = append(errors, err) - i.Log.Errorf("Failed closing extra node %v", err) + i.Log.ErrorContext(context.Background(), "Failed closing extra node", "error", err) } if err := node.Wait(); err != nil { errors = append(errors, err) - i.Log.Errorf("Failed stopping extra node %v", err) + i.Log.ErrorContext(context.Background(), "Failed stopping extra node", "error", err) } } return trace.NewAggregate(errors...) 
@@ -1703,13 +1750,13 @@ func (i *TeleInstance) RestartAuth() error { return nil } - i.Log.Infof("Asking Teleport instance %q to stop", i.Secrets.SiteName) + i.Log.InfoContext(context.Background(), "Asking Teleport instance to stop", "instance", i.Secrets.SiteName) err := i.Process.Close() if err != nil { - i.Log.WithError(err).Error("Failed closing the teleport process.") + i.Log.ErrorContext(context.Background(), "Failed closing the teleport process", "error", err) return trace.Wrap(err) } - i.Log.Infof("Teleport instance %q stopped!", i.Secrets.SiteName) + i.Log.InfoContext(context.Background(), "Teleport instance stopped", "instance", i.Secrets.SiteName) if err := i.Process.Wait(); err != nil { return trace.Wrap(err) @@ -1723,9 +1770,9 @@ func (i *TeleInstance) RestartAuth() error { func (i *TeleInstance) StopAuth(removeData bool) error { defer func() { if i.Config != nil && removeData { - i.Log.Infoln("Removing data dir", i.Config.DataDir) + i.Log.InfoContext(context.Background(), "Removing data dir", "data_dir", i.Config.DataDir) if err := os.RemoveAll(i.Config.DataDir); err != nil { - i.Log.WithError(err).Error("Failed removing temporary local Teleport directory.") + i.Log.ErrorContext(context.Background(), "Failed removing temporary local Teleport directory", "error", err) } } i.Process = nil @@ -1734,14 +1781,14 @@ func (i *TeleInstance) StopAuth(removeData bool) error { if i.Process == nil { return nil } - i.Log.Infof("Asking Teleport instance %q to stop", i.Secrets.SiteName) + i.Log.InfoContext(context.Background(), "Asking Teleport instance to stop", "instance", i.Secrets.SiteName) err := i.Process.Close() if err != nil { - i.Log.WithError(err).Error("Failed closing the teleport process.") + i.Log.ErrorContext(context.Background(), "Failed closing the teleport process", "error", err) return trace.Wrap(err) } defer func() { - i.Log.Infof("Teleport instance %q stopped!", i.Secrets.SiteName) + i.Log.InfoContext(context.Background(), "Teleport instance stopped", "instance", i.Secrets.SiteName) }() return i.Process.Wait() } @@ -1761,7 +1808,7 @@ func (i *TeleInstance) StopAll() error { errors = append(errors, os.RemoveAll(dir)) } - i.Log.Infof("Stopped all teleport services for site %q", i.Secrets.SiteName) + i.Log.InfoContext(context.Background(), "Stopped all teleport services for site", "instance", i.Secrets.SiteName) return trace.NewAggregate(errors...) } diff --git a/integration/helpers/proxy.go b/integration/helpers/proxy.go index 2989a8f8a4c7f..3486a57f8edf6 100644 --- a/integration/helpers/proxy.go +++ b/integration/helpers/proxy.go @@ -22,6 +22,7 @@ import ( "context" "crypto/tls" "fmt" + "log/slog" "net" "net/http" "net/url" @@ -30,7 +31,6 @@ import ( "time" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/fixtures" @@ -163,7 +163,7 @@ func (m *MockAWSALBProxy) serve(ctx context.Context) { conn, err := m.Accept() if err != nil { - logrus.WithError(err).Debugf("Failed to accept conn.") + slog.DebugContext(ctx, "Failed to accept conn", "error", err) return } @@ -178,7 +178,7 @@ func (m *MockAWSALBProxy) serve(ctx context.Context) { // api.Client may try different connection methods. Just close the // connection when something goes wrong. 
if err := downstreamConn.HandshakeContext(ctx); err != nil { - logrus.WithError(err).Debugf("Failed to handshake.") + slog.DebugContext(ctx, "Failed to handshake", "error", err) return } @@ -187,7 +187,7 @@ func (m *MockAWSALBProxy) serve(ctx context.Context) { InsecureSkipVerify: true, }) if err != nil { - logrus.WithError(err).Debugf("Failed to dial upstream.") + slog.DebugContext(ctx, "Failed to dial upstream", "error", err) return } utils.ProxyConn(ctx, downstreamConn, upstreamConn) diff --git a/integration/helpers/trustedclusters.go b/integration/helpers/trustedclusters.go index cfc68f571ce5d..7def24b31ff08 100644 --- a/integration/helpers/trustedclusters.go +++ b/integration/helpers/trustedclusters.go @@ -25,7 +25,6 @@ import ( "time" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -60,18 +59,15 @@ func TryCreateTrustedCluster(t *testing.T, authServer *auth.Server, trustedClust t.Helper() ctx := context.TODO() for i := 0; i < 10; i++ { - log.Debugf("Will create trusted cluster %v, attempt %v.", trustedCluster, i) _, err := authServer.CreateTrustedCluster(ctx, trustedCluster) if err == nil { return } if trace.IsConnectionProblem(err) { - log.Debugf("Retrying on connection problem: %v.", err) time.Sleep(500 * time.Millisecond) continue } if trace.IsAccessDenied(err) { - log.Debugf("Retrying on access denied: %v.", err) time.Sleep(500 * time.Millisecond) continue } @@ -87,18 +83,15 @@ func TryUpdateTrustedCluster(t *testing.T, authServer *auth.Server, trustedClust t.Helper() ctx := context.TODO() for i := 0; i < 10; i++ { - log.Debugf("Will create trusted cluster %v, attempt %v.", trustedCluster, i) _, err := authServer.UpdateTrustedCluster(ctx, trustedCluster) if err == nil { return } if trace.IsConnectionProblem(err) { - log.Debugf("Retrying on connection problem: %v.", err) time.Sleep(500 * time.Millisecond) continue } if trace.IsAccessDenied(err) { - log.Debugf("Retrying on access denied: %v.", err) time.Sleep(500 * time.Millisecond) continue } @@ -114,7 +107,6 @@ func TryUpsertTrustedCluster(t *testing.T, authServer *auth.Server, trustedClust t.Helper() ctx := context.TODO() for i := 0; i < 10; i++ { - log.Debugf("Will create trusted cluster %v, attempt %v.", trustedCluster, i) var err error if skipNameValidation { _, err = authServer.UpsertTrustedCluster(ctx, trustedCluster) @@ -125,12 +117,10 @@ func TryUpsertTrustedCluster(t *testing.T, authServer *auth.Server, trustedClust return } if trace.IsConnectionProblem(err) { - log.Debugf("Retrying on connection problem: %v.", err) time.Sleep(500 * time.Millisecond) continue } if trace.IsAccessDenied(err) { - log.Debugf("Retrying on access denied: %v.", err) time.Sleep(500 * time.Millisecond) continue } diff --git a/integration/hostuser_test.go b/integration/hostuser_test.go index 2f7a741e513f5..b5b045c2840b3 100644 --- a/integration/hostuser_test.go +++ b/integration/hostuser_test.go @@ -26,6 +26,7 @@ import ( "bytes" "context" "fmt" + "log/slog" "os" "os/exec" "os/user" @@ -37,7 +38,6 @@ import ( "github.com/google/uuid" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -226,7 +226,7 @@ func cleanupUsersAndGroups(users []string, groups []string) { cmd := exec.Command("groupdel", group) err := cmd.Run() if err != nil { - log.Debugf("Error deleting group %s: %s", group, err) + slog.DebugContext(context.Background(), "Error deleting group", 
"group", group, "error", err) } } for _, user := range users { diff --git a/integration/hsm/helpers.go b/integration/hsm/helpers.go index a8a91c470a814..d0cec1342bc3d 100644 --- a/integration/hsm/helpers.go +++ b/integration/hsm/helpers.go @@ -20,6 +20,7 @@ package hsm import ( "context" + "log/slog" "net" "path/filepath" "testing" @@ -27,7 +28,6 @@ import ( "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/breaker" @@ -47,7 +47,7 @@ import ( // are not generally thread safe. type teleportService struct { name string - log logrus.FieldLogger + log *slog.Logger config *servicecfg.Config process *service.TeleportProcess @@ -58,7 +58,7 @@ type teleportService struct { func newTeleportService(ctx context.Context, config *servicecfg.Config, name string) (*teleportService, error) { t := &teleportService{ name: name, - log: config.Log.WithField("helper_service", name), + log: config.Logger.With("helper_service", name), config: config, errC: make(chan struct{}), } @@ -98,7 +98,7 @@ func (t *teleportService) waitForShutdown(ctx context.Context) error { } func (t *teleportService) waitForLocalAdditionalKeys(ctx context.Context) error { - t.log.Debug("waiting for local additional keys") + t.log.DebugContext(ctx, "waiting for local additional keys") authServer := t.process.GetAuthServer() if authServer == nil { return trace.NotFound("%v: attempted to wait for additional keys in a service with no auth", t.name) @@ -130,7 +130,7 @@ func (t *teleportService) waitForLocalAdditionalKeys(ctx context.Context) error return trace.Wrap(err) } if usableKeysResult.CAHasPreferredKeyType { - t.log.Debugf("got local additional keys") + t.log.DebugContext(ctx, "got local additional keys") return nil } } @@ -204,13 +204,13 @@ func (s teleportServices) waitForLocalAdditionalKeys(ctx context.Context) error return s.forEach(func(t *teleportService) error { return t.waitForLocalAdditionalKeys(ctx) }) } -func newAuthConfig(t *testing.T, log utils.Logger, clock clockwork.Clock) *servicecfg.Config { +func newAuthConfig(t *testing.T, log *slog.Logger, clock clockwork.Clock) *servicecfg.Config { config := servicecfg.MakeDefaultConfig() config.DataDir = t.TempDir() config.Auth.StorageConfig.Params["path"] = filepath.Join(config.DataDir, defaults.BackendDir) config.SSH.Enabled = false config.Proxy.Enabled = false - config.Log = log + config.Logger = log config.InstanceMetadataClient = imds.NewDisabledIMDSClient() config.MaxRetryPeriod = 25 * time.Millisecond config.PollingPeriod = 2 * time.Second @@ -244,7 +244,7 @@ func newAuthConfig(t *testing.T, log utils.Logger, clock clockwork.Clock) *servi return config } -func newProxyConfig(t *testing.T, authAddr utils.NetAddr, log utils.Logger, clock clockwork.Clock) *servicecfg.Config { +func newProxyConfig(t *testing.T, authAddr utils.NetAddr, log *slog.Logger, clock clockwork.Clock) *servicecfg.Config { config := servicecfg.MakeDefaultConfig() config.Version = defaults.TeleportConfigVersionV3 config.DataDir = t.TempDir() @@ -253,7 +253,7 @@ func newProxyConfig(t *testing.T, authAddr utils.NetAddr, log utils.Logger, cloc config.SSH.Enabled = false config.SetToken("foo") config.SetAuthServerAddress(authAddr) - config.Log = log + config.Logger = log config.InstanceMetadataClient = imds.NewDisabledIMDSClient() config.MaxRetryPeriod = 25 * time.Millisecond config.PollingPeriod = 2 * time.Second diff --git a/integration/hsm/hsm_test.go b/integration/hsm/hsm_test.go 
index d2aec7359f44c..7df064ef34b6d 100644 --- a/integration/hsm/hsm_test.go +++ b/integration/hsm/hsm_test.go @@ -20,6 +20,7 @@ package hsm import ( "context" + "log/slog" "net" "os" "path/filepath" @@ -65,7 +66,7 @@ func TestMain(m *testing.M) { os.Exit(m.Run()) } -func newHSMAuthConfig(t *testing.T, storageConfig *backend.Config, log utils.Logger, clock clockwork.Clock) *servicecfg.Config { +func newHSMAuthConfig(t *testing.T, storageConfig *backend.Config, log *slog.Logger, clock clockwork.Clock) *servicecfg.Config { config := newAuthConfig(t, log, clock) config.Auth.StorageConfig = *storageConfig config.Auth.KeyStore = keystore.HSMTestConfig(t) @@ -90,9 +91,9 @@ func liteBackendConfig(t *testing.T) *backend.Config { func TestHSMRotation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - log := utils.NewLoggerForTests() + log := utils.NewSlogLoggerForTests().With(teleport.ComponentKey, "TestHSMRotation") - log.Debug("TestHSMRotation: starting auth server") + log.DebugContext(ctx, "starting auth server") authConfig := newHSMAuthConfig(t, liteBackendConfig(t), log, clockwork.NewRealClock()) auth1, err := newTeleportService(ctx, authConfig, "auth1") require.NoError(t, err) @@ -103,12 +104,12 @@ func TestHSMRotation(t *testing.T) { }) // start a proxy to make sure it can get creds at each stage of rotation - log.Debug("TestHSMRotation: starting proxy") + log.DebugContext(ctx, "starting proxy") proxy, err := newTeleportService(ctx, newProxyConfig(t, auth1.authAddr(t), log, clockwork.NewRealClock()), "proxy") require.NoError(t, err) allServices = append(allServices, proxy) - log.Debug("TestHSMRotation: sending rotation request init") + log.DebugContext(ctx, "sending rotation request init") require.NoError(t, allServices.waitingForNewEvent(ctx, service.TeleportPhaseChangeEvent, func() error { return trace.Wrap(auth1.process.GetAuthServer().RotateCertAuthority(ctx, types.RotateRequest{ Type: types.HostCA, @@ -117,7 +118,7 @@ func TestHSMRotation(t *testing.T) { })) })) - log.Debug("TestHSMRotation: sending rotation request update_clients") + log.DebugContext(ctx, "sending rotation request update_clients") require.NoError(t, allServices.waitingForNewEvent(ctx, service.TeleportCredentialsUpdatedEvent, func() error { return trace.Wrap(auth1.process.GetAuthServer().RotateCertAuthority(ctx, types.RotateRequest{ Type: types.HostCA, @@ -126,7 +127,7 @@ func TestHSMRotation(t *testing.T) { })) })) - log.Debug("TestHSMRotation: sending rotation request update_servers") + log.DebugContext(ctx, "sending rotation request update_servers") require.NoError(t, allServices.waitingForNewEvent(ctx, service.TeleportCredentialsUpdatedEvent, func() error { return trace.Wrap(auth1.process.GetAuthServer().RotateCertAuthority(ctx, types.RotateRequest{ Type: types.HostCA, @@ -135,7 +136,7 @@ func TestHSMRotation(t *testing.T) { })) })) - log.Debug("TestHSMRotation: sending rotation request standby") + log.DebugContext(ctx, "sending rotation request standby") require.NoError(t, allServices.waitingForNewEvent(ctx, service.TeleportCredentialsUpdatedEvent, func() error { return trace.Wrap(auth1.process.GetAuthServer().RotateCertAuthority(ctx, types.RotateRequest{ Type: types.HostCA, @@ -201,11 +202,11 @@ func TestHSMDualAuthRotation(t *testing.T) { t.Setenv("TELEPORT_UNSTABLE_SKIP_VERSION_UPGRADE_CHECK", "1") ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - log := utils.NewLoggerForTests() + log := utils.NewSlogLoggerForTests().With(teleport.ComponentKey, 
"TestHSMDualAuthRotation") storageConfig := liteBackendConfig(t) // start a cluster with 1 auth server - log.Debug("TestHSMDualAuthRotation: Starting auth server 1") + log.DebugContext(ctx, "Starting auth server 1") auth1Config := newHSMAuthConfig(t, storageConfig, log, clockwork.NewRealClock()) auth1, err := newTeleportService(ctx, auth1Config, "auth1") require.NoError(t, err) @@ -215,7 +216,7 @@ func TestHSMDualAuthRotation(t *testing.T) { }) authServices := teleportServices{auth1} - log.Debug("TestHSMDualAuthRotation: Starting load balancer") + log.DebugContext(ctx, "Starting load balancer") lb, err := utils.NewLoadBalancer( ctx, *utils.MustParseAddr(net.JoinHostPort("localhost", "0")), @@ -227,7 +228,7 @@ func TestHSMDualAuthRotation(t *testing.T) { t.Cleanup(func() { require.NoError(t, lb.Close()) }) // add a new auth server - log.Debug("TestHSMDualAuthRotation: Starting auth server 2") + log.DebugContext(ctx, "Starting auth server 2") auth2Config := newHSMAuthConfig(t, storageConfig, log, clockwork.NewRealClock()) auth2, err := newTeleportService(ctx, auth2Config, "auth2") require.NoError(t, err) @@ -278,7 +279,7 @@ func TestHSMDualAuthRotation(t *testing.T) { // do a full rotation for _, stage := range stages { - log.Debugf("TestHSMDualAuthRotation: Sending rotate request %s", stage.targetPhase) + log.DebugContext(ctx, "Sending rotate request", "phase", stage.targetPhase) require.NoError(t, stage.verify(func() error { return auth1.process.GetAuthServer().RotateCertAuthority(ctx, types.RotateRequest{ Type: types.HostCA, @@ -299,7 +300,7 @@ func TestHSMDualAuthRotation(t *testing.T) { // Do another full rotation from the new auth server for _, stage := range stages { - log.Debugf("TestHSMDualAuthRotation: Sending rotate request %s", stage.targetPhase) + log.DebugContext(ctx, "Sending rotate request", "phase", stage.targetPhase) require.NoError(t, stage.verify(func() error { return auth2.process.GetAuthServer().RotateCertAuthority(ctx, types.RotateRequest{ Type: types.HostCA, @@ -365,7 +366,7 @@ func TestHSMDualAuthRotation(t *testing.T) { }, } for _, stage := range stages { - log.Debugf("TestHSMDualAuthRotation: Sending rotate request %s", stage.targetPhase) + log.DebugContext(ctx, "Sending rotate request", "phase", stage.targetPhase) require.NoError(t, stage.verify(func() error { return auth1.process.GetAuthServer().RotateCertAuthority(ctx, types.RotateRequest{ @@ -383,11 +384,11 @@ func TestHSMMigrate(t *testing.T) { t.Setenv("TELEPORT_UNSTABLE_SKIP_VERSION_UPGRADE_CHECK", "1") ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - log := utils.NewLoggerForTests() + log := utils.NewSlogLoggerForTests().With(teleport.ComponentKey, "TestHSMMigrate") storageConfig := liteBackendConfig(t) // start a dual auth non-hsm cluster - log.Debug("TestHSMMigrate: Starting auth server 1") + log.DebugContext(ctx, "Starting auth server 1") auth1Config := newHSMAuthConfig(t, storageConfig, log, clockwork.NewRealClock()) auth1Config.Auth.KeyStore = servicecfg.KeystoreConfig{} auth2Config := newHSMAuthConfig(t, storageConfig, log, clockwork.NewRealClock()) @@ -402,7 +403,7 @@ func TestHSMMigrate(t *testing.T) { auth1Config.Auth.ListenAddr = auth1.authAddr(t) auth2Config.Auth.ListenAddr = auth2.authAddr(t) - log.Debug("TestHSMMigrate: Starting load balancer") + log.DebugContext(ctx, "Starting load balancer") lb, err := utils.NewLoadBalancer( ctx, *utils.MustParseAddr(net.JoinHostPort("localhost", "0")), @@ -476,7 +477,7 @@ func TestHSMMigrate(t *testing.T) { // Do a full rotation to 
get HSM keys for auth1 into the CA. for _, stage := range stages { - log.Debugf("TestHSMMigrate: Sending rotate request %s", stage.targetPhase) + log.DebugContext(ctx, "Sending rotate request", "phase", stage.targetPhase) require.NoError(t, stage.verify(func() error { return auth1.process.GetAuthServer().RotateCertAuthority(ctx, types.RotateRequest{ Type: types.HostCA, @@ -512,7 +513,7 @@ func TestHSMMigrate(t *testing.T) { // Do another full rotation to get HSM keys for auth2 into the CA. for _, stage := range stages { - log.Debugf("TestHSMMigrate: Sending rotate request %s", stage.targetPhase) + log.DebugContext(ctx, "Sending rotate request", "phase", stage.targetPhase) require.NoError(t, stage.verify(func() error { return auth2.process.GetAuthServer().RotateCertAuthority(ctx, types.RotateRequest{ Type: types.HostCA, @@ -530,9 +531,9 @@ func TestHSMRevert(t *testing.T) { clock := clockwork.NewFakeClock() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - log := utils.NewLoggerForTests() + log := utils.NewSlogLoggerForTests().With(teleport.ComponentKey, "TestHSMRevert") - log.Debug("TestHSMRevert: starting auth server") + log.DebugContext(ctx, "starting auth server") auth1Config := newHSMAuthConfig(t, liteBackendConfig(t), log, clock) auth1, err := newTeleportService(ctx, auth1Config, "auth1") require.NoError(t, err) @@ -571,7 +572,7 @@ func TestHSMRevert(t *testing.T) { types.RotationPhaseUpdateServers, types.RotationPhaseStandby, } { - log.Debugf("TestHSMRevert: sending rotation request %v for CA %v", targetPhase, caType) + log.DebugContext(ctx, "sending rotation request", "phase", targetPhase, "ca", caType) if caType == types.HostCA { expectedEvent := service.TeleportCredentialsUpdatedEvent if targetPhase == types.RotationPhaseInit { diff --git a/integration/instance_test.go b/integration/instance_test.go index 1e76e26012c41..35af71e53a385 100644 --- a/integration/instance_test.go +++ b/integration/instance_test.go @@ -123,7 +123,7 @@ func TestInstanceCertReissue(t *testing.T) { authCfg.SSH.Enabled = true authCfg.SSH.Addr.Addr = "localhost:0" authCfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() - authCfg.Log = utils.NewLoggerForTests() + authCfg.Logger = utils.NewSlogLoggerForTests() authCfg.InstanceMetadataClient = imds.NewDisabledIMDSClient() authProc, err := service.NewTeleport(authCfg) @@ -162,7 +162,7 @@ func TestInstanceCertReissue(t *testing.T) { agentCfg.WindowsDesktop.Enabled = true agentCfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() - agentCfg.Log = utils.NewLoggerForTests() + agentCfg.Logger = utils.NewSlogLoggerForTests() agentCfg.MaxRetryPeriod = time.Second agentCfg.InstanceMetadataClient = imds.NewDisabledIMDSClient() diff --git a/integration/integration_test.go b/integration/integration_test.go index 9dcb2332b44e6..43f2a358e51b8 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -29,6 +29,7 @@ import ( "fmt" "io" "io/fs" + "log/slog" "net" "net/http" "net/http/httptest" @@ -52,7 +53,6 @@ import ( "github.com/gravitational/trace" "github.com/gravitational/trace/trail" "github.com/pkg/sftp" - log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/crypto/ssh" @@ -124,7 +124,7 @@ func (s *integrationTestSuite) bind(test integrationTest) func(t *testing.T) { // Attempt to set a logger for the test. Be warned that parts of the // Teleport codebase do not honor the logger passed in via config and // will create their own. 
Do not expect to catch _all_ output with this. - s.Log = utils.NewLoggerForTests() + s.Log = utils.NewSlogLoggerForTests() os.RemoveAll(profile.FullProfilePath("")) t.Cleanup(func() { s.Log = nil }) test(t, s) @@ -1433,8 +1433,12 @@ func testIPPropagation(t *testing.T, suite *integrationTestSuite) { receivedEvents, err := helpers.StartAndWait(process, expectedEvents) require.NoError(t, err) - log.Debugf("Node (in instance %v) started: %v/%v events received.", - i.Secrets.SiteName, len(expectedEvents), len(receivedEvents)) + i.Log.DebugContext(context.Background(), "Teleport node started", + "node_name", process.Config.Hostname, + "instance", i.Secrets.SiteName, + "expected_events_count", len(expectedEvents), + "received_events_count", len(receivedEvents), + ) } wg.Add(len(rootNodes) + len(leafNodes)) @@ -1902,7 +1906,7 @@ func testClientIdleConnection(t *testing.T, suite *integrationTestSuite) { tconf := servicecfg.MakeDefaultConfig() tconf.SSH.Enabled = true - tconf.Log = utils.NewLoggerForTests() + tconf.Logger = utils.NewSlogLoggerForTests() tconf.Proxy.DisableWebService = true tconf.Proxy.DisableWebInterface = true tconf.Auth.NetworkingConfig = netConfig @@ -2335,7 +2339,7 @@ func testTwoClustersTunnel(t *testing.T, suite *integrationTestSuite) { }) } - log.Info("Tests done. Cleaning up.") + slog.InfoContext(context.Background(), "Tests done, cleaning up") } func twoClustersTunnel(t *testing.T, suite *integrationTestSuite, now time.Time, proxyRecordMode string, execCountSiteA, execCountSiteB int) { @@ -3006,7 +3010,7 @@ func createAndUpdateTrustedClusters(t *testing.T, suite *integrationTestSuite, t NodeName: Host, Priv: suite.Priv, Pub: suite.Pub, - Log: suite.Log, + Logger: suite.Log, } mainCfg.Listeners = standardPortsOrMuxSetup(t, test.multiplex, &mainCfg.Fds) main := helpers.NewInstance(t, mainCfg) @@ -3116,7 +3120,7 @@ func trustedClusters(t *testing.T, suite *integrationTestSuite, test trustedClus NodeName: Host, Priv: suite.Priv, Pub: suite.Pub, - Log: suite.Log, + Logger: suite.Log, } mainCfg.Listeners = standardPortsOrMuxSetup(t, test.multiplex, &mainCfg.Fds) main := helpers.NewInstance(t, mainCfg) @@ -3359,7 +3363,7 @@ func trustedDisabledCluster(t *testing.T, suite *integrationTestSuite, test trus NodeName: Host, Priv: suite.Priv, Pub: suite.Pub, - Log: suite.Log, + Logger: suite.Log, } mainCfg.Listeners = standardPortsOrMuxSetup(t, test.multiplex, &mainCfg.Fds) @@ -3499,7 +3503,7 @@ func trustedClustersRoleMapChanges(t *testing.T, suite *integrationTestSuite, te NodeName: Host, Priv: suite.Priv, Pub: suite.Pub, - Log: suite.Log, + Logger: suite.Log, } mainCfg.Listeners = standardPortsOrMuxSetup(t, test.multiplex, &mainCfg.Fds) main := helpers.NewInstance(t, mainCfg) @@ -3791,7 +3795,7 @@ func testTrustedClusterAgentless(t *testing.T, suite *integrationTestSuite) { NodeName: Host, Priv: suite.Priv, Pub: suite.Pub, - Log: suite.Log, + Logger: suite.Log, } mainCfg.Listeners = standardPortsOrMuxSetup(t, false, &mainCfg.Fds) main := helpers.NewInstance(t, mainCfg) @@ -7188,7 +7192,7 @@ func (s *integrationTestSuite) newNamedTeleportInstance(t *testing.T, clusterNam NodeName: Host, Priv: s.Priv, Pub: s.Pub, - Log: utils.WrapLogger(s.Log.WithField("cluster", clusterName)), + Logger: s.Log.With("cluster", clusterName), } for _, opt := range opts { @@ -7217,7 +7221,7 @@ func WithListeners(setupFn helpers.InstanceListenerSetupFunc) InstanceConfigOpti func (s *integrationTestSuite) defaultServiceConfig() *servicecfg.Config { cfg := servicecfg.MakeDefaultConfig() cfg.Console = nil - 
cfg.Log = s.Log + cfg.Logger = s.Log cfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() cfg.InstanceMetadataClient = imds.NewDisabledIMDSClient() cfg.DebugService.Enabled = false @@ -7301,7 +7305,7 @@ func TestWebProxyInsecure(t *testing.T) { NodeName: Host, Priv: privateKey, Pub: publicKey, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) rcConf := servicecfg.MakeDefaultConfig() @@ -7335,7 +7339,7 @@ func TestWebProxyInsecure(t *testing.T) { // roles in root and leaf clusters. func TestTraitsPropagation(t *testing.T) { ctx := context.Background() - log := utils.NewLoggerForTests() + log := utils.NewSlogLoggerForTests() privateKey, publicKey, err := testauthority.New().GenerateKeyPair() require.NoError(t, err) @@ -7347,7 +7351,7 @@ func TestTraitsPropagation(t *testing.T) { NodeName: Host, Priv: privateKey, Pub: publicKey, - Log: log, + Logger: log, }) // Create leaf cluster. @@ -7357,7 +7361,7 @@ func TestTraitsPropagation(t *testing.T) { NodeName: Host, Priv: privateKey, Pub: publicKey, - Log: log, + Logger: log, }) // Make root cluster config. @@ -7529,7 +7533,7 @@ func createTrustedClusterPair(t *testing.T, suite *integrationTestSuite, extraSe NodeName: Host, Priv: suite.Priv, Pub: suite.Pub, - Log: suite.Log, + Logger: suite.Log, } rootCfg.Listeners = standardPortsOrMuxSetup(t, false, &rootCfg.Fds) @@ -7541,7 +7545,7 @@ func createTrustedClusterPair(t *testing.T, suite *integrationTestSuite, extraSe NodeName: Host, Priv: suite.Priv, Pub: suite.Pub, - Log: suite.Log, + Logger: suite.Log, } leafCfg.Listeners = standardPortsOrMuxSetup(t, false, &leafCfg.Fds) @@ -8440,7 +8444,7 @@ func TestProxySSHPortMultiplexing(t *testing.T) { NodeName: Host, Priv: privateKey, Pub: publicKey, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) rcConf := servicecfg.MakeDefaultConfig() @@ -8566,7 +8570,7 @@ func TestConnectivityWithoutAuth(t *testing.T) { // Create auth config. authCfg := servicecfg.MakeDefaultConfig() authCfg.Console = nil - authCfg.Log = utils.NewLoggerForTests() + authCfg.Logger = utils.NewSlogLoggerForTests() authCfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() authCfg.InstanceMetadataClient = imds.NewDisabledIMDSClient() authCfg.Auth.Preference.SetSecondFactor("off") @@ -8584,7 +8588,7 @@ func TestConnectivityWithoutAuth(t *testing.T) { NodeName: Host, Priv: privateKey, Pub: publicKey, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) // Create a user and role. @@ -8619,7 +8623,7 @@ func TestConnectivityWithoutAuth(t *testing.T) { NodeName: Host, Priv: privateKey, Pub: publicKey, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) // Create node config. @@ -8629,7 +8633,7 @@ func TestConnectivityWithoutAuth(t *testing.T) { nodeCfg.CachePolicy.Enabled = true nodeCfg.DataDir = t.TempDir() nodeCfg.Console = nil - nodeCfg.Log = utils.NewLoggerForTests() + nodeCfg.Logger = utils.NewSlogLoggerForTests() nodeCfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() nodeCfg.InstanceMetadataClient = imds.NewDisabledIMDSClient() nodeCfg.Auth.Enabled = false @@ -8710,7 +8714,7 @@ func TestConnectivityDuringAuthRestart(t *testing.T) { // Create auth config. 
authCfg := servicecfg.MakeDefaultConfig() authCfg.Console = nil - authCfg.Log = utils.NewLoggerForTests() + authCfg.Logger = utils.NewSlogLoggerForTests() authCfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() authCfg.InstanceMetadataClient = imds.NewDisabledIMDSClient() authCfg.Auth.Preference.SetSecondFactor("off") @@ -8728,7 +8732,7 @@ func TestConnectivityDuringAuthRestart(t *testing.T) { NodeName: Host, Priv: privateKey, Pub: publicKey, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) // Create a user and role. @@ -8760,7 +8764,7 @@ func TestConnectivityDuringAuthRestart(t *testing.T) { NodeName: Host, Priv: privateKey, Pub: publicKey, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) // Create node config. @@ -8770,7 +8774,7 @@ func TestConnectivityDuringAuthRestart(t *testing.T) { nodeCfg.CachePolicy.Enabled = true nodeCfg.DataDir = t.TempDir() nodeCfg.Console = nil - nodeCfg.Log = utils.NewLoggerForTests() + nodeCfg.Logger = utils.NewSlogLoggerForTests() nodeCfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() nodeCfg.InstanceMetadataClient = imds.NewDisabledIMDSClient() nodeCfg.DiagnosticAddr = *utils.MustParseAddr(helpers.NewListener(t, service.ListenerType("diag"), &node.Fds)) diff --git a/integration/joinopenssh_test.go b/integration/joinopenssh_test.go index 25818f91ba197..79a995889a7ef 100644 --- a/integration/joinopenssh_test.go +++ b/integration/joinopenssh_test.go @@ -47,7 +47,7 @@ func TestJoinOpenSSH(t *testing.T) { ClusterName: "root.example.com", HostID: uuid.New().String(), NodeName: Loopback, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), } cfg.Listeners = helpers.StandardListenerSetup(t, &cfg.Fds) rc := helpers.NewInstance(t, cfg) diff --git a/integration/kube_integration_test.go b/integration/kube_integration_test.go index 2127486ff7a7f..264bbfdf50706 100644 --- a/integration/kube_integration_test.go +++ b/integration/kube_integration_test.go @@ -26,6 +26,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "net/url" @@ -41,7 +42,6 @@ import ( "github.com/google/uuid" "github.com/gorilla/websocket" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/http2" @@ -105,7 +105,7 @@ type KubeSuite struct { kubeConfig *rest.Config // log defines the test-specific logger - log utils.Logger + log *slog.Logger } func newKubeSuite(t *testing.T) *KubeSuite { @@ -168,7 +168,7 @@ type kubeIntegrationTest func(t *testing.T, suite *KubeSuite) func (s *KubeSuite) bind(test kubeIntegrationTest) func(t *testing.T) { return func(t *testing.T) { - s.log = utils.NewLoggerForTests() + s.log = utils.NewSlogLoggerForTests() os.RemoveAll(profile.FullProfilePath("")) t.Cleanup(func() { s.log = nil }) test(t, s) @@ -204,7 +204,7 @@ func testExec(t *testing.T, suite *KubeSuite, pinnedIP string, clientError strin NodeName: Host, Priv: suite.priv, Pub: suite.pub, - Log: suite.log, + Logger: suite.log, }) username := suite.me.Username @@ -418,7 +418,7 @@ func testKubeDeny(t *testing.T, suite *KubeSuite) { NodeName: Host, Priv: suite.priv, Pub: suite.pub, - Log: suite.log, + Logger: suite.log, }) username := suite.me.Username @@ -475,7 +475,7 @@ func testKubePortForward(t *testing.T, suite *KubeSuite) { NodeName: Host, Priv: suite.priv, Pub: suite.pub, - Log: suite.log, + Logger: suite.log, }) username := suite.me.Username @@ -604,7 +604,7 @@ func testKubeTrustedClustersClientCert(t 
*testing.T, suite *KubeSuite) { NodeName: Host, Priv: suite.priv, Pub: suite.pub, - Log: suite.log, + Logger: suite.log, }) // main cluster has a role and user called main-kube @@ -635,7 +635,7 @@ func testKubeTrustedClustersClientCert(t *testing.T, suite *KubeSuite) { NodeName: Host, Priv: suite.priv, Pub: suite.pub, - Log: suite.log, + Logger: suite.log, }) lib.SetInsecureDevMode(true) @@ -694,11 +694,9 @@ func testKubeTrustedClustersClientCert(t *testing.T, suite *KubeSuite) { // try and upsert a trusted cluster var upsertSuccess bool for i := 0; i < 10; i++ { - log.Debugf("Will create trusted cluster %v, attempt %v", trustedCluster, i) _, err = aux.Process.GetAuthServer().UpsertTrustedClusterV2(ctx, trustedCluster) if err != nil { if trace.IsConnectionProblem(err) { - log.Debugf("retrying on connection problem: %v", err) continue } t.Fatalf("got non connection problem %v", err) @@ -876,7 +874,7 @@ func testKubeTrustedClustersSNI(t *testing.T, suite *KubeSuite) { NodeName: Host, Priv: suite.priv, Pub: suite.pub, - Log: suite.log, + Logger: suite.log, }) // main cluster has a role and user called main-kube @@ -907,7 +905,7 @@ func testKubeTrustedClustersSNI(t *testing.T, suite *KubeSuite) { NodeName: Host, Priv: suite.priv, Pub: suite.pub, - Log: suite.log, + Logger: suite.log, }) lib.SetInsecureDevMode(true) @@ -970,11 +968,9 @@ func testKubeTrustedClustersSNI(t *testing.T, suite *KubeSuite) { // try and upsert a trusted cluster var upsertSuccess bool for i := 0; i < 10; i++ { - log.Debugf("Will create trusted cluster %v, attempt %v", trustedCluster, i) _, err = aux.Process.GetAuthServer().UpsertTrustedClusterV2(ctx, trustedCluster) if err != nil { if trace.IsConnectionProblem(err) { - log.Debugf("retrying on connection problem: %v", err) continue } t.Fatalf("got non connection problem %v", err) @@ -1176,7 +1172,7 @@ func runKubeDisconnectTest(t *testing.T, suite *KubeSuite, tc disconnectTestCase NodeName: Host, Priv: suite.priv, Pub: suite.pub, - Log: suite.log, + Logger: suite.log, }) username := suite.me.Username @@ -1270,7 +1266,7 @@ func testKubeTransportProtocol(t *testing.T, suite *KubeSuite) { NodeName: Host, Priv: suite.priv, Pub: suite.pub, - Log: suite.log, + Logger: suite.log, }) username := suite.me.Username @@ -1377,7 +1373,7 @@ func testKubeEphemeralContainers(t *testing.T, suite *KubeSuite) { NodeName: Host, Priv: suite.priv, Pub: suite.pub, - Log: suite.log, + Logger: suite.log, }) username := suite.me.Username @@ -1653,7 +1649,7 @@ func testKubeExecWeb(t *testing.T, suite *KubeSuite) { NodeName: Host, Priv: suite.priv, Pub: suite.pub, - Log: suite.log, + Logger: suite.log, }) // Setup user and role. 
@@ -1838,7 +1834,7 @@ type sessionMetadataResponse struct { func (s *KubeSuite) teleKubeConfig(hostname string) *servicecfg.Config { tconf := servicecfg.MakeDefaultConfig() tconf.Console = nil - tconf.Log = s.log + tconf.Logger = s.log tconf.SSH.Enabled = true tconf.Proxy.DisableWebInterface = true tconf.PollingPeriod = 500 * time.Millisecond @@ -1859,7 +1855,7 @@ func (s *KubeSuite) teleKubeConfig(hostname string) *servicecfg.Config { func (s *KubeSuite) teleAuthConfig(hostname string) *servicecfg.Config { tconf := servicecfg.MakeDefaultConfig() tconf.Console = nil - tconf.Log = s.log + tconf.Logger = s.log tconf.PollingPeriod = 500 * time.Millisecond tconf.Testing.ClientTimeout = time.Second tconf.Testing.ShutdownTimeout = 2 * tconf.Testing.ClientTimeout @@ -2114,7 +2110,7 @@ func testKubeJoin(t *testing.T, suite *KubeSuite) { NodeName: Host, Priv: suite.priv, Pub: suite.pub, - Log: suite.log, + Logger: suite.log, }) // fooey @@ -2373,7 +2369,7 @@ func testExecNoAuth(t *testing.T, suite *KubeSuite) { NodeName: Host, Priv: suite.priv, Pub: suite.pub, - Log: suite.log, + Logger: suite.log, }) adminUsername := "admin" diff --git a/integration/port_forwarding_test.go b/integration/port_forwarding_test.go index 4c83cdcbaaeda..88af150695872 100644 --- a/integration/port_forwarding_test.go +++ b/integration/port_forwarding_test.go @@ -170,7 +170,7 @@ func testPortForwarding(t *testing.T, suite *integrationTestSuite) { NodeName: Host, Priv: privateKey, Pub: publicKey, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) for _, login := range logins { @@ -196,7 +196,7 @@ func testPortForwarding(t *testing.T, suite *integrationTestSuite) { NodeName: Host, Priv: privateKey, Pub: publicKey, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) // Create node config. 
diff --git a/integration/proxy/automaticupgrades_test.go b/integration/proxy/automaticupgrades_test.go index 7620bb9077c7c..1818fb7acd4e4 100644 --- a/integration/proxy/automaticupgrades_test.go +++ b/integration/proxy/automaticupgrades_test.go @@ -49,7 +49,7 @@ func createProxyWithChannels(t *testing.T, channels automaticupgrades.Channels) ClusterName: "root.example.com", HostID: uuid.New().String(), NodeName: helpers.Loopback, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), } cfg.Listeners = helpers.SingleProxyPortSetup(t, &cfg.Fds) rc := helpers.NewInstance(t, cfg) diff --git a/integration/proxy/proxy_helpers.go b/integration/proxy/proxy_helpers.go index 9de514caf73e1..789ab0f4f577f 100644 --- a/integration/proxy/proxy_helpers.go +++ b/integration/proxy/proxy_helpers.go @@ -115,7 +115,7 @@ func newSuite(t *testing.T, opts ...proxySuiteOptionsFunc) *Suite { ClusterName: "root.example.com", HostID: uuid.New().String(), NodeName: options.rootClusterNodeName, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), } rCfg.Listeners = options.rootClusterListeners(t, &rCfg.Fds) rc := helpers.NewInstance(t, rCfg) @@ -127,7 +127,7 @@ func newSuite(t *testing.T, opts ...proxySuiteOptionsFunc) *Suite { NodeName: options.leafClusterNodeName, Priv: rc.Secrets.PrivKey, Pub: rc.Secrets.PubKey, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), } lCfg.Listeners = options.leafClusterListeners(t, &lCfg.Fds) lc := helpers.NewInstance(t, lCfg) @@ -196,7 +196,7 @@ func (p *Suite) addNodeToLeafCluster(t *testing.T, tunnelNodeHostname string) { nodeConfig := func() *servicecfg.Config { tconf := servicecfg.MakeDefaultConfig() tconf.Console = nil - tconf.Log = utils.NewLoggerForTests() + tconf.Logger = utils.NewSlogLoggerForTests() tconf.Hostname = tunnelNodeHostname tconf.SetToken("token") tconf.SetAuthServerAddress(utils.NetAddr{ diff --git a/integration/proxy/proxy_test.go b/integration/proxy/proxy_test.go index db28b969020d6..cf75bfd5f146b 100644 --- a/integration/proxy/proxy_test.go +++ b/integration/proxy/proxy_test.go @@ -562,7 +562,7 @@ func TestKubePROXYProtocol(t *testing.T) { ClusterName: "root.example.com", HostID: uuid.New().String(), NodeName: helpers.Loopback, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), } tconf := servicecfg.MakeDefaultConfig() tconf.Proxy.Kube.ListenAddr = *utils.MustParseAddr(helpers.NewListener(t, service.ListenerProxyKube, &cfg.Fds)) @@ -1395,7 +1395,7 @@ func TestALPNProxyAuthClientConnectWithUserIdentity(t *testing.T) { ClusterName: "root.example.com", HostID: uuid.New().String(), NodeName: helpers.Loopback, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), } cfg.Listeners = helpers.SingleProxyPortSetup(t, &cfg.Fds) rc := helpers.NewInstance(t, cfg) @@ -1499,7 +1499,7 @@ func TestALPNProxyDialProxySSHWithoutInsecureMode(t *testing.T) { NodeName: helpers.Loopback, Priv: privateKey, Pub: publicKey, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), } rootCfg.Listeners = helpers.StandardListenerSetup(t, &rootCfg.Fds) rc := helpers.NewInstance(t, rootCfg) @@ -1569,7 +1569,7 @@ func TestALPNProxyHTTPProxyNoProxyDial(t *testing.T) { ClusterName: "root.example.com", HostID: uuid.New().String(), NodeName: addr, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), } instanceCfg.Listeners = helpers.SingleProxyPortSetupOn(addr)(t, &instanceCfg.Fds) rc := helpers.NewInstance(t, instanceCfg) @@ -1636,7 +1636,7 @@ func 
TestALPNProxyHTTPProxyBasicAuthDial(t *testing.T) { lib.SetInsecureDevMode(true) defer lib.SetInsecureDevMode(false) - log := utils.NewLoggerForTests() + log := utils.NewSlogLoggerForTests() // We need to use the non-loopback address for our Teleport cluster, as the // Go HTTP library will recognize requests to the loopback address and @@ -1644,17 +1644,15 @@ func TestALPNProxyHTTPProxyBasicAuthDial(t *testing.T) { rcAddr, err := apihelpers.GetLocalIP() require.NoError(t, err) - log.Info("Creating Teleport instance...") cfg := helpers.InstanceConfig{ ClusterName: "root.example.com", HostID: uuid.New().String(), NodeName: rcAddr, - Log: log, + Logger: log, } cfg.Listeners = helpers.SingleProxyPortSetupOn(rcAddr)(t, &cfg.Fds) rc := helpers.NewInstance(t, cfg) defer rc.StopAll() - log.Info("Teleport root cluster instance created") username := helpers.MustGetCurrentUser(t).Username rc.AddUser(username, []string{username}) @@ -1668,20 +1666,15 @@ func TestALPNProxyHTTPProxyBasicAuthDial(t *testing.T) { rcConf.Proxy.DisableWebInterface = true rcConf.SSH.Enabled = false rcConf.CircuitBreakerConfig = breaker.NoopBreakerConfig() - rcConf.Log = log + rcConf.Logger = log - log.Infof("Root cluster config: %#v", rcConf) - - log.Info("Creating Root cluster...") err = rc.CreateEx(t, nil, rcConf) require.NoError(t, err) - log.Info("Starting Root Cluster...") err = rc.Start() require.NoError(t, err) // Create and start http_proxy server. - log.Info("Creating HTTP Proxy server...") ph := &helpers.ProxyHandler{} authorizer := helpers.NewProxyAuthorizer(ph, "alice", "rosebud") ts := httptest.NewServer(authorizer) @@ -1689,7 +1682,6 @@ func TestALPNProxyHTTPProxyBasicAuthDial(t *testing.T) { proxyURL, err := url.Parse(ts.URL) require.NoError(t, err) - log.Infof("HTTP Proxy server running on %s", proxyURL) // set http_proxy to user:password@host // these credentials will be rejected by the auth proxy (initially). 
@@ -1699,7 +1691,7 @@ func TestALPNProxyHTTPProxyBasicAuthDial(t *testing.T) { rcProxyAddr := net.JoinHostPort(rcAddr, helpers.PortStr(t, rc.Web)) nodeCfg := makeNodeConfig("node1", rcProxyAddr) - nodeCfg.Log = log + nodeCfg.Logger = log timeout := time.Second * 60 startErrC := make(chan error) diff --git a/integration/proxy/proxy_tunnel_strategy_test.go b/integration/proxy/proxy_tunnel_strategy_test.go index da833e999d513..1c2a22824807e 100644 --- a/integration/proxy/proxy_tunnel_strategy_test.go +++ b/integration/proxy/proxy_tunnel_strategy_test.go @@ -28,7 +28,6 @@ import ( "github.com/google/uuid" "github.com/gravitational/trace" "github.com/jackc/pgconn" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" apidefaults "github.com/gravitational/teleport/api/defaults" @@ -63,8 +62,6 @@ type proxyTunnelStrategy struct { db *helpers.TeleInstance dbAuthClient *authclient.Client postgresDB *postgres.TestServer - - log *logrus.Logger } func newProxyTunnelStrategy(t *testing.T, cluster string, strategy *types.TunnelStrategyV1) *proxyTunnelStrategy { @@ -72,7 +69,6 @@ func newProxyTunnelStrategy(t *testing.T, cluster string, strategy *types.Tunnel cluster: cluster, username: helpers.MustGetCurrentUser(t).Username, strategy: strategy, - log: utils.NewLoggerForTests(), } lib.SetInsecureDevMode(true) @@ -303,14 +299,14 @@ func (p *proxyTunnelStrategy) makeAuth(t *testing.T) { NodeName: helpers.Loopback, Priv: privateKey, Pub: publicKey, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) auth.AddUser(p.username, []string{p.username}) conf := servicecfg.MakeDefaultConfig() conf.DataDir = t.TempDir() - conf.Log = auth.Log + conf.Logger = auth.Log conf.Auth.Enabled = true conf.Auth.NetworkingConfig.SetTunnelStrategy(p.strategy) @@ -331,7 +327,7 @@ func (p *proxyTunnelStrategy) makeProxy(t *testing.T) { ClusterName: p.cluster, HostID: uuid.New().String(), NodeName: helpers.Loopback, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) authAddr := utils.MustParseAddr(p.auth.Auth) @@ -340,7 +336,7 @@ func (p *proxyTunnelStrategy) makeProxy(t *testing.T) { conf.SetAuthServerAddress(*authAddr) conf.SetToken("token") conf.DataDir = t.TempDir() - conf.Log = proxy.Log + conf.Logger = proxy.Log conf.InstanceMetadataClient = imds.NewDisabledIMDSClient() conf.Auth.Enabled = false @@ -378,14 +374,14 @@ func (p *proxyTunnelStrategy) makeNode(t *testing.T) { ClusterName: p.cluster, HostID: uuid.New().String(), NodeName: helpers.Loopback, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) conf := servicecfg.MakeDefaultConfig() conf.Version = types.V3 conf.SetToken("token") conf.DataDir = t.TempDir() - conf.Log = node.Log + conf.Logger = node.Log conf.InstanceMetadataClient = imds.NewDisabledIMDSClient() conf.Auth.Enabled = false @@ -423,14 +419,14 @@ func (p *proxyTunnelStrategy) makeDatabase(t *testing.T) { ClusterName: p.cluster, HostID: uuid.New().String(), NodeName: helpers.Loopback, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) conf := servicecfg.MakeDefaultConfig() conf.Version = types.V3 conf.SetToken("token") conf.DataDir = t.TempDir() - conf.Log = db.Log + conf.Logger = db.Log conf.InstanceMetadataClient = imds.NewDisabledIMDSClient() conf.Auth.Enabled = false @@ -554,20 +550,15 @@ func (p *proxyTunnelStrategy) waitForResource(t *testing.T, role string, check f for _, proxy := range p.proxies { ok, err := check(proxy, availability) if err != nil { - p.log.Debugf("check for %s 
availability error: %+v", role, trace.Wrap(err)) return false } if !ok { - p.log.Debugf("%s not found", role) return false } propagated++ } - if len(p.proxies) != propagated { - p.log.Debugf("%s not available", role) - return false - } - return true + + return len(p.proxies) == propagated }, 30*time.Second, time.Second, diff --git a/integration/tctl_terraform_env_test.go b/integration/tctl_terraform_env_test.go index 70d45b1832618..8dff58a024b0f 100644 --- a/integration/tctl_terraform_env_test.go +++ b/integration/tctl_terraform_env_test.go @@ -61,7 +61,7 @@ func TestTCTLTerraformCommand_ProxyJoin(t *testing.T) { ClusterName: clusterName, HostID: uuid.New().String(), NodeName: helpers.Loopback, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), } cfg.Listeners = helpers.SingleProxyPortSetup(t, &cfg.Fds) rc := helpers.NewInstance(t, cfg) @@ -137,7 +137,7 @@ func TestTCTLTerraformCommand_AuthJoin(t *testing.T) { ClusterName: clusterName, HostID: uuid.New().String(), NodeName: helpers.Loopback, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), } cfg.Listeners = helpers.SingleProxyPortSetup(t, &cfg.Fds) rc := helpers.NewInstance(t, cfg) diff --git a/integration/teleterm_test.go b/integration/teleterm_test.go index 2b02b92c7d40b..baad6e2871e7e 100644 --- a/integration/teleterm_test.go +++ b/integration/teleterm_test.go @@ -988,7 +988,7 @@ func testWaitForConnectMyComputerNodeJoin(t *testing.T, pack *dbhelpers.Database nodeConfig := newNodeConfig(t, "token", types.JoinMethodToken) nodeConfig.SetAuthServerAddress(pack.Root.Cluster.Config.Auth.ListenAddr) nodeConfig.DataDir = filepath.Join(agentsDir, profileName, "data") - nodeConfig.Log = libutils.NewLoggerForTests() + nodeConfig.Logger = libutils.NewSlogLoggerForTests() nodeSvc, err := service.NewTeleport(nodeConfig) require.NoError(t, err) require.NoError(t, nodeSvc.Start()) @@ -1066,7 +1066,7 @@ func testDeleteConnectMyComputerNode(t *testing.T, pack *dbhelpers.DatabasePack) nodeConfig := newNodeConfig(t, "token", types.JoinMethodToken) nodeConfig.SetAuthServerAddress(pack.Root.Cluster.Config.Auth.ListenAddr) nodeConfig.DataDir = filepath.Join(agentsDir, profileName, "data") - nodeConfig.Log = libutils.NewLoggerForTests() + nodeConfig.Logger = libutils.NewSlogLoggerForTests() nodeSvc, err := service.NewTeleport(nodeConfig) require.NoError(t, err) require.NoError(t, nodeSvc.Start()) diff --git a/integrations/lib/embeddedtbot/bot_test.go b/integrations/lib/embeddedtbot/bot_test.go index ca4bc9480af43..fa31e5772742b 100644 --- a/integrations/lib/embeddedtbot/bot_test.go +++ b/integrations/lib/embeddedtbot/bot_test.go @@ -45,7 +45,7 @@ func TestBotJoinAuth(t *testing.T) { ClusterName: clusterName, HostID: uuid.New().String(), NodeName: helpers.Loopback, - Log: utils.NewLoggerForTests(), + Logger: utils.NewSlogLoggerForTests(), }) rcConf := servicecfg.MakeDefaultConfig() diff --git a/integrations/operator/controllers/resources/testlib/env.go b/integrations/operator/controllers/resources/testlib/env.go index df04115401b59..9de19230826c3 100644 --- a/integrations/operator/controllers/resources/testlib/env.go +++ b/integrations/operator/controllers/resources/testlib/env.go @@ -20,6 +20,7 @@ package testlib import ( "context" + "log/slog" "math/rand/v2" "os" "path/filepath" @@ -28,7 +29,6 @@ import ( "time" "github.com/google/uuid" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" @@ -112,7 +112,7 @@ func 
defaultTeleportServiceConfig(t *testing.T) (*helpers.TeleInstance, string) ClusterName: "root.example.com", HostID: uuid.New().String(), NodeName: helpers.Loopback, - Log: logrus.StandardLogger(), + Logger: slog.Default(), }) rcConf := servicecfg.MakeDefaultConfig() diff --git a/lib/config/configuration.go b/lib/config/configuration.go index 5d42d9f4aa8a4..5955da7b93daa 100644 --- a/lib/config/configuration.go +++ b/lib/config/configuration.go @@ -45,7 +45,6 @@ import ( "github.com/go-ldap/ldap/v3" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" kyaml "k8s.io/apimachinery/pkg/util/yaml" "github.com/gravitational/teleport" @@ -429,10 +428,10 @@ func ReadConfigFile(cliConfigPath string) (*FileConfig, error) { } // default config doesn't exist? quietly return: if !utils.FileExists(configFilePath) { - log.Info("not using a config file") + slog.InfoContext(context.Background(), "not using a config file") return nil, nil } - log.Debug("reading config file: ", configFilePath) + slog.DebugContext(context.Background(), "reading config file", "config_file", configFilePath) return ReadFromFile(configFilePath) } @@ -569,13 +568,14 @@ func ApplyFileConfig(fc *FileConfig, cfg *servicecfg.Config) error { return trace.Wrap(err) } + ctx := context.Background() if fc.CachePolicy.TTL != "" { - log.Warn("cache.ttl config option is deprecated and will be ignored, caches no longer attempt to anticipate resource expiration.") + slog.WarnContext(ctx, "cache.ttl config option is deprecated and will be ignored, caches no longer attempt to anticipate resource expiration") } if fc.CachePolicy.Type == memory.GetName() { - log.Debugf("cache.type config option is explicitly set to %v.", memory.GetName()) + slog.DebugContext(ctx, "cache.type config option is explicitly set to memory") } else if fc.CachePolicy.Type != "" { - log.Warn("cache.type config option is deprecated and will be ignored, caches are always in memory in this version.") + slog.WarnContext(ctx, "cache.type config option is deprecated and will be ignored, caches are always in memory in this version") } // apply cache policy for node and proxy @@ -604,7 +604,7 @@ func ApplyFileConfig(fc *FileConfig, cfg *servicecfg.Config) error { cfg.MACAlgorithms = fc.MACAlgorithms } if fc.CASignatureAlgorithm != nil { - log.Warn("ca_signing_algo config option is deprecated and will be removed in a future release, Teleport defaults to rsa-sha2-512.") + slog.WarnContext(ctx, "ca_signing_algo config option is deprecated and will be removed in a future release, Teleport defaults to rsa-sha2-512") } // Read in how nodes will validate the CA. 
A single empty string in the file @@ -791,38 +791,23 @@ func applyAuthOrProxyAddress(fc *FileConfig, cfg *servicecfg.Config) error { func applyLogConfig(loggerConfig Log, cfg *servicecfg.Config) error { // TODO: this code is copied in the access plugin logging setup `logger.Config.NewSLogLogger` // We'll want to deduplicate the logic next time we refactor the logging setup - logger := log.StandardLogger() - var w io.Writer switch loggerConfig.Output { case "": - w = logutils.NewSharedWriter(os.Stderr) + w = os.Stderr case "stderr", "error", "2": - w = logutils.NewSharedWriter(os.Stderr) + w = os.Stderr cfg.Console = io.Discard // disable console printing case "stdout", "out", "1": - w = logutils.NewSharedWriter(os.Stdout) + w = os.Stdout cfg.Console = io.Discard // disable console printing case teleport.Syslog: - w = os.Stderr - sw, err := utils.NewSyslogWriter() - if err != nil { - logger.Errorf("Failed to switch logging to syslog: %v.", err) - break - } - - hook, err := utils.NewSyslogHook(sw) + var err error + w, err = utils.NewSyslogWriter() if err != nil { - logger.Errorf("Failed to switch logging to syslog: %v.", err) + slog.ErrorContext(context.Background(), "Failed to switch logging to syslog", "error", err) break } - - logger.ReplaceHooks(make(log.LevelHooks)) - logger.AddHook(hook) - // If syslog output has been configured and is supported by the operating system, - // then the shared writer is not needed because the syslog writer is already - // protected with a mutex. - w = sw default: // Assume this is a file path. sharedWriter, err := logutils.NewFileSharedWriter(loggerConfig.Output, logFileDefaultFlag, logFileDefaultMode) @@ -838,19 +823,14 @@ func applyLogConfig(loggerConfig Log, cfg *servicecfg.Config) error { level := new(slog.LevelVar) switch strings.ToLower(loggerConfig.Severity) { case "", "info": - logger.SetLevel(log.InfoLevel) level.Set(slog.LevelInfo) case "err", "error": - logger.SetLevel(log.ErrorLevel) level.Set(slog.LevelError) case teleport.DebugLevel: - logger.SetLevel(log.DebugLevel) level.Set(slog.LevelDebug) case "warn", "warning": - logger.SetLevel(log.WarnLevel) level.Set(slog.LevelWarn) case "trace": - logger.SetLevel(log.TraceLevel) level.Set(logutils.TraceLevel) default: return trace.BadParameter("unsupported logger severity: %q", loggerConfig.Severity) @@ -861,65 +841,28 @@ func applyLogConfig(loggerConfig Log, cfg *servicecfg.Config) error { return trace.Wrap(err) } - var slogLogger *slog.Logger + var logger *slog.Logger switch strings.ToLower(loggerConfig.Format.Output) { case "": fallthrough // not set. defaults to 'text' case "text": - enableColors := utils.IsTerminal(os.Stderr) - formatter := &logutils.TextFormatter{ - ExtraFields: configuredFields, - EnableColors: enableColors, - } - - if err := formatter.CheckAndSetDefaults(); err != nil { - return trace.Wrap(err) - } - - logger.SetFormatter(formatter) - // Disable writing output to stderr/stdout and syslog. The logging - // hook will take care of writing the output to the correct location. 
- if len(logger.Hooks) > 0 { - logger.SetOutput(io.Discard) - } else { - logger.SetOutput(w) - } - - slogLogger = slog.New(logutils.NewSlogTextHandler(w, logutils.SlogTextHandlerConfig{ + logger = slog.New(logutils.NewSlogTextHandler(w, logutils.SlogTextHandlerConfig{ Level: level, - EnableColors: enableColors, + EnableColors: utils.IsTerminal(os.Stderr), ConfiguredFields: configuredFields, })) - slog.SetDefault(slogLogger) + slog.SetDefault(logger) case "json": - formatter := &logutils.JSONFormatter{ - ExtraFields: configuredFields, - } - - if err := formatter.CheckAndSetDefaults(); err != nil { - return trace.Wrap(err) - } - - logger.SetFormatter(formatter) - // Disable writing output to stderr/stdout and syslog. The logging - // hook will take care of writing the output to the correct location. - if len(logger.Hooks) > 0 { - logger.SetOutput(io.Discard) - } else { - logger.SetOutput(w) - } - - slogLogger = slog.New(logutils.NewSlogJSONHandler(w, logutils.SlogJSONHandlerConfig{ + logger = slog.New(logutils.NewSlogJSONHandler(w, logutils.SlogJSONHandlerConfig{ Level: level, ConfiguredFields: configuredFields, })) - slog.SetDefault(slogLogger) + slog.SetDefault(logger) default: return trace.BadParameter("unsupported log output format : %q", loggerConfig.Format.Output) } - cfg.Log = logger - cfg.Logger = slogLogger + cfg.Logger = logger cfg.LoggerLevel = level return nil } @@ -929,9 +872,9 @@ func applyAuthConfig(fc *FileConfig, cfg *servicecfg.Config) error { var err error if fc.Auth.KubeconfigFile != "" { - warningMessage := "The auth_service no longer needs kubeconfig_file. It has " + + const warningMessage = "The auth_service no longer needs kubeconfig_file. It has " + "been moved to proxy_service section. This setting is ignored." - log.Warning(warningMessage) + slog.WarnContext(context.Background(), warningMessage) } cfg.Auth.PROXYProtocolMode = multiplexer.PROXYProtocolUnspecified @@ -1315,10 +1258,10 @@ func applyProxyConfig(fc *FileConfig, cfg *servicecfg.Config) error { return trace.Wrap(err) } if utils.IsSelfSigned(certificateChain) { - warningMessage := "Starting Teleport with a self-signed TLS certificate, this is " + + const warningMessage = "Starting Teleport with a self-signed TLS certificate, this is " + "not safe for production clusters. Using a self-signed certificate opens " + "Teleport users to Man-in-the-Middle attacks." - log.Warn(warningMessage) + slog.WarnContext(context.Background(), warningMessage) } else { if err := utils.VerifyCertificateChain(certificateChain); err != nil { return trace.BadParameter("unable to verify HTTPS certificate chain in %v:\n\n %s\n\n %s", @@ -1530,7 +1473,7 @@ func applySSHConfig(fc *FileConfig, cfg *servicecfg.Config) (err error) { if fc.SSH.DisableCreateHostUser || runtime.GOOS != constants.LinuxOS { cfg.SSH.DisableCreateHostUser = true if runtime.GOOS != constants.LinuxOS { - log.Debugln("Disabling host user creation as this feature is only available on Linux") + slog.DebugContext(context.Background(), "Disabling host user creation as this feature is only available on Linux") } } if fc.SSH.PAM != nil { @@ -1540,14 +1483,14 @@ func applySSHConfig(fc *FileConfig, cfg *servicecfg.Config) (err error) { // and the PAM library was found at runtime. 
if cfg.SSH.PAM.Enabled { if !pam.BuildHasPAM() { - errorMessage := "Unable to start Teleport: PAM was enabled in file configuration but this \n" + + const errorMessage = "Unable to start Teleport: PAM was enabled in file configuration but this \n" + "Teleport binary was built without PAM support. To continue either download a \n" + "Teleport binary build with PAM support from https://goteleport.com/teleport \n" + "or disable PAM in file configuration." return trace.BadParameter(errorMessage) } if !pam.SystemHasPAM() { - errorMessage := "Unable to start Teleport: PAM was enabled in file configuration but this \n" + + const errorMessage = "Unable to start Teleport: PAM was enabled in file configuration but this \n" + "system does not have the needed PAM library installed. To continue either \n" + "install libpam or disable PAM in file configuration." return trace.BadParameter(errorMessage) @@ -1565,7 +1508,7 @@ func applySSHConfig(fc *FileConfig, cfg *servicecfg.Config) (err error) { cfg.SSH.BPF = fc.SSH.BPF.Parse() } if fc.SSH.RestrictedSession != nil { - log.Error("Restricted Sessions for SSH were removed in Teleport 15.") + slog.ErrorContext(context.Background(), "Restricted Sessions for SSH were removed in Teleport 15") } cfg.SSH.AllowTCPForwarding = fc.SSH.AllowTCPForwarding() @@ -1626,11 +1569,11 @@ func applyDiscoveryConfig(fc *FileConfig, cfg *servicecfg.Config) error { for _, region := range matcher.Regions { if !awsutils.IsKnownRegion(region) { - log.Warnf("AWS matcher uses unknown region %q. "+ - "There could be a typo in %q. "+ - "Ignore this message if this is a new AWS region that is unknown to the AWS SDK used to compile this binary. "+ - "Known regions are: %v.", - region, region, awsutils.GetKnownRegions(), + const message = "AWS matcher uses unknown region. " + + "This is either a typo or a new AWS region that is unknown to the AWS SDK used to compile this binary." + slog.WarnContext(context.Background(), message, + "region", region, + "known_regions", awsutils.GetKnownRegions(), ) } } @@ -1977,10 +1920,10 @@ func readCACert(database *Database) ([]byte, error) { if database.CACertFile != "" { if database.TLS.CACertFile != "" { // New and old fields are set. Ignore the old field. - log.Warnf("Ignoring deprecated ca_cert_file in %s configuration; using tls.ca_cert_file.", database.Name) + slog.WarnContext(context.Background(), "Ignoring deprecated ca_cert_file in database configuration; using tls.ca_cert_file", "database", database.Name) } else { // Only old field is set, inform about deprecation.
- log.Warnf("ca_cert_file is deprecated, please use tls.ca_cert_file instead for %s.", database.Name) + slog.WarnContext(context.Background(), "ca_cert_file is deprecated, please use tls.ca_cert_file instead for databases", "database", database.Name) caBytes, err = os.ReadFile(database.CACertFile) if err != nil { @@ -2202,10 +2145,10 @@ func applyWindowsDesktopConfig(fc *FileConfig, cfg *servicecfg.Config) error { } cfg.WindowsDesktop.ShowDesktopWallpaper = fc.WindowsDesktop.ShowDesktopWallpaper if len(fc.WindowsDesktop.ADHosts) > 0 { - log.Warnln("hosts field is deprecated, prefer static_hosts instead") + slog.WarnContext(context.Background(), "hosts field is deprecated, prefer static_hosts instead") } if len(fc.WindowsDesktop.NonADHosts) > 0 { - log.Warnln("non_ad_hosts field is deprecated, prefer static_hosts instead") + slog.WarnContext(context.Background(), "non_ad_hosts field is deprecated, prefer static_hosts instead") } cfg.WindowsDesktop.StaticHosts, err = staticHostsWithAddress(fc.WindowsDesktop) if err != nil { @@ -2655,7 +2598,7 @@ func Configure(clf *CommandLineFlags, cfg *servicecfg.Config, legacyAppFlags boo // apply --auth-server flag: if len(clf.AuthServerAddr) > 0 { if cfg.Auth.Enabled { - log.Warnf("not starting the local auth service. --auth-server flag tells to connect to another auth server") + slog.WarnContext(context.Background(), "not starting the local auth service. --auth-server flag tells to connect to another auth server") cfg.Auth.Enabled = false } @@ -2780,7 +2723,7 @@ func ConfigureOpenSSH(clf *CommandLineFlags, cfg *servicecfg.Config) error { cfg.SetToken(clf.AuthToken) } - log.Debugf("Disabling all services, only the Teleport OpenSSH service can run during the `teleport join openssh` command") + slog.DebugContext(context.Background(), "Disabling all services, only the Teleport OpenSSH service can run during the `teleport join openssh` command") servicecfg.DisableLongRunningServices(cfg) cfg.DataDir = clf.DataDir @@ -2928,7 +2871,7 @@ func applyListenIP(ip net.IP, cfg *servicecfg.Config) { func replaceHost(addr *utils.NetAddr, newHost string) { _, port, err := net.SplitHostPort(addr.Addr) if err != nil { - log.Errorf("failed parsing address: '%v'", addr.Addr) + slog.ErrorContext(context.Background(), "failed parsing address", "address", addr.Addr, "error", err) } addr.Addr = net.JoinHostPort(newHost, port) } diff --git a/lib/config/fileconf.go b/lib/config/fileconf.go index 0c417106924e7..fe383d8444e32 100644 --- a/lib/config/fileconf.go +++ b/lib/config/fileconf.go @@ -20,12 +20,14 @@ package config import ( "bytes" + "context" "crypto/tls" "encoding/base64" "errors" "fmt" "io" "io/fs" + "log/slog" "net" "net/url" "os" @@ -34,7 +36,6 @@ import ( "time" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "golang.org/x/crypto/acme" "golang.org/x/crypto/ssh" "gopkg.in/yaml.v2" @@ -1083,7 +1084,7 @@ func (a *AuthenticationConfig) Parse() (types.AuthPreference, error) { switch { case a.HardwareKey != nil: if a.PIVSlot != "" { - log.Warn(`Both "piv_slot" and "hardware_key" settings were populated, using "hardware_key" setting.`) + slog.WarnContext(context.Background(), `Both "piv_slot" and "hardware_key" settings were populated, using "hardware_key" setting`) } h, err = a.HardwareKey.Parse() if err != nil { @@ -1101,9 +1102,9 @@ func (a *AuthenticationConfig) Parse() (types.AuthPreference, error) { } if a.SecondFactor != "" && a.SecondFactors != nil { - log.Warn(`` + - `second_factor and second_factors are both set. 
second_factors will take precedence. ` + - `second_factor should be unset to remove this warning.`) + const msg = `second_factor and second_factors are both set. second_factors will take precedence. ` + + `second_factor should be unset to remove this warning.` + slog.WarnContext(context.Background(), msg) } return types.NewAuthPreferenceFromConfigFile(types.AuthPreferenceSpecV2{ @@ -1163,10 +1164,10 @@ func (w *Webauthn) Parse() (*types.Webauthn, error) { return nil, trace.BadParameter("webauthn.attestation_denied_cas: %v", err) } if w.Disabled { - log.Warnf(`` + - `The "webauthn.disabled" setting is marked for removal and currently has no effect. ` + + const msg = `The "webauthn.disabled" setting is marked for removal and currently has no effect. ` + `Please update your configuration to use WebAuthn. ` + - `Refer to https://goteleport.com/docs/access-controls/guides/webauthn/`) + `Refer to https://goteleport.com/docs/access-controls/guides/webauthn/` + slog.WarnContext(context.Background(), msg) } return &types.Webauthn{ // Allow any RPID to go through, we rely on diff --git a/lib/service/service_test.go b/lib/service/service_test.go index 264a9c47c1281..16309ed59ac72 100644 --- a/lib/service/service_test.go +++ b/lib/service/service_test.go @@ -949,6 +949,7 @@ func TestTeleportProcess_reconnectToAuth(t *testing.T) { cfg.Testing.ClientTimeout = time.Millisecond cfg.InstanceMetadataClient = imds.NewDisabledIMDSClient() cfg.Log = utils.NewLoggerForTests() + cfg.Logger = utils.NewSlogLoggerForTests() process, err := NewTeleport(cfg) require.NoError(t, err)
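Taken together, the call-site pattern applied across this diff is uniform: printf-style logrus calls become context-aware slog calls whose variable data moves into key/value attributes, which is what lets the text and JSON handlers configured in applyLogConfig render consistent output. The snippet below is a minimal, self-contained illustration of that pattern; the message and keys are placeholders, not taken from the patch.

package main

import (
	"context"
	"errors"
	"log/slog"
)

func main() {
	ctx := context.Background()
	err := errors.New("connection refused")

	// Old style (logrus), removed by this change set:
	//   log.Warnf("service %q is unavailable: %v", "example", err)
	// New style (slog), added by this change set:
	slog.WarnContext(ctx, "service is unavailable",
		"service", "example",
		"error", err,
	)
}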