replace deprecated logger methods #702
Merged
Conversation
juev added the enhancement (New feature or request) and go (Pull requests that update Go code) labels on Dec 17, 2024
juev force-pushed the feature/replace-deprecated-logger-methods branch 2 times, most recently from 3aea0dd to fe5d0ba on December 17, 2024 14:56
@juev sorry to bother you, but we've got some conflicts to resolve :|
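The commit below applies one mechanical pattern across the codebase: the deprecated printf-style methods (`Infof`, `Errorf`, `Debugf`, `Warnf`) are replaced with their structured counterparts, and the variable parts of each message move out of the format string into `log/slog` attributes. A minimal before/after sketch of that convention, using the stdlib `slog` logger in place of `github.com/deckhouse/deckhouse/pkg/log` (whose `Info`/`Error` methods accept the same attribute arguments throughout this diff; its `log.Err(err)` helper is approximated here with `slog.Any` — the value is hypothetical, for illustration only):

```go
package main

import (
	"errors"
	"log/slog"
)

func main() {
	logger := slog.Default()
	address := "127.0.0.1:9680" // hypothetical value for illustration

	// Before: the deprecated printf-style call bakes the address into the
	// message, so log pipelines cannot filter or index on it.
	//   logger.Infof("Debug endpoint listen on %s", address)

	// After: constant message, variable data as typed attributes.
	logger.Info("Debug endpoint listen on address", slog.String("address", address))

	// Errors follow the same rule; the PR uses the project's log.Err(err)
	// helper, which plays the role slog.Any("error", err) plays here.
	err := errors.New("listen tcp: address already in use")
	logger.Error("Error starting Debug socket server", slog.Any("error", err))
}
```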
Signed-off-by: Evsyukov Denis <[email protected]> diff --git c/pkg/app/log.go i/pkg/app/log.go index 88bfe80..27e5d36 100644 --- c/pkg/app/log.go +++ i/pkg/app/log.go @@ -2,6 +2,7 @@ package app import ( "fmt" + "log/slog" "strings" "time" @@ -70,7 +71,7 @@ func SetupLogging(runtimeConfig *config.Config, logger *log.Logger) { fmt.Sprintf("Global log level. Default duration for debug level is %s", ForcedDurationForDebugLevel), strings.ToLower(LogLevel), func(_ string, newValue string) error { - logger.Infof("Set log level to '%s'", newValue) + logger.Info("Change log level", slog.String("value", newValue)) log.SetDefaultLevel(log.LogLevelFromStr(newValue)) return nil }, func(_ string, newValue string) time.Duration { diff --git c/pkg/debug/server.go i/pkg/debug/server.go index 4b723c2..7254f19 100644 --- c/pkg/debug/server.go +++ i/pkg/debug/server.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "os" @@ -67,11 +68,11 @@ func (s *Server) Init() error { return fmt.Errorf("Debug HTTP server fail to listen on '%s': %w", address, err) } - s.logger.Infof("Debug endpoint listen on %s", address) + s.logger.Info("Debug endpoint listen on address", slog.String("address", address)) go func() { if err := http.Serve(listener, s.Router); err != nil { - s.logger.Errorf("Error starting Debug socket server: %s", err) + s.logger.Error("Error starting Debug socket server", log.Err(err)) os.Exit(1) } }() @@ -79,7 +80,7 @@ func (s *Server) Init() error { if s.HttpAddr != "" { go func() { if err := http.ListenAndServe(s.HttpAddr, s.Router); err != nil { - s.logger.Errorf("Error starting Debug HTTP server: %s", err) + s.logger.Error("Error starting Debug HTTP server", log.Err(err)) os.Exit(1) } }() @@ -130,7 +131,7 @@ func handleFormattedOutput(writer http.ResponseWriter, request *http.Request, ha } format := FormatFromRequest(request) - structuredLogger.GetLogEntry(request).Debugf("use format '%s'", format) + structuredLogger.GetLogEntry(request).Debug("used format", slog.String("format", format)) switch format { case "text": diff --git c/pkg/executor/executor.go i/pkg/executor/executor.go index 040c6f5..2b4c1f4 100644 --- c/pkg/executor/executor.go +++ i/pkg/executor/executor.go @@ -21,7 +21,7 @@ import ( func Run(cmd *exec.Cmd) error { // TODO context: hook name, hook phase, hook binding // TODO observability - log.Debugf("Executing command '%s' in '%s' dir", strings.Join(cmd.Args, " "), cmd.Dir) + log.Debug("Executing command", slog.String("command", strings.Join(cmd.Args, " ")), slog.String("dir", cmd.Dir)) return cmd.Run() } @@ -82,7 +82,7 @@ func NewExecutor(dir string, entrypoint string, args []string, envs []string) *E } func (e *Executor) Output() ([]byte, error) { - e.logger.Debugf("Executing command '%s' in '%s' dir", strings.Join(e.cmd.Args, " "), e.cmd.Dir) + e.logger.Debug("Executing command", slog.String("command", strings.Join(e.cmd.Args, " ")), slog.String("dir", e.cmd.Dir)) return e.cmd.Output() } @@ -98,7 +98,7 @@ func (e *Executor) RunAndLogLines(logLabels map[string]string) (*CmdUsage, error stdoutLogEntry := logEntry.With("output", "stdout") stderrLogEntry := logEntry.With("output", "stderr") - logEntry.Debugf("Executing command '%s' in '%s' dir", strings.Join(e.cmd.Args, " "), e.cmd.Dir) + logEntry.Debug("Executing command", slog.String("command", strings.Join(e.cmd.Args, " ")), slog.String("dir", e.cmd.Dir)) plo := &proxyLogger{e.logProxyHookJSON, e.proxyJsonKey, stdoutLogEntry, make([]byte, 0)} ple := &proxyLogger{e.logProxyHookJSON, e.proxyJsonKey, stderrLogEntry, make([]byte, 0)} diff --git
c/pkg/hook/binding_context/binding_context.go i/pkg/hook/binding_context/binding_context.go index 968bbec..7296207 100644 --- c/pkg/hook/binding_context/binding_context.go +++ i/pkg/hook/binding_context/binding_context.go @@ -50,7 +50,7 @@ func (bc BindingContext) Map() map[string]interface{} { case "v1": return bc.MapV1() default: - log.Errorf("Possible bug!!! Call Map for BindingContext without version.") + log.Error("Possible bug!!! Call Map for BindingContext without version.") return make(map[string]interface{}) } } diff --git c/pkg/hook/controller/admission_bindings_controller.go i/pkg/hook/controller/admission_bindings_controller.go index 97597b7..e4dec81 100644 --- c/pkg/hook/controller/admission_bindings_controller.go +++ i/pkg/hook/controller/admission_bindings_controller.go @@ -1,6 +1,8 @@ package controller import ( + "log/slog" + "github.com/deckhouse/deckhouse/pkg/log" v1 "k8s.io/api/admission/v1" @@ -67,7 +69,9 @@ func (c *AdmissionBindingsController) EnableValidatingBindings() { continue } if config.Webhook.Metadata.ConfigurationId != confId { - log.Errorf("Possible bug!!! kubernetesValidating has non-unique configurationIds: '%s' '%s'", config.Webhook.Metadata.ConfigurationId, confId) + log.Error("Possible bug!!! kubernetesValidating has non-unique configurationIds", + slog.String("configurationID", config.Webhook.Metadata.ConfigurationId), + slog.String("confID", confId)) } } c.ConfigurationId = confId @@ -101,7 +105,9 @@ func (c *AdmissionBindingsController) EnableMutatingBindings() { continue } if config.Webhook.Metadata.ConfigurationId != confId { - log.Errorf("Possible bug!!! kubernetesMutating has non-unique configurationIds: '%s' '%s'", config.Webhook.Metadata.ConfigurationId, confId) + log.Error("Possible bug!!! kubernetesMutating has non-unique configurationIds", + slog.String("configurationID", config.Webhook.Metadata.ConfigurationId), + slog.String("confID", confId)) } } c.ConfigurationId = confId @@ -137,7 +143,9 @@ func (c *AdmissionBindingsController) CanHandleEvent(event admission.Event) bool func (c *AdmissionBindingsController) HandleEvent(event admission.Event) BindingExecutionInfo { if c.ConfigurationId != event.ConfigurationId { - log.Errorf("Possible bug!!! Unknown validating event: no binding for configurationId '%s' (webhookId '%s')", event.ConfigurationId, event.WebhookId) + log.Error("Possible bug!!! Unknown validating event: no binding for configurationId", + slog.String("configurationId", event.ConfigurationId), + slog.String("webhookId", event.WebhookId)) return BindingExecutionInfo{ BindingContext: []bctx.BindingContext{}, AllowFailure: false, @@ -146,7 +154,9 @@ func (c *AdmissionBindingsController) HandleEvent(event admission.Event) Binding link, hasKey := c.AdmissionLinks[event.WebhookId] if !hasKey { - log.Errorf("Possible bug!!! Unknown validating event: no binding for configurationId '%s', webhookId '%s'", event.ConfigurationId, event.WebhookId) + log.Error("Possible bug!!! 
Unknown validating event: no binding for configurationId", + slog.String("configurationId", event.ConfigurationId), + slog.String("webhookId", event.WebhookId)) return BindingExecutionInfo{ BindingContext: []bctx.BindingContext{}, AllowFailure: false, diff --git c/pkg/hook/controller/conversion_bindings_controller.go i/pkg/hook/controller/conversion_bindings_controller.go index ca4c632..3975f82 100644 --- c/pkg/hook/controller/conversion_bindings_controller.go +++ i/pkg/hook/controller/conversion_bindings_controller.go @@ -1,6 +1,8 @@ package controller import ( + "log/slog" + "github.com/deckhouse/deckhouse/pkg/log" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -9,7 +11,7 @@ import ( "github.com/flant/shell-operator/pkg/webhook/conversion" ) -// A link between a hook and a kube monitor +// ConversionBindingToWebhookLink a link between a hook and a kube monitor type ConversionBindingToWebhookLink struct { BindingName string // Useful fields to create a BindingContext @@ -60,7 +62,8 @@ func (c *ConversionBindingsController) EnableConversionBindings() { ToVersion: conv.ToVersion, } } - log.Infof("conversion binding controller: add webhook from config: %v", config) + log.Info("conversion binding controller: add webhook from config", + slog.String("name", config.Webhook.Metadata.Name)) c.webhookManager.AddWebhook(config.Webhook) } } @@ -81,7 +84,7 @@ func (c *ConversionBindingsController) CanHandleEvent(crdName string, _ *v1.Conv func (c *ConversionBindingsController) HandleEvent(crdName string, request *v1.ConversionRequest, rule conversion.Rule) BindingExecutionInfo { _, hasKey := c.Links[crdName] if !hasKey { - log.Errorf("Possible bug!!! No binding for conversion event for crd/%s", crdName) + log.Error("Possible bug!!! No binding for conversion event for crd", slog.String("name", crdName)) return BindingExecutionInfo{ BindingContext: []bctx.BindingContext{}, AllowFailure: false, @@ -89,7 +92,9 @@ func (c *ConversionBindingsController) HandleEvent(crdName string, request *v1.C } link, has := c.Links[crdName][rule] if !has { - log.Errorf("Possible bug!!! Event has an unknown conversion rule %s for crd/%s: no binding was registered", rule.String(), crdName) + log.Error("Possible bug!!! Event has an unknown conversion rule: no binding was registered", + slog.String("rule", rule.String()), + slog.String("crd", crdName)) return BindingExecutionInfo{ BindingContext: []bctx.BindingContext{}, AllowFailure: false, diff --git c/pkg/hook/controller/kubernetes_bindings_controller.go i/pkg/hook/controller/kubernetes_bindings_controller.go index b03d024..e430ef7 100644 --- c/pkg/hook/controller/kubernetes_bindings_controller.go +++ i/pkg/hook/controller/kubernetes_bindings_controller.go @@ -2,6 +2,7 @@ package controller import ( "fmt" + "log/slog" "github.com/deckhouse/deckhouse/pkg/log" @@ -127,8 +128,10 @@ func (c *kubernetesBindingsController) UpdateMonitor(monitorId string, kind, api } utils.EnrichLoggerWithLabels(c.logger, link.BindingConfig.Monitor.Metadata.LogLabels). - Infof("Monitor for '%s' is recreated with new kind=%s and apiVersion=%s", - link.BindingConfig.BindingName, link.BindingConfig.Monitor.Kind, link.BindingConfig.Monitor.ApiVersion) + Info("Monitor is recreated", + slog.String("bindingName", link.BindingConfig.BindingName), + slog.String("kind", link.BindingConfig.Monitor.Kind), + slog.String("apiVersion", link.BindingConfig.Monitor.ApiVersion)) // Synchronization has no meaning for UpdateMonitor. Just emit Added event to handle objects of // a new kind. 
@@ -159,7 +162,7 @@ func (c *kubernetesBindingsController) UnlockEvents() { func (c *kubernetesBindingsController) UnlockEventsFor(monitorID string) { m := c.kubeEventsManager.GetMonitor(monitorID) if m == nil { - log.Warnf("monitor %q was not found", monitorID) + log.Warn("monitor was not found", slog.String("monitorID", monitorID)) return } m.EnableKubeEventCb() @@ -187,7 +190,7 @@ func (c *kubernetesBindingsController) CanHandleEvent(kubeEvent kemtypes.KubeEve func (c *kubernetesBindingsController) HandleEvent(kubeEvent kemtypes.KubeEvent) BindingExecutionInfo { link, hasKey := c.BindingMonitorLinks[kubeEvent.MonitorId] if !hasKey { - log.Errorf("Possible bug!!! Unknown kube event: no such monitor id '%s' registered", kubeEvent.MonitorId) + log.Error("Possible bug!!! Unknown kube event: no such monitor id registered", slog.String("monitorID", kubeEvent.MonitorId)) return BindingExecutionInfo{ BindingContext: []bctx.BindingContext{}, AllowFailure: false, diff --git c/pkg/hook/hook_manager.go i/pkg/hook/hook_manager.go index d1599dd..2fd9f17 100644 --- c/pkg/hook/hook_manager.go +++ i/pkg/hook/hook_manager.go @@ -1,7 +1,9 @@ package hook import ( + "errors" "fmt" + "log/slog" "os" "os/exec" "path/filepath" @@ -92,7 +94,9 @@ func (hm *Manager) Init() error { hm.hooksByName = make(map[string]*Hook) if err := utils_file.RecursiveCheckLibDirectory(hm.workingDir); err != nil { - log.Errorf("failed to check lib directory %s: %v", hm.workingDir, err) + log.Error("failed to check lib directory", + slog.String("workingDir", hm.workingDir), + log.Err(err)) } hooksRelativePaths, err := utils_file.RecursiveGetExecutablePaths(hm.workingDir) @@ -102,7 +106,7 @@ func (hm *Manager) Init() error { // sort hooks by path sort.Strings(hooksRelativePaths) - log.Debugf(" Search hooks in this paths: %+v", hooksRelativePaths) + log.Debug("Search hooks in paths", slog.Any("paths", hooksRelativePaths)) for _, hookPath := range hooksRelativePaths { hook, err := hm.loadHook(hookPath) @@ -137,14 +141,16 @@ func (hm *Manager) loadHook(hookPath string) (*Hook, error) { hook := NewHook(hookName, hookPath, app.DebugKeepTmpFiles, app.LogProxyHookJSON, app.ProxyJsonLogKey, hm.logger.Named("hook")) hookEntry := hm.logger.With("hook", hook.Name). 
With("phase", "config") - hookEntry.Infof("Load config from '%s'", hookPath) + + hookEntry.Info("Load config", slog.String("path", hookPath)) envs := make([]string, 0) configOutput, err := hm.execCommandOutput(hook.Name, hm.workingDir, hookPath, envs, []string{"--config"}) if err != nil { - hookEntry.Errorf("Hook config output:\n%s", string(configOutput)) - if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { - hookEntry.Errorf("Hook config stderr:\n%s", string(ee.Stderr)) + hookEntry.Error("Hook config output", slog.String("value", string(configOutput))) + var ee *exec.ExitError + if errors.As(err, &ee) && len(ee.Stderr) > 0 { + hookEntry.Error("Hook config stderr", slog.String("value", string(ee.Stderr))) } return nil, fmt.Errorf("cannot get config for hook '%s': %w", hookPath, err) } @@ -204,7 +210,7 @@ func (hm *Manager) loadHook(hookPath string) (*Hook, error) { return nil, fmt.Errorf("hook %q is marked as executable but doesn't contain config section", hook.Path) } - hookEntry.Infof("Loaded config: %s", hook.GetConfigDescription()) + hookEntry.Info("Loaded config", slog.String("value", hook.GetConfigDescription())) return hook, nil } @@ -225,14 +231,14 @@ func (hm *Manager) execCommandOutput(hookName string, dir string, entrypoint str debugEntry := hm.logger.With("hook", hookName). With("cmd", strings.Join(args, " ")) - debugEntry.Debugf("Executing hook in %s", dir) + debugEntry.Debug("Executing hook", slog.String("dir", dir)) output, err := hookCmd.Output() if err != nil { return output, err } - debugEntry.Debugf("output:\n%s", string(output)) + debugEntry.Debug("execCommandOutput", slog.String("output", string(output))) return output, nil } @@ -242,7 +248,7 @@ func (hm *Manager) GetHook(name string) *Hook { if exists { return hook } - log.Errorf("Possible bug!!! Hook '%s' not found in hook manager", name) + log.Error("Possible bug!!! Hook not found in hook manager", slog.String("name", name)) return nil } @@ -351,7 +357,12 @@ func (hm *Manager) DetectAdmissionEventType(event admission.Event) htypes.Bindin } } - log.Errorf("Possible bug!!! No linked hook for admission event %s %s kind=%s name=%s ns=%s", event.ConfigurationId, event.WebhookId, event.Request.Kind, event.Request.Name, event.Request.Namespace) + log.Error("Possible bug!!! No linked hook for admission event %s %s kind=%s name=%s ns=%s", + slog.String("configId", event.ConfigurationId), + slog.String("webhookId", event.WebhookId), + slog.String("kind", event.Request.Kind.String()), + slog.String("name", event.Request.Name), + slog.String("namespace", event.Request.Namespace)) return "" } diff --git c/pkg/hook/task_metadata/task_metadata.go i/pkg/hook/task_metadata/task_metadata.go index ab46fa1..b356790 100644 --- c/pkg/hook/task_metadata/task_metadata.go +++ i/pkg/hook/task_metadata/task_metadata.go @@ -2,6 +2,7 @@ package task_metadata import ( "fmt" + "log/slog" "github.com/deckhouse/deckhouse/pkg/log" @@ -51,13 +52,14 @@ var ( func HookMetadataAccessor(t task.Task) HookMetadata { meta := t.GetMetadata() if meta == nil { - log.Errorf("Possible Bug! task metadata is nil") + log.Error("Possible Bug! task metadata is nil") return HookMetadata{} } hookMeta, ok := meta.(HookMetadata) if !ok { - log.Errorf("Possible Bug! task metadata is not of type HookMetadata: got %T", meta) + log.Error("Possible Bug! 
task metadata is not of type HookMetadata", + slog.String("type", fmt.Sprintf("%T", meta))) return HookMetadata{} } diff --git c/pkg/kube/object_patch/operation.go i/pkg/kube/object_patch/operation.go index 8128f13..6819783 100644 --- c/pkg/kube/object_patch/operation.go +++ i/pkg/kube/object_patch/operation.go @@ -2,6 +2,7 @@ package object_patch import ( "fmt" + "log/slog" "github.com/deckhouse/deckhouse/pkg/log" "github.com/hashicorp/go-multierror" @@ -13,7 +14,7 @@ import ( "github.com/flant/shell-operator/pkg/filter/jq" ) -// A JSON and YAML representation of the operation for shell hooks +// OperationSpec a JSON and YAML representation of the operation for shell hooks type OperationSpec struct { Operation OperationType `json:"operation" yaml:"operation"` ApiVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"` @@ -67,7 +68,7 @@ func GetPatchStatusOperationsOnHookError(operations []Operation) []Operation { } func ParseOperations(specBytes []byte) ([]Operation, error) { - log.Debugf("parsing patcher operations:\n%s", specBytes) + log.Debug("parsing patcher operations", slog.String("value", string(specBytes))) specs, err := unmarshalFromJSONOrYAML(specBytes) if err != nil { diff --git c/pkg/kube/object_patch/patch.go i/pkg/kube/object_patch/patch.go index e3bcf83..f7917be 100644 --- c/pkg/kube/object_patch/patch.go +++ i/pkg/kube/object_patch/patch.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "log/slog" "time" "github.com/deckhouse/deckhouse/pkg/log" @@ -45,7 +46,7 @@ func (o *ObjectPatcher) ExecuteOperations(ops []Operation) error { applyErrors := &multierror.Error{} for _, op := range ops { - log.Debugf("Applying operation: %s", op.Description()) + log.Debug("Applying operation", slog.String("name", op.Description())) if err := o.ExecuteOperation(op); err != nil { err = gerror.WithMessage(err, op.Description()) applyErrors = multierror.Append(applyErrors, err) diff --git c/pkg/kube_events_manager/error_handler.go i/pkg/kube_events_manager/error_handler.go index c222b99..b022510 100644 --- c/pkg/kube_events_manager/error_handler.go +++ i/pkg/kube_events_manager/error_handler.go @@ -1,7 +1,9 @@ package kubeeventsmanager import ( + "errors" "io" + "log/slog" "github.com/deckhouse/deckhouse/pkg/log" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -38,16 +40,25 @@ func (weh *WatchErrorHandler) handler(_ *cache.Reflector, err error) { // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. 
- weh.logger.Errorf("%s: Watch of %v closed with: %v", weh.description, weh.kind, err) + weh.logger.Error("Watch closed", + slog.String("description", weh.description), + slog.String("kind", weh.kind), + log.Err(err)) errorType = "expired" case err == io.EOF: // watch closed normally errorType = "eof" - case err == io.ErrUnexpectedEOF: - weh.logger.Errorf("%s: Watch for %v closed with unexpected EOF: %v", weh.description, weh.kind, err) + case errors.Is(err, io.ErrUnexpectedEOF): + weh.logger.Error("Watch closed with unexpected EOF", + slog.String("description", weh.description), + slog.String("kind", weh.kind), + log.Err(err)) errorType = "unexpected-eof" case err != nil: - weh.logger.Errorf("%s: Failed to watch %v: %v", weh.description, weh.kind, err) + weh.logger.Error("Watch Failed", + slog.String("description", weh.description), + slog.String("kind", weh.kind), + log.Err(err)) errorType = "fail" } diff --git c/pkg/kube_events_manager/factory.go i/pkg/kube_events_manager/factory.go index 7b35f9d..35bfd52 100644 --- c/pkg/kube_events_manager/factory.go +++ i/pkg/kube_events_manager/factory.go @@ -2,6 +2,7 @@ package kubeeventsmanager import ( "context" + "log/slog" "sync" "time" @@ -52,17 +53,19 @@ func (c *FactoryStore) add(index FactoryIndex, f dynamicinformer.DynamicSharedIn ctx, cancel := context.WithCancel(context.Background()) c.data[index] = Factory{ shared: f, - handlerRegistrations: make(map[string]cache.ResourceEventHandlerRegistration, 0), + handlerRegistrations: make(map[string]cache.ResourceEventHandlerRegistration), ctx: ctx, cancel: cancel, } - log.Debugf("Factory store: added a new factory for %v index", index) + log.Debug("Factory store: added a new factory for index", + slog.String("namespace", index.Namespace), slog.String("gvr", index.GVR.String())) } func (c *FactoryStore) get(client dynamic.Interface, index FactoryIndex) Factory { f, ok := c.data[index] if ok { - log.Debugf("Factory store: the factory with %v index found", index) + log.Debug("Factory store: the factory with index found", + slog.String("namespace", index.Namespace), slog.String("gvr", index.GVR.String())) return f } @@ -97,10 +100,14 @@ func (c *FactoryStore) Start(ctx context.Context, informerId string, client dyna _ = informer.SetWatchErrorHandler(errorHandler.handler) registration, err := informer.AddEventHandler(handler) if err != nil { - log.Warnf("Factory store: couldn't add event handler to the %v factory's informer: %v", index, err) + log.Warn("Factory store: couldn't add event handler to the factory's informer", + slog.String("namespace", index.Namespace), slog.String("gvr", index.GVR.String()), + log.Err(err)) } factory.handlerRegistrations[informerId] = registration - log.Debugf("Factory store: increased usage counter to %d of the factory with %v index", len(factory.handlerRegistrations), index) + log.Debug("Factory store: increased usage counter of the factory", + slog.Int("value", len(factory.handlerRegistrations)), + slog.String("namespace", index.Namespace), slog.String("gvr", index.GVR.String())) if !informer.HasSynced() { go informer.Run(factory.ctx.Done()) @@ -111,7 +118,8 @@ func (c *FactoryStore) Start(ctx context.Context, informerId string, client dyna return err } } - log.Debugf("Factory store: started informer for %v index", index) + log.Debug("Factory store: started informer", + slog.String("namespace", index.Namespace), slog.String("gvr", index.GVR.String())) return nil } @@ -128,14 +136,19 @@ func (c *FactoryStore) Stop(informerId string, index FactoryIndex) { if 
handlerRegistration, found := f.handlerRegistrations[informerId]; found { err := f.shared.ForResource(index.GVR).Informer().RemoveEventHandler(handlerRegistration) if err != nil { - log.Warnf("Factory store: couldn't remove event handler from the %v factory's informer: %v", index, err) + log.Warn("Factory store: couldn't remove event handler from the factory's informer", + slog.String("namespace", index.Namespace), slog.String("gvr", index.GVR.String()), + log.Err(err)) } delete(f.handlerRegistrations, informerId) - log.Debugf("Factory store: decreased usage counter to %d of the factory with %v index", len(f.handlerRegistrations), index) + log.Debug("Factory store: decreased usage counter of the factory", + slog.Int("value", len(f.handlerRegistrations)), + slog.String("namespace", index.Namespace), slog.String("gvr", index.GVR.String())) if len(f.handlerRegistrations) == 0 { f.cancel() delete(c.data, index) - log.Debugf("Factory store: deleted factory for %v index", index) + log.Debug("Factory store: deleted factory", + slog.String("namespace", index.Namespace), slog.String("gvr", index.GVR.String())) } } } diff --git c/pkg/kube_events_manager/kube_events_manager.go i/pkg/kube_events_manager/kube_events_manager.go index 2ed4040..5e8efe3 100644 --- c/pkg/kube_events_manager/kube_events_manager.go +++ i/pkg/kube_events_manager/kube_events_manager.go @@ -2,6 +2,8 @@ package kubeeventsmanager import ( "context" + "fmt" + "log/slog" "runtime/trace" "sync" @@ -67,7 +69,8 @@ func (mgr *kubeEventsManager) WithMetricStorage(mstor *metricstorage.MetricStora // TODO cleanup informers in case of error // TODO use Context to stop informers func (mgr *kubeEventsManager) AddMonitor(monitorConfig *MonitorConfig) error { - log.Debugf("Add MONITOR %+v", monitorConfig) + log.Debug("Add MONITOR", + slog.String("config", fmt.Sprintf("%+v", monitorConfig))) monitor := NewMonitor( mgr.ctx, mgr.KubeClient, diff --git c/pkg/kube_events_manager/monitor.go i/pkg/kube_events_manager/monitor.go index b7a3a15..7bc0943 100644 --- c/pkg/kube_events_manager/monitor.go +++ i/pkg/kube_events_manager/monitor.go @@ -3,6 +3,7 @@ package kubeeventsmanager import ( "context" "fmt" + "log/slog" "sort" "github.com/deckhouse/deckhouse/pkg/log" @@ -82,14 +83,16 @@ func (m *monitor) CreateInformers() error { With("binding.name", m.Config.Metadata.DebugName) if m.Config.Kind == "" && m.Config.ApiVersion == "" { - logEntry.Debugf("Create Informers for Config with empty kind and apiVersion: %+v", m.Config) + logEntry.Debug("Create Informers for Config with empty kind and apiVersion", + slog.String("value", fmt.Sprintf("%+v", m.Config))) return nil } - logEntry.Debugf("Create Informers Config: %+v", m.Config) + logEntry.Debug("Create Informers Config", + slog.String("value", fmt.Sprintf("%+v", m.Config))) nsNames := m.Config.namespaces() if len(nsNames) > 0 { - logEntry.Debugf("create static ResourceInformers") + logEntry.Debug("create static ResourceInformers") // create informers for each specified object name in each specified namespace // This list of informers is static. 
@@ -106,7 +109,7 @@ func (m *monitor) CreateInformers() error { } if m.Config.NamespaceSelector != nil && m.Config.NamespaceSelector.LabelSelector != nil { - logEntry.Debugf("Create NamespaceInformer for namespace.labelSelector") + logEntry.Debug("Create NamespaceInformer for namespace.labelSelector") m.NamespaceInformer = NewNamespaceInformer(m.ctx, m.KubeClient, m.Config) err := m.NamespaceInformer.createSharedInformer( func(nsName string) { @@ -121,12 +124,14 @@ func (m *monitor) CreateInformers() error { return } - logEntry.Infof("got ns/%s, create dynamic ResourceInformers", nsName) + logEntry.Info("got ns, create dynamic ResourceInformers", slog.String("name", nsName)) var err error m.VaryingInformers[nsName], err = m.CreateInformersForNamespace(nsName) if err != nil { - logEntry.Errorf("create ResourceInformers for ns/%s: %v", nsName, err) + logEntry.Error("create ResourceInformers for ns", + slog.String("name", nsName), + log.Err(err)) } var ctx context.Context @@ -142,7 +147,7 @@ func (m *monitor) CreateInformers() error { }, func(nsName string) { // Delete event: check, stop and remove informers for Ns - logEntry.Infof("deleted ns/%s, stop dynamic ResourceInformers", nsName) + logEntry.Info("deleted ns, stop dynamic ResourceInformers", slog.String("name", nsName)) // ignore statically specified namespaces if _, ok := m.staticNamespaces[nsName]; ok { @@ -167,7 +172,7 @@ func (m *monitor) CreateInformers() error { return fmt.Errorf("create namespace informer: %v", err) } for nsName := range m.NamespaceInformer.getExistedObjects() { - logEntry.Infof("got ns/%s, create dynamic ResourceInformers", nsName) + logEntry.Info("got ns, create dynamic ResourceInformers", slog.String("name", nsName)) // ignore event if namespace is already has static ResourceInformers if _, ok := m.staticNamespaces[nsName]; ok { @@ -177,7 +182,9 @@ func (m *monitor) CreateInformers() error { var err error m.VaryingInformers[nsName], err = m.CreateInformersForNamespace(nsName) if err != nil { - logEntry.Errorf("create ResourceInformers for ns/%s: %v", nsName, err) + logEntry.Error("create ResourceInformers for ns", + slog.String("name", nsName), + log.Err(err)) } } } diff --git c/pkg/kube_events_manager/namespace_informer.go i/pkg/kube_events_manager/namespace_informer.go index 6abe915..2920641 100644 --- c/pkg/kube_events_manager/namespace_informer.go +++ i/pkg/kube_events_manager/namespace_informer.go @@ -5,6 +5,7 @@ package kubeeventsmanager import ( "context" "fmt" + "log/slog" "github.com/deckhouse/deckhouse/pkg/log" v1 "k8s.io/api/core/v1" @@ -75,7 +76,7 @@ func (ni *namespaceInformer) createSharedInformer(addFn func(string), delFn func tweakListOptions(&listOptions) existedObjects, err := ni.KubeClient.CoreV1().Namespaces().List(context.TODO(), listOptions) if err != nil { - log.Errorf("list existing namespaces: %v", err) + log.Error("Failed list existing namespaces", log.Err(err)) return err } @@ -95,7 +96,7 @@ func (ni *namespaceInformer) OnAdd(obj interface{}, _ bool) { return } nsObj := obj.(*v1.Namespace) - log.Debugf("NamespaceInformer: Added ns/%s", nsObj.Name) + log.Debug("NamespaceInformer: Added ns", slog.String("name", nsObj.Name)) if ni.addFn != nil { ni.addFn(nsObj.Name) } @@ -113,16 +114,17 @@ func (ni *namespaceInformer) OnDelete(obj interface{}) { obj = staleObj.Obj } nsObj := obj.(*v1.Namespace) - log.Debugf("NamespaceInformer: Deleted ns/%s", nsObj.Name) + log.Debug("NamespaceInformer: Deleted ns", slog.String("name", nsObj.Name)) if ni.delFn != nil { ni.delFn(nsObj.Name) } } func (ni 
*namespaceInformer) start() { - log.Debugf("%s: Run namespace informer", ni.Monitor.Metadata.DebugName) + log.Debug("Run namespace informer", slog.String("name", ni.Monitor.Metadata.DebugName)) if ni.SharedInformer == nil { - log.Errorf("%s: Possible BUG!!! Start called before createSharedInformer, ShredInformer is nil", ni.Monitor.Metadata.DebugName) + log.Error("Possible BUG!!! Start called before createSharedInformer, SharedInformer is nil", + slog.String("name", ni.Monitor.Metadata.DebugName)) return } cctx, cancel := context.WithCancel(ni.ctx) @@ -137,10 +139,11 @@ func (ni *namespaceInformer) start() { if err := wait.PollUntilContextCancel(cctx, DefaultSyncTime, true, func(_ context.Context) (bool, error) { return ni.SharedInformer.HasSynced(), nil }); err != nil { - ni.Monitor.Logger.Errorf("%s: cache is not synced for informer", ni.Monitor.Metadata.DebugName) + ni.Monitor.Logger.Error("Cache is not synced for informer", + slog.String("name", ni.Monitor.Metadata.DebugName)) } - log.Debugf("%s: informer is ready", ni.Monitor.Metadata.DebugName) + log.Debug("Informer is ready", slog.String("name", ni.Monitor.Metadata.DebugName)) } func (ni *namespaceInformer) pauseHandleEvents() { diff --git c/pkg/kube_events_manager/resource_informer.go i/pkg/kube_events_manager/resource_informer.go index eb35c59..1307a1d 100644 --- c/pkg/kube_events_manager/resource_informer.go +++ i/pkg/kube_events_manager/resource_informer.go @@ -3,6 +3,7 @@ package kubeeventsmanager import ( "context" "fmt" + "log/slog" "runtime/trace" "sync" "time" @@ -109,12 +110,18 @@ func (ei *resourceInformer) createSharedInformer() error { var err error // discover GroupVersionResource for informer - log.Debugf("%s: discover GVR for apiVersion '%s' kind '%s'...", ei.Monitor.Metadata.DebugName, ei.Monitor.ApiVersion, ei.Monitor.Kind) + log.Debug("discover GVR for apiVersion...", + slog.String("debugName", ei.Monitor.Metadata.DebugName), + slog.String("apiVersion", ei.Monitor.ApiVersion), + slog.String("kind", ei.Monitor.Kind)) if ei.GroupVersionResource, err = ei.KubeClient.GroupVersionResource(ei.Monitor.ApiVersion, ei.Monitor.Kind); err != nil { return fmt.Errorf("%s: Cannot get GroupVersionResource info for apiVersion '%s' kind '%s' from api-server. Possibly CRD is not created before informers are started. Error was: %w", ei.Monitor.Metadata.DebugName, ei.Monitor.ApiVersion, ei.Monitor.Kind, err) } - log.Debugf("%s: GVR for kind '%s' is '%s'", ei.Monitor.Metadata.DebugName, ei.Monitor.Kind, ei.GroupVersionResource.String()) + log.Debug("GVR for kind", + slog.String("debugName", ei.Monitor.Metadata.DebugName), + slog.String("kind", ei.Monitor.Kind), + slog.String("GVR", ei.GroupVersionResource.String())) // define tweakListOptions for informer fmtLabelSelector, err := FormatLabelSelector(ei.Monitor.LabelSelector) @@ -189,18 +196,26 @@ func (ei *resourceInformer) loadExistedObjects() error { Namespace(ei.Namespace). 
List(context.TODO(), ei.ListOptions) if err != nil { - log.Errorf("%s: initial list resources of kind '%s': %v", ei.Monitor.Metadata.DebugName, ei.Monitor.Kind, err) + log.Error("initial list resources of kind failed", + slog.String("debugName", ei.Monitor.Metadata.DebugName), + slog.String("kind", ei.Monitor.Kind), + log.Err(err)) return err } if objList == nil || len(objList.Items) == 0 { - log.Debugf("%s: Got no existing '%s' resources", ei.Monitor.Metadata.DebugName, ei.Monitor.Kind) + log.Debug("Got no existing resources", + slog.String("debugName", ei.Monitor.Metadata.DebugName), + slog.String("kind", ei.Monitor.Kind)) return nil } // FIXME objList.Items has too much information for log // log.Debugf("%s: Got %d existing '%s' resources: %+v", ei.Monitor.Metadata.DebugName, len(objList.Items), ei.Monitor.Kind, objList.Items) - log.Debugf("%s: '%s' initial list: Got %d existing resources", ei.Monitor.Metadata.DebugName, ei.Monitor.Kind, len(objList.Items)) + log.Debug("initial list: Got existing resources", + slog.String("debugName", ei.Monitor.Metadata.DebugName), + slog.String("kind", ei.Monitor.Kind), + slog.Int("count", len(objList.Items))) filteredObjects := make(map[string]*kemtypes.ObjectAndFilterResult) @@ -228,10 +243,10 @@ func (ei *resourceInformer) loadExistedObjects() error { filteredObjects[objFilterRes.Metadata.ResourceId] = objFilterRes - log.Debugf("%s: initial list: '%s' is cached with checksum %s", - ei.Monitor.Metadata.DebugName, - objFilterRes.Metadata.ResourceId, - objFilterRes.Metadata.Checksum) + log.Debug("initial list: cached with checksum", + slog.String("debugName", ei.Monitor.Metadata.DebugName), + slog.String("resourceId", objFilterRes.Metadata.ResourceId), + slog.String("checksum", objFilterRes.Metadata.Checksum)) } // Save objects to the cache. 
@@ -266,11 +281,11 @@ func (ei *resourceInformer) OnDelete(obj interface{}) { func (ei *resourceInformer) handleWatchEvent(object interface{}, eventType kemtypes.WatchEventType) { // check if stop if ei.stopped { - log.Debugf("%s: received WATCH for a stopped %s/%s informer %s", - ei.Monitor.Metadata.DebugName, - ei.Namespace, - ei.Name, - eventType) + log.Debug("received WATCH for stopped informer", + slog.String("debugName", ei.Monitor.Metadata.DebugName), + slog.String("namespace", ei.Namespace), + slog.String("name", ei.Name), + slog.String("eventType", string(eventType))) return } @@ -298,10 +313,10 @@ func (ei *resourceInformer) handleWatchEvent(object interface{}, eventType kemty objFilterRes, err = applyFilter(ei.Monitor.JqFilter, filter, ei.Monitor.FilterFunc, obj) }() if err != nil { - log.Errorf("%s: WATCH %s: %s", - ei.Monitor.Metadata.DebugName, - eventType, - err) + log.Error("handleWatchEvent: applyFilter error", + slog.String("debugName", ei.Monitor.Metadata.DebugName), + slog.String("eventType", string(eventType)), + log.Err(err)) return } @@ -321,10 +336,10 @@ func (ei *resourceInformer) handleWatchEvent(object interface{}, eventType kemty skipEvent := false if objectInCache && cachedObject.Metadata.Checksum == objFilterRes.Metadata.Checksum { // update object in cache and do not send event - log.Debugf("%s: %s %s: checksum is not changed, no KubeEvent", - ei.Monitor.Metadata.DebugName, - string(eventType), - resourceId, + log.Debug("skip KubeEvent", + slog.String("debugName", ei.Monitor.Metadata.DebugName), + slog.String("eventType", string(eventType)), + slog.String("resourceId", resourceId), ) skipEvent = true } @@ -363,11 +378,10 @@ func (ei *resourceInformer) handleWatchEvent(object interface{}, eventType kemty // Fire KubeEvent only if needed. 
if ei.shouldFireEvent(eventType) { - log.Debugf("%s: %s %s: send KubeEvent", - ei.Monitor.Metadata.DebugName, - string(eventType), - resourceId, - ) + log.Debug("send KubeEvent", + slog.String("debugName", ei.Monitor.Metadata.DebugName), + slog.String("eventType", string(eventType)), + slog.String("resourceId", resourceId)) // TODO: should be disabled by default and enabled by a debug feature switch // log.Debugf("HandleKubeEvent: obj type is %T, value:\n%#v", obj, obj) @@ -438,7 +452,7 @@ func (ei *resourceInformer) shouldFireEvent(checkEvent kemtypes.WatchEventType) } func (ei *resourceInformer) start() { - log.Debugf("%s: RUN resource informer", ei.Monitor.Metadata.DebugName) + log.Debug("RUN resource informer", slog.String("debugName", ei.Monitor.Metadata.DebugName)) go func() { if ei.ctx != nil { @@ -451,15 +465,15 @@ func (ei *resourceInformer) start() { errorHandler := newWatchErrorHandler(ei.Monitor.Metadata.DebugName, ei.Monitor.Kind, ei.Monitor.Metadata.LogLabels, ei.metricStorage, ei.logger.Named("watch-error-handler")) err := DefaultFactoryStore.Start(ei.ctx, ei.id, ei.KubeClient.Dynamic(), ei.FactoryIndex, ei, errorHandler) if err != nil { - ei.Monitor.Logger.Errorf("%s: cache is not synced for informer", ei.Monitor.Metadata.DebugName) + ei.Monitor.Logger.Error("cache is not synced for informer", slog.String("debugName", ei.Monitor.Metadata.DebugName)) return } - log.Debugf("%s: informer is ready", ei.Monitor.Metadata.DebugName) + log.Debug("informer is ready", slog.String("debugName", ei.Monitor.Metadata.DebugName)) } func (ei *resourceInformer) pauseHandleEvents() { - log.Debugf("%s: PAUSE resource informer", ei.Monitor.Metadata.DebugName) + log.Debug("PAUSE resource informer", slog.String("debugName", ei.Monitor.Metadata.DebugName)) ei.stopped = true } diff --git c/pkg/kube_events_manager/types/types.go i/pkg/kube_events_manager/types/types.go index dabb7c1..5bdfb8d 100644 --- c/pkg/kube_events_manager/types/types.go +++ i/pkg/kube_events_manager/types/types.go @@ -3,6 +3,7 @@ package types import ( "encoding/json" "fmt" + "log/slog" "strings" "github.com/deckhouse/deckhouse/pkg/log" @@ -67,15 +68,12 @@ func (o ObjectAndFilterResult) Map() map[string]interface{} { return m } - if filterResString == "" { - m["filterResult"] = nil - return m - } - // Convert string with jq output into Go object. err := json.Unmarshal([]byte(filterResString), &filterResultValue) if err != nil { - log.Errorf("Possible bug!!! Cannot unmarshal jq filter '%s' result: %s", o.Metadata.JqFilter, err) + log.Error("Possible bug!!! 
Cannot unmarshal jq filter result", + slog.String("jqFilter", o.Metadata.JqFilter), + log.Err(err)) m["filterResult"] = nil return m } diff --git c/pkg/metric_storage/metric_storage.go i/pkg/metric_storage/metric_storage.go index 1ab1f0e..93583f8 100644 --- c/pkg/metric_storage/metric_storage.go +++ i/pkg/metric_storage/metric_storage.go @@ -3,6 +3,7 @@ package metricstorage import ( "context" "fmt" + "log/slog" "net/http" "strings" "sync" @@ -94,7 +95,11 @@ func (m *MetricStorage) GaugeSet(metric string, value float64, labels map[string } defer func() { if r := recover(); r != nil { - m.logger.Errorf("Metric gauge set %s %v with %v: %v", m.resolveMetricName(metric), LabelNames(labels), labels, r) + m.logger.Error("Metric gauge set", + slog.String("metric", m.resolveMetricName(metric)), + slog.String("labels", strings.Join(LabelNames(labels), ", ")), + slog.String("labels_values", fmt.Sprintf("%v", labels)), + slog.String("recover", fmt.Sprintf("%v", r))) } }() @@ -107,7 +112,11 @@ func (m *MetricStorage) GaugeAdd(metric string, value float64, labels map[string } defer func() { if r := recover(); r != nil { - m.logger.Errorf("Metric gauge add %s %v with %v: %v", m.resolveMetricName(metric), LabelNames(labels), labels, r) + m.logger.Error("Metric gauge add", + slog.String("metric", m.resolveMetricName(metric)), + slog.String("labels", strings.Join(LabelNames(labels), ", ")), + slog.String("labels_values", fmt.Sprintf("%v", labels)), + slog.String("recover", fmt.Sprintf("%v", r))) } }() @@ -132,7 +141,11 @@ func (m *MetricStorage) RegisterGauge(metric string, labels map[string]string) * defer func() { if r := recover(); r != nil { - m.logger.Errorf("Create metric gauge %s %v with %v: %v", metricName, LabelNames(labels), labels, r) + m.logger.Error("Create metric gauge", + slog.String("metric", m.resolveMetricName(metric)), + slog.String("labels", strings.Join(LabelNames(labels), ", ")), + slog.String("labels_values", fmt.Sprintf("%v", labels)), + slog.String("recover", fmt.Sprintf("%v", r))) } }() @@ -144,7 +157,7 @@ func (m *MetricStorage) RegisterGauge(metric string, labels map[string]string) * return vec } - m.logger.Infof("Create metric gauge %s", metricName) + m.logger.Info("Create metric gauge", slog.String("name", metricName)) vec = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: metricName, @@ -165,13 +178,17 @@ func (m *MetricStorage) CounterAdd(metric string, value float64, labels map[stri } defer func() { if r := recover(); r != nil { - m.logger.Errorf("Metric counter add %s %v with %v: %v", m.resolveMetricName(metric), LabelNames(labels), labels, r) + m.logger.Error("Metric counter add", + slog.String("metric", m.resolveMetricName(metric)), + slog.String("labels", strings.Join(LabelNames(labels), ", ")), + slog.String("labels_values", fmt.Sprintf("%v", labels)), + slog.String("recover", fmt.Sprintf("%v", r))) } }() m.Counter(metric, labels).With(labels).Add(value) } -// Counter +// Counter ... 
func (m *MetricStorage) Counter(metric string, labels map[string]string) *prometheus.CounterVec { m.countersLock.RLock() vec, ok := m.Counters[metric] @@ -189,7 +206,11 @@ func (m *MetricStorage) RegisterCounter(metric string, labels map[string]string) defer func() { if r := recover(); r != nil { - m.logger.Errorf("Create metric counter %s %v with %v: %v", metricName, LabelNames(labels), labels, r) + m.logger.Error("Create metric counter", + slog.String("metric", metricName), + slog.String("labels", strings.Join(LabelNames(labels), ", ")), + slog.String("labels_values", fmt.Sprintf("%v", labels)), + slog.String("recover", fmt.Sprintf("%v", r))) } }() @@ -201,7 +222,7 @@ func (m *MetricStorage) RegisterCounter(metric string, labels map[string]string) return vec } - m.logger.Infof("Create metric counter %s", metricName) + m.logger.Info("Create metric counter", slog.String("name", metricName)) vec = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: metricName, @@ -215,13 +236,18 @@ func (m *MetricStorage) RegisterCounter(metric string, labels map[string]string) } // Histograms + func (m *MetricStorage) HistogramObserve(metric string, value float64, labels map[string]string, buckets []float64) { if m == nil { return } defer func() { if r := recover(); r != nil { - m.logger.Errorf("Metric histogram observe %s %v with %v: %v", m.resolveMetricName(metric), LabelNames(labels), labels, r) + m.logger.Error("Metric histogram observe", + slog.String("metric", m.resolveMetricName(metric)), + slog.String("labels", strings.Join(LabelNames(labels), ", ")), + slog.String("labels_values", fmt.Sprintf("%v", labels)), + slog.String("recover", fmt.Sprintf("%v", r))) } }() m.Histogram(metric, labels, buckets).With(labels).Observe(value) @@ -242,7 +268,11 @@ func (m *MetricStorage) RegisterHistogram(metric string, labels map[string]strin defer func() { if r := recover(); r != nil { - m.logger.Errorf("Create metric histogram %s %v with %v: %v", metricName, LabelNames(labels), labels, r) + m.logger.Error("Create metric histogram", + slog.String("metric", metricName), + slog.String("labels", strings.Join(LabelNames(labels), ", ")), + slog.String("labels_values", fmt.Sprintf("%v", labels)), + slog.String("recover", fmt.Sprintf("%v", r))) } }() @@ -254,7 +284,7 @@ func (m *MetricStorage) RegisterHistogram(metric string, labels map[string]strin return vec } - m.logger.Infof("Create metric histogram %s", metricName) + m.logger.Info("Create metric histogram", slog.String("name", metricName)) b, has := m.HistogramBuckets[metric] // This shouldn't happen except when entering this concurrently // If there are buckets for this histogram about to be registered, keep them diff --git c/pkg/metric_storage/vault/vault.go i/pkg/metric_storage/vault/vault.go index 118107f..1d9b9d8 100644 --- c/pkg/metric_storage/vault/vault.go +++ i/pkg/metric_storage/vault/vault.go @@ -33,7 +33,7 @@ func (v *GroupedVault) SetRegisterer(r prometheus.Registerer) { v.registerer = r } -// ClearAllMetrics takes each collector in collectors and clear all metrics by group. +// ExpireGroupMetrics takes each collector in collectors and clears all metrics by group. 
func (v *GroupedVault) ExpireGroupMetrics(group string) { v.mtx.Lock() for _, collector := range v.collectors { @@ -98,7 +98,7 @@ func (v *GroupedVault) CounterAdd(group string, name string, value float64, labe metricName := v.resolveMetricNameFunc(name) c, err := v.GetOrCreateCounterCollector(metricName, LabelNames(labels)) if err != nil { - log.Errorf("CounterAdd: %v", err) + log.Error("CounterAdd", log.Err(err)) return } c.Add(group, value, labels) @@ -108,7 +108,7 @@ func (v *GroupedVault) GaugeSet(group string, name string, value float64, labels metricName := v.resolveMetricNameFunc(name) c, err := v.GetOrCreateGaugeCollector(metricName, LabelNames(labels)) if err != nil { - log.Errorf("GaugeSet: %v", err) + log.Error("GaugeSet", log.Err(err)) return } c.Set(group, value, labels) diff --git c/pkg/schedule_manager/schedule_manager.go i/pkg/schedule_manager/schedule_manager.go index 2ee10c4..b62be1a 100644 --- c/pkg/schedule_manager/schedule_manager.go +++ i/pkg/schedule_manager/schedule_manager.go @@ -2,6 +2,7 @@ package schedulemanager import ( "context" + "log/slog" "github.com/deckhouse/deckhouse/pkg/log" "gopkg.in/robfig/cron.v2" @@ -67,11 +68,11 @@ func (sm *scheduleManager) Add(newEntry smtypes.ScheduleEntry) { // The error can occur in case of bad format of crontab string. // All crontab strings should be validated before add. entryId, _ := sm.cron.AddFunc(newEntry.Crontab, func() { - logEntry.Debugf("fire schedule event for entry '%s'", newEntry.Crontab) + logEntry.Debug("fire schedule event for entry", slog.String("name", newEntry.Crontab)) sm.ScheduleCh <- newEntry.Crontab }) - logEntry.Debugf("entry '%s' added", newEntry.Crontab) + logEntry.Debug("entry added", slog.String("name", newEntry.Crontab)) sm.Entries[newEntry.Crontab] = CronEntry{ EntryID: entryId, @@ -109,7 +110,8 @@ func (sm *scheduleManager) Remove(delEntry smtypes.ScheduleEntry) { if len(sm.Entries[delEntry.Crontab].Ids) == 0 { sm.cron.Remove(sm.Entries[delEntry.Crontab].EntryID) delete(sm.Entries, delEntry.Crontab) - sm.logger.With("operator.component", "scheduleManager").Debugf("entry '%s' deleted", delEntry.Crontab) + sm.logger.With("operator.component", "scheduleManager"). + Debug("entry deleted", slog.String("name", delEntry.Crontab)) } } diff --git c/pkg/shell-operator/combine_binding_context.go i/pkg/shell-operator/combine_binding_context.go index 5b05e10..4aae583 100644 --- c/pkg/shell-operator/combine_binding_context.go +++ i/pkg/shell-operator/combine_binding_context.go @@ -2,6 +2,7 @@ package shell_operator import ( "fmt" + "log/slog" bctx "github.com/flant/shell-operator/pkg/hook/binding_context" . "github.com/flant/shell-operator/pkg/hook/task_metadata" @@ -118,7 +119,11 @@ func (op *ShellOperator) combineBindingContextForHook(tqs *queue.TaskQueueSet, q } else { compactMsg = fmt.Sprintf("are combined to %d contexts", len(combinedContext)) } - op.logger.Infof("Binding contexts from %d tasks %s. 
%d tasks are dropped from queue '%s'", len(otherTasks)+1, compactMsg, len(tasksFilter)-1, t.GetQueueName()) + op.logger.Info("Binding contexts are combined, excess tasks are dropped from queue", + slog.Int("count", len(otherTasks)+1), + slog.String("message", compactMsg), + slog.Int("dropped", len(tasksFilter)-1), + slog.String("queue", t.GetQueueName())) res.BindingContexts = compactedContext res.MonitorIDs = monitorIDs diff --git c/pkg/shell-operator/http_server.go i/pkg/shell-operator/http_server.go index d9a9be3..aef13d0 100644 --- c/pkg/shell-operator/http_server.go +++ i/pkg/shell-operator/http_server.go @@ -3,7 +3,9 @@ package shell_operator import ( "bytes" "context" + "errors" "fmt" + "log/slog" "net/http" "strings" "time" @@ -32,11 +34,13 @@ func (bhs *baseHTTPServer) Start(ctx context.Context) { } go func() { - if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { - log.Fatalf("base http server listen: %s\n", err) + if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + log.Fatal("base http server listen", log.Err(err)) } }() - log.Infof("base http server started at %s:%s", bhs.address, bhs.port) + log.Info("base http server started", + slog.String("address", bhs.address), + slog.String("port", bhs.port)) go func() { <-ctx.Done() @@ -49,7 +53,7 @@ func (bhs *baseHTTPServer) Start(ctx context.Context) { }() if err := srv.Shutdown(cctx); err != nil { - log.Fatalf("base http server shutdown failed:%+v", err) + log.Fatal("base http server shutdown failed", log.Err(err)) } }() } diff --git c/pkg/shell-operator/manager_events_handler.go i/pkg/shell-operator/manager_events_handler.go index 21f37be..e0df9a5 100644 --- c/pkg/shell-operator/manager_events_handler.go +++ i/pkg/shell-operator/manager_events_handler.go @@ -2,6 +2,7 @@ package shell_operator import ( "context" + "log/slog" "github.com/deckhouse/deckhouse/pkg/log" @@ -81,7 +82,7 @@ func (m *ManagerEventsHandler) Start() { } case <-m.ctx.Done(): - logEntry.Infof("Stop") + logEntry.Info("Stop") return } @@ -89,7 +90,9 @@ func (m *ManagerEventsHandler) Start() { for _, resTask := range tailTasks { q := tqs.GetByName(resTask.GetQueueName()) if q == nil { - log.Errorf("Possible bug!!! Got task for queue '%s' but queue is not created yet. task: %s", resTask.GetQueueName(), resTask.GetDescription()) + log.Error("Possible bug!!! Got task for queue but queue is not created yet.", + slog.String("queueName", resTask.GetQueueName()), + slog.String("description", resTask.GetDescription())) } else { q.AddLast(resTask) } diff --git c/pkg/shell-operator/operator.go i/pkg/shell-operator/operator.go index 42044e1..7d6146f 100644 --- c/pkg/shell-operator/operator.go +++ i/pkg/shell-operator/operator.go @@ -3,6 +3,7 @@ package shell_operator import ( "context" "fmt" + "log/slog" "time" "github.com/deckhouse/deckhouse/pkg/log" @@ -136,7 +137,7 @@ func (op *ShellOperator) initHookManager() error { "binding": string(types.OnKubernetesEvent), } logEntry := utils.EnrichLoggerWithLabels(op.logger, logLabels) - logEntry.Debugf("Create tasks for 'kubernetes' event '%s'", kubeEvent.String()) + logEntry.Debug("Create tasks for 'kubernetes' event", slog.String("name", kubeEvent.String())) var tasks []task.Task op.HookManager.HandleKubeEvent(kubeEvent, func(hook *hook.Hook, info controller.BindingExecutionInfo) { @@ -154,7 +155,7 @@ func (op *ShellOperator) initHookManager() error { tasks = append(tasks, newTask.WithQueuedAt(time.Now())) logEntry.With("queue", info.QueueName). 
- Infof("queue task %s", newTask.GetDescription()) + Info("queue task", slog.String("name", newTask.GetDescription())) }) return tasks @@ -165,7 +166,7 @@ func (op *ShellOperator) initHookManager() error { "binding": string(types.Schedule), } logEntry := utils.EnrichLoggerWithLabels(op.logger, logLabels) - logEntry.Debugf("Create tasks for 'schedule' event '%s'", crontab) + logEntry.Debug("Create tasks for 'schedule' event", slog.String("name", crontab)) var tasks []task.Task op.HookManager.HandleScheduleEvent(crontab, func(hook *hook.Hook, info controller.BindingExecutionInfo) { @@ -183,7 +184,7 @@ func (op *ShellOperator) initHookManager() error { tasks = append(tasks, newTask.WithQueuedAt(time.Now())) logEntry.With("queue", info.QueueName). - Infof("queue task %s", newTask.GetDescription()) + Info("queue task", slog.String("name", newTask.GetDescription())) }) return tasks @@ -229,7 +230,10 @@ func (op *ShellOperator) initValidatingWebhookManager() error { "event": string(eventBindingType), } logEntry := utils.EnrichLoggerWithLabels(op.logger, logLabels) - logEntry.Debugf("Handle '%s' event '%s' '%s'", eventBindingType, event.ConfigurationId, event.WebhookId) + logEntry.Debug("Handle event", + slog.String("type", string(eventBindingType)), + slog.String("configurationId", event.ConfigurationId), + slog.String("webhookID", event.WebhookId)) var admissionTask task.Task op.HookManager.HandleAdmissionEvent(event, func(hook *hook.Hook, info controller.BindingExecutionInfo) { @@ -248,7 +252,10 @@ func (op *ShellOperator) initValidatingWebhookManager() error { // Assert exactly one task is created. if admissionTask == nil { - logEntry.Errorf("Possible bug!!! No hook found for '%s' event '%s' '%s'", string(types.KubernetesValidating), event.ConfigurationId, event.WebhookId) + logEntry.Error("Possible bug!!! 
No hook found for event", + slog.String("type", string(types.KubernetesValidating)), + slog.String("configurationId", event.ConfigurationId), + slog.String("webhookID", event.WebhookId)) return nil, fmt.Errorf("no hook found for '%s' '%s'", event.ConfigurationId, event.WebhookId) } @@ -264,7 +271,8 @@ func (op *ShellOperator) initValidatingWebhookManager() error { admissionProp := admissionTask.GetProp("admissionResponse") admissionResponse, ok := admissionProp.(*admission.Response) if !ok { - logEntry.Errorf("'admissionResponse' task prop is not of type *AdmissionResponse: %T", admissionProp) + logEntry.Error("'admissionResponse' task prop is not of type *AdmissionResponse", + slog.String("type", fmt.Sprintf("%T", admissionProp))) return nil, fmt.Errorf("hook task prop error") } return admissionResponse, nil @@ -318,7 +326,10 @@ func (op *ShellOperator) conversionEventHandler(crdName string, request *v1.Conv logEntry := utils.EnrichLoggerWithLabels(op.logger, logLabels) sourceVersions := conversion.ExtractAPIVersions(request.Objects) - logEntry.Infof("Handle '%s' event for crd/%s: %d objects with versions %v", string(types.KubernetesConversion), crdName, len(request.Objects), sourceVersions) + logEntry.Info("Handle kubernetesCustomResourceConversion event for crd", + slog.String("name", crdName), + slog.Int("len", len(request.Objects)), + slog.Any("versions", sourceVersions)) done := false for _, srcVer := range sourceVersions { @@ -330,7 +341,9 @@ func (op *ShellOperator) conversionEventHandler(crdName string, request *v1.Conv if len(convPath) == 0 { continue } - logEntry.Infof("Find conversion path for %s: %v", rule.String(), convPath) + logEntry.Info("Find conversion path for rule", + slog.String("name", rule.String()), + slog.Any("value", convPath)) for _, convRule := range convPath { var convTask task.Task @@ -364,7 +377,8 @@ func (op *ShellOperator) conversionEventHandler(crdName string, request *v1.Conv prop := convTask.GetProp("conversionResponse") response, ok := prop.(*conversion.Response) if !ok { - logEntry.Errorf("'conversionResponse' task prop is not of type *conversion.Response: %T", prop) + logEntry.Error("'conversionResponse' task prop is not of type *conversion.Response", + slog.String("type", fmt.Sprintf("%T", prop))) return nil, fmt.Errorf("hook task prop error") } @@ -422,7 +436,7 @@ func (op *ShellOperator) taskHandler(t task.Task) queue.TaskResult { taskHook := op.HookManager.GetHook(hookMeta.HookName) taskHook.HookController.EnableScheduleBindings() - taskLogEntry.Infof("Schedule binding for hook enabled successfully") + taskLogEntry.Info("Schedule binding for hook enabled successfully") res.Status = "Success" } @@ -478,11 +492,14 @@ func (op *ShellOperator) taskHandleEnableKubernetesBindings(t task.Task) queue.T if err != nil { errors = 1.0 t.UpdateFailureMessage(err.Error()) - taskLogEntry.Errorf("Enable Kubernetes binding for hook failed. Will retry after delay. Failed count is %d. Error: %s", t.GetFailureCount()+1, err) + taskLogEntry.Error("Enable Kubernetes binding for hook failed. 
Will retry after delay.", + slog.Int("failedCount", t.GetFailureCount()+1), + log.Err(err)) res.Status = "Fail" } else { success = 1.0 - taskLogEntry.Infof("Kubernetes bindings for hook are enabled successfully, %d tasks generated", len(hookRunTasks)) + taskLogEntry.Info("Kubernetes bindings for hook are enabled successfully", + slog.Int("count", len(hookRunTasks))) res.Status = "Success" now := time.Now() for _, t := range hookRunTasks { @@ -580,18 +597,20 @@ func (op *ShellOperator) taskHandleHookRun(t task.Task) queue.TaskResult { if err != nil { if hookMeta.AllowFailure { allowed = 1.0 - taskLogEntry.Infof("Hook failed, but allowed to fail: %v", err) + taskLogEntry.Info("Hook failed, but allowed to fail", log.Err(err)) res.Status = "Success" } else { errors = 1.0 t.UpdateFailureMessage(err.Error()) t.WithQueuedAt(time.Now()) // Reset queueAt for correct results in 'task_wait_in_queue' metric. - taskLogEntry.Errorf("Hook failed. Will retry after delay. Failed count is %d. Error: %s", t.GetFailureCount()+1, err) + taskLogEntry.Error("Hook failed. Will retry after delay.", + slog.Int("failedCount", t.GetFailureCount()+1), + log.Err(err)) res.Status = "Fail" } } else { success = 1.0 - taskLogEntry.Infof("Hook executed successfully") + taskLogEntry.Info("Hook executed successfully") res.Status = "Success" } op.MetricStorage.CounterAdd("{PREFIX}hook_run_allowed_errors_total", allowed, metricLabels) @@ -612,7 +631,7 @@ func (op *ShellOperator) taskHandleHookRun(t task.Task) queue.TaskResult { func (op *ShellOperator) handleRunHook(t task.Task, taskHook *hook.Hook, hookMeta task_metadata.HookMetadata, taskLogEntry *log.Logger, hookLogLabels map[string]string, metricLabels map[string]string) error { for _, info := range taskHook.HookController.SnapshotsInfo() { - taskLogEntry.Debugf("snapshot info: %s", info) + taskLogEntry.Debug("snapshot info", slog.String("info", info)) } result, err := taskHook.Run(hookMeta.BindingType, hookMeta.BindingContext, hookLogLabels) @@ -631,8 +650,12 @@ func (op *ShellOperator) handleRunHook(t task.Task, taskHook *hook.Hook, hookMet return err } - if result != nil && result.Usage != nil { - taskLogEntry.Debugf("Usage: %+v", result.Usage) + if result == nil { + return nil + } + + if result.Usage != nil { + taskLogEntry.Debug("Usage", slog.String("value", fmt.Sprintf("%+v", result.Usage))) op.MetricStorage.HistogramObserve("{PREFIX}hook_run_sys_seconds", result.Usage.Sys.Seconds(), metricLabels, nil) op.MetricStorage.HistogramObserve("{PREFIX}hook_run_user_seconds", result.Usage.User.Seconds(), metricLabels, nil) op.MetricStorage.GaugeSet("{PREFIX}hook_run_max_rss_bytes", float64(result.Usage.MaxRss)*1024, metricLabels) @@ -661,19 +684,21 @@ func (op *ShellOperator) handleRunHook(t task.Task, taskHook *hook.Hook, hookMet // Save validatingResponse in task props for future use. if result.AdmissionResponse != nil { t.SetProp("admissionResponse", result.AdmissionResponse) - taskLogEntry.Infof("AdmissionResponse from hook: %s", result.AdmissionResponse.Dump()) + taskLogEntry.Info("AdmissionResponse from hook", + slog.String("value", result.AdmissionResponse.Dump())) } // Save conversionResponse in task props for future use. 
if result.ConversionResponse != nil { t.SetProp("conversionResponse", result.ConversionResponse) - taskLogEntry.Infof("ConversionResponse from hook: %s", result.ConversionResponse.Dump()) + taskLogEntry.Info("ConversionResponse from hook", + slog.String("value", result.ConversionResponse.Dump())) } return nil } -// combineBindingContextForHook combines binding contexts from a sequence of task with similar +// CombineBindingContextForHook combines binding contexts from a sequence of task with similar // hook name and task type into array of binding context and delete excess tasks from queue. // // Also, sequences of binding contexts with similar group are compacted in one binding context. @@ -712,7 +737,7 @@ func (op *ShellOperator) CombineBindingContextForHook(q *queue.TaskQueue, t task } nextHookName := hm.(task_metadata.HookNameAccessor).GetHookName() // Only tasks for the same hook and of the same type can be combined (HookRun cannot be combined with OnStartup). - // Using stopCombineFn function more stricter combine rules can be defined. + // Using stopCombineFn function stricter combine rules can be defined. if nextHookName == hookName && t.GetType() == tsk.GetType() { if stopCombineFn != nil { stopIterate = stopCombineFn(tsk) @@ -777,7 +802,11 @@ func (op *ShellOperator) CombineBindingContextForHook(q *queue.TaskQueue, t task } else { compactMsg = fmt.Sprintf("are combined to %d contexts", len(combinedContext)) } - log.Infof("Binding contexts from %d tasks %s. %d tasks are dropped from queue '%s'", len(otherTasks)+1, compactMsg, len(tasksFilter)-1, t.GetQueueName()) + log.Info("Binding contexts from tasks. Tasks are dropped from queue", + slog.Int("count", len(otherTasks)+1), + slog.String("tasks", compactMsg), + slog.Int("filteredCount", len(tasksFilter)-1), + slog.String("queueName", t.GetQueueName())) res.BindingContexts = compactedContext res.MonitorIDs = monitorIDs @@ -798,7 +827,7 @@ func (op *ShellOperator) bootstrapMainQueue(tqs *queue.TaskQueueSet) { // Add tasks to run OnStartup bindings onStartupHooks, err := op.HookManager.GetHooksInOrder(types.OnStartup) if err != nil { - logEntry.Errorf("%v", err) + logEntry.Error("add tasks to run OnStartup bindings", log.Err(err)) return } @@ -816,7 +845,9 @@ func (op *ShellOperator) bootstrapMainQueue(tqs *queue.TaskQueueSet) { }). WithQueuedAt(time.Now()) mainQueue.AddLast(newTask) - logEntry.Infof("queue task %s with hook %s", newTask.GetDescription(), hookName) + logEntry.Info("queue task with hook", + slog.String("task", newTask.GetDescription()), + slog.String("hook", hookName)) } // Add tasks to enable kubernetes monitors and schedules for each hook @@ -831,7 +862,9 @@ func (op *ShellOperator) bootstrapMainQueue(tqs *queue.TaskQueueSet) { }). WithQueuedAt(time.Now()) mainQueue.AddLast(newTask) - logEntry.Infof("queue task %s for hook %s", newTask.GetDescription(), hookName) + logEntry.Info("queue task with hook", + slog.String("task", newTask.GetDescription()), + slog.String("hook", hookName)) } if h.GetConfig().HasBinding(types.Schedule) { @@ -842,7 +875,9 @@ func (op *ShellOperator) bootstrapMainQueue(tqs *queue.TaskQueueSet) { }). 
WithQueuedAt(time.Now()) mainQueue.AddLast(newTask) - logEntry.Infof("queue task %s with hook %s", newTask.GetDescription(), hookName) + logEntry.Info("queue task with hook", + slog.String("task", newTask.GetDescription()), + slog.String("hook", hookName)) } } } diff --git c/pkg/task/queue/task_queue.go i/pkg/task/queue/task_queue.go index c404834..54c0fab 100644 --- c/pkg/task/queue/task_queue.go +++ i/pkg/task/queue/task_queue.go @@ -3,6 +3,7 @@ package queue import ( "context" "fmt" + "log/slog" "os" "strings" "sync" @@ -383,7 +384,7 @@ func (q *TaskQueue) debugf(format string, args ...interface{}) { if !q.debug { return } - log.Debugf(format, args...) + log.Debug("", fmt.Sprintf(format, args...)) } func (q *TaskQueue) Stop() { @@ -398,7 +399,7 @@ func (q *TaskQueue) Start() { } if q.Handler == nil { - log.Errorf("queue %s: should set handler before start", q.Name) + log.Error("should set handler before start in queue", slog.String("name", q.Name)) q.Status = "no handler set" return } @@ -411,7 +412,7 @@ func (q *TaskQueue) Start() { t := q.waitForTask(sleepDelay) if t == nil { q.Status = "stop" - log.Infof("queue '%s' stopped", q.Name) + log.Info("queue stopped", slog.String("name", q.Name)) return } @@ -424,10 +425,10 @@ func (q *TaskQueue) Start() { q.Status = "run first task" taskRes := q.Handler(t) - // Chec…
Signed-off-by: Evsyukov Denis <[email protected]>
Signed-off-by: Evsyukov Denis <[email protected]>
diff --git c/pkg/executor/executor.go i/pkg/executor/executor.go
index 2b4c1f4..56060ea 100644
--- c/pkg/executor/executor.go
+++ i/pkg/executor/executor.go
@@ -82,7 +82,9 @@ func NewExecutor(dir string, entrypoint string, args []string, envs []string) *E
 }
 
 func (e *Executor) Output() ([]byte, error) {
-	e.logger.Debug("Executing command", strings.Join(e.cmd.Args, " "), e.cmd.Dir)
+	e.logger.Debug("Executing command",
+		slog.String("command", strings.Join(e.cmd.Args, " ")),
+		slog.String("dir", e.cmd.Dir))
 	return e.cmd.Output()
 }
@@ -98,7 +100,9 @@ func (e *Executor) RunAndLogLines(logLabels map[string]string) (*CmdUsage, error
 	stdoutLogEntry := logEntry.With("output", "stdout")
 	stderrLogEntry := logEntry.With("output", "stderr")
 
-	log.Debug("Executing command", slog.String("command", strings.Join(e.cmd.Args, " ")), slog.String("dir", e.cmd.Dir))
+	log.Debug("Executing command",
+		slog.String("command", strings.Join(e.cmd.Args, " ")),
+		slog.String("dir", e.cmd.Dir))
 
 	plo := &proxyLogger{e.logProxyHookJSON, e.proxyJsonKey, stdoutLogEntry, make([]byte, 0)}
 	ple := &proxyLogger{e.logProxyHookJSON, e.proxyJsonKey, stderrLogEntry, make([]byte, 0)}
diff --git c/pkg/executor/executor_test.go i/pkg/executor/executor_test.go
index 7c707d0..04fa272 100644
--- c/pkg/executor/executor_test.go
+++ i/pkg/executor/executor_test.go
@@ -107,8 +107,8 @@ func TestRunAndLogLines(t *testing.T) {
 		_, err := ex.RunAndLogLines(map[string]string{"a": "b"})
 		assert.NoError(t, err)
 
-		assert.Equal(t, buf.String(), `{"level":"debug","msg":"json log line not map[string]interface{}","source":"executor/executor.go:173","a":"b","line":["a","b","c"],"output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n"+
-			`{"level":"info","msg":"[\"a\",\"b\",\"c\"]\n","source":"executor/executor.go:176","a":"b","output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n")
+		assert.Equal(t, buf.String(), `{"level":"debug","msg":"json log line not map[string]interface{}","source":"executor/executor.go:177","a":"b","line":["a","b","c"],"output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n"+
+			`{"level":"info","msg":"[\"a\",\"b\",\"c\"]\n","source":"executor/executor.go:180","a":"b","output":"stdout","time":"2006-01-02T15:04:05Z"}`+"\n")
 
 		buf.Reset()
 	})
diff --git c/pkg/hook/controller/conversion_bindings_controller.go i/pkg/hook/controller/conversion_bindings_controller.go
index 3975f82..a7920ef 100644
--- c/pkg/hook/controller/conversion_bindings_controller.go
+++ i/pkg/hook/controller/conversion_bindings_controller.go
@@ -84,7 +84,7 @@ func (c *ConversionBindingsController) CanHandleEvent(crdName string, _ *v1.Conv
 func (c *ConversionBindingsController) HandleEvent(crdName string, request *v1.ConversionRequest, rule conversion.Rule) BindingExecutionInfo {
 	_, hasKey := c.Links[crdName]
 	if !hasKey {
-		log.Error("Possible bug!!! No binding for conversion event for crd", slog.String("name", crdName))
+		log.Error("Possible bug!!! No binding for conversion event for crd", slog.String("crd", crdName))
 		return BindingExecutionInfo{
 			BindingContext: []bctx.BindingContext{},
 			AllowFailure:   false,
diff --git c/pkg/kube_events_manager/namespace_informer.go i/pkg/kube_events_manager/namespace_informer.go
index 2920641..a97c6bb 100644
--- c/pkg/kube_events_manager/namespace_informer.go
+++ i/pkg/kube_events_manager/namespace_informer.go
@@ -124,7 +124,7 @@ func (ni *namespaceInformer) start() {
 	log.Debug("Run namespace informer", slog.String("name", ni.Monitor.Metadata.DebugName))
 	if ni.SharedInformer == nil {
 		log.Error("Possible BUG!!! Start called before createSharedInformer, ShredInformer is nil",
-			slog.String("name", ni.Monitor.Metadata.DebugName))
+			slog.String("debugName", ni.Monitor.Metadata.DebugName))
 		return
 	}
 	cctx, cancel := context.WithCancel(ni.ctx)
@@ -140,10 +140,10 @@
 		return ni.SharedInformer.HasSynced(), nil
 	}); err != nil {
 		ni.Monitor.Logger.Error("Cache is not synced for informer",
-			slog.String("name", ni.Monitor.Metadata.DebugName))
+			slog.String("debugName", ni.Monitor.Metadata.DebugName))
 	}
 
-	log.Debug("Informer is ready", slog.String("name", ni.Monitor.Metadata.DebugName))
+	log.Debug("Informer is ready", slog.String("debugName", ni.Monitor.Metadata.DebugName))
 }
 
 func (ni *namespaceInformer) pauseHandleEvents() {
diff --git c/pkg/kube_events_manager/resource_informer.go i/pkg/kube_events_manager/resource_informer.go
index 1307a1d..0da0991 100644
--- c/pkg/kube_events_manager/resource_informer.go
+++ i/pkg/kube_events_manager/resource_informer.go
@@ -117,11 +117,10 @@ func (ei *resourceInformer) createSharedInformer() error {
 	if ei.GroupVersionResource, err = ei.KubeClient.GroupVersionResource(ei.Monitor.ApiVersion, ei.Monitor.Kind); err != nil {
 		return fmt.Errorf("%s: Cannot get GroupVersionResource info for apiVersion '%s' kind '%s' from api-server. Possibly CRD is not created before informers are started. Error was: %w", ei.Monitor.Metadata.DebugName, ei.Monitor.ApiVersion, ei.Monitor.Kind, err)
 	}
-
-	log.Debug("%GVR for kind",
+	log.Debug("GVR for kind",
 		slog.String("debugName", ei.Monitor.Metadata.DebugName),
 		slog.String("kind", ei.Monitor.Kind),
-		slog.String("GVR", ei.GroupVersionResource.String()))
+		slog.String("gvr", ei.GroupVersionResource.String()))
 
 	// define tweakListOptions for informer
 	fmtLabelSelector, err := FormatLabelSelector(ei.Monitor.LabelSelector)
@@ -196,7 +195,7 @@ func (ei *resourceInformer) loadExistedObjects() error {
 		Namespace(ei.Namespace).
 		List(context.TODO(), ei.ListOptions)
 	if err != nil {
-		log.Error("%s: initial list resources of kind '%s': %v",
+		log.Error("initial list resources of kind",
 			slog.String("debugName", ei.Monitor.Metadata.DebugName),
 			slog.String("kind", ei.Monitor.Kind),
 			log.Err(err))
diff --git c/pkg/task/queue/task_queue.go i/pkg/task/queue/task_queue.go
index 54c0fab..44e64bc 100644
--- c/pkg/task/queue/task_queue.go
+++ i/pkg/task/queue/task_queue.go
@@ -384,7 +384,7 @@ func (q *TaskQueue) debugf(format string, args ...interface{}) {
 	if !q.debug {
 		return
 	}
-	log.Debug("", fmt.Sprintf(format, args...))
+	log.Debug("DEBUG", fmt.Sprintf(format, args...))
 }
 
 func (q *TaskQueue) Stop() {
Signed-off-by: Evsyukov Denis <[email protected]>
juev force-pushed the feature/replace-deprecated-logger-methods branch from 1d50a7e to f036912 on December 20, 2024 07:47
The rebase is complete. All conflicts have been resolved.
miklezzzz reviewed Dec 20, 2024
Signed-off-by: Evsyukov Denis <[email protected]>
miklezzzz approved these changes Dec 20, 2024
yalosev approved these changes Dec 23, 2024
ldmonster reviewed Dec 23, 2024
Waiting for a response from observability on log formatting:
- Log message formatting
- Log field names (snake_case? how are field keys currently formatted?)
resolved
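For illustration of the naming question only (hypothetical; the merged PR keeps camelCase keys such as failedCount, debugName, and queueName), the choice affects nothing but the key strings passed to slog:

package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
	err := errors.New("hook exited with code 1")

	// As merged: camelCase attribute keys.
	logger.Error("Hook failed. Will retry after delay.",
		slog.Int("failedCount", 3), slog.Any("error", err))

	// Hypothetical snake_case variant, if observability settles on it.
	logger.Error("Hook failed. Will retry after delay.",
		slog.Int("failed_count", 3), slog.Any("error", err))
}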
ldmonster approved these changes Dec 27, 2024
Overview
Replace the deprecated printf-style logger methods (Debugf, Infof, Errorf) with their structured equivalents that take slog attributes.
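The pattern, as seen throughout the diffs above: a printf-style call with a format string becomes a constant message plus typed slog attributes. A minimal standalone sketch using only Go's standard log/slog package (shell-operator's own logger wraps slog and adds helpers such as log.Err, so the real calls differ slightly):

package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
	queueName := "main"

	// Deprecated style removed by this PR:
	//   log.Errorf("queue %s: should set handler before start", queueName)
	// Structured replacement: constant message, variable parts as attributes.
	logger.Error("should set handler before start in queue", slog.String("name", queueName))

	// Error values get their own attribute; shell-operator uses its log.Err(err)
	// helper, which plain slog approximates with slog.Any.
	err := errors.New("hook exited with code 1")
	logger.Error("Hook failed. Will retry after delay.",
		slog.Int("failedCount", 3),
		slog.Any("error", err))
}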
What this PR does / why we need it
Fixes #685
Special notes for your reviewer
nope