diff --git a/cmd/create.go b/cmd/create.go index e172b91..46875ac 100644 --- a/cmd/create.go +++ b/cmd/create.go @@ -17,8 +17,7 @@ package cmd import ( "github.com/cloud-barista/mc-data-manager/internal/execfunc" - "github.com/cloud-barista/mc-data-manager/internal/log" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -36,9 +35,8 @@ Semi-structured data: json, xml You must enter the data size in GB.`, Run: func(_ *cobra.Command, _ []string) { - logrus.SetFormatter(&log.CustomTextFormatter{CmdName: "create", JobName: "dummy create"}) if err := execfunc.DummyCreate(commandTask); err != nil { - logrus.Errorf("dummy create failed : %v", err) + log.Error().Str("CmdName", "create").Str("JobName", "dummy create").Msgf("dummy create failed : %v", err) } }, } diff --git a/cmd/delete.go b/cmd/delete.go index 6b8eace..09cff26 100644 --- a/cmd/delete.go +++ b/cmd/delete.go @@ -18,8 +18,7 @@ package cmd import ( "os" - "github.com/cloud-barista/mc-data-manager/internal/log" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -35,14 +34,14 @@ var deleteLocalCmd = &cobra.Command{ Use: "local", Run: func(cmd *cobra.Command, args []string) { - logrus.SetFormatter(&log.CustomTextFormatter{CmdName: "delete"}) - logrus.WithFields(logrus.Fields{"jobName": "local delete"}).Info("start deleting local data") + // logrus.SetFormatter(&log.CustomTextFormatter{CmdName: "delete"}) + log.Info().Msg("start deleting local data") if err := os.RemoveAll(commandTask.Directory); err != nil { - logrus.WithFields(logrus.Fields{"jobName": "local delete"}).Errorf("failed to delete local : %v", err) + log.Error().Str("jobName", "local delete").Msgf("failed to delete local : %v", err) return } - logrus.Infof("successfully deleted : %s\n", commandTask.Directory) + log.Info().Msgf("successfully deleted : %s\n", commandTask.Directory) }, } diff --git a/cmd/server.go b/cmd/server.go index 2227ef6..a9dd984 100644 --- 
a/cmd/server.go +++ b/cmd/server.go @@ -16,9 +16,6 @@ limitations under the License. package cmd import ( - log "github.com/cloud-barista/mc-data-manager/internal/zerolog" - "github.com/rs/zerolog" - dmsv "github.com/cloud-barista/mc-data-manager/websrc/serve" "github.com/spf13/cobra" ) @@ -32,7 +29,6 @@ var serverCmd = &cobra.Command{ Short: "Start Web Server", Long: `Start Web Server`, Run: func(cmd *cobra.Command, args []string) { - log.GetInstance().NewLogEntry().WithCmdName("server").WithJobName("web Server").WithLevel(zerolog.InfoLevel).WithMessage("Start Web Server") dmsv.Run(dmsv.InitServer(listenPort, allowIP...), listenPort) }, } diff --git a/cmd/test.go b/cmd/test.go index 9256660..92ff82c 100644 --- a/cmd/test.go +++ b/cmd/test.go @@ -17,8 +17,7 @@ package cmd import ( "github.com/cloud-barista/mc-data-manager/internal/execfunc" - "github.com/cloud-barista/mc-data-manager/internal/log" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -28,9 +27,8 @@ var testCmd = &cobra.Command{ Short: "test-command", Long: `test-command`, Run: func(_ *cobra.Command, _ []string) { - logrus.SetFormatter(&log.CustomTextFormatter{CmdName: "test", JobName: "test dummy create"}) if err := execfunc.DummyCreate(commandTask); err != nil { - logrus.Errorf("test dummy create failed : %v", err) + log.Error().Msgf("test dummy create failed : %v", err) } }, } diff --git a/config/config.go b/config/config.go index 35a6c87..aa5bd3d 100644 --- a/config/config.go +++ b/config/config.go @@ -7,8 +7,66 @@ import ( "os" "path/filepath" "sync" + + "github.com/rs/zerolog/log" + "github.com/spf13/viper" ) +var Settings InitConfig + +type InitConfig struct { + Profile ProfileConfig `mapstructure:"profile"` + Logger LogConfig `mapstructure:"log"` +} + +type ProfileConfig struct { + Default string `mapstructure:"default"` +} + +type LogConfig struct { + ZeroConfig `mapstructure:",squash"` + File LumberConfig `mapstructure:",squash"` +} + +type ZeroConfig 
struct { + LogLevel string `mapstructure:"level"` + LogWriter string `mapstructure:"writer"` +} +type LumberConfig struct { + Path string `mapstructure:"filepath"` + MaxSize int `mapstructure:"maxsize"` + MaxBackups int `mapstructure:"maxbackups"` + MaxAge int `mapstructure:"maxage"` + Compress bool `mapstructure:"compress"` +} + +// init +func Init() { + execPath, err := os.Executable() + if err != nil { + log.Error().Err(err).Msg("Failed to get executable path") + log.Info().Msg("Using Default Config") + } + viper.AddConfigPath(filepath.Join(execPath, "../../data/var/run/data-manager/config")) + viper.AddConfigPath(filepath.Join(execPath, "/data/var/run/data-manager/config")) + viper.AddConfigPath(filepath.Join(execPath, "./")) + viper.AddConfigPath(filepath.Join(execPath, "./config/")) + viper.SetConfigName("config") + + err = viper.ReadInConfig() + if err != nil { + log.Error().Err(err).Msg("Failed to read config file") + return + } + + err = viper.Unmarshal(&Settings) + if err != nil { + log.Fatal().Err(err).Msg("Unable to decode into struct") + } + log.Debug().Msgf("config params : %+v", Settings) + log.Info().Str("loglevel", Settings.Logger.LogLevel).Msg("Logger initialized with loglevel") +} + // ConfigManager structure definition type ConfigManager struct { DefaultProfile string diff --git a/data/var/run/data-manager/config/config.json b/data/var/run/data-manager/config/config.json index ee177e8..6feeb3f 100644 --- a/data/var/run/data-manager/config/config.json +++ b/data/var/run/data-manager/config/config.json @@ -1,3 +1,14 @@ { - "defaultProfile":"default" + "profile":{ + "default":"default" + }, + "log":{ + "level":"debug", + "writer":"both", + "filepath": "./data/var/log/data-manager.log", + "maxsize": 1000, + "maxbackups": 3, + "maxage": 30, + "compress": false + } } \ No newline at end of file diff --git a/data/var/run/data-manager/task/task.json b/data/var/run/data-manager/task/task.json new file mode 100644 index 0000000..0936fe3 --- /dev/null +++ 
b/data/var/run/data-manager/task/task.json @@ -0,0 +1,593 @@ +{ + "tasks": [ + { + "operationId": "", + "meta": { + "serviceType": "nrdbms", + "taskType": "generate", + "taskId": "operation-001-task-0-20240911-164107", + "taskName": "Database Gen Task", + "description": "Gen the main database" + }, + "Directory": "./tmp/backup/dir", + "sourcePoint": { + "provider": "", + "region": "", + "profileName": "admin", + "path": "./tmp/source/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + }, + "targetPoint": { + "provider": "", + "region": "", + "profileName": "default", + "path": "./tmp/backup/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + } + }, + { + "operationId": "", + "meta": { + "serviceType": "nrdbms", + "taskType": "generate", + "taskId": "operation-001-task-0-20240911-164110", + "taskName": "Database Gen Task", + "description": "Gen the main database" + }, + "Directory": "./tmp/backup/dir", + "sourcePoint": { + "provider": "", + "region": "", + "profileName": "admin", + "path": "./tmp/source/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + }, + "targetPoint": { + "provider": "", + "region": "", + "profileName": "default", + "path": "./tmp/backup/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + } + }, + { + "operationId": "", + "meta": { + "serviceType": "nrdbms", + "taskType": "generate", + "taskId": "operation-001-task-0-20240911-172550", + "taskName": "Database Gen Task", + "description": "Gen the main database" + }, + "Directory": "./tmp/backup/dir", + "sourcePoint": { + "provider": "", + "region": "", 
+ "profileName": "admin", + "path": "./tmp/source/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + }, + "targetPoint": { + "provider": "", + "region": "", + "profileName": "default", + "path": "./tmp/backup/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + } + }, + { + "operationId": "", + "meta": { + "serviceType": "nrdbms", + "taskType": "generate", + "taskId": "operation-001-task-0-20240911-172833", + "taskName": "Database Gen Task", + "description": "Gen the main database" + }, + "Directory": "./tmp/backup/dir", + "sourcePoint": { + "provider": "", + "region": "", + "profileName": "admin", + "path": "./tmp/source/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + }, + "targetPoint": { + "provider": "", + "region": "", + "profileName": "default", + "path": "./tmp/backup/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + } + }, + { + "operationId": "", + "meta": { + "serviceType": "nrdbms", + "taskType": "generate", + "taskId": "operation-001-task-0-20240911-173200", + "taskName": "Database Gen Task", + "description": "Gen the main database" + }, + "Directory": "./tmp/backup/dir", + "sourcePoint": { + "provider": "", + "region": "", + "profileName": "admin", + "path": "./tmp/source/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + }, + "targetPoint": { + "provider": "", + "region": "", + "profileName": "default", + "path": "./tmp/backup/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": 
"", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + } + }, + { + "operationId": "", + "meta": { + "serviceType": "nrdbms", + "taskType": "generate", + "taskId": "operation-001-task-0-20240911-180002", + "taskName": "Database Gen Task", + "description": "Gen the main database" + }, + "Directory": "./tmp/backup/dir", + "sourcePoint": { + "provider": "", + "region": "", + "profileName": "admin", + "path": "./tmp/source/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + }, + "targetPoint": { + "provider": "", + "region": "", + "profileName": "default", + "path": "./tmp/backup/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + } + }, + { + "operationId": "", + "meta": { + "serviceType": "nrdbms", + "taskType": "backup", + "taskId": "operation-18542-task-0-20240911-180022", + "taskName": "Database Backup Task", + "description": "Backup the main database" + }, + "Directory": "./tmp/schedule/dummy/NRDB/aws", + "sourcePoint": { + "provider": "", + "region": "", + "profileName": "", + "path": "", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + }, + "targetPoint": { + "provider": "aws", + "region": "ap-northeast-2", + "profileName": "admin", + "path": "", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + } + }, + { + "operationId": "", + "meta": { + "serviceType": "nrdbms", + "taskType": "backup", + "taskId": "operation-18542-task-0-20240911-180024", + "taskName": "Database Backup Task", + "description": "Backup the main database" + }, + "Directory": "./tmp/schedule/dummy/NRDB/gcp", + 
"sourcePoint": { + "provider": "", + "region": "", + "profileName": "", + "path": "", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + }, + "targetPoint": { + "provider": "gcp", + "region": "asia-northeast2", + "profileName": "admin", + "path": "", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + } + }, + { + "operationId": "", + "meta": { + "serviceType": "nrdbms", + "taskType": "backup", + "taskId": "operation-18542-task-0-20240911-180026", + "taskName": "Database Backup Task", + "description": "Backup the main database" + }, + "Directory": "./tmp/schedule/dummy/NRDB/ncp", + "sourcePoint": { + "provider": "", + "region": "", + "profileName": "", + "path": "", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + }, + "targetPoint": { + "provider": "ncp", + "region": "kr", + "profileName": "admin", + "path": "", + "bucket": "", + "endpoint": "", + "host": "p4spq.pub-vpc.mg.naverncp.com", + "port": "17017", + "username": "manager", + "password": "N@mutech7^^7", + "databaseName": "spero", + "databaseId": "", + "projectId": "" + } + } + ], + "flows": [], + "schedules": [ + { + "operationId": "operation-001", + "tag": [ + "gen", + "minutely" + ], + "ScheduleID": "operation-001-schedule-20240911-180002", + "ScheduleName": "", + "tasks": [ + { + "operationId": "", + "meta": { + "serviceType": "nrdbms", + "taskType": "generate", + "taskId": "task-1", + "taskName": "Database Gen Task", + "description": "Gen the main database" + }, + "Directory": "./tmp/backup/dir", + "sourcePoint": { + "provider": "", + "region": "", + "profileName": "admin", + "path": "./tmp/source/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", 
+ "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + }, + "targetPoint": { + "provider": "", + "region": "", + "profileName": "default", + "path": "./tmp/backup/db", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + } + } + ], + "cron": "* * * * *", + "tz": "Asia/Seoul", + "status": "active" + }, + { + "operationId": "operation-18542", + "tag": [ + "backup", + "a minute" + ], + "ScheduleID": "operation-18542-schedule-20240911-180022", + "ScheduleName": "", + "tasks": [ + { + "operationId": "", + "meta": { + "serviceType": "nrdbms", + "taskType": "backup", + "taskName": "Database Backup Task", + "description": "Backup the main database" + }, + "Directory": "./tmp/schedule/dummy/NRDB/aws", + "sourcePoint": { + "provider": "", + "region": "", + "profileName": "", + "path": "", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + }, + "targetPoint": { + "provider": "aws", + "region": "ap-northeast-2", + "profileName": "admin", + "path": "", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + } + } + ], + "cron": "* * * * *", + "tz": "Asia/Seoul", + "status": "active" + }, + { + "operationId": "operation-18542", + "tag": [ + "backup", + "a minute" + ], + "ScheduleID": "operation-18542-schedule-20240911-180024", + "ScheduleName": "", + "tasks": [ + { + "operationId": "", + "meta": { + "serviceType": "nrdbms", + "taskType": "backup", + "taskName": "Database Backup Task", + "description": "Backup the main database" + }, + "Directory": "./tmp/schedule/dummy/NRDB/gcp", + "sourcePoint": { + "provider": "", + "region": "", + "profileName": "", + "path": "", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": 
"", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + }, + "targetPoint": { + "provider": "gcp", + "region": "asia-northeast2", + "profileName": "admin", + "path": "", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + } + } + ], + "cron": "* * * * *", + "tz": "Asia/Seoul", + "status": "active" + }, + { + "operationId": "operation-18542", + "tag": [ + "backup", + "a minute" + ], + "ScheduleID": "operation-18542-schedule-20240911-180026", + "ScheduleName": "", + "tasks": [ + { + "operationId": "", + "meta": { + "serviceType": "nrdbms", + "taskType": "backup", + "taskName": "Database Backup Task", + "description": "Backup the main database" + }, + "Directory": "./tmp/schedule/dummy/NRDB/ncp", + "sourcePoint": { + "provider": "", + "region": "", + "profileName": "", + "path": "", + "bucket": "", + "endpoint": "", + "host": "", + "port": "", + "username": "", + "password": "", + "databaseName": "", + "databaseId": "", + "projectId": "" + }, + "targetPoint": { + "provider": "ncp", + "region": "kr", + "profileName": "admin", + "path": "", + "bucket": "", + "endpoint": "", + "host": "p4spq.pub-vpc.mg.naverncp.com", + "port": "17017", + "username": "manager", + "password": "N@mutech7^^7", + "databaseName": "spero", + "databaseId": "", + "projectId": "" + } + } + ], + "cron": "* * * * *", + "tz": "Asia/Seoul", + "status": "active" + } + ] +} diff --git a/go.mod b/go.mod index cf219a8..96ddd6a 100644 --- a/go.mod +++ b/go.mod @@ -41,6 +41,7 @@ require ( filippo.io/edwards25519 v1.1.0 // indirect github.com/KyleBanks/depth v1.2.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -49,11 +50,20 @@ require ( github.com/go-openapi/spec v0.21.0 // indirect 
github.com/go-openapi/swag v0.23.0 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect + github.com/hashicorp/hcl v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/labstack/gommon v0.4.2 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/swaggo/files/v2 v2.0.1 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect @@ -63,7 +73,10 @@ require ( go.opentelemetry.io/otel/metric v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.9.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/tools v0.24.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) @@ -100,6 +113,7 @@ require ( github.com/labstack/echo/v4 v4.12.0 github.com/montanaflynn/stats v0.7.1 // indirect github.com/spf13/cast v1.7.0 + github.com/spf13/viper v1.19.0 github.com/swaggo/echo-swagger v1.4.1 github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect @@ -113,4 +127,5 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect google.golang.org/grpc v1.65.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) diff --git a/go.sum b/go.sum index 254a2aa..190b267 100644 
--- a/go.sum +++ b/go.sum @@ -87,6 +87,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0= @@ -147,6 +149,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -170,6 +174,8 @@ github.com/labstack/echo/v4 v4.12.0 h1:IKpw49IMryVB2p1a4dzwlhP1O2Tf2E0Ir/450lH+k github.com/labstack/echo/v4 v4.12.0/go.mod h1:UP9Cr2DJXbOK3Kr9ONYzNowSh7HP0aG0ShAyycHSJvM= github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= github.com/labstack/gommon v0.4.2/go.mod 
h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -178,8 +184,12 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -195,25 +205,39 @@ github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/swaggo/echo-swagger v1.4.1 h1:Yf0uPaJWp1uRtDloZALyLnvdBeoEL5Kc7DtnjzO/TUk= github.com/swaggo/echo-swagger v1.4.1/go.mod h1:C8bSi+9yH2FLZsnhqMZLIZddpUxZdBYuNHbtaS1Hljc= github.com/swaggo/files/v2 v2.0.1 h1:XCVJO/i/VosCDsJu1YLpdejGsGnBE9deRMpjN4pJLHk= @@ -251,12 +275,16 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -354,6 +382,10 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/internal/auth/base.go b/internal/auth/base.go index 8edeed6..77a809c 100644 --- a/internal/auth/base.go +++ b/internal/auth/base.go @@ -24,7 +24,6 @@ import ( "strconv" 
"github.com/cloud-barista/mc-data-manager/config" - "github.com/cloud-barista/mc-data-manager/internal/log" "github.com/cloud-barista/mc-data-manager/models" "github.com/cloud-barista/mc-data-manager/pkg/nrdbms/awsdnmdb" "github.com/cloud-barista/mc-data-manager/pkg/nrdbms/gcpfsdb" @@ -36,7 +35,7 @@ import ( "github.com/cloud-barista/mc-data-manager/service/osc" "github.com/cloud-barista/mc-data-manager/service/rdbc" _ "github.com/go-sql-driver/mysql" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) func GetConfig(credPath string, ConfigData *models.CommandTask) error { @@ -53,13 +52,13 @@ func GetConfig(credPath string, ConfigData *models.CommandTask) error { } func preRunProfileE(pName, cmdName string, params *models.ProviderConfig) error { - logrus.Info("initiate a profile scan") + log.Info().Msg("initiate a profile scan") credentailMangeer := config.NewProfileManager() if srcCreds, err := credentailMangeer.LoadCredentialsByProfile(params.ProfileName, params.Provider); err != nil { return fmt.Errorf("get config error : %s", err) } else { - logrus.Infof("initiate a profile scan %v", srcCreds) + log.Info().Interface("credentials", srcCreds).Msg("initiate a profile scan") } switch cmdName { @@ -75,7 +74,7 @@ func preRunProfileE(pName, cmdName string, params *models.ProviderConfig) error } func preRunE(pName string, cmdName string, params *models.CommandTask) error { - logrus.Info("initiate a configuration scan") + log.Info().Msg("initiate a configuration scan") if err := GetConfig(params.TaskFilePath, params); err != nil { return fmt.Errorf("get config error: %s", err) } @@ -93,150 +92,146 @@ func preRunE(pName string, cmdName string, params *models.CommandTask) error { } func PreRun(task string, datamoldParams *models.CommandTask, use string) { - logrus.SetFormatter(&log.CustomTextFormatter{CmdName: use, JobName: task}) - logrus.Infof("launch an %s to %s", use, task) + log.Info().Msgf("launch an %s to %s", use, task) err := preRunE(use, task, 
datamoldParams) if err != nil { - logrus.Errorf("Pre-check for %s operation errors : %v", task, err) + log.Error().Err(err).Msgf("Pre-check for %s operation errors", task) os.Exit(1) } - logrus.Infof("successful pre-check %s into %s", use, task) + log.Info().Msgf("successful pre-check %s into %s", use, task) } func GetOS(params *models.ProviderConfig) (*osc.OSController, error) { var OSC *osc.OSController - logrus.Infof("ProfileName : %s", params.ProfileName) - logrus.Infof("Provider : %s", params.Provider) - logrus.Info("Get Credentail") + log.Info().Str("ProfileName", params.ProfileName).Msg("GetOS") + log.Info().Str("Provider", params.Provider).Msg("GetOS") + log.Info().Msg("Get Credential") credentailManger := config.NewProfileManager() creds, err := credentailManger.LoadCredentialsByProfile(params.ProfileName, params.Provider) if err != nil { - logrus.Errorf("credentail load failed : %v", err) - + log.Error().Err(err).Msg("credential load failed") return nil, err } - if params.Provider == "aws" { + switch params.Provider { + case "aws": awsc, ok := creds.(models.AWSCredentials) if !ok { return nil, errors.New("credential load failed") } - logrus.Infof("AccessKey : %s", awsc.AccessKey) - logrus.Infof("SecretKey : %s", awsc.SecretKey) - logrus.Infof("Region : %s", params.Region) - logrus.Infof("BucketName : %s", params.Bucket) + log.Info().Str("AccessKey", awsc.AccessKey).Msg("AWS Credentials") + log.Info().Str("SecretKey", awsc.SecretKey).Msg("AWS Credentials") + log.Info().Str("Region", params.Region).Msg("AWS Region") + log.Info().Str("BucketName", params.Bucket).Msg("AWS BucketName") s3c, err := config.NewS3Client(awsc.AccessKey, awsc.SecretKey, params.Region) if err != nil { return nil, fmt.Errorf("NewS3Client error : %v", err) } - OSC, err = osc.New(s3fs.New(models.AWS, s3c, params.Bucket, params.Region), osc.WithLogger(logrus.StandardLogger())) + OSC, err = osc.New(s3fs.New(models.AWS, s3c, params.Bucket, params.Region)) if err != nil { return nil, 
fmt.Errorf("osc error : %v", err) } - } else if params.Provider == "gcp" { + case "gcp": gcpc, ok := creds.(models.GCPCredentials) if !ok { return nil, errors.New("credential load failed") } - logrus.Infof("ProjectID : %s", gcpc.ProjectID) - + log.Info().Str("ProjectID", gcpc.ProjectID).Msg("GCP Project") credentialsJson, err := json.Marshal(gcpc) if err != nil { return nil, err } - logrus.Infof("Region : %s", params.Region) - logrus.Infof("BucketName : %s", params.Bucket) + log.Info().Str("Region", params.Region).Msg("GCP Region") + log.Info().Str("BucketName", params.Bucket).Msg("GCP BucketName") gc, err := config.NewGCPClient(string(credentialsJson)) if err != nil { return nil, fmt.Errorf("NewGCPClient error : %v", err) } - OSC, err = osc.New(gcpfs.New(gc, gcpc.ProjectID, params.Bucket, params.Region), osc.WithLogger(logrus.StandardLogger())) + OSC, err = osc.New(gcpfs.New(gc, gcpc.ProjectID, params.Bucket, params.Region)) if err != nil { return nil, fmt.Errorf("osc error : %v", err) } - } else if params.Provider == "ncp" { - + case "ncp": ncpc, ok := creds.(models.NCPCredentials) if !ok { return nil, errors.New("credential load failed") } - logrus.Infof("AccessKey : %s", ncpc.AccessKey) - logrus.Infof("SecretKey : %s", ncpc.SecretKey) - logrus.Infof("Endpoint : %s", params.Endpoint) - logrus.Infof("Region : %s", params.Region) - logrus.Infof("BucketName : %s", params.Bucket) + log.Info().Str("AccessKey", ncpc.AccessKey).Msg("NCP Credentials") + log.Info().Str("SecretKey", ncpc.SecretKey).Msg("NCP Credentials") + log.Info().Str("Endpoint", params.Endpoint).Msg("NCP Endpoint") + log.Info().Str("Region", params.Region).Msg("NCP Region") + log.Info().Str("BucketName", params.Bucket).Msg("NCP BucketName") s3c, err := config.NewS3ClientWithEndpoint(ncpc.AccessKey, ncpc.SecretKey, params.Region, params.Endpoint) if err != nil { return nil, fmt.Errorf("NewS3ClientWithEndpint error : %v", err) } - OSC, err = osc.New(s3fs.New(models.NCP, s3c, params.Bucket, 
params.Region), osc.WithLogger(logrus.StandardLogger())) + OSC, err = osc.New(s3fs.New(models.NCP, s3c, params.Bucket, params.Region)) if err != nil { return nil, fmt.Errorf("osc error : %v", err) } } return OSC, nil } - func GetRDMS(params *models.ProviderConfig) (*rdbc.RDBController, error) { - logrus.Infof("Provider : %s", params.Provider) - logrus.Infof("Username : %s", params.User) - logrus.Infof("Password : %s", params.Password) - logrus.Infof("Host : %s", params.Host) - logrus.Infof("Port : %s", params.Port) + log.Info().Str("Provider", params.Provider).Msg("GetRDMS") + log.Info().Str("Username", params.User).Msg("GetRDMS") + log.Info().Str("Password", params.Password).Msg("GetRDMS") + log.Info().Str("Host", params.Host).Msg("GetRDMS") + log.Info().Str("Port", params.Port).Msg("GetRDMS") dst, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%s)/", params.User, params.Password, params.Host, params.Port)) if err != nil { return nil, err } - return rdbc.New(mysql.New(models.Provider(params.Provider), dst), rdbc.WithLogger(logrus.StandardLogger())) + return rdbc.New(mysql.New(models.Provider(params.Provider), dst)) } func GetNRDMS(params *models.ProviderConfig) (*nrdbc.NRDBController, error) { var NRDBC *nrdbc.NRDBController - logrus.Infof("ProfileName : %s", params.ProfileName) - logrus.Infof("Provider : %s", params.Provider) + log.Info().Str("ProfileName", params.ProfileName).Msg("GetNRDMS") + log.Info().Str("Provider", params.Provider).Msg("GetNRDMS") - logrus.Info("Get Credentail") + log.Info().Msg("Get Credential") credentailManger := config.NewProfileManager() creds, err := credentailManger.LoadCredentialsByProfile(params.ProfileName, params.Provider) if err != nil { - logrus.Errorf("credentail load failed : %v", err) - + log.Error().Err(err).Msg("credential load failed") return nil, err } - if params.Provider == "aws" { + switch params.Provider { + case "aws": awsc, ok := creds.(models.AWSCredentials) if !ok { return nil, errors.New("credential load 
failed") } - logrus.Infof("AccessKey : %s", awsc.AccessKey) - logrus.Infof("SecretKey : %s", awsc.SecretKey) - logrus.Infof("Region : %s", params.Region) + log.Info().Str("AccessKey", awsc.AccessKey).Msg("AWS Credentials") + log.Info().Str("SecretKey", awsc.SecretKey).Msg("AWS Credentials") + log.Info().Str("Region", params.Region).Msg("AWS Region") awsnrdb, err := config.NewDynamoDBClient(awsc.AccessKey, awsc.SecretKey, params.Region) if err != nil { return nil, err } - NRDBC, err = nrdbc.New(awsdnmdb.New(awsnrdb, params.Region), nrdbc.WithLogger(logrus.StandardLogger())) + NRDBC, err = nrdbc.New(awsdnmdb.New(awsnrdb, params.Region)) if err != nil { return nil, err } - } else if params.Provider == "gcp" { + case "gcp": gcpc, ok := creds.(models.GCPCredentials) if !ok { return nil, errors.New("credential load failed") } - logrus.Infof("ProjectID : %s", gcpc.ProjectID) - logrus.Infof("Region : %s", params.Region) + log.Info().Str("ProjectID", gcpc.ProjectID).Msg("GCP Project") + log.Info().Str("Region", params.Region).Msg("GCP Region") credentialsJson, err := json.Marshal(gcpc) if err != nil { @@ -248,15 +243,15 @@ func GetNRDMS(params *models.ProviderConfig) (*nrdbc.NRDBController, error) { return nil, err } - NRDBC, err = nrdbc.New(gcpfsdb.New(gcpnrdb, params.Region), nrdbc.WithLogger(logrus.StandardLogger())) + NRDBC, err = nrdbc.New(gcpfsdb.New(gcpnrdb, params.Region)) if err != nil { return nil, err } - } else if params.Provider == "ncp" { - logrus.Infof("Username : %s", params.User) - logrus.Infof("Password : %s", params.Password) - logrus.Infof("Host : %s", params.Host) - logrus.Infof("Port : %s", params.Port) + case "ncp": + log.Info().Str("Username", params.User).Msg("NCP Credentials") + log.Info().Str("Password", params.Password).Msg("NCP Credentials") + log.Info().Str("Host", params.Host).Msg("NCP Host") + log.Info().Str("Port", params.Port).Msg("NCP Port") port, err := strconv.Atoi(params.Port) if err != nil { return nil, err @@ -267,7 +262,7 @@ func 
GetNRDMS(params *models.ProviderConfig) (*nrdbc.NRDBController, error) { return nil, err } - NRDBC, err = nrdbc.New(ncpmgdb.New(ncpnrdb, params.DatabaseName), nrdbc.WithLogger(logrus.StandardLogger())) + NRDBC, err = nrdbc.New(ncpmgdb.New(ncpnrdb, params.DatabaseName)) if err != nil { return nil, err } diff --git a/internal/auth/nrdb.go b/internal/auth/nrdb.go index dfda194..525ccda 100644 --- a/internal/auth/nrdb.go +++ b/internal/auth/nrdb.go @@ -24,7 +24,7 @@ import ( "github.com/cloud-barista/mc-data-manager/models" "github.com/cloud-barista/mc-data-manager/service/nrdbc" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) func ImportNRDMFunc(params *models.CommandTask) error { @@ -32,7 +32,7 @@ func ImportNRDMFunc(params *models.CommandTask) error { var err error NRDBC, err = GetNRDMS(¶ms.TargetPoint) if err != nil { - logrus.Errorf("NRDBController error importing into nrdbms : %v", err) + log.Error().Msgf("NRDBController error importing into nrdbms : %v", err) return err } @@ -48,7 +48,7 @@ func ImportNRDMFunc(params *models.CommandTask) error { }) if err != nil { - logrus.Errorf("Walk error : %v", err) + log.Error().Msgf("Walk error : %v", err) return err } @@ -58,25 +58,25 @@ func ImportNRDMFunc(params *models.CommandTask) error { file, err := os.Open(jsonFile) if err != nil { - logrus.Errorf("file open error : %v", err) + log.Error().Msgf("file open error : %v", err) return err } defer file.Close() if err := json.NewDecoder(file).Decode(&srcData); err != nil { - logrus.Errorf("file decoding error : %v", err) + log.Error().Msgf("file decoding error : %v", err) return err } fileName := filepath.Base(jsonFile) tableName := fileName[:len(fileName)-len(filepath.Ext(fileName))] - logrus.Infof("Import start: %s", fileName) + log.Info().Msgf("Import start: %s", fileName) if err := NRDBC.Put(tableName, &srcData); err != nil { - logrus.Error("Put error importing into nrdbms") + log.Error().Msgf("Put error importing into nrdbms") return err } - 
logrus.Infof("successfully imported : %s", params.Directory) + log.Info().Msgf("successfully imported : %s", params.Directory) } return nil } @@ -86,29 +86,29 @@ func ExportNRDMFunc(params *models.CommandTask) error { var err error NRDBC, err = GetNRDMS(¶ms.TargetPoint) if err != nil { - logrus.Errorf("NRDBController error importing into nrdbms : %v", err) + log.Error().Msgf("NRDBController error importing into nrdbms : %v", err) return err } tableList, err := NRDBC.ListTables() if err != nil { - logrus.Errorf("ListTables error : %v", err) + log.Error().Msgf("ListTables error : %v", err) return err } var dstData []map[string]interface{} for _, table := range tableList { - logrus.Infof("Export start: %s", table) + log.Info().Msgf("Export start: %s", table) dstData = []map[string]interface{}{} if err := NRDBC.Get(table, &dstData); err != nil { - logrus.Errorf("Get error : %v", err) + log.Error().Msgf("Get error : %v", err) return err } file, err := os.Create(filepath.Join(params.Directory, fmt.Sprintf("%s.json", table))) if err != nil { - logrus.Errorf("File create error : %v", err) + log.Error().Msgf("File create error : %v", err) return err } defer file.Close() @@ -116,12 +116,12 @@ func ExportNRDMFunc(params *models.CommandTask) error { encoder := json.NewEncoder(file) encoder.SetIndent("", " ") if err := encoder.Encode(dstData); err != nil { - logrus.Errorf("data encoding error : %v", err) + log.Error().Msgf("data encoding error : %v", err) return err } - logrus.Infof("successfully exported : %s", file.Name()) + log.Info().Msgf("successfully exported : %s", file.Name()) } - logrus.Infof("successfully exported : %s", params.Directory) + log.Info().Msgf("successfully exported : %s", params.Directory) return nil } @@ -130,25 +130,25 @@ func MigrationNRDMFunc(params *models.CommandTask) error { var srcErr error var dstNRDBC *nrdbc.NRDBController var dstErr error - logrus.Infof("Source Information") + log.Info().Msgf("Source Information") srcNRDBC, srcErr = 
GetNRDMS(¶ms.SourcePoint) if srcErr != nil { - logrus.Errorf("NRDBController error migration into nrdbms : %v", srcErr) + log.Error().Msgf("NRDBController error migration into nrdbms : %v", srcErr) return srcErr } - logrus.Infof("Target Information") + log.Info().Msgf("Target Information") dstNRDBC, dstErr = GetNRDMS(¶ms.TargetPoint) if dstErr != nil { - logrus.Errorf("NRDBController error migration into nrdbms : %v", dstErr) + log.Error().Msgf("NRDBController error migration into nrdbms : %v", dstErr) return dstErr } - logrus.Info("Launch NRDBController Copy") + log.Info().Msgf("Launch NRDBController Copy") if err := srcNRDBC.Copy(dstNRDBC); err != nil { - logrus.Errorf("Copy error copying into nrdbms : %v", err) + log.Error().Msgf("Copy error copying into nrdbms : %v", err) return err } - logrus.Info("successfully migrationed") + log.Info().Msgf("successfully migrationed") return nil } @@ -158,15 +158,15 @@ func DeleteNRDMFunc(params *models.CommandTask) error { NRDBC, err = GetNRDMS(¶ms.TargetPoint) if err != nil { - logrus.Errorf("NRDBController error deleting into nrdbms : %v", err) + log.Error().Msgf("NRDBController error deleting into nrdbms : %v", err) return err } - logrus.Info("Launch NRDBController Delete") + log.Info().Msgf("Launch NRDBController Delete") if err := NRDBC.DeleteTables(params.DeleteTableList...); err != nil { - logrus.Errorf("Delete error deleting into nrdbms : %v", err) + log.Error().Msgf("Delete error deleting into nrdbms : %v", err) return err } - logrus.Info("successfully deleted") + log.Info().Msgf("successfully deleted") return nil } diff --git a/internal/auth/os.go b/internal/auth/os.go index 61aa0c2..84727a9 100644 --- a/internal/auth/os.go +++ b/internal/auth/os.go @@ -18,46 +18,46 @@ package auth import ( "github.com/cloud-barista/mc-data-manager/models" "github.com/cloud-barista/mc-data-manager/service/osc" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) func ImportOSFunc(params *models.CommandTask) error { var 
OSC *osc.OSController var err error - logrus.Infof("User Information") + log.Info().Msgf("User Information") OSC, err = GetOS(¶ms.TargetPoint) if err != nil { - logrus.Errorf("OSController error importing into objectstorage : %v", err) + log.Error().Msgf("OSController error importing into objectstorage : %v", err) return err } - logrus.Info("Launch OSController MPut") + log.Info().Msgf("Launch OSController MPut") if err := OSC.MPut(params.Directory); err != nil { - logrus.Error("MPut error importing into objectstorage") - logrus.Infof("params : %+v", params.TargetPoint) + log.Error().Msgf("MPut error importing into objectstorage") + log.Info().Msgf("params : %+v", params.TargetPoint) return err } - logrus.Infof("successfully imported : %s", params.Directory) + log.Info().Msgf("successfully imported : %s", params.Directory) return nil } func ExportOSFunc(params *models.CommandTask) error { var OSC *osc.OSController var err error - logrus.Infof("User Information") + log.Info().Msgf("User Information") OSC, err = GetOS(¶ms.TargetPoint) if err != nil { - logrus.Errorf("OSController error importing into objectstorage : %v", err) + log.Error().Msgf("OSController error importing into objectstorage : %v", err) return err } - logrus.Info("Launch OSController MGet") + log.Info().Msgf("Launch OSController MGet") if err := OSC.MGet(params.Directory); err != nil { - logrus.Errorf("MGet error exporting into objectstorage : %v", err) + log.Error().Msgf("MGet error exporting into objectstorage : %v", err) return err } - logrus.Infof("successfully exported : %s", params.Directory) + log.Info().Msgf("successfully exported : %s", params.Directory) return nil } @@ -66,44 +66,44 @@ func MigrationOSFunc(params *models.CommandTask) error { var srcErr error var dst *osc.OSController var dstErr error - logrus.Infof("Source Information") + log.Info().Msgf("Source Information") src, srcErr = GetOS(¶ms.TargetPoint) if srcErr != nil { - logrus.Errorf("OSController error migration into 
objectstorage : %v", srcErr) + log.Error().Msgf("OSController error migration into objectstorage : %v", srcErr) return srcErr } - logrus.Infof("Target Information") + log.Info().Msgf("Target Information") dst, dstErr = GetOS(¶ms.TargetPoint) if dstErr != nil { - logrus.Errorf("OSController error migration into objectstorage : %v", dstErr) + log.Error().Msgf("OSController error migration into objectstorage : %v", dstErr) return dstErr } - logrus.Info("Launch OSController Copy") + log.Info().Msgf("Launch OSController Copy") if err := src.Copy(dst); err != nil { - logrus.Errorf("Copy error copying into objectstorage : %v", err) + log.Error().Msgf("Copy error copying into objectstorage : %v", err) return err } - logrus.Info("successfully migrationed") + log.Info().Msgf("successfully migrationed") return nil } func DeleteOSFunc(params *models.CommandTask) error { var OSC *osc.OSController var err error - logrus.Infof("User Information") + log.Info().Msgf("User Information") OSC, err = GetOS(¶ms.TargetPoint) if err != nil { - logrus.Errorf("OSController error importing into objectstorage : %v", err) + log.Error().Msgf("OSController error importing into objectstorage : %v", err) return err } - logrus.Info("Launch OSController Delete") + log.Info().Msgf("Launch OSController Delete") if err := OSC.DeleteBucket(); err != nil { - logrus.Errorf("Delete error deleting into objectstorage : %v", err) + log.Error().Msgf("Delete error deleting into objectstorage : %v", err) return err } - logrus.Info("successfully deleted") + log.Info().Msgf("successfully deleted") return nil } diff --git a/internal/auth/rdb.go b/internal/auth/rdb.go index 0c09b7f..7b59794 100644 --- a/internal/auth/rdb.go +++ b/internal/auth/rdb.go @@ -23,16 +23,16 @@ import ( "github.com/cloud-barista/mc-data-manager/models" "github.com/cloud-barista/mc-data-manager/service/rdbc" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) func ImportRDMFunc(datamoldParams *models.CommandTask) error { var RDBC 
*rdbc.RDBController var err error - logrus.Infof("User Information") + log.Info().Msgf("User Information") RDBC, err = GetRDMS(&datamoldParams.TargetPoint) if err != nil { - logrus.Errorf("RDBController error importing into rdbms : %v", err) + log.Error().Msgf("RDBController error importing into rdbms : %v", err) return err } @@ -47,74 +47,74 @@ func ImportRDMFunc(datamoldParams *models.CommandTask) error { return nil }) if err != nil { - logrus.Errorf("Walk error : %v", err) + log.Error().Msgf("Walk error : %v", err) return err } for _, sqlPath := range sqlList { data, err := os.ReadFile(sqlPath) if err != nil { - logrus.Errorf("ReadFile error : %v", err) + log.Error().Msgf("ReadFile error : %v", err) return err } - logrus.Infof("Import start: %s", sqlPath) + log.Info().Msgf("Import start: %s", sqlPath) if err := RDBC.Put(string(data)); err != nil { - logrus.Error("Put error importing into rdbms") + log.Error().Msgf("Put error importing into rdbms") return err } - logrus.Infof("Import success: %s", sqlPath) + log.Info().Msgf("Import success: %s", sqlPath) } - logrus.Infof("successfully imported : %s", datamoldParams.Directory) + log.Info().Msgf("successfully imported : %s", datamoldParams.Directory) return nil } func ExportRDMFunc(datamoldParams *models.CommandTask) error { var RDBC *rdbc.RDBController var err error - logrus.Infof("User Information") + log.Info().Msgf("User Information") RDBC, err = GetRDMS(&datamoldParams.TargetPoint) if err != nil { - logrus.Errorf("RDBController error importing into rdbms : %v", err) + log.Error().Msgf("RDBController error importing into rdbms : %v", err) return err } err = os.MkdirAll(datamoldParams.Directory, 0755) if err != nil { - logrus.Errorf("MkdirAll error : %v", err) + log.Error().Msgf("MkdirAll error : %v", err) return err } dbList := []string{} if err := RDBC.ListDB(&dbList); err != nil { - logrus.Errorf("ListDB error : %v", err) + log.Error().Msgf("ListDB error : %v", err) return err } var sqlData string for _, db 
:= range dbList { sqlData = "" - logrus.Infof("Export start: %s", db) + log.Info().Msgf("Export start: %s", db) if err := RDBC.Get(db, &sqlData); err != nil { - logrus.Errorf("Get error : %v", err) + log.Error().Msgf("Get error : %v", err) return err } file, err := os.Create(filepath.Join(datamoldParams.Directory, fmt.Sprintf("%s.sql", db))) if err != nil { - logrus.Errorf("File create error : %v", err) + log.Error().Msgf("File create error : %v", err) return err } defer file.Close() _, err = file.WriteString(sqlData) if err != nil { - logrus.Errorf("File write error : %v", err) + log.Error().Msgf("File write error : %v", err) return err } - logrus.Infof("successfully exported : %s", file.Name()) + log.Info().Msgf("successfully exported : %s", file.Name()) file.Close() } - logrus.Infof("successfully exported : %s", datamoldParams.Directory) + log.Info().Msgf("successfully exported : %s", datamoldParams.Directory) return nil } @@ -123,25 +123,25 @@ func MigrationRDMFunc(datamoldParams *models.CommandTask) error { var srcErr error var dstRDBC *rdbc.RDBController var dstErr error - logrus.Infof("Source Information") + log.Info().Msgf("Source Information") srcRDBC, srcErr = GetRDMS(&datamoldParams.SourcePoint) if srcErr != nil { - logrus.Errorf("RDBController error migration into rdbms : %v", srcErr) + log.Error().Msgf("RDBController error migration into rdbms : %v", srcErr) return srcErr } - logrus.Infof("Target Information") + log.Info().Msgf("Target Information") dstRDBC, dstErr = GetRDMS(&datamoldParams.TargetPoint) if dstErr != nil { - logrus.Errorf("RDBController error migration into rdbms : %v", dstErr) + log.Error().Msgf("RDBController error migration into rdbms : %v", dstErr) return dstErr } - logrus.Info("Launch RDBController Copy") + log.Info().Msgf("Launch RDBController Copy") if err := srcRDBC.Copy(dstRDBC); err != nil { - logrus.Errorf("Copy error copying into rdbms : %v", err) + log.Error().Msgf("Copy error copying into rdbms : %v", err) return err } - 
logrus.Info("successfully migrationed") + log.Info().Msgf("successfully migrationed") return nil } @@ -151,15 +151,15 @@ func DeleteRDMFunc(datamoldParams *models.CommandTask) error { RDBC, err = GetRDMS(&datamoldParams.TargetPoint) if err != nil { - logrus.Errorf("RDBController error deleting into rdbms : %v", err) + log.Error().Msgf("RDBController error deleting into rdbms : %v", err) return err } - logrus.Info("Launch RDBController Delete") + log.Info().Msgf("Launch RDBController Delete") if err := RDBC.DeleteDB(datamoldParams.DeleteDBList...); err != nil { - logrus.Errorf("Delete error deleting into rdbms : %v", err) + log.Error().Msgf("Delete error deleting into rdbms : %v", err) return err } - logrus.Info("successfully deleted") + log.Info().Msgf("successfully deleted") return nil } diff --git a/internal/execfunc/dummycreate.go b/internal/execfunc/dummycreate.go index 1729a63..0379ca8 100644 --- a/internal/execfunc/dummycreate.go +++ b/internal/execfunc/dummycreate.go @@ -20,91 +20,91 @@ import ( "github.com/cloud-barista/mc-data-manager/pkg/dummy/semistructured" "github.com/cloud-barista/mc-data-manager/pkg/dummy/structured" "github.com/cloud-barista/mc-data-manager/pkg/dummy/unstructured" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" "github.com/spf13/cast" ) func DummyCreate(params models.CommandTask) error { - logrus.Info("check directory DummyPaths") + log.Info().Msgf("check directory DummyPaths") if cast.ToInt(params.SizeSQL) != 0 { - logrus.Info("start sql generation") + log.Info().Msgf("start sql generation") if err := structured.GenerateRandomSQL(params.DummyPath, cast.ToInt(params.SizeSQL)); err != nil { - logrus.Error("failed to generate sql") + log.Error().Msgf("failed to generate sql") return err } - logrus.Infof("successfully generated sql : %s", params.DummyPath) + log.Info().Msgf("successfully generated sql : %s", params.DummyPath) } - logrus.Info("start Serversql generation Boolean? 
:", (cast.ToInt(params.SizeServerSQL) != 0)) + log.Info().Msgf("start Serversql generation Boolean? : %v", cast.ToInt(params.SizeServerSQL) != 0) if cast.ToInt(params.SizeServerSQL) != 0 { - logrus.Info("start Serversql generation") + log.Info().Msgf("start Serversql generation") if err := structured.GenerateRandomSQLWithServer(params.DummyPath, cast.ToInt(params.SizeServerSQL)); err != nil { - logrus.Error("failed to generate sql") + log.Error().Msgf("failed to generate sql") return err } - logrus.Infof("successfully generated sql : %s", params.DummyPath) + log.Info().Msgf("successfully generated sql : %s", params.DummyPath) } if cast.ToInt(params.SizeCSV) != 0 { - logrus.Info("start csv generation") + log.Info().Msgf("start csv generation") if err := structured.GenerateRandomCSV(params.DummyPath, cast.ToInt(params.SizeCSV)); err != nil { - logrus.Error("failed to generate csv") + log.Error().Msgf("failed to generate csv") return err } - logrus.Infof("successfully generated csv : %s", params.DummyPath) + log.Info().Msgf("successfully generated csv : %s", params.DummyPath) } if cast.ToInt(params.SizeJSON) != 0 { - logrus.Info("start json generation") + log.Info().Msgf("start json generation") if err := semistructured.GenerateRandomJSON(params.DummyPath, cast.ToInt(params.SizeJSON)); err != nil { - logrus.Error("failed to generate json") + log.Error().Msgf("failed to generate json") return err } - logrus.Infof("successfully generated json : %s", params.DummyPath) + log.Info().Msgf("successfully generated json : %s", params.DummyPath) } if cast.ToInt(params.SizeXML) != 0 { - logrus.Info("start xml generation") + log.Info().Msgf("start xml generation") if err := semistructured.GenerateRandomXML(params.DummyPath, cast.ToInt(params.SizeXML)); err != nil { - logrus.Error("failed to generate xml") + log.Error().Msgf("failed to generate xml") return err } - logrus.Infof("successfully generated xml : %s", params.DummyPath) + log.Info().Msgf("successfully generated xml : %s", 
params.DummyPath) } if cast.ToInt(params.SizeTXT) != 0 { - logrus.Info("start txt generation") + log.Info().Msgf("start txt generation") if err := unstructured.GenerateRandomTXT(params.DummyPath, cast.ToInt(params.SizeTXT)); err != nil { - logrus.Error("failed to generate txt") + log.Error().Msgf("failed to generate txt") return err } - logrus.Infof("successfully generated txt : %s", params.DummyPath) + log.Info().Msgf("successfully generated txt : %s", params.DummyPath) } if cast.ToInt(params.SizePNG) != 0 { - logrus.Info("start png generation") + log.Info().Msgf("start png generation") if err := unstructured.GenerateRandomPNGImage(params.DummyPath, cast.ToInt(params.SizePNG)); err != nil { - logrus.Error("failed to generate png") + log.Error().Msgf("failed to generate png") return err } - logrus.Infof("successfully generated png : %s", params.DummyPath) + log.Info().Msgf("successfully generated png : %s", params.DummyPath) } if cast.ToInt(params.SizeGIF) != 0 { - logrus.Info("start gif generation") + log.Info().Msgf("start gif generation") if err := unstructured.GenerateRandomGIF(params.DummyPath, cast.ToInt(params.SizeGIF)); err != nil { - logrus.Error("failed to generate gif") + log.Error().Msgf("failed to generate gif") return err } - logrus.Infof("successfully generated gif : %s", params.DummyPath) + log.Info().Msgf("successfully generated gif : %s", params.DummyPath) } if cast.ToInt(params.SizeZIP) != 0 { - logrus.Info("start zip generation") + log.Info().Msgf("start zip generation") if err := unstructured.GenerateRandomZIP(params.DummyPath, cast.ToInt(params.SizeZIP)); err != nil { - logrus.Error("failed to generate zip") + log.Error().Msgf("failed to generate zip") return err } - logrus.Infof("successfully generated zip : %s", params.DummyPath) + log.Info().Msgf("successfully generated zip : %s", params.DummyPath) } return nil } diff --git a/internal/log/log.go b/internal/log/log.go index ba4d513..77b645a 100644 --- a/internal/log/log.go +++ 
b/internal/log/log.go @@ -1,115 +1,115 @@ -/* -Copyright 2023 The Cloud-Barista Authors. +// /* +// Copyright 2023 The Cloud-Barista Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// */ package log -import ( - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/sirupsen/logrus" -) - -var ( - instance *Logger - once sync.Once -) - -type Logger struct { - *logrus.Logger -} - -// GetInstance returns the singleton instance of Logger -func GetInstance() *Logger { - once.Do(func() { - instance = &Logger{ - Logger: logrus.New(), - } - instance.setupLogger() - }) - return instance -} - -func (l *Logger) setupLogger() { - execPath, err := os.Executable() - if err != nil { - l.Fatal("Failed to get executable path: ", err) - } - - // Get the directory path of the binary file - execDir := filepath.Dir(execPath) - - // Set the log directory path - logDir := filepath.Join(execDir, "log") - - // Create the log directory - if err := os.MkdirAll(logDir, os.ModePerm); err != nil { - logrus.WithError(err).Fatal("Failed to create log directory") - } - - // Set the log file path - logFilePath := filepath.Join(logDir, "data-manager.log") - - // Open or create the log file - logFile, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) - if err != nil { - logrus.WithError(err).Fatal("Failed to create log file") - } - logrus.SetLevel(logrus.DebugLevel) - logrus.SetFormatter(&CustomTextFormatter{}) - logrus.SetOutput(io.MultiWriter(os.Stdout, logFile)) -} - -type CustomTextFormatter struct { - CmdName string - JobName string -} - -func (f *CustomTextFormatter) Format(entry *logrus.Entry) ([]byte, error) { - timeFormatted := entry.Time.Format("2006-01-02T15:04:05-07:00") - cn := f.CmdName - jn := f.JobName - if _, ok := entry.Data["cmdbName"]; ok { - cn = entry.Data["cmdbName"].(string) - } - if _, ok := entry.Data["jobName"]; ok { - jn = entry.Data["jobName"].(string) - } - return []byte(fmt.Sprintf("[%s] [%s] [%s] [%s] %s\n", timeFormatted, entry.Level, cn, jn, strings.ToUpper(entry.Message[:1])+entry.Message[1:])), nil -} - -func Debug(args ...interface{}) { - GetInstance().Debug(args...) 
-} - -func Info(args ...interface{}) { - GetInstance().Info(args...) -} - -func Warn(args ...interface{}) { - GetInstance().Warn(args...) -} - -func Error(args ...interface{}) { - GetInstance().Error(args...) -} - -func Fatal(args ...interface{}) { - GetInstance().Fatal(args...) -} +// import ( +// "fmt" +// "io" +// "os" +// "path/filepath" +// "strings" +// "sync" + +// "github.com/sirupsen/logrus" +// ) + +// var ( +// instance *Logger +// once sync.Once +// ) + +// type Logger struct { +// *zerolog.Logger +// } + +// // GetInstance returns the singleton instance of Logger +// func GetInstance() *Logger { +// once.Do(func() { +// instance = &Logger{ +// Logger: logrus.New(), +// } +// instance.setupLogger() +// }) +// return instance +// } + +// func (l *Logger) setupLogger() { +// execPath, err := os.Executable() +// if err != nil { +// l.Fatal("Failed to get executable path: ", err) +// } + +// // Get the directory path of the binary file +// execDir := filepath.Dir(execPath) + +// // Set the log directory path +// logDir := filepath.Join(execDir, "log") + +// // Create the log directory +// if err := os.MkdirAll(logDir, os.ModePerm); err != nil { +// logrus.WithError(err).Fatal("Failed to create log directory") +// } + +// // Set the log file path +// logFilePath := filepath.Join(logDir, "data-manager.log") + +// // Open or create the log file +// logFile, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) +// if err != nil { +// logrus.WithError(err).Fatal("Failed to create log file") +// } +// logrus.SetLevel(logrus.DebugLevel) +// logrus.SetFormatter(&CustomTextFormatter{}) +// logrus.SetOutput(io.MultiWriter(os.Stdout, logFile)) +// } + +// type CustomTextFormatter struct { +// CmdName string +// JobName string +// } + +// func (f *CustomTextFormatter) Format(entry *logrus.Entry) ([]byte, error) { +// timeFormatted := entry.Time.Format("2006-01-02T15:04:05-07:00") +// cn := f.CmdName +// jn := f.JobName +// if _, ok := 
entry.Data["cmdbName"]; ok { +// cn = entry.Data["cmdbName"].(string) +// } +// if _, ok := entry.Data["jobName"]; ok { +// jn = entry.Data["jobName"].(string) +// } +// return []byte(fmt.Sprintf("[%s] [%s] [%s] [%s] %s\n", timeFormatted, entry.Level, cn, jn, strings.ToUpper(entry.Message[:1])+entry.Message[1:])), nil +// } + +// func Debug(args ...interface{}) { +// GetInstance().Debug(args...) +// } + +// func Info(args ...interface{}) { +// GetInstance().Info(args...) +// } + +// func Warn(args ...interface{}) { +// GetInstance().Warn(args...) +// } + +// func Error(args ...interface{}) { +// GetInstance().Error(args...) +// } + +// func Fatal(args ...interface{}) { +// GetInstance().Fatal(args...) +// } diff --git a/internal/zerolog/logger.go b/internal/zerolog/logger.go index fb02f13..26337ea 100644 --- a/internal/zerolog/logger.go +++ b/internal/zerolog/logger.go @@ -1,171 +1,187 @@ -/* -Copyright 2023 The Cloud-Barista Authors. +// /* +// Copyright 2023 The Cloud-Barista Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// */ package zlog -import ( - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" -) - -var ( - instance *Logger - once sync.Once -) - -type Logger struct { - zerolog.Logger -} - -type LogEntry struct { - logger *Logger - level zerolog.Level - cmdName string - jobName string - message string -} - -// GetInstance returns the singleton instance of Logger -func GetInstance() *Logger { - once.Do(func() { - instance = &Logger{ - Logger: log.Output(zerolog.ConsoleWriter{Out: os.Stderr}), - } - instance.setupLogger() - }) - return instance -} - -func (l *Logger) setupLogger() { - execPath, err := os.Executable() - if err != nil { - log.Fatal().Msgf("Failed to get executable path: %v", err) - } - - // Get the directory path of the binary file - execDir := filepath.Dir(execPath) - - // Set the log directory path - logDir := filepath.Join(execDir, "./data/var/log") - - // Create the log directory - if err := os.MkdirAll(logDir, os.ModePerm); err != nil { - log.Fatal().Msgf("Failed to create log directory: %v", err) - } - - // Set the log file path - logFilePath := filepath.Join(logDir, "data-manager.log") - - // Open or create the log file - logFile, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) - if err != nil { - log.Fatal().Msgf("Failed to create log file: %v", err) - } - - // Set zerolog level and output - l.Logger = l.Output(io.MultiWriter(os.Stdout, logFile)).With().Timestamp().Logger() - zerolog.SetGlobalLevel(zerolog.DebugLevel) -} - -func (l *Logger) NewLogEntry() *LogEntry { - return &LogEntry{ - logger: l, - } -} - -func (e *LogEntry) WithLevel(level zerolog.Level) *LogEntry { - e.level = level - return e -} - -func (e *LogEntry) WithCmdName(cmdName string) *LogEntry { - e.cmdName = cmdName - return e -} - -func (e *LogEntry) WithJobName(jobName 
string) *LogEntry { - e.jobName = jobName - return e -} - -func (e *LogEntry) WithMessage(message string) *LogEntry { - e.message = message - return e -} - -func (e *LogEntry) logWithCustomFormat() { - timeFormatted := time.Now().Format("2006-01-02T15:04:05-07:00") - logEvent := e.logger.With(). - Str("time", timeFormatted). - Str("level", e.level.String()). - Str("cmdName", e.cmdName). - Str("jobName", e.jobName). - Logger() - - logEvent.Log().Msg(strings.ToUpper(e.message[:1]) + e.message[1:]) -} - -func Debug(cmdName, jobName string, args ...interface{}) { - GetInstance().NewLogEntry(). - WithLevel(zerolog.DebugLevel). - WithCmdName(cmdName). - WithJobName(jobName). - WithMessage(fmt.Sprint(args...)). - logWithCustomFormat() -} - -func Info(cmdName, jobName string, args ...interface{}) { - GetInstance().NewLogEntry(). - WithLevel(zerolog.InfoLevel). - WithCmdName(cmdName). - WithJobName(jobName). - WithMessage(fmt.Sprint(args...)). - logWithCustomFormat() -} - -func Warn(cmdName, jobName string, args ...interface{}) { - GetInstance().NewLogEntry(). - WithLevel(zerolog.WarnLevel). - WithCmdName(cmdName). - WithJobName(jobName). - WithMessage(fmt.Sprint(args...)). - logWithCustomFormat() -} - -func Error(cmdName, jobName string, args ...interface{}) { - GetInstance().NewLogEntry(). - WithLevel(zerolog.ErrorLevel). - WithCmdName(cmdName). - WithJobName(jobName). - WithMessage(fmt.Sprint(args...)). - logWithCustomFormat() -} - -func Fatal(cmdName, jobName string, args ...interface{}) { - GetInstance().NewLogEntry(). - WithLevel(zerolog.FatalLevel). - WithCmdName(cmdName). - WithJobName(jobName). - WithMessage(fmt.Sprint(args...)). 
- logWithCustomFormat() -} +// import ( +// "fmt" +// "os" +// "path/filepath" +// "strings" +// "sync" +// "time" + +// "github.com/rs/zerolog" +// "github.com/rs/zerolog/log" +// "gopkg.in/natefinch/lumberjack.v2" +// ) + +// var ( +// instance *Logger +// once sync.Once +// ) + +// type Logger struct { +// zerolog.Logger +// } + +// type LogEntry struct { +// logger *Logger +// level zerolog.Level +// cmdName string // ServiceType +// jobName string // TaskType +// message string +// } + +// // GetInstance returns the singleton instance of Logger +// func GetInstance() *Logger { +// once.Do(func() { +// instance = &Logger{} +// instance.setupLogger() +// }) +// return instance +// } + +// // setupLogger configures the Logger instance with lumberjack for log rotation and zerolog.MultiWriter +// func (l *Logger) setupLogger() { +// execPath, err := os.Executable() +// if err != nil { +// log.Fatal().Msgf("Failed to get executable path: %v", err) +// } + +// // Get the directory path of the binary file +// execDir := filepath.Dir(execPath) + +// // Set the log directory path +// logDir := filepath.Join(execDir, "./data/var/log") + +// // Create the log directory if it doesn't exist +// if err := os.MkdirAll(logDir, os.ModePerm); err != nil { +// log.Fatal().Msgf("Failed to create log directory: %v", err) +// } + +// // Set the log file path +// logFilePath := filepath.Join(logDir, "data-manager.log") + +// // Configure lumberjack for log rotation +// rotationLogger := &lumberjack.Logger{ +// Filename: logFilePath, +// MaxSize: 100, // Maximum size in megabytes before log is rotated +// MaxBackups: 3, // Maximum number of old log files to retain +// MaxAge: 28, // Maximum number of days to retain old log files +// Compress: true, // Whether to compress/zip old log files +// } + +// // Use Zerolog's MultiWriter to write to both stdout and the rotated log file +// multiWriter := zerolog.MultiLevelWriter(os.Stdout, rotationLogger) + +// // Set zerolog level and output 
+// l.Logger = zerolog.New(multiWriter).With().Timestamp().Logger() +// zerolog.SetGlobalLevel(zerolog.DebugLevel) +// } + +// // NewLogEntry creates a new log entry +// func (l *Logger) NewLogEntry() *LogEntry { +// return &LogEntry{ +// logger: l, +// } +// } + +// func (e *LogEntry) WithLevel(level zerolog.Level) *LogEntry { +// e.level = level +// return e +// } + +// func (e *LogEntry) WithCmdName(cmdName string) *LogEntry { +// e.cmdName = cmdName +// return e +// } + +// func (e *LogEntry) WithJobName(jobName string) *LogEntry { +// e.jobName = jobName +// return e +// } + +// func (e *LogEntry) WithMessage(message string) *LogEntry { +// e.message = message +// return e +// } + +// func (e *LogEntry) logWithCustomFormat() { +// timeFormatted := time.Now().Format(time.RFC3339) +// logEvent := e.logger.With(). +// Str("time", timeFormatted). +// Str("level", e.level.String()). +// Str("cmdName", e.cmdName). +// Str("jobName", e.jobName). +// Logger() + +// logEvent.Log().Msg(strings.ToUpper(e.message[:1]) + e.message[1:]) +// } + +// // Convenience methods for logging at different levels +// func Debug(cmdName, jobName string, args ...interface{}) { +// GetInstance().NewLogEntry(). +// WithLevel(zerolog.DebugLevel). +// WithCmdName(cmdName). +// WithJobName(jobName). +// WithMessage(fmt.Sprint(args...)). +// logWithCustomFormat() +// } + +// func Info(cmdName, jobName string, args ...interface{}) { +// GetInstance().NewLogEntry(). +// WithLevel(zerolog.InfoLevel). +// WithCmdName(cmdName). +// WithJobName(jobName). +// WithMessage(fmt.Sprint(args...)). +// logWithCustomFormat() +// } + +// func Warn(cmdName, jobName string, args ...interface{}) { +// GetInstance().NewLogEntry(). +// WithLevel(zerolog.WarnLevel). +// WithCmdName(cmdName). +// WithJobName(jobName). +// WithMessage(fmt.Sprint(args...)). +// logWithCustomFormat() +// } + +// func Error(cmdName, jobName string, args ...interface{}) { +// GetInstance().NewLogEntry(). 
+// WithLevel(zerolog.ErrorLevel). +// WithCmdName(cmdName). +// WithJobName(jobName). +// WithMessage(fmt.Sprint(args...)). +// logWithCustomFormat() +// } + +// func Fatal(cmdName, jobName string, args ...interface{}) { +// GetInstance().NewLogEntry(). +// WithLevel(zerolog.FatalLevel). +// WithCmdName(cmdName). +// WithJobName(jobName). +// WithMessage(fmt.Sprint(args...)). +// logWithCustomFormat() +// } + +// func Trace(cmdName, jobName string, args ...interface{}) { +// GetInstance().NewLogEntry(). +// WithLevel(zerolog.TraceLevel). +// WithCmdName(cmdName). +// WithJobName(jobName). +// WithMessage(fmt.Sprint(args...)). +// logWithCustomFormat() +// } diff --git a/main.go b/main.go index 86db9cb..7de4bc3 100644 --- a/main.go +++ b/main.go @@ -17,8 +17,18 @@ package main import ( "github.com/cloud-barista/mc-data-manager/cmd" + "github.com/cloud-barista/mc-data-manager/config" + "github.com/cloud-barista/mc-data-manager/pkg/logger" + "github.com/rs/zerolog/log" ) func main() { cmd.Execute() } + +func init() { + config.Init() + loggerConfig := logger.Config{LogConfig: config.Settings.Logger} + logger := logger.NewLogger(loggerConfig) + log.Logger = *logger +} diff --git a/pkg/csp/aws/awsc_test.go b/pkg/csp/aws/awsc_test.go index aa92577..7cc2ed8 100644 --- a/pkg/csp/aws/awsc_test.go +++ b/pkg/csp/aws/awsc_test.go @@ -2,13 +2,13 @@ package aws import ( "fmt" + "log" "path/filepath" "runtime" "testing" cfg "github.com/cloud-barista/mc-data-manager/config" "github.com/cloud-barista/mc-data-manager/models" - "github.com/sirupsen/logrus" ) func TestMain(m *testing.M) { @@ -16,7 +16,7 @@ func TestMain(m *testing.M) { _, filename, _, ok := runtime.Caller(0) if !ok { - logrus.Fatalf("Failed to get current file path") + log.Fatalf("Failed to get current file path") } rootDir := filepath.Join(filepath.Dir(filename), "../../..") diff --git a/pkg/dummy/semistructured/json.go b/pkg/dummy/semistructured/json.go index 65a39b1..aafd1b7 100644 ---
a/pkg/dummy/semistructured/json.go +++ b/pkg/dummy/semistructured/json.go @@ -9,7 +9,7 @@ import ( "github.com/brianvoe/gofakeit/v6" "github.com/cloud-barista/mc-data-manager/pkg/utils" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) // Structures to be used to generate json dummy data @@ -91,7 +91,7 @@ type personInfo struct { func GenerateRandomJSON(dummyDir string, capacitySize int) error { dummyDir = filepath.Join(dummyDir, "json") if err := utils.IsDir(dummyDir); err != nil { - logrus.Errorf("IsDir function error : %v", err) + log.Error().Msgf("IsDir function error : %v", err) return err } @@ -121,7 +121,7 @@ func GenerateRandomJSON(dummyDir string, capacitySize int) error { for ret := range resultChan { if ret != nil { - logrus.Errorf("return error : %v", ret) + log.Error().Msgf("return error : %v", ret) return ret } } @@ -136,7 +136,7 @@ func GenerateRandomJSON(dummyDir string, capacitySize int) error { func GenerateRandomJSONWithServer(dummyDir string, capacitySize int) error { dummyDir = filepath.Join(dummyDir, "json") if err := utils.IsDir(dummyDir); err != nil { - logrus.Errorf("IsDir function error : %v", err) + log.Error().Msgf("IsDir function error : %v", err) return err } @@ -166,7 +166,7 @@ func GenerateRandomJSONWithServer(dummyDir string, capacitySize int) error { for ret := range resultChan { if ret != nil { - logrus.Errorf("return error : %v", ret) + log.Error().Msgf("return error : %v", ret) return ret } } @@ -219,7 +219,7 @@ func generateJSONBook(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } @@ -249,7 +249,7 @@ func generateJSONCar(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } @@ -279,7 +279,7 @@ func 
generateJSONAddress(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } @@ -309,7 +309,7 @@ func generateJSONCreditCard(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } @@ -339,7 +339,7 @@ func generateJSONJob(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } @@ -369,7 +369,7 @@ func generateJSONMovie(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } @@ -399,7 +399,7 @@ func generateJSONPerson(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } diff --git a/pkg/dummy/semistructured/xml.go b/pkg/dummy/semistructured/xml.go index ec4dd58..1d17fba 100644 --- a/pkg/dummy/semistructured/xml.go +++ b/pkg/dummy/semistructured/xml.go @@ -9,7 +9,7 @@ import ( "github.com/brianvoe/gofakeit/v6" "github.com/cloud-barista/mc-data-manager/pkg/utils" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) // xml generation function using gofakeit @@ -19,7 +19,7 @@ import ( func GenerateRandomXML(dummyDir string, capacitySize int) error { dummyDir = filepath.Join(dummyDir, "xml") if err := utils.IsDir(dummyDir); err != nil { - logrus.Errorf("IsDir function error : %v", err) + log.Error().Msgf("IsDir function error : %v", err) return err } @@ -48,7 +48,7 @@ func GenerateRandomXML(dummyDir string, 
capacitySize int) error { for ret := range resultChan { if ret != nil { - logrus.Errorf("return error : %v", ret) + log.Error().Msgf("return error : %v", ret) return ret } } @@ -107,7 +107,7 @@ func generateXMLBook(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } @@ -142,7 +142,7 @@ func generateXMLCar(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } @@ -177,7 +177,7 @@ func generateXMLAddress(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } @@ -212,7 +212,7 @@ func generateXMLCreditCard(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } @@ -247,7 +247,7 @@ func generateXMLJob(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } @@ -282,7 +282,7 @@ func generateXMLMovie(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } @@ -317,7 +317,7 @@ func generateXMLPerson(cnt int, dirPath string, count int) error { _, err = file.Write(data) if err == nil { - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) } return err } diff --git a/pkg/dummy/structured/csv.go b/pkg/dummy/structured/csv.go 
index 05fea63..cac13cb 100644 --- a/pkg/dummy/structured/csv.go +++ b/pkg/dummy/structured/csv.go @@ -25,7 +25,7 @@ import ( "github.com/brianvoe/gofakeit/v6" "github.com/cloud-barista/mc-data-manager/pkg/utils" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) // CSV generation function using gofakeit @@ -35,7 +35,7 @@ import ( func GenerateRandomCSV(dummyDir string, capacitySize int) error { dummyDir = filepath.Join(dummyDir, "csv") if err := utils.IsDir(dummyDir); err != nil { - logrus.Errorf("IsDir function error : %v", err) + log.Error().Msgf("IsDir function error : %v", err) return err } @@ -63,7 +63,7 @@ func GenerateRandomCSV(dummyDir string, capacitySize int) error { for ret := range resultChan { if ret != nil { - logrus.Errorf("return error : %v", ret) + log.Error().Msgf("return error : %v", ret) return ret } } @@ -128,7 +128,7 @@ func generateCSVBook(cnt int, dirPath string, count int) error { } } - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) csvWriter.Flush() return csvWriter.Error() @@ -170,7 +170,7 @@ func generateCSVCar(cnt int, dirPath string, count int) error { return err } } - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) csvWriter.Flush() return csvWriter.Error() } @@ -211,7 +211,7 @@ func generateCSVAddress(cnt int, dirPath string, count int) error { return err } } - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) csvWriter.Flush() return csvWriter.Error() @@ -253,7 +253,7 @@ func generateCSVCreditCard(cnt int, dirPath string, count int) error { return err } } - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) csvWriter.Flush() return csvWriter.Error() @@ -295,7 +295,7 @@ func generateCSVJob(cnt int, dirPath string, count int) error { return err } } - logrus.Infof("Creation success: %v", file.Name()) + 
log.Info().Msgf("Creation success: %v", file.Name()) csvWriter.Flush() return csvWriter.Error() @@ -337,7 +337,7 @@ func generateCSVMovie(cnt int, dirPath string, count int) error { return err } } - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) csvWriter.Flush() return csvWriter.Error() @@ -379,7 +379,7 @@ func generateCSVPerson(cnt int, dirPath string, count int) error { return err } } - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) csvWriter.Flush() return csvWriter.Error() diff --git a/pkg/dummy/structured/sql.go b/pkg/dummy/structured/sql.go index 7d679cd..6b8a700 100644 --- a/pkg/dummy/structured/sql.go +++ b/pkg/dummy/structured/sql.go @@ -27,7 +27,7 @@ import ( "github.com/brianvoe/gofakeit/v6" "github.com/cloud-barista/mc-data-manager/pkg/utils" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) type sqlData struct { @@ -128,7 +128,7 @@ INSERT INTO BorrowedBooks (MemberID, BookID, BorrowedDate, DueDate, ReturnedDate func GenerateRandomSQL(dummyDir string, capacitySize int) error { dummyDir = filepath.Join(dummyDir, "sql") if err := utils.IsDir(dummyDir); err != nil { - logrus.Errorf("IsDir function error : %v", err) + log.Error().Msgf("IsDir function error : %v", err) return err } @@ -158,7 +158,7 @@ func GenerateRandomSQL(dummyDir string, capacitySize int) error { for ret := range resultChan { if ret != nil { - logrus.Errorf("result error : %v", ret) + log.Error().Msgf("result error : %v", ret) return ret } } @@ -169,7 +169,7 @@ func GenerateRandomSQL(dummyDir string, capacitySize int) error { func GenerateRandomSQLWithServer(dummyDir string, capacitySize int) error { dummyDir = filepath.Join(dummyDir, "sql") if err := utils.IsDir(dummyDir); err != nil { - logrus.Errorf("IsDir function error : %v", err) + log.Error().Msgf("IsDir function error : %v", err) return err } @@ -199,7 +199,7 @@ func GenerateRandomSQLWithServer(dummyDir 
string, capacitySize int) error { for ret := range resultChan { if ret != nil { - logrus.Errorf("result error : %v", ret) + log.Error().Msgf("result error : %v", ret) return ret } } @@ -257,7 +257,7 @@ func randomSQLWorker(countNum chan int, dirPath string, resultChan chan<- error) continue } - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) file.Close() resultChan <- nil diff --git a/pkg/dummy/unstructured/gif.go b/pkg/dummy/unstructured/gif.go index cff1e82..d6ceba7 100644 --- a/pkg/dummy/unstructured/gif.go +++ b/pkg/dummy/unstructured/gif.go @@ -29,7 +29,7 @@ import ( "time" "github.com/cloud-barista/mc-data-manager/pkg/utils" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) // GIF generation function using gofakeit @@ -39,23 +39,23 @@ import ( func GenerateRandomGIF(dummyDir string, capacitySize int) error { dummyDir = filepath.Join(dummyDir, "gif") if err := utils.IsDir(dummyDir); err != nil { - logrus.Errorf("IsDir function error : %v", err) + log.Error().Msgf("IsDir function error : %v", err) return err } tempPath := filepath.Join(dummyDir, "tmpImg") if err := os.MkdirAll(tempPath, 0755); err != nil { - logrus.Errorf("MkdirAll function error : %v", err) + log.Error().Msgf("MkdirAll function error : %v", err) return err } defer os.RemoveAll(tempPath) - logrus.Info("start png generation") + log.Info().Msgf("start png generation") if err := GenerateRandomPNGImage(tempPath, 1); err != nil { - logrus.Error("failed to generate png") + log.Error().Msgf("failed to generate png") return err } - logrus.Info("successfully generated png") + log.Info().Msgf("successfully generated png") var files []string size := capacitySize * 34 * 10 @@ -73,7 +73,7 @@ func GenerateRandomGIF(dummyDir string, capacitySize int) error { }) if err != nil { - logrus.Errorf("Walk function error : %v", err) + log.Error().Msgf("Walk function error : %v", err) return err } @@ -81,14 +81,14 @@ func GenerateRandomGIF(dummyDir 
string, capacitySize int) error { for _, imgName := range files { imgFile, err := os.Open(imgName) if err != nil { - logrus.Errorf("file open error : %v", err) + log.Error().Msgf("file open error : %v", err) return err } defer imgFile.Close() img, err := png.Decode(imgFile) if err != nil { - logrus.Errorf("file decoding error : %v", err) + log.Error().Msgf("file decoding error : %v", err) return err } imgList = append(imgList, img) @@ -118,7 +118,7 @@ func GenerateRandomGIF(dummyDir string, capacitySize int) error { for err := range resultChan { if err != nil { - logrus.Errorf("result error : %v", err) + log.Error().Msgf("result error : %v", err) return err } } @@ -158,7 +158,7 @@ func randomGIFWorker(imgList []image.Image, countNum chan int, tmpDir string, re err = gif.EncodeAll(gifFile, gifImage) if err == nil { - logrus.Infof("Creation success: %v", gifFile.Name()) + log.Info().Msgf("Creation success: %v", gifFile.Name()) } if cerr := gifFile.Close(); cerr != nil { diff --git a/pkg/dummy/unstructured/img.go b/pkg/dummy/unstructured/img.go index e6741d9..7e5be81 100644 --- a/pkg/dummy/unstructured/img.go +++ b/pkg/dummy/unstructured/img.go @@ -23,7 +23,7 @@ import ( "github.com/brianvoe/gofakeit/v6" "github.com/cloud-barista/mc-data-manager/pkg/utils" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) type ImageType string @@ -35,7 +35,7 @@ type ImageType string func GenerateRandomPNGImage(dummyDir string, capacitySize int) error { dummyDir = filepath.Join(dummyDir, "png") if err := utils.IsDir(dummyDir); err != nil { - logrus.Errorf("IsDir function error : %v", err) + log.Error().Msgf("IsDir function error : %v", err) return err } @@ -65,7 +65,7 @@ func GenerateRandomPNGImage(dummyDir string, capacitySize int) error { for ret := range resultChan { if ret != nil { - logrus.Errorf("result error : %v", ret) + log.Error().Msgf("result error : %v", ret) return ret } } @@ -85,7 +85,7 @@ func randomImageWorker(countNum chan int, dirPath string, resultChan 
chan<- erro if _, err := file.Write(gofakeit.ImagePng(500, 500)); err != nil { resultChan <- err } - logrus.Infof("Creation success: %v", file.Name()) + log.Info().Msgf("Creation success: %v", file.Name()) file.Close() } diff --git a/pkg/dummy/unstructured/txt.go b/pkg/dummy/unstructured/txt.go index be3eb8d..8c5916a 100644 --- a/pkg/dummy/unstructured/txt.go +++ b/pkg/dummy/unstructured/txt.go @@ -23,7 +23,7 @@ import ( "github.com/brianvoe/gofakeit/v6" "github.com/cloud-barista/mc-data-manager/pkg/utils" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) // TXT generation function using gofakeit @@ -33,7 +33,7 @@ import ( func GenerateRandomTXT(dummyDir string, capacitySize int) error { dummyDir = filepath.Join(dummyDir, "txt") if err := utils.IsDir(dummyDir); err != nil { - logrus.Errorf("IsDir function error : %v", err) + log.Error().Msgf("IsDir function error : %v", err) return err } @@ -61,7 +61,7 @@ func GenerateRandomTXT(dummyDir string, capacitySize int) error { for ret := range resultChan { if ret != nil { - logrus.Errorf("result error : %v", ret) + log.Error().Msgf("result error : %v", ret) return ret } } @@ -83,7 +83,7 @@ func randomTxtWorker(countNum chan int, dirPath string, resultChan chan<- error) } } - logrus.Infof("successfully generated : %s", file.Name()) + log.Info().Msgf("successfully generated : %s", file.Name()) if err := file.Close(); err != nil { resultChan <- err diff --git a/pkg/dummy/unstructured/zip.go b/pkg/dummy/unstructured/zip.go index ac43f37..0ac3040 100644 --- a/pkg/dummy/unstructured/zip.go +++ b/pkg/dummy/unstructured/zip.go @@ -24,7 +24,7 @@ import ( "sync" "github.com/cloud-barista/mc-data-manager/pkg/utils" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) // ZIP generation function using gofakeit @@ -34,23 +34,23 @@ import ( func GenerateRandomZIP(dummyDir string, capacitySize int) error { dummyDir = filepath.Join(dummyDir, "zip") if err := utils.IsDir(dummyDir); err != nil { - logrus.Errorf("IsDir 
function error : %v", err) + log.Error().Msgf("IsDir function error : %v", err) return err } tempPath := filepath.Join(dummyDir, "tmpTxt") if err := os.MkdirAll(tempPath, 0755); err != nil { - logrus.Errorf("MkdirAll function error : %v", err) + log.Error().Msgf("MkdirAll function error : %v", err) return err } defer os.RemoveAll(tempPath) - logrus.Info("start txt generation") + log.Info().Msgf("start txt generation") if err := GenerateRandomTXT(tempPath, 1); err != nil { - logrus.Error("failed to generate txt") + log.Error().Msgf("failed to generate txt") return err } - logrus.Info("successfully generated txt") + log.Info().Msgf("successfully generated txt") countNum := make(chan int, capacitySize) resultChan := make(chan error, capacitySize) @@ -76,7 +76,7 @@ func GenerateRandomZIP(dummyDir string, capacitySize int) error { for ret := range resultChan { if ret != nil { - logrus.Errorf("result error : %v", ret) + log.Error().Msgf("result error : %v", ret) return ret } } @@ -99,7 +99,7 @@ func randomZIPWorker(countNum chan int, dummyDir, tempPath string, resultChan ch if err := gzip(tempPath, zipWriter); err != nil { resultChan <- err } - logrus.Infof("successfully generated : %s", w.Name()) + log.Info().Msgf("successfully generated : %s", w.Name()) zipWriter.Close() w.Close() resultChan <- nil diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go new file mode 100644 index 0000000..934701b --- /dev/null +++ b/pkg/logger/logger.go @@ -0,0 +1,189 @@ +package logger + +import ( + "os" + "path/filepath" + "runtime" + "strconv" + "sync" + + "github.com/cloud-barista/mc-data-manager/config" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "gopkg.in/natefinch/lumberjack.v2" +) + +// Define context keys +type contextKey string + +const ( + TraceIdKey contextKey = "traceId" + SpanIdKey contextKey = "spanId" +) + +// Define TracingHook struct +type TracingHook struct{} + +// Run method: Executed when a log event occurs +func (h TracingHook) Run(e *zerolog.Event, 
level zerolog.Level, msg string) { + ctx := e.GetCtx() + traceID := ctx.Value(TraceIdKey) + spanID := ctx.Value(SpanIdKey) + + if traceID != nil { + e.Str(string(TraceIdKey), traceID.(string)) + } + if spanID != nil { + e.Str(string(SpanIdKey), spanID.(string)) + } +} + +var ( + sharedLogFile *lumberjack.Logger + once sync.Once +) + +type Config struct { + config.LogConfig +} + +func init() { + + // For consistent log format across different running environments (e.g., local, Docker, Kubernetes) + // Set the caller field to the relative path from the project root + _, b, _, _ := runtime.Caller(0) + projectRoot := filepath.Join(filepath.Dir(b), "../../") // predict the project root directory from the current file having init() function + + zerolog.CallerMarshalFunc = func(pc uintptr, file string, line int) string { + // relative path from the project root + relPath, err := filepath.Rel(projectRoot, file) + if err != nil { + return filepath.Base(file) + ":" + strconv.Itoa(line) // return the original file path with line number if the relative path cannot be resolved + } + return relPath + ":" + strconv.Itoa(line) + } +} + +// NewLogger initializes a new logger with default values if not provided +func NewLogger(config Config) *zerolog.Logger { + // Apply default values if not provided + if config.LogLevel == "" { + config.LogLevel = "debug" + } + if config.LogWriter == "" { + config.LogWriter = "-" + } + if config.File.Path == "" { + config.File.Path = "./data/var/log/app.log" + } + if config.File.MaxSize == 0 { + config.File.MaxSize = 100 // in MB + } + if config.File.MaxBackups == 0 { + config.File.MaxBackups = 3 + } + if config.File.MaxAge == 0 { + config.File.MaxAge = 30 // in days + } + + // Initialize shared log file for log rotation once + once.Do(func() { + sharedLogFile = &lumberjack.Logger{ + Filename: config.File.Path, + MaxSize: config.File.MaxSize, + MaxBackups: config.File.MaxBackups, + MaxAge: config.File.MaxAge, + Compress: config.File.Compress, + } + 
+ + // Ensure the log file exists before changing its permissions + if _, err := os.Stat(config.File.Path); os.IsNotExist(err) { + // Create the log file if it does not exist + file, err := os.Create(config.File.Path) + if err != nil { + log.Fatal().Msgf("Failed to create log file: %v", err) + } + file.Close() + } + + // Change file permissions to -rw-r--r-- + if err := os.Chmod(config.File.Path, 0644); err != nil { + log.Fatal().Msgf("Failed to change file permissions: %v", err) + } + }) + + level := getLogLevel(config.LogLevel) + logger := configureWriter(config.LogWriter, level) + + // Add tracing hook to the logger (Hook returns a new Logger; the result must be kept) + *logger = logger.Hook(TracingHook{}) + + // Log a message to confirm logger setup + logger.Info(). + Str("logLevel", level.String()). + Msg("New logger created") + + return logger +} + +// getLogLevel returns the zerolog.Level based on the string level +func getLogLevel(logLevel string) zerolog.Level { + switch logLevel { + case "trace": + return zerolog.TraceLevel + case "debug": + return zerolog.DebugLevel + case "info": + return zerolog.InfoLevel + case "warn": + return zerolog.WarnLevel + case "error": + return zerolog.ErrorLevel + case "fatal": + return zerolog.FatalLevel + case "panic": + return zerolog.PanicLevel + default: + log.Warn().Msgf("Invalid log level: %s. 
Using default value: info", logLevel) + return zerolog.InfoLevel + } +} + +// configureWriter sets up the logger based on the writer type +func configureWriter(logWriter string, level zerolog.Level) *zerolog.Logger { + var logger zerolog.Logger + multi := zerolog.MultiLevelWriter(sharedLogFile, zerolog.ConsoleWriter{Out: os.Stdout}) + + switch logWriter { + case "both": + logger = zerolog.New(multi).Level(level).With().Timestamp().Caller().Logger() + case "file": + logger = zerolog.New(sharedLogFile).Level(level).With().Timestamp().Caller().Logger() + case "stdout": + logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stdout}).Level(level).With().Timestamp().Caller().Logger() + default: + log.Warn().Msgf("Invalid log writer: %s. Using default value: both", logWriter) + logger = zerolog.New(multi).Level(level).With().Timestamp().Caller().Logger() + } + + logSetupInfo(logger, logWriter) + return &logger +} + +// logSetupInfo logs the logger setup details +func logSetupInfo(logger zerolog.Logger, logWriter string) { + if logWriter == "file" { + logger.Info(). + Str("logFilePath", sharedLogFile.Filename). + Msg("Single-write setup (logs to file only)") + } else if logWriter == "stdout" { + logger.Info(). + Str("ConsoleWriter", "os.Stdout"). + Msg("Single-write setup (logs to console only)") + } else { + logger.Info(). + Str("logFilePath", sharedLogFile.Filename). + Str("ConsoleWriter", "os.Stdout"). 
+ Msg("Multi-writes setup (logs to both file and console)") + } +} diff --git a/pkg/rdbms/mysql/mysql.go b/pkg/rdbms/mysql/mysql.go index cfc97ae..74a7743 100644 --- a/pkg/rdbms/mysql/mysql.go +++ b/pkg/rdbms/mysql/mysql.go @@ -22,9 +22,9 @@ import ( "regexp" "strings" - "github.com/cloud-barista/mc-data-manager/internal/log" "github.com/cloud-barista/mc-data-manager/models" _ "github.com/go-sql-driver/mysql" + "github.com/rs/zerolog/log" ) // mysqlDBMS struct @@ -72,8 +72,9 @@ func (d *MysqlDBMS) Exec(query string) error { _, err := d.db.Exec(query) if err != nil { - log.Error(query) + log.Error().Err(err).Str("query", query).Msg("Failed to execute SQL query") } + if err == nil { log.Info().Str("query", query).Msg("SQL query executed successfully") } return err } diff --git a/service/nrdbc/nrdbc.go b/service/nrdbc/nrdbc.go index 05e789b..fea1a10 100644 --- a/service/nrdbc/nrdbc.go +++ b/service/nrdbc/nrdbc.go @@ -18,7 +18,7 @@ package nrdbc import ( "fmt" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog" ) type NRDBMS interface { @@ -32,12 +32,12 @@ type NRDBMS interface { type NRDBController struct { client NRDBMS - logger *logrus.Logger + logger *zerolog.Logger } type Option func(*NRDBController) -func WithLogger(logger *logrus.Logger) Option { +func WithLogger(logger *zerolog.Logger) Option { return func(n *NRDBController) { n.logger = logger } } @@ -47,7 +47,6 @@ func New(nrdb NRDBMS, opts ...Option) (*NRDBController, error) { nrdbc := &NRDBController{ client: nrdb, } - for _, opt := range opts { opt(nrdbc) } @@ -153,9 +152,9 @@ func (nrdbc *NRDBController) logWrite(logLevel, msg string, err error) { if nrdbc.logger != nil { switch logLevel { case "Info": - nrdbc.logger.Info(msg) + nrdbc.logger.Info().Msg(msg) case "Error": - nrdbc.logger.Errorf("%s : %v", msg, err) + nrdbc.logger.Error().Msgf("%s : %v", msg, err) } } } diff --git a/service/osc/osc.go b/service/osc/osc.go index 228eb05..d41039e 100644 --- a/service/osc/osc.go +++ b/service/osc/osc.go @@ -19,7 +19,7 @@ 
import ( "io" "github.com/cloud-barista/mc-data-manager/models" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog" ) type OSFS interface { @@ -34,7 +34,7 @@ type OSFS interface { type OSController struct { osfs OSFS - logger *logrus.Logger + logger *zerolog.Logger threads int } @@ -77,7 +77,7 @@ func WithThreads(count int) Option { } } -func WithLogger(logger *logrus.Logger) Option { +func WithLogger(logger *zerolog.Logger) Option { return func(o *OSController) { o.logger = logger } @@ -101,9 +101,9 @@ func (osc *OSController) logWrite(logLevel, msg string, err error) { if osc.logger != nil { switch logLevel { case "Info": - osc.logger.Info(msg) + osc.logger.Info().Msg(msg) case "Error": - osc.logger.Errorf("%s : %v", msg, err) + osc.logger.Error().Msgf("%s : %v", msg, err) } } } diff --git a/service/rdbc/rdbc.go b/service/rdbc/rdbc.go index bdf2907..2da9c9b 100644 --- a/service/rdbc/rdbc.go +++ b/service/rdbc/rdbc.go @@ -21,7 +21,8 @@ import ( "strings" "github.com/cloud-barista/mc-data-manager/models" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" ) type EngineType string @@ -50,12 +51,12 @@ type RDBMS interface { type RDBController struct { client RDBMS - logger *logrus.Logger + logger *zerolog.Logger } type Option func(*RDBController) -func WithLogger(logger *logrus.Logger) Option { +func WithLogger(logger *zerolog.Logger) Option { return func(r *RDBController) { r.logger = logger } @@ -78,7 +79,7 @@ func New(rdb RDBMS, opts ...Option) (*RDBController, error) { func (rdb *RDBController) ListDB(dst *[]string) error { err := rdb.client.ListDB(dst) if err != nil { - logrus.Info("RDB", *dst) + log.Info().Msgf("RDB", *dst) return err } return nil @@ -101,7 +102,7 @@ func (rdb *RDBController) Put(sql string) error { err = rdb.client.Exec(line) if err != nil { rdb.client.Exec("ROLLBACK") - rdb.logger.Errorf("err Line : %+v", line) + rdb.logger.Error().Msgf("err Line : %+v", line) rdb.logWrite("Error", "sql exec error", 
err) return err } @@ -119,7 +120,7 @@ func (rdb *RDBController) Put(sql string) error { func (rdb *RDBController) PutDoc(sql string) error { err := rdb.client.Exec(sql) if err != nil { - rdb.logger.Errorf("err SQL : %+v", sql) + rdb.logger.Error().Msgf("err SQL : %+v", sql) rdb.logWrite("Error", "sql exec error", err) return err } @@ -156,7 +157,7 @@ func (rdb *RDBController) Copy(dst *RDBController) error { func (rdb *RDBController) Get(dbName string, sql *string) error { var sqlTemp string if err := rdb.client.ShowCreateDBSql(dbName, &sqlTemp); err != nil { - logrus.Error("ERR DB") + log.Error().Msg("ERR DB") return err } sqlWrite(sql, sqlTemp) @@ -164,7 +165,7 @@ func (rdb *RDBController) Get(dbName string, sql *string) error { var tableList []string if err := rdb.client.ListTable(dbName, &tableList); err != nil { - logrus.Error("ERR List TB") + log.Error().Msg("ERR List TB") return err } @@ -173,7 +174,7 @@ func (rdb *RDBController) Get(dbName string, sql *string) error { sqlWrite(sql, fmt.Sprintf("DROP TABLE IF EXISTS %s;", table)) if err := rdb.client.ShowCreateTableSql(dbName, table, &sqlTemp); err != nil { - logrus.Error("ERR Creatte TB") + log.Error().Msg("ERR Create TB") return err } @@ -183,7 +184,7 @@ func (rdb *RDBController) Get(dbName string, sql *string) error { for _, table := range tableList { var insertData []string if err := rdb.client.GetInsert(dbName, table, &insertData); err != nil { - logrus.Error("Insert quer err") + log.Error().Msg("Insert query err") return err } @@ -226,9 +227,9 @@ func (rdbc *RDBController) logWrite(logLevel, msg string, err error) { if rdbc.logger != nil { switch logLevel { case "Info": - rdbc.logger.Info(msg) + rdbc.logger.Info().Msg(msg) case "Error": - rdbc.logger.Errorf("%s : %v", msg, err) + rdbc.logger.Error().Msgf("%s : %v", msg, err) } } } diff --git a/service/task/task.go b/service/task/task.go index 52c9269..8bed0a5 100644 --- a/service/task/task.go +++ b/service/task/task.go @@ -19,7 +19,7 @@ import ( 
"github.com/cloud-barista/mc-data-manager/service/osc" "github.com/cloud-barista/mc-data-manager/service/rdbc" "github.com/go-co-op/gocron" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) var ( @@ -51,7 +51,7 @@ func InitFileScheduleManager() *FileScheduleManager { } if err := managerInstance.loadFromFile(); err != nil { - logrus.Errorf("Failed to load tasks from file: %v", err) + log.Error().Err(err).Msg("Failed to load tasks from file") managerInstance = nil return } @@ -60,7 +60,7 @@ func InitFileScheduleManager() *FileScheduleManager { }) if managerInstance == nil { - logrus.Error("FileScheduleManager initialization failed") + log.Error().Msg("FileScheduleManager initialization failed") } return managerInstance } @@ -83,7 +83,7 @@ func (m *FileScheduleManager) loadFromFile() error { file, err := os.Open(m.filename) if err != nil { if os.IsNotExist(err) { - logrus.Warnf("Task file %s does not exist, skipping load", m.filename) + log.Warn().Str("filename", m.filename).Msg("Task file does not exist, skipping load") return nil } return fmt.Errorf("failed to open task file %s: %w", m.filename, err) @@ -99,7 +99,7 @@ func (m *FileScheduleManager) loadFromFile() error { err = decoder.Decode(&data) if err != nil { - logrus.Errorf("Failed to decode task file %s: %v. Saving corrupted file as task_err.json and skipping load.", m.filename, err) + log.Error().Err(err).Str("filename", m.filename).Msg("Failed to decode task file. 
Saving corrupted file as task_err.json and skipping load.") // Create a backup of the corrupted file as task_err.json err = backupAndRemoveCorruptedFile(m.filename) @@ -121,7 +121,7 @@ func (m *FileScheduleManager) loadFromFile() error { } } - logrus.Infof("Successfully loaded and scheduled %d tasks from %s", len(m.schedules), m.filename) + log.Info().Int("tasks", len(m.schedules)).Str("filename", m.filename).Msg("Successfully loaded and scheduled tasks") return nil } @@ -332,7 +332,7 @@ func (m *FileScheduleManager) RunTasks(tasks []models.DataTask) { } err := m.saveToFile() if err != nil { - fmt.Printf("Error saving tasks to file: %v\n", err) + log.Error().Err(err).Msg("Error saving tasks to file") } } @@ -354,7 +354,7 @@ func handleTask(serviceType models.CloudServiceType, taskType models.TaskType, p case "restore": taskStatus = handleObjectStorageRestoreTask(params) default: - fmt.Printf("Error: Unknown TaskType: %s for ServiceType: %s\n", taskType, serviceType) + log.Error().Msgf("Error: Unknown TaskType: %s for ServiceType: %s\n", taskType, serviceType) taskStatus = models.StatusFailed } case "rdbms": @@ -368,7 +368,7 @@ func handleTask(serviceType models.CloudServiceType, taskType models.TaskType, p case "restore": taskStatus = handleRDBMSRestoreTask(params) default: - fmt.Printf("Error: Unknown TaskType: %s for ServiceType: %s\n", taskType, serviceType) + log.Error().Msgf("Error: Unknown TaskType: %s for ServiceType: %s\n", taskType, serviceType) taskStatus = models.StatusFailed } @@ -383,12 +383,12 @@ func handleTask(serviceType models.CloudServiceType, taskType models.TaskType, p case "restore": taskStatus = handleNRDBMSRestoreTask(params) default: - fmt.Printf("Error: Unknown TaskType: %s for ServiceType: %s\n", taskType, serviceType) + log.Error().Msgf("Error: Unknown TaskType: %s for ServiceType: %s\n", taskType, serviceType) taskStatus = models.StatusFailed } default: - fmt.Printf("Error: Unknown ServiceType: %s\n", serviceType) + 
log.Error().Msgf("Error: Unknown ServiceType: %s\n", serviceType) taskStatus = models.StatusFailed } @@ -397,7 +397,7 @@ func handleTask(serviceType models.CloudServiceType, taskType models.TaskType, p } func handleGenTest(params models.DataTask) models.Status { - logrus.Infof("Handling object storage Gen task") + log.Info().Msg("Handling object storage Gen task") _ = params var cParams models.CommandTask cParams.SizeServerSQL = "1" @@ -405,166 +405,165 @@ func handleGenTest(params models.DataTask) models.Status { execfunc.DummyCreate(cParams) return models.StatusCompleted } - func handleObjectStorageMigrateTask(params models.DataTask) models.Status { - fmt.Println("Handling object storage migrate task") + log.Info().Msg("Handling object storage migrate task") var src *osc.OSController var srcErr error var dst *osc.OSController var dstErr error - logrus.Infof("Source Information") + log.Info().Msg("Source Information") src, srcErr = auth.GetOS(¶ms.SourcePoint) if srcErr != nil { - logrus.Errorf("OSController error migration into objectstorage : %v", srcErr) + log.Error().Err(srcErr).Msg("OSController error migration into object storage") return models.StatusFailed } - logrus.Infof("Target Information") + log.Info().Msg("Target Information") dst, dstErr = auth.GetOS(¶ms.TargetPoint) if dstErr != nil { - logrus.Errorf("OSController error migration into objectstorage : %v", dstErr) + log.Error().Err(dstErr).Msg("OSController error migration into object storage") return models.StatusFailed } - logrus.Info("Launch OSController Copy") + log.Info().Msg("Launch OSController Copy") if err := src.Copy(dst); err != nil { - logrus.Errorf("Copy error copying into objectstorage : %v", err) + log.Error().Err(err).Msg("Copy error copying into object storage") return models.StatusFailed } - logrus.Info("successfully migrationed") + log.Info().Msg("Successfully migrated") return models.StatusCompleted } func handleObjectStorageBackupTask(params models.DataTask) models.Status { - 
fmt.Println("Handling object storage backup task") + log.Info().Msg("Handling object storage backup task") var OSC *osc.OSController var err error - logrus.Infof("User Information") + log.Info().Msg("User Information") OSC, err = auth.GetOS(¶ms.TargetPoint) if err != nil { - logrus.Errorf("OSController error importing into objectstorage : %v", err) + log.Error().Err(err).Msg("OSController error importing into objectstorage ") return models.StatusFailed } - logrus.Info("Launch OSController MGet") + log.Info().Msg("Launch OSController MGet") if err := OSC.MGet(params.Directory); err != nil { - logrus.Errorf("MGet error exporting into objectstorage : %v", err) + log.Error().Err(err).Msg("MGet error exporting into objectstorage ") return models.StatusFailed } - logrus.Infof("successfully backup : %s", params.Directory) + log.Info().Msgf("successfully backup : %s", params.Directory) return models.StatusCompleted } func handleObjectStorageRestoreTask(params models.DataTask) models.Status { - fmt.Println("Handling object storage restore task") + log.Info().Msg("Handling object storage restore task") var OSC *osc.OSController var err error - logrus.Infof("User Information") + log.Info().Msg("User Information") OSC, err = auth.GetOS(¶ms.TargetPoint) if err != nil { - logrus.Errorf("OSController error importing into objectstorage : %v", err) + log.Error().Err(err).Msg("OSController error importing into objectstorage ") return models.StatusFailed } - logrus.Info("Launch OSController MGet") + log.Info().Msg("Launch OSController MGet") if err := OSC.MPut(params.SourcePoint.Path); err != nil { - logrus.Errorf("MPut error importing into objectstorage : %v", err) + log.Error().Err(err).Msg("MPut error importing into objectstorage ") return models.StatusFailed } - logrus.Infof("successfully restore : %s", params.Directory) + log.Info().Msgf("successfully restore : %s", params.Directory) return models.StatusCompleted } func handleRDBMSMigrateTask(params models.DataTask) 
models.Status { - fmt.Println("Handling RDBMS migrate task") + log.Info().Msg("Handling RDBMS migrate task") var srcRDBC *rdbc.RDBController var srcErr error var dstRDBC *rdbc.RDBController var dstErr error - logrus.Infof("Source Information") + log.Info().Msg("Source Information") srcRDBC, srcErr = auth.GetRDMS(¶ms.SourcePoint) if srcErr != nil { - logrus.Errorf("RDBController error migration into rdbms : %v", srcErr) + log.Error().Err(srcErr).Msg("RDBController error migration into rdbms ") return models.StatusFailed } - logrus.Infof("Target Information") + log.Info().Msg("Target Information") dstRDBC, dstErr = auth.GetRDMS(¶ms.TargetPoint) if dstErr != nil { - logrus.Errorf("RDBController error migration into rdbms : %v", dstErr) + log.Error().Err(dstErr).Msg("RDBController error migration into rdbms ") return models.StatusFailed } - logrus.Info("Launch RDBController Copy") + log.Info().Msg("Launch RDBController Copy") if err := srcRDBC.Copy(dstRDBC); err != nil { - logrus.Errorf("Copy error copying into rdbms : %v", err) + log.Error().Err(err).Msg("Copy error copying into rdbms ") return models.StatusFailed } - logrus.Info("successfully migrationed") + log.Info().Msg("successfully migrationed") return models.StatusCompleted } func handleRDBMSBackupTask(params models.DataTask) models.Status { - fmt.Println("Handling RDBMS backup task") + log.Info().Msg("Handling RDBMS backup task") var RDBC *rdbc.RDBController var err error - logrus.Infof("User Information") + log.Info().Msg("User Information") RDBC, err = auth.GetRDMS(¶ms.TargetPoint) if err != nil { - logrus.Errorf("RDBController error importing into rdbms : %v", err) + log.Error().Err(err).Msg("RDBController error importing into rdbms ") return models.StatusFailed } err = os.MkdirAll(params.Directory, 0755) if err != nil { - logrus.Errorf("MkdirAll error : %v", err) + log.Error().Err(err).Msg("MkdirAll error ") return models.StatusFailed } dbList := []string{} if err := RDBC.ListDB(&dbList); err != nil { - 
logrus.Errorf("ListDB error : %v", err) + log.Error().Err(err).Msg("ListDB error ") return models.StatusFailed } var sqlData string for _, db := range dbList { sqlData = "" - logrus.Infof("Export start: %s", db) + log.Info().Msgf("Export start: %s", db) if err := RDBC.Get(db, &sqlData); err != nil { - logrus.Errorf("Get error : %v", err) + log.Error().Err(err).Msg("Get error ") return models.StatusFailed } file, err := os.Create(filepath.Join(params.Directory, fmt.Sprintf("%s.sql", db))) if err != nil { - logrus.Errorf("File create error : %v", err) + log.Error().Err(err).Msg("File create error ") return models.StatusFailed } defer file.Close() _, err = file.WriteString(sqlData) if err != nil { - logrus.Errorf("File write error : %v", err) + log.Error().Err(err).Msg("File write error ") return models.StatusFailed } - logrus.Infof("successfully exported : %s", file.Name()) + log.Info().Msgf("successfully exported : %s", file.Name()) file.Close() } - logrus.Infof("successfully backup : %s", params.Directory) + log.Info().Msgf("successfully backup : %s", params.Directory) return models.StatusCompleted } func handleRDBMSRestoreTask(params models.DataTask) models.Status { - fmt.Println("Handling RDBMS restore task") + log.Info().Msg("Handling RDBMS restore task") var RDBC *rdbc.RDBController var err error - logrus.Infof("User Information") + log.Info().Msg("User Information") RDBC, err = auth.GetRDMS(¶ms.TargetPoint) if err != nil { - logrus.Errorf("RDBController error importing into rdbms : %v", err) + log.Error().Err(err).Msg("RDBController error importing into rdbms ") return models.StatusFailed } @@ -579,96 +578,96 @@ func handleRDBMSRestoreTask(params models.DataTask) models.Status { return nil }) if err != nil { - logrus.Errorf("Walk error : %v", err) + log.Error().Err(err).Msg("Walk error ") return models.StatusFailed } for _, sqlPath := range sqlList { data, err := os.ReadFile(sqlPath) if err != nil { - logrus.Errorf("ReadFile error : %v", err) + 
log.Error().Err(err).Msg("ReadFile error ") return models.StatusFailed } - logrus.Infof("Import start: %s", sqlPath) + log.Info().Msgf("Import start: %s", sqlPath) if err := RDBC.Put(string(data)); err != nil { - logrus.Error("Put error importing into rdbms") + log.Error().Msg("Put error importing into rdbms") return models.StatusFailed } - logrus.Infof("Import success: %s", sqlPath) + log.Info().Msgf("Import success: %s", sqlPath) } - logrus.Infof("successfully restore : %s", params.Directory) + log.Info().Msgf("successfully restore : %s", params.Directory) return models.StatusCompleted } func handleNRDBMSMigrateTask(params models.DataTask) models.Status { - fmt.Println("Handling NRDBMS migrate task") + log.Info().Msg("Handling NRDBMS migrate task") var srcNRDBC *nrdbc.NRDBController var srcErr error var dstNRDBC *nrdbc.NRDBController var dstErr error - logrus.Infof("Source Information") + log.Info().Msg("Source Information") srcNRDBC, srcErr = auth.GetNRDMS(¶ms.SourcePoint) if srcErr != nil { - logrus.Errorf("NRDBController error migration into nrdbms : %v", srcErr) + log.Error().Err(srcErr).Msg("NRDBController error migration into nrdbms ") return models.StatusFailed } - logrus.Infof("Target Information") + log.Info().Msg("Target Information") dstNRDBC, dstErr = auth.GetNRDMS(¶ms.TargetPoint) if dstErr != nil { - logrus.Errorf("NRDBController error migration into nrdbms : %v", dstErr) + log.Error().Err(dstErr).Msg("NRDBController error migration into nrdbms ") return models.StatusFailed } - logrus.Info("Launch NRDBController Copy") + log.Info().Msg("Launch NRDBController Copy") if err := srcNRDBC.Copy(dstNRDBC); err != nil { - logrus.Errorf("Copy error copying into nrdbms : %v", err) + log.Error().Err(err).Msg("Copy error copying into nrdbms ") return models.StatusFailed } - logrus.Info("successfully migrationed") + log.Info().Msg("successfully migrationed") return models.StatusCompleted } func handleNRDBMSBackupTask(params models.DataTask) models.Status { - 
fmt.Println("Handling NRDBMS backup task") + log.Info().Msg("Handling NRDBMS backup task") var NRDBC *nrdbc.NRDBController var err error NRDBC, err = auth.GetNRDMS(¶ms.TargetPoint) if err != nil { - logrus.Errorf("NRDBController error importing into nrdbms : %v", err) + log.Error().Err(err).Msg("NRDBController error importing into nrdbms ") return models.StatusFailed } tableList, err := NRDBC.ListTables() if err != nil { - logrus.Infof("ListTables error : %v", err) + log.Info().Msgf("ListTables error : %v", err) return models.StatusFailed } if !utils.FileExists(params.Directory) { - logrus.Infof("directory does not exist") - logrus.Infof("Make Directory") + log.Info().Msg("directory does not exist") + log.Info().Msg("Make Directory") err = os.MkdirAll(params.Directory, 0755) if err != nil { - logrus.Infof("Make Failed 0755 : %s", params.Directory) + log.Info().Msgf("Make Failed 0755 : %s", params.Directory) return models.StatusFailed } } var dstData []map[string]interface{} for _, table := range tableList { - logrus.Infof("Export start: %s", table) + log.Info().Msgf("Export start: %s", table) dstData = []map[string]interface{}{} if err := NRDBC.Get(table, &dstData); err != nil { - logrus.Errorf("Get error : %v", err) + log.Error().Err(err).Msg("Get error ") return models.StatusFailed } file, err := os.Create(filepath.Join(params.Directory, fmt.Sprintf("%s.json", table))) if err != nil { - logrus.Errorf("File create error : %v", err) + log.Error().Err(err).Msg("File create error ") return models.StatusFailed } defer file.Close() @@ -676,23 +675,23 @@ func handleNRDBMSBackupTask(params models.DataTask) models.Status { encoder := json.NewEncoder(file) encoder.SetIndent("", " ") if err := encoder.Encode(dstData); err != nil { - logrus.Errorf("data encoding error : %v", err) + log.Error().Err(err).Msg("data encoding error ") return models.StatusFailed } - logrus.Infof("successfully create File : %s", file.Name()) + log.Info().Msgf("successfully create File : %s", 
file.Name()) } - logrus.Infof("successfully backup to : %s", params.Directory) + log.Info().Msgf("successfully backup to : %s", params.Directory) return models.StatusCompleted } func handleNRDBMSRestoreTask(params models.DataTask) models.Status { - fmt.Println("Handling NRDBMS restore task") + log.Info().Msg("Handling NRDBMS restore task") var NRDBC *nrdbc.NRDBController var err error NRDBC, err = auth.GetNRDMS(¶ms.TargetPoint) if err != nil { - logrus.Errorf("NRDBController error importing into nrdbms : %v", err) + log.Error().Err(err).Msg("NRDBController error importing into nrdbms ") return models.StatusFailed } @@ -708,7 +707,7 @@ func handleNRDBMSRestoreTask(params models.DataTask) models.Status { }) if err != nil { - logrus.Errorf("Walk error : %v", err) + log.Error().Err(err).Msg("Walk error ") return models.StatusFailed } @@ -718,25 +717,25 @@ func handleNRDBMSRestoreTask(params models.DataTask) models.Status { file, err := os.Open(jsonFile) if err != nil { - logrus.Errorf("file open error : %v", err) + log.Error().Err(err).Msg("file open error ") return models.StatusFailed } defer file.Close() if err := json.NewDecoder(file).Decode(&srcData); err != nil { - logrus.Errorf("file decoding error : %v", err) + log.Error().Err(err).Msg("file decoding error ") return models.StatusFailed } fileName := filepath.Base(jsonFile) tableName := fileName[:len(fileName)-len(filepath.Ext(fileName))] - logrus.Infof("Import start: %s", fileName) + log.Info().Msgf("Import start: %s", fileName) if err := NRDBC.Put(tableName, &srcData); err != nil { - logrus.Error("Put error importing into nrdbms") + log.Error().Msg("Put error importing into nrdbms") return models.StatusFailed } - logrus.Infof("successfully Restore : %s", params.Directory) + log.Info().Msgf("successfully Restore : %s", params.Directory) } return models.StatusCompleted diff --git a/websrc/controllers/backupHandlers.go b/websrc/controllers/backupHandlers.go index d1dd72c..9e48e3a 100644 --- 
a/websrc/controllers/backupHandlers.go +++ b/websrc/controllers/backupHandlers.go @@ -27,7 +27,6 @@ import ( "github.com/cloud-barista/mc-data-manager/models" "github.com/cloud-barista/mc-data-manager/service/nrdbc" "github.com/labstack/echo/v4" - "github.com/sirupsen/logrus" ) // BackupOSPostHandler godoc @@ -60,7 +59,7 @@ func BackupOSPostHandler(ctx echo.Context) error { case string(models.GCP): return MigrationGCPToLinuxPostHandler(ctx) default: - logger.Errorf("Unsupported provider: %v", params.SourcePoint.Provider) + logger.Error().Str("provider", params.SourcePoint.Provider).Msg("Unsupported provider") errorMsg := fmt.Sprintf("unsupported provider: %v", params.SourcePoint.Provider) return ctx.JSON(http.StatusBadRequest, models.BasicResponse{ Result: logstrings.String(), @@ -69,7 +68,7 @@ func BackupOSPostHandler(ctx echo.Context) error { } } -// BackupMySQLPostHandler godoc +// BackupRDBPostHandler godoc // // @Summary Export data from MySQL // @Description Export data from a MySQL database to SQL files. @@ -145,7 +144,7 @@ func BackupRDBPostHandler(ctx echo.Context) error { Error: nil, }) } - logrus.Infof("successfully exported : %s", file.Name()) + logger.Info().Str("file", file.Name()).Msg("Successfully exported") file.Close() } @@ -157,7 +156,7 @@ func BackupRDBPostHandler(ctx echo.Context) error { } -// BackupMySQLPostHandler godoc +// BackupNRDBPostHandler godoc // // @Summary Export data from MySQL // @Description Export data from a MySQL database to SQL files. 
@@ -210,17 +209,17 @@ func BackupNRDBPostHandler(ctx echo.Context) error { var dstData []map[string]interface{} for _, table := range tableList { - logrus.Infof("Export start: %s", table) + logger.Info().Str("table", table).Msg("Export start") dstData = []map[string]interface{}{} if err := NRDBC.Get(table, &dstData); err != nil { - logrus.Errorf("Get error : %v", err) + logger.Error().Err(err).Msg("Get error") return err } file, err := os.Create(filepath.Join(params.TargetPoint.Path, fmt.Sprintf("%s.json", table))) if err != nil { - logrus.Errorf("File create error : %v", err) + logger.Error().Err(err).Msg("File create error") return err } defer file.Close() @@ -228,13 +227,13 @@ func BackupNRDBPostHandler(ctx echo.Context) error { encoder := json.NewEncoder(file) encoder.SetIndent("", " ") if err := encoder.Encode(dstData); err != nil { - logrus.Errorf("data encoding error : %v", err) + logger.Error().Err(err).Msg("Data encoding error") return err } - logrus.Infof("successfully exported : %s", file.Name()) + logger.Info().Str("file", file.Name()).Msg("Successfully exported") } - jobEnd(logger, "Successfully exported data from mysql", start) + jobEnd(logger, "Successfully exported data from NRDB", start) return ctx.JSON(http.StatusOK, models.BasicResponse{ Result: logstrings.String(), Error: nil, diff --git a/websrc/controllers/gendata.go b/websrc/controllers/gendata.go index 1797476..90524c4 100644 --- a/websrc/controllers/gendata.go +++ b/websrc/controllers/gendata.go @@ -21,111 +21,111 @@ import ( "github.com/cloud-barista/mc-data-manager/pkg/dummy/semistructured" "github.com/cloud-barista/mc-data-manager/pkg/dummy/structured" "github.com/cloud-barista/mc-data-manager/pkg/dummy/unstructured" + "github.com/rs/zerolog" "github.com/spf13/cast" - - "github.com/sirupsen/logrus" ) -func genData(params GenFileParams, logger *logrus.Logger) error { - logger.Info("Let's getnetate") +func genData(params GenFileParams, logger *zerolog.Logger) error { + 
logger.Info().Msg("Let's generate") if params.CheckSQL { - logger.Info("Start creating sql dummy") + logger.Info().Msg("Start creating SQL dummy") sql, _ := strconv.Atoi(params.SizeSQL) if err := structured.GenerateRandomSQL(params.DummyPath, sql); err != nil { - logger.Info("Failed to create sql dummy") + logger.Error().Err(err).Msg("Failed to create SQL dummy") return err } - logger.Info("Successfully generated sql dummy") + logger.Info().Msg("Successfully generated SQL dummy") } + if cast.ToBool(params.CheckCSV) { - logger.Info("Start creating csv dummy") + logger.Info().Msg("Start creating CSV dummy") csv, _ := strconv.Atoi(params.SizeCSV) if err := structured.GenerateRandomCSV(params.DummyPath, csv); err != nil { - logger.Info("Failed to create csv dummy") + logger.Error().Err(err).Msg("Failed to create CSV dummy") return err } - logger.Info("Successfully generated csv dummy") + logger.Info().Msg("Successfully generated CSV dummy") } if params.CheckTXT { - logger.Info("Start creating txt dummy") + logger.Info().Msg("Start creating TXT dummy") txt, _ := strconv.Atoi(params.SizeTXT) if err := unstructured.GenerateRandomTXT(params.DummyPath, txt); err != nil { - logger.Info("Failed to create txt dummy") + logger.Error().Err(err).Msg("Failed to create TXT dummy") return err } - logger.Info("Successfully generated txt dummy") + logger.Info().Msg("Successfully generated TXT dummy") } if params.CheckPNG { - logger.Info("Start creating png dummy") + logger.Info().Msg("Start creating PNG dummy") png, _ := strconv.Atoi(params.SizePNG) if err := unstructured.GenerateRandomPNGImage(params.DummyPath, png); err != nil { - logger.Info("Failed to create png dummy") + logger.Error().Err(err).Msg("Failed to create PNG dummy") return err } - logger.Info("Successfully generated png dummy") + logger.Info().Msg("Successfully generated PNG dummy") } if params.CheckGIF { - logger.Info("Start creating gif dummy") + logger.Info().Msg("Start creating GIF dummy") gif, _ := 
strconv.Atoi(params.SizeGIF) if err := unstructured.GenerateRandomGIF(params.DummyPath, gif); err != nil { - logger.Info("Failed to create gif dummy") + logger.Error().Err(err).Msg("Failed to create GIF dummy") return err } - logger.Info("Successfully generated gif dummy") + logger.Info().Msg("Successfully generated GIF dummy") } if params.CheckZIP { - logger.Info("Start creating a pile of zip files that compressed txt") + logger.Info().Msg("Start creating a pile of ZIP files that compress TXT") zip, _ := strconv.Atoi(params.SizeZIP) if err := unstructured.GenerateRandomZIP(params.DummyPath, zip); err != nil { - logger.Info("Failed to create zip file dummy compressed txt") + logger.Error().Err(err).Msg("Failed to create ZIP file dummy compressed TXT") return err } - logger.Info("Successfully created zip file dummy compressed txt") + logger.Info().Msg("Successfully created ZIP file dummy compressed TXT") } if params.CheckJSON { - logger.Info("Start creating json dummy") + logger.Info().Msg("Start creating JSON dummy") json, _ := strconv.Atoi(params.SizeJSON) if err := semistructured.GenerateRandomJSON(params.DummyPath, json); err != nil { - logger.Info("Failed to create json dummy") + logger.Error().Err(err).Msg("Failed to create JSON dummy") return err } - logger.Info("Successfully generated json dummy") + logger.Info().Msg("Successfully generated JSON dummy") } if params.CheckXML { - logger.Info("Start creating xml dummy") + logger.Info().Msg("Start creating XML dummy") xml, _ := strconv.Atoi(params.SizeXML) if err := semistructured.GenerateRandomXML(params.DummyPath, xml); err != nil { - logger.Info("Failed to create xml dummy") + logger.Error().Err(err).Msg("Failed to create XML dummy") return err } - logger.Info("Successfully generated xml dummy") + logger.Info().Msg("Successfully generated XML dummy") } if params.CheckServerJSON { - logger.Info("Start creating json dummy") + logger.Info().Msg("Start creating JSON dummy") json, _ := 
strconv.Atoi(params.SizeServerJSON) if err := semistructured.GenerateRandomJSONWithServer(params.DummyPath, json); err != nil { - logger.Info("Failed to create json dummy") + logger.Error().Err(err).Msg("Failed to create JSON dummy") return err } - logger.Info("Successfully generated json dummy") + logger.Info().Msg("Successfully generated JSON dummy") } if params.CheckServerSQL { - logger.Info("Start creating sql dummy") + logger.Info().Msg("Start creating SQL dummy") sql, _ := strconv.Atoi(params.SizeServerSQL) if err := structured.GenerateRandomSQLWithServer(params.DummyPath, sql); err != nil { - logger.Info("Failed to create sql dummy") + logger.Error().Err(err).Msg("Failed to create SQL dummy") return err } - logger.Info("Successfully generated sql dummy") + logger.Info().Msg("Successfully generated SQL dummy") } return nil diff --git a/websrc/controllers/generateHandlers.go b/websrc/controllers/generateHandlers.go index 3119f31..ad7fa64 100644 --- a/websrc/controllers/generateHandlers.go +++ b/websrc/controllers/generateHandlers.go @@ -16,7 +16,6 @@ limitations under the License. 
package controllers import ( - "fmt" "net/http" "os" "path/filepath" @@ -92,7 +91,6 @@ func GenerateWindowsPostHandler(ctx echo.Context) error { logger, logstrings := pageLogInit("genwindows", "Create dummy data in windows", start) if !osCheck(logger, start, "windows") { - fmt.Println("test") return ctx.JSON(http.StatusBadRequest, models.BasicResponse{ Result: logstrings.String(), Error: nil, @@ -389,15 +387,15 @@ func GenerateMySQLPostHandler(ctx echo.Context) error { }) } - logger.Info("Start Import with mysql") + logger.Info().Msg("Start Import with mysql") for _, sql := range sqlList { - logger.Infof("Read sql file : %s", sql) + logger.Info().Str("file", sql).Msg("Read sql file") data, err := os.ReadFile(sql) if err != nil { end := time.Now() - logger.Errorf("os ReadFile failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Err(err).Msg("os ReadFile failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(start).String()).Msg("") return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: nil, @@ -405,18 +403,18 @@ func GenerateMySQLPostHandler(ctx echo.Context) error { } - logger.Infof("Put start : %s", filepath.Base(sql)) + logger.Info().Str("file", filepath.Base(sql)).Msg("Put start") if err := rdbc.Put(string(data)); err != nil { end := time.Now() - logger.Errorf("RDBController import failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Err(err).Msg("RDBController import failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(start).String()).Msg("") return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: 
logstrings.String(), Error: nil, }) } - logger.Infof("sql put success : %s", filepath.Base(sql)) + logger.Info().Str("file", filepath.Base(sql)).Msg("sql put success") } jobEnd(logger, "Dummy creation and import successful with mysql", start) diff --git a/websrc/controllers/migrationGCPHandlers.go b/websrc/controllers/migrationGCPHandlers.go index 8d9a3c4..95e0c08 100644 --- a/websrc/controllers/migrationGCPHandlers.go +++ b/websrc/controllers/migrationGCPHandlers.go @@ -25,18 +25,17 @@ import ( // MigrationGCPToLinuxPostHandler godoc // -// @Summary Migrate data from GCP to Linux -// @Description Migrate data stored in GCP Cloud Storage to a Linux-based system. -// @Tags [Data Migration] -// @Accept json -// @Produce json -// @Param RequestBody body MigrateTask true "Parameters required for migration" -// @Success 200 {object} models.BasicResponse "Successfully migrated data" -// @Failure 400 {object} models.BasicResponse "Invalid Request" -// @Failure 500 {object} models.BasicResponse "Internal Server Error" -// @Router /migration/gcp/linux [post] +// @Summary Migrate data from GCP to Linux +// @Description Migrate data stored in GCP Cloud Storage to a Linux-based system. 
+// @Tags [Data Migration] +// @Accept json +// @Produce json +// @Param RequestBody body MigrateTask true "Parameters required for migration" +// @Success 200 {object} models.BasicResponse "Successfully migrated data" +// @Failure 400 {object} models.BasicResponse "Invalid Request" +// @Failure 500 {object} models.BasicResponse "Internal Server Error" +// @Router /migration/gcp/linux [post] func MigrationGCPToLinuxPostHandler(ctx echo.Context) error { - start := time.Now() logger, logstrings := pageLogInit("miggcplin", "Export gcp data to windows", start) @@ -81,18 +80,17 @@ func MigrationGCPToLinuxPostHandler(ctx echo.Context) error { // MigrationGCPToWindowsPostHandler godoc // -// @Summary Migrate data from GCP to Windows -// @Description Migrate data stored in GCP Cloud Storage to a Windows-based system. -// @Tags [Data Migration] -// @Accept json -// @Produce json -// @Param RequestBody body MigrateTask true "Parameters required for migration" -// @Success 200 {object} models.BasicResponse "Successfully migrated data" -// @Failure 400 {object} models.BasicResponse "Invalid Request" -// @Failure 500 {object} models.BasicResponse "Internal Server Error" -// @Router /migration/gcp/windows [post] +// @Summary Migrate data from GCP to Windows +// @Description Migrate data stored in GCP Cloud Storage to a Windows-based system. 
+// @Tags [Data Migration] +// @Accept json +// @Produce json +// @Param RequestBody body MigrateTask true "Parameters required for migration" +// @Success 200 {object} models.BasicResponse "Successfully migrated data" +// @Failure 400 {object} models.BasicResponse "Invalid Request" +// @Failure 500 {object} models.BasicResponse "Internal Server Error" +// @Router /migration/gcp/windows [post] func MigrationGCPToWindowsPostHandler(ctx echo.Context) error { - start := time.Now() logger, logstrings := pageLogInit("miggcpwin", "Export gcp data to windows", start) @@ -137,17 +135,16 @@ func MigrationGCPToWindowsPostHandler(ctx echo.Context) error { // MigrationGCPToS3PostHandler godoc // -// @Summary Migrate data from GCP to AWS S3 -// @Description Migrate data stored in GCP Cloud Storage to AWS S3. -// @Tags [Data Migration], [Object Storage] -// @Accept json -// @Produce json -// @Param RequestBody body MigrateTask true "Parameters required for migration" -// @Success 200 {object} models.BasicResponse "Successfully migrated data" -// @Failure 500 {object} models.BasicResponse "Internal Server Error" -// @Router /migration/gcp/aws [post] +// @Summary Migrate data from GCP to AWS S3 +// @Description Migrate data stored in GCP Cloud Storage to AWS S3. 
+// @Tags [Data Migration], [Object Storage] +// @Accept json +// @Produce json +// @Param RequestBody body MigrateTask true "Parameters required for migration" +// @Success 200 {object} models.BasicResponse "Successfully migrated data" +// @Failure 500 {object} models.BasicResponse "Internal Server Error" +// @Router /migration/gcp/aws [post] func MigrationGCPToS3PostHandler(ctx echo.Context) error { - start := time.Now() logger, logstrings := pageLogInit("genlinux", "Export gcp data to s3", start) @@ -176,12 +173,12 @@ func MigrationGCPToS3PostHandler(ctx echo.Context) error { }) } - logger.Infof("Start migration of GCP Cloud Storage to AWS S3") + logger.Info().Msg("Start migration of GCP Cloud Storage to AWS S3") if err := gcpOSC.Copy(awsOSC); err != nil { end := time.Now() - logger.Errorf("OSController migration failed : %v", err) - logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Err(err).Msg("OSController migration failed") + logger.Info().Str("End time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(start).String()).Msg("") return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: nil, @@ -198,17 +195,16 @@ func MigrationGCPToS3PostHandler(ctx echo.Context) error { // MigrationGCPToNCPPostHandler godoc // -// @Summary Migrate data from GCP to NCP Object Storage -// @Description Migrate data stored in GCP Cloud Storage to NCP Object Storage. 
-// @Tags [Data Migration], [Object Storage] -// @Accept json -// @Produce json -// @Param RequestBody body MigrateTask true "Parameters required for migration" -// @Success 200 {object} models.BasicResponse "Successfully migrated data" -// @Failure 500 {object} models.BasicResponse "Internal Server Error" -// @Router /migration/gcp/ncp [post] +// @Summary Migrate data from GCP to NCP Object Storage +// @Description Migrate data stored in GCP Cloud Storage to NCP Object Storage. +// @Tags [Data Migration], [Object Storage] +// @Accept json +// @Produce json +// @Param RequestBody body MigrateTask true "Parameters required for migration" +// @Success 200 {object} models.BasicResponse "Successfully migrated data" +// @Failure 500 {object} models.BasicResponse "Internal Server Error" +// @Router /migration/gcp/ncp [post] func MigrationGCPToNCPPostHandler(ctx echo.Context) error { - start := time.Now() logger, logstrings := pageLogInit("miggcpncp", "Export gcp data to ncp objectstorage", start) @@ -237,12 +233,12 @@ func MigrationGCPToNCPPostHandler(ctx echo.Context) error { }) } - logger.Infof("Start migration of GCP Cloud Storage to NCP Object Storage") + logger.Info().Msg("Start migration of GCP Cloud Storage to NCP Object Storage") if err := gcpOSC.Copy(ncpOSC); err != nil { end := time.Now() - logger.Errorf("OSController migration failed : %v", err) - logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Err(err).Msg("OSController migration failed") + logger.Info().Str("End time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(start).String()).Msg("") return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: nil, diff --git a/websrc/controllers/migrationHandlers.go b/websrc/controllers/migrationHandlers.go index 74a9c7c..f509e8f 100644 --- 
a/websrc/controllers/migrationHandlers.go +++ b/websrc/controllers/migrationHandlers.go @@ -407,10 +407,6 @@ func MigrationMySQLPostHandler(ctx echo.Context) error { } if err := srdbc.Copy(trdbc); err != nil { - end := time.Now() - logger.Errorf("RDBController copy failed : %v", err) - logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) return ctx.JSON(http.StatusOK, models.BasicResponse{ Result: logstrings.String(), Error: nil, diff --git a/websrc/controllers/migrationNCPHandlers.go b/websrc/controllers/migrationNCPHandlers.go index 0815d11..73a3625 100644 --- a/websrc/controllers/migrationNCPHandlers.go +++ b/websrc/controllers/migrationNCPHandlers.go @@ -175,12 +175,12 @@ func MigrationNCPToS3PostHandler(ctx echo.Context) error { }) } - logger.Infof("Start migration of NCP Object Storage to AWS S3") + logger.Info().Msgf("Start migration of NCP Object Storage to AWS S3") if err := ncpOSC.Copy(awsOSC); err != nil { end := time.Now() - logger.Errorf("OSController migration failed : %v", err) - logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Msgf("OSController migration failed : %v", err) + logger.Info().Msgf("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) + logger.Info().Msgf("Elapsed time : %s", end.Sub(start).String()) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: nil, @@ -237,12 +237,12 @@ func MigrationNCPToGCPPostHandler(ctx echo.Context) error { }) } - logger.Infof("Start migration of NCP Object Storage to GCP Cloud Storage") + logger.Info().Msgf("Start migration of NCP Object Storage to GCP Cloud Storage") if err := ncpOSC.Copy(gcpOSC); err != nil { end := time.Now() - logger.Errorf("OSController migration failed : %v", err) - logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - 
logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Msgf("OSController migration failed : %v", err) + logger.Info().Msgf("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) + logger.Info().Msgf("Elapsed time : %s", end.Sub(start).String()) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: nil, diff --git a/websrc/controllers/migrationNoSqlHandlers.go b/websrc/controllers/migrationNoSqlHandlers.go index b3b54e3..fb8d3dd 100644 --- a/websrc/controllers/migrationNoSqlHandlers.go +++ b/websrc/controllers/migrationNoSqlHandlers.go @@ -65,9 +65,9 @@ func MigrationDynamoDBToFirestorePostHandler(ctx echo.Context) error { if err := awsNRDB.Copy(gcpNRDB); err != nil { end := time.Now() - logger.Errorf("NRDBController copy failed : %v", err) - logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Msgf("NRDBController copy failed : %v", err) + logger.Info().Msgf("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) + logger.Info().Msgf("Elapsed time : %s", end.Sub(start).String()) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: nil, @@ -124,9 +124,9 @@ func MigrationDynamoDBToMongoDBPostHandler(ctx echo.Context) error { if err := awsNRDB.Copy(ncpNRDB); err != nil { end := time.Now() - logger.Errorf("NRDBController copy failed : %v", err) - logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Msgf("NRDBController copy failed : %v", err) + logger.Info().Msgf("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) + logger.Info().Msgf("Elapsed time : %s", end.Sub(start).String()) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: nil, @@ -183,9 +183,9 @@ func 
MigrationFirestoreToDynamoDBPostHandler(ctx echo.Context) error { if err := gcpNRDB.Copy(awsNRDB); err != nil { end := time.Now() - logger.Errorf("NRDBController copy failed : %v", err) - logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Msgf("NRDBController copy failed : %v", err) + logger.Info().Msgf("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) + logger.Info().Msgf("Elapsed time : %s", end.Sub(start).String()) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: nil, @@ -242,9 +242,9 @@ func MigrationFirestoreToMongoDBPostHandler(ctx echo.Context) error { if err := gcpNRDB.Copy(ncpNRDB); err != nil { end := time.Now() - logger.Errorf("NRDBController copy failed : %v", err) - logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Msgf("NRDBController copy failed : %v", err) + logger.Info().Msgf("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) + logger.Info().Msgf("Elapsed time : %s", end.Sub(start).String()) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: nil, @@ -301,9 +301,9 @@ func MigrationMongoDBToDynamoDBPostHandler(ctx echo.Context) error { if err := ncpNRDB.Copy(awsNRDB); err != nil { end := time.Now() - logger.Errorf("NRDBController copy failed : %v", err) - logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Msgf("NRDBController copy failed : %v", err) + logger.Info().Msgf("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) + logger.Info().Msgf("Elapsed time : %s", end.Sub(start).String()) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: nil, @@ -360,9 +360,9 @@ func 
MigrationMongoDBToFirestorePostHandler(ctx echo.Context) error { if err := ncpNRDB.Copy(gcpNRDB); err != nil { end := time.Now() - logger.Errorf("NRDBController copy failed : %v", err) - logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Msgf("NRDBController copy failed : %v", err) + logger.Info().Msgf("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) + logger.Info().Msgf("Elapsed time : %s", end.Sub(start).String()) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: nil, diff --git a/websrc/controllers/migrationS3Handlers.go b/websrc/controllers/migrationS3Handlers.go index 33d80ce..df375b7 100644 --- a/websrc/controllers/migrationS3Handlers.go +++ b/websrc/controllers/migrationS3Handlers.go @@ -172,12 +172,12 @@ func MigrationS3ToGCPPostHandler(ctx echo.Context) error { }) } - logger.Infof("Start migration of AWS S3 to GCP Cloud Storage") + logger.Info().Msgf("Start migration of AWS S3 to GCP Cloud Storage") if err := awsOSC.Copy(gcpOSC); err != nil { end := time.Now() - logger.Errorf("OSController migration failed : %v", err) - logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Msgf("OSController migration failed : %v", err) + logger.Info().Msgf("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) + logger.Info().Msgf("Elapsed time : %s", end.Sub(start).String()) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: nil, @@ -232,12 +232,12 @@ func MigrationS3ToNCPPostHandler(ctx echo.Context) error { }) } - logger.Info("Start migration of AWS S3 to NCP Objest Storage") + logger.Info().Msg("Start migration of AWS S3 to NCP Object Storage") if err := awsOSC.Copy(ncpOSC); err != nil { end := time.Now() - logger.Errorf("OSController copy failed : %v", err) 
- logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(start).String()) + logger.Error().Msgf("OSController copy failed : %v", err) + logger.Info().Msgf("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) + logger.Info().Msgf("Elapsed time : %s", end.Sub(start).String()) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: nil, diff --git a/websrc/controllers/pageHandlers.go b/websrc/controllers/pageHandlers.go index 8b4985b..489531a 100644 --- a/websrc/controllers/pageHandlers.go +++ b/websrc/controllers/pageHandlers.go @@ -39,7 +39,7 @@ func MainGetHandler(ctx echo.Context) error { func GenerateLinuxGetHandler(ctx echo.Context) error { logger := getLogger("genlinux") - logger.Info("genlinux get page accessed") + logger.Info().Msg("genlinux get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Generate-Linux", @@ -53,7 +53,7 @@ func GenerateWindowsGetHandler(ctx echo.Context) error { // tmpPath := filepath.Join(os.TempDir(), "dummy") logger := getLogger("genwindows") - logger.Info("genwindows get page accessed") + logger.Info().Msg("genwindows get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Generate-Windows", OS: runtime.GOOS, @@ -64,7 +64,7 @@ func GenerateWindowsGetHandler(ctx echo.Context) error { func GenerateS3GetHandler(ctx echo.Context) error { logger := getLogger("genS3") - logger.Info("genS3 get page accessed") + logger.Info().Msg("genS3 get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Generate-S3", OS: runtime.GOOS, @@ -75,7 +75,7 @@ func GenerateS3GetHandler(ctx echo.Context) error { func GenerateGCPGetHandler(ctx echo.Context) error { logger := getLogger("genGCP") - logger.Info("genGCP get page accessed") + logger.Info().Msg("genGCP get page accessed") return 
ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Generate-GCP", OS: runtime.GOOS, @@ -87,7 +87,7 @@ func GenerateGCPGetHandler(ctx echo.Context) error { func GenerateNCPGetHandler(ctx echo.Context) error { logger := getLogger("genNCP") - logger.Info("genNCP get page accessed") + logger.Info().Msg("genNCP get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Generate-NCP", OS: runtime.GOOS, @@ -99,7 +99,7 @@ func GenerateNCPGetHandler(ctx echo.Context) error { func GenerateMySQLGetHandler(ctx echo.Context) error { logger := getLogger("genmysql") - logger.Info("genmysql get page accessed") + logger.Info().Msg("genmysql get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Generate-MySQL", OS: runtime.GOOS, @@ -109,7 +109,7 @@ func GenerateMySQLGetHandler(ctx echo.Context) error { func GenerateDynamoDBGetHandler(ctx echo.Context) error { logger := getLogger("gendynamodb") - logger.Info("gendynamodb get page accessed") + logger.Info().Msg("gendynamodb get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Generate-DynamoDB", OS: runtime.GOOS, @@ -120,7 +120,7 @@ func GenerateDynamoDBGetHandler(ctx echo.Context) error { func GenerateFirestoreGetHandler(ctx echo.Context) error { logger := getLogger("genfirestore") - logger.Info("genfirestore get page accessed") + logger.Info().Msg("genfirestore get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Generate-Firestore", OS: runtime.GOOS, @@ -131,7 +131,7 @@ func GenerateFirestoreGetHandler(ctx echo.Context) error { func GenerateMongoDBGetHandler(ctx echo.Context) error { logger := getLogger("genfirestore") - logger.Info("genmongodb get page accessed") + logger.Info().Msg("genmongodb get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Generate-MongoDB", 
OS: runtime.GOOS, @@ -145,7 +145,7 @@ func GenerateMongoDBGetHandler(ctx echo.Context) error { func BackupHandler(ctx echo.Context) error { logger := getLogger("backup") - logger.Info("backup get page accessed") + logger.Info().Msg("backup get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Backup", Regions: GetAWSRegions(), @@ -161,7 +161,7 @@ func BackupHandler(ctx echo.Context) error { func MigrationLinuxToS3GetHandler(ctx echo.Context) error { logger := getLogger("miglins3") - logger.Info("miglinux get page accessed") + logger.Info().Msg("miglinux get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-Linux-S3", Regions: GetAWSRegions(), @@ -172,7 +172,7 @@ func MigrationLinuxToS3GetHandler(ctx echo.Context) error { func MigrationLinuxToGCPGetHandler(ctx echo.Context) error { logger := getLogger("miglingcp") - logger.Info("miglingcp get page accessed") + logger.Info().Msg("miglingcp get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-Linux-GCP", Regions: GetGCPRegions(), @@ -184,7 +184,7 @@ func MigrationLinuxToGCPGetHandler(ctx echo.Context) error { func MigrationLinuxToNCPGetHandler(ctx echo.Context) error { logger := getLogger("miglinncp") - logger.Info("miglinncp get page accessed") + logger.Info().Msg("miglinncp get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-Linux-NCP", Regions: GetNCPRegions(), @@ -198,7 +198,7 @@ func MigrationLinuxToNCPGetHandler(ctx echo.Context) error { func MigrationWindowsToS3GetHandler(ctx echo.Context) error { tmpPath := filepath.Join(os.TempDir(), "dummy") logger := getLogger("migwins3") - logger.Info("migwins3 get page accessed") + logger.Info().Msg("migwins3 get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-Windows-S3", Regions: 
GetAWSRegions(), @@ -212,7 +212,7 @@ func MigrationWindowsToS3GetHandler(ctx echo.Context) error { func MigrationWindowsToGCPGetHandler(ctx echo.Context) error { tmpPath := filepath.Join(os.TempDir(), "dummy") logger := getLogger("migwingcp") - logger.Info("migwingcp get page accessed") + logger.Info().Msg("migwingcp get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-Windows-GCP", Regions: GetGCPRegions(), @@ -226,7 +226,7 @@ func MigrationWindowsToNCPGetHandler(ctx echo.Context) error { tmpPath := filepath.Join(os.TempDir(), "dummy") logger := getLogger("migwinncp") - logger.Info("migwinncp get page accessed") + logger.Info().Msg("migwinncp get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-Windows-NCP", Regions: GetNCPRegions(), @@ -241,7 +241,7 @@ func MigrationWindowsToNCPGetHandler(ctx echo.Context) error { func MigrationMySQLGetHandler(ctx echo.Context) error { logger := getLogger("migmysql") - logger.Info("migmysql get page accessed") + logger.Info().Msg("migmysql get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-MySQL", Error: nil, @@ -255,7 +255,7 @@ func MigrationMySQLGetHandler(ctx echo.Context) error { func MigrationS3ToLinuxGetHandler(ctx echo.Context) error { logger := getLogger("migs3lin") - logger.Info("migs3lin get page accessed") + logger.Info().Msg("migs3lin get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-S3-Linux", Regions: GetAWSRegions(), @@ -268,7 +268,7 @@ func MigrationS3ToWindowsGetHandler(ctx echo.Context) error { tmpPath := filepath.Join(os.TempDir(), "dummy") logger := getLogger("migs3win") - logger.Info("migs3win get page accessed") + logger.Info().Msg("migs3win get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-S3-Windows", 
Regions: GetAWSRegions(), @@ -281,7 +281,7 @@ func MigrationS3ToWindowsGetHandler(ctx echo.Context) error { func MigrationS3ToGCPGetHandler(ctx echo.Context) error { logger := getLogger("migs3gcp") - logger.Info("migs3gcp get page accessed") + logger.Info().Msg("migs3gcp get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-S3-GCP", AWSRegions: GetAWSRegions(), @@ -294,7 +294,7 @@ func MigrationS3ToGCPGetHandler(ctx echo.Context) error { func MigrationS3ToNCPGetHandler(ctx echo.Context) error { logger := getLogger("migs3ncp") - logger.Info("migs3ncp get page accessed") + logger.Info().Msg("migs3ncp get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-S3-NCP", AWSRegions: GetAWSRegions(), @@ -310,7 +310,7 @@ func MigrationS3ToNCPGetHandler(ctx echo.Context) error { func MigrationGCPToLinuxGetHandler(ctx echo.Context) error { logger := getLogger("miggcplin") - logger.Info("miggcplin get page accessed") + logger.Info().Msg("miggcplin get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-GCP-Linux", OS: runtime.GOOS, @@ -323,7 +323,7 @@ func MigrationGCPToWindowsGetHandler(ctx echo.Context) error { tmpPath := filepath.Join(os.TempDir(), "dummy") logger := getLogger("miggcpwin") - logger.Info("miggcpwin get page accessed") + logger.Info().Msg("miggcpwin get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-GCP-Windows", OS: runtime.GOOS, @@ -347,7 +347,7 @@ func MigrationGCPToS3GetHandler(ctx echo.Context) error { func MigrationGCPToNCPGetHandler(ctx echo.Context) error { logger := getLogger("miggcpncp") - logger.Info("miggcpncp get page accessed") + logger.Info().Msg("miggcpncp get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-GCP-NCP", OS: runtime.GOOS, @@ -363,7 +363,7 @@ 
func MigrationGCPToNCPGetHandler(ctx echo.Context) error { func MigrationNCPToLinuxGetHandler(ctx echo.Context) error { logger := getLogger("migncplin") - logger.Info("migncplin get page accessed") + logger.Info().Msg("migncplin get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-NCP-Linux", Regions: GetNCPRegions(), @@ -376,7 +376,7 @@ func MigrationNCPToWindowsGetHandler(ctx echo.Context) error { tmpPath := filepath.Join(os.TempDir(), "dummy") logger := getLogger("migncpwin") - logger.Info("migncpwin get page accessed") + logger.Info().Msg("migncpwin get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-NCP-Windows", Regions: GetNCPRegions(), @@ -389,7 +389,7 @@ func MigrationNCPToWindowsGetHandler(ctx echo.Context) error { func MigrationNCPToS3GetHandler(ctx echo.Context) error { logger := getLogger("migncps3") - logger.Info("migncps3 get page accessed") + logger.Info().Msg("migncps3 get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-NCP-S3", NCPRegions: GetNCPRegions(), @@ -402,7 +402,7 @@ func MigrationNCPToS3GetHandler(ctx echo.Context) error { func MigrationNCPToGCPGetHandler(ctx echo.Context) error { logger := getLogger("migncpgcp") - logger.Info("migncpgcp get page accessed") + logger.Info().Msg("migncpgcp get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-NCP-GCP", NCPRegions: GetNCPRegions(), @@ -418,7 +418,7 @@ func MigrationNCPToGCPGetHandler(ctx echo.Context) error { func MigrationDynamoDBToFirestoreGetHandler(ctx echo.Context) error { logger := getLogger("migDNFS") - logger.Info("migDNFS get page accessed") + logger.Info().Msg("migDNFS get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-DynamoDB-Firestore", AWSRegions: GetAWSRegions(), @@ -431,7 
+431,7 @@ func MigrationDynamoDBToFirestoreGetHandler(ctx echo.Context) error { func MigrationDynamoDBToMongoDBGetHandler(ctx echo.Context) error { logger := getLogger("migDNMG") - logger.Info("migDNMG get page accessed") + logger.Info().Msg("migDNMG get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-DynamoDB-MongoDB", Regions: GetAWSRegions(), @@ -446,7 +446,7 @@ func MigrationDynamoDBToMongoDBGetHandler(ctx echo.Context) error { func MigrationFirestoreToDynamoDBGetHandler(ctx echo.Context) error { logger := getLogger("migFSDN") - logger.Info("migFSDN get page accessed") + logger.Info().Msg("migFSDN get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-Firestore-DynamoDB", AWSRegions: GetAWSRegions(), @@ -459,7 +459,7 @@ func MigrationFirestoreToDynamoDBGetHandler(ctx echo.Context) error { func MigrationFirestoreToMongoDBGetHandler(ctx echo.Context) error { logger := getLogger("migFSMG") - logger.Info("migFSMG get page accessed") + logger.Info().Msg("migFSMG get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-Firestore-MongoDB", Regions: GetGCPRegions(), @@ -474,7 +474,7 @@ func MigrationFirestoreToMongoDBGetHandler(ctx echo.Context) error { func MigrationMongoDBToDynamoDBGetHandler(ctx echo.Context) error { logger := getLogger("migMGDN") - logger.Info("migMGDN get page accessed") + logger.Info().Msg("migMGDN get page accessed") return ctx.Render(http.StatusOK, "index.html", models.BasicPageResponse{ Content: "Migration-MongoDB-DynamoDB", Regions: GetAWSRegions(), @@ -486,7 +486,7 @@ func MigrationMongoDBToDynamoDBGetHandler(ctx echo.Context) error { func MigrationMongoDBToFirestoreGetHandler(ctx echo.Context) error { logger := getLogger("migMGFS") - logger.Info("migMGFS get page accessed") + logger.Info().Msg("migMGFS get page accessed") return ctx.Render(http.StatusOK, "index.html", 
models.BasicPageResponse{ Content: "Migration-MongoDB-Firestore", Regions: GetGCPRegions(), diff --git a/websrc/controllers/publicfunc.go b/websrc/controllers/publicfunc.go index 6396c7e..e1a6db2 100644 --- a/websrc/controllers/publicfunc.go +++ b/websrc/controllers/publicfunc.go @@ -33,7 +33,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/dynamodb" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/cloud-barista/mc-data-manager/config" - "github.com/cloud-barista/mc-data-manager/internal/log" "github.com/cloud-barista/mc-data-manager/models" "github.com/cloud-barista/mc-data-manager/pkg/nrdbms/awsdnmdb" "github.com/cloud-barista/mc-data-manager/pkg/nrdbms/gcpfsdb" @@ -45,278 +44,278 @@ import ( "github.com/cloud-barista/mc-data-manager/service/osc" "github.com/cloud-barista/mc-data-manager/service/rdbc" "github.com/labstack/echo/v4" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" "github.com/spf13/cast" "go.mongodb.org/mongo-driver/mongo" ) -func getLogger(jobName string) *logrus.Logger { - logger := logrus.StandardLogger() - logger.SetFormatter(&log.CustomTextFormatter{CmdName: "server", JobName: jobName}) - return logger +func getLogger(jobName string) *zerolog.Logger { + logger := log.With().Str("jobName", jobName).Logger() + return &logger } -func pageLogInit(pageName, pageInfo string, startTime time.Time) (*logrus.Logger, *strings.Builder) { +func pageLogInit(pageName, pageInfo string, startTime time.Time) (*zerolog.Logger, *strings.Builder) { logger := getLogger(pageName) - var logstrings = strings.Builder{} + var logstrings strings.Builder - logger.Infof("%s post page accessed", pageName) + logger.Info().Msgf("%s post page accessed", pageName) - logger.SetOutput(io.MultiWriter(logger.Out, &logstrings)) + logger.Output(io.MultiWriter(logger, &logstrings)) - logger.Info(pageInfo) - logger.Infof("start time : %s", startTime.Format("2006-01-02T15:04:05-07:00")) + logger.Info().Msg(pageInfo) + 
logger.Info().Str("start time", startTime.Format("2006-01-02T15:04:05-07:00")).Msg("") return logger, &logstrings } -func osCheck(logger *logrus.Logger, startTime time.Time, osName string) bool { - logger.Info("Check the operating system") +func osCheck(logger *zerolog.Logger, startTime time.Time, osName string) bool { + logger.Info().Msg("Check the operating system") if runtime.GOOS != osName { end := time.Now() - logger.Errorf("Not a %s operating system", osName) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Msgf("Not a %s operating system", osName) + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return false } return true } -func dummyCreate(logger *logrus.Logger, startTime time.Time, params GenFileParams) bool { - logger.Info("Start dummy generation") +func dummyCreate(logger *zerolog.Logger, startTime time.Time, params GenFileParams) bool { + logger.Info().Msg("Start dummy generation") err := genData(params, logger) if err != nil { end := time.Now() - logger.Errorf("Failed to generate dummy data : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("Failed to generate dummy data") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return false } return true } -func jobEnd(logger *logrus.Logger, endInfo string, startTime time.Time) { +func jobEnd(logger *zerolog.Logger, endInfo string, startTime time.Time) { end := time.Now() - logger.Info(endInfo) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Info().Msg(endInfo) + 
logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") } -func createDummyTemp(logger *logrus.Logger, startTime time.Time) (string, bool) { - logger.Info("Create a temporary directory where dummy data will be created") +func createDummyTemp(logger *zerolog.Logger, startTime time.Time) (string, bool) { + logger.Info().Msg("Create a temporary directory where dummy data will be created") tmpDir, err := os.MkdirTemp("", "datamold-dummy") if err != nil { end := time.Now() - logger.Error("Failed to generate dummy data : failed to create tmpdir") - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("Failed to generate dummy data: failed to create tmpdir") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return "", false } else { return tmpDir, true } } -func getS3OSC(logger *logrus.Logger, startTime time.Time, jobType string, params interface{}) *osc.OSController { +func getS3OSC(logger *zerolog.Logger, startTime time.Time, jobType string, params interface{}) *osc.OSController { gparam, _ := params.(ProviderConfig) var err error var s3c *s3.Client var awsOSC *osc.OSController - logger.Info("Get S3 Client") + logger.Info().Msg("Get S3 Client") credentailManger := config.NewProfileManager() creds, err := credentailManger.LoadCredentialsByProfile(gparam.ProfileName, gparam.Provider) if err != nil { end := time.Now() - logger.Errorf("credentail load failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("credentail load failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + 
logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } awsc, ok := creds.(models.AWSCredentials) if !ok { end := time.Now() - logger.Errorf("AWS client creation failed") - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Msg("AWS client creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } s3c, err = config.NewS3Client(awsc.AccessKey, awsc.SecretKey, gparam.Region) if err != nil { end := time.Now() - logger.Errorf("s3 client creation failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("s3 client creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } - logger.Info("Set up the client as an OSController") + logger.Info().Msg("Set up the client as an OSController") if jobType == "gen" { - awsOSC, err = osc.New(s3fs.New(models.AWS, s3c, gparam.Bucket, gparam.Region), osc.WithLogger(logger)) + awsOSC, err = osc.New(s3fs.New(models.AWS, s3c, gparam.Bucket, gparam.Region)) } else { - awsOSC, err = osc.New(s3fs.New(models.AWS, s3c, gparam.Bucket, gparam.Region), osc.WithLogger(logger)) + awsOSC, err = osc.New(s3fs.New(models.AWS, s3c, gparam.Bucket, gparam.Region)) } if err != nil { end := time.Now() - logger.Errorf("OSController creation failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("OSController creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + 
logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } return awsOSC } -func getS3COSC(logger *logrus.Logger, startTime time.Time, jobType string, params interface{}) *osc.OSController { +func getS3COSC(logger *zerolog.Logger, startTime time.Time, jobType string, params interface{}) *osc.OSController { gparam, _ := params.(ProviderConfig) var err error var s3c *s3.Client var OSC *osc.OSController - logger.Info("Get S3 Compataible Client") + logger.Info().Msg("Get S3 Compataible Client") credentailManger := config.NewProfileManager() creds, err := credentailManger.LoadCredentialsByProfile(gparam.ProfileName, gparam.Provider) if err != nil { end := time.Now() - logger.Errorf("S3 credentail load failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("S3 credentail load failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } ncpc, ok := creds.(models.NCPCredentials) if !ok { - logger.Errorf(" credential load failed") + logger.Error().Msg("credential load failed") } s3c, err = config.NewS3ClientWithEndpoint(ncpc.AccessKey, ncpc.SecretKey, gparam.Region, gparam.Endpoint) if err != nil { end := time.Now() - logger.Errorf("S3 s3 compatible client creation failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("S3 s3 compatible client creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } - logger.Info("Set up the client as an OSController") - OSC, err = osc.New(s3fs.New(models.NCP, s3c, gparam.Bucket, gparam.Region), osc.WithLogger(logger)) 
+ logger.Info().Msg("Set up the client as an OSController") + OSC, err = osc.New(s3fs.New(models.NCP, s3c, gparam.Bucket, gparam.Region)) if err != nil { end := time.Now() - logger.Errorf("OSController creation failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("OSController creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } return OSC } -func getGCPCOSC(logger *logrus.Logger, startTime time.Time, jobType string, params interface{}) *osc.OSController { +func getGCPCOSC(logger *zerolog.Logger, startTime time.Time, jobType string, params interface{}) *osc.OSController { gparam, _ := params.(ProviderConfig) var err error var gcpOSC *osc.OSController - logger.Info("Get GCP Client") + logger.Info().Msg("Get GCP Client") credentailManger := config.NewProfileManager() creds, err := credentailManger.LoadCredentialsByProfile(gparam.ProfileName, gparam.Provider) if err != nil { end := time.Now() - logger.Errorf("gcp credentail load failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("gcp credentail load failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } gcpc, ok := creds.(models.GCPCredentials) if !ok { - logger.Errorf(" credential load failed") + logger.Error().Msg("credential load failed") return nil } credentialsJson, err := json.Marshal(gcpc) if err != nil { end := time.Now() - logger.Errorf("gcp credentail json Marshal failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", 
end.Sub(startTime).String()) + logger.Error().Err(err).Msg("gcp credentail json Marshal failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } gc, err := config.NewGCPClient(string(credentialsJson)) if err != nil { end := time.Now() - logger.Errorf("gcp client creation failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("gcp client creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } - logger.Info("Set up the client as an OSController") + logger.Info().Msg("Set up the client as an OSController") if jobType == "gen" { - gcpOSC, err = osc.New(gcpfs.New(gc, gcpc.ProjectID, gparam.Bucket, gparam.Region), osc.WithLogger(logger)) + gcpOSC, err = osc.New(gcpfs.New(gc, gcpc.ProjectID, gparam.Bucket, gparam.Region)) } else { - gcpOSC, err = osc.New(gcpfs.New(gc, gcpc.ProjectID, gparam.Bucket, gparam.Region), osc.WithLogger(logger)) + gcpOSC, err = osc.New(gcpfs.New(gc, gcpc.ProjectID, gparam.Bucket, gparam.Region)) } if err != nil { end := time.Now() - logger.Errorf("OSController creation failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("OSController creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } return gcpOSC } -func getMysqlRDBC(logger *logrus.Logger, startTime time.Time, jobType string, params interface{}) *rdbc.RDBController { +func getMysqlRDBC(logger *zerolog.Logger, startTime time.Time, jobType string, params 
interface{}) *rdbc.RDBController { gparam, _ := params.(ProviderConfig) var err error var sqlDB *sql.DB var RDBC *rdbc.RDBController - logger.Infof("Get SQL Client %v", jobType) + logger.Info().Msgf("Get SQL Client %v", jobType) sqlDB, err = sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%s)/", gparam.User, gparam.Password, gparam.Host, gparam.Port)) if err != nil { end := time.Now() - logger.Errorf("sqlDB client creation failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("sqlDB client creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } - logger.Info("Set up the client as an RDBController") - RDBC, err = rdbc.New(mysql.New(models.Provider(gparam.Provider), sqlDB), rdbc.WithLogger(logger)) + logger.Info().Msg("Set up the client as an RDBController") + RDBC, err = rdbc.New(mysql.New(models.Provider(gparam.Provider), sqlDB)) if err != nil { end := time.Now() - logger.Errorf("RDBController creation failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("RDBController creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } return RDBC } -func getDynamoNRDBC(logger *logrus.Logger, startTime time.Time, jobType string, params interface{}) *nrdbc.NRDBController { +func getDynamoNRDBC(logger *zerolog.Logger, startTime time.Time, jobType string, params interface{}) *nrdbc.NRDBController { gparam, _ := params.(ProviderConfig) var err error var dc *dynamodb.Client var NRDBC *nrdbc.NRDBController - logger.Info("Get DynamoDB Client") + logger.Info().Msg("Get 
DynamoDB Client") credentailManger := config.NewProfileManager() creds, err := credentailManger.LoadCredentialsByProfile(gparam.ProfileName, gparam.Provider) if err != nil { end := time.Now() - logger.Errorf("aws credentail load failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("aws credentail load failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } awsc, ok := creds.(models.AWSCredentials) if !ok { - logger.Errorf(" credential load failed") + logger.Error().Msg("credential load failed") } if jobType == "gen" { dc, err = config.NewDynamoDBClient(awsc.AccessKey, awsc.SecretKey, gparam.Region) @@ -325,58 +324,58 @@ func getDynamoNRDBC(logger *logrus.Logger, startTime time.Time, jobType string, } if err != nil { end := time.Now() - logger.Errorf("dynamoDB client creation failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("dynamoDB client creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } - logger.Info("Set up the client as an NRDBController") + logger.Info().Msg("Set up the client as an NRDBController") if jobType == "gen" { - NRDBC, err = nrdbc.New(awsdnmdb.New(dc, gparam.Region), nrdbc.WithLogger(logger)) + NRDBC, err = nrdbc.New(awsdnmdb.New(dc, gparam.Region)) } else { - NRDBC, err = nrdbc.New(awsdnmdb.New(dc, gparam.Region), nrdbc.WithLogger(logger)) + NRDBC, err = nrdbc.New(awsdnmdb.New(dc, gparam.Region)) } if err != nil { end := time.Now() - logger.Errorf("NRDBController creation failed : %v", err) - logger.Infof("end time : %s", 
end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("NRDBController creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } return NRDBC } -func getFirestoreNRDBC(logger *logrus.Logger, startTime time.Time, jobType string, params interface{}) *nrdbc.NRDBController { +func getFirestoreNRDBC(logger *zerolog.Logger, startTime time.Time, jobType string, params interface{}) *nrdbc.NRDBController { gparam, _ := params.(ProviderConfig) var err error var fc *firestore.Client var NRDBC *nrdbc.NRDBController - logger.Info("Get FirestoreDB Client") + logger.Info().Msg("Get FirestoreDB Client") credentailManger := config.NewProfileManager() creds, err := credentailManger.LoadCredentialsByProfile(gparam.ProfileName, gparam.Provider) if err != nil { end := time.Now() - logger.Errorf("gcp credentail load failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("gcp credentail load failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } gcpc, ok := creds.(models.GCPCredentials) if !ok { - logger.Errorf(" credential load failed") + logger.Error().Msg("credential load failed") return nil } credentialsJson, err := json.Marshal(gcpc) if err != nil { end := time.Now() - logger.Errorf("gcp credentail json Marshal failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("gcp credentail json Marshal failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + 
logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } @@ -384,34 +383,34 @@ func getFirestoreNRDBC(logger *logrus.Logger, startTime time.Time, jobType strin if err != nil { end := time.Now() - logger.Errorf("firestoreDB client creation failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("firestoreDB client creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } - logger.Info("Set up the client as an NRDBController") - NRDBC, err = nrdbc.New(gcpfsdb.New(fc, gparam.Region), nrdbc.WithLogger(logger)) + logger.Info().Msg("Set up the client as an NRDBController") + NRDBC, err = nrdbc.New(gcpfsdb.New(fc, gparam.Region)) if err != nil { end := time.Now() - logger.Errorf("NRDBController creation failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("NRDBController creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } return NRDBC } -func getMongoNRDBC(logger *logrus.Logger, startTime time.Time, jobType string, params interface{}) *nrdbc.NRDBController { +func getMongoNRDBC(logger *zerolog.Logger, startTime time.Time, jobType string, params interface{}) *nrdbc.NRDBController { gparam, _ := params.(ProviderConfig) var err error var mc *mongo.Client var NRDBC *nrdbc.NRDBController - logger.Info("Get MongoDB Client") + logger.Info().Msg("Get MongoDB Client") if jobType == "gen" { mc, err = config.NewNCPMongoDBClient(gparam.User, gparam.Password, gparam.Host, cast.ToInt(gparam.Port)) } else { @@ -419,42 +418,42 @@ func 
getMongoNRDBC(logger *logrus.Logger, startTime time.Time, jobType string, p } if err != nil { end := time.Now() - logger.Errorf("mongoDB client creation failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("mongoDB client creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } - logger.Info("Set up the client as an NRDBController") + logger.Info().Msg("Set up the client as an NRDBController") if jobType == "gen" { - NRDBC, err = nrdbc.New(ncpmgdb.New(mc, gparam.DatabaseName), nrdbc.WithLogger(logger)) + NRDBC, err = nrdbc.New(ncpmgdb.New(mc, gparam.DatabaseName)) } else { - NRDBC, err = nrdbc.New(ncpmgdb.New(mc, gparam.DatabaseName), nrdbc.WithLogger(logger)) + NRDBC, err = nrdbc.New(ncpmgdb.New(mc, gparam.DatabaseName)) } if err != nil { end := time.Now() - logger.Errorf("NRDBController creation failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("NRDBController creation failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return nil } return NRDBC } -func nrdbPutWorker(logger *logrus.Logger, startTime time.Time, dbType string, nrdbc *nrdbc.NRDBController, jsonList []string) bool { +func nrdbPutWorker(logger *zerolog.Logger, startTime time.Time, dbType string, nrdbc *nrdbc.NRDBController, jsonList []string) bool { var wg sync.WaitGroup var mu sync.Mutex ret := make(chan error) - logger.Infof("Start Import with %s", dbType) + logger.Info().Msgf("Start Import with %s", dbType) for _, j := range jsonList { wg.Add(1) go func(jPath string, jret chan<- error) { defer 
wg.Done() mu.Lock() - logger.Infof("Read json file : %s", jPath) + logger.Info().Msgf("Read json file : %s", jPath) mu.Unlock() data, err := os.ReadFile(jPath) @@ -463,7 +462,7 @@ func nrdbPutWorker(logger *logrus.Logger, startTime time.Time, dbType string, nr return } - logger.Infof("data unmarshal : %s", filepath.Base(jPath)) + logger.Info().Msgf("data unmarshal : %s", filepath.Base(jPath)) var jsonData []map[string]interface{} err = json.Unmarshal(data, &jsonData) if err != nil { @@ -474,7 +473,7 @@ func nrdbPutWorker(logger *logrus.Logger, startTime time.Time, dbType string, nr tableName := strings.TrimSuffix(filepath.Base(jPath), ".json") mu.Lock() - logger.Infof("Put start : %s", filepath.Base(jPath)) + logger.Info().Msgf("Put start : %s", filepath.Base(jPath)) mu.Unlock() if err := nrdbc.Put(tableName, &jsonData); err != nil { @@ -494,9 +493,9 @@ func nrdbPutWorker(logger *logrus.Logger, startTime time.Time, dbType string, nr for result := range ret { if result != nil { end := time.Now() - logger.Errorf("NRDBController Import failed : %v", result) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(result).Msg("NRDBController Import failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return false } } @@ -504,7 +503,7 @@ func nrdbPutWorker(logger *logrus.Logger, startTime time.Time, dbType string, nr return true } -func walk(logger *logrus.Logger, startTime time.Time, list *[]string, dirPath string, ext string) bool { +func walk(logger *zerolog.Logger, startTime time.Time, list *[]string, dirPath string, ext string) bool { err := filepath.Walk(dirPath, func(path string, _ fs.FileInfo, err error) error { if err != nil { return err @@ -518,33 +517,33 @@ func walk(logger *logrus.Logger, startTime time.Time, list *[]string, dirPath st }) if err != 
nil { end := time.Now() - logger.Errorf("filepath walk failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("filepath walk failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return false } return true } -func oscImport(logger *logrus.Logger, startTime time.Time, osType string, osc *osc.OSController, dstDir string) bool { - logger.Infof("Start Import with %s", osType) +func oscImport(logger *zerolog.Logger, startTime time.Time, osType string, osc *osc.OSController, dstDir string) bool { + logger.Info().Msgf("Start Import with %s", osType) if err := osc.MPut(dstDir); err != nil { end := time.Now() - logger.Errorf("OSController import failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("OSController import failed") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return false } return true } -func oscExport(logger *logrus.Logger, startTime time.Time, osType string, osc *osc.OSController, dstDir string) bool { - logger.Infof("Start Export with %s", osType) +func oscExport(logger *zerolog.Logger, startTime time.Time, osType string, osc *osc.OSController, dstDir string) bool { + logger.Info().Msgf("Start Export with %s", osType) if err := osc.MGet(dstDir); err != nil { end := time.Now() - logger.Errorf("OSController export failed : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("OSController export failed") + logger.Info().Str("end time", 
end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return false } return true @@ -591,36 +590,38 @@ func getFileData(jobtype string, ctx echo.Context) interface{} { } // Bind onetime -func getDataWithBind(logger *logrus.Logger, startTime time.Time, ctx echo.Context, params interface{}) bool { +func getDataWithBind(logger *zerolog.Logger, startTime time.Time, ctx echo.Context, params interface{}) bool { + if err := ctx.Bind(params); err != nil { end := time.Now() - logger.Error("Failed to bind form data") - logger.Infof("params : %+v", ctx.Request().Body) - logger.Infof("End time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Msg("Failed to bind form data") + logger.Info().Interface("params", ctx.Request().Body).Msg("") + logger.Info().Str("End time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return false } return true } // For Rebind -func getDataWithReBind(logger *logrus.Logger, startTime time.Time, ctx echo.Context, params interface{}) bool { +func getDataWithReBind(logger *zerolog.Logger, startTime time.Time, ctx echo.Context, params interface{}) bool { + bodyBytes, err := io.ReadAll(ctx.Request().Body) if err != nil { - logger.Error("Failed to read request body") + logger.Error().Msg("Failed to read request body") return false } - logger.Infof("Request Body: %s", string(bodyBytes)) + logger.Info().Str("Request Body", string(bodyBytes)).Msg("") ctx.Request().Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) if err := ctx.Bind(params); err != nil { end := time.Now() - logger.Error("Failed to bind form data") - logger.Infof("Params: %+v", string(bodyBytes)) - logger.Infof("End time: %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time: %s", end.Sub(startTime).String()) + logger.Error().Msg("Failed to bind form 
data") + logger.Info().Interface("Params", string(bodyBytes)).Msg("") + logger.Info().Str("End time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return false } @@ -628,25 +629,25 @@ func getDataWithReBind(logger *logrus.Logger, startTime time.Time, ctx echo.Cont return true } -func gcpCreateCredFile(logger *logrus.Logger, startTime time.Time, ctx echo.Context) (string, string, bool) { - logger.Info("Create a temporary directory where credential files will be stored") +func gcpCreateCredFile(logger *zerolog.Logger, startTime time.Time, ctx echo.Context) (string, string, bool) { + logger.Info().Msg("Create a temporary directory where credential files will be stored") // func (*http.Request).FormFile(key string) (multipart.File, *multipart.FileHeader, error) // gcpCredentialFile, gcpCredentialHeader, err := ctx.Request.FormFile("gcpCredential") gcpCredentialHeader, err := ctx.FormFile("gcpCredential") if err != nil { end := time.Now() - logger.Errorf("Get CredentialFile error : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("Get CredentialFile error") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return "", "", false } credTmpDir, err := os.MkdirTemp("", "datamold-gcp-cred-") if err != nil { end := time.Now() - logger.Errorf("Get CredentialFile error : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("Get CredentialFile error") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return "", "", false } @@ -655,9 +656,9 @@ func 
gcpCreateCredFile(logger *logrus.Logger, startTime time.Time, ctx echo.Cont // err = ctx.SaveUploadedFile(gcpCredentialHeader, credFileName) if err != nil { end := time.Now() - logger.Errorf("Get CredentialFile error : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("Get CredentialFile error") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return "", "", false } defer gcpCredentialFile.Close() @@ -665,18 +666,18 @@ func gcpCreateCredFile(logger *logrus.Logger, startTime time.Time, ctx echo.Cont dst, err := os.Create(credFileName) if err != nil { end := time.Now() - logger.Errorf("File create error : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("File create error") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return "", "", false } defer dst.Close() if _, err = io.Copy(dst, gcpCredentialFile); err != nil { end := time.Now() - logger.Errorf("File copy error : %v", err) - logger.Infof("end time : %s", end.Format("2006-01-02T15:04:05-07:00")) - logger.Infof("Elapsed time : %s", end.Sub(startTime).String()) + logger.Error().Err(err).Msg("File copy error") + logger.Info().Str("end time", end.Format("2006-01-02T15:04:05-07:00")).Msg("") + logger.Info().Str("Elapsed time", end.Sub(startTime).String()).Msg("") return "", "", false } diff --git a/websrc/controllers/restoreHandlers.go b/websrc/controllers/restoreHandlers.go index 9fd545d..0eb33f6 100644 --- a/websrc/controllers/restoreHandlers.go +++ b/websrc/controllers/restoreHandlers.go @@ -29,7 +29,7 @@ import ( 
"github.com/cloud-barista/mc-data-manager/service/nrdbc" "github.com/cloud-barista/mc-data-manager/service/rdbc" "github.com/labstack/echo/v4" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog/log" ) // RestoreOSPostHandler godoc @@ -48,12 +48,12 @@ func RestoreOSPostHandler(ctx echo.Context) error { logger, logstrings := pageLogInit("Restore-objectstorage", "Import data to objectstorage", start) params := models.RestoreTask{} if !getDataWithReBind(logger, start, ctx, ¶ms) { + log.Error().Msgf("Req params err") return ctx.JSON(http.StatusOK, models.BasicResponse{ Result: logstrings.String(), Error: nil, }) } - switch params.TargetPoint.Provider { case string(models.AWS): return MigrationLinuxToS3PostHandler(ctx) @@ -62,7 +62,7 @@ func RestoreOSPostHandler(ctx echo.Context) error { case string(models.GCP): return MigrationLinuxToGCPPostHandler(ctx) default: - logger.Errorf("Unsupported provider: %v", params.TargetPoint.Provider) + logger.Error().Msgf("Unsupported provider: %v", params.TargetPoint.Provider) errorMsg := fmt.Sprintf("unsupported provider: %v", params.TargetPoint.Provider) return ctx.JSON(http.StatusBadRequest, models.BasicResponse{ Result: logstrings.String(), @@ -93,6 +93,7 @@ func RestoreRDBPostHandler(ctx echo.Context) error { params := models.RestoreTask{} if !getDataWithBind(logger, start, ctx, ¶ms) { + log.Error().Msgf("Req params err") return ctx.JSON(http.StatusOK, models.BasicResponse{ Result: logstrings.String(), Error: nil, @@ -120,7 +121,7 @@ func RestoreRDBPostHandler(ctx echo.Context) error { }) if err != nil { errorMsg := fmt.Sprintf("Walk error: %v", err) - logger.Error(errorMsg) + logger.Error().Msgf(errorMsg) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: &errorMsg, @@ -131,22 +132,22 @@ func RestoreRDBPostHandler(ctx echo.Context) error { data, err := os.ReadFile(sqlPath) if err != nil { errorMsg := fmt.Sprintf("ReadFile error: %v", err) - logger.Error(errorMsg) + 
logger.Error().Msgf(errorMsg) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: &errorMsg, }) } - logrus.Infof("Import start: %s", sqlPath) + log.Info().Msgf("Import start: %s", sqlPath) if err := RDBC.Put(string(data)); err != nil { errorMsg := fmt.Sprintf("Put error importing into RDBMS: %v", err) - logger.Error(errorMsg) + logger.Error().Msgf(errorMsg) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: &errorMsg, }) } - logrus.Infof("Import success: %s", sqlPath) + log.Info().Msgf("Import success: %s", sqlPath) } jobEnd(logger, "Successfully Imported data from mysql", start) @@ -178,6 +179,7 @@ func RestoreNRDBPostHandler(ctx echo.Context) error { params := models.RestoreTask{} if !getDataWithReBind(logger, start, ctx, ¶ms) { + log.Error().Msgf("Req params err") return ctx.JSON(http.StatusOK, models.BasicResponse{ Result: logstrings.String(), Error: nil, @@ -186,7 +188,7 @@ func RestoreNRDBPostHandler(ctx echo.Context) error { NRDBC, err = auth.GetNRDMS(¶ms.TargetPoint) if err != nil { - logrus.Errorf("NRDBController error importing into nrdbms : %v", err) + log.Error().Msgf("NRDBController error importing into nrdbms : %v", err) return err } @@ -202,7 +204,7 @@ func RestoreNRDBPostHandler(ctx echo.Context) error { }) if err != nil { - logrus.Errorf("Walk error : %v", err) + log.Error().Msgf("Walk error : %v", err) return err } @@ -212,25 +214,25 @@ func RestoreNRDBPostHandler(ctx echo.Context) error { file, err := os.Open(jsonFile) if err != nil { - logrus.Errorf("file open error : %v", err) + log.Error().Msgf("file open error : %v", err) return err } defer file.Close() if err := json.NewDecoder(file).Decode(&srcData); err != nil { - logrus.Errorf("file decoding error : %v", err) + log.Error().Msgf("file decoding error : %v", err) return err } fileName := filepath.Base(jsonFile) tableName := fileName[:len(fileName)-len(filepath.Ext(fileName))] - 
logrus.Infof("Import start: %s", fileName) + log.Info().Msgf("Import start: %s", fileName) if err := NRDBC.Put(tableName, &srcData); err != nil { - logrus.Error("Put error importing into nrdbms") + log.Error().Msgf("Put error importing into nrdbms") return err } - logrus.Infof("successfully imported : %s", params.SourcePoint.Path) + log.Info().Msgf("successfully imported : %s", params.SourcePoint.Path) } jobEnd(logger, "Successfully Imported NRDB from Data", start) diff --git a/websrc/controllers/taskHandlers.go b/websrc/controllers/taskHandlers.go index 3c09647..6e773aa 100644 --- a/websrc/controllers/taskHandlers.go +++ b/websrc/controllers/taskHandlers.go @@ -22,7 +22,6 @@ import ( "github.com/cloud-barista/mc-data-manager/models" "github.com/cloud-barista/mc-data-manager/service/task" "github.com/labstack/echo/v4" - "github.com/sirupsen/logrus" ) // TaskController is a struct that holds a reference to the TaskService @@ -40,11 +39,14 @@ type TaskController struct { // @Failure 500 {object} models.BasicResponse "Internal Server Error" // @Router /task [get] func (tc *TaskController) GetAllTasksHandler(ctx echo.Context) error { + start := time.Now() + logger, logstrings := pageLogInit("Get-task-list", "Get an existing task", start) tasks, err := tc.TaskService.GetScheduleList() if err != nil { errStr := err.Error() + logger.Error().Err(err) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ - Result: "Failed to retrieve tasks", + Result: logstrings.String(), Error: &errStr, }) } @@ -69,14 +71,15 @@ func (tc *TaskController) CreateTaskHandler(ctx echo.Context) error { params := models.Schedule{} if !getDataWithReBind(logger, start, ctx, ¶ms) { errStr := "Invalid request data" + logger.Error().Msg(errStr) return ctx.JSON(http.StatusBadRequest, models.BasicResponse{ Result: logstrings.String(), Error: &errStr, }) } - logrus.Infof("parasm : %+v", params) if err := tc.TaskService.CreateSchedule(params); err != nil { errStr := err.Error() + 
logger.Error().Err(err) return ctx.JSON(http.StatusInternalServerError, models.BasicResponse{ Result: logstrings.String(), Error: &errStr, @@ -84,7 +87,7 @@ func (tc *TaskController) CreateTaskHandler(ctx echo.Context) error { } return ctx.JSON(http.StatusOK, models.BasicResponse{ - Result: "Task created successfully", + Result: logstrings.String(), Error: nil, }) } @@ -100,12 +103,15 @@ func (tc *TaskController) CreateTaskHandler(ctx echo.Context) error { // @Failure 404 {object} models.BasicResponse "Task not found" // @Router /task/{id} [get] func (tc *TaskController) GetTaskHandler(ctx echo.Context) error { + start := time.Now() + logger, logstrings := pageLogInit("Get-task", "Get an existing task", start) id := ctx.Param("id") task, err := tc.TaskService.GetSchedule(id) if err != nil { errStr := err.Error() + logger.Error().Err(err) return ctx.JSON(http.StatusNotFound, models.BasicResponse{ - Result: "Task not found", + Result: logstrings.String(), Error: &errStr, }) } @@ -133,6 +139,7 @@ func (tc *TaskController) UpdateTaskHandler(ctx echo.Context) error { params := models.Schedule{} if !getDataWithReBind(logger, start, ctx, ¶ms) { errStr := "Invalid request data" + logger.Error().Msg(errStr) return ctx.JSON(http.StatusBadRequest, models.BasicResponse{ Result: logstrings.String(), Error: &errStr, @@ -148,7 +155,7 @@ func (tc *TaskController) UpdateTaskHandler(ctx echo.Context) error { } return ctx.JSON(http.StatusOK, models.BasicResponse{ - Result: "Task updated successfully", + Result: logstrings.String(), Error: nil, }) } @@ -164,17 +171,21 @@ func (tc *TaskController) UpdateTaskHandler(ctx echo.Context) error { // @Failure 404 {object} models.BasicResponse "Task not found" // @Router /task/{id} [delete] func (tc *TaskController) DeleteTaskHandler(ctx echo.Context) error { + start := time.Now() + logger, logstrings := pageLogInit("Delete-task", "Delete an existing task", start) id := ctx.Param("id") if err := tc.TaskService.DeleteSchedule(id); err != nil { 
errStr := "Task not found" + logger.Error().Msg(errStr) + return ctx.JSON(http.StatusNotFound, models.BasicResponse{ - Result: "Task not found", + Result: logstrings.String(), Error: &errStr, }) } return ctx.JSON(http.StatusOK, models.BasicResponse{ - Result: "Task deleted successfully", + Result: logstrings.String(), Error: nil, }) } diff --git a/websrc/middlewares/Tracing.go b/websrc/middlewares/Tracing.go new file mode 100644 index 0000000..9ffc2df --- /dev/null +++ b/websrc/middlewares/Tracing.go @@ -0,0 +1,65 @@ +package middlewares + +import ( + "context" + "fmt" + "time" + + "github.com/cloud-barista/mc-data-manager/pkg/logger" + "github.com/labstack/echo/v4" + "github.com/rs/zerolog/log" +) + +// TracingMiddleware intercepts the request, sets up tracing information, and logs both request and response details. +func TracingMiddleware(next echo.HandlerFunc) echo.HandlerFunc { + return func(c echo.Context) error { + // Get the context and initialize trace and span IDs + ctx := c.Request().Context() + traceId := c.Response().Header().Get(echo.HeaderXRequestID) + spanId := fmt.Sprintf("%d", time.Now().UnixNano()) + + // Store trace and span IDs in the context + ctx = context.WithValue(ctx, logger.TraceIdKey, traceId) + ctx = context.WithValue(ctx, logger.SpanIdKey, spanId) + + // Create a logger with trace_id and span_id and store it in the context + requestLogger := log.With(). + Str("Host", c.Request().Host). + Str("RemoteAddr", c.Request().RemoteAddr). + Str("RequestURI", c.Request().RequestURI). + Str("UserAgent", c.Request().UserAgent()). + Str("X-Request-ID", c.Request().Header.Get("X-Request-ID")). + Str("X-Trace-ID", c.Request().Header.Get("X-Trace-ID")). + Str("X-Forwarded-For", c.Request().Header.Get("X-Forwarded-For")). + Str("X-Real-IP", c.Request().Header.Get("X-Real-IP")). + Str("Authorization", c.Request().Header.Get("Authorization")). + Str(string(logger.TraceIdKey), traceId). + Str(string(logger.SpanIdKey), spanId). + Caller(). 
+ Logger() + + // Add the logger with context + ctx = requestLogger.WithContext(ctx) + c.SetRequest(c.Request().WithContext(ctx)) + + // Log the incoming request + log.Ctx(ctx).Info().Msg("[tracing] receive request") + + // Measure the latency + startTime := time.Now() + latency := time.Since(startTime) + // Log the response details + c.Response().Before(func() { + log.Ctx(ctx).Info(). + Int("Status", c.Response().Status). + Int64("Latency", latency.Nanoseconds()). + Str("LatencyHuman", latency.String()). + Int64("BytesIn", c.Request().ContentLength). + Int64("BytesOut", c.Response().Size). + Msg("[tracing] send response") + }) + + // Return the error if any + return next(c) + } +} diff --git a/websrc/serve/serve.go b/websrc/serve/serve.go index af9bb21..8196ee5 100644 --- a/websrc/serve/serve.go +++ b/websrc/serve/serve.go @@ -25,7 +25,9 @@ import ( "github.com/cloud-barista/mc-data-manager/service/task" "github.com/cloud-barista/mc-data-manager/websrc/controllers" + "github.com/cloud-barista/mc-data-manager/websrc/middlewares" "github.com/cloud-barista/mc-data-manager/websrc/routes" + "github.com/rs/zerolog/log" // REST API (echo) "github.com/labstack/echo/v4" @@ -108,15 +110,17 @@ func TrustedProxiesMiddleware(trustedProxies []string) echo.MiddlewareFunc { func InitServer(port string, addIP ...string) *echo.Echo { e := echo.New() - // Middleware - e.Use(middleware.Logger()) - e.Use(middleware.Recover()) - e.HideBanner = true allowIP := []string{"127.0.0.1", "::1"} allowIP = append(allowIP, addIP...) 
+ + // Middleware e.Use(TrustedProxiesMiddleware(allowIP)) + e.Use(middleware.Logger()) + e.Use(middleware.Recover()) + // Custom middleware for tracing + e.Use(middlewares.TracingMiddleware) e.Static("/res", "./web") e.File("/favicon.ico", "./web/assets/favicon.ico") @@ -152,13 +156,13 @@ func InitServer(port string, addIP ...string) *echo.Echo { website := " http://" + selfEndpoint apidashboard := " http://" + selfEndpoint + "/swagger/index.html" - fmt.Println("Data Manager Web UI is available at") - fmt.Printf(noticeColor, website) - fmt.Println("\n ") + log.Info().Msgf("Data Manager Web UI is available at") + log.Info().Msgf(noticeColor, website) + log.Info().Msgf("\n ") - fmt.Println("Swagger UI (REST API Document) is available at") - fmt.Printf(noticeColor, apidashboard) - fmt.Println("\n ") + log.Info().Msgf("Swagger UI (REST API Document) is available at") + log.Info().Msgf(noticeColor, apidashboard) + log.Info().Msgf("\n ") return e }