diff --git a/Makefile b/Makefile index 8b1e86a170..3d537b9b40 100644 --- a/Makefile +++ b/Makefile @@ -62,6 +62,8 @@ RPM_BUILD_IMAGE ?= rpmbuild/centos7 ## Static Parameter, should not be overwrite GOBIN = ${shell pwd}/bin PARSER_PATH = ${shell pwd}/vendor/github.com/pingcap/parser +LOCALE_PATH = ${shell pwd}/sqle/locale +PLUGIN_LOCALE_PATH = ${shell pwd}/sqle/driver/mysql/plocale ## Arm Build ARM_CGO_BUILD_FLAG = @@ -70,6 +72,23 @@ ifeq ($(EDITION)_$(GOARCH),ee_arm64) endif default: install + +######################################## i18n ########################################################## +install_i18n_tool: + GOBIN=$(GOBIN) go install -v github.com/nicksnyder/go-i18n/v2/goi18n@latest + +extract_i18n: + cd ${LOCALE_PATH} && $(GOBIN)/goi18n extract -sourceLanguage zh + cd ${PLUGIN_LOCALE_PATH} && $(GOBIN)/goi18n extract -sourceLanguage zh + +start_trans_i18n: + cd ${LOCALE_PATH} && touch translate.en.toml && $(GOBIN)/goi18n merge -sourceLanguage=zh active.*.toml + cd ${PLUGIN_LOCALE_PATH} && touch translate.en.toml && $(GOBIN)/goi18n merge -sourceLanguage=zh active.*.toml + +end_trans_i18n: + cd ${LOCALE_PATH} && $(GOBIN)/goi18n merge active.en.toml translate.en.toml && rm -rf translate.en.toml + cd ${PLUGIN_LOCALE_PATH} && $(GOBIN)/goi18n merge active.en.toml translate.en.toml && rm -rf translate.en.toml + ######################################## Code Check #################################################### ## Static Code Analysis vet: swagger diff --git a/go.mod b/go.mod index 46df732814..e0e9fe8f8f 100644 --- a/go.mod +++ b/go.mod @@ -65,10 +65,12 @@ require ( ) require ( + github.com/BurntSushi/toml v1.3.2 github.com/aliyun/credentials-go v1.1.2 github.com/hashicorp/go-version v1.7.0 github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.69 - golang.org/x/text v0.13.0 + github.com/nicksnyder/go-i18n/v2 v2.4.0 + golang.org/x/text v0.14.0 gorm.io/driver/mysql v1.4.7 gorm.io/gorm v1.24.3 ) diff --git a/go.sum b/go.sum index 2aa0230b6c..5b29c96926 100644 --- 
a/go.sum +++ b/go.sum @@ -20,7 +20,8 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= @@ -630,6 +631,8 @@ github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8 github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k= github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8= github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= +github.com/nicksnyder/go-i18n/v2 v2.4.0 h1:3IcvPOAvnCKwNm0TB0dLDTuawWEj+ax/RERNC+diLMM= +github.com/nicksnyder/go-i18n/v2 v2.4.0/go.mod h1:nxYSZE9M0bf3Y70gPQjN9ha7XNHX7gMc814+6wVyEI4= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= @@ -1141,8 +1144,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod 
h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/sqle/api/app.go b/sqle/api/app.go index 9b257dc61e..a8e37cff29 100644 --- a/sqle/api/app.go +++ b/sqle/api/app.go @@ -8,6 +8,7 @@ import ( "github.com/actiontech/dms/pkg/dms-common/api/accesstoken" dmsV1 "github.com/actiontech/dms/pkg/dms-common/api/dms/v1" jwtPkg "github.com/actiontech/dms/pkg/dms-common/api/jwt" + "github.com/actiontech/sqle/sqle/locale" // "github.com/actiontech/sqle/sqle/api/cloudbeaver_wrapper" "github.com/actiontech/sqle/sqle/api/controller" @@ -107,9 +108,9 @@ func StartApi(net *gracenet.Net, exitChan chan struct{}, config *config.SqleOpti }) v1Router := e.Group(apiV1) - v1Router.Use(sqleMiddleware.JWTTokenAdapter(), sqleMiddleware.JWTWithConfig(dmsV1.JwtSigningKey), sqleMiddleware.VerifyUserIsDisabled(), sqleMiddleware.OperationLogRecord(), accesstoken.CheckLatestAccessToken(controller.GetDMSServerAddress(), jwtPkg.GetTokenDetailFromContextWithOldJwt)) + v1Router.Use(sqleMiddleware.JWTTokenAdapter(), sqleMiddleware.JWTWithConfig(dmsV1.JwtSigningKey), sqleMiddleware.VerifyUserIsDisabled(), sqleMiddleware.OperationLogRecord(), accesstoken.CheckLatestAccessToken(controller.GetDMSServerAddress(), 
jwtPkg.GetTokenDetailFromContextWithOldJwt), locale.EchoMiddlewareI18nByAcceptLanguage()) v2Router := e.Group(apiV2) - v2Router.Use(sqleMiddleware.JWTTokenAdapter(), sqleMiddleware.JWTWithConfig(dmsV1.JwtSigningKey), sqleMiddleware.VerifyUserIsDisabled(), sqleMiddleware.OperationLogRecord(), accesstoken.CheckLatestAccessToken(controller.GetDMSServerAddress(), jwtPkg.GetTokenDetailFromContextWithOldJwt)) + v2Router.Use(sqleMiddleware.JWTTokenAdapter(), sqleMiddleware.JWTWithConfig(dmsV1.JwtSigningKey), sqleMiddleware.VerifyUserIsDisabled(), sqleMiddleware.OperationLogRecord(), accesstoken.CheckLatestAccessToken(controller.GetDMSServerAddress(), jwtPkg.GetTokenDetailFromContextWithOldJwt), locale.EchoMiddlewareI18nByAcceptLanguage()) // v1 admin api, just admin user can access. { diff --git a/sqle/api/controller/v1/audit_plan.go b/sqle/api/controller/v1/audit_plan.go index 0eeb2a8e23..09a4b3f9f2 100644 --- a/sqle/api/controller/v1/audit_plan.go +++ b/sqle/api/controller/v1/audit_plan.go @@ -19,6 +19,7 @@ import ( "github.com/actiontech/sqle/sqle/driver" driverV2 "github.com/actiontech/sqle/sqle/driver/v2" "github.com/actiontech/sqle/sqle/errors" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/log" "github.com/actiontech/sqle/sqle/model" "github.com/actiontech/sqle/sqle/notification" @@ -1463,7 +1464,7 @@ func GetAuditPlanReportSQLsV1(c echo.Context) error { for i, auditPlanReportSQL := range auditPlanReportSQLs { auditPlanReportSQLsResV1[i] = AuditPlanReportSQLResV1{ SQL: auditPlanReportSQL.SQL, - AuditResult: auditPlanReportSQL.AuditResults.String(), + AuditResult: auditPlanReportSQL.AuditResults.String(c.Request().Context()), Number: auditPlanReportSQL.Number, } } @@ -1474,10 +1475,13 @@ func GetAuditPlanReportSQLsV1(c echo.Context) error { }) } -func spliceAuditResults(auditResults []model.AuditResult) string { +func spliceAuditResults(ctx context.Context, auditResults []model.AuditResult) string { + lang := 
locale.GetLangTagFromCtx(ctx) results := []string{} for _, auditResult := range auditResults { - results = append(results, fmt.Sprintf("[%v]%v", auditResult.Level, auditResult.Message)) + results = append(results, + fmt.Sprintf("[%v]%v", auditResult.Level, auditResult.GetAuditMsgByLangTag(lang.String())), + ) } return strings.Join(results, "\n") } @@ -1517,16 +1521,17 @@ func ExportAuditPlanReportV1(c echo.Context) error { return controller.JSONBaseErrorReq(c, fmt.Errorf("the audit plan corresponding to the report was not found")) } + ctx := c.Request().Context() baseInfo := [][]string{ - {"扫描任务名称", auditPlanName}, - {"报告生成时间", reportInfo.CreatedAt.Format("2006/01/02 15:04")}, - {"审核结果评分", strconv.FormatInt(int64(reportInfo.Score), 10)}, - {"审核通过率", fmt.Sprintf("%v%%", reportInfo.PassRate*100)}, - {"所属项目", projectName}, - {"扫描任务创建人", dms.GetUserNameWithDelTag(reportInfo.AuditPlan.CreateUserID)}, - {"扫描任务类型", reportInfo.AuditPlan.Type}, - {"数据库类型", reportInfo.AuditPlan.DBType}, - {"审核的数据库", reportInfo.AuditPlan.InstanceDatabase}, + {locale.ShouldLocalizeMsg(ctx, locale.APExportTaskName), auditPlanName}, + {locale.ShouldLocalizeMsg(ctx, locale.APExportGenerationTime), reportInfo.CreatedAt.Format("2006/01/02 15:04")}, + {locale.ShouldLocalizeMsg(ctx, locale.APExportResultRating), strconv.FormatInt(int64(reportInfo.Score), 10)}, + {locale.ShouldLocalizeMsg(ctx, locale.APExportApprovalRate), fmt.Sprintf("%v%%", reportInfo.PassRate*100)}, + {locale.ShouldLocalizeMsg(ctx, locale.APExportBelongingProject), projectName}, + {locale.ShouldLocalizeMsg(ctx, locale.APExportCreator), dms.GetUserNameWithDelTag(reportInfo.AuditPlan.CreateUserID)}, + {locale.ShouldLocalizeMsg(ctx, locale.APExportType), reportInfo.AuditPlan.Type}, + {locale.ShouldLocalizeMsg(ctx, locale.APExportDbType), reportInfo.AuditPlan.DBType}, + {locale.ShouldLocalizeMsg(ctx, locale.APExportDatabase), reportInfo.AuditPlan.InstanceDatabase}, } err = csvWriter.WriteAll(baseInfo) if err != nil { @@ -1539,14 
+1544,18 @@ func ExportAuditPlanReportV1(c echo.Context) error { return controller.JSONBaseErrorReq(c, err) } - err = csvWriter.Write([]string{"编号", "SQL", "审核结果"}) + err = csvWriter.Write([]string{ + locale.ShouldLocalizeMsg(ctx, locale.APExportNumber), // 编号 + "SQL", + locale.ShouldLocalizeMsg(ctx, locale.APExportAuditResult), // 审核结果 + }) if err != nil { return controller.JSONBaseErrorReq(c, err) } sqlInfo := [][]string{} for idx, sql := range reportInfo.AuditPlanReportSQLs { - sqlInfo = append(sqlInfo, []string{strconv.Itoa(idx + 1), sql.SQL, spliceAuditResults(sql.AuditResults)}) + sqlInfo = append(sqlInfo, []string{strconv.Itoa(idx + 1), sql.SQL, spliceAuditResults(ctx, sql.AuditResults)}) } err = csvWriter.WriteAll(sqlInfo) @@ -1556,7 +1565,7 @@ func ExportAuditPlanReportV1(c echo.Context) error { csvWriter.Flush() - fileName := fmt.Sprintf("扫描任务报告_%s_%s.csv", auditPlanName, time.Now().Format("20060102150405")) + fileName := fmt.Sprintf("audit_plan_report_%s_%s.csv", auditPlanName, time.Now().Format("20060102150405")) c.Response().Header().Set(echo.HeaderContentDisposition, mime.FormatMediaType("attachment", map[string]string{ "filename": fileName, })) diff --git a/sqle/api/controller/v1/instance.go b/sqle/api/controller/v1/instance.go index 4176ea5df1..b28f34ca5f 100644 --- a/sqle/api/controller/v1/instance.go +++ b/sqle/api/controller/v1/instance.go @@ -446,6 +446,7 @@ func GetInstanceTips(c echo.Context) error { // @router /v1/projects/{project_name}/instances/{instance_name}/rules [get] func GetInstanceRules(c echo.Context) error { s := model.GetStorage() + ctx := c.Request().Context() instanceName := c.Param("instance_name") projectUid, err := dms.GetPorjectUIDByName(context.TODO(), c.Param("project_name")) @@ -475,7 +476,7 @@ func GetInstanceRules(c echo.Context) error { } return c.JSON(http.StatusOK, &GetRulesResV1{ BaseRes: controller.NewBaseReq(nil), - Data: convertRulesToRes(rules), + Data: convertRulesToRes(ctx, rules), }) } diff --git 
a/sqle/api/controller/v1/instance_audit_plan.go b/sqle/api/controller/v1/instance_audit_plan.go index e02579fccd..03bcc198dd 100644 --- a/sqle/api/controller/v1/instance_audit_plan.go +++ b/sqle/api/controller/v1/instance_audit_plan.go @@ -17,6 +17,7 @@ import ( v1 "github.com/actiontech/dms/pkg/dms-common/api/dms/v1" dmsCommonJwt "github.com/actiontech/dms/pkg/dms-common/api/jwt" "github.com/actiontech/sqle/sqle/api/controller" + scannerCmd "github.com/actiontech/sqle/sqle/cmd/scannerd/command" "github.com/actiontech/sqle/sqle/config" dms "github.com/actiontech/sqle/sqle/dms" "github.com/actiontech/sqle/sqle/driver" @@ -191,7 +192,7 @@ func CreateInstanceAuditPlan(c echo.Context) error { AuditPlans: auditPlans, ActiveStatus: model.ActiveStatusNormal, } - err = s.Save(ap) + err = s.SaveInstanceAuditPlan(ap) if err != nil { return controller.JSONBaseErrorReq(c, err) } @@ -781,16 +782,18 @@ func GetInstanceAuditPlanOverview(c echo.Context) error { typeBase := ConvertAuditPlanTypeToRes(v.ID, v.Type) resAuditPlan := InstanceAuditPlanInfo{ - ID: v.ID, - Type: typeBase, - DBType: detail.DBType, - InstanceName: inst.Name, - ExecCmd: execCmd, - RuleTemplate: ruleTemplate, - TotalSQLNums: totalSQLNums, - UnsolvedSQLNums: unsolvedSQLNums, - LastCollectionTime: v.LastCollectionTime, - ActiveStatus: v.ActiveStatus, + ID: v.ID, + Type: typeBase, + DBType: detail.DBType, + InstanceName: inst.Name, + ExecCmd: execCmd, + RuleTemplate: ruleTemplate, + TotalSQLNums: totalSQLNums, + UnsolvedSQLNums: unsolvedSQLNums, + ActiveStatus: v.ActiveStatus, + } + if v.AuditPlanTaskInfo != nil { + resAuditPlan.LastCollectionTime = v.AuditPlanTaskInfo.LastCollectionTime } resAuditPlans = append(resAuditPlans, resAuditPlan) } @@ -820,8 +823,23 @@ func GetAuditPlanExecCmd(projectName string, iap *model.InstanceAuditPlan, ap *m return "" } - cmd := `./scannerd %s --project=%s --host=%s --port=%s --audit_plan_id=%d --token=%s` - return fmt.Sprintf(cmd, ap.Type, projectName, ip, port, ap.ID, 
iap.Token) + scannerd, err := scannerCmd.GetScannerdCmd(ap.Type) + if err != nil { + logger.Infof("get scannerd %s failed %s", ap.Type, err) + return "" + } + cmd, err := scannerd.GenCommand("./scannerd", map[string]string{ + scannerCmd.FlagHost: ip, + scannerCmd.FlagPort: port, + scannerCmd.FlagProject: projectName, + scannerCmd.FlagToken: iap.Token, + scannerCmd.FlagAuditPlanID: fmt.Sprint(ap.ID), + }) + if err != nil { + logger.Infof("generate scannerd %s command failed %s", ap.Type, err) + return "" + } + return cmd } type UpdateInstanceAuditPlanStatusReqV1 struct { diff --git a/sqle/api/controller/v1/pipeline.go b/sqle/api/controller/v1/pipeline.go index 6871f21712..3d72283991 100644 --- a/sqle/api/controller/v1/pipeline.go +++ b/sqle/api/controller/v1/pipeline.go @@ -52,6 +52,10 @@ func (p *pipelineNodeDetail) fillWith(node *pipeline.PipelineNode) { if node == nil { return } + integrationInfo, err := node.IntegrationInfo() + if err != nil { + integrationInfo = err.Error() + } p.ID = node.ID p.Name = node.Name p.Type = node.NodeType @@ -61,7 +65,7 @@ func (p *pipelineNodeDetail) fillWith(node *pipeline.PipelineNode) { p.ObjectType = node.ObjectType p.AuditMethod = node.AuditMethod p.RuleTemplateName = node.RuleTemplateName - p.IntegrationInfo = node.IntegrationInfo() + p.IntegrationInfo = integrationInfo } // pipelineNodeBase 流水线节点基础信息 @@ -332,7 +336,7 @@ func UpdatePipeline(c echo.Context) error { var pipelineSvc pipeline.PipelineSvc svcPipeline := req.convertToSvcPipeline(projectUid, uint(pipelineID)) - + err = pipelineSvc.CheckRuleTemplate(svcPipeline) if err != nil { return controller.JSONBaseErrorReq(c, err) diff --git a/sqle/api/controller/v1/rule.go b/sqle/api/controller/v1/rule.go index b08ca15358..659ae60281 100644 --- a/sqle/api/controller/v1/rule.go +++ b/sqle/api/controller/v1/rule.go @@ -11,9 +11,10 @@ import ( "github.com/actiontech/sqle/sqle/api/controller" "github.com/actiontech/sqle/sqle/dms" + driverV2 
"github.com/actiontech/sqle/sqle/driver/v2" "github.com/actiontech/sqle/sqle/errors" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/model" - "github.com/labstack/echo/v4" ) @@ -272,14 +273,13 @@ type RuleTemplateDetailResV1 struct { RuleList []RuleResV1 `json:"rule_list,omitempty"` } -func convertRuleTemplateToRes(template *model.RuleTemplate) *RuleTemplateDetailResV1 { - +func convertRuleTemplateToRes(ctx context.Context, template *model.RuleTemplate) *RuleTemplateDetailResV1 { ruleList := make([]RuleResV1, 0, len(template.RuleList)) for _, r := range template.RuleList { if r.Rule == nil { continue } - ruleList = append(ruleList, convertRuleToRes(r.GetRule())) + ruleList = append(ruleList, convertRuleToRes(ctx, r.GetRule())) } for _, r := range template.CustomRuleList { if r.CustomRule == nil { @@ -306,6 +306,7 @@ func convertRuleTemplateToRes(template *model.RuleTemplate) *RuleTemplateDetailR // @router /v1/rule_templates/{rule_template_name}/ [get] func GetRuleTemplate(c echo.Context) error { s := model.GetStorage() + templateName := c.Param("rule_template_name") req := new(GetRuleTemplateReqV1) if err := controller.BindAndValidateReq(c, req); err != nil { @@ -319,10 +320,9 @@ func GetRuleTemplate(c echo.Context) error { return c.JSON(200, controller.NewBaseReq(errors.New(errors.DataNotExist, fmt.Errorf("rule template is not exist")))) } - return c.JSON(http.StatusOK, &GetRuleTemplateResV1{ BaseRes: controller.NewBaseReq(nil), - Data: convertRuleTemplateToRes(template), + Data: convertRuleTemplateToRes(c.Request().Context(), template), }) } @@ -420,7 +420,7 @@ func GetRuleTemplates(c echo.Context) error { return c.JSON(http.StatusOK, &GetRuleTemplatesResV1{ BaseRes: controller.NewBaseReq(nil), - Data: convertRuleTemplatesToRes(ruleTemplates), + Data: convertDefaultRuleTemplatesToRes(c.Request().Context(), ruleTemplates), TotalNums: count, }) } @@ -438,13 +438,13 @@ func getRuleTemplatesByReq(s *model.Storage, limit, offset uint32, 
projectId str return } -func convertRuleTemplatesToRes(ruleTemplates []*model.RuleTemplateDetail) []RuleTemplateResV1 { +func convertDefaultRuleTemplatesToRes(ctx context.Context, ruleTemplates []*model.RuleTemplateDetail) []RuleTemplateResV1 { ruleTemplatesReq := make([]RuleTemplateResV1, 0, len(ruleTemplates)) for _, ruleTemplate := range ruleTemplates { ruleTemplateReq := RuleTemplateResV1{ Name: ruleTemplate.Name, - Desc: ruleTemplate.Desc, + Desc: locale.ShouldLocalizeMsg(ctx, locale.DefaultRuleTemplatesDesc), DBType: ruleTemplate.DBType, } ruleTemplatesReq = append(ruleTemplatesReq, ruleTemplateReq) @@ -484,25 +484,31 @@ type RuleParamResV1 struct { Type string `json:"type" form:"type" enums:"string,int,bool"` } -func convertRuleToRes(rule *model.Rule) RuleResV1 { +func convertRuleToRes(ctx context.Context, rule *model.Rule) RuleResV1 { + lang := locale.GetLangTagFromCtx(ctx) + if rule.I18nRuleInfo == nil { + rule.I18nRuleInfo = make(driverV2.I18nRuleInfo) // avoid panic + } + ruleInfo := rule.I18nRuleInfo.GetRuleInfoByLangTag(lang.String()) ruleRes := RuleResV1{ Name: rule.Name, - Desc: rule.Desc, - Annotation: rule.Annotation, + Desc: ruleInfo.Desc, + Annotation: ruleInfo.Annotation, Level: rule.Level, - Typ: rule.Typ, + Typ: ruleInfo.Category, DBType: rule.DBType, HasAuditPower: rule.HasAuditPower, HasRewritePower: rule.HasRewritePower, } - if rule.Params != nil && len(rule.Params) > 0 { - paramsRes := make([]RuleParamResV1, 0, len(rule.Params)) - for _, p := range rule.Params { + params := ruleInfo.Params + if params != nil && len(params) > 0 { + paramsRes := make([]RuleParamResV1, 0, len(params)) + for _, p := range params { paramRes := RuleParamResV1{ Key: p.Key, Desc: p.Desc, Type: string(p.Type), - Value: p.Value, + Value: rule.Params.GetParam(p.Key).Value, // RuleInfo 中的params只用于国际化,Value 以 rule.Params 为准 } paramsRes = append(paramsRes, paramRes) } @@ -526,12 +532,12 @@ func convertCustomRuleToRuleResV1(rule *model.CustomRule) RuleResV1 { return 
ruleRes } -func convertRulesToRes(rules interface{}) []RuleResV1 { +func convertRulesToRes(ctx context.Context, rules interface{}) []RuleResV1 { rulesRes := []RuleResV1{} switch ruleSlice := rules.(type) { case []*model.Rule: for _, rule := range ruleSlice { - rulesRes = append(rulesRes, convertRuleToRes(rule)) + rulesRes = append(rulesRes, convertRuleToRes(ctx, rule)) } case []*model.CustomRule: for _, rule := range ruleSlice { @@ -557,6 +563,7 @@ func GetRules(c echo.Context) error { if err := controller.BindAndValidateReq(c, req); err != nil { return err } + ctx := c.Request().Context() s := model.GetStorage() var rules []*model.Rule var customRules []*model.CustomRule @@ -580,8 +587,8 @@ func GetRules(c echo.Context) error { return controller.JSONBaseErrorReq(c, err) } - ruleRes := convertRulesToRes(rules) - customRuleRes := convertRulesToRes(customRules) + ruleRes := convertRulesToRes(ctx, rules) + customRuleRes := convertRulesToRes(ctx, customRules) ruleRes = append(ruleRes, customRuleRes...) 
return c.JSON(http.StatusOK, &GetRulesResV1{ BaseRes: controller.NewBaseReq(nil), @@ -913,6 +920,7 @@ func GetProjectRuleTemplate(c echo.Context) error { if err != nil { return controller.JSONBaseErrorReq(c, err) } + ctx := c.Request().Context() s := model.GetStorage() templateName := c.Param("rule_template_name") req := new(GetRuleTemplateReqV1) @@ -930,17 +938,17 @@ func GetProjectRuleTemplate(c echo.Context) error { return c.JSON(http.StatusOK, &GetProjectRuleTemplateResV1{ BaseRes: controller.NewBaseReq(nil), - Data: convertProjectRuleTemplateToRes(template), + Data: convertProjectRuleTemplateToRes(ctx, template), }) } -func convertProjectRuleTemplateToRes(template *model.RuleTemplate) *RuleProjectTemplateDetailResV1 { +func convertProjectRuleTemplateToRes(ctx context.Context, template *model.RuleTemplate) *RuleProjectTemplateDetailResV1 { ruleList := make([]RuleResV1, 0, len(template.RuleList)) for _, r := range template.RuleList { if r.Rule == nil { continue } - ruleList = append(ruleList, convertRuleToRes(r.GetRule())) + ruleList = append(ruleList, convertRuleToRes(ctx, r.GetRule())) } for _, r := range template.CustomRuleList { if r.CustomRule == nil { @@ -1193,6 +1201,10 @@ func GetProjectRuleTemplateTips(c echo.Context) error { return getRuleTemplateTips(c, projectUid, req.FilterDBType) } +type ParseProjectRuleTemplateFileReqV1 struct { + FileType string `json:"file_type" form:"file_type" enums:"csv,json" valid:"required,oneof=csv json"` +} + type ParseProjectRuleTemplateFileResV1 struct { controller.BaseRes Data ParseProjectRuleTemplateFileResDataV1 `json:"data"` @@ -1212,8 +1224,10 @@ type ParseProjectRuleTemplateFileResDataV1 struct { // @Tags rule_template // @Accept mpfd // @Security ApiKeyAuth +// @Param file_type formData string true "file type" Enums(csv,json) // @Param rule_template_file formData file true "SQLE rule template file" // @Success 200 {object} v1.ParseProjectRuleTemplateFileResV1 +// @Success 400 file 1 "return error file" // @router 
/v1/rule_templates/parse [post] func ParseProjectRuleTemplateFile(c echo.Context) error { // 读取+解析文件 @@ -1237,11 +1251,30 @@ func ParseProjectRuleTemplateFile(c echo.Context) error { }) } +// GetRuleTemplateFile +// @Summary 获取规则模板文件 +// @Description get rule template file +// @Id getRuleTemplateFileV1 +// @Tags rule_template +// @Security ApiKeyAuth +// @Param instance_type query string true "instance type" +// @Param file_type query string true "file type" Enums(csv,json) +// @Success 200 file 1 "sqle rule template file" +// @router /v1/import_rule_template [get] +func GetRuleTemplateFile(c echo.Context) error { + return nil +} + +type ExportRuleTemplateFileReqV1 struct { + ExportType string `json:"export_type" query:"export_type" enums:"csv,json" valid:"required,oneof=csv json"` +} + // ExportRuleTemplateFile // @Summary 导出全局规则模板 // @Description export rule template // @Id exportRuleTemplateV1 // @Tags rule_template +// @Param export_type query string true "export type" Enums(csv,json) // @Param rule_template_name path string true "rule template name" // @Security ApiKeyAuth // @Success 200 file 1 "sqle rule template file" @@ -1251,6 +1284,10 @@ func ExportRuleTemplateFile(c echo.Context) error { return exportRuleTemplateFile(c, model.ProjectIdForGlobalRuleTemplate, templateName) } +type ExportProjectRuleTemplateFileReqV1 struct { + ExportType string `json:"export_type" query:"export_type" enums:"csv,json" valid:"required,oneof=csv json"` +} + // ExportProjectRuleTemplateFile // @Summary 导出项目规则模板 // @Description export rule template in a project @@ -1258,6 +1295,7 @@ func ExportRuleTemplateFile(c echo.Context) error { // @Tags rule_template // @Param project_name path string true "project name" // @Param rule_template_name path string true "rule template name" +// @Param export_type query string true "export type" Enums(csv,json) // @Security ApiKeyAuth // @Success 200 file 1 "sqle rule template file" // @router 
/v1/projects/{project_name}/rule_templates/{rule_template_name}/export [get] @@ -1284,6 +1322,8 @@ func exportRuleTemplateFile(c echo.Context, projectID string, ruleTemplateName s return controller.JSONBaseErrorReq(c, ErrRuleTemplateNotExist) } + lang := locale.GetLangTagFromCtx(c.Request().Context()) + // 补充缺失的信息(规则说明等描述信息) ruleNames := []string{} for _, rule := range template.RuleList { @@ -1295,9 +1335,9 @@ func exportRuleTemplateFile(c echo.Context, projectID string, ruleTemplateName s return controller.JSONBaseErrorReq(c, err) } - ruleCache := map[string] /*rule name*/ model.Rule{} + ruleCache := map[string] /*rule name*/ *model.Rule{} for _, rule := range rules { - ruleCache[rule.Name] = rule + ruleCache[rule.Name] = &rule } resp := ParseProjectRuleTemplateFileResDataV1{ @@ -1307,12 +1347,13 @@ func exportRuleTemplateFile(c echo.Context, projectID string, ruleTemplateName s RuleList: []RuleResV1{}, } for _, rule := range template.RuleList { + ruleInfo := ruleCache[rule.RuleName].I18nRuleInfo.GetRuleInfoByLangTag(lang.String()) r := RuleResV1{ Name: rule.RuleName, - Desc: ruleCache[rule.RuleName].Desc, - Annotation: ruleCache[rule.RuleName].Annotation, + Desc: ruleInfo.Desc, + Annotation: ruleInfo.Annotation, Level: rule.RuleLevel, - Typ: ruleCache[rule.RuleName].Typ, + Typ: ruleInfo.Category, DBType: rule.RuleDBType, Params: []RuleParamResV1{}, } diff --git a/sqle/api/controller/v1/sql_audit.go b/sqle/api/controller/v1/sql_audit.go index 9ff967735d..5afd28d11d 100644 --- a/sqle/api/controller/v1/sql_audit.go +++ b/sqle/api/controller/v1/sql_audit.go @@ -15,7 +15,6 @@ import ( "github.com/actiontech/sqle/sqle/log" "github.com/actiontech/sqle/sqle/model" "github.com/actiontech/sqle/sqle/server" - "github.com/labstack/echo/v4" ) @@ -99,6 +98,7 @@ func DirectAudit(c echo.Context) error { } l := log.NewEntry().WithField("/v1/sql_audit", "direct audit failed") + ctx := c.Request().Context() var instance *model.Instance var exist bool if req.ProjectName != nil && 
req.InstanceName != nil { @@ -129,17 +129,17 @@ func DirectAudit(c echo.Context) error { return c.JSON(http.StatusOK, DirectAuditResV1{ BaseRes: controller.BaseRes{}, - Data: convertTaskResultToAuditResV1(task), + Data: convertTaskResultToAuditResV1(ctx, task), }) } -func convertTaskResultToAuditResV1(task *model.Task) *AuditResDataV1 { +func convertTaskResultToAuditResV1(ctx context.Context, task *model.Task) *AuditResDataV1 { results := make([]AuditSQLResV1, len(task.ExecuteSQLs)) for i, sql := range task.ExecuteSQLs { results[i] = AuditSQLResV1{ Number: sql.Number, ExecSQL: sql.Content, - AuditResult: sql.GetAuditResults(), + AuditResult: sql.GetAuditResults(ctx), AuditLevel: sql.AuditLevel, } } @@ -206,6 +206,7 @@ func DirectAuditFiles(c echo.Context) error { l := log.NewEntry().WithField("api", "[post]/v1/audit_files") + ctx := c.Request().Context() var instance *model.Instance var exist bool if req.InstanceName != nil { @@ -236,7 +237,7 @@ func DirectAuditFiles(c echo.Context) error { return c.JSON(http.StatusOK, DirectAuditResV1{ BaseRes: controller.BaseRes{}, - Data: convertTaskResultToAuditResV1(task), + Data: convertTaskResultToAuditResV1(ctx, task), }) } diff --git a/sqle/api/controller/v1/sql_audit_record.go b/sqle/api/controller/v1/sql_audit_record.go index f94ca5cb65..514ab4254a 100644 --- a/sqle/api/controller/v1/sql_audit_record.go +++ b/sqle/api/controller/v1/sql_audit_record.go @@ -21,6 +21,7 @@ import ( "github.com/actiontech/sqle/sqle/dms" "github.com/actiontech/sqle/sqle/driver" "github.com/actiontech/sqle/sqle/errors" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/log" "github.com/actiontech/sqle/sqle/model" "github.com/actiontech/sqle/sqle/server" @@ -881,6 +882,9 @@ type GetSQLAuditRecordTagTipsResV1 struct { func GetSQLAuditRecordTagTipsV1(c echo.Context) error { return c.JSON(http.StatusOK, &GetSQLAuditRecordTagTipsResV1{ BaseRes: controller.BaseRes{}, - Tags: []string{"全量", "增量"}, + Tags: []string{ + 
locale.ShouldLocalizeMsg(c.Request().Context(), locale.AuditRecordTagFull), // 全量 + locale.ShouldLocalizeMsg(c.Request().Context(), locale.AuditRecordTagIncrement), // 增量 + }, }) } diff --git a/sqle/api/controller/v1/task.go b/sqle/api/controller/v1/task.go index 9c9c40b716..c641c8af11 100644 --- a/sqle/api/controller/v1/task.go +++ b/sqle/api/controller/v1/task.go @@ -23,6 +23,7 @@ import ( "github.com/actiontech/sqle/sqle/config" "github.com/actiontech/sqle/sqle/dms" "github.com/actiontech/sqle/sqle/errors" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/log" "github.com/actiontech/sqle/sqle/model" "github.com/actiontech/sqle/sqle/server" @@ -495,6 +496,7 @@ func GetTaskSQLs(c echo.Context) error { if err := controller.BindAndValidateReq(c, req); err != nil { return err } + ctx := c.Request().Context() s := model.GetStorage() taskId := c.Param("task_id") task, err := getTaskById(c.Request().Context(), taskId) @@ -529,7 +531,7 @@ func GetTaskSQLs(c echo.Context) error { Number: taskSQL.Number, Description: taskSQL.Description, ExecSQL: taskSQL.ExecSQL, - AuditResult: taskSQL.GetAuditResults(), + AuditResult: taskSQL.GetAuditResults(ctx), AuditLevel: taskSQL.AuditLevel, AuditStatus: taskSQL.AuditStatus, ExecResult: taskSQL.ExecResult, @@ -585,10 +587,21 @@ func DownloadTaskSQLReportFile(c echo.Context) error { if err != nil { return controller.JSONBaseErrorReq(c, err) } + + ctx := c.Request().Context() buff := &bytes.Buffer{} buff.WriteString("\xEF\xBB\xBF") // 写入UTF-8 BOM cw := csv.NewWriter(buff) - err = cw.Write([]string{"序号", "SQL", "SQL审核状态", "SQL审核结果", "SQL执行状态", "SQL执行结果", "SQL对应的回滚语句", "SQL描述"}) + err = cw.Write([]string{ + locale.ShouldLocalizeMsg(ctx, locale.TaskSQLReportIndex), // "序号", + locale.ShouldLocalizeMsg(ctx, locale.TaskSQLReportSQL), // "SQL", + locale.ShouldLocalizeMsg(ctx, locale.TaskSQLReportAuditStatus), // "SQL审核状态", + locale.ShouldLocalizeMsg(ctx, locale.TaskSQLReportAuditResult), // "SQL审核结果", + 
locale.ShouldLocalizeMsg(ctx, locale.TaskSQLReportExecStatus), // "SQL执行状态", + locale.ShouldLocalizeMsg(ctx, locale.TaskSQLReportExecResult), // "SQL执行结果", + locale.ShouldLocalizeMsg(ctx, locale.TaskSQLReportRollbackSQL), // "SQL对应的回滚语句", + locale.ShouldLocalizeMsg(ctx, locale.TaskSQLReportDescription), // "SQL描述", + }) if err != nil { return controller.JSONBaseErrorReq(c, errors.New(errors.WriteDataToTheFileError, err)) } @@ -601,9 +614,9 @@ func DownloadTaskSQLReportFile(c echo.Context) error { err := cw.Write([]string{ strconv.FormatUint(uint64(td.Number), 10), td.ExecSQL, - taskSql.GetAuditStatusDesc(), - taskSql.GetAuditResultDesc(), - taskSql.GetExecStatusDesc(), + taskSql.GetAuditStatusDesc(ctx), + taskSql.GetAuditResultDesc(ctx), + taskSql.GetExecStatusDesc(ctx), td.ExecResult, td.RollbackSQL.String, td.Description, @@ -613,7 +626,7 @@ func DownloadTaskSQLReportFile(c echo.Context) error { } } cw.Flush() - fileName := fmt.Sprintf("SQL审核报告_%v_%v.csv", task.InstanceName(), taskId) + fileName := fmt.Sprintf("SQL_audit_report_%v_%v.csv", task.InstanceName(), taskId) c.Response().Header().Set(echo.HeaderContentDisposition, mime.FormatMediaType("attachment", map[string]string{"filename": fileName})) return c.Blob(http.StatusOK, "text/csv", buff.Bytes()) diff --git a/sqle/api/controller/v1/workflow_ce.go b/sqle/api/controller/v1/workflow_ce.go index 62a2de4571..187fbc1513 100644 --- a/sqle/api/controller/v1/workflow_ce.go +++ b/sqle/api/controller/v1/workflow_ce.go @@ -6,11 +6,13 @@ package v1 import ( "context" e "errors" + "fmt" "net/http" "github.com/actiontech/sqle/sqle/api/controller" "github.com/actiontech/sqle/sqle/dms" "github.com/actiontech/sqle/sqle/errors" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/model" "github.com/labstack/echo/v4" ) @@ -33,6 +35,7 @@ func getWorkflowTemplate(c echo.Context) error { } td := model.DefaultWorkflowTemplate(projectUid) + td.Desc = 
fmt.Sprintf(locale.ShouldLocalizeMsg(c.Request().Context(), locale.DefaultTemplatesDesc), projectUid) return c.JSON(http.StatusOK, &GetWorkflowTemplateResV1{ BaseRes: controller.NewBaseReq(nil), diff --git a/sqle/api/controller/v2/audit_plan.go b/sqle/api/controller/v2/audit_plan.go index 63a4268df3..51aad02f5f 100644 --- a/sqle/api/controller/v2/audit_plan.go +++ b/sqle/api/controller/v2/audit_plan.go @@ -10,6 +10,7 @@ import ( "github.com/actiontech/sqle/sqle/common" "github.com/actiontech/sqle/sqle/driver" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/log" "github.com/actiontech/sqle/sqle/pkg/params" "github.com/actiontech/sqle/sqle/server" @@ -203,6 +204,7 @@ func GetAuditPlanReportSQLs(c echo.Context) error { } limit, offset := controller.GetLimitAndOffset(req.PageIndex, req.PageSize) + lang := locale.GetLangTagFromCtx(c.Request().Context()) data := map[string]interface{}{ "audit_plan_report_id": c.Param("audit_plan_report_id"), @@ -225,7 +227,7 @@ func GetAuditPlanReportSQLs(c echo.Context) error { ar := auditPlanReportSQL.AuditResults[j] auditPlanReportSQLsRes[i].AuditResult = append(auditPlanReportSQLsRes[i].AuditResult, &AuditResult{ Level: ar.Level, - Message: ar.Message, + Message: ar.GetAuditMsgByLangTag(lang.String()), RuleName: ar.RuleName, DbType: ap.DBType, }) diff --git a/sqle/api/controller/v2/sql_audit.go b/sqle/api/controller/v2/sql_audit.go index c3aaa826a5..e770756630 100644 --- a/sqle/api/controller/v2/sql_audit.go +++ b/sqle/api/controller/v2/sql_audit.go @@ -10,10 +10,10 @@ import ( v1 "github.com/actiontech/sqle/sqle/api/controller/v1" "github.com/actiontech/sqle/sqle/dms" "github.com/actiontech/sqle/sqle/errors" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/log" "github.com/actiontech/sqle/sqle/model" "github.com/actiontech/sqle/sqle/server" - "github.com/labstack/echo/v4" ) @@ -60,6 +60,7 @@ func DirectAudit(c echo.Context) error { return err } + ctx := c.Request().Context() sql 
:= req.SQLContent if req.SQLType == v1.SQLTypeMyBatis { sql, err = parser.ParseXML(req.SQLContent) @@ -78,11 +79,12 @@ func DirectAudit(c echo.Context) error { return c.JSON(http.StatusOK, DirectAuditResV2{ BaseRes: controller.BaseRes{}, - Data: convertTaskResultToAuditResV2(task), + Data: convertTaskResultToAuditResV2(ctx, task), }) } -func convertTaskResultToAuditResV2(task *model.Task) *AuditResDataV2 { +func convertTaskResultToAuditResV2(ctx context.Context, task *model.Task) *AuditResDataV2 { + lang := locale.GetLangTagFromCtx(ctx) results := make([]AuditSQLResV2, len(task.ExecuteSQLs)) for i, sql := range task.ExecuteSQLs { @@ -90,7 +92,7 @@ func convertTaskResultToAuditResV2(task *model.Task) *AuditResDataV2 { for j := range sql.AuditResults { ar[j] = &AuditResult{ Level: sql.AuditResults[j].Level, - Message: sql.AuditResults[j].Message, + Message: sql.AuditResults[j].GetAuditMsgByLangTag(lang.String()), RuleName: sql.AuditResults[j].RuleName, DbType: task.DBType, } @@ -99,7 +101,7 @@ func convertTaskResultToAuditResV2(task *model.Task) *AuditResDataV2 { results[i] = AuditSQLResV2{ Number: sql.Number, ExecSQL: sql.Content, - AuditResult: convertAuditResultToAuditResV2(sql.AuditResults), + AuditResult: convertAuditResultToAuditResV2(ctx, sql.AuditResults), AuditLevel: sql.AuditLevel, } @@ -168,6 +170,7 @@ func DirectAuditFiles(c echo.Context) error { l := log.NewEntry().WithField("api", "[post]/v2/audit_files") + ctx := c.Request().Context() var instance *model.Instance var exist bool if req.InstanceName != nil { @@ -209,17 +212,17 @@ func DirectAuditFiles(c echo.Context) error { return c.JSON(http.StatusOK, DirectAuditResV2{ BaseRes: controller.BaseRes{}, - Data: convertFileAuditTaskResultToAuditResV2(task), + Data: convertFileAuditTaskResultToAuditResV2(ctx, task), }) } -func convertFileAuditTaskResultToAuditResV2(task *model.Task) *AuditResDataV2 { +func convertFileAuditTaskResultToAuditResV2(ctx context.Context, task *model.Task) *AuditResDataV2 { results 
:= make([]AuditSQLResV2, len(task.ExecuteSQLs)) for i, sql := range task.ExecuteSQLs { results[i] = AuditSQLResV2{ Number: sql.Number, ExecSQL: sql.Content, - AuditResult: convertAuditResultToAuditResV2(sql.AuditResults), + AuditResult: convertAuditResultToAuditResV2(ctx, sql.AuditResults), AuditLevel: sql.AuditLevel, } @@ -232,12 +235,13 @@ func convertFileAuditTaskResultToAuditResV2(task *model.Task) *AuditResDataV2 { } } -func convertAuditResultToAuditResV2(auditResults model.AuditResults) []AuditResult { +func convertAuditResultToAuditResV2(ctx context.Context, auditResults model.AuditResults) []AuditResult { + lang := locale.GetLangTagFromCtx(ctx) ar := make([]AuditResult, len(auditResults)) for i := range auditResults { ar[i] = AuditResult{ Level: auditResults[i].Level, - Message: auditResults[i].Message, + Message: auditResults[i].GetAuditMsgByLangTag(lang.String()), RuleName: auditResults[i].RuleName, } } diff --git a/sqle/api/controller/v2/task.go b/sqle/api/controller/v2/task.go index 3bd815bad2..e0daf23969 100644 --- a/sqle/api/controller/v2/task.go +++ b/sqle/api/controller/v2/task.go @@ -5,6 +5,7 @@ import ( "github.com/actiontech/sqle/sqle/api/controller" v1 "github.com/actiontech/sqle/sqle/api/controller/v1" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/model" "github.com/labstack/echo/v4" @@ -116,7 +117,7 @@ func GetTaskSQLs(c echo.Context) error { ar := taskSQL.AuditResults[i] taskSQLRes.AuditResult = append(taskSQLRes.AuditResult, &AuditResult{ Level: ar.Level, - Message: ar.Message, + Message: ar.GetAuditMsgByLangTag(locale.GetLangTagFromCtx(c.Request().Context()).String()), RuleName: ar.RuleName, DbType: task.DBType, }) diff --git a/sqle/cmd/scannerd/cmd/mybatis.go b/sqle/cmd/scannerd/cmd/mybatis.go index 8734ea2c52..9533f10538 100644 --- a/sqle/cmd/scannerd/cmd/mybatis.go +++ b/sqle/cmd/scannerd/cmd/mybatis.go @@ -10,7 +10,7 @@ import ( "github.com/actiontech/sqle/sqle/cmd/scannerd/scanners/supervisor" 
"github.com/actiontech/sqle/sqle/pkg/scanner" - pkgAP "github.com/actiontech/sqle/sqle/server/auditplan" + scannerCmd "github.com/actiontech/sqle/sqle/cmd/scannerd/command" "github.com/fatih/color" "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -25,7 +25,7 @@ var ( schemaNameXml string mybatisCmd = &cobra.Command{ - Use: pkgAP.TypeMySQLMybatis, + Use: scannerCmd.TypeMySQLMybatis, Short: "Parse MyBatis XML file", Run: func(cmd *cobra.Command, args []string) { param := &mybatis.Params{ @@ -55,12 +55,20 @@ var ( ) func init() { - mybatisCmd.Flags().StringVarP(&dir, "dir", "D", "", "xml directory") - mybatisCmd.Flags().BoolVarP(&skipErrorQuery, "skip-error-query", "S", false, "skip the statement that the scanner failed to parse from within the xml file") - mybatisCmd.Flags().BoolVarP(&skipErrorXml, "skip-error-xml", "X", false, "skip the xml file that failed to parse") - mybatisCmd.Flags().StringVarP(&dbTypeXml, "db-type", "B", "", "database type") - mybatisCmd.Flags().StringVarP(&instNameXml, "instance-name", "I", "", "instance name") - mybatisCmd.Flags().StringVarP(&schemaNameXml, "schema-name", "C", "", "schema name") - _ = mybatisCmd.MarkFlagRequired("dir") + mybatis, err := scannerCmd.GetScannerdCmd(scannerCmd.TypeMySQLMybatis) + if err != nil { + panic(err) + } + mybatisCmd.Flags().StringVarP(mybatis.StringFlagFn[scannerCmd.FlagDirectory](&dir)) + mybatisCmd.Flags().BoolVarP(mybatis.BoolFlagFn[scannerCmd.FlagSkipErrorQuery](&skipErrorQuery)) + mybatisCmd.Flags().BoolVarP(mybatis.BoolFlagFn[scannerCmd.FlagSkipErrorXml](&skipErrorXml)) + mybatisCmd.Flags().StringVarP(mybatis.StringFlagFn[scannerCmd.FlagDbType](&dbTypeXml)) + mybatisCmd.Flags().StringVarP(mybatis.StringFlagFn[scannerCmd.FlagInstanceName](&instNameXml)) + mybatisCmd.Flags().StringVarP(mybatis.StringFlagFn[scannerCmd.FlagSchemaName](&schemaNameXml)) + + for _, requiredFlag := range mybatis.RequiredFlags { + _ = mybatisCmd.MarkFlagRequired(requiredFlag) + } + rootCmd.AddCommand(mybatisCmd) } 
diff --git a/sqle/cmd/scannerd/cmd/root.go b/sqle/cmd/scannerd/cmd/root.go index a425143400..469ce8dd81 100644 --- a/sqle/cmd/scannerd/cmd/root.go +++ b/sqle/cmd/scannerd/cmd/root.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - pkgScanner "github.com/actiontech/sqle/sqle/pkg/scanner" + scannerCmd "github.com/actiontech/sqle/sqle/cmd/scannerd/command" "github.com/spf13/cobra" ) @@ -27,13 +27,20 @@ var ( ) func init() { - rootCmd.PersistentFlags().StringVarP(&rootCmdFlags.host, "host", "H", "127.0.0.1", "sqle host") - rootCmd.PersistentFlags().StringVarP(&rootCmdFlags.port, "port", "P", "10000", "sqle port") - rootCmd.PersistentFlags().StringVarP(&rootCmdFlags.auditPlanID, "audit_plan_id", "", "", "audit plan id") - rootCmd.PersistentFlags().StringVarP(&rootCmdFlags.token, "token", "A", "", "sqle token") - rootCmd.PersistentFlags().IntVarP(&rootCmdFlags.timeout, "timeout", "T", pkgScanner.DefaultTimeoutNum, "request sqle timeout in seconds") - rootCmd.PersistentFlags().StringVarP(&rootCmdFlags.project, "project", "J", "default", "project name") - _ = rootCmd.MarkPersistentFlagRequired("token") + root, err := scannerCmd.GetScannerdCmd(scannerCmd.TypeRootScannerd) + if err != nil { + panic(err) + } + rootCmd.PersistentFlags().StringVarP(root.StringFlagFn[scannerCmd.FlagHost](&rootCmdFlags.host)) + rootCmd.PersistentFlags().StringVarP(root.StringFlagFn[scannerCmd.FlagPort](&rootCmdFlags.port)) + rootCmd.PersistentFlags().StringVarP(root.StringFlagFn[scannerCmd.FlagAuditPlanID](&rootCmdFlags.auditPlanID)) + rootCmd.PersistentFlags().StringVarP(root.StringFlagFn[scannerCmd.FlagToken](&rootCmdFlags.token)) + rootCmd.PersistentFlags().IntVarP(root.IntFlagFn[scannerCmd.FlagTimeout](&rootCmdFlags.timeout)) + rootCmd.PersistentFlags().StringVarP(root.StringFlagFn[scannerCmd.FlagProject](&rootCmdFlags.project)) + + for _, requiredFlag := range root.RequiredFlags { + _ = rootCmd.MarkPersistentFlagRequired(requiredFlag) + } } func Execute(ctx context.Context) error { diff --git 
a/sqle/cmd/scannerd/cmd/slowquery.go b/sqle/cmd/scannerd/cmd/slowquery.go index 1cfa9cbed4..213767719e 100644 --- a/sqle/cmd/scannerd/cmd/slowquery.go +++ b/sqle/cmd/scannerd/cmd/slowquery.go @@ -6,10 +6,10 @@ import ( "os" "time" + scannerCmd "github.com/actiontech/sqle/sqle/cmd/scannerd/command" "github.com/actiontech/sqle/sqle/cmd/scannerd/scanners/slowquery" "github.com/actiontech/sqle/sqle/cmd/scannerd/scanners/supervisor" "github.com/actiontech/sqle/sqle/pkg/scanner" - pkgAP "github.com/actiontech/sqle/sqle/server/auditplan" "github.com/fatih/color" "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -23,7 +23,7 @@ var ( excludeSchemas string slowlogCmd = &cobra.Command{ - Use: pkgAP.TypeMySQLSlowLog, + Use: scannerCmd.TypeMySQLSlowLog, Short: "Parse slow query", Run: func(cmd *cobra.Command, args []string) { param := &slowquery.Params{ @@ -52,11 +52,19 @@ var ( ) func init() { - slowlogCmd.Flags().StringVarP(&logFilePath, "log-file", "", "", "log file absolute path") - slowlogCmd.Flags().StringVarP(&includeUsers, "include-user-list", "", "", "include mysql user list, split by \",\"") - slowlogCmd.Flags().StringVarP(&excludeUsers, "exclude-user-list", "", "", "exclude mysql user list, split by \",\"") - slowlogCmd.Flags().StringVarP(&includeSchemas, "include-schema-list", "", "", "include mysql schema list, split by \",\"") - slowlogCmd.Flags().StringVarP(&excludeSchemas, "exclude-schema-list", "", "", "exclude mysql schema list, split by \",\"") - _ = slowlogCmd.MarkFlagRequired("log-file") + slowlog, err := scannerCmd.GetScannerdCmd(scannerCmd.TypeMySQLSlowLog) + if err != nil { + panic(err) + } + slowlogCmd.Flags().StringVarP(slowlog.StringFlagFn[scannerCmd.FlagLogFile](&logFilePath)) + slowlogCmd.Flags().StringVarP(slowlog.StringFlagFn[scannerCmd.FlagIncludeUserList](&includeUsers)) + slowlogCmd.Flags().StringVarP(slowlog.StringFlagFn[scannerCmd.FlagExcludeUserList](&excludeUsers)) + 
slowlogCmd.Flags().StringVarP(slowlog.StringFlagFn[scannerCmd.FlagIncludeSchemaList](&includeSchemas)) + slowlogCmd.Flags().StringVarP(slowlog.StringFlagFn[scannerCmd.FlagExcludeSchemaList](&excludeSchemas)) + + for _, requiredFlag := range slowlog.RequiredFlags { + _ = slowlogCmd.MarkFlagRequired(requiredFlag) + } + rootCmd.AddCommand(slowlogCmd) } diff --git a/sqle/cmd/scannerd/cmd/sqlfile.go b/sqle/cmd/scannerd/cmd/sqlfile.go index bc3b9d2d8d..9724e57b5d 100644 --- a/sqle/cmd/scannerd/cmd/sqlfile.go +++ b/sqle/cmd/scannerd/cmd/sqlfile.go @@ -6,12 +6,11 @@ import ( "os" "time" + scannerCmd "github.com/actiontech/sqle/sqle/cmd/scannerd/command" sqlFile "github.com/actiontech/sqle/sqle/cmd/scannerd/scanners/sql_file" "github.com/actiontech/sqle/sqle/cmd/scannerd/scanners/supervisor" "github.com/actiontech/sqle/sqle/pkg/scanner" - pkgAP "github.com/actiontech/sqle/sqle/server/auditplan" - "github.com/fatih/color" "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -24,7 +23,7 @@ var ( schemaNameSqlFile string sqlFileCmd = &cobra.Command{ - Use: pkgAP.TypeSQLFile, + Use: scannerCmd.TypeSQLFile, Short: "Parse sql file", Run: func(cmd *cobra.Command, args []string) { param := &sqlFile.Params{ @@ -54,11 +53,19 @@ var ( ) func init() { - sqlFileCmd.Flags().StringVarP(&dir, "dir", "D", "", "sql file directory") - sqlFileCmd.Flags().BoolVarP(&skipErrorSqlFile, "skip-error-sql-file", "S", false, "skip the sql file that failed to parse") - sqlFileCmd.Flags().StringVarP(&dbTypeSqlFile, "db-type", "B", "", "database type") - sqlFileCmd.Flags().StringVarP(&instNameSqlFile, "instance-name", "I", "", "instance name") - sqlFileCmd.Flags().StringVarP(&schemaNameSqlFile, "schema-name", "C", "", "schema name") - _ = sqlFileCmd.MarkFlagRequired("dir") + sqlfile, err := scannerCmd.GetScannerdCmd(scannerCmd.TypeSQLFile) + if err != nil { + panic(err) + } + sqlFileCmd.Flags().StringVarP(sqlfile.StringFlagFn[scannerCmd.FlagDirectory](&dir)) + 
sqlFileCmd.Flags().BoolVarP(sqlfile.BoolFlagFn[scannerCmd.FlagSkipErrorSqlFile](&skipErrorSqlFile)) + sqlFileCmd.Flags().StringVarP(sqlfile.StringFlagFn[scannerCmd.FlagDbType](&dbTypeSqlFile)) + sqlFileCmd.Flags().StringVarP(sqlfile.StringFlagFn[scannerCmd.FlagInstanceName](&instNameSqlFile)) + sqlFileCmd.Flags().StringVarP(sqlfile.StringFlagFn[scannerCmd.FlagSchemaName](&schemaNameSqlFile)) + + for _, requiredFlag := range sqlfile.RequiredFlags { + _ = sqlFileCmd.MarkFlagRequired(requiredFlag) + } + rootCmd.AddCommand(sqlFileCmd) } diff --git a/sqle/cmd/scannerd/command/base.go b/sqle/cmd/scannerd/command/base.go new file mode 100644 index 0000000000..b5d096aaa1 --- /dev/null +++ b/sqle/cmd/scannerd/command/base.go @@ -0,0 +1,184 @@ +package command + +import ( + "fmt" + + "strconv" +) + +const ( + FlagDirectory string = "dir" + FlagDirectorySort string = "D" + FlagFile string = "file" + FlagFileSort string = "f" + FlagInstanceName string = "instance-name" + FlagInstanceNameSort string = "I" + FlagDbType string = "db-type" + FlagDbTypeSort string = "B" + FlagSchemaName string = "schema-name" + FlagSchemaNameSort string = "C" + // empty + EmptyDefaultValue string = "" + EmptyFlagSort string = "" + // root + FlagHost string = "host" + FlagHostSort string = "H" + FlagPort string = "port" + FlagPortSort string = "P" + FlagAuditPlanID string = "audit_plan_id" + FlagToken string = "token" + FlagTokenSort string = "A" + FlagTimeout string = "timeout" + FlagTimeoutSort string = "T" + FlagProject string = "project" + FlagProjectSort string = "J" + // mybatis + FlagSkipErrorQuery string = "skip-error-query" + FlagSkipErrorQuerySort string = "S" + FlagSkipErrorXml string = "skip-error-xml" + FlagSkipErrorXmlSort string = "X" + // sqlfile + FlagSkipErrorSqlFile string = "skip-error-sql-file" + FlagSkipErrorSqlFileSort string = "S" + // slow log + FlagLogFile string = "log-file" + FlagIncludeUserList string = "include-user-list" + FlagExcludeUserList string = 
"exclude-user-list" + FlagIncludeSchemaList string = "include-schema-list" + FlagExcludeSchemaList string = "exclude-schema-list" + // tbase + FlagFileFormat string = "format" + FlagFileFormatSort string = "F" +) + +func newScannerCmd(scannerType string) scannerCmd { + return scannerCmd{ + ScannerType: scannerType, + StringFlagFn: make(map[string]func(variable *string) (p *string, name string, shorthand string, value string, usage string)), + BoolFlagFn: make(map[string]func(variable *bool) (p *bool, name string, shorthand string, value bool, usage string)), + IntFlagFn: make(map[string]func(variable *int) (p *int, name string, shorthand string, value int, usage string)), + } +} + +type scannerCmd struct { + ScannerType string + FatherCmds []*scannerCmd + StringFlagFn map[string]func(variable *string) (p *string, name string, shorthand string, value string, usage string) + BoolFlagFn map[string]func(variable *bool) (p *bool, name string, shorthand string, value bool, usage string) + IntFlagFn map[string]func(variable *int) (p *int, name string, shorthand string, value int, usage string) + RequiredFlags []string +} + +func GetScannerdCmd(scannerType string) (*scannerCmd, error) { + switch scannerType { + case TypeRootScannerd: + return &rootCmd, nil + case TypeMySQLMybatis: + return &myBatis, nil + case TypeMySQLSlowLog: + return &slowLog, nil + case TypeTiDBAuditLog: + return &tidbAuditLog, nil + case TypeSQLFile: + return &sqlFile, nil + case TypeTBaseSlowLog: + return &tbaseLog, nil + default: + return nil, fmt.Errorf("unsupport scannerd type %s", scannerType) + } +} + +func (newCmd *scannerCmd) addFather(cmd *scannerCmd) { + newCmd.FatherCmds = append(newCmd.FatherCmds, cmd) +} + +func (cmd *scannerCmd) addStringFlag(name string, shorthand string, value string, usage string) { + cmd.StringFlagFn[name] = func(variable *string) (*string, string, string, string, string) { + return variable, name, shorthand, value, usage + } +} + +func (cmd *scannerCmd) 
addIntFlag(name string, shorthand string, value int, usage string) { + cmd.IntFlagFn[name] = func(variable *int) (*int, string, string, int, string) { + return variable, name, shorthand, value, usage + } +} + +func (cmd *scannerCmd) addBoolFlag(name string, shorthand string, value bool, usage string) { + cmd.BoolFlagFn[name] = func(variable *bool) (*bool, string, string, bool, string) { + return variable, name, shorthand, value, usage + } +} + +func (cmd *scannerCmd) addRequiredFlag(name string) { + cmd.RequiredFlags = append(cmd.RequiredFlags, name) +} + +func (cmd scannerCmd) Type() string { + return cmd.ScannerType +} + +// path can be relative path or absolute path. params is flagName:flagValue map, bool type input true or false string. +func (cmd scannerCmd) GenCommand(path string, params map[string] /* flag name */ string /* flag value */) (string, error) { + // check required flag exist + for _, father := range cmd.FatherCmds { + for _, requiredFlag := range father.RequiredFlags { + if value, exist := params[requiredFlag]; !exist || value == "" { + return "", fmt.Errorf("required flag: %s value: %s", requiredFlag, value) + } + } + } + for _, requiredFlag := range cmd.RequiredFlags { + if value, exist := params[requiredFlag]; !exist || value == "" { + return "", fmt.Errorf("required flag: %s value: %s", requiredFlag, value) + } + } + var command string = fmt.Sprintf("%s %s", path, cmd.Type()) + var addParamTpl string = "%s --%s %s" + // check is flag valid and add flag + for flagName, flagValue := range params { + var err error + var exist bool + for _, father := range cmd.FatherCmds { + exist, err = father.checkFlag(flagName, flagValue) + if err != nil { + return "", fmt.Errorf("when checking flag: %s,error %w", flagName, err) + } + if exist { + break + } + } + + if !exist { + exist, err = cmd.checkFlag(flagName, flagValue) + if err != nil { + return "", fmt.Errorf("when checking flag: %s,error %w", flagName, err) + } + } + if exist { + if flagValue == "" { 
+ continue + } + command = fmt.Sprintf(addParamTpl, command, flagName, flagValue) + continue + } + return "", fmt.Errorf("unsupport flag %s", flagName) + } + return command, nil +} + +func (cmd scannerCmd) checkFlag(flagName string, flagValue string) (exist bool, err error) { + if _, exist = cmd.StringFlagFn[flagName]; exist { + return true, nil + } + if _, exist = cmd.BoolFlagFn[flagName]; exist { + if flagValue != "false" && flagValue != "true" { + return true, fmt.Errorf("flage %s is bool type, should input false or true", flagName) + } + } + if _, exist = cmd.IntFlagFn[flagName]; exist { + _, err = strconv.Atoi(flagValue) + return true, err + } + return false, nil +} diff --git a/sqle/cmd/scannerd/command/command.go b/sqle/cmd/scannerd/command/command.go new file mode 100644 index 0000000000..647361bbe0 --- /dev/null +++ b/sqle/cmd/scannerd/command/command.go @@ -0,0 +1,61 @@ +package command + +const ( + TypeMySQLMybatis = "mysql_mybatis" + TypeMySQLSlowLog = "mysql_slow_log" + TypeSQLFile = "sql_file" + TypeTBaseSlowLog = "TBase_slow_log" + TypeTiDBAuditLog = "tidb_audit_log" + TypeRootScannerd = "root" +) + +var ( + rootCmd scannerCmd = newScannerCmd(TypeRootScannerd) + myBatis scannerCmd = newScannerCmd(TypeMySQLMybatis) + slowLog scannerCmd = newScannerCmd(TypeMySQLSlowLog) + sqlFile scannerCmd = newScannerCmd(TypeSQLFile) + tbaseLog scannerCmd = newScannerCmd(TypeTBaseSlowLog) + tidbAuditLog scannerCmd = newScannerCmd(TypeTiDBAuditLog) +) + +func init() { + rootCmd.addStringFlag(FlagHost, FlagHostSort, "127.0.0.1", "sqle host") + rootCmd.addStringFlag(FlagPort, FlagPortSort, "10000", "sqle port") + rootCmd.addStringFlag(FlagAuditPlanID, EmptyFlagSort, "", "audit plan id") + rootCmd.addStringFlag(FlagToken, FlagTokenSort, "", "sqle token") + rootCmd.addIntFlag(FlagTimeout, FlagTimeoutSort, 10, "request sqle timeout in seconds") + rootCmd.addStringFlag(FlagProject, FlagProjectSort, "default", "project name") + rootCmd.addRequiredFlag(FlagToken) +} + +func 
init() { + myBatis.addFather(&rootCmd) + myBatis.addStringFlag(FlagDirectory, FlagDirectorySort, EmptyDefaultValue, "xml directory") + myBatis.addStringFlag(FlagDbType, FlagDbTypeSort, EmptyDefaultValue, "database type") + myBatis.addStringFlag(FlagInstanceName, FlagInstanceNameSort, EmptyDefaultValue, "instance name") + myBatis.addStringFlag(FlagSchemaName, FlagSchemaNameSort, EmptyDefaultValue, "schema name") + myBatis.addBoolFlag(FlagSkipErrorQuery, FlagSkipErrorQuerySort, false, + "skip the statement that the scanner failed to parse from within the xml file") + myBatis.addBoolFlag(FlagSkipErrorXml, FlagSkipErrorXmlSort, false, "skip the xml file that failed to parse") + myBatis.addRequiredFlag(FlagDirectory) +} + +func init() { + slowLog.addFather(&rootCmd) + slowLog.addStringFlag(FlagLogFile, EmptyFlagSort, EmptyDefaultValue, "log file absolute path") + slowLog.addStringFlag(FlagIncludeUserList, EmptyFlagSort, EmptyDefaultValue, "include mysql user list, split by \",\"") + slowLog.addStringFlag(FlagExcludeUserList, EmptyFlagSort, EmptyDefaultValue, "exclude mysql user list, split by \",\"") + slowLog.addStringFlag(FlagIncludeSchemaList, EmptyFlagSort, EmptyDefaultValue, "include mysql schema list, split by \",\"") + slowLog.addStringFlag(FlagExcludeSchemaList, EmptyFlagSort, EmptyDefaultValue, "exclude mysql schema list, split by \",\"") + slowLog.addRequiredFlag(FlagLogFile) +} + +func init() { + sqlFile.addFather(&rootCmd) + sqlFile.addStringFlag(FlagDirectory, FlagDirectorySort, EmptyDefaultValue, "sql file directory") + sqlFile.addBoolFlag(FlagSkipErrorSqlFile, FlagSkipErrorSqlFileSort, false, "skip the sql file that failed to parse") + sqlFile.addStringFlag(FlagDbType, FlagDbTypeSort, EmptyDefaultValue, "database type") + sqlFile.addStringFlag(FlagInstanceName, FlagInstanceNameSort, EmptyDefaultValue, "instance name") + sqlFile.addStringFlag(FlagSchemaName, FlagSchemaNameSort, EmptyDefaultValue, "schema name") + sqlFile.addRequiredFlag(FlagDirectory) +} \ 
No newline at end of file diff --git a/sqle/common/instance.go b/sqle/common/instance.go index 584c127688..82f27a51be 100644 --- a/sqle/common/instance.go +++ b/sqle/common/instance.go @@ -40,6 +40,12 @@ func CheckDeleteInstance(instanceId int64) error { if isBoundToAuditPlan { return errors.New("there is an unbound audit plan") } - + nodes, err := s.GetPipelineNodesByInstanceId(uint64(instanceId)) + if err != nil { + return fmt.Errorf("check that all pipeline node are unbound failed: %v", err) + } + if len(nodes) > 0 { + return errors.New("there is an unbound pipeline node") + } return nil } diff --git a/sqle/docs/docs.go b/sqle/docs/docs.go index e3707eb89f..73cf8c0a00 100644 --- a/sqle/docs/docs.go +++ b/sqle/docs/docs.go @@ -908,6 +908,49 @@ var doc = `{ } } }, + "/v1/import_rule_template": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "get rule template file", + "tags": [ + "rule_template" + ], + "summary": "获取规则模板文件", + "operationId": "getRuleTemplateFileV1", + "parameters": [ + { + "type": "string", + "description": "instance type", + "name": "instance_type", + "in": "query", + "required": true + }, + { + "enum": [ + "csv", + "json" + ], + "type": "string", + "description": "file type", + "name": "file_type", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "sqle rule template file", + "schema": { + "type": "file" + } + } + } + } + }, "/v1/operation_records": { "get": { "security": [ @@ -3991,6 +4034,17 @@ var doc = `{ "name": "rule_template_name", "in": "path", "required": true + }, + { + "enum": [ + "csv", + "json" + ], + "type": "string", + "description": "export type", + "name": "export_type", + "in": "query", + "required": true } ], "responses": { @@ -7021,6 +7075,17 @@ var doc = `{ "summary": "解析规则模板文件", "operationId": "importProjectRuleTemplateV1", "parameters": [ + { + "enum": [ + "csv", + "json" + ], + "type": "string", + "description": "file type", + "name": "file_type", + 
"in": "formData", + "required": true + }, { "type": "file", "description": "SQLE rule template file", @@ -7035,6 +7100,12 @@ var doc = `{ "schema": { "$ref": "#/definitions/v1.ParseProjectRuleTemplateFileResV1" } + }, + "400": { + "description": "return error file", + "schema": { + "type": "file" + } } } } @@ -7204,6 +7275,17 @@ var doc = `{ "summary": "导出全局规则模板", "operationId": "exportRuleTemplateV1", "parameters": [ + { + "enum": [ + "csv", + "json" + ], + "type": "string", + "description": "export type", + "name": "export_type", + "in": "query", + "required": true + }, { "type": "string", "description": "rule template name", diff --git a/sqle/docs/swagger.json b/sqle/docs/swagger.json index 5efe6701c8..ef7056e6aa 100644 --- a/sqle/docs/swagger.json +++ b/sqle/docs/swagger.json @@ -892,6 +892,49 @@ } } }, + "/v1/import_rule_template": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "get rule template file", + "tags": [ + "rule_template" + ], + "summary": "获取规则模板文件", + "operationId": "getRuleTemplateFileV1", + "parameters": [ + { + "type": "string", + "description": "instance type", + "name": "instance_type", + "in": "query", + "required": true + }, + { + "enum": [ + "csv", + "json" + ], + "type": "string", + "description": "file type", + "name": "file_type", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "sqle rule template file", + "schema": { + "type": "file" + } + } + } + } + }, "/v1/operation_records": { "get": { "security": [ @@ -3975,6 +4018,17 @@ "name": "rule_template_name", "in": "path", "required": true + }, + { + "enum": [ + "csv", + "json" + ], + "type": "string", + "description": "export type", + "name": "export_type", + "in": "query", + "required": true } ], "responses": { @@ -7005,6 +7059,17 @@ "summary": "解析规则模板文件", "operationId": "importProjectRuleTemplateV1", "parameters": [ + { + "enum": [ + "csv", + "json" + ], + "type": "string", + "description": "file type", + "name": 
"file_type", + "in": "formData", + "required": true + }, { "type": "file", "description": "SQLE rule template file", @@ -7019,6 +7084,12 @@ "schema": { "$ref": "#/definitions/v1.ParseProjectRuleTemplateFileResV1" } + }, + "400": { + "description": "return error file", + "schema": { + "type": "file" + } } } } @@ -7188,6 +7259,17 @@ "summary": "导出全局规则模板", "operationId": "exportRuleTemplateV1", "parameters": [ + { + "enum": [ + "csv", + "json" + ], + "type": "string", + "description": "export type", + "name": "export_type", + "in": "query", + "required": true + }, { "type": "string", "description": "rule template name", diff --git a/sqle/docs/swagger.yaml b/sqle/docs/swagger.yaml index 03c32eaaa9..8753288daa 100644 --- a/sqle/docs/swagger.yaml +++ b/sqle/docs/swagger.yaml @@ -5673,6 +5673,34 @@ paths: summary: 获取 dashboard 信息 tags: - dashboard + /v1/import_rule_template: + get: + description: get rule template file + operationId: getRuleTemplateFileV1 + parameters: + - description: instance type + in: query + name: instance_type + required: true + type: string + - description: file type + enum: + - csv + - json + in: query + name: file_type + required: true + type: string + responses: + "200": + description: sqle rule template file + schema: + type: file + security: + - ApiKeyAuth: [] + summary: 获取规则模板文件 + tags: + - rule_template /v1/operation_records: get: description: Get operation record list @@ -7674,6 +7702,14 @@ paths: name: rule_template_name required: true type: string + - description: export type + enum: + - csv + - json + in: query + name: export_type + required: true + type: string responses: "200": description: sqle rule template file @@ -9767,6 +9803,14 @@ paths: description: export rule template operationId: exportRuleTemplateV1 parameters: + - description: export type + enum: + - csv + - json + in: query + name: export_type + required: true + type: string - description: rule template name in: path name: rule_template_name @@ -9789,6 +9833,14 @@ paths: 
description: parse rule template operationId: importProjectRuleTemplateV1 parameters: + - description: file type + enum: + - csv + - json + in: formData + name: file_type + required: true + type: string - description: SQLE rule template file in: formData name: rule_template_file @@ -9799,6 +9851,10 @@ paths: description: OK schema: $ref: '#/definitions/v1.ParseProjectRuleTemplateFileResV1' + "400": + description: return error file + schema: + type: file security: - ApiKeyAuth: [] summary: 解析规则模板文件 diff --git a/sqle/driver/mysql/advisor.go b/sqle/driver/mysql/advisor.go index 861e316d6b..51a7af4f8f 100644 --- a/sqle/driver/mysql/advisor.go +++ b/sqle/driver/mysql/advisor.go @@ -7,8 +7,10 @@ import ( "github.com/Masterminds/semver/v3" "github.com/actiontech/sqle/sqle/driver/mysql/executor" + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" "github.com/actiontech/sqle/sqle/driver/mysql/session" "github.com/actiontech/sqle/sqle/driver/mysql/util" + driverV2 "github.com/actiontech/sqle/sqle/driver/v2" "github.com/actiontech/sqle/sqle/pkg/params" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/format" @@ -23,20 +25,12 @@ const ( MAX_INDEX_COLUMN_DEFAULT_VALUE int = 5 MIN_COLUMN_SELECTIVITY string = "min_column_selectivity" MIN_COLUMN_SELECTIVITY_DEFAULT_VALUE float64 = 2 - - threeStarIndexAdviceFormat string = "索引建议 | 根据三星索引设计规范,建议对表%s添加%s索引:【%s】" - prefixIndexAdviceFormat string = "索引建议 | SQL使用了前模糊匹配,数据量大时,可建立翻转函数索引" - extremalIndexAdviceFormat string = "索引建议 | SQL使用了最值函数,可以利用索引有序的性质快速找到最值,建议对表%s添加单列索引,参考列:%s" - functionIndexAdviceFormatV80 string = "索引建议 | SQL使用了函数作为查询条件,在MySQL8.0.13以上的版本,可以创建函数索引,建议对表%s添加函数索引,参考列:%s" - functionIndexAdviceFormatV57 string = "索引建议 | SQL使用了函数作为查询条件,在MySQL5.7以上的版本,可以在虚拟列上创建索引,建议对表%s添加虚拟列索引,参考列:%s" - functionIndexAdviceFormatAll string = "索引建议 | SQL使用了函数作为查询条件,在MySQL5.7以上的版本,可以在虚拟列上创建索引,在MySQL8.0.13以上的版本,可以创建函数索引,建议根据MySQL版本对表%s添加合适的索引,参考列:%s" - joinIndexAdviceFormat string = "索引建议 | 
SQL中字段%s为被驱动表%s上的关联字段,建议对表%s添加单列索引,参考列:%s" ) type OptimizeResult struct { TableName string IndexedColumns []string - Reason string + Reason driverV2.I18nStr } func optimize(log *logrus.Entry, ctx *session.Context, node ast.Node, params params.Params) []*OptimizeResult { @@ -357,14 +351,14 @@ func (a *threeStarIndexAdvisor) GiveAdvices() []*OptimizeResult { } tableName := util.GetTableNameFromTableSource(a.drivingTableSource) indexColumns := a.adviceColumns.stringSlice() - var indexType string = "复合" + var indexType = plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AdvisorIndexTypeComposite) if len(indexColumns) == 1 { - indexType = "单列" + indexType = plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AdvisorIndexTypeSingle) } return []*OptimizeResult{{ TableName: tableName, IndexedColumns: indexColumns, - Reason: fmt.Sprintf(threeStarIndexAdviceFormat, tableName, indexType, strings.Join(indexColumns, ",")), + Reason: plocale.ShouldLocalizeAllWithArgs(plocale.ThreeStarIndexAdviceFormat, tableName, indexType, strings.Join(indexColumns, ",")), }} } @@ -896,7 +890,7 @@ func (a *joinIndexAdvisor) giveAdvice() { a.advices = append(a.advices, &OptimizeResult{ TableName: drivenTableName, IndexedColumns: indexColumn, - Reason: fmt.Sprintf(joinIndexAdviceFormat, strings.Join(indexColumn, ","), drivenTableName, drivenTableName, strings.Join(indexColumn, ",")), + Reason: plocale.ShouldLocalizeAllWithArgs(plocale.JoinIndexAdviceFormat, strings.Join(indexColumn, ","), drivenTableName, drivenTableName, strings.Join(indexColumn, ",")), }) } @@ -1036,7 +1030,7 @@ func (a *functionIndexAdvisor) giveAdvice() { a.advices = append(a.advices, &OptimizeResult{ TableName: tableName, IndexedColumns: columns, - Reason: fmt.Sprintf(functionIndexAdviceFormatV57, tableName, strings.Join(columns, ",")), + Reason: plocale.ShouldLocalizeAllWithArgs(plocale.FunctionIndexAdviceFormatV57, tableName, strings.Join(columns, ",")), }) return } @@ -1044,7 +1038,7 @@ func (a 
*functionIndexAdvisor) giveAdvice() { a.advices = append(a.advices, &OptimizeResult{ TableName: tableName, IndexedColumns: columns, - Reason: fmt.Sprintf(functionIndexAdviceFormatV80, tableName, strings.Join(columns, ",")), + Reason: plocale.ShouldLocalizeAllWithArgs(plocale.FunctionIndexAdviceFormatV80, tableName, strings.Join(columns, ",")), }) return } @@ -1052,7 +1046,7 @@ func (a *functionIndexAdvisor) giveAdvice() { a.advices = append(a.advices, &OptimizeResult{ TableName: tableName, IndexedColumns: columns, - Reason: fmt.Sprintf(functionIndexAdviceFormatAll, tableName, strings.Join(columns, ",")), + Reason: plocale.ShouldLocalizeAllWithArgs(plocale.FunctionIndexAdviceFormatAll, tableName, strings.Join(columns, ",")), }) } @@ -1149,7 +1143,7 @@ func (a *extremalIndexAdvisor) giveAdvice() { a.advices = append(a.advices, &OptimizeResult{ TableName: tableName, IndexedColumns: []string{indexColumn}, - Reason: fmt.Sprintf(extremalIndexAdviceFormat, tableName, indexColumn), + Reason: plocale.ShouldLocalizeAllWithArgs(plocale.ExtremalIndexAdviceFormat, tableName, indexColumn), }) } @@ -1241,6 +1235,6 @@ func (a *prefixIndexAdvisor) giveAdvice() { a.advices = append(a.advices, &OptimizeResult{ TableName: tableName, IndexedColumns: []string{column.Name.Name.L}, - Reason: prefixIndexAdviceFormat, + Reason: plocale.ShouldLocalizeAll(plocale.PrefixIndexAdviceFormat), }) } diff --git a/sqle/driver/mysql/advisor_test.go b/sqle/driver/mysql/advisor_test.go index 6375da3659..9c2ec14393 100644 --- a/sqle/driver/mysql/advisor_test.go +++ b/sqle/driver/mysql/advisor_test.go @@ -6,6 +6,9 @@ import ( "strings" "testing" + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" + "github.com/nicksnyder/go-i18n/v2/i18n" + sqlmock "github.com/DATA-DOG/go-sqlmock" "github.com/actiontech/sqle/sqle/driver/mysql/executor" "github.com/actiontech/sqle/sqle/driver/mysql/session" @@ -30,7 +33,7 @@ func newPrefixOptimizeResult(columns []string, tableName string) *OptimizeResult return 
&OptimizeResult{ TableName: tableName, IndexedColumns: columns, - Reason: prefixIndexAdviceFormat, + Reason: plocale.ShouldLocalizeAll(plocale.PrefixIndexAdviceFormat), } } @@ -39,12 +42,12 @@ func mockThreeStarOptimizeResult(caseName string, c optimizerTestContent, t *tes } func newThreeStarOptimizeResult(columns []string, tableName string) *OptimizeResult { - var indexType string = "复合" + var indexType = plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AdvisorIndexTypeComposite) if len(columns) == 1 { - indexType = "单列" + indexType = plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AdvisorIndexTypeSingle) } return &OptimizeResult{ - Reason: fmt.Sprintf(threeStarIndexAdviceFormat, tableName, indexType, strings.Join(columns, ",")), + Reason: plocale.ShouldLocalizeAllWithArgs(plocale.ThreeStarIndexAdviceFormat, tableName, indexType, strings.Join(columns, ",")), IndexedColumns: columns, TableName: tableName, } @@ -54,11 +57,11 @@ func mockFunctionOptimizeResult(caseName string, c optimizerTestContent, t *test return mockOptimizeResultWithAdvisor(c.sql, c.maxColumn, c.queryResults, caseName, t, newFunctionIndexAdvisor) } -func newFunctionIndexOptimizeResult(format string, columns []string, tableName string) *OptimizeResult { +func newFunctionIndexOptimizeResult(format *i18n.Message, columns []string, tableName string) *OptimizeResult { return &OptimizeResult{ TableName: tableName, IndexedColumns: columns, - Reason: fmt.Sprintf(format, tableName, strings.Join(columns, ",")), + Reason: plocale.ShouldLocalizeAllWithArgs(format, tableName, strings.Join(columns, ",")), } } @@ -70,7 +73,7 @@ func newExtremalIndexOptimizeResult(column string, tableName string) *OptimizeRe return &OptimizeResult{ TableName: tableName, IndexedColumns: []string{column}, - Reason: fmt.Sprintf(extremalIndexAdviceFormat, tableName, column), + Reason: plocale.ShouldLocalizeAllWithArgs(plocale.ExtremalIndexAdviceFormat, tableName, column), } } @@ -80,7 +83,7 @@ func 
mockJoinOptimizeResult(caseName string, c optimizerTestContent, t *testing. func newJoinIndexOptimizeResult(indexColumn []string, drivenTableName string) *OptimizeResult { return &OptimizeResult{ - Reason: fmt.Sprintf(joinIndexAdviceFormat, strings.Join(indexColumn, ","), drivenTableName, drivenTableName, strings.Join(indexColumn, ",")), + Reason: plocale.ShouldLocalizeAllWithArgs(plocale.JoinIndexAdviceFormat, strings.Join(indexColumn, ","), drivenTableName, drivenTableName, strings.Join(indexColumn, ",")), IndexedColumns: indexColumn, TableName: drivenTableName, } @@ -212,7 +215,7 @@ func TestFunctionIndexOptimize(t *testing.T) { }, }, expectResults: []*OptimizeResult{ - newFunctionIndexOptimizeResult(functionIndexAdviceFormatV80, []string{"v1"}, "exist_tb_1"), + newFunctionIndexOptimizeResult(plocale.FunctionIndexAdviceFormatV80, []string{"v1"}, "exist_tb_1"), }, maxColumn: 4, } @@ -228,7 +231,7 @@ func TestFunctionIndexOptimize(t *testing.T) { }, }, expectResults: []*OptimizeResult{ - newFunctionIndexOptimizeResult(functionIndexAdviceFormatV57, []string{"v1"}, "exist_tb_1"), + newFunctionIndexOptimizeResult(plocale.FunctionIndexAdviceFormatV57, []string{"v1"}, "exist_tb_1"), }, maxColumn: 4, } @@ -270,7 +273,7 @@ func TestFunctionIndexOptimize(t *testing.T) { }, }, expectResults: []*OptimizeResult{ - newFunctionIndexOptimizeResult(functionIndexAdviceFormatAll, []string{"v1"}, "exist_tb_1"), + newFunctionIndexOptimizeResult(plocale.FunctionIndexAdviceFormatAll, []string{"v1"}, "exist_tb_1"), }, maxColumn: 4, } diff --git a/sqle/driver/mysql/analysis.go b/sqle/driver/mysql/analysis.go index 3e8ad0679f..2575f8f0e9 100644 --- a/sqle/driver/mysql/analysis.go +++ b/sqle/driver/mysql/analysis.go @@ -3,6 +3,7 @@ package mysql import ( "context" "fmt" + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" "strings" "github.com/actiontech/sqle/sqle/driver" @@ -86,28 +87,28 @@ func (i *MysqlDriverImpl) getTableColumnsInfo(conn *executor.Executor, schema, t columns := 
[]driverV2.TabularDataHead{ { Name: "COLUMN_NAME", - Desc: "列名", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescColumnName), }, { Name: "COLUMN_TYPE", - Desc: "列类型", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescColumnType), }, { Name: "CHARACTER_SET_NAME", - Desc: "列字符集", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescCharacterSetName), }, { Name: "IS_NULLABLE", - Desc: "是否可以为空", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescIsNullable), }, { Name: "COLUMN_KEY", - Desc: "列索引", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescColumnKey), }, { Name: "COLUMN_DEFAULT", - Desc: "默认值", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescColumnDefault), }, { Name: "EXTRA", - Desc: "拓展信息", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescExtra), }, { Name: "COLUMN_COMMENT", - Desc: "列说明", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescColumnComment), }, } @@ -145,30 +146,30 @@ func (i *MysqlDriverImpl) getTableIndexesInfo(conn *executor.Executor, schema, t columns := []driverV2.TabularDataHead{ { Name: "Column_name", - Desc: "列名", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescColumnName), }, { Name: "Key_name", - Desc: "索引名", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescKeyName), }, { // set the row's value as Yes if Non_unique is 0 and No if Non_unique is 1 Name: "Unique", - Desc: "唯一性", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescUnique), }, { Name: "Seq_in_index", - Desc: "列序列", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescSeqInIndex), }, { Name: "Cardinality", - Desc: "基数", + Desc: 
plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescCardinality), }, { // set the row's value as Yes if the column may contain NULL values and No if not Name: "Null", - Desc: "是否可以为空", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescIsNullable), }, { Name: "Index_type", - Desc: "索引类型", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescIndexType), }, { Name: "Comment", - Desc: "备注", + Desc: plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnalysisDescComment), }, } diff --git a/sqle/driver/mysql/audit.go b/sqle/driver/mysql/audit.go index d3e4f09562..92e7b9c951 100644 --- a/sqle/driver/mysql/audit.go +++ b/sqle/driver/mysql/audit.go @@ -4,6 +4,8 @@ import ( "fmt" "strings" + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" + rulepkg "github.com/actiontech/sqle/sqle/driver/mysql/rule" "github.com/actiontech/sqle/sqle/driver/mysql/session" "github.com/actiontech/sqle/sqle/driver/mysql/util" @@ -13,30 +15,6 @@ import ( "github.com/pingcap/parser/ast" ) -const ( - SchemaNotExistMessage = "schema %s 不存在" - SchemaExistMessage = "schema %s 已存在" - TableNotExistMessage = "表 %s 不存在" - TableExistMessage = "表 %s 已存在" - ColumnNotExistMessage = "字段 %s 不存在" - ColumnExistMessage = "字段 %s 已存在" - ColumnIsAmbiguousMessage = "字段 %s 指代不明" - IndexNotExistMessage = "索引 %s 不存在" - IndexExistMessage = "索引 %s 已存在" - DuplicateColumnsMessage = "字段名 %s 重复" - DuplicateIndexesMessage = "索引名 %s 重复" - MultiPrimaryKeyMessage = "主键只能设置一个" - KeyedColumnNotExistMessage = "索引字段 %s 不存在" - PrimaryKeyExistMessage = "已经存在主键,不能再添加" - PrimaryKeyNotExistMessage = "当前没有主键,不能执行删除" - ColumnsValuesNotMatchMessage = "指定的值列数与字段列数不匹配" - DuplicatePrimaryKeyedColumnMessage = "主键字段 %s 重复" - DuplicateIndexedColumnMessage = "索引 %s 字段 %s重复" -) - -const CheckInvalidErrorFormat = "预检查失败: %v" -const CheckInvalidError = "预检查失败" - func (i *MysqlDriverImpl) CheckInvalid(node ast.Node) error { var err error switch 
stmt := node.(type) { @@ -71,7 +49,7 @@ func (i *MysqlDriverImpl) CheckInvalid(node ast.Node) error { if err != nil && session.IsParseShowCreateTableContentErr(err) { return err // todo #1630 直接返回原始错误类型,方便跳过 } else if err != nil { - return fmt.Errorf(CheckInvalidErrorFormat, err) + return fmt.Errorf(plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.CheckInvalidErrorFormat), err) } return nil @@ -87,7 +65,8 @@ func (i *MysqlDriverImpl) CheckExplain(node ast.Node) error { _, err = i.Ctx.GetExecutionPlan(node.Text()) } if err != nil { - i.result.Add(driverV2.RuleLevelWarn, rulepkg.ConfigDMLExplainPreCheckEnable, fmt.Sprintf(CheckInvalidErrorFormat, err)) + i.result.Add(driverV2.RuleLevelWarn, rulepkg.ConfigDMLExplainPreCheckEnable, + plocale.ShouldLocalizeAll(plocale.CheckInvalidErrorFormat), err) } return nil @@ -108,7 +87,7 @@ func (i *MysqlDriverImpl) CheckInvalidOffline(node ast.Node) error { err = i.checkUnparsedStmt(stmt) } if err != nil { - return fmt.Errorf(CheckInvalidErrorFormat, err) + return fmt.Errorf(plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.CheckInvalidErrorFormat), err) } return nil } @@ -129,14 +108,14 @@ func (i *MysqlDriverImpl) checkInvalidCreateTable(stmt *ast.CreateTableStmt) err return err } if !schemaExist { - i.result.Add(driverV2.RuleLevelError, "", SchemaNotExistMessage, schemaName) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.SchemaNotExistMessage), schemaName) } else { tableExist, err := i.Ctx.IsTableExist(stmt.Table) if err != nil { return err } if tableExist && !stmt.IfNotExists { - i.result.Add(driverV2.RuleLevelError, "", TableExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.TableExistMessage), i.getTableName(stmt.Table)) } if stmt.ReferTable != nil { @@ -145,7 +124,7 @@ func (i *MysqlDriverImpl) checkInvalidCreateTable(stmt *ast.CreateTableStmt) err return err } if !referTableExist { - i.result.Add(driverV2.RuleLevelError, 
"", TableNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.TableNotExistMessage), i.getTableName(stmt.ReferTable)) } } @@ -190,7 +169,7 @@ func (i *MysqlDriverImpl) checkInvalidCreateTableOffline(stmt *ast.CreateTableSt } duplicateName := utils.GetDuplicate(names) if len(duplicateName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", DuplicatePrimaryKeyedColumnMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.DuplicatePrimaryKeyedColumnMessage), strings.Join(duplicateName, ",")) } case ast.ConstraintIndex, ast.ConstraintUniq, ast.ConstraintFulltext: @@ -198,7 +177,7 @@ func (i *MysqlDriverImpl) checkInvalidCreateTableOffline(stmt *ast.CreateTableSt if constraintName != "" { indexesName = append(indexesName, constraint.Name) } else { - constraintName = "(匿名)" + constraintName = plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnonymousMark) } names := []string{} for _, col := range constraint.Keys { @@ -208,23 +187,23 @@ func (i *MysqlDriverImpl) checkInvalidCreateTableOffline(stmt *ast.CreateTableSt } duplicateName := utils.GetDuplicate(names) if len(duplicateName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, constraintName, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.DuplicateIndexedColumnMessage), constraintName, strings.Join(duplicateName, ",")) } } } if d := utils.GetDuplicate(colsName); len(d) > 0 { - i.result.Add(driverV2.RuleLevelError, "", DuplicateColumnsMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.DuplicateColumnsMessage), strings.Join(d, ",")) } if d := utils.GetDuplicate(indexesName); len(d) > 0 { - i.result.Add(driverV2.RuleLevelError, "", DuplicateIndexesMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.DuplicateIndexesMessage), strings.Join(d, ",")) } if pkCounter > 1 { - 
i.result.Add(driverV2.RuleLevelError, "", MultiPrimaryKeyMessage) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.MultiPrimaryKeyMessage)) } notExistKeyColsName := []string{} for _, colName := range keyColsName { @@ -233,7 +212,7 @@ func (i *MysqlDriverImpl) checkInvalidCreateTableOffline(stmt *ast.CreateTableSt } } if len(notExistKeyColsName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", KeyedColumnNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.KeyedColumnNotExistMessage), strings.Join(utils.RemoveDuplicate(notExistKeyColsName), ",")) } return nil @@ -262,7 +241,7 @@ func (i *MysqlDriverImpl) checkInvalidAlterTable(stmt *ast.AlterTableStmt) error return err } if !schemaExist { - i.result.Add(driverV2.RuleLevelError, "", SchemaNotExistMessage, schemaName) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.SchemaNotExistMessage), schemaName) return nil } createTableStmt, tableExist, err := i.Ctx.GetCreateTableStmt(stmt.Table) @@ -270,7 +249,7 @@ func (i *MysqlDriverImpl) checkInvalidAlterTable(stmt *ast.AlterTableStmt) error return err } if !tableExist { - i.result.Add(driverV2.RuleLevelError, "", TableNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.TableNotExistMessage), i.getTableName(stmt.Table)) return nil } @@ -340,7 +319,7 @@ func (i *MysqlDriverImpl) checkInvalidAlterTable(stmt *ast.AlterTableStmt) error } else { colNameMap[colName] = struct{}{} if hasPk && util.HasOneInOptions(col.Options, ast.ColumnOptionPrimaryKey) { - i.result.Add(driverV2.RuleLevelError, "", PrimaryKeyExistMessage) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.PrimaryKeyExistMessage)) } else { hasPk = true } @@ -364,7 +343,7 @@ func (i *MysqlDriverImpl) checkInvalidAlterTable(stmt *ast.AlterTableStmt) error if len(util.GetAlterTableSpecByTp(stmt.Specs, ast.AlterTableDropPrimaryKey)) > 0 && !hasPk { // 
primary key not exist, can not drop primary key - i.result.Add(driverV2.RuleLevelError, "", PrimaryKeyNotExistMessage) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.PrimaryKeyNotExistMessage)) } for _, spec := range util.GetAlterTableSpecByTp(stmt.Specs, ast.AlterTableDropIndex) { @@ -399,7 +378,7 @@ func (i *MysqlDriverImpl) checkInvalidAlterTable(stmt *ast.AlterTableStmt) error case ast.ConstraintPrimaryKey: if hasPk { // primary key has exist, can not add primary key - i.result.Add(driverV2.RuleLevelError, "", PrimaryKeyExistMessage) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.PrimaryKeyExistMessage)) } else { hasPk = true } @@ -413,7 +392,7 @@ func (i *MysqlDriverImpl) checkInvalidAlterTable(stmt *ast.AlterTableStmt) error } duplicateColumn := utils.GetDuplicate(names) if len(duplicateColumn) > 0 { - i.result.Add(driverV2.RuleLevelError, "", DuplicatePrimaryKeyedColumnMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.DuplicatePrimaryKeyedColumnMessage), strings.Join(duplicateColumn, ",")) } case ast.ConstraintUniq, ast.ConstraintIndex, ast.ConstraintFulltext: @@ -425,7 +404,7 @@ func (i *MysqlDriverImpl) checkInvalidAlterTable(stmt *ast.AlterTableStmt) error indexLowerCaseNameMap.Add(indexName) } } else { - indexName = "(匿名)" + indexName = plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnonymousMark) } names := []string{} for _, col := range spec.Constraint.Keys { @@ -437,30 +416,30 @@ func (i *MysqlDriverImpl) checkInvalidAlterTable(stmt *ast.AlterTableStmt) error } duplicateColumn := utils.GetDuplicate(names) if len(duplicateColumn) > 0 { - i.result.Add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, indexName, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.DuplicateIndexedColumnMessage), indexName, strings.Join(duplicateColumn, ",")) } } } if len(needExistsColsName) > 0 { - 
i.result.Add(driverV2.RuleLevelError, "", ColumnNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.ColumnNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistsColsName), ",")) } if len(needNotExistsColsName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", ColumnExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.ColumnExistMessage), strings.Join(utils.RemoveDuplicate(needNotExistsColsName), ",")) } if len(needExistsIndexesName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", IndexNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.IndexNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistsIndexesName), ",")) } if len(needNotExistsIndexesName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", IndexExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.IndexExistMessage), strings.Join(utils.RemoveDuplicate(needNotExistsIndexesName), ",")) } if len(needExistsKeyColsName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", KeyedColumnNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.KeyedColumnNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistsKeyColsName), ",")) } return nil @@ -481,7 +460,7 @@ func (i *MysqlDriverImpl) checkInvalidAlterTableOffline(stmt *ast.AlterTableStmt for _, spec := range util.GetAlterTableSpecByTp(stmt.Specs, ast.AlterTableAddColumns) { for _, col := range spec.NewColumns { if hasPk && util.HasOneInOptions(col.Options, ast.ColumnOptionPrimaryKey) { - i.result.Add(driverV2.RuleLevelError, "", PrimaryKeyExistMessage) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.PrimaryKeyExistMessage)) } else { hasPk = true } @@ -494,7 +473,7 @@ func (i *MysqlDriverImpl) checkInvalidAlterTableOffline(stmt *ast.AlterTableStmt case ast.ConstraintPrimaryKey: if hasPk { // primary key has exist, 
can not add primary key - i.result.Add(driverV2.RuleLevelError, "", PrimaryKeyExistMessage) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.PrimaryKeyExistMessage)) } else { hasPk = true } @@ -505,13 +484,13 @@ func (i *MysqlDriverImpl) checkInvalidAlterTableOffline(stmt *ast.AlterTableStmt } duplicateColumn := utils.GetDuplicate(names) if len(duplicateColumn) > 0 { - i.result.Add(driverV2.RuleLevelError, "", DuplicatePrimaryKeyedColumnMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.DuplicatePrimaryKeyedColumnMessage), strings.Join(duplicateColumn, ",")) } case ast.ConstraintUniq, ast.ConstraintIndex, ast.ConstraintFulltext: indexName := spec.Constraint.Name if indexName == "" { - indexName = "(匿名)" + indexName = plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.AnonymousMark) } names := []string{} for _, col := range spec.Constraint.Keys { @@ -520,7 +499,7 @@ func (i *MysqlDriverImpl) checkInvalidAlterTableOffline(stmt *ast.AlterTableStmt } duplicateColumn := utils.GetDuplicate(names) if len(duplicateColumn) > 0 { - i.result.Add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, indexName, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.DuplicateIndexedColumnMessage), indexName, strings.Join(duplicateColumn, ",")) } } @@ -561,11 +540,11 @@ func (i *MysqlDriverImpl) checkInvalidDropTable(stmt *ast.DropTableStmt) error { } } if len(needExistsSchemasName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", SchemaNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.SchemaNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistsSchemasName), ",")) } if len(needExistsTablesName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", TableNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.TableNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistsTablesName), 
",")) } return nil @@ -584,7 +563,7 @@ func (i *MysqlDriverImpl) checkInvalidUse(stmt *ast.UseStmt) error { return err } if !schemaExist { - i.result.Add(driverV2.RuleLevelError, "", SchemaNotExistMessage, stmt.DBName) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.SchemaNotExistMessage), stmt.DBName) } return nil } @@ -606,7 +585,7 @@ func (i *MysqlDriverImpl) checkInvalidCreateDatabase(stmt *ast.CreateDatabaseStm return err } if schemaExist { - i.result.Add(driverV2.RuleLevelError, "", SchemaExistMessage, schemaName) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.SchemaExistMessage), schemaName) } return nil } @@ -628,7 +607,7 @@ func (i *MysqlDriverImpl) checkInvalidDropDatabase(stmt *ast.DropDatabaseStmt) e return err } if !schemaExist { - i.result.Add(driverV2.RuleLevelError, "", SchemaNotExistMessage, schemaName) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.SchemaNotExistMessage), schemaName) } return nil } @@ -650,7 +629,7 @@ func (i *MysqlDriverImpl) checkInvalidCreateIndex(stmt *ast.CreateIndexStmt) err return err } if !schemaExist { - i.result.Add(driverV2.RuleLevelError, "", SchemaNotExistMessage, schemaName) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.SchemaNotExistMessage), schemaName) return nil } createTableStmt, tableExist, err := i.Ctx.GetCreateTableStmt(stmt.Table) @@ -658,7 +637,7 @@ func (i *MysqlDriverImpl) checkInvalidCreateIndex(stmt *ast.CreateIndexStmt) err return err } if !tableExist { - i.result.Add(driverV2.RuleLevelError, "", TableNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.TableNotExistMessage), i.getTableName(stmt.Table)) return nil } @@ -673,7 +652,7 @@ func (i *MysqlDriverImpl) checkInvalidCreateIndex(stmt *ast.CreateIndexStmt) err } } if _, ok := indexNameMap[stmt.IndexName]; ok { - i.result.Add(driverV2.RuleLevelError, "", IndexExistMessage, 
stmt.IndexName) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.IndexExistMessage), stmt.IndexName) } keyColsName := []string{} keyColNeedExist := []string{} @@ -686,12 +665,12 @@ func (i *MysqlDriverImpl) checkInvalidCreateIndex(stmt *ast.CreateIndexStmt) err } duplicateName := utils.GetDuplicate(keyColsName) if len(duplicateName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, stmt.IndexName, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.DuplicateIndexedColumnMessage), stmt.IndexName, strings.Join(duplicateName, ",")) } if len(keyColNeedExist) > 0 { - i.result.Add(driverV2.RuleLevelError, "", KeyedColumnNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.KeyedColumnNotExistMessage), strings.Join(utils.RemoveDuplicate(keyColNeedExist), ",")) } return nil @@ -712,7 +691,7 @@ func (i *MysqlDriverImpl) checkInvalidCreateIndexOffline(stmt *ast.CreateIndexSt } duplicateName := utils.GetDuplicate(keyColsName) if len(duplicateName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, stmt.IndexName, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.DuplicateIndexedColumnMessage), stmt.IndexName, strings.Join(duplicateName, ",")) } return nil @@ -737,7 +716,7 @@ func (i *MysqlDriverImpl) checkInvalidDropIndex(stmt *ast.DropIndexStmt) error { return err } if !schemaExist { - i.result.Add(driverV2.RuleLevelError, "", SchemaNotExistMessage, schemaName) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.SchemaNotExistMessage), schemaName) return nil } createTableStmt, tableExist, err := i.Ctx.GetCreateTableStmt(stmt.Table) @@ -745,7 +724,7 @@ func (i *MysqlDriverImpl) checkInvalidDropIndex(stmt *ast.DropIndexStmt) error { return err } if !tableExist { - i.result.Add(driverV2.RuleLevelError, "", TableNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", 
plocale.ShouldLocalizeAll(plocale.TableNotExistMessage), i.getTableName(stmt.Table)) return nil } @@ -756,7 +735,7 @@ func (i *MysqlDriverImpl) checkInvalidDropIndex(stmt *ast.DropIndexStmt) error { } } if _, ok := indexNameMap[stmt.IndexName]; !ok { - i.result.Add(driverV2.RuleLevelError, "", IndexNotExistMessage, stmt.IndexName) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.IndexNotExistMessage), stmt.IndexName) } return nil } @@ -781,7 +760,7 @@ func (i *MysqlDriverImpl) checkInvalidInsert(stmt *ast.InsertStmt) error { return err } if !schemaExist { - i.result.Add(driverV2.RuleLevelError, "", SchemaNotExistMessage, schemaName) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.SchemaNotExistMessage), schemaName) return nil } createTableStmt, tableExist, err := i.Ctx.GetCreateTableStmt(table) @@ -789,7 +768,7 @@ func (i *MysqlDriverImpl) checkInvalidInsert(stmt *ast.InsertStmt) error { return err } if !tableExist { - i.result.Add(driverV2.RuleLevelError, "", TableNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.TableNotExistMessage), i.getTableName(table)) return nil } @@ -815,7 +794,7 @@ func (i *MysqlDriverImpl) checkInvalidInsert(stmt *ast.InsertStmt) error { } } if d := utils.GetDuplicate(insertColsName); len(d) > 0 { - i.result.Add(driverV2.RuleLevelError, "", DuplicateColumnsMessage, strings.Join(d, ",")) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.DuplicateColumnsMessage), strings.Join(d, ",")) } needExistColsName := []string{} @@ -825,14 +804,14 @@ func (i *MysqlDriverImpl) checkInvalidInsert(stmt *ast.InsertStmt) error { } } if len(needExistColsName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", ColumnNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.ColumnNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistColsName), ",")) } if stmt.Lists != nil { for _, list 
:= range stmt.Lists { if len(list) != len(insertColsName) { - i.result.Add(driverV2.RuleLevelError, "", ColumnsValuesNotMatchMessage) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.ColumnsValuesNotMatchMessage)) break } } @@ -861,13 +840,13 @@ func (i *MysqlDriverImpl) checkInvalidInsertOffline(stmt *ast.InsertStmt) error } } if d := utils.GetDuplicate(insertColsName); len(d) > 0 { - i.result.Add(driverV2.RuleLevelError, "", DuplicateColumnsMessage, strings.Join(d, ",")) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.DuplicateColumnsMessage), strings.Join(d, ",")) } if stmt.Lists != nil && len(insertColsName) > 0 { for _, list := range stmt.Lists { if len(list) != len(insertColsName) { - i.result.Add(driverV2.RuleLevelError, "", ColumnsValuesNotMatchMessage) + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.ColumnsValuesNotMatchMessage)) break } } @@ -926,11 +905,11 @@ func (i *MysqlDriverImpl) checkInvalidUpdate(stmt *ast.UpdateStmt) error { } } if len(needExistsSchemasName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", SchemaNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.SchemaNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistsSchemasName), ",")) } if len(needExistsTablesName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", TableNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.TableNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistsTablesName), ",")) } @@ -991,12 +970,12 @@ func (i *MysqlDriverImpl) checkInvalidUpdate(stmt *ast.UpdateStmt) error { }, stmt.Where) if len(needExistColsName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", ColumnNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.ColumnNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistColsName), ",")) } if len(ambiguousColsName) > 0 { 
- i.result.Add(driverV2.RuleLevelError, "", ColumnIsAmbiguousMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.ColumnIsAmbiguousMessage), strings.Join(utils.RemoveDuplicate(ambiguousColsName), ",")) } return nil @@ -1054,11 +1033,11 @@ func (i *MysqlDriverImpl) checkInvalidDelete(stmt *ast.DeleteStmt) error { } } if len(needExistsSchemasName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", SchemaNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.SchemaNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistsSchemasName), ",")) } if len(needExistsTablesName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", TableNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.TableNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistsTablesName), ",")) } if len(needExistsSchemasName) > 0 || len(needExistsTablesName) > 0 { @@ -1106,12 +1085,12 @@ func (i *MysqlDriverImpl) checkInvalidDelete(stmt *ast.DeleteStmt) error { }, stmt.Where) if len(needExistColsName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", ColumnNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.ColumnNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistColsName), ",")) } if len(ambiguousColsName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", ColumnIsAmbiguousMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.ColumnIsAmbiguousMessage), strings.Join(utils.RemoveDuplicate(ambiguousColsName), ",")) } return nil @@ -1165,11 +1144,11 @@ func (i *MysqlDriverImpl) checkInvalidSelect(stmt *ast.SelectStmt) error { } } if len(needExistsSchemasName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", SchemaNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.SchemaNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistsSchemasName), ",")) } if 
len(needExistsTablesName) > 0 { - i.result.Add(driverV2.RuleLevelError, "", TableNotExistMessage, + i.result.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAll(plocale.TableNotExistMessage), strings.Join(utils.RemoveDuplicate(needExistsTablesName), ",")) } return nil @@ -1177,6 +1156,6 @@ func (i *MysqlDriverImpl) checkInvalidSelect(stmt *ast.SelectStmt) error { // checkUnparsedStmt might add more check in future. func (i *MysqlDriverImpl) checkUnparsedStmt(stmt *ast.UnparsedStmt) error { - i.result.Add(driverV2.RuleLevelWarn, "", "语法错误或者解析器不支持,请人工确认SQL正确性") + i.result.Add(driverV2.RuleLevelWarn, "", plocale.ShouldLocalizeAll(plocale.UnsupportedSyntaxError)) return nil } diff --git a/sqle/driver/mysql/audit_test.go b/sqle/driver/mysql/audit_test.go index d09289a70f..971ea8fc2b 100644 --- a/sqle/driver/mysql/audit_test.go +++ b/sqle/driver/mysql/audit_test.go @@ -7,6 +7,8 @@ import ( "strings" "testing" + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" + "github.com/actiontech/sqle/sqle/driver/mysql/executor" rulepkg "github.com/actiontech/sqle/sqle/driver/mysql/rule" "github.com/actiontech/sqle/sqle/driver/mysql/session" @@ -32,8 +34,8 @@ func newTestResult() *testResult { } } -func (t *testResult) add(level driverV2.RuleLevel, ruleName, message string, args ...interface{}) *testResult { - t.Results.Add(level, ruleName, message, args...) +func (t *testResult) add(level driverV2.RuleLevel, ruleName string, message string, args ...interface{}) *testResult { + t.Results.Add(level, ruleName, plocale.ConvertStr2I18n(message), args...) return t } @@ -43,9 +45,11 @@ func (t *testResult) addResult(ruleName string, args ...interface{}) *testResult panic("should not enter here, it means that the uint test result is not expect") } level := handler.Rule.Level - message := handler.Message - - return t.add(level, ruleName, message, args...) 
+ var m string + if handler.Message != nil { + m = plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, handler.Message) + } + return t.add(level, ruleName, m, args...) } func (t *testResult) level() driverV2.RuleLevel { @@ -227,7 +231,7 @@ func TestMessage(t *testing.T) { func TestCheckInvalidUse(t *testing.T) { runDefaultRulesInspectCase(t, "use_database: database not exist", DefaultMysqlInspect(), "use no_exist_db", - newTestResult().add(driverV2.RuleLevelError, "", SchemaNotExistMessage, "no_exist_db"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "no_exist_db"), ) inspect1 := DefaultMysqlInspect() @@ -243,7 +247,7 @@ func TestCaseSensitive(t *testing.T) { ` select id from exist_db.EXIST_TB_1 where id = 1 limit 1; `, - newTestResult().add(driverV2.RuleLevelError, "", TableNotExistMessage, "exist_db.EXIST_TB_1"). + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.EXIST_TB_1"). 
add(driverV2.RuleLevelNotice, "", "LIMIT 查询建议使用ORDER BY")) inspect1 := DefaultMysqlInspect() @@ -267,7 +271,7 @@ func TestDDLCheckTableSize(t *testing.T) { runSingleRuleInspectCase(rule, t, "drop_table: table4 oversized", DefaultMysqlInspect(), `drop table exist_db.exist_tb_4;`, newTestResult().addResult(rulepkg.DDLCheckTableSize, "exist_tb_4", 16)) runSingleRuleInspectCase(rule, t, "alter_table: table4 oversized", DefaultMysqlInspect(), - `alter table exist_db.exist_tb_4;`, newTestResult().addResult(rulepkg.DDLCheckTableSize, "exist_tb_4", 16).addResult(rulepkg.ConfigDDLOSCMinSize, PTOSCNoUniqueIndexOrPrimaryKey)) + `alter table exist_db.exist_tb_4;`, newTestResult().addResult(rulepkg.DDLCheckTableSize, "exist_tb_4", 16).addResult(rulepkg.ConfigDDLOSCMinSize, plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.PTOSCNoUniqueIndexOrPrimaryKey))) } @@ -324,7 +328,7 @@ v2 varchar(255) NOT NULL DEFAULT "unit test" COMMENT "unit test", PRIMARY KEY (id) )ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COMMENT="unit test"; `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", SchemaNotExistMessage, "not_exist_db"), + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "not_exist_db"), ) runDefaultRulesInspectCase(t, "create_table: table is exist(1)", DefaultMysqlInspect(), @@ -356,14 +360,14 @@ v2 varchar(255) NOT NULL DEFAULT "unit test" COMMENT "unit test", PRIMARY KEY (id) )ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COMMENT="unit test"; `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", TableExistMessage, "exist_db.exist_tb_1"), + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableExistMessage), "exist_db.exist_tb_1"), ) runDefaultRulesInspectCase(t, 
"create_table: refer table not exist", DefaultMysqlInspect(), ` CREATE TABLE exist_db.not_exist_tb_1 like exist_db.not_exist_tb_2; `, - newTestResult().add(driverV2.RuleLevelError, "", TableNotExistMessage, "exist_db.not_exist_tb_2"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.not_exist_tb_2"), ) runDefaultRulesInspectCase(t, "create_table: multi pk(1)", DefaultMysqlInspect(), @@ -377,7 +381,7 @@ v2 varchar(255) NOT NULL DEFAULT "unit test" COMMENT "unit test", PRIMARY KEY (id) )ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COMMENT="unit test"; `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", MultiPrimaryKeyMessage)) + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.MultiPrimaryKeyMessage))) runDefaultRulesInspectCase(t, "create_table: multi pk(2)", DefaultMysqlInspect(), ` @@ -391,7 +395,7 @@ PRIMARY KEY (id), PRIMARY KEY (v1) )ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COMMENT="unit test"; `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", MultiPrimaryKeyMessage)) + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.MultiPrimaryKeyMessage))) runDefaultRulesInspectCase(t, "create_table: duplicate column", DefaultMysqlInspect(), ` @@ -404,7 +408,7 @@ v1 varchar(255) NOT NULL DEFAULT "unit test" COMMENT "unit test", PRIMARY KEY (id) )ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COMMENT="unit test"; `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", DuplicateColumnsMessage, + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, 
plocale.DuplicateColumnsMessage), "v1")) runDefaultRulesInspectCase(t, "create_table: duplicate index", DefaultMysqlInspect(), @@ -420,7 +424,7 @@ INDEX idx_1 (v1), INDEX idx_1 (v2) )ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COMMENT="unit test"; `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", DuplicateIndexesMessage, + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexesMessage), "idx_1")) runDefaultRulesInspectCase(t, "create_table: key column not exist", DefaultMysqlInspect(), @@ -436,7 +440,7 @@ INDEX idx_1 (v3), INDEX idx_2 (v4,v5) )ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COMMENT="unit test"; `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", KeyedColumnNotExistMessage, + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.KeyedColumnNotExistMessage), "v3,v4,v5").add(driverV2.RuleLevelWarn, rulepkg.DDLCheckIndexNotNullConstraint, "这些索引字段(v3,v4,v5)需要有非空约束")) runDefaultRulesInspectCase(t, "create_table: pk column not exist", DefaultMysqlInspect(), @@ -450,7 +454,7 @@ v2 varchar(255) NOT NULL DEFAULT "unit test" COMMENT "unit test", PRIMARY KEY (id11) )ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COMMENT="unit test"; `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", KeyedColumnNotExistMessage, + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.KeyedColumnNotExistMessage), "id11").addResult(rulepkg.DDLCheckFieldNotNUllMustContainDefaultValue, "id").addResult(rulepkg.DDLCheckIndexNotNullConstraint, "id11")) runDefaultRulesInspectCase(t, "create_table: pk column is duplicate", DefaultMysqlInspect(), @@ -464,7 +468,7 @@ 
v2 varchar(255) NOT NULL DEFAULT "unit test" COMMENT "unit test", PRIMARY KEY (id,id) )ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COMMENT="unit test"; `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", DuplicatePrimaryKeyedColumnMessage, "id")) + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicatePrimaryKeyedColumnMessage), "id")) runDefaultRulesInspectCase(t, "create_table: index column is duplicate", DefaultMysqlInspect(), ` @@ -478,7 +482,7 @@ PRIMARY KEY (id), INDEX idx_1 (v1,v1) )ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COMMENT="unit test"; `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, "idx_1", + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexedColumnMessage), "idx_1", "v1")) runDefaultRulesInspectCase(t, "create_table: index column is duplicate(2)", DefaultMysqlInspect(), @@ -493,7 +497,7 @@ PRIMARY KEY (id), INDEX (v1,v1) )ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COMMENT="unit test"; `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, "(匿名)", + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexedColumnMessage), "(匿名)", "v1").addResult(rulepkg.DDLCheckIndexPrefix, "idx_")) runDefaultRulesInspectCase(t, "create_table: index column is duplicate(3)", DefaultMysqlInspect(), @@ -509,8 +513,8 @@ INDEX idx_1 (v1,v1), INDEX idx_2 (v1,v2,v2) )ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb4 COMMENT="unit test"; `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", 
DuplicateIndexedColumnMessage, "idx_1", "v1"). - add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, "idx_2", "v2")) + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexedColumnMessage), "idx_1", "v1"). + add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexedColumnMessage), "idx_2", "v2")) } func TestCheckInvalidAlterTable(t *testing.T) { @@ -530,7 +534,7 @@ func TestCheckInvalidAlterTable(t *testing.T) { runDefaultRulesInspectCase(t, "alter_table: schema not exist", DefaultMysqlInspect(), `ALTER TABLE not_exist_db.exist_tb_1 add column v5 varchar(255) NOT NULL DEFAULT "unit test" COMMENT "unit test"; `, - newTestResult().add(driverV2.RuleLevelError, "", SchemaNotExistMessage, + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "not_exist_db"), ) @@ -538,7 +542,7 @@ func TestCheckInvalidAlterTable(t *testing.T) { ` ALTER TABLE exist_db.not_exist_tb_1 add column v5 varchar(255) NOT NULL DEFAULT "unit test" COMMENT "unit test"; `, - newTestResult().add(driverV2.RuleLevelError, "", TableNotExistMessage, + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.not_exist_tb_1"), ) @@ -546,14 +550,14 @@ ALTER TABLE exist_db.not_exist_tb_1 add column v5 varchar(255) NOT NULL DEFAULT ` ALTER TABLE exist_db.exist_tb_1 Add column v1 varchar(255) NOT NULL DEFAULT "unit test" COMMENT "unit test"; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnExistMessage, "v1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnExistMessage), "v1"), ) runDefaultRulesInspectCase(t, "alter_table: drop a not exist column", DefaultMysqlInspect(), ` ALTER TABLE 
exist_db.exist_tb_1 drop column v5; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "v5"), ) @@ -561,7 +565,7 @@ ALTER TABLE exist_db.exist_tb_1 drop column v5; ` ALTER TABLE exist_db.exist_tb_1 alter column v5 set default 'v5'; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "v5"), ) @@ -576,7 +580,7 @@ ALTER TABLE exist_db.exist_tb_1 change column v1 v1 varchar(255) NOT NULL DEFAUL ` ALTER TABLE exist_db.exist_tb_1 change column v5 v5 varchar(255) NOT NULL DEFAULT "unit test" COMMENT "unit test"; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "v5"), ) @@ -584,7 +588,7 @@ ALTER TABLE exist_db.exist_tb_1 change column v5 v5 varchar(255) NOT NULL DEFAUL ` ALTER TABLE exist_db.exist_tb_1 change column v2 v1 varchar(255) NOT NULL DEFAULT "unit test" COMMENT "unit test"; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnExistMessage, + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnExistMessage), "v1"), ) @@ -600,7 +604,7 @@ ALTER TABLE exist_db.exist_tb_2 Add primary key(id); ALTER TABLE exist_db.exist_tb_1 Add primary key(v1); `, newTestResult().addResult(rulepkg.DDLCheckPKName). - add(driverV2.RuleLevelError, "", PrimaryKeyExistMessage). + add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.PrimaryKeyExistMessage)). addResult(rulepkg.DDLCheckPKWithoutAutoIncrement). 
addResult(rulepkg.DDLCheckPKWithoutBigintUnsigned), ) @@ -609,14 +613,14 @@ ALTER TABLE exist_db.exist_tb_1 Add primary key(v1); ` ALTER TABLE exist_db.exist_tb_2 Add primary key(id11); `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", KeyedColumnNotExistMessage, "id11").addResult(rulepkg.DDLCheckIndexNotNullConstraint, "id11"), + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.KeyedColumnNotExistMessage), "id11").addResult(rulepkg.DDLCheckIndexNotNullConstraint, "id11"), ) runDefaultRulesInspectCase(t, "alter_table: Add pk but key column is duplicate", DefaultMysqlInspect(), ` ALTER TABLE exist_db.exist_tb_2 Add primary key(id,id); `, - newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", DuplicatePrimaryKeyedColumnMessage, + newTestResult().addResult(rulepkg.DDLCheckPKName).add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicatePrimaryKeyedColumnMessage), "id"), ) @@ -624,28 +628,28 @@ ALTER TABLE exist_db.exist_tb_2 Add primary key(id,id); ` ALTER TABLE exist_db.exist_tb_1 Add index idx_1 (v1); `, - newTestResult().add(driverV2.RuleLevelError, "", IndexExistMessage, "idx_1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.IndexExistMessage), "idx_1"), ) runDefaultRulesInspectCase(t, "alter_table: drop a not exist index", DefaultMysqlInspect(), ` ALTER TABLE exist_db.exist_tb_1 drop index idx_2; `, - newTestResult().add(driverV2.RuleLevelError, "", IndexNotExistMessage, "idx_2"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.IndexNotExistMessage), "idx_2"), ) runDefaultRulesInspectCase(t, "alter_table: Add index but key column not exist", DefaultMysqlInspect(), ` ALTER TABLE exist_db.exist_tb_1 Add index idx_2 
(v3); `, - newTestResult().add(driverV2.RuleLevelError, "", KeyedColumnNotExistMessage, "v3").addResult(rulepkg.DDLCheckIndexNotNullConstraint, "v3"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.KeyedColumnNotExistMessage), "v3").addResult(rulepkg.DDLCheckIndexNotNullConstraint, "v3"), ) runDefaultRulesInspectCase(t, "alter_table: Add index but key column is duplicate", DefaultMysqlInspect(), ` ALTER TABLE exist_db.exist_tb_1 Add index idx_2 (id,id); `, - newTestResult().add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, "idx_2", + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexedColumnMessage), "idx_2", "id"), ) @@ -653,7 +657,7 @@ ALTER TABLE exist_db.exist_tb_1 Add index idx_2 (id,id); ` ALTER TABLE exist_db.exist_tb_1 Add index (id,id); `, - newTestResult().add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, "(匿名)", + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexedColumnMessage), "(匿名)", "id").addResult(rulepkg.DDLCheckIndexPrefix, "idx_"), ) } @@ -670,7 +674,7 @@ CREATE DATABASE if not exists exist_db; ` CREATE DATABASE exist_db; `, - newTestResult().add(driverV2.RuleLevelError, "", SchemaExistMessage, "exist_db"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaExistMessage), "exist_db"), ) } @@ -679,42 +683,42 @@ func TestCheckInvalidCreateIndex(t *testing.T) { ` CREATE INDEX idx_1 ON not_exist_db.not_exist_tb(v1); `, - newTestResult().add(driverV2.RuleLevelError, "", SchemaNotExistMessage, "not_exist_db").addResult(rulepkg.DDLCheckIndexNotNullConstraint, "v1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), 
"not_exist_db").addResult(rulepkg.DDLCheckIndexNotNullConstraint, "v1"), ) runDefaultRulesInspectCase(t, "create_index: table not exist", DefaultMysqlInspect(), ` CREATE INDEX idx_1 ON exist_db.not_exist_tb(v1); `, - newTestResult().add(driverV2.RuleLevelError, "", TableNotExistMessage, "exist_db.not_exist_tb").addResult(rulepkg.DDLCheckIndexNotNullConstraint, "v1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.not_exist_tb").addResult(rulepkg.DDLCheckIndexNotNullConstraint, "v1"), ) runDefaultRulesInspectCase(t, "create_index: index exist", DefaultMysqlInspect(), ` CREATE INDEX idx_1 ON exist_db.exist_tb_1(v1); `, - newTestResult().add(driverV2.RuleLevelError, "", IndexExistMessage, "idx_1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.IndexExistMessage), "idx_1"), ) runDefaultRulesInspectCase(t, "create_index: key column not exist", DefaultMysqlInspect(), ` CREATE INDEX idx_2 ON exist_db.exist_tb_1(v3); `, - newTestResult().add(driverV2.RuleLevelError, "", KeyedColumnNotExistMessage, "v3").addResult(rulepkg.DDLCheckIndexNotNullConstraint, "v3"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.KeyedColumnNotExistMessage), "v3").addResult(rulepkg.DDLCheckIndexNotNullConstraint, "v3"), ) runDefaultRulesInspectCase(t, "create_index: key column is duplicate", DefaultMysqlInspect(), ` CREATE INDEX idx_2 ON exist_db.exist_tb_1(id,id); `, - newTestResult().add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, "idx_2", "id"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexedColumnMessage), "idx_2", "id"), ) runDefaultRulesInspectCase(t, "create_index: key column is duplicate", DefaultMysqlInspect(), ` CREATE INDEX idx_2 ON 
exist_db.exist_tb_1(id,id,v1); `, - newTestResult().add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, "idx_2", "id"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexedColumnMessage), "idx_2", "id"), ) } @@ -742,7 +746,7 @@ DROP DATABASE if exists not_exist_db; ` DROP DATABASE not_exist_db; `, - newTestResult().add(driverV2.RuleLevelError, "", SchemaNotExistMessage, "not_exist_db"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "not_exist_db"), ) runDefaultRulesInspectCase(t, "drop_table: ok", DefaultMysqlInspect(), @@ -763,14 +767,14 @@ DROP TABLE if exists not_exist_db.not_exist_tb_1; ` DROP TABLE not_exist_db.not_exist_tb_1; `, - newTestResult().add(driverV2.RuleLevelError, "", SchemaNotExistMessage, "not_exist_db"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "not_exist_db"), ) runDefaultRulesInspectCase(t, "drop_table: table not exist", DefaultMysqlInspect(), ` DROP TABLE exist_db.not_exist_tb_1; `, - newTestResult().add(driverV2.RuleLevelError, "", TableNotExistMessage, "exist_db.not_exist_tb_1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.not_exist_tb_1"), ) runDefaultRulesInspectCase(t, "drop_index: ok", DefaultMysqlInspect(), @@ -784,7 +788,7 @@ DROP INDEX idx_1 ON exist_db.exist_tb_1; ` DROP INDEX idx_2 ON exist_db.exist_tb_1; `, - newTestResult().add(driverV2.RuleLevelError, "", IndexNotExistMessage, "idx_2"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.IndexNotExistMessage), "idx_2"), ) runDefaultRulesInspectCase(t, "drop_index: if exists and index not exist", DefaultMysqlInspect(), @@ -800,49 +804,49 
@@ func TestCheckInvalidInsert(t *testing.T) { ` insert into not_exist_db.not_exist_tb values (1,"1","1"); `, - newTestResult().add(driverV2.RuleLevelError, "", SchemaNotExistMessage, "not_exist_db"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "not_exist_db"), ) runDefaultRulesInspectCase(t, "insert: table not exist", DefaultMysqlInspect(), ` insert into exist_db.not_exist_tb values (1,"1","1"); `, - newTestResult().add(driverV2.RuleLevelError, "", TableNotExistMessage, "exist_db.not_exist_tb"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.not_exist_tb"), ) runDefaultRulesInspectCase(t, "insert: column not exist(1)", DefaultMysqlInspect(), ` insert into exist_db.exist_tb_1 (id,v1,v3) values (1,"1","1"); `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "v3"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "v3"), ) runDefaultRulesInspectCase(t, "insert: column not exist(2)", DefaultMysqlInspect(), ` insert into exist_db.exist_tb_1 set id=1,v1="1",v3="1"; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "v3"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "v3"), ) runDefaultRulesInspectCase(t, "insert: column is duplicate(1)", DefaultMysqlInspect(), ` insert into exist_db.exist_tb_1 (id,v1,v1) values (1,"1","1"); `, - newTestResult().add(driverV2.RuleLevelError, "", DuplicateColumnsMessage, "v1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateColumnsMessage), "v1"), ) runDefaultRulesInspectCase(t, "insert: column is duplicate(2)", DefaultMysqlInspect(), ` 
insert into exist_db.exist_tb_1 set id=1,v1="1",v1="1"; `, - newTestResult().add(driverV2.RuleLevelError, "", DuplicateColumnsMessage, "v1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateColumnsMessage), "v1"), ) runDefaultRulesInspectCase(t, "insert: do not match values and columns", DefaultMysqlInspect(), ` insert into exist_db.exist_tb_1 (id,v1,v2) values (1,"1","1"),(2,"2","2","2"); `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnsValuesNotMatchMessage), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnsValuesNotMatchMessage)), ) } @@ -977,28 +981,28 @@ update exist_tb_1 set v1="2" where exist_db.exist_tb_1.id=1; ` update not_exist_db.not_exist_tb set v1="2" where id=1; `, - newTestResult().add(driverV2.RuleLevelError, "", SchemaNotExistMessage, "not_exist_db"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "not_exist_db"), ) runDefaultRulesInspectCase(t, "update: table not exist", DefaultMysqlInspect(), ` update exist_db.not_exist_tb set v1="2" where id=1; `, - newTestResult().add(driverV2.RuleLevelError, "", TableNotExistMessage, "exist_db.not_exist_tb"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.not_exist_tb"), ) runDefaultRulesInspectCase(t, "update: column not exist", DefaultMysqlInspect(), ` update exist_db.exist_tb_1 set v3="2" where id=1; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "v3"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "v3"), ) runDefaultRulesInspectCase(t, "update: where column not exist", DefaultMysqlInspect(), ` update exist_db.exist_tb_1 set v1="2" where 
v3=1; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "v3"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "v3"), ) runDefaultRulesInspectCase(t, "update with alias: ok", DefaultMysqlInspect(), @@ -1011,28 +1015,28 @@ update exist_tb_1 as t set t.v1 = "1" where t.id = 1; ` update exist_db.not_exist_tb as t set t.v3 = "1" where t.id = 1; `, - newTestResult().add(driverV2.RuleLevelError, "", TableNotExistMessage, "exist_db.not_exist_tb"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.not_exist_tb"), ) runDefaultRulesInspectCase(t, "update with alias: column not exist", DefaultMysqlInspect(), ` update exist_tb_1 as t set t.v3 = "1" where t.id = 1; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "t.v3"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "t.v3"), ) runDefaultRulesInspectCase(t, "update with alias: column not exist", DefaultMysqlInspect(), ` update exist_tb_1 as t set t.v1 = "1" where t.v3 = 1; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "t.v3"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "t.v3"), ) runDefaultRulesInspectCase(t, "update with alias: column not exist", DefaultMysqlInspect(), ` update exist_tb_1 as t set exist_tb_1.v1 = "1" where t.id = 1; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "exist_tb_1.v1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "exist_tb_1.v1"), ) runDefaultRulesInspectCase(t, "multi-update: ok", DefaultMysqlInspect(), @@ -1053,28 
+1057,28 @@ update exist_tb_1 inner join exist_tb_2 on exist_tb_1.id = exist_tb_2.id set exi ` update exist_db.not_exist_tb set exist_tb_1.v2 = "1" where exist_tb_1.id = exist_tb_2.id; `, - newTestResult().add(driverV2.RuleLevelError, "", TableNotExistMessage, "exist_db.not_exist_tb"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.not_exist_tb"), ) runDefaultRulesInspectCase(t, "multi-update: column not exist 1", DefaultMysqlInspect(), ` update exist_tb_1,exist_tb_2 set exist_tb_1.v3 = "1" where exist_tb_1.id = exist_tb_2.id; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "exist_tb_1.v3"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "exist_tb_1.v3"), ) runDefaultRulesInspectCase(t, "multi-update: column not exist 2", DefaultMysqlInspect(), ` update exist_tb_1,exist_tb_2 set exist_tb_2.v3 = "1" where exist_tb_1.id = exist_tb_2.id; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "exist_tb_2.v3"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "exist_tb_2.v3"), ) runDefaultRulesInspectCase(t, "multi-update: column not exist 3", DefaultMysqlInspect(), ` update exist_tb_1,exist_tb_2 set exist_tb_1.v1 = "1" where exist_tb_1.v3 = exist_tb_2.v3; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "exist_tb_1.v3,exist_tb_2.v3"). + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "exist_tb_1.v3,exist_tb_2.v3"). 
addResult(rulepkg.DMLCheckJoinFieldUseIndex), ) @@ -1082,7 +1086,7 @@ update exist_tb_1,exist_tb_2 set exist_tb_1.v1 = "1" where exist_tb_1.v3 = exist ` update exist_db.exist_tb_1,exist_db.exist_tb_2 set exist_tb_3.v1 = "1" where exist_tb_1.v1 = exist_tb_2.v1; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "exist_tb_3.v1"). + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "exist_tb_3.v1"). addResult(rulepkg.DMLCheckJoinFieldUseIndex), ) @@ -1090,7 +1094,7 @@ update exist_db.exist_tb_1,exist_db.exist_tb_2 set exist_tb_3.v1 = "1" where exi ` update exist_db.exist_tb_1,exist_db.exist_tb_2 set not_exist_db.exist_tb_1.v1 = "1" where exist_tb_1.v1 = exist_tb_2.v1; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "not_exist_db.exist_tb_1.v1"). + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "not_exist_db.exist_tb_1.v1"). addResult(rulepkg.DMLCheckJoinFieldUseIndex), ) @@ -1105,21 +1109,21 @@ update exist_tb_1,exist_tb_2 set user_id = "1" where exist_tb_1.id = exist_tb_2. 
` update exist_tb_1,exist_tb_2 set v1 = "1" where exist_tb_1.id = exist_tb_2.id; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnIsAmbiguousMessage, "v1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnIsAmbiguousMessage), "v1"), ) runDefaultRulesInspectCase(t, "multi-update: column not ambiguous", DefaultMysqlInspect(), ` update exist_tb_1,exist_tb_2 set v1 = "1" where exist_tb_1.id = exist_tb_2.id; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnIsAmbiguousMessage, "v1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnIsAmbiguousMessage), "v1"), ) runDefaultRulesInspectCase(t, "multi-update: where column not ambiguous", DefaultMysqlInspect(), ` update exist_tb_1,exist_tb_2 set exist_tb_1.v1 = "1" where v1 = 1; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnIsAmbiguousMessage, "v1").addResult(rulepkg.DMLCheckHasJoinCondition), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnIsAmbiguousMessage), "v1").addResult(rulepkg.DMLCheckHasJoinCondition), ) } @@ -1135,35 +1139,35 @@ delete from exist_db.exist_tb_1 where id=1; ` delete from not_exist_db.not_exist_tb where id=1; `, - newTestResult().add(driverV2.RuleLevelError, "", SchemaNotExistMessage, "not_exist_db"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "not_exist_db"), ) runDefaultRulesInspectCase(t, "delete: table not exist", DefaultMysqlInspect(), ` delete from exist_db.not_exist_tb where id=1; `, - newTestResult().add(driverV2.RuleLevelError, "", TableNotExistMessage, "exist_db.not_exist_tb"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.not_exist_tb"), ) 
runDefaultRulesInspectCase(t, "delete: where column not exist", DefaultMysqlInspect(), ` delete from exist_db.exist_tb_1 where v3=1; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "v3"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "v3"), ) runDefaultRulesInspectCase(t, "delete: where column not exist", DefaultMysqlInspect(), ` delete from exist_db.exist_tb_1 where exist_tb_1.v3=1; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "exist_tb_1.v3"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "exist_tb_1.v3"), ) runDefaultRulesInspectCase(t, "delete: where column not exist", DefaultMysqlInspect(), ` delete from exist_db.exist_tb_1 where exist_tb_2.id=1; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "exist_tb_2.id"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "exist_tb_2.id"), ) } @@ -1172,7 +1176,7 @@ func TestCheckInvalidSelect(t *testing.T) { ` select id from not_exist_db.not_exist_tb where id=1 limit 1; `, - newTestResult().add(driverV2.RuleLevelError, "", SchemaNotExistMessage, "not_exist_db"). + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "not_exist_db"). add(driverV2.RuleLevelNotice, "", "LIMIT 查询建议使用ORDER BY"), ) @@ -1180,7 +1184,7 @@ select id from not_exist_db.not_exist_tb where id=1 limit 1; ` select id from exist_db.not_exist_tb where id=1 limit 1; `, - newTestResult().add(driverV2.RuleLevelError, "", TableNotExistMessage, "exist_db.not_exist_tb"). 
+ newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.not_exist_tb"). add(driverV2.RuleLevelNotice, "", "LIMIT 查询建议使用ORDER BY"), ) } @@ -4669,7 +4673,7 @@ func TestWhitelist(t *testing.T) { // Value: "select * from t1 where id = 2", // MatchType: driver.SQLWhitelistFPMatch, // }, - // }, `select id from T1 where id = 4`, newTestResult().add(driver.RuleLevelError, TableNotExistMessage, "exist_db.T1")) + // }, `select id from T1 where id = 4`, newTestResult().add(driver.RuleLevelError, locale.ShouldLocalizeMessage(newEntry, locale.DefaultLocalizer, locale.TableNotExistMessage), "exist_db.T1")) // // inspect2 := DefaultMysqlInspect() // inspect2.Ctx = parentInspect.Ctx @@ -4711,12 +4715,12 @@ func Test_LowerCaseTableNameOpen(t *testing.T) { runEmptyRuleInspectCase(t, "test lower case table name open 1-1", getLowerCaseOpenInspect(), `use not_exist_db;`, newTestResult().add(driverV2.RuleLevelError, "", - SchemaNotExistMessage, "not_exist_db")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "not_exist_db")) runEmptyRuleInspectCase(t, "test lower case table name open 1-2", getLowerCaseOpenInspect(), `use NOT_EXIST_DB;`, newTestResult().add(driverV2.RuleLevelError, "", - SchemaNotExistMessage, "NOT_EXIST_DB")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "NOT_EXIST_DB")) runEmptyRuleInspectCase(t, "test lower case table name open 1-3", getLowerCaseOpenInspect(), `use EXIST_DB;`, @@ -4735,12 +4739,12 @@ func Test_LowerCaseTableNameOpen(t *testing.T) { runEmptyRuleInspectCase(t, "test lower case table name open 2-1", getLowerCaseOpenInspect(), `create database EXIST_DB;`, newTestResult().add(driverV2.RuleLevelError, "", - SchemaExistMessage, "EXIST_DB")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaExistMessage), "EXIST_DB")) runEmptyRuleInspectCase(t, "test lower case 
table name open 2-2", getLowerCaseOpenInspect(), `create database exist_db;`, newTestResult().add(driverV2.RuleLevelError, "", - SchemaExistMessage, "exist_db")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaExistMessage), "exist_db")) runEmptyRuleInspectCase(t, "test lower case table name open 2-3", getLowerCaseOpenInspect(), `create database not_exist_db;`, @@ -4755,45 +4759,45 @@ func Test_LowerCaseTableNameOpen(t *testing.T) { create database NOT_EXIST_DB;`, newTestResult(), newTestResult().add(driverV2.RuleLevelError, "", - SchemaExistMessage, "NOT_EXIST_DB")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaExistMessage), "NOT_EXIST_DB")) runEmptyRuleInspectCase(t, "test lower case table name open 2-6", getLowerCaseOpenInspect(), `create database NOT_EXIST_DB; create database not_exist_db;`, newTestResult(), newTestResult().add(driverV2.RuleLevelError, "", - SchemaExistMessage, "not_exist_db")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaExistMessage), "not_exist_db")) } // check table { runEmptyRuleInspectCase(t, "test lower case table name open 3-1", getLowerCaseOpenInspect(), `create table EXIST_DB.exist_tb_1 (id int);`, newTestResult().add(driverV2.RuleLevelError, "", - TableExistMessage, "EXIST_DB.exist_tb_1")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableExistMessage), "EXIST_DB.exist_tb_1")) runEmptyRuleInspectCase(t, "test lower case table name open 3-2", getLowerCaseOpenInspect(), `create table exist_db.exist_tb_1 (id int);`, newTestResult().add(driverV2.RuleLevelError, "", - TableExistMessage, "exist_db.exist_tb_1")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableExistMessage), "exist_db.exist_tb_1")) runEmptyRuleInspectCase(t, "test lower case table name open 3-3", getLowerCaseOpenInspect(), `create table EXIST_DB.EXIST_TB_1 (id int);`, newTestResult().add(driverV2.RuleLevelError, "", - TableExistMessage, 
"EXIST_DB.EXIST_TB_1")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableExistMessage), "EXIST_DB.EXIST_TB_1")) runEmptyRuleInspectCase(t, "test lower case table name open 3-4", getLowerCaseOpenInspect(), `create table EXIST_DB.EXIST_TB_2 (id int); create table EXIST_DB.exist_tb_2 (id int);`, newTestResult(), newTestResult().add(driverV2.RuleLevelError, "", - TableExistMessage, "EXIST_DB.exist_tb_2")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableExistMessage), "EXIST_DB.exist_tb_2")) runEmptyRuleInspectCase(t, "test lower case table name open 3-5", getLowerCaseOpenInspect(), `create table EXIST_DB.exist_tb_2 (id int); create table EXIST_DB.EXIST_TB_2 (id int);`, newTestResult(), newTestResult().add(driverV2.RuleLevelError, "", - TableExistMessage, "EXIST_DB.EXIST_TB_2")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableExistMessage), "EXIST_DB.EXIST_TB_2")) runEmptyRuleInspectCase(t, "test lower case table name open 3-6", getLowerCaseOpenInspect(), `alter table exist_db.EXIST_TB_1 add column v3 varchar(255) COMMENT "unit test";`, @@ -4805,7 +4809,7 @@ alter table exist_db.EXIST_TB_1 add column v3 varchar(255) COMMENT "unit test"; `, newTestResult(), newTestResult().add(driverV2.RuleLevelError, "", - TableNotExistMessage, "exist_db.EXIST_TB_1")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.EXIST_TB_1")) runEmptyRuleInspectCase(t, "test lower case table name open 3-8", getLowerCaseOpenInspect(), `alter table exist_db.EXIST_TB_1 rename AS exist_tb_2; @@ -4827,12 +4831,12 @@ alter table exist_db.EXIST_TB_2 add column v3 varchar(255) COMMENT "unit test"; runEmptyRuleInspectCase(t, "test lower case table name open 4-1", getLowerCaseOpenInspect(), `select id from exist_db.EXIST_TB_2 where id = 1;`, newTestResult().add(driverV2.RuleLevelError, "", - TableNotExistMessage, "exist_db.EXIST_TB_2")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, 
plocale.TableNotExistMessage), "exist_db.EXIST_TB_2")) runEmptyRuleInspectCase(t, "test lower case table name open 4-2", getLowerCaseOpenInspect(), `select id from exist_db.exist_tb_2 where id = 1;`, newTestResult().add(driverV2.RuleLevelError, "", - TableNotExistMessage, "exist_db.exist_tb_2")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.exist_tb_2")) runEmptyRuleInspectCase(t, "test lower case table name open 4-3", getLowerCaseOpenInspect(), `select id from exist_db.EXIST_TB_1 where id = 1;`, newTestResult()) @@ -4853,12 +4857,12 @@ func Test_LowerCaseTableNameClose(t *testing.T) { runEmptyRuleInspectCase(t, "test lower case table name close 1-1", getLowerCaseCloseInspect(), `use not_exist_db;`, newTestResult().add(driverV2.RuleLevelError, "", - SchemaNotExistMessage, "not_exist_db")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "not_exist_db")) runEmptyRuleInspectCase(t, "test lower case table name close 1-2", getLowerCaseCloseInspect(), `use NOT_EXIST_DB;`, newTestResult().add(driverV2.RuleLevelError, "", - SchemaNotExistMessage, "NOT_EXIST_DB")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "NOT_EXIST_DB")) runEmptyRuleInspectCase(t, "test lower case table name close 1-3", getLowerCaseCloseInspect(), `use exist_db_1;`, @@ -4867,12 +4871,12 @@ func Test_LowerCaseTableNameClose(t *testing.T) { runEmptyRuleInspectCase(t, "test lower case table name close 1-4", getLowerCaseCloseInspect(), `use EXIST_DB_1;`, newTestResult().add(driverV2.RuleLevelError, "", - SchemaNotExistMessage, "EXIST_DB_1")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "EXIST_DB_1")) runEmptyRuleInspectCase(t, "test lower case table name close 1-5", getLowerCaseCloseInspect(), `use exist_DB_1;`, newTestResult().add(driverV2.RuleLevelError, "", - SchemaNotExistMessage, "exist_DB_1")) + 
plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "exist_DB_1")) runEmptyRuleInspectCase(t, "test lower case table name close 1-6", getLowerCaseCloseInspect(), `use EXIST_DB_2;`, @@ -4881,14 +4885,14 @@ func Test_LowerCaseTableNameClose(t *testing.T) { runEmptyRuleInspectCase(t, "test lower case table name close 1-7", getLowerCaseCloseInspect(), `use exist_db_2;`, newTestResult().add(driverV2.RuleLevelError, "", - SchemaNotExistMessage, "exist_db_2")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaNotExistMessage), "exist_db_2")) } // check schema { runEmptyRuleInspectCase(t, "test lower case table name close 2-1", getLowerCaseCloseInspect(), `create database exist_db_1;`, newTestResult().add(driverV2.RuleLevelError, "", - SchemaExistMessage, "exist_db_1")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaExistMessage), "exist_db_1")) runEmptyRuleInspectCase(t, "test lower case table name close 2-2", getLowerCaseCloseInspect(), `create database EXIST_DB_1;`, @@ -4909,14 +4913,14 @@ create database not_exist_db;`, create database NOT_EXIST_DB;`, newTestResult(), newTestResult().add(driverV2.RuleLevelError, "", - SchemaExistMessage, "NOT_EXIST_DB")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.SchemaExistMessage), "NOT_EXIST_DB")) } // check table { runEmptyRuleInspectCase(t, "test lower case table name close 3-1", getLowerCaseCloseInspect(), `create table exist_db_1.exist_tb_1 (id int);`, newTestResult().add(driverV2.RuleLevelError, "", - TableExistMessage, "exist_db_1.exist_tb_1")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableExistMessage), "exist_db_1.exist_tb_1")) runEmptyRuleInspectCase(t, "test lower case table name close 3-2", getLowerCaseCloseInspect(), `create table exist_db_1.EXIST_TB_1 (id int);`, @@ -4926,7 +4930,7 @@ create database NOT_EXIST_DB;`, `alter table exist_db_1.EXIST_TB_1 rename AS exist_tb_2; `, 
newTestResult().add(driverV2.RuleLevelError, "", - TableNotExistMessage, "exist_db_1.EXIST_TB_1")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db_1.EXIST_TB_1")) runEmptyRuleInspectCase(t, "test lower case table name close 3-4", getLowerCaseCloseInspect(), `alter table exist_db_1.exist_tb_1 rename AS exist_tb_2; @@ -4934,7 +4938,7 @@ alter table exist_db_1.exist_tb_1 add column v3 varchar(255) COMMENT "unit test" `, newTestResult(), newTestResult().add(driverV2.RuleLevelError, "", - TableNotExistMessage, "exist_db_1.exist_tb_1")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db_1.exist_tb_1")) runEmptyRuleInspectCase(t, "test lower case table name close 3-5", getLowerCaseCloseInspect(), `alter table exist_db_1.exist_tb_1 rename AS exist_tb_2; @@ -4949,7 +4953,7 @@ alter table exist_db_1.EXIST_TB_2 add column v3 varchar(255) COMMENT "unit test" `, newTestResult(), newTestResult().add(driverV2.RuleLevelError, "", - TableNotExistMessage, "exist_db_1.EXIST_TB_2")) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db_1.EXIST_TB_2")) } } @@ -7159,7 +7163,7 @@ func TestMustMatchLeftMostPrefix(t *testing.T) { t.Run(arg.Name, func(t *testing.T) { res := newTestResult() if arg.TriggerRule { - res = newTestResult().add(rule.Level, rule.Name, rulepkg.RuleHandlerMap[rulepkg.DMLMustMatchLeftMostPrefix].Message) + res = newTestResult().add(rule.Level, rule.Name, plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, rulepkg.RuleHandlerMap[rulepkg.DMLMustMatchLeftMostPrefix].Message)) } runSingleRuleInspectCase(rulepkg.RuleHandlerMap[rulepkg.DMLMustMatchLeftMostPrefix].Rule, t, "", inspect, arg.Sql, res) }) @@ -7438,7 +7442,7 @@ func TestMustUseLeftMostPrefix(t *testing.T) { t.Run(arg.Name, func(t *testing.T) { res := newTestResult() if arg.TriggerRule { - res = newTestResult().add(rule.Level, rule.Name, 
rulepkg.RuleHandlerMap[rulepkg.DMLMustUseLeftMostPrefix].Message) + res = newTestResult().add(rule.Level, rule.Name, plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, rulepkg.RuleHandlerMap[rulepkg.DMLMustUseLeftMostPrefix].Message)) } runSingleRuleInspectCase(rulepkg.RuleHandlerMap[rulepkg.DMLMustUseLeftMostPrefix].Rule, t, "", inspect, arg.Sql, res) }) @@ -7665,7 +7669,7 @@ func TestDDLCheckCharLength(t *testing.T) { t.Run(arg.Name, func(t *testing.T) { res := newTestResult() if arg.TriggerRule { - res = newTestResult().add(rule.Level, rule.Name, rulepkg.RuleHandlerMap[rulepkg.DDLCheckCharLength].Message, arg.Param) + res = newTestResult().add(rule.Level, rule.Name, plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, rulepkg.RuleHandlerMap[rulepkg.DDLCheckCharLength].Message), arg.Param) } runSingleRuleInspectCase(rulepkg.RuleHandlerMap[rulepkg.DDLCheckCharLength].Rule, t, "", inspect, arg.Sql, res) }) diff --git a/sqle/driver/mysql/context_test.go b/sqle/driver/mysql/context_test.go index 9aa5ac0a22..6896835c6c 100644 --- a/sqle/driver/mysql/context_test.go +++ b/sqle/driver/mysql/context_test.go @@ -1,6 +1,7 @@ package mysql import ( + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" "testing" rulepkg "github.com/actiontech/sqle/sqle/driver/mysql/rule" @@ -37,7 +38,7 @@ alter table not_exist_tb_1 drop column v1; newTestResult().addResult(rulepkg.DDLCheckPKName), newTestResult(), newTestResult(), - newTestResult().add(driverV2.RuleLevelError, "", TableNotExistMessage, "exist_db.not_exist_tb_1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.TableNotExistMessage), "exist_db.not_exist_tb_1"), ) runDefaultRulesInspectCase(t, "drop column twice: column not exists(1)", DefaultMysqlInspect(), @@ -48,7 +49,7 @@ alter table not_exist_tb_1 drop column v1; `, newTestResult(), newTestResult(), - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "v1"), + 
newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "v1"), ) runDefaultRulesInspectCase(t, "drop column twice: column not exists(2)", DefaultMysqlInspect(), ` @@ -67,7 +68,7 @@ alter table not_exist_tb_1 drop column v1; newTestResult(), newTestResult().addResult(rulepkg.DDLCheckPKName), newTestResult(), - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "v1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "v1"), ) runDefaultRulesInspectCase(t, "change and drop column: column not exists", DefaultMysqlInspect(), @@ -78,7 +79,7 @@ alter table not_exist_tb_1 drop column v1; `, newTestResult(), newTestResult(), - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "v1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "v1"), ) runDefaultRulesInspectCase(t, "Add column twice: column exists", DefaultMysqlInspect(), @@ -89,7 +90,7 @@ alter table not_exist_tb_1 drop column v1; `, newTestResult(), newTestResult(), - newTestResult().add(driverV2.RuleLevelError, "", ColumnExistMessage, "v3"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnExistMessage), "v3"), ) runDefaultRulesInspectCase(t, "drop index twice: index not exists", DefaultMysqlInspect(), @@ -100,7 +101,7 @@ alter table not_exist_tb_1 drop column v1; `, newTestResult(), newTestResult(), - newTestResult().add(driverV2.RuleLevelError, "", IndexNotExistMessage, "idx_1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.IndexNotExistMessage), "idx_1"), ) runDefaultRulesInspectCase(t, "drop index, rename index: index not exists", DefaultMysqlInspect(), ` @@ -110,7 +111,7 @@ 
alter table not_exist_tb_1 drop column v1; `, newTestResult(), newTestResult(), - newTestResult().add(driverV2.RuleLevelError, "", IndexNotExistMessage, "idx_1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.IndexNotExistMessage), "idx_1"), ) } @@ -157,7 +158,7 @@ alter table not_exist_tb_1 drop column v1; ` alter table not_exist_tb_1 drop column v1; `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "v1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "v1"), ) inspect4 := DefaultMysqlInspect() @@ -175,7 +176,7 @@ alter table not_exist_tb_1 add column v3 varchar(255) NOT NULL DEFAULT "unit tes ` insert into not_exist_tb_1 (id,v1,v2) values (1,"1","1"); `, - newTestResult().add(driverV2.RuleLevelError, "", ColumnNotExistMessage, "v1"), + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnNotExistMessage), "v1"), ) inspect6 := DefaultMysqlInspect() diff --git a/sqle/driver/mysql/invalid_offline_test.go b/sqle/driver/mysql/invalid_offline_test.go index bbf218f45c..db48b7afd4 100644 --- a/sqle/driver/mysql/invalid_offline_test.go +++ b/sqle/driver/mysql/invalid_offline_test.go @@ -1,6 +1,7 @@ package mysql import ( + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" driverV2 "github.com/actiontech/sqle/sqle/driver/v2" "testing" @@ -16,50 +17,50 @@ func TestCheckInvalidOffline(t *testing.T) { func testCheckInvalidCreateTableOffline(t *testing.T) { runEmptyRuleInspectCase(t, "column name can't duplicated. 
f", DefaultMysqlInspectOffline(), `create table t (a int,b int, a int)`, - newTestResult().add(driverV2.RuleLevelError, "", DuplicateColumnsMessage, "a")) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateColumnsMessage), "a")) runEmptyRuleInspectCase(t, "column name can't duplicated. t", DefaultMysqlInspectOffline(), `create table t (a int,b int)`, newTestResult()) runEmptyRuleInspectCase(t, "pk can only be set once. f1", DefaultMysqlInspectOffline(), `create table t (a int primary key,b int primary key)`, - newTestResult().add(driverV2.RuleLevelError, "", MultiPrimaryKeyMessage)) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.MultiPrimaryKeyMessage))) runEmptyRuleInspectCase(t, "pk can only be set once. f2", DefaultMysqlInspectOffline(), "create table t (a int primary key,b int, PRIMARY KEY (`b`))", - newTestResult().add(driverV2.RuleLevelError, "", MultiPrimaryKeyMessage)) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.MultiPrimaryKeyMessage))) runEmptyRuleInspectCase(t, "pk can only be set once. f3", DefaultMysqlInspectOffline(), "create table t (a int primary key,b int, PRIMARY KEY (`a`))", - newTestResult().add(driverV2.RuleLevelError, "", MultiPrimaryKeyMessage)) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.MultiPrimaryKeyMessage))) runEmptyRuleInspectCase(t, "pk can only be set once. f4", DefaultMysqlInspectOffline(), "create table t (a int ,b int, PRIMARY KEY (`a`), PRIMARY KEY (`b`))", - newTestResult().add(driverV2.RuleLevelError, "", MultiPrimaryKeyMessage)) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.MultiPrimaryKeyMessage))) runEmptyRuleInspectCase(t, "pk can only be set once. 
t", DefaultMysqlInspectOffline(), `create table t (a int,b int primary key)`, newTestResult()) runEmptyRuleInspectCase(t, "index name can't be duplicated. f1", DefaultMysqlInspectOffline(), "create table t (a int ,b int , KEY `a` (`a`), KEY `a` (`b`))", - newTestResult().add(driverV2.RuleLevelError, "", DuplicateIndexesMessage, "a")) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexesMessage), "a")) runEmptyRuleInspectCase(t, "index name can't be duplicated. f2", DefaultMysqlInspectOffline(), "create table t (a int ,b int , unique `a`(`a`), KEY `a`(`b`))", - newTestResult().add(driverV2.RuleLevelError, "", DuplicateIndexesMessage, "a")) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexesMessage), "a")) runEmptyRuleInspectCase(t, "index name can't be duplicated. t", DefaultMysqlInspectOffline(), "create table t (a int ,b int , unique `a`(`a`), KEY `b`(`b`))", newTestResult()) runEmptyRuleInspectCase(t, "index column must exist. f1", DefaultMysqlInspectOffline(), "create table t (a int ,b int , unique `a`(`c`))", - newTestResult().add(driverV2.RuleLevelError, "", KeyedColumnNotExistMessage, "c")) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.KeyedColumnNotExistMessage), "c")) runEmptyRuleInspectCase(t, "index column must exist. f2", DefaultMysqlInspectOffline(), "create table t (a int ,b int , unique `a`(`a`,`c`))", - newTestResult().add(driverV2.RuleLevelError, "", KeyedColumnNotExistMessage, "c")) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.KeyedColumnNotExistMessage), "c")) runEmptyRuleInspectCase(t, "index column must exist. 
t", DefaultMysqlInspectOffline(), "create table t (a int ,b int , unique `a`(`b`))", newTestResult()) runEmptyRuleInspectCase(t, "index column can't duplicated. f", DefaultMysqlInspectOffline(), "create table t (a int ,b int , index `idx`(`a`,`a`))", - newTestResult().add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, "idx", "a")) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexedColumnMessage), "idx", "a")) runEmptyRuleInspectCase(t, "index column can't duplicated. t", DefaultMysqlInspectOffline(), "create table t (a int ,b int , index `idx`(`a`,`b`))", newTestResult()) @@ -69,10 +70,10 @@ func testCheckInvalidCreateTableOffline(t *testing.T) { func testCheckInvalidAlterTableOffline(t *testing.T) { runEmptyRuleInspectCase(t, "add pk, ok can only be set once. f1", DefaultMysqlInspectOffline(), `alter table t add (a int primary key, b int primary key)`, - newTestResult().add(driverV2.RuleLevelError, "", PrimaryKeyExistMessage)) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.PrimaryKeyExistMessage))) runEmptyRuleInspectCase(t, "add pk, ok can only be set once. f2", DefaultMysqlInspectOffline(), `alter table t add primary key (a), add primary key (b)`, - newTestResult().add(driverV2.RuleLevelError, "", PrimaryKeyExistMessage)) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.PrimaryKeyExistMessage))) runEmptyRuleInspectCase(t, "add pk, ok can only be set once. t1", DefaultMysqlInspectOffline(), `alter table t add primary key (a), add index (b)`, newTestResult()) @@ -82,10 +83,10 @@ func testCheckInvalidAlterTableOffline(t *testing.T) { runEmptyRuleInspectCase(t, "index column can't duplicated. 
f1", DefaultMysqlInspectOffline(), `alter table t add index b(a,a)`, - newTestResult().add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, "b", "a")) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexedColumnMessage), "b", "a")) runEmptyRuleInspectCase(t, "index column can't duplicated. f2", DefaultMysqlInspectOffline(), `alter table t add primary key a(a,a)`, - newTestResult().add(driverV2.RuleLevelError, "", DuplicatePrimaryKeyedColumnMessage, "a")) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicatePrimaryKeyedColumnMessage), "a")) runEmptyRuleInspectCase(t, "index column can't duplicated. t", DefaultMysqlInspectOffline(), `alter table t add index a(a,b), add index b(b,c)`, newTestResult()) @@ -95,7 +96,7 @@ func testCheckInvalidAlterTableOffline(t *testing.T) { func testCheckInvalidCreateIndexOffline(t *testing.T) { runEmptyRuleInspectCase(t, "index column name can't be duplicated. f", DefaultMysqlInspectOffline(), `create index idx on t (a,a)`, - newTestResult().add(driverV2.RuleLevelError, "", DuplicateIndexedColumnMessage, "idx", "a")) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateIndexedColumnMessage), "idx", "a")) runEmptyRuleInspectCase(t, "index column name can't be duplicated. t", DefaultMysqlInspectOffline(), `create index idx on t (a,b)`, newTestResult()) @@ -104,10 +105,10 @@ func testCheckInvalidCreateIndexOffline(t *testing.T) { func testCheckInvalidInsertOffline(t *testing.T) { runEmptyRuleInspectCase(t, "index column can't be duplicated. 
f1", DefaultMysqlInspectOffline(), `insert into t (a,a) value (1,1)`, - newTestResult().add(driverV2.RuleLevelError, "", DuplicateColumnsMessage, "a")) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateColumnsMessage), "a")) runEmptyRuleInspectCase(t, "index column can't be duplicated. f2", DefaultMysqlInspectOffline(), `insert into t set a=1, a=1`, - newTestResult().add(driverV2.RuleLevelError, "", DuplicateColumnsMessage, "a")) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.DuplicateColumnsMessage), "a")) runEmptyRuleInspectCase(t, "index column can't be duplicated. t1", DefaultMysqlInspectOffline(), `insert into t set a=1, b=1`, newTestResult()) @@ -117,7 +118,7 @@ func testCheckInvalidInsertOffline(t *testing.T) { runEmptyRuleInspectCase(t, "value length must match column length. f", DefaultMysqlInspectOffline(), `insert into t (a,b) value (1,1,1)`, - newTestResult().add(driverV2.RuleLevelError, "", ColumnsValuesNotMatchMessage)) + newTestResult().add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.ColumnsValuesNotMatchMessage))) runEmptyRuleInspectCase(t, "value length must match column length. 
t1", DefaultMysqlInspectOffline(), `insert into t (a,b) value (1,1)`, newTestResult()) diff --git a/sqle/driver/mysql/mysql.go b/sqle/driver/mysql/mysql.go index e80b1df0ad..dd52990b45 100644 --- a/sqle/driver/mysql/mysql.go +++ b/sqle/driver/mysql/mysql.go @@ -10,6 +10,8 @@ import ( "github.com/actiontech/sqle/sqle/driver" "github.com/actiontech/sqle/sqle/driver/mysql/executor" "github.com/actiontech/sqle/sqle/driver/mysql/onlineddl" + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" + "github.com/actiontech/sqle/sqle/locale" rulepkg "github.com/actiontech/sqle/sqle/driver/mysql/rule" "github.com/actiontech/sqle/sqle/driver/mysql/session" @@ -345,7 +347,10 @@ func (i *MysqlDriverImpl) audit(ctx context.Context, sql string) (*driverV2.Audi } if err != nil && session.IsParseShowCreateTableContentErr(err) { i.Logger().Errorf("check invalid failed: %v", err) - i.result.Add(driverV2.RuleLevelWarn, CheckInvalidError, fmt.Sprintf(CheckInvalidErrorFormat, "解析建表语句失败,部分在线审核规则可能失效,请人工确认")) + i.result.Add(driverV2.RuleLevelWarn, + "pre_check_err", // todo i18n 预检查失败规则名称 + plocale.ShouldLocalizeAllWithFmt(plocale.CheckInvalidErrorFormat, plocale.ParseDDLError), + ) } else if err != nil { return nil, err } @@ -393,7 +398,7 @@ func (i *MysqlDriverImpl) audit(ctx context.Context, sql string) (*driverV2.Audi if err := handler.Func(input); err != nil { // todo #1630 临时跳过解析建表语句失败导致的规则 if session.IsParseShowCreateTableContentErr(err) { - i.Logger().Errorf("skip rule, rule_desc_name=%v rule_desc=%v err:%v", rule.Name, rule.Desc, err.Error()) + i.Logger().Errorf("skip rule, rule_desc_name=%v rule_desc=%v err:%v", rule.Name, rule.I18nRuleInfo[locale.DefaultLang.String()].Desc, err.Error()) continue } return nil, err @@ -430,9 +435,10 @@ func (i *MysqlDriverImpl) audit(ctx context.Context, sql string) (*driverV2.Audi } if useGhost { if _, err := i.executeByGhost(ctx, sql, true); err != nil { - i.result.Add(driverV2.RuleLevelError, ghostRule.Name, fmt.Sprintf("表空间大小超过%vMB, 
将使用gh-ost进行上线, 但是dry-run抛出如下错误: %v", i.cnf.DDLGhostMinSize, err)) + // todo + i.result.Add(driverV2.RuleLevelError, ghostRule.Name, plocale.ShouldLocalizeAll(plocale.GhostDryRunError), i.cnf.DDLGhostMinSize, err) } else { - i.result.Add(ghostRule.Level, ghostRule.Name, fmt.Sprintf("表空间大小超过%vMB, 将使用gh-ost进行上线", i.cnf.DDLGhostMinSize)) + i.result.Add(ghostRule.Level, ghostRule.Name, plocale.ShouldLocalizeAll(plocale.GhostDryRunNotice), i.cnf.DDLGhostMinSize) } } @@ -443,8 +449,8 @@ func (i *MysqlDriverImpl) audit(ctx context.Context, sql string) (*driverV2.Audi } else if err != nil { return nil, err } - if oscCommandLine != "" { - i.result.Add(driverV2.RuleLevelNotice, rulepkg.ConfigDDLOSCMinSize, fmt.Sprintf("[osc]%s", oscCommandLine)) + if oscCommandLine != nil { + i.result.Add(driverV2.RuleLevelNotice, rulepkg.ConfigDDLOSCMinSize, oscCommandLine) } if !i.IsExecutedSQL() { @@ -454,27 +460,27 @@ func (i *MysqlDriverImpl) audit(ctx context.Context, sql string) (*driverV2.Audi return i.result, nil } -func (i *MysqlDriverImpl) GenRollbackSQL(ctx context.Context, sql string) (string, string, error) { +func (i *MysqlDriverImpl) GenRollbackSQL(ctx context.Context, sql string) (string, driverV2.I18nStr, error) { if i.IsOfflineAudit() { - return "", "", nil + return "", nil, nil } if i.HasInvalidSql { - return "", "", nil + return "", nil, nil } nodes, err := i.ParseSql(sql) if err != nil { - return "", "", err + return "", nil, err } - rollback, reason, err := i.GenerateRollbackSql(nodes[0]) + rollback, i18nReason, err := i.GenerateRollbackSql(nodes[0]) if err != nil { - return "", "", err + return "", nil, err } i.Ctx.UpdateContext(nodes[0]) - return rollback, reason, nil + return rollback, i18nReason, nil } func (i *MysqlDriverImpl) Close(ctx context.Context) { diff --git a/sqle/driver/mysql/mysql_test.go b/sqle/driver/mysql/mysql_test.go index 77ce7815b2..f5f1356837 100644 --- a/sqle/driver/mysql/mysql_test.go +++ b/sqle/driver/mysql/mysql_test.go @@ -5,8 +5,8 @@ import 
( "testing" "github.com/actiontech/sqle/sqle/driver/mysql/util" - driverV2 "github.com/actiontech/sqle/sqle/driver/v2" + "github.com/actiontech/sqle/sqle/locale" "github.com/stretchr/testify/assert" ) @@ -165,17 +165,17 @@ func TestInspect_GenRollbackSQL(t *testing.T) { rollback, reason, err := i.GenRollbackSQL(context.TODO(), "create table t1(id int, c1 int)") assert.NoError(t, err) - assert.Equal(t, "", reason) + assert.Equal(t, "", reason[locale.DefaultLang.String()]) assert.Equal(t, "DROP TABLE IF EXISTS `exist_db`.`t1`", rollback) rollback, reason, err = i.GenRollbackSQL(context.TODO(), "alter table t1 drop column c1") assert.NoError(t, err) - assert.Equal(t, "", reason) + assert.Equal(t, "", reason[locale.DefaultLang.String()]) assert.Equal(t, "ALTER TABLE `exist_db`.`t1`\nADD COLUMN `c1` int(11);", rollback) rollback, reason, err = i.GenRollbackSQL(context.TODO(), "alter table t1 add column c1 int") assert.NoError(t, err) - assert.Equal(t, "", reason) + assert.Equal(t, "", reason[locale.DefaultLang.String()]) assert.Equal(t, "ALTER TABLE `exist_db`.`t1`\nDROP COLUMN `c1`;", rollback) } func TestInspect_assertSQLType(t *testing.T) { @@ -223,7 +223,7 @@ CREATE TABLE new_tbl AS SELECT * FROM orig_tbl;`, driverV2.SQLTypeDDL, }, { - "case 7", // unparsed + "case 7", // unparsed ` CREATEaa TABLE new_tbl AS SELECT * FROM orig_tbl;`, driverV2.SQLTypeDDL, diff --git a/sqle/driver/mysql/plocale/active.en.toml b/sqle/driver/mysql/plocale/active.en.toml new file mode 100644 index 0000000000..9176740092 --- /dev/null +++ b/sqle/driver/mysql/plocale/active.en.toml @@ -0,0 +1,692 @@ +AdvisorIndexTypeComposite = "Composite" +AdvisorIndexTypeSingle = "Single column" +AllCheckPrepareStatementPlaceholdersAnnotation = "Because overuse of bind variables will increase the complexity of the query, thus reducing the query performance. Overuse of bind variables will also increase maintenance costs. 
Default threshold: 100" +AllCheckPrepareStatementPlaceholdersDesc = "The number of bound variables should not exceed the threshold" +AllCheckPrepareStatementPlaceholdersMessage = "The number of bind variables is %v, it is not recommended to exceed the set threshold %v" +AllCheckPrepareStatementPlaceholdersParams1 = "Maximum number of bind variables" +AnalysisDescCardinality = "Cardinality" +AnalysisDescCharacterSetName = "Character set" +AnalysisDescColumnComment = "Column comment" +AnalysisDescColumnDefault = "Default value" +AnalysisDescColumnKey = "Column index" +AnalysisDescColumnName = "Column name" +AnalysisDescColumnType = "Column type" +AnalysisDescComment = "Comment" +AnalysisDescExtra = "Extra info" +AnalysisDescIndexType = "Index type" +AnalysisDescIsNullable = "Nullable" +AnalysisDescKeyName = "Index name" +AnalysisDescSeqInIndex = "Column sequence" +AnalysisDescUnique = "Unique" +AnonymousMark = "(anonymous)" +AuditResultMsgExcludedSQL = "Excluded SQLs" +AuditResultMsgWhiteList = "White list" +CheckInvalidError = "Pre-check failed" +CheckInvalidErrorFormat = "Pre-check failed: %v" +ColumnExistMessage = "Column %s already exists" +ColumnIsAmbiguousMessage = "Column %s is ambiguous" +ColumnNotExistMessage = "Column %s does not exist" +ColumnsValuesNotMatchMessage = "The number of specified values does not match the number of columns" +ConfigDDLGhostMinSizeAnnotation = "After enabling this rule, the gh-ost tool will be used automatically for online table modification for large tables; direct DDL changes to large tables may lead to long-term table locking issues, affecting business sustainability. The specific threshold for defining large tables can be adjusted according to business needs. 
Default value: 1024" +ConfigDDLGhostMinSizeDesc = "When modifying tables, use gh-ost online when table space exceeds the specified size (MB)" +ConfigDDLGhostMinSizeParams1 = "Table space size (MB)" +ConfigDDLOSCMinSizeAnnotation = "After enabling this rule, pt-osc rewrite suggestions will be given for DDL statements of large tables [the suggested command needs to be executed manually; automatic execution will be supported in the future]; direct DDL changes to large tables may lead to long-term table locking issues, affecting business sustainability. The specific threshold for defining large tables can be adjusted according to business needs. Default value: 1024" +ConfigDDLOSCMinSizeDesc = "When modifying tables, output osc rewrite suggestions when the table space exceeds the specified size (MB) for audit" +ConfigDDLOSCMinSizeParams1 = "Table space size (MB)" +ConfigDMLExplainPreCheckEnableAnnotation = "Check if the DML to be launched can be executed correctly in the form of EXPLAIN, and find errors in the statement in advance to improve the success rate of launching" +ConfigDMLExplainPreCheckEnableDesc = "Use EXPLAIN to enhance pre-check capability" +ConfigDMLRollbackMaxRowsAnnotation = "Large transaction rollback, easy to affect database performance, causing business fluctuations; the specific rule threshold can be adjusted according to business needs. Default value: 1000" +ConfigDMLRollbackMaxRowsDesc = "Do not roll back if the expected number of affected rows in the DML statement exceeds the specified value" +ConfigDMLRollbackMaxRowsParams1 = "Maximum number of affected rows" +ConfigOptimizeIndexEnabledAnnotation = "Enable index optimization suggestions through this rule, providing two parameters to configure the behavior of index optimization suggestions. 1. Minimum column distinguishability threshold (percentage): columns whose distinguishability in the current table falls below this value will not be recommended as index columns; 2.
Maximum number of columns in the combined index: limit the maximum number of columns given by the combined index to prevent the recommended combined index from not conforming to other SQL standards" +ConfigOptimizeIndexEnabledDesc = "Index creation suggestion" +ConfigOptimizeIndexEnabledParams1 = "Minimum column distinguishability threshold (percentage)" +ConfigOptimizeIndexEnabledParams2 = "Maximum number of columns in the combined index" +ConfigSQLIsExecutedAnnotation = "Enable this rule to be compatible with post-audit scenarios. DDL and DML statements collected after the event will no longer be checked for launch. For example, the library table metadata scan task can enable this rule" +ConfigSQLIsExecutedDesc = "Disable online audit mode" +DDLAvoidEventAnnotation = "Using event will increase the maintenance difficulty and dependency of the database, and will also cause security problems." +DDLAvoidEventDesc = "Prohibit using event" +DDLAvoidEventMessage = "Prohibit using event" +DDLAvoidFullTextAnnotation = "Using fulltext index will increase storage overhead and have some impact on write operation performance." +DDLAvoidFullTextDesc = "Prohibit using fulltext index" +DDLAvoidFullTextMessage = "Prohibit using fulltext index" +DDLAvoidGeometryAnnotation = "Using spatial fields and spatial indexes will increase storage requirements and have a certain impact on database performance" +DDLAvoidGeometryDesc = "Prohibit the use of spatial fields and spatial indexes" +DDLAvoidGeometryMessage = "Prohibit the use of spatial fields and spatial indexes" +DDLAvoidTextAnnotation = "Splitting text type fields from the original table's primary key into another table can improve database performance and query speed, reducing unnecessary i/o operations." 
+DDLAvoidTextDesc = "It is recommended to split text type fields from the original table and store them in a separate table with the primary key of the original table" +DDLAvoidTextMessage = "Field: %v is of text type, it is recommended to split it from the original table and store it in a separate table with the primary key of the original table" +DDLCheckAllIndexNotNullConstraintAnnotation = "All index fields are not constrained to be not null, please confirm the rationality of the index planning of the table." +DDLCheckAllIndexNotNullConstraintDesc = "It is recommended to add a not null constraint to at least one index" +DDLCheckAllIndexNotNullConstraintMessage = "It is recommended to add a not null constraint to at least one index" +DDLCheckAlterTableNeedMergeAnnotation = "Avoid the consumption and impact on online business caused by multiple table rebuilds" +DDLCheckAlterTableNeedMergeDesc = "There are multiple modification statements for the same table, it is recommended to merge them into one alter statement" +DDLCheckAlterTableNeedMergeMessage = "There are already modification statements for this table, it is recommended to merge them into one alter statement" +DDLCheckAutoIncrementAnnotation = "If auto_increment is set to 0 when creating a table, auto-increment will start from 1, which can avoid data holes. for example, when exporting table structure ddl, the auto_increment in the table structure is usually the current auto-increment value. if auto_increment is not set to 0 when creating a table, creating a table through this DDL will result in the auto-increment value starting from a meaningless number." +DDLCheckAutoIncrementDesc = "The initial auto_increment value of the table is recommended to be 0" +DDLCheckAutoIncrementFieldNumAnnotation = "Mysql innodb, myisam engine does not allow multiple auto-increment fields, setting multiple auto-increment fields will lead to deployment failure." 
+DDLCheckAutoIncrementFieldNumDesc = "When creating a table, only one auto-increment field can be set" +DDLCheckAutoIncrementFieldNumMessage = "When creating a table, only one auto-increment field can be set" +DDLCheckAutoIncrementMessage = "The initial auto_increment value of the table is recommended to be 0" +DDLCheckBigintInsteadOfDecimalAnnotation = "Because the cpu does not support direct operations on decimal, mysql itself implements high-precision calculations for decimal, but the calculation cost is high, and the space occupied is also more when storing the same range of values; using bigint instead of decimal, you can multiply by the corresponding multiplier according to the number of decimal places, so as to achieve precise floating-point storage calculation, avoiding the high calculation cost of decimal" +DDLCheckBigintInsteadOfDecimalDesc = "It is recommended to use bigint type instead of decimal" +DDLCheckBigintInsteadOfDecimalMessage = "It is recommended to use bigint type instead of decimal for column %s" +DDLCheckCharLengthAnnotation = "Using too long or too many varchar, char fields may increase the complexity of business logic; if the average length of the field is too large, it will occupy more storage space." 
+DDLCheckCharLengthDesc = "Prohibit the total character length of char, varchar type fields from exceeding the threshold" +DDLCheckCharLengthMessage = "Prohibit the total character length of char, varchar type fields from exceeding the threshold %v" +DDLCheckCharLengthParams1 = "Character length" +DDLCheckColumnBlobDefaultIsNotNullAnnotation = "In strict sql_mode, blob and text types cannot set default values, if data is inserted without specifying a value, the field will be set to null" +DDLCheckColumnBlobDefaultIsNotNullDesc = "The default value of blob and text type fields can only be null" +DDLCheckColumnBlobDefaultIsNotNullMessage = "The default value of blob and text type fields can only be null" +DDLCheckColumnBlobNoticeAnnotation = "Blob or text type consumes a lot of network and io bandwidth, and DML operations on this table will become very slow" +DDLCheckColumnBlobNoticeDesc = "It is not recommended to use blob or text type" +DDLCheckColumnBlobNoticeMessage = "It is not recommended to use blob or text type" +DDLCheckColumnBlobWithNotNullAnnotation = "Blob and text type fields cannot specify default values, if data is inserted without specifying a value, the field defaults to null. if a not null constraint is added, inserting data without specifying a value for this field will result in insertion failure" +DDLCheckColumnBlobWithNotNullDesc = "It is not recommended to set blob and text type fields to not null" +DDLCheckColumnBlobWithNotNullMessage = "It is not recommended to set blob and text type fields to not null" +DDLCheckColumnCharLengthAnnotation = "Varchar is a variable-length field, which takes up less storage space and can save storage space. 
at the same time, the retrieval efficiency of relatively small fields is obviously higher" +DDLCheckColumnCharLengthDesc = "Char length greater than 20 must use varchar type" +DDLCheckColumnCharLengthMessage = "Char length greater than 20 must use varchar type" +DDLCheckColumnEnumNoticeAnnotation = "Enum type is not sql standard, portability is poor, later modification or addition of enum values requires rebuilding the entire table, which is costly and cannot be sorted by literal values" +DDLCheckColumnEnumNoticeDesc = "It is not recommended to use enum type" +DDLCheckColumnEnumNoticeMessage = "It is not recommended to use enum type" +DDLCheckColumnNotNULLAnnotation = "Table fields are recommended to have not null constraints, which can ensure data integrity, prevent the insertion of null values, and improve query accuracy." +DDLCheckColumnNotNULLDesc = "Table fields are recommended to have not null constraints" +DDLCheckColumnNotNULLMessage = "It is recommended to set the not null constraint for field %v" +DDLCheckColumnQuantityAnnotation = "Avoid wide table design on oltp systems, which will have a great impact on performance in the future; the specific rule threshold can be adjusted according to business needs, the default value is 40" +DDLCheckColumnQuantityDesc = "The number of columns in a table is not recommended to exceed the threshold" +DDLCheckColumnQuantityInPKAnnotation = "Too many columns in the primary key will result in secondary indexes taking up more space and increasing the cost of index maintenance; the specific rule threshold can be adjusted according to business needs, the default value is 2" +DDLCheckColumnQuantityInPKDesc = "The number of columns included in the primary key is not recommended to exceed the threshold" +DDLCheckColumnQuantityInPKMessage = "The number of columns included in the primary key is not recommended to exceed the threshold" +DDLCheckColumnQuantityInPKParams1 = "Maximum number of columns" +DDLCheckColumnQuantityMessage = 
"The number of columns in a table is not recommended to exceed the threshold" +DDLCheckColumnQuantityParams1 = "Max column quantity" +DDLCheckColumnSetNoticeAnnotation = "Modifying a set requires redefining the column, which is expensive to modify later. it is recommended to implement it in the business layer" +DDLCheckColumnSetNoticeDesc = "It is not recommended to use set type" +DDLCheckColumnSetNoticeMessage = "It is not recommended to use set type" +DDLCheckColumnTimestampWithoutDefaultAnnotation = "Adding a default value to timestamp can avoid the occurrence of all 0 date formats that do not meet business expectations" +DDLCheckColumnTimestampWithoutDefaultDesc = "Timestamp columns must add default values" +DDLCheckColumnTimestampWithoutDefaultMessage = "Timestamp columns must add default values" +DDLCheckColumnTypeIntegerAnnotation = "Int(m) or bigint(m), where m represents the maximum display width, the width of the maximum value that can be stored is 10 and 20 respectively. using int(10) or bigint(20) can avoid the possibility of display truncation" +DDLCheckColumnTypeIntegerDesc = "It is recommended to use int(10) or bigint(20) for integer definition" +DDLCheckColumnTypeIntegerMessage = "It is recommended to use int(10) or bigint(20) for integer definition" +DDLCheckColumnWithoutCommentAnnotation = "Adding comments to columns can make the meaning of columns clearer and facilitate future maintenance" +DDLCheckColumnWithoutCommentDesc = "It is recommended to add comments to columns" +DDLCheckColumnWithoutCommentMessage = "It is recommended to add comments to columns" +DDLCheckColumnWithoutDefaultAnnotation = "Adding a default value to a column can avoid the impact of null values on queries" +DDLCheckColumnWithoutDefaultDesc = "Except for auto-increment columns and large field columns, every column must add a default value" +DDLCheckColumnWithoutDefaultMessage = "Except for auto-increment columns and large field columns, every column must add a default value" 
+DDLCheckCompositeIndexDistinctionAnnotation = "Placing fields with high distinction at the front in composite indexes helps improve query performance of indexes, because it can reduce the data range faster and improve retrieval efficiency." +DDLCheckCompositeIndexDistinctionDesc = "It is recommended to place fields with high distinction at the front in composite indexes" +DDLCheckCompositeIndexDistinctionMessage = "It is recommended to place fields with high distinction at the front in composite indexes, %v" +DDLCheckCompositeIndexMaxAnnotation = "Composite indexes create corresponding combination indexes according to the number of index columns. the more columns, the more indexes created, and each index will increase disk space overhead and increase index maintenance overhead; the specific rule threshold can be adjusted according to business needs, the default value: 3" +DDLCheckCompositeIndexMaxDesc = "The number of columns in composite indexes is not recommended to exceed the threshold" +DDLCheckCompositeIndexMaxMessage = "The number of columns in composite indexes is not recommended to exceed %v" +DDLCheckCompositeIndexMaxParams1 = "Max index column quantity" +DDLCheckCreateFunctionAnnotation = "User-defined functions are poorly maintained and highly dependent, which can lead to sql unable to use across databases" +DDLCheckCreateFunctionDesc = "Prohibit the use of user-defined functions" +DDLCheckCreateFunctionMessage = "Prohibit the use of user-defined functions" +DDLCheckCreateProcedureAnnotation = "Stored procedures make programs difficult to debug and extend to some extent. 
the syntax of stored procedures in various databases is very different, which brings great difficulty to future database migration, and greatly increases the probability of bugs" +DDLCheckCreateProcedureDesc = "Prohibit the use of stored procedures" +DDLCheckCreateProcedureMessage = "Prohibit the use of stored procedures" +DDLCheckCreateTimeColumnAnnotation = "Using the create_time field is beneficial to problem tracking and data retrieval, and it also avoids the inconvenience of managing the data lifecycle later. the default value is current_timestamp, which can ensure the accuracy of the time" +DDLCheckCreateTimeColumnDesc = "It is recommended that the table DDL includes a creation time field with a default value of current_timestamp" +DDLCheckCreateTimeColumnMessage = "It is recommended that the table DDL includes %v field with a default value of current_timestamp" +DDLCheckCreateTimeColumnParams1 = "Create time field name" +DDLCheckCreateTriggerAnnotation = "Triggers are difficult to develop and maintain, cannot be efficiently migrated, and are prone to deadlocks that affect business under complex logic and high concurrency" +DDLCheckCreateTriggerDesc = "Prohibit the use of triggers" +DDLCheckCreateTriggerMessage = "Prohibit the use of triggers" +DDLCheckCreateViewAnnotation = "The query performance of views is poor, and the base table structure changes need to be maintained for views. if the view readability is poor and contains complex logic, it will increase the maintenance cost" +DDLCheckCreateViewDesc = "Prohibit the use of views" +DDLCheckCreateViewMessage = "Prohibit the use of views" +DDLCheckDatabaseCollationAnnotation = "This rule constrains the global database collation to avoid creating unexpected database collations, preventing unexpected sorting results from the business side. it is recommended that the database tables in the project use unified character sets and collation. 
in some cases of joining tables, inconsistent character sets or collation of fields may cause index failure and are difficult to find" +DDLCheckDatabaseCollationDesc = "It is recommended to use the specified database collation" +DDLCheckDatabaseCollationMessage = "It is recommended to use the specified database collation as %s" +DDLCheckDatabaseCollationParams1 = "Database collation" +DDLCheckDatabaseSuffixAnnotation = "By configuring this rule, you can standardize the database naming rules of the specified business. specific naming rules can be customized. default prompt value: _db" +DDLCheckDatabaseSuffixDesc = "It is recommended to use a fixed suffix for the database name" +DDLCheckDatabaseSuffixMessage = "It is recommended that the database name ends with \"%v\"" +DDLCheckDatabaseSuffixParams1 = "Database name suffix" +DDLCheckDecimalTypeColumnAnnotation = "For floating point operations, decimal has higher accuracy" +DDLCheckDecimalTypeColumnDesc = "It is recommended to use decimal for precise floating point numbers" +DDLCheckDecimalTypeColumnMessage = "It is recommended to use decimal for precise floating point numbers" +DDLCheckFieldNotNUllMustContainDefaultValueAnnotation = "If there is a field with not null and without default value, insert without this field will cause an error" +DDLCheckFieldNotNUllMustContainDefaultValueDesc = "It is recommended to set default value for the field with not null constraint" +DDLCheckFieldNotNUllMustContainDefaultValueMessage = "It is recommended to set default value for the field with not null constraint, the following fields are not compliant: %v" +DDLCheckFullWidthQuotationMarksAnnotation = "It is recommended to enable this rule to avoid mysql identifying chinese full-width quotes as part of the name, which may lead to different execution results from business expectations" +DDLCheckFullWidthQuotationMarksDesc = "It is not recommended to use chinese full-width quotes in DDL statements" 
+DDLCheckFullWidthQuotationMarksMessage = "It is not recommended to use chinese full-width quotes in DDL statements, this may be a typo" +DDLCheckIndexCountAnnotation = "Each index created on a table will increase storage overhead, and index will also increase processing overhead for insert, delete and update operations. too many, insufficient and incorrect indexes will be of no benefit to performance; the specific rule threshold can be adjusted according to business needs, default value: 5" +DDLCheckIndexCountDesc = "The number of indexes is recommended not to exceed the threshold" +DDLCheckIndexCountMessage = "The number of indexes is recommended not to exceed %v" +DDLCheckIndexCountParams1 = "Max index count" +DDLCheckIndexNotNullConstraintAnnotation = "If there is no not null constraint on the indexed field, the table record and the index record will not be completely mapped." +DDLCheckIndexNotNullConstraintDesc = "The indexed field needs to have a not null constraint" +DDLCheckIndexNotNullConstraintMessage = "These indexed fields (%v) need to have a not null constraint" +DDLCheckIndexOptionAnnotation = "Choose fields with high selectivity as indexes, which can quickly locate data; if the selectivity is too low, it cannot be used effectively, and even may need to scan a large number of data pages, slowing down sql; the specific rule threshold can be adjusted according to business needs, default value: 70" +DDLCheckIndexOptionDesc = "It is recommended that the indexed field has a selectivity greater than the threshold" +DDLCheckIndexOptionMessage = "Index %v does not exceed the selectivity threshold of %v, it is not recommended to select it as an index" +DDLCheckIndexOptionParams1 = "Selectivity (percentage)" +DDLCheckIndexPrefixAnnotation = "By configuring this rule, you can standardize the index naming rules of the specified business, the specific naming rules can be customized, default prompt value: idx_" +DDLCheckIndexPrefixDesc = "It is recommended to use a 
fixed prefix for ordinary indexes" +DDLCheckIndexPrefixMessage = "It is recommended that ordinary indexes should be prefixed with \"%v\"" +DDLCheckIndexPrefixParams1 = "Index prefix" +DDLCheckIndexTooManyAnnotation = "There are too many indexes on a single field. in most cases, these indexes have no value. on the contrary, they will reduce the performance of data insertion and deletion, especially for tables that are frequently updated, the negative impact is even greater. the specific rule threshold can be adjusted according to business needs, default value: 2" +DDLCheckIndexTooManyDesc = "The number of indexes on a single field is recommended not to exceed the threshold" +DDLCheckIndexTooManyMessage = "The number of indexes on field %v is recommended not to exceed %v" +DDLCheckIndexTooManyParams1 = "Max index count for a single field" +DDLCheckIndexedColumnWithBlobAnnotation = "Blob type belongs to large field type, and indexing will take up a lot of storage space" +DDLCheckIndexedColumnWithBlobDesc = "Do not include blob type columns in indexes" +DDLCheckIndexedColumnWithBlobMessage = "Do not include blob type columns in indexes" +DDLCheckIndexesExistBeforeCreateConstraintsAnnotation = "Create index before creating constraint, constraint can act on secondary index to avoid full table scan and improve performance" +DDLCheckIndexesExistBeforeCreateConstraintsDesc = "It is recommended to create index before creating constraint for field" +DDLCheckIndexesExistBeforeCreateConstraintsMessage = "It is recommended to create index before creating constraint for field" +DDLCheckIsExistLimitOffsetAnnotation = "For example: limit n offset m or limit m, n. when the offset m is too large, the query efficiency will be very low, because mysql first retrieves m+n data, and then discards the first m data. 
for mysql tables with large data volume, using limit pagination has very serious performance problems" +DDLCheckIsExistLimitOffsetDesc = "Avoid using offset when using pagination query" +DDLCheckIsExistLimitOffsetMessage = "Avoid using offset when using pagination query" +DDLCheckObjectNameIsUpperAndLowerLetterMixedAnnotation = "Database object naming convention, it is not recommended to use a mixed case form, it is recommended to connect words with underscores, which improves code readability" +DDLCheckObjectNameIsUpperAndLowerLetterMixedDesc = "It is not recommended to mix uppercase and lowercase letters in database object names" +DDLCheckObjectNameIsUpperAndLowerLetterMixedMessage = "It is not recommended to mix uppercase and lowercase letters in database object names, the following object names are not compliant: %v" +DDLCheckObjectNameLengthAnnotation = "By configuring this rule, you can standardize the object naming length of the specified business, the specific length can be customized, default maximum length: 64. 
it is the maximum length of the identifier name specified by mysql, 64 bytes" +DDLCheckObjectNameLengthDesc = "The length of table name, column name and index name is recommended not to exceed the threshold" +DDLCheckObjectNameLengthMessage = "The length of table name, column name and index name is recommended not to exceed %v bytes" +DDLCheckObjectNameLengthParams1 = "Max length (bytes)" +DDLCheckObjectNameUseCNAnnotation = "By configuring this rule, you can standardize the data object naming rules of the specified business" +DDLCheckObjectNameUseCNDesc = "Database object names can only use english, underscores or numbers, and the first letter must be english" +DDLCheckObjectNameUseCNMessage = "Database object names can only use english, underscores or numbers, and the first letter must be english" +DDLCheckObjectNameUsingKeywordAnnotation = "By configuring this rule, you can standardize the data object naming rules of the specified business, avoid conflicts, and avoid confusion" +DDLCheckObjectNameUsingKeywordDesc = "Database object names are prohibited from using reserved words" +DDLCheckObjectNameUsingKeywordMessage = "Database object names are prohibited from using reserved words %s" +DDLCheckPKNameAnnotation = "By configuring this rule, you can standardize the primary key naming rules of the specified business" +DDLCheckPKNameDesc = "It is recommended to name the primary key \"pk_tablename\"" +DDLCheckPKNameMessage = "It is recommended to name the primary key \"pk_tablename\"" +DDLCheckPKNotExistAnnotation = "Primary key ensures data is globally unique, which can improve data retrieval efficiency" +DDLCheckPKNotExistDesc = "Table must have a primary key" +DDLCheckPKNotExistMessage = "Table must have a primary key" +DDLCheckPKProhibitAutoIncrementAnnotation = "Later maintenance is relatively inconvenient. It is too dependent on the database auto-increment mechanism to achieve global uniqueness. 
It is not easy to split and it is easy to cause primary key conflicts" +DDLCheckPKProhibitAutoIncrementDesc = "It is not recommended to use auto-increment for primary key" +DDLCheckPKProhibitAutoIncrementMessage = "It is not recommended to use auto-increment for primary key" +DDLCheckPKWithoutAutoIncrementAnnotation = "Auto-increment primary key, numerical type is fast, and it is incremental growth, occupies less space, and faster data insertion operations, avoiding increasing the cost of maintaining indexes" +DDLCheckPKWithoutAutoIncrementDesc = "It is recommended to use auto-increment for primary key" +DDLCheckPKWithoutAutoIncrementMessage = "It is recommended to use auto-increment for primary key" +DDLCheckPKWithoutBigintUnsignedAnnotation = "Bigint unsigned has a larger value range. It is recommended to enable this rule to avoid overflow" +DDLCheckPKWithoutBigintUnsignedDesc = "It is recommended to use bigint unsigned type for primary key, which is bigint unsigned" +DDLCheckPKWithoutBigintUnsignedMessage = "It is recommended to use bigint unsigned type for primary key, which is bigint unsigned" +DDLCheckPKWithoutIfNotExistsAnnotation = "When creating a new table, if the table already exists, adding if not exists create will cause an error in the execution of sql. 
It is recommended to enable this rule to avoid sql errors in actual execution" +DDLCheckPKWithoutIfNotExistsDesc = "It is recommended to add if not exists when creating a new table to ensure that repeated execution does not cause errors" +DDLCheckPKWithoutIfNotExistsMessage = "It is recommended to add if not exists when creating a new table to ensure that repeated execution does not cause errors" +DDLCheckRedundantIndexAnnotation = "Mysql needs to maintain redundant indexes separately, redundant indexes increase maintenance costs, and the optimizer needs to calculate costs one by one when optimizing queries, affecting query performance" +DDLCheckRedundantIndexDesc = "It is not recommended to create redundant indexes" +DDLCheckRedundantIndexMessage = "%v" +DDLCheckTableCharacterSetAnnotation = "This rule constrains the global database character set, avoiding the creation of unexpected character sets and preventing problems such as garbled characters on the business side. It is recommended that tables in the project use the same character set and character set collation. In some cases of join queries, inconsistent character sets or collation rules of fields may lead to index failure and are difficult to detect" +DDLCheckTableCharacterSetDesc = "It is recommended to use the specified database character set" +DDLCheckTableCharacterSetMessage = "It is recommended to use %v database character set" +DDLCheckTableCharacterSetParams1 = "Database character set" +DDLCheckTableDBEngineAnnotation = "By configuring this rule, you can standardize the database engine specified for the business. The specific rule can be customized. The default value is innodb. 
Innodb supports transactions, supports row-level locks, has better recovery, and has better performance under high concurrency" +DDLCheckTableDBEngineDesc = "It is recommended to use the specified database engine" +DDLCheckTableDBEngineMessage = "It is recommended to use %v database engine" +DDLCheckTableDBEngineParams1 = "Database engine" +DDLCheckTablePartitionAnnotation = "Partitioned tables appear as multiple files physically and as one table logically. Cross-partition queries may be less efficient. It is recommended to use physical table splitting to manage large data" +DDLCheckTablePartitionDesc = "It is not recommended to use partitioned table related functions" +DDLCheckTablePartitionMessage = "It is not recommended to use partitioned table related functions" +DDLCheckTableRowsAnnotation = "When the number of rows in the table exceeds the threshold, splitting the table can help improve database performance and query speed." +DDLCheckTableRowsDesc = "If the number of rows in the table exceeds the threshold, it is recommended to split the table" +DDLCheckTableRowsMessage = "If the number of rows in the table exceeds the threshold, it is recommended to split the table" +DDLCheckTableRowsParams1 = "Number of table rows (ten thousand)" +DDLCheckTableSizeAnnotation = "It takes a long time for a large table to execute ddl, and the load is high. It takes up lock resources for a long time, which will affect database performance. The specific rule threshold can be adjusted according to business needs. 
Default value: 1024" +DDLCheckTableSizeDesc = "It is not recommended to perform DDL operations on tables with large data volume" +DDLCheckTableSizeMessage = "The space of the table %v executing DDL is not recommended to exceed %vmb" +DDLCheckTableSizeParams1 = "Table space size (mb)" +DDLCheckTableWithoutCommentAnnotation = "Adding comments to the table can make the meaning of the table more clear, which is convenient for later maintenance" +DDLCheckTableWithoutCommentDesc = "It is recommended to add comments to the table" +DDLCheckTableWithoutCommentMessage = "It is recommended to add comments to the table" +DDLCheckTransactionIsolationLevelAnnotation = "Rc avoids dirty reads, but does not solve the problem of phantom reads. Using rr can avoid phantom reads, but the introduction of gap locks can lead to a larger locking scope, which can affect concurrency and is prone to deadlocks. Therefore, in most business scenarios, the probability of phantom reads is relatively low, and rc can basically meet business needs" +DDLCheckTransactionIsolationLevelDesc = "It is recommended to set the transaction isolation level to rc" +DDLCheckTransactionIsolationLevelMessage = "It is recommended to set the transaction isolation level to rc" +DDLCheckUniqueIndexAnnotation = "By configuring this rule, you can standardize the naming rules of unique indexes specified for the business" +DDLCheckUniqueIndexDesc = "It is recommended to use idx_uk_table name_field name for unique index name" +DDLCheckUniqueIndexMessage = "It is recommended to use idx_uk_table name_field name for unique index name" +DDLCheckUniqueIndexPrefixAnnotation = "By configuring this rule, you can standardize the naming rules of unique indexes specified for the business. The specific naming convention can be customized. 
The default prompt value: uniq_" +DDLCheckUniqueIndexPrefixDesc = "It is recommended to use a fixed prefix for unique index" +DDLCheckUniqueIndexPrefixMessage = "It is recommended that unique index should start with \"%v\"" +DDLCheckUniqueIndexPrefixParams1 = "Index prefix" +DDLCheckUpdateTimeColumnAnnotation = "Using the update time field is beneficial for problem tracking and data retrieval, and avoids the inconvenience of data lifecycle management in the later stage. the default value is update_time, which can ensure the accuracy of the time" +DDLCheckUpdateTimeColumnDesc = "The table DDL needs to include the update time field and the default value is current_timestamp on update current_timestamp" +DDLCheckUpdateTimeColumnMessage = "The table DDL needs to include %v field and the default value is current_timestamp on update current_timestamp" +DDLCheckUpdateTimeColumnParams1 = "Update time field name" +DDLCheckVarcharSizeAnnotation = "Mysql does not limit the size of the index when building the index. the index length will default to the length of the field. the longer the varchar definition length is, the larger the index storage size will be. the specific rule threshold can be adjusted according to business needs. 
default value: 1024" +DDLCheckVarcharSizeDesc = "It is not recommended to define the length of varchar greater than the threshold" +DDLCheckVarcharSizeMessage = "It is not recommended to define the length of varchar greater than the threshold, the threshold is %d" +DDLCheckVarcharSizeParams1 = "Maximum length of varchar" +DDLDisableAlterFieldUseFirstAndAfterAnnotation = "The alter operation of first, after is completed by copying the table, which has a great impact on the business" +DDLDisableAlterFieldUseFirstAndAfterDesc = "It is forbidden to use first, after for alter table fields" +DDLDisableAlterFieldUseFirstAndAfterMessage = "It is forbidden to use first, after for alter table fields" +DDLDisableDropStatementAnnotation = "Drop is a ddl, data changes will not be written to the log, and cannot be rolled back. it is recommended to enable this rule to avoid accidental deletion operations" +DDLDisableDropStatementDesc = "Prohibit drop operations except for indexes" +DDLDisableDropStatementMessage = "Prohibit drop operations except for indexes" +DDLDisableFKAnnotation = "Foreign keys have poor performance in high concurrency scenarios, are prone to deadlocks, and are not conducive to later maintenance (splitting, migration)" +DDLDisableFKDesc = "It is forbidden to use foreign keys" +DDLDisableFKMessage = "It is forbidden to use foreign keys" +DDLDisableTypeTimestampAnnotation = "Timestamp has a maximum value limit ('2038-01-19 03:14:07' utc), and there are problems with time zone conversion" +DDLDisableTypeTimestampDesc = "It is not recommended to use timestamp fields" +DDLDisableTypeTimestampMessage = "It is not recommended to use timestamp fields" +DDLHintDropColumnAnnotation = "The business logic and the deleted column dependencies are not completely eliminated. after the column is deleted, it may lead to program exceptions (unable to read and write normally). 
enabling this rule will remind sqle that deleting columns is a high-risk operation" +DDLHintDropColumnDesc = "It is forbidden to delete columns" +DDLHintDropColumnMessage = "It is forbidden to delete columns" +DDLHintDropForeignKeyAnnotation = "Deleting existing constraints will affect existing business logic. enabling this rule will remind sqle that deleting foreign keys is a high-risk operation" +DDLHintDropForeignKeyDesc = "It is forbidden to delete foreign keys" +DDLHintDropForeignKeyMessage = "It is forbidden to delete foreign keys" +DDLHintDropPrimaryKeyAnnotation = "Deleting existing constraints will affect existing business logic. enabling this rule will remind sqle that deleting primary keys is a high-risk operation" +DDLHintDropPrimaryKeyDesc = "It is forbidden to delete primary keys" +DDLHintDropPrimaryKeyMessage = "It is forbidden to delete primary keys" +DDLHintUpdateTableCharsetWillNotUpdateFieldCharsetAnnotation = "Modifying the default character set of the table will only affect the subsequently added fields, not the character set of the existing fields in the table. if you need to modify the character set of all fields in the whole table, it is recommended to enable this rule" +DDLHintUpdateTableCharsetWillNotUpdateFieldCharsetDesc = "It is not recommended to modify the default character set of the table" +DDLHintUpdateTableCharsetWillNotUpdateFieldCharsetMessage = "It is not recommended to modify the default character set of the table" +DDLNotAllowRenamingAnnotation = "Rename/change table name/column name will affect online business non-stop release. 
if this operation is required, it should be manually intervened by dba" +DDLNotAllowRenamingDesc = "It is forbidden to use rename or change to modify the table name or column name" +DDLNotAllowRenamingMessage = "It is forbidden to use rename or change to modify the table name or column name" +DDLRecommendTableColumnCharsetSameAnnotation = "Unified character set can avoid garbled characters caused by character set conversion. different character sets need to be converted before comparison, which will cause index failure" +DDLRecommendTableColumnCharsetSameDesc = "It is recommended that columns and tables use the same character set" +DDLRecommendTableColumnCharsetSameMessage = "It is recommended that columns and tables use the same character set" +DMLAvoidWhereEqualNullAnnotation = "Null is a special value in sql, and cannot be compared with ordinary values. for example: column = null is always false. even if column has null value, it will not be queried out, so column = null should be written as column is null" +DMLAvoidWhereEqualNullDesc = "It is forbidden to compare null values with other fields or values in the where clause" +DMLAvoidWhereEqualNullMessage = "It is forbidden to compare null values with other fields or values in the where clause" +DMLCheckAffectedRowsAnnotation = "If the number of rows affected by the DML operation is too large, it will lead to a decline in query performance, because it needs to scan more data." +DMLCheckAffectedRowsDesc = "It is not recommended that the number of rows affected by update/delete operations exceed the threshold" +DMLCheckAffectedRowsMessage = "It is not recommended that the number of rows affected by update/delete operations exceed the threshold. 
the number of rows affected is %v, which exceeds the set threshold %v" +DMLCheckAffectedRowsParams1 = "Maximum number of rows affected" +DMLCheckAggregateAnnotation = "It is not recommended to use sql aggregate functions to ensure the simplicity, high performance and data consistency of the query." +DMLCheckAggregateDesc = "It is not recommended to use aggregate functions" +DMLCheckAggregateMessage = "It is not recommended to use aggregate functions to calculate" +DMLCheckAliasAnnotation = "The alias of the table or column is the same as its real name, which makes the query more difficult to distinguish" +DMLCheckAliasDesc = "It is not recommended to use the same name for the alias and the table or column" +DMLCheckAliasMessage = "These aliases (%v) are the same as the column or table names" +DMLCheckBatchInsertListsMaxAnnotation = "Avoid large transactions and reduce the impact of rollback on business; the specific rule threshold can be adjusted according to business needs, the default value: 100" +DMLCheckBatchInsertListsMaxDesc = "For a single insert statement, it is recommended to insert no more than the threshold in batches" +DMLCheckBatchInsertListsMaxMessage = "For a single insert statement, it is recommended to insert no more than %v rows in batches" +DMLCheckBatchInsertListsMaxParams1 = "Maximum number of inserted rows" +DMLCheckExplainAccessTypeAllAnnotation = "When scanning the entire table, scanning rows beyond the specified number is not recommended to avoid performance issues; the specific rule threshold can be adjusted according to business needs, the default value: 10000; if set to 0, all full table scans will trigger the rule" +DMLCheckExplainAccessTypeAllDesc = "When scanning the entire table, the number of scanned rows is not recommended to exceed the specified number (default value: 10000)" +DMLCheckExplainAccessTypeAllMessage = "This query uses a full table scan and the number of scanned rows is %v" +DMLCheckExplainAccessTypeAllParams1 = 
"Maximum number of scanned rows" +DMLCheckExplainExtraUsingFilesortAnnotation = "In the case of large amounts of data, file sorting means that SQL performance is relatively low, which will increase the overhead of the OS and affect database performance" +DMLCheckExplainExtraUsingFilesortDesc = "File sorting is not recommended" +DMLCheckExplainExtraUsingFilesortMessage = "File sorting is not recommended" +DMLCheckExplainExtraUsingIndexForSkipScanAnnotation = "Index scanning is a skip scan, does not follow the leftmost match rule, which may reduce the efficiency of index use and affect query performance" +DMLCheckExplainExtraUsingIndexForSkipScanDesc = "It is not recommended to perform index skip scanning on tables" +DMLCheckExplainExtraUsingIndexForSkipScanMessage = "It is not recommended to perform index skip scanning on tables" +DMLCheckExplainExtraUsingTemporaryAnnotation = "In the case of large amounts of data, temporary tables mean that SQL performance is relatively low, which will increase the overhead of the OS and affect database performance" +DMLCheckExplainExtraUsingTemporaryDesc = "It is not recommended to use temporary tables" +DMLCheckExplainExtraUsingTemporaryMessage = "It is not recommended to use temporary tables" +DMLCheckExplainFullIndexScanAnnotation = "In the case of large amounts of data, full index scanning seriously affects SQL performance." +DMLCheckExplainFullIndexScanDesc = "It is not recommended to perform full index scanning on tables" +DMLCheckExplainFullIndexScanMessage = "It is not recommended to perform full index scanning on tables" +DMLCheckExplainUsingIndexAnnotation = "Using indexes can significantly improve the performance of SQL queries." 
+DMLCheckExplainUsingIndexDesc = "SQL query conditions need to use indexes" +DMLCheckExplainUsingIndexMessage = "It is recommended to use indexes to optimize SQL query performance" +DMLCheckFuzzySearchAnnotation = "Using full fuzzy search or left fuzzy search will cause the query to be unable to use indexes, resulting in a full table scan" +DMLCheckFuzzySearchDesc = "Prohibit full fuzzy search or left fuzzy search" +DMLCheckFuzzySearchMessage = "Prohibit full fuzzy search or left fuzzy search" +DMLCheckHasJoinConditionAnnotation = "Specifying join conditions can ensure the correctness and reliability of join operations. If join conditions are not specified, it may lead to join failures or incorrect joins." +DMLCheckHasJoinConditionDesc = "It is recommended to specify join conditions for join operations" +DMLCheckHasJoinConditionMessage = "It is recommended to specify join conditions for join operations, there must be ON conditions after JOIN fields" +DMLCheckIfAfterUnionDistinctAnnotation = "UNION will sort and deduplicate according to the order of the fields, UNION ALL simply merges the two results and then returns them. 
In terms of efficiency, UNION ALL is much faster than UNION; if the two merged results are allowed to contain duplicate data and no sorting is required, it is recommended to enable this rule, use UNION ALL to replace UNION" +DMLCheckIfAfterUnionDistinctDesc = "It is recommended to use UNION ALL instead of UNION" +DMLCheckIfAfterUnionDistinctMessage = "It is recommended to use UNION ALL instead of UNION" +DMLCheckInQueryNumberAnnotation = "When there are too many IN values, it may lead to full table scans for the query, causing MySQL performance to drop sharply; the specific rule threshold can be adjusted according to business needs, the default value: 50" +DMLCheckInQueryNumberDesc = "The number of parameters in the IN statement in the WHERE condition cannot exceed the threshold" +DMLCheckInQueryNumberMessage = "The number of parameters in the IN statement in the WHERE condition is already %v, it is not recommended to exceed the threshold %v" +DMLCheckInQueryNumberParams1 = "Maximum number of in statement parameters" +DMLCheckIndexSelectivityAnnotation = "Ensure the selectivity of the high index used in the SQL execution plan, which helps to improve query performance and optimize query efficiency." +DMLCheckIndexSelectivityDesc = "It is recommended to ensure that the selectivity of the index used in the SQL execution plan is greater than the threshold when querying the database" +DMLCheckIndexSelectivityMessage = "Index: %v, not exceeding the selectivity threshold: %v, it is recommended to use an index that exceeds the threshold." 
+DMLCheckIndexSelectivityParams1 = "Selectivity (percentage)" +DMLCheckInsertColumnsExistAnnotation = "When the table structure changes, and the INSERT request does not explicitly specify the column name, there will be a mismatch in the inserted data; it is recommended to enable this rule to avoid the insertion result being inconsistent with business expectations" +DMLCheckInsertColumnsExistDesc = "INSERT statement needs to specify COLUMN" +DMLCheckInsertColumnsExistMessage = "INSERT statement needs to specify COLUMN" +DMLCheckInsertSelectAnnotation = "Using INSERT ... SELECT under the default transaction isolation level may cause table-level locks to be applied to the queried table." +DMLCheckInsertSelectDesc = "It is not recommended to use INSERT ... SELECT" +DMLCheckInsertSelectMessage = "It is not recommended to use INSERT ... SELECT" +DMLCheckJoinFieldCharacterSetAndCollationAnnotation = "Consistent character sets and collations of the joined table fields can avoid data inconsistency and query errors, ensuring that the join operation executes correctly." +DMLCheckJoinFieldCharacterSetAndCollationDesc = "The character set and collation of the joined table fields must be consistent" +DMLCheckJoinFieldCharacterSetAndCollationMessage = "The character set and collation of the joined table fields must be consistent" +DMLCheckJoinFieldTypeAnnotation = "Join field type mismatch will cause implicit conversion, which may lead to index invalidation. it is recommended to enable this rule to avoid index invalidation." +DMLCheckJoinFieldTypeDesc = "It is recommended to keep the join field types consistent" +DMLCheckJoinFieldTypeMessage = "It is recommended to keep the join field types consistent, otherwise it will cause implicit conversion" +DMLCheckJoinFieldUseIndexAnnotation = "Join fields containing indexes can improve the performance and query speed of join operations." 
+DMLCheckJoinFieldUseIndexDesc = "Join fields must contain indexes" +DMLCheckJoinFieldUseIndexMessage = "Join fields must contain indexes" +DMLCheckLimitMustExistAnnotation = "Limit condition can reduce the cost of writing wrong sql (deleting wrong data) and avoid long transactions from affecting business" +DMLCheckLimitMustExistDesc = "It is recommended that delete/update statements have limit conditions" +DMLCheckLimitMustExistMessage = "It is recommended that delete/update statements have limit conditions" +DMLCheckLimitOffsetNumAnnotation = "Because offset specifies the starting position of the result set, if the starting position is too large, mysql needs to process more data to return the result set, which may lead to a decline in query performance." +DMLCheckLimitOffsetNumDesc = "It is not recommended that the limit offset is greater than the threshold" +DMLCheckLimitOffsetNumMessage = "It is not recommended that the limit offset is greater than the threshold, offset=%v (threshold is %v)" +DMLCheckLimitOffsetNumParams1 = "Offset size" +DMLCheckMathComputationOrFuncOnIndexAnnotation = "Performing mathematical operations and using functions on index columns will cause index invalidation, leading to full table scans and affecting query performance." +DMLCheckMathComputationOrFuncOnIndexDesc = "It is forbidden to perform mathematical operations and use functions on index columns" +DMLCheckMathComputationOrFuncOnIndexMessage = "It is forbidden to perform mathematical operations and use functions on index columns" +DMLCheckNeedlessFuncAnnotation = "By configuring this rule, you can specify built-in functions that need to be prohibited in the business. using built-in functions may cause sql to not use indexes or produce some unexpected results. 
the actual functions that need to be disabled can be configured through the rules" +DMLCheckNeedlessFuncDesc = "Avoid using unnecessary built-in functions" +DMLCheckNeedlessFuncMessage = "Avoid using unnecessary built-in functions %v" +DMLCheckNeedlessFuncParams1 = "Set of specified functions (comma-separated)" +DMLCheckNotEqualSymbolAnnotation = "'!=' is a non-standard operator, '<>' is the standard not equal operator in sql" +DMLCheckNotEqualSymbolDesc = "It is recommended to use '<>' instead of '!=' " +DMLCheckNotEqualSymbolMessage = "It is recommended to use '<>' instead of '!=' " +DMLCheckNumberOfJoinTablesAnnotation = "The more tables are joined, the more driver relationship combinations there are, the higher the cost of comparing the execution cost of various result sets, and the sql query performance will be greatly reduced. the specific rule threshold can be adjusted according to business needs, default value: 3" +DMLCheckNumberOfJoinTablesDesc = "It is recommended that the number of tables joined using join does not exceed the threshold" +DMLCheckNumberOfJoinTablesMessage = "It is recommended that the number of tables joined using join does not exceed %v tables" +DMLCheckNumberOfJoinTablesParams1 = "Maximum number of joined tables" +DMLCheckSQLInjectionFuncAnnotation = "Attackers can access data in the database without authorization through sql injection, which poses security vulnerabilities such as theft of user information and data leakage" +DMLCheckSQLInjectionFuncDesc = "It is not recommended to use common sql injection functions" +DMLCheckSQLInjectionFuncMessage = "It is not recommended to use common sql injection functions" +DMLCheckSQLLengthAnnotation = "Too long sql is less readable, difficult to maintain, and prone to performance problems. 
the specific rule threshold can be adjusted according to business needs, default value: 1024" +DMLCheckSQLLengthDesc = "It is recommended to decompose long sql into several simple sqls" +DMLCheckSQLLengthMessage = "It is recommended to decompose long sql into several simple sqls" +DMLCheckSQLLengthParams1 = "Maximum sql length" +DMLCheckSameTableJoinedMultipleTimesAnnotation = "If a single table is queried multiple times, query performance will be reduced." +DMLCheckSameTableJoinedMultipleTimesDesc = "It is not recommended to join the same table multiple times" +DMLCheckSameTableJoinedMultipleTimesMessage = "Table %v is joined multiple times" +DMLCheckScanRowsAnnotation = "The filter condition must include the primary key or index to reduce the time complexity of the database query and improve the query efficiency." +DMLCheckScanRowsDesc = "The number of scanned rows exceeds the threshold, the filter condition must include the primary key or index" +DMLCheckScanRowsMessage = "The number of scanned rows exceeds the threshold, the filter condition must include the primary key or index" +DMLCheckScanRowsParams1 = "Number of scanned rows (ten thousand)" +DMLCheckSelectForUpdateAnnotation = "Select for update will add an exclusive lock to each row of data in the query result set. other threads' updates and deletions to the record will be blocked. under high concurrency, it is easy to cause a large number of lock waits in the database, affecting database query performance" +DMLCheckSelectForUpdateDesc = "It is not recommended to use select for update" +DMLCheckSelectForUpdateMessage = "It is not recommended to use select for update" +DMLCheckSelectLimitAnnotation = "If the number of scanned rows is very large, it may cause the optimizer to choose the wrong index or even not use the index. 
the specific rule threshold can be adjusted according to business needs, default value: 1000" +DMLCheckSelectLimitDesc = "Select statements need to have limit" +DMLCheckSelectLimitMessage = "Select statements need to have limit, and the limit number cannot exceed %v" +DMLCheckSelectLimitParams1 = "Maximum number of query rows" +DMLCheckSelectRowsAnnotation = "The filter condition must include the primary key or index to improve query performance and reduce the cost of full table scans." +DMLCheckSelectRowsDesc = "The amount of data queried exceeds the threshold, the filter condition must include the primary key or index" +DMLCheckSelectRowsMessage = "Query data volume exceeds the threshold, filter conditions must include primary key or index" +DMLCheckSelectRowsParams1 = "Query data volume (ten thousand)" +DMLCheckSelectWithOrderByAnnotation = "Order by has a significant impact on query performance, and it is not conducive to optimization and maintenance. it is recommended to put the sorting part into business processing" +DMLCheckSelectWithOrderByDesc = "Select statement cannot have order by" +DMLCheckSelectWithOrderByMessage = "Select statement cannot have order by" +DMLCheckSortColumnLengthAnnotation = "Sorting operations such as order by, distinct, group by, and union on long fields like varchar(2000) will cause sorting, which poses a performance risk" +DMLCheckSortColumnLengthDesc = "Prohibit sorting on long fields" +DMLCheckSortColumnLengthMessage = "Fields with lengths exceeding the threshold are not recommended for order by, distinct, group by, union. these fields are: %v" +DMLCheckSortColumnLengthParams1 = "Maximum length of sortable fields" +DMLCheckSortDirectionAnnotation = "Before mysql 8.0, when multiple columns specified in order by have different sorting directions, the established index cannot be used. 
after mysql 8.0, a corresponding combined index for the sorting order can be established to optimize" +DMLCheckSortDirectionDesc = "It is not recommended to use different directions for sorting on multiple different conditions in order by statement" +DMLCheckSortDirectionMessage = "It is not recommended to use different directions for sorting on multiple different conditions in order by statement" +DMLCheckSpacesAroundTheStringAnnotation = "Spaces before and after the string may cause the query judgment logic to be wrong. for example, in mysql 5.5, 'a' and 'a ' are considered the same value in the query" +DMLCheckSpacesAroundTheStringDesc = "It is not recommended to include spaces at the beginning or end of strings in quotation marks" +DMLCheckSpacesAroundTheStringMessage = "It is not recommended to include spaces at the beginning or end of strings in quotation marks" +DMLCheckSubQueryNestNumAnnotation = "The nesting level of subqueries exceeds the threshold. in some cases, subqueries cannot use indexes. 
also, for subqueries that return a large result set, a large number of temporary tables will be generated, consuming excessive cpu and io resources and generating a large number of slow queries" +DMLCheckSubQueryNestNumDesc = "The nesting level of subqueries is not recommended to exceed the threshold" +DMLCheckSubQueryNestNumMessage = "The nesting level of subqueries exceeds the threshold %v" +DMLCheckSubQueryNestNumParams1 = "The nesting level of subqueries is not recommended to exceed the threshold" +DMLCheckSubqueryLimitAnnotation = "Some mysql versions do not support 'limit & in/all/any/some' in subqueries" +DMLCheckSubqueryLimitDesc = "It is not recommended to use limit in subqueries" +DMLCheckSubqueryLimitMessage = "It is not recommended to use limit in subqueries" +DMLCheckTableSizeAnnotation = "DML operations on large tables take a long time and have a high load, which can easily affect database performance; the specific rule threshold can be adjusted according to business needs, the default value is: 1024" +DMLCheckTableSizeDesc = "It is not recommended to perform DML operations on tables with a large amount of data" +DMLCheckTableSizeMessage = "The space of the table %v for executing DML is not recommended to exceed %vmb" +DMLCheckTableSizeParams1 = "Table space size (mb)" +DMLCheckUpdateOrDeleteHasWhereAnnotation = "Because the purpose of these statements is to modify the data in the database, it is necessary to use the where condition to filter the records to be updated or deleted to ensure the correctness of the data. in addition, using the where condition can also improve query performance." 
+DMLCheckUpdateOrDeleteHasWhereDesc = "It is recommended to use where conditions for update/delete operations" +DMLCheckUpdateOrDeleteHasWhereMessage = "It is recommended to use where conditions for update/delete operations" +DMLCheckWhereExistFuncAnnotation = "Performing function operations on condition fields may break the order of index values, causing the optimizer to choose to abandon index traversal, resulting in a significant decrease in query performance" +DMLCheckWhereExistFuncDesc = "Avoid using function operations on condition fields" +DMLCheckWhereExistFuncMessage = "Avoid using function operations on condition fields" +DMLCheckWhereExistImplicitConversionAnnotation = "Using data types that are inconsistent with the filtering field in the where condition will cause implicit data type conversion, leading to the risk of the query not hitting the index. in high concurrency and large data volume scenarios, not using the index will severely degrade the query performance of the database" +DMLCheckWhereExistImplicitConversionDesc = "It is not recommended to use data types that are inconsistent with the filtering field in the where condition" +DMLCheckWhereExistImplicitConversionMessage = "It is not recommended to use data types that are inconsistent with the filtering field in the where condition" +DMLCheckWhereExistNotAnnotation = "Using negative queries will cause full table scans, resulting in slow sql" +DMLCheckWhereExistNotDesc = "It is not recommended to use negative queries for condition fields" +DMLCheckWhereExistNotMessage = "It is not recommended to use negative queries for condition fields" +DMLCheckWhereExistScalarSubqueryAnnotation = "Scalar subqueries have the problem of accessing the same table multiple times, which has a high execution overhead and low efficiency. 
left join can be used to replace scalar subqueries" +DMLCheckWhereExistScalarSubqueryDesc = "It is not recommended to use scalar subqueries" +DMLCheckWhereExistScalarSubqueryMessage = "It is not recommended to use scalar subqueries" +DMLCheckWhereIsInvalidAnnotation = "Sql lacks where condition, which will perform full table scan during execution and generate extra overhead. it is recommended to enable it in high data volume and high concurrency environments to avoid affecting database query performance" +DMLCheckWhereIsInvalidDesc = "Prohibit the use of sql without where condition or where condition is always true" +DMLCheckWhereIsInvalidMessage = "Prohibit the use of sql without where condition or where condition is always true" +DMLCheckWithLimitAnnotation = "Delete/update statements using limit conditions will randomly select data for deletion or update, which is unpredictable for business" +DMLCheckWithLimitDesc = "Delete/update statements cannot have limit conditions" +DMLCheckWithLimitMessage = "Delete/update statements cannot have limit conditions" +DMLCheckWithOrderByAnnotation = "Delete/update statements with order by will use sorting, bringing unnecessary overhead" +DMLCheckWithOrderByDesc = "Delete/update statements cannot have order by" +DMLCheckWithOrderByMessage = "Delete/update statements cannot have order by" +DMLDisableSelectAllColumnAnnotation = "Using a * wildcard to select all columns when the table structure changes will cause the query behavior to change, which is not in line with business expectations; at the same time, useless fields in select * will bring unnecessary disk i/o and network overhead, and cannot cover the index, thus causing a full table scan, which greatly reduces the query efficiency" +DMLDisableSelectAllColumnDesc = "It is not recommended to use select *" +DMLDisableSelectAllColumnMessage = "It is not recommended to use select *" +DMLHintCountFuncWithColAnnotation = "It is recommended to use count(*), because using count(col) 
requires a full table scan, which may lead to performance degradation." +DMLHintCountFuncWithColDesc = "Avoid using count(col)" +DMLHintCountFuncWithColMessage = "Avoid using count(col)" +DMLHintDeleteTipsAnnotation = "Drop/truncate is ddl, the operation takes effect immediately, it will not be written to the log, so it cannot be rolled back. it is necessary to back up the data before performing high-risk operations." +DMLHintDeleteTipsDesc = "It is recommended to back up before performing delete/drop/truncate operations" +DMLHintDeleteTipsMessage = "It is recommended to back up before performing delete/drop/truncate operations" +DMLHintGroupByRequiresConditionsAnnotation = "In 5.7, mysql defaults to implicitly sorting 'group by col1, ...' in the following order 'order by col1, ...', which results in unnecessary sorting and brings extra overhead; in 8.0, this will not happen. if no sorting is required, it is recommended to explicitly add 'order by null'" +DMLHintGroupByRequiresConditionsDesc = "It is recommended to add order by condition to group by statement" +DMLHintGroupByRequiresConditionsMessage = "It is recommended to add order by condition to group by statement" +DMLHintInNullOnlyFalseAnnotation = "The query condition is never true, which will result in no matching results" +DMLHintInNullOnlyFalseDesc = "Avoid using in (null) or not in (null)" +DMLHintInNullOnlyFalseMessage = "Avoid using in (null)/not in (null), this usage will never be true, resulting in the condition being invalid" +DMLHintLimitMustBeCombinedWithOrderByAnnotation = "Limit without order by will result in non-deterministic results that may not be consistent with business requirements, depending on the execution plan" +DMLHintLimitMustBeCombinedWithOrderByDesc = "It is recommended to use order by for limit queries" +DMLHintLimitMustBeCombinedWithOrderByMessage = "It is recommended to use order by for limit queries" +DMLHintSumFuncTipsAnnotation = "When all values in a column are null, 
count(col) returns 0, but sum(col) returns null. therefore, when using sum(), it is necessary to pay attention to the npe problem (referring to data returning null); if the business needs to avoid npe problems, it is recommended to enable this rule" +DMLHintSumFuncTipsDesc = "Avoid using sum(col)" +DMLHintSumFuncTipsMessage = "Avoid using sum(col), this usage has the risk of returning null values, resulting in program null pointer" +DMLHintTruncateTipsAnnotation = "Truncate is ddl, data cannot be rolled back. use truncate with caution without backup" +DMLHintTruncateTipsDesc = "It is not recommended to use truncate operation" +DMLHintTruncateTipsMessage = "It is not recommended to use truncate operation" +DMLHintUseTruncateInsteadOfDeleteAnnotation = "Truncate table is faster than delete, and uses fewer system and transaction log resources. at the same time, the space occupied by the table after truncate will be released, while delete needs to manually execute optimize to release the table space" +DMLHintUseTruncateInsteadOfDeleteDesc = "It is recommended to use truncate instead of delete when deleting the entire table" +DMLHintUseTruncateInsteadOfDeleteMessage = "It is recommended to use truncate instead of delete when deleting the entire table" +DMLMustMatchLeftMostPrefixAnnotation = "Non-equivalence queries such as in and or on the leftmost field of a combined index will cause the combined index to be invalid" +DMLMustMatchLeftMostPrefixDesc = "Prohibit non-equivalence queries such as in and or on the leftmost field of a combined index" +DMLMustMatchLeftMostPrefixMessage = "Non-equivalence queries such as in and or on the leftmost field of a combined index will cause the combined index to be invalid" +DMLMustUseLeftMostPrefixAnnotation = "Using a combined index without the first field will cause the combined index to be invalid" +DMLMustUseLeftMostPrefixDesc = "When using a combined index, the first field of the combined index must be used" 
+DMLMustUseLeftMostPrefixMessage = "When using a combined index, the first field of the combined index must be used" +DMLNotAllowInsertAutoincrementAnnotation = "Manual assignment may cause data gaps and primary key conflicts" +DMLNotAllowInsertAutoincrementDesc = "Prohibit manual setting of auto-increment field values" +DMLNotAllowInsertAutoincrementMessage = "Prohibit manual setting of auto-increment field values" +DMLNotRecommendFuncInWhereAnnotation = "Functions or operators will cause the query to be unable to use the index in the table. the query will perform a full table scan, which has poor performance" +DMLNotRecommendFuncInWhereDesc = "Functions or other operators should be avoided in the where condition" +DMLNotRecommendFuncInWhereMessage = "Functions or other operators should be avoided in the where condition" +DMLNotRecommendGroupByConstantAnnotation = "Group by 1 means group by the first column; using numbers instead of expressions or column names in the group by clause will cause query logic problems when the query column order changes" +DMLNotRecommendGroupByConstantDesc = "It is not recommended to group by constants" +DMLNotRecommendGroupByConstantMessage = "It is not recommended to group by constants" +DMLNotRecommendGroupByExpressionAnnotation = "When the order by condition is an expression or function, a temporary table will be used. 
if no where clause is specified or the result set returned by the where clause is too large, the performance will be very poor" +DMLNotRecommendGroupByExpressionDesc = "It is not recommended to use expressions as order by conditions" +DMLNotRecommendGroupByExpressionMessage = "It is not recommended to use expressions as order by conditions" +DMLNotRecommendHavingAnnotation = "For indexed fields, they will not be indexed when placed in the having clause; it is recommended to rewrite the having clause as the query condition in the where clause, so that the index can be used during query processing, improving the execution efficiency of the sql" +DMLNotRecommendHavingDesc = "It is not recommended to use the having clause" +DMLNotRecommendHavingMessage = "It is not recommended to use the having clause" +DMLNotRecommendInAnnotation = "When there are too many in values, it may cause the query to perform a full table scan, causing a sharp decline in mysql performance" +DMLNotRecommendInDesc = "It is not recommended to use in" +DMLNotRecommendInMessage = "Do not recommend using in" +DMLNotRecommendNotWildcardLikeAnnotation = "Like queries without wildcard are logically the same as equality queries, it is recommended to use equality queries instead" +DMLNotRecommendNotWildcardLikeDesc = "Do not recommend using like queries without wildcard" +DMLNotRecommendNotWildcardLikeMessage = "Do not recommend using like queries without wildcard" +DMLNotRecommendOrderByRandAnnotation = "Order by rand() uses temporary tables, and also needs to sort them. when the amount of data is large, it will increase the server load and increase the query time" +DMLNotRecommendOrderByRandDesc = "Do not recommend using order by rand()" +DMLNotRecommendOrderByRandMessage = "Do not recommend using order by rand()" +DMLNotRecommendSubqueryAnnotation = "In some cases, subqueries cannot use indexes. 
at the same time, for subqueries with larger result sets, a large number of temporary tables will be generated, consuming too much cpu and io resources and generating a large number of slow queries" +DMLNotRecommendSubqueryDesc = "Do not recommend using subqueries" +DMLNotRecommendSubqueryMessage = "Do not recommend using subqueries" +DMLNotRecommendSysdateAnnotation = "When the sysdate() function is in a statement-based master-slave environment, it may cause data inconsistency, because the statement is executed in the master library until the log is transmitted to the slave library, there is a time difference, and it will become a different time value when it is executed in the slave library, it is recommended to adopt row-based replication environment" +DMLNotRecommendSysdateDesc = "Do not recommend using the sysdate() function" +DMLNotRecommendSysdateMessage = "Do not recommend using the sysdate() function" +DMLNotRecommendUpdatePKAnnotation = "The order of the data columns of the primary key index is the physical storage order of the table records. frequent updates to the primary key will lead to the adjustment of the order of the entire table records, which will consume a lot of resources" +DMLNotRecommendUpdatePKDesc = "Do not recommend updating the primary key" +DMLNotRecommendUpdatePKMessage = "Do not recommend updating the primary key" +DMLSQLExplainLowestLevelAnnotation = "Verify the type field in the sql execution plan to ensure that it meets the required level to ensure query performance." 
+DMLSQLExplainLowestLevelDesc = "The type field in the sql execution plan is recommended to meet the specified level" +DMLSQLExplainLowestLevelMessage = "It is recommended to modify the sql to ensure that the type field in the execution plan can meet any level in the regulations: %v" +DMLSQLExplainLowestLevelParams1 = "Query plan type level, separated by english commas" +DMLWhereExistNullAnnotation = "Using is null or is not null may cause the query to abandon using the index and perform a full table scan" +DMLWhereExistNullDesc = "Do not recommend using null value judgment for conditional fields" +DMLWhereExistNullMessage = "Do not recommend using null value judgment for conditional fields" +DuplicateColumnsMessage = "Column name %s duplicated" +DuplicateIndexedColumnMessage = "Index %s column %s duplicated" +DuplicateIndexesMessage = "Index name %s duplicated" +DuplicatePrimaryKeyedColumnMessage = "Primary key column %s duplicated" +ExtremalIndexAdviceFormat = "Index suggestion | sql uses the extreme value function, you can use the ordered nature of the index to quickly find the extreme value. it is recommended to add a single-column index to table %s, refer to the column: %s" +FunctionIndexAdviceFormatAll = "Index suggestion | sql uses functions as query conditions. in mysql 5.7 and above versions, you can create indexes on virtual columns. in mysql 8.0.13 and above versions, you can create function indexes. it is recommended to add appropriate indexes to table %s based on the mysql version, refer to the column: %s" +FunctionIndexAdviceFormatV57 = "Index suggestion | sql uses functions as query conditions. in mysql 5.7 and above versions, you can create indexes on virtual columns. it is recommended to add virtual column indexes to table %s, refer to the column: %s" +FunctionIndexAdviceFormatV80 = "Index suggestion | sql uses functions as query conditions. in mysql 8.0.13 and above versions, you can create function indexes. 
it is recommended to add function indexes to table %s, refer to the column: %s" +GhostDryRunError = "Table space size exceeds %vmb, gh-ost will be used for online deployment, but the dry-run throws the following error: %v" +GhostDryRunNotice = "Table space size exceeds %vmb, gh-ost will be used for online deployment" +IndexExistMessage = "Index %s already exists" +IndexNotExistMessage = "Index %s does not exist" +JoinIndexAdviceFormat = "Index suggestion | sql field %s is the associated field on the driven table %s. it is recommended to add a single-column index to table %s, refer to the column: %s" +KeyedColumnNotExistMessage = "Index column %s does not exist" +MultiPrimaryKeyMessage = "The primary key can only be set to one" +NotSupportExceedMaxRowsRollback = "The estimated number of affected rows exceeds the configured maximum, no rollback statement is generated" +NotSupportHasVariableRollback = "Rollback does not support DML statements containing variables" +NotSupportInsertWithoutPrimaryKeyRollback = "Rollback does not support insert statements without specifying the primary key" +NotSupportMultiTableStatementRollback = "Rollback does not support multi-table DML statements" +NotSupportNoPrimaryKeyTableRollback = "Rollback does not support DML statements for tables without primary keys" +NotSupportOnDuplicatStatementRollback = "Rollback does not support on duplicate statements" +NotSupportParamMarkerStatementRollback = "Rollback does not support statements containing fingerprints" +NotSupportStatementRollback = "Rollback does not support this type of statement" +NotSupportSubQueryStatementRollback = "Rollback does not support statements with subqueries" +OptDMLCheckLimitOffsetNumAnnotation = "OptDMLCheckLimitOffsetNumAnnotation" +OptDMLCheckLimitOffsetNumDesc = "OptDMLCheckLimitOffsetNumDesc" +OptDMLCheckMathComputationOrFuncOnIndexAnnotation = "OptDMLCheckMathComputationOrFuncOnIndexAnnotation" +OptDMLCheckMathComputationOrFuncOnIndexDesc = 
"OptDMLCheckMathComputationOrFuncOnIndexDesc" +OptDMLCheckNotEqualSymbolAnnotation = "OptDMLCheckNotEqualSymbolAnnotation" +OptDMLCheckNotEqualSymbolDesc = "OptDMLCheckNotEqualSymbolDesc" +OptDMLCheckWhereExistImplicitConversionAnnotation = "OptDMLCheckWhereExistImplicitConversionAnnotation" +OptDMLCheckWhereExistImplicitConversionDesc = "OptDMLCheckWhereExistImplicitConversionDesc" +OptDMLCheckWhereExistScalarSubqueryAnnotation = "OptDMLCheckWhereExistScalarSubqueryAnnotation" +OptDMLCheckWhereExistScalarSubqueryDesc = "OptDMLCheckWhereExistScalarSubqueryDesc" +OptDMLHintGroupByRequiresConditionsAnnotation = "OptDMLHintGroupByRequiresConditionsAnnotation" +OptDMLHintGroupByRequiresConditionsDesc = "OptDMLHintGroupByRequiresConditionsDesc" +OptDMLHintInNullOnlyFalseAnnotation = "OptDMLHintInNullOnlyFalseAnnotation" +OptDMLHintInNullOnlyFalseDesc = "OptDMLHintInNullOnlyFalseDesc" +OptDMLHintUseTruncateInsteadOfDeleteAnnotation = "OptDMLHintUseTruncateInsteadOfDeleteAnnotation" +OptDMLHintUseTruncateInsteadOfDeleteDesc = "OptDMLHintUseTruncateInsteadOfDeleteDesc" +OptDMLNotRecommendHavingAnnotation = "OptDMLNotRecommendHavingAnnotation" +OptDMLNotRecommendHavingDesc = "OptDMLNotRecommendHavingDesc" +OptDMLNotRecommendInAnnotation = "OptDMLNotRecommendInAnnotation" +OptDMLNotRecommendInDesc = "OptDMLNotRecommendInDesc" +OptDMLNotRecommendNotWildcardLikeAnnotation = "OptDMLNotRecommendNotWildcardLikeAnnotation" +OptDMLNotRecommendNotWildcardLikeDesc = "OptDMLNotRecommendNotWildcardLikeDesc" +OptDMLRuleAllSubqueryRewriteAnnotation = "OptDMLRuleAllSubqueryRewriteAnnotation" +OptDMLRuleAllSubqueryRewriteDesc = "OptDMLRuleAllSubqueryRewriteDesc" +OptDMLRuleDiffOrderingSpecTypeWarningAnnotation = "OptDMLRuleDiffOrderingSpecTypeWarningAnnotation" +OptDMLRuleDiffOrderingSpecTypeWarningDesc = "OptDMLRuleDiffOrderingSpecTypeWarningDesc" +OptDMLRuleDistinctEliminationRewriteAnnotation = "OptDMLRuleDistinctEliminationRewriteAnnotation" +OptDMLRuleDistinctEliminationRewriteDesc = 
"OptDMLRuleDistinctEliminationRewriteDesc" +OptDMLRuleExists2JoinRewriteAnnotation = "OptDMLRuleExists2JoinRewriteAnnotation" +OptDMLRuleExists2JoinRewriteDesc = "OptDMLRuleExists2JoinRewriteDesc" +OptDMLRuleFilterPredicatePushDownRewriteAnnotation = "OptDMLRuleFilterPredicatePushDownRewriteAnnotation" +OptDMLRuleFilterPredicatePushDownRewriteDesc = "OptDMLRuleFilterPredicatePushDownRewriteDesc" +OptDMLRuleGroupingFromDiffTablesRewriteAnnotation = "OptDMLRuleGroupingFromDiffTablesRewriteAnnotation" +OptDMLRuleGroupingFromDiffTablesRewriteDesc = "OptDMLRuleGroupingFromDiffTablesRewriteDesc" +OptDMLRuleJoinEliminationRewriteAnnotation = "OptDMLRuleJoinEliminationRewriteAnnotation" +OptDMLRuleJoinEliminationRewriteDesc = "OptDMLRuleJoinEliminationRewriteDesc" +OptDMLRuleLimitClausePushDownRewriteAnnotation = "OptDMLRuleLimitClausePushDownRewriteAnnotation" +OptDMLRuleLimitClausePushDownRewriteDesc = "OptDMLRuleLimitClausePushDownRewriteDesc" +OptDMLRuleLimitClausePushDownRewriteParams1 = "OptDMLRuleLimitClausePushDownRewriteParams1" +OptDMLRuleMaxMinAggRewriteAnnotation = "OptDMLRuleMaxMinAggRewriteAnnotation" +OptDMLRuleMaxMinAggRewriteDesc = "OptDMLRuleMaxMinAggRewriteDesc" +OptDMLRuleMoveOrder2LeadingRewriteAnnotation = "OptDMLRuleMoveOrder2LeadingRewriteAnnotation" +OptDMLRuleMoveOrder2LeadingRewriteDesc = "OptDMLRuleMoveOrder2LeadingRewriteDesc" +OptDMLRuleNPERewriteAnnotation = "OptDMLRuleNPERewriteAnnotation" +OptDMLRuleNPERewriteDesc = "OptDMLRuleNPERewriteDesc" +OptDMLRuleOrCond4SelectRewriteAnnotation = "OptDMLRuleOrCond4SelectRewriteAnnotation" +OptDMLRuleOrCond4SelectRewriteDesc = "OptDMLRuleOrCond4SelectRewriteDesc" +OptDMLRuleOrCond4UpDeleteRewriteAnnotation = "OptDMLRuleOrCond4UpDeleteRewriteAnnotation" +OptDMLRuleOrCond4UpDeleteRewriteDesc = "OptDMLRuleOrCond4UpDeleteRewriteDesc" +OptDMLRuleOrderEliminationInSubqueryRewriteAnnotation = "OptDMLRuleOrderEliminationInSubqueryRewriteAnnotation" +OptDMLRuleOrderEliminationInSubqueryRewriteDesc = 
"OptDMLRuleOrderEliminationInSubqueryRewriteDesc" +OptDMLRuleOrderingFromDiffTablesRewriteAnnotation = "OptDMLRuleOrderingFromDiffTablesRewriteAnnotation" +OptDMLRuleOrderingFromDiffTablesRewriteDesc = "OptDMLRuleOrderingFromDiffTablesRewriteDesc" +OptDMLRuleOuter2InnerConversionRewriteAnnotation = "OptDMLRuleOuter2InnerConversionRewriteAnnotation" +OptDMLRuleOuter2InnerConversionRewriteDesc = "OptDMLRuleOuter2InnerConversionRewriteDesc" +OptDMLRuleProjectionPushdownRewriteAnnotation = "OptDMLRuleProjectionPushdownRewriteAnnotation" +OptDMLRuleProjectionPushdownRewriteDesc = "OptDMLRuleProjectionPushdownRewriteDesc" +OptDMLRuleQualifierSubQueryRewriteAnnotation = "OptDMLRuleQualifierSubQueryRewriteAnnotation" +OptDMLRuleQualifierSubQueryRewriteDesc = "OptDMLRuleQualifierSubQueryRewriteDesc" +OptDMLRuleQueryFoldingRewriteAnnotation = "OptDMLRuleQueryFoldingRewriteAnnotation" +OptDMLRuleQueryFoldingRewriteDesc = "OptDMLRuleQueryFoldingRewriteDesc" +OptDMLRuleSATTCRewriteAnnotation = "OptDMLRuleSATTCRewriteAnnotation" +OptDMLRuleSATTCRewriteDesc = "OptDMLRuleSATTCRewriteDesc" +OptDMLWhereExistNullAnnotation = "OptDMLWhereExistNullAnnotation" +OptDMLWhereExistNullDesc = "OptDMLWhereExistNullDesc" +OptOracle500Annotation = "OptOracle500Annotation" +OptOracle500Desc = "OptOracle500Desc" +OptOracle501Annotation = "OptOracle501Annotation" +OptOracle501Desc = "OptOracle501Desc" +OptOracle502Annotation = "OptOracle502Annotation" +OptOracle502Desc = "OptOracle502Desc" +OptOracle503Annotation = "OptOracle503Annotation" +OptOracle503Desc = "OptOracle503Desc" +OptOracle504Annotation = "OptOracle504Annotation" +OptOracle504Desc = "OptOracle504Desc" +OptOracle505Annotation = "OptOracle505Annotation" +OptOracle505Desc = "OptOracle505Desc" +OptOracle506Annotation = "OptOracle506Annotation" +OptOracle506Desc = "OptOracle506Desc" +OptOracle507Annotation = "OptOracle507Annotation" +OptOracle507Desc = "OptOracle507Desc" +OptOracle508Annotation = "OptOracle508Annotation" 
+OptOracle508Desc = "OptOracle508Desc" +OptOracle509Annotation = "OptOracle509Annotation" +OptOracle509Desc = "OptOracle509Desc" +OptOracle510Annotation = "OptOracle510Annotation" +OptOracle510Desc = "OptOracle510Desc" +OptOracle511Annotation = "OptOracle511Annotation" +OptOracle511Desc = "OptOracle511Desc" +OptOracle512Annotation = "OptOracle512Annotation" +OptOracle512Desc = "OptOracle512Desc" +OptOracle513Annotation = "OptOracle513Annotation" +OptOracle513Desc = "OptOracle513Desc" +OptOracle514Annotation = "OptOracle514Annotation" +OptOracle514Desc = "OptOracle514Desc" +OptOracle515Annotation = "OptOracle515Annotation" +OptOracle515Desc = "OptOracle515Desc" +OptOracle516Annotation = "OptOracle516Annotation" +OptOracle516Desc = "OptOracle516Desc" +OptOracle517Annotation = "OptOracle517Annotation" +OptOracle517Desc = "OptOracle517Desc" +OptOracle518Annotation = "OptOracle518Annotation" +OptOracle518Desc = "OptOracle518Desc" +OptOracle519Annotation = "OptOracle519Annotation" +OptOracle519Desc = "OptOracle519Desc" +OptOracle520Annotation = "OptOracle520Annotation" +OptOracle520Desc = "OptOracle520Desc" +OptOracle521Annotation = "OptOracle521Annotation" +OptOracle521Desc = "OptOracle521Desc" +OptOracle522Annotation = "OptOracle522Annotation" +OptOracle522Desc = "OptOracle522Desc" +OptOracle523Annotation = "OptOracle523Annotation" +OptOracle523Desc = "OptOracle523Desc" +OptOracle524Annotation = "OptOracle524Annotation" +OptOracle524Desc = "OptOracle524Desc" +OptOracle525Annotation = "OptOracle525Annotation" +OptOracle525Desc = "OptOracle525Desc" +OptOracle526Annotation = "OptOracle526Annotation" +OptOracle526Desc = "OptOracle526Desc" +OptOracle527Annotation = "OptOracle527Annotation" +OptOracle527Desc = "OptOracle527Desc" +PTOSCAvoidNoDefaultValueOnNotNullColumn = "[osc]Non-null fields must have default values, otherwise pt-online-schema-change will fail to execute" +PTOSCAvoidRenameTable = "[osc]Pt-online-schema-change does not support renaming tables using rename 
table" +PTOSCAvoidUniqueIndex = "[osc]Adding a unique key using pt-online-schema-change may cause data loss. insert ignore was used when data was migrated to a new table" +PTOSCNoUniqueIndexOrPrimaryKey = "[osc]At least one primary key or unique key index is required to use pt-online-schema-change" +ParseDDLError = "Failed to parse the create table statement. some online audit rules may be invalid. please confirm manually" +PrefixIndexAdviceFormat = "Index suggestion | the sql uses prefix fuzzy matching. when the data volume is large, you can create a reverse function index" +PrimaryKeyExistMessage = "Primary key already exists, cannot be added again" +PrimaryKeyNotExistMessage = "No primary key exists currently, cannot perform deletion" +RuleTypeDDLConvention = "DDL convention" +RuleTypeDMLConvention = "DML convention" +RuleTypeGlobalConfig = "Global configuration" +RuleTypeIndexInvalidation = "Index invalidation" +RuleTypeIndexOptimization = "Index optimization" +RuleTypeIndexingConvention = "Indexing convention" +RuleTypeNamingConvention = "Naming convention" +RuleTypeUsageSuggestion = "Usage suggestion" +SchemaExistMessage = "Schema %s already exists" +SchemaNotExistMessage = "Schema %s does not exist" +TableExistMessage = "Table %s already exists" +TableNotExistMessage = "Table %s does not exist" +ThreeStarIndexAdviceFormat = "Index suggestion | according to the three-star index design specification, it is recommended to add %s index to table %s: [%s]" +UnsupportedSyntaxError = "Syntax error or the parser does not support it. 
please confirm the sql correctness manually" diff --git a/sqle/driver/mysql/plocale/active.zh.toml b/sqle/driver/mysql/plocale/active.zh.toml new file mode 100644 index 0000000000..24824f9a73 --- /dev/null +++ b/sqle/driver/mysql/plocale/active.zh.toml @@ -0,0 +1,692 @@ +AdvisorIndexTypeComposite = "复合" +AdvisorIndexTypeSingle = "单列" +AllCheckPrepareStatementPlaceholdersAnnotation = "因为过度使用绑定变量会增加查询的复杂度,从而降低查询性能。过度使用绑定变量还会增加维护成本。默认阈值:100" +AllCheckPrepareStatementPlaceholdersDesc = "绑定的变量个数不建议超过阈值" +AllCheckPrepareStatementPlaceholdersMessage = "使用绑定变量数量为 %v,不建议超过设定阈值 %v" +AllCheckPrepareStatementPlaceholdersParams1 = "最大绑定变量数量" +AnalysisDescCardinality = "基数" +AnalysisDescCharacterSetName = "列字符集" +AnalysisDescColumnComment = "列说明" +AnalysisDescColumnDefault = "默认值" +AnalysisDescColumnKey = "列索引" +AnalysisDescColumnName = "列名" +AnalysisDescColumnType = "列类型" +AnalysisDescComment = "备注" +AnalysisDescExtra = "拓展信息" +AnalysisDescIndexType = "索引类型" +AnalysisDescIsNullable = "是否可以为空" +AnalysisDescKeyName = "索引名" +AnalysisDescSeqInIndex = "列序列" +AnalysisDescUnique = "唯一性" +AnonymousMark = "(匿名)" +AuditResultMsgExcludedSQL = "审核SQL例外" +AuditResultMsgWhiteList = "白名单" +CheckInvalidError = "预检查失败" +CheckInvalidErrorFormat = "预检查失败: %v" +ColumnExistMessage = "字段 %s 已存在" +ColumnIsAmbiguousMessage = "字段 %s 指代不明" +ColumnNotExistMessage = "字段 %s 不存在" +ColumnsValuesNotMatchMessage = "指定的值列数与字段列数不匹配" +ConfigDDLGhostMinSizeAnnotation = "开启该规则后会自动对大表的DDL操作使用gh-ost 工具进行在线改表;直接对大表进行DDL变更时可能会导致长时间锁表问题,影响业务可持续性。具体对大表定义的阈值可以根据业务需求调整,默认值:1024" +ConfigDDLGhostMinSizeDesc = "改表时,表空间超过指定大小(MB)时使用gh-ost上线" +ConfigDDLGhostMinSizeParams1 = "表空间大小(MB)" +ConfigDDLOSCMinSizeAnnotation = "开启该规则后会对大表的DDL语句给出 pt-osc工具的改写建议【需要参考命令进行手工执行,后续会支持自动执行】;直接对大表进行DDL变更时可能会导致长时间锁表问题,影响业务可持续性。具体对大表定义的阈值可以根据业务需求调整,默认值:1024" +ConfigDDLOSCMinSizeDesc = "改表时,表空间超过指定大小(MB)审核时输出osc改写建议" +ConfigDDLOSCMinSizeParams1 = "表空间大小(MB)" +ConfigDMLExplainPreCheckEnableAnnotation = "通过 EXPLAIN 
的形式将待上线的DML进行SQL是否能正确执行的检查,提前发现语句的错误,提高上线成功率" +ConfigDMLExplainPreCheckEnableDesc = "使用EXPLAIN加强预检查能力" +ConfigDMLRollbackMaxRowsAnnotation = "大事务回滚,容易影响数据库性能,使得业务发生波动;具体规则阈值可以根据业务需求调整,默认值:1000" +ConfigDMLRollbackMaxRowsDesc = "在 DML 语句中预计影响行数超过指定值则不回滚" +ConfigDMLRollbackMaxRowsParams1 = "最大影响行数" +ConfigOptimizeIndexEnabledAnnotation = "通过该规则开启索引优化建议,提供两个参数配置来定义索引优化建议的行为。1. 列区分度最低值阈值(百分制):配置当前表中列的区分度小于多少时,不作为索引的列;2. 联合索引最大列数:限制联合索引给到的列数最大值,防止给出建议的联合索引不符合其他SQL标准" +ConfigOptimizeIndexEnabledDesc = "索引创建建议" +ConfigOptimizeIndexEnabledParams1 = "列区分度最低值阈值(百分比)" +ConfigOptimizeIndexEnabledParams2 = "联合索引最大列数" +ConfigSQLIsExecutedAnnotation = "启用该规则来兼容事后审核的场景,对于事后采集的DDL 和 DML 语句将不再进行上线校验。例如库表元数据的扫描任务可开启该规则" +ConfigSQLIsExecutedDesc = "停用上线审核模式" +DDLAvoidEventAnnotation = "使用event会增加数据库的维护难度和依赖性,并且也会造成安全问题。" +DDLAvoidEventDesc = "禁止使用event" +DDLAvoidEventMessage = "禁止使用event" +DDLAvoidFullTextAnnotation = "全文索引的使用会增加存储开销,并对写操作性能产生一定影响。" +DDLAvoidFullTextDesc = "禁止使用全文索引" +DDLAvoidFullTextMessage = "禁止使用全文索引" +DDLAvoidGeometryAnnotation = "使用空间字段和空间索引会增加存储需求,对数据库性能造成一定影响" +DDLAvoidGeometryDesc = "禁止使用空间字段和空间索引" +DDLAvoidGeometryMessage = "禁止使用空间字段和空间索引" +DDLAvoidTextAnnotation = "将TEXT类型的字段与原表主键分拆成另一个表可以提高数据库性能和查询速度,减少不必要的 I/O 操作。" +DDLAvoidTextDesc = "使用TEXT 类型的字段建议和原表进行分拆,与原表主键单独组成另外一个表进行存放" +DDLAvoidTextMessage = "字段:%v为TEXT类型,建议和原表进行分拆,与原表主键单独组成另外一个表进行存放" +DDLCheckAllIndexNotNullConstraintAnnotation = "所有索引字段均未做非空约束,请确认下表索引规划的合理性。" +DDLCheckAllIndexNotNullConstraintDesc = "建议为至少一个索引添加非空约束" +DDLCheckAllIndexNotNullConstraintMessage = "建议为至少一个索引添加非空约束" +DDLCheckAlterTableNeedMergeAnnotation = "避免多次 TABLE REBUILD 带来的消耗、以及对线上业务的影响" +DDLCheckAlterTableNeedMergeDesc = "存在多条对同一个表的修改语句,建议合并成一个ALTER语句" +DDLCheckAlterTableNeedMergeMessage = "已存在对该表的修改语句,建议合并成一个ALTER语句" +DDLCheckAutoIncrementAnnotation = "创建表时AUTO_INCREMENT设置为0则自增从1开始,可以避免数据空洞。例如在导出表结构DDL时,表结构内AUTO_INCREMENT通常为当前的自增值,如果建表时没有把AUTO_INCREMENT设置为0,那么通过该DDL进行建表操作会导致自增值从一个无意义数字开始。" +DDLCheckAutoIncrementDesc = 
"表的初始AUTO_INCREMENT值建议为0" +DDLCheckAutoIncrementFieldNumAnnotation = "MySQL InnoDB,MyISAM 引擎不允许存在多个自增字段,设置多个自增字段会导致上线失败。" +DDLCheckAutoIncrementFieldNumDesc = "建表时,自增字段只能设置一个" +DDLCheckAutoIncrementFieldNumMessage = "建表时,自增字段只能设置一个" +DDLCheckAutoIncrementMessage = "表的初始AUTO_INCREMENT值建议为0" +DDLCheckBigintInsteadOfDecimalAnnotation = "因为CPU不支持对DECIMAL的直接运算,只是MySQL自身实现了DECIMAL的高精度计算,但是计算代价高,并且存储同样范围值的时候,空间占用也更多;使用BIGINT代替DECIMAL,可根据小数的位数乘以相应的倍数,即可达到精确的浮点存储计算,避免DECIMAL计算代价高的问题" +DDLCheckBigintInsteadOfDecimalDesc = "建议用BIGINT类型代替DECIMAL" +DDLCheckBigintInsteadOfDecimalMessage = "建议列%s用BIGINT类型代替DECIMAL" +DDLCheckCharLengthAnnotation = "使用过长或者过多的varchar,char字段可能会增加业务逻辑的复杂性;如果字段平均长度过大时,会占用更多的存储空间。" +DDLCheckCharLengthDesc = "禁止char, varchar类型字段字符长度总和超过阈值" +DDLCheckCharLengthMessage = "禁止char, varchar类型字段字符长度总和超过阈值 %v" +DDLCheckCharLengthParams1 = "字符长度" +DDLCheckColumnBlobDefaultIsNotNullAnnotation = "在SQL_MODE严格模式下BLOB 和 TEXT 类型无法设置默认值,如插入数据不指定值,字段会被设置为NULL" +DDLCheckColumnBlobDefaultIsNotNullDesc = "BLOB 和 TEXT 类型的字段默认值只能为NULL" +DDLCheckColumnBlobDefaultIsNotNullMessage = "BLOB 和 TEXT 类型的字段默认值只能为NULL" +DDLCheckColumnBlobNoticeAnnotation = "BLOB 或 TEXT 类型消耗大量的网络和IO带宽,同时在该表上的DML操作都会变得很慢" +DDLCheckColumnBlobNoticeDesc = "不建议使用 BLOB 或 TEXT 类型" +DDLCheckColumnBlobNoticeMessage = "不建议使用 BLOB 或 TEXT 类型" +DDLCheckColumnBlobWithNotNullAnnotation = "BLOB 和 TEXT 类型的字段无法指定默认值,如插入数据不指定字段默认为NULL,如果添加了 NOT NULL 限制,写入数据时又未对该字段指定值会导致写入失败" +DDLCheckColumnBlobWithNotNullDesc = "BLOB 和 TEXT 类型的字段不建议设置为 NOT NULL" +DDLCheckColumnBlobWithNotNullMessage = "BLOB 和 TEXT 类型的字段不建议设置为 NOT NULL" +DDLCheckColumnCharLengthAnnotation = "VARCHAR是变长字段,存储空间小,可节省存储空间,同时相对较小的字段检索效率显然也要高些" +DDLCheckColumnCharLengthDesc = "CHAR长度大于20时,必须使用VARCHAR类型" +DDLCheckColumnCharLengthMessage = "CHAR长度大于20时,必须使用VARCHAR类型" +DDLCheckColumnEnumNoticeAnnotation = "ENUM类型不是SQL标准,移植性较差,后期如修改或增加枚举值需重建整张表,代价较大,且无法通过字面量值进行排序" +DDLCheckColumnEnumNoticeDesc = "不建议使用 ENUM 类型" +DDLCheckColumnEnumNoticeMessage = "不建议使用 ENUM 类型" 
+DDLCheckColumnNotNULLAnnotation = "表字段建议有 NOT NULL 约束,可确保数据的完整性,防止插入空值,提升查询准确性。" +DDLCheckColumnNotNULLDesc = "表字段建议有NOT NULL约束" +DDLCheckColumnNotNULLMessage = "建议字段%v设置NOT NULL约束" +DDLCheckColumnQuantityAnnotation = "避免在OLTP系统上做宽表设计,后期对性能影响很大;具体规则阈值可根据业务需求调整,默认值:40" +DDLCheckColumnQuantityDesc = "表的列数不建议超过阈值" +DDLCheckColumnQuantityInPKAnnotation = "主建中的列过多,会导致二级索引占用更多的空间,同时增加索引维护的开销;具体规则阈值可根据业务需求调整,默认值:2" +DDLCheckColumnQuantityInPKDesc = "主键包含的列数不建议超过阈值" +DDLCheckColumnQuantityInPKMessage = "主键包含的列数不建议超过阈值" +DDLCheckColumnQuantityInPKParams1 = "最大列数" +DDLCheckColumnQuantityMessage = "表的列数不建议超过阈值" +DDLCheckColumnQuantityParams1 = "最大列数" +DDLCheckColumnSetNoticeAnnotation = "集合的修改需要重新定义列,后期修改的代价大,建议在业务层实现" +DDLCheckColumnSetNoticeDesc = "不建议使用 SET 类型" +DDLCheckColumnSetNoticeMessage = "不建议使用 SET 类型" +DDLCheckColumnTimestampWithoutDefaultAnnotation = "TIMESTAMP添加默认值,可避免出现全为0的日期格式与业务预期不符" +DDLCheckColumnTimestampWithoutDefaultDesc = "TIMESTAMP 类型的列必须添加默认值" +DDLCheckColumnTimestampWithoutDefaultMessage = "TIMESTAMP 类型的列必须添加默认值" +DDLCheckColumnTypeIntegerAnnotation = "INT(M) 或 BIGINT(M),M 表示最大显示宽度,可存储最大值的宽度分别为10、20,采用 INT(10) 或 BIGINT(20)可避免发生显示截断的可能" +DDLCheckColumnTypeIntegerDesc = "整型定义建议采用 INT(10) 或 BIGINT(20)" +DDLCheckColumnTypeIntegerMessage = "整型定义建议采用 INT(10) 或 BIGINT(20)" +DDLCheckColumnWithoutCommentAnnotation = "列添加注释能够使列的意义更明确,方便日后的维护" +DDLCheckColumnWithoutCommentDesc = "列建议添加注释" +DDLCheckColumnWithoutCommentMessage = "列建议添加注释" +DDLCheckColumnWithoutDefaultAnnotation = "列添加默认值,可避免列为NULL值时对查询的影响" +DDLCheckColumnWithoutDefaultDesc = "除了自增列及大字段列之外,每个列都必须添加默认值" +DDLCheckColumnWithoutDefaultMessage = "除了自增列及大字段列之外,每个列都必须添加默认值" +DDLCheckCompositeIndexDistinctionAnnotation = "将区分度高的字段靠前放置在组合索引中有助于提高索引的查询性能,因为它能更快地减小数据范围,提高检索效率。" +DDLCheckCompositeIndexDistinctionDesc = "建议在组合索引中将区分度高的字段靠前放" +DDLCheckCompositeIndexDistinctionMessage = "建议在组合索引中将区分度高的字段靠前放,%v" +DDLCheckCompositeIndexMaxAnnotation = 
"复合索引会根据索引列数创建对应组合的索引,列数越多,创建的索引越多,每个索引都会增加磁盘空间的开销,同时增加索引维护的开销;具体规则阈值可以根据业务需求调整,默认值:3" +DDLCheckCompositeIndexMaxDesc = "复合索引的列数量不建议超过阈值" +DDLCheckCompositeIndexMaxMessage = "复合索引的列数量不建议超过%v个" +DDLCheckCompositeIndexMaxParams1 = "最大索引列数量" +DDLCheckCreateFunctionAnnotation = "自定义函数,维护较差,且依赖性高会导致SQL无法跨库使用" +DDLCheckCreateFunctionDesc = "禁止使用自定义函数" +DDLCheckCreateFunctionMessage = "禁止使用自定义函数" +DDLCheckCreateProcedureAnnotation = "存储过程在一定程度上会使程序难以调试和拓展,各种数据库的存储过程语法相差很大,给将来的数据库移植带来很大的困难,且会极大的增加出现BUG的概率" +DDLCheckCreateProcedureDesc = "禁止使用存储过程" +DDLCheckCreateProcedureMessage = "禁止使用存储过程" +DDLCheckCreateTimeColumnAnnotation = "使用CREATE_TIME字段,有利于问题查找跟踪和检索数据,同时避免后期对数据生命周期管理不便 ,默认值为CURRENT_TIMESTAMP可保证时间的准确性" +DDLCheckCreateTimeColumnDesc = "建议建表DDL包含创建时间字段且默认值为CURRENT_TIMESTAMP" +DDLCheckCreateTimeColumnMessage = "建议建表DDL包含%v字段且默认值为CURRENT_TIMESTAMP" +DDLCheckCreateTimeColumnParams1 = "创建时间字段名" +DDLCheckCreateTriggerAnnotation = "触发器难以开发和维护,不能高效移植,且在复杂的逻辑以及高并发下,容易出现死锁影响业务" +DDLCheckCreateTriggerDesc = "禁止使用触发器" +DDLCheckCreateTriggerMessage = "禁止使用触发器" +DDLCheckCreateViewAnnotation = "视图的查询性能较差,同时基表结构变更,需要对视图进行维护,如果视图可读性差且包含复杂的逻辑,都会增加维护的成本" +DDLCheckCreateViewDesc = "禁止使用视图" +DDLCheckCreateViewMessage = "禁止使用视图" +DDLCheckDatabaseCollationAnnotation = "通过该规则约束全局的数据库排序规则,避免创建非预期的数据库排序规则,防止业务侧出现排序结果非预期等问题。建议项目内库表使用统一的字符集和字符集排序,部分连表查询的情况下字段的字符集或排序规则不一致可能会导致索引失效且不易发现" +DDLCheckDatabaseCollationDesc = "建议使用规定的数据库排序规则" +DDLCheckDatabaseCollationMessage = "建议使用规定的数据库排序规则为%s" +DDLCheckDatabaseCollationParams1 = "数据库排序规则" +DDLCheckDatabaseSuffixAnnotation = "通过配置该规则可以规范指定业务的数据库命名规则,具体命名规范可以自定义设置,默认提示值:_DB" +DDLCheckDatabaseSuffixDesc = "建议数据库名称使用固定后缀结尾" +DDLCheckDatabaseSuffixMessage = "建议数据库名称以\"%v\"结尾" +DDLCheckDatabaseSuffixParams1 = "数据库名称后缀" +DDLCheckDecimalTypeColumnAnnotation = "对于浮点数运算,DECIMAL精确度较高" +DDLCheckDecimalTypeColumnDesc = "精确浮点数建议使用DECIMAL" +DDLCheckDecimalTypeColumnMessage = "精确浮点数建议使用DECIMAL" +DDLCheckFieldNotNUllMustContainDefaultValueAnnotation = "如存在NOT 
NULL且不带默认值的字段,INSERT时不包含该字段,会导致插入报错" +DDLCheckFieldNotNUllMustContainDefaultValueDesc = "建议字段约束为NOT NULL时带默认值" +DDLCheckFieldNotNUllMustContainDefaultValueMessage = "建议字段约束为NOT NULL时带默认值,以下字段不规范:%v" +DDLCheckFullWidthQuotationMarksAnnotation = "建议开启此规则,可避免MySQL会将中文全角引号识别为命名的一部分,执行结果与业务预期不符" +DDLCheckFullWidthQuotationMarksDesc = "DDL语句中不建议使用中文全角引号" +DDLCheckFullWidthQuotationMarksMessage = "DDL语句中不建议使用中文全角引号,这可能是书写错误" +DDLCheckIndexCountAnnotation = "在表上建立的每个索引都会增加存储开销,索引对于插入、删除、更新操作也会增加处理上的开销,太多与不充分、不正确的索引对性能都毫无益处;具体规则阈值可以根据业务需求调整,默认值:5" +DDLCheckIndexCountDesc = "索引个数建议不超过阈值" +DDLCheckIndexCountMessage = "索引个数建议不超过%v个" +DDLCheckIndexCountParams1 = "最大索引个数" +DDLCheckIndexNotNullConstraintAnnotation = "索引字段上如果没有非空约束,则表记录与索引记录不会完全映射。" +DDLCheckIndexNotNullConstraintDesc = "索引字段需要有非空约束" +DDLCheckIndexNotNullConstraintMessage = "这些索引字段(%v)需要有非空约束" +DDLCheckIndexOptionAnnotation = "选择区分度高的字段作为索引,可快速定位数据;区分度太低,无法有效利用索引,甚至可能需要扫描大量数据页,拖慢SQL;具体规则阈值可以根据业务需求调整,默认值:70" +DDLCheckIndexOptionDesc = "建议索引字段对区分度大于阈值" +DDLCheckIndexOptionMessage = "索引 %v 未超过区分度阈值 百分之%v, 不建议选为索引" +DDLCheckIndexOptionParams1 = "可选择性(百分比)" +DDLCheckIndexPrefixAnnotation = "通过配置该规则可以规范指定业务的索引命名规则,具体命名规范可以自定义设置,默认提示值:idx_" +DDLCheckIndexPrefixDesc = "建议普通索引使用固定前缀" +DDLCheckIndexPrefixMessage = "建议普通索引要以\"%v\"为前缀" +DDLCheckIndexPrefixParams1 = "索引前缀" +DDLCheckIndexTooManyAnnotation = "单字段上存在过多索引,一般情况下这些索引都是没有存在价值的;相反,还会降低数据增加删除时的性能,特别是对频繁更新的表来说,负面影响更大;具体规则阈值可以根据业务需求调整,默认值:2" +DDLCheckIndexTooManyDesc = "单字段上的索引数量不建议超过阈值" +DDLCheckIndexTooManyMessage = "字段 %v 上的索引数量不建议超过%v个" +DDLCheckIndexTooManyParams1 = "单字段的索引数最大值" +DDLCheckIndexedColumnWithBlobAnnotation = "BLOB类型属于大字段类型,作为索引会占用很大的存储空间" +DDLCheckIndexedColumnWithBlobDesc = "禁止将BLOB类型的列加入索引" +DDLCheckIndexedColumnWithBlobMessage = "禁止将BLOB类型的列加入索引" +DDLCheckIndexesExistBeforeCreateConstraintsAnnotation = "创建约束前,先行创建索引,约束可作用于二级索引,避免全表扫描,提高性能" +DDLCheckIndexesExistBeforeCreateConstraintsDesc = "对字段创建约束前,建议先创建索引" 
+DDLCheckIndexesExistBeforeCreateConstraintsMessage = "对字段创建约束前,建议先创建索引" +DDLCheckIsExistLimitOffsetAnnotation = "例如:LIMIT N OFFSET M 或 LIMIT M,N。当偏移量m过大的时候,查询效率会很低,因为MySQL是先查出m+n个数据,然后抛弃掉前m个数据;对于有大数据量的MySQL表来说,使用LIMIT分页存在很严重的性能问题" +DDLCheckIsExistLimitOffsetDesc = "使用分页查询时,避免使用偏移量" +DDLCheckIsExistLimitOffsetMessage = "使用分页查询时,避免使用偏移量" +DDLCheckObjectNameIsUpperAndLowerLetterMixedAnnotation = "数据库对象命名规范,不推荐采用大小写混用的形式建议词语之间使用下划线连接,提高代码可读性" +DDLCheckObjectNameIsUpperAndLowerLetterMixedDesc = "数据库对象命名不建议大小写字母混合" +DDLCheckObjectNameIsUpperAndLowerLetterMixedMessage = "数据库对象命名不建议大小写字母混合,以下对象命名不规范:%v" +DDLCheckObjectNameLengthAnnotation = "通过配置该规则可以规范指定业务的对象命名长度,具体长度可以自定义设置,默认最大长度:64。是MySQL规定标识符命名最大长度为64字节" +DDLCheckObjectNameLengthDesc = "表名、列名、索引名的长度不建议超过阈值" +DDLCheckObjectNameLengthMessage = "表名、列名、索引名的长度不建议大于%v字节" +DDLCheckObjectNameLengthParams1 = "最大长度(字节)" +DDLCheckObjectNameUseCNAnnotation = "通过配置该规则可以规范指定业务的数据对象命名规则" +DDLCheckObjectNameUseCNDesc = "数据库对象命名只能使用英文、下划线或数字,首字母必须是英文" +DDLCheckObjectNameUseCNMessage = "数据库对象命名只能使用英文、下划线或数字,首字母必须是英文" +DDLCheckObjectNameUsingKeywordAnnotation = "通过配置该规则可以规范指定业务的数据对象命名规则,避免发生冲突,以及混淆" +DDLCheckObjectNameUsingKeywordDesc = "数据库对象命名禁止使用保留字" +DDLCheckObjectNameUsingKeywordMessage = "数据库对象命名禁止使用保留字 %s" +DDLCheckPKNameAnnotation = "通过配置该规则可以规范指定业务的主键命名规则" +DDLCheckPKNameDesc = "建议主键命名为\"PK_表名\"" +DDLCheckPKNameMessage = "建议主键命名为\"PK_表名\"" +DDLCheckPKNotExistAnnotation = "主键使数据达到全局唯一,可提高数据检索效率" +DDLCheckPKNotExistDesc = "表必须有主键" +DDLCheckPKNotExistMessage = "表必须有主键" +DDLCheckPKProhibitAutoIncrementAnnotation = "后期维护相对不便,过于依赖数据库自增机制达到全局唯一,不易拆分,容易造成主键冲突" +DDLCheckPKProhibitAutoIncrementDesc = "不建议主键使用自增" +DDLCheckPKProhibitAutoIncrementMessage = "不建议主键使用自增" +DDLCheckPKWithoutAutoIncrementAnnotation = "自增主键,数字型速度快,而且是增量增长,占用空间小,更快速的做数据插入操作,避免增加维护索引的开销" +DDLCheckPKWithoutAutoIncrementDesc = "主键建议使用自增" +DDLCheckPKWithoutAutoIncrementMessage = "主键建议使用自增" +DDLCheckPKWithoutBigintUnsignedAnnotation = "BIGINT 
UNSIGNED拥有更大的取值范围,建议开启此规则,避免发生溢出" +DDLCheckPKWithoutBigintUnsignedDesc = "主键建议使用 BIGINT 无符号类型,即 BIGINT UNSIGNED" +DDLCheckPKWithoutBigintUnsignedMessage = "主键建议使用 BIGINT 无符号类型,即 BIGINT UNSIGNED" +DDLCheckPKWithoutIfNotExistsAnnotation = "新建表如果表已经存在,不添加IF NOT EXISTS CREATE执行SQL会报错,建议开启此规则,避免SQL实际执行报错" +DDLCheckPKWithoutIfNotExistsDesc = "新建表建议加入 IF NOT EXISTS,保证重复执行不报错" +DDLCheckPKWithoutIfNotExistsMessage = "新建表建议加入 IF NOT EXISTS,保证重复执行不报错" +DDLCheckRedundantIndexAnnotation = "MySQL需要单独维护重复的索引,冗余索引增加维护成本,并且优化器在优化查询时需要逐个进行代价计算,影响查询性能" +DDLCheckRedundantIndexDesc = "不建议创建冗余索引" +DDLCheckRedundantIndexMessage = "%v" +DDLCheckTableCharacterSetAnnotation = "通过该规则约束全局的数据库字符集,避免创建非预期的字符集,防止业务侧出现“乱码”等问题。建议项目内库表使用统一的字符集和字符集排序,部分连表查询的情况下字段的字符集或排序规则不一致可能会导致索引失效且不易发现" +DDLCheckTableCharacterSetDesc = "建议使用指定数据库字符集" +DDLCheckTableCharacterSetMessage = "建议使用%v数据库字符集" +DDLCheckTableCharacterSetParams1 = "数据库字符集" +DDLCheckTableDBEngineAnnotation = "通过配置该规则可以规范指定业务的数据库引擎,具体规则可以自定义设置。默认值是INNODB,INNODB 支持事务,支持行级锁,更好的恢复性,高并发下性能更好" +DDLCheckTableDBEngineDesc = "建议使用指定数据库引擎" +DDLCheckTableDBEngineMessage = "建议使用%v数据库引擎" +DDLCheckTableDBEngineParams1 = "数据库引擎" +DDLCheckTablePartitionAnnotation = "分区表在物理上表现为多个文件,在逻辑上表现为一个表,跨分区查询效率可能更低,建议采用物理分表的方式管理大数据" +DDLCheckTablePartitionDesc = "不建议使用分区表相关功能" +DDLCheckTablePartitionMessage = "不建议使用分区表相关功能" +DDLCheckTableRowsAnnotation = "当表行数超过阈值时,对表进行拆分有助于提高数据库性能和查询速度。" +DDLCheckTableRowsDesc = "表行数超过阈值,建议对表进行拆分" +DDLCheckTableRowsMessage = "表行数超过阈值,建议对表进行拆分" +DDLCheckTableRowsParams1 = "表行数(万)" +DDLCheckTableSizeAnnotation = "大表执行DDL,耗时较久且负载较高,长时间占用锁资源,会影响数据库性能;具体规则阈值可以根据业务需求调整,默认值:1024" +DDLCheckTableSizeDesc = "不建议对数据量过大的表执行DDL操作" +DDLCheckTableSizeMessage = "执行DDL的表 %v 空间不建议超过 %vMB" +DDLCheckTableSizeParams1 = "表空间大小(MB)" +DDLCheckTableWithoutCommentAnnotation = "表添加注释能够使表的意义更明确,方便日后的维护" +DDLCheckTableWithoutCommentDesc = "表建议添加注释" +DDLCheckTableWithoutCommentMessage = "表建议添加注释" +DDLCheckTransactionIsolationLevelAnnotation = 
"RC避免了脏读的现象,但没有解决幻读的问题;使用RR,能避免幻读,但是由于引入间隙锁导致加锁的范围可能扩大,从而会影响并发,还容易造成死锁,所以在大多数业务场景下,幻读出现的机率较少,RC基本上能满足业务需求" +DDLCheckTransactionIsolationLevelDesc = "事物隔离级别建议设置成RC" +DDLCheckTransactionIsolationLevelMessage = "事物隔离级别建议设置成RC" +DDLCheckUniqueIndexAnnotation = "通过配置该规则可以规范指定业务的UNIQUE索引命名规则" +DDLCheckUniqueIndexDesc = "建议UNIQUE索引名使用 IDX_UK_表名_字段名" +DDLCheckUniqueIndexMessage = "建议UNIQUE索引名使用 IDX_UK_表名_字段名" +DDLCheckUniqueIndexPrefixAnnotation = "通过配置该规则可以规范指定业务的UNIQUE索引命名规则,具体命名规范可以自定义设置,默认提示值:uniq_" +DDLCheckUniqueIndexPrefixDesc = "建议UNIQUE索引使用固定前缀" +DDLCheckUniqueIndexPrefixMessage = "建议UNIQUE索引要以\"%v\"为前缀" +DDLCheckUniqueIndexPrefixParams1 = "索引前缀" +DDLCheckUpdateTimeColumnAnnotation = "使用更新时间字段,有利于问题查找跟踪和检索数据,同时避免后期对数据生命周期管理不便 ,默认值为UPDATE_TIME可保证时间的准确性" +DDLCheckUpdateTimeColumnDesc = "建表DDL需要包含更新时间字段且默认值为CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP" +DDLCheckUpdateTimeColumnMessage = "建表DDL需要包含%v字段且默认值为CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP" +DDLCheckUpdateTimeColumnParams1 = "更新时间字段名" +DDLCheckVarcharSizeAnnotation = "MySQL建立索引时没有限制索引的大小,索引长度会默认采用的该字段的长度,VARCHAR 定义长度越长建立的索引存储大小越大;具体规则阈值可以根据业务需求调整,默认值:1024" +DDLCheckVarcharSizeDesc = "定义VARCHAR 长度时不建议大于阈值" +DDLCheckVarcharSizeMessage = "定义VARCHAR 长度时不建议大于阈值, 阈值为%d" +DDLCheckVarcharSizeParams1 = "VARCHAR最大长度" +DDLDisableAlterFieldUseFirstAndAfterAnnotation = "FIRST,AFTER 的ALTER操作通过COPY TABLE的方式完成,对业务影响较大" +DDLDisableAlterFieldUseFirstAndAfterDesc = "ALTER表字段禁止使用FIRST,AFTER" +DDLDisableAlterFieldUseFirstAndAfterMessage = "ALTER表字段禁止使用FIRST,AFTER" +DDLDisableDropStatementAnnotation = "DROP是DDL,数据变更不会写入日志,无法进行回滚;建议开启此规则,避免误删除操作" +DDLDisableDropStatementDesc = "禁止除索引外的DROP操作" +DDLDisableDropStatementMessage = "禁止除索引外的DROP操作" +DDLDisableFKAnnotation = "外键在高并发场景下性能较差,容易造成死锁,同时不利于后期维护(拆分、迁移)" +DDLDisableFKDesc = "禁止使用外键" +DDLDisableFKMessage = "禁止使用外键" +DDLDisableTypeTimestampAnnotation = "TIMESTAMP 有最大值限制('2038-01-19 03:14:07' UTC),且会时区转换的问题" +DDLDisableTypeTimestampDesc = "不建议使用TIMESTAMP字段" 
+DDLDisableTypeTimestampMessage = "不建议使用TIMESTAMP字段" +DDLHintDropColumnAnnotation = "业务逻辑与删除列依赖未完全消除,列被删除后可能导致程序异常(无法正常读写)的情况;开启该规则,SQLE将提醒删除列为高危操作" +DDLHintDropColumnDesc = "禁止进行删除列的操作" +DDLHintDropColumnMessage = "禁止进行删除列的操作" +DDLHintDropForeignKeyAnnotation = "删除已有约束会影响已有业务逻辑;开启该规则,SQLE将提醒删除外键为高危操作" +DDLHintDropForeignKeyDesc = "禁止进行删除外键的操作" +DDLHintDropForeignKeyMessage = "禁止进行删除外键的操作" +DDLHintDropPrimaryKeyAnnotation = "删除已有约束会影响已有业务逻辑;开启该规则,SQLE将提醒删除主键为高危操作" +DDLHintDropPrimaryKeyDesc = "禁止进行删除主键的操作" +DDLHintDropPrimaryKeyMessage = "禁止进行删除主键的操作" +DDLHintUpdateTableCharsetWillNotUpdateFieldCharsetAnnotation = "修改表的默认字符集,只会影响后续新增的字段,不会修表已有字段的字符集;如需修改整张表所有字段的字符集建议开启此规则" +DDLHintUpdateTableCharsetWillNotUpdateFieldCharsetDesc = "不建议修改表的默认字符集" +DDLHintUpdateTableCharsetWillNotUpdateFieldCharsetMessage = "不建议修改表的默认字符集" +DDLNotAllowRenamingAnnotation = "RENAME/CHANGE 表名/列名会对线上业务不停机发布造成影响,如需这种操作应当DBA手工干预" +DDLNotAllowRenamingDesc = "禁止使用RENAME或CHANGE对表名字段名进行修改" +DDLNotAllowRenamingMessage = "禁止使用RENAME或CHANGE对表名字段名进行修改" +DDLRecommendTableColumnCharsetSameAnnotation = "统一字符集可以避免由于字符集转换产生的乱码,不同的字符集进行比较前需要进行转换会造成索引失效" +DDLRecommendTableColumnCharsetSameDesc = "建议列与表使用同一个字符集" +DDLRecommendTableColumnCharsetSameMessage = "建议列与表使用同一个字符集" +DMLAvoidWhereEqualNullAnnotation = "NULL在SQL中属于特殊值,无法与普通值进行比较。例如:column = NULL恒为false,即使column存在null值也不会查询出来,所以column = NULL应该写为column is NULL" +DMLAvoidWhereEqualNullDesc = "WHERE子句中禁止将NULL值与其他字段或值进行比较运算" +DMLAvoidWhereEqualNullMessage = "WHERE子句中禁止将NULL值与其他字段或值进行比较运算" +DMLCheckAffectedRowsAnnotation = "如果 DML 操作影响行数过多,会导致查询性能下降,因为需要扫描更多的数据。" +DMLCheckAffectedRowsDesc = "UPDATE/DELETE操作影响行数不建议超过阈值" +DMLCheckAffectedRowsMessage = "UPDATE/DELETE操作影响行数不建议超过阈值,影响行数为 %v,超过设定阈值 %v" +DMLCheckAffectedRowsParams1 = "最大影响行数" +DMLCheckAggregateAnnotation = "不建议使用SQL聚合函数,是为了确保查询的简单性、高性能和数据一致性。" +DMLCheckAggregateDesc = "不建议使用聚合函数" +DMLCheckAggregateMessage = "不建议使用聚合函数计算" +DMLCheckAliasAnnotation = "表或列的别名与其真实名称相同, 这样的别名会使得查询更难去分辨" 
+DMLCheckAliasDesc = "别名不建议与表或列的名字相同" +DMLCheckAliasMessage = "这些别名(%v)与列名或表名相同" +DMLCheckBatchInsertListsMaxAnnotation = "避免大事务,以及降低发生回滚对业务的影响;具体规则阈值可以根据业务需求调整,默认值:100" +DMLCheckBatchInsertListsMaxDesc = "单条INSERT语句,建议批量插入不超过阈值" +DMLCheckBatchInsertListsMaxMessage = "单条INSERT语句,建议批量插入不超过%v条" +DMLCheckBatchInsertListsMaxParams1 = "最大插入行数" +DMLCheckExplainAccessTypeAllAnnotation = "全表扫描时,扫描行数不建议超过指定行数是为了避免性能问题;具体规则阈值可以根据业务需求调整,默认值:10000;如果设置为0,全表扫描都会触发规则" +DMLCheckExplainAccessTypeAllDesc = "全表扫描时,扫描行数不建议超过指定行数(默认值:10000)" +DMLCheckExplainAccessTypeAllMessage = "该查询使用了全表扫描并且扫描行数为%v" +DMLCheckExplainAccessTypeAllParams1 = "最大扫描行数" +DMLCheckExplainExtraUsingFilesortAnnotation = "大数据量的情况下,文件排序意味着SQL性能较低,会增加OS的开销,影响数据库性能" +DMLCheckExplainExtraUsingFilesortDesc = "不建议使用文件排序" +DMLCheckExplainExtraUsingFilesortMessage = "不建议使用文件排序" +DMLCheckExplainExtraUsingIndexForSkipScanAnnotation = "索引扫描是跳跃扫描,未遵循最左匹配原则,可能降低索引的使用效率,影响查询性能" +DMLCheckExplainExtraUsingIndexForSkipScanDesc = "不建议对表进行索引跳跃扫描" +DMLCheckExplainExtraUsingIndexForSkipScanMessage = "不建议对表进行索引跳跃扫描" +DMLCheckExplainExtraUsingTemporaryAnnotation = "大数据量的情况下,临时表意味着SQL性能较低,会增加OS的开销,影响数据库性能" +DMLCheckExplainExtraUsingTemporaryDesc = "不建议使用临时表" +DMLCheckExplainExtraUsingTemporaryMessage = "不建议使用临时表" +DMLCheckExplainFullIndexScanAnnotation = "在数据量大的情况下索引全扫描严重影响SQL性能。" +DMLCheckExplainFullIndexScanDesc = "不建议对表进行全索引扫描" +DMLCheckExplainFullIndexScanMessage = "不建议对表进行全索引扫描" +DMLCheckExplainUsingIndexAnnotation = "使用索引可以显著提高SQL查询的性能。" +DMLCheckExplainUsingIndexDesc = "SQL查询条件需要走索引" +DMLCheckExplainUsingIndexMessage = "建议使用索引以优化 SQL 查询性能" +DMLCheckFuzzySearchAnnotation = "使用全模糊搜索或左模糊搜索将导致查询无法使用索引,导致全表扫描" +DMLCheckFuzzySearchDesc = "禁止使用全模糊搜索或左模糊搜索" +DMLCheckFuzzySearchMessage = "禁止使用全模糊搜索或左模糊搜索" +DMLCheckHasJoinConditionAnnotation = "指定连接条件可以确保连接操作的正确性和可靠性,如果没有指定连接条件,可能会导致连接失败或连接不正确的情况。" +DMLCheckHasJoinConditionDesc = "建议连接操作指定连接条件" +DMLCheckHasJoinConditionMessage = "建议连接操作指定连接条件,JOIN字段后必须有ON条件" 
+DMLCheckIfAfterUnionDistinctAnnotation = "UNION会按照字段的顺序进行排序同时去重,UNION ALL只是简单的将两个结果合并后就返回,从效率上看,UNION ALL 要比UNION快很多;如果合并的两个结果集中允许包含重复数据且不需要排序时的话,建议开启此规则,使用UNION ALL替代UNION" +DMLCheckIfAfterUnionDistinctDesc = "建议使用UNION ALL,替代UNION" +DMLCheckIfAfterUnionDistinctMessage = "建议使用UNION ALL,替代UNION" +DMLCheckInQueryNumberAnnotation = "当IN值过多时,有可能会导致查询进行全表扫描,使得MySQL性能急剧下降;具体规则阈值可以根据业务需求调整,默认值:50" +DMLCheckInQueryNumberDesc = "WHERE条件内IN语句中的参数个数不能超过阈值" +DMLCheckInQueryNumberMessage = "WHERE条件内IN语句中的参数已有%v个,不建议超过阙值%v" +DMLCheckInQueryNumberParams1 = "in语句参数最大个数" +DMLCheckIndexSelectivityAnnotation = "确保SQL执行计划中使用的高索引区分度,有助于提升查询性能并优化查询效率。" +DMLCheckIndexSelectivityDesc = "建议连库查询时,确保SQL执行计划中使用的索引区分度大于阈值" +DMLCheckIndexSelectivityMessage = "索引:%v,未超过区分度阈值:%v,建议使用超过阈值的索引。" +DMLCheckIndexSelectivityParams1 = "可选择性(百分比)" +DMLCheckInsertColumnsExistAnnotation = "当表结构发生变更,INSERT请求不明确指定列名,会发生插入数据不匹配的情况;建议开启此规则,避免插入结果与业务预期不符" +DMLCheckInsertColumnsExistDesc = "INSERT 语句需要指定COLUMN" +DMLCheckInsertColumnsExistMessage = "INSERT 语句需要指定COLUMN" +DMLCheckInsertSelectAnnotation = "使用 INSERT ... SELECT 在默认事务隔离级别下,可能会导致对查询的表施加表级锁。" +DMLCheckInsertSelectDesc = "不建议使用INSERT ... SELECT" +DMLCheckInsertSelectMessage = "不建议使用INSERT ... 
SELECT" +DMLCheckJoinFieldCharacterSetAndCollationAnnotation = "连接表字段的字符集和排序规则一致可避免数据不一致和查询错误,确保连接操作正确执行。" +DMLCheckJoinFieldCharacterSetAndCollationDesc = "连接表字段的字符集和排序规则必须一致" +DMLCheckJoinFieldCharacterSetAndCollationMessage = "连接表字段的字符集和排序规则必须一致" +DMLCheckJoinFieldTypeAnnotation = "JOIN字段类型不一致会导致类型不匹配发生隐式准换,建议开启此规则,避免索引失效" +DMLCheckJoinFieldTypeDesc = "建议JOIN字段类型保持一致" +DMLCheckJoinFieldTypeMessage = "建议JOIN字段类型保持一致, 否则会导致隐式转换" +DMLCheckJoinFieldUseIndexAnnotation = "JOIN字段包含索引可提高连接操作的性能和查询速度。" +DMLCheckJoinFieldUseIndexDesc = "JOIN字段必须包含索引" +DMLCheckJoinFieldUseIndexMessage = "JOIN字段必须包含索引" +DMLCheckLimitMustExistAnnotation = "LIMIT条件可以降低写错 SQL 的代价(删错数据),同时避免长事务影响业务" +DMLCheckLimitMustExistDesc = "建议DELETE/UPDATE 语句带有LIMIT条件" +DMLCheckLimitMustExistMessage = "建议DELETE/UPDATE 语句带有LIMIT条件" +DMLCheckLimitOffsetNumAnnotation = "因为OFFSET指定了结果集的起始位置,如果起始位置过大,那么 MySQL 需要处理更多的数据才能返回结果集,这可能会导致查询性能下降。" +DMLCheckLimitOffsetNumDesc = "不建议LIMIT的偏移OFFSET大于阈值" +DMLCheckLimitOffsetNumMessage = "不建议LIMIT的偏移OFFSET大于阈值,OFFSET=%v(阈值为%v)" +DMLCheckLimitOffsetNumParams1 = "offset 大小" +DMLCheckMathComputationOrFuncOnIndexAnnotation = "对索引列进行数学运算和使用函数会导致索引失效,从而导致全表扫描,影响查询性能。" +DMLCheckMathComputationOrFuncOnIndexDesc = "禁止对索引列进行数学运算和使用函数" +DMLCheckMathComputationOrFuncOnIndexMessage = "禁止对索引列进行数学运算和使用函数" +DMLCheckNeedlessFuncAnnotation = "通过配置该规则可以指定业务中需要禁止使用的内置函数,使用内置函数可能会导致SQL无法走索引或者产生一些非预期的结果。实际需要禁用的函数可通过规则设置" +DMLCheckNeedlessFuncDesc = "避免使用不必要的内置函数" +DMLCheckNeedlessFuncMessage = "避免使用不必要的内置函数%v" +DMLCheckNeedlessFuncParams1 = "指定的函数集合(逗号分割)" +DMLCheckNotEqualSymbolAnnotation = "'!=' 是非标准的运算符,'<>' 才是SQL中标准的不等于运算符" +DMLCheckNotEqualSymbolDesc = "建议使用'<>'代替'!='" +DMLCheckNotEqualSymbolMessage = "建议使用'<>'代替'!='" +DMLCheckNumberOfJoinTablesAnnotation = "表关联越多,意味着各种驱动关系组合就越多,比较各种结果集的执行成本的代价也就越高,进而SQL查询性能会大幅度下降;具体规则阈值可以根据业务需求调整,默认值:3" +DMLCheckNumberOfJoinTablesDesc = "使用JOIN连接表查询建议不超过阈值" +DMLCheckNumberOfJoinTablesMessage = "使用JOIN连接表查询建议不超过%v张" +DMLCheckNumberOfJoinTablesParams1 = 
"最大连接表个数" +DMLCheckSQLInjectionFuncAnnotation = "攻击者通过SQL注入,可未经授权可访问数据库中的数据,存在盗取用户信息,造成用户数据泄露等安全漏洞问题" +DMLCheckSQLInjectionFuncDesc = "不建议使用常见 SQL 注入函数" +DMLCheckSQLInjectionFuncMessage = "不建议使用常见 SQL 注入函数" +DMLCheckSQLLengthAnnotation = "过长的SQL可读性较差,难以维护,且容易引发性能问题;具体规则阈值可以根据业务需求调整,默认值:1024" +DMLCheckSQLLengthDesc = "建议将过长的SQL分解成几个简单的SQL" +DMLCheckSQLLengthMessage = "建议将过长的SQL分解成几个简单的SQL" +DMLCheckSQLLengthParams1 = "SQL最大长度" +DMLCheckSameTableJoinedMultipleTimesAnnotation = "如果对单表查询多次,会导致查询性能下降。" +DMLCheckSameTableJoinedMultipleTimesDesc = "不建议对同一张表连接多次" +DMLCheckSameTableJoinedMultipleTimesMessage = "表%v被连接多次" +DMLCheckScanRowsAnnotation = "筛选条件必须带上主键或索引可降低数据库查询的时间复杂度,提高查询效率。" +DMLCheckScanRowsDesc = "扫描行数超过阈值,筛选条件必须带上主键或者索引" +DMLCheckScanRowsMessage = "扫描行数超过阈值,筛选条件必须带上主键或者索引" +DMLCheckScanRowsParams1 = "扫描行数量(万)" +DMLCheckSelectForUpdateAnnotation = "SELECT FOR UPDATE 会对查询结果集中每行数据都添加排他锁,其他线程对该记录的更新与删除操作都会阻塞,在高并发下,容易造成数据库大量锁等待,影响数据库查询性能" +DMLCheckSelectForUpdateDesc = "不建议使用SELECT FOR UPDATE" +DMLCheckSelectForUpdateMessage = "不建议使用SELECT FOR UPDATE" +DMLCheckSelectLimitAnnotation = "如果查询的扫描行数很大,可能会导致优化器选择错误的索引甚至不走索引;具体规则阈值可以根据业务需求调整,默认值:1000" +DMLCheckSelectLimitDesc = "SELECT 语句需要带LIMIT" +DMLCheckSelectLimitMessage = "SELECT 语句需要带LIMIT,且限制数不得超过%v" +DMLCheckSelectLimitParams1 = "最大查询行数" +DMLCheckSelectRowsAnnotation = "筛选条件必须带上主键或索引可提高查询性能和减少全表扫描的成本。" +DMLCheckSelectRowsDesc = "查询数据量超过阈值,筛选条件必须带上主键或者索引" +DMLCheckSelectRowsMessage = "查询数据量超过阈值,筛选条件必须带上主键或者索引" +DMLCheckSelectRowsParams1 = "查询数据量(万)" +DMLCheckSelectWithOrderByAnnotation = "ORDER BY 对查询性能影响较大,同时不便于优化维护,建议将排序部分放到业务处理" +DMLCheckSelectWithOrderByDesc = "SELECT 语句不能有ORDER BY" +DMLCheckSelectWithOrderByMessage = "SELECT 语句不能有ORDER BY" +DMLCheckSortColumnLengthAnnotation = "对例如VARCHAR(2000)这样的长字段进行ORDER BY、DISTINCT、GROUP BY、UNION之类的操作,会引发排序,有性能隐患" +DMLCheckSortColumnLengthDesc = "禁止对长字段排序" +DMLCheckSortColumnLengthMessage = "长度超过阈值的字段不建议用于ORDER BY、DISTINCT、GROUP BY、UNION,这些字段有:%v" 
+DMLCheckSortColumnLengthParams1 = "可排序字段的最大长度" +DMLCheckSortDirectionAnnotation = "在 MySQL 8.0 之前当 ORDER BY 多个列指定的排序方向不同时将无法使用已经建立的索引。在MySQL8.0 之后可以建立对应的排序顺序的联合索引来优化" +DMLCheckSortDirectionDesc = "不建议在 ORDER BY 语句中对多个不同条件使用不同方向的排序" +DMLCheckSortDirectionMessage = "不建议在 ORDER BY 语句中对多个不同条件使用不同方向的排序" +DMLCheckSpacesAroundTheStringAnnotation = "字符串前后存在空格将可能导致查询判断逻辑出错,如在MySQL 5.5中'a'和'a '在查询中被认为是相同的值" +DMLCheckSpacesAroundTheStringDesc = "引号中的字符串开头或结尾不建议包含空格" +DMLCheckSpacesAroundTheStringMessage = "引号中的字符串开头或结尾不建议包含空格" +DMLCheckSubQueryNestNumAnnotation = "子查询嵌套层数超过阈值,有些情况下,子查询并不能使用到索引。同时对于返回结果集比较大的子查询,会产生大量的临时表,消耗过多的CPU和IO资源,产生大量的慢查询" +DMLCheckSubQueryNestNumDesc = "子查询嵌套层数不建议超过阈值" +DMLCheckSubQueryNestNumMessage = "子查询嵌套层数超过阈值%v" +DMLCheckSubQueryNestNumParams1 = "子查询嵌套层数不建议超过阈值" +DMLCheckSubqueryLimitAnnotation = "部分MySQL版本不支持在子查询中进行'LIMIT & IN/ALL/ANY/SOME'" +DMLCheckSubqueryLimitDesc = "不建议在子查询中使用LIMIT" +DMLCheckSubqueryLimitMessage = "不建议在子查询中使用LIMIT" +DMLCheckTableSizeAnnotation = "DML操作大表,耗时较久且负载较高,容易影响数据库性能;具体规则阈值可以根据业务需求调整,默认值:1024" +DMLCheckTableSizeDesc = "不建议对数据量过大的表执行DML操作" +DMLCheckTableSizeMessage = "执行DML的表 %v 空间不建议超过 %vMB" +DMLCheckTableSizeParams1 = "表空间大小(MB)" +DMLCheckUpdateOrDeleteHasWhereAnnotation = "因为这些语句的目的是修改数据库中的数据,需要使用 WHERE 条件来过滤需要更新或删除的记录,以确保数据的正确性。另外,使用 WHERE 条件还可以提高查询性能。" +DMLCheckUpdateOrDeleteHasWhereDesc = "建议UPDATE/DELETE操作使用WHERE条件" +DMLCheckUpdateOrDeleteHasWhereMessage = "建议UPDATE/DELETE操作使用WHERE条件" +DMLCheckWhereExistFuncAnnotation = "对条件字段做函数操作,可能会破坏索引值的有序性,导致优化器选择放弃走索引,使查询性能大幅度降低" +DMLCheckWhereExistFuncDesc = "避免对条件字段使用函数操作" +DMLCheckWhereExistFuncMessage = "避免对条件字段使用函数操作" +DMLCheckWhereExistImplicitConversionAnnotation = "WHERE条件中使用与过滤字段不一致的数据类型会引发隐式数据类型转换,导致查询有无法命中索引的风险,在高并发、大数据量的情况下,不走索引会使得数据库的查询性能严重下降" +DMLCheckWhereExistImplicitConversionDesc = "不建议在WHERE条件中使用与过滤字段不一致的数据类型" +DMLCheckWhereExistImplicitConversionMessage = "不建议在WHERE条件中使用与过滤字段不一致的数据类型" +DMLCheckWhereExistNotAnnotation = "使用负向查询,将导致全表扫描,出现慢SQL" 
+DMLCheckWhereExistNotDesc = "不建议对条件字段使用负向查询" +DMLCheckWhereExistNotMessage = "不建议对条件字段使用负向查询" +DMLCheckWhereExistScalarSubqueryAnnotation = "标量子查询存在多次访问同一张表的问题,执行开销大效率低,可使用LEFT JOIN 替代标量子查询" +DMLCheckWhereExistScalarSubqueryDesc = "不建议使用标量子查询" +DMLCheckWhereExistScalarSubqueryMessage = "不建议使用标量子查询" +DMLCheckWhereIsInvalidAnnotation = "SQL缺少WHERE条件在执行时会进行全表扫描产生额外开销,建议在大数据量高并发环境下开启,避免影响数据库查询性能" +DMLCheckWhereIsInvalidDesc = "禁止使用没有WHERE条件或者WHERE条件恒为TRUE的SQL" +DMLCheckWhereIsInvalidMessage = "禁止使用没有WHERE条件或者WHERE条件恒为TRUE的SQL" +DMLCheckWithLimitAnnotation = "DELETE/UPDATE 语句使用LIMIT条件将随机选取数据进行删除或者更新,业务无法预期" +DMLCheckWithLimitDesc = "DELETE/UPDATE 语句不能有LIMIT条件" +DMLCheckWithLimitMessage = "DELETE/UPDATE 语句不能有LIMIT条件" +DMLCheckWithOrderByAnnotation = "DELETE/UPDATE 存在ORDER BY会使用排序,带来无谓的开销" +DMLCheckWithOrderByDesc = "DELETE/UPDATE 语句不能有ORDER BY" +DMLCheckWithOrderByMessage = "DELETE/UPDATE 语句不能有ORDER BY" +DMLDisableSelectAllColumnAnnotation = "当表结构变更时,使用*通配符选择所有列将导致查询行为会发生更改,与业务期望不符;同时SELECT * 中的无用字段会带来不必要的磁盘I/O,以及网络开销,且无法覆盖索引进而回表,大幅度降低查询效率" +DMLDisableSelectAllColumnDesc = "不建议使用SELECT *" +DMLDisableSelectAllColumnMessage = "不建议使用SELECT *" +DMLHintCountFuncWithColAnnotation = "建议使用COUNT(*),因为使用 COUNT(COL) 需要对表进行全表扫描,这可能会导致性能下降。" +DMLHintCountFuncWithColDesc = "避免使用 COUNT(COL)" +DMLHintCountFuncWithColMessage = "避免使用 COUNT(COL)" +DMLHintDeleteTipsAnnotation = "DROP/TRUNCATE是DDL,操作立即生效,不会写入日志,所以无法回滚,在执行高危操作之前对数据进行备份是很有必要的" +DMLHintDeleteTipsDesc = "建议在执行DELETE/DROP/TRUNCATE等操作前进行备份" +DMLHintDeleteTipsMessage = "建议在执行DELETE/DROP/TRUNCATE等操作前进行备份" +DMLHintGroupByRequiresConditionsAnnotation = "在5.7中,MySQL默认会对’GROUP BY col1, …’按如下顺序’ORDER BY col1,…’隐式排序,导致产生无谓的排序,带来额外的开销;在8.0中,则不会出现这种情况。如果不需要排序建议显示添加’ORDER BY NULL’" +DMLHintGroupByRequiresConditionsDesc = "建议为GROUP BY语句添加ORDER BY条件" +DMLHintGroupByRequiresConditionsMessage = "建议为GROUP BY语句添加ORDER BY条件" +DMLHintInNullOnlyFalseAnnotation = "查询条件永远非真,这将导致查询无匹配到的结果" +DMLHintInNullOnlyFalseDesc = "避免使用 IN (NULL) 或者 NOT IN (NULL)" 
+DMLHintInNullOnlyFalseMessage = "避免使用IN (NULL)/NOT IN (NULL) ,该用法永远非真将导致条件失效" +DMLHintLimitMustBeCombinedWithOrderByAnnotation = "没有ORDER BY的LIMIT会导致非确定性的结果可能与业务需求不符,这取决于执行计划" +DMLHintLimitMustBeCombinedWithOrderByDesc = "LIMIT 查询建议使用ORDER BY" +DMLHintLimitMustBeCombinedWithOrderByMessage = "LIMIT 查询建议使用ORDER BY" +DMLHintSumFuncTipsAnnotation = "当某一列的值全是NULL时,COUNT(COL)的返回结果为0,但SUM(COL)的返回结果为NULL,因此使用SUM()时需注意NPE问题(指数据返回NULL);如业务需避免NPE问题,建议开启此规则" +DMLHintSumFuncTipsDesc = "避免使用 SUM(COL)" +DMLHintSumFuncTipsMessage = "避免使用 SUM(COL) ,该用法存在返回NULL值导致程序空指针的风险" +DMLHintTruncateTipsAnnotation = "TRUNCATE是DLL,数据不能回滚,在没有备份情况下,谨慎使用TRUNCATE" +DMLHintTruncateTipsDesc = "不建议使用TRUNCATE操作" +DMLHintTruncateTipsMessage = "不建议使用TRUNCATE操作" +DMLHintUseTruncateInsteadOfDeleteAnnotation = "TRUNCATE TABLE 比 DELETE 速度快,且使用的系统和事务日志资源少,同时TRUNCATE后表所占用的空间会被释放,而DELETE后需要手工执行OPTIMIZE才能释放表空间" +DMLHintUseTruncateInsteadOfDeleteDesc = "删除全表时建议使用 TRUNCATE 替代 DELETE" +DMLHintUseTruncateInsteadOfDeleteMessage = "删除全表时建议使用 TRUNCATE 替代 DELETE" +DMLMustMatchLeftMostPrefixAnnotation = "对联合索引左侧字段进行IN 、OR等非等值查询会导致联合索引失效" +DMLMustMatchLeftMostPrefixDesc = "禁止对联合索引左侧字段进行IN 、OR等非等值查询" +DMLMustMatchLeftMostPrefixMessage = "对联合索引左侧字段进行IN 、OR等非等值查询会导致联合索引失效" +DMLMustUseLeftMostPrefixAnnotation = "使用联合索引时,不包含首字段会导致联合索引失效" +DMLMustUseLeftMostPrefixDesc = "使用联合索引时,必须使用联合索引的首字段" +DMLMustUseLeftMostPrefixMessage = "使用联合索引时,必须使用联合索引的首字段" +DMLNotAllowInsertAutoincrementAnnotation = "手动赋值可能会造成数据空洞,主键冲突" +DMLNotAllowInsertAutoincrementDesc = "禁止手动设置自增字段值" +DMLNotAllowInsertAutoincrementMessage = "禁止手动设置自增字段值" +DMLNotRecommendFuncInWhereAnnotation = "函数或运算符会导致查询无法利用表中的索引,该查询将会全表扫描,性能较差" +DMLNotRecommendFuncInWhereDesc = "应避免在 WHERE 条件中使用函数或其他运算符" +DMLNotRecommendFuncInWhereMessage = "应避免在 WHERE 条件中使用函数或其他运算符" +DMLNotRecommendGroupByConstantAnnotation = "GROUP BY 1 表示按第一列进行GROUP BY;在GROUP BY子句中使用数字,而不是表达式或列名称,当查询列顺序改变时,会导致查询逻辑出现问题" +DMLNotRecommendGroupByConstantDesc = "不建议对常量进行 GROUP BY" 
+DMLNotRecommendGroupByConstantMessage = "不建议对常量进行 GROUP BY" +DMLNotRecommendGroupByExpressionAnnotation = "当ORDER BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差" +DMLNotRecommendGroupByExpressionDesc = "不建议ORDER BY 的条件为表达式" +DMLNotRecommendGroupByExpressionMessage = "不建议ORDER BY 的条件为表达式" +DMLNotRecommendHavingAnnotation = "对于索引字段,放在HAVING子句中时不会走索引;建议将HAVING子句改写为WHERE中的查询条件,可以在查询处理期间使用索引,提高SQL的执行效率" +DMLNotRecommendHavingDesc = "不建议使用 HAVING 子句" +DMLNotRecommendHavingMessage = "不建议使用 HAVING 子句" +DMLNotRecommendInAnnotation = "当IN值过多时,有可能会导致查询进行全表扫描,使得MySQL性能急剧下降" +DMLNotRecommendInDesc = "不建议使用IN" +DMLNotRecommendInMessage = "不建议使用IN" +DMLNotRecommendNotWildcardLikeAnnotation = "不包含通配符的 LIKE 查询逻辑上与等值查询相同,建议使用等值查询替代" +DMLNotRecommendNotWildcardLikeDesc = "不建议使用没有通配符的 LIKE 查询" +DMLNotRecommendNotWildcardLikeMessage = "不建议使用没有通配符的 LIKE 查询" +DMLNotRecommendOrderByRandAnnotation = "ORDER BY RAND()使用了临时表,同时还要对其进行排序,在数据量很大的情况下会增加服务器负载以及增加查询时间" +DMLNotRecommendOrderByRandDesc = "不建议使用 ORDER BY RAND()" +DMLNotRecommendOrderByRandMessage = "不建议使用 ORDER BY RAND()" +DMLNotRecommendSubqueryAnnotation = "有些情况下,子查询并不能使用到索引,同时对于返回结果集比较大的子查询,会产生大量的临时表,消耗过多的CPU和IO资源,产生大量的慢查询" +DMLNotRecommendSubqueryDesc = "不推荐使用子查询" +DMLNotRecommendSubqueryMessage = "不推荐使用子查询" +DMLNotRecommendSysdateAnnotation = "当SYSDATE()函数在基于STATEMENT模式的主从环境下可能造成数据的不一致,因为语句在主库中执行到日志传递到备库,存在时间差,到备库执行的时候就会变成不同的时间值,建议采取ROW模式的复制环境" +DMLNotRecommendSysdateDesc = "不建议使用 SYSDATE() 函数" +DMLNotRecommendSysdateMessage = "不建议使用 SYSDATE() 函数" +DMLNotRecommendUpdatePKAnnotation = "主键索引数据列的顺序就是表记录的物理存储顺序,频繁更新主键将导致整个表记录的顺序的调整,会耗费相当大的资源" +DMLNotRecommendUpdatePKDesc = "不建议UPDATE主键" +DMLNotRecommendUpdatePKMessage = "不建议UPDATE主键" +DMLSQLExplainLowestLevelAnnotation = "验证 SQL 执行计划中的 type 字段,确保满足要求级别,以保证查询性能。" +DMLSQLExplainLowestLevelDesc = "SQL执行计划中type字段建议满足规定的级别" +DMLSQLExplainLowestLevelMessage = "建议修改SQL,确保执行计划中type字段可以满足规定中的任一等级:%v" +DMLSQLExplainLowestLevelParams1 = "查询计划type等级,以英文逗号隔开" +DMLWhereExistNullAnnotation 
= "使用 IS NULL 或 IS NOT NULL 可能导致查询放弃使用索引而进行全表扫描" +DMLWhereExistNullDesc = "不建议对条件字段使用 NULL 值判断" +DMLWhereExistNullMessage = "不建议对条件字段使用 NULL 值判断" +DuplicateColumnsMessage = "字段名 %s 重复" +DuplicateIndexedColumnMessage = "索引 %s 字段 %s重复" +DuplicateIndexesMessage = "索引名 %s 重复" +DuplicatePrimaryKeyedColumnMessage = "主键字段 %s 重复" +ExtremalIndexAdviceFormat = "索引建议 | SQL使用了最值函数,可以利用索引有序的性质快速找到最值,建议对表%s添加单列索引,参考列:%s" +FunctionIndexAdviceFormatAll = "索引建议 | SQL使用了函数作为查询条件,在MySQL5.7以上的版本,可以在虚拟列上创建索引,在MySQL8.0.13以上的版本,可以创建函数索引,建议根据MySQL版本对表%s添加合适的索引,参考列:%s" +FunctionIndexAdviceFormatV57 = "索引建议 | SQL使用了函数作为查询条件,在MySQL5.7以上的版本,可以在虚拟列上创建索引,建议对表%s添加虚拟列索引,参考列:%s" +FunctionIndexAdviceFormatV80 = "索引建议 | SQL使用了函数作为查询条件,在MySQL8.0.13以上的版本,可以创建函数索引,建议对表%s添加函数索引,参考列:%s" +GhostDryRunError = "表空间大小超过%vMB, 将使用gh-ost进行上线, 但是dry-run抛出如下错误: %v" +GhostDryRunNotice = "表空间大小超过%vMB, 将使用gh-ost进行上线" +IndexExistMessage = "索引 %s 已存在" +IndexNotExistMessage = "索引 %s 不存在" +JoinIndexAdviceFormat = "索引建议 | SQL中字段%s为被驱动表%s上的关联字段,建议对表%s添加单列索引,参考列:%s" +KeyedColumnNotExistMessage = "索引字段 %s 不存在" +MultiPrimaryKeyMessage = "主键只能设置一个" +NotSupportExceedMaxRowsRollback = "预计影响行数超过配置的最大值,不生成回滚语句" +NotSupportHasVariableRollback = "不支持回滚包含变量的 DML 语句" +NotSupportInsertWithoutPrimaryKeyRollback = "不支持回滚 INSERT 没有指定主键的语句" +NotSupportMultiTableStatementRollback = "暂不支持回滚多表的 DML 语句" +NotSupportNoPrimaryKeyTableRollback = "不支持回滚没有主键的表的DML语句" +NotSupportOnDuplicatStatementRollback = "暂不支持回滚 ON DUPLICATE 语句" +NotSupportParamMarkerStatementRollback = "不支持回滚包含指纹的语句" +NotSupportStatementRollback = "暂不支持回滚该类型的语句" +NotSupportSubQueryStatementRollback = "暂不支持回滚带子查询的语句" +OptDMLCheckLimitOffsetNumAnnotation = "使用LIMIT和OFFSET子句可以分别控制查询结果的数量和指定从哪一行开始返回数据。但是,当OFFSET值较大时,查询效率会降低,因为系统必须扫描更多数据才能找到起始行,这在大数据集中尤其会导致性能问题和资源消耗。" +OptDMLCheckLimitOffsetNumDesc = "OFFSET的值超过阈值" +OptDMLCheckMathComputationOrFuncOnIndexAnnotation = "在索引列上的运算将导致索引失效,容易造成全表扫描,产生严重的性能问题。所以需要尽量将索引列上的运算转换到常量端进行。" +OptDMLCheckMathComputationOrFuncOnIndexDesc = 
"索引列上的运算导致索引失效" +OptDMLCheckNotEqualSymbolAnnotation = "'!=' 是非标准的运算符,'<>' 才是SQL中标准的不等于运算符" +OptDMLCheckNotEqualSymbolDesc = "建议使用'<>'代替'!='" +OptDMLCheckWhereExistImplicitConversionAnnotation = "WHERE条件中使用与过滤字段不一致的数据类型会引发隐式数据类型转换,导致查询有无法命中索引的风险,在高并发、大数据量的情况下,不走索引会使得数据库的查询性能严重下降" +OptDMLCheckWhereExistImplicitConversionDesc = "隐式类型转换导致索引失效" +OptDMLCheckWhereExistScalarSubqueryAnnotation = "对于使用COUNT标量子查询来进行判断是否存在,可以重写为EXISTS子查询,从而避免一次聚集运算。" +OptDMLCheckWhereExistScalarSubqueryDesc = "COUNT标量子查询重写" +OptDMLHintGroupByRequiresConditionsAnnotation = "在早期版本的MySQL中,GROUP BY 默认进行排序,可通过添加 ORDER BY NULL 来取消此排序,提高查询效率。" +OptDMLHintGroupByRequiresConditionsDesc = "为GROUP BY显示添加 ORDER BY 条件('代替'!='"} + DMLCheckNotEqualSymbolAnnotation = &i18n.Message{ID: "DMLCheckNotEqualSymbolAnnotation", Other: "'!=' 是非标准的运算符,'<>' 才是SQL中标准的不等于运算符"} + DMLCheckNotEqualSymbolMessage = &i18n.Message{ID: "DMLCheckNotEqualSymbolMessage", Other: "建议使用'<>'代替'!='"} + DMLNotRecommendSubqueryDesc = &i18n.Message{ID: "DMLNotRecommendSubqueryDesc", Other: "不推荐使用子查询"} + DMLNotRecommendSubqueryAnnotation = &i18n.Message{ID: "DMLNotRecommendSubqueryAnnotation", Other: "有些情况下,子查询并不能使用到索引,同时对于返回结果集比较大的子查询,会产生大量的临时表,消耗过多的CPU和IO资源,产生大量的慢查询"} + DMLNotRecommendSubqueryMessage = &i18n.Message{ID: "DMLNotRecommendSubqueryMessage", Other: "不推荐使用子查询"} + DMLCheckSubqueryLimitDesc = &i18n.Message{ID: "DMLCheckSubqueryLimitDesc", Other: "不建议在子查询中使用LIMIT"} + DMLCheckSubqueryLimitAnnotation = &i18n.Message{ID: "DMLCheckSubqueryLimitAnnotation", Other: "部分MySQL版本不支持在子查询中进行'LIMIT & IN/ALL/ANY/SOME'"} + DMLCheckSubqueryLimitMessage = &i18n.Message{ID: "DMLCheckSubqueryLimitMessage", Other: "不建议在子查询中使用LIMIT"} + DDLCheckAutoIncrementDesc = &i18n.Message{ID: "DDLCheckAutoIncrementDesc", Other: "表的初始AUTO_INCREMENT值建议为0"} + DDLCheckAutoIncrementAnnotation = &i18n.Message{ID: "DDLCheckAutoIncrementAnnotation", Other: 
"创建表时AUTO_INCREMENT设置为0则自增从1开始,可以避免数据空洞。例如在导出表结构DDL时,表结构内AUTO_INCREMENT通常为当前的自增值,如果建表时没有把AUTO_INCREMENT设置为0,那么通过该DDL进行建表操作会导致自增值从一个无意义数字开始。"} + DDLCheckAutoIncrementMessage = &i18n.Message{ID: "DDLCheckAutoIncrementMessage", Other: "表的初始AUTO_INCREMENT值建议为0"} + DDLNotAllowRenamingDesc = &i18n.Message{ID: "DDLNotAllowRenamingDesc", Other: "禁止使用RENAME或CHANGE对表名字段名进行修改"} + DDLNotAllowRenamingAnnotation = &i18n.Message{ID: "DDLNotAllowRenamingAnnotation", Other: "RENAME/CHANGE 表名/列名会对线上业务不停机发布造成影响,如需这种操作应当DBA手工干预"} + DDLNotAllowRenamingMessage = &i18n.Message{ID: "DDLNotAllowRenamingMessage", Other: "禁止使用RENAME或CHANGE对表名字段名进行修改"} + DMLCheckExplainFullIndexScanDesc = &i18n.Message{ID: "DMLCheckExplainFullIndexScanDesc", Other: "不建议对表进行全索引扫描"} + DMLCheckExplainFullIndexScanAnnotation = &i18n.Message{ID: "DMLCheckExplainFullIndexScanAnnotation", Other: "在数据量大的情况下索引全扫描严重影响SQL性能。"} + DMLCheckExplainFullIndexScanMessage = &i18n.Message{ID: "DMLCheckExplainFullIndexScanMessage", Other: "不建议对表进行全索引扫描"} + DMLCheckLimitOffsetNumDesc = &i18n.Message{ID: "DMLCheckLimitOffsetNumDesc", Other: "不建议LIMIT的偏移OFFSET大于阈值"} + DMLCheckLimitOffsetNumAnnotation = &i18n.Message{ID: "DMLCheckLimitOffsetNumAnnotation", Other: "因为OFFSET指定了结果集的起始位置,如果起始位置过大,那么 MySQL 需要处理更多的数据才能返回结果集,这可能会导致查询性能下降。"} + DMLCheckLimitOffsetNumMessage = &i18n.Message{ID: "DMLCheckLimitOffsetNumMessage", Other: "不建议LIMIT的偏移OFFSET大于阈值,OFFSET=%v(阈值为%v)"} + DMLCheckLimitOffsetNumParams1 = &i18n.Message{ID: "DMLCheckLimitOffsetNumParams1", Other: "offset 大小"} + DMLCheckUpdateOrDeleteHasWhereDesc = &i18n.Message{ID: "DMLCheckUpdateOrDeleteHasWhereDesc", Other: "建议UPDATE/DELETE操作使用WHERE条件"} + DMLCheckUpdateOrDeleteHasWhereAnnotation = &i18n.Message{ID: "DMLCheckUpdateOrDeleteHasWhereAnnotation", Other: "因为这些语句的目的是修改数据库中的数据,需要使用 WHERE 条件来过滤需要更新或删除的记录,以确保数据的正确性。另外,使用 WHERE 条件还可以提高查询性能。"} + DMLCheckUpdateOrDeleteHasWhereMessage = &i18n.Message{ID: "DMLCheckUpdateOrDeleteHasWhereMessage", Other: "建议UPDATE/DELETE操作使用WHERE条件"} + 
DMLCheckSortColumnLengthDesc = &i18n.Message{ID: "DMLCheckSortColumnLengthDesc", Other: "禁止对长字段排序"} + DMLCheckSortColumnLengthAnnotation = &i18n.Message{ID: "DMLCheckSortColumnLengthAnnotation", Other: "对例如VARCHAR(2000)这样的长字段进行ORDER BY、DISTINCT、GROUP BY、UNION之类的操作,会引发排序,有性能隐患"} + DMLCheckSortColumnLengthMessage = &i18n.Message{ID: "DMLCheckSortColumnLengthMessage", Other: "长度超过阈值的字段不建议用于ORDER BY、DISTINCT、GROUP BY、UNION,这些字段有:%v"} + DMLCheckSortColumnLengthParams1 = &i18n.Message{ID: "DMLCheckSortColumnLengthParams1", Other: "可排序字段的最大长度"} + AllCheckPrepareStatementPlaceholdersDesc = &i18n.Message{ID: "AllCheckPrepareStatementPlaceholdersDesc", Other: "绑定的变量个数不建议超过阈值"} + AllCheckPrepareStatementPlaceholdersAnnotation = &i18n.Message{ID: "AllCheckPrepareStatementPlaceholdersAnnotation", Other: "因为过度使用绑定变量会增加查询的复杂度,从而降低查询性能。过度使用绑定变量还会增加维护成本。默认阈值:100"} + AllCheckPrepareStatementPlaceholdersMessage = &i18n.Message{ID: "AllCheckPrepareStatementPlaceholdersMessage", Other: "使用绑定变量数量为 %v,不建议超过设定阈值 %v"} + AllCheckPrepareStatementPlaceholdersParams1 = &i18n.Message{ID: "AllCheckPrepareStatementPlaceholdersParams1", Other: "最大绑定变量数量"} + DMLCheckExplainExtraUsingIndexForSkipScanDesc = &i18n.Message{ID: "DMLCheckExplainExtraUsingIndexForSkipScanDesc", Other: "不建议对表进行索引跳跃扫描"} + DMLCheckExplainExtraUsingIndexForSkipScanAnnotation = &i18n.Message{ID: "DMLCheckExplainExtraUsingIndexForSkipScanAnnotation", Other: "索引扫描是跳跃扫描,未遵循最左匹配原则,可能降低索引的使用效率,影响查询性能"} + DMLCheckExplainExtraUsingIndexForSkipScanMessage = &i18n.Message{ID: "DMLCheckExplainExtraUsingIndexForSkipScanMessage", Other: "不建议对表进行索引跳跃扫描"} + DMLCheckAffectedRowsDesc = &i18n.Message{ID: "DMLCheckAffectedRowsDesc", Other: "UPDATE/DELETE操作影响行数不建议超过阈值"} + DMLCheckAffectedRowsAnnotation = &i18n.Message{ID: "DMLCheckAffectedRowsAnnotation", Other: "如果 DML 操作影响行数过多,会导致查询性能下降,因为需要扫描更多的数据。"} + DMLCheckAffectedRowsMessage = &i18n.Message{ID: "DMLCheckAffectedRowsMessage", Other: "UPDATE/DELETE操作影响行数不建议超过阈值,影响行数为 %v,超过设定阈值 %v"} + 
DMLCheckAffectedRowsParams1 = &i18n.Message{ID: "DMLCheckAffectedRowsParams1", Other: "最大影响行数"} + DMLCheckSameTableJoinedMultipleTimesDesc = &i18n.Message{ID: "DMLCheckSameTableJoinedMultipleTimesDesc", Other: "不建议对同一张表连接多次"} + DMLCheckSameTableJoinedMultipleTimesAnnotation = &i18n.Message{ID: "DMLCheckSameTableJoinedMultipleTimesAnnotation", Other: "如果对单表查询多次,会导致查询性能下降。"} + DMLCheckSameTableJoinedMultipleTimesMessage = &i18n.Message{ID: "DMLCheckSameTableJoinedMultipleTimesMessage", Other: "表%v被连接多次"} + DMLCheckExplainUsingIndexDesc = &i18n.Message{ID: "DMLCheckExplainUsingIndexDesc", Other: "SQL查询条件需要走索引"} + DMLCheckExplainUsingIndexAnnotation = &i18n.Message{ID: "DMLCheckExplainUsingIndexAnnotation", Other: "使用索引可以显著提高SQL查询的性能。"} + DMLCheckExplainUsingIndexMessage = &i18n.Message{ID: "DMLCheckExplainUsingIndexMessage", Other: "建议使用索引以优化 SQL 查询性能"} + DMLCheckInsertSelectDesc = &i18n.Message{ID: "DMLCheckInsertSelectDesc", Other: "不建议使用INSERT ... SELECT"} + DMLCheckInsertSelectAnnotation = &i18n.Message{ID: "DMLCheckInsertSelectAnnotation", Other: "使用 INSERT ... SELECT 在默认事务隔离级别下,可能会导致对查询的表施加表级锁。"} + DMLCheckInsertSelectMessage = &i18n.Message{ID: "DMLCheckInsertSelectMessage", Other: "不建议使用INSERT ... 
SELECT"} + DMLCheckAggregateDesc = &i18n.Message{ID: "DMLCheckAggregateDesc", Other: "不建议使用聚合函数"} + DMLCheckAggregateAnnotation = &i18n.Message{ID: "DMLCheckAggregateAnnotation", Other: "不建议使用SQL聚合函数,是为了确保查询的简单性、高性能和数据一致性。"} + DMLCheckAggregateMessage = &i18n.Message{ID: "DMLCheckAggregateMessage", Other: "不建议使用聚合函数计算"} + DDLCheckColumnNotNULLDesc = &i18n.Message{ID: "DDLCheckColumnNotNULLDesc", Other: "表字段建议有NOT NULL约束"} + DDLCheckColumnNotNULLAnnotation = &i18n.Message{ID: "DDLCheckColumnNotNULLAnnotation", Other: "表字段建议有 NOT NULL 约束,可确保数据的完整性,防止插入空值,提升查询准确性。"} + DDLCheckColumnNotNULLMessage = &i18n.Message{ID: "DDLCheckColumnNotNULLMessage", Other: "建议字段%v设置NOT NULL约束"} + DMLCheckIndexSelectivityDesc = &i18n.Message{ID: "DMLCheckIndexSelectivityDesc", Other: "建议连库查询时,确保SQL执行计划中使用的索引区分度大于阈值"} + DMLCheckIndexSelectivityAnnotation = &i18n.Message{ID: "DMLCheckIndexSelectivityAnnotation", Other: "确保SQL执行计划中使用的高索引区分度,有助于提升查询性能并优化查询效率。"} + DMLCheckIndexSelectivityMessage = &i18n.Message{ID: "DMLCheckIndexSelectivityMessage", Other: "索引:%v,未超过区分度阈值:%v,建议使用超过阈值的索引。"} + DMLCheckIndexSelectivityParams1 = &i18n.Message{ID: "DMLCheckIndexSelectivityParams1", Other: "可选择性(百分比)"} + DDLCheckTableRowsDesc = &i18n.Message{ID: "DDLCheckTableRowsDesc", Other: "表行数超过阈值,建议对表进行拆分"} + DDLCheckTableRowsAnnotation = &i18n.Message{ID: "DDLCheckTableRowsAnnotation", Other: "当表行数超过阈值时,对表进行拆分有助于提高数据库性能和查询速度。"} + DDLCheckTableRowsMessage = &i18n.Message{ID: "DDLCheckTableRowsMessage", Other: "表行数超过阈值,建议对表进行拆分"} + DDLCheckTableRowsParams1 = &i18n.Message{ID: "DDLCheckTableRowsParams1", Other: "表行数(万)"} + DDLCheckCompositeIndexDistinctionDesc = &i18n.Message{ID: "DDLCheckCompositeIndexDistinctionDesc", Other: "建议在组合索引中将区分度高的字段靠前放"} + DDLCheckCompositeIndexDistinctionAnnotation = &i18n.Message{ID: "DDLCheckCompositeIndexDistinctionAnnotation", Other: "将区分度高的字段靠前放置在组合索引中有助于提高索引的查询性能,因为它能更快地减小数据范围,提高检索效率。"} + DDLCheckCompositeIndexDistinctionMessage = &i18n.Message{ID: 
"DDLCheckCompositeIndexDistinctionMessage", Other: "建议在组合索引中将区分度高的字段靠前放,%v"} + DDLAvoidTextDesc = &i18n.Message{ID: "DDLAvoidTextDesc", Other: "使用TEXT 类型的字段建议和原表进行分拆,与原表主键单独组成另外一个表进行存放"} + DDLAvoidTextAnnotation = &i18n.Message{ID: "DDLAvoidTextAnnotation", Other: "将TEXT类型的字段与原表主键分拆成另一个表可以提高数据库性能和查询速度,减少不必要的 I/O 操作。"} + DDLAvoidTextMessage = &i18n.Message{ID: "DDLAvoidTextMessage", Other: "字段:%v为TEXT类型,建议和原表进行分拆,与原表主键单独组成另外一个表进行存放"} + DMLCheckSelectRowsDesc = &i18n.Message{ID: "DMLCheckSelectRowsDesc", Other: "查询数据量超过阈值,筛选条件必须带上主键或者索引"} + DMLCheckSelectRowsAnnotation = &i18n.Message{ID: "DMLCheckSelectRowsAnnotation", Other: "筛选条件必须带上主键或索引可提高查询性能和减少全表扫描的成本。"} + DMLCheckSelectRowsMessage = &i18n.Message{ID: "DMLCheckSelectRowsMessage", Other: "查询数据量超过阈值,筛选条件必须带上主键或者索引"} + DMLCheckSelectRowsParams1 = &i18n.Message{ID: "DMLCheckSelectRowsParams1", Other: "查询数据量(万)"} + DMLCheckScanRowsDesc = &i18n.Message{ID: "DMLCheckScanRowsDesc", Other: "扫描行数超过阈值,筛选条件必须带上主键或者索引"} + DMLCheckScanRowsAnnotation = &i18n.Message{ID: "DMLCheckScanRowsAnnotation", Other: "筛选条件必须带上主键或索引可降低数据库查询的时间复杂度,提高查询效率。"} + DMLCheckScanRowsMessage = &i18n.Message{ID: "DMLCheckScanRowsMessage", Other: "扫描行数超过阈值,筛选条件必须带上主键或者索引"} + DMLCheckScanRowsParams1 = &i18n.Message{ID: "DMLCheckScanRowsParams1", Other: "扫描行数量(万)"} + DMLMustUseLeftMostPrefixDesc = &i18n.Message{ID: "DMLMustUseLeftMostPrefixDesc", Other: "使用联合索引时,必须使用联合索引的首字段"} + DMLMustUseLeftMostPrefixAnnotation = &i18n.Message{ID: "DMLMustUseLeftMostPrefixAnnotation", Other: "使用联合索引时,不包含首字段会导致联合索引失效"} + DMLMustUseLeftMostPrefixMessage = &i18n.Message{ID: "DMLMustUseLeftMostPrefixMessage", Other: "使用联合索引时,必须使用联合索引的首字段"} + DMLMustMatchLeftMostPrefixDesc = &i18n.Message{ID: "DMLMustMatchLeftMostPrefixDesc", Other: "禁止对联合索引左侧字段进行IN 、OR等非等值查询"} + DMLMustMatchLeftMostPrefixAnnotation = &i18n.Message{ID: "DMLMustMatchLeftMostPrefixAnnotation", Other: "对联合索引左侧字段进行IN 、OR等非等值查询会导致联合索引失效"} + DMLMustMatchLeftMostPrefixMessage = &i18n.Message{ID: 
"DMLMustMatchLeftMostPrefixMessage", Other: "对联合索引左侧字段进行IN 、OR等非等值查询会导致联合索引失效"} + DMLCheckJoinFieldUseIndexDesc = &i18n.Message{ID: "DMLCheckJoinFieldUseIndexDesc", Other: "JOIN字段必须包含索引"} + DMLCheckJoinFieldUseIndexAnnotation = &i18n.Message{ID: "DMLCheckJoinFieldUseIndexAnnotation", Other: "JOIN字段包含索引可提高连接操作的性能和查询速度。"} + DMLCheckJoinFieldUseIndexMessage = &i18n.Message{ID: "DMLCheckJoinFieldUseIndexMessage", Other: "JOIN字段必须包含索引"} + DMLCheckJoinFieldCharacterSetAndCollationDesc = &i18n.Message{ID: "DMLCheckJoinFieldCharacterSetAndCollationDesc", Other: "连接表字段的字符集和排序规则必须一致"} + DMLCheckJoinFieldCharacterSetAndCollationAnnotation = &i18n.Message{ID: "DMLCheckJoinFieldCharacterSetAndCollationAnnotation", Other: "连接表字段的字符集和排序规则一致可避免数据不一致和查询错误,确保连接操作正确执行。"} + DMLCheckJoinFieldCharacterSetAndCollationMessage = &i18n.Message{ID: "DMLCheckJoinFieldCharacterSetAndCollationMessage", Other: "连接表字段的字符集和排序规则必须一致"} + DMLCheckMathComputationOrFuncOnIndexDesc = &i18n.Message{ID: "DMLCheckMathComputationOrFuncOnIndexDesc", Other: "禁止对索引列进行数学运算和使用函数"} + DMLCheckMathComputationOrFuncOnIndexAnnotation = &i18n.Message{ID: "DMLCheckMathComputationOrFuncOnIndexAnnotation", Other: "对索引列进行数学运算和使用函数会导致索引失效,从而导致全表扫描,影响查询性能。"} + DMLCheckMathComputationOrFuncOnIndexMessage = &i18n.Message{ID: "DMLCheckMathComputationOrFuncOnIndexMessage", Other: "禁止对索引列进行数学运算和使用函数"} + DMLSQLExplainLowestLevelDesc = &i18n.Message{ID: "DMLSQLExplainLowestLevelDesc", Other: "SQL执行计划中type字段建议满足规定的级别"} + DMLSQLExplainLowestLevelAnnotation = &i18n.Message{ID: "DMLSQLExplainLowestLevelAnnotation", Other: "验证 SQL 执行计划中的 type 字段,确保满足要求级别,以保证查询性能。"} + DMLSQLExplainLowestLevelMessage = &i18n.Message{ID: "DMLSQLExplainLowestLevelMessage", Other: "建议修改SQL,确保执行计划中type字段可以满足规定中的任一等级:%v"} + DMLSQLExplainLowestLevelParams1 = &i18n.Message{ID: "DMLSQLExplainLowestLevelParams1", Other: "查询计划type等级,以英文逗号隔开"} + DDLAvoidFullTextDesc = &i18n.Message{ID: "DDLAvoidFullTextDesc", Other: "禁止使用全文索引"} + DDLAvoidFullTextAnnotation = 
&i18n.Message{ID: "DDLAvoidFullTextAnnotation", Other: "全文索引的使用会增加存储开销,并对写操作性能产生一定影响。"} + DDLAvoidFullTextMessage = &i18n.Message{ID: "DDLAvoidFullTextMessage", Other: "禁止使用全文索引"} + DDLAvoidGeometryDesc = &i18n.Message{ID: "DDLAvoidGeometryDesc", Other: "禁止使用空间字段和空间索引"} + DDLAvoidGeometryAnnotation = &i18n.Message{ID: "DDLAvoidGeometryAnnotation", Other: "使用空间字段和空间索引会增加存储需求,对数据库性能造成一定影响"} + DDLAvoidGeometryMessage = &i18n.Message{ID: "DDLAvoidGeometryMessage", Other: "禁止使用空间字段和空间索引"} + DMLAvoidWhereEqualNullDesc = &i18n.Message{ID: "DMLAvoidWhereEqualNullDesc", Other: "WHERE子句中禁止将NULL值与其他字段或值进行比较运算"} + DMLAvoidWhereEqualNullAnnotation = &i18n.Message{ID: "DMLAvoidWhereEqualNullAnnotation", Other: "NULL在SQL中属于特殊值,无法与普通值进行比较。例如:column = NULL恒为false,即使column存在null值也不会查询出来,所以column = NULL应该写为column is NULL"} + DMLAvoidWhereEqualNullMessage = &i18n.Message{ID: "DMLAvoidWhereEqualNullMessage", Other: "WHERE子句中禁止将NULL值与其他字段或值进行比较运算"} + DDLAvoidEventDesc = &i18n.Message{ID: "DDLAvoidEventDesc", Other: "禁止使用event"} + DDLAvoidEventAnnotation = &i18n.Message{ID: "DDLAvoidEventAnnotation", Other: "使用event会增加数据库的维护难度和依赖性,并且也会造成安全问题。"} + DDLAvoidEventMessage = &i18n.Message{ID: "DDLAvoidEventMessage", Other: "禁止使用event"} + DDLCheckCharLengthDesc = &i18n.Message{ID: "DDLCheckCharLengthDesc", Other: "禁止char, varchar类型字段字符长度总和超过阈值"} + DDLCheckCharLengthAnnotation = &i18n.Message{ID: "DDLCheckCharLengthAnnotation", Other: "使用过长或者过多的varchar,char字段可能会增加业务逻辑的复杂性;如果字段平均长度过大时,会占用更多的存储空间。"} + DDLCheckCharLengthMessage = &i18n.Message{ID: "DDLCheckCharLengthMessage", Other: "禁止char, varchar类型字段字符长度总和超过阈值 %v"} + DDLCheckCharLengthParams1 = &i18n.Message{ID: "DDLCheckCharLengthParams1", Other: "字符长度"} +) + +// rewrite rules +var ( + OptDMLHintGroupByRequiresConditionsDesc = &i18n.Message{ID: "OptDMLHintGroupByRequiresConditionsDesc", Other: "为GROUP BY显示添加 ORDER BY 条件('代替'!='"} + OptDMLCheckNotEqualSymbolAnnotation = &i18n.Message{ID: "OptDMLCheckNotEqualSymbolAnnotation", Other: "'!=' 
是非标准的运算符,'<>' 才是SQL中标准的不等于运算符"} + OptDMLCheckLimitOffsetNumDesc = &i18n.Message{ID: "OptDMLCheckLimitOffsetNumDesc", Other: "OFFSET的值超过阈值"} + OptDMLCheckLimitOffsetNumAnnotation = &i18n.Message{ID: "OptDMLCheckLimitOffsetNumAnnotation", Other: "使用LIMIT和OFFSET子句可以分别控制查询结果的数量和指定从哪一行开始返回数据。但是,当OFFSET值较大时,查询效率会降低,因为系统必须扫描更多数据才能找到起始行,这在大数据集中尤其会导致性能问题和资源消耗。"} + OptDMLRuleNPERewriteDesc = &i18n.Message{ID: "OptDMLRuleNPERewriteDesc", Other: "NPE重写"} + OptDMLRuleNPERewriteAnnotation = &i18n.Message{ID: "OptDMLRuleNPERewriteAnnotation", Other: "SQL的NPE(Null Pointer Exception)问题是指在SQL查询中,当聚合列全为NULL时,SUM、AVG等聚合函数会返回NULL,这可能会导致后续的程序出现空指针异常。"} + OptDMLRuleAllSubqueryRewriteDesc = &i18n.Message{ID: "OptDMLRuleAllSubqueryRewriteDesc", Other: "ALL修饰的子查询重写"} + OptDMLRuleAllSubqueryRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleAllSubqueryRewriteAnnotation", Other: "如果ALL子查询的结果中存在NULL,这个SQL永远返回为空。正确的写法应该是在子查询里加上非空限制,或使用max/min的写法。"} + OptDMLRuleDiffOrderingSpecTypeWarningDesc = &i18n.Message{ID: "OptDMLRuleDiffOrderingSpecTypeWarningDesc", Other: "排序字段方向不同导致索引失效"} + OptDMLRuleDiffOrderingSpecTypeWarningAnnotation = &i18n.Message{ID: "OptDMLRuleDiffOrderingSpecTypeWarningAnnotation", Other: "ORDER BY 子句中的所有表达式需要按统一的 ASC 或 DESC 方向排序,才能利用索引来避免排序;如果ORDER BY 语句对多个不同条件使用不同方向的排序无法使用索引"} + OptDMLRuleDistinctEliminationRewriteDesc = &i18n.Message{ID: "OptDMLRuleDistinctEliminationRewriteDesc", Other: "子查询中的DISTINCT消除"} + OptDMLRuleDistinctEliminationRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleDistinctEliminationRewriteAnnotation", Other: "对于仅进行存在性测试的子查询,如果子查询包含DISTINCT通常可以删除,以避免一次去重操作。"} + OptDMLRuleExists2JoinRewriteDesc = &i18n.Message{ID: "OptDMLRuleExists2JoinRewriteDesc", Other: "EXISTS查询转换为表连接"} + OptDMLRuleExists2JoinRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleExists2JoinRewriteAnnotation", Other: "EXISTS子查询可以在适当情况下转换为JOIN来优化查询,提高数据库处理效率和性能。"} + OptDMLRuleFilterPredicatePushDownRewriteDesc = &i18n.Message{ID: "OptDMLRuleFilterPredicatePushDownRewriteDesc", Other: "过滤谓词下推"} 
+ OptDMLRuleFilterPredicatePushDownRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleFilterPredicatePushDownRewriteAnnotation", Other: "滤条件下推(FPPD)是一种通过将过滤条件提前应用于内部查询块,以减少数据处理量并提升SQL执行效率。"} + OptDMLRuleGroupingFromDiffTablesRewriteDesc = &i18n.Message{ID: "OptDMLRuleGroupingFromDiffTablesRewriteDesc", Other: "GROUPBY字段来自不同表"} + OptDMLRuleGroupingFromDiffTablesRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleGroupingFromDiffTablesRewriteAnnotation", Other: "如果分组字段来自不同的表,数据库优化器将没有办法利用索引的有序性来避免一次排序,如果存在等值条件,可以替换这些字段为来自同一张表的字段,以利用索引优化排序和提高查询效率。"} + OptDMLRuleJoinEliminationRewriteDesc = &i18n.Message{ID: "OptDMLRuleJoinEliminationRewriteDesc", Other: "表连接消除"} + OptDMLRuleJoinEliminationRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleJoinEliminationRewriteAnnotation", Other: "在不影响结果的情况下通过删除不必要的表连接来简化查询并提升性能,适用于查询仅涉及到主表主键列的场景。"} + OptDMLRuleLimitClausePushDownRewriteDesc = &i18n.Message{ID: "OptDMLRuleLimitClausePushDownRewriteDesc", Other: "LIMIT下推至UNION分支"} + OptDMLRuleLimitClausePushDownRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleLimitClausePushDownRewriteAnnotation", Other: "Limit子句下推优化通过尽可能的 “下压” Limit子句,提前过滤掉部分数据, 减少中间结果集的大小,减少后续计算需要处理的数据量, 以提高查询性能。"} + OptDMLRuleLimitClausePushDownRewriteParams1 = &i18n.Message{ID: "OptDMLRuleLimitClausePushDownRewriteParams1", Other: "OFFSET最大阈值"} + OptDMLRuleMaxMinAggRewriteDesc = &i18n.Message{ID: "OptDMLRuleMaxMinAggRewriteDesc", Other: "MAX/MIN子查询重写"} + OptDMLRuleMaxMinAggRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleMaxMinAggRewriteAnnotation", Other: "对于使用MAX/MIN的子查询,可以通过重写从而利用索引的有序来避免一次聚集运算。"} + OptDMLRuleMoveOrder2LeadingRewriteDesc = &i18n.Message{ID: "OptDMLRuleMoveOrder2LeadingRewriteDesc", Other: "ORDER子句重排序优化"} + OptDMLRuleMoveOrder2LeadingRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleMoveOrder2LeadingRewriteAnnotation", Other: "如果一个查询中既包含来自同一个表的排序字段也包含分组字段,但字段顺序不同,可以通过调整分组字段顺序,使其和排序字段顺序一致,这样数据库可以避免一次排序操作。"} + OptDMLRuleOrCond4SelectRewriteDesc = &i18n.Message{ID: "OptDMLRuleOrCond4SelectRewriteDesc", 
Other: "OR条件的SELECT重写"} + OptDMLRuleOrCond4SelectRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleOrCond4SelectRewriteAnnotation", Other: "如果使用OR条件的查询语句,数据库优化器有可能无法使用索引来完成查询,可以把查询语句重写为UNION或UNION ALL查询,以便使用索引提升查询性能。"} + OptDMLRuleOrCond4UpDeleteRewriteDesc = &i18n.Message{ID: "OptDMLRuleOrCond4UpDeleteRewriteDesc", Other: "OR条件的UPDELETE重写"} + OptDMLRuleOrCond4UpDeleteRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleOrCond4UpDeleteRewriteAnnotation", Other: "如果有使用OR条件的UPDATE或DELETE语句,数据库优化器有可能无法使用索引来完成操作,可以把它重写为多个DELETE语句,利用索引提升查询性能。"} + OptDMLRuleOrderEliminationInSubqueryRewriteDesc = &i18n.Message{ID: "OptDMLRuleOrderEliminationInSubqueryRewriteDesc", Other: "IN子查询中没有LIMIT的排序消除"} + OptDMLRuleOrderEliminationInSubqueryRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleOrderEliminationInSubqueryRewriteAnnotation", Other: "如果子查询没有LIMIT子句,那么子查询的排序操作就没有意义,可以将其删除而不影响最终的结果。"} + OptDMLRuleOrderingFromDiffTablesRewriteDesc = &i18n.Message{ID: "OptDMLRuleOrderingFromDiffTablesRewriteDesc", Other: "避免ORDERBY字段来自不同表"} + OptDMLRuleOrderingFromDiffTablesRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleOrderingFromDiffTablesRewriteAnnotation", Other: "当排序字段来自不同表时,若存在等值条件,可替换这些字段为来自同一张表的字段,利用索引避免额外排序,提升效率。"} + OptDMLRuleOuter2InnerConversionRewriteDesc = &i18n.Message{ID: "OptDMLRuleOuter2InnerConversionRewriteDesc", Other: "外连接优化"} + OptDMLRuleOuter2InnerConversionRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleOuter2InnerConversionRewriteAnnotation", Other: "外连接优化指的是满足一定条件(外表具有NULL拒绝条件)的外连接可以转化为内连接,从而可以让数据库优化器可以选择更优的执行计划,提升SQL查询的性能。"} + OptDMLRuleProjectionPushdownRewriteDesc = &i18n.Message{ID: "OptDMLRuleProjectionPushdownRewriteDesc", Other: "投影下推(PROJECTION PUSHDOWN)"} + OptDMLRuleProjectionPushdownRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleProjectionPushdownRewriteAnnotation", Other: "投影下推指的通过删除DT子查询中无意义的列(在外查询中没有使用),来减少IO和网络的代价,同时提升优化器在进行表访问的规划时,采用无需回表的优化选项的几率。"} + OptDMLRuleQualifierSubQueryRewriteDesc = &i18n.Message{ID: "OptDMLRuleQualifierSubQueryRewriteDesc", 
Other: "修饰子查询重写优化"} + OptDMLRuleQualifierSubQueryRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleQualifierSubQueryRewriteAnnotation", Other: "ANY/SOME/ALL修饰的子查询用于比较值关系,但效率低下因为它们逐行处理比较。通过查询重写可以提升这类子查询的执行效率。"} + OptDMLRuleQueryFoldingRewriteDesc = &i18n.Message{ID: "OptDMLRuleQueryFoldingRewriteDesc", Other: "查询折叠(QUERY FOLDING)"} + OptDMLRuleQueryFoldingRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleQueryFoldingRewriteAnnotation", Other: "查询折叠指的是把视图、CTE或是DT子查询展开,并与引用它的查询语句合并,来减少序列化中间结果集,或是触发更优的关于表连接规划的优化技术。"} + OptDMLRuleSATTCRewriteDesc = &i18n.Message{ID: "OptDMLRuleSATTCRewriteDesc", Other: "SATTC重写优化"} + OptDMLRuleSATTCRewriteAnnotation = &i18n.Message{ID: "OptDMLRuleSATTCRewriteAnnotation", Other: "SAT-TC重写优化通过分析和处理查询条件的逻辑关系,以发现矛盾、简化条件或推断新条件,从而帮助数据库优化器制定更高效的执行计划,提升SQL性能。"} + + OptOracle500Desc = &i18n.Message{ID: "OptOracle500Desc", Other: "NPE重写"} + OptOracle500Annotation = &i18n.Message{ID: "OptOracle500Annotation", Other: "SQL的NPE(Null Pointer Exception)问题是指在SQL查询中,当聚合列全为NULL时,SUM、AVG等聚合函数会返回NULL,这可能会导致后续的程序出现空指针异常。"} + OptOracle501Desc = &i18n.Message{ID: "OptOracle501Desc", Other: "ALL修饰的子查询重写"} + OptOracle501Annotation = &i18n.Message{ID: "OptOracle501Annotation", Other: "如果ALL子查询的结果中存在NULL,这个SQL永远返回为空。正确的写法应该是在子查询里加上非空限制,或使用max/min的写法。"} + OptOracle502Desc = &i18n.Message{ID: "OptOracle502Desc", Other: "COUNT标量子查询重写"} + OptOracle502Annotation = &i18n.Message{ID: "OptOracle502Annotation", Other: "对于使用COUNT标量子查询来进行判断是否存在,可以重写为EXISTS子查询,从而避免一次聚集运算。"} + OptOracle503Desc = &i18n.Message{ID: "OptOracle503Desc", Other: "无条件的DELETE建议重写为Truncate"} + OptOracle503Annotation = &i18n.Message{ID: "OptOracle503Annotation", Other: "TRUNCATE TABLE 比 DELETE 速度快,且使用的系统和事务日志资源少,同时TRUNCATE后表所占用的空间会被释放,而DELETE后需要手工执行OPTIMIZE才能释放表空间"} + OptOracle504Desc = &i18n.Message{ID: "OptOracle504Desc", Other: "隐式类型转换导致索引失效"} + OptOracle504Annotation = &i18n.Message{ID: "OptOracle504Annotation", Other: 
"WHERE条件中使用与过滤字段不一致的数据类型会引发隐式数据类型转换,导致查询有无法命中索引的风险,在高并发、大数据量的情况下,不走索引会使得数据库的查询性能严重下降"} + OptOracle505Desc = &i18n.Message{ID: "OptOracle505Desc", Other: "排序字段方向不同导致索引失效"} + OptOracle505Annotation = &i18n.Message{ID: "OptOracle505Annotation", Other: "ORDER BY 子句中的所有表达式需要按统一的 ASC 或 DESC 方向排序,才能利用索引来避免排序;如果ORDER BY 语句对多个不同条件使用不同方向的排序无法使用索引"} + OptOracle506Desc = &i18n.Message{ID: "OptOracle506Desc", Other: "索引列上的运算导致索引失效"} + OptOracle506Annotation = &i18n.Message{ID: "OptOracle506Annotation", Other: "在索引列上的运算将导致索引失效,容易造成全表扫描,产生严重的性能问题。所以需要尽量将索引列上的运算转换到常量端进行。"} + OptOracle507Desc = &i18n.Message{ID: "OptOracle507Desc", Other: "HAVING条件下推"} + OptOracle507Annotation = &i18n.Message{ID: "OptOracle507Annotation", Other: "从逻辑上,HAVING条件是在分组之后执行的,而WHERE子句上的条件可以在表访问的时候(索引访问),或是表访问之后、分组之前执行,这两种条件都比在分组之后执行代价要小。"} + OptOracle508Desc = &i18n.Message{ID: "OptOracle508Desc", Other: "禁止使用=NULL判断空值"} + OptOracle508Annotation = &i18n.Message{ID: "OptOracle508Annotation", Other: "= null并不能判断表达式为空,= null总是被判断为假。判断表达式为空应该使用is null。"} + OptOracle509Desc = &i18n.Message{ID: "OptOracle509Desc", Other: "IN子查询优化"} + OptOracle509Annotation = &i18n.Message{ID: "OptOracle509Annotation", Other: "IN子查询是指符合下面形式的子查询,IN子查询可以改写成等价的相关EXISTS子查询或是内连接,从而可以产生一个新的过滤条件。"} + OptOracle510Desc = &i18n.Message{ID: "OptOracle510Desc", Other: "IN可空子查询可能导致结果集不符合预期"} + OptOracle510Annotation = &i18n.Message{ID: "OptOracle510Annotation", Other: "查询条件永远非真,这将导致查询无匹配到的结果"} + OptOracle511Desc = &i18n.Message{ID: "OptOracle511Desc", Other: "避免使用没有通配符的 LIKE 查询"} + OptOracle511Annotation = &i18n.Message{ID: "OptOracle511Annotation", Other: "不包含通配符的LIKE 查询逻辑上与等值查询相同,建议使用等值查询替代。而且不包含通配符的LIKE 查询逻辑通常是由于开发者错误导致的,可能不符合其期望的业务逻辑实现"} + OptOracle512Desc = &i18n.Message{ID: "OptOracle512Desc", Other: "建议使用'<>'代替'!='"} + OptOracle512Annotation = &i18n.Message{ID: "OptOracle512Annotation", Other: "'!=' 是非标准的运算符,'<>' 才是SQL中标准的不等于运算符"} + OptOracle513Desc = &i18n.Message{ID: "OptOracle513Desc", Other: "子查询中的DISTINCT消除"} + 
OptOracle513Annotation = &i18n.Message{ID: "OptOracle513Annotation", Other: "对于仅进行存在性测试的子查询,如果子查询包含DISTINCT通常可以删除,以避免一次去重操作。"} + OptOracle514Desc = &i18n.Message{ID: "OptOracle514Desc", Other: "EXISTS查询转换为表连接"} + OptOracle514Annotation = &i18n.Message{ID: "OptOracle514Annotation", Other: "EXISTS子查询可以在适当情况下转换为JOIN来优化查询,提高数据库处理效率和性能。"} + OptOracle515Desc = &i18n.Message{ID: "OptOracle515Desc", Other: "过滤谓词下推"} + OptOracle515Annotation = &i18n.Message{ID: "OptOracle515Annotation", Other: "滤条件下推(FPPD)是一种通过将过滤条件提前应用于内部查询块,以减少数据处理量并提升SQL执行效率。"} + OptOracle516Desc = &i18n.Message{ID: "OptOracle516Desc", Other: "GROUPBY字段来自不同表"} + OptOracle516Annotation = &i18n.Message{ID: "OptOracle516Annotation", Other: "如果分组字段来自不同的表,数据库优化器将没有办法利用索引的有序性来避免一次排序,如果存在等值条件,可以替换这些字段为来自同一张表的字段,以利用索引优化排序和提高查询效率。"} + OptOracle517Desc = &i18n.Message{ID: "OptOracle517Desc", Other: "表连接消除"} + OptOracle517Annotation = &i18n.Message{ID: "OptOracle517Annotation", Other: "在不影响结果的情况下通过删除不必要的表连接来简化查询并提升性能,适用于查询仅涉及到主表主键列的场景。"} + OptOracle518Desc = &i18n.Message{ID: "OptOracle518Desc", Other: "MAX/MIN子查询重写"} + OptOracle518Annotation = &i18n.Message{ID: "OptOracle518Annotation", Other: "对于使用MAX/MIN的子查询,可以通过重写从而利用索引的有序来避免一次聚集运算。"} + OptOracle519Desc = &i18n.Message{ID: "OptOracle519Desc", Other: "ORDER子句重排序优化"} + OptOracle519Annotation = &i18n.Message{ID: "OptOracle519Annotation", Other: "如果一个查询中既包含来自同一个表的排序字段也包含分组字段,但字段顺序不同,可以通过调整分组字段顺序,使其和排序字段顺序一致,这样数据库可以避免一次排序操作。"} + OptOracle520Desc = &i18n.Message{ID: "OptOracle520Desc", Other: "OR条件的SELECT重写"} + OptOracle520Annotation = &i18n.Message{ID: "OptOracle520Annotation", Other: "如果使用OR条件的查询语句,数据库优化器有可能无法使用索引来完成查询,可以把查询语句重写为UNION或UNION ALL查询,以便使用索引提升查询性能。"} + OptOracle521Desc = &i18n.Message{ID: "OptOracle521Desc", Other: "OR条件的UPDELETE重写"} + OptOracle521Annotation = &i18n.Message{ID: "OptOracle521Annotation", Other: "如果有使用OR条件的UPDATE或DELETE语句,数据库优化器有可能无法使用索引来完成操作,可以把它重写为多个DELETE语句,利用索引提升查询性能。"} + OptOracle522Desc = &i18n.Message{ID: "OptOracle522Desc", Other: 
"避免ORDERBY字段来自不同表"} + OptOracle522Annotation = &i18n.Message{ID: "OptOracle522Annotation", Other: "当排序字段来自不同表时,若存在等值条件,可替换这些字段为来自同一张表的字段,利用索引避免额外排序,提升效率。"} + OptOracle523Desc = &i18n.Message{ID: "OptOracle523Desc", Other: "外连接优化"} + OptOracle523Annotation = &i18n.Message{ID: "OptOracle523Annotation", Other: "外连接优化指的是满足一定条件(外表具有NULL拒绝条件)的外连接可以转化为内连接,从而可以让数据库优化器可以选择更优的执行计划,提升SQL查询的性能。"} + OptOracle524Desc = &i18n.Message{ID: "OptOracle524Desc", Other: "投影下推(PROJECTION PUSHDOWN)"} + OptOracle524Annotation = &i18n.Message{ID: "OptOracle524Annotation", Other: "投影下推指的通过删除DT子查询中无意义的列(在外查询中没有使用),来减少IO和网络的代价,同时提升优化器在进行表访问的规划时,采用无需回表的优化选项的几率。"} + OptOracle525Desc = &i18n.Message{ID: "OptOracle525Desc", Other: "修饰子查询重写优化"} + OptOracle525Annotation = &i18n.Message{ID: "OptOracle525Annotation", Other: "ANY/SOME/ALL修饰的子查询用于比较值关系,但效率低下因为它们逐行处理比较。通过查询重写可以提升这类子查询的执行效率。"} + OptOracle526Desc = &i18n.Message{ID: "OptOracle526Desc", Other: "查询折叠(QUERY FOLDING)"} + OptOracle526Annotation = &i18n.Message{ID: "OptOracle526Annotation", Other: "查询折叠指的是把视图、CTE或是DT子查询展开,并与引用它的查询语句合并,来减少序列化中间结果集,或是触发更优的关于表连接规划的优化技术。"} + OptOracle527Desc = &i18n.Message{ID: "OptOracle527Desc", Other: "SATTC重写优化"} + OptOracle527Annotation = &i18n.Message{ID: "OptOracle527Annotation", Other: "SAT-TC重写优化通过分析和处理查询条件的逻辑关系,以发现矛盾、简化条件或推断新条件,从而帮助数据库优化器制定更高效的执行计划,提升SQL性能。"} +) diff --git a/sqle/driver/mysql/pt_osc.go b/sqle/driver/mysql/pt_osc.go index 9385393817..afb24a3a0a 100644 --- a/sqle/driver/mysql/pt_osc.go +++ b/sqle/driver/mysql/pt_osc.go @@ -9,6 +9,10 @@ import ( "sync" "text/template" + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" + driverV2 "github.com/actiontech/sqle/sqle/driver/v2" + "github.com/actiontech/sqle/sqle/locale" + "github.com/actiontech/sqle/sqle/driver/mysql/util" "github.com/pingcap/parser/ast" ) @@ -30,48 +34,41 @@ func LoadPtTemplateFromFile(fileName string) error { return nil } -const ( - PTOSCNoUniqueIndexOrPrimaryKey = "至少要包含主键或者唯一键索引才能使用 pt-online-schema-change" - 
PTOSCAvoidUniqueIndex = "添加唯一键使用 pt-online-schema-change,可能会导致数据丢失,在数据迁移到新表时使用了insert ignore" - PTOSCAvoidRenameTable = "pt-online-schema-change 不支持使用rename table 来重命名表" - PTOSCAvoidNoDefaultValueOnNotNullColumn = "非空字段必须设置默认值,不然 pt-online-schema-change 会执行失败" -) - // generateOSCCommandLine generate pt-online-schema-change command-line statement; // see https://www.percona.com/doc/percona-toolkit/LATEST/pt-online-schema-change.html. -func (i *MysqlDriverImpl) generateOSCCommandLine(node ast.Node) (string, error) { +func (i *MysqlDriverImpl) generateOSCCommandLine(node ast.Node) (driverV2.I18nStr, error) { if i.cnf.DDLOSCMinSize < 0 { - return "", nil + return nil, nil } stmt, ok := node.(*ast.AlterTableStmt) if !ok { - return "", nil + return nil, nil } tableSize, err := i.Ctx.GetTableSize(stmt.Table) if err != nil { - return "", err + return nil, err } if int64(tableSize) < i.cnf.DDLOSCMinSize { - return "", err + return nil, err } createTableStmt, exist, err := i.Ctx.GetCreateTableStmt(stmt.Table) if !exist || err != nil { - return "", err + return nil, err } // In almost all cases a PRIMARY KEY or UNIQUE INDEX needs to be present in the table. // This is necessary because the tool creates a DELETE trigger to keep the new table // updated while the process is running. if !util.HasPrimaryKey(createTableStmt) && !util.HasUniqIndex(createTableStmt) { - return PTOSCNoUniqueIndexOrPrimaryKey, nil + return plocale.ShouldLocalizeAll(plocale.PTOSCNoUniqueIndexOrPrimaryKey), nil } // The RENAME clause cannot be used to rename the table. 
if len(util.GetAlterTableSpecByTp(stmt.Specs, ast.AlterTableRenameTable)) > 0 { - return PTOSCAvoidRenameTable, nil + return plocale.ShouldLocalizeAll(plocale.PTOSCAvoidRenameTable), nil } // If you add a column without a default value and make it NOT NULL, the tool will fail, @@ -80,7 +77,7 @@ func (i *MysqlDriverImpl) generateOSCCommandLine(node ast.Node) (string, error) for _, col := range spec.NewColumns { if util.HasOneInOptions(col.Options, ast.ColumnOptionNotNull) { if !util.HasOneInOptions(col.Options, ast.ColumnOptionDefaultValue) { - return PTOSCAvoidNoDefaultValueOnNotNullColumn, nil + return plocale.ShouldLocalizeAll(plocale.PTOSCAvoidNoDefaultValueOnNotNullColumn), nil } } } @@ -92,7 +89,7 @@ func (i *MysqlDriverImpl) generateOSCCommandLine(node ast.Node) (string, error) for _, spec := range util.GetAlterTableSpecByTp(stmt.Specs, ast.AlterTableAddConstraint) { switch spec.Constraint.Tp { case ast.ConstraintUniq: - return PTOSCAvoidUniqueIndex, nil + return plocale.ShouldLocalizeAll(plocale.PTOSCAvoidUniqueIndex), nil } } @@ -116,7 +113,7 @@ func (i *MysqlDriverImpl) generateOSCCommandLine(node ast.Node) (string, error) } if len(changes) <= 0 { - return "", nil + return nil, nil } ptTemplateMutex.Lock() @@ -124,9 +121,9 @@ func (i *MysqlDriverImpl) generateOSCCommandLine(node ast.Node) (string, error) ptTemplateMutex.Unlock() tp, err := template.New("tp").Parse(text) if err != nil { - return "", err + return nil, err } - buff := bytes.NewBufferString("") + buff := bytes.NewBufferString("[osc]") err = tp.Execute(buff, map[string]interface{}{ "Alter": strings.Join(changes, ","), "Host": i.inst.Host, @@ -135,5 +132,5 @@ func (i *MysqlDriverImpl) generateOSCCommandLine(node ast.Node) (string, error) "Schema": i.Ctx.GetSchemaName(stmt.Table), "Table": stmt.Table.Name.String(), }) - return buff.String(), err + return map[string]string{locale.DefaultLang.String(): buff.String()}, err } diff --git a/sqle/driver/mysql/pt_osc_test.go 
b/sqle/driver/mysql/pt_osc_test.go index fe553d92f7..371ca3036d 100644 --- a/sqle/driver/mysql/pt_osc_test.go +++ b/sqle/driver/mysql/pt_osc_test.go @@ -4,28 +4,30 @@ import ( "fmt" "testing" + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" "github.com/actiontech/sqle/sqle/driver/mysql/util" + "github.com/actiontech/sqle/sqle/locale" "github.com/stretchr/testify/assert" ) func TestPTOSC(t *testing.T) { - expect := "pt-online-schema-change D=exist_db,t=%s --alter='%s' --host=127.0.0.1 --user=root --port=3306 --ask-pass --print --execute" + expect := "[osc]pt-online-schema-change D=exist_db,t=%s --alter='%s' --host=127.0.0.1 --user=root --port=3306 --ask-pass --print --execute" runOSCCase(t, "add column not null no default", "alter table exist_tb_1 add column v3 varchar(255) NOT NULL;", - PTOSCAvoidNoDefaultValueOnNotNullColumn) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.PTOSCAvoidNoDefaultValueOnNotNullColumn)) runOSCCase(t, "not pk and unique key", "alter table exist_tb_3 add column v3 varchar(255);", - PTOSCNoUniqueIndexOrPrimaryKey) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.PTOSCNoUniqueIndexOrPrimaryKey)) runOSCCase(t, "rename table", "alter table exist_tb_1 rename as not_exist_tb_1;", - PTOSCAvoidRenameTable) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.PTOSCAvoidRenameTable)) runOSCCase(t, "add unique index", "alter table exist_tb_1 add unique index u_1 (v1) ", - PTOSCAvoidUniqueIndex) + plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.PTOSCAvoidUniqueIndex)) runOSCCase(t, "add column ok", "alter table exist_tb_1 add column v3 varchar(255);", @@ -57,5 +59,5 @@ func runOSCCase(t *testing.T, desc string, sql, expect string) { t.Error(err) return } - assert.Equal(t, expect, actual, desc) + assert.Equal(t, expect, actual[locale.DefaultLang.String()], desc) } diff --git a/sqle/driver/mysql/rollback.go b/sqle/driver/mysql/rollback.go index 9d4a7def67..dce60514bc 100644 --- 
a/sqle/driver/mysql/rollback.go +++ b/sqle/driver/mysql/rollback.go @@ -7,6 +7,9 @@ import ( "strconv" "strings" + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" + driverV2 "github.com/actiontech/sqle/sqle/driver/v2" + "github.com/actiontech/sqle/sqle/driver/mysql/util" "github.com/actiontech/sqle/sqle/errors" @@ -15,17 +18,17 @@ import ( parserMysql "github.com/pingcap/parser/mysql" ) -func (i *MysqlDriverImpl) GenerateRollbackSql(node ast.Node) (string, string, error) { +func (i *MysqlDriverImpl) GenerateRollbackSql(node ast.Node) (string, driverV2.I18nStr, error) { switch node.(type) { case ast.DDLNode: return i.GenerateDDLStmtRollbackSql(node) case ast.DMLNode: return i.GenerateDMLStmtRollbackSql(node) } - return "", "", nil + return "", nil, nil } -func (i *MysqlDriverImpl) GenerateDDLStmtRollbackSql(node ast.Node) (rollbackSql, unableRollbackReason string, err error) { +func (i *MysqlDriverImpl) GenerateDDLStmtRollbackSql(node ast.Node) (rollbackSql string, unableRollbackReason driverV2.I18nStr, err error) { switch stmt := node.(type) { case *ast.AlterTableStmt: rollbackSql, unableRollbackReason, err = i.generateAlterTableRollbackSql(stmt) @@ -43,22 +46,22 @@ func (i *MysqlDriverImpl) GenerateDDLStmtRollbackSql(node ast.Node) (rollbackSql return rollbackSql, unableRollbackReason, err } -func (i *MysqlDriverImpl) GenerateDMLStmtRollbackSql(node ast.Node) (rollbackSql, unableRollbackReason string, err error) { +func (i *MysqlDriverImpl) GenerateDMLStmtRollbackSql(node ast.Node) (rollbackSql string, unableRollbackReason driverV2.I18nStr, err error) { // MysqlDriverImpl may skip initialized cnf when Audited SQLs in whitelist. 
if i.cnf == nil || i.cnf.DMLRollbackMaxRows < 0 { - return "", "", nil + return "", nil, nil } paramMarkerChecker := util.ParamMarkerChecker{} node.Accept(¶mMarkerChecker) if paramMarkerChecker.HasParamMarker { - return "", NotSupportParamMarkerStatementRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportParamMarkerStatementRollback), nil } hasVarChecker := util.HasVarChecker{} node.Accept(&hasVarChecker) if hasVarChecker.HasVar { - return "", NotSupportHasVariableRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportHasVariableRollback), nil } switch stmt := node.(type) { @@ -72,26 +75,14 @@ func (i *MysqlDriverImpl) GenerateDMLStmtRollbackSql(node ast.Node) (rollbackSql return } -const ( - NotSupportStatementRollback = "暂不支持回滚该类型的语句" - NotSupportMultiTableStatementRollback = "暂不支持回滚多表的 DML 语句" - NotSupportOnDuplicatStatementRollback = "暂不支持回滚 ON DUPLICATE 语句" - NotSupportSubQueryStatementRollback = "暂不支持回滚带子查询的语句" - NotSupportNoPrimaryKeyTableRollback = "不支持回滚没有主键的表的DML语句" - NotSupportInsertWithoutPrimaryKeyRollback = "不支持回滚 INSERT 没有指定主键的语句" - NotSupportParamMarkerStatementRollback = "不支持回滚包含指纹的语句" - NotSupportHasVariableRollback = "不支持回滚包含变量的 DML 语句" - NotSupportExceedMaxRowsRollback = "预计影响行数超过配置的最大值,不生成回滚语句" -) - // generateAlterTableRollbackSql generate alter table SQL for alter table. 
-func (i *MysqlDriverImpl) generateAlterTableRollbackSql(stmt *ast.AlterTableStmt) (string, string, error) { +func (i *MysqlDriverImpl) generateAlterTableRollbackSql(stmt *ast.AlterTableStmt) (string, driverV2.I18nStr, error) { schemaName := i.Ctx.GetSchemaName(stmt.Table) tableName := stmt.Table.Name.String() createTableStmt, exist, err := i.Ctx.GetCreateTableStmt(stmt.Table) if err != nil || !exist { - return "", "", err + return "", nil, err } rollbackStmt := &ast.AlterTableStmt{ Table: util.NewTableName(schemaName, tableName), @@ -274,53 +265,53 @@ func (i *MysqlDriverImpl) generateAlterTableRollbackSql(stmt *ast.AlterTableStmt } rollbackSql := util.AlterTableStmtFormat(rollbackStmt) - return rollbackSql, "", nil + return rollbackSql, nil, nil } // generateCreateSchemaRollbackSql generate drop database SQL for create database. -func (i *MysqlDriverImpl) generateCreateSchemaRollbackSql(stmt *ast.CreateDatabaseStmt) (string, string, error) { +func (i *MysqlDriverImpl) generateCreateSchemaRollbackSql(stmt *ast.CreateDatabaseStmt) (string, driverV2.I18nStr, error) { schemaName := stmt.Name schemaExist, err := i.Ctx.IsSchemaExist(schemaName) if err != nil { - return "", "", err + return "", nil, err } if schemaExist { - return "", "", err + return "", nil, err } rollbackSql := fmt.Sprintf("DROP DATABASE IF EXISTS `%s`", schemaName) - return rollbackSql, "", nil + return rollbackSql, nil, nil } // generateCreateTableRollbackSql generate drop table SQL for create table. -func (i *MysqlDriverImpl) generateCreateTableRollbackSql(stmt *ast.CreateTableStmt) (string, string, error) { +func (i *MysqlDriverImpl) generateCreateTableRollbackSql(stmt *ast.CreateTableStmt) (string, driverV2.I18nStr, error) { schemaExist, err := i.Ctx.IsSchemaExist(i.Ctx.GetSchemaName(stmt.Table)) if err != nil { - return "", "", err + return "", nil, err } // if schema not exist, create table will be failed. 
don't rollback if !schemaExist { - return "", "", nil + return "", nil, nil } tableExist, err := i.Ctx.IsTableExist(stmt.Table) if err != nil { - return "", "", err + return "", nil, err } if tableExist { - return "", "", nil + return "", nil, nil } rollbackSql := fmt.Sprintf("DROP TABLE IF EXISTS %s", i.getTableNameWithQuote(stmt.Table)) - return rollbackSql, "", nil + return rollbackSql, nil, nil } // generateDropTableRollbackSql generate create table SQL for drop table. -func (i *MysqlDriverImpl) generateDropTableRollbackSql(stmt *ast.DropTableStmt) (string, string, error) { +func (i *MysqlDriverImpl) generateDropTableRollbackSql(stmt *ast.DropTableStmt) (string, driverV2.I18nStr, error) { rollbackSql := "" for _, table := range stmt.Tables { stmt, tableExist, err := i.Ctx.GetCreateTableStmt(table) if err != nil { - return "", "", err + return "", nil, err } // if table not exist, can not rollback it. if !tableExist { @@ -328,24 +319,24 @@ func (i *MysqlDriverImpl) generateDropTableRollbackSql(stmt *ast.DropTableStmt) } rollbackSql += stmt.Text() + ";\n" } - return rollbackSql, "", nil + return rollbackSql, nil, nil } // generateCreateIndexRollbackSql generate drop index SQL for create index. -func (i *MysqlDriverImpl) generateCreateIndexRollbackSql(stmt *ast.CreateIndexStmt) (string, string, error) { - return fmt.Sprintf("DROP INDEX `%s` ON %s", stmt.IndexName, i.getTableNameWithQuote(stmt.Table)), "", nil +func (i *MysqlDriverImpl) generateCreateIndexRollbackSql(stmt *ast.CreateIndexStmt) (string, driverV2.I18nStr, error) { + return fmt.Sprintf("DROP INDEX `%s` ON %s", stmt.IndexName, i.getTableNameWithQuote(stmt.Table)), nil, nil } // generateDropIndexRollbackSql generate create index SQL for drop index. 
-func (i *MysqlDriverImpl) generateDropIndexRollbackSql(stmt *ast.DropIndexStmt) (string, string, error) { +func (i *MysqlDriverImpl) generateDropIndexRollbackSql(stmt *ast.DropIndexStmt) (string, driverV2.I18nStr, error) { indexName := stmt.IndexName createTableStmt, tableExist, err := i.Ctx.GetCreateTableStmt(stmt.Table) if err != nil { - return "", "", err + return "", nil, err } // if table not exist, don't rollback if !tableExist { - return "", "", nil + return "", nil, nil } rollbackSql := "" for _, constraint := range createTableStmt.Constraints { @@ -359,7 +350,7 @@ func (i *MysqlDriverImpl) generateDropIndexRollbackSql(stmt *ast.DropIndexStmt) sql = fmt.Sprintf("CREATE UNIQUE INDEX `%s` ON %s", indexName, i.getTableNameWithQuote(stmt.Table)) default: - return "", NotSupportStatementRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportStatementRollback), nil } if constraint.Option != nil { sql = fmt.Sprintf("%s %s", sql, util.IndexOptionFormat(constraint.Option)) @@ -367,34 +358,34 @@ func (i *MysqlDriverImpl) generateDropIndexRollbackSql(stmt *ast.DropIndexStmt) rollbackSql = sql } } - return rollbackSql, "", nil + return rollbackSql, nil, nil } // generateInsertRollbackSql generate delete SQL for insert. -func (i *MysqlDriverImpl) generateInsertRollbackSql(stmt *ast.InsertStmt) (string, string, error) { +func (i *MysqlDriverImpl) generateInsertRollbackSql(stmt *ast.InsertStmt) (string, driverV2.I18nStr, error) { tables := util.GetTables(stmt.Table.TableRefs) // table just has one in insert stmt. 
if len(tables) != 1 { - return "", NotSupportMultiTableStatementRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportMultiTableStatementRollback), nil } if stmt.OnDuplicate != nil { - return "", NotSupportOnDuplicatStatementRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportOnDuplicatStatementRollback), nil } table := tables[0] createTableStmt, exist, err := i.Ctx.GetCreateTableStmt(table) if err != nil { - return "", "", err + return "", nil, err } // if table not exist, insert will failed. if !exist { - return "", "", nil + return "", nil, nil } pkColumnsName, hasPk, err := i.getPrimaryKey(createTableStmt) if err != nil { - return "", "", err + return "", nil, err } if !hasPk { - return "", NotSupportNoPrimaryKeyTableRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportNoPrimaryKeyTableRollback), nil } rollbackSql := "" @@ -403,7 +394,7 @@ func (i *MysqlDriverImpl) generateInsertRollbackSql(stmt *ast.InsertStmt) (strin // match "insert into table_name value (v1,...)" if stmt.Lists != nil { if int64(len(stmt.Lists)) > i.cnf.DMLRollbackMaxRows { - return "", NotSupportExceedMaxRowsRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportExceedMaxRowsRollback), nil } columnsName := []string{} if stmt.Columns != nil { @@ -419,7 +410,7 @@ func (i *MysqlDriverImpl) generateInsertRollbackSql(stmt *ast.InsertStmt) (strin where := []string{} // mysql will throw error: 1136 (21S01): Column count doesn't match value count if len(columnsName) != len(value) { - return "", "", nil + return "", nil, nil } for n, name := range columnsName { _, isPk := pkColumnsName[name] @@ -428,18 +419,18 @@ func (i *MysqlDriverImpl) generateInsertRollbackSql(stmt *ast.InsertStmt) (strin } } if len(where) != len(pkColumnsName) { - return "", NotSupportInsertWithoutPrimaryKeyRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportInsertWithoutPrimaryKeyRollback), nil } rollbackSql += fmt.Sprintf("DELETE FROM %s 
WHERE %s;\n", i.getTableNameWithQuote(table), strings.Join(where, " AND ")) } - return rollbackSql, "", nil + return rollbackSql, nil, nil } // match "insert into table_name set col_name = value1, ..." if stmt.Setlist != nil { if 1 > i.cnf.DMLRollbackMaxRows { - return "", NotSupportExceedMaxRowsRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportExceedMaxRowsRollback), nil } where := []string{} for _, setExpr := range stmt.Setlist { @@ -450,12 +441,12 @@ func (i *MysqlDriverImpl) generateInsertRollbackSql(stmt *ast.InsertStmt) (strin } } if len(where) != len(pkColumnsName) { - return "", "", nil + return "", nil, nil } rollbackSql = fmt.Sprintf("DELETE FROM %s WHERE %s;\n", i.getTableNameWithQuote(table), strings.Join(where, " AND ")) } - return rollbackSql, "", nil + return rollbackSql, nil, nil } // 将二进制字段转化为十六进制字段 @@ -465,49 +456,49 @@ func getHexStrFromBytesStr(byteStr string) string { } // generateDeleteRollbackSql generate insert SQL for delete. -func (i *MysqlDriverImpl) generateDeleteRollbackSql(stmt *ast.DeleteStmt) (string, string, error) { +func (i *MysqlDriverImpl) generateDeleteRollbackSql(stmt *ast.DeleteStmt) (string, driverV2.I18nStr, error) { // not support multi-table syntax if stmt.IsMultiTable { i.Logger().Infof("not support generate rollback sql with multi-delete statement") - return "", NotSupportMultiTableStatementRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportMultiTableStatementRollback), nil } // sub query statement if util.WhereStmtHasSubQuery(stmt.Where) { i.Logger().Infof("not support generate rollback sql with sub query") - return "", NotSupportSubQueryStatementRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportSubQueryStatementRollback), nil } var err error tables := util.GetTables(stmt.TableRefs.TableRefs) table := tables[0] createTableStmt, exist, err := i.Ctx.GetCreateTableStmt(table) if err != nil || !exist { - return "", "", err + return "", nil, err } _, hasPk, err 
:= i.getPrimaryKey(createTableStmt) if err != nil { - return "", "", err + return "", nil, err } if !hasPk { - return "", NotSupportNoPrimaryKeyTableRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportNoPrimaryKeyTableRollback), nil } var max = i.cnf.DMLRollbackMaxRows limit, err := util.GetLimitCount(stmt.Limit, max+1) if err != nil { - return "", "", err + return "", nil, err } if limit > max { count, err := i.getRecordCount(table, "", stmt.Where, stmt.Order, limit) if err != nil { - return "", "", err + return "", nil, err } if count > max { - return "", NotSupportExceedMaxRowsRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportExceedMaxRowsRollback), nil } } records, err := i.getRecords(table, "", stmt.Where, stmt.Order, limit) if err != nil { - return "", "", err + return "", nil, err } values := []string{} @@ -519,7 +510,7 @@ func (i *MysqlDriverImpl) generateDeleteRollbackSql(stmt *ast.DeleteStmt) (strin } for _, record := range records { if len(record) != len(columnsName) { - return "", "", nil + return "", nil, nil } vs := []string{} for _, name := range columnsName { @@ -543,21 +534,21 @@ func (i *MysqlDriverImpl) generateDeleteRollbackSql(stmt *ast.DeleteStmt) (strin i.getTableNameWithQuote(table), strings.Join(columnsName, "`, `"), strings.Join(values, ", ")) } - return rollbackSql, "", nil + return rollbackSql, nil, nil } // generateUpdateRollbackSql generate update SQL for update. 
-func (i *MysqlDriverImpl) generateUpdateRollbackSql(stmt *ast.UpdateStmt) (string, string, error) { +func (i *MysqlDriverImpl) generateUpdateRollbackSql(stmt *ast.UpdateStmt) (string, driverV2.I18nStr, error) { tableSources := util.GetTableSources(stmt.TableRefs.TableRefs) // multi table syntax if len(tableSources) != 1 { i.Logger().Infof("not support generate rollback sql with multi-update statement") - return "", NotSupportMultiTableStatementRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportMultiTableStatementRollback), nil } // sub query statement if util.WhereStmtHasSubQuery(stmt.Where) { i.Logger().Infof("not support generate rollback sql with sub query") - return "", NotSupportSubQueryStatementRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportSubQueryStatementRollback), nil } var ( table *ast.TableName @@ -570,39 +561,39 @@ func (i *MysqlDriverImpl) generateUpdateRollbackSql(stmt *ast.UpdateStmt) (strin tableAlias = tableSource.AsName.String() case *ast.SelectStmt, *ast.UnionStmt: i.Logger().Infof("not support generate rollback sql with update-select statement") - return "", NotSupportSubQueryStatementRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportSubQueryStatementRollback), nil default: - return "", NotSupportStatementRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportStatementRollback), nil } createTableStmt, exist, err := i.Ctx.GetCreateTableStmt(table) if err != nil || !exist { - return "", "", err + return "", nil, err } pkColumnsName, hasPk, err := i.getPrimaryKey(createTableStmt) if err != nil { - return "", "", err + return "", nil, err } if !hasPk { - return "", NotSupportNoPrimaryKeyTableRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportNoPrimaryKeyTableRollback), nil } var max = i.cnf.DMLRollbackMaxRows limit, err := util.GetLimitCount(stmt.Limit, max+1) if err != nil { - return "", "", err + return "", nil, err } if limit > max { count, err 
:= i.getRecordCount(table, tableAlias, stmt.Where, stmt.Order, limit) if err != nil { - return "", "", err + return "", nil, err } if count > max { - return "", NotSupportExceedMaxRowsRollback, nil + return "", plocale.ShouldLocalizeAll(plocale.NotSupportExceedMaxRowsRollback), nil } } records, err := i.getRecords(table, tableAlias, stmt.Where, stmt.Order, limit) if err != nil { - return "", "", err + return "", nil, err } rollbackSql := "" colNameDefMap := make(map[string]*ast.ColumnDef) @@ -611,7 +602,7 @@ func (i *MysqlDriverImpl) generateUpdateRollbackSql(stmt *ast.UpdateStmt) (strin } for _, record := range records { if len(record) != len(colNameDefMap) { - return "", "", nil + return "", nil, nil } where := []string{} value := []string{} @@ -657,7 +648,7 @@ func (i *MysqlDriverImpl) generateUpdateRollbackSql(stmt *ast.UpdateStmt) (strin rollbackSql += fmt.Sprintf("UPDATE %s SET %s WHERE %s;", i.getTableNameWithQuote(table), strings.Join(value, ", "), strings.Join(where, " AND ")) } - return rollbackSql, "", nil + return rollbackSql, nil, nil } // getRecords select all data which will be update or delete. 
diff --git a/sqle/driver/mysql/rollback_test.go b/sqle/driver/mysql/rollback_test.go index 0149ef417d..4269378d99 100644 --- a/sqle/driver/mysql/rollback_test.go +++ b/sqle/driver/mysql/rollback_test.go @@ -4,6 +4,9 @@ import ( "context" "testing" + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" + "github.com/actiontech/sqle/sqle/locale" + "github.com/actiontech/sqle/sqle/driver/mysql/util" "github.com/stretchr/testify/assert" ) @@ -38,7 +41,7 @@ func TestRollbackWithVariable(t *testing.T) { i := DefaultMysqlInspect() rollbackSQL, reason, err := i.GenRollbackSQL(context.Background(), sql) assert.NoError(t, err) - assert.Equal(t, NotSupportHasVariableRollback, reason) + assert.Equal(t, plocale.ShouldLocalizeMessage(plocale.DefaultLocalizer, plocale.NotSupportHasVariableRollback), reason[locale.DefaultLang.String()]) assert.Equal(t, "", rollbackSQL) } } diff --git a/sqle/driver/mysql/rule/rule.go b/sqle/driver/mysql/rule/rule.go index afb8e5ccd2..650ddab497 100644 --- a/sqle/driver/mysql/rule/rule.go +++ b/sqle/driver/mysql/rule/rule.go @@ -13,12 +13,15 @@ import ( "github.com/actiontech/sqle/sqle/driver/mysql/executor" "github.com/actiontech/sqle/sqle/driver/mysql/keyword" + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" "github.com/actiontech/sqle/sqle/driver/mysql/session" "github.com/actiontech/sqle/sqle/driver/mysql/util" driverV2 "github.com/actiontech/sqle/sqle/driver/v2" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/log" + "github.com/actiontech/sqle/sqle/pkg/params" "github.com/actiontech/sqle/sqle/utils" - + "github.com/nicksnyder/go-i18n/v2/i18n" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/opcode" @@ -29,6 +32,8 @@ import ( ) // rule type + +// todo i18n del after rewrite rules localized const ( RuleTypeGlobalConfig = "全局配置" RuleTypeNamingConvention = "命名规范" @@ -226,7 +231,42 @@ type RuleHandlerFunc func(input *RuleHandlerInput) error type RuleHandler struct { 
Rule driverV2.Rule - Message string + Message *i18n.Message + Func RuleHandlerFunc + AllowOffline bool + NotAllowOfflineStmts []ast.Node + // 开始事后审核时将会跳过这个值为ture的规则 + OnlyAuditNotExecutedSQL bool + // 事后审核时将会跳过下方列表中的类型 + NotSupportExecutedSQLAuditStmts []ast.Node +} + +type SourceEnum struct { + Value string `json:"value"` + Desc *i18n.Message `json:"desc"` +} + +type SourceParam struct { + Key string `json:"key"` + Value string `json:"value"` + Desc *i18n.Message `json:"desc"` + Type params.ParamType `json:"type"` + Enums []SourceEnum `json:"enums"` +} + +type SourceRule struct { + Name string + Desc *i18n.Message + Annotation *i18n.Message + Category *i18n.Message + Level driverV2.RuleLevel + Params []*SourceParam + Knowledge driverV2.RuleKnowledge +} + +type SourceHandler struct { + Rule SourceRule + Message *i18n.Message Func RuleHandlerFunc AllowOffline bool NotAllowOfflineStmts []ast.Node @@ -236,6 +276,62 @@ type RuleHandler struct { NotSupportExecutedSQLAuditStmts []ast.Node } +// 通过 source* 生成多语言版本的 RuleHandler +func generateRuleHandlers(shs []*SourceHandler) []RuleHandler { + rhs := make([]RuleHandler, len(shs)) + for k, v := range shs { + rhs[k] = RuleHandler{ + Rule: *ConvertSourceRule(&v.Rule), + Message: v.Message, + Func: v.Func, + AllowOffline: v.AllowOffline, + NotAllowOfflineStmts: v.NotAllowOfflineStmts, + OnlyAuditNotExecutedSQL: v.OnlyAuditNotExecutedSQL, + NotSupportExecutedSQLAuditStmts: v.NotSupportExecutedSQLAuditStmts, + } + } + return rhs +} + +func ConvertSourceRule(sr *SourceRule) *driverV2.Rule { + r := &driverV2.Rule{ + Name: sr.Name, + Level: sr.Level, + Params: nil, + I18nRuleInfo: genAllI18nRuleInfo(sr), + } + if info, exist := r.I18nRuleInfo[locale.DefaultLang.String()]; exist { + r.Params = info.Params + } + return r +} + +func genAllI18nRuleInfo(sr *SourceRule) map[string]*driverV2.RuleInfo { + result := make(map[string]*driverV2.RuleInfo, len(plocale.AllLocalizers)) + for langTag, localizer := range plocale.AllLocalizers { + 
newInfo := &driverV2.RuleInfo{ + Desc: plocale.ShouldLocalizeMessage(localizer, sr.Desc), + Annotation: plocale.ShouldLocalizeMessage(localizer, sr.Annotation), + Category: plocale.ShouldLocalizeMessage(localizer, sr.Category), + //Level: sr.Level, + Params: make(params.Params, len(sr.Params)), + Knowledge: driverV2.RuleKnowledge{Content: sr.Knowledge.Content}, //todo i18n Knowledge + } + + for k, v := range sr.Params { + newInfo.Params[k] = ¶ms.Param{ + Key: v.Key, + Value: v.Value, + Desc: plocale.ShouldLocalizeMessage(localizer, v.Desc), + Type: v.Type, + Enums: nil, // all nil now + } + } + result[langTag] = newInfo + } + return result +} + func init() { defaultRulesKnowledge, err := getDefaultRulesKnowledge() if err != nil { @@ -243,7 +339,8 @@ func init() { } for i, rh := range RuleHandlers { if knowledge, ok := defaultRulesKnowledge[rh.Rule.Name]; ok { - rh.Rule.Knowledge = driverV2.RuleKnowledge{Content: knowledge} + // todo i18n Knowledge + rh.Rule.I18nRuleInfo[locale.DefaultLang.String()].Knowledge = driverV2.RuleKnowledge{Content: knowledge} RuleHandlers[i] = rh } RuleHandlerMap[rh.Rule.Name] = rh @@ -262,7 +359,7 @@ func addResult(result *driverV2.AuditResults, currentRule driverV2.Rule, ruleNam } level := currentRule.Level message := RuleHandlerMap[ruleName].Message - result.Add(level, ruleName, message, args...) + result.Add(level, ruleName, plocale.ShouldLocalizeAll(message), args...) 
} func (rh *RuleHandler) IsAllowOfflineRule(node ast.Node) bool { diff --git a/sqle/driver/mysql/rule/rule_list.go b/sqle/driver/mysql/rule/rule_list.go index 7d5d7fd898..2abc8f4cb5 100644 --- a/sqle/driver/mysql/rule/rule_list.go +++ b/sqle/driver/mysql/rule/rule_list.go @@ -4,37 +4,40 @@ package rule import ( + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" driverV2 "github.com/actiontech/sqle/sqle/driver/v2" "github.com/actiontech/sqle/sqle/pkg/params" "github.com/pingcap/parser/ast" ) -var RuleHandlers = []RuleHandler{ +var RuleHandlers = generateRuleHandlers(sourceRuleHandlers) + +var sourceRuleHandlers = []*SourceHandler{ { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLNotAllowInsertAutoincrement, - Desc: "禁止手动设置自增字段值", - Annotation: "手动赋值可能会造成数据空洞,主键冲突", + Desc: plocale.DMLNotAllowInsertAutoincrementDesc, + Annotation: plocale.DMLNotAllowInsertAutoincrementAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "禁止手动设置自增字段值", + Message: plocale.DMLNotAllowInsertAutoincrementMessage, AllowOffline: false, Func: notAllowInsertAutoincrement, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: ConfigDMLRollbackMaxRows, - Desc: "在 DML 语句中预计影响行数超过指定值则不回滚", - Annotation: "大事务回滚,容易影响数据库性能,使得业务发生波动;具体规则阈值可以根据业务需求调整,默认值:1000", + Desc: plocale.ConfigDMLRollbackMaxRowsDesc, + Annotation: plocale.ConfigDMLRollbackMaxRowsAnnotation, //Value: "1000", Level: driverV2.RuleLevelNotice, - Category: RuleTypeGlobalConfig, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeGlobalConfig, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "1000", - Desc: "最大影响行数", + Desc: plocale.ConfigDMLRollbackMaxRowsParams1, Type: params.ParamTypeInt, }, }, @@ -42,18 +45,18 @@ var RuleHandlers = []RuleHandler{ Func: nil, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: ConfigDDLOSCMinSize, - Desc: "改表时,表空间超过指定大小(MB)审核时输出osc改写建议", - Annotation: "开启该规则后会对大表的DDL语句给出 
pt-osc工具的改写建议【需要参考命令进行手工执行,后续会支持自动执行】;直接对大表进行DDL变更时可能会导致长时间锁表问题,影响业务可持续性。具体对大表定义的阈值可以根据业务需求调整,默认值:1024", + Desc: plocale.ConfigDDLOSCMinSizeDesc, + Annotation: plocale.ConfigDDLOSCMinSizeAnnotation, //Value: "16", Level: driverV2.RuleLevelNormal, - Category: RuleTypeGlobalConfig, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeGlobalConfig, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "1024", - Desc: "表空间大小(MB)", + Desc: plocale.ConfigDDLOSCMinSizeParams1, Type: params.ParamTypeInt, }, }, @@ -61,104 +64,105 @@ var RuleHandlers = []RuleHandler{ Func: nil, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckTableSize, - Desc: "不建议对数据量过大的表执行DDL操作", - Annotation: "大表执行DDL,耗时较久且负载较高,长时间占用锁资源,会影响数据库性能;具体规则阈值可以根据业务需求调整,默认值:1024", + Desc: plocale.DDLCheckTableSizeDesc, + Annotation: plocale.DDLCheckTableSizeAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDDLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDDLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "1024", - Desc: "表空间大小(MB)", + Desc: plocale.DDLCheckTableSizeParams1, Type: params.ParamTypeInt, }, }, }, - Message: "执行DDL的表 %v 空间不建议超过 %vMB", + Message: plocale.DDLCheckTableSizeMessage, OnlyAuditNotExecutedSQL: true, Func: checkDDLTableSize, - }, { - Rule: driverV2.Rule{ + }, + { + Rule: SourceRule{ Name: DDLCheckIndexTooMany, - Desc: "单字段上的索引数量不建议超过阈值", - Annotation: "单字段上存在过多索引,一般情况下这些索引都是没有存在价值的;相反,还会降低数据增加删除时的性能,特别是对频繁更新的表来说,负面影响更大;具体规则阈值可以根据业务需求调整,默认值:2", + Desc: plocale.DDLCheckIndexTooManyDesc, + Annotation: plocale.DDLCheckIndexTooManyAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeIndexingConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeIndexingConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "2", - Desc: "单字段的索引数最大值", + Desc: plocale.DDLCheckIndexTooManyParams1, Type: params.ParamTypeInt, }, }, }, - 
Message: "字段 %v 上的索引数量不建议超过%v个", + Message: plocale.DDLCheckIndexTooManyMessage, NotSupportExecutedSQLAuditStmts: []ast.Node{&ast.AlterTableStmt{}, &ast.CreateIndexStmt{}}, Func: checkIndex, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: ConfigDMLExplainPreCheckEnable, - Desc: "使用EXPLAIN加强预检查能力", - Annotation: "通过 EXPLAIN 的形式将待上线的DML进行SQL是否能正确执行的检查,提前发现语句的错误,提高上线成功率", + Desc: plocale.ConfigDMLExplainPreCheckEnableDesc, + Annotation: plocale.ConfigDMLExplainPreCheckEnableAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeGlobalConfig, + Category: plocale.RuleTypeGlobalConfig, }, Func: nil, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckRedundantIndex, - Desc: "不建议创建冗余索引", - Annotation: "MySQL需要单独维护重复的索引,冗余索引增加维护成本,并且优化器在优化查询时需要逐个进行代价计算,影响查询性能", + Desc: plocale.DDLCheckRedundantIndexDesc, + Annotation: plocale.DDLCheckRedundantIndexAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexOptimization, + Category: plocale.RuleTypeIndexOptimization, }, - Message: "%v", + Message: plocale.DDLCheckRedundantIndexMessage, AllowOffline: true, NotSupportExecutedSQLAuditStmts: []ast.Node{&ast.AlterTableStmt{}, &ast.CreateIndexStmt{}}, Func: checkIndex, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckTableSize, - Desc: "不建议对数据量过大的表执行DML操作", - Annotation: "DML操作大表,耗时较久且负载较高,容易影响数据库性能;具体规则阈值可以根据业务需求调整,默认值:1024", + Desc: plocale.DMLCheckTableSizeDesc, + Annotation: plocale.DMLCheckTableSizeAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "1024", - Desc: "表空间大小(MB)", + Desc: plocale.DMLCheckTableSizeParams1, Type: params.ParamTypeInt, }, }, }, - Message: "执行DML的表 %v 空间不建议超过 %vMB", + Message: plocale.DMLCheckTableSizeMessage, Func: checkDMLTableSize, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: ConfigOptimizeIndexEnabled, - Desc: 
"索引创建建议", - Annotation: "通过该规则开启索引优化建议,提供两个参数配置来定义索引优化建议的行为。1. 列区分度最低值阈值(百分制):配置当前表中列的区分度小于多少时,不作为索引的列;2. 联合索引最大列数:限制联合索引给到的列数最大值,防止给出建议的联合索引不符合其他SQL标准", + Desc: plocale.ConfigOptimizeIndexEnabledDesc, + Annotation: plocale.ConfigOptimizeIndexEnabledAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeIndexOptimization, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeIndexOptimization, + Params: []*SourceParam{ + { Key: DefaultMultiParamsFirstKeyName, Value: "2.00", - Desc: "列区分度最低值阈值(百分比)", + Desc: plocale.ConfigOptimizeIndexEnabledParams1, Type: params.ParamTypeFloat64, }, - ¶ms.Param{ + { Key: DefaultMultiParamsSecondKeyName, Value: "3", - Desc: "联合索引最大列数", + Desc: plocale.ConfigOptimizeIndexEnabledParams2, Type: params.ParamTypeInt, }, }, @@ -166,28 +170,28 @@ var RuleHandlers = []RuleHandler{ }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: ConfigSQLIsExecuted, - Desc: "停用上线审核模式", - Annotation: "启用该规则来兼容事后审核的场景,对于事后采集的DDL 和 DML 语句将不再进行上线校验。例如库表元数据的扫描任务可开启该规则", + Desc: plocale.ConfigSQLIsExecutedDesc, + Annotation: plocale.ConfigSQLIsExecutedAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeGlobalConfig, + Category: plocale.RuleTypeGlobalConfig, }, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: ConfigDDLGhostMinSize, - Desc: "改表时,表空间超过指定大小(MB)时使用gh-ost上线", - Annotation: "开启该规则后会自动对大表的DDL操作使用gh-ost 工具进行在线改表;直接对大表进行DDL变更时可能会导致长时间锁表问题,影响业务可持续性。具体对大表定义的阈值可以根据业务需求调整,默认值:1024", + Desc: plocale.ConfigDDLGhostMinSizeDesc, + Annotation: plocale.ConfigDDLGhostMinSizeAnnotation, //Value: "16", Level: driverV2.RuleLevelNormal, - Category: RuleTypeGlobalConfig, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeGlobalConfig, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "1024", - Desc: "表空间大小(MB)", + Desc: plocale.ConfigDDLGhostMinSizeParams1, Type: params.ParamTypeInt, }, }, @@ -197,1953 +201,1989 @@ var RuleHandlers = []RuleHandler{ // rule { - Rule: driverV2.Rule{ + Rule: 
SourceRule{ Name: DDLCheckPKWithoutIfNotExists, - Desc: "新建表建议加入 IF NOT EXISTS,保证重复执行不报错", - Annotation: "新建表如果表已经存在,不添加IF NOT EXISTS CREATE执行SQL会报错,建议开启此规则,避免SQL实际执行报错", + Desc: plocale.DDLCheckPKWithoutIfNotExistsDesc, + Annotation: plocale.DDLCheckPKWithoutIfNotExistsAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeUsageSuggestion, + Category: plocale.RuleTypeUsageSuggestion, }, - Message: "新建表建议加入 IF NOT EXISTS,保证重复执行不报错", + Message: plocale.DDLCheckPKWithoutIfNotExistsMessage, AllowOffline: true, Func: checkIfNotExist, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckObjectNameLength, - Desc: "表名、列名、索引名的长度不建议超过阈值", - Annotation: "通过配置该规则可以规范指定业务的对象命名长度,具体长度可以自定义设置,默认最大长度:64。是MySQL规定标识符命名最大长度为64字节", + Desc: plocale.DDLCheckObjectNameLengthDesc, + Annotation: plocale.DDLCheckObjectNameLengthAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeNamingConvention, + Category: plocale.RuleTypeNamingConvention, //Value: "64", - Params: params.Params{ - ¶ms.Param{ + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "64", - Desc: "最大长度(字节)", + Desc: plocale.DDLCheckObjectNameLengthParams1, Type: params.ParamTypeInt, }, }, }, - Message: "表名、列名、索引名的长度不建议大于%v字节", + Message: plocale.DDLCheckObjectNameLengthMessage, AllowOffline: true, Func: checkNewObjectName, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckObjectNameIsUpperAndLowerLetterMixed, - Desc: "数据库对象命名不建议大小写字母混合", - Annotation: "数据库对象命名规范,不推荐采用大小写混用的形式建议词语之间使用下划线连接,提高代码可读性", - Category: RuleTypeNamingConvention, + Desc: plocale.DDLCheckObjectNameIsUpperAndLowerLetterMixedDesc, + Annotation: plocale.DDLCheckObjectNameIsUpperAndLowerLetterMixedAnnotation, + Category: plocale.RuleTypeNamingConvention, Level: driverV2.RuleLevelNotice, }, - Message: "数据库对象命名不建议大小写字母混合,以下对象命名不规范:%v", + Message: plocale.DDLCheckObjectNameIsUpperAndLowerLetterMixedMessage, Func: checkIsObjectNameUpperAndLowerLetterMixed, AllowOffline: true, }, { - Rule: driverV2.Rule{ + 
Rule: SourceRule{ Name: DDLCheckPKNotExist, - Desc: "表必须有主键", - Annotation: "主键使数据达到全局唯一,可提高数据检索效率", + Desc: plocale.DDLCheckPKNotExistDesc, + Annotation: plocale.DDLCheckPKNotExistAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexingConvention, + Category: plocale.RuleTypeIndexingConvention, }, - Message: "表必须有主键", + Message: plocale.DDLCheckPKNotExistMessage, AllowOffline: true, NotAllowOfflineStmts: []ast.Node{&ast.AlterTableStmt{}}, NotSupportExecutedSQLAuditStmts: []ast.Node{&ast.AlterTableStmt{}}, Func: checkPrimaryKey, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckPKWithoutAutoIncrement, - Desc: "主键建议使用自增", - Annotation: "自增主键,数字型速度快,而且是增量增长,占用空间小,更快速的做数据插入操作,避免增加维护索引的开销", + Desc: plocale.DDLCheckPKWithoutAutoIncrementDesc, + Annotation: plocale.DDLCheckPKWithoutAutoIncrementAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexingConvention, + Category: plocale.RuleTypeIndexingConvention, }, - Message: "主键建议使用自增", + Message: plocale.DDLCheckPKWithoutAutoIncrementMessage, AllowOffline: true, NotAllowOfflineStmts: []ast.Node{&ast.AlterTableStmt{}}, NotSupportExecutedSQLAuditStmts: []ast.Node{&ast.AlterTableStmt{}}, Func: checkPrimaryKey, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckPKWithoutBigintUnsigned, - Desc: "主键建议使用 BIGINT 无符号类型,即 BIGINT UNSIGNED", - Annotation: "BIGINT UNSIGNED拥有更大的取值范围,建议开启此规则,避免发生溢出", + Desc: plocale.DDLCheckPKWithoutBigintUnsignedDesc, + Annotation: plocale.DDLCheckPKWithoutBigintUnsignedAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexingConvention, + Category: plocale.RuleTypeIndexingConvention, }, - Message: "主键建议使用 BIGINT 无符号类型,即 BIGINT UNSIGNED", + Message: plocale.DDLCheckPKWithoutBigintUnsignedMessage, AllowOffline: true, NotAllowOfflineStmts: []ast.Node{&ast.AlterTableStmt{}}, NotSupportExecutedSQLAuditStmts: []ast.Node{&ast.AlterTableStmt{}}, Func: checkPrimaryKey, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: 
DMLCheckJoinFieldType, - Desc: "建议JOIN字段类型保持一致", - Annotation: "JOIN字段类型不一致会导致类型不匹配发生隐式准换,建议开启此规则,避免索引失效", + Desc: plocale.DMLCheckJoinFieldTypeDesc, + Annotation: plocale.DMLCheckJoinFieldTypeAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "建议JOIN字段类型保持一致, 否则会导致隐式转换", + Message: plocale.DMLCheckJoinFieldTypeMessage, AllowOffline: false, Func: checkJoinFieldType, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckHasJoinCondition, - Desc: "建议连接操作指定连接条件", - Annotation: "指定连接条件可以确保连接操作的正确性和可靠性,如果没有指定连接条件,可能会导致连接失败或连接不正确的情况。", + Desc: plocale.DMLCheckHasJoinConditionDesc, + Annotation: plocale.DMLCheckHasJoinConditionAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "建议连接操作指定连接条件,JOIN字段后必须有ON条件", + Message: plocale.DMLCheckHasJoinConditionMessage, AllowOffline: true, Func: checkHasJoinCondition, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckColumnCharLength, - Desc: "CHAR长度大于20时,必须使用VARCHAR类型", - Annotation: "VARCHAR是变长字段,存储空间小,可节省存储空间,同时相对较小的字段检索效率显然也要高些", + Desc: plocale.DDLCheckColumnCharLengthDesc, + Annotation: plocale.DDLCheckColumnCharLengthAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "CHAR长度大于20时,必须使用VARCHAR类型", + Message: plocale.DDLCheckColumnCharLengthMessage, AllowOffline: true, Func: checkStringType, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckFieldNotNUllMustContainDefaultValue, - Desc: "建议字段约束为NOT NULL时带默认值", - Annotation: "如存在NOT NULL且不带默认值的字段,INSERT时不包含该字段,会导致插入报错", + Desc: plocale.DDLCheckFieldNotNUllMustContainDefaultValueDesc, + Annotation: plocale.DDLCheckFieldNotNUllMustContainDefaultValueAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "建议字段约束为NOT 
NULL时带默认值,以下字段不规范:%v", + Message: plocale.DDLCheckFieldNotNUllMustContainDefaultValueMessage, AllowOffline: true, Func: checkFieldNotNUllMustContainDefaultValue, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLDisableFK, - Desc: "禁止使用外键", - Annotation: "外键在高并发场景下性能较差,容易造成死锁,同时不利于后期维护(拆分、迁移)", + Desc: plocale.DDLDisableFKDesc, + Annotation: plocale.DDLDisableFKAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexingConvention, + Category: plocale.RuleTypeIndexingConvention, }, - Message: "禁止使用外键", + Message: plocale.DDLDisableFKMessage, AllowOffline: true, Func: checkForeignKey, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLDisableAlterFieldUseFirstAndAfter, - Desc: "ALTER表字段禁止使用FIRST,AFTER", - Annotation: "FIRST,AFTER 的ALTER操作通过COPY TABLE的方式完成,对业务影响较大", + Desc: plocale.DDLDisableAlterFieldUseFirstAndAfterDesc, + Annotation: plocale.DDLDisableAlterFieldUseFirstAndAfterAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "ALTER表字段禁止使用FIRST,AFTER", + Message: plocale.DDLDisableAlterFieldUseFirstAndAfterMessage, AllowOffline: true, Func: disableAlterUseFirstAndAfter, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckCreateTimeColumn, - Desc: "建议建表DDL包含创建时间字段且默认值为CURRENT_TIMESTAMP", - Annotation: "使用CREATE_TIME字段,有利于问题查找跟踪和检索数据,同时避免后期对数据生命周期管理不便 ,默认值为CURRENT_TIMESTAMP可保证时间的准确性", + Desc: plocale.DDLCheckCreateTimeColumnDesc, + Annotation: plocale.DDLCheckCreateTimeColumnAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDDLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDDLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "CREATE_TIME", - Desc: "创建时间字段名", + Desc: plocale.DDLCheckCreateTimeColumnParams1, Type: params.ParamTypeString, }, }, }, - Message: "建议建表DDL包含%v字段且默认值为CURRENT_TIMESTAMP", + Message: plocale.DDLCheckCreateTimeColumnMessage, AllowOffline: true, Func: 
checkFieldCreateTime, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckIndexCount, - Desc: "索引个数建议不超过阈值", - Annotation: "在表上建立的每个索引都会增加存储开销,索引对于插入、删除、更新操作也会增加处理上的开销,太多与不充分、不正确的索引对性能都毫无益处;具体规则阈值可以根据业务需求调整,默认值:5", + Desc: plocale.DDLCheckIndexCountDesc, + Annotation: plocale.DDLCheckIndexCountAnnotation, Level: driverV2.RuleLevelNotice, //Value: "5", - Category: RuleTypeIndexingConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeIndexingConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "5", - Desc: "最大索引个数", + Desc: plocale.DDLCheckIndexCountParams1, Type: params.ParamTypeInt, }, }, }, - Message: "索引个数建议不超过%v个", + Message: plocale.DDLCheckIndexCountMessage, AllowOffline: true, NotAllowOfflineStmts: []ast.Node{&ast.AlterTableStmt{}, &ast.CreateIndexStmt{}}, NotSupportExecutedSQLAuditStmts: []ast.Node{&ast.AlterTableStmt{}, &ast.CreateIndexStmt{}}, Func: checkIndex, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckUpdateTimeColumn, - Desc: "建表DDL需要包含更新时间字段且默认值为CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP", - Annotation: "使用更新时间字段,有利于问题查找跟踪和检索数据,同时避免后期对数据生命周期管理不便 ,默认值为UPDATE_TIME可保证时间的准确性", + Desc: plocale.DDLCheckUpdateTimeColumnDesc, + Annotation: plocale.DDLCheckUpdateTimeColumnAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDDLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDDLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "UPDATE_TIME", - Desc: "更新时间字段名", + Desc: plocale.DDLCheckUpdateTimeColumnParams1, Type: params.ParamTypeString, }, }, }, - Message: "建表DDL需要包含%v字段且默认值为CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP", + Message: plocale.DDLCheckUpdateTimeColumnMessage, AllowOffline: true, Func: checkFieldUpdateTime, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckCompositeIndexMax, - Desc: "复合索引的列数量不建议超过阈值", - Annotation: 
"复合索引会根据索引列数创建对应组合的索引,列数越多,创建的索引越多,每个索引都会增加磁盘空间的开销,同时增加索引维护的开销;具体规则阈值可以根据业务需求调整,默认值:3", + Desc: plocale.DDLCheckCompositeIndexMaxDesc, + Annotation: plocale.DDLCheckCompositeIndexMaxAnnotation, Level: driverV2.RuleLevelNotice, //Value: "3", - Category: RuleTypeIndexingConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeIndexingConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "3", - Desc: "最大索引列数量", + Desc: plocale.DDLCheckCompositeIndexMaxParams1, Type: params.ParamTypeInt, }, }, }, - Message: "复合索引的列数量不建议超过%v个", + Message: plocale.DDLCheckCompositeIndexMaxMessage, AllowOffline: true, NotAllowOfflineStmts: []ast.Node{&ast.AlterTableStmt{}, &ast.CreateIndexStmt{}}, NotSupportExecutedSQLAuditStmts: []ast.Node{&ast.AlterTableStmt{}, &ast.CreateIndexStmt{}}, Func: checkIndex, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckIndexNotNullConstraint, - Desc: "索引字段需要有非空约束", - Annotation: "索引字段上如果没有非空约束,则表记录与索引记录不会完全映射。", + Desc: plocale.DDLCheckIndexNotNullConstraintDesc, + Annotation: plocale.DDLCheckIndexNotNullConstraintAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeIndexingConvention, + Category: plocale.RuleTypeIndexingConvention, }, - Message: "这些索引字段(%v)需要有非空约束", + Message: plocale.DDLCheckIndexNotNullConstraintMessage, AllowOffline: true, NotAllowOfflineStmts: []ast.Node{&ast.AlterTableStmt{}, &ast.CreateIndexStmt{}}, Func: checkIndexNotNullConstraint, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckObjectNameUsingKeyword, - Desc: "数据库对象命名禁止使用保留字", - Annotation: "通过配置该规则可以规范指定业务的数据对象命名规则,避免发生冲突,以及混淆", + Desc: plocale.DDLCheckObjectNameUsingKeywordDesc, + Annotation: plocale.DDLCheckObjectNameUsingKeywordAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeNamingConvention, + Category: plocale.RuleTypeNamingConvention, }, - Message: "数据库对象命名禁止使用保留字 %s", + Message: plocale.DDLCheckObjectNameUsingKeywordMessage, AllowOffline: true, Func: checkNewObjectName, 
}, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckObjectNameUseCN, - Desc: "数据库对象命名只能使用英文、下划线或数字,首字母必须是英文", - Annotation: "通过配置该规则可以规范指定业务的数据对象命名规则", + Desc: plocale.DDLCheckObjectNameUseCNDesc, + Annotation: plocale.DDLCheckObjectNameUseCNAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeNamingConvention, + Category: plocale.RuleTypeNamingConvention, }, - Message: "数据库对象命名只能使用英文、下划线或数字,首字母必须是英文", + Message: plocale.DDLCheckObjectNameUseCNMessage, AllowOffline: true, Func: checkNewObjectName, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckTableDBEngine, - Desc: "建议使用指定数据库引擎", - Annotation: "通过配置该规则可以规范指定业务的数据库引擎,具体规则可以自定义设置。默认值是INNODB,INNODB 支持事务,支持行级锁,更好的恢复性,高并发下性能更好", + Desc: plocale.DDLCheckTableDBEngineDesc, + Annotation: plocale.DDLCheckTableDBEngineAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, //Value: "Innodb", - Params: params.Params{ - ¶ms.Param{ + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "Innodb", - Desc: "数据库引擎", + Desc: plocale.DDLCheckTableDBEngineParams1, Type: params.ParamTypeString, }, }, }, - Message: "建议使用%v数据库引擎", + Message: plocale.DDLCheckTableDBEngineMessage, AllowOffline: false, Func: checkEngine, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckTableCharacterSet, - Desc: "建议使用指定数据库字符集", - Annotation: "通过该规则约束全局的数据库字符集,避免创建非预期的字符集,防止业务侧出现“乱码”等问题。建议项目内库表使用统一的字符集和字符集排序,部分连表查询的情况下字段的字符集或排序规则不一致可能会导致索引失效且不易发现", + Desc: plocale.DDLCheckTableCharacterSetDesc, + Annotation: plocale.DDLCheckTableCharacterSetAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, //Value: "utf8mb4", - Params: params.Params{ - ¶ms.Param{ + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "utf8mb4", - Desc: "数据库字符集", + Desc: plocale.DDLCheckTableCharacterSetParams1, Type: params.ParamTypeString, }, }, }, - Message: "建议使用%v数据库字符集", + 
Message: plocale.DDLCheckTableCharacterSetMessage, AllowOffline: false, Func: checkCharacterSet, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckIndexedColumnWithBlob, - Desc: "禁止将BLOB类型的列加入索引", - Annotation: "BLOB类型属于大字段类型,作为索引会占用很大的存储空间", + Desc: plocale.DDLCheckIndexedColumnWithBlobDesc, + Annotation: plocale.DDLCheckIndexedColumnWithBlobAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexingConvention, + Category: plocale.RuleTypeIndexingConvention, }, - Message: "禁止将BLOB类型的列加入索引", + Message: plocale.DDLCheckIndexedColumnWithBlobMessage, AllowOffline: true, NotAllowOfflineStmts: []ast.Node{&ast.AlterTableStmt{}, &ast.CreateIndexStmt{}}, NotSupportExecutedSQLAuditStmts: []ast.Node{&ast.AlterTableStmt{}, &ast.CreateIndexStmt{}}, Func: disableAddIndexForColumnsTypeBlob, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckWhereIsInvalid, - Desc: "禁止使用没有WHERE条件或者WHERE条件恒为TRUE的SQL", - Annotation: "SQL缺少WHERE条件在执行时会进行全表扫描产生额外开销,建议在大数据量高并发环境下开启,避免影响数据库查询性能", + Desc: plocale.DMLCheckWhereIsInvalidDesc, + Annotation: plocale.DMLCheckWhereIsInvalidAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "禁止使用没有WHERE条件或者WHERE条件恒为TRUE的SQL", + Message: plocale.DMLCheckWhereIsInvalidMessage, AllowOffline: true, Func: checkSelectWhere, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckAlterTableNeedMerge, - Desc: "存在多条对同一个表的修改语句,建议合并成一个ALTER语句", - Annotation: "避免多次 TABLE REBUILD 带来的消耗、以及对线上业务的影响", + Desc: plocale.DDLCheckAlterTableNeedMergeDesc, + Annotation: plocale.DDLCheckAlterTableNeedMergeAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeUsageSuggestion, + Category: plocale.RuleTypeUsageSuggestion, }, - Message: "已存在对该表的修改语句,建议合并成一个ALTER语句", + Message: plocale.DDLCheckAlterTableNeedMergeMessage, AllowOffline: false, OnlyAuditNotExecutedSQL: true, Func: checkMergeAlterTable, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: 
DMLDisableSelectAllColumn, - Desc: "不建议使用SELECT *", - Annotation: "当表结构变更时,使用*通配符选择所有列将导致查询行为会发生更改,与业务期望不符;同时SELECT * 中的无用字段会带来不必要的磁盘I/O,以及网络开销,且无法覆盖索引进而回表,大幅度降低查询效率", + Desc: plocale.DMLDisableSelectAllColumnDesc, + Annotation: plocale.DMLDisableSelectAllColumnAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议使用SELECT *", + Message: plocale.DMLDisableSelectAllColumnMessage, AllowOffline: true, Func: checkSelectAll, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLDisableDropStatement, - Desc: "禁止除索引外的DROP操作", - Annotation: "DROP是DDL,数据变更不会写入日志,无法进行回滚;建议开启此规则,避免误删除操作", + Desc: plocale.DDLDisableDropStatementDesc, + Annotation: plocale.DDLDisableDropStatementAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeUsageSuggestion, + Category: plocale.RuleTypeUsageSuggestion, }, - Message: "禁止除索引外的DROP操作", + Message: plocale.DDLDisableDropStatementMessage, AllowOffline: true, Func: disableDropStmt, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckTableWithoutComment, - Desc: "表建议添加注释", - Annotation: "表添加注释能够使表的意义更明确,方便日后的维护", + Desc: plocale.DDLCheckTableWithoutCommentDesc, + Annotation: plocale.DDLCheckTableWithoutCommentAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "表建议添加注释", + Message: plocale.DDLCheckTableWithoutCommentMessage, AllowOffline: true, Func: checkTableWithoutComment, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckColumnWithoutComment, - Desc: "列建议添加注释", - Annotation: "列添加注释能够使列的意义更明确,方便日后的维护", + Desc: plocale.DDLCheckColumnWithoutCommentDesc, + Annotation: plocale.DDLCheckColumnWithoutCommentAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "列建议添加注释", + Message: plocale.DDLCheckColumnWithoutCommentMessage, AllowOffline: true, Func: 
checkColumnWithoutComment, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckIndexPrefix, - Desc: "建议普通索引使用固定前缀", - Annotation: "通过配置该规则可以规范指定业务的索引命名规则,具体命名规范可以自定义设置,默认提示值:idx_", + Desc: plocale.DDLCheckIndexPrefixDesc, + Annotation: plocale.DDLCheckIndexPrefixAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeNamingConvention, + Category: plocale.RuleTypeNamingConvention, //Value: "idx_", - Params: params.Params{ - ¶ms.Param{ + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "idx_", - Desc: "索引前缀", + Desc: plocale.DDLCheckIndexPrefixParams1, Type: params.ParamTypeString, }, }, }, - Message: "建议普通索引要以\"%v\"为前缀", + Message: plocale.DDLCheckIndexPrefixMessage, AllowOffline: true, Func: checkIndexPrefix, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckUniqueIndexPrefix, - Desc: "建议UNIQUE索引使用固定前缀", - Annotation: "通过配置该规则可以规范指定业务的UNIQUE索引命名规则,具体命名规范可以自定义设置,默认提示值:uniq_", + Desc: plocale.DDLCheckUniqueIndexPrefixDesc, + Annotation: plocale.DDLCheckUniqueIndexPrefixAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeNamingConvention, + Category: plocale.RuleTypeNamingConvention, //Value: "uniq_", - Params: params.Params{ - ¶ms.Param{ + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "uniq_", - Desc: "索引前缀", + Desc: plocale.DDLCheckUniqueIndexPrefixParams1, Type: params.ParamTypeString, }, }, }, - Message: "建议UNIQUE索引要以\"%v\"为前缀", + Message: plocale.DDLCheckUniqueIndexPrefixMessage, AllowOffline: true, Func: checkUniqIndexPrefix, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckUniqueIndex, - Desc: "建议UNIQUE索引名使用 IDX_UK_表名_字段名", - Annotation: "通过配置该规则可以规范指定业务的UNIQUE索引命名规则", + Desc: plocale.DDLCheckUniqueIndexDesc, + Annotation: plocale.DDLCheckUniqueIndexAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeNamingConvention, + Category: plocale.RuleTypeNamingConvention, }, - Message: "建议UNIQUE索引名使用 IDX_UK_表名_字段名", + Message: plocale.DDLCheckUniqueIndexMessage, 
AllowOffline: true, Func: checkUniqIndex, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckColumnWithoutDefault, - Desc: "除了自增列及大字段列之外,每个列都必须添加默认值", - Annotation: "列添加默认值,可避免列为NULL值时对查询的影响", + Desc: plocale.DDLCheckColumnWithoutDefaultDesc, + Annotation: plocale.DDLCheckColumnWithoutDefaultAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "除了自增列及大字段列之外,每个列都必须添加默认值", + Message: plocale.DDLCheckColumnWithoutDefaultMessage, AllowOffline: true, Func: checkColumnWithoutDefault, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckColumnTimestampWithoutDefault, - Desc: "TIMESTAMP 类型的列必须添加默认值", - Annotation: "TIMESTAMP添加默认值,可避免出现全为0的日期格式与业务预期不符", + Desc: plocale.DDLCheckColumnTimestampWithoutDefaultDesc, + Annotation: plocale.DDLCheckColumnTimestampWithoutDefaultAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "TIMESTAMP 类型的列必须添加默认值", + Message: plocale.DDLCheckColumnTimestampWithoutDefaultMessage, AllowOffline: true, Func: checkColumnTimestampWithoutDefault, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckColumnBlobWithNotNull, - Desc: "BLOB 和 TEXT 类型的字段不建议设置为 NOT NULL", - Annotation: "BLOB 和 TEXT 类型的字段无法指定默认值,如插入数据不指定字段默认为NULL,如果添加了 NOT NULL 限制,写入数据时又未对该字段指定值会导致写入失败", + Desc: plocale.DDLCheckColumnBlobWithNotNullDesc, + Annotation: plocale.DDLCheckColumnBlobWithNotNullAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "BLOB 和 TEXT 类型的字段不建议设置为 NOT NULL", + Message: plocale.DDLCheckColumnBlobWithNotNullMessage, AllowOffline: true, Func: checkColumnBlobNotNull, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckColumnBlobDefaultIsNotNull, - Desc: "BLOB 和 TEXT 类型的字段默认值只能为NULL", - Annotation: "在SQL_MODE严格模式下BLOB 和 TEXT 类型无法设置默认值,如插入数据不指定值,字段会被设置为NULL", + Desc: 
plocale.DDLCheckColumnBlobDefaultIsNotNullDesc, + Annotation: plocale.DDLCheckColumnBlobDefaultIsNotNullAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "BLOB 和 TEXT 类型的字段默认值只能为NULL", + Message: plocale.DDLCheckColumnBlobDefaultIsNotNullMessage, AllowOffline: true, Func: checkColumnBlobDefaultNull, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckAutoIncrementFieldNum, - Desc: "建表时,自增字段只能设置一个", - Annotation: "MySQL InnoDB,MyISAM 引擎不允许存在多个自增字段,设置多个自增字段会导致上线失败。", + Desc: plocale.DDLCheckAutoIncrementFieldNumDesc, + Annotation: plocale.DDLCheckAutoIncrementFieldNumAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, AllowOffline: true, - Message: "建表时,自增字段只能设置一个", + Message: plocale.DDLCheckAutoIncrementFieldNumMessage, Func: checkAutoIncrementFieldNum, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckAllIndexNotNullConstraint, - Desc: "建议为至少一个索引添加非空约束", - Annotation: "所有索引字段均未做非空约束,请确认下表索引规划的合理性。", + Desc: plocale.DDLCheckAllIndexNotNullConstraintDesc, + Annotation: plocale.DDLCheckAllIndexNotNullConstraintAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, AllowOffline: true, - Message: "建议为至少一个索引添加非空约束", + Message: plocale.DDLCheckAllIndexNotNullConstraintMessage, Func: checkAllIndexNotNullConstraint, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckWithLimit, - Desc: "DELETE/UPDATE 语句不能有LIMIT条件", - Annotation: "DELETE/UPDATE 语句使用LIMIT条件将随机选取数据进行删除或者更新,业务无法预期", + Desc: plocale.DMLCheckWithLimitDesc, + Annotation: plocale.DMLCheckWithLimitAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "DELETE/UPDATE 语句不能有LIMIT条件", + Message: plocale.DMLCheckWithLimitMessage, AllowOffline: true, Func: checkDMLWithLimit, }, { - Rule: 
driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckSelectLimit, - Desc: "SELECT 语句需要带LIMIT", - Annotation: "如果查询的扫描行数很大,可能会导致优化器选择错误的索引甚至不走索引;具体规则阈值可以根据业务需求调整,默认值:1000", + Desc: plocale.DMLCheckSelectLimitDesc, + Annotation: plocale.DMLCheckSelectLimitAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "1000", - Desc: "最大查询行数", + Desc: plocale.DMLCheckSelectLimitParams1, Type: params.ParamTypeInt, }, }, }, - Message: "SELECT 语句需要带LIMIT,且限制数不得超过%v", + Message: plocale.DMLCheckSelectLimitMessage, AllowOffline: true, Func: checkSelectLimit, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckWithOrderBy, - Desc: "DELETE/UPDATE 语句不能有ORDER BY", - Annotation: "DELETE/UPDATE 存在ORDER BY会使用排序,带来无谓的开销", + Desc: plocale.DMLCheckWithOrderByDesc, + Annotation: plocale.DMLCheckWithOrderByAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "DELETE/UPDATE 语句不能有ORDER BY", + Message: plocale.DMLCheckWithOrderByMessage, AllowOffline: true, Func: checkDMLWithOrderBy, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckSelectWithOrderBy, - Desc: "SELECT 语句不能有ORDER BY", - Annotation: "ORDER BY 对查询性能影响较大,同时不便于优化维护,建议将排序部分放到业务处理", + Desc: plocale.DMLCheckSelectWithOrderByDesc, + Annotation: plocale.DMLCheckSelectWithOrderByAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "SELECT 语句不能有ORDER BY", + Message: plocale.DMLCheckSelectWithOrderByMessage, AllowOffline: true, Func: checkSelectWithOrderBy, }, { // TODO: 修改level以适配默认模板 - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckInsertColumnsExist, - Desc: "INSERT 语句需要指定COLUMN", - Annotation: "当表结构发生变更,INSERT请求不明确指定列名,会发生插入数据不匹配的情况;建议开启此规则,避免插入结果与业务预期不符", + Desc: 
plocale.DMLCheckInsertColumnsExistDesc, + Annotation: plocale.DMLCheckInsertColumnsExistAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "INSERT 语句需要指定COLUMN", + Message: plocale.DMLCheckInsertColumnsExistMessage, AllowOffline: true, Func: checkDMLWithInsertColumnExist, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckBatchInsertListsMax, - Desc: "单条INSERT语句,建议批量插入不超过阈值", - Annotation: "避免大事务,以及降低发生回滚对业务的影响;具体规则阈值可以根据业务需求调整,默认值:100", + Desc: plocale.DMLCheckBatchInsertListsMaxDesc, + Annotation: plocale.DMLCheckBatchInsertListsMaxAnnotation, Level: driverV2.RuleLevelNotice, //Value: "5000", - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "100", - Desc: "最大插入行数", + Desc: plocale.DMLCheckBatchInsertListsMaxParams1, Type: params.ParamTypeInt, }, }, }, - Message: "单条INSERT语句,建议批量插入不超过%v条", + Message: plocale.DMLCheckBatchInsertListsMaxMessage, AllowOffline: true, Func: checkDMLWithBatchInsertMaxLimits, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckInQueryNumber, - Desc: "WHERE条件内IN语句中的参数个数不能超过阈值", - Annotation: "当IN值过多时,有可能会导致查询进行全表扫描,使得MySQL性能急剧下降;具体规则阈值可以根据业务需求调整,默认值:50", + Desc: plocale.DMLCheckInQueryNumberDesc, + Annotation: plocale.DMLCheckInQueryNumberAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "50", - Desc: "in语句参数最大个数", + Desc: plocale.DMLCheckInQueryNumberParams1, Type: params.ParamTypeInt, }, }, }, - Message: "WHERE条件内IN语句中的参数已有%v个,不建议超过阙值%v", + Message: plocale.DMLCheckInQueryNumberMessage, AllowOffline: true, Func: checkInQueryLimit, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckPKProhibitAutoIncrement, - Desc: 
"不建议主键使用自增", - Annotation: "后期维护相对不便,过于依赖数据库自增机制达到全局唯一,不易拆分,容易造成主键冲突", + Desc: plocale.DDLCheckPKProhibitAutoIncrementDesc, + Annotation: plocale.DDLCheckPKProhibitAutoIncrementAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeIndexingConvention, + Category: plocale.RuleTypeIndexingConvention, }, - Message: "不建议主键使用自增", + Message: plocale.DDLCheckPKProhibitAutoIncrementMessage, AllowOffline: true, NotAllowOfflineStmts: []ast.Node{&ast.AlterTableStmt{}}, NotSupportExecutedSQLAuditStmts: []ast.Node{&ast.AlterTableStmt{}}, Func: checkPrimaryKey, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckWhereExistFunc, - Desc: "避免对条件字段使用函数操作", - Annotation: "对条件字段做函数操作,可能会破坏索引值的有序性,导致优化器选择放弃走索引,使查询性能大幅度降低", + Desc: plocale.DMLCheckWhereExistFuncDesc, + Annotation: plocale.DMLCheckWhereExistFuncAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "避免对条件字段使用函数操作", + Message: plocale.DMLCheckWhereExistFuncMessage, AllowOffline: false, Func: checkWhereExistFunc, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckWhereExistNot, - Desc: "不建议对条件字段使用负向查询", - Annotation: "使用负向查询,将导致全表扫描,出现慢SQL", + Desc: plocale.DMLCheckWhereExistNotDesc, + Annotation: plocale.DMLCheckWhereExistNotAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议对条件字段使用负向查询", + Message: plocale.DMLCheckWhereExistNotMessage, AllowOffline: true, Func: checkSelectWhere, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLWhereExistNull, - Desc: "不建议对条件字段使用 NULL 值判断", - Annotation: "使用 IS NULL 或 IS NOT NULL 可能导致查询放弃使用索引而进行全表扫描", + Desc: plocale.DMLWhereExistNullDesc, + Annotation: plocale.DMLWhereExistNullAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议对条件字段使用 NULL 值判断", + Message: plocale.DMLWhereExistNullMessage, 
Func: checkWhereExistNull, AllowOffline: true, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckWhereExistImplicitConversion, - Desc: "不建议在WHERE条件中使用与过滤字段不一致的数据类型", - Annotation: "WHERE条件中使用与过滤字段不一致的数据类型会引发隐式数据类型转换,导致查询有无法命中索引的风险,在高并发、大数据量的情况下,不走索引会使得数据库的查询性能严重下降", + Desc: plocale.DMLCheckWhereExistImplicitConversionDesc, + Annotation: plocale.DMLCheckWhereExistImplicitConversionAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议在WHERE条件中使用与过滤字段不一致的数据类型", + Message: plocale.DMLCheckWhereExistImplicitConversionMessage, Func: checkWhereColumnImplicitConversion, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckLimitMustExist, - Desc: "建议DELETE/UPDATE 语句带有LIMIT条件", - Annotation: "LIMIT条件可以降低写错 SQL 的代价(删错数据),同时避免长事务影响业务", + Desc: plocale.DMLCheckLimitMustExistDesc, + Annotation: plocale.DMLCheckLimitMustExistAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "建议DELETE/UPDATE 语句带有LIMIT条件", + Message: plocale.DMLCheckLimitMustExistMessage, Func: checkDMLLimitExist, AllowOffline: true, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckWhereExistScalarSubquery, - Desc: "不建议使用标量子查询", - Annotation: "标量子查询存在多次访问同一张表的问题,执行开销大效率低,可使用LEFT JOIN 替代标量子查询", + Desc: plocale.DMLCheckWhereExistScalarSubqueryDesc, + Annotation: plocale.DMLCheckWhereExistScalarSubqueryAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议使用标量子查询", + Message: plocale.DMLCheckWhereExistScalarSubqueryMessage, AllowOffline: true, Func: checkSelectWhere, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckIndexesExistBeforeCreateConstraints, - Desc: "对字段创建约束前,建议先创建索引", - Annotation: "创建约束前,先行创建索引,约束可作用于二级索引,避免全表扫描,提高性能", + Desc: plocale.DDLCheckIndexesExistBeforeCreateConstraintsDesc, + Annotation: 
plocale.DDLCheckIndexesExistBeforeCreateConstraintsAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeIndexingConvention, + Category: plocale.RuleTypeIndexingConvention, }, - Message: "对字段创建约束前,建议先创建索引", + Message: plocale.DDLCheckIndexesExistBeforeCreateConstraintsMessage, OnlyAuditNotExecutedSQL: true, Func: checkIndexesExistBeforeCreatConstraints, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckSelectForUpdate, - Desc: "不建议使用SELECT FOR UPDATE", - Annotation: "SELECT FOR UPDATE 会对查询结果集中每行数据都添加排他锁,其他线程对该记录的更新与删除操作都会阻塞,在高并发下,容易造成数据库大量锁等待,影响数据库查询性能", + Desc: plocale.DMLCheckSelectForUpdateDesc, + Annotation: plocale.DMLCheckSelectForUpdateAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议使用SELECT FOR UPDATE", + Message: plocale.DMLCheckSelectForUpdateMessage, Func: checkDMLSelectForUpdate, AllowOffline: true, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckDatabaseCollation, - Desc: "建议使用规定的数据库排序规则", - Annotation: "通过该规则约束全局的数据库排序规则,避免创建非预期的数据库排序规则,防止业务侧出现排序结果非预期等问题。建议项目内库表使用统一的字符集和字符集排序,部分连表查询的情况下字段的字符集或排序规则不一致可能会导致索引失效且不易发现", + Desc: plocale.DDLCheckDatabaseCollationDesc, + Annotation: plocale.DDLCheckDatabaseCollationAnnotation, Level: driverV2.RuleLevelNotice, //Value: "utf8mb4_0900_ai_ci", - Category: RuleTypeDDLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDDLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "utf8mb4_0900_ai_ci", - Desc: "数据库排序规则", + Desc: plocale.DDLCheckDatabaseCollationParams1, Type: params.ParamTypeString, }, }, }, - Message: "建议使用规定的数据库排序规则为%s", + Message: plocale.DDLCheckDatabaseCollationMessage, Func: checkCollationDatabase, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckDecimalTypeColumn, - Desc: "精确浮点数建议使用DECIMAL", - Annotation: "对于浮点数运算,DECIMAL精确度较高", + Desc: plocale.DDLCheckDecimalTypeColumnDesc, + Annotation: 
plocale.DDLCheckDecimalTypeColumnAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "精确浮点数建议使用DECIMAL", + Message: plocale.DDLCheckDecimalTypeColumnMessage, Func: checkDecimalTypeColumn, AllowOffline: true, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckBigintInsteadOfDecimal, - Desc: "建议用BIGINT类型代替DECIMAL", - Annotation: "因为CPU不支持对DECIMAL的直接运算,只是MySQL自身实现了DECIMAL的高精度计算,但是计算代价高,并且存储同样范围值的时候,空间占用也更多;使用BIGINT代替DECIMAL,可根据小数的位数乘以相应的倍数,即可达到精确的浮点存储计算,避免DECIMAL计算代价高的问题", + Desc: plocale.DDLCheckBigintInsteadOfDecimalDesc, + Annotation: plocale.DDLCheckBigintInsteadOfDecimalAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "建议列%s用BIGINT类型代替DECIMAL", + Message: plocale.DDLCheckBigintInsteadOfDecimalMessage, Func: checkBigintInsteadOfDecimal, AllowOffline: true, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckSubQueryNestNum, - Desc: "子查询嵌套层数不建议超过阈值", - Annotation: "子查询嵌套层数超过阈值,有些情况下,子查询并不能使用到索引。同时对于返回结果集比较大的子查询,会产生大量的临时表,消耗过多的CPU和IO资源,产生大量的慢查询", + Desc: plocale.DMLCheckSubQueryNestNumDesc, + Annotation: plocale.DMLCheckSubQueryNestNumAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "3", - Desc: "子查询嵌套层数不建议超过阈值", + Desc: plocale.DMLCheckSubQueryNestNumParams1, Type: params.ParamTypeInt, }, }, }, - Message: "子查询嵌套层数超过阈值%v", + Message: plocale.DMLCheckSubQueryNestNumMessage, Func: checkSubQueryNestNum, AllowOffline: true, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckNeedlessFunc, - Desc: "避免使用不必要的内置函数", - Annotation: "通过配置该规则可以指定业务中需要禁止使用的内置函数,使用内置函数可能会导致SQL无法走索引或者产生一些非预期的结果。实际需要禁用的函数可通过规则设置", + Desc: plocale.DMLCheckNeedlessFuncDesc, + Annotation: 
plocale.DMLCheckNeedlessFuncAnnotation, Level: driverV2.RuleLevelNotice, //Value: "sha(),sqrt(),md5()", - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "sha(),sqrt(),md5()", - Desc: "指定的函数集合(逗号分割)", + Desc: plocale.DMLCheckNeedlessFuncParams1, Type: params.ParamTypeString, }, }, }, - Message: "避免使用不必要的内置函数%v", + Message: plocale.DMLCheckNeedlessFuncMessage, Func: checkNeedlessFunc, AllowOffline: true, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckDatabaseSuffix, - Desc: "建议数据库名称使用固定后缀结尾", - Annotation: "通过配置该规则可以规范指定业务的数据库命名规则,具体命名规范可以自定义设置,默认提示值:_DB", + Desc: plocale.DDLCheckDatabaseSuffixDesc, + Annotation: plocale.DDLCheckDatabaseSuffixAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeNamingConvention, + Category: plocale.RuleTypeNamingConvention, //Value: "_DB", - Params: params.Params{ - ¶ms.Param{ + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "_DB", - Desc: "数据库名称后缀", + Desc: plocale.DDLCheckDatabaseSuffixParams1, Type: params.ParamTypeString, }, }, }, - Message: "建议数据库名称以\"%v\"结尾", + Message: plocale.DDLCheckDatabaseSuffixMessage, Func: checkDatabaseSuffix, AllowOffline: true, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckPKName, - Desc: "建议主键命名为\"PK_表名\"", - Annotation: "通过配置该规则可以规范指定业务的主键命名规则", + Desc: plocale.DDLCheckPKNameDesc, + Annotation: plocale.DDLCheckPKNameAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeNamingConvention, + Category: plocale.RuleTypeNamingConvention, }, - Message: "建议主键命名为\"PK_表名\"", + Message: plocale.DDLCheckPKNameMessage, Func: checkPKIndexName, AllowOffline: true, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckTransactionIsolationLevel, - Desc: "事物隔离级别建议设置成RC", - Annotation: "RC避免了脏读的现象,但没有解决幻读的问题;使用RR,能避免幻读,但是由于引入间隙锁导致加锁的范围可能扩大,从而会影响并发,还容易造成死锁,所以在大多数业务场景下,幻读出现的机率较少,RC基本上能满足业务需求", + Desc: 
plocale.DDLCheckTransactionIsolationLevelDesc, + Annotation: plocale.DDLCheckTransactionIsolationLevelAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeUsageSuggestion, + Category: plocale.RuleTypeUsageSuggestion, }, - Message: "事物隔离级别建议设置成RC", + Message: plocale.DDLCheckTransactionIsolationLevelMessage, Func: checkTransactionIsolationLevel, AllowOffline: true, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckFuzzySearch, - Desc: "禁止使用全模糊搜索或左模糊搜索", - Annotation: "使用全模糊搜索或左模糊搜索将导致查询无法使用索引,导致全表扫描", + Desc: plocale.DMLCheckFuzzySearchDesc, + Annotation: plocale.DMLCheckFuzzySearchAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "禁止使用全模糊搜索或左模糊搜索", + Message: plocale.DMLCheckFuzzySearchMessage, AllowOffline: true, Func: checkSelectWhere, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckTablePartition, - Desc: "不建议使用分区表相关功能", - Annotation: "分区表在物理上表现为多个文件,在逻辑上表现为一个表,跨分区查询效率可能更低,建议采用物理分表的方式管理大数据", + Desc: plocale.DDLCheckTablePartitionDesc, + Annotation: plocale.DDLCheckTablePartitionAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeUsageSuggestion, + Category: plocale.RuleTypeUsageSuggestion, }, - Message: "不建议使用分区表相关功能", + Message: plocale.DDLCheckTablePartitionMessage, AllowOffline: true, Func: checkTablePartition, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckNumberOfJoinTables, - Desc: "使用JOIN连接表查询建议不超过阈值", - Annotation: "表关联越多,意味着各种驱动关系组合就越多,比较各种结果集的执行成本的代价也就越高,进而SQL查询性能会大幅度下降;具体规则阈值可以根据业务需求调整,默认值:3", + Desc: plocale.DMLCheckNumberOfJoinTablesDesc, + Annotation: plocale.DMLCheckNumberOfJoinTablesAnnotation, Level: driverV2.RuleLevelNotice, //Value: "3", - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "3", - Desc: "最大连接表个数", + Desc: plocale.DMLCheckNumberOfJoinTablesParams1, 
Type: params.ParamTypeInt, }, }, }, - Message: "使用JOIN连接表查询建议不超过%v张", + Message: plocale.DMLCheckNumberOfJoinTablesMessage, AllowOffline: true, Func: checkNumberOfJoinTables, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckIfAfterUnionDistinct, - Desc: "建议使用UNION ALL,替代UNION", - Annotation: "UNION会按照字段的顺序进行排序同时去重,UNION ALL只是简单的将两个结果合并后就返回,从效率上看,UNION ALL 要比UNION快很多;如果合并的两个结果集中允许包含重复数据且不需要排序时的话,建议开启此规则,使用UNION ALL替代UNION", + Desc: plocale.DMLCheckIfAfterUnionDistinctDesc, + Annotation: plocale.DMLCheckIfAfterUnionDistinctAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "建议使用UNION ALL,替代UNION", + Message: plocale.DMLCheckIfAfterUnionDistinctMessage, AllowOffline: true, Func: checkIsAfterUnionDistinct, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckIsExistLimitOffset, - Desc: "使用分页查询时,避免使用偏移量", - Annotation: "例如:LIMIT N OFFSET M 或 LIMIT M,N。当偏移量m过大的时候,查询效率会很低,因为MySQL是先查出m+n个数据,然后抛弃掉前m个数据;对于有大数据量的MySQL表来说,使用LIMIT分页存在很严重的性能问题", + Desc: plocale.DDLCheckIsExistLimitOffsetDesc, + Annotation: plocale.DDLCheckIsExistLimitOffsetAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "使用分页查询时,避免使用偏移量", + Message: plocale.DDLCheckIsExistLimitOffsetMessage, AllowOffline: true, Func: checkIsExistLimitOffset, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckIndexOption, - Desc: "建议索引字段对区分度大于阈值", - Annotation: "选择区分度高的字段作为索引,可快速定位数据;区分度太低,无法有效利用索引,甚至可能需要扫描大量数据页,拖慢SQL;具体规则阈值可以根据业务需求调整,默认值:70", + Desc: plocale.DDLCheckIndexOptionDesc, + Annotation: plocale.DDLCheckIndexOptionAnnotation, Level: driverV2.RuleLevelNotice, //Value: "0.7", - Category: RuleTypeIndexOptimization, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeIndexOptimization, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "70", - Desc: "可选择性(百分比)", + Desc: 
plocale.DDLCheckIndexOptionParams1, Type: params.ParamTypeInt, }, }, }, - Message: "索引 %v 未超过区分度阈值 百分之%v, 不建议选为索引", + Message: plocale.DDLCheckIndexOptionMessage, AllowOffline: false, Func: checkIndexOption, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckColumnEnumNotice, - Desc: "不建议使用 ENUM 类型", - Annotation: "ENUM类型不是SQL标准,移植性较差,后期如修改或增加枚举值需重建整张表,代价较大,且无法通过字面量值进行排序", + Desc: plocale.DDLCheckColumnEnumNoticeDesc, + Annotation: plocale.DDLCheckColumnEnumNoticeAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "不建议使用 ENUM 类型", + Message: plocale.DDLCheckColumnEnumNoticeMessage, AllowOffline: true, Func: checkColumnEnumNotice, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckColumnSetNotice, - Desc: "不建议使用 SET 类型", - Annotation: "集合的修改需要重新定义列,后期修改的代价大,建议在业务层实现", + Desc: plocale.DDLCheckColumnSetNoticeDesc, + Annotation: plocale.DDLCheckColumnSetNoticeAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "不建议使用 SET 类型", + Message: plocale.DDLCheckColumnSetNoticeMessage, AllowOffline: true, Func: checkColumnSetNotice, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckColumnBlobNotice, - Desc: "不建议使用 BLOB 或 TEXT 类型", - Annotation: "BLOB 或 TEXT 类型消耗大量的网络和IO带宽,同时在该表上的DML操作都会变得很慢", + Desc: plocale.DDLCheckColumnBlobNoticeDesc, + Annotation: plocale.DDLCheckColumnBlobNoticeAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "不建议使用 BLOB 或 TEXT 类型", + Message: plocale.DDLCheckColumnBlobNoticeMessage, AllowOffline: true, Func: checkColumnBlobNotice, }, { - Rule: driverV2.Rule{ - Name: DMLCheckExplainAccessTypeAll, - //Value: "10000", - Desc: "全表扫描时,扫描行数不建议超过指定行数(默认值:10000)", - Annotation: "全表扫描时,扫描行数不建议超过指定行数是为了避免性能问题;具体规则阈值可以根据业务需求调整,默认值:10000;如果设置为0,全表扫描都会触发规则", + Rule: SourceRule{ + 
Name: DMLCheckExplainAccessTypeAll, + Desc: plocale.DMLCheckExplainAccessTypeAllDesc, + Annotation: plocale.DMLCheckExplainAccessTypeAllAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "10000", - Desc: "最大扫描行数", + Desc: plocale.DMLCheckExplainAccessTypeAllParams1, Type: params.ParamTypeInt, }, }, }, - Message: "该查询使用了全表扫描并且扫描行数为%v", + Message: plocale.DMLCheckExplainAccessTypeAllMessage, AllowOffline: false, Func: checkExplain, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckExplainExtraUsingFilesort, - Desc: "不建议使用文件排序", - Annotation: "大数据量的情况下,文件排序意味着SQL性能较低,会增加OS的开销,影响数据库性能", + Desc: plocale.DMLCheckExplainExtraUsingFilesortDesc, + Annotation: plocale.DMLCheckExplainExtraUsingFilesortAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议使用文件排序", + Message: plocale.DMLCheckExplainExtraUsingFilesortMessage, AllowOffline: false, Func: checkExplain, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckExplainExtraUsingTemporary, - Desc: "不建议使用临时表", - Annotation: "大数据量的情况下,临时表意味着SQL性能较低,会增加OS的开销,影响数据库性能", + Desc: plocale.DMLCheckExplainExtraUsingTemporaryDesc, + Annotation: plocale.DMLCheckExplainExtraUsingTemporaryAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议使用临时表", + Message: plocale.DMLCheckExplainExtraUsingTemporaryMessage, AllowOffline: false, Func: checkExplain, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckCreateView, - Desc: "禁止使用视图", - Annotation: "视图的查询性能较差,同时基表结构变更,需要对视图进行维护,如果视图可读性差且包含复杂的逻辑,都会增加维护的成本", + Desc: plocale.DDLCheckCreateViewDesc, + Annotation: plocale.DDLCheckCreateViewAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeUsageSuggestion, + 
Category: plocale.RuleTypeUsageSuggestion, }, - Message: "禁止使用视图", + Message: plocale.DDLCheckCreateViewMessage, AllowOffline: true, Func: checkCreateView, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckCreateTrigger, - Desc: "禁止使用触发器", - Annotation: "触发器难以开发和维护,不能高效移植,且在复杂的逻辑以及高并发下,容易出现死锁影响业务", + Desc: plocale.DDLCheckCreateTriggerDesc, + Annotation: plocale.DDLCheckCreateTriggerAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeUsageSuggestion, + Category: plocale.RuleTypeUsageSuggestion, }, - Message: "禁止使用触发器", + Message: plocale.DDLCheckCreateTriggerMessage, AllowOffline: true, Func: checkCreateTrigger, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckCreateFunction, - Desc: "禁止使用自定义函数", - Annotation: "自定义函数,维护较差,且依赖性高会导致SQL无法跨库使用", + Desc: plocale.DDLCheckCreateFunctionDesc, + Annotation: plocale.DDLCheckCreateFunctionAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeUsageSuggestion, + Category: plocale.RuleTypeUsageSuggestion, }, - Message: "禁止使用自定义函数", + Message: plocale.DDLCheckCreateFunctionMessage, AllowOffline: true, Func: checkCreateFunction, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckCreateProcedure, - Desc: "禁止使用存储过程", - Annotation: "存储过程在一定程度上会使程序难以调试和拓展,各种数据库的存储过程语法相差很大,给将来的数据库移植带来很大的困难,且会极大的增加出现BUG的概率", + Desc: plocale.DDLCheckCreateProcedureDesc, + Annotation: plocale.DDLCheckCreateProcedureAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeUsageSuggestion, + Category: plocale.RuleTypeUsageSuggestion, }, - Message: "禁止使用存储过程", + Message: plocale.DDLCheckCreateProcedureMessage, AllowOffline: true, Func: checkCreateProcedure, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLDisableTypeTimestamp, - Desc: "不建议使用TIMESTAMP字段", - Annotation: "TIMESTAMP 有最大值限制('2038-01-19 03:14:07' UTC),且会时区转换的问题", + Desc: plocale.DDLDisableTypeTimestampDesc, + Annotation: plocale.DDLDisableTypeTimestampAnnotation, Level: driverV2.RuleLevelWarn, - Category: 
RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "不建议使用TIMESTAMP字段", + Message: plocale.DDLDisableTypeTimestampMessage, AllowOffline: true, Func: disableUseTypeTimestampField, }, { - Rule: driverV2.Rule{ //select a as id, id , b as user from mysql.user; + Rule: SourceRule{ //select a as id, id , b as user from mysql.user; Name: DMLCheckAlias, - Desc: "别名不建议与表或列的名字相同", - Annotation: "表或列的别名与其真实名称相同, 这样的别名会使得查询更难去分辨", + Desc: plocale.DMLCheckAliasDesc, + Annotation: plocale.DMLCheckAliasAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "这些别名(%v)与列名或表名相同", + Message: plocale.DMLCheckAliasMessage, Func: checkAlias, }, { - Rule: driverV2.Rule{ //ALTER TABLE test CONVERT TO CHARACTER SET utf8 COLLATE utf8_general_ci; + Rule: SourceRule{ //ALTER TABLE test CONVERT TO CHARACTER SET utf8 COLLATE utf8_general_ci; Name: DDLHintUpdateTableCharsetWillNotUpdateFieldCharset, - Desc: "不建议修改表的默认字符集", - Annotation: "修改表的默认字符集,只会影响后续新增的字段,不会修表已有字段的字符集;如需修改整张表所有字段的字符集建议开启此规则", + Desc: plocale.DDLHintUpdateTableCharsetWillNotUpdateFieldCharsetDesc, + Annotation: plocale.DDLHintUpdateTableCharsetWillNotUpdateFieldCharsetAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "不建议修改表的默认字符集", + Message: plocale.DDLHintUpdateTableCharsetWillNotUpdateFieldCharsetMessage, Func: hintUpdateTableCharsetWillNotUpdateFieldCharset, - }, { - Rule: driverV2.Rule{ //ALTER TABLE tbl DROP COLUMN col; + }, + { + Rule: SourceRule{ //ALTER TABLE tbl DROP COLUMN col; Name: DDLHintDropColumn, - Desc: "禁止进行删除列的操作", - Annotation: "业务逻辑与删除列依赖未完全消除,列被删除后可能导致程序异常(无法正常读写)的情况;开启该规则,SQLE将提醒删除列为高危操作", + Desc: plocale.DDLHintDropColumnDesc, + Annotation: plocale.DDLHintDropColumnAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "禁止进行删除列的操作", 
+ Message: plocale.DDLHintDropColumnMessage, Func: hintDropColumn, - }, { - Rule: driverV2.Rule{ //ALTER TABLE tbl DROP PRIMARY KEY; + }, + { + Rule: SourceRule{ //ALTER TABLE tbl DROP PRIMARY KEY; Name: DDLHintDropPrimaryKey, - Desc: "禁止进行删除主键的操作", - Annotation: "删除已有约束会影响已有业务逻辑;开启该规则,SQLE将提醒删除主键为高危操作", + Desc: plocale.DDLHintDropPrimaryKeyDesc, + Annotation: plocale.DDLHintDropPrimaryKeyAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "禁止进行删除主键的操作", + Message: plocale.DDLHintDropPrimaryKeyMessage, Func: hintDropPrimaryKey, - }, { - Rule: driverV2.Rule{ //ALTER TABLE tbl DROP FOREIGN KEY a; + }, + { + Rule: SourceRule{ //ALTER TABLE tbl DROP FOREIGN KEY a; Name: DDLHintDropForeignKey, - Desc: "禁止进行删除外键的操作", - Annotation: "删除已有约束会影响已有业务逻辑;开启该规则,SQLE将提醒删除外键为高危操作", + Desc: plocale.DDLHintDropForeignKeyDesc, + Annotation: plocale.DDLHintDropForeignKeyAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "禁止进行删除外键的操作", + Message: plocale.DDLHintDropForeignKeyMessage, Func: hintDropForeignKey, }, { - Rule: driverV2.Rule{ //select * from user where id like "a"; + Rule: SourceRule{ //select * from user where id like "a"; Name: DMLNotRecommendNotWildcardLike, - Desc: "不建议使用没有通配符的 LIKE 查询", - Annotation: "不包含通配符的 LIKE 查询逻辑上与等值查询相同,建议使用等值查询替代", + Desc: plocale.DMLNotRecommendNotWildcardLikeDesc, + Annotation: plocale.DMLNotRecommendNotWildcardLikeAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议使用没有通配符的 LIKE 查询", + Message: plocale.DMLNotRecommendNotWildcardLikeMessage, Func: notRecommendNotWildcardLike, - }, { - Rule: driverV2.Rule{ //SELECT * FROM tb WHERE col IN (NULL); + }, + { + Rule: SourceRule{ //SELECT * FROM tb WHERE col IN (NULL); Name: DMLHintInNullOnlyFalse, - Desc: "避免使用 IN (NULL) 或者 NOT IN (NULL)", - 
Annotation: "查询条件永远非真,这将导致查询无匹配到的结果", + Desc: plocale.DMLHintInNullOnlyFalseDesc, + Annotation: plocale.DMLHintInNullOnlyFalseAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "避免使用IN (NULL)/NOT IN (NULL) ,该用法永远非真将导致条件失效", + Message: plocale.DMLHintInNullOnlyFalseMessage, Func: hintInNullOnlyFalse, - }, { - Rule: driverV2.Rule{ //select * from user where id in (a); + }, + { + Rule: SourceRule{ //select * from user where id in (a); Name: DMLNotRecommendIn, - Desc: "不建议使用IN", - Annotation: "当IN值过多时,有可能会导致查询进行全表扫描,使得MySQL性能急剧下降", + Desc: plocale.DMLNotRecommendInDesc, + Annotation: plocale.DMLNotRecommendInAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议使用IN", + Message: plocale.DMLNotRecommendInMessage, Func: notRecommendIn, }, { - Rule: driverV2.Rule{ //select * from user where id = ' 1'; + Rule: SourceRule{ //select * from user where id = ' 1'; Name: DMLCheckSpacesAroundTheString, - Desc: "引号中的字符串开头或结尾不建议包含空格", - Annotation: "字符串前后存在空格将可能导致查询判断逻辑出错,如在MySQL 5.5中'a'和'a '在查询中被认为是相同的值", + Desc: plocale.DMLCheckSpacesAroundTheStringDesc, + Annotation: plocale.DMLCheckSpacesAroundTheStringAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "引号中的字符串开头或结尾不建议包含空格", + Message: plocale.DMLCheckSpacesAroundTheStringMessage, Func: checkSpacesAroundTheString, - }, { - Rule: driverV2.Rule{ //CREATE TABLE tb (a varchar(10) default '“'); + }, + { + Rule: SourceRule{ //CREATE TABLE tb (a varchar(10) default '“'); Name: DDLCheckFullWidthQuotationMarks, - Desc: "DDL语句中不建议使用中文全角引号", - Annotation: "建议开启此规则,可避免MySQL会将中文全角引号识别为命名的一部分,执行结果与业务预期不符", + Desc: plocale.DDLCheckFullWidthQuotationMarksDesc, + Annotation: plocale.DDLCheckFullWidthQuotationMarksAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, + 
Category: plocale.RuleTypeDDLConvention, }, - Message: "DDL语句中不建议使用中文全角引号,这可能是书写错误", + Message: plocale.DDLCheckFullWidthQuotationMarksMessage, Func: checkFullWidthQuotationMarks, - }, { - Rule: driverV2.Rule{ //select name from tbl where id < 1000 order by rand(1) + }, + { + Rule: SourceRule{ //select name from tbl where id < 1000 order by rand(1) Name: DMLNotRecommendOrderByRand, - Desc: "不建议使用 ORDER BY RAND()", - Annotation: "ORDER BY RAND()使用了临时表,同时还要对其进行排序,在数据量很大的情况下会增加服务器负载以及增加查询时间", + Desc: plocale.DMLNotRecommendOrderByRandDesc, + Annotation: plocale.DMLNotRecommendOrderByRandAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议使用 ORDER BY RAND()", + Message: plocale.DMLNotRecommendOrderByRandMessage, Func: notRecommendOrderByRand, - }, { - Rule: driverV2.Rule{ //select col1,col2 from tbl group by 1 + }, + { + Rule: SourceRule{ //select col1,col2 from tbl group by 1 Name: DMLNotRecommendGroupByConstant, - Desc: "不建议对常量进行 GROUP BY", - Annotation: "GROUP BY 1 表示按第一列进行GROUP BY;在GROUP BY子句中使用数字,而不是表达式或列名称,当查询列顺序改变时,会导致查询逻辑出现问题", + Desc: plocale.DMLNotRecommendGroupByConstantDesc, + Annotation: plocale.DMLNotRecommendGroupByConstantAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议对常量进行 GROUP BY", + Message: plocale.DMLNotRecommendGroupByConstantMessage, Func: notRecommendGroupByConstant, - }, { - Rule: driverV2.Rule{ //select c1,c2,c3 from t1 where c1='foo' order by c2 desc, c3 asc + }, + { + Rule: SourceRule{ //select c1,c2,c3 from t1 where c1='foo' order by c2 desc, c3 asc Name: DMLCheckSortDirection, - Desc: "不建议在 ORDER BY 语句中对多个不同条件使用不同方向的排序", - Annotation: "在 MySQL 8.0 之前当 ORDER BY 多个列指定的排序方向不同时将无法使用已经建立的索引。在MySQL8.0 之后可以建立对应的排序顺序的联合索引来优化", + Desc: plocale.DMLCheckSortDirectionDesc, + Annotation: plocale.DMLCheckSortDirectionAnnotation, Level: driverV2.RuleLevelWarn, - Category: 
RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议在 ORDER BY 语句中对多个不同条件使用不同方向的排序", + Message: plocale.DMLCheckSortDirectionMessage, Func: checkSortDirection, - }, { - Rule: driverV2.Rule{ //select col1,col2 from tbl group by 1 + }, + { + Rule: SourceRule{ //select col1,col2 from tbl group by 1 Name: DMLHintGroupByRequiresConditions, - Desc: "建议为GROUP BY语句添加ORDER BY条件", - Annotation: "在5.7中,MySQL默认会对’GROUP BY col1, …’按如下顺序’ORDER BY col1,…’隐式排序,导致产生无谓的排序,带来额外的开销;在8.0中,则不会出现这种情况。如果不需要排序建议显示添加’ORDER BY NULL’", + Desc: plocale.DMLHintGroupByRequiresConditionsDesc, + Annotation: plocale.DMLHintGroupByRequiresConditionsAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "建议为GROUP BY语句添加ORDER BY条件", + Message: plocale.DMLHintGroupByRequiresConditionsMessage, Func: hintGroupByRequiresConditions, - }, { - Rule: driverV2.Rule{ //select description from film where title ='ACADEMY DINOSAUR' order by length-language_id; + }, + { + Rule: SourceRule{ //select description from film where title ='ACADEMY DINOSAUR' order by length-language_id; Name: DMLNotRecommendGroupByExpression, - Desc: "不建议ORDER BY 的条件为表达式", - Annotation: "当ORDER BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差", + Desc: plocale.DMLNotRecommendGroupByExpressionDesc, + Annotation: plocale.DMLNotRecommendGroupByExpressionAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议ORDER BY 的条件为表达式", + Message: plocale.DMLNotRecommendGroupByExpressionMessage, Func: notRecommendGroupByExpression, - }, { - Rule: driverV2.Rule{ //select description from film where title ='ACADEMY DINOSAUR' order by length-language_id; + }, + { + Rule: SourceRule{ //select description from film where title ='ACADEMY DINOSAUR' order by length-language_id; Name: DMLCheckSQLLength, - Desc: "建议将过长的SQL分解成几个简单的SQL", - Annotation: 
"过长的SQL可读性较差,难以维护,且容易引发性能问题;具体规则阈值可以根据业务需求调整,默认值:1024", + Desc: plocale.DMLCheckSQLLengthDesc, + Annotation: plocale.DMLCheckSQLLengthAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "1024", - Desc: "SQL最大长度", + Desc: plocale.DMLCheckSQLLengthParams1, Type: params.ParamTypeInt, }, }, }, - Message: "建议将过长的SQL分解成几个简单的SQL", + Message: plocale.DMLCheckSQLLengthMessage, Func: checkSQLLength, - }, { - Rule: driverV2.Rule{ //SELECT s.c_id,count(s.c_id) FROM s where c = test GROUP BY s.c_id HAVING s.c_id <> '1660' AND s.c_id <> '2' order by s.c_id + }, + { + Rule: SourceRule{ //SELECT s.c_id,count(s.c_id) FROM s where c = test GROUP BY s.c_id HAVING s.c_id <> '1660' AND s.c_id <> '2' order by s.c_id Name: DMLNotRecommendHaving, - Desc: "不建议使用 HAVING 子句", - Annotation: "对于索引字段,放在HAVING子句中时不会走索引;建议将HAVING子句改写为WHERE中的查询条件,可以在查询处理期间使用索引,提高SQL的执行效率", + Desc: plocale.DMLNotRecommendHavingDesc, + Annotation: plocale.DMLNotRecommendHavingAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议使用 HAVING 子句", + Message: plocale.DMLNotRecommendHavingMessage, Func: notRecommendHaving, - }, { - Rule: driverV2.Rule{ //delete from tbl + }, + { + Rule: SourceRule{ //delete from tbl Name: DMLHintUseTruncateInsteadOfDelete, - Desc: "删除全表时建议使用 TRUNCATE 替代 DELETE", - Annotation: "TRUNCATE TABLE 比 DELETE 速度快,且使用的系统和事务日志资源少,同时TRUNCATE后表所占用的空间会被释放,而DELETE后需要手工执行OPTIMIZE才能释放表空间", + Desc: plocale.DMLHintUseTruncateInsteadOfDeleteDesc, + Annotation: plocale.DMLHintUseTruncateInsteadOfDeleteAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "删除全表时建议使用 TRUNCATE 替代 DELETE", + Message: plocale.DMLHintUseTruncateInsteadOfDeleteMessage, Func: 
hintUseTruncateInsteadOfDelete, - }, { - Rule: driverV2.Rule{ //update mysql.func set name ="hello"; + }, + { + Rule: SourceRule{ //update mysql.func set name ="hello"; Name: DMLNotRecommendUpdatePK, - Desc: "不建议UPDATE主键", - Annotation: "主键索引数据列的顺序就是表记录的物理存储顺序,频繁更新主键将导致整个表记录的顺序的调整,会耗费相当大的资源", + Desc: plocale.DMLNotRecommendUpdatePKDesc, + Annotation: plocale.DMLNotRecommendUpdatePKAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议UPDATE主键", + Message: plocale.DMLNotRecommendUpdatePKMessage, Func: notRecommendUpdatePK, - }, { - Rule: driverV2.Rule{ //create table t(c1 int,c2 int,c3 int,c4 int,c5 int,c6 int); + }, + { + Rule: SourceRule{ //create table t(c1 int,c2 int,c3 int,c4 int,c5 int,c6 int); Name: DDLCheckColumnQuantity, - Desc: "表的列数不建议超过阈值", - Annotation: "避免在OLTP系统上做宽表设计,后期对性能影响很大;具体规则阈值可根据业务需求调整,默认值:40", + Desc: plocale.DDLCheckColumnQuantityDesc, + Annotation: plocale.DDLCheckColumnQuantityAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDDLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "40", - Desc: "最大列数", + Desc: plocale.DDLCheckColumnQuantityParams1, Type: params.ParamTypeInt, }, }, }, - Message: "表的列数不建议超过阈值", + Message: plocale.DDLCheckColumnQuantityMessage, Func: checkColumnQuantity, AllowOffline: true, - }, { - Rule: driverV2.Rule{ //CREATE TABLE `tb2` ( `id` int(11) DEFAULT NULL, `col` char(10) CHARACTER SET utf8 DEFAULT NULL) + }, + { + Rule: SourceRule{ //CREATE TABLE `tb2` ( `id` int(11) DEFAULT NULL, `col` char(10) CHARACTER SET utf8 DEFAULT NULL) Name: DDLRecommendTableColumnCharsetSame, - Desc: "建议列与表使用同一个字符集", - Annotation: "统一字符集可以避免由于字符集转换产生的乱码,不同的字符集进行比较前需要进行转换会造成索引失效", + Desc: plocale.DDLRecommendTableColumnCharsetSameDesc, + Annotation: plocale.DDLRecommendTableColumnCharsetSameAnnotation, Level: 
driverV2.RuleLevelWarn, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "建议列与表使用同一个字符集", + Message: plocale.DDLRecommendTableColumnCharsetSameMessage, Func: recommendTableColumnCharsetSame, - }, { - Rule: driverV2.Rule{ //CREATE TABLE tab (a INT(1)); + }, + { + Rule: SourceRule{ //CREATE TABLE tab (a INT(1)); Name: DDLCheckColumnTypeInteger, - Desc: "整型定义建议采用 INT(10) 或 BIGINT(20)", - Annotation: "INT(M) 或 BIGINT(M),M 表示最大显示宽度,可存储最大值的宽度分别为10、20,采用 INT(10) 或 BIGINT(20)可避免发生显示截断的可能", + Desc: plocale.DDLCheckColumnTypeIntegerDesc, + Annotation: plocale.DDLCheckColumnTypeIntegerAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "整型定义建议采用 INT(10) 或 BIGINT(20)", + Message: plocale.DDLCheckColumnTypeIntegerMessage, Func: checkColumnTypeInteger, - }, { - Rule: driverV2.Rule{ //CREATE TABLE tab (a varchar(3500)); + }, + { + Rule: SourceRule{ //CREATE TABLE tab (a varchar(3500)); Name: DDLCheckVarcharSize, - Desc: "定义VARCHAR 长度时不建议大于阈值", - Annotation: "MySQL建立索引时没有限制索引的大小,索引长度会默认采用的该字段的长度,VARCHAR 定义长度越长建立的索引存储大小越大;具体规则阈值可以根据业务需求调整,默认值:1024", + Desc: plocale.DDLCheckVarcharSizeDesc, + Annotation: plocale.DDLCheckVarcharSizeAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDDLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "1024", - Desc: "VARCHAR最大长度", + Desc: plocale.DDLCheckVarcharSizeParams1, Type: params.ParamTypeInt, }, }, }, - Message: "定义VARCHAR 长度时不建议大于阈值, 阈值为%d", + Message: plocale.DDLCheckVarcharSizeMessage, Func: checkVarcharSize, - }, { - Rule: driverV2.Rule{ //select id from t where substring(name,1,3)='abc' + }, + { + Rule: SourceRule{ //select id from t where substring(name,1,3)='abc' Name: DMLNotRecommendFuncInWhere, - Desc: "应避免在 WHERE 条件中使用函数或其他运算符", - Annotation: "函数或运算符会导致查询无法利用表中的索引,该查询将会全表扫描,性能较差", + 
Desc: plocale.DMLNotRecommendFuncInWhereDesc, + Annotation: plocale.DMLNotRecommendFuncInWhereAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "应避免在 WHERE 条件中使用函数或其他运算符", + Message: plocale.DMLNotRecommendFuncInWhereMessage, Func: notRecommendFuncInWhere, - }, { - Rule: driverV2.Rule{ //SELECT SYSDATE(); + }, + { + Rule: SourceRule{ //SELECT SYSDATE(); Name: DMLNotRecommendSysdate, - Desc: "不建议使用 SYSDATE() 函数", - Annotation: "当SYSDATE()函数在基于STATEMENT模式的主从环境下可能造成数据的不一致,因为语句在主库中执行到日志传递到备库,存在时间差,到备库执行的时候就会变成不同的时间值,建议采取ROW模式的复制环境", + Desc: plocale.DMLNotRecommendSysdateDesc, + Annotation: plocale.DMLNotRecommendSysdateAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议使用 SYSDATE() 函数", + Message: plocale.DMLNotRecommendSysdateMessage, Func: notRecommendSysdate, - }, { - Rule: driverV2.Rule{ //SELECT SUM(COL) FROM tbl; + }, + { + Rule: SourceRule{ //SELECT SUM(COL) FROM tbl; Name: DMLHintSumFuncTips, - Desc: "避免使用 SUM(COL)", - Annotation: "当某一列的值全是NULL时,COUNT(COL)的返回结果为0,但SUM(COL)的返回结果为NULL,因此使用SUM()时需注意NPE问题(指数据返回NULL);如业务需避免NPE问题,建议开启此规则", + Desc: plocale.DMLHintSumFuncTipsDesc, + Annotation: plocale.DMLHintSumFuncTipsAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "避免使用 SUM(COL) ,该用法存在返回NULL值导致程序空指针的风险", + Message: plocale.DMLHintSumFuncTipsMessage, Func: hintSumFuncTips, - }, { - Rule: driverV2.Rule{ + }, + { + Rule: SourceRule{ Name: DMLHintCountFuncWithCol, - Desc: "避免使用 COUNT(COL)", - Annotation: "建议使用COUNT(*),因为使用 COUNT(COL) 需要对表进行全表扫描,这可能会导致性能下降。", + Desc: plocale.DMLHintCountFuncWithColDesc, + Annotation: plocale.DMLHintCountFuncWithColAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "避免使用 COUNT(COL)", + Message: 
plocale.DMLHintCountFuncWithColMessage, Func: hintCountFuncWithCol, AllowOffline: true, - }, { - Rule: driverV2.Rule{ + }, + { + Rule: SourceRule{ Name: DDLCheckColumnQuantityInPK, - Desc: "主键包含的列数不建议超过阈值", - Annotation: "主建中的列过多,会导致二级索引占用更多的空间,同时增加索引维护的开销;具体规则阈值可根据业务需求调整,默认值:2", + Desc: plocale.DDLCheckColumnQuantityInPKDesc, + Annotation: plocale.DDLCheckColumnQuantityInPKAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDDLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDDLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "2", - Desc: "最大列数", + Desc: plocale.DDLCheckColumnQuantityInPKParams1, Type: params.ParamTypeInt, }, }, }, - Message: "主键包含的列数不建议超过阈值", + Message: plocale.DDLCheckColumnQuantityInPKMessage, Func: checkColumnQuantityInPK, - }, { - Rule: driverV2.Rule{ //select col1,col2 from tbl where name=xx limit 10 + }, + { + Rule: SourceRule{ //select col1,col2 from tbl where name=xx limit 10 Name: DMLHintLimitMustBeCombinedWithOrderBy, - Desc: "LIMIT 查询建议使用ORDER BY", - Annotation: "没有ORDER BY的LIMIT会导致非确定性的结果可能与业务需求不符,这取决于执行计划", + Desc: plocale.DMLHintLimitMustBeCombinedWithOrderByDesc, + Annotation: plocale.DMLHintLimitMustBeCombinedWithOrderByAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "LIMIT 查询建议使用ORDER BY", + Message: plocale.DMLHintLimitMustBeCombinedWithOrderByMessage, Func: hintLimitMustBeCombinedWithOrderBy, }, { - Rule: driverV2.Rule{ //TRUNCATE TABLE tbl_name + Rule: SourceRule{ //TRUNCATE TABLE tbl_name Name: DMLHintTruncateTips, - Desc: "不建议使用TRUNCATE操作", - Annotation: "TRUNCATE是DLL,数据不能回滚,在没有备份情况下,谨慎使用TRUNCATE", + Desc: plocale.DMLHintTruncateTipsDesc, + Annotation: plocale.DMLHintTruncateTipsAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议使用TRUNCATE操作", + Message: 
plocale.DMLHintTruncateTipsMessage, Func: hintTruncateTips, - }, { - Rule: driverV2.Rule{ //delete from t where col = 'condition' + }, + { + Rule: SourceRule{ //delete from t where col = 'condition' Name: DMLHintDeleteTips, - Desc: "建议在执行DELETE/DROP/TRUNCATE等操作前进行备份", - Annotation: "DROP/TRUNCATE是DDL,操作立即生效,不会写入日志,所以无法回滚,在执行高危操作之前对数据进行备份是很有必要的", + Desc: plocale.DMLHintDeleteTipsDesc, + Annotation: plocale.DMLHintDeleteTipsAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "建议在执行DELETE/DROP/TRUNCATE等操作前进行备份", + Message: plocale.DMLHintDeleteTipsMessage, Func: hintDeleteTips, - }, { - Rule: driverV2.Rule{ //SELECT BENCHMARK(10, RAND()) + }, + { + Rule: SourceRule{ //SELECT BENCHMARK(10, RAND()) Name: DMLCheckSQLInjectionFunc, - Desc: "不建议使用常见 SQL 注入函数", - Annotation: "攻击者通过SQL注入,可未经授权可访问数据库中的数据,存在盗取用户信息,造成用户数据泄露等安全漏洞问题", + Desc: plocale.DMLCheckSQLInjectionFuncDesc, + Annotation: plocale.DMLCheckSQLInjectionFuncAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议使用常见 SQL 注入函数", + Message: plocale.DMLCheckSQLInjectionFuncMessage, Func: checkSQLInjectionFunc, - }, { - Rule: driverV2.Rule{ //select col1,col2 from tbl where type!=0 + }, + { + Rule: SourceRule{ //select col1,col2 from tbl where type!=0 Name: DMLCheckNotEqualSymbol, - Desc: "建议使用'<>'代替'!='", - Annotation: "'!=' 是非标准的运算符,'<>' 才是SQL中标准的不等于运算符", + Desc: plocale.DMLCheckNotEqualSymbolDesc, + Annotation: plocale.DMLCheckNotEqualSymbolAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "建议使用'<>'代替'!='", + Message: plocale.DMLCheckNotEqualSymbolMessage, Func: checkNotEqualSymbol, - }, { - Rule: driverV2.Rule{ //select col1,col2,col3 from table1 where col2 in(select col from table2) + }, + { + Rule: SourceRule{ //select col1,col2,col3 from table1 where col2 
in(select col from table2) Name: DMLNotRecommendSubquery, - Desc: "不推荐使用子查询", - Annotation: "有些情况下,子查询并不能使用到索引,同时对于返回结果集比较大的子查询,会产生大量的临时表,消耗过多的CPU和IO资源,产生大量的慢查询", + Desc: plocale.DMLNotRecommendSubqueryDesc, + Annotation: plocale.DMLNotRecommendSubqueryAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不推荐使用子查询", + Message: plocale.DMLNotRecommendSubqueryMessage, Func: notRecommendSubquery, - }, { - Rule: driverV2.Rule{ //SELECT * FROM staff WHERE name IN (SELECT NAME FROM customer ORDER BY name LIMIT 1) + }, + { + Rule: SourceRule{ //SELECT * FROM staff WHERE name IN (SELECT NAME FROM customer ORDER BY name LIMIT 1) Name: DMLCheckSubqueryLimit, - Desc: "不建议在子查询中使用LIMIT", - Annotation: "部分MySQL版本不支持在子查询中进行'LIMIT & IN/ALL/ANY/SOME'", + Desc: plocale.DMLCheckSubqueryLimitDesc, + Annotation: plocale.DMLCheckSubqueryLimitAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "不建议在子查询中使用LIMIT", + Message: plocale.DMLCheckSubqueryLimitMessage, Func: checkSubqueryLimit, - }, { - Rule: driverV2.Rule{ //CREATE TABLE tbl (a int) AUTO_INCREMENT = 10; + }, + { + Rule: SourceRule{ //CREATE TABLE tbl (a int) AUTO_INCREMENT = 10; Name: DDLCheckAutoIncrement, - Desc: "表的初始AUTO_INCREMENT值建议为0", - Annotation: "创建表时AUTO_INCREMENT设置为0则自增从1开始,可以避免数据空洞。例如在导出表结构DDL时,表结构内AUTO_INCREMENT通常为当前的自增值,如果建表时没有把AUTO_INCREMENT设置为0,那么通过该DDL进行建表操作会导致自增值从一个无意义数字开始。", + Desc: plocale.DDLCheckAutoIncrementDesc, + Annotation: plocale.DDLCheckAutoIncrementAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "表的初始AUTO_INCREMENT值建议为0", + Message: plocale.DDLCheckAutoIncrementMessage, Func: checkAutoIncrement, - }, { - Rule: driverV2.Rule{ // rename table t1 to t2; + }, + { + Rule: SourceRule{ // rename table t1 to t2; Name: DDLNotAllowRenaming, - Desc: 
"禁止使用RENAME或CHANGE对表名字段名进行修改", - Annotation: "RENAME/CHANGE 表名/列名会对线上业务不停机发布造成影响,如需这种操作应当DBA手工干预", + Desc: plocale.DDLNotAllowRenamingDesc, + Annotation: plocale.DDLNotAllowRenamingAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, AllowOffline: true, - Message: "禁止使用RENAME或CHANGE对表名字段名进行修改", + Message: plocale.DDLNotAllowRenamingMessage, Func: ddlNotAllowRenaming, - }, { - Rule: driverV2.Rule{ + }, + { + Rule: SourceRule{ Name: DMLCheckExplainFullIndexScan, - Desc: "不建议对表进行全索引扫描", - Annotation: "在数据量大的情况下索引全扫描严重影响SQL性能。", + Desc: plocale.DMLCheckExplainFullIndexScanDesc, + Annotation: plocale.DMLCheckExplainFullIndexScanAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, AllowOffline: false, - Message: "不建议对表进行全索引扫描", + Message: plocale.DMLCheckExplainFullIndexScanMessage, Func: checkExplain, - }, { - Rule: driverV2.Rule{ + }, + { + Rule: SourceRule{ Name: DMLCheckLimitOffsetNum, - Desc: "不建议LIMIT的偏移OFFSET大于阈值", - Annotation: "因为OFFSET指定了结果集的起始位置,如果起始位置过大,那么 MySQL 需要处理更多的数据才能返回结果集,这可能会导致查询性能下降。", + Desc: plocale.DMLCheckLimitOffsetNumDesc, + Annotation: plocale.DMLCheckLimitOffsetNumAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "100", - Desc: "offset 大小", + Desc: plocale.DMLCheckLimitOffsetNumParams1, Type: params.ParamTypeInt, }, }, }, - Message: "不建议LIMIT的偏移OFFSET大于阈值,OFFSET=%v(阈值为%v)", + Message: plocale.DMLCheckLimitOffsetNumMessage, AllowOffline: true, Func: checkLimitOffsetNum, - }, { - Rule: driverV2.Rule{ + }, + { + Rule: SourceRule{ Name: DMLCheckUpdateOrDeleteHasWhere, - Desc: "建议UPDATE/DELETE操作使用WHERE条件", - Annotation: "因为这些语句的目的是修改数据库中的数据,需要使用 WHERE 条件来过滤需要更新或删除的记录,以确保数据的正确性。另外,使用 WHERE 条件还可以提高查询性能。", + Desc: 
plocale.DMLCheckUpdateOrDeleteHasWhereDesc, + Annotation: plocale.DMLCheckUpdateOrDeleteHasWhereAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "建议UPDATE/DELETE操作使用WHERE条件", + Message: plocale.DMLCheckUpdateOrDeleteHasWhereMessage, AllowOffline: true, Func: checkUpdateOrDeleteHasWhere, - }, { - Rule: driverV2.Rule{ + }, + { + Rule: SourceRule{ Name: DMLCheckSortColumnLength, - Desc: "禁止对长字段排序", - Annotation: "对例如VARCHAR(2000)这样的长字段进行ORDER BY、DISTINCT、GROUP BY、UNION之类的操作,会引发排序,有性能隐患", + Desc: plocale.DMLCheckSortColumnLengthDesc, + Annotation: plocale.DMLCheckSortColumnLengthAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeUsageSuggestion, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeUsageSuggestion, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "2000", - Desc: "可排序字段的最大长度", + Desc: plocale.DMLCheckSortColumnLengthParams1, Type: params.ParamTypeInt, }, }, }, AllowOffline: false, - Message: "长度超过阈值的字段不建议用于ORDER BY、DISTINCT、GROUP BY、UNION,这些字段有:%v", + Message: plocale.DMLCheckSortColumnLengthMessage, Func: checkSortColumnLength, - }, { - Rule: driverV2.Rule{ + }, + { + Rule: SourceRule{ Name: AllCheckPrepareStatementPlaceholders, - Desc: "绑定的变量个数不建议超过阈值", - Annotation: "因为过度使用绑定变量会增加查询的复杂度,从而降低查询性能。过度使用绑定变量还会增加维护成本。默认阈值:100", + Desc: plocale.AllCheckPrepareStatementPlaceholdersDesc, + Annotation: plocale.AllCheckPrepareStatementPlaceholdersAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeUsageSuggestion, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeUsageSuggestion, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "100", - Desc: "最大绑定变量数量", + Desc: plocale.AllCheckPrepareStatementPlaceholdersParams1, Type: params.ParamTypeInt, }, }, }, AllowOffline: true, - Message: "使用绑定变量数量为 %v,不建议超过设定阈值 %v", + Message: plocale.AllCheckPrepareStatementPlaceholdersMessage, Func: 
checkPrepareStatementPlaceholders, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckExplainExtraUsingIndexForSkipScan, - Desc: "不建议对表进行索引跳跃扫描", - Annotation: "索引扫描是跳跃扫描,未遵循最左匹配原则,可能降低索引的使用效率,影响查询性能", + Desc: plocale.DMLCheckExplainExtraUsingIndexForSkipScanDesc, + Annotation: plocale.DMLCheckExplainExtraUsingIndexForSkipScanAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, AllowOffline: false, - Message: "不建议对表进行索引跳跃扫描", + Message: plocale.DMLCheckExplainExtraUsingIndexForSkipScanMessage, Func: checkExplain, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckAffectedRows, - Desc: "UPDATE/DELETE操作影响行数不建议超过阈值", - Annotation: "如果 DML 操作影响行数过多,会导致查询性能下降,因为需要扫描更多的数据。", + Desc: plocale.DMLCheckAffectedRowsDesc, + Annotation: plocale.DMLCheckAffectedRowsAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "10000", - Desc: "最大影响行数", + Desc: plocale.DMLCheckAffectedRowsParams1, Type: params.ParamTypeInt, }, }, }, AllowOffline: false, - Message: "UPDATE/DELETE操作影响行数不建议超过阈值,影响行数为 %v,超过设定阈值 %v", + Message: plocale.DMLCheckAffectedRowsMessage, Func: checkAffectedRows, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckSameTableJoinedMultipleTimes, - Desc: "不建议对同一张表连接多次", - Annotation: "如果对单表查询多次,会导致查询性能下降。", + Desc: plocale.DMLCheckSameTableJoinedMultipleTimesDesc, + Annotation: plocale.DMLCheckSameTableJoinedMultipleTimesAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, AllowOffline: false, - Message: "表%v被连接多次", + Message: plocale.DMLCheckSameTableJoinedMultipleTimesMessage, Func: checkSameTableJoinedMultipleTimes, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckExplainUsingIndex, - Desc: "SQL查询条件需要走索引", - 
Annotation: "使用索引可以显著提高SQL查询的性能。", + Desc: plocale.DMLCheckExplainUsingIndexDesc, + Annotation: plocale.DMLCheckExplainUsingIndexAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, AllowOffline: false, - Message: "建议使用索引以优化 SQL 查询性能", + Message: plocale.DMLCheckExplainUsingIndexMessage, Func: checkExplain, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckInsertSelect, - Desc: "不建议使用INSERT ... SELECT", - Annotation: "使用 INSERT ... SELECT 在默认事务隔离级别下,可能会导致对查询的表施加表级锁。", + Desc: plocale.DMLCheckInsertSelectDesc, + Annotation: plocale.DMLCheckInsertSelectAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, AllowOffline: true, - Message: "不建议使用INSERT ... SELECT", + Message: plocale.DMLCheckInsertSelectMessage, Func: checkInsertSelect, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckAggregate, - Desc: "不建议使用聚合函数", - Annotation: "不建议使用SQL聚合函数,是为了确保查询的简单性、高性能和数据一致性。", + Desc: plocale.DMLCheckAggregateDesc, + Annotation: plocale.DMLCheckAggregateAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, AllowOffline: true, - Message: "不建议使用聚合函数计算", + Message: plocale.DMLCheckAggregateMessage, Func: checkAggregateFunc, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckColumnNotNULL, - Desc: "表字段建议有NOT NULL约束", - Annotation: "表字段建议有 NOT NULL 约束,可确保数据的完整性,防止插入空值,提升查询准确性。", + Desc: plocale.DDLCheckColumnNotNULLDesc, + Annotation: plocale.DDLCheckColumnNotNULLAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, AllowOffline: false, - Message: "建议字段%v设置NOT NULL约束", + Message: plocale.DDLCheckColumnNotNULLMessage, Func: checkColumnNotNull, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckIndexSelectivity, - Desc: "建议连库查询时,确保SQL执行计划中使用的索引区分度大于阈值", - 
Annotation: "确保SQL执行计划中使用的高索引区分度,有助于提升查询性能并优化查询效率。", + Desc: plocale.DMLCheckIndexSelectivityDesc, + Annotation: plocale.DMLCheckIndexSelectivityAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "70", - Desc: "可选择性(百分比)", + Desc: plocale.DMLCheckIndexSelectivityParams1, Type: params.ParamTypeInt, }, }, }, AllowOffline: false, - Message: "索引:%v,未超过区分度阈值:%v,建议使用超过阈值的索引。", + Message: plocale.DMLCheckIndexSelectivityMessage, Func: checkIndexSelectivity, }, { // 该规则只适用于库表元数据扫描并且需要与停用上线审核模式规则一起使用 - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckTableRows, - Desc: "表行数超过阈值,建议对表进行拆分", - Annotation: "当表行数超过阈值时,对表进行拆分有助于提高数据库性能和查询速度。", + Desc: plocale.DDLCheckTableRowsDesc, + Annotation: plocale.DDLCheckTableRowsAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeUsageSuggestion, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeUsageSuggestion, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "1000", - Desc: "表行数(万)", + Desc: plocale.DDLCheckTableRowsParams1, Type: params.ParamTypeInt, }, }, }, - Message: "表行数超过阈值,建议对表进行拆分", + Message: plocale.DDLCheckTableRowsMessage, Func: checkTableRows, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckCompositeIndexDistinction, - Desc: "建议在组合索引中将区分度高的字段靠前放", - Annotation: "将区分度高的字段靠前放置在组合索引中有助于提高索引的查询性能,因为它能更快地减小数据范围,提高检索效率。", + Desc: plocale.DDLCheckCompositeIndexDistinctionDesc, + Annotation: plocale.DDLCheckCompositeIndexDistinctionAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, AllowOffline: false, - Message: "建议在组合索引中将区分度高的字段靠前放,%v", + Message: plocale.DDLCheckCompositeIndexDistinctionMessage, Func: checkCompositeIndexSelectivity, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLAvoidText, - Desc: "使用TEXT 
类型的字段建议和原表进行分拆,与原表主键单独组成另外一个表进行存放", - Annotation: "将TEXT类型的字段与原表主键分拆成另一个表可以提高数据库性能和查询速度,减少不必要的 I/O 操作。", + Desc: plocale.DDLAvoidTextDesc, + Annotation: plocale.DDLAvoidTextAnnotation, Level: driverV2.RuleLevelNotice, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, AllowOffline: true, - Message: "字段:%v为TEXT类型,建议和原表进行分拆,与原表主键单独组成另外一个表进行存放", + Message: plocale.DDLAvoidTextMessage, Func: checkText, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckSelectRows, - Desc: "查询数据量超过阈值,筛选条件必须带上主键或者索引", - Annotation: "筛选条件必须带上主键或索引可提高查询性能和减少全表扫描的成本。", + Desc: plocale.DMLCheckSelectRowsDesc, + Annotation: plocale.DMLCheckSelectRowsAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "10", - Desc: "查询数据量(万)", + Desc: plocale.DMLCheckSelectRowsParams1, Type: params.ParamTypeInt, }, }, }, AllowOffline: false, - Message: "查询数据量超过阈值,筛选条件必须带上主键或者索引", + Message: plocale.DMLCheckSelectRowsMessage, Func: checkSelectRows, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckScanRows, - Desc: "扫描行数超过阈值,筛选条件必须带上主键或者索引", - Annotation: "筛选条件必须带上主键或索引可降低数据库查询的时间复杂度,提高查询效率。", + Desc: plocale.DMLCheckScanRowsDesc, + Annotation: plocale.DMLCheckScanRowsAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "10", - Desc: "扫描行数量(万)", + Desc: plocale.DMLCheckScanRowsParams1, Type: params.ParamTypeInt, }, }, }, AllowOffline: false, - Message: "扫描行数超过阈值,筛选条件必须带上主键或者索引", + Message: plocale.DMLCheckScanRowsMessage, Func: checkScanRows, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLMustUseLeftMostPrefix, - Desc: "使用联合索引时,必须使用联合索引的首字段", - Annotation: "使用联合索引时,不包含首字段会导致联合索引失效", + Desc: 
plocale.DMLMustUseLeftMostPrefixDesc, + Annotation: plocale.DMLMustUseLeftMostPrefixAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexInvalidation, + Category: plocale.RuleTypeIndexInvalidation, }, AllowOffline: false, - Message: "使用联合索引时,必须使用联合索引的首字段", + Message: plocale.DMLMustUseLeftMostPrefixMessage, Func: mustMatchLeftMostPrefix, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLMustMatchLeftMostPrefix, - Desc: "禁止对联合索引左侧字段进行IN 、OR等非等值查询", - Annotation: "对联合索引左侧字段进行IN 、OR等非等值查询会导致联合索引失效", + Desc: plocale.DMLMustMatchLeftMostPrefixDesc, + Annotation: plocale.DMLMustMatchLeftMostPrefixAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexInvalidation, + Category: plocale.RuleTypeIndexInvalidation, }, AllowOffline: false, - Message: "对联合索引左侧字段进行IN 、OR等非等值查询会导致联合索引失效", + Message: plocale.DMLMustMatchLeftMostPrefixMessage, Func: mustMatchLeftMostPrefix, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckJoinFieldUseIndex, - Desc: "JOIN字段必须包含索引", - Annotation: "JOIN字段包含索引可提高连接操作的性能和查询速度。", + Desc: plocale.DMLCheckJoinFieldUseIndexDesc, + Annotation: plocale.DMLCheckJoinFieldUseIndexAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexInvalidation, + Category: plocale.RuleTypeIndexInvalidation, }, AllowOffline: false, - Message: "JOIN字段必须包含索引", + Message: plocale.DMLCheckJoinFieldUseIndexMessage, Func: checkJoinFieldUseIndex, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckJoinFieldCharacterSetAndCollation, - Desc: "连接表字段的字符集和排序规则必须一致", - Annotation: "连接表字段的字符集和排序规则一致可避免数据不一致和查询错误,确保连接操作正确执行。", + Desc: plocale.DMLCheckJoinFieldCharacterSetAndCollationDesc, + Annotation: plocale.DMLCheckJoinFieldCharacterSetAndCollationAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexInvalidation, + Category: plocale.RuleTypeIndexInvalidation, }, AllowOffline: false, - Message: "连接表字段的字符集和排序规则必须一致", + Message: plocale.DMLCheckJoinFieldCharacterSetAndCollationMessage, Func: 
checkJoinFieldCharacterSetAndCollation, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckMathComputationOrFuncOnIndex, - Desc: "禁止对索引列进行数学运算和使用函数", - Annotation: "对索引列进行数学运算和使用函数会导致索引失效,从而导致全表扫描,影响查询性能。", + Desc: plocale.DMLCheckMathComputationOrFuncOnIndexDesc, + Annotation: plocale.DMLCheckMathComputationOrFuncOnIndexAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexInvalidation, + Category: plocale.RuleTypeIndexInvalidation, }, AllowOffline: false, - Message: "禁止对索引列进行数学运算和使用函数", + Message: plocale.DMLCheckMathComputationOrFuncOnIndexMessage, Func: checkMathComputationOrFuncOnIndex, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLSQLExplainLowestLevel, - Desc: "SQL执行计划中type字段建议满足规定的级别", - Annotation: "验证 SQL 执行计划中的 type 字段,确保满足要求级别,以保证查询性能。", + Desc: plocale.DMLSQLExplainLowestLevelDesc, + Annotation: plocale.DMLSQLExplainLowestLevelAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDDLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDDLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "range,ref,const,eq_ref,system,NULL", - Desc: "查询计划type等级,以英文逗号隔开", + Desc: plocale.DMLSQLExplainLowestLevelParams1, Type: params.ParamTypeString, }, }, }, AllowOffline: false, - Message: "建议修改SQL,确保执行计划中type字段可以满足规定中的任一等级:%v", + Message: plocale.DMLSQLExplainLowestLevelMessage, Func: checkSQLExplainLowestLevel, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLAvoidFullText, - Desc: "禁止使用全文索引", - Annotation: "全文索引的使用会增加存储开销,并对写操作性能产生一定影响。", + Desc: plocale.DDLAvoidFullTextDesc, + Annotation: plocale.DDLAvoidFullTextAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeUsageSuggestion, + Category: plocale.RuleTypeUsageSuggestion, }, AllowOffline: true, - Message: "禁止使用全文索引", + Message: plocale.DDLAvoidFullTextMessage, Func: avoidFullText, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLAvoidGeometry, - Desc: "禁止使用空间字段和空间索引", - Annotation: 
"使用空间字段和空间索引会增加存储需求,对数据库性能造成一定影响", + Desc: plocale.DDLAvoidGeometryDesc, + Annotation: plocale.DDLAvoidGeometryAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeUsageSuggestion, + Category: plocale.RuleTypeUsageSuggestion, }, AllowOffline: true, - Message: "禁止使用空间字段和空间索引", + Message: plocale.DDLAvoidGeometryMessage, Func: avoidGeometry, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLAvoidWhereEqualNull, - Desc: "WHERE子句中禁止将NULL值与其他字段或值进行比较运算", - Annotation: "NULL在SQL中属于特殊值,无法与普通值进行比较。例如:column = NULL恒为false,即使column存在null值也不会查询出来,所以column = NULL应该写为column is NULL", + Desc: plocale.DMLAvoidWhereEqualNullDesc, + Annotation: plocale.DMLAvoidWhereEqualNullAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, AllowOffline: true, - Message: "WHERE子句中禁止将NULL值与其他字段或值进行比较运算", + Message: plocale.DMLAvoidWhereEqualNullMessage, Func: avoidWhereEqualNull, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLAvoidEvent, - Desc: "禁止使用event", - Annotation: "使用event会增加数据库的维护难度和依赖性,并且也会造成安全问题。", + Desc: plocale.DDLAvoidEventDesc, + Annotation: plocale.DDLAvoidEventAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeUsageSuggestion, + Category: plocale.RuleTypeUsageSuggestion, }, AllowOffline: true, - Message: "禁止使用event", + Message: plocale.DDLAvoidEventMessage, Func: avoidEvent, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckCharLength, - Desc: "禁止char, varchar类型字段字符长度总和超过阈值", - Annotation: "使用过长或者过多的varchar,char字段可能会增加业务逻辑的复杂性;如果字段平均长度过大时,会占用更多的存储空间。", + Desc: plocale.DDLCheckCharLengthDesc, + Annotation: plocale.DDLCheckCharLengthAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeUsageSuggestion, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeUsageSuggestion, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "2000", - Desc: "字符长度", + Desc: plocale.DDLCheckCharLengthParams1, Type: params.ParamTypeInt, }, }, 
}, AllowOffline: false, - Message: "禁止char, varchar类型字段字符长度总和超过阈值 %v", + Message: plocale.DDLCheckCharLengthMessage, Func: checkCharLength, }, } diff --git a/sqle/driver/mysql/rule/rule_list_trial.go b/sqle/driver/mysql/rule/rule_list_trial.go index 7393e19360..03931b0119 100644 --- a/sqle/driver/mysql/rule/rule_list_trial.go +++ b/sqle/driver/mysql/rule/rule_list_trial.go @@ -4,217 +4,220 @@ package rule import ( + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" driverV2 "github.com/actiontech/sqle/sqle/driver/v2" "github.com/actiontech/sqle/sqle/pkg/params" "github.com/pingcap/parser/ast" ) -var RuleHandlers = []RuleHandler{ +var RuleHandlers = generateRuleHandlers(sourceRuleHandlers) + +var sourceRuleHandlers = []*SourceHandler{ { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckFuzzySearch, - Desc: "禁止使用全模糊搜索或左模糊搜索", - Annotation: "使用全模糊搜索或左模糊搜索将导致查询无法使用索引,导致全表扫描", + Desc: plocale.DMLCheckFuzzySearchDesc, + Annotation: plocale.DMLCheckFuzzySearchAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "禁止使用全模糊搜索或左模糊搜索", + Message: plocale.DMLCheckFuzzySearchMessage, AllowOffline: true, Func: checkSelectWhere, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckJoinFieldType, - Desc: "建议JOIN字段类型保持一致", - Annotation: "JOIN字段类型不一致会导致类型不匹配发生隐式准换,建议开启此规则,避免索引失效", + Desc: plocale.DMLCheckJoinFieldTypeDesc, + Annotation: plocale.DMLCheckJoinFieldTypeAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "建议JOIN字段类型保持一致, 否则会导致隐式转换", + Message: plocale.DMLCheckJoinFieldTypeMessage, AllowOffline: false, Func: checkJoinFieldType, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLRecommendTableColumnCharsetSame, - Desc: "建议列与表使用同一个字符集", - Annotation: "统一字符集可以避免由于字符集转换产生的乱码,不同的字符集进行比较前需要进行转换会造成索引失效", + Desc: plocale.DDLRecommendTableColumnCharsetSameDesc, + Annotation: 
plocale.DDLRecommendTableColumnCharsetSameAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "建议列与表使用同一个字符集", + Message: plocale.DDLRecommendTableColumnCharsetSameMessage, Func: recommendTableColumnCharsetSame, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckColumnTimestampWithoutDefault, - Desc: "TIMESTAMP 类型的列必须添加默认值", - Annotation: "TIMESTAMP添加默认值,可避免出现全为0的日期格式与业务预期不符", + Desc: plocale.DDLCheckColumnTimestampWithoutDefaultDesc, + Annotation: plocale.DDLCheckColumnTimestampWithoutDefaultAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "TIMESTAMP 类型的列必须添加默认值", + Message: plocale.DDLCheckColumnTimestampWithoutDefaultMessage, AllowOffline: true, Func: checkColumnTimestampWithoutDefault, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckIndexPrefix, - Desc: "建议普通索引使用固定前缀", - Annotation: "通过配置该规则可以规范指定业务的索引命名规则,具体命名规范可以自定义设置,默认提示值:idx_", + Desc: plocale.DDLCheckIndexPrefixDesc, + Annotation: plocale.DDLCheckIndexPrefixAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeNamingConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeNamingConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "idx_", - Desc: "索引前缀", + Desc: plocale.DDLCheckUniqueIndexPrefixParams1, Type: params.ParamTypeString, }, }, }, - Message: "建议普通索引要以\"%v\"为前缀", + Message: plocale.DDLCheckIndexPrefixMessage, AllowOffline: true, Func: checkIndexPrefix, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckPKNotExist, - Desc: "表必须有主键", - Annotation: "主键使数据达到全局唯一,可提高数据检索效率", + Desc: plocale.DDLCheckPKNotExistDesc, + Annotation: plocale.DDLCheckPKNotExistAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexingConvention, + Category: plocale.RuleTypeIndexingConvention, }, - Message: "表必须有主键", + Message: 
plocale.DDLCheckPKNotExistMessage, AllowOffline: true, NotAllowOfflineStmts: []ast.Node{&ast.AlterTableStmt{}}, NotSupportExecutedSQLAuditStmts: []ast.Node{&ast.AlterTableStmt{}}, Func: checkPrimaryKey, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: ConfigDMLExplainPreCheckEnable, - Desc: "使用EXPLAIN加强预检查能力", - Annotation: "通过 EXPLAIN 的形式将待上线的DML进行SQL是否能正确执行的检查,提前发现语句的错误,提高上线成功率", + Desc: plocale.ConfigDMLExplainPreCheckEnableDesc, + Annotation: plocale.ConfigDMLExplainPreCheckEnableAnnotation, Level: driverV2.RuleLevelWarn, - Category: RuleTypeGlobalConfig, + Category: plocale.RuleTypeGlobalConfig, }, Func: nil, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckIndexCount, - Desc: "索引个数建议不超过阈值", - Annotation: "在表上建立的每个索引都会增加存储开销,索引对于插入、删除、更新操作也会增加处理上的开销,太多与不充分、不正确的索引对性能都毫无益处;具体规则阈值可以根据业务需求调整,默认值:5", + Desc: plocale.DDLCheckIndexCountDesc, + Annotation: plocale.DDLCheckIndexCountAnnotation, Level: driverV2.RuleLevelNotice, //Value: "5", - Category: RuleTypeIndexingConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeIndexingConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "5", - Desc: "最大索引个数", + Desc: plocale.DDLCheckIndexCountParams1, Type: params.ParamTypeInt, }, }, }, - Message: "索引个数建议不超过%v个", + Message: plocale.DDLCheckIndexCountMessage, AllowOffline: true, NotAllowOfflineStmts: []ast.Node{&ast.AlterTableStmt{}, &ast.CreateIndexStmt{}}, NotSupportExecutedSQLAuditStmts: []ast.Node{&ast.AlterTableStmt{}, &ast.CreateIndexStmt{}}, Func: checkIndex, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckPKWithoutAutoIncrement, - Desc: "主键建议使用自增", - Annotation: "自增主键,数字型速度快,而且是增量增长,占用空间小,更快速的做数据插入操作,避免增加维护索引的开销", + Desc: plocale.DDLCheckPKWithoutAutoIncrementDesc, + Annotation: plocale.DDLCheckPKWithoutAutoIncrementAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexingConvention, + Category: plocale.RuleTypeIndexingConvention, }, - Message: "主键建议使用自增", + Message: 
plocale.DDLCheckPKWithoutAutoIncrementMessage, AllowOffline: true, NotAllowOfflineStmts: []ast.Node{&ast.AlterTableStmt{}}, NotSupportExecutedSQLAuditStmts: []ast.Node{&ast.AlterTableStmt{}}, Func: checkPrimaryKey, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckObjectNameUsingKeyword, - Desc: "数据库对象命名禁止使用保留字", - Annotation: "通过配置该规则可以规范指定业务的数据对象命名规则,避免发生冲突,以及混淆", + Desc: plocale.DDLCheckObjectNameUsingKeywordDesc, + Annotation: plocale.DDLCheckObjectNameUsingKeywordAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeNamingConvention, + Category: plocale.RuleTypeNamingConvention, }, - Message: "数据库对象命名禁止使用保留字 %s", + Message: plocale.DDLCheckObjectNameUsingKeywordMessage, AllowOffline: true, Func: checkNewObjectName, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckMathComputationOrFuncOnIndex, - Desc: "禁止对索引列进行数学运算和使用函数", - Annotation: "对索引列进行数学运算和使用函数会导致索引失效,从而导致全表扫描,影响查询性能。", + Desc: plocale.DMLCheckMathComputationOrFuncOnIndexDesc, + Annotation: plocale.DMLCheckMathComputationOrFuncOnIndexAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeIndexInvalidation, + Category: plocale.RuleTypeIndexInvalidation, }, AllowOffline: false, - Message: "禁止对索引列进行数学运算和使用函数", + Message: plocale.DMLCheckMathComputationOrFuncOnIndexMessage, Func: checkMathComputationOrFuncOnIndex, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLDisableDropStatement, - Desc: "禁止除索引外的DROP操作", - Annotation: "DROP是DDL,数据变更不会写入日志,无法进行回滚;建议开启此规则,避免误删除操作", + Desc: plocale.DDLDisableDropStatementDesc, + Annotation: plocale.DDLDisableDropStatementAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeUsageSuggestion, + Category: plocale.RuleTypeUsageSuggestion, }, - Message: "禁止除索引外的DROP操作", + Message: plocale.DDLDisableDropStatementMessage, AllowOffline: true, Func: disableDropStmt, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckScanRows, - Desc: "扫描行数超过阈值,筛选条件必须带上主键或者索引", - Annotation: 
"筛选条件必须带上主键或索引可降低数据库查询的时间复杂度,提高查询效率。", + Desc: plocale.DMLCheckScanRowsDesc, + Annotation: plocale.DMLCheckScanRowsAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*SourceParam{ + { Key: DefaultSingleParamKeyName, Value: "10", - Desc: "扫描行数量(万)", + Desc: plocale.DMLCheckScanRowsParams1, Type: params.ParamTypeInt, }, }, }, AllowOffline: false, - Message: "扫描行数超过阈值,筛选条件必须带上主键或者索引", + Message: plocale.DMLCheckScanRowsMessage, Func: checkScanRows, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DMLCheckWhereIsInvalid, - Desc: "禁止使用没有WHERE条件或者WHERE条件恒为TRUE的SQL", - Annotation: "SQL缺少WHERE条件在执行时会进行全表扫描产生额外开销,建议在大数据量高并发环境下开启,避免影响数据库查询性能", + Desc: plocale.DMLCheckWhereIsInvalidDesc, + Annotation: plocale.DMLCheckWhereIsInvalidAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, - Message: "禁止使用没有WHERE条件或者WHERE条件恒为TRUE的SQL", + Message: plocale.DMLCheckWhereIsInvalidMessage, AllowOffline: true, Func: checkSelectWhere, }, { - Rule: driverV2.Rule{ + Rule: SourceRule{ Name: DDLCheckColumnWithoutDefault, - Desc: "除了自增列及大字段列之外,每个列都必须添加默认值", - Annotation: "列添加默认值,可避免列为NULL值时对查询的影响", + Desc: plocale.DDLCheckColumnWithoutDefaultDesc, + Annotation: plocale.DDLCheckColumnWithoutDefaultAnnotation, Level: driverV2.RuleLevelError, - Category: RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, - Message: "除了自增列及大字段列之外,每个列都必须添加默认值", + Message: plocale.DDLCheckColumnWithoutDefaultMessage, AllowOffline: true, Func: checkColumnWithoutDefault, }, diff --git a/sqle/driver/mysql/rule/rule_test.go b/sqle/driver/mysql/rule/rule_test.go index 4a6cad1dfa..ac49f1c300 100644 --- a/sqle/driver/mysql/rule/rule_test.go +++ b/sqle/driver/mysql/rule/rule_test.go @@ -3,6 +3,7 @@ package rule import ( "testing" + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" driverV2 
"github.com/actiontech/sqle/sqle/driver/v2" "github.com/stretchr/testify/assert" ) @@ -10,19 +11,19 @@ import ( func TestInspectResults(t *testing.T) { results := driverV2.NewAuditResults() handler := RuleHandlerMap[DDLCheckPKWithoutIfNotExists] - results.Add(handler.Rule.Level, handler.Rule.Name, handler.Message) + results.Add(handler.Rule.Level, handler.Rule.Name, plocale.ShouldLocalizeAll(plocale.DDLCheckPKWithoutIfNotExistsMessage)) assert.Equal(t, driverV2.RuleLevelError, results.Level()) assert.Equal(t, "[error]新建表建议加入 IF NOT EXISTS,保证重复执行不报错", results.Message()) - results.Add(driverV2.RuleLevelError, "", "表 %s 不存在", "not_exist_tb") + results.Add(driverV2.RuleLevelError, "", plocale.ShouldLocalizeAllWithArgs(plocale.TableNotExistMessage, "not_exist_tb")) assert.Equal(t, driverV2.RuleLevelError, results.Level()) assert.Equal(t, `[error]新建表建议加入 IF NOT EXISTS,保证重复执行不报错 [error]表 not_exist_tb 不存在`, results.Message()) results2 := driverV2.NewAuditResults() - results2.Add(results.Level(), "", results.Message()) - results2.Add(driverV2.RuleLevelNotice, "", "test") + results2.Add(results.Level(), "", plocale.ConvertStr2I18n(results.Message())) + results2.Add(driverV2.RuleLevelNotice, "", plocale.ConvertStr2I18n("test")) assert.Equal(t, driverV2.RuleLevelError, results2.Level()) assert.Equal(t, `[error]新建表建议加入 IF NOT EXISTS,保证重复执行不报错 @@ -30,8 +31,8 @@ func TestInspectResults(t *testing.T) { [notice]test`, results2.Message()) results3 := driverV2.NewAuditResults() - results3.Add(results2.Level(), "", results2.Message()) - results3.Add(driverV2.RuleLevelNotice, "", "[osc]test") + results3.Add(results2.Level(), "", plocale.ConvertStr2I18n(results2.Message())) + results3.Add(driverV2.RuleLevelNotice, "", plocale.ConvertStr2I18n("[osc]test")) assert.Equal(t, driverV2.RuleLevelError, results3.Level()) assert.Equal(t, `[error]新建表建议加入 IF NOT EXISTS,保证重复执行不报错 @@ -40,16 +41,16 @@ func TestInspectResults(t *testing.T) { [osc]test`, results3.Message()) results4 := 
driverV2.NewAuditResults() - results4.Add(driverV2.RuleLevelNotice, "", "[notice]test") - results4.Add(driverV2.RuleLevelError, "", "[osc]test") + results4.Add(driverV2.RuleLevelNotice, "", plocale.ConvertStr2I18n("[notice]test")) + results4.Add(driverV2.RuleLevelError, "", plocale.ConvertStr2I18n("[osc]test")) assert.Equal(t, driverV2.RuleLevelError, results4.Level()) assert.Equal(t, `[osc]test [notice]test`, results4.Message()) results5 := driverV2.NewAuditResults() - results5.Add(driverV2.RuleLevelWarn, "", "[warn]test") - results5.Add(driverV2.RuleLevelNotice, "", "[osc]test") + results5.Add(driverV2.RuleLevelWarn, "", plocale.ConvertStr2I18n("[warn]test")) + results5.Add(driverV2.RuleLevelNotice, "", plocale.ConvertStr2I18n("[osc]test")) assert.Equal(t, driverV2.RuleLevelWarn, results5.Level()) assert.Equal(t, `[warn]test diff --git a/sqle/driver/plugin_adapter_v1.go b/sqle/driver/plugin_adapter_v1.go index 6d42569f82..f5c55b351d 100644 --- a/sqle/driver/plugin_adapter_v1.go +++ b/sqle/driver/plugin_adapter_v1.go @@ -8,8 +8,8 @@ import ( driverV1 "github.com/actiontech/sqle/sqle/driver/v1" driverV2 "github.com/actiontech/sqle/sqle/driver/v2" "github.com/actiontech/sqle/sqle/errors" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/pkg/params" - goPlugin "github.com/hashicorp/go-plugin" "github.com/sirupsen/logrus" ) @@ -33,12 +33,18 @@ func convertRuleFromV1ToV2(rule *driverV1.Rule) *driverV2.Rule { }) } return &driverV2.Rule{ - Name: rule.Name, - Category: rule.Category, - Desc: rule.Desc, - Annotation: rule.Annotation, - Level: driverV2.RuleLevel(rule.Level), - Params: ps, + Name: rule.Name, + Level: driverV2.RuleLevel(rule.Level), + Params: ps, + I18nRuleInfo: map[string]*driverV2.RuleInfo{ + locale.DefaultLang.String(): { + Desc: rule.Desc, + Annotation: rule.Annotation, + Category: rule.Category, + Params: ps, + Knowledge: driverV2.RuleKnowledge{}, + }, + }, } } @@ -97,9 +103,9 @@ func (d *PluginProcessorV1) Open(l *logrus.Entry, 
cfgV2 *driverV2.Config) (Plugi for _, rule := range cfgV2.Rules { cfg.Rules = append(cfg.Rules, &driverV1.Rule{ Name: rule.Name, - Desc: rule.Desc, - Annotation: rule.Annotation, - Category: rule.Category, + Desc: rule.I18nRuleInfo.GetRuleInfoByLangTag(locale.DefaultLang.String()).Desc, + Annotation: rule.I18nRuleInfo.GetRuleInfoByLangTag(locale.DefaultLang.String()).Annotation, + Category: rule.I18nRuleInfo.GetRuleInfoByLangTag(locale.DefaultLang.String()).Category, Level: driverV1.RuleLevel(rule.Level), Params: rule.Params, }) @@ -164,8 +170,12 @@ func (p *PluginImplV1) Audit(ctx context.Context, sqls []string) ([]*driverV2.Au resultV2 := &driverV2.AuditResults{} for _, result := range resultV1.Results { resultV2.Results = append(resultV2.Results, &driverV2.AuditResult{ - Level: driverV2.RuleLevel(result.Level), - Message: result.Message, + Level: driverV2.RuleLevel(result.Level), + I18nAuditResultInfo: map[string]driverV2.AuditResultInfo{ + locale.DefaultLang.String(): { + Message: result.Message, + }, + }, }) } resultsV2 = append(resultsV2, resultV2) @@ -173,12 +183,13 @@ func (p *PluginImplV1) Audit(ctx context.Context, sqls []string) ([]*driverV2.Au return resultsV2, nil } -func (p *PluginImplV1) GenRollbackSQL(ctx context.Context, sql string) (string, string, error) { +func (p *PluginImplV1) GenRollbackSQL(ctx context.Context, sql string) (string, driverV2.I18nStr, error) { client, err := p.DriverManager.GetAuditDriver() if err != nil { - return "", "", err + return "", nil, err } - return client.GenRollbackSQL(ctx, sql) + rollbackSql, reason, err := client.GenRollbackSQL(ctx, sql) + return rollbackSql, driverV2.I18nStr{locale.DefaultLang.String(): reason}, err } func (p *PluginImplV1) Ping(ctx context.Context) error { diff --git a/sqle/driver/plugin_adapter_v2.go b/sqle/driver/plugin_adapter_v2.go index e8e213340f..1d976d696a 100644 --- a/sqle/driver/plugin_adapter_v2.go +++ b/sqle/driver/plugin_adapter_v2.go @@ -10,6 +10,7 @@ import ( driverV2 
"github.com/actiontech/sqle/sqle/driver/v2" protoV2 "github.com/actiontech/sqle/sqle/driver/v2/proto" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/log" "github.com/actiontech/sqle/sqle/pkg/params" @@ -77,7 +78,13 @@ func (d *PluginProcessorV2) GetDriverMetas() (*driverV2.DriverMetas, error) { rules := make([]*driverV2.Rule, 0, len(result.Rules)) for _, r := range result.Rules { - rules = append(rules, driverV2.ConvertRuleFromProtoToDriver(r)) + if len(r.I18NRuleInfo) > 0 { + if _, exist := r.I18NRuleInfo[locale.DefaultLang.String()]; !exist { + // 多语言插件必须支持 locale.DefaultLang 用以默认展示 + return nil, fmt.Errorf("client rule: %s not support language: %s", r.Name, locale.DefaultLang.String()) + } + } + rules = append(rules, driverV2.ConvertI18nRuleFromProtoToDriver(r)) } ms := make([]driverV2.OptionalModule, 0, len(result.EnabledOptionalModule)) @@ -118,14 +125,10 @@ func (d *PluginProcessorV2) Open(l *logrus.Entry, cfgV2 *driverV2.Config) (Plugi } } - var rules = make([]*protoV2.Rule, 0, len(cfgV2.Rules)) - for _, rule := range cfgV2.Rules { - rules = append(rules, driverV2.ConvertRuleFromDriverToProto(rule)) - } l.Infof("starting call plugin interface [Init]") result, err := c.Init(context.TODO(), &protoV2.InitRequest{ Dsn: dsn, - Rules: rules, + Rules: driverV2.ConvertI18nRulesFromDriverToProto(cfgV2.Rules), }) if err != nil { l.Errorf("fail to call plugin interface [Init], error: %v", err) @@ -237,22 +240,20 @@ func (s *PluginImplV2) Audit(ctx context.Context, sqls []string) ([]*driverV2.Au return nil, err } - rets := []*driverV2.AuditResults{} + rets := make([]*driverV2.AuditResults, 0, len(resp.AuditResults)) for _, results := range resp.AuditResults { - ret := &driverV2.AuditResults{} - for _, result := range results.Results { - ret.Results = append(ret.Results, &driverV2.AuditResult{ - Level: driverV2.RuleLevel(result.Level), - Message: result.Message, - RuleName: result.RuleName, - }) + dResult, err := 
driverV2.ConvertI18nAuditResultsFromProtoToDriver(results.Results) + if err != nil { + return nil, err } + ret := driverV2.NewAuditResults() + ret.SetResults(dResult) rets = append(rets, ret) } return rets, nil } -func (s *PluginImplV2) GenRollbackSQL(ctx context.Context, sql string) (string, string, error) { +func (s *PluginImplV2) GenRollbackSQL(ctx context.Context, sql string) (string, driverV2.I18nStr, error) { api := "GenRollbackSQL" s.preLog(api) resp, err := s.client.GenRollbackSQL(ctx, &protoV2.GenRollbackSQLRequest{ @@ -263,9 +264,19 @@ func (s *PluginImplV2) GenRollbackSQL(ctx context.Context, sql string) (string, }) s.afterLog(api, err) if err != nil { - return "", "", err + return "", nil, err + } + + var i18nReason driverV2.I18nStr + if resp.Sql.Message != "" && len(resp.Sql.I18NRollbackSQLInfo) == 0 { + i18nReason = driverV2.I18nStr{locale.DefaultLang.String(): resp.Sql.Message} + } else if len(resp.Sql.I18NRollbackSQLInfo) > 0 { + i18nReason = make(driverV2.I18nStr, len(resp.Sql.I18NRollbackSQLInfo)) + for langTag, v := range resp.Sql.I18NRollbackSQLInfo { + i18nReason[langTag] = v.Message + } } - return resp.Sql.Query, resp.Sql.Message, nil + return resp.Sql.Query, i18nReason, nil } // executor diff --git a/sqle/driver/plugin_interface.go b/sqle/driver/plugin_interface.go index 3b5e44a723..9e73b8a673 100644 --- a/sqle/driver/plugin_interface.go +++ b/sqle/driver/plugin_interface.go @@ -19,7 +19,7 @@ type Plugin interface { Audit(ctx context.Context, sqls []string) ([]*driverV2.AuditResults, error) // GenRollbackSQL generate sql's rollback SQL. 
- GenRollbackSQL(ctx context.Context, sql string) (string, string, error) + GenRollbackSQL(ctx context.Context, sql string) (string, driverV2.I18nStr, error) Ping(ctx context.Context) error Exec(ctx context.Context, query string) (driver.Result, error) diff --git a/sqle/driver/v2/driver_grpc_server.go b/sqle/driver/v2/driver_grpc_server.go index 3b39758d8f..57a70b16f1 100644 --- a/sqle/driver/v2/driver_grpc_server.go +++ b/sqle/driver/v2/driver_grpc_server.go @@ -2,10 +2,14 @@ package driverV2 import ( "context" + "database/sql/driver" + "encoding/json" "fmt" + "strings" "sync" protoV2 "github.com/actiontech/sqle/sqle/driver/v2/proto" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/pkg/params" "github.com/pkg/errors" @@ -30,18 +34,78 @@ type DSN struct { } type Rule struct { - Name string + Name string + Level RuleLevel + Params params.Params // 仅用于ParamValue处理,展示 Param.Desc 等以 I18nRuleInfo->RuleInfo.Params 为准 + I18nRuleInfo I18nRuleInfo +} + +type I18nRuleInfo map[string]*RuleInfo + +// GetRuleInfoByLangTag if the lang not exists, return DefaultLang +func (i *I18nRuleInfo) GetRuleInfoByLangTag(lang string) *RuleInfo { + for langTag, ruleInfo := range *i { + if strings.HasPrefix(lang, langTag) { + return ruleInfo + } + } + ruleInfo := (*i)[locale.DefaultLang.String()] + return ruleInfo +} + +func (i I18nRuleInfo) Value() (driver.Value, error) { + b, err := json.Marshal(i) + return string(b), err +} + +func (i *I18nRuleInfo) Scan(input interface{}) error { + return json.Unmarshal(input.([]byte), i) +} + +type RuleInfo struct { Desc string Annotation string // Category is the category of the rule. Such as "Naming Conventions"... // Rules will be displayed on the SQLE rule list page by category. 
Category string - Level RuleLevel - Params params.Params + Params params.Params // 仅用于国际化,ParamValue以 Rule.Params 为准 Knowledge RuleKnowledge } +type I18nStr map[string]string // lang -> str in the lang + +// GetStrInLang if the lang not exists, return DefaultLang +func (s *I18nStr) GetStrInLang(lang string) string { + if s == nil || *s == nil { + return "" + } + for langTag, str := range *s { + if strings.HasPrefix(lang, langTag) { + return str + } + } + return (*s)[locale.DefaultLang.String()] +} + +func (s *I18nStr) SetStrInLang(lang, str string) { + if *s == nil { + *s = map[string]string{lang: str} + } else { + (*s)[lang] = str + } + return +} + +func (s I18nStr) Value() (driver.Value, error) { + b, err := json.Marshal(s) + return string(b), err +} + +func (s *I18nStr) Scan(input interface{}) error { + return json.Unmarshal(input.([]byte), s) +} + type Config struct { DSN *DSN Rules []*Rule @@ -87,7 +151,7 @@ func (d *DriverGrpcServer) Metas(ctx context.Context, req *protoV2.Empty) (*prot DatabaseDefaultPort: d.Meta.DatabaseDefaultPort, Logo: d.Meta.Logo, DatabaseAdditionalParams: ConvertParamToProtoParam(d.Meta.DatabaseAdditionalParams), - Rules: rules, + Rules: ConvertI18nRulesFromDriverToProto(d.Meta.Rules), EnabledOptionalModule: ms, }, nil } @@ -95,7 +159,7 @@ func (d *DriverGrpcServer) Metas(ctx context.Context, req *protoV2.Empty) (*prot func (d *DriverGrpcServer) Init(ctx context.Context, req *protoV2.InitRequest) (*protoV2.InitResponse, error) { var rules = make([]*Rule, 0, len(req.GetRules())) for _, rule := range req.GetRules() { - rules = append(rules, ConvertRuleFromProtoToDriver(rule)) + rules = append(rules, ConvertI18nRuleFromProtoToDriver(rule)) } var dsn *DSN @@ -191,14 +255,10 @@ func (d *DriverGrpcServer) Audit(ctx context.Context, req *protoV2.AuditRequest) resp := &protoV2.AuditResponse{} for _, results := range auditResults { rets := &protoV2.AuditResults{ - Results: []*protoV2.AuditResult{}, + Results: make([]*protoV2.AuditResult, 0, 
len(results.Results)), } for _, result := range results.Results { - rets.Results = append(rets.Results, &protoV2.AuditResult{ - Level: string(result.Level), - Message: result.Message, - RuleName: result.RuleName, - }) + rets.Results = append(rets.Results, ConvertI18nAuditResultFromDriverToProto(result)) } resp.AuditResults = append(resp.AuditResults, rets) } diff --git a/sqle/driver/v2/driver_interface.go b/sqle/driver/v2/driver_interface.go index 84b4fa915d..fb862dd2ab 100644 --- a/sqle/driver/v2/driver_interface.go +++ b/sqle/driver/v2/driver_interface.go @@ -10,6 +10,7 @@ import ( "github.com/actiontech/sqle/sqle/driver/common" protoV2 "github.com/actiontech/sqle/sqle/driver/v2/proto" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/pkg/params" goPlugin "github.com/hashicorp/go-plugin" @@ -144,18 +145,24 @@ func RuleLevelLessOrEqual(a, b string) bool { } type AuditResults struct { - Results []*AuditResult + Results []*AuditResult + ruleNameIndex map[string]int } type AuditResult struct { - Level RuleLevel - Message string - RuleName string + Level RuleLevel + RuleName string + I18nAuditResultInfo map[string]AuditResultInfo +} + +type AuditResultInfo struct { + Message string } func NewAuditResults() *AuditResults { return &AuditResults{ - Results: []*AuditResult{}, + Results: []*AuditResult{}, + ruleNameIndex: map[string]int{}, } } @@ -174,7 +181,7 @@ func (rs *AuditResults) Message() string { repeatCheck := map[string]struct{}{} messages := []string{} for _, result := range rs.Results { - token := result.Message + string(result.Level) + token := result.I18nAuditResultInfo[locale.DefaultLang.String()].Message + string(result.Level) if _, ok := repeatCheck[token]; ok { continue } @@ -183,30 +190,54 @@ func (rs *AuditResults) Message() string { var message string match, _ := regexp.MatchString(fmt.Sprintf(`^\[%s|%s|%s|%s|%s\]`, RuleLevelError, RuleLevelWarn, RuleLevelNotice, RuleLevelNormal, "osc"), - result.Message) + 
result.I18nAuditResultInfo[locale.DefaultLang.String()].Message) if match { - message = result.Message + message = result.I18nAuditResultInfo[locale.DefaultLang.String()].Message } else { - message = fmt.Sprintf("[%s]%s", result.Level, result.Message) + message = fmt.Sprintf("[%s]%s", result.Level, result.I18nAuditResultInfo[locale.DefaultLang.String()].Message) } messages = append(messages, message) } return strings.Join(messages, "\n") } -func (rs *AuditResults) Add(level RuleLevel, ruleName string, messagePattern string, args ...interface{}) { - if level == "" || messagePattern == "" { +func (rs *AuditResults) Add(level RuleLevel, ruleName string, i18nMsgPattern I18nStr, args ...interface{}) { + if level == "" || len(i18nMsgPattern) == 0 { return } - message := messagePattern - if len(args) > 0 { - message = fmt.Sprintf(message, args...) + + if rs.ruleNameIndex == nil { + rs.ruleNameIndex = make(map[string]int) } - rs.Results = append(rs.Results, &AuditResult{ - Level: level, - Message: message, - RuleName: ruleName, - }) + + if index, exist := rs.ruleNameIndex[ruleName]; exist { + rs.Results[index].Level = level + for langTag, msg := range i18nMsgPattern { + rs.Results[index].I18nAuditResultInfo[langTag] = AuditResultInfo{ + Message: msg, + } + } + } else { + ar := &AuditResult{ + Level: level, + RuleName: ruleName, + I18nAuditResultInfo: make(map[string]AuditResultInfo, len(i18nMsgPattern)), + } + for langTag, msg := range i18nMsgPattern { + ari := AuditResultInfo{ + Message: msg, + } + if len(args) > 0 { + ari.Message = fmt.Sprintf(msg, args...) 
+ } + ar.I18nAuditResultInfo[langTag] = ari + } + if ruleName != "" { + rs.ruleNameIndex[ruleName] = len(rs.Results) + } + rs.Results = append(rs.Results, ar) + } + rs.SortByLevel() } @@ -220,6 +251,14 @@ func (rs *AuditResults) HasResult() bool { return len(rs.Results) != 0 } +func (rs *AuditResults) SetResults(ars []*AuditResult) { + rs.Results = ars + rs.ruleNameIndex = make(map[string]int, len(ars)) + for k, v := range ars { + rs.ruleNameIndex[v.RuleName] = k + } +} + type QueryConf struct { TimeOutSecond uint32 } diff --git a/sqle/driver/v2/proto/driver_v2.pb.go b/sqle/driver/v2/proto/driver_v2.pb.go index eb752898ac..d4dfafbf8c 100644 --- a/sqle/driver/v2/proto/driver_v2.pb.go +++ b/sqle/driver/v2/proto/driver_v2.pb.go @@ -5,14 +5,17 @@ Package protoV2 is a generated protocol buffer package. It is generated from these files: + driver_v2.proto It has these top-level messages: + Empty Session Param DSN Rule + I18NRuleInfo Knowledge MetasResponse InitRequest @@ -25,11 +28,13 @@ It has these top-level messages: ParseResponse AuditSQL AuditRequest + I18NAuditResultInfo AuditResult AuditResults AuditResponse NeedRollbackSQL GenRollbackSQLRequest + I18NRollbackSQLInfo RollbackSQL GenRollbackSQLResponse PingRequest @@ -253,13 +258,14 @@ func (m *DSN) GetAdditionalParams() []*Param { } type Rule struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Desc string `protobuf:"bytes,2,opt,name=desc" json:"desc,omitempty"` - Level string `protobuf:"bytes,3,opt,name=level" json:"level,omitempty"` - Category string `protobuf:"bytes,4,opt,name=category" json:"category,omitempty"` - Params []*Param `protobuf:"bytes,5,rep,name=params" json:"params,omitempty"` - Annotation string `protobuf:"bytes,6,opt,name=annotation" json:"annotation,omitempty"` - Knowledge *Knowledge `protobuf:"bytes,7,opt,name=knowledge" json:"knowledge,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Desc string `protobuf:"bytes,2,opt,name=desc" 
json:"desc,omitempty"` + Level string `protobuf:"bytes,3,opt,name=level" json:"level,omitempty"` + Category string `protobuf:"bytes,4,opt,name=category" json:"category,omitempty"` + Params []*Param `protobuf:"bytes,5,rep,name=params" json:"params,omitempty"` + Annotation string `protobuf:"bytes,6,opt,name=annotation" json:"annotation,omitempty"` + Knowledge *Knowledge `protobuf:"bytes,7,opt,name=knowledge" json:"knowledge,omitempty"` + I18NRuleInfo map[string]*I18NRuleInfo `protobuf:"bytes,8,rep,name=i18nRuleInfo" json:"i18nRuleInfo,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` } func (m *Rule) Reset() { *m = Rule{} } @@ -316,6 +322,62 @@ func (m *Rule) GetKnowledge() *Knowledge { return nil } +func (m *Rule) GetI18NRuleInfo() map[string]*I18NRuleInfo { + if m != nil { + return m.I18NRuleInfo + } + return nil +} + +type I18NRuleInfo struct { + Desc string `protobuf:"bytes,2,opt,name=desc" json:"desc,omitempty"` + // string level = 3; + Category string `protobuf:"bytes,4,opt,name=category" json:"category,omitempty"` + Params []*Param `protobuf:"bytes,5,rep,name=params" json:"params,omitempty"` + Annotation string `protobuf:"bytes,6,opt,name=annotation" json:"annotation,omitempty"` + Knowledge *Knowledge `protobuf:"bytes,7,opt,name=knowledge" json:"knowledge,omitempty"` +} + +func (m *I18NRuleInfo) Reset() { *m = I18NRuleInfo{} } +func (m *I18NRuleInfo) String() string { return proto.CompactTextString(m) } +func (*I18NRuleInfo) ProtoMessage() {} +func (*I18NRuleInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *I18NRuleInfo) GetDesc() string { + if m != nil { + return m.Desc + } + return "" +} + +func (m *I18NRuleInfo) GetCategory() string { + if m != nil { + return m.Category + } + return "" +} + +func (m *I18NRuleInfo) GetParams() []*Param { + if m != nil { + return m.Params + } + return nil +} + +func (m *I18NRuleInfo) GetAnnotation() string { + if m != nil { + return m.Annotation + } + 
return "" +} + +func (m *I18NRuleInfo) GetKnowledge() *Knowledge { + if m != nil { + return m.Knowledge + } + return nil +} + type Knowledge struct { Content string `protobuf:"bytes,1,opt,name=content" json:"content,omitempty"` } @@ -323,7 +385,7 @@ type Knowledge struct { func (m *Knowledge) Reset() { *m = Knowledge{} } func (m *Knowledge) String() string { return proto.CompactTextString(m) } func (*Knowledge) ProtoMessage() {} -func (*Knowledge) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*Knowledge) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *Knowledge) GetContent() string { if m != nil { @@ -345,7 +407,7 @@ type MetasResponse struct { func (m *MetasResponse) Reset() { *m = MetasResponse{} } func (m *MetasResponse) String() string { return proto.CompactTextString(m) } func (*MetasResponse) ProtoMessage() {} -func (*MetasResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*MetasResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *MetasResponse) GetPluginName() string { if m != nil { @@ -398,7 +460,7 @@ type InitRequest struct { func (m *InitRequest) Reset() { *m = InitRequest{} } func (m *InitRequest) String() string { return proto.CompactTextString(m) } func (*InitRequest) ProtoMessage() {} -func (*InitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*InitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *InitRequest) GetDsn() *DSN { if m != nil { @@ -421,7 +483,7 @@ type InitResponse struct { func (m *InitResponse) Reset() { *m = InitResponse{} } func (m *InitResponse) String() string { return proto.CompactTextString(m) } func (*InitResponse) ProtoMessage() {} -func (*InitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*InitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } func (m *InitResponse) GetSession() 
*Session { if m != nil { @@ -438,7 +500,7 @@ type CloseRequest struct { func (m *CloseRequest) Reset() { *m = CloseRequest{} } func (m *CloseRequest) String() string { return proto.CompactTextString(m) } func (*CloseRequest) ProtoMessage() {} -func (*CloseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*CloseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } func (m *CloseRequest) GetSession() *Session { if m != nil { @@ -455,7 +517,7 @@ type KillProcessRequest struct { func (m *KillProcessRequest) Reset() { *m = KillProcessRequest{} } func (m *KillProcessRequest) String() string { return proto.CompactTextString(m) } func (*KillProcessRequest) ProtoMessage() {} -func (*KillProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (*KillProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } func (m *KillProcessRequest) GetSession() *Session { if m != nil { @@ -472,7 +534,7 @@ type ParsedSQL struct { func (m *ParsedSQL) Reset() { *m = ParsedSQL{} } func (m *ParsedSQL) String() string { return proto.CompactTextString(m) } func (*ParsedSQL) ProtoMessage() {} -func (*ParsedSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (*ParsedSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } func (m *ParsedSQL) GetQuery() string { if m != nil { @@ -489,7 +551,7 @@ type ParseRequest struct { func (m *ParseRequest) Reset() { *m = ParseRequest{} } func (m *ParseRequest) String() string { return proto.CompactTextString(m) } func (*ParseRequest) ProtoMessage() {} -func (*ParseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (*ParseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } func (m *ParseRequest) GetSession() *Session { if m != nil { @@ -516,7 +578,7 @@ type Node struct { func (m *Node) Reset() { *m = Node{} } func (m *Node) String() string { return 
proto.CompactTextString(m) } func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } func (m *Node) GetText() string { if m != nil { @@ -560,7 +622,7 @@ type ParseResponse struct { func (m *ParseResponse) Reset() { *m = ParseResponse{} } func (m *ParseResponse) String() string { return proto.CompactTextString(m) } func (*ParseResponse) ProtoMessage() {} -func (*ParseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*ParseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } func (m *ParseResponse) GetNodes() []*Node { if m != nil { @@ -577,7 +639,7 @@ type AuditSQL struct { func (m *AuditSQL) Reset() { *m = AuditSQL{} } func (m *AuditSQL) String() string { return proto.CompactTextString(m) } func (*AuditSQL) ProtoMessage() {} -func (*AuditSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*AuditSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } func (m *AuditSQL) GetQuery() string { if m != nil { @@ -594,7 +656,7 @@ type AuditRequest struct { func (m *AuditRequest) Reset() { *m = AuditRequest{} } func (m *AuditRequest) String() string { return proto.CompactTextString(m) } func (*AuditRequest) ProtoMessage() {} -func (*AuditRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*AuditRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } func (m *AuditRequest) GetSession() *Session { if m != nil { @@ -610,16 +672,33 @@ func (m *AuditRequest) GetSqls() []*AuditSQL { return nil } +type I18NAuditResultInfo struct { + Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *I18NAuditResultInfo) Reset() { *m = I18NAuditResultInfo{} } +func (m *I18NAuditResultInfo) String() string { return proto.CompactTextString(m) } +func (*I18NAuditResultInfo) 
ProtoMessage() {} +func (*I18NAuditResultInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *I18NAuditResultInfo) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + type AuditResult struct { - Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` - Level string `protobuf:"bytes,2,opt,name=level" json:"level,omitempty"` - RuleName string `protobuf:"bytes,3,opt,name=rule_name,json=ruleName" json:"rule_name,omitempty"` + Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` + Level string `protobuf:"bytes,2,opt,name=level" json:"level,omitempty"` + RuleName string `protobuf:"bytes,3,opt,name=rule_name,json=ruleName" json:"rule_name,omitempty"` + I18NAuditResultInfo map[string]*I18NAuditResultInfo `protobuf:"bytes,4,rep,name=i18nAuditResultInfo" json:"i18nAuditResultInfo,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` } func (m *AuditResult) Reset() { *m = AuditResult{} } func (m *AuditResult) String() string { return proto.CompactTextString(m) } func (*AuditResult) ProtoMessage() {} -func (*AuditResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (*AuditResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } func (m *AuditResult) GetMessage() string { if m != nil { @@ -642,6 +721,13 @@ func (m *AuditResult) GetRuleName() string { return "" } +func (m *AuditResult) GetI18NAuditResultInfo() map[string]*I18NAuditResultInfo { + if m != nil { + return m.I18NAuditResultInfo + } + return nil +} + type AuditResults struct { Results []*AuditResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` } @@ -649,7 +735,7 @@ type AuditResults struct { func (m *AuditResults) Reset() { *m = AuditResults{} } func (m *AuditResults) String() string { return proto.CompactTextString(m) } func (*AuditResults) ProtoMessage() {} -func (*AuditResults) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{18} } +func (*AuditResults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } func (m *AuditResults) GetResults() []*AuditResult { if m != nil { @@ -665,7 +751,7 @@ type AuditResponse struct { func (m *AuditResponse) Reset() { *m = AuditResponse{} } func (m *AuditResponse) String() string { return proto.CompactTextString(m) } func (*AuditResponse) ProtoMessage() {} -func (*AuditResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (*AuditResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } func (m *AuditResponse) GetAuditResults() []*AuditResults { if m != nil { @@ -682,7 +768,7 @@ type NeedRollbackSQL struct { func (m *NeedRollbackSQL) Reset() { *m = NeedRollbackSQL{} } func (m *NeedRollbackSQL) String() string { return proto.CompactTextString(m) } func (*NeedRollbackSQL) ProtoMessage() {} -func (*NeedRollbackSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (*NeedRollbackSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } func (m *NeedRollbackSQL) GetQuery() string { if m != nil { @@ -699,7 +785,7 @@ type GenRollbackSQLRequest struct { func (m *GenRollbackSQLRequest) Reset() { *m = GenRollbackSQLRequest{} } func (m *GenRollbackSQLRequest) String() string { return proto.CompactTextString(m) } func (*GenRollbackSQLRequest) ProtoMessage() {} -func (*GenRollbackSQLRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (*GenRollbackSQLRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } func (m *GenRollbackSQLRequest) GetSession() *Session { if m != nil { @@ -715,15 +801,32 @@ func (m *GenRollbackSQLRequest) GetSql() *NeedRollbackSQL { return nil } +type I18NRollbackSQLInfo struct { + Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *I18NRollbackSQLInfo) Reset() { *m = I18NRollbackSQLInfo{} } +func (m *I18NRollbackSQLInfo) String() string 
{ return proto.CompactTextString(m) } +func (*I18NRollbackSQLInfo) ProtoMessage() {} +func (*I18NRollbackSQLInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *I18NRollbackSQLInfo) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + type RollbackSQL struct { - Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + I18NRollbackSQLInfo map[string]*I18NRollbackSQLInfo `protobuf:"bytes,3,rep,name=i18nRollbackSQLInfo" json:"i18nRollbackSQLInfo,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` } func (m *RollbackSQL) Reset() { *m = RollbackSQL{} } func (m *RollbackSQL) String() string { return proto.CompactTextString(m) } func (*RollbackSQL) ProtoMessage() {} -func (*RollbackSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (*RollbackSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } func (m *RollbackSQL) GetQuery() string { if m != nil { @@ -739,6 +842,13 @@ func (m *RollbackSQL) GetMessage() string { return "" } +func (m *RollbackSQL) GetI18NRollbackSQLInfo() map[string]*I18NRollbackSQLInfo { + if m != nil { + return m.I18NRollbackSQLInfo + } + return nil +} + type GenRollbackSQLResponse struct { Sql *RollbackSQL `protobuf:"bytes,1,opt,name=sql" json:"sql,omitempty"` } @@ -746,7 +856,7 @@ type GenRollbackSQLResponse struct { func (m *GenRollbackSQLResponse) Reset() { *m = GenRollbackSQLResponse{} } func (m *GenRollbackSQLResponse) String() string { return proto.CompactTextString(m) } func (*GenRollbackSQLResponse) ProtoMessage() {} -func (*GenRollbackSQLResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (*GenRollbackSQLResponse) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{26} } func (m *GenRollbackSQLResponse) GetSql() *RollbackSQL { if m != nil { @@ -763,7 +873,7 @@ type PingRequest struct { func (m *PingRequest) Reset() { *m = PingRequest{} } func (m *PingRequest) String() string { return proto.CompactTextString(m) } func (*PingRequest) ProtoMessage() {} -func (*PingRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } +func (*PingRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } func (m *PingRequest) GetSession() *Session { if m != nil { @@ -780,7 +890,7 @@ type ExecSQL struct { func (m *ExecSQL) Reset() { *m = ExecSQL{} } func (m *ExecSQL) String() string { return proto.CompactTextString(m) } func (*ExecSQL) ProtoMessage() {} -func (*ExecSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (*ExecSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } func (m *ExecSQL) GetQuery() string { if m != nil { @@ -797,7 +907,7 @@ type ExecRequest struct { func (m *ExecRequest) Reset() { *m = ExecRequest{} } func (m *ExecRequest) String() string { return proto.CompactTextString(m) } func (*ExecRequest) ProtoMessage() {} -func (*ExecRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +func (*ExecRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } func (m *ExecRequest) GetSession() *Session { if m != nil { @@ -823,7 +933,7 @@ type ExecResult struct { func (m *ExecResult) Reset() { *m = ExecResult{} } func (m *ExecResult) String() string { return proto.CompactTextString(m) } func (*ExecResult) ProtoMessage() {} -func (*ExecResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +func (*ExecResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } func (m *ExecResult) GetLastInsertId() int64 { if m != nil { @@ -861,7 +971,7 @@ type ExecBatchRequest struct { func (m *ExecBatchRequest) Reset() { *m = ExecBatchRequest{} } func 
(m *ExecBatchRequest) String() string { return proto.CompactTextString(m) } func (*ExecBatchRequest) ProtoMessage() {} -func (*ExecBatchRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (*ExecBatchRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } func (m *ExecBatchRequest) GetSession() *Session { if m != nil { @@ -884,7 +994,7 @@ type ExecBatchResult struct { func (m *ExecBatchResult) Reset() { *m = ExecBatchResult{} } func (m *ExecBatchResult) String() string { return proto.CompactTextString(m) } func (*ExecBatchResult) ProtoMessage() {} -func (*ExecBatchResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (*ExecBatchResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } func (m *ExecBatchResult) GetResults() []*ExecResult { if m != nil { @@ -900,7 +1010,7 @@ type ExecResponse struct { func (m *ExecResponse) Reset() { *m = ExecResponse{} } func (m *ExecResponse) String() string { return proto.CompactTextString(m) } func (*ExecResponse) ProtoMessage() {} -func (*ExecResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } +func (*ExecResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } func (m *ExecResponse) GetResult() *ExecResult { if m != nil { @@ -918,7 +1028,7 @@ type TxRequest struct { func (m *TxRequest) Reset() { *m = TxRequest{} } func (m *TxRequest) String() string { return proto.CompactTextString(m) } func (*TxRequest) ProtoMessage() {} -func (*TxRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (*TxRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } func (m *TxRequest) GetSession() *Session { if m != nil { @@ -941,7 +1051,7 @@ type TxResponse struct { func (m *TxResponse) Reset() { *m = TxResponse{} } func (m *TxResponse) String() string { return proto.CompactTextString(m) } func (*TxResponse) ProtoMessage() {} -func (*TxResponse) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{32} } +func (*TxResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } func (m *TxResponse) GetResults() []*ExecResult { if m != nil { @@ -958,7 +1068,7 @@ type QuerySQL struct { func (m *QuerySQL) Reset() { *m = QuerySQL{} } func (m *QuerySQL) String() string { return proto.CompactTextString(m) } func (*QuerySQL) ProtoMessage() {} -func (*QuerySQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } +func (*QuerySQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } func (m *QuerySQL) GetQuery() string { if m != nil { @@ -974,7 +1084,7 @@ type QueryConf struct { func (m *QueryConf) Reset() { *m = QueryConf{} } func (m *QueryConf) String() string { return proto.CompactTextString(m) } func (*QueryConf) ProtoMessage() {} -func (*QueryConf) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } +func (*QueryConf) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } func (m *QueryConf) GetTimeoutSecond() uint32 { if m != nil { @@ -992,7 +1102,7 @@ type QueryRequest struct { func (m *QueryRequest) Reset() { *m = QueryRequest{} } func (m *QueryRequest) String() string { return proto.CompactTextString(m) } func (*QueryRequest) ProtoMessage() {} -func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } +func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } func (m *QueryRequest) GetSession() *Session { if m != nil { @@ -1023,7 +1133,7 @@ type QueryResponse struct { func (m *QueryResponse) Reset() { *m = QueryResponse{} } func (m *QueryResponse) String() string { return proto.CompactTextString(m) } func (*QueryResponse) ProtoMessage() {} -func (*QueryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } +func (*QueryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } func (m *QueryResponse) GetColumn() []*Param { if m != nil { @@ -1046,7 
+1156,7 @@ type QueryResultRow struct { func (m *QueryResultRow) Reset() { *m = QueryResultRow{} } func (m *QueryResultRow) String() string { return proto.CompactTextString(m) } func (*QueryResultRow) ProtoMessage() {} -func (*QueryResultRow) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } +func (*QueryResultRow) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } func (m *QueryResultRow) GetValues() []*QueryResultValue { if m != nil { @@ -1062,7 +1172,7 @@ type QueryResultValue struct { func (m *QueryResultValue) Reset() { *m = QueryResultValue{} } func (m *QueryResultValue) String() string { return proto.CompactTextString(m) } func (*QueryResultValue) ProtoMessage() {} -func (*QueryResultValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } +func (*QueryResultValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } func (m *QueryResultValue) GetValue() string { if m != nil { @@ -1079,7 +1189,7 @@ type ExplainSQL struct { func (m *ExplainSQL) Reset() { *m = ExplainSQL{} } func (m *ExplainSQL) String() string { return proto.CompactTextString(m) } func (*ExplainSQL) ProtoMessage() {} -func (*ExplainSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } +func (*ExplainSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } func (m *ExplainSQL) GetQuery() string { if m != nil { @@ -1096,7 +1206,7 @@ type ExplainRequest struct { func (m *ExplainRequest) Reset() { *m = ExplainRequest{} } func (m *ExplainRequest) String() string { return proto.CompactTextString(m) } func (*ExplainRequest) ProtoMessage() {} -func (*ExplainRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } +func (*ExplainRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } func (m *ExplainRequest) GetSession() *Session { if m != nil { @@ -1119,7 +1229,7 @@ type ExplainResponse struct { func (m *ExplainResponse) Reset() { *m = ExplainResponse{} } func (m 
*ExplainResponse) String() string { return proto.CompactTextString(m) } func (*ExplainResponse) ProtoMessage() {} -func (*ExplainResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } +func (*ExplainResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } func (m *ExplainResponse) GetClassicResult() *ExplainClassicResult { if m != nil { @@ -1135,7 +1245,7 @@ type ExplainClassicResult struct { func (m *ExplainClassicResult) Reset() { *m = ExplainClassicResult{} } func (m *ExplainClassicResult) String() string { return proto.CompactTextString(m) } func (*ExplainClassicResult) ProtoMessage() {} -func (*ExplainClassicResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } +func (*ExplainClassicResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } func (m *ExplainClassicResult) GetData() *TabularData { if m != nil { @@ -1152,7 +1262,7 @@ type GetDatabasesRequest struct { func (m *GetDatabasesRequest) Reset() { *m = GetDatabasesRequest{} } func (m *GetDatabasesRequest) String() string { return proto.CompactTextString(m) } func (*GetDatabasesRequest) ProtoMessage() {} -func (*GetDatabasesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } +func (*GetDatabasesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } func (m *GetDatabasesRequest) GetSession() *Session { if m != nil { @@ -1168,7 +1278,7 @@ type Database struct { func (m *Database) Reset() { *m = Database{} } func (m *Database) String() string { return proto.CompactTextString(m) } func (*Database) ProtoMessage() {} -func (*Database) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } +func (*Database) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } func (m *Database) GetName() string { if m != nil { @@ -1184,7 +1294,7 @@ type GetDatabasesResponse struct { func (m *GetDatabasesResponse) Reset() { *m = GetDatabasesResponse{} } func (m 
*GetDatabasesResponse) String() string { return proto.CompactTextString(m) } func (*GetDatabasesResponse) ProtoMessage() {} -func (*GetDatabasesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } +func (*GetDatabasesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } func (m *GetDatabasesResponse) GetDatabases() []*Database { if m != nil { @@ -1202,7 +1312,7 @@ type Table struct { func (m *Table) Reset() { *m = Table{} } func (m *Table) String() string { return proto.CompactTextString(m) } func (*Table) ProtoMessage() {} -func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } +func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } func (m *Table) GetName() string { if m != nil { @@ -1226,7 +1336,7 @@ type GetTableMetaRequest struct { func (m *GetTableMetaRequest) Reset() { *m = GetTableMetaRequest{} } func (m *GetTableMetaRequest) String() string { return proto.CompactTextString(m) } func (*GetTableMetaRequest) ProtoMessage() {} -func (*GetTableMetaRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } +func (*GetTableMetaRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } func (m *GetTableMetaRequest) GetSession() *Session { if m != nil { @@ -1249,7 +1359,7 @@ type GetTableMetaResponse struct { func (m *GetTableMetaResponse) Reset() { *m = GetTableMetaResponse{} } func (m *GetTableMetaResponse) String() string { return proto.CompactTextString(m) } func (*GetTableMetaResponse) ProtoMessage() {} -func (*GetTableMetaResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } +func (*GetTableMetaResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } func (m *GetTableMetaResponse) GetTableMeta() *TableMeta { if m != nil { @@ -1268,7 +1378,7 @@ type TableMeta struct { func (m *TableMeta) Reset() { *m = TableMeta{} } func (m *TableMeta) String() string { return proto.CompactTextString(m) 
} func (*TableMeta) ProtoMessage() {} -func (*TableMeta) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } +func (*TableMeta) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } func (m *TableMeta) GetColumnsInfo() *ColumnsInfo { if m != nil { @@ -1305,7 +1415,7 @@ type ColumnsInfo struct { func (m *ColumnsInfo) Reset() { *m = ColumnsInfo{} } func (m *ColumnsInfo) String() string { return proto.CompactTextString(m) } func (*ColumnsInfo) ProtoMessage() {} -func (*ColumnsInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } +func (*ColumnsInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } func (m *ColumnsInfo) GetData() *TabularData { if m != nil { @@ -1321,7 +1431,7 @@ type IndexesInfo struct { func (m *IndexesInfo) Reset() { *m = IndexesInfo{} } func (m *IndexesInfo) String() string { return proto.CompactTextString(m) } func (*IndexesInfo) ProtoMessage() {} -func (*IndexesInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } +func (*IndexesInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } func (m *IndexesInfo) GetData() *TabularData { if m != nil { @@ -1338,7 +1448,7 @@ type TabularDataHead struct { func (m *TabularDataHead) Reset() { *m = TabularDataHead{} } func (m *TabularDataHead) String() string { return proto.CompactTextString(m) } func (*TabularDataHead) ProtoMessage() {} -func (*TabularDataHead) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } +func (*TabularDataHead) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } func (m *TabularDataHead) GetName() string { if m != nil { @@ -1361,7 +1471,7 @@ type TabularDataRows struct { func (m *TabularDataRows) Reset() { *m = TabularDataRows{} } func (m *TabularDataRows) String() string { return proto.CompactTextString(m) } func (*TabularDataRows) ProtoMessage() {} -func (*TabularDataRows) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } +func 
(*TabularDataRows) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } func (m *TabularDataRows) GetItems() []string { if m != nil { @@ -1378,7 +1488,7 @@ type TabularData struct { func (m *TabularData) Reset() { *m = TabularData{} } func (m *TabularData) String() string { return proto.CompactTextString(m) } func (*TabularData) ProtoMessage() {} -func (*TabularData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } +func (*TabularData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } func (m *TabularData) GetColumns() []*TabularDataHead { if m != nil { @@ -1402,7 +1512,7 @@ type ExtractedSQL struct { func (m *ExtractedSQL) Reset() { *m = ExtractedSQL{} } func (m *ExtractedSQL) String() string { return proto.CompactTextString(m) } func (*ExtractedSQL) ProtoMessage() {} -func (*ExtractedSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } +func (*ExtractedSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } func (m *ExtractedSQL) GetQuery() string { if m != nil { @@ -1419,7 +1529,7 @@ type ExtractTableFromSQLRequest struct { func (m *ExtractTableFromSQLRequest) Reset() { *m = ExtractTableFromSQLRequest{} } func (m *ExtractTableFromSQLRequest) String() string { return proto.CompactTextString(m) } func (*ExtractTableFromSQLRequest) ProtoMessage() {} -func (*ExtractTableFromSQLRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } +func (*ExtractTableFromSQLRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } func (m *ExtractTableFromSQLRequest) GetSession() *Session { if m != nil { @@ -1442,7 +1552,7 @@ type ExtractTableFromSQLResponse struct { func (m *ExtractTableFromSQLResponse) Reset() { *m = ExtractTableFromSQLResponse{} } func (m *ExtractTableFromSQLResponse) String() string { return proto.CompactTextString(m) } func (*ExtractTableFromSQLResponse) ProtoMessage() {} -func (*ExtractTableFromSQLResponse) Descriptor() ([]byte, []int) { 
return fileDescriptor0, []int{57} } +func (*ExtractTableFromSQLResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60} } func (m *ExtractTableFromSQLResponse) GetTables() []*Table { if m != nil { @@ -1459,7 +1569,7 @@ type AffectRowsSQL struct { func (m *AffectRowsSQL) Reset() { *m = AffectRowsSQL{} } func (m *AffectRowsSQL) String() string { return proto.CompactTextString(m) } func (*AffectRowsSQL) ProtoMessage() {} -func (*AffectRowsSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } +func (*AffectRowsSQL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61} } func (m *AffectRowsSQL) GetQuery() string { if m != nil { @@ -1476,7 +1586,7 @@ type EstimateSQLAffectRowsRequest struct { func (m *EstimateSQLAffectRowsRequest) Reset() { *m = EstimateSQLAffectRowsRequest{} } func (m *EstimateSQLAffectRowsRequest) String() string { return proto.CompactTextString(m) } func (*EstimateSQLAffectRowsRequest) ProtoMessage() {} -func (*EstimateSQLAffectRowsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } +func (*EstimateSQLAffectRowsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{62} } func (m *EstimateSQLAffectRowsRequest) GetSession() *Session { if m != nil { @@ -1500,7 +1610,7 @@ type EstimateSQLAffectRowsResponse struct { func (m *EstimateSQLAffectRowsResponse) Reset() { *m = EstimateSQLAffectRowsResponse{} } func (m *EstimateSQLAffectRowsResponse) String() string { return proto.CompactTextString(m) } func (*EstimateSQLAffectRowsResponse) ProtoMessage() {} -func (*EstimateSQLAffectRowsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60} } +func (*EstimateSQLAffectRowsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{63} } func (m *EstimateSQLAffectRowsResponse) GetCount() int64 { if m != nil { @@ -1523,7 +1633,7 @@ type KillProcessResponse struct { func (m *KillProcessResponse) Reset() { *m = KillProcessResponse{} } func (m 
*KillProcessResponse) String() string { return proto.CompactTextString(m) } func (*KillProcessResponse) ProtoMessage() {} -func (*KillProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61} } +func (*KillProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64} } func (m *KillProcessResponse) GetErrMessage() string { if m != nil { @@ -1538,6 +1648,7 @@ func init() { proto.RegisterType((*Param)(nil), "protoV2.Param") proto.RegisterType((*DSN)(nil), "protoV2.DSN") proto.RegisterType((*Rule)(nil), "protoV2.Rule") + proto.RegisterType((*I18NRuleInfo)(nil), "protoV2.I18nRuleInfo") proto.RegisterType((*Knowledge)(nil), "protoV2.Knowledge") proto.RegisterType((*MetasResponse)(nil), "protoV2.MetasResponse") proto.RegisterType((*InitRequest)(nil), "protoV2.InitRequest") @@ -1550,11 +1661,13 @@ func init() { proto.RegisterType((*ParseResponse)(nil), "protoV2.ParseResponse") proto.RegisterType((*AuditSQL)(nil), "protoV2.AuditSQL") proto.RegisterType((*AuditRequest)(nil), "protoV2.AuditRequest") + proto.RegisterType((*I18NAuditResultInfo)(nil), "protoV2.I18nAuditResultInfo") proto.RegisterType((*AuditResult)(nil), "protoV2.AuditResult") proto.RegisterType((*AuditResults)(nil), "protoV2.AuditResults") proto.RegisterType((*AuditResponse)(nil), "protoV2.AuditResponse") proto.RegisterType((*NeedRollbackSQL)(nil), "protoV2.NeedRollbackSQL") proto.RegisterType((*GenRollbackSQLRequest)(nil), "protoV2.GenRollbackSQLRequest") + proto.RegisterType((*I18NRollbackSQLInfo)(nil), "protoV2.I18nRollbackSQLInfo") proto.RegisterType((*RollbackSQL)(nil), "protoV2.RollbackSQL") proto.RegisterType((*GenRollbackSQLResponse)(nil), "protoV2.GenRollbackSQLResponse") proto.RegisterType((*PingRequest)(nil), "protoV2.PingRequest") @@ -2217,127 +2330,137 @@ var _Driver_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("driver_v2.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1945 bytes of a gzipped FileDescriptorProto - 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x18, 0xdb, 0x72, 0x14, 0xc7, - 0x35, 0x7b, 0xd7, 0x9e, 0xbd, 0x68, 0xdd, 0x92, 0x60, 0x59, 0x40, 0x56, 0x1a, 0x90, 0x55, 0x98, - 0x08, 0x2c, 0x12, 0x5c, 0x98, 0x24, 0x25, 0x2c, 0x29, 0x20, 0x03, 0x8a, 0x68, 0x11, 0x52, 0xe5, - 0x2a, 0x97, 0xdd, 0xda, 0x69, 0x89, 0x29, 0x66, 0xa7, 0x57, 0xd3, 0xbd, 0x48, 0xfa, 0x82, 0xe4, - 0x3b, 0xf2, 0x05, 0x79, 0xf1, 0x63, 0x3e, 0x24, 0xf9, 0x9a, 0x54, 0x5f, 0x66, 0xa6, 0x67, 0x76, - 0xd6, 0x36, 0x5b, 0xe5, 0xa7, 0xe9, 0x3e, 0xb7, 0x3e, 0xb7, 0x3e, 0x73, 0x4e, 0xc3, 0xa2, 0x17, - 0xf9, 0x1f, 0x58, 0xf4, 0xfd, 0x87, 0xad, 0xcd, 0x71, 0xc4, 0x25, 0x47, 0x0d, 0xfd, 0x79, 0xbb, - 0x85, 0x1b, 0x50, 0xdb, 0x1b, 0x8d, 0xe5, 0x25, 0xbe, 0x06, 0x8d, 0x23, 0x26, 0x84, 0xcf, 0x43, - 0xd4, 0x85, 0xb2, 0xef, 0xf5, 0x4b, 0x6b, 0xa5, 0x8d, 0x26, 0x29, 0xfb, 0x1e, 0xfe, 0x3b, 0xd4, - 0x0e, 0x69, 0x44, 0x47, 0xa8, 0x07, 0x95, 0xf7, 0xec, 0xd2, 0x62, 0xd4, 0x12, 0x2d, 0x43, 0xed, - 0x03, 0x0d, 0x26, 0xac, 0x5f, 0xd6, 0x30, 0xb3, 0x41, 0x08, 0xaa, 0x1e, 0x13, 0xc3, 0x7e, 0x45, - 0x03, 0xf5, 0x5a, 0xc1, 0xe4, 0xe5, 0x98, 0xf5, 0xab, 0x06, 0xa6, 0xd6, 0xf8, 0xc7, 0x12, 0x54, - 0x76, 0x8f, 0x0e, 0x14, 0xee, 0x1d, 0x17, 0xd2, 0x0a, 0xd6, 0x6b, 0x05, 0x1b, 0xf3, 0x48, 0x5a, - 0xc1, 0x7a, 0xad, 0x60, 0x13, 0xc1, 0xa2, 0x58, 0xae, 0x5a, 0xa3, 0x01, 0x2c, 0x8c, 0xa9, 0x10, - 0xe7, 0x3c, 0xf2, 0xac, 0xec, 0x64, 0xaf, 0x70, 0x1e, 0x95, 0xf4, 0x98, 0x0a, 0xd6, 0xaf, 0x19, - 0x5c, 0xbc, 0x47, 0x5f, 0x41, 0x8f, 0x7a, 0x9e, 0x2f, 0x7d, 0x1e, 0xd2, 0x40, 0x9b, 0x27, 0xfa, - 0xf5, 0xb5, 0xca, 0x46, 0x6b, 0xab, 0xbb, 0x69, 0x9d, 0xb3, 0xa9, 0xc1, 0x64, 0x8a, 0x0e, 0xff, - 0xaf, 0x04, 0x55, 0x32, 0x09, 0xb4, 0xa1, 0x21, 0x1d, 0xb1, 0x58, 0x71, 0xb5, 0x4e, 0x8c, 0x2f, - 0x3b, 0xc6, 0x2f, 0x43, 0x2d, 0x60, 0x1f, 0x58, 0x60, 0x35, 0x37, 0x1b, 0xa5, 0xde, 0x90, 0x4a, - 0x76, 0xca, 0xa3, 0xcb, 0x58, 0xf5, 0x78, 0x8f, 0xd6, 0xa1, 0x3e, 0x36, 0x4a, 0xd5, 0x0a, 0x95, - 0xb2, 0x58, 0xb4, 0x0a, 0x40, 0xc3, 0x90, 
0x4b, 0xaa, 0x14, 0xec, 0xd7, 0xb5, 0x14, 0x07, 0x82, - 0x1e, 0x40, 0xf3, 0x7d, 0xc8, 0xcf, 0x03, 0xe6, 0x9d, 0xb2, 0x7e, 0x63, 0xad, 0xb4, 0xd1, 0xda, - 0x42, 0x89, 0xa8, 0x17, 0x31, 0x86, 0xa4, 0x44, 0xf8, 0x0e, 0x34, 0x13, 0x38, 0xea, 0x43, 0x63, - 0xc8, 0x43, 0xc9, 0xc2, 0x38, 0x38, 0xf1, 0x16, 0xff, 0x58, 0x86, 0xce, 0x2b, 0x26, 0xa9, 0x20, - 0x4c, 0x8c, 0x79, 0x28, 0x98, 0x52, 0x65, 0x1c, 0x4c, 0x4e, 0xfd, 0xf0, 0x20, 0x75, 0x89, 0x03, - 0x41, 0x0f, 0x60, 0x29, 0xf6, 0xfe, 0x2e, 0x3b, 0xa1, 0x93, 0x40, 0x1e, 0xc6, 0x01, 0xae, 0x90, - 0x22, 0x14, 0xfa, 0x06, 0xfa, 0x31, 0xf8, 0x69, 0x3e, 0x56, 0x95, 0x42, 0xb7, 0xcc, 0xa4, 0x47, - 0xb7, 0xa0, 0x16, 0x4d, 0x02, 0x26, 0xfa, 0x55, 0xcd, 0xd8, 0x49, 0x18, 0x55, 0x20, 0x89, 0xc1, - 0xa1, 0x57, 0xb0, 0xc2, 0x42, 0x7a, 0x1c, 0x30, 0xef, 0xaf, 0x63, 0xc3, 0xfd, 0x8a, 0x7b, 0x93, - 0x80, 0xe9, 0x20, 0x74, 0xb7, 0xae, 0x26, 0x4c, 0x59, 0x34, 0x29, 0xe6, 0x52, 0xa9, 0x10, 0xf0, - 0x53, 0xae, 0xc3, 0xd2, 0x26, 0x7a, 0x8d, 0x09, 0xb4, 0xf6, 0x43, 0x5f, 0x12, 0x76, 0x36, 0x61, - 0x42, 0xa2, 0x55, 0xa8, 0x78, 0x22, 0xd4, 0xde, 0x6a, 0x6d, 0xb5, 0x13, 0xf9, 0xbb, 0x47, 0x07, - 0x44, 0x21, 0x52, 0xb5, 0xcb, 0xb3, 0xd5, 0xc6, 0x5f, 0x41, 0xdb, 0xc8, 0xb4, 0x91, 0xb8, 0x0b, - 0x0d, 0x61, 0xee, 0xb2, 0x15, 0xdc, 0x4b, 0xd8, 0xec, 0x1d, 0x27, 0x31, 0x81, 0xe2, 0xdd, 0x09, - 0xb8, 0x60, 0xb1, 0x42, 0x1f, 0xc3, 0xbb, 0x0d, 0xe8, 0x85, 0x1f, 0x04, 0x87, 0x11, 0x1f, 0x32, - 0x21, 0xe6, 0x91, 0xf0, 0x5b, 0x68, 0x1e, 0xd2, 0x48, 0x30, 0xef, 0xe8, 0xf5, 0x4b, 0x75, 0x4b, - 0xce, 0x26, 0x2c, 0x8a, 0x0b, 0x8c, 0xd9, 0xe0, 0x1f, 0xa0, 0xad, 0x49, 0xe6, 0x10, 0x8f, 0x6e, - 0x43, 0x45, 0x9c, 0x05, 0x3a, 0xc5, 0xdc, 0xbc, 0x4f, 0x8e, 0x24, 0x0a, 0x8d, 0xff, 0x59, 0x82, - 0xea, 0x01, 0xf7, 0x74, 0xbc, 0x24, 0xbb, 0x48, 0xea, 0x90, 0x5a, 0x27, 0x75, 0xab, 0x9c, 0xd6, - 0x2d, 0xb4, 0x06, 0xad, 0x13, 0x3f, 0x3c, 0x65, 0xd1, 0x38, 0xf2, 0x43, 0x69, 0x2f, 0xb5, 0x0b, - 0x42, 0x37, 0xa0, 0x29, 0x24, 0x8d, 0xe4, 0x4b, 0x3f, 0x34, 0x25, 0xaf, 0x4a, 
0x52, 0x80, 0xba, - 0x55, 0xc7, 0x54, 0x0e, 0xdf, 0xed, 0x7b, 0xba, 0x2c, 0x55, 0x49, 0xbc, 0xc5, 0xbf, 0x87, 0x8e, - 0x35, 0xd6, 0x86, 0xf2, 0x16, 0xd4, 0x42, 0xee, 0x31, 0xd1, 0x2f, 0xe5, 0xe2, 0xaf, 0x14, 0x26, - 0x06, 0x87, 0xd7, 0x60, 0xe1, 0xe9, 0xc4, 0xf3, 0xe5, 0x6c, 0x27, 0x52, 0x68, 0x6b, 0x8a, 0x79, - 0x9c, 0x78, 0x07, 0xaa, 0xe2, 0x2c, 0x88, 0x33, 0xf0, 0x93, 0x84, 0x30, 0x3e, 0x92, 0x68, 0x34, - 0xfe, 0x16, 0x5a, 0xf6, 0x08, 0x31, 0x09, 0xa4, 0xb2, 0x71, 0xc4, 0x84, 0xa0, 0xa7, 0x71, 0x29, - 0x88, 0xb7, 0x69, 0x31, 0x2c, 0xbb, 0xc5, 0xf0, 0x3a, 0x34, 0x55, 0x32, 0x7f, 0xaf, 0xeb, 0xa9, - 0xf1, 0xe8, 0x82, 0x02, 0xa8, 0xd2, 0x81, 0xff, 0x9c, 0xa8, 0xaf, 0x64, 0x0b, 0xb4, 0x09, 0x8d, - 0xc8, 0x2c, 0xad, 0x5f, 0x96, 0xb3, 0x5a, 0x19, 0x3a, 0x12, 0x13, 0xe1, 0x6f, 0xa0, 0x13, 0xc3, - 0x8d, 0x5b, 0x1f, 0x43, 0x9b, 0x3a, 0x02, 0xad, 0x94, 0x95, 0x22, 0x29, 0x82, 0x64, 0x48, 0xf1, - 0x67, 0xb0, 0x78, 0xc0, 0x98, 0x47, 0x78, 0x10, 0x1c, 0xd3, 0xe1, 0xfb, 0xd9, 0x3e, 0xe7, 0xb0, - 0xf2, 0x8c, 0x85, 0x0e, 0xdd, 0x3c, 0xce, 0xbf, 0xeb, 0x66, 0x70, 0x3f, 0x8d, 0x7e, 0x56, 0x03, - 0x93, 0xc7, 0x7f, 0x82, 0xd6, 0xcf, 0x6a, 0xe5, 0xc6, 0xa5, 0x9c, 0x89, 0x0b, 0xde, 0x86, 0x2b, - 0x79, 0x7d, 0xad, 0xb7, 0xd6, 0x8d, 0x12, 0x46, 0xd9, 0xd4, 0xd5, 0x53, 0x0a, 0x3c, 0x86, 0xd6, - 0xa1, 0x1f, 0x9e, 0xce, 0x53, 0x08, 0x3e, 0x85, 0xc6, 0xde, 0x05, 0x1b, 0xce, 0xf6, 0xe6, 0x77, - 0xd0, 0x52, 0x04, 0xf3, 0xf8, 0x10, 0xbb, 0x3e, 0x4c, 0xe9, 0xec, 0x79, 0x46, 0xf5, 0x7f, 0x97, - 0x00, 0x8c, 0x7c, 0x9d, 0xbd, 0x18, 0xda, 0x01, 0x15, 0x72, 0x3f, 0x14, 0x2c, 0x92, 0xfb, 0xa6, - 0x19, 0xaa, 0x90, 0x0c, 0x0c, 0xdd, 0x83, 0x4f, 0xdc, 0xfd, 0x5e, 0x14, 0xf1, 0xc8, 0xfa, 0x74, - 0x1a, 0xa1, 0x24, 0x46, 0xfc, 0x5c, 0x3c, 0x3d, 0x39, 0x61, 0x43, 0xc9, 0x3c, 0x9d, 0xe2, 0x15, - 0x92, 0x81, 0x29, 0x89, 0xee, 0xde, 0x48, 0x34, 0x9d, 0xc1, 0x34, 0x02, 0x7b, 0xd0, 0x53, 0x1a, - 0x7f, 0xad, 0x4a, 0xc7, 0x7c, 0xc5, 0xd1, 0xbd, 0xd7, 0xd3, 0x7e, 0x31, 0xd7, 0x7a, 0x1b, 0x16, - 0x9d, 0x53, 0xb4, 
0x73, 0x7e, 0x97, 0xbf, 0x7d, 0x4b, 0x19, 0xde, 0xfc, 0xe5, 0x7b, 0x02, 0x6d, - 0x0b, 0x36, 0xd9, 0xf4, 0x39, 0xd4, 0x0d, 0xca, 0xaa, 0x58, 0xc8, 0x6d, 0x49, 0xf0, 0x77, 0xd0, - 0x7c, 0x73, 0xf1, 0xeb, 0x59, 0xf7, 0x04, 0x40, 0x89, 0xb7, 0x9a, 0x7d, 0xa4, 0x61, 0x6b, 0xb0, - 0xf0, 0x5a, 0xe5, 0xe6, 0xec, 0xa4, 0xfd, 0x02, 0x9a, 0x9a, 0x62, 0x87, 0x87, 0x27, 0xe8, 0x36, - 0x74, 0xa4, 0x3f, 0x62, 0x7c, 0x22, 0x8f, 0xd8, 0x90, 0x87, 0x26, 0xa9, 0x3a, 0x24, 0x0b, 0xc4, - 0xff, 0x28, 0x41, 0x5b, 0xf3, 0xcc, 0x63, 0xf4, 0x2d, 0x37, 0xd3, 0xd3, 0x4a, 0x1d, 0x6b, 0xa9, - 0x53, 0x1d, 0xad, 0x43, 0x75, 0xc8, 0xc3, 0x13, 0x9d, 0x81, 0xee, 0x5f, 0x31, 0xd1, 0x94, 0x68, - 0x3c, 0xf6, 0xa0, 0x63, 0x15, 0x49, 0xca, 0x40, 0x7d, 0xc8, 0x83, 0xc9, 0x28, 0xb4, 0xde, 0x99, - 0xea, 0x49, 0x0d, 0x16, 0x7d, 0x0e, 0x55, 0x95, 0xad, 0xd6, 0xf5, 0x57, 0xb3, 0x07, 0x58, 0x27, - 0xf2, 0x73, 0xa2, 0x89, 0xf0, 0x0e, 0x74, 0xb3, 0x70, 0xf4, 0x05, 0xd4, 0xf5, 0x18, 0x11, 0x07, - 0xe1, 0x5a, 0x91, 0x80, 0xb7, 0x8a, 0x82, 0x58, 0x42, 0xbc, 0x01, 0xbd, 0x3c, 0x2e, 0x1d, 0x4d, - 0x4a, 0xce, 0x68, 0x82, 0xb1, 0xba, 0xe6, 0xe3, 0x80, 0xfa, 0xe1, 0xec, 0xa8, 0x0d, 0xa1, 0x6b, - 0x69, 0xe6, 0xfb, 0x5d, 0x3a, 0x31, 0x70, 0x13, 0x28, 0x3e, 0xd5, 0x14, 0x9c, 0xb7, 0xea, 0x5e, - 0xd9, 0x43, 0xac, 0x7f, 0x77, 0xa0, 0x33, 0x0c, 0xa8, 0x10, 0xbe, 0xcd, 0x34, 0x7b, 0xd6, 0xcd, - 0xbc, 0x8c, 0x1d, 0x97, 0x88, 0x64, 0x79, 0xf0, 0x36, 0x2c, 0x17, 0x91, 0xa1, 0x0d, 0xa8, 0xaa, - 0xde, 0x78, 0xaa, 0x88, 0xbf, 0xa1, 0xc7, 0x93, 0x80, 0x46, 0xbb, 0x54, 0x52, 0xa2, 0x29, 0xf0, - 0x53, 0x58, 0x7a, 0xc6, 0xe4, 0xae, 0x6d, 0xa4, 0xe7, 0x6a, 0xeb, 0x56, 0x61, 0x21, 0xe6, 0x2f, - 0x9a, 0x91, 0xf0, 0x33, 0x58, 0xce, 0x1e, 0x61, 0x3d, 0x70, 0x1f, 0x9a, 0x71, 0x03, 0x1f, 0x47, - 0x3f, 0xcd, 0xe2, 0x98, 0x9c, 0xa4, 0x34, 0xf8, 0x21, 0xd4, 0xde, 0xa8, 0xce, 0xbb, 0x70, 0x12, - 0xbb, 0x02, 0x75, 0x31, 0x7c, 0xc7, 0x46, 0xd4, 0x56, 0x65, 0xbb, 0xc3, 0xa7, 0xda, 0x40, 0xcd, - 0xa7, 0x26, 0x98, 0xf9, 0xaa, 0x4b, 0x4d, 0x2a, 0x7e, 
0x1b, 0xe6, 0xae, 0xeb, 0x4e, 0xd5, 0x97, - 0x6b, 0x24, 0x7e, 0xae, 0xcd, 0x74, 0x0e, 0xb2, 0x66, 0x3e, 0x80, 0xa6, 0x8c, 0x81, 0xf6, 0x2c, - 0x94, 0x95, 0xa0, 0xc9, 0x53, 0x22, 0xfc, 0x9f, 0x12, 0x34, 0x13, 0x04, 0x7a, 0x04, 0x2d, 0x73, - 0xd5, 0xc4, 0x7e, 0x78, 0xc2, 0xa7, 0x42, 0xba, 0x93, 0xe2, 0x88, 0x4b, 0xa8, 0xf8, 0xfc, 0xd0, - 0x63, 0x17, 0xcc, 0xf0, 0x95, 0x73, 0x7c, 0xfb, 0x29, 0x8e, 0xb8, 0x84, 0x68, 0x1d, 0xba, 0xc3, - 0x88, 0x51, 0xc9, 0xb4, 0x0a, 0x47, 0xaf, 0x5f, 0xda, 0x06, 0x2d, 0x07, 0x75, 0x7b, 0x8b, 0x6a, - 0xb6, 0xb7, 0xf8, 0x12, 0x5a, 0x8e, 0x56, 0x1f, 0x91, 0x8c, 0x5f, 0xaa, 0x71, 0x29, 0xd5, 0xe4, - 0x97, 0x33, 0x3e, 0x86, 0x45, 0x07, 0xf8, 0x9c, 0x51, 0xef, 0x97, 0x4e, 0xeb, 0xaa, 0xc3, 0x73, - 0xe5, 0xf1, 0x73, 0xa1, 0x0a, 0x85, 0x2f, 0xd9, 0xc8, 0x24, 0x65, 0x93, 0x98, 0x0d, 0xe6, 0xd0, - 0x72, 0x08, 0xd1, 0x96, 0x1a, 0x96, 0xb5, 0x91, 0x36, 0x77, 0xfb, 0x45, 0xfa, 0x29, 0x55, 0x48, - 0x4c, 0x88, 0xee, 0x65, 0x6a, 0x65, 0x21, 0x83, 0x52, 0xc0, 0x16, 0xcb, 0xdb, 0xea, 0x57, 0x2a, - 0x23, 0xaa, 0x9a, 0x80, 0xd9, 0xf5, 0xeb, 0x0c, 0x06, 0x96, 0x4a, 0x47, 0xe6, 0x2f, 0x11, 0x1f, - 0xcd, 0xd9, 0x7d, 0x7e, 0xe6, 0xd6, 0xb2, 0x15, 0xa7, 0x0e, 0xa5, 0x3a, 0x98, 0x6a, 0xb6, 0x07, - 0xd7, 0x0b, 0x8f, 0x4c, 0xff, 0x1c, 0x3a, 0x97, 0xc5, 0xd4, 0x9f, 0xc3, 0xdc, 0x17, 0x8b, 0xc5, - 0x77, 0xa0, 0x63, 0x7a, 0x1c, 0x65, 0xf3, 0x6c, 0x03, 0x25, 0xdc, 0xd8, 0x13, 0xd2, 0x1f, 0x51, - 0xa9, 0xd2, 0x2e, 0xe5, 0x98, 0xc7, 0xc4, 0x0d, 0xd7, 0xc4, 0x2b, 0xe9, 0x00, 0xe0, 0xaa, 0x61, - 0x6c, 0xfc, 0x1b, 0xdc, 0x9c, 0x71, 0xaa, 0xb5, 0x72, 0x19, 0x6a, 0x43, 0x3e, 0xb1, 0x4f, 0x25, - 0x15, 0x62, 0x36, 0x68, 0x15, 0x80, 0x45, 0xd1, 0xab, 0x4c, 0xcf, 0xed, 0x40, 0xf0, 0x1f, 0x60, - 0x29, 0x33, 0x44, 0xa7, 0xaf, 0x29, 0x0e, 0x5b, 0x29, 0xcf, 0x76, 0xf7, 0x5f, 0x25, 0xe8, 0x4e, - 0x3d, 0x37, 0x74, 0xb3, 0x0d, 0x7c, 0xef, 0x37, 0xa8, 0x09, 0x35, 0xfd, 0x67, 0xec, 0x95, 0x50, - 0x4b, 0xb5, 0xd8, 0xfa, 0xcf, 0xd0, 0x2b, 0xa3, 0x1e, 0xb4, 0xdd, 0xd2, 0xd4, 0xab, 0xa0, 
0xab, - 0xb0, 0x54, 0x10, 0xc2, 0x5e, 0x15, 0x5d, 0x83, 0x95, 0x42, 0xbb, 0x7b, 0x35, 0xb4, 0x08, 0x2d, - 0x47, 0xf7, 0x5e, 0x1d, 0x75, 0xa0, 0x99, 0x74, 0x8b, 0xbd, 0xc6, 0xd6, 0x7f, 0x17, 0xa0, 0xbe, - 0xab, 0x9f, 0x1e, 0xd1, 0x7d, 0xa8, 0xe9, 0xe7, 0x22, 0x94, 0xc6, 0x5e, 0x3f, 0x3c, 0x0e, 0x52, - 0x9f, 0x67, 0x9f, 0x93, 0x1e, 0x42, 0x75, 0x3f, 0xf4, 0x25, 0x72, 0xeb, 0x53, 0x32, 0xc0, 0x0e, - 0x56, 0x72, 0x50, 0xcb, 0xb4, 0x09, 0x35, 0xfd, 0x9a, 0x81, 0x52, 0xbc, 0xfb, 0xba, 0x31, 0xc8, - 0x1d, 0x8e, 0x9e, 0x67, 0x0c, 0x40, 0xd7, 0xd3, 0xa7, 0xb1, 0xa9, 0x77, 0x8d, 0xc1, 0x8d, 0x62, - 0xa4, 0x3d, 0xf9, 0x91, 0x7e, 0x24, 0xcd, 0x9c, 0xec, 0x3e, 0x5b, 0x38, 0x66, 0x66, 0x07, 0xfc, - 0x47, 0x50, 0xd3, 0xc3, 0x26, 0x9a, 0x1a, 0x3e, 0xf3, 0x7c, 0xd9, 0x09, 0xf6, 0x75, 0x3e, 0xd8, - 0x68, 0x35, 0xa1, 0x2c, 0x1c, 0x3b, 0x07, 0x9f, 0xce, 0xc4, 0x5b, 0x91, 0xf7, 0xa0, 0xaa, 0xc6, - 0x37, 0xc7, 0xe3, 0xce, 0x34, 0x37, 0xe5, 0xba, 0x87, 0x50, 0x55, 0xa1, 0x76, 0xa8, 0x9d, 0xf9, - 0x6c, 0xb0, 0x92, 0xef, 0x9c, 0xcd, 0x11, 0xdb, 0x4e, 0x7e, 0xa0, 0x6b, 0x19, 0x1a, 0x77, 0x8e, - 0x19, 0xf4, 0x8b, 0x50, 0x76, 0xf8, 0x28, 0xbf, 0xb9, 0x40, 0xce, 0xef, 0x32, 0x9e, 0x0e, 0x06, - 0x4b, 0x19, 0x58, 0xea, 0x5e, 0x9d, 0xff, 0x8e, 0x7b, 0xdd, 0xee, 0xda, 0x71, 0x6f, 0xb6, 0xd7, - 0xfd, 0x63, 0x72, 0x59, 0xd0, 0xd5, 0x7c, 0xff, 0x55, 0xa4, 0x64, 0xb6, 0x93, 0x7b, 0xa1, 0x6f, - 0x57, 0xd2, 0xdf, 0xa0, 0x1b, 0x8e, 0xeb, 0xa7, 0x3a, 0xab, 0xc1, 0xcd, 0x19, 0xd8, 0x8c, 0xb0, - 0xf4, 0xef, 0x9f, 0x11, 0x96, 0xef, 0x62, 0xb2, 0xc2, 0xa6, 0x5b, 0x8f, 0x1f, 0x0a, 0x6f, 0x39, - 0xba, 0x95, 0xaf, 0xed, 0x05, 0x7f, 0x8e, 0xc1, 0xed, 0x9f, 0x26, 0xb2, 0x27, 0x9c, 0xcc, 0x28, - 0x17, 0xe8, 0x4e, 0xca, 0xfe, 0x13, 0xc5, 0x7b, 0xb0, 0xfe, 0x73, 0x64, 0xe6, 0x9c, 0xaf, 0xdb, - 0xdf, 0xc2, 0xe6, 0xfd, 0x27, 0x96, 0xf6, 0xb8, 0xae, 0x17, 0x0f, 0xff, 0x1f, 0x00, 0x00, 0xff, - 0xff, 0x6f, 0x34, 0xe4, 0x7e, 0xea, 0x18, 0x00, 0x00, + // 2110 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x5f, 0x6f, 0x13, 0xcb, + 0x15, 0xaf, 0xff, 0xc7, 0xc7, 0x76, 0x62, 0x26, 0x09, 0x18, 0x13, 0x42, 0x3a, 0x40, 0x6e, 0x04, + 0x5c, 0x03, 0xa6, 0xe5, 0x5e, 0xa0, 0xaa, 0xc2, 0x4d, 0x52, 0xc8, 0x05, 0xd2, 0x30, 0xa1, 0x54, + 0xaa, 0x74, 0xc5, 0xdd, 0x78, 0x27, 0x61, 0xc5, 0x7a, 0xd7, 0xd9, 0x19, 0x93, 0xe4, 0xbd, 0x52, + 0xfb, 0x35, 0xda, 0x4f, 0xd0, 0x97, 0x2b, 0xf5, 0xa5, 0x1f, 0xa4, 0x9f, 0xa4, 0xaf, 0xd5, 0xfc, + 0xd9, 0xdd, 0x99, 0xf5, 0xfa, 0x02, 0x96, 0x2a, 0xf5, 0xc9, 0x3b, 0xe7, 0xdf, 0x9c, 0x73, 0xe6, + 0x37, 0x67, 0xce, 0x8c, 0x61, 0xc1, 0x8d, 0xbc, 0x8f, 0x34, 0x7a, 0xf7, 0xb1, 0xdf, 0x1b, 0x45, + 0x21, 0x0f, 0x51, 0x4d, 0xfe, 0xbc, 0xed, 0xe3, 0x1a, 0x54, 0x76, 0x86, 0x23, 0x7e, 0x8e, 0x2f, + 0x43, 0xed, 0x80, 0x32, 0xe6, 0x85, 0x01, 0x9a, 0x87, 0xa2, 0xe7, 0x76, 0x0a, 0x6b, 0x85, 0x8d, + 0x3a, 0x29, 0x7a, 0x2e, 0xfe, 0x23, 0x54, 0xf6, 0x9d, 0xc8, 0x19, 0xa2, 0x36, 0x94, 0x3e, 0xd0, + 0x73, 0xcd, 0x11, 0x9f, 0x68, 0x09, 0x2a, 0x1f, 0x1d, 0x7f, 0x4c, 0x3b, 0x45, 0x49, 0x53, 0x03, + 0x84, 0xa0, 0xec, 0x52, 0x36, 0xe8, 0x94, 0x24, 0x51, 0x7e, 0x0b, 0x1a, 0x3f, 0x1f, 0xd1, 0x4e, + 0x59, 0xd1, 0xc4, 0x37, 0xfe, 0xa9, 0x00, 0xa5, 0xed, 0x83, 0x3d, 0xc1, 0x7b, 0x1f, 0x32, 0xae, + 0x0d, 0xcb, 0x6f, 0x41, 0x1b, 0x85, 0x11, 0xd7, 0x86, 0xe5, 0xb7, 0xa0, 0x8d, 0x19, 0x8d, 0x62, + 0xbb, 0xe2, 0x1b, 0x75, 0x61, 0x6e, 0xe4, 0x30, 0x76, 0x1a, 0x46, 0xae, 0xb6, 0x9d, 0x8c, 0x05, + 0xcf, 0x75, 0xb8, 0x73, 0xe8, 0x30, 0xda, 0xa9, 0x28, 0x5e, 0x3c, 0x46, 0x8f, 0xa1, 0xed, 0xb8, + 0xae, 0xc7, 0xbd, 0x30, 0x70, 0x7c, 0x19, 0x1e, 0xeb, 0x54, 0xd7, 0x4a, 0x1b, 0x8d, 0xfe, 0x7c, + 0x4f, 0x27, 0xa7, 0x27, 0xc9, 0x64, 0x42, 0x0e, 0xff, 0xa7, 0x08, 0x65, 0x32, 0xf6, 0x65, 0xa0, + 0x81, 0x33, 0xa4, 0xb1, 0xe3, 0xe2, 0x3b, 0x09, 0xbe, 0x68, 0x04, 0xbf, 0x04, 0x15, 0x9f, 0x7e, + 0xa4, 0xbe, 0xf6, 0x5c, 0x0d, 0x84, 0x7b, 0x03, 0x87, 0xd3, 0xe3, 0x30, 0x3a, 0x8f, 0x5d, 0x8f, + 0xc7, 0x68, 0x1d, 0xaa, 0x23, 0xe5, 0x54, 0x25, 0xd7, 0x29, 
0xcd, 0x45, 0xab, 0x00, 0x4e, 0x10, + 0x84, 0xdc, 0x11, 0x0e, 0x76, 0xaa, 0xd2, 0x8a, 0x41, 0x41, 0xf7, 0xa0, 0xfe, 0x21, 0x08, 0x4f, + 0x7d, 0xea, 0x1e, 0xd3, 0x4e, 0x6d, 0xad, 0xb0, 0xd1, 0xe8, 0xa3, 0xc4, 0xd4, 0x8b, 0x98, 0x43, + 0x52, 0x21, 0xb4, 0x05, 0x4d, 0xef, 0xfe, 0xb7, 0x81, 0x88, 0x6f, 0x37, 0x38, 0x0a, 0x3b, 0x73, + 0x72, 0xfe, 0x6b, 0x89, 0x92, 0x60, 0xf4, 0x76, 0x0d, 0x89, 0x9d, 0x80, 0x47, 0xe7, 0xc4, 0x52, + 0xea, 0xbe, 0x85, 0x0b, 0x13, 0x22, 0x39, 0xf0, 0xb9, 0x6d, 0xc2, 0xa7, 0xd1, 0x5f, 0x4e, 0x26, + 0x31, 0x95, 0x35, 0xaa, 0x1e, 0x17, 0xbf, 0x2d, 0xe0, 0x7f, 0x16, 0xa0, 0x69, 0xf2, 0x72, 0xb3, + 0xfd, 0x7f, 0x99, 0x57, 0x7c, 0x13, 0xea, 0x09, 0x1d, 0x75, 0xa0, 0x36, 0x08, 0x03, 0x4e, 0x83, + 0x18, 0xf4, 0xf1, 0x10, 0xff, 0x54, 0x84, 0xd6, 0x2b, 0xca, 0x1d, 0x46, 0x28, 0x1b, 0x85, 0x01, + 0xa3, 0xc2, 0x95, 0x91, 0x3f, 0x3e, 0xf6, 0x82, 0xbd, 0x14, 0x6a, 0x06, 0x05, 0xdd, 0x83, 0xc5, + 0x18, 0xd5, 0xdb, 0xf4, 0xc8, 0x19, 0xfb, 0x7c, 0x3f, 0xde, 0x38, 0x25, 0x92, 0xc7, 0x42, 0xdf, + 0x43, 0x27, 0x26, 0x3f, 0xcd, 0xee, 0x81, 0x52, 0x6e, 0x5a, 0xa6, 0xca, 0xa3, 0xeb, 0x50, 0x89, + 0xc6, 0x3e, 0x65, 0x9d, 0xb2, 0x54, 0x6c, 0x59, 0x38, 0x21, 0x8a, 0x87, 0x5e, 0xc1, 0x32, 0x0d, + 0x9c, 0x43, 0x9f, 0xba, 0xbf, 0x1f, 0x29, 0xed, 0x57, 0xa1, 0x3b, 0xf6, 0xa9, 0x5c, 0x84, 0xf9, + 0xfe, 0xa5, 0x44, 0xc9, 0x66, 0x93, 0x7c, 0x2d, 0xb1, 0xe8, 0x7e, 0x78, 0x1c, 0xca, 0x65, 0x69, + 0x12, 0xf9, 0x8d, 0x09, 0x34, 0x76, 0x03, 0x8f, 0x13, 0x7a, 0x32, 0xa6, 0x8c, 0xa3, 0x55, 0x28, + 0xb9, 0x2c, 0x90, 0xd9, 0x6a, 0xf4, 0x9b, 0x89, 0xfd, 0xed, 0x83, 0x3d, 0x22, 0x18, 0xa9, 0xdb, + 0xc5, 0xe9, 0x6e, 0xe3, 0xc7, 0xd0, 0x54, 0x36, 0xf5, 0x4a, 0xdc, 0x82, 0x1a, 0x53, 0x35, 0x52, + 0x1b, 0x6e, 0x27, 0x6a, 0xba, 0x76, 0x92, 0x58, 0x40, 0xe8, 0x6e, 0xf9, 0x21, 0xa3, 0xb1, 0x43, + 0x5f, 0xa2, 0xbb, 0x09, 0xe8, 0x85, 0xe7, 0xfb, 0xfb, 0x51, 0x38, 0xa0, 0x8c, 0xcd, 0x62, 0xe1, + 0x97, 0x50, 0xdf, 0x77, 0x22, 0x46, 0xdd, 0x83, 0xd7, 0x2f, 0x45, 0xf5, 0x39, 0x19, 0xd3, 0x28, + 
0xde, 0x79, 0x6a, 0x80, 0x7f, 0x84, 0xa6, 0x14, 0x99, 0xc1, 0x3c, 0xba, 0x01, 0x25, 0x76, 0xe2, + 0xeb, 0x5d, 0x8b, 0x4c, 0xac, 0xa8, 0x29, 0x89, 0x60, 0xe3, 0xbf, 0x16, 0xa0, 0xbc, 0x17, 0xba, + 0x72, 0xbd, 0x38, 0x3d, 0x4b, 0xea, 0xbb, 0xf8, 0x4e, 0xce, 0x83, 0x62, 0x7a, 0x1e, 0xa0, 0x35, + 0x68, 0x1c, 0x79, 0xc1, 0x31, 0x8d, 0x46, 0x91, 0x17, 0x70, 0x5d, 0x2c, 0x4d, 0x12, 0x5a, 0x81, + 0x3a, 0xe3, 0x4e, 0xc4, 0x5f, 0x7a, 0x81, 0x3a, 0x4a, 0xca, 0x24, 0x25, 0x88, 0x5d, 0x75, 0xe8, + 0xf0, 0xc1, 0xfb, 0x5d, 0x57, 0x96, 0xfb, 0x32, 0x89, 0x87, 0xf8, 0x57, 0xd0, 0xd2, 0xc1, 0xea, + 0xa5, 0xbc, 0x0e, 0x95, 0x20, 0x74, 0x29, 0xeb, 0x14, 0x32, 0xeb, 0x2f, 0x1c, 0x26, 0x8a, 0x87, + 0xd7, 0x60, 0xee, 0xe9, 0xd8, 0xf5, 0xf8, 0xf4, 0x24, 0x3a, 0xd0, 0x94, 0x12, 0xb3, 0x24, 0xf1, + 0x26, 0x94, 0xd9, 0x89, 0x1f, 0x23, 0xf0, 0x42, 0x22, 0x18, 0x4f, 0x49, 0x24, 0x1b, 0xdf, 0x85, + 0x45, 0x51, 0xf1, 0xf4, 0x34, 0x6c, 0xec, 0x73, 0x59, 0xf8, 0x3a, 0x50, 0x1b, 0x52, 0xc6, 0x9c, + 0xe3, 0xb8, 0x24, 0xc4, 0x43, 0xfc, 0xb7, 0x22, 0x34, 0x0c, 0xe9, 0xe9, 0x92, 0xe9, 0xb1, 0x54, + 0x34, 0x8f, 0xa5, 0x2b, 0x50, 0x17, 0xf0, 0x7f, 0x27, 0x4f, 0x36, 0xb5, 0x06, 0x73, 0x82, 0x20, + 0x8b, 0xcd, 0x3b, 0x58, 0xf4, 0x26, 0xbd, 0xd1, 0x9b, 0xff, 0x6b, 0x3b, 0x06, 0xc5, 0xef, 0xe5, + 0x78, 0xaf, 0x8e, 0x8c, 0x3c, 0x4b, 0x5d, 0x17, 0x3a, 0xd3, 0x14, 0x72, 0x0e, 0x90, 0xbe, 0x7d, + 0x80, 0xac, 0x58, 0x07, 0x48, 0xc6, 0x86, 0x79, 0x8e, 0xfc, 0x36, 0x59, 0x37, 0xc1, 0x65, 0xa8, + 0x07, 0xb5, 0x48, 0x7d, 0x6a, 0x40, 0x2c, 0xe5, 0x85, 0x42, 0x62, 0x21, 0xfc, 0x3d, 0xb4, 0x62, + 0xba, 0xc2, 0xd3, 0x23, 0x68, 0x3a, 0x86, 0x41, 0x6d, 0x65, 0x39, 0xcf, 0x0a, 0x23, 0x96, 0x28, + 0xfe, 0x0a, 0x16, 0xf6, 0x28, 0x75, 0x49, 0xe8, 0xfb, 0x87, 0xce, 0xe0, 0xc3, 0x74, 0xb0, 0x85, + 0xb0, 0xfc, 0x8c, 0x06, 0x86, 0xdc, 0x2c, 0xa8, 0xbb, 0x65, 0x6e, 0xdd, 0x4e, 0x0a, 0x7b, 0xdb, + 0x03, 0xb5, 0x81, 0x35, 0xf4, 0x0c, 0xfa, 0x27, 0xa0, 0xf7, 0xe7, 0x22, 0x34, 0x3e, 0x19, 0x87, + 0xa9, 0x5f, 0xb4, 0x01, 0xa9, 0xd1, 
0x95, 0x99, 0x50, 0x9f, 0x49, 0x29, 0xba, 0x0c, 0x7e, 0x2f, + 0xc7, 0x41, 0x03, 0x5d, 0x19, 0x4e, 0x8c, 0xae, 0x3c, 0x85, 0x2f, 0x45, 0x57, 0xc6, 0x86, 0x89, + 0xae, 0x4d, 0xb8, 0x98, 0x5d, 0x28, 0x0d, 0x93, 0x75, 0x95, 0x7d, 0xb5, 0x4a, 0x4b, 0x79, 0x01, + 0xa9, 0xcc, 0x3f, 0x82, 0xc6, 0xbe, 0x17, 0x1c, 0xcf, 0x52, 0xfa, 0xaf, 0x41, 0x6d, 0xe7, 0x8c, + 0x0e, 0xa6, 0xc3, 0xe8, 0x07, 0x68, 0x08, 0x81, 0x59, 0xc0, 0x83, 0x4d, 0xf0, 0xa4, 0x72, 0x7a, + 0x3e, 0xe5, 0xfa, 0x3f, 0x0a, 0x00, 0xca, 0xbe, 0xac, 0x3e, 0x18, 0x9a, 0xbe, 0xc3, 0xf8, 0x6e, + 0xc0, 0x68, 0xc4, 0x77, 0xd5, 0xb5, 0xa2, 0x44, 0x2c, 0x1a, 0xba, 0x03, 0x17, 0xcc, 0xf1, 0x4e, + 0x14, 0x85, 0x91, 0x86, 0xc6, 0x24, 0x43, 0x58, 0x8c, 0xc2, 0x53, 0xf6, 0xf4, 0xe8, 0x88, 0x0e, + 0x38, 0x75, 0x65, 0x89, 0x2a, 0x11, 0x8b, 0x26, 0x2c, 0x9a, 0x63, 0x65, 0x51, 0xf5, 0x82, 0x93, + 0x0c, 0xec, 0x42, 0x5b, 0x78, 0xfc, 0x9d, 0x38, 0x2c, 0x66, 0x3b, 0x0e, 0xcd, 0x4a, 0x3e, 0x99, + 0x17, 0x55, 0xc8, 0x37, 0x61, 0xc1, 0x98, 0x45, 0x26, 0xe7, 0xeb, 0x6c, 0xd9, 0x59, 0xb4, 0x74, + 0xb3, 0x55, 0xe7, 0x09, 0x34, 0x35, 0x59, 0xa1, 0xe9, 0x36, 0x54, 0x15, 0x4b, 0xbb, 0x98, 0xab, + 0xad, 0x45, 0xf0, 0x0f, 0x50, 0x7f, 0x73, 0xf6, 0xbf, 0x8b, 0xee, 0x09, 0x80, 0x30, 0xaf, 0x3d, + 0xfb, 0xc2, 0xc0, 0xd6, 0x60, 0xee, 0xb5, 0xc0, 0xe6, 0x74, 0xd0, 0xde, 0x87, 0xba, 0x94, 0xd8, + 0x0a, 0x83, 0x23, 0x74, 0x03, 0x5a, 0xdc, 0x1b, 0xd2, 0x70, 0xcc, 0x0f, 0xe8, 0x20, 0x0c, 0x14, + 0xa8, 0x5a, 0xc4, 0x26, 0xe2, 0xbf, 0x14, 0xa0, 0x29, 0x75, 0x66, 0x09, 0xfa, 0xba, 0x89, 0xf4, + 0xf4, 0x6c, 0x8e, 0xbd, 0x94, 0x50, 0x47, 0xeb, 0x50, 0x1e, 0x84, 0xc1, 0x91, 0x44, 0xa0, 0xd9, + 0x07, 0x25, 0x9e, 0x12, 0xc9, 0xc7, 0x2e, 0xb4, 0xb4, 0x23, 0x49, 0x19, 0xa8, 0x0e, 0x42, 0x7f, + 0x3c, 0x0c, 0x74, 0x76, 0x26, 0x6e, 0x21, 0x8a, 0x8b, 0x6e, 0x43, 0x59, 0xa0, 0x55, 0xa7, 0xfe, + 0x92, 0x3d, 0x81, 0x4e, 0x62, 0x78, 0x4a, 0xa4, 0x10, 0xde, 0x82, 0x79, 0x9b, 0x8e, 0xee, 0x43, + 0x55, 0x16, 0xa5, 0x78, 0x11, 0x2e, 0xe7, 0x19, 0x78, 0x2b, 0x24, 0x88, 
0x16, 0xc4, 0x1b, 0xd0, + 0xce, 0xf2, 0xd2, 0x4b, 0x7e, 0xc1, 0xb8, 0xe4, 0x63, 0x2c, 0xb6, 0xf9, 0xc8, 0x77, 0xbc, 0x60, + 0xfa, 0xaa, 0x0d, 0x60, 0x5e, 0xcb, 0xcc, 0xd6, 0x20, 0x19, 0x6b, 0x60, 0x02, 0x28, 0x9e, 0x55, + 0x15, 0x9c, 0xb7, 0x62, 0x5f, 0xe9, 0x49, 0x74, 0x7e, 0xb7, 0xa0, 0x35, 0xf0, 0x1d, 0xc6, 0x3c, + 0x8d, 0x34, 0x3d, 0xd7, 0xd5, 0xac, 0x8d, 0x2d, 0x53, 0x88, 0xd8, 0x3a, 0x78, 0x13, 0x96, 0xf2, + 0xc4, 0xd0, 0x06, 0x94, 0xc5, 0x6d, 0x68, 0xa2, 0x88, 0xbf, 0x71, 0x0e, 0xc7, 0xbe, 0x13, 0x6d, + 0x3b, 0xdc, 0x21, 0x52, 0x02, 0x3f, 0x85, 0xc5, 0x67, 0x94, 0x6f, 0xeb, 0xab, 0xd3, 0x4c, 0x8d, + 0xfc, 0x2a, 0xcc, 0xc5, 0xfa, 0x79, 0xaf, 0x0d, 0xf8, 0x19, 0x2c, 0xd9, 0x53, 0xe8, 0x0c, 0xdc, + 0x85, 0x7a, 0x7c, 0x65, 0x8b, 0x57, 0x3f, 0x45, 0x71, 0x2c, 0x4e, 0x52, 0x19, 0xfc, 0x00, 0x2a, + 0x6f, 0xc4, 0x5d, 0x2b, 0xf7, 0x4d, 0xe3, 0x22, 0x54, 0xd9, 0xe0, 0x3d, 0x1d, 0x3a, 0xba, 0x2a, + 0xeb, 0x11, 0x3e, 0x96, 0x01, 0x4a, 0x3d, 0x71, 0x67, 0x9d, 0xad, 0xba, 0x54, 0xb8, 0xd0, 0xd7, + 0xcb, 0x3c, 0x6f, 0xa6, 0x53, 0xdc, 0xc4, 0x24, 0x13, 0x3f, 0x97, 0x61, 0x1a, 0x13, 0xe9, 0x30, + 0xef, 0x41, 0x9d, 0xc7, 0x44, 0x3d, 0x17, 0xb2, 0x2d, 0x48, 0xf1, 0x54, 0x08, 0xff, 0xab, 0x00, + 0xf5, 0x84, 0x81, 0x1e, 0x42, 0x43, 0x6d, 0x35, 0x26, 0x1b, 0x8d, 0xec, 0x92, 0x6e, 0xa5, 0x3c, + 0x62, 0x0a, 0x0a, 0x3d, 0x2f, 0x70, 0xe9, 0x19, 0x55, 0x7a, 0xc5, 0x8c, 0xde, 0x6e, 0xca, 0x23, + 0xa6, 0x20, 0x5a, 0x87, 0xf9, 0x41, 0x44, 0x1d, 0x4e, 0xa5, 0x0b, 0x07, 0xaf, 0x5f, 0xea, 0x06, + 0x3b, 0x43, 0x35, 0x5b, 0xa4, 0xb2, 0xdd, 0x62, 0x7d, 0x03, 0x0d, 0xc3, 0xab, 0x2f, 0x00, 0xe3, + 0x37, 0xe2, 0x82, 0x9c, 0x7a, 0xf2, 0xf9, 0x8a, 0x8f, 0x60, 0xc1, 0x20, 0x3e, 0xa7, 0x8e, 0xfb, + 0xb9, 0xef, 0x5e, 0xa2, 0xb5, 0x35, 0xed, 0x85, 0xa7, 0x4c, 0x14, 0x0a, 0x8f, 0xd3, 0xa1, 0x02, + 0x65, 0x9d, 0xa8, 0x01, 0x0e, 0xa1, 0x61, 0x08, 0xa2, 0x3e, 0xd4, 0x74, 0xb6, 0x35, 0x76, 0x3b, + 0x79, 0xfe, 0x09, 0x57, 0x48, 0x2c, 0x88, 0xee, 0x58, 0xb5, 0x32, 0x57, 0x41, 0x38, 0xa0, 0x8b, + 0xe5, 0x0d, 
0x71, 0x94, 0xf2, 0xc8, 0x11, 0x4d, 0xc0, 0xf4, 0xfa, 0x75, 0x02, 0x5d, 0x2d, 0x25, + 0x57, 0xe6, 0x77, 0x51, 0x38, 0x9c, 0xb1, 0xed, 0xfe, 0xca, 0xac, 0x65, 0xcb, 0x46, 0x1d, 0x4a, + 0x7d, 0x50, 0xd5, 0x6c, 0x07, 0xae, 0xe4, 0x4e, 0x99, 0x9e, 0x1c, 0x12, 0xcb, 0x6c, 0xe2, 0xe4, + 0x50, 0xfb, 0x45, 0x73, 0xf1, 0x4d, 0x68, 0xa9, 0x1e, 0x47, 0xc4, 0x3c, 0x3d, 0x40, 0x0e, 0x2b, + 0x3b, 0x8c, 0x7b, 0x43, 0x87, 0x0b, 0xd8, 0xa5, 0x1a, 0xb3, 0x84, 0xb8, 0x61, 0x86, 0x78, 0x31, + 0xbd, 0xf9, 0x98, 0x6e, 0xa8, 0x18, 0xff, 0x00, 0x57, 0xa7, 0xcc, 0xaa, 0xa3, 0x5c, 0x82, 0xca, + 0x20, 0x1c, 0xeb, 0xc7, 0xb1, 0x12, 0x51, 0x03, 0xb4, 0x0a, 0x40, 0xa3, 0xe8, 0x95, 0x75, 0x75, + 0x30, 0x28, 0xf8, 0xd7, 0xb0, 0x68, 0x3d, 0x9b, 0xa4, 0xef, 0x67, 0x86, 0x5a, 0x21, 0xab, 0x76, + 0xeb, 0xef, 0x05, 0x98, 0x9f, 0x78, 0x60, 0x9a, 0xb7, 0x1b, 0xf8, 0xf6, 0x2f, 0x50, 0x1d, 0x2a, + 0xf2, 0x64, 0x6c, 0x17, 0x50, 0x43, 0xb4, 0xd8, 0xf2, 0x64, 0x68, 0x17, 0x51, 0x1b, 0x9a, 0x66, + 0x69, 0x6a, 0x97, 0xd0, 0x25, 0x58, 0xcc, 0x59, 0xc2, 0x76, 0x19, 0x5d, 0x86, 0xe5, 0xdc, 0xb8, + 0xdb, 0x15, 0xb4, 0x00, 0x0d, 0xc3, 0xf7, 0x76, 0x15, 0xb5, 0xa0, 0x9e, 0x74, 0x8b, 0xed, 0x5a, + 0xff, 0xdf, 0x73, 0x50, 0xdd, 0x96, 0x8f, 0xf8, 0xe8, 0x2e, 0x54, 0xe4, 0x03, 0x21, 0x4a, 0xd7, + 0x5e, 0x3e, 0xe1, 0x77, 0xd3, 0x9c, 0xdb, 0x0f, 0x88, 0x0f, 0xa0, 0xbc, 0x1b, 0x78, 0x1c, 0x99, + 0xf5, 0x29, 0x79, 0xb2, 0xe8, 0x2e, 0x67, 0xa8, 0x5a, 0xa9, 0x07, 0x15, 0xf9, 0x7e, 0x85, 0x52, + 0xbe, 0xf9, 0x9e, 0xd5, 0xcd, 0x4c, 0x8e, 0x9e, 0x5b, 0x01, 0xa0, 0x2b, 0xe9, 0x63, 0xe8, 0xc4, + 0x4b, 0x56, 0x77, 0x25, 0x9f, 0xa9, 0x67, 0x7e, 0x28, 0xff, 0x6e, 0xb0, 0x66, 0x36, 0x1f, 0xaa, + 0x8c, 0x30, 0xed, 0x27, 0x9d, 0x87, 0x50, 0x91, 0xb7, 0x6c, 0x34, 0x71, 0xeb, 0xce, 0xea, 0xd9, + 0x57, 0xf7, 0xd7, 0xd9, 0xc5, 0x46, 0xab, 0x89, 0x64, 0xee, 0x7d, 0xbb, 0x7b, 0x6d, 0x2a, 0x5f, + 0x9b, 0xbc, 0x03, 0x65, 0x71, 0x7d, 0x33, 0x32, 0x6e, 0xdc, 0xe6, 0x26, 0x52, 0xf7, 0x00, 0xca, + 0x62, 0xa9, 0x0d, 0x69, 0xe3, 0x7e, 0xd6, 0x5d, 
0xce, 0x76, 0xce, 0x6a, 0x8a, 0x4d, 0x03, 0x1f, + 0xe8, 0xb2, 0x25, 0x63, 0xde, 0x63, 0xba, 0x9d, 0x3c, 0x96, 0xbe, 0x7c, 0x14, 0xdf, 0x9c, 0x21, + 0xe3, 0xb8, 0x8c, 0x6f, 0x07, 0xdd, 0x45, 0x8b, 0x96, 0xa6, 0x57, 0xe2, 0xdf, 0x48, 0xaf, 0xd9, + 0x5d, 0x1b, 0xe9, 0xb5, 0x7b, 0xdd, 0xdf, 0x24, 0x9b, 0x05, 0x5d, 0xca, 0xf6, 0x5f, 0x79, 0x4e, + 0xda, 0x9d, 0xdc, 0x0b, 0xb9, 0xbb, 0x92, 0xfe, 0x06, 0xad, 0x18, 0xa9, 0x9f, 0xe8, 0xac, 0xba, + 0x57, 0xa7, 0x70, 0x2d, 0x63, 0xe9, 0xe9, 0x6f, 0x19, 0xcb, 0x76, 0x31, 0xb6, 0xb1, 0xc9, 0xd6, + 0xe3, 0xc7, 0xdc, 0x5d, 0x8e, 0xae, 0x67, 0x6b, 0x7b, 0xce, 0xc9, 0xd1, 0xbd, 0xf1, 0xf3, 0x42, + 0x7a, 0x86, 0xa3, 0x29, 0xe5, 0x02, 0xdd, 0x4c, 0xd5, 0x7f, 0xa6, 0x78, 0x77, 0xd7, 0x3f, 0x25, + 0xa6, 0xe6, 0xf9, 0xae, 0xf9, 0x27, 0xe8, 0xdd, 0x7d, 0xa2, 0x65, 0x0f, 0xab, 0xf2, 0xe3, 0xc1, + 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x9f, 0xce, 0x61, 0x22, 0x34, 0x1c, 0x00, 0x00, } diff --git a/sqle/driver/v2/proto/driver_v2.proto b/sqle/driver/v2/proto/driver_v2.proto index ba529174f6..b96b91ee9b 100644 --- a/sqle/driver/v2/proto/driver_v2.proto +++ b/sqle/driver/v2/proto/driver_v2.proto @@ -75,6 +75,16 @@ message Rule { repeated Param params = 5; string annotation = 6; Knowledge knowledge = 7; + map i18nRuleInfo = 8; +} + +message I18nRuleInfo { + string desc = 2; +// string level = 3; + string category = 4; + repeated Param params = 5; + string annotation = 6; + Knowledge knowledge = 7; } message Knowledge { @@ -143,10 +153,15 @@ message AuditRequest { repeated AuditSQL sqls = 2; } +message I18nAuditResultInfo { + string message = 1; +} + message AuditResult { string message = 1; string level = 2; string rule_name = 3; + map i18nAuditResultInfo = 4; } message AuditResults { @@ -167,9 +182,14 @@ message GenRollbackSQLRequest { NeedRollbackSQL sql = 2; } +message I18nRollbackSQLInfo { + string message = 1; +} + message RollbackSQL { string query = 1; string message = 2; + map i18nRollbackSQLInfo = 3; } message GenRollbackSQLResponse { diff 
--git a/sqle/driver/v2/util.go b/sqle/driver/v2/util.go index 554d10830e..a96225e28a 100644 --- a/sqle/driver/v2/util.go +++ b/sqle/driver/v2/util.go @@ -6,6 +6,7 @@ import ( "time" protoV2 "github.com/actiontech/sqle/sqle/driver/v2/proto" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/pkg/params" ) @@ -85,6 +86,135 @@ type DriverMetas struct { EnabledOptionalModule []OptionalModule } +func ConvertI18nAuditResultsFromProtoToDriver(pars []*protoV2.AuditResult) ([]*AuditResult, error) { + ars := make([]*AuditResult, len(pars)) + for k, par := range pars { + ar, err := ConvertI18nAuditResultFromProtoToDriver(par) + if err != nil { + return nil, err + } + ars[k] = ar + } + return ars, nil +} + +func ConvertI18nAuditResultFromProtoToDriver(par *protoV2.AuditResult) (*AuditResult, error) { + ar := &AuditResult{ + RuleName: par.RuleName, + Level: RuleLevel(par.Level), + I18nAuditResultInfo: make(map[string]AuditResultInfo, len(par.I18NAuditResultInfo)), + } + if len(par.I18NAuditResultInfo) == 0 { + // 对非多语言的插件支持 + ar.I18nAuditResultInfo = map[string]AuditResultInfo{ + locale.DefaultLang.String(): {Message: par.Message}, + } + } else { + if _, exist := par.I18NAuditResultInfo[locale.DefaultLang.String()]; !exist { + // 多语言的插件 审核结果需包含 locale.DefaultLang + return nil, fmt.Errorf("client audit results must support language: %s", locale.DefaultLang.String()) + } + } + for langTag, ruleInfo := range par.I18NAuditResultInfo { + ar.I18nAuditResultInfo[langTag] = AuditResultInfo{ + Message: ruleInfo.Message, + } + } + return ar, nil +} + +func ConvertI18nAuditResultFromDriverToProto(ar *AuditResult) *protoV2.AuditResult { + par := &protoV2.AuditResult{ + Message: ar.I18nAuditResultInfo[locale.DefaultLang.String()].Message, + RuleName: ar.RuleName, + Level: string(ar.Level), + I18NAuditResultInfo: make(map[string]*protoV2.I18NAuditResultInfo, len(ar.I18nAuditResultInfo)), + } + for langTag, ruleInfo := range ar.I18nAuditResultInfo { + 
par.I18NAuditResultInfo[langTag] = &protoV2.I18NAuditResultInfo{ + Message: ruleInfo.Message, + } + } + return par +} + +func ConvertI18nRuleFromProtoToDriver(rule *protoV2.Rule) *Rule { + dRule := &Rule{ + Name: rule.Name, + Level: RuleLevel(rule.Level), + Params: ConvertProtoParamToParam(rule.Params), + I18nRuleInfo: make(I18nRuleInfo, len(rule.I18NRuleInfo)), + } + for langTag, ruleInfo := range rule.I18NRuleInfo { + dRule.I18nRuleInfo[langTag] = ConvertI18nRuleInfoFromProtoToDriver(ruleInfo) + } + if len(rule.I18NRuleInfo) == 0 { + ruleInfo := &RuleInfo{ + Desc: rule.Desc, + Annotation: rule.Annotation, + Category: rule.Category, + Params: ConvertProtoParamToParam(rule.Params), + } + if rule.Knowledge != nil { + ruleInfo.Knowledge = RuleKnowledge{Content: rule.Knowledge.Content} + } + dRule.I18nRuleInfo = I18nRuleInfo{ + locale.DefaultLang.String(): ruleInfo, + } + } + return dRule +} + +func ConvertI18nRuleInfoFromProtoToDriver(ruleInfo *protoV2.I18NRuleInfo) *RuleInfo { + return &RuleInfo{ + Desc: ruleInfo.Desc, + Category: ruleInfo.Category, + Params: ConvertProtoParamToParam(ruleInfo.Params), + Annotation: ruleInfo.Annotation, + Knowledge: RuleKnowledge{Content: ruleInfo.Knowledge.Content}, + } +} + +func ConvertI18nRulesFromDriverToProto(rules []*Rule) []*protoV2.Rule { + rs := make([]*protoV2.Rule, len(rules)) + for i, rule := range rules { + rs[i] = ConvertI18nRuleFromDriverToProto(rule) + } + return rs +} + +func ConvertI18nRuleFromDriverToProto(rule *Rule) *protoV2.Rule { + // 填充默认语言以支持非多语言插件 + pRule := &protoV2.Rule{ + Name: rule.Name, + Desc: rule.I18nRuleInfo[locale.DefaultLang.String()].Desc, + Level: string(rule.Level), + Category: rule.I18nRuleInfo[locale.DefaultLang.String()].Category, + Params: ConvertParamToProtoParam(rule.Params), + Annotation: rule.I18nRuleInfo[locale.DefaultLang.String()].Annotation, + Knowledge: &protoV2.Knowledge{ + Content: rule.I18nRuleInfo[locale.DefaultLang.String()].Knowledge.Content, + }, + I18NRuleInfo: 
make(map[string]*protoV2.I18NRuleInfo, len(rule.I18nRuleInfo)), + } + for langTag, ruleInfo := range rule.I18nRuleInfo { + pRule.I18NRuleInfo[langTag] = ConvertI18nRuleInfoFromDriverToProto(ruleInfo) + } + return pRule +} + +func ConvertI18nRuleInfoFromDriverToProto(ruleInfo *RuleInfo) *protoV2.I18NRuleInfo { + return &protoV2.I18NRuleInfo{ + Desc: ruleInfo.Desc, + Category: ruleInfo.Category, + Params: ConvertParamToProtoParam(ruleInfo.Params), + Annotation: ruleInfo.Annotation, + Knowledge: &protoV2.Knowledge{ + Content: ruleInfo.Knowledge.Content, + }, + } +} + func ConvertRuleFromProtoToDriver(rule *protoV2.Rule) *Rule { var ps = make(params.Params, 0, len(rule.Params)) for _, p := range rule.Params { @@ -95,38 +225,48 @@ func ConvertRuleFromProtoToDriver(rule *protoV2.Rule) *Rule { Type: params.ParamType(p.Type), }) } - return &Rule{ - Name: rule.Name, - Category: rule.Category, - Desc: rule.Desc, - Annotation: rule.Annotation, - Level: RuleLevel(rule.Level), - Params: ps, - Knowledge: RuleKnowledge{Content: rule.Knowledge.GetContent()}, + dr := &Rule{ + Name: rule.Name, + Level: RuleLevel(rule.Level), + Params: ps, + I18nRuleInfo: make(I18nRuleInfo, len(rule.I18NRuleInfo)), + } + for langTag, v := range rule.I18NRuleInfo { + ri := &RuleInfo{ + Desc: v.Desc, + Annotation: v.Annotation, + Category: v.Category, + Params: ps, + } + if v.Knowledge != nil { + ri.Knowledge = RuleKnowledge{Content: v.Knowledge.Content} + } + dr.I18nRuleInfo[langTag] = ri } + return dr } func ConvertRuleFromDriverToProto(rule *Rule) *protoV2.Rule { - var params = make([]*protoV2.Param, 0, len(rule.Params)) - for _, p := range rule.Params { - params = append(params, &protoV2.Param{ - Key: p.Key, - Value: p.Value, - Desc: p.Desc, - Type: string(p.Type), - }) + pr := &protoV2.Rule{ + Name: rule.Name, + Desc: "", + Level: string(rule.Level), + Category: "", + Params: ConvertParamToProtoParam(rule.Params), + Annotation: "", + Knowledge: nil, + I18NRuleInfo: 
make(map[string]*protoV2.I18NRuleInfo, len(rule.I18nRuleInfo)), } - return &protoV2.Rule{ - Name: rule.Name, - Desc: rule.Desc, - Annotation: rule.Annotation, - Level: string(rule.Level), - Category: rule.Category, - Params: params, - Knowledge: &protoV2.Knowledge{ - Content: rule.Knowledge.Content, - }, + for k, v := range rule.I18nRuleInfo { + pr.I18NRuleInfo[k] = &protoV2.I18NRuleInfo{ + Desc: v.Desc, + Category: v.Category, + Params: ConvertParamToProtoParam(v.Params), + Annotation: v.Annotation, + Knowledge: &protoV2.Knowledge{Content: v.Knowledge.Content}, + } } + return pr } func ConvertParamToProtoParam(p params.Params) []*protoV2.Param { diff --git a/sqle/locale/active.en.toml b/sqle/locale/active.en.toml new file mode 100644 index 0000000000..b35253ae94 --- /dev/null +++ b/sqle/locale/active.en.toml @@ -0,0 +1,94 @@ +APExportApprovalRate = "Approval rate" +APExportAuditResult = "Audit result" +APExportBelongingProject = "Belonging project" +APExportCreator = "Creator" +APExportDatabase = "Database" +APExportDbType = "Database type" +APExportGenerationTime = "Generation time" +APExportNumber = "Number" +APExportResultRating = "Rating" +APExportTaskName = "Task name" +APExportType = "Task type" +AuditRecordTagFull = "Full" +AuditRecordTagIncrement = "Incremental" +DefaultRuleTemplatesDesc = "Default rule templates" +DefaultTemplatesDesc = "%s default templates" +ExportCreateTime = "Create time" +ExportCreator = "Creator" +ExportDataSource = "DB instance" +ExportExecutionEndTime = "End time" +ExportExecutionStartTime = "Start time" +ExportExecutionStatus = "Status" +ExportExecutionTime = "Execution time" +ExportExecutor = "Executor" +ExportNode1AuditResult = "[node 1] audit result" +ExportNode1AuditTime = "[node 1] audit time" +ExportNode1Auditor = "[node 1] auditor" +ExportNode2AuditResult = "[node 2] audit result" +ExportNode2AuditTime = "[node 2] audit time" +ExportNode2Auditor = "[node 2] auditor" +ExportNode3AuditResult = "[node 3] audit result" 
+ExportNode3AuditTime = "[node 3] audit time" +ExportNode3Auditor = "[node 3] auditor" +ExportNode4AuditResult = "[node 4] audit result" +ExportNode4AuditTime = "[node 4] audit time" +ExportNode4Auditor = "[node 4] auditor" +ExportOperator = "Operator" +ExportSQLContent = "SQL content" +ExportTaskOrderStatus = "Status" +ExportWorkflowDescription = "Description" +ExportWorkflowName = "Name" +ExportWorkflowNumber = "Number" +FileOrderMethodPrefixNumAsc = "Prefix num asc" +FileOrderMethodSuffixNumAsc = "Suffix num asc" +SMExportAuditResult = "Audit result" +SMExportDataSource = "DB instance" +SMExportEndpoint = "Endpoint" +SMExportOptimizedSQLCount = "Optimized SQL count" +SMExportPersonInCharge = "Person in charge" +SMExportProblemSQLCount = "Problem SQL count" +SMExportRemarks = "Remarks" +SMExportSCHEMA = "Schema" +SMExportSQL = "SQL" +SMExportSQLFingerprint = "SQL fingerprint" +SMExportSource = "Source" +SMExportState = "State" +SMExportTotalSQLCount = "Total SQL count" +SQLAuditResultDescPass = "Pass audit" +SQLAuditStatusDoing = "Auditing" +SQLAuditStatusFinished = "Audit finished" +SQLAuditStatusInitialized = "Not audited" +SQLAuditStatusUnknown = "Unknown" +SQLExecuteStatusDoing = "Executing" +SQLExecuteStatusFailed = "Failed" +SQLExecuteStatusInitialized = "Ready to execute" +SQLExecuteStatusManuallyExecuted = "Manually executed" +SQLExecuteStatusSucceeded = "Succeeded" +SQLExecuteStatusUnknown = "Unknown" +SQLManageSourceAuditPlan = "Intelligent scan" +SQLManageSourceSqlAuditRecord = "SQL audit" +SQLManageStatusIgnored = "Ignored" +SQLManageStatusManualAudited = "Manually audited" +SQLManageStatusSolved = "Solved" +SQLManageStatusUnhandled = "Unhandled" +TaskSQLReportAuditResult = "Audit result" +TaskSQLReportAuditStatus = "Audit status" +TaskSQLReportDescription = "SQL description" +TaskSQLReportExecResult = "Execution result" +TaskSQLReportExecStatus = "Execution status" +TaskSQLReportIndex = "Index" +TaskSQLReportRollbackSQL = "Rollback SQL" 
+TaskSQLReportSQL = "SQL" +TaskStatusExecuteFailed = "Failed" +TaskStatusExecuteSucceeded = "Succeeded" +TaskStatusExecuting = "Executing" +TaskStatusManuallyExecuted = "Manually executed" +WorkflowStatusCancel = "Closed" +WorkflowStatusExecFailed = "Failed" +WorkflowStatusExecuting = "Executing" +WorkflowStatusFinish = "Succeeded" +WorkflowStatusReject = "Rejected" +WorkflowStatusWaitForAudit = "Waiting for audit" +WorkflowStatusWaitForExecution = "Waiting for execution" +WorkflowStepStateApprove = "Approved" +WorkflowStepStateReject = "Rejected" diff --git a/sqle/locale/active.zh.toml b/sqle/locale/active.zh.toml new file mode 100644 index 0000000000..dc12538b46 --- /dev/null +++ b/sqle/locale/active.zh.toml @@ -0,0 +1,94 @@ +APExportApprovalRate = "审核通过率" +APExportAuditResult = "审核结果" +APExportBelongingProject = "所属项目" +APExportCreator = "扫描任务创建人" +APExportDatabase = "审核的数据库" +APExportDbType = "数据库类型" +APExportGenerationTime = "报告生成时间" +APExportNumber = "编号" +APExportResultRating = "审核结果评分" +APExportTaskName = "扫描任务名称" +APExportType = "扫描任务类型" +AuditRecordTagFull = "全量" +AuditRecordTagIncrement = "增量" +DefaultRuleTemplatesDesc = "默认规则模板" +DefaultTemplatesDesc = "%s 默认模板" +ExportCreateTime = "创建时间" +ExportCreator = "创建人" +ExportDataSource = "数据源" +ExportExecutionEndTime = "上线结束时间" +ExportExecutionStartTime = "上线开始时间" +ExportExecutionStatus = "上线结果" +ExportExecutionTime = "工单执行时间" +ExportExecutor = "上线人" +ExportNode1AuditResult = "[节点1]审核结果" +ExportNode1AuditTime = "[节点1]审核时间" +ExportNode1Auditor = "[节点1]审核人" +ExportNode2AuditResult = "[节点2]审核结果" +ExportNode2AuditTime = "[节点2]审核时间" +ExportNode2Auditor = "[节点2]审核人" +ExportNode3AuditResult = "[节点3]审核结果" +ExportNode3AuditTime = "[节点3]审核时间" +ExportNode3Auditor = "[节点3]审核人" +ExportNode4AuditResult = "[节点4]审核结果" +ExportNode4AuditTime = "[节点4]审核时间" +ExportNode4Auditor = "[节点4]审核人" +ExportOperator = "操作人" +ExportSQLContent = "具体执行SQL内容" +ExportTaskOrderStatus = "工单状态" +ExportWorkflowDescription = "工单描述" 
+ExportWorkflowName = "工单名称" +ExportWorkflowNumber = "工单编号" +FileOrderMethodPrefixNumAsc = "文件名前缀数字升序" +FileOrderMethodSuffixNumAsc = "文件名后缀数字升序" +SMExportAuditResult = "审核结果" +SMExportDataSource = "数据源" +SMExportEndpoint = "端点信息" +SMExportOptimizedSQLCount = "已优化SQL数" +SMExportPersonInCharge = "负责人" +SMExportProblemSQLCount = "问题SQL数" +SMExportRemarks = "备注" +SMExportSCHEMA = "SCHEMA" +SMExportSQL = "SQL" +SMExportSQLFingerprint = "SQL指纹" +SMExportSource = "来源" +SMExportState = "状态" +SMExportTotalSQLCount = "SQL总数" +SQLAuditResultDescPass = "审核通过" +SQLAuditStatusDoing = "正在审核" +SQLAuditStatusFinished = "审核完成" +SQLAuditStatusInitialized = "未审核" +SQLAuditStatusUnknown = "未知状态" +SQLExecuteStatusDoing = "正在执行" +SQLExecuteStatusFailed = "执行失败" +SQLExecuteStatusInitialized = "准备执行" +SQLExecuteStatusManuallyExecuted = "人工执行" +SQLExecuteStatusSucceeded = "执行成功" +SQLExecuteStatusUnknown = "未知" +SQLManageSourceAuditPlan = "智能扫描" +SQLManageSourceSqlAuditRecord = "SQL审核" +SQLManageStatusIgnored = "已忽略" +SQLManageStatusManualAudited = "已人工审核" +SQLManageStatusSolved = "已解决" +SQLManageStatusUnhandled = "未处理" +TaskSQLReportAuditResult = "SQL审核结果" +TaskSQLReportAuditStatus = "SQL审核状态" +TaskSQLReportDescription = "SQL描述" +TaskSQLReportExecResult = "SQL执行结果" +TaskSQLReportExecStatus = "SQL执行状态" +TaskSQLReportIndex = "序号" +TaskSQLReportRollbackSQL = "SQL对应的回滚语句" +TaskSQLReportSQL = "SQL" +TaskStatusExecuteFailed = "上线失败" +TaskStatusExecuteSucceeded = "上线成功" +TaskStatusExecuting = "正在上线" +TaskStatusManuallyExecuted = "手动上线" +WorkflowStatusCancel = "已关闭" +WorkflowStatusExecFailed = "上线失败" +WorkflowStatusExecuting = "正在上线" +WorkflowStatusFinish = "上线成功" +WorkflowStatusReject = "已驳回" +WorkflowStatusWaitForAudit = "待审核" +WorkflowStatusWaitForExecution = "待上线" +WorkflowStepStateApprove = "通过" +WorkflowStepStateReject = "驳回" diff --git a/sqle/locale/locale.go b/sqle/locale/locale.go new file mode 100644 index 0000000000..162c6f1fa6 --- /dev/null +++ b/sqle/locale/locale.go @@ -0,0 +1,86 @@ 
+package locale + +import ( + "context" + "embed" + "fmt" + "strings" + + "github.com/BurntSushi/toml" + "github.com/actiontech/sqle/sqle/log" + "github.com/labstack/echo/v4" + "github.com/nicksnyder/go-i18n/v2/i18n" + "golang.org/x/text/language" +) + +// todo: 迁移到公共地方,给dms, sqle 用 + +const ( + LocalizerCtxKey = "localizer" + AcceptLanguageKey = "Accept-Language" +) + +//go:embed active.*.toml +var LocaleFS embed.FS + +var bundle *i18n.Bundle + +var newEntry = log.NewEntry() + +var DefaultLang = language.Chinese // todo i18n make sure plugins support + +func init() { + bundle = i18n.NewBundle(DefaultLang) + bundle.RegisterUnmarshalFunc("toml", toml.Unmarshal) + _, err := bundle.LoadMessageFileFS(LocaleFS, "active.zh.toml") + if err != nil { + panic(fmt.Sprintf("load i18n config failed, error: %v", err)) + } + _, err = bundle.LoadMessageFileFS(LocaleFS, "active.en.toml") + if err != nil { + panic(fmt.Sprintf("load i18n config failed, error: %v", err)) + } +} + +func EchoMiddlewareI18nByAcceptLanguage() echo.MiddlewareFunc { + return func(next echo.HandlerFunc) echo.HandlerFunc { + return func(c echo.Context) error { + acceptLang := c.Request().Header.Get(AcceptLanguageKey) + localizer := i18n.NewLocalizer(bundle, acceptLang) + + langTag := DefaultLang + for _, lang := range bundle.LanguageTags() { + if strings.HasPrefix(c.Request().Header.Get(AcceptLanguageKey), lang.String()) { + langTag = lang + } + } + + ctx := context.WithValue(c.Request().Context(), LocalizerCtxKey, localizer) + ctx = context.WithValue(ctx, AcceptLanguageKey, langTag) + c.SetRequest(c.Request().WithContext(ctx)) + return next(c) + } + } +} + +func ShouldLocalizeMsg(ctx context.Context, msg *i18n.Message) string { + l, ok := ctx.Value(LocalizerCtxKey).(*i18n.Localizer) + if !ok { + l = i18n.NewLocalizer(bundle) + newEntry.Warnf("No localizer in context when localize msg: %v, use default", msg.ID) + } + + m, err := l.LocalizeMessage(msg) + if err != nil { + newEntry.Errorf("LocalizeMessage: %v 
failed: %v", msg.ID, err) + } + return m +} + +func GetLangTagFromCtx(ctx context.Context) language.Tag { + al, ok := ctx.Value(AcceptLanguageKey).(language.Tag) + if ok { + return al + } + return DefaultLang +} diff --git a/sqle/locale/message_zh.go b/sqle/locale/message_zh.go new file mode 100644 index 0000000000..831b96f32b --- /dev/null +++ b/sqle/locale/message_zh.go @@ -0,0 +1,133 @@ +package locale + +import ( + "github.com/nicksnyder/go-i18n/v2/i18n" +) + +// rule template +var ( + DefaultRuleTemplatesDesc = &i18n.Message{ID: "DefaultRuleTemplatesDesc", Other: "默认规则模板"} + DefaultTemplatesDesc = &i18n.Message{ID: "DefaultTemplatesDesc", Other: "%s 默认模板"} +) + +// task +var ( + TaskStatusExecuting = &i18n.Message{ID: "TaskStatusExecuting", Other: "正在上线"} + TaskStatusExecuteSucceeded = &i18n.Message{ID: "TaskStatusExecuteSucceeded", Other: "上线成功"} + TaskStatusExecuteFailed = &i18n.Message{ID: "TaskStatusExecuteFailed", Other: "上线失败"} + TaskStatusManuallyExecuted = &i18n.Message{ID: "TaskStatusManuallyExecuted", Other: "手动上线"} + + FileOrderMethodPrefixNumAsc = &i18n.Message{ID: "FileOrderMethodPrefixNumAsc", Other: "文件名前缀数字升序"} + FileOrderMethodSuffixNumAsc = &i18n.Message{ID: "FileOrderMethodSuffixNumAsc", Other: "文件名后缀数字升序"} + + SQLAuditStatusInitialized = &i18n.Message{ID: "SQLAuditStatusInitialized", Other: "未审核"} + SQLAuditStatusDoing = &i18n.Message{ID: "SQLAuditStatusDoing", Other: "正在审核"} + SQLAuditStatusFinished = &i18n.Message{ID: "SQLAuditStatusFinished", Other: "审核完成"} + SQLAuditStatusUnknown = &i18n.Message{ID: "SQLAuditStatusUnknown", Other: "未知状态"} + + SQLAuditResultDescPass = &i18n.Message{ID: "SQLAuditResultDescPass", Other: "审核通过"} + + SQLExecuteStatusInitialized = &i18n.Message{ID: "SQLExecuteStatusInitialized", Other: "准备执行"} + SQLExecuteStatusDoing = &i18n.Message{ID: "SQLExecuteStatusDoing", Other: "正在执行"} + SQLExecuteStatusFailed = &i18n.Message{ID: "SQLExecuteStatusFailed", Other: "执行失败"} + SQLExecuteStatusSucceeded = &i18n.Message{ID: 
"SQLExecuteStatusSucceeded", Other: "执行成功"} + SQLExecuteStatusManuallyExecuted = &i18n.Message{ID: "SQLExecuteStatusManuallyExecuted", Other: "人工执行"} + SQLExecuteStatusUnknown = &i18n.Message{ID: "SQLExecuteStatusUnknown", Other: "未知"} + + TaskSQLReportIndex = &i18n.Message{ID: "TaskSQLReportIndex", Other: "序号"} + TaskSQLReportSQL = &i18n.Message{ID: "TaskSQLReportSQL", Other: "SQL"} + TaskSQLReportAuditStatus = &i18n.Message{ID: "TaskSQLReportAuditStatus", Other: "SQL审核状态"} + TaskSQLReportAuditResult = &i18n.Message{ID: "TaskSQLReportAuditResult", Other: "SQL审核结果"} + TaskSQLReportExecStatus = &i18n.Message{ID: "TaskSQLReportExecStatus", Other: "SQL执行状态"} + TaskSQLReportExecResult = &i18n.Message{ID: "TaskSQLReportExecResult", Other: "SQL执行结果"} + TaskSQLReportRollbackSQL = &i18n.Message{ID: "TaskSQLReportRollbackSQL", Other: "SQL对应的回滚语句"} + TaskSQLReportDescription = &i18n.Message{ID: "TaskSQLReportDescription", Other: "SQL描述"} +) + +// workflow +var ( + WorkflowStepStateApprove = &i18n.Message{ID: "WorkflowStepStateApprove", Other: "通过"} + WorkflowStepStateReject = &i18n.Message{ID: "WorkflowStepStateReject", Other: "驳回"} + + WorkflowStatusWaitForAudit = &i18n.Message{ID: "WorkflowStatusWaitForAudit", Other: "待审核"} + WorkflowStatusWaitForExecution = &i18n.Message{ID: "WorkflowStatusWaitForExecution", Other: "待上线"} + WorkflowStatusReject = &i18n.Message{ID: "WorkflowStatusReject", Other: "已驳回"} + WorkflowStatusCancel = &i18n.Message{ID: "WorkflowStatusCancel", Other: "已关闭"} + WorkflowStatusExecuting = &i18n.Message{ID: "WorkflowStatusExecuting", Other: "正在上线"} + WorkflowStatusExecFailed = &i18n.Message{ID: "WorkflowStatusExecFailed", Other: "上线失败"} + WorkflowStatusFinish = &i18n.Message{ID: "WorkflowStatusFinish", Other: "上线成功"} + + WFExportWorkflowNumber = &i18n.Message{ID: "ExportWorkflowNumber", Other: "工单编号"} + WFExportWorkflowName = &i18n.Message{ID: "ExportWorkflowName", Other: "工单名称"} + WFExportWorkflowDescription = &i18n.Message{ID: 
"ExportWorkflowDescription", Other: "工单描述"} + WFExportDataSource = &i18n.Message{ID: "ExportDataSource", Other: "数据源"} + WFExportCreateTime = &i18n.Message{ID: "ExportCreateTime", Other: "创建时间"} + WFExportCreator = &i18n.Message{ID: "ExportCreator", Other: "创建人"} + WFExportTaskOrderStatus = &i18n.Message{ID: "ExportTaskOrderStatus", Other: "工单状态"} + WFExportOperator = &i18n.Message{ID: "ExportOperator", Other: "操作人"} + WFExportExecutionTime = &i18n.Message{ID: "ExportExecutionTime", Other: "工单执行时间"} + WFExportSQLContent = &i18n.Message{ID: "ExportSQLContent", Other: "具体执行SQL内容"} + WFExportNode1Auditor = &i18n.Message{ID: "ExportNode1Auditor", Other: "[节点1]审核人"} + WFExportNode1AuditTime = &i18n.Message{ID: "ExportNode1AuditTime", Other: "[节点1]审核时间"} + WFExportNode1AuditResult = &i18n.Message{ID: "ExportNode1AuditResult", Other: "[节点1]审核结果"} + WFExportNode2Auditor = &i18n.Message{ID: "ExportNode2Auditor", Other: "[节点2]审核人"} + WFExportNode2AuditTime = &i18n.Message{ID: "ExportNode2AuditTime", Other: "[节点2]审核时间"} + WFExportNode2AuditResult = &i18n.Message{ID: "ExportNode2AuditResult", Other: "[节点2]审核结果"} + WFExportNode3Auditor = &i18n.Message{ID: "ExportNode3Auditor", Other: "[节点3]审核人"} + WFExportNode3AuditTime = &i18n.Message{ID: "ExportNode3AuditTime", Other: "[节点3]审核时间"} + WFExportNode3AuditResult = &i18n.Message{ID: "ExportNode3AuditResult", Other: "[节点3]审核结果"} + WFExportNode4Auditor = &i18n.Message{ID: "ExportNode4Auditor", Other: "[节点4]审核人"} + WFExportNode4AuditTime = &i18n.Message{ID: "ExportNode4AuditTime", Other: "[节点4]审核时间"} + WFExportNode4AuditResult = &i18n.Message{ID: "ExportNode4AuditResult", Other: "[节点4]审核结果"} + WFExportExecutor = &i18n.Message{ID: "ExportExecutor", Other: "上线人"} + WFExportExecutionStartTime = &i18n.Message{ID: "ExportExecutionStartTime", Other: "上线开始时间"} + WFExportExecutionEndTime = &i18n.Message{ID: "ExportExecutionEndTime", Other: "上线结束时间"} + WFExportExecutionStatus = &i18n.Message{ID: "ExportExecutionStatus", Other: "上线结果"} +) + +// 
audit plan +var ( + APExportTaskName = &i18n.Message{ID: "APExportTaskName", Other: "扫描任务名称"} + APExportGenerationTime = &i18n.Message{ID: "APExportGenerationTime", Other: "报告生成时间"} + APExportResultRating = &i18n.Message{ID: "APExportResultRating", Other: "审核结果评分"} + APExportApprovalRate = &i18n.Message{ID: "APExportApprovalRate", Other: "审核通过率"} + APExportBelongingProject = &i18n.Message{ID: "APExportBelongingProject", Other: "所属项目"} + APExportCreator = &i18n.Message{ID: "APExportCreator", Other: "扫描任务创建人"} + APExportType = &i18n.Message{ID: "APExportType", Other: "扫描任务类型"} + APExportDbType = &i18n.Message{ID: "APExportDbType", Other: "数据库类型"} + APExportDatabase = &i18n.Message{ID: "APExportDatabase", Other: "审核的数据库"} + + APExportNumber = &i18n.Message{ID: "APExportNumber", Other: "编号"} + APExportAuditResult = &i18n.Message{ID: "APExportAuditResult", Other: "审核结果"} +) + +// sql audit record +var ( + AuditRecordTagFull = &i18n.Message{ID: "AuditRecordTagFull", Other: "全量"} + AuditRecordTagIncrement = &i18n.Message{ID: "AuditRecordTagIncrement", Other: "增量"} +) + +// sql manager +var ( + SMExportTotalSQLCount = &i18n.Message{ID: "SMExportTotalSQLCount", Other: "SQL总数"} + SMExportProblemSQLCount = &i18n.Message{ID: "SMExportProblemSQLCount", Other: "问题SQL数"} + SMExportOptimizedSQLCount = &i18n.Message{ID: "SMExportOptimizedSQLCount", Other: "已优化SQL数"} + + SMExportSQLFingerprint = &i18n.Message{ID: "SMExportSQLFingerprint", Other: "SQL指纹"} + SMExportSQL = &i18n.Message{ID: "SMExportSQL", Other: "SQL"} + SMExportSource = &i18n.Message{ID: "SMExportSource", Other: "来源"} + SMExportDataSource = &i18n.Message{ID: "SMExportDataSource", Other: "数据源"} + SMExportSCHEMA = &i18n.Message{ID: "SMExportSCHEMA", Other: "SCHEMA"} + SMExportAuditResult = &i18n.Message{ID: "SMExportAuditResult", Other: "审核结果"} + SMExportEndpoint = &i18n.Message{ID: "SMExportEndpoint", Other: "端点信息"} + SMExportPersonInCharge = &i18n.Message{ID: "SMExportPersonInCharge", Other: "负责人"} + SMExportState = 
&i18n.Message{ID: "SMExportState", Other: "状态"} + SMExportRemarks = &i18n.Message{ID: "SMExportRemarks", Other: "备注"} + + SQLManageSourceSqlAuditRecord = &i18n.Message{ID: "SQLManageSourceSqlAuditRecord", Other: "SQL审核"} + SQLManageSourceAuditPlan = &i18n.Message{ID: "SQLManageSourceAuditPlan", Other: "智能扫描"} + SQLManageStatusUnhandled = &i18n.Message{ID: "SQLManageStatusUnhandled", Other: "未处理"} + SQLManageStatusSolved = &i18n.Message{ID: "SQLManageStatusSolved", Other: "已解决"} + SQLManageStatusIgnored = &i18n.Message{ID: "SQLManageStatusIgnored", Other: "已忽略"} + SQLManageStatusManualAudited = &i18n.Message{ID: "SQLManageStatusManualAudited", Other: "已人工审核"} +) diff --git a/sqle/model/instance_audit_plan.go b/sqle/model/instance_audit_plan.go index 637b909266..51ab3c0e4d 100644 --- a/sqle/model/instance_audit_plan.go +++ b/sqle/model/instance_audit_plan.go @@ -111,13 +111,10 @@ func (s *Storage) UpdateInstanceAuditPlanByID(id uint, attrs map[string]interfac return errors.New(errors.ConnectStorageError, err) } -// GetLatestAuditPlanIds 获取所有变更过的记录,包括删除 -// 采集时会更新last_collection_time会同步更新updated_at,此处获取updated_at > last_collection_time的任务,即为配置变更过的任务 -// 影响:会查出所有被删除的任务,在syncTask时做一次额外的删除操作 -func (s *Storage) GetLatestAuditPlanRecordsV2() ([]*AuditPlanDetail, error) { +// 获取所有变更过的记录,包括删除 +func (s *Storage) GetLatestAuditPlanRecordsV2(after time.Time) ([]*AuditPlanDetail, error) { var aps []*AuditPlanDetail - err := s.db.Unscoped().Model(AuditPlanV2{}).Select("audit_plans_v2.id, audit_plans_v2.updated_at,audit_plans_v2.last_collection_time"). 
- Where("(audit_plans_v2.updated_at > audit_plans_v2.last_collection_time OR last_collection_time IS NULL)").Order("updated_at").Scan(&aps).Error + err := s.db.Unscoped().Model(AuditPlanV2{}).Select("id, updated_at").Where("updated_at > ?", after).Order("updated_at").Find(&aps).Error return aps, errors.New(errors.ConnectStorageError, err) } @@ -131,9 +128,15 @@ type AuditPlanV2 struct { HighPriorityParams params.ParamsWithOperator `json:"high_priority_params" gorm:"type:varchar(1000)"` NeedMarkHighPrioritySQL bool `json:"need_mark_high_priority_sql"` ActiveStatus string `json:"active_status" gorm:"type:varchar(255)"` - LastCollectionTime *time.Time `json:"last_collection_time" gorm:"type:datetime(3)"` - AuditPlanSQLs []*SQLManageRecord `gorm:"-"` + AuditPlanSQLs []*SQLManageRecord `gorm:"-"` + AuditPlanTaskInfo *AuditPlanTaskInfo `gorm:"foreignkey:AuditPlanID"` +} + +type AuditPlanTaskInfo struct { + Model + AuditPlanID uint `json:"audit_plan_id" gorm:"not null"` + LastCollectionTime *time.Time `json:"last_collection_time" gorm:"type:datetime(3)"` } func (a AuditPlanV2) TableName() string { @@ -306,7 +309,7 @@ func (s *Storage) GetAuditPlanByInstanceIdAndType(instanceAuditPlanID string, au func (s *Storage) GetInstanceAuditPlanDetail(instanceAuditPlanID string) (*InstanceAuditPlan, bool, error) { instanceAuditPlan := &InstanceAuditPlan{} - err := s.db.Model(InstanceAuditPlan{}).Where("id = ?", instanceAuditPlanID).Preload("AuditPlans").First(&instanceAuditPlan).Error + err := s.db.Model(InstanceAuditPlan{}).Where("id = ?", instanceAuditPlanID).Preload("AuditPlans").Preload("AuditPlans.AuditPlanTaskInfo").First(&instanceAuditPlan).Error if err == gorm.ErrRecordNotFound { return instanceAuditPlan, false, nil } @@ -322,11 +325,42 @@ func (s *Storage) GetAuditPlanTotalSQL(auditPlanID uint) (int64, error) { return count, errors.ConnectStorageErrWrapper(err) } +func (s *Storage) SaveInstanceAuditPlan(instAuditPlans *InstanceAuditPlan) error { + return s.Tx(func(txDB 
*gorm.DB) error { + if err := txDB.Save(instAuditPlans).Error; err != nil { + return err + } + apTaskInfos := make([]*AuditPlanTaskInfo, 0, len(instAuditPlans.AuditPlans)) + for _, auditPlan := range instAuditPlans.AuditPlans { + apTaskInfos = append(apTaskInfos, &AuditPlanTaskInfo{ + AuditPlanID: auditPlan.ID, + }) + } + if err := txDB.Save(apTaskInfos).Error; err != nil { + return err + } + return nil + }) +} + func (s *Storage) BatchSaveAuditPlans(auditPlans []*AuditPlanV2) error { return s.Tx(func(txDB *gorm.DB) error { for _, auditPlan := range auditPlans { - if err := txDB.Save(auditPlan).Error; err != nil { - return err + // 新增的扫描任务类型需要保存audit task info + if auditPlan.ID == 0 { + if err := txDB.Save(auditPlan).Error; err != nil { + return err + } + apTaskInfo := &AuditPlanTaskInfo{ + AuditPlanID: auditPlan.ID, + } + if err := txDB.Save(apTaskInfo).Error; err != nil { + return err + } + } else { + if err := txDB.Save(auditPlan).Error; err != nil { + return err + } } } return nil @@ -345,12 +379,14 @@ func (s *Storage) DeleteInstanceAuditPlan(instanceAuditPlanId string) error { } err = txDB.Exec(`UPDATE instance_audit_plans iap LEFT JOIN audit_plans_v2 ap ON iap.id = ap.instance_audit_plan_id - LEFT JOIN sql_manage_records oms ON oms.source_id = ap.instance_audit_plan_id AND oms.source = ap.type - LEFT JOIN sql_manage_record_processes sm ON sm.sql_manage_record_id = oms.id + LEFT JOIN audit_plan_task_infos apti ON apti.audit_plan_id = ap.id + LEFT JOIN sql_manage_records smr ON smr.source_id = ap.instance_audit_plan_id AND smr.source = ap.type + LEFT JOIN sql_manage_record_processes smrp ON smrp.sql_manage_record_id = smr.id SET iap.deleted_at = now(), ap.deleted_at = now(), - oms.deleted_at = now(), - sm.deleted_at = now() + smr.deleted_at = now(), + smrp.deleted_at = now(), + apti.deleted_at = now() WHERE iap.ID = ?`, instanceAuditPlanId).Error if err != nil { return err @@ -369,11 +405,13 @@ func (s *Storage) DeleteAuditPlan(auditPlanID int) error { return 
err } err = txDB.Exec(`UPDATE audit_plans_v2 ap - LEFT JOIN sql_manage_records oms ON oms.source_id = ap.instance_audit_plan_id AND oms.source = ap.type - LEFT JOIN sql_manage_record_processes sm ON sm.sql_manage_record_id = oms.id + LEFT JOIN audit_plan_task_infos apti ON apti.audit_plan_id = ap.id + LEFT JOIN sql_manage_records smr ON smr.source_id = ap.instance_audit_plan_id AND smr.source = ap.type + LEFT JOIN sql_manage_record_processes smrp ON smrp.sql_manage_record_id = smr.id SET ap.deleted_at = now(), - oms.deleted_at = now(), - sm.deleted_at = now() + smr.deleted_at = now(), + smrp.deleted_at = now(), + apti.deleted_at = now() WHERE ap.id = ?`, auditPlanID).Error if err != nil { return err @@ -456,12 +494,15 @@ func (s *Storage) UpdateManagerSQL(sql *SQLManageRecord) error { func (s *Storage) UpdateManagerSQLStatus(sql *SQLManageRecord) error { const query = ` INSERT INTO sql_manage_record_processes (sql_manage_record_id) - SELECT oms.id FROM sql_manage_records oms WHERE oms.sql_id = ? + SELECT smr.id FROM sql_manage_records smr WHERE smr.sql_id = ? 
ON DUPLICATE KEY UPDATE sql_manage_record_id = VALUES(sql_manage_record_id);` return s.db.Exec(query, sql.SQLID).Error } func (s *Storage) UpdateAuditPlanLastCollectionTime(auditPlanID uint, collectionTime time.Time) error { - const query = `UPDATE audit_plans_v2 SET last_collection_time = now(3) WHERE id = ?;` - return s.db.Exec(query, auditPlanID).Error + err := s.db.Model(AuditPlanTaskInfo{}).Where("audit_plan_id = ?", auditPlanID).Update("last_collection_time", collectionTime).Error + if err != nil { + return err + } + return nil } diff --git a/sqle/model/pipline.go b/sqle/model/pipline.go index 78394718cc..d73650faec 100644 --- a/sqle/model/pipline.go +++ b/sqle/model/pipline.go @@ -47,11 +47,11 @@ type Pipeline struct { type PipelineNode struct { gorm.Model PipelineID uint `gorm:"type:bigint;not null;index" json:"pipeline_id"` // 关联的流水线ID - UUID string `gorm:"type:varchar(255);not null" json:"uuid"` // 节点uuid + UUID string `gorm:"type:varchar(32);not null" json:"uuid"` // 节点uuid Name string `gorm:"type:varchar(255);not null" json:"name"` // 节点名称 NodeType string `gorm:"type:varchar(20);not null" json:"node_type"` // 节点类型 - NodeVersion string `gorm:"type:varchar(255)" json:"node_version"` // 节点版本 - InstanceName string `gorm:"type:varchar(255)" json:"instance_name,omitempty"` // 数据源名称,在线审核时必填 + NodeVersion string `gorm:"type:varchar(32)" json:"node_version"` // 节点版本 + InstanceID uint64 `gorm:"type:bigint" json:"instance_id"` // 数据源名称,在线审核时必填 InstanceType string `gorm:"type:varchar(255)" json:"instance_type,omitempty"` // 数据源类型,离线审核时必填 ObjectPath string `gorm:"type:varchar(512);not null" json:"object_path"` // 审核脚本路径 ObjectType string `gorm:"type:varchar(20);not null" json:"object_type"` // 审核对象类型 @@ -140,6 +140,18 @@ func (s *Storage) GetPipelineNodes(pipelineID uint) ([]*PipelineNode, error) { return nodes, nil } +func (s *Storage) GetPipelineNodesByInstanceId(instanceID uint64) ([]*PipelineNode, error) { + if instanceID == 0 { + return nil, 
fmt.Errorf("instance id should not be zero") + } + var nodes []*PipelineNode + err := s.db.Model(PipelineNode{}).Where("instance_id = ?", instanceID).Find(&nodes).Error + if err != nil { + return nodes, errors.New(errors.ConnectStorageError, err) + } + return nodes, nil +} + func (s *Storage) CreatePipeline(pipeline *Pipeline, nodes []*PipelineNode) error { return s.Tx(func(txDB *gorm.DB) error { // 4.1 保存 Pipeline 到数据库 diff --git a/sqle/model/rule.go b/sqle/model/rule.go index ceb26102be..14951332c3 100644 --- a/sqle/model/rule.go +++ b/sqle/model/rule.go @@ -1,11 +1,13 @@ package model import ( + "context" "fmt" "strings" driverV2 "github.com/actiontech/sqle/sqle/driver/v2" "github.com/actiontech/sqle/sqle/errors" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/pkg/params" "gorm.io/gorm" ) @@ -22,59 +24,78 @@ type RuleTemplate struct { } func GenerateRuleByDriverRule(dr *driverV2.Rule, dbType string) *Rule { - return &Rule{ - Name: dr.Name, - Desc: dr.Desc, - Annotation: dr.Annotation, - Level: string(dr.Level), - Typ: dr.Category, - DBType: dbType, - Params: dr.Params, + r := &Rule{ + Name: dr.Name, + Level: string(dr.Level), + DBType: dbType, + Params: dr.Params, Knowledge: &RuleKnowledge{ - Content: dr.Knowledge.Content, + I18nContent: make(driverV2.I18nStr, len(dr.I18nRuleInfo)), }, + I18nRuleInfo: make(driverV2.I18nRuleInfo, len(dr.I18nRuleInfo)), + } + for lang, v := range dr.I18nRuleInfo { + r.Knowledge.I18nContent[lang] = v.Knowledge.Content + r.I18nRuleInfo[lang] = &driverV2.RuleInfo{ + Desc: v.Desc, + Annotation: v.Annotation, + Category: v.Category, + Params: v.Params, + } } + return r } func ConvertRuleToDriverRule(r *Rule) *driverV2.Rule { - return &driverV2.Rule{ - Name: r.Name, - Desc: r.Desc, - Annotation: r.Annotation, - Category: r.Typ, - Level: driverV2.RuleLevel(r.Level), - Params: r.Params, + dr := &driverV2.Rule{ + Name: r.Name, + Level: driverV2.RuleLevel(r.Level), + Params: r.Params, + I18nRuleInfo: 
make(map[string]*driverV2.RuleInfo, len(r.I18nRuleInfo)), + } + for lang, v := range r.I18nRuleInfo { + dr.I18nRuleInfo[lang] = &driverV2.RuleInfo{ + Desc: v.Desc, + Annotation: v.Annotation, + Category: v.Category, + Params: v.Params, + Knowledge: driverV2.RuleKnowledge{}, + } } + return dr } type RuleKnowledge struct { Model - Content string `gorm:"type:longtext"` + Content string `gorm:"type:longtext"` // Deprecated: use I18nContent instead + I18nContent driverV2.I18nStr `gorm:"type:json"` } func (r *RuleKnowledge) TableName() string { return "rule_knowledge" } -func (r *RuleKnowledge) GetContent() string { +func (r *RuleKnowledge) GetContentByLangTag(lang string) string { if r == nil { return "" } - return r.Content + return r.I18nContent.GetStrInLang(lang) } type Rule struct { - Name string `json:"name" gorm:"primary_key; not null;type:varchar(255)"` - DBType string `json:"db_type" gorm:"primary_key; not null; default:\"mysql\";type:varchar(255)"` - Desc string `json:"desc" gorm:"type:varchar(255)"` - Annotation string `json:"annotation" gorm:"column:annotation;type:varchar(255)"` - Level string `json:"level" example:"error" gorm:"type:varchar(255)"` // notice, warn, error - Typ string `json:"type" gorm:"column:type; not null;type:varchar(255)"` - Params params.Params `json:"params" gorm:"type:varchar(1000)"` - KnowledgeId uint `json:"knowledge_id"` - Knowledge *RuleKnowledge `json:"knowledge" gorm:"foreignkey:KnowledgeId"` - HasAuditPower bool `json:"has_audit_power" gorm:"type:bool" example:"true"` - HasRewritePower bool `json:"has_rewrite_power" gorm:"type:bool" example:"true"` + Name string `json:"name" gorm:"primary_key; not null;type:varchar(255)"` + DBType string `json:"db_type" gorm:"primary_key; not null; default:\"mysql\";type:varchar(255)"` + // todo i18n 规则应该不用兼容老sqle数据 + //Desc string `json:"desc" gorm:"type:varchar(255)"` // Deprecated: use driverV2.RuleInfo .Desc in I18nRuleInfo instead + //Annotation string `json:"annotation" 
gorm:"column:annotation;type:varchar(1024)"` // Deprecated: use driverV2.RuleInfo .Annotation in I18nRuleInfo instead + Level string `json:"level" example:"error" gorm:"type:varchar(255)"` // notice, warn, error + //Typ string `json:"type" gorm:"column:type; not null;type:varchar(255)"` // Deprecated: use driverV2.RuleInfo .Category in I18nRuleInfo instead + Params params.Params `json:"params" gorm:"type:varchar(1000)"` + KnowledgeId uint `json:"knowledge_id"` + Knowledge *RuleKnowledge `json:"knowledge" gorm:"foreignkey:KnowledgeId"` + HasAuditPower bool `json:"has_audit_power" gorm:"type:bool" example:"true"` + HasRewritePower bool `json:"has_rewrite_power" gorm:"type:bool" example:"true"` + I18nRuleInfo driverV2.I18nRuleInfo `json:"i18n_rule_info" gorm:"type:json"` } func (r Rule) TableName() string { @@ -293,7 +314,8 @@ func (s *Storage) GetRuleTemplateDetailByNameAndProjectIds(projectIds []string, if fuzzy_keyword_rule == "" { return db } - return db.Where("`desc` like ? OR annotation like ?", fmt.Sprintf("%%%s%%", fuzzy_keyword_rule), fmt.Sprintf("%%%s%%", fuzzy_keyword_rule)) + // todo i18n use json syntax to query? + return db.Where("`i18n_rule_info` like ?", fmt.Sprintf("%%%s%%", fuzzy_keyword_rule)) } t := &RuleTemplate{Name: name} err := s.db.Preload("RuleList", dbOrder).Preload("RuleList.Rule", fuzzy_condition).Preload("CustomRuleList.CustomRule", fuzzy_condition). @@ -331,13 +353,13 @@ func (s *Storage) CloneRuleTemplateRules(source, destination *RuleTemplate) erro return s.UpdateRuleTemplateRules(destination, source.RuleList...) 
} -func (s *Storage) GetRuleTemplateRuleByName(name string, dbType string) (*[]RuleTemplateRule, error) { +func (s *Storage) GetRuleTemplateRuleByName(name string, dbType string) ([]RuleTemplateRule, error) { ruleTemplateRule := []RuleTemplateRule{} result := s.db.Where("rule_name = ?", name).Where("db_type = ?", dbType).Find(&ruleTemplateRule) if result.RowsAffected == 0 { return nil, nil } - return &ruleTemplateRule, errors.New(errors.ConnectStorageError, result.Error) + return ruleTemplateRule, errors.New(errors.ConnectStorageError, result.Error) } func (s *Storage) CloneRuleTemplateCustomRules(source, destination *RuleTemplate) error { @@ -491,7 +513,8 @@ func (s *Storage) GetAuditPlanNamesByRuleTemplateAndProject( return auditPlanNames, nil } -func (s *Storage) GetRuleTypeByDBType(DBType string) ([]string, error) { +func (s *Storage) GetRuleTypeByDBType(ctx context.Context, DBType string) ([]string, error) { + lang := locale.GetLangTagFromCtx(ctx) rules := []*Rule{} err := s.db.Select("type").Where("db_type = ?", DBType).Group("type").Find(&rules).Error if err != nil { @@ -499,17 +522,17 @@ func (s *Storage) GetRuleTypeByDBType(DBType string) ([]string, error) { } ruleDBTypes := make([]string, len(rules)) for i := range rules { - ruleDBTypes[i] = rules[i].Typ + ruleDBTypes[i] = rules[i].I18nRuleInfo.GetRuleInfoByLangTag(lang.String()).Category } return ruleDBTypes, nil } type CustomRule struct { Model - RuleId string `json:"rule_id" gorm:"index:unique; not null; type:varchar(255)"` + RuleId string `json:"rule_id" gorm:"index:unique; not null; type:varchar(255)"` Desc string `json:"desc" gorm:"not null; type:varchar(255)"` - Annotation string `json:"annotation" gorm:"type:varchar(255)"` + Annotation string `json:"annotation" gorm:"type:varchar(1024)"` DBType string `json:"db_type" gorm:"not null; default:\"mysql\"; type:varchar(255)"` Level string `json:"level" example:"error" gorm:"type:varchar(255)"` // notice, warn, error Typ string `json:"type" 
gorm:"column:type; not null; type:varchar(255)"` diff --git a/sqle/model/rule_list.go b/sqle/model/rule_list.go index ab8ddc16a0..01b01b2322 100644 --- a/sqle/model/rule_list.go +++ b/sqle/model/rule_list.go @@ -64,7 +64,8 @@ func (s *Storage) GetRulesByReq(data map[string]interface{}) ( } } if data["fuzzy_keyword_rule"] != "" { - db = db.Where("rules.`desc` like ? OR rules.annotation like ?", fmt.Sprintf("%%%s%%", data["fuzzy_keyword_rule"]), fmt.Sprintf("%%%s%%", data["fuzzy_keyword_rule"])) + // todo i18n use json syntax to query? + db = db.Where("rules.`i18n_rule_info` like ?", fmt.Sprintf("%%%s%%", data["fuzzy_keyword_rule"])) } err = db.Find(&result).Error return result, err diff --git a/sqle/model/task.go b/sqle/model/task.go index a6879784f0..d5025a4377 100644 --- a/sqle/model/task.go +++ b/sqle/model/task.go @@ -2,6 +2,7 @@ package model import ( "bytes" + "context" "database/sql" "database/sql/driver" "encoding/json" @@ -9,9 +10,10 @@ import ( "strings" "time" + driverV2 "github.com/actiontech/sqle/sqle/driver/v2" "github.com/actiontech/sqle/sqle/errors" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/utils" - "gorm.io/gorm" ) @@ -132,27 +134,82 @@ type BaseSQL struct { SQLType string `json:"sql_type" gorm:"type:varchar(255)"` // such as DDL,DML,DQL... 
} -func (s *BaseSQL) GetExecStatusDesc() string { +func (s *BaseSQL) GetExecStatusDesc(ctx context.Context) string { switch s.ExecStatus { case SQLExecuteStatusInitialized: - return "准备执行" + return locale.ShouldLocalizeMsg(ctx, locale.SQLExecuteStatusInitialized) case SQLExecuteStatusDoing: - return "正在执行" + return locale.ShouldLocalizeMsg(ctx, locale.SQLExecuteStatusDoing) case SQLExecuteStatusFailed: - return "执行失败" + return locale.ShouldLocalizeMsg(ctx, locale.SQLExecuteStatusFailed) case SQLExecuteStatusSucceeded: - return "执行成功" + return locale.ShouldLocalizeMsg(ctx, locale.SQLExecuteStatusSucceeded) case SQLExecuteStatusManuallyExecuted: - return "人工执行" + return locale.ShouldLocalizeMsg(ctx, locale.SQLExecuteStatusManuallyExecuted) default: - return "未知" + return locale.ShouldLocalizeMsg(ctx, locale.SQLExecuteStatusUnknown) } } type AuditResult struct { - Level string `json:"level"` - Message string `json:"message"` - RuleName string `json:"rule_name"` + Level string `json:"level"` + Message string `json:"message"` // Deprecated: use I18nAuditResultInfo instead + RuleName string `json:"rule_name"` + I18nAuditResultInfo I18nAuditResultInfo `json:"i18n_audit_result_info"` +} + +func (ar *AuditResult) GetAuditMsgByLangTag(lang string) string { + if len(ar.I18nAuditResultInfo) == 0 { + // 兼容老sqle数据 + return ar.Message + } + return ar.I18nAuditResultInfo.GetAuditResultInfoByLangTag(lang).Message +} + +type AuditResultInfo struct { + Message string +} + +type I18nAuditResultInfo map[string]AuditResultInfo + +func (i *I18nAuditResultInfo) GetAuditResultInfoByLangTag(lang string) *AuditResultInfo { + if i == nil { + return &AuditResultInfo{} + } + for langTag, ari := range *i { + if strings.HasPrefix(lang, langTag) { + return &ari + } + } + ruleInfo := (*i)[locale.DefaultLang.String()] + return &ruleInfo +} + +func (i I18nAuditResultInfo) Value() (driver.Value, error) { + b, err := json.Marshal(i) + return string(b), err +} + +func (i *I18nAuditResultInfo) Scan(input 
interface{}) error { + return json.Unmarshal(input.([]byte), i) +} + +func ConvertI18NAuditResultInfoMapToI18nStr(m I18nAuditResultInfo) driverV2.I18nStr { + s := make(map[string]string, len(m)) + for lang, v := range m { + s[lang] = v.Message + } + return s +} + +func ConvertI18nStrToI18NAuditResultInfoMap(s driverV2.I18nStr) I18nAuditResultInfo { + m := make(I18nAuditResultInfo, len(s)) + for lang, v := range s { + m[lang] = AuditResultInfo{ + Message: v, + } + } + return m } type AuditResults []AuditResult @@ -166,24 +223,54 @@ func (a *AuditResults) Scan(input interface{}) error { return json.Unmarshal(input.([]byte), a) } -func (a *AuditResults) String() string { +// todo check somewhere fmt Sprint AuditResults to frontend? +func (a *AuditResults) String(ctx context.Context) string { + lang := locale.GetLangTagFromCtx(ctx) msgs := make([]string, len(*a)) for i := range *a { res := (*a)[i] - msg := fmt.Sprintf("[%s]%s", res.Level, res.Message) - msgs[i] = msg + msgs[i] = res.GetAuditMsgByLangTag(lang.String()) } return strings.Join(msgs, "\n") } -func (a *AuditResults) Append(level, ruleName, message string) { +func (a *AuditResults) Append(dar *driverV2.AuditResult) { for i := range *a { ar := (*a)[i] - if ar.Level == level && ar.RuleName == ruleName && ar.Message == message { + if ar.Level == string(dar.Level) && ar.RuleName == dar.RuleName { return } } - *a = append(*a, AuditResult{Level: level, RuleName: ruleName, Message: message}) + newAr := ConvertAuditResultFromDriverToModel(dar) + *a = append(*a, *newAr) +} + +func ConvertAuditResultFromDriverToModel(dar *driverV2.AuditResult) *AuditResult { + newAr := &AuditResult{ + Level: string(dar.Level), + RuleName: dar.RuleName, + I18nAuditResultInfo: make(map[string]AuditResultInfo, len(dar.I18nAuditResultInfo)), + } + for langTag, info := range dar.I18nAuditResultInfo { + newAr.I18nAuditResultInfo[langTag] = AuditResultInfo{ + Message: info.Message, + } + } + return newAr +} + +func 
ConvertAuditResultFromModelToDriver(mar *AuditResult) *driverV2.AuditResult { + newAr := &driverV2.AuditResult{ + Level: driverV2.RuleLevel(mar.Level), + RuleName: mar.RuleName, + I18nAuditResultInfo: make(map[string]driverV2.AuditResultInfo, len(mar.I18nAuditResultInfo)), + } + for langTag, info := range mar.I18nAuditResultInfo { + newAr.I18nAuditResultInfo[langTag] = driverV2.AuditResultInfo{ + Message: info.Message, + } + } + return newAr } type ExecuteSQL struct { @@ -201,33 +288,33 @@ func (s ExecuteSQL) TableName() string { return "execute_sql_detail" } -func (s *ExecuteSQL) GetAuditStatusDesc() string { +func (s *ExecuteSQL) GetAuditStatusDesc(ctx context.Context) string { switch s.AuditStatus { case SQLAuditStatusInitialized: - return "未审核" + return locale.ShouldLocalizeMsg(ctx, locale.SQLAuditStatusInitialized) case SQLAuditStatusDoing: - return "正在审核" + return locale.ShouldLocalizeMsg(ctx, locale.SQLAuditStatusDoing) case SQLAuditStatusFinished: - return "审核完成" + return locale.ShouldLocalizeMsg(ctx, locale.SQLAuditStatusFinished) default: - return "未知状态" + return locale.ShouldLocalizeMsg(ctx, locale.SQLAuditStatusUnknown) } } -func (s *ExecuteSQL) GetAuditResults() string { +func (s *ExecuteSQL) GetAuditResults(ctx context.Context) string { if len(s.AuditResults) == 0 { return "" } - return s.AuditResults.String() + return s.AuditResults.String(ctx) } -func (s *ExecuteSQL) GetAuditResultDesc() string { +func (s *ExecuteSQL) GetAuditResultDesc(ctx context.Context) string { if len(s.AuditResults) == 0 { - return "审核通过" + return locale.ShouldLocalizeMsg(ctx, locale.SQLAuditResultDescPass) } - return s.AuditResults.String() + return s.AuditResults.String(ctx) } func (s *Storage) BatchSaveExecuteSqls(models []*ExecuteSQL) error { @@ -478,12 +565,12 @@ type TaskSQLDetail struct { SQLType sql.NullString `json:"sql_type"` } -func (t *TaskSQLDetail) GetAuditResults() string { +func (t *TaskSQLDetail) GetAuditResults(ctx context.Context) string { if 
len(t.AuditResults) == 0 { return "" } - return t.AuditResults.String() + return t.AuditResults.String(ctx) } var taskSQLsQueryTpl = `SELECT e_sql.number, e_sql.description, e_sql.content AS exec_sql, e_sql.source_file AS sql_source_file, e_sql.start_line AS sql_start_line, e_sql.sql_type, r_sql.content AS rollback_sql, diff --git a/sqle/model/utils.go b/sqle/model/utils.go index 322da010a8..011951bb17 100644 --- a/sqle/model/utils.go +++ b/sqle/model/utils.go @@ -20,6 +20,7 @@ import ( "github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx/reflectx" xerrors "github.com/pkg/errors" + "golang.org/x/text/language" "gorm.io/driver/mysql" "gorm.io/gorm" "gorm.io/gorm/logger" @@ -160,6 +161,7 @@ var autoMigrateList = []interface{}{ &FeishuScheduledRecord{}, &InstanceAuditPlan{}, &AuditPlanV2{}, + &AuditPlanTaskInfo{}, &SQLManageRecord{}, &SQLManageRecordProcess{}, &SQLManageQueue{}, @@ -183,10 +185,10 @@ func (s *Storage) AutoMigrate() error { } func (s *Storage) CreateRulesIfNotExist(rulesMap map[string][]*Rule) error { - isRuleExistInDB := func(rulesInDB []*Rule, targetRuleName, dbType string) (*Rule, bool) { + isRuleExistInDB := func(rulesInDB []*Rule, targetRule *Rule, dbType string) (*Rule, bool) { for i := range rulesInDB { rule := rulesInDB[i] - if rule.DBType != dbType || rule.Name != targetRuleName { + if rule.DBType != dbType || rule.Name != targetRule.Name { continue } return rule, true @@ -200,7 +202,7 @@ func (s *Storage) CreateRulesIfNotExist(rulesMap map[string][]*Rule) error { } for dbType, rules := range rulesMap { for _, rule := range rules { - existedRule, exist := isRuleExistInDB(rulesInDB, rule.Name, dbType) + existedRule, exist := isRuleExistInDB(rulesInDB, rule, dbType) // rule will be created or update if: // 1. 
rule not exist; if !exist { @@ -209,10 +211,12 @@ func (s *Storage) CreateRulesIfNotExist(rulesMap map[string][]*Rule) error { return err } } else { - isRuleDescSame := existedRule.Desc == rule.Desc - isRuleAnnotationSame := existedRule.Annotation == rule.Annotation + //isRuleDescSame := existedRule.Desc == rule.Desc + //isRuleAnnotationSame := existedRule.Annotation == rule.Annotation isRuleLevelSame := existedRule.Level == rule.Level - isRuleTypSame := existedRule.Typ == rule.Typ + //isRuleTypSame := existedRule.Typ == rule.Typ + isI18nInfoSame := reflect.DeepEqual(existedRule.I18nRuleInfo, rule.I18nRuleInfo) + isOldKnowledge := existedRule.Knowledge.Content != "" isHasAuditPowerSame := existedRule.HasAuditPower == rule.HasAuditPower isHasRewritePowerSame := existedRule.HasRewritePower == rule.HasRewritePower existRuleParam, err := existedRule.Params.Value() @@ -225,10 +229,19 @@ func (s *Storage) CreateRulesIfNotExist(rulesMap map[string][]*Rule) error { } isParamSame := reflect.DeepEqual(existRuleParam, pluginRuleParam) - if !isRuleDescSame || !isRuleAnnotationSame || !isRuleLevelSame || !isRuleTypSame || !isParamSame || !isHasAuditPowerSame || !isHasRewritePowerSame { - if existedRule.Knowledge != nil && existedRule.Knowledge.Content != "" { - // 知识库是可以在页面上编辑的,而插件里只是默认内容,以页面上编辑后的内容为准 - rule.Knowledge.Content = existedRule.Knowledge.Content + if !isI18nInfoSame || isOldKnowledge || !isRuleLevelSame || !isParamSame || !isHasAuditPowerSame || !isHasRewritePowerSame { + if isOldKnowledge { + // 兼容老sqle的数据,将其移动到中文Key下 + existedRule.Knowledge.I18nContent = driverV2.I18nStr{language.Chinese.String(): existedRule.Knowledge.Content} + existedRule.Knowledge.Content = "" + } + if existedRule.Knowledge != nil && existedRule.Knowledge.I18nContent != nil { + for lang, content := range existedRule.Knowledge.I18nContent { + if content != "" { + // 知识库是可以在页面上编辑的,而插件里只是默认内容,以页面上编辑后的内容为准 + rule.Knowledge.I18nContent.SetStrInLang(lang, content) + } + } } // 保存规则 err := 
s.Save(rule) @@ -254,7 +267,7 @@ func (s *Storage) UpdateRuleTemplateRulesParams(pluginRule *Rule, dbType string) if err != nil { return err } - for _, ruleTemplateRule := range *ruleTemplateRules { + for _, ruleTemplateRule := range ruleTemplateRules { ruleTemplateRuleParamsMap := make(map[string]string) for _, p := range ruleTemplateRule.RuleParams { ruleTemplateRuleParamsMap[p.Key] = p.Value @@ -423,7 +436,6 @@ func (s *Storage) CreateDefaultTemplateIfNotExist(projectId ProjectUID, rules ma t := &RuleTemplate{ ProjectId: projectId, Name: templateName, - Desc: "默认规则模板", DBType: dbType, } if err := s.Save(t); err != nil { diff --git a/sqle/model/workflow.go b/sqle/model/workflow.go index 2d9f36f90b..2bc722113a 100644 --- a/sqle/model/workflow.go +++ b/sqle/model/workflow.go @@ -4,6 +4,8 @@ import ( "database/sql" e "errors" "fmt" + "github.com/actiontech/sqle/sqle/locale" + "github.com/nicksnyder/go-i18n/v2/i18n" "strings" "time" @@ -48,7 +50,6 @@ func DefaultWorkflowTemplate(projectId string) *WorkflowTemplate { return &WorkflowTemplate{ ProjectId: ProjectUID(projectId), Name: fmt.Sprintf("%s-WorkflowTemplate", projectId), - Desc: fmt.Sprintf("%s 默认模板", projectId), AllowSubmitWhenLessAuditLevel: string(driverV2.RuleLevelWarn), Steps: []*WorkflowStepTemplate{ { @@ -208,14 +209,14 @@ const ( WorkflowModeDifferentSQLs = "different_sqls" ) -var WorkflowStatus = map[string]string{ - WorkflowStatusWaitForAudit: "待审核", - WorkflowStatusWaitForExecution: "待上线", - WorkflowStatusReject: "已驳回", - WorkflowStatusCancel: "已关闭", - WorkflowStatusExecuting: "正在上线", - WorkflowStatusExecFailed: "上线失败", - WorkflowStatusFinish: "上线成功", +var WorkflowStatus = map[string]*i18n.Message{ + WorkflowStatusWaitForAudit: locale.WorkflowStatusWaitForAudit, // "待审核", + WorkflowStatusWaitForExecution: locale.WorkflowStatusWaitForExecution, // "待上线", + WorkflowStatusReject: locale.WorkflowStatusReject, // "已驳回", + WorkflowStatusCancel: locale.WorkflowStatusCancel, // "已关闭", + 
WorkflowStatusExecuting: locale.WorkflowStatusExecuting, // "正在上线", + WorkflowStatusExecFailed: locale.WorkflowStatusExecFailed, // "上线失败", + WorkflowStatusFinish: locale.WorkflowStatusFinish, // "上线成功", } type WorkflowRecord struct { diff --git a/sqle/pkg/driver/builder.go b/sqle/pkg/driver/builder.go index d57b13f00b..d9b72faf3d 100644 --- a/sqle/pkg/driver/builder.go +++ b/sqle/pkg/driver/builder.go @@ -9,9 +9,9 @@ import ( "github.com/pkg/errors" ) -type RawSQLRuleHandler func(ctx context.Context, rule *driverV2.Rule, rawSQL string, nextSQL []string) (string, error) +type RawSQLRuleHandler func(ctx context.Context, rule *driverV2.Rule, rawSQL string, nextSQL []string) (driverV2.I18nStr, error) -type AstSQLRuleHandler func(ctx context.Context, rule *driverV2.Rule, astSQL interface{}, nextSQL []string) (string, error) +type AstSQLRuleHandler func(ctx context.Context, rule *driverV2.Rule, astSQL interface{}, nextSQL []string) (driverV2.I18nStr, error) type AuditHandler struct { SqlParserFn func(string) (interface{}, error) @@ -21,7 +21,7 @@ type AuditHandler struct { func (a *AuditHandler) Audit(ctx context.Context, rule *driverV2.Rule, sql string, nextSQL []string) (*driverV2.AuditResult, error) { result := &driverV2.AuditResult{} - message := "" + message := driverV2.I18nStr{} var err error handler, ok := a.RuleToRawHandler[rule.Name] @@ -46,11 +46,15 @@ func (a *AuditHandler) Audit(ctx context.Context, rule *driverV2.Rule, sql strin } } } - if message != "" { + if len(message) != 0 { result.Level = rule.Level - result.Message = message result.RuleName = rule.Name } + for langTag, langMsg := range message { + result.I18nAuditResultInfo[langTag] = driverV2.AuditResultInfo{ + Message: langMsg, + } + } return result, nil } diff --git a/sqle/server/audit.go b/sqle/server/audit.go index 605465326a..d1c3c52ef5 100644 --- a/sqle/server/audit.go +++ b/sqle/server/audit.go @@ -9,6 +9,7 @@ import ( "time" "github.com/actiontech/sqle/sqle/driver" + 
"github.com/actiontech/sqle/sqle/driver/mysql/plocale" "github.com/actiontech/sqle/sqle/driver/mysql/session" driverV2 "github.com/actiontech/sqle/sqle/driver/v2" "github.com/actiontech/sqle/sqle/model" @@ -179,7 +180,7 @@ func hookAudit(l *logrus.Entry, task *model.Task, p driver.Plugin, hook AuditHoo } if whitelistMatch { result := driverV2.NewAuditResults() - result.Add(driverV2.RuleLevelNormal, "", "审核SQL例外") + result.Add(driverV2.RuleLevelNormal, "", plocale.ShouldLocalizeAll(plocale.AuditResultMsgExcludedSQL)) executeSQL.AuditStatus = model.SQLAuditStatusFinished executeSQL.AuditLevel = string(result.Level()) executeSQL.AuditFingerprint = utils.Md5String(string(append([]byte(result.Message()), []byte(node.Fingerprint)...))) @@ -327,7 +328,7 @@ func parse(l *logrus.Entry, p driver.Plugin, sql string) (node driverV2.Node, er func genRollbackSQL(l *logrus.Entry, task *model.Task, p driver.Plugin) ([]*model.RollbackSQL, error) { rollbackSQLs := make([]*model.RollbackSQL, 0, len(task.ExecuteSQLs)) for _, executeSQL := range task.ExecuteSQLs { - rollbackSQL, reason, err := p.GenRollbackSQL(context.TODO(), executeSQL.Content) + rollbackSQL, i18nReason, err := p.GenRollbackSQL(context.TODO(), executeSQL.Content) if err != nil && session.IsParseShowCreateTableContentErr(err) { l.Errorf("gen rollback sql error, %v", err) // todo #1630 临时跳过创表语句解析错误 return nil, nil @@ -338,9 +339,9 @@ func genRollbackSQL(l *logrus.Entry, task *model.Task, p driver.Plugin) ([]*mode result := driverV2.NewAuditResults() for i := range executeSQL.AuditResults { ar := executeSQL.AuditResults[i] - result.Add(driverV2.RuleLevel(ar.Level), ar.RuleName, ar.Message) + result.Add(driverV2.RuleLevel(ar.Level), ar.RuleName, model.ConvertI18NAuditResultInfoMapToI18nStr(ar.I18nAuditResultInfo)) } - result.Add(driverV2.RuleLevelNotice, "", reason) + result.Add(driverV2.RuleLevelNotice, "", i18nReason) executeSQL.AuditLevel = string(result.Level()) appendExecuteSqlResults(executeSQL, result) @@ -358,7 
+359,6 @@ func genRollbackSQL(l *logrus.Entry, task *model.Task, p driver.Plugin) ([]*mode func appendExecuteSqlResults(executeSQL *model.ExecuteSQL, result *driverV2.AuditResults) { for i := range result.Results { - ar := result.Results[i] - executeSQL.AuditResults.Append(string(ar.Level), ar.RuleName, ar.Message) + executeSQL.AuditResults.Append(result.Results[i]) } } diff --git a/sqle/server/auditplan/manager.go b/sqle/server/auditplan/manager.go index ce59871068..f7ebec2744 100644 --- a/sqle/server/auditplan/manager.go +++ b/sqle/server/auditplan/manager.go @@ -230,7 +230,7 @@ func (mgr *Manager) sync() error { } } // 增量同步智能扫描任务,根据数据库记录的更新时间筛选,更新后将下次筛选的时间为上一次记录的最晚的更新时间。 - aps, err := mgr.persist.GetLatestAuditPlanRecordsV2() + aps, err := mgr.persist.GetLatestAuditPlanRecordsV2(*mgr.lastSyncTime) if err != nil { return err } @@ -241,6 +241,7 @@ func (mgr *Manager) sync() error { if err != nil { mgr.logger.WithField("id", ap.ID).Errorf("sync audit task failed, error: %v", err) } + mgr.lastSyncTime = &ap.UpdatedAt } return nil } diff --git a/sqle/server/auditplan/meta.go b/sqle/server/auditplan/meta.go index 732bf82fa9..c60f383b44 100644 --- a/sqle/server/auditplan/meta.go +++ b/sqle/server/auditplan/meta.go @@ -2,7 +2,7 @@ package auditplan import ( "fmt" - + scannerCmd "github.com/actiontech/sqle/sqle/cmd/scannerd/command" "github.com/actiontech/sqle/sqle/pkg/params" "github.com/sirupsen/logrus" @@ -28,18 +28,18 @@ type MetaBuilder struct { const ( TypeDefault = "default" - TypeMySQLSlowLog = "mysql_slow_log" - TypeMySQLMybatis = "mysql_mybatis" + TypeMySQLSlowLog = scannerCmd.TypeMySQLSlowLog + TypeMySQLMybatis = scannerCmd.TypeMySQLMybatis TypeMySQLSchemaMeta = "mysql_schema_meta" TypeMySQLProcesslist = "mysql_processlist" TypeAliRdsMySQLSlowLog = "ali_rds_mysql_slow_log" TypeAliRdsMySQLAuditLog = "ali_rds_mysql_audit_log" TypeHuaweiRdsMySQLSlowLog = "huawei_rds_mysql_slow_log" TypeOracleTopSQL = "oracle_top_sql" - TypeTiDBAuditLog = "tidb_audit_log" + 
TypeTiDBAuditLog = scannerCmd.TypeTiDBAuditLog TypeAllAppExtract = "all_app_extract" TypeBaiduRdsMySQLSlowLog = "baidu_rds_mysql_slow_log" - TypeSQLFile = "sql_file" + TypeSQLFile = scannerCmd.TypeSQLFile ) const ( diff --git a/sqle/server/auditplan/task_wrap.go b/sqle/server/auditplan/task_wrap.go index 08878837f4..bda2a50c3e 100644 --- a/sqle/server/auditplan/task_wrap.go +++ b/sqle/server/auditplan/task_wrap.go @@ -230,7 +230,6 @@ func (at *TaskWrapper) extractSQL() { err = at.persist.UpdateAuditPlanLastCollectionTime(at.ap.ID, collectionTime) if err != nil { at.logger.Errorf("update audit plan last collection time failed, error : %v", err) - return } if len(sqls) == 0 { at.logger.Info("extract sql list is empty, skip") diff --git a/sqle/server/optimization/rule/rule.go b/sqle/server/optimization/rule/rule.go index 561faf6bae..74db70e112 100644 --- a/sqle/server/optimization/rule/rule.go +++ b/sqle/server/optimization/rule/rule.go @@ -2,6 +2,7 @@ package optimization import ( driverV2 "github.com/actiontech/sqle/sqle/driver/v2" + "github.com/actiontech/sqle/sqle/locale" "github.com/actiontech/sqle/sqle/log" ) @@ -86,7 +87,8 @@ func init() { for _, optimizationRule := range OptimizationRuleMap { for i, rule := range optimizationRule { if knowledge, ok := defaultRulesKnowledge[rule.RuleCode]; ok { - rule.Rule.Knowledge = driverV2.RuleKnowledge{Content: knowledge} + // todo i18n rewrite rule Knowledge + rule.Rule.I18nRuleInfo[locale.DefaultLang.String()].Knowledge = driverV2.RuleKnowledge{Content: knowledge} optimizationRule[i] = rule } } diff --git a/sqle/server/optimization/rule/rule_list.go b/sqle/server/optimization/rule/rule_list.go index 818ec6ace4..86184b5668 100644 --- a/sqle/server/optimization/rule/rule_list.go +++ b/sqle/server/optimization/rule/rule_list.go @@ -4,224 +4,245 @@ package optimization import ( + "github.com/actiontech/sqle/sqle/driver/mysql/plocale" rulepkg "github.com/actiontech/sqle/sqle/driver/mysql/rule" driverV2 
"github.com/actiontech/sqle/sqle/driver/v2" "github.com/actiontech/sqle/sqle/pkg/params" ) -var MySQLOptimizationRuleHandler = []OptimizationRuleHandler{ +type SourceOptimizationRuleHandler struct { + Rule rulepkg.SourceRule + RuleCode string +} + +var MySQLOptimizationRuleHandler = generateOptimizationRuleHandlers(mySQLOptimizationRuleHandlerSource) + +var OracleOptimizationRuleHandler = generateOptimizationRuleHandlers(oracleOptimizationRuleHandlerSource) + +func generateOptimizationRuleHandlers(sources []SourceOptimizationRuleHandler) []OptimizationRuleHandler { + result := make([]OptimizationRuleHandler, len(sources)) + for k, v := range sources { + result[k] = OptimizationRuleHandler{ + Rule: *rulepkg.ConvertSourceRule(&v.Rule), + RuleCode: v.RuleCode, + } + } + return result +} + +var mySQLOptimizationRuleHandlerSource = []SourceOptimizationRuleHandler{ { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: rulepkg.DMLHintGroupByRequiresConditions, - Desc: "为GROUP BY显示添加 ORDER BY 条件('代替'!='", - Annotation: "'!=' 是非标准的运算符,'<>' 才是SQL中标准的不等于运算符", + Desc: plocale.OptDMLCheckNotEqualSymbolDesc, + Annotation: plocale.OptDMLCheckNotEqualSymbolAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleUseNonstandardNotEqualOperator, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: rulepkg.DMLCheckLimitOffsetNum, - Desc: "OFFSET的值超过阈值", - Annotation: "使用LIMIT和OFFSET子句可以分别控制查询结果的数量和指定从哪一行开始返回数据。但是,当OFFSET值较大时,查询效率会降低,因为系统必须扫描更多数据才能找到起始行,这在大数据集中尤其会导致性能问题和资源消耗。", + Desc: plocale.OptDMLCheckLimitOffsetNumDesc, + Annotation: plocale.OptDMLCheckLimitOffsetNumAnnotation, Level: driverV2.RuleLevelError, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleLargeOffset, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleNPERewrite, - Desc: "NPE重写", - Annotation: "SQL的NPE(Null Pointer 
Exception)问题是指在SQL查询中,当聚合列全为NULL时,SUM、AVG等聚合函数会返回NULL,这可能会导致后续的程序出现空指针异常。", + Desc: plocale.OptDMLRuleNPERewriteDesc, + Annotation: plocale.OptDMLRuleNPERewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleNPERewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleAllSubqueryRewrite, - Desc: "ALL修饰的子查询重写", - Annotation: "如果ALL子查询的结果中存在NULL,这个SQL永远返回为空。正确的写法应该是在子查询里加上非空限制,或使用max/min的写法。", + Desc: plocale.OptDMLRuleAllSubqueryRewriteDesc, + Annotation: plocale.OptDMLRuleAllSubqueryRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleAllQualifierSubQueryRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleDiffOrderingSpecTypeWarning, - Desc: "排序字段方向不同导致索引失效", - Annotation: "ORDER BY 子句中的所有表达式需要按统一的 ASC 或 DESC 方向排序,才能利用索引来避免排序;如果ORDER BY 语句对多个不同条件使用不同方向的排序无法使用索引", + Desc: plocale.OptDMLRuleDiffOrderingSpecTypeWarningDesc, + Annotation: plocale.OptDMLRuleDiffOrderingSpecTypeWarningAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleDiffOrderingSpecTypeWarning, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleDistinctEliminationRewrite, - Desc: "子查询中的DISTINCT消除", - Annotation: "对于仅进行存在性测试的子查询,如果子查询包含DISTINCT通常可以删除,以避免一次去重操作。", + Desc: plocale.OptDMLRuleDistinctEliminationRewriteDesc, + Annotation: plocale.OptDMLRuleDistinctEliminationRewriteAnnotation, Level: driverV2.RuleLevelWarn, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleDistinctEliminationRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleExists2JoinRewrite, - Desc: "EXISTS查询转换为表连接", - Annotation: "EXISTS子查询可以在适当情况下转换为JOIN来优化查询,提高数据库处理效率和性能。", + Desc: 
plocale.OptDMLRuleExists2JoinRewriteDesc, + Annotation: plocale.OptDMLRuleExists2JoinRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleExists2JoinRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleFilterPredicatePushDownRewrite, - Desc: "过滤谓词下推", - Annotation: "滤条件下推(FPPD)是一种通过将过滤条件提前应用于内部查询块,以减少数据处理量并提升SQL执行效率。", + Desc: plocale.OptDMLRuleFilterPredicatePushDownRewriteDesc, + Annotation: plocale.OptDMLRuleFilterPredicatePushDownRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleFilterPredicatePushDownRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleGroupingFromDiffTablesRewrite, - Desc: "GROUPBY字段来自不同表", - Annotation: "如果分组字段来自不同的表,数据库优化器将没有办法利用索引的有序性来避免一次排序,如果存在等值条件,可以替换这些字段为来自同一张表的字段,以利用索引优化排序和提高查询效率。", + Desc: plocale.OptDMLRuleGroupingFromDiffTablesRewriteDesc, + Annotation: plocale.OptDMLRuleGroupingFromDiffTablesRewriteAnnotation, Level: driverV2.RuleLevelWarn, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleGroupingFromDiffTablesRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleJoinEliminationRewrite, - Desc: "表连接消除", - Annotation: "在不影响结果的情况下通过删除不必要的表连接来简化查询并提升性能,适用于查询仅涉及到主表主键列的场景。", + Desc: plocale.OptDMLRuleJoinEliminationRewriteDesc, + Annotation: plocale.OptDMLRuleJoinEliminationRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleJoinEliminationRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleLimitClausePushDownRewrite, - Desc: "LIMIT下推至UNION分支", - Annotation: "Limit子句下推优化通过尽可能的 “下压” Limit子句,提前过滤掉部分数据, 减少中间结果集的大小,减少后续计算需要处理的数据量, 以提高查询性能。", + Desc: 
plocale.OptDMLRuleLimitClausePushDownRewriteDesc, + Annotation: plocale.OptDMLRuleLimitClausePushDownRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, - Params: params.Params{ - ¶ms.Param{ + Category: plocale.RuleTypeDMLConvention, + Params: []*rulepkg.SourceParam{ + { Key: rulepkg.DefaultSingleParamKeyName, Value: "1000", - Desc: "OFFSET最大阈值", + Desc: plocale.OptDMLRuleLimitClausePushDownRewriteParams1, Type: params.ParamTypeInt, }, }, @@ -229,395 +250,395 @@ var MySQLOptimizationRuleHandler = []OptimizationRuleHandler{ RuleCode: RuleLimitClausePushDownRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleMaxMinAggRewrite, - Desc: "MAX/MIN子查询重写", - Annotation: "对于使用MAX/MIN的子查询,可以通过重写从而利用索引的有序来避免一次聚集运算。", + Desc: plocale.OptDMLRuleMaxMinAggRewriteDesc, + Annotation: plocale.OptDMLRuleMaxMinAggRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleMaxMinAggRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleMoveOrder2LeadingRewrite, - Desc: "ORDER子句重排序优化", - Annotation: "如果一个查询中既包含来自同一个表的排序字段也包含分组字段,但字段顺序不同,可以通过调整分组字段顺序,使其和排序字段顺序一致,这样数据库可以避免一次排序操作。", + Desc: plocale.OptDMLRuleMoveOrder2LeadingRewriteDesc, + Annotation: plocale.OptDMLRuleMoveOrder2LeadingRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleMoveOrder2LeadingRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleOrCond4SelectRewrite, - Desc: "OR条件的SELECT重写", - Annotation: "如果使用OR条件的查询语句,数据库优化器有可能无法使用索引来完成查询,可以把查询语句重写为UNION或UNION ALL查询,以便使用索引提升查询性能。", + Desc: plocale.OptDMLRuleOrCond4SelectRewriteDesc, + Annotation: plocale.OptDMLRuleOrCond4SelectRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, 
RuleCode: RuleOrCond4SelectRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleOrCond4UpDeleteRewrite, - Desc: "OR条件的UPDELETE重写", - Annotation: "如果有使用OR条件的UPDATE或DELETE语句,数据库优化器有可能无法使用索引来完成操作,可以把它重写为多个DELETE语句,利用索引提升查询性能。", + Desc: plocale.OptDMLRuleOrCond4UpDeleteRewriteDesc, + Annotation: plocale.OptDMLRuleOrCond4UpDeleteRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleOrCond4UpDeleteRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleOrderEliminationInSubqueryRewrite, - Desc: "IN子查询中没有LIMIT的排序消除", - Annotation: "如果子查询没有LIMIT子句,那么子查询的排序操作就没有意义,可以将其删除而不影响最终的结果。", + Desc: plocale.OptDMLRuleOrderEliminationInSubqueryRewriteDesc, + Annotation: plocale.OptDMLRuleOrderEliminationInSubqueryRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleOrderEliminationInSubqueryRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleOrderingFromDiffTablesRewrite, - Desc: "避免ORDERBY字段来自不同表", - Annotation: "当排序字段来自不同表时,若存在等值条件,可替换这些字段为来自同一张表的字段,利用索引避免额外排序,提升效率。", + Desc: plocale.OptDMLRuleOrderingFromDiffTablesRewriteDesc, + Annotation: plocale.OptDMLRuleOrderingFromDiffTablesRewriteAnnotation, Level: driverV2.RuleLevelWarn, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleOrderingFromDiffTablesRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleOuter2InnerConversionRewrite, - Desc: "外连接优化", - Annotation: "外连接优化指的是满足一定条件(外表具有NULL拒绝条件)的外连接可以转化为内连接,从而可以让数据库优化器可以选择更优的执行计划,提升SQL查询的性能。", + Desc: plocale.OptDMLRuleOuter2InnerConversionRewriteDesc, + Annotation: plocale.OptDMLRuleOuter2InnerConversionRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: 
plocale.RuleTypeDMLConvention, }, RuleCode: RuleOuter2InnerConversionRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleProjectionPushdownRewrite, - Desc: "投影下推(PROJECTION PUSHDOWN)", - Annotation: "投影下推指的通过删除DT子查询中无意义的列(在外查询中没有使用),来减少IO和网络的代价,同时提升优化器在进行表访问的规划时,采用无需回表的优化选项的几率。", + Desc: plocale.OptDMLRuleProjectionPushdownRewriteDesc, + Annotation: plocale.OptDMLRuleProjectionPushdownRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleProjectionPushdownRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleQualifierSubQueryRewrite, - Desc: "修饰子查询重写优化", - Annotation: "ANY/SOME/ALL修饰的子查询用于比较值关系,但效率低下因为它们逐行处理比较。通过查询重写可以提升这类子查询的执行效率。", + Desc: plocale.OptDMLRuleQualifierSubQueryRewriteDesc, + Annotation: plocale.OptDMLRuleQualifierSubQueryRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleQualifierSubQueryRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleQueryFoldingRewrite, - Desc: "查询折叠(QUERY FOLDING)", - Annotation: "查询折叠指的是把视图、CTE或是DT子查询展开,并与引用它的查询语句合并,来减少序列化中间结果集,或是触发更优的关于表连接规划的优化技术。", + Desc: plocale.OptDMLRuleQueryFoldingRewriteDesc, + Annotation: plocale.OptDMLRuleQueryFoldingRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleQueryFoldingRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: DMLRuleSATTCRewrite, - Desc: "SATTC重写优化", - Annotation: "SAT-TC重写优化通过分析和处理查询条件的逻辑关系,以发现矛盾、简化条件或推断新条件,从而帮助数据库优化器制定更高效的执行计划,提升SQL性能。", + Desc: plocale.OptDMLRuleSATTCRewriteDesc, + Annotation: plocale.OptDMLRuleSATTCRewriteAnnotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: 
RuleSATTCRewrite, }, } -var OracleOptimizationRuleHandler = []OptimizationRuleHandler{ +var oracleOptimizationRuleHandlerSource = []SourceOptimizationRuleHandler{ { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_500", - Desc: "NPE重写", - Annotation: "SQL的NPE(Null Pointer Exception)问题是指在SQL查询中,当聚合列全为NULL时,SUM、AVG等聚合函数会返回NULL,这可能会导致后续的程序出现空指针异常。", + Desc: plocale.OptOracle500Desc, + Annotation: plocale.OptOracle500Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleNPERewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_501", - Desc: "ALL修饰的子查询重写", - Annotation: "如果ALL子查询的结果中存在NULL,这个SQL永远返回为空。正确的写法应该是在子查询里加上非空限制,或使用max/min的写法。", + Desc: plocale.OptOracle501Desc, + Annotation: plocale.OptOracle501Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleAllQualifierSubQueryRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_502", - Desc: "COUNT标量子查询重写", - Annotation: "对于使用COUNT标量子查询来进行判断是否存在,可以重写为EXISTS子查询,从而避免一次聚集运算。", + Desc: plocale.OptOracle502Desc, + Annotation: plocale.OptOracle502Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleCntGtThanZeroRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_503", - Desc: "无条件的DELETE建议重写为Truncate", - Annotation: "TRUNCATE TABLE 比 DELETE 速度快,且使用的系统和事务日志资源少,同时TRUNCATE后表所占用的空间会被释放,而DELETE后需要手工执行OPTIMIZE才能释放表空间", + Desc: plocale.OptOracle503Desc, + Annotation: plocale.OptOracle503Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleDelete2TruncateRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_504", - Desc: "隐式类型转换导致索引失效", - 
Annotation: "WHERE条件中使用与过滤字段不一致的数据类型会引发隐式数据类型转换,导致查询有无法命中索引的风险,在高并发、大数据量的情况下,不走索引会使得数据库的查询性能严重下降", + Desc: plocale.OptOracle504Desc, + Annotation: plocale.OptOracle504Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleDiffDataTypeInPredicateWrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_505", - Desc: "排序字段方向不同导致索引失效", - Annotation: "ORDER BY 子句中的所有表达式需要按统一的 ASC 或 DESC 方向排序,才能利用索引来避免排序;如果ORDER BY 语句对多个不同条件使用不同方向的排序无法使用索引", + Desc: plocale.OptOracle505Desc, + Annotation: plocale.OptOracle505Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDDLConvention, + Category: plocale.RuleTypeDDLConvention, }, RuleCode: RuleDiffOrderingSpecTypeWarning, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_506", - Desc: "索引列上的运算导致索引失效", - Annotation: "在索引列上的运算将导致索引失效,容易造成全表扫描,产生严重的性能问题。所以需要尽量将索引列上的运算转换到常量端进行。", + Desc: plocale.OptOracle506Desc, + Annotation: plocale.OptOracle506Annotation, Level: driverV2.RuleLevelError, - Category: rulepkg.RuleTypeIndexInvalidation, + Category: plocale.RuleTypeIndexInvalidation, }, RuleCode: RuleFuncWithColumnInPredicate, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_507", - Desc: "HAVING条件下推", - Annotation: "从逻辑上,HAVING条件是在分组之后执行的,而WHERE子句上的条件可以在表访问的时候(索引访问),或是表访问之后、分组之前执行,这两种条件都比在分组之后执行代价要小。", + Desc: plocale.OptOracle507Desc, + Annotation: plocale.OptOracle507Annotation, Level: driverV2.RuleLevelWarn, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleHavingCond2WhereCondRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_508", - Desc: "禁止使用=NULL判断空值", - Annotation: "= null并不能判断表达式为空,= null总是被判断为假。判断表达式为空应该使用is null。", + Desc: plocale.OptOracle508Desc, + Annotation: plocale.OptOracle508Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + 
Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleUseEqual4NullRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_509", - Desc: "IN子查询优化", - Annotation: "IN子查询是指符合下面形式的子查询,IN子查询可以改写成等价的相关EXISTS子查询或是内连接,从而可以产生一个新的过滤条件。", + Desc: plocale.OptOracle509Desc, + Annotation: plocale.OptOracle509Annotation, Level: driverV2.RuleLevelWarn, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleInSubqueryRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_510", - Desc: "IN可空子查询可能导致结果集不符合预期", - Annotation: "查询条件永远非真,这将导致查询无匹配到的结果", + Desc: plocale.OptOracle510Desc, + Annotation: plocale.OptOracle510Annotation, Level: driverV2.RuleLevelError, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleNotInNullableSubQueryRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_511", - Desc: "避免使用没有通配符的 LIKE 查询", - Annotation: "不包含通配符的LIKE 查询逻辑上与等值查询相同,建议使用等值查询替代。而且不包含通配符的LIKE 查询逻辑通常是由于开发者错误导致的,可能不符合其期望的业务逻辑实现", + Desc: plocale.OptOracle511Desc, + Annotation: plocale.OptOracle511Annotation, Level: driverV2.RuleLevelWarn, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleNoWildcardInPredicateLikeWarning, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_512", - Desc: "建议使用'<>'代替'!='", - Annotation: "'!=' 是非标准的运算符,'<>' 才是SQL中标准的不等于运算符", + Desc: plocale.OptOracle512Desc, + Annotation: plocale.OptOracle512Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleUseNonstandardNotEqualOperator, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_513", - Desc: "子查询中的DISTINCT消除", - Annotation: "对于仅进行存在性测试的子查询,如果子查询包含DISTINCT通常可以删除,以避免一次去重操作。", + Desc: plocale.OptOracle513Desc, + Annotation: plocale.OptOracle513Annotation, Level: 
driverV2.RuleLevelWarn, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleDistinctEliminationRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_514", - Desc: "EXISTS查询转换为表连接", - Annotation: "EXISTS子查询可以在适当情况下转换为JOIN来优化查询,提高数据库处理效率和性能。", + Desc: plocale.OptOracle514Desc, + Annotation: plocale.OptOracle514Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleExists2JoinRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_515", - Desc: "过滤谓词下推", - Annotation: "滤条件下推(FPPD)是一种通过将过滤条件提前应用于内部查询块,以减少数据处理量并提升SQL执行效率。", + Desc: plocale.OptOracle515Desc, + Annotation: plocale.OptOracle515Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleFilterPredicatePushDownRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_516", - Desc: "GROUPBY字段来自不同表", - Annotation: "如果分组字段来自不同的表,数据库优化器将没有办法利用索引的有序性来避免一次排序,如果存在等值条件,可以替换这些字段为来自同一张表的字段,以利用索引优化排序和提高查询效率。", + Desc: plocale.OptOracle516Desc, + Annotation: plocale.OptOracle516Annotation, Level: driverV2.RuleLevelWarn, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleGroupingFromDiffTablesRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_517", - Desc: "表连接消除", - Annotation: "在不影响结果的情况下通过删除不必要的表连接来简化查询并提升性能,适用于查询仅涉及到主表主键列的场景。", + Desc: plocale.OptOracle517Desc, + Annotation: plocale.OptOracle517Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleJoinEliminationRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_518", - Desc: "MAX/MIN子查询重写", - Annotation: "对于使用MAX/MIN的子查询,可以通过重写从而利用索引的有序来避免一次聚集运算。", + Desc: 
plocale.OptOracle518Desc, + Annotation: plocale.OptOracle518Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleMaxMinAggRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_519", - Desc: "ORDER子句重排序优化", - Annotation: "如果一个查询中既包含来自同一个表的排序字段也包含分组字段,但字段顺序不同,可以通过调整分组字段顺序,使其和排序字段顺序一致,这样数据库可以避免一次排序操作。", + Desc: plocale.OptOracle519Desc, + Annotation: plocale.OptOracle519Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleMoveOrder2LeadingRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_520", - Desc: "OR条件的SELECT重写", - Annotation: "如果使用OR条件的查询语句,数据库优化器有可能无法使用索引来完成查询,可以把查询语句重写为UNION或UNION ALL查询,以便使用索引提升查询性能。", + Desc: plocale.OptOracle520Desc, + Annotation: plocale.OptOracle520Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleOrCond4SelectRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_521", - Desc: "OR条件的UPDELETE重写", - Annotation: "如果有使用OR条件的UPDATE或DELETE语句,数据库优化器有可能无法使用索引来完成操作,可以把它重写为多个DELETE语句,利用索引提升查询性能。", + Desc: plocale.OptOracle521Desc, + Annotation: plocale.OptOracle521Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleOrCond4UpDeleteRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_522", - Desc: "避免ORDERBY字段来自不同表", - Annotation: "当排序字段来自不同表时,若存在等值条件,可替换这些字段为来自同一张表的字段,利用索引避免额外排序,提升效率。", + Desc: plocale.OptOracle522Desc, + Annotation: plocale.OptOracle522Annotation, Level: driverV2.RuleLevelWarn, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleOrderingFromDiffTablesRewrite, }, { - Rule: driverV2.Rule{ + Rule: 
rulepkg.SourceRule{ Name: "Oracle_523", - Desc: "外连接优化", - Annotation: "外连接优化指的是满足一定条件(外表具有NULL拒绝条件)的外连接可以转化为内连接,从而可以让数据库优化器可以选择更优的执行计划,提升SQL查询的性能。", + Desc: plocale.OptOracle523Desc, + Annotation: plocale.OptOracle523Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleOuter2InnerConversionRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_524", - Desc: "投影下推(PROJECTION PUSHDOWN)", - Annotation: "投影下推指的通过删除DT子查询中无意义的列(在外查询中没有使用),来减少IO和网络的代价,同时提升优化器在进行表访问的规划时,采用无需回表的优化选项的几率。", + Desc: plocale.OptOracle524Desc, + Annotation: plocale.OptOracle524Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleProjectionPushdownRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_525", - Desc: "修饰子查询重写优化", - Annotation: "ANY/SOME/ALL修饰的子查询用于比较值关系,但效率低下因为它们逐行处理比较。通过查询重写可以提升这类子查询的执行效率。", + Desc: plocale.OptOracle525Desc, + Annotation: plocale.OptOracle525Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleQualifierSubQueryRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_526", - Desc: "查询折叠(QUERY FOLDING)", - Annotation: "查询折叠指的是把视图、CTE或是DT子查询展开,并与引用它的查询语句合并,来减少序列化中间结果集,或是触发更优的关于表连接规划的优化技术。", + Desc: plocale.OptOracle526Desc, + Annotation: plocale.OptOracle526Annotation, Level: driverV2.RuleLevelNotice, - Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleQueryFoldingRewrite, }, { - Rule: driverV2.Rule{ + Rule: rulepkg.SourceRule{ Name: "Oracle_527", - Desc: "SATTC重写优化", - Annotation: "SAT-TC重写优化通过分析和处理查询条件的逻辑关系,以发现矛盾、简化条件或推断新条件,从而帮助数据库优化器制定更高效的执行计划,提升SQL性能。", + Desc: plocale.OptOracle527Desc, + Annotation: plocale.OptOracle527Annotation, Level: driverV2.RuleLevelNotice, 
- Category: rulepkg.RuleTypeDMLConvention, + Category: plocale.RuleTypeDMLConvention, }, RuleCode: RuleSATTCRewrite, }, diff --git a/sqle/server/pipeline/pipeline.go b/sqle/server/pipeline/pipeline.go index a7bb43346b..0e0ac4f1f3 100644 --- a/sqle/server/pipeline/pipeline.go +++ b/sqle/server/pipeline/pipeline.go @@ -9,6 +9,7 @@ import ( dmsCommonJwt "github.com/actiontech/dms/pkg/dms-common/api/jwt" "github.com/actiontech/sqle/sqle/api/controller" + scannerCmd "github.com/actiontech/sqle/sqle/cmd/scannerd/command" "github.com/actiontech/sqle/sqle/dms" "github.com/actiontech/sqle/sqle/model" "github.com/aliyun/credentials-go/credentials/utils" @@ -30,40 +31,56 @@ func (pipe Pipeline) NodeCount() uint32 { return uint32(len(pipe.PipelineNodes)) } -func (node PipelineNode) IntegrationInfo() string { +func (node PipelineNode) IntegrationInfo() (string, error) { dmsAddr := controller.GetDMSServerAddress() parsedURL, err := url.Parse(dmsAddr) if err != nil { - return "" + return "", err } ip, port, err := net.SplitHostPort(parsedURL.Host) if err != nil { - return "" + return "", err + } + if node.InstanceID != 0 { + instance, _, err := dms.GetInstancesById(context.TODO(), fmt.Sprint(node.InstanceID)) + if err != nil { + return "", err + } + node.InstanceName = instance.Name } switch model.PipelineNodeType(node.NodeType) { case model.NodeTypeAudit: var cmdUsage = "#使用方法#\n1. 确保运行该命令的用户具有scannerd的执行权限。\n2. 
在scannerd文件所在目录执行启动命令。\n#启动命令#\n" - baseCmd := "./scannerd %s --host=\"%s\" --port=\"%s\" --dir=\"%s\" --token=\"%s\"" - var extraArgs string + + var cmd string var cmdType string if model.ObjectType(node.ObjectType) == model.ObjectTypeSQL { - cmdType = "sql_file" + cmdType = scannerCmd.TypeSQLFile } if model.ObjectType(node.ObjectType) == model.ObjectTypeMyBatis { - cmdType = "mysql_mybatis" + cmdType = scannerCmd.TypeMySQLMybatis } - if model.AuditMethod(node.AuditMethod) == model.AuditMethodOnline { - extraArgs = fmt.Sprintf(" --instance-name=\"%s\"", node.InstanceName) + sqlfile, err := scannerCmd.GetScannerdCmd(cmdType) + if err != nil { + return "", err } - if model.AuditMethod(node.AuditMethod) == model.AuditMethodOffline { - extraArgs = fmt.Sprintf(" --db-type=\"%s\"", node.InstanceType) + cmd, err = sqlfile.GenCommand("./scannerd", map[string]string{ + scannerCmd.FlagHost: ip, + scannerCmd.FlagPort: port, + scannerCmd.FlagToken: node.Token, + scannerCmd.FlagDirectory: node.ObjectPath, + scannerCmd.FlagDbType: node.InstanceType, + scannerCmd.FlagInstanceName: node.InstanceName, + }) + if err != nil { + return "", err } - return fmt.Sprintf(cmdUsage+baseCmd+extraArgs, cmdType, ip, port, node.ObjectPath, node.Token) + return cmdUsage + cmd, nil case model.NodeTypeRelease: - return "" + return "", fmt.Errorf("unsupported node type release") default: - return "" + return "", fmt.Errorf("unsupported node type unknown") } } @@ -73,6 +90,7 @@ type PipelineNode struct { Name string // 节点名称,必填,支持中文、英文+数字+特殊字符 NodeType string // 节点类型,必填,选项为“审核”或“上线” InstanceName string // 数据源名称,在线审核时必填 + InstanceID uint64 // 数据源ID InstanceType string // 数据源类型,在线审核时必填 ObjectPath string // 审核脚本路径,必填,用户填写文件路径 ObjectType string // 审核对象类型,必填,可选项为SQL文件、MyBatis文件 @@ -111,6 +129,7 @@ func (svc PipelineSvc) CheckInstance(ctx context.Context, pipe *Pipeline) (err e return fmt.Errorf("instance does not exist") } node.InstanceType = instance.DbType + node.InstanceID = instance.ID } } return nil } @@ 
-157,7 +176,7 @@ func (svc PipelineSvc) toModelPipelineNodes(pipe *Pipeline, userId string) []*mo PipelineID: pipe.ID, // 需要将 Pipeline 的 ID 关联到 Node 上 Name: node.Name, NodeType: node.NodeType, - InstanceName: node.InstanceName, + InstanceID: node.InstanceID, InstanceType: node.InstanceType, ObjectPath: node.ObjectPath, ObjectType: node.ObjectType, @@ -252,7 +271,7 @@ func (svc PipelineSvc) toPipelineNode(modelPipelineNode *model.PipelineNode) *Pi ID: modelPipelineNode.ID, Name: modelPipelineNode.Name, NodeType: modelPipelineNode.NodeType, - InstanceName: modelPipelineNode.InstanceName, + InstanceID: modelPipelineNode.InstanceID, InstanceType: modelPipelineNode.InstanceType, ObjectPath: modelPipelineNode.ObjectPath, ObjectType: modelPipelineNode.ObjectType, @@ -280,7 +299,7 @@ func (svc PipelineSvc) needUpdateToken(oldNode *model.PipelineNode, newNode *Pip newNode.ObjectPath != oldNode.ObjectPath || newNode.ObjectType != oldNode.ObjectType || newNode.AuditMethod != oldNode.AuditMethod || - newNode.InstanceName != oldNode.InstanceName || + newNode.InstanceID != oldNode.InstanceID || newNode.InstanceType != oldNode.InstanceType } @@ -386,7 +405,7 @@ func (svc PipelineSvc) UpdatePipeline(pipe *Pipeline, userId string) error { PipelineID: pipe.ID, Name: newNode.Name, NodeType: newNode.NodeType, - InstanceName: newNode.InstanceName, + InstanceID: newNode.InstanceID, InstanceType: newNode.InstanceType, ObjectPath: newNode.ObjectPath, ObjectType: newNode.ObjectType, @@ -414,7 +433,7 @@ func (svc PipelineSvc) DeletePipeline(projectUID string, pipelineID uint) error } // 删除 pipeline - if err := txDB.Model(&model.Pipeline{}).Where("id = ?", pipelineID).Delete(&model.Pipeline{}).Error; err != nil { + if err := txDB.Model(&model.Pipeline{}).Where("project_uid = ? 
AND id = ?", projectUID, pipelineID).Delete(&model.Pipeline{}).Error; err != nil { return fmt.Errorf("failed to delete pipeline: %w", err) } diff --git a/sqle/server/sqled_test.go b/sqle/server/sqled_test.go index 4042006b85..2e747db805 100644 --- a/sqle/server/sqled_test.go +++ b/sqle/server/sqled_test.go @@ -85,8 +85,8 @@ func (d *mockDriver) Audit(ctx context.Context, sqls []string) ([]*driverV2.Audi return nil, nil } -func (d *mockDriver) GenRollbackSQL(ctx context.Context, sql string) (string, string, error) { - return "", "", nil +func (d *mockDriver) GenRollbackSQL(ctx context.Context, sql string) (string, driverV2.I18nStr, error) { + return "", nil, nil } func (d *mockDriver) Explain(ctx context.Context, conf *driverV2.ExplainConf) (*driverV2.ExplainResult, error) { diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 0000000000..fe79e3adda --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,2 @@ +/toml.test +/toml-test diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 0000000000..3651cfa960 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,120 @@ +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` packages. + +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). + +Documentation: https://godocs.io/github.com/BurntSushi/toml + +See the [releases page](https://github.com/BurntSushi/toml/releases) for a +changelog; this information is also in the git tag annotations (e.g. `git show +v0.4.0`). 
+ +This library requires Go 1.13 or newer; add it to your go.mod with: + + % go get github.com/BurntSushi/toml@latest + +It also comes with a TOML validator CLI tool: + + % go install github.com/BurntSushi/toml/cmd/tomlv@latest + % tomlv some-toml-file.toml + +### Examples +For the simplest example, consider some TOML file as just a list of keys and +values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which can be decoded with: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time +} + +var conf Config +_, err := toml.Decode(tomlData, &conf) +``` + +You can also use struct tags if your struct field name doesn't map to a TOML key +value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +Beware that like other decoders **only exported fields** are considered when +encoding and decoding; private fields are silently ignored. + +### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces +Here's an example that automatically parses values in a `mail.Address`: + +```toml +contacts = [ + "Donald Duck ", + "Scrooge McDuck ", +] +``` + +Can be decoded with: + +```go +// Create address type which satisfies the encoding.TextUnmarshaler interface. +type address struct { + *mail.Address +} + +func (a *address) UnmarshalText(text []byte) error { + var err error + a.Address, err = mail.ParseAddress(string(text)) + return err +} + +// Decode it. 
+func decode() { + blob := ` + contacts = [ + "Donald Duck ", + "Scrooge McDuck ", + ] + ` + + var contacts struct { + Contacts []address + } + + _, err := toml.Decode(blob, &contacts) + if err != nil { + log.Fatal(err) + } + + for _, c := range contacts.Contacts { + fmt.Printf("%#v\n", c.Address) + } + + // Output: + // &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"} + // &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"} +} +``` + +To target TOML specifically you can implement `UnmarshalTOML` TOML interface in +a similar way. + +### More complex usage +See the [`_example/`](/_example) directory for a more complex example. diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 0000000000..4d38f3bfce --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,602 @@ +package toml + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "reflect" + "strconv" + "strings" + "time" +) + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of data in TOML format into a pointer v. +// +// See [Decoder] for a description of the decoding process. +func Unmarshal(data []byte, v interface{}) error { + _, err := NewDecoder(bytes.NewReader(data)).Decode(v) + return err +} + +// Decode the TOML data in to the pointer v. +// +// See [Decoder] for a description of the decoding process. +func Decode(data string, v interface{}) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile reads the contents of a file and decodes it with [Decode]. 
+func DecodeFile(path string, v interface{}) (MetaData, error) { + fp, err := os.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// +// This type can be used for any value, which will cause decoding to be delayed. +// You can use [PrimitiveDecode] to "manually" decode these values. +// +// NOTE: The underlying representation of a `Primitive` value is subject to +// change. Do not rely on it. +// +// NOTE: Primitive values are still parsed, so using them will only avoid the +// overhead of reflection. They can be useful when you don't know the exact type +// of TOML data until runtime. +type Primitive struct { + undecoded interface{} + context Key +} + +// The significand precision for float32 and float64 is 24 and 53 bits; this is +// the range a natural number can be stored in a float without loss of data. +const ( + maxSafeFloat32Int = 16777215 // 2^24-1 + maxSafeFloat64Int = int64(9007199254740991) // 2^53-1 +) + +// Decoder decodes TOML data. +// +// TOML tables correspond to Go structs or maps; they can be used +// interchangeably, but structs offer better type safety. +// +// TOML table arrays correspond to either a slice of structs or a slice of maps. +// +// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the +// local timezone. +// +// [time.Duration] types are treated as nanoseconds if the TOML value is an +// integer, or they're parsed with time.ParseDuration() if they're strings. +// +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. +// +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. 
See the Unmarshaler example for a demonstration with +// email addresses. +// +// # Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there may +// exist TOML values that cannot be placed into your representation, and there +// may be parts of your representation that do not correspond to TOML values. +// This loose mapping can be made stricter by using the IsDefined and/or +// Undecoded methods on the MetaData returned. +// +// This decoder does not handle cyclic types. Decode will not terminate if a +// cyclic type is passed. +type Decoder struct { + r io.Reader +} + +// NewDecoder creates a new Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +var ( + unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem() +) + +// Decode TOML data in to the pointer `v`. +func (dec *Decoder) Decode(v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + s := "%q" + if reflect.TypeOf(v) == nil { + s = "%v" + } + + return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) + } + + // Check if this is a supported type: struct, map, interface{}, or something + // that implements UnmarshalTOML or UnmarshalText. 
+ rv = indirect(rv) + rt := rv.Type() + if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && + !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && + !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { + return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) + } + + // TODO: parser should read from io.Reader? Or at the very least, make it + // read from []byte rather than string + data, err := ioutil.ReadAll(dec.r) + if err != nil { + return MetaData{}, err + } + + p, err := parse(string(data)) + if err != nil { + return MetaData{}, err + } + + md := MetaData{ + mapping: p.mapping, + keyInfo: p.keyInfo, + keys: p.ordered, + decoded: make(map[string]struct{}, len(p.ordered)), + context: nil, + data: data, + } + return md, md.unify(p.mapping, rv) +} + +// PrimitiveDecode is just like the other Decode* functions, except it decodes a +// TOML value that has already been parsed. Valid primitive values can *only* be +// obtained from values filled by the decoder functions, including this method. +// (i.e., v may contain more [Primitive] values.) +// +// Meta data for primitive values is included in the meta data returned by the +// Decode* functions with one exception: keys returned by the Undecoded method +// will only reflect keys that were decoded. Namely, any keys hidden behind a +// Primitive will be considered undecoded. Executing this method will update the +// undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. 
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + // Special case. Look for a `Primitive` value. + // TODO: #76 would make this superfluous after implemented. + if rv.Type() == primitiveType { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + rvi := rv.Interface() + if v, ok := rvi.(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + if v, ok := rvi.(encoding.TextUnmarshaler); ok { + return md.unifyText(data, v) + } + + // TODO: + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or + // array. In particular, the unmarshaler should only be applied to primitive + // TOML values. But at this point, it will be applied to all kinds of values + // and produce an incorrect error whenever those values are hashes or arrays + // (including arrays of tables). + + k := rv.Kind() + + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + if rv.NumMethod() > 0 { /// Only empty interfaces are supported. 
+ return md.e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32, reflect.Float64: + return md.unifyFloat64(data, rv) + } + return md.e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return md.e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = struct{}{} + md.context = append(md.context, key) + + err := md.unify(datum, subv) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + keyType := rv.Type().Key().Kind() + if keyType != reflect.String && keyType != reflect.Interface { + return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", + keyType, rv.Type()) + } + + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return md.badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = struct{}{} + md.context = append(md.context, k) + + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + + err := md.unify(v, indirect(rvval)) + if err != nil { + return err + } + md.context = md.context[0 : 
len(md.context)-1] + + rvkey := indirect(reflect.New(rv.Type().Key())) + + switch keyType { + case reflect.Interface: + rvkey.Set(reflect.ValueOf(k)) + case reflect.String: + rvkey.SetString(k) + } + + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + if l := datav.Len(); l != rv.Len() { + return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + _, ok := rv.Interface().(json.Number) + if ok { + if i, ok := data.(int64); ok { + rv.SetString(strconv.FormatInt(i, 10)) + } else if f, ok := data.(float64); ok { + rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + } else { + return md.badtype("string", data) + } + return nil + } + + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return md.badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + rvk := rv.Kind() + + if num, ok := data.(float64); ok { + switch rvk { + case reflect.Float32: + if num < -math.MaxFloat32 || num > math.MaxFloat32 { + return 
md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + + if num, ok := data.(int64); ok { + if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || + (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetFloat(float64(num)) + return nil + } + + return md.badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + _, ok := rv.Interface().(time.Duration) + if ok { + // Parse as string duration, and fall back to regular integer parsing + // (as nanosecond) if this is not a string. + if s, ok := data.(string); ok { + dur, err := time.ParseDuration(s) + if err != nil { + return md.parseErr(errParseDuration{s}) + } + rv.SetInt(int64(dur)) + return nil + } + } + + num, ok := data.(int64) + if !ok { + return md.badtype("integer", data) + } + + rvk := rv.Kind() + switch { + case rvk >= reflect.Int && rvk <= reflect.Int64: + if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || + (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || + (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetInt(num) + case rvk >= reflect.Uint && rvk <= reflect.Uint64: + unum := uint64(num) + if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || + rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || + rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetUint(unum) + default: + panic("unreachable") + } + return nil +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return 
md.badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case Marshaler: + text, err := sdata.MarshalTOML() + if err != nil { + return err + } + s = string(text) + case encoding.TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return md.badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +func (md *MetaData) badtype(dst string, data interface{}) error { + return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst) +} + +func (md *MetaData) parseErr(err error) error { + k := md.context.String() + return ParseError{ + LastKey: k, + Position: md.keyInfo[k].pos, + Line: md.keyInfo[k].pos.Line, + err: err, + input: string(md.data), + } +} + +func (md *MetaData) e(format string, args ...interface{}) error { + f := "toml: " + if len(md.context) > 0 { + f = fmt.Sprintf("toml: (last key %q): ", md.context) + p := md.keyInfo[md.context.String()].pos + if p.Line > 0 { + f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) + } + } + return fmt.Errorf(f+format, args...) +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// +// Pointers are followed until the value is not a pointer. New values are +// allocated for each nil pointer. 
+// +// An exception to this rule is if the value satisfies an interface of interest +// to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + pvi := pv.Interface() + if _, ok := pvi.(encoding.TextUnmarshaler); ok { + return pv + } + if _, ok := pvi.(Unmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + rvi := rv.Interface() + if _, ok := rvi.(encoding.TextUnmarshaler); ok { + return true + } + if _, ok := rvi.(Unmarshaler); ok { + return true + } + return false +} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go new file mode 100644 index 0000000000..086d0b6866 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_go116.go @@ -0,0 +1,19 @@ +//go:build go1.16 +// +build go1.16 + +package toml + +import ( + "io/fs" +) + +// DecodeFS reads the contents of a file from [fs.FS] and decodes it with +// [Decode]. +func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) { + fp, err := fsys.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go new file mode 100644 index 0000000000..b9e309717e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -0,0 +1,29 @@ +package toml + +import ( + "encoding" + "io" +) + +// TextMarshaler is an alias for encoding.TextMarshaler. +// +// Deprecated: use encoding.TextMarshaler +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is an alias for encoding.TextUnmarshaler. 
+// +// Deprecated: use encoding.TextUnmarshaler +type TextUnmarshaler encoding.TextUnmarshaler + +// PrimitiveDecode is an alias for MetaData.PrimitiveDecode(). +// +// Deprecated: use MetaData.PrimitiveDecode. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]struct{})} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// DecodeReader is an alias for NewDecoder(r).Decode(v). +// +// Deprecated: use NewDecoder(reader).Decode(&value). +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 0000000000..81a7c0fe9f --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,11 @@ +// Package toml implements decoding and encoding of TOML files. +// +// This package supports TOML v1.0.0, as specified at https://toml.io +// +// There is also support for delaying decoding with the Primitive type, and +// querying the set of keys in a TOML document with the MetaData type. +// +// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, +// and can be used to verify if TOML document is valid. It can also be used to +// print the type of each key. 
+package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 0000000000..9cd25d7571 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,759 @@ +package toml + +import ( + "bufio" + "encoding" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/BurntSushi/toml/internal" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayNilElement = errors.New("toml: cannot encode array with nil element") + errNonString = errors.New("toml: cannot encode a map with non-string key type") + errNoKey = errors.New("toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var dblQuotedReplacer = strings.NewReplacer( + "\"", "\\\"", + "\\", "\\\\", + "\x00", `\u0000`, + "\x01", `\u0001`, + "\x02", `\u0002`, + "\x03", `\u0003`, + "\x04", `\u0004`, + "\x05", `\u0005`, + "\x06", `\u0006`, + "\x07", `\u0007`, + "\b", `\b`, + "\t", `\t`, + "\n", `\n`, + "\x0b", `\u000b`, + "\f", `\f`, + "\r", `\r`, + "\x0e", `\u000e`, + "\x0f", `\u000f`, + "\x10", `\u0010`, + "\x11", `\u0011`, + "\x12", `\u0012`, + "\x13", `\u0013`, + "\x14", `\u0014`, + "\x15", `\u0015`, + "\x16", `\u0016`, + "\x17", `\u0017`, + "\x18", `\u0018`, + "\x19", `\u0019`, + "\x1a", `\u001a`, + "\x1b", `\u001b`, + "\x1c", `\u001c`, + "\x1d", `\u001d`, + "\x1e", `\u001e`, + "\x1f", `\u001f`, + "\x7f", `\u007f`, +) + +var ( + marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid TOML. +type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +// Encoder encodes a Go to a TOML document. 
+// +// The mapping between Go values and TOML values should be precisely the same as +// for [Decode]. +// +// time.Time is encoded as a RFC 3339 string, and time.Duration as its string +// representation. +// +// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to +// encoding the value as custom TOML. +// +// If you want to write arbitrary binary data then you will need to use +// something like base64 since TOML does not have any binary types. +// +// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes +// are encoded first. +// +// Go maps will be sorted alphabetically by key for deterministic output. +// +// The toml struct tag can be used to provide the key name; if omitted the +// struct field name will be used. If the "omitempty" option is present the +// following value will be skipped: +// +// - arrays, slices, maps, and string with len of 0 +// - struct with all zero values +// - bool false +// +// If omitzero is given all int and float types with a value of 0 will be +// skipped. +// +// Encoding Go values without a corresponding TOML representation will return an +// error. Examples of this includes maps with non-string keys, slices with nil +// elements, embedded non-struct types, and nested slices containing maps or +// structs. (e.g. [][]map[string]string is not allowed but []map[string]string +// is okay, as is []map[string][]string). +// +// NOTE: only exported keys are encoded due to the use of reflection. Unexported +// keys are silently discarded. +type Encoder struct { + // String to use for a single indentation level; default is two spaces. + Indent string + + w *bufio.Writer + hasWritten bool // written any output to w yet? +} + +// NewEncoder create a new Encoder. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the [Encoder]'s writer. 
+// +// An error is returned if the value given cannot be encoded to a valid TOML +// document. +func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + err := enc.safeEncode(Key([]string{}), rv) + if err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // If we can marshal the type to text, then we use that. This prevents the + // encoder for handling these types as generic structs (or whatever the + // underlying type of a TextMarshaler is). + switch { + case isMarshaler(rv): + enc.writeKeyValue(key, rv, false) + return + case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented. + enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded)) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.writeKeyValue(key, rv, false) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.writeKeyValue(key, rv, false) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element. 
+func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: // Using TextMarshaler adds extra quotes, which we don't want. + format := time.RFC3339Nano + switch v.Location() { + case internal.LocalDatetime: + format = "2006-01-02T15:04:05.999999999" + case internal.LocalDate: + format = "2006-01-02" + case internal.LocalTime: + format = "15:04:05.999999999" + } + switch v.Location() { + default: + enc.wf(v.Format(format)) + case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: + enc.wf(v.In(time.UTC).Format(format)) + } + return + case Marshaler: + s, err := v.MarshalTOML() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalTOML returned nil and no error")) + } + enc.w.Write(s) + return + case encoding.TextMarshaler: + s, err := v.MarshalText() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalText returned nil and no error")) + } + enc.writeQuoted(string(s)) + return + case time.Duration: + enc.writeQuoted(v.String()) + return + case json.Number: + n, _ := rv.Interface().(json.Number) + + if n == "" { /// Useful zero value. 
+ enc.w.WriteByte('0') + return + } else if v, err := n.Int64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } else if v, err := n.Float64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } + encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n)) + } + + switch rv.Kind() { + case reflect.Ptr: + enc.eElement(rv.Elem()) + return + case reflect.String: + enc.writeQuoted(rv.String()) + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } + case reflect.Float64: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) + case reflect.Interface: + enc.eElement(rv.Elem()) + default: + encPanic(fmt.Errorf("unexpected type: %T", rv.Interface())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. 
+func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := eindirect(rv.Index(i)) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := eindirect(rv.Index(i)) + if isNil(trv) { + continue + } + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key) + enc.newline() + enc.eMapOrStruct(key, trv, false) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key) + enc.newline() + } + enc.eMapOrStruct(key, rv, false) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv.Kind() { + case reflect.Map: + enc.eMap(key, rv, inline) + case reflect.Struct: + enc.eStruct(key, rv, inline) + default: + // Should never happen? + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. 
+ var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string, trailC bool) { + sort.Strings(mapKeys) + for i, mapKey := range mapKeys { + val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) + if isNil(val) { + continue + } + + if inline { + enc.writeKeyValue(Key{mapKey}, val, true) + if trailC || i != len(mapKeys)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(mapKey), val) + } + } + } + + if inline { + enc.wf("{") + } + writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) + writeMapKeys(mapKeysSub, false) + if inline { + enc.wf("}") + } +} + +const is32Bit = (32 << (^uint(0) >> 63)) == 32 + +func pointerTo(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return pointerTo(t.Elem()) + } + return t +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table then all keys under it will be in that + // table (not the one we're writing here). + // + // Fields is a [][]int: for fieldsDirect this always has one entry (the + // struct index). For fieldsSub it contains two entries: the parent field + // index from tv, and the field indexes for the fields of the sub. + var ( + rt = rv.Type() + fieldsDirect, fieldsSub [][]int + addFields func(rt reflect.Type, rv reflect.Value, start []int) + ) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct + if f.PkgPath != "" && !isEmbed { /// Skip unexported fields. 
+ continue + } + opts := getOptions(f.Tag) + if opts.skip { + continue + } + + frv := eindirect(rv.Field(i)) + + if is32Bit { + // Copy so it works correct on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart + } + + // Treat anonymous struct fields with tag names as though they are + // not anonymous, like encoding/json does. + // + // Non-struct anonymous fields use the normal encoding logic. + if isEmbed { + if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { + addFields(frv.Type(), frv, append(start, f.Index...)) + continue + } + } + + if typeIsTable(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + writeFields := func(fields [][]int) { + for _, fieldIndex := range fields { + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := rv.FieldByIndex(fieldIndex) + + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + if opts.omitempty && isEmpty(fieldVal) { + continue + } + + fieldVal = eindirect(fieldVal) + + if isNil(fieldVal) { /// Don't write anything for nil fields. + continue + } + + keyName := fieldType.Name + if opts.name != "" { + keyName = opts.name + } + + if opts.omitzero && isZero(fieldVal) { + continue + } + + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != len(fields)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } + } + } + + if inline { + enc.wf("{") + } + writeFields(fieldsDirect) + writeFields(fieldsSub) + if inline { + enc.wf("}") + } +} + +// tomlTypeOfGo returns the TOML type name of the Go value's type. 
+// +// It is used to determine whether the types of array elements are mixed (which +// is forbidden). If the Go value is nil, then it is illegal for it to be an +// array element, and valueIsNil is returned as true. +// +// The type may be `nil`, which means no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + + if rv.Kind() == reflect.Struct { + if rv.Type() == timeType { + return tomlDatetime + } + if isMarshaler(rv) { + return tomlString + } + return tomlHash + } + + if isMarshaler(rv) { + return tomlString + } + + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if isTableArray(rv) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + default: + encPanic(errors.New("unsupported type: " + rv.Kind().String())) + panic("unreachable") + } +} + +func isMarshaler(rv reflect.Value) bool { + return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml) +} + +// isTableArray reports if all entries in the array or slice are a table. +func isTableArray(arr reflect.Value) bool { + if isNil(arr) || !arr.IsValid() || arr.Len() == 0 { + return false + } + + ret := true + for i := 0; i < arr.Len(); i++ { + tt := tomlTypeOfGo(eindirect(arr.Index(i))) + // Don't allow nil. 
+ if tt == nil { + encPanic(errArrayNilElement) + } + + if ret && !typeEqual(tomlHash, tt) { + ret = false + } + } + return ret +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Struct: + if rv.Type().Comparable() { + return reflect.Zero(rv.Type()).Interface() == rv.Interface() + } + // Need to also check if all the fields are empty, otherwise something + // like this with uncomparable types will always return true: + // + // type a struct{ field b } + // type b struct{ s []string } + // s := a{field: b{s: []string{"AAA"}}} + for i := 0; i < rv.NumField(); i++ { + if !isEmpty(rv.Field(i)) { + return false + } + } + return true + case reflect.Bool: + return !rv.Bool() + case reflect.Ptr: + return rv.IsNil() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +// Write a key/value pair: +// +// key = +// +// This is also used for "k = v" in inline tables; so something like this will +// be written in three calls: +// +// ┌───────────────────┐ +// │ ┌───┐ ┌────┐│ +// v v v v vv +// key = {k = 1, 
k2 = 2} +func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { + /// Marshaler used on top-level document; call eElement() to just call + /// Marshal{TOML,Text}. + if len(key) == 0 { + enc.eElement(val) + return + } + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + if !inline { + enc.newline() + } +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + _, err := fmt.Fprintf(enc.w, format, v...) + if err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +// Resolve any level of pointers to the actual value (e.g. **string → string). +func eindirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface { + if isMarshaler(v) { + return v + } + if v.CanAddr() { /// Special case for marshalers; see #358. + if pv := v.Addr(); isMarshaler(pv) { + return pv + } + } + return v + } + + if v.IsNil() { + return v + } + + return eindirect(v.Elem()) +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go new file mode 100644 index 0000000000..efd68865bb --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -0,0 +1,279 @@ +package toml + +import ( + "fmt" + "strings" +) + +// ParseError is returned when there is an error parsing the TOML syntax such as +// invalid syntax, duplicate keys, etc. +// +// In addition to the error message itself, you can also print detailed location +// information with context by using [ErrorWithPosition]: +// +// toml: error: Key 'fruit' was already created and cannot be used as an array. 
+// +// At line 4, column 2-7: +// +// 2 | fruit = [] +// 3 | +// 4 | [[fruit]] # Not allowed +// ^^^^^ +// +// [ErrorWithUsage] can be used to print the above with some more detailed usage +// guidance: +// +// toml: error: newlines not allowed within inline tables +// +// At line 1, column 18: +// +// 1 | x = [{ key = 42 # +// ^ +// +// Error help: +// +// Inline tables must always be on a single line: +// +// table = {key = 42, second = 43} +// +// It is invalid to split them over multiple lines like so: +// +// # INVALID +// table = { +// key = 42, +// second = 43 +// } +// +// Use regular for this: +// +// [table] +// key = 42 +// second = 43 +type ParseError struct { + Message string // Short technical message. + Usage string // Longer message with usage guidance; may be blank. + Position Position // Position of the error + LastKey string // Last parsed key, may be blank. + + // Line the error occurred. + // + // Deprecated: use [Position]. + Line int + + err error + input string +} + +// Position of an error. +type Position struct { + Line int // Line number, starting at 1. + Start int // Start of error, as byte offset starting at 0. + Len int // Lenght in bytes. +} + +func (pe ParseError) Error() string { + msg := pe.Message + if msg == "" { // Error from errorf() + msg = pe.err.Error() + } + + if pe.LastKey == "" { + return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg) + } + return fmt.Sprintf("toml: line %d (last key %q): %s", + pe.Position.Line, pe.LastKey, msg) +} + +// ErrorWithPosition returns the error with detailed location context. +// +// See the documentation on [ParseError]. +func (pe ParseError) ErrorWithPosition() string { + if pe.input == "" { // Should never happen, but just in case. + return pe.Error() + } + + var ( + lines = strings.Split(pe.input, "\n") + col = pe.column(lines) + b = new(strings.Builder) + ) + + msg := pe.Message + if msg == "" { + msg = pe.err.Error() + } + + // TODO: don't show control characters as literals? 
This may not show up + // well everywhere. + + if pe.Position.Len == 1 { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", + msg, pe.Position.Line, col+1) + } else { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", + msg, pe.Position.Line, col, col+pe.Position.Len) + } + if pe.Position.Line > 2 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3]) + } + if pe.Position.Line > 1 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2]) + } + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1]) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len)) + return b.String() +} + +// ErrorWithUsage returns the error with detailed location context and usage +// guidance. +// +// See the documentation on [ParseError]. +func (pe ParseError) ErrorWithUsage() string { + m := pe.ErrorWithPosition() + if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { + lines := strings.Split(strings.TrimSpace(u.Usage()), "\n") + for i := range lines { + if lines[i] != "" { + lines[i] = " " + lines[i] + } + } + return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n" + } + return m +} + +func (pe ParseError) column(lines []string) int { + var pos, col int + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= pe.Position.Start { + col = pe.Position.Start - pos + if col < 0 { // Should never happen, but just in case. + col = 0 + } + break + } + pos += ll + } + + return col +} + +type ( + errLexControl struct{ r rune } + errLexEscape struct{ r rune } + errLexUTF8 struct{ b byte } + errLexInvalidNum struct{ v string } + errLexInvalidDate struct{ v string } + errLexInlineTableNL struct{} + errLexStringNL struct{} + errParseRange struct { + i interface{} // int or float + size string // "int64", "uint16", etc. 
+ } + errParseDuration struct{ d string } +) + +func (e errLexControl) Error() string { + return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r) +} +func (e errLexControl) Usage() string { return "" } + +func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) } +func (e errLexEscape) Usage() string { return usageEscape } +func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } +func (e errLexUTF8) Usage() string { return "" } +func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) } +func (e errLexInvalidNum) Usage() string { return "" } +func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) } +func (e errLexInvalidDate) Usage() string { return "" } +func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } +func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } +func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } +func (e errLexStringNL) Usage() string { return usageStringNewline } +func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } +func (e errParseRange) Usage() string { return usageIntOverflow } +func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } +func (e errParseDuration) Usage() string { return usageDuration } + +const usageEscape = ` +A '\' inside a "-delimited string is interpreted as an escape character. + +The following escape sequences are supported: +\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX + +To prevent a '\' from being recognized as an escape character, use either: + +- a ' or '''-delimited string; escape characters aren't processed in them; or +- write two backslashes to get a single backslash: '\\'. + +If you're trying to add a Windows path (e.g. 
"C:\Users\martin") then using '/' +instead of '\' will usually also work: "C:/Users/martin". +` + +const usageInlineNewline = ` +Inline tables must always be on a single line: + + table = {key = 42, second = 43} + +It is invalid to split them over multiple lines like so: + + # INVALID + table = { + key = 42, + second = 43 + } + +Use regular for this: + + [table] + key = 42 + second = 43 +` + +const usageStringNewline = ` +Strings must always be on a single line, and cannot span more than one line: + + # INVALID + string = "Hello, + world!" + +Instead use """ or ''' to split strings over multiple lines: + + string = """Hello, + world!""" +` + +const usageIntOverflow = ` +This number is too large; this may be an error in the TOML, but it can also be a +bug in the program that uses too small of an integer. + +The maximum and minimum values are: + + size │ lowest │ highest + ───────┼────────────────┼────────── + int8 │ -128 │ 127 + int16 │ -32,768 │ 32,767 + int32 │ -2,147,483,648 │ 2,147,483,647 + int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷ + uint8 │ 0 │ 255 + uint16 │ 0 │ 65535 + uint32 │ 0 │ 4294967295 + uint64 │ 0 │ 1.8 × 10¹⁸ + +int refers to int32 on 32-bit systems and int64 on 64-bit systems. +` + +const usageDuration = ` +A duration must be as "number", without any spaces. Valid units are: + + ns nanoseconds (billionth of a second) + us, µs microseconds (millionth of a second) + ms milliseconds (thousands of a second) + s seconds + m minutes + h hours + +You can combine multiple units; for example "5m10s" for 5 minutes and 10 +seconds. +` diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go new file mode 100644 index 0000000000..022f15bc2b --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/internal/tz.go @@ -0,0 +1,36 @@ +package internal + +import "time" + +// Timezones used for local datetime, date, and time TOML types. 
+// +// The exact way times and dates without a timezone should be interpreted is not +// well-defined in the TOML specification and left to the implementation. These +// defaults to current local timezone offset of the computer, but this can be +// changed by changing these variables before decoding. +// +// TODO: +// Ideally we'd like to offer people the ability to configure the used timezone +// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit +// tricky: the reason we use three different variables for this is to support +// round-tripping – without these specific TZ names we wouldn't know which +// format to use. +// +// There isn't a good way to encode this right now though, and passing this sort +// of information also ties in to various related issues such as string format +// encoding, encoding of comments, etc. +// +// So, for the time being, just put this in internal until we can write a good +// comprehensive API for doing all of this. +// +// The reason they're exported is because they're referred from in e.g. +// internal/tag. +// +// Note that this behaviour is valid according to the TOML spec as the exact +// behaviour is left up to implementations. 
+var ( + localOffset = func() int { _, o := time.Now().Zone(); return o }() + LocalDatetime = time.FixedZone("datetime-local", localOffset) + LocalDate = time.FixedZone("date-local", localOffset) + LocalTime = time.FixedZone("time-local", localOffset) +) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 0000000000..3545a6ad66 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,1283 @@ +package toml + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemKeyEnd + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const eof = 0 + +type stateFn func(lx *lexer) stateFn + +func (p Position) String() string { + return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) +} + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + tomlNext bool + + // Allow for backing up up to 4 runes. This is necessary because TOML + // contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. + + // A stack of state functions used to maintain context. + // + // The idea is to reuse parts of the state machine in various places. For + // example, values can appear at the top level or within arbitrarily nested + // arrays. The last state on the stack is used after a value has been lexed. + // Similarly for comments. 
+ stack []stateFn +} + +type item struct { + typ itemType + val string + err error + pos Position +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) + } + } +} + +func lex(input string, tomlNext bool) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + tomlNext: tomlNext, + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx lexer) getPos() Position { + p := Position{ + Line: lx.line, + Start: lx.start, + Len: lx.pos - lx.start, + } + if p.Len <= 0 { + p.Len = 1 + } + return p +} + +func (lx *lexer) emit(typ itemType) { + // Needed for multiline strings ending with an incomplete UTF-8 sequence. 
+ if lx.start > lx.pos { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return + } + lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("BUG in lexer: next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[3] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 4 { + lx.nprev++ + } + + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + if r == utf8.RuneError { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return utf8.RuneError + } + + // Note: don't use peek() here, as this calls next(). + if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) { + lx.errorControlChar(r) + return utf8.RuneError + } + + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called 4 times between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("BUG in lexer: backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[3] + lx.nprev-- + + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. 
+func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// error stops all lexing by emitting an error and returning `nil`. +// +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) error(err error) stateFn { + if lx.atEOF { + return lx.errorPrevLine(err) + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: err} + return nil +} + +// errorfPrevline is like error(), but sets the position to the last column of +// the previous line. +// +// This is so that unexpected EOF or NL errors don't show on a new blank line. +func (lx *lexer) errorPrevLine(err error) stateFn { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorPos is like error(), but allows explicitly setting the position. +func (lx *lexer) errorPos(start, length int, err error) stateFn { + pos := lx.getPos() + pos.Start = start + pos.Len = length + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorf is like error, and creates a new error. +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + if lx.atEOF { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} + return nil + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)} + return nil +} + +func (lx *lexer) errorControlChar(cc rune) stateFn { + return lx.errorPos(lx.pos-1, 1, errLexControl{cc}) +} + +// lexTop consumes elements at the top level of TOML data. 
+func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case '#': + lx.push(lexTop) + return lexCommentStart + case '[': + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == '#': + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf( + "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", + r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. 
+func lexTableStart(lx *lexer) stateFn { + if lx.peek() == '[' { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != ']' { + return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == ']' || r == eof: + return lx.errorf("unexpected end of table name (table names cannot be empty)") + case r == '.': + return lx.errorf("unexpected table separator (table names cannot be empty)") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexTableNameEnd) + return lexQuotedName + default: + lx.push(lexTableNameEnd) + return lexBareName + } +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == '.': + lx.ignore() + return lexTableNameStart + case r == ']': + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r) + } +} + +// lexBareName lexes one part of a key or table. +// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. only 'a' inside 'a.b'. +func lexBareName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r, lx.tomlNext) { + return lexBareName + } + lx.backup() + lx.emit(itemText) + return lx.pop() +} + +// lexBareName lexes one part of a key or table. +// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. 
only '"a"' inside '"a".b'. +func lexQuotedName(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case r == '"': + lx.ignore() // ignore the '"' + return lexString + case r == '\'': + lx.ignore() // ignore the "'" + return lexRawString + case r == eof: + return lx.errorf("unexpected EOF; expected value") + default: + return lx.errorf("expected value but found %q instead", r) + } +} + +// lexKeyStart consumes all key parts until a '='. +func lexKeyStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '=': key name appears blank") + case r == '.': + return lx.errorf("unexpected '.': keys cannot start with a '.'") + case r == '"' || r == '\'': + lx.ignore() + fallthrough + default: // Bare key + lx.emit(itemKeyStart) + return lexKeyNameStart + } +} + +func lexKeyNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '='") + case r == '.': + return lx.errorf("unexpected '.'") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexKeyEnd) + return lexQuotedName + default: + lx.push(lexKeyEnd) + return lexBareName + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + case r == eof: + return lx.errorf("unexpected EOF; expected key separator '='") + case r == '.': + lx.ignore() + return lexKeyNameStart + case r == '=': + lx.emit(itemKeyEnd) + return lexSkip(lx, lexValue) + default: + return lx.errorf("expected '.' or '=', but got %q instead", r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. 
+func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case '[': + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case '{': + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case '"': + if lx.accept('"') { + if lx.accept('"') { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case '\'': + if lx.accept('\'') { + if lx.accept('\'') { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. 
+func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == '#': + lx.push(lexArrayValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == ']': + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == '#': + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + return lexArrayValue // move on to the next value + case r == ']': + return lexArrayEnd + default: + return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) + } +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. 
+func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValue) + } + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == '}': + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValueEnd) + } + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + if lx.tomlNext { + return lexInlineTableValueEnd + } + return lx.errorf("trailing comma not allowed in inline tables") + } + return lexInlineTableValue + case r == '}': + return lexInlineTableEnd + default: + return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) + } +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. 
+func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf(`unexpected EOF; expected '"'`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == '"': + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineString + case eof: + return lx.errorf(`unexpected EOF; expected '"""'`) + case '\\': + return lexMultilineStringEscape + case '"': + /// Found " → try to read two more "". + if lx.accept('"') { + if lx.accept('"') { + /// Peek ahead: the string can contain " and "", including at the + /// end: """str""""" + /// 6 or more at the end, however, is an error. + if lx.peek() == '"' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + /// + /// Second check is for the edge case: + /// + /// two quotes allowed. + /// vv + /// """lol \"""""" + /// ^^ ^^^---- closing three + /// escaped + /// + /// But ugly, but it works + if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) { + return lx.errorf(`unexpected '""""""'`) + } + lx.backup() + lx.backup() + return lexMultilineString + } + + lx.backup() /// backup: don't include the """ in the item. + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineString + } +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. 
+func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + default: + return lexRawString + case r == eof: + return lx.errorf(`unexpected EOF; expected "'"`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\'': + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a +// string. It assumes that the beginning triple-' has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineRawString + case eof: + return lx.errorf(`unexpected EOF; expected "'''"`) + case '\'': + /// Found ' → try to read two more ''. + if lx.accept('\'') { + if lx.accept('\'') { + /// Peek ahead: the string can contain ' and '', including at the + /// end: '''str''''' + /// 6 or more at the end, however, is an error. + if lx.peek() == '\'' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineRawString + } +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + if isNL(lx.next()) { /// \ escaping newline. 
+ return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'e': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + fallthrough + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. + fallthrough + case '\\': + return lx.pop() + case 'x': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + return lexHexEscape + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.error(errLexEscape{r}) +} + +func lexHexEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 2; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected two hexadecimal digits after '\x', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected four hexadecimal digits after '\u', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected eight hexadecimal digits after '\U', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. 
+func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + switch r { + case '0': + return lexBaseNumberOrDate + } + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-', ':': + return lexDatetime + case '_': + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': + return lexDatetime + } + + lx.backup() + lx.emitTrim(itemDatetime) + return lx.pop() +} + +// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. +func lexHexInteger(lx *lexer) stateFn { + r := lx.next() + if isHexadecimal(r) { + return lexHexInteger + } + switch r { + case '_': + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. +func lexOctalInteger(lx *lexer) stateFn { + r := lx.next() + if isOctal(r) { + return lexOctalInteger + } + switch r { + case '_': + return lexOctalInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. 
+func lexBinaryInteger(lx *lexer) stateFn { + r := lx.next() + if isBinary(r) { + return lexBinaryInteger + } + switch r { + case '_': + return lexBinaryInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes a decimal float or integer. +func lexDecimalNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDecimalNumber + } + switch r { + case '.', 'e', 'E': + return lexFloat + case '_': + return lexDecimalNumber + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes the first digit of a number beginning with a sign. +// It assumes the sign has already been consumed. Values which start with a sign +// are only allowed to be decimal integers or floats. +// +// The special "nan" and "inf" values are also recognized. +func lexDecimalNumberStart(lx *lexer) stateFn { + r := lx.next() + + // Special error cases to give users better error messages + switch r { + case 'i': + if !lx.accept('n') || !lx.accept('f') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case 'n': + if !lx.accept('a') || !lx.accept('n') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case '0': + p := lx.peek() + switch p { + case 'b', 'o', 'x': + return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p) + } + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + + if isDigit(r) { + return lexDecimalNumber + } + + return lx.errorf("expected a digit but got %q", r) +} + +// lexBaseNumberOrDate differentiates between the possible values which +// start with '0'. It assumes that before reaching this state, the initial '0' +// has been consumed. +func lexBaseNumberOrDate(lx *lexer) stateFn { + r := lx.next() + // Note: All datetimes start with at least two digits, so we don't + // handle date characters (':', '-', etc.) here. 
+ if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + // Can only be decimal, because there can't be an underscore + // between the '0' and the base designator, and dates can't + // contain underscores. + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + case 'b': + r = lx.peek() + if !isBinary(r) { + lx.errorf("not a binary number: '%s%c'", lx.current(), r) + } + return lexBinaryInteger + case 'o': + r = lx.peek() + if !isOctal(r) { + lx.errorf("not an octal number: '%s%c'", lx.current(), r) + } + return lexOctalInteger + case 'x': + r = lx.peek() + if !isHexadecimal(r) { + lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) + } + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if !unicode.IsLetter(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("expected value but found %q instead", s) +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment. +func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. 
+// It will consume *up to* the first newline character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + switch r := lx.next(); { + case isNL(r) || r == eof: + lx.backup() + lx.emit(itemText) + return lx.pop() + default: + return lexComment + } +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + lx.ignore() + return nextState +} + +func (s stateFn) String() string { + name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() + if i := strings.LastIndexByte(name, '.'); i > -1 { + name = name[i+1:] + } + if s == nil { + name = "" + } + return name + "()" +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemKeyEnd: + return "KeyEnd" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + case itemInlineTableStart: + return "InlineTableStart" + case itemInlineTableEnd: + return "InlineTableEnd" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} + +func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } +func isNL(r rune) bool { return r == '\n' || r == '\r' } +func isControl(r rune) bool { // Control characters except \t, \r, \n + switch r { + case '\t', '\r', '\n': + return false + default: + return (r >= 0x00 && r <= 0x1f) || r 
== 0x7f + } +} +func isDigit(r rune) bool { return r >= '0' && r <= '9' } +func isBinary(r rune) bool { return r == '0' || r == '1' } +func isOctal(r rune) bool { return r >= '0' && r <= '7' } +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') +} + +func isBareKeyChar(r rune, tomlNext bool) bool { + if tomlNext { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' || + r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || + (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || + (r >= 0x037f && r <= 0x1fff) || + (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || + (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || + (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || + (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || + (r >= 0x10000 && r <= 0xeffff) + } + + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' +} diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go new file mode 100644 index 0000000000..2e78b24e95 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -0,0 +1,121 @@ +package toml + +import ( + "strings" +) + +// MetaData allows access to meta information about TOML data that's not +// accessible otherwise. +// +// It allows checking if a key is defined in the TOML data, whether any keys +// were undecoded, and the TOML type of a key. +type MetaData struct { + context Key // Used only during decoding. + + keyInfo map[string]keyInfo + mapping map[string]interface{} + keys []Key + decoded map[string]struct{} + data []byte // Input file; for errors. +} + +// IsDefined reports if the key exists in the TOML data. 
+// +// The key should be specified hierarchically, for example to access the TOML +// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive. +// +// Returns false for an empty key. +func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var ( + hash map[string]interface{} + ok bool + hashOrVal interface{} = md.mapping + ) + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that does +// not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + if ki, ok := md.keyInfo[Key(key).String()]; ok { + return ki.tomlType.typeString() + } + return "" +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. The list will have the same +// order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a [Primitive] value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. 
+func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if _, ok := md.decoded[key.String()]; !ok { + undecoded = append(undecoded, key) + } + } + return undecoded +} + +// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get +// values of this type. +type Key []string + +func (k Key) String() string { + ss := make([]string, len(k)) + for i := range k { + ss[i] = k.maybeQuoted(i) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + if k[i] == "" { + return `""` + } + for _, c := range k[i] { + if !isBareKeyChar(c, false) { + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + } + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 0000000000..9c19153698 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,811 @@ +package toml + +import ( + "fmt" + "os" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/BurntSushi/toml/internal" +) + +type parser struct { + lx *lexer + context Key // Full key for the current hash in scope. + currentKey string // Base key name for everything except hashes. + pos Position // Current position in the TOML file. + tomlNext bool + + ordered []Key // List of keys in the order that they appear in the TOML data. + + keyInfo map[string]keyInfo // Map keyname → info about the TOML key. + mapping map[string]interface{} // Map keyname → key value. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). 
+} + +type keyInfo struct { + pos Position + tomlType tomlType +} + +func parse(data string) (p *parser, err error) { + _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") + + defer func() { + if r := recover(); r != nil { + if pErr, ok := r.(ParseError); ok { + pErr.input = data + err = pErr + return + } + panic(r) + } + }() + + // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() + // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add + // it anyway. + if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 + data = data[2:] + } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 + data = data[3:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. + ex := 6 + if len(data) < 6 { + ex = len(data) + } + if i := strings.IndexRune(data[:ex], 0); i > -1 { + return nil, ParseError{ + Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", + Position: Position{Line: 1, Start: i, Len: 1}, + Line: 1, + input: data, + } + } + + p = &parser{ + keyInfo: make(map[string]keyInfo), + mapping: make(map[string]interface{}), + lx: lex(data, tomlNext), + ordered: make([]Key, 0), + implicits: make(map[string]struct{}), + tomlNext: tomlNext, + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicErr(it item, err error) { + panic(ParseError{ + err: err, + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicItemf(it item, format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicf(format string, v ...interface{}) { + panic(ParseError{ + Message: 
fmt.Sprintf(format, v...), + Position: p.pos, + Line: p.pos.Line, + LastKey: p.current(), + }) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val) + if it.typ == itemError { + if it.err != nil { + panic(ParseError{ + Position: it.pos, + Line: it.pos.Line, + LastKey: p.current(), + err: it.err, + }) + } + + p.panicItemf(it, "%s", it.val) + } + return it +} + +func (p *parser) nextPos() item { + it := p.next() + p.pos = it.pos + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: // # .. + p.expect(itemText) + case itemTableStart: // [ .. ] + name := p.nextPos() + + var key Key + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemTableEnd, name.typ) + + p.addContext(key, false) + p.setType("", tomlHash, item.pos) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: // [[ .. ]] + name := p.nextPos() + + var key Key + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemArrayTableEnd, name.typ) + + p.addContext(key, true) + p.setType("", tomlArrayHash, item.pos) + p.ordered = append(p.ordered, key) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 
'a' and 'b' in 'a.b') + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Set value. + vItem := p.next() + val, typ := p.value(vItem, false) + p.set(p.currentKey, val, typ, vItem.pos) + + /// Remove the context we added (preserving any context from [tbl] lines). + p.context = outerContext + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it, false) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + } + panic("unreachable") +} + +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. 
+func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) + case itemMultilineString: + return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) + } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (interface{}, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. 
+ if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "int64"}) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (interface{}, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) + } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. 
+ val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "float64"}) + } else { + p.panicItemf(it, "Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location + next bool +}{ + {time.RFC3339Nano, time.Local, false}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, + {"2006-01-02", internal.LocalDate, false}, + {"15:04:05.999999999", internal.LocalTime, false}, + + // tomlNext + {"2006-01-02T15:04Z07:00", time.Local, true}, + {"2006-01-02T15:04", internal.LocalDatetime, true}, + {"15:04", internal.LocalTime, true}, +} + +func (p *parser) valueDatetime(it item) (interface{}, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + if dt.next && !p.tomlNext { + continue + } + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) +} + +func (p *parser) valueArray(it item) (interface{}, tomlType) { + p.setType(p.currentKey, tomlArray, it.pos) + + var ( + types []tomlType + + // Initialize to a non-nil empty slice. This makes it consistent with + // how S = [] decodes into a non-nil slice inside something like struct + // { S []string }. See #338 + array = []interface{}{} + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it, true) + array = append(array, val) + types = append(types, typ) + + // XXX: types isn't used here, we need it to record the accurate type + // information. 
+ // + // Not entirely sure how to best store this; could use "key[0]", + // "key[1]" notation, or maybe store it on the Array type? + _ = types + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) + + /// Loop over all table key/value pairs. + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + /// Read all key parts. + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Set the value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ, it.pos) + hash[p.currentKey] = val + + /// Restore context. + p.context = prevContext + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash +} + +// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', +// +/- signs, and base prefixes. 
+func numHasLeadingZero(s string) bool { + if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x + return true + } + if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' { + return true + } + return false +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + switch s { + case "nan", "+nan", "-nan", "inf", "-inf", "+inf": + return true + } + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + } + + // isHexadecimal is a superset of all the permissable characters + // surrounding an underscore. + accept = isHexadecimal(r) + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// Set the current context of the parser, where the context is either a hash or +// an array of hashes, depending on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) addContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. + if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]interface{}) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. 
+ // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]interface{}: + hashContext = t[len(t)-1] + case map[string]interface{}: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 4) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as an array.", key) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// set calls setValue and setType. +func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { + p.setValue(key, val) + p.setType(key, typ, pos) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value interface{}) { + var ( + tmpHash interface{} + ok bool + hash = p.mapping + keyContext Key + ) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. 
+ hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.panicf("Key '%s' has already been defined.", keyContext) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Normally redefining keys isn't allowed, but the key could have been + // defined implicitly and it's allowed to be redefined concretely. (See + // the `valid/implicit-and-explicit-after.toml` in toml-test) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isArray(keyContext) { + p.removeImplicit(keyContext) + hash[key] = value + return + } + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + + hash[key] = value +} + +// setType sets the type of a particular value at a given key. It should be +// called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). +func (p *parser) setType(key string, typ tomlType, pos Position) { + keyContext := make(Key, 0, len(p.context)+1) + keyContext = append(keyContext, p.context...) + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + // Special case to make empty keys ("" = 1) work. + // Without it it will set "" rather than `""`. + // TODO: why is this needed? And why is this only needed here? 
+ if len(keyContext) == 0 { + keyContext = Key{""} + } + p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos} +} + +// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and +// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). +func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } +func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } +func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } +func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } +func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) } + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) > 0 && s[0] == '\n' { + return s[1:] + } + if len(s) > 1 && s[0] == '\r' && s[1] == '\n' { + return s[2:] + } + return s +} + +// stripEscapedNewlines removes whitespace after line-ending backslashes in +// multiline strings. +// +// A line-ending backslash is an unescaped \ followed only by whitespace until +// the next newline. After a line-ending backslash, all whitespace is removed +// until the next non-whitespace character. +func (p *parser) stripEscapedNewlines(s string) string { + var b strings.Builder + var i int + for { + ix := strings.Index(s[i:], `\`) + if ix < 0 { + b.WriteString(s) + return b.String() + } + i += ix + + if len(s) > i+1 && s[i+1] == '\\' { + // Escaped backslash. + i += 2 + continue + } + // Scan until the next non-whitespace. 
+ j := i + 1 + whitespaceLoop: + for ; j < len(s); j++ { + switch s[j] { + case ' ', '\t', '\r', '\n': + default: + break whitespaceLoop + } + } + if j == i+1 { + // Not a whitespace escape. + i++ + continue + } + if !strings.Contains(s[i:j], "\n") { + // This is not a line-ending backslash. + // (It's a bad escape sequence, but we can let + // replaceEscapes catch it.) + i++ + continue + } + b.WriteString(s[:i]) + s = s[j:] + i = 0 + } +} + +func (p *parser) replaceEscapes(it item, str string) string { + replaced := make([]rune, 0, len(str)) + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + case ' ', '\t': + p.panicItemf(it, "invalid escape: '\\%c'", s[r]) + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case 'e': + if p.tomlNext { + replaced = append(replaced, rune(0x001B)) + r += 1 + } + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'x': + if p.tomlNext { + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+3]) + replaced = append(replaced, escaped) + r += 3 + } + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+9). 
(Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 0000000000..254ca82e54 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. 
+type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + var count map[reflect.Type]int + var nextCount map[reflect.Type]int + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. 
+ for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. 
+ fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
+func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go new file mode 100644 index 0000000000..4e90d77373 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_toml.go @@ -0,0 +1,70 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsTable(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. 
+// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} diff --git a/vendor/sigs.k8s.io/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore similarity index 88% rename from vendor/sigs.k8s.io/yaml/.gitignore rename to vendor/github.com/ghodss/yaml/.gitignore index 2dc92904ef..e256a31e00 100644 --- a/vendor/sigs.k8s.io/yaml/.gitignore +++ b/vendor/github.com/ghodss/yaml/.gitignore @@ -6,10 +6,6 @@ .project .settings/** -# Idea files -.idea/** -.idea/ - # Emacs save files *~ diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/LICENSE b/vendor/github.com/nicksnyder/go-i18n/v2/LICENSE new file mode 100644 index 0000000000..609cce7976 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 Nick Snyder https://github.com/nicksnyder + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/i18n/bundle.go b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/bundle.go new file mode 100644 index 0000000000..dfe4d7a5a3 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/bundle.go @@ -0,0 +1,144 @@ +package i18n + +import ( + "fmt" + "os" + + "github.com/nicksnyder/go-i18n/v2/internal/plural" + + "golang.org/x/text/language" +) + +// UnmarshalFunc unmarshals data into v. +type UnmarshalFunc func(data []byte, v interface{}) error + +// Bundle stores a set of messages and pluralization rules. +// Most applications only need a single bundle +// that is initialized early in the application's lifecycle. +// It is not goroutine safe to modify the bundle while Localizers +// are reading from it. +type Bundle struct { + defaultLanguage language.Tag + unmarshalFuncs map[string]UnmarshalFunc + messageTemplates map[language.Tag]map[string]*MessageTemplate + pluralRules plural.Rules + tags []language.Tag + matcher language.Matcher +} + +// artTag is the language tag used for artificial languages +// https://en.wikipedia.org/wiki/Codes_for_constructed_languages +var artTag = language.MustParse("art") + +// NewBundle returns a bundle with a default language and a default set of plural rules. 
+func NewBundle(defaultLanguage language.Tag) *Bundle { + b := &Bundle{ + defaultLanguage: defaultLanguage, + pluralRules: plural.DefaultRules(), + } + b.pluralRules[artTag] = b.pluralRules.Rule(language.English) + b.addTag(defaultLanguage) + return b +} + +// RegisterUnmarshalFunc registers an UnmarshalFunc for format. +func (b *Bundle) RegisterUnmarshalFunc(format string, unmarshalFunc UnmarshalFunc) { + if b.unmarshalFuncs == nil { + b.unmarshalFuncs = make(map[string]UnmarshalFunc) + } + b.unmarshalFuncs[format] = unmarshalFunc +} + +// LoadMessageFile loads the bytes from path +// and then calls ParseMessageFileBytes. +func (b *Bundle) LoadMessageFile(path string) (*MessageFile, error) { + buf, err := os.ReadFile(path) + if err != nil { + return nil, err + } + return b.ParseMessageFileBytes(buf, path) +} + +// MustLoadMessageFile is similar to LoadMessageFile +// except it panics if an error happens. +func (b *Bundle) MustLoadMessageFile(path string) { + if _, err := b.LoadMessageFile(path); err != nil { + panic(err) + } +} + +// ParseMessageFileBytes parses the bytes in buf to add translations to the bundle. +// +// The format of the file is everything after the last ".". +// +// The language tag of the file is everything after the second to last "." or after the last path separator, but before the format. +func (b *Bundle) ParseMessageFileBytes(buf []byte, path string) (*MessageFile, error) { + messageFile, err := ParseMessageFileBytes(buf, path, b.unmarshalFuncs) + if err != nil { + return nil, err + } + if err := b.AddMessages(messageFile.Tag, messageFile.Messages...); err != nil { + return nil, err + } + return messageFile, nil +} + +// MustParseMessageFileBytes is similar to ParseMessageFileBytes +// except it panics if an error happens. +func (b *Bundle) MustParseMessageFileBytes(buf []byte, path string) { + if _, err := b.ParseMessageFileBytes(buf, path); err != nil { + panic(err) + } +} + +// AddMessages adds messages for a language. 
+// It is useful if your messages are in a format not supported by ParseMessageFileBytes. +func (b *Bundle) AddMessages(tag language.Tag, messages ...*Message) error { + pluralRule := b.pluralRules.Rule(tag) + if pluralRule == nil { + return fmt.Errorf("no plural rule registered for %s", tag) + } + if b.messageTemplates == nil { + b.messageTemplates = map[language.Tag]map[string]*MessageTemplate{} + } + if b.messageTemplates[tag] == nil { + b.messageTemplates[tag] = map[string]*MessageTemplate{} + b.addTag(tag) + } + for _, m := range messages { + b.messageTemplates[tag][m.ID] = NewMessageTemplate(m) + } + return nil +} + +// MustAddMessages is similar to AddMessages except it panics if an error happens. +func (b *Bundle) MustAddMessages(tag language.Tag, messages ...*Message) { + if err := b.AddMessages(tag, messages...); err != nil { + panic(err) + } +} + +func (b *Bundle) addTag(tag language.Tag) { + for _, t := range b.tags { + if t == tag { + // Tag already exists + return + } + } + b.tags = append(b.tags, tag) + b.matcher = language.NewMatcher(b.tags) +} + +// LanguageTags returns the list of language tags +// of all the translations loaded into the bundle +func (b *Bundle) LanguageTags() []language.Tag { + return b.tags +} + +func (b *Bundle) getMessageTemplate(tag language.Tag, id string) *MessageTemplate { + templates := b.messageTemplates[tag] + if templates == nil { + return nil + } + return templates[id] +} diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/i18n/bundlefs.go b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/bundlefs.go new file mode 100644 index 0000000000..69c9dd2f63 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/bundlefs.go @@ -0,0 +1,16 @@ +package i18n + +import ( + "io/fs" +) + +// LoadMessageFileFS is like LoadMessageFile but instead of reading from the +// hosts operating system's file system it reads from the fs file system. 
+func (b *Bundle) LoadMessageFileFS(fsys fs.FS, path string) (*MessageFile, error) { + buf, err := fs.ReadFile(fsys, path) + if err != nil { + return nil, err + } + + return b.ParseMessageFileBytes(buf, path) +} diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/i18n/doc.go b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/doc.go new file mode 100644 index 0000000000..709b59a5d8 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/doc.go @@ -0,0 +1,28 @@ +// Package i18n provides support for looking up messages +// according to a set of locale preferences. +// +// Create a Bundle to use for the lifetime of your application. +// +// bundle := i18n.NewBundle(language.English) +// +// Load translations into your bundle during initialization. +// +// bundle.LoadMessageFile("en-US.yaml") +// +// Create a Localizer to use for a set of language preferences. +// +// func(w http.ResponseWriter, r *http.Request) { +// lang := r.FormValue("lang") +// accept := r.Header.Get("Accept-Language") +// localizer := i18n.NewLocalizer(bundle, lang, accept) +// } +// +// Use the Localizer to lookup messages. +// +// localizer.MustLocalize(&i18n.LocalizeConfig{ +// DefaultMessage: &i18n.Message{ +// ID: "HelloWorld", +// Other: "Hello World!", +// }, +// }) +package i18n diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/i18n/localizer.go b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/localizer.go new file mode 100644 index 0000000000..b13160fe54 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/localizer.go @@ -0,0 +1,238 @@ +package i18n + +import ( + "fmt" + texttemplate "text/template" + + "github.com/nicksnyder/go-i18n/v2/i18n/template" + "github.com/nicksnyder/go-i18n/v2/internal/plural" + "golang.org/x/text/language" +) + +// Localizer provides Localize and MustLocalize methods that return localized messages. +// Localize and MustLocalize methods use a language.Tag matching algorithm based +// on the best possible value. 
This algorithm may cause an unexpected language.Tag returned +// value depending on the order of the tags stored in memory. For example, if the bundle +// used to create a Localizer instance ingested locales following this order +// ["en-US", "en-GB", "en-IE", "en"] and the locale "en" is asked, the underlying matching +// algorithm will return "en-US" thinking it is the best match possible. More information +// about the algorithm in this Github issue: https://github.com/golang/go/issues/49176. +// There is additionnal informations inside the Go code base: +// https://github.com/golang/text/blob/master/language/match.go#L142 +type Localizer struct { + // bundle contains the messages that can be returned by the Localizer. + bundle *Bundle + + // tags is the list of language tags that the Localizer checks + // in order when localizing a message. + tags []language.Tag +} + +// NewLocalizer returns a new Localizer that looks up messages +// in the bundle according to the language preferences in langs. +// It can parse Accept-Language headers as defined in http://www.ietf.org/rfc/rfc2616.txt. +func NewLocalizer(bundle *Bundle, langs ...string) *Localizer { + return &Localizer{ + bundle: bundle, + tags: parseTags(langs), + } +} + +func parseTags(langs []string) []language.Tag { + tags := []language.Tag{} + for _, lang := range langs { + t, _, err := language.ParseAcceptLanguage(lang) + if err != nil { + continue + } + tags = append(tags, t...) + } + return tags +} + +// LocalizeConfig configures a call to the Localize method on Localizer. +type LocalizeConfig struct { + // MessageID is the id of the message to lookup. + // This field is ignored if DefaultMessage is set. + MessageID string + + // TemplateData is the data passed when executing the message's template. + // If TemplateData is nil and PluralCount is not nil, then the message template + // will be executed with data that contains the plural count. 
+ TemplateData interface{} + + // PluralCount determines which plural form of the message is used. + PluralCount interface{} + + // DefaultMessage is used if the message is not found in any message files. + DefaultMessage *Message + + // Funcs is used to configure a template.TextParser if TemplateParser is not set. + Funcs texttemplate.FuncMap + + // The TemplateParser to use for parsing templates. + // If one is not set, a template.TextParser is used (configured with Funcs if it is set). + TemplateParser template.Parser +} + +var defaultTextParser = &template.TextParser{} + +func (lc *LocalizeConfig) getTemplateParser() template.Parser { + if lc.TemplateParser != nil { + return lc.TemplateParser + } + if lc.Funcs != nil { + return &template.TextParser{ + Funcs: lc.Funcs, + } + } + return defaultTextParser +} + +type invalidPluralCountErr struct { + messageID string + pluralCount interface{} + err error +} + +func (e *invalidPluralCountErr) Error() string { + return fmt.Sprintf("invalid plural count %#v for message id %q: %s", e.pluralCount, e.messageID, e.err) +} + +// MessageNotFoundErr is returned from Localize when a message could not be found. +type MessageNotFoundErr struct { + Tag language.Tag + MessageID string +} + +func (e *MessageNotFoundErr) Error() string { + return fmt.Sprintf("message %q not found in language %q", e.MessageID, e.Tag) +} + +type messageIDMismatchErr struct { + messageID string + defaultMessageID string +} + +func (e *messageIDMismatchErr) Error() string { + return fmt.Sprintf("message id %q does not match default message id %q", e.messageID, e.defaultMessageID) +} + +// Localize returns a localized message. +func (l *Localizer) Localize(lc *LocalizeConfig) (string, error) { + msg, _, err := l.LocalizeWithTag(lc) + return msg, err +} + +// Localize returns a localized message. 
+func (l *Localizer) LocalizeMessage(msg *Message) (string, error) { + return l.Localize(&LocalizeConfig{ + DefaultMessage: msg, + }) +} + +// TODO: uncomment this (and the test) when extract has been updated to extract these call sites too. +// Localize returns a localized message. +// func (l *Localizer) LocalizeMessageID(messageID string) (string, error) { +// return l.Localize(&LocalizeConfig{ +// MessageID: messageID, +// }) +// } + +// LocalizeWithTag returns a localized message and the language tag. +// It may return a best effort localized message even if an error happens. +func (l *Localizer) LocalizeWithTag(lc *LocalizeConfig) (string, language.Tag, error) { + messageID := lc.MessageID + if lc.DefaultMessage != nil { + if messageID != "" && messageID != lc.DefaultMessage.ID { + return "", language.Und, &messageIDMismatchErr{messageID: messageID, defaultMessageID: lc.DefaultMessage.ID} + } + messageID = lc.DefaultMessage.ID + } + + var operands *plural.Operands + templateData := lc.TemplateData + if lc.PluralCount != nil { + var err error + operands, err = plural.NewOperands(lc.PluralCount) + if err != nil { + return "", language.Und, &invalidPluralCountErr{messageID: messageID, pluralCount: lc.PluralCount, err: err} + } + if templateData == nil { + templateData = map[string]interface{}{ + "PluralCount": lc.PluralCount, + } + } + } + + tag, template, err := l.getMessageTemplate(messageID, lc.DefaultMessage) + if template == nil { + return "", language.Und, err + } + + pluralForm := l.pluralForm(tag, operands) + templateParser := lc.getTemplateParser() + msg, err2 := template.execute(pluralForm, templateData, templateParser) + if err2 != nil { + if err == nil { + err = err2 + } + + // Attempt to fallback to "Other" pluralization in case translations are incomplete. 
+ if pluralForm != plural.Other { + msg2, err3 := template.execute(plural.Other, templateData, templateParser) + if err3 == nil { + msg = msg2 + } + } + } + return msg, tag, err +} + +func (l *Localizer) getMessageTemplate(id string, defaultMessage *Message) (language.Tag, *MessageTemplate, error) { + _, i, _ := l.bundle.matcher.Match(l.tags...) + tag := l.bundle.tags[i] + mt := l.bundle.getMessageTemplate(tag, id) + if mt != nil { + return tag, mt, nil + } + + if tag == l.bundle.defaultLanguage { + if defaultMessage == nil { + return language.Und, nil, &MessageNotFoundErr{Tag: tag, MessageID: id} + } + mt := NewMessageTemplate(defaultMessage) + if mt == nil { + return language.Und, nil, &MessageNotFoundErr{Tag: tag, MessageID: id} + } + return tag, mt, nil + } + + // Fallback to default language in bundle. + mt = l.bundle.getMessageTemplate(l.bundle.defaultLanguage, id) + if mt != nil { + return l.bundle.defaultLanguage, mt, &MessageNotFoundErr{Tag: tag, MessageID: id} + } + + // Fallback to default message. + if defaultMessage == nil { + return language.Und, nil, &MessageNotFoundErr{Tag: tag, MessageID: id} + } + return l.bundle.defaultLanguage, NewMessageTemplate(defaultMessage), &MessageNotFoundErr{Tag: tag, MessageID: id} +} + +func (l *Localizer) pluralForm(tag language.Tag, operands *plural.Operands) plural.Form { + if operands == nil { + return plural.Other + } + return l.bundle.pluralRules.Rule(tag).PluralFormFunc(operands) +} + +// MustLocalize is similar to Localize, except it panics if an error happens. 
+func (l *Localizer) MustLocalize(lc *LocalizeConfig) string { + localized, err := l.Localize(lc) + if err != nil { + panic(err) + } + return localized +} diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/i18n/message.go b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/message.go new file mode 100644 index 0000000000..73cd2f6d34 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/message.go @@ -0,0 +1,221 @@ +package i18n + +import ( + "fmt" + "strings" +) + +// Message is a string that can be localized. +type Message struct { + // ID uniquely identifies the message. + ID string + + // Hash uniquely identifies the content of the message + // that this message was translated from. + Hash string + + // Description describes the message to give additional + // context to translators that may be relevant for translation. + Description string + + // LeftDelim is the left Go template delimiter. + LeftDelim string + + // RightDelim is the right Go template delimiter. + RightDelim string + + // Zero is the content of the message for the CLDR plural form "zero". + Zero string + + // One is the content of the message for the CLDR plural form "one". + One string + + // Two is the content of the message for the CLDR plural form "two". + Two string + + // Few is the content of the message for the CLDR plural form "few". + Few string + + // Many is the content of the message for the CLDR plural form "many". + Many string + + // Other is the content of the message for the CLDR plural form "other". + Other string +} + +// NewMessage parses data and returns a new message. +func NewMessage(data interface{}) (*Message, error) { + m := &Message{} + if err := m.unmarshalInterface(data); err != nil { + return nil, err + } + return m, nil +} + +// MustNewMessage is similar to NewMessage except it panics if an error happens. 
+func MustNewMessage(data interface{}) *Message { + m, err := NewMessage(data) + if err != nil { + panic(err) + } + return m +} + +// unmarshalInterface unmarshals a message from data. +func (m *Message) unmarshalInterface(v interface{}) error { + strdata, err := stringMap(v) + if err != nil { + return err + } + for k, v := range strdata { + switch strings.ToLower(k) { + case "id": + m.ID = v + case "description": + m.Description = v + case "hash": + m.Hash = v + case "leftdelim": + m.LeftDelim = v + case "rightdelim": + m.RightDelim = v + case "zero": + m.Zero = v + case "one": + m.One = v + case "two": + m.Two = v + case "few": + m.Few = v + case "many": + m.Many = v + case "other": + m.Other = v + } + } + return nil +} + +type keyTypeErr struct { + key interface{} +} + +func (err *keyTypeErr) Error() string { + return fmt.Sprintf("expected key to be a string but got %#v", err.key) +} + +type valueTypeErr struct { + value interface{} +} + +func (err *valueTypeErr) Error() string { + return fmt.Sprintf("unsupported type %#v", err.value) +} + +func stringMap(v interface{}) (map[string]string, error) { + switch value := v.(type) { + case string: + return map[string]string{ + "other": value, + }, nil + case map[string]string: + return value, nil + case map[string]interface{}: + strdata := make(map[string]string, len(value)) + for k, v := range value { + err := stringSubmap(k, v, strdata) + if err != nil { + return nil, err + } + } + return strdata, nil + case map[interface{}]interface{}: + strdata := make(map[string]string, len(value)) + for k, v := range value { + kstr, ok := k.(string) + if !ok { + return nil, &keyTypeErr{key: k} + } + err := stringSubmap(kstr, v, strdata) + if err != nil { + return nil, err + } + } + return strdata, nil + default: + return nil, &valueTypeErr{value: value} + } +} + +func stringSubmap(k string, v interface{}, strdata map[string]string) error { + if k == "translation" { + switch vt := v.(type) { + case string: + strdata["other"] = vt 
+ default: + v1Message, err := stringMap(v) + if err != nil { + return err + } + for kk, vv := range v1Message { + strdata[kk] = vv + } + } + return nil + } + + switch vt := v.(type) { + case string: + strdata[k] = vt + return nil + case nil: + return nil + default: + return fmt.Errorf("expected value for key %q be a string but got %#v", k, v) + } +} + +// isMessage tells whether the given data is a message, or a map containing +// nested messages. +// A map is assumed to be a message if it contains any of the "reserved" keys: +// "id", "description", "hash", "leftdelim", "rightdelim", "zero", "one", "two", "few", "many", "other" +// with a string value. +// e.g., +// - {"message": {"description": "world"}} is a message +// - {"message": {"description": "world", "foo": "bar"}} is a message ("foo" key is ignored) +// - {"notmessage": {"description": {"hello": "world"}}} is not +// - {"notmessage": {"foo": "bar"}} is not +func isMessage(v interface{}) bool { + reservedKeys := []string{"id", "description", "hash", "leftdelim", "rightdelim", "zero", "one", "two", "few", "many", "other"} + switch data := v.(type) { + case string: + return true + case map[string]interface{}: + for _, key := range reservedKeys { + val, ok := data[key] + if !ok { + continue + } + _, ok = val.(string) + if !ok { + continue + } + // v is a message if it contains a "reserved" key holding a string value + return true + } + case map[interface{}]interface{}: + for _, key := range reservedKeys { + val, ok := data[key] + if !ok { + continue + } + _, ok = val.(string) + if !ok { + continue + } + // v is a message if it contains a "reserved" key holding a string value + return true + } + } + return false +} diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/i18n/message_template.go b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/message_template.go new file mode 100644 index 0000000000..24a890b8b8 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/message_template.go @@ -0,0 +1,82 @@ 
+package i18n + +import ( + "fmt" + texttemplate "text/template" + + "github.com/nicksnyder/go-i18n/v2/i18n/template" + "github.com/nicksnyder/go-i18n/v2/internal" + "github.com/nicksnyder/go-i18n/v2/internal/plural" +) + +// MessageTemplate is an executable template for a message. +type MessageTemplate struct { + *Message + PluralTemplates map[plural.Form]*internal.Template +} + +// NewMessageTemplate returns a new message template. +func NewMessageTemplate(m *Message) *MessageTemplate { + pluralTemplates := map[plural.Form]*internal.Template{} + setPluralTemplate(pluralTemplates, plural.Zero, m.Zero, m.LeftDelim, m.RightDelim) + setPluralTemplate(pluralTemplates, plural.One, m.One, m.LeftDelim, m.RightDelim) + setPluralTemplate(pluralTemplates, plural.Two, m.Two, m.LeftDelim, m.RightDelim) + setPluralTemplate(pluralTemplates, plural.Few, m.Few, m.LeftDelim, m.RightDelim) + setPluralTemplate(pluralTemplates, plural.Many, m.Many, m.LeftDelim, m.RightDelim) + setPluralTemplate(pluralTemplates, plural.Other, m.Other, m.LeftDelim, m.RightDelim) + if len(pluralTemplates) == 0 { + return nil + } + return &MessageTemplate{ + Message: m, + PluralTemplates: pluralTemplates, + } +} + +func setPluralTemplate(pluralTemplates map[plural.Form]*internal.Template, pluralForm plural.Form, src, leftDelim, rightDelim string) { + if src != "" { + pluralTemplates[pluralForm] = &internal.Template{ + Src: src, + LeftDelim: leftDelim, + RightDelim: rightDelim, + } + } +} + +type pluralFormNotFoundError struct { + pluralForm plural.Form + messageID string +} + +func (e pluralFormNotFoundError) Error() string { + return fmt.Sprintf("message %q has no plural form %q", e.messageID, e.pluralForm) +} + +// Execute executes the template for the plural form and template data. +// Deprecated: This message is no longer used internally by go-i18n and it probably should not have been exported to +// begin with. Its replacement is not exported. 
If you depend on this method for some reason and/or have +// a use case for exporting execute, please file an issue. +func (mt *MessageTemplate) Execute(pluralForm plural.Form, data interface{}, funcs texttemplate.FuncMap) (string, error) { + t := mt.PluralTemplates[pluralForm] + if t == nil { + return "", pluralFormNotFoundError{ + pluralForm: pluralForm, + messageID: mt.Message.ID, + } + } + parser := &template.TextParser{ + Funcs: funcs, + } + return t.Execute(parser, data) +} + +func (mt *MessageTemplate) execute(pluralForm plural.Form, data interface{}, parser template.Parser) (string, error) { + t := mt.PluralTemplates[pluralForm] + if t == nil { + return "", pluralFormNotFoundError{ + pluralForm: pluralForm, + messageID: mt.Message.ID, + } + } + return t.Execute(parser, data) +} diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/i18n/parse.go b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/parse.go new file mode 100644 index 0000000000..57dd7fe7f5 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/parse.go @@ -0,0 +1,166 @@ +package i18n + +import ( + "encoding/json" + "errors" + "fmt" + "os" + + "golang.org/x/text/language" +) + +// MessageFile represents a parsed message file. +type MessageFile struct { + Path string + Tag language.Tag + Format string + Messages []*Message +} + +// ParseMessageFileBytes returns the messages parsed from file. 
+func ParseMessageFileBytes(buf []byte, path string, unmarshalFuncs map[string]UnmarshalFunc) (*MessageFile, error) { + lang, format := parsePath(path) + tag := language.Make(lang) + messageFile := &MessageFile{ + Path: path, + Tag: tag, + Format: format, + } + if len(buf) == 0 { + return messageFile, nil + } + unmarshalFunc := unmarshalFuncs[messageFile.Format] + if unmarshalFunc == nil { + if messageFile.Format == "json" { + unmarshalFunc = json.Unmarshal + } else { + return nil, fmt.Errorf("no unmarshaler registered for %s", messageFile.Format) + } + } + var err error + var raw interface{} + if err = unmarshalFunc(buf, &raw); err != nil { + return nil, err + } + + if messageFile.Messages, err = recGetMessages(raw, isMessage(raw), true); err != nil { + return nil, err + } + + return messageFile, nil +} + +const nestedSeparator = "." + +var errInvalidTranslationFile = errors.New("invalid translation file, expected key-values, got a single value") + +// recGetMessages looks for translation messages inside "raw" parameter, +// scanning nested maps using recursion. 
+func recGetMessages(raw interface{}, isMapMessage, isInitialCall bool) ([]*Message, error) { + var messages []*Message + var err error + + switch data := raw.(type) { + case string: + if isInitialCall { + return nil, errInvalidTranslationFile + } + m, err := NewMessage(data) + return []*Message{m}, err + + case map[string]interface{}: + if isMapMessage { + m, err := NewMessage(data) + return []*Message{m}, err + } + messages = make([]*Message, 0, len(data)) + for id, data := range data { + // recursively scan map items + messages, err = addChildMessages(id, data, messages) + if err != nil { + return nil, err + } + } + + case map[interface{}]interface{}: + if isMapMessage { + m, err := NewMessage(data) + return []*Message{m}, err + } + messages = make([]*Message, 0, len(data)) + for id, data := range data { + strid, ok := id.(string) + if !ok { + return nil, fmt.Errorf("expected key to be string but got %#v", id) + } + // recursively scan map items + messages, err = addChildMessages(strid, data, messages) + if err != nil { + return nil, err + } + } + + case []interface{}: + // Backward compatibility for v1 file format. + messages = make([]*Message, 0, len(data)) + for _, data := range data { + // recursively scan slice items + childMessages, err := recGetMessages(data, isMessage(data), false) + if err != nil { + return nil, err + } + messages = append(messages, childMessages...) 
+ } + + default: + return nil, fmt.Errorf("unsupported file format %T", raw) + } + + return messages, nil +} + +func addChildMessages(id string, data interface{}, messages []*Message) ([]*Message, error) { + isChildMessage := isMessage(data) + childMessages, err := recGetMessages(data, isChildMessage, false) + if err != nil { + return nil, err + } + for _, m := range childMessages { + if isChildMessage { + if m.ID == "" { + m.ID = id // start with innermost key + } + } else { + m.ID = id + nestedSeparator + m.ID // update ID with each nested key on the way + } + messages = append(messages, m) + } + return messages, nil +} + +func parsePath(path string) (langTag, format string) { + formatStartIdx := -1 + for i := len(path) - 1; i >= 0; i-- { + c := path[i] + if os.IsPathSeparator(c) { + if formatStartIdx != -1 { + langTag = path[i+1 : formatStartIdx] + } + return + } + if path[i] == '.' { + if formatStartIdx != -1 { + langTag = path[i+1 : formatStartIdx] + return + } + if formatStartIdx == -1 { + format = path[i+1:] + formatStartIdx = i + } + } + } + if formatStartIdx != -1 { + langTag = path[:formatStartIdx] + } + return +} diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/i18n/template/identity_parser.go b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/template/identity_parser.go new file mode 100644 index 0000000000..baaa1ac3e1 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/template/identity_parser.go @@ -0,0 +1,21 @@ +package template + +// IdentityParser is an Parser that does no parsing and returns tempalte string unchanged. +type IdentityParser struct{} + +func (IdentityParser) Cacheable() bool { + // Caching is not necessary because Parse is cheap. 
+ return false +} + +func (IdentityParser) Parse(src, leftDelim, rightDelim string) (ParsedTemplate, error) { + return &identityParsedTemplate{src: src}, nil +} + +type identityParsedTemplate struct { + src string +} + +func (t *identityParsedTemplate) Execute(data any) (string, error) { + return t.src, nil +} diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/i18n/template/parser.go b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/template/parser.go new file mode 100644 index 0000000000..9a01fd5d2f --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/template/parser.go @@ -0,0 +1,17 @@ +// Package template defines a generic interface for template parsers and implementations of that interface. +package template + +// Parser parses strings into executable templates. +type Parser interface { + // Parse parses src and returns a ParsedTemplate. + Parse(src, leftDelim, rightDelim string) (ParsedTemplate, error) + + // Cacheable returns true if Parse returns ParsedTemplates that are always safe to cache. + Cacheable() bool +} + +// ParsedTemplate is an executable template. +type ParsedTemplate interface { + // Execute applies a parsed template to the specified data. + Execute(data any) (string, error) +} diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/i18n/template/text_parser.go b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/template/text_parser.go new file mode 100644 index 0000000000..76b4ba2dd6 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/i18n/template/text_parser.go @@ -0,0 +1,57 @@ +package template + +import ( + "bytes" + "strings" + "text/template" +) + +// TextParser is a Parser that uses text/template. 
+type TextParser struct { + LeftDelim string + RightDelim string + Funcs template.FuncMap + Option string +} + +func (te *TextParser) Cacheable() bool { + return te.Funcs == nil +} + +func (te *TextParser) Parse(src, leftDelim, rightDelim string) (ParsedTemplate, error) { + if leftDelim == "" { + leftDelim = te.LeftDelim + } + if leftDelim == "" { + leftDelim = "{{" + } + if !strings.Contains(src, leftDelim) { + // Fast path to avoid parsing a template that has no actions. + return &identityParsedTemplate{src: src}, nil + } + + if rightDelim == "" { + rightDelim = te.RightDelim + } + if rightDelim == "" { + rightDelim = "}}" + } + + tmpl, err := template.New("").Delims(leftDelim, rightDelim).Funcs(te.Funcs).Parse(src) + if err != nil { + return nil, err + } + return &parsedTextTemplate{tmpl: tmpl}, nil +} + +type parsedTextTemplate struct { + tmpl *template.Template +} + +func (t *parsedTextTemplate) Execute(data any) (string, error) { + var buf bytes.Buffer + if err := t.tmpl.Execute(&buf, data); err != nil { + return "", err + } + return buf.String(), nil +} diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/doc.go b/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/doc.go new file mode 100644 index 0000000000..c2a71d53eb --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/doc.go @@ -0,0 +1,3 @@ +// Package plural provides support for pluralizing messages +// according to CLDR rules http://cldr.unicode.org/index/cldr-spec/plural-rules +package plural diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/form.go b/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/form.go new file mode 100644 index 0000000000..287a87f22f --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/form.go @@ -0,0 +1,16 @@ +package plural + +// Form represents a language pluralization form as defined here: +// http://cldr.unicode.org/index/cldr-spec/plural-rules +type Form string + +// All defined 
plural forms. +const ( + Invalid Form = "" + Zero Form = "zero" + One Form = "one" + Two Form = "two" + Few Form = "few" + Many Form = "many" + Other Form = "other" +) diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/operands.go b/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/operands.go new file mode 100644 index 0000000000..06c5a7b0a9 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/operands.go @@ -0,0 +1,192 @@ +package plural + +import ( + "fmt" + "strconv" + "strings" +) + +// Operands is a representation of http://unicode.org/reports/tr35/tr35-numbers.html#Operands +// If there is a compact decimal exponent value C, then the N, I, V, W, F, and T values are computed after shifting the decimal point in the original by the ‘c’ value. +// So for 1.2c3, the values are the same as those of 1200: i=1200 and f=0. +// Similarly, 1.2005c3 has i=1200 and f=5 (corresponding to 1200.5). +type Operands struct { + N float64 // absolute value of the source number (integer and decimals) + I int64 // integer digits of n + V int64 // number of visible fraction digits in n, with trailing zeros + W int64 // number of visible fraction digits in n, without trailing zeros + F int64 // visible fractional digits in n, with trailing zeros + T int64 // visible fractional digits in n, without trailing zeros + C int64 // compact decimal exponent value: exponent of the power of 10 used in compact decimal formatting. +} + +// NEqualsAny returns true if o represents an integer equal to any of the arguments. +func (o *Operands) NEqualsAny(any ...int64) bool { + for _, i := range any { + if o.I == i && o.T == 0 { + return true + } + } + return false +} + +// NModEqualsAny returns true if o represents an integer equal to any of the arguments modulo mod. 
+func (o *Operands) NModEqualsAny(mod int64, any ...int64) bool { + modI := o.I % mod + for _, i := range any { + if modI == i && o.T == 0 { + return true + } + } + return false +} + +// NInRange returns true if o represents an integer in the closed interval [from, to]. +func (o *Operands) NInRange(from, to int64) bool { + return o.T == 0 && from <= o.I && o.I <= to +} + +// NModInRange returns true if o represents an integer in the closed interval [from, to] modulo mod. +func (o *Operands) NModInRange(mod, from, to int64) bool { + modI := o.I % mod + return o.T == 0 && from <= modI && modI <= to +} + +// NewOperands returns the operands for number. +func NewOperands(number interface{}) (*Operands, error) { + switch number := number.(type) { + case int: + return newOperandsInt64(int64(number)), nil + case int8: + return newOperandsInt64(int64(number)), nil + case int16: + return newOperandsInt64(int64(number)), nil + case int32: + return newOperandsInt64(int64(number)), nil + case int64: + return newOperandsInt64(number), nil + case string: + return newOperandsString(number) + case float32, float64: + return nil, fmt.Errorf("floats should be formatted into a string") + default: + return nil, fmt.Errorf("invalid type %T; expected integer or string", number) + } +} + +func newOperandsInt64(i int64) *Operands { + if i < 0 { + i = -i + } + return &Operands{float64(i), i, 0, 0, 0, 0, 0} +} + +func splitSignificandExponent(s string) (significand, exponent string) { + i := strings.IndexAny(s, "eE") + if i < 0 { + return s, "" + } + return s[:i], s[i+1:] +} + +func shiftDecimalLeft(s string, n int) string { + if n <= 0 { + return s + } + i := strings.IndexRune(s, '.') + tilt := 0 + if i < 0 { + i = len(s) + tilt = -1 + } + switch { + case n == i: + return "0." + s[:i] + s[i+1+tilt:] + case n > i: + return "0." + strings.Repeat("0", n-i) + s[:i] + s[i+1+tilt:] + default: + return s[:i-n] + "." 
+ s[i-n:i] + s[i+1+tilt:] + } +} + +func shiftDecimalRight(s string, n int) string { + if n <= 0 { + return s + } + i := strings.IndexRune(s, '.') + if i < 0 { + return s + strings.Repeat("0", n) + } + switch rest := len(s) - i - 1; { + case n == rest: + return s[:i] + s[i+1:] + case n > rest: + return s[:i] + s[i+1:] + strings.Repeat("0", n-rest) + default: + return s[:i] + s[i+1:i+1+n] + "." + s[i+1+n:] + } +} + +func applyExponent(s string, exponent int) string { + switch { + case exponent > 0: + return shiftDecimalRight(s, exponent) + case exponent < 0: + return shiftDecimalLeft(s, -exponent) + } + return s +} + +func newOperandsString(s string) (*Operands, error) { + if s[0] == '-' { + s = s[1:] + } + ops := &Operands{} + var err error + ops.N, err = strconv.ParseFloat(s, 64) + if err != nil { + return nil, err + } + significand, exponent := splitSignificandExponent(s) + if exponent != "" { + // We are storing C as an int64 but only allowing + // numbers that fit into the bitsize of an int + // so C is safe to cast as a int later. 
+ ops.C, err = strconv.ParseInt(exponent, 10, 0) + if err != nil { + return nil, err + } + } + value := applyExponent(significand, int(ops.C)) + parts := strings.SplitN(value, ".", 2) + ops.I, err = strconv.ParseInt(parts[0], 10, 64) + if err != nil { + return nil, err + } + if len(parts) == 1 { + return ops, nil + } + fraction := parts[1] + ops.V = int64(len(fraction)) + for i := ops.V - 1; i >= 0; i-- { + if fraction[i] != '0' { + ops.W = i + 1 + break + } + } + if ops.V > 0 { + f, err := strconv.ParseInt(fraction, 10, 0) + if err != nil { + return nil, err + } + ops.F = f + } + if ops.W > 0 { + t, err := strconv.ParseInt(fraction[:ops.W], 10, 0) + if err != nil { + return nil, err + } + ops.T = t + } + return ops, nil +} diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/rule.go b/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/rule.go new file mode 100644 index 0000000000..0869c84ffb --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/rule.go @@ -0,0 +1,44 @@ +package plural + +import ( + "golang.org/x/text/language" +) + +// Rule defines the CLDR plural rules for a language. 
+// http://www.unicode.org/cldr/charts/latest/supplemental/language_plural_rules.html +// http://unicode.org/reports/tr35/tr35-numbers.html#Operands +type Rule struct { + PluralForms map[Form]struct{} + PluralFormFunc func(*Operands) Form +} + +func addPluralRules(rules Rules, ids []string, ps *Rule) { + for _, id := range ids { + if id == "root" { + continue + } + tag := language.MustParse(id) + rules[tag] = ps + } +} + +func newPluralFormSet(pluralForms ...Form) map[Form]struct{} { + set := make(map[Form]struct{}, len(pluralForms)) + for _, plural := range pluralForms { + set[plural] = struct{}{} + } + return set +} + +func intInRange(i, from, to int64) bool { + return from <= i && i <= to +} + +func intEqualsAny(i int64, any ...int64) bool { + for _, a := range any { + if i == a { + return true + } + } + return false +} diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/rule_gen.go b/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/rule_gen.go new file mode 100644 index 0000000000..258311ca97 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/rule_gen.go @@ -0,0 +1,654 @@ +// This file is generated by i18n/plural/codegen/generate.sh; DO NOT EDIT + +package plural + +// DefaultRules returns a map of Rules generated from CLDR language data. 
+func DefaultRules() Rules { + rules := Rules{} + + addPluralRules(rules, []string{"bm", "bo", "dz", "hnj", "id", "ig", "ii", "in", "ja", "jbo", "jv", "jw", "kde", "kea", "km", "ko", "lkt", "lo", "ms", "my", "nqo", "osa", "root", "sah", "ses", "sg", "su", "th", "to", "tpi", "vi", "wo", "yo", "yue", "zh"}, &Rule{ + PluralForms: newPluralFormSet(Other), + PluralFormFunc: func(ops *Operands) Form { + return Other + }, + }) + addPluralRules(rules, []string{"am", "as", "bn", "doi", "fa", "gu", "hi", "kn", "pcm", "zu"}, &Rule{ + PluralForms: newPluralFormSet(One, Other), + PluralFormFunc: func(ops *Operands) Form { + // i = 0 or n = 1 + if intEqualsAny(ops.I, 0) || + ops.NEqualsAny(1) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"ff", "hy", "kab"}, &Rule{ + PluralForms: newPluralFormSet(One, Other), + PluralFormFunc: func(ops *Operands) Form { + // i = 0,1 + if intEqualsAny(ops.I, 0, 1) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"ast", "de", "en", "et", "fi", "fy", "gl", "ia", "io", "ji", "lij", "nl", "sc", "scn", "sv", "sw", "ur", "yi"}, &Rule{ + PluralForms: newPluralFormSet(One, Other), + PluralFormFunc: func(ops *Operands) Form { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"si"}, &Rule{ + PluralForms: newPluralFormSet(One, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 0,1 or i = 0 and f = 1 + if ops.NEqualsAny(0, 1) || + intEqualsAny(ops.I, 0) && intEqualsAny(ops.F, 1) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"ak", "bho", "guw", "ln", "mg", "nso", "pa", "ti", "wa"}, &Rule{ + PluralForms: newPluralFormSet(One, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 0..1 + if ops.NInRange(0, 1) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"tzm"}, &Rule{ + PluralForms: newPluralFormSet(One, Other), + 
PluralFormFunc: func(ops *Operands) Form { + // n = 0..1 or n = 11..99 + if ops.NInRange(0, 1) || + ops.NInRange(11, 99) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"af", "an", "asa", "az", "bal", "bem", "bez", "bg", "brx", "ce", "cgg", "chr", "ckb", "dv", "ee", "el", "eo", "eu", "fo", "fur", "gsw", "ha", "haw", "hu", "jgo", "jmc", "ka", "kaj", "kcg", "kk", "kkj", "kl", "ks", "ksb", "ku", "ky", "lb", "lg", "mas", "mgo", "ml", "mn", "mr", "nah", "nb", "nd", "ne", "nn", "nnh", "no", "nr", "ny", "nyn", "om", "or", "os", "pap", "ps", "rm", "rof", "rwk", "saq", "sd", "sdh", "seh", "sn", "so", "sq", "ss", "ssy", "st", "syr", "ta", "te", "teo", "tig", "tk", "tn", "tr", "ts", "ug", "uz", "ve", "vo", "vun", "wae", "xh", "xog"}, &Rule{ + PluralForms: newPluralFormSet(One, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 1 + if ops.NEqualsAny(1) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"da"}, &Rule{ + PluralForms: newPluralFormSet(One, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 1 or t != 0 and i = 0,1 + if ops.NEqualsAny(1) || + !intEqualsAny(ops.T, 0) && intEqualsAny(ops.I, 0, 1) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"is"}, &Rule{ + PluralForms: newPluralFormSet(One, Other), + PluralFormFunc: func(ops *Operands) Form { + // t = 0 and i % 10 = 1 and i % 100 != 11 or t % 10 = 1 and t % 100 != 11 + if intEqualsAny(ops.T, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) || + intEqualsAny(ops.T%10, 1) && !intEqualsAny(ops.T%100, 11) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"mk"}, &Rule{ + PluralForms: newPluralFormSet(One, Other), + PluralFormFunc: func(ops *Operands) Form { + // v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) || + intEqualsAny(ops.F%10, 1) && 
!intEqualsAny(ops.F%100, 11) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"ceb", "fil", "tl"}, &Rule{ + PluralForms: newPluralFormSet(One, Other), + PluralFormFunc: func(ops *Operands) Form { + // v = 0 and i = 1,2,3 or v = 0 and i % 10 != 4,6,9 or v != 0 and f % 10 != 4,6,9 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I, 1, 2, 3) || + intEqualsAny(ops.V, 0) && !intEqualsAny(ops.I%10, 4, 6, 9) || + !intEqualsAny(ops.V, 0) && !intEqualsAny(ops.F%10, 4, 6, 9) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"lv", "prg"}, &Rule{ + PluralForms: newPluralFormSet(Zero, One, Other), + PluralFormFunc: func(ops *Operands) Form { + // n % 10 = 0 or n % 100 = 11..19 or v = 2 and f % 100 = 11..19 + if ops.NModEqualsAny(10, 0) || + ops.NModInRange(100, 11, 19) || + intEqualsAny(ops.V, 2) && intInRange(ops.F%100, 11, 19) { + return Zero + } + // n % 10 = 1 and n % 100 != 11 or v = 2 and f % 10 = 1 and f % 100 != 11 or v != 2 and f % 10 = 1 + if ops.NModEqualsAny(10, 1) && !ops.NModEqualsAny(100, 11) || + intEqualsAny(ops.V, 2) && intEqualsAny(ops.F%10, 1) && !intEqualsAny(ops.F%100, 11) || + !intEqualsAny(ops.V, 2) && intEqualsAny(ops.F%10, 1) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"lag"}, &Rule{ + PluralForms: newPluralFormSet(Zero, One, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 0 + if ops.NEqualsAny(0) { + return Zero + } + // i = 0,1 and n != 0 + if intEqualsAny(ops.I, 0, 1) && !ops.NEqualsAny(0) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"ksh"}, &Rule{ + PluralForms: newPluralFormSet(Zero, One, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 0 + if ops.NEqualsAny(0) { + return Zero + } + // n = 1 + if ops.NEqualsAny(1) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"blo"}, &Rule{ + PluralForms: newPluralFormSet(Zero, One, Other), + PluralFormFunc: func(ops *Operands) Form { + // 
n = 0 + if ops.NEqualsAny(0) { + return Zero + } + // n = 1 + if ops.NEqualsAny(1) { + return One + } + return Other + }, + }) + addPluralRules(rules, []string{"he", "iw"}, &Rule{ + PluralForms: newPluralFormSet(One, Two, Other), + PluralFormFunc: func(ops *Operands) Form { + // i = 1 and v = 0 or i = 0 and v != 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) || + intEqualsAny(ops.I, 0) && !intEqualsAny(ops.V, 0) { + return One + } + // i = 2 and v = 0 + if intEqualsAny(ops.I, 2) && intEqualsAny(ops.V, 0) { + return Two + } + return Other + }, + }) + addPluralRules(rules, []string{"iu", "naq", "sat", "se", "sma", "smi", "smj", "smn", "sms"}, &Rule{ + PluralForms: newPluralFormSet(One, Two, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 1 + if ops.NEqualsAny(1) { + return One + } + // n = 2 + if ops.NEqualsAny(2) { + return Two + } + return Other + }, + }) + addPluralRules(rules, []string{"shi"}, &Rule{ + PluralForms: newPluralFormSet(One, Few, Other), + PluralFormFunc: func(ops *Operands) Form { + // i = 0 or n = 1 + if intEqualsAny(ops.I, 0) || + ops.NEqualsAny(1) { + return One + } + // n = 2..10 + if ops.NInRange(2, 10) { + return Few + } + return Other + }, + }) + addPluralRules(rules, []string{"mo", "ro"}, &Rule{ + PluralForms: newPluralFormSet(One, Few, Other), + PluralFormFunc: func(ops *Operands) Form { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + // v != 0 or n = 0 or n != 1 and n % 100 = 1..19 + if !intEqualsAny(ops.V, 0) || + ops.NEqualsAny(0) || + !ops.NEqualsAny(1) && ops.NModInRange(100, 1, 19) { + return Few + } + return Other + }, + }) + addPluralRules(rules, []string{"bs", "hr", "sh", "sr"}, &Rule{ + PluralForms: newPluralFormSet(One, Few, Other), + PluralFormFunc: func(ops *Operands) Form { + // v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) || + 
intEqualsAny(ops.F%10, 1) && !intEqualsAny(ops.F%100, 11) { + return One + } + // v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 2, 4) && !intInRange(ops.I%100, 12, 14) || + intInRange(ops.F%10, 2, 4) && !intInRange(ops.F%100, 12, 14) { + return Few + } + return Other + }, + }) + addPluralRules(rules, []string{"fr"}, &Rule{ + PluralForms: newPluralFormSet(One, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // i = 0,1 + if intEqualsAny(ops.I, 0, 1) { + return One + } + // e = 0 and i != 0 and i % 1000000 = 0 and v = 0 or e != 0..5 + if intEqualsAny(ops.C, 0) && !intEqualsAny(ops.I, 0) && intEqualsAny(ops.I%1000000, 0) && intEqualsAny(ops.V, 0) || + !intInRange(ops.C, 0, 5) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"pt"}, &Rule{ + PluralForms: newPluralFormSet(One, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // i = 0..1 + if intInRange(ops.I, 0, 1) { + return One + } + // e = 0 and i != 0 and i % 1000000 = 0 and v = 0 or e != 0..5 + if intEqualsAny(ops.C, 0) && !intEqualsAny(ops.I, 0) && intEqualsAny(ops.I%1000000, 0) && intEqualsAny(ops.V, 0) || + !intInRange(ops.C, 0, 5) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"ca", "it", "pt_PT", "vec"}, &Rule{ + PluralForms: newPluralFormSet(One, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + // e = 0 and i != 0 and i % 1000000 = 0 and v = 0 or e != 0..5 + if intEqualsAny(ops.C, 0) && !intEqualsAny(ops.I, 0) && intEqualsAny(ops.I%1000000, 0) && intEqualsAny(ops.V, 0) || + !intInRange(ops.C, 0, 5) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"es"}, &Rule{ + PluralForms: newPluralFormSet(One, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 1 + if ops.NEqualsAny(1) { + return One + } 
+ // e = 0 and i != 0 and i % 1000000 = 0 and v = 0 or e != 0..5 + if intEqualsAny(ops.C, 0) && !intEqualsAny(ops.I, 0) && intEqualsAny(ops.I%1000000, 0) && intEqualsAny(ops.V, 0) || + !intInRange(ops.C, 0, 5) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"gd"}, &Rule{ + PluralForms: newPluralFormSet(One, Two, Few, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 1,11 + if ops.NEqualsAny(1, 11) { + return One + } + // n = 2,12 + if ops.NEqualsAny(2, 12) { + return Two + } + // n = 3..10,13..19 + if ops.NInRange(3, 10) || ops.NInRange(13, 19) { + return Few + } + return Other + }, + }) + addPluralRules(rules, []string{"sl"}, &Rule{ + PluralForms: newPluralFormSet(One, Two, Few, Other), + PluralFormFunc: func(ops *Operands) Form { + // v = 0 and i % 100 = 1 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 1) { + return One + } + // v = 0 and i % 100 = 2 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 2) { + return Two + } + // v = 0 and i % 100 = 3..4 or v != 0 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%100, 3, 4) || + !intEqualsAny(ops.V, 0) { + return Few + } + return Other + }, + }) + addPluralRules(rules, []string{"dsb", "hsb"}, &Rule{ + PluralForms: newPluralFormSet(One, Two, Few, Other), + PluralFormFunc: func(ops *Operands) Form { + // v = 0 and i % 100 = 1 or f % 100 = 1 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 1) || + intEqualsAny(ops.F%100, 1) { + return One + } + // v = 0 and i % 100 = 2 or f % 100 = 2 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 2) || + intEqualsAny(ops.F%100, 2) { + return Two + } + // v = 0 and i % 100 = 3..4 or f % 100 = 3..4 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%100, 3, 4) || + intInRange(ops.F%100, 3, 4) { + return Few + } + return Other + }, + }) + addPluralRules(rules, []string{"cs", "sk"}, &Rule{ + PluralForms: newPluralFormSet(One, Few, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // i = 1 and v = 0 + if 
intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + // i = 2..4 and v = 0 + if intInRange(ops.I, 2, 4) && intEqualsAny(ops.V, 0) { + return Few + } + // v != 0 + if !intEqualsAny(ops.V, 0) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"pl"}, &Rule{ + PluralForms: newPluralFormSet(One, Few, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + // v = 0 and i % 10 = 2..4 and i % 100 != 12..14 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 2, 4) && !intInRange(ops.I%100, 12, 14) { + return Few + } + // v = 0 and i != 1 and i % 10 = 0..1 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 12..14 + if intEqualsAny(ops.V, 0) && !intEqualsAny(ops.I, 1) && intInRange(ops.I%10, 0, 1) || + intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 5, 9) || + intEqualsAny(ops.V, 0) && intInRange(ops.I%100, 12, 14) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"be"}, &Rule{ + PluralForms: newPluralFormSet(One, Few, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // n % 10 = 1 and n % 100 != 11 + if ops.NModEqualsAny(10, 1) && !ops.NModEqualsAny(100, 11) { + return One + } + // n % 10 = 2..4 and n % 100 != 12..14 + if ops.NModInRange(10, 2, 4) && !ops.NModInRange(100, 12, 14) { + return Few + } + // n % 10 = 0 or n % 10 = 5..9 or n % 100 = 11..14 + if ops.NModEqualsAny(10, 0) || + ops.NModInRange(10, 5, 9) || + ops.NModInRange(100, 11, 14) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"lt"}, &Rule{ + PluralForms: newPluralFormSet(One, Few, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // n % 10 = 1 and n % 100 != 11..19 + if ops.NModEqualsAny(10, 1) && !ops.NModInRange(100, 11, 19) { + return One + } + // n % 10 = 2..9 and n % 100 != 11..19 + if ops.NModInRange(10, 2, 9) && !ops.NModInRange(100, 11, 19) { + return Few + } + // f != 0 + if 
!intEqualsAny(ops.F, 0) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"ru", "uk"}, &Rule{ + PluralForms: newPluralFormSet(One, Few, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // v = 0 and i % 10 = 1 and i % 100 != 11 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) { + return One + } + // v = 0 and i % 10 = 2..4 and i % 100 != 12..14 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 2, 4) && !intInRange(ops.I%100, 12, 14) { + return Few + } + // v = 0 and i % 10 = 0 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 11..14 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 0) || + intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 5, 9) || + intEqualsAny(ops.V, 0) && intInRange(ops.I%100, 11, 14) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"br"}, &Rule{ + PluralForms: newPluralFormSet(One, Two, Few, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // n % 10 = 1 and n % 100 != 11,71,91 + if ops.NModEqualsAny(10, 1) && !ops.NModEqualsAny(100, 11, 71, 91) { + return One + } + // n % 10 = 2 and n % 100 != 12,72,92 + if ops.NModEqualsAny(10, 2) && !ops.NModEqualsAny(100, 12, 72, 92) { + return Two + } + // n % 10 = 3..4,9 and n % 100 != 10..19,70..79,90..99 + if (ops.NModInRange(10, 3, 4) || ops.NModEqualsAny(10, 9)) && !(ops.NModInRange(100, 10, 19) || ops.NModInRange(100, 70, 79) || ops.NModInRange(100, 90, 99)) { + return Few + } + // n != 0 and n % 1000000 = 0 + if !ops.NEqualsAny(0) && ops.NModEqualsAny(1000000, 0) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"mt"}, &Rule{ + PluralForms: newPluralFormSet(One, Two, Few, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 1 + if ops.NEqualsAny(1) { + return One + } + // n = 2 + if ops.NEqualsAny(2) { + return Two + } + // n = 0 or n % 100 = 3..10 + if ops.NEqualsAny(0) || + ops.NModInRange(100, 3, 10) { + return Few + } + // n % 100 = 
11..19 + if ops.NModInRange(100, 11, 19) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"ga"}, &Rule{ + PluralForms: newPluralFormSet(One, Two, Few, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 1 + if ops.NEqualsAny(1) { + return One + } + // n = 2 + if ops.NEqualsAny(2) { + return Two + } + // n = 3..6 + if ops.NInRange(3, 6) { + return Few + } + // n = 7..10 + if ops.NInRange(7, 10) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"gv"}, &Rule{ + PluralForms: newPluralFormSet(One, Two, Few, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // v = 0 and i % 10 = 1 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) { + return One + } + // v = 0 and i % 10 = 2 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 2) { + return Two + } + // v = 0 and i % 100 = 0,20,40,60,80 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 0, 20, 40, 60, 80) { + return Few + } + // v != 0 + if !intEqualsAny(ops.V, 0) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"kw"}, &Rule{ + PluralForms: newPluralFormSet(Zero, One, Two, Few, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 0 + if ops.NEqualsAny(0) { + return Zero + } + // n = 1 + if ops.NEqualsAny(1) { + return One + } + // n % 100 = 2,22,42,62,82 or n % 1000 = 0 and n % 100000 = 1000..20000,40000,60000,80000 or n != 0 and n % 1000000 = 100000 + if ops.NModEqualsAny(100, 2, 22, 42, 62, 82) || + ops.NModEqualsAny(1000, 0) && (ops.NModInRange(100000, 1000, 20000) || ops.NModEqualsAny(100000, 40000, 60000, 80000)) || + !ops.NEqualsAny(0) && ops.NModEqualsAny(1000000, 100000) { + return Two + } + // n % 100 = 3,23,43,63,83 + if ops.NModEqualsAny(100, 3, 23, 43, 63, 83) { + return Few + } + // n != 1 and n % 100 = 1,21,41,61,81 + if !ops.NEqualsAny(1) && ops.NModEqualsAny(100, 1, 21, 41, 61, 81) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"ar", 
"ars"}, &Rule{ + PluralForms: newPluralFormSet(Zero, One, Two, Few, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 0 + if ops.NEqualsAny(0) { + return Zero + } + // n = 1 + if ops.NEqualsAny(1) { + return One + } + // n = 2 + if ops.NEqualsAny(2) { + return Two + } + // n % 100 = 3..10 + if ops.NModInRange(100, 3, 10) { + return Few + } + // n % 100 = 11..99 + if ops.NModInRange(100, 11, 99) { + return Many + } + return Other + }, + }) + addPluralRules(rules, []string{"cy"}, &Rule{ + PluralForms: newPluralFormSet(Zero, One, Two, Few, Many, Other), + PluralFormFunc: func(ops *Operands) Form { + // n = 0 + if ops.NEqualsAny(0) { + return Zero + } + // n = 1 + if ops.NEqualsAny(1) { + return One + } + // n = 2 + if ops.NEqualsAny(2) { + return Two + } + // n = 3 + if ops.NEqualsAny(3) { + return Few + } + // n = 6 + if ops.NEqualsAny(6) { + return Many + } + return Other + }, + }) + + return rules +} diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/rules.go b/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/rules.go new file mode 100644 index 0000000000..87eb8369dd --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/internal/plural/rules.go @@ -0,0 +1,24 @@ +package plural + +import "golang.org/x/text/language" + +// Rules is a set of plural rules by language tag. +type Rules map[language.Tag]*Rule + +// Rule returns the closest matching plural rule for the language tag +// or nil if no rule could be found. 
+func (r Rules) Rule(tag language.Tag) *Rule { + t := tag + for { + if rule := r[t]; rule != nil { + return rule + } + t = t.Parent() + if t.IsRoot() { + break + } + } + base, _ := tag.Base() + baseTag, _ := language.Parse(base.String()) + return r[baseTag] +} diff --git a/vendor/github.com/nicksnyder/go-i18n/v2/internal/template.go b/vendor/github.com/nicksnyder/go-i18n/v2/internal/template.go new file mode 100644 index 0000000000..e4b5f476b4 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/v2/internal/template.go @@ -0,0 +1,36 @@ +package internal + +import ( + "sync" + + "github.com/nicksnyder/go-i18n/v2/i18n/template" +) + +// Template stores the template for a string and a cached version of the parsed template if they are cacheable. +type Template struct { + Src string + LeftDelim string + RightDelim string + + parseOnce sync.Once + parsedTemplate template.ParsedTemplate + parseError error +} + +func (t *Template) Execute(parser template.Parser, data interface{}) (string, error) { + var pt template.ParsedTemplate + var err error + if parser.Cacheable() { + t.parseOnce.Do(func() { + t.parsedTemplate, t.parseError = parser.Parse(t.Src, t.LeftDelim, t.RightDelim) + }) + pt, err = t.parsedTemplate, t.parseError + } else { + pt, err = parser.Parse(t.Src, t.LeftDelim, t.RightDelim) + } + + if err != nil { + return "", err + } + return pt.Execute(data) +} diff --git a/vendor/golang.org/x/text/cases/icu.go b/vendor/golang.org/x/text/cases/icu.go index 2dc84b39ef..db7c237ccf 100644 --- a/vendor/golang.org/x/text/cases/icu.go +++ b/vendor/golang.org/x/text/cases/icu.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build icu -// +build icu package cases diff --git a/vendor/golang.org/x/text/cases/tables10.0.0.go b/vendor/golang.org/x/text/cases/tables10.0.0.go index ca9923105e..bd28ae145d 100644 --- a/vendor/golang.org/x/text/cases/tables10.0.0.go +++ b/vendor/golang.org/x/text/cases/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package cases diff --git a/vendor/golang.org/x/text/cases/tables11.0.0.go b/vendor/golang.org/x/text/cases/tables11.0.0.go index b1106b4171..ce00ce3725 100644 --- a/vendor/golang.org/x/text/cases/tables11.0.0.go +++ b/vendor/golang.org/x/text/cases/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package cases diff --git a/vendor/golang.org/x/text/cases/tables12.0.0.go b/vendor/golang.org/x/text/cases/tables12.0.0.go index ae7dc24072..84d841b149 100644 --- a/vendor/golang.org/x/text/cases/tables12.0.0.go +++ b/vendor/golang.org/x/text/cases/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package cases diff --git a/vendor/golang.org/x/text/cases/tables13.0.0.go b/vendor/golang.org/x/text/cases/tables13.0.0.go index 68d2981d18..6187e6b462 100644 --- a/vendor/golang.org/x/text/cases/tables13.0.0.go +++ b/vendor/golang.org/x/text/cases/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package cases diff --git a/vendor/golang.org/x/text/cases/tables15.0.0.go b/vendor/golang.org/x/text/cases/tables15.0.0.go index e431b99537..aee0f31085 100644 --- a/vendor/golang.org/x/text/cases/tables15.0.0.go +++ b/vendor/golang.org/x/text/cases/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package cases diff --git a/vendor/golang.org/x/text/cases/tables9.0.0.go b/vendor/golang.org/x/text/cases/tables9.0.0.go index 636d5d14df..3aeb7be6d0 100644 --- a/vendor/golang.org/x/text/cases/tables9.0.0.go +++ b/vendor/golang.org/x/text/cases/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package cases diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go index 8a7392c4a1..784bb88087 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 package bidirule diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go index bb0a920018..8e1e943955 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !go1.10 -// +build !go1.10 package bidirule diff --git a/vendor/golang.org/x/text/secure/precis/tables10.0.0.go b/vendor/golang.org/x/text/secure/precis/tables10.0.0.go index 8164749502..93cbffaca3 100644 --- a/vendor/golang.org/x/text/secure/precis/tables10.0.0.go +++ b/vendor/golang.org/x/text/secure/precis/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package precis diff --git a/vendor/golang.org/x/text/secure/precis/tables11.0.0.go b/vendor/golang.org/x/text/secure/precis/tables11.0.0.go index a40e55d6c9..6cea210e1e 100644 --- a/vendor/golang.org/x/text/secure/precis/tables11.0.0.go +++ b/vendor/golang.org/x/text/secure/precis/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package precis diff --git a/vendor/golang.org/x/text/secure/precis/tables12.0.0.go b/vendor/golang.org/x/text/secure/precis/tables12.0.0.go index 254bbc7930..1b506cdf43 100644 --- a/vendor/golang.org/x/text/secure/precis/tables12.0.0.go +++ b/vendor/golang.org/x/text/secure/precis/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package precis diff --git a/vendor/golang.org/x/text/secure/precis/tables13.0.0.go b/vendor/golang.org/x/text/secure/precis/tables13.0.0.go index 7bc1a1629c..0a467f5912 100644 --- a/vendor/golang.org/x/text/secure/precis/tables13.0.0.go +++ b/vendor/golang.org/x/text/secure/precis/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package precis diff --git a/vendor/golang.org/x/text/secure/precis/tables15.0.0.go b/vendor/golang.org/x/text/secure/precis/tables15.0.0.go index 48c3227777..0628606121 100644 --- a/vendor/golang.org/x/text/secure/precis/tables15.0.0.go +++ b/vendor/golang.org/x/text/secure/precis/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package precis diff --git a/vendor/golang.org/x/text/secure/precis/tables9.0.0.go b/vendor/golang.org/x/text/secure/precis/tables9.0.0.go index 2292b7cb0c..0a104f79a6 100644 --- a/vendor/golang.org/x/text/secure/precis/tables9.0.0.go +++ b/vendor/golang.org/x/text/secure/precis/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package precis diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index 42fa8d72ce..d2bd71181d 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 56a0e1ea21..f76bdca273 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index baacf32b43..3aa2c3bdf8 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go index ffadb7bebd..a713757906 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go index 92cce5802c..f15746f7df 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go index f517fdb202..c164d37917 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build !go1.10 -// +build !go1.10 package bidi diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index f5a0788277..1af161c756 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index cb7239c437..eb73ecc373 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 11b2733001..276cb8d8c0 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index f65785e8ac..0cceffd731 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go index e1858b879d..b0819e42d0 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index 0175eae50a..bf65457d9b 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package norm diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go index cd9d91cafb..07c1cb17af 100644 --- a/vendor/golang.org/x/text/width/tables10.0.0.go +++ b/vendor/golang.org/x/text/width/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package width diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go index 327eaef9b7..89288b3dae 100644 --- a/vendor/golang.org/x/text/width/tables11.0.0.go +++ b/vendor/golang.org/x/text/width/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package width diff --git a/vendor/golang.org/x/text/width/tables12.0.0.go b/vendor/golang.org/x/text/width/tables12.0.0.go index 5c14ade6d9..755ee91221 100644 --- a/vendor/golang.org/x/text/width/tables12.0.0.go +++ b/vendor/golang.org/x/text/width/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package width diff --git a/vendor/golang.org/x/text/width/tables13.0.0.go b/vendor/golang.org/x/text/width/tables13.0.0.go index b1fcb522cb..40c169edf6 100644 --- a/vendor/golang.org/x/text/width/tables13.0.0.go +++ b/vendor/golang.org/x/text/width/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package width diff --git a/vendor/golang.org/x/text/width/tables15.0.0.go b/vendor/golang.org/x/text/width/tables15.0.0.go index 4b91e3384d..2b85289675 100644 --- a/vendor/golang.org/x/text/width/tables15.0.0.go +++ b/vendor/golang.org/x/text/width/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package width diff --git a/vendor/golang.org/x/text/width/tables9.0.0.go b/vendor/golang.org/x/text/width/tables9.0.0.go index 6781f3d960..d981330a9f 100644 --- a/vendor/golang.org/x/text/width/tables9.0.0.go +++ b/vendor/golang.org/x/text/width/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build !go1.10 -// +build !go1.10 package width diff --git a/vendor/modules.txt b/vendor/modules.txt index a07f3a6ce1..2365c5eb09 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,6 +1,10 @@ # dario.cat/mergo v1.0.0 ## explicit; go 1.13 dario.cat/mergo +# github.com/BurntSushi/toml v1.3.2 +## explicit; go 1.16 +github.com/BurntSushi/toml +github.com/BurntSushi/toml/internal # github.com/DATA-DOG/go-sqlmock v1.5.0 ## explicit github.com/DATA-DOG/go-sqlmock @@ -528,6 +532,12 @@ github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.2 ## explicit; go 1.12 github.com/modern-go/reflect2 +# github.com/nicksnyder/go-i18n/v2 v2.4.0 +## explicit; go 1.18 +github.com/nicksnyder/go-i18n/v2/i18n +github.com/nicksnyder/go-i18n/v2/i18n/template +github.com/nicksnyder/go-i18n/v2/internal +github.com/nicksnyder/go-i18n/v2/internal/plural # github.com/oklog/run v1.0.0 ## explicit github.com/oklog/run @@ -743,8 +753,8 @@ golang.org/x/sys/cpu golang.org/x/sys/execabs golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.13.0 -## explicit; go 1.17 +# golang.org/x/text v0.14.0 +## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/encoding golang.org/x/text/encoding/charmap