diff --git a/server/enterprise/elasticsearch/common/indexing_job.go b/server/enterprise/elasticsearch/common/indexing_job.go
index cb0cc481055..0844d9d170b 100644
--- a/server/enterprise/elasticsearch/common/indexing_job.go
+++ b/server/enterprise/elasticsearch/common/indexing_job.go
@@ -37,6 +37,7 @@ const (
 )
 
 func NewIndexerWorker(name string,
+	backend string,
 	jobServer *jobs.JobServer,
 	logger mlog.LoggerIFace,
 	fileBackend filestore.FileBackend,
@@ -47,6 +48,7 @@ func NewIndexerWorker(name string,
 ) *IndexerWorker {
 	return &IndexerWorker{
 		name:      name,
+		backend:   backend,
 		stoppedCh: make(chan bool, 1),
 		jobs:      make(chan model.Job),
 		jobServer: jobServer,
@@ -61,7 +63,8 @@ func NewIndexerWorker(name string,
 }
 
 type IndexerWorker struct {
-	name string
+	name    string
+	backend string
 	// stateMut protects stopCh and stopped and helps enforce
 	// ordering in case subsequent Run or Stop calls are made.
 	stateMut sync.Mutex
@@ -234,7 +237,7 @@ func (worker *IndexerWorker) DoJob(job *model.Job) {
 	}
 
 	worker.initEntitiesToIndex(job)
-	progress, err := initProgress(logger, worker.jobServer, job)
+	progress, err := initProgress(logger, worker.jobServer, job, worker.backend)
 	if err != nil {
 		return
 	}
@@ -663,7 +666,7 @@ func (worker *IndexerWorker) BulkIndexUsers(users []*model.UserForIndexing, prog
 	return users[len(users)-1], nil
 }
 
-func initProgress(logger mlog.LoggerIFace, jobServer *jobs.JobServer, job *model.Job) (IndexingProgress, error) {
+func initProgress(logger mlog.LoggerIFace, jobServer *jobs.JobServer, job *model.Job, backend string) (IndexingProgress, error) {
 	progress := IndexingProgress{
 		Now:       time.Now(),
 		DonePosts: false,
@@ -674,12 +677,12 @@ func initProgress(logger mlog.LoggerIFace, jobServer *jobs.JobServer, job *model
 		EndAtTime: model.GetMillis(),
 	}
 
-	progress, err := parseStartTime(logger, jobServer, progress, job)
+	progress, err := parseStartTime(logger, jobServer, progress, job, backend)
 	if err != nil {
 		return progress, err
 	}
 
-	progress, err = parseEndTime(logger, jobServer, progress, job)
+	progress, err = parseEndTime(logger, jobServer, progress, job, backend)
 	if err != nil {
 		return progress, err
 	}
@@ -691,13 +694,13 @@ func initProgress(logger mlog.LoggerIFace, jobServer *jobs.JobServer, job *model
 	return progress, nil
 }
 
-func parseStartTime(logger mlog.LoggerIFace, jobServer *jobs.JobServer, progress IndexingProgress, job *model.Job) (IndexingProgress, error) {
+func parseStartTime(logger mlog.LoggerIFace, jobServer *jobs.JobServer, progress IndexingProgress, job *model.Job, backend string) (IndexingProgress, error) {
 	// Extract the start time, if it is set.
 	if startString, ok := job.Data["start_time"]; ok {
 		startInt, err := strconv.ParseInt(startString, 10, 64)
 		if err != nil {
 			logger.Error("Worker: Failed to parse start_time for job", mlog.String("start_time", startString), mlog.Err(err))
-			appError := model.NewAppError("IndexerWorker", "ent.elasticsearch.indexer.do_job.parse_start_time.error", nil, "", http.StatusInternalServerError).Wrap(err)
+			appError := model.NewAppError("IndexerWorker", "ent.elasticsearch.indexer.do_job.parse_start_time.error", map[string]any{"Backend": backend}, "", http.StatusInternalServerError).Wrap(err)
 			if err := jobServer.SetJobError(job, appError); err != nil {
 				logger.Error("Worker: Failed to set job error", mlog.Err(err), mlog.NamedErr("set_error", appError))
 			}
@@ -722,12 +725,12 @@ func parseStartTime(logger mlog.LoggerIFace, jobServer *jobs.JobServer, progress
 	return progress, nil
 }
 
-func parseEndTime(logger mlog.LoggerIFace, jobServer *jobs.JobServer, progress IndexingProgress, job *model.Job) (IndexingProgress, error) {
+func parseEndTime(logger mlog.LoggerIFace, jobServer *jobs.JobServer, progress IndexingProgress, job *model.Job, backend string) (IndexingProgress, error) {
 	if endString, ok := job.Data["end_time"]; ok {
 		endInt, err := strconv.ParseInt(endString, 10, 64)
 		if err != nil {
 			logger.Error("Worker: Failed to parse end_time for job", mlog.String("end_time", endString), mlog.Err(err))
-			appError := model.NewAppError("IndexerWorker", "ent.elasticsearch.indexer.do_job.parse_end_time.error", nil, "", http.StatusInternalServerError).Wrap(err)
+			appError := model.NewAppError("IndexerWorker", "ent.elasticsearch.indexer.do_job.parse_end_time.error", map[string]any{"Backend": backend}, "", http.StatusInternalServerError).Wrap(err)
 			if err := jobServer.SetJobError(job, appError); err != nil {
 				logger.Error("Worker: Failed to set job errorv", mlog.Err(err), mlog.NamedErr("set_error", appError))
 			}
diff --git a/server/enterprise/elasticsearch/elasticsearch/elasticsearch.go b/server/enterprise/elasticsearch/elasticsearch/elasticsearch.go
index 64a22be12e9..89be0fcb280 100644
--- a/server/enterprise/elasticsearch/elasticsearch/elasticsearch.go
+++ b/server/enterprise/elasticsearch/elasticsearch/elasticsearch.go
@@ -205,7 +205,7 @@ func (es *ElasticsearchInterfaceImpl) Stop() *model.AppError {
 	defer es.mutex.Unlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.start", "ent.elasticsearch.stop.already_stopped.app_error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.start", "ent.elasticsearch.stop.already_stopped.app_error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	es.client = nil
@@ -239,7 +239,7 @@ func (es *ElasticsearchInterfaceImpl) IndexPost(post *model.Post, teamId string)
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.IndexPost", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.IndexPost", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	indexName := common.BuildPostIndexName(*es.Platform.Config().ElasticsearchSettings.AggregatePostsAfterDays,
@@ -301,7 +301,7 @@ func (es *ElasticsearchInterfaceImpl) SearchPosts(channels model.ChannelList, se
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return []string{}, nil, model.NewAppError("Elasticsearch.SearchPosts", "ent.elasticsearch.search_posts.disabled", nil, "", http.StatusInternalServerError)
+		return []string{}, nil, model.NewAppError("Elasticsearch.SearchPosts", "ent.elasticsearch.search_posts.disabled", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	var channelIds []string
@@ -625,7 +625,7 @@ func (es *ElasticsearchInterfaceImpl) DeletePost(post *model.Post) *model.AppErr
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.DeletePost", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.DeletePost", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	// This is racy with index aggregation, but since the posts are verified in the database when returning search
@@ -647,7 +647,7 @@ func (es *ElasticsearchInterfaceImpl) DeleteChannelPosts(rctx request.CTX, chann
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.DeleteChannelPosts", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.DeleteChannelPosts", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	postIndexes, err := es.getPostIndexNames()
@@ -683,7 +683,7 @@ func (es *ElasticsearchInterfaceImpl) DeleteUserPosts(rctx request.CTX, userID s
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.DeleteUserPosts", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.DeleteUserPosts", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	postIndexes, err := es.getPostIndexNames()
@@ -724,7 +724,7 @@ func (es *ElasticsearchInterfaceImpl) deletePost(indexName, postID string) *mode
 			Id_: model.NewPointer(postID),
 		})
 		if err != nil {
-			return model.NewAppError("Elasticsearch.IndexPost", model.NoTranslation, nil, "", http.StatusInternalServerError).Wrap(err)
+			return model.NewAppError("Elasticsearch.DeletePost", model.NoTranslation, nil, "", http.StatusInternalServerError).Wrap(err)
 		}
 	} else {
 		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*es.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -742,7 +742,7 @@ func (es *ElasticsearchInterfaceImpl) IndexChannel(rctx request.CTX, channel *mo
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.IndexChannel", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.IndexChannel", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	indexName := *es.Platform.Config().ElasticsearchSettings.IndexPrefix + common.IndexBaseChannels
@@ -756,7 +756,7 @@ func (es *ElasticsearchInterfaceImpl) IndexChannel(rctx request.CTX, channel *mo
 			Id_: model.NewPointer(searchChannel.Id),
 		}, searchChannel)
 		if err != nil {
-			return model.NewAppError("Elasticsearch.IndexPost", model.NoTranslation, nil, "", http.StatusInternalServerError).Wrap(err)
+			return model.NewAppError("Elasticsearch.IndexChannel", model.NoTranslation, nil, "", http.StatusInternalServerError).Wrap(err)
 		}
 	} else {
 		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*es.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -783,7 +783,7 @@ func (es *ElasticsearchInterfaceImpl) SearchChannels(teamId, userID string, term
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return []string{}, model.NewAppError("Elasticsearch.SearchChannels", "ent.elasticsearch.search_channels.disabled", nil, "", http.StatusInternalServerError)
+		return []string{}, model.NewAppError("Elasticsearch.SearchChannels", "ent.elasticsearch.search_channels.disabled", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*es.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -878,7 +878,7 @@ func (es *ElasticsearchInterfaceImpl) DeleteChannel(channel *model.Channel) *mod
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.DeleteChannel", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.DeleteChannel", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	var err error
@@ -909,7 +909,7 @@ func (es *ElasticsearchInterfaceImpl) IndexUser(rctx request.CTX, user *model.Us
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.IndexUser", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.IndexUser", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	indexName := *es.Platform.Config().ElasticsearchSettings.IndexPrefix + common.IndexBaseUsers
@@ -951,7 +951,7 @@ func (es *ElasticsearchInterfaceImpl) autocompleteUsers(contextCategory string,
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return nil, model.NewAppError("Elasticsearch.autocompleteUsers", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return nil, model.NewAppError("Elasticsearch.autocompleteUsers", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*es.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -1064,7 +1064,7 @@ func (es *ElasticsearchInterfaceImpl) autocompleteUsersNotInChannel(teamId, chan
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return nil, model.NewAppError("Elasticsearch.autocompleteUsersNotInChannel", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return nil, model.NewAppError("Elasticsearch.autocompleteUsersNotInChannel", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*es.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -1227,7 +1227,7 @@ func (es *ElasticsearchInterfaceImpl) DeleteUser(user *model.User) *model.AppErr
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.DeleteUser", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.DeleteUser", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	var err error
@@ -1259,7 +1259,7 @@ func (es *ElasticsearchInterfaceImpl) TestConfig(rctx request.CTX, cfg *model.Co
 	}
 
 	if !*cfg.ElasticsearchSettings.EnableIndexing {
-		return model.NewAppError("Elasticsearch.TestConfig", "ent.elasticsearch.test_config.indexing_disabled.error", nil, "", http.StatusNotImplemented)
+		return model.NewAppError("Elasticsearch.TestConfig", "ent.elasticsearch.test_config.indexing_disabled.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusNotImplemented)
 	}
 
 	client, appErr := createTypedClient(rctx.Logger(), cfg, es.Platform.FileBackend(), true)
@@ -1294,7 +1294,7 @@ func (es *ElasticsearchInterfaceImpl) PurgeIndexes(rctx request.CTX) *model.AppE
 	}
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.PurgeIndexes", "ent.elasticsearch.generic.disabled", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.PurgeIndexes", "ent.elasticsearch.generic.disabled", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	indexPrefix := *es.Platform.Config().ElasticsearchSettings.IndexPrefix
@@ -1343,7 +1343,7 @@ func (es *ElasticsearchInterfaceImpl) PurgeIndexList(rctx request.CTX, indexes [
 	}
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.PurgeIndexList", "ent.elasticsearch.generic.disabled", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.PurgeIndexList", "ent.elasticsearch.generic.disabled", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	indexPrefix := *es.Platform.Config().ElasticsearchSettings.IndexPrefix
@@ -1412,7 +1412,7 @@ func (es *ElasticsearchInterfaceImpl) DataRetentionDeleteIndexes(rctx request.CT
 	}
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.DataRetentionDeleteIndexes", "ent.elasticsearch.generic.disabled", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.DataRetentionDeleteIndexes", "ent.elasticsearch.generic.disabled", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx := context.Background()
@@ -1441,7 +1441,7 @@ func (es *ElasticsearchInterfaceImpl) IndexFile(file *model.FileInfo, channelId
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.IndexFile", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.IndexFile", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	indexName := *es.Platform.Config().ElasticsearchSettings.IndexPrefix + common.IndexBaseFiles
@@ -1482,7 +1482,7 @@ func (es *ElasticsearchInterfaceImpl) SearchFiles(channels model.ChannelList, se
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return []string{}, model.NewAppError("Elasticsearch.SearchPosts", "ent.elasticsearch.search_files.disabled", nil, "", http.StatusInternalServerError)
+		return []string{}, model.NewAppError("Elasticsearch.SearchPosts", "ent.elasticsearch.search_files.disabled", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	var channelIds []string
@@ -1723,7 +1723,7 @@ func (es *ElasticsearchInterfaceImpl) DeleteFile(fileID string) *model.AppError
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.DeleteFile", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.DeleteFile", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	var err error
@@ -1754,7 +1754,7 @@ func (es *ElasticsearchInterfaceImpl) DeleteUserFiles(rctx request.CTX, userID s
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.DeleteFilesBatch", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.DeleteFilesBatch", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*es.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -1786,7 +1786,7 @@ func (es *ElasticsearchInterfaceImpl) DeletePostFiles(rctx request.CTX, postID s
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.DeleteFilesBatch", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.DeleteFilesBatch", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*es.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -1817,7 +1817,7 @@ func (es *ElasticsearchInterfaceImpl) DeleteFilesBatch(rctx request.CTX, endTime
 	defer es.mutex.RUnlock()
 
 	if atomic.LoadInt32(&es.ready) == 0 {
-		return model.NewAppError("Elasticsearch.DeleteFilesBatch", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Elasticsearch.DeleteFilesBatch", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*es.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -1855,16 +1855,16 @@ func (es *ElasticsearchInterfaceImpl) DeleteFilesBatch(rctx request.CTX, endTime
 func checkMaxVersion(client *elastic.TypedClient, cfg *model.Config) (string, int, *model.AppError) {
 	resp, err := client.API.Core.Info().Do(context.Background())
 	if err != nil {
-		return "", 0, model.NewAppError("Elasticsearch.checkMaxVersion", "ent.elasticsearch.start.get_server_version.app_error", nil, "", http.StatusInternalServerError).Wrap(err)
+		return "", 0, model.NewAppError("Elasticsearch.checkMaxVersion", "ent.elasticsearch.start.get_server_version.app_error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError).Wrap(err)
 	}
 
 	major, _, _, esErr := common.GetVersionComponents(resp.Version.Int)
 	if esErr != nil {
-		return "", 0, model.NewAppError("Elasticsearch.checkMaxVersion", "ent.elasticsearch.start.parse_server_version.app_error", nil, "", http.StatusInternalServerError).Wrap(err)
+		return "", 0, model.NewAppError("Elasticsearch.checkMaxVersion", "ent.elasticsearch.start.parse_server_version.app_error", map[string]any{"Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusInternalServerError).Wrap(err)
 	}
 
 	if major > elasticsearchMaxVersion {
-		return "", 0, model.NewAppError("Elasticsearch.checkMaxVersion", "ent.elasticsearch.max_version.app_error", map[string]any{"Version": major, "MaxVersion": elasticsearchMaxVersion}, "", http.StatusBadRequest)
+		return "", 0, model.NewAppError("Elasticsearch.checkMaxVersion", "ent.elasticsearch.max_version.app_error", map[string]any{"Version": major, "MaxVersion": elasticsearchMaxVersion, "Backend": model.ElasticsearchSettingsESBackend}, "", http.StatusBadRequest)
 	}
 	return resp.Version.Int, major, nil
 }
diff --git a/server/enterprise/elasticsearch/elasticsearch/indexing_job.go b/server/enterprise/elasticsearch/elasticsearch/indexing_job.go
index 276c9f7871f..45107e518fa 100644
--- a/server/enterprise/elasticsearch/elasticsearch/indexing_job.go
+++ b/server/enterprise/elasticsearch/elasticsearch/indexing_job.go
@@ -35,7 +35,7 @@ func (esi *ElasticsearchIndexerInterfaceImpl) MakeWorker() model.Worker {
 		return nil
 	}
 
-	return common.NewIndexerWorker(workerName,
+	return common.NewIndexerWorker(workerName, model.ElasticsearchSettingsESBackend,
 		esi.Server.Jobs,
 		logger,
 		esi.Server.Platform().FileBackend(),
 		esi.Server.License,
diff --git a/server/enterprise/elasticsearch/opensearch/indexing_job.go b/server/enterprise/elasticsearch/opensearch/indexing_job.go
index 823f81cf154..91a00180ec4 100644
--- a/server/enterprise/elasticsearch/opensearch/indexing_job.go
+++ b/server/enterprise/elasticsearch/opensearch/indexing_job.go
@@ -35,7 +35,7 @@ func (esi *OpensearchIndexerInterfaceImpl) MakeWorker() model.Worker {
 		return nil
 	}
 
-	return common.NewIndexerWorker(workerName,
+	return common.NewIndexerWorker(workerName, model.ElasticsearchSettingsOSBackend,
 		esi.Server.Jobs,
 		logger,
 		esi.Server.Platform().FileBackend(),
diff --git a/server/enterprise/elasticsearch/opensearch/opensearch.go b/server/enterprise/elasticsearch/opensearch/opensearch.go
index 5f7a265e8e5..6473246477a 100644
--- a/server/enterprise/elasticsearch/opensearch/opensearch.go
+++ b/server/enterprise/elasticsearch/opensearch/opensearch.go
@@ -227,7 +227,7 @@ func (os *OpensearchInterfaceImpl) Stop() *model.AppError {
 	defer os.mutex.Unlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.start", "ent.elasticsearch.stop.already_stopped.app_error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.start", "ent.elasticsearch.stop.already_stopped.app_error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	// Flushing any pending requests
@@ -261,7 +261,7 @@ func (os *OpensearchInterfaceImpl) IndexPost(post *model.Post, teamId string) *m
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.IndexPost", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.IndexPost", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	indexName := common.BuildPostIndexName(*os.Platform.Config().ElasticsearchSettings.AggregatePostsAfterDays,
@@ -331,7 +331,7 @@ func (os *OpensearchInterfaceImpl) SearchPosts(channels model.ChannelList, searc
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return []string{}, nil, model.NewAppError("Opensearch.SearchPosts", "ent.elasticsearch.search_posts.disabled", nil, "", http.StatusInternalServerError)
+		return []string{}, nil, model.NewAppError("Opensearch.SearchPosts", "ent.elasticsearch.search_posts.disabled", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	var channelIds []string
@@ -689,7 +689,7 @@ func (os *OpensearchInterfaceImpl) DeletePost(post *model.Post) *model.AppError
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.DeletePost", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.DeletePost", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	// This is racy with index aggregation, but since the posts are verified in the database when returning search
@@ -711,7 +711,7 @@ func (os *OpensearchInterfaceImpl) DeleteChannelPosts(rctx request.CTX, channelI
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.DeleteChannelPosts", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.DeleteChannelPosts", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	postIndexes, err := os.getPostIndexNames()
@@ -752,7 +752,7 @@ func (os *OpensearchInterfaceImpl) DeleteUserPosts(rctx request.CTX, userID stri
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.DeleteUserPosts", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.DeleteUserPosts", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	postIndexes, err := os.getPostIndexNames()
@@ -818,7 +818,7 @@ func (os *OpensearchInterfaceImpl) IndexChannel(rctx request.CTX, channel *model
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.IndexChannel", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.IndexChannel", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	indexName := *os.Platform.Config().ElasticsearchSettings.IndexPrefix + common.IndexBaseChannels
@@ -865,7 +865,7 @@ func (os *OpensearchInterfaceImpl) SearchChannels(teamId, userID string, term st
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return []string{}, model.NewAppError("Opensearch.SearchChannels", "ent.elasticsearch.search_channels.disabled", nil, "", http.StatusInternalServerError)
+		return []string{}, model.NewAppError("Opensearch.SearchChannels", "ent.elasticsearch.search_channels.disabled", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*os.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -964,7 +964,7 @@ func (os *OpensearchInterfaceImpl) DeleteChannel(channel *model.Channel) *model.
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.DeleteChannel", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.DeleteChannel", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	var err error
@@ -997,7 +997,7 @@ func (os *OpensearchInterfaceImpl) IndexUser(rctx request.CTX, user *model.User,
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.IndexUser", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.IndexUser", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	indexName := *os.Platform.Config().ElasticsearchSettings.IndexPrefix + common.IndexBaseUsers
@@ -1045,7 +1045,7 @@ func (os *OpensearchInterfaceImpl) autocompleteUsers(contextCategory string, cat
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return nil, model.NewAppError("Opensearch.autocompleteUsers", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return nil, model.NewAppError("Opensearch.autocompleteUsers", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*os.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -1164,7 +1164,7 @@ func (os *OpensearchInterfaceImpl) autocompleteUsersNotInChannel(teamId, channel
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return nil, model.NewAppError("Opensearch.autocompleteUsersNotInChannel", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return nil, model.NewAppError("Opensearch.autocompleteUsersNotInChannel", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*os.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -1333,7 +1333,7 @@ func (os *OpensearchInterfaceImpl) DeleteUser(user *model.User) *model.AppError
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.DeleteUser", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.DeleteUser", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	var err error
@@ -1367,7 +1367,7 @@ func (os *OpensearchInterfaceImpl) TestConfig(rctx request.CTX, cfg *model.Confi
 	}
 
 	if !*cfg.ElasticsearchSettings.EnableIndexing {
-		return model.NewAppError("Opensearch.TestConfig", "ent.elasticsearch.test_config.indexing_disabled.error", nil, "", http.StatusNotImplemented)
+		return model.NewAppError("Opensearch.TestConfig", "ent.elasticsearch.test_config.indexing_disabled.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusNotImplemented)
 	}
 
 	client, appErr := createClient(rctx.Logger(), cfg, os.Platform.FileBackend(), true)
@@ -1402,7 +1402,7 @@ func (os *OpensearchInterfaceImpl) PurgeIndexes(rctx request.CTX) *model.AppErro
 	}
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.PurgeIndexes", "ent.elasticsearch.generic.disabled", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.PurgeIndexes", "ent.elasticsearch.generic.disabled", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	indexPrefix := *os.Platform.Config().ElasticsearchSettings.IndexPrefix
@@ -1455,7 +1455,7 @@ func (os *OpensearchInterfaceImpl) PurgeIndexList(rctx request.CTX, indexes []st
 	}
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.PurgeIndexList", "ent.elasticsearch.generic.disabled", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.PurgeIndexList", "ent.elasticsearch.generic.disabled", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	indexPrefix := *os.Platform.Config().ElasticsearchSettings.IndexPrefix
@@ -1526,7 +1526,7 @@ func (os *OpensearchInterfaceImpl) DataRetentionDeleteIndexes(rctx request.CTX,
 	}
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.DataRetentionDeleteIndexes", "ent.elasticsearch.generic.disabled", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.DataRetentionDeleteIndexes", "ent.elasticsearch.generic.disabled", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx := context.Background()
@@ -1559,7 +1559,7 @@ func (os *OpensearchInterfaceImpl) IndexFile(file *model.FileInfo, channelId str
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.IndexFile", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.IndexFile", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	indexName := *os.Platform.Config().ElasticsearchSettings.IndexPrefix + common.IndexBaseFiles
@@ -1606,7 +1606,7 @@ func (os *OpensearchInterfaceImpl) SearchFiles(channels model.ChannelList, searc
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return []string{}, model.NewAppError("Opensearch.SearchPosts", "ent.elasticsearch.search_files.disabled", nil, "", http.StatusInternalServerError)
+		return []string{}, model.NewAppError("Opensearch.SearchPosts", "ent.elasticsearch.search_files.disabled", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	var channelIds []string
@@ -1853,7 +1853,7 @@ func (os *OpensearchInterfaceImpl) DeleteFile(fileID string) *model.AppError {
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.DeleteFile", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.DeleteFile", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	var err error
@@ -1886,7 +1886,7 @@ func (os *OpensearchInterfaceImpl) DeleteUserFiles(rctx request.CTX, userID stri
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.DeleteFilesBatch", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.DeleteFilesBatch", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*os.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -1923,7 +1923,7 @@ func (os *OpensearchInterfaceImpl) DeletePostFiles(rctx request.CTX, postID stri
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.DeleteFilesBatch", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.DeleteFilesBatch", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*os.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -1959,7 +1959,7 @@ func (os *OpensearchInterfaceImpl) DeleteFilesBatch(rctx request.CTX, endTime, l
 	defer os.mutex.RUnlock()
 
 	if atomic.LoadInt32(&os.ready) == 0 {
-		return model.NewAppError("Opensearch.DeleteFilesBatch", "ent.elasticsearch.not_started.error", nil, "", http.StatusInternalServerError)
+		return model.NewAppError("Opensearch.DeleteFilesBatch", "ent.elasticsearch.not_started.error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError)
 	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*os.Platform.Config().ElasticsearchSettings.RequestTimeoutSeconds)*time.Second)
@@ -2004,16 +2004,16 @@ func (os *OpensearchInterfaceImpl) DeleteFilesBatch(rctx request.CTX, endTime, l
 func checkMaxVersion(client *opensearchapi.Client) (string, int, *model.AppError) {
 	resp, err := client.Info(context.Background(), nil)
 	if err != nil {
-		return "", 0, model.NewAppError("Opensearch.checkMaxVersion", "ent.elasticsearch.start.get_server_version.app_error", nil, "", http.StatusInternalServerError).Wrap(err)
+		return "", 0, model.NewAppError("Opensearch.checkMaxVersion", "ent.elasticsearch.start.get_server_version.app_error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError).Wrap(err)
	}
 
 	major, _, _, esErr := common.GetVersionComponents(resp.Version.Number)
 	if esErr != nil {
-		return "", 0, model.NewAppError("Opensearch.checkMaxVersion", "ent.elasticsearch.start.parse_server_version.app_error", nil, "", http.StatusInternalServerError).Wrap(err)
+		return "", 0, model.NewAppError("Opensearch.checkMaxVersion", "ent.elasticsearch.start.parse_server_version.app_error", map[string]any{"Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusInternalServerError).Wrap(err)
 	}
 
 	if major > opensearchMaxVersion {
-		return "", 0, model.NewAppError("Opensearch.checkMaxVersion", "ent.elasticsearch.max_version.app_error", map[string]any{"Version": major, "MaxVersion": opensearchMaxVersion}, "", http.StatusBadRequest)
+		return "", 0, model.NewAppError("Opensearch.checkMaxVersion", "ent.elasticsearch.max_version.app_error", map[string]any{"Version": major, "MaxVersion": opensearchMaxVersion, "Backend": model.ElasticsearchSettingsOSBackend}, "", http.StatusBadRequest)
 	}
 	return resp.Version.Number, major, nil
 }
diff --git a/server/i18n/en.json b/server/i18n/en.json
index c1ac8733273..09ef2102698 100644
--- a/server/i18n/en.json
+++ b/server/i18n/en.json
@@ -7786,7 +7786,7 @@
   },
   {
     "id": "ent.elasticsearch.generic.disabled",
-    "translation": "Elasticsearch search is not enabled on this server"
+    "translation": "{{.Backend}} search is not enabled on this server"
   },
   {
     "id": "ent.elasticsearch.getAllChannelMembers.error",
@@ -7822,11 +7822,11 @@
   },
   {
     "id": "ent.elasticsearch.indexer.do_job.parse_end_time.error",
-    "translation": "Elasticsearch indexing worker failed to parse the end time"
+    "translation": "{{.Backend}} indexing worker failed to parse the end time"
   },
   {
     "id": "ent.elasticsearch.indexer.do_job.parse_start_time.error",
-    "translation": "Elasticsearch indexing worker failed to parse the start time"
+    "translation": "{{.Backend}} indexing worker failed to parse the start time"
   },
   {
     "id": "ent.elasticsearch.indexer.index_batch.nothing_left_to_index.error",
@@ -7834,11 +7834,11 @@
   },
   {
     "id": "ent.elasticsearch.max_version.app_error",
-    "translation": "Elasticsearch version {{.Version}} is higher than max supported version of {{.MaxVersion}}"
+    "translation": "{{.Backend}} version {{.Version}} is higher than max supported version of {{.MaxVersion}}"
   },
   {
     "id": "ent.elasticsearch.not_started.error",
-    "translation": "Elasticsearch is not started"
+    "translation": "{{.Backend}} is not started"
   },
   {
     "id": "ent.elasticsearch.post.get_files_batch_for_indexing.error",
@@ -7866,7 +7866,7 @@
   },
   {
     "id": "ent.elasticsearch.search_channels.disabled",
-    "translation": "Elasticsearch searching is disabled on this server"
+    "translation": "{{.Backend}} searching is disabled on this server"
   },
   {
     "id": "ent.elasticsearch.search_channels.search_failed",
@@ -7878,7 +7878,7 @@
   },
   {
     "id": "ent.elasticsearch.search_files.disabled",
-    "translation": "Elasticsearch files searching is disabled on this server"
+    "translation": "{{.Backend}} files searching is disabled on this server"
   },
   {
     "id": "ent.elasticsearch.search_files.search_failed",
@@ -7890,7 +7890,7 @@
   },
   {
     "id": "ent.elasticsearch.search_posts.disabled",
-    "translation": "Elasticsearch searching is disabled on this server"
+    "translation": "{{.Backend}} searching is disabled on this server"
   },
   {
     "id": "ent.elasticsearch.search_posts.parse_matches_failed",
@@ -7914,19 +7914,19 @@
   },
   {
     "id": "ent.elasticsearch.start.get_server_version.app_error",
-    "translation": "Failed to get Elasticsearch server version."
+    "translation": "Failed to get {{.Backend}} server version."
   },
   {
     "id": "ent.elasticsearch.start.parse_server_version.app_error",
-    "translation": "Failed to parse Elasticsearch server version."
+    "translation": "Failed to parse {{.Backend}} server version."
   },
   {
     "id": "ent.elasticsearch.stop.already_stopped.app_error",
-    "translation": "Elasticsearch is already stopped."
+    "translation": "{{.Backend}} is already stopped."
   },
   {
     "id": "ent.elasticsearch.test_config.indexing_disabled.error",
-    "translation": "Elasticsearch is disabled."
+    "translation": "{{.Backend}} is disabled."
   },
   {
     "id": "ent.elasticsearch.test_config.license.error",
@@ -8854,23 +8854,23 @@
   },
   {
     "id": "model.config.is_valid.elastic_search.aggregate_posts_after_days.app_error",
-    "translation": "Elasticsearch AggregatePostsAfterDays setting must be a number greater than or equal to 1."
+    "translation": "Search AggregatePostsAfterDays setting must be a number greater than or equal to 1."
   },
   {
     "id": "model.config.is_valid.elastic_search.bulk_indexing_batch_size.app_error",
-    "translation": "Elasticsearch Bulk Indexing Batch Size must be at least {{.BatchSize}}."
+    "translation": "Search Bulk Indexing Batch Size must be at least {{.BatchSize}}."
   },
   {
     "id": "model.config.is_valid.elastic_search.connection_url.app_error",
-    "translation": "Elasticsearch ConnectionUrl setting must be provided when Elasticsearch indexing is enabled."
+    "translation": "Search ConnectionUrl setting must be provided when indexing is enabled."
   },
   {
     "id": "model.config.is_valid.elastic_search.enable_autocomplete.app_error",
-    "translation": "Elasticsearch EnableIndexing setting must be set to true when Elasticsearch EnableAutocomplete is set to true"
+    "translation": "Search EnableIndexing setting must be set to true when Elasticsearch EnableAutocomplete is set to true"
   },
   {
     "id": "model.config.is_valid.elastic_search.enable_searching.app_error",
-    "translation": "Elasticsearch EnableIndexing setting must be set to true when Elasticsearch EnableSearching is set to true"
+    "translation": "Search EnableIndexing setting must be set to true when Elasticsearch EnableSearching is set to true"
   },
   {
     "id": "model.config.is_valid.elastic_search.ignored_indexes_dash_prefix.app_error",
@@ -8882,15 +8882,15 @@
   },
   {
     "id": "model.config.is_valid.elastic_search.live_indexing_batch_size.app_error",
-    "translation": "Elasticsearch Live Indexing Batch Size must be at least 1."
+    "translation": "Search Live Indexing Batch Size must be at least 1."
   },
   {
     "id": "model.config.is_valid.elastic_search.posts_aggregator_job_start_time.app_error",
-    "translation": "Elasticsearch PostsAggregatorJobStartTime setting must be a time in the format \"hh:mm\"."
+    "translation": "Search PostsAggregatorJobStartTime setting must be a time in the format \"hh:mm\"."
   },
   {
     "id": "model.config.is_valid.elastic_search.request_timeout_seconds.app_error",
-    "translation": "Elasticsearch Request Timeout must be at least 1 second."
+    "translation": "Search Request Timeout must be at least 1 second."
   },
   {
     "id": "model.config.is_valid.email_batching_buffer_size.app_error",