diff --git a/chaoscenter/authentication/api/utils/project_utils.go b/chaoscenter/authentication/api/utils/project_utils.go index 9d77f3a2704..f1fbf4eaf9b 100644 --- a/chaoscenter/authentication/api/utils/project_utils.go +++ b/chaoscenter/authentication/api/utils/project_utils.go @@ -184,25 +184,28 @@ func CreateSortStage(sort *entities.SortInput) bson.D { } } -func CreatePaginationStage(pagination *entities.Pagination) []bson.D { +func CreatePaginationStage(pagination *entities.Pagination) ([]bson.D, int, int) { var stages []bson.D + skip := 0 + limit := 10 + if pagination != nil { page := pagination.Page - limit := pagination.Limit + limit = pagination.Limit + // upper limit of 50 to prevent exceeding max limit 16mb - if pagination.Limit > 50 { + if limit > 50 { limit = 50 } - stages = append(stages, bson.D{ - {"$skip", page * limit}, - }) - stages = append(stages, bson.D{ - {"$limit", limit}, - }) - } else { - stages = append(stages, bson.D{ - {"$limit", 10}, - }) + skip = page * limit } - return stages + + stages = append(stages, bson.D{ + {"$skip", skip}, + }) + stages = append(stages, bson.D{{ + "$limit", limit}, + }) + + return stages, skip, limit } diff --git a/chaoscenter/authentication/pkg/project/repository.go b/chaoscenter/authentication/pkg/project/repository.go index 8f170b8967f..75f3b0c4a20 100644 --- a/chaoscenter/authentication/pkg/project/repository.go +++ b/chaoscenter/authentication/pkg/project/repository.go @@ -92,19 +92,29 @@ func (r repository) GetProjectsByUserID(request *entities.ListProjectRequest) (* pipeline = append(pipeline, sortStage) } - // Pagination stages - paginationStages := project_utils.CreatePaginationStage(request.Pagination) - - // Facet stage to count total projects and paginate results - facetStage := bson.D{ - {"$facet", bson.D{ - {"totalCount", bson.A{ - bson.D{{"$count", "totalNumberOfProjects"}}, - }}, - {"projects", append(mongo.Pipeline{}, paginationStages...)}, + // Pagination stage + _, skip, limit := 
project_utils.CreatePaginationStage(request.Pagination) + + // Count total project and get top-level document to array + countStage := bson.D{ + {"$group", bson.D{ + {"_id", nil}, + {"totalNumberOfProjects", bson.D{{"$sum", 1}}}, + {"projects", bson.D{{"$push", "$$ROOT"}}}, }}, } - pipeline = append(pipeline, facetStage) + + // Paging results + pagingStage := bson.D{ + {"$project", bson.D{ + {"_id", 0}, + {"totalNumberOfProjects", 1}, + {"projects", bson.D{ + {"$slice", bson.A{"$projects", skip, limit}}, + }}, + }}} + + pipeline = append(pipeline, countStage, pagingStage) // Execute the aggregate pipeline cursor, err := r.Collection.Aggregate(ctx, pipeline) @@ -115,10 +125,8 @@ func (r repository) GetProjectsByUserID(request *entities.ListProjectRequest) (* // Extract results var result struct { - TotalCount []struct { - TotalNumberOfProjects int64 `bson:"totalNumberOfProjects"` - } `bson:"totalCount"` - Projects []*entities.Project `bson:"projects"` + TotalNumberOfProjects int64 `bson:"totalNumberOfProjects"` + Projects []*entities.Project `bson:"projects"` } if cursor.Next(ctx) { @@ -128,8 +136,8 @@ func (r repository) GetProjectsByUserID(request *entities.ListProjectRequest) (* } var totalNumberOfProjects int64 - if len(result.TotalCount) > 0 { - totalNumberOfProjects = result.TotalCount[0].TotalNumberOfProjects + if result.TotalNumberOfProjects > 0 { + totalNumberOfProjects = result.TotalNumberOfProjects } else { zero := int64(0) return &entities.ListProjectResponse{ diff --git a/chaoscenter/graphql/server/pkg/chaos_experiment/handler/handler.go b/chaoscenter/graphql/server/pkg/chaos_experiment/handler/handler.go index c2ff92baa35..604b86d85f5 100644 --- a/chaoscenter/graphql/server/pkg/chaos_experiment/handler/handler.go +++ b/chaoscenter/graphql/server/pkg/chaos_experiment/handler/handler.go @@ -15,6 +15,7 @@ import ( chaosTypes "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" 
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaos_experiment/ops" dbChaosInfra "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/chaos_infrastructure" + "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/common" dbSchemaProbe "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/probe" @@ -707,37 +708,30 @@ func (c *ChaosExperimentHandler) ListExperiment(projectID string, request model. } // Pagination or adding a default limit of 15 if pagination not provided - paginatedExperiments := bson.A{ - sortStage, - } - - if request.Pagination != nil { - paginationSkipStage := bson.D{ - {"$skip", request.Pagination.Page * request.Pagination.Limit}, - } - paginationLimitStage := bson.D{ - {"$limit", request.Pagination.Limit}, - } + _, skip, limit := common.CreatePaginationStage(request.Pagination) - paginatedExperiments = append(paginatedExperiments, paginationSkipStage, paginationLimitStage) - } else { - limitStage := bson.D{ - {"$limit", 15}, - } + pipeline = append(pipeline, sortStage) - paginatedExperiments = append(paginatedExperiments, limitStage) + // Count total project and get top-level document to array + countStage := bson.D{ + {"$group", bson.D{ + {"_id", nil}, + {"total_filtered_experiments", bson.D{{"$sum", 1}}}, + {"scheduled_experiments", bson.D{{"$push", "$$ROOT"}}}, + }}, } - // Add two stages where we first count the number of filtered workflow and then paginate the results - facetStage := bson.D{ - {"$facet", bson.D{ - {"total_filtered_experiments", bson.A{ - bson.D{{"$count", "count"}}, + // Paging results + pagingStage := bson.D{ + {"$project", bson.D{ + {"_id", 0}, + {"total_filtered_experiments", 1}, + {"scheduled_experiments", bson.D{ + {"$slice", bson.A{"$scheduled_experiments", skip, limit}}, }}, - {"scheduled_experiments", paginatedExperiments}, - }}, - } - pipeline = append(pipeline, facetStage) + }}} + + pipeline = append(pipeline, countStage, 
pagingStage) // Call aggregation on pipeline workflowsCursor, err := c.chaosExperimentOperator.GetAggregateExperiments(pipeline) @@ -750,7 +744,7 @@ func (c *ChaosExperimentHandler) ListExperiment(projectID string, request model. workflows []dbChaosExperiment.AggregatedExperiments ) - if err = workflowsCursor.All(context.Background(), &workflows); err != nil || len(workflows) == 0 { + if err = workflowsCursor.All(context.Background(), &workflows); err != nil { return &model.ListExperimentResponse{ TotalNoOfExperiments: 0, Experiments: result, @@ -847,8 +841,8 @@ func (c *ChaosExperimentHandler) ListExperiment(projectID string, request model. } totalFilteredExperimentsCounter := 0 - if len(workflows) > 0 && len(workflows[0].TotalFilteredExperiments) > 0 { - totalFilteredExperimentsCounter = workflows[0].TotalFilteredExperiments[0].Count + if len(workflows) > 0 && workflows[0].TotalFilteredExperiments > 0 { + totalFilteredExperimentsCounter = workflows[0].TotalFilteredExperiments } output := model.ListExperimentResponse{ @@ -898,66 +892,55 @@ func (c *ChaosExperimentHandler) getWfRunDetails(workflowIDs []string) (map[stri } pipeline = append(pipeline, sortStage) - var workflowRunPipeline mongo.Pipeline - // Get details of the latest wf run - wfRunDetails := bson.D{ + // Get details of the latest wf run and total_experiment_runs and avg_resiliency_score + groupStage := bson.D{ {"$group", bson.D{ {"_id", "$experiment_id"}, - - // Fetch the latest workflowRun details - {"experiment_run_details", bson.D{ - { - "$first", "$$ROOT", - }, - }}, - }}, - } - workflowRunPipeline = append(workflowRunPipeline, wfRunDetails) - - var resScorePipeline mongo.Pipeline - // Filtering out running workflow runs to calculate average resiliency score - filterRunningWfRuns := bson.D{ - { - "$match", - bson.D{{ - "$and", bson.A{ - bson.D{{ - "experiment_id", bson.D{{"$in", workflowIDs}}, - }}, - bson.D{{"phase", bson.D{ - {"$ne", "Running"}, - }}}, - }, + {"latest_experiment_run", bson.D{ + 
{"$first", bson.D{ + {"_id", "$experiment_id"}, + {"experiment_run_details", "$$ROOT"}, + }}, }}, - }, - } - resScorePipeline = append(resScorePipeline, filterRunningWfRuns) - //// Calculating average resiliency score - avgResiliencyScore := bson.D{ - {"$group", bson.D{ - {"_id", "$experiment_id"}, - - // Count all workflowRuns in a workflow {"total_experiment_runs", bson.D{ - {"$sum", 1}, + {"$sum", bson.D{ + {"$cond", bson.A{ + bson.D{{"$ne", bson.A{"$phase", "Running"}}}, 1, 0}}, + }}, }}, - - // Calculate average {"avg_resiliency_score", bson.D{ - {"$avg", "$resiliency_score"}, + {"$avg", bson.D{ + {"$cond", bson.A{ + bson.D{{"$ne", bson.A{"$phase", "Running"}}}, "$resiliency_score", nil}}, + }}, }}, }}, } - resScorePipeline = append(resScorePipeline, avgResiliencyScore) - // Add two stages where we first calculate the avg resiliency score of filtered workflow runs and then fetch details of the latest workflow run - facetStage := bson.D{ - {"$facet", bson.D{ - {"avg_resiliency_score", resScorePipeline}, - {"latest_experiment_run", workflowRunPipeline}, + pipeline = append(pipeline, groupStage) + + // the latest workflow run is wrapped in array and avg_resiliency_score is formatted as object + finalProjectStage := bson.D{ + {"$project", bson.D{ + {"_id", 0}, + {"latest_experiment_run", bson.D{ + {"$cond", bson.A{ + bson.M{"$isArray": "$latest_experiment_run"}, + "$latest_experiment_run", + bson.A{"$latest_experiment_run"}, + }}, + }}, + {"avg_resiliency_score", bson.A{ + bson.D{ + {"_id", "$_id"}, + {"total_experiment_runs", "$total_experiment_runs"}, + {"avg_resiliency_score", "$avg_resiliency_score"}, + }, + }}, }}, } - pipeline = append(pipeline, facetStage) + + pipeline = append(pipeline, finalProjectStage) // Call aggregation on pipeline workflowsRunDetailCursor, err := c.chaosExperimentRunOperator.GetAggregateExperimentRuns(pipeline) if err != nil { @@ -1055,18 +1038,37 @@ func (c *ChaosExperimentHandler) GetExperimentStats(ctx context.Context, project 
{"is_removed", false}, }}, } - // Project experiment ID - projectstage := bson.D{ - {"$project", bson.D{ - {"experiment_id", 1}, - }}, + + pipeline = append(pipeline, matchIdentifierStage) + + // Groups to count total number of experiments and get experiments to array + groupByTotalCount := bson.D{ + { + "$group", bson.D{ + {"_id", nil}, + {"total_experiments", bson.D{ + {"$sum", 1}, + }}, + {"experiments", bson.D{ + {"$push", "$$ROOT"}, + }}, + }, + }, } + pipeline = append(pipeline, groupByTotalCount) + + unwindStage := bson.D{ + {"$unwind", "$experiments"}, + } + + pipeline = append(pipeline, unwindStage) + // fetchRunDetailsStage fetches experiment runs and calculates their avg resiliency score which have completed fetchRunDetailsStage := bson.D{ {"$lookup", bson.D{ {"from", "chaosExperimentRuns"}, - {"let", bson.D{{"expID", "$experiment_id"}}}, + {"let", bson.D{{"expID", "$experiments.experiment_id"}}}, {"pipeline", bson.A{ bson.D{ {"$match", bson.D{ @@ -1095,59 +1097,63 @@ func (c *ChaosExperimentHandler) GetExperimentStats(ctx context.Context, project }}, } - unwindStage := bson.D{ - {"$unwind", bson.D{ - {"path", "$avg_resiliency_score"}, - }}, + pipeline = append(pipeline, fetchRunDetailsStage) + + unwindResiliencySocreStage := bson.D{ + {"$unwind", "$avg_resiliency_score"}, } + pipeline = append(pipeline, unwindResiliencySocreStage) + // This stage buckets the number of experiments by avg resiliency score in the ranges of 0-39, 40-79, 80-100 - bucketByResScoreStage := bson.D{ - {"$bucket", bson.D{ - {"groupBy", "$avg_resiliency_score.avg"}, - {"boundaries", bson.A{0, 40, 80, 101}}, - {"default", 101}, - {"output", bson.D{ - {"count", bson.D{ - {"$sum", 1}, + groupByResiliencyScore := bson.D{ + {"$group", bson.D{ + {"_id", bson.D{ + {"$switch", bson.D{ + {"branches", bson.A{ + bson.D{ + {"case", bson.D{{"$lt", bson.A{"$avg_resiliency_score.avg", 40}}}}, + {"then", 0}, + }, + bson.D{ + {"case", bson.D{{"$lt", bson.A{"$avg_resiliency_score.avg", 80}}}}, + 
{"then", 40}, + }, + bson.D{ + {"case", bson.D{{"$lt", bson.A{"$avg_resiliency_score.avg", 101}}}}, + {"then", 80}, + }, + }}, + {"default", 101}, }}, }}, + {"total_experiments", bson.D{ + {"$first", "$total_experiments"}, + }}, + {"count", bson.D{{"$sum", 1}}}, }}, } - // Groups to count total number of experiments - groupByTotalCount := bson.D{ - { - "$group", bson.D{ - {"_id", nil}, - {"count", bson.D{ - {"$sum", 1}, - }}, - }, - }, - } + pipeline = append(pipeline, groupByResiliencyScore) - facetStage := bson.D{ - {"$facet", bson.D{ - {"total_experiments", bson.A{ - matchIdentifierStage, - groupByTotalCount, + projectStage := bson.D{ + {"$project", bson.D{ + {"_id", 0}, + {"by_resiliency_score", bson.A{ bson.D{ - {"$project", bson.D{ - {"_id", 0}, - }}, + {"_id", "$_id"}, + {"count", "$count"}, }, }}, - {"categorized_by_resiliency_score", bson.A{ - matchIdentifierStage, - projectstage, - fetchRunDetailsStage, - unwindStage, - bucketByResScoreStage, + {"total_experiments", bson.A{ + bson.D{ + {"count", "$total_experiments"}, + }, }}, }}, } - pipeline = append(pipeline, facetStage) + + pipeline = append(pipeline, projectStage) // Call aggregation on pipeline experimentCursor, err := c.chaosExperimentOperator.GetAggregateExperiments(pipeline) @@ -1157,7 +1163,7 @@ func (c *ChaosExperimentHandler) GetExperimentStats(ctx context.Context, project var res []dbChaosExperiment.AggregatedExperimentStats - if err = experimentCursor.All(context.Background(), &res); err != nil || len(res) == 0 { + if err = experimentCursor.All(context.Background(), &res); err != nil { return nil, errors.New("error decoding experiment details cursor: " + err.Error()) } @@ -1166,6 +1172,10 @@ func (c *ChaosExperimentHandler) GetExperimentStats(ctx context.Context, project TotalExpCategorizedByResiliencyScore: nil, } + if len(res) == 0 { + return result, nil + } + if len(res[0].TotalExperiments) > 0 { result.TotalExperiments = res[0].TotalExperiments[0].Count } diff --git 
a/chaoscenter/graphql/server/pkg/chaos_experiment_run/handler/handler.go b/chaoscenter/graphql/server/pkg/chaos_experiment_run/handler/handler.go index eddb54e4fb3..51b0d119ac1 100644 --- a/chaoscenter/graphql/server/pkg/chaos_experiment_run/handler/handler.go +++ b/chaoscenter/graphql/server/pkg/chaos_experiment_run/handler/handler.go @@ -12,6 +12,7 @@ import ( "time" "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/authorization" + "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/common" probeUtils "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/probe/utils" @@ -505,37 +506,29 @@ func (c *ChaosExperimentRunHandler) ListExperimentRun(projectID string, request pipeline = append(pipeline, fetchKubernetesInfraDetailsStage) // Pagination or adding a default limit of 15 if pagination not provided - paginatedExperiments := bson.A{ - sortStage, - } + _, skip, limit := common.CreatePaginationStage(request.Pagination) - if request.Pagination != nil { - paginationSkipStage := bson.D{ - {"$skip", request.Pagination.Page * request.Pagination.Limit}, - } - paginationLimitStage := bson.D{ - {"$limit", request.Pagination.Limit}, - } - - paginatedExperiments = append(paginatedExperiments, paginationSkipStage, paginationLimitStage) - } else { - limitStage := bson.D{ - {"$limit", 15}, - } + pipeline = append(pipeline, sortStage) - paginatedExperiments = append(paginatedExperiments, limitStage) + // Count total project and get top-level document to array + countStage := bson.D{ + {"$group", bson.D{ + {"_id", nil}, + {"total_filtered_experiment_runs", bson.D{{"$sum", 1}}}, + {"flattened_experiment_runs", bson.D{{"$push", "$$ROOT"}}}, + }}, } - // Add two stages where we first count the number of filtered workflow and then paginate the results - facetStage := bson.D{ - {"$facet", bson.D{ - {"total_filtered_experiment_runs", bson.A{ - bson.D{{"$count", "count"}}, + // Paging results + pagingStage := bson.D{ + {"$project", bson.D{ 
+ {"total_filtered_experiment_runs", 1}, + {"flattened_experiment_runs", bson.D{ + {"$slice", bson.A{"$flattened_experiment_runs", skip, limit}}, }}, - {"flattened_experiment_runs", paginatedExperiments}, - }}, - } - pipeline = append(pipeline, facetStage) + }}} + + pipeline = append(pipeline, countStage, pagingStage) // Call aggregation on pipeline workflowsCursor, err := c.chaosExperimentRunOperator.GetAggregateExperimentRuns(pipeline) @@ -548,12 +541,13 @@ func (c *ChaosExperimentRunHandler) ListExperimentRun(projectID string, request workflows []dbChaosExperiment.AggregatedExperimentRuns ) - if err = workflowsCursor.All(context.Background(), &workflows); err != nil || len(workflows) == 0 { + if err = workflowsCursor.All(context.Background(), &workflows); err != nil { return &model.ListExperimentRunResponse{ TotalNoOfExperimentRuns: 0, ExperimentRuns: result, }, errors.New("error decoding experiment runs cursor: " + err.Error()) } + if len(workflows) == 0 { return &model.ListExperimentRunResponse{ TotalNoOfExperimentRuns: 0, @@ -637,8 +631,8 @@ func (c *ChaosExperimentRunHandler) ListExperimentRun(projectID string, request } totalFilteredExperimentRunsCounter := 0 - if len(workflows) > 0 && len(workflows[0].TotalFilteredExperimentRuns) > 0 { - totalFilteredExperimentRunsCounter = workflows[0].TotalFilteredExperimentRuns[0].Count + if len(workflows) > 0 && workflows[0].TotalFilteredExperimentRuns > 0 { + totalFilteredExperimentRunsCounter = workflows[0].TotalFilteredExperimentRuns } output := model.ListExperimentRunResponse{ diff --git a/chaoscenter/graphql/server/pkg/chaos_experiment_run/handler/handler_test.go b/chaoscenter/graphql/server/pkg/chaos_experiment_run/handler/handler_test.go index 6f08fa1f6cf..ec9b1f9c591 100644 --- a/chaoscenter/graphql/server/pkg/chaos_experiment_run/handler/handler_test.go +++ b/chaoscenter/graphql/server/pkg/chaos_experiment_run/handler/handler_test.go @@ -262,11 +262,7 @@ func TestChaosExperimentRunHandler_ListExperimentRun(t 
*testing.T) { }, given: func() { findResult := []interface{}{bson.D{ - {Key: "total_filtered_experiment_runs", Value: []dbOperationsChaosExpRun.TotalFilteredData{ - { - Count: 1, - }, - }}, + {Key: "total_filtered_experiment_runs", Value: 1}, {Key: "flattened_experiment_runs", Value: []dbOperationsChaosExpRun.FlattenedExperimentRun{ { ExperimentDetails: []dbOperationsChaosExpRun.ExperimentDetails{ @@ -308,11 +304,7 @@ func TestChaosExperimentRunHandler_ListExperimentRun(t *testing.T) { }, given: func() { findResult := []interface{}{bson.D{ - {Key: "total_filtered_experiment_runs", Value: []dbOperationsChaosExpRun.TotalFilteredData{ - { - Count: 1, - }, - }}, + {Key: "total_filtered_experiment_runs", Value: 1}, {Key: "flattened_experiment_runs", Value: []dbOperationsChaosExpRun.FlattenedExperimentRun{ { ExperimentDetails: []dbOperationsChaosExpRun.ExperimentDetails{ @@ -354,11 +346,7 @@ func TestChaosExperimentRunHandler_ListExperimentRun(t *testing.T) { }, given: func() { findResult := []interface{}{bson.D{ - {Key: "total_filtered_experiment_runs", Value: []dbOperationsChaosExpRun.TotalFilteredData{ - { - Count: 1, - }, - }}, + {Key: "total_filtered_experiment_runs", Value: 1}, {Key: "flattened_experiment_runs", Value: []dbOperationsChaosExpRun.FlattenedExperimentRun{ { ExperimentDetails: []dbOperationsChaosExpRun.ExperimentDetails{ diff --git a/chaoscenter/graphql/server/pkg/chaos_infrastructure/service.go b/chaoscenter/graphql/server/pkg/chaos_infrastructure/service.go index 4548e081615..8076dc7a33f 100644 --- a/chaoscenter/graphql/server/pkg/chaos_infrastructure/service.go +++ b/chaoscenter/graphql/server/pkg/chaos_infrastructure/service.go @@ -14,6 +14,7 @@ import ( "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/authorization" store "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/data-store" + "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/common" 
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/config" dbEnvironments "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/environments" "github.com/sirupsen/logrus" @@ -641,34 +642,29 @@ func (in *infraService) ListInfras(projectID string, request *model.ListInfraReq pipeline = append(pipeline, fetchExperimentDetailsStage) - //Pagination - paginatedInfras := bson.A{ - fetchExperimentDetailsStage, - } - - if request != nil { - if request.Pagination != nil { - paginationSkipStage := bson.D{ - {"$skip", request.Pagination.Page * request.Pagination.Limit}, - } - paginationLimitStage := bson.D{ - {"$limit", request.Pagination.Limit}, - } + // Pagination or adding a default limit of 15 if pagination not provided + _, skip, limit := common.CreatePaginationStage(request.Pagination) - paginatedInfras = append(paginatedInfras, paginationSkipStage, paginationLimitStage) - } + // Count total project and get top-level document to array + countStage := bson.D{ + {"$group", bson.D{ + {"_id", nil}, + {"total_filtered_infras", bson.D{{"$sum", 1}}}, + {"infras", bson.D{{"$push", "$$ROOT"}}}, + }}, } - // Add two stages where we first count the number of filtered workflow and then paginate the results - facetStage := bson.D{ - {"$facet", bson.D{ - {"total_filtered_infras", bson.A{ - bson.D{{"$count", "count"}}, + // Paging results + pagingStage := bson.D{ + {"$project", bson.D{ + {"_id", 0}, + {"total_filtered_infras", 1}, + {"infras", bson.D{ + {"$slice", bson.A{"$infras", skip, limit}}, }}, - {"infras", paginatedInfras}, - }}, - } - pipeline = append(pipeline, facetStage) + }}} + + pipeline = append(pipeline, countStage, pagingStage) // Call aggregation on pipeline infraCursor, err := in.infraOperator.GetAggregateInfras(pipeline) @@ -775,8 +771,8 @@ func (in *infraService) ListInfras(projectID string, request *model.ListInfraReq } totalFilteredInfrasCounter := 0 - if len(infras) > 0 && len(infras[0].TotalFilteredInfras) > 0 
{ - totalFilteredInfrasCounter = infras[0].TotalFilteredInfras[0].Count + if len(infras) > 0 && infras[0].TotalFilteredInfras > 0 { + totalFilteredInfrasCounter = infras[0].TotalFilteredInfras } output := model.ListInfraResponse{ @@ -813,43 +809,43 @@ func (in *infraService) GetInfraStats(ctx context.Context, projectID string) (*m }}, } - // Group by infra status and count their total number by each group - groupByInfraStatusStage := bson.D{ - { - "$group", bson.D{ - {"_id", "$is_active"}, - {"count", bson.D{ - {"$sum", 1}, - }}, - }, - }, - } + // Group by infra status, confirmed stage and count their total number by each group + groupByStage := bson.D{ + {"$group", bson.D{ + {"_id", nil}, - // Group by infra confirmed stage and count their total number by each group - groupByInfraConfirmedStage := bson.D{ - { - "$group", bson.D{ - {"_id", "$is_infra_confirmed"}, - {"count", bson.D{ - {"$sum", 1}, + // Count for active status + {"total_active_infras", bson.D{ + {"$sum", bson.D{ + {"$cond", bson.A{ + bson.D{{"$eq", bson.A{"$is_active", true}}}, 1, 0}}, }}, - }, - }, - } - - facetStage := bson.D{ - {"$facet", bson.D{ + }}, + {"total_not_active_infras", bson.D{ + {"$sum", bson.D{ + {"$cond", bson.A{ + bson.D{{"$eq", bson.A{"$is_active", false}}}, 1, 0}}, + }}, + }}, - {"total_active_infras", bson.A{ - matchIdentifierStage, groupByInfraStatusStage, + // Count for confirmed status + {"total_confirmed_infras", bson.D{ + {"$sum", bson.D{ + {"$cond", bson.A{ + bson.D{{"$eq", bson.A{"$is_infra_confirmed", true}}}, 1, 0}}, + }}, }}, - {"total_confirmed_infras", bson.A{ - matchIdentifierStage, groupByInfraConfirmedStage, + + {"total_not_confirmed_infras", bson.D{ + {"$sum", bson.D{ + {"$cond", bson.A{ + bson.D{{"$eq", bson.A{"$is_infra_confirmed", false}}}, 1, 0}}, + }}, }}, }}, } - pipeline = append(pipeline, facetStage) + pipeline = append(pipeline, matchIdentifierStage, groupByStage) // Call aggregation on pipeline infraCursor, err := 
in.infraOperator.GetAggregateInfras(pipeline) @@ -862,30 +858,12 @@ func (in *infraService) GetInfraStats(ctx context.Context, projectID string) (*m return nil, err } - stateMap := map[bool]int{ - false: 0, - true: 0, - } - - infraConfirmedMap := map[bool]int{ - false: 0, - true: 0, - } - - for _, data := range res[0].TotalConfirmedInfrastructures { - infraConfirmedMap[data.Id] = data.Count - } - - for _, data := range res[0].TotalActiveInfrastructure { - stateMap[data.Id] = data.Count - } - return &model.GetInfraStatsResponse{ - TotalInfrastructures: infraConfirmedMap[true] + infraConfirmedMap[false], - TotalActiveInfrastructure: stateMap[true], - TotalInactiveInfrastructures: stateMap[false], - TotalConfirmedInfrastructure: infraConfirmedMap[true], - TotalNonConfirmedInfrastructures: infraConfirmedMap[false], + TotalInfrastructures: res[0].TotalActiveInfrastructure + res[0].TotalNotActiveInfrastructure, + TotalActiveInfrastructure: res[0].TotalActiveInfrastructure, + TotalInactiveInfrastructures: res[0].TotalNotActiveInfrastructure, + TotalConfirmedInfrastructure: res[0].TotalConfirmedInfrastructures, + TotalNonConfirmedInfrastructures: res[0].TotalNotConfirmedInfrastructures, }, nil } diff --git a/chaoscenter/graphql/server/pkg/chaoshub/service.go b/chaoscenter/graphql/server/pkg/chaoshub/service.go index 2c933363179..49a5ff1df0c 100644 --- a/chaoscenter/graphql/server/pkg/chaoshub/service.go +++ b/chaoscenter/graphql/server/pkg/chaoshub/service.go @@ -918,16 +918,8 @@ func (c *chaosHubService) GetChaosHubStats(ctx context.Context, projectID string }}, } - facetStage := bson.D{ - {"$facet", bson.D{ - {"total_chaos_hubs", bson.A{ - matchIdentifierStage, - bson.D{{"$count", "count"}}, - }}, - }}, - } - - pipeline = append(pipeline, facetStage) + countStage := bson.D{{"$count", "total_chaos_hubs"}} + pipeline = append(pipeline, matchIdentifierStage, countStage) // Call aggregation on pipeline hubCursor, err := c.chaosHubOperator.GetAggregateChaosHubs(ctx, pipeline) 
if err != nil { @@ -935,14 +927,14 @@ func (c *chaosHubService) GetChaosHubStats(ctx context.Context, projectID string } var res []dbSchemaChaosHub.AggregatedChaosHubStats - if err = hubCursor.All(ctx, &res); err != nil || len(res) == 0 || len(res[0].TotalChaosHubs) == 0 { + if err = hubCursor.All(ctx, &res); err != nil || len(res) == 0 || res[0].TotalChaosHubs == 0 { return &model.GetChaosHubStatsResponse{ TotalChaosHubs: 1, }, err } return &model.GetChaosHubStatsResponse{ - TotalChaosHubs: res[0].TotalChaosHubs[0].Count + 1, + TotalChaosHubs: res[0].TotalChaosHubs + 1, }, nil } diff --git a/chaoscenter/graphql/server/pkg/database/mongodb/chaos_experiment/schema.go b/chaoscenter/graphql/server/pkg/database/mongodb/chaos_experiment/schema.go index f79637995da..f5ee42bbc02 100644 --- a/chaoscenter/graphql/server/pkg/database/mongodb/chaos_experiment/schema.go +++ b/chaoscenter/graphql/server/pkg/database/mongodb/chaos_experiment/schema.go @@ -109,7 +109,7 @@ type FaultEventMetadata struct { } type AggregatedExperimentRuns struct { - TotalFilteredExperimentRuns []TotalFilteredData `bson:"total_filtered_experiment_runs"` + TotalFilteredExperimentRuns int `bson:"total_filtered_experiment_runs"` FlattenedExperimentRuns []FlattenedExperimentRun `bson:"flattened_experiment_runs"` } @@ -151,7 +151,7 @@ type ExperimentDetails struct { } type AggregatedExperiments struct { - TotalFilteredExperiments []TotalFilteredData `bson:"total_filtered_experiments"` + TotalFilteredExperiments int `bson:"total_filtered_experiments"` ScheduledExperiments []ChaosExperimentsWithRunDetails `bson:"scheduled_experiments"` } diff --git a/chaoscenter/graphql/server/pkg/database/mongodb/chaos_hub/schema.go b/chaoscenter/graphql/server/pkg/database/mongodb/chaos_hub/schema.go index 7e8205f1192..5b869c38172 100644 --- a/chaoscenter/graphql/server/pkg/database/mongodb/chaos_hub/schema.go +++ b/chaoscenter/graphql/server/pkg/database/mongodb/chaos_hub/schema.go @@ -57,5 +57,5 @@ type TotalCount 
struct { } type AggregatedChaosHubStats struct { - TotalChaosHubs []TotalCount `bson:"total_chaos_hubs"` + TotalChaosHubs int `bson:"total_chaos_hubs"` } diff --git a/chaoscenter/graphql/server/pkg/database/mongodb/chaos_infrastructure/schema.go b/chaoscenter/graphql/server/pkg/database/mongodb/chaos_infrastructure/schema.go index a8413841de4..eea8c5c7a13 100644 --- a/chaoscenter/graphql/server/pkg/database/mongodb/chaos_infrastructure/schema.go +++ b/chaoscenter/graphql/server/pkg/database/mongodb/chaos_infrastructure/schema.go @@ -67,7 +67,7 @@ type AggregatedGetInfras struct { } type AggregatedInfras struct { - TotalFilteredInfras []TotalFilteredData `bson:"total_filtered_infras"` + TotalFilteredInfras int `bson:"total_filtered_infras"` Infras []ChaosInfraDetails `bson:"infras"` } @@ -98,6 +98,8 @@ type TotalCount struct { } type AggregatedInfraStats struct { - TotalActiveInfrastructure []TotalCount `bson:"total_active_infras"` - TotalConfirmedInfrastructures []TotalCount `bson:"total_confirmed_infras"` + TotalActiveInfrastructure int `bson:"total_active_infras"` + TotalNotActiveInfrastructure int `bson:"total_not_active_infras"` + TotalConfirmedInfrastructures int `bson:"total_confirmed_infras"` + TotalNotConfirmedInfrastructures int `bson:"total_not_confirmed_infras"` } diff --git a/chaoscenter/graphql/server/pkg/database/mongodb/common/operations.go b/chaoscenter/graphql/server/pkg/database/mongodb/common/operations.go new file mode 100644 index 00000000000..ac6e4ac96b3 --- /dev/null +++ b/chaoscenter/graphql/server/pkg/database/mongodb/common/operations.go @@ -0,0 +1,28 @@ +package common + +import ( + "github.com/litmuschaos/litmus/chaoscenter/graphql/server/graph/model" + "go.mongodb.org/mongo-driver/bson" +) + +func CreatePaginationStage(pagination *model.Pagination) ([]bson.D, int, int) { + var stages []bson.D + skip := 0 + limit := 15 + + if pagination != nil { + page := pagination.Page + limit = pagination.Limit + + skip = page * limit + } + + stages = 
append(stages, bson.D{ + {"$skip", skip}, + }) + stages = append(stages, bson.D{{ + "$limit", limit}, + }) + + return stages, skip, limit +} diff --git a/chaoscenter/graphql/server/pkg/database/mongodb/environments/schema.go b/chaoscenter/graphql/server/pkg/database/mongodb/environments/schema.go index 0d3c7de0f6e..0ee3ce5c70d 100644 --- a/chaoscenter/graphql/server/pkg/database/mongodb/environments/schema.go +++ b/chaoscenter/graphql/server/pkg/database/mongodb/environments/schema.go @@ -23,6 +23,6 @@ type TotalFilteredData struct { } type AggregatedEnvironments struct { - TotalFilteredEnvironments []TotalFilteredData `bson:"total_filtered_environments"` - Environments []Environment `bson:"environments"` + TotalFilteredEnvironments int `bson:"total_filtered_environments"` + Environments []Environment `bson:"environments"` } diff --git a/chaoscenter/graphql/server/pkg/environment/handler/handler.go b/chaoscenter/graphql/server/pkg/environment/handler/handler.go index 8cd437ac419..7458fb97054 100644 --- a/chaoscenter/graphql/server/pkg/environment/handler/handler.go +++ b/chaoscenter/graphql/server/pkg/environment/handler/handler.go @@ -8,6 +8,7 @@ import ( "github.com/litmuschaos/litmus/chaoscenter/graphql/server/graph/model" "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb" + "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/common" "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/environments" dbOperationsEnvironment "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/environments" "go.mongodb.org/mongo-driver/bson" @@ -319,38 +320,32 @@ func (e *EnvironmentService) ListEnvironments(projectID string, request *model.L } } - // Pagination or adding a default limit of 15 if pagination not provided - paginatedExperiments := bson.A{ - sortStage, - } - - if request.Pagination != nil { - paginationSkipStage := bson.D{ - {"$skip", request.Pagination.Page * 
request.Pagination.Limit}, - } - paginationLimitStage := bson.D{ - {"$limit", request.Pagination.Limit}, - } + pipeline = append(pipeline, sortStage) - paginatedExperiments = append(paginatedExperiments, paginationSkipStage, paginationLimitStage) - } else { - limitStage := bson.D{ - {"$limit", 15}, - } - - paginatedExperiments = append(paginatedExperiments, limitStage) + // Pagination or adding a default limit of 15 if pagination not provided + _, skip, limit := common.CreatePaginationStage(request.Pagination) + + // Count total project and get top-level document to array + countStage := bson.D{ + {"$group", bson.D{ + {"_id", nil}, + {"total_filtered_environments", bson.D{{"$sum", 1}}}, + {"environments", bson.D{{"$push", "$$ROOT"}}}, + }}, } - // Add two stages where we first count the number of filtered workflow and then paginate the results - facetStage := bson.D{ - {"$facet", bson.D{ - {"total_filtered_environments", bson.A{ - bson.D{{"$count", "count"}}, + // Paging results + pagingStage := bson.D{ + {"$project", bson.D{ + {"_id", 0}, + {"total_filtered_environments", 1}, + {"environments", bson.D{ + {"$slice", bson.A{"$environments", skip, limit}}, }}, - {"environments", paginatedExperiments}, }}, } - pipeline = append(pipeline, facetStage) + + pipeline = append(pipeline, countStage, pagingStage) cursor, err := e.EnvironmentOperator.GetAggregateEnvironments(pipeline) if err != nil { @@ -362,12 +357,13 @@ func (e *EnvironmentService) ListEnvironments(projectID string, request *model.L aggregatedEnvironments []environments.AggregatedEnvironments ) - if err = cursor.All(context.Background(), &aggregatedEnvironments); err != nil || len(aggregatedEnvironments) == 0 { + if err = cursor.All(context.Background(), &aggregatedEnvironments); err != nil { return &model.ListEnvironmentResponse{ TotalNoOfEnvironments: 0, Environments: envs, }, errors.New("error decoding environment cursor: " + err.Error()) } + if len(aggregatedEnvironments) == 0 { return 
&model.ListEnvironmentResponse{ TotalNoOfEnvironments: 0, @@ -393,8 +389,8 @@ func (e *EnvironmentService) ListEnvironments(projectID string, request *model.L } totalFilteredEnvironmentsCounter := 0 - if len(envs) > 0 && len(aggregatedEnvironments[0].TotalFilteredEnvironments) > 0 { - totalFilteredEnvironmentsCounter = aggregatedEnvironments[0].TotalFilteredEnvironments[0].Count + if len(envs) > 0 && aggregatedEnvironments[0].TotalFilteredEnvironments > 0 { + totalFilteredEnvironmentsCounter = aggregatedEnvironments[0].TotalFilteredEnvironments } output := model.ListEnvironmentResponse{