Skip to content

Commit

Permalink
Enable churn for our node-density tests
Browse files Browse the repository at this point in the history
Enabling churn (default: false) for our node-density tests in
kube-burner-ocp

Signed-off-by: Joe Talerico <[email protected]>
  • Loading branch information
Joe Talerico committed Jul 23, 2024
1 parent 9c8f32f commit d85a6a3
Show file tree
Hide file tree
Showing 6 changed files with 71 additions and 10 deletions.
6 changes: 6 additions & 0 deletions cmd/config/node-density-cni/node-density-cni.yml
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,12 @@ jobs:
burst: {{.BURST}}
namespacedIterations: {{.NAMESPACED_ITERATIONS}}
iterationsPerNamespace: {{.ITERATIONS_PER_NAMESPACE}}
churn: {{.CHURN}}
churnCycles: {{.CHURN_CYCLES}}
churnDuration: {{.CHURN_DURATION}}
churnPercent: {{.CHURN_PERCENT}}
churnDelay: {{.CHURN_DELAY}}
churnDeletionStrategy: {{.CHURN_DELETION_STRATEGY}}
podWait: false
waitWhenFinished: true
preLoadImages: true
Expand Down
6 changes: 6 additions & 0 deletions cmd/config/node-density-heavy/node-density-heavy.yml
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,12 @@ jobs:
burst: {{.BURST}}
namespacedIterations: {{.NAMESPACED_ITERATIONS}}
iterationsPerNamespace: {{.ITERATIONS_PER_NAMESPACE}}
churn: {{.CHURN}}
churnCycles: {{.CHURN_CYCLES}}
churnDuration: {{.CHURN_DURATION}}
churnPercent: {{.CHURN_PERCENT}}
churnDelay: {{.CHURN_DELAY}}
churnDeletionStrategy: {{.CHURN_DELETION_STRATEGY}}
podWait: false
waitWhenFinished: true
preLoadImages: true
Expand Down
6 changes: 6 additions & 0 deletions cmd/config/node-density/node-density.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,12 @@ jobs:
podWait: false
waitWhenFinished: true
preLoadImages: true
churn: {{.CHURN}}
churnCycles: {{.CHURN_CYCLES}}
churnDuration: {{.CHURN_DURATION}}
churnPercent: {{.CHURN_PERCENT}}
churnDelay: {{.CHURN_DELAY}}
churnDeletionStrategy: {{.CHURN_DELETION_STRATEGY}}
preLoadPeriod: 10s
namespaceLabels:
security.openshift.io/scc.podSecurityLabelSync: false
Expand Down
21 changes: 18 additions & 3 deletions node-density-cni.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,10 +28,11 @@ import (

// NewNodeDensity holds node-density-cni workload
func NewNodeDensityCNI(wh *workloads.WorkloadHelper) *cobra.Command {
var podsPerNode int
var namespacedIterations, svcLatency bool
var podsPerNode, iterationsPerNamespace, churnCycles, churnPercent int
var churnDelay, churnDuration time.Duration
var churnDeletionStrategy string
var churn, namespacedIterations, svcLatency bool
var podReadyThreshold time.Duration
var iterationsPerNamespace int
cmd := &cobra.Command{
Use: "node-density-cni",
Short: "Runs node-density-cni workload",
Expand All @@ -42,6 +43,13 @@ func NewNodeDensityCNI(wh *workloads.WorkloadHelper) *cobra.Command {
if err != nil {
log.Fatal(err)
}

os.Setenv("CHURN", fmt.Sprint(churn))
os.Setenv("CHURN_CYCLES", fmt.Sprintf("%v", churnCycles))
os.Setenv("CHURN_DURATION", fmt.Sprintf("%v", churnDuration))
os.Setenv("CHURN_DELAY", fmt.Sprintf("%v", churnDelay))
os.Setenv("CHURN_PERCENT", fmt.Sprint(churnPercent))
os.Setenv("CHURN_DELETION_STRATEGY", churnDeletionStrategy)
os.Setenv("JOB_ITERATIONS", fmt.Sprint((totalPods-podCount)/2))
os.Setenv("NAMESPACED_ITERATIONS", fmt.Sprint(namespacedIterations))
os.Setenv("ITERATIONS_PER_NAMESPACE", fmt.Sprint(iterationsPerNamespace))
Expand All @@ -53,6 +61,13 @@ func NewNodeDensityCNI(wh *workloads.WorkloadHelper) *cobra.Command {
wh.Run(cmd.Name())
},
}

cmd.Flags().BoolVar(&churn, "churn", false, "Enable churning")
cmd.Flags().IntVar(&churnCycles, "churn-cycles", 0, "Churn cycles to execute")
cmd.Flags().DurationVar(&churnDuration, "churn-duration", 1*time.Hour, "Churn duration")
cmd.Flags().DurationVar(&churnDelay, "churn-delay", 2*time.Minute, "Time to wait between each churn")
cmd.Flags().StringVar(&churnDeletionStrategy, "churn-deletion-strategy", "default", "Churn deletion strategy to use")
cmd.Flags().IntVar(&churnPercent, "churn-percent", 10, "Percentage of job iterations that kube-burner will churn each round")
cmd.Flags().DurationVar(&podReadyThreshold, "pod-ready-threshold", 1*time.Minute, "Pod ready timeout threshold")
cmd.Flags().IntVar(&podsPerNode, "pods-per-node", 245, "Pods per node")
cmd.Flags().BoolVar(&namespacedIterations, "namespaced-iterations", true, "Namespaced iterations")
Expand Down
21 changes: 17 additions & 4 deletions node-density-heavy.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,10 @@ import (

// NewNodeDensity holds node-density-heavy workload
func NewNodeDensityHeavy(wh *workloads.WorkloadHelper) *cobra.Command {
var podsPerNode int
var podReadyThreshold, probesPeriod time.Duration
var namespacedIterations bool
var iterationsPerNamespace int
var podsPerNode, churnCycles, iterationsPerNamespace, churnPercent int
var podReadyThreshold, churnDuration, churnDelay, probesPeriod time.Duration
var churnDeletionStrategy string
var churn, namespacedIterations bool
cmd := &cobra.Command{
Use: "node-density-heavy",
Short: "Runs node-density-heavy workload",
Expand All @@ -41,6 +41,12 @@ func NewNodeDensityHeavy(wh *workloads.WorkloadHelper) *cobra.Command {
if err != nil {
log.Fatal(err)
}
os.Setenv("CHURN", fmt.Sprint(churn))
os.Setenv("CHURN_CYCLES", fmt.Sprintf("%v", churnCycles))
os.Setenv("CHURN_DURATION", fmt.Sprintf("%v", churnDuration))
os.Setenv("CHURN_DELAY", fmt.Sprintf("%v", churnDelay))
os.Setenv("CHURN_PERCENT", fmt.Sprint(churnPercent))
os.Setenv("CHURN_DELETION_STRATEGY", churnDeletionStrategy)
// We divide by two the number of pods to deploy to obtain the workload iterations
os.Setenv("JOB_ITERATIONS", fmt.Sprint((totalPods-podCount)/2))
os.Setenv("POD_READY_THRESHOLD", fmt.Sprintf("%v", podReadyThreshold))
Expand All @@ -53,6 +59,13 @@ func NewNodeDensityHeavy(wh *workloads.WorkloadHelper) *cobra.Command {
wh.Run(cmd.Name())
},
}

cmd.Flags().BoolVar(&churn, "churn", false, "Enable churning")
cmd.Flags().IntVar(&churnCycles, "churn-cycles", 0, "Churn cycles to execute")
cmd.Flags().DurationVar(&churnDuration, "churn-duration", 1*time.Hour, "Churn duration")
cmd.Flags().DurationVar(&churnDelay, "churn-delay", 2*time.Minute, "Time to wait between each churn")
cmd.Flags().StringVar(&churnDeletionStrategy, "churn-deletion-strategy", "default", "Churn deletion strategy to use")
cmd.Flags().IntVar(&churnPercent, "churn-percent", 10, "Percentage of job iterations that kube-burner will churn each round")
cmd.Flags().DurationVar(&podReadyThreshold, "pod-ready-threshold", 2*time.Minute, "Pod ready timeout threshold")
cmd.Flags().DurationVar(&probesPeriod, "probes-period", 10*time.Second, "Perf app readiness/livenes probes period")
cmd.Flags().IntVar(&podsPerNode, "pods-per-node", 245, "Pods per node")
Expand Down
21 changes: 18 additions & 3 deletions node-density.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,9 +27,10 @@ import (

// NewNodeDensity holds node-density workload
func NewNodeDensity(wh *workloads.WorkloadHelper) *cobra.Command {
var podsPerNode int
var podReadyThreshold time.Duration
var containerImage string
var podsPerNode, churnCycles, churnPercent int
var podReadyThreshold, churnDuration, churnDelay time.Duration
var containerImage, churnDeletionStrategy string
var churn bool
cmd := &cobra.Command{
Use: "node-density",
Short: "Runs node-density workload",
Expand All @@ -40,6 +41,13 @@ func NewNodeDensity(wh *workloads.WorkloadHelper) *cobra.Command {
if err != nil {
log.Fatal(err.Error())
}

os.Setenv("CHURN", fmt.Sprint(churn))
os.Setenv("CHURN_CYCLES", fmt.Sprintf("%v", churnCycles))
os.Setenv("CHURN_DURATION", fmt.Sprintf("%v", churnDuration))
os.Setenv("CHURN_DELAY", fmt.Sprintf("%v", churnDelay))
os.Setenv("CHURN_PERCENT", fmt.Sprint(churnPercent))
os.Setenv("CHURN_DELETION_STRATEGY", churnDeletionStrategy)
os.Setenv("JOB_ITERATIONS", fmt.Sprint(totalPods-podCount))
os.Setenv("POD_READY_THRESHOLD", fmt.Sprintf("%v", podReadyThreshold))
os.Setenv("CONTAINER_IMAGE", containerImage)
Expand All @@ -49,6 +57,13 @@ func NewNodeDensity(wh *workloads.WorkloadHelper) *cobra.Command {
wh.Run(cmd.Name())
},
}

cmd.Flags().BoolVar(&churn, "churn", false, "Enable churning")
cmd.Flags().IntVar(&churnCycles, "churn-cycles", 0, "Churn cycles to execute")
cmd.Flags().DurationVar(&churnDuration, "churn-duration", 1*time.Hour, "Churn duration")
cmd.Flags().DurationVar(&churnDelay, "churn-delay", 2*time.Minute, "Time to wait between each churn")
cmd.Flags().StringVar(&churnDeletionStrategy, "churn-deletion-strategy", "default", "Churn deletion strategy to use")
cmd.Flags().IntVar(&churnPercent, "churn-percent", 10, "Percentage of job iterations that kube-burner will churn each round")
cmd.Flags().IntVar(&podsPerNode, "pods-per-node", 245, "Pods per node")
cmd.Flags().DurationVar(&podReadyThreshold, "pod-ready-threshold", 15*time.Second, "Pod ready timeout threshold")
cmd.Flags().StringVar(&containerImage, "container-image", "gcr.io/google_containers/pause:3.1", "Container image")
Expand Down

0 comments on commit d85a6a3

Please sign in to comment.