diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index 5cb0722edd8..884c413cdb5 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -21,8 +21,8 @@ fi if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-agentbeat" && "$BUILDKITE_STEP_KEY" == *"agentbeat-it"* ]]; then out=$(.buildkite/scripts/agentbeat/setup_agentbeat.py) echo "$out" - AGENTBEAT_PATH=$(echo "$out" | tail -n 1) - export AGENTBEAT_PATH + AGENT_BUILD_DIR=$(echo "$out" | tail -n 1) + export AGENT_BUILD_DIR fi if [[ "$BUILDKITE_PIPELINE_SLUG" == "auditbeat" || \ diff --git a/.buildkite/x-pack/pipeline.xpack.agentbeat.yml b/.buildkite/x-pack/pipeline.xpack.agentbeat.yml index 1687aa25d92..2fb2b7654af 100644 --- a/.buildkite/x-pack/pipeline.xpack.agentbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.agentbeat.yml @@ -1,43 +1,31 @@ env: ASDF_MAGE_VERSION: 1.15.0 - ASDF_NODEJS_VERSION: 18.17.1 - GCP_HI_PERF_MACHINE_TYPE: "c2d-highcpu-16" IMAGE_UBUNTU_X86_64: "family/platform-ingest-beats-ubuntu-2204" - AWS_ARM_INSTANCE_TYPE: "m6g.xlarge" - AWS_IMAGE_UBUNTU_ARM_64: "platform-ingest-beats-ubuntu-2204-aarch64" - - IMAGE_MACOS_ARM: "generic-13-ventura-arm" - IMAGE_MACOS_X86_64: "generic-13-ventura-x64" - - IMAGE_WIN_2022: "family/platform-ingest-beats-windows-2022" - IMAGE_BEATS_WITH_HOOKS_LATEST: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-beats-ci-with-hooks:latest" - AGENTBEAT_SPEC: "./agentbeat.spec.yml" - steps: - - group: "Check/Update" - key: "x-pack-agentbeat-check-update" - - steps: - - label: "agentbeat: Run pre-commit" - command: "pre-commit run --all-files" - agents: - image: "${IMAGE_BEATS_WITH_HOOKS_LATEST}" - memory: "2Gi" - useCustomGlobalHooks: true - notify: - - github_commit_status: - context: "agentbeat: pre-commit" - - - wait: ~ - # with PRs, we want to run mandatory tests only if check/update step succeed - # for other cases, e.g. merge commits, we want to run mundatory test (and publish) independently of other tests - # this allows building DRA artifacts even if there is flakiness in check/update step - if: build.env("BUILDKITE_PULL_REQUEST") != "false" - depends_on: "x-pack-agentbeat-check-update" +# - group: "Check/Update" +# key: "x-pack-agentbeat-check-update" +# +# steps: +# - label: "agentbeat: Run pre-commit" +# command: "pre-commit run --all-files" +# agents: +# image: "${IMAGE_BEATS_WITH_HOOKS_LATEST}" +# memory: "2Gi" +# useCustomGlobalHooks: true +# notify: +# - github_commit_status: +# context: "agentbeat: pre-commit" +# +# - wait: ~ +# # with PRs, we want to run mandatory tests only if check/update step succeed +# # for other cases, e.g. 
merge commits, we want to run mandatory tests (and publish) independently of other tests
+#   # this allows building DRA artifacts even if there is flakiness in check/update step
+#   if: build.env("BUILDKITE_PULL_REQUEST") != "false"
+#   depends_on: "x-pack-agentbeat-check-update"
 
   - group: "Agentbeat tests"
     key: "agentbeat-mandatory-tests"
@@ -47,6 +35,7 @@ steps:
         key: "agentbeat-package-linux"
         env:
           PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64"
+          PACKAGES: tar.gz,zip
           SNAPSHOT: true
         command: |
           set -euo pipefail
@@ -70,93 +59,87 @@ steps:
           - github_commit_status:
               context: "agentbeat: Packaging"
 
-      - label: ":ubuntu: x-pack/agentbeat: Ubuntu x86_64 Spec tests"
-        key: "agentbeat-it-linux-x86-64"
-        env:
-          PLATFORM: "linux/amd64"
+#      - label: ":linux: Agentbeat/Integration tests Linux"
+#        key: "agentbeat-it-linux"
+#        depends_on:
+#          - agentbeat-package-linux
+#        env:
+#          ASDF_NODEJS_VERSION: 18.17.1
+#          PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64"
+#          SNAPSHOT: true
+#        command: |
+#          set -euo pipefail
+#          echo "~~~ Downloading artifacts"
+#          buildkite-agent artifact download x-pack/agentbeat/build/distributions/** . --step 'agentbeat-package-linux'
+#          ls -lah x-pack/agentbeat/build/distributions/
+#          echo "~~~ Installing @elastic/synthetics with npm"
+#          npm install -g @elastic/synthetics
+#          echo "~~~ Running tests"
+#          cd x-pack/agentbeat
+#          mage goIntegTest
+#        artifact_paths:
+#          - x-pack/agentbeat/build/distributions/**/*
+#          - "x-pack/agentbeat/build/*.xml"
+#          - "x-pack/agentbeat/build/*.json"
+#        plugins:
+#          - test-collector#v1.10.2:
+#              files: "x-pack/agentbeat/build/TEST-*.xml"
+#              format: "junit"
+#              branches: "main"
+#              debug: true
+#        retry:
+#          automatic:
+#            - limit: 1
+#        timeout_in_minutes: 60
+#        agents:
+#          provider: "gcp"
+#          image: "${IMAGE_UBUNTU_X86_64}"
+#          machineType: "${GCP_HI_PERF_MACHINE_TYPE}"
+#          disk_size: 100
+#          disk_type: "pd-ssd"
+#        notify:
+#          - github_commit_status:
+#              context: "agentbeat: Integration tests"
+
+  - group: "Agentbeat: Serverless Tests"
+    key: "agentbeat-serverless-tests"
+
+    steps:
+      - label: ":ubuntu: Serverless tests"
+        key: "agentbeat-it-serverless"
         depends_on:
           - agentbeat-package-linux
+        env:
+          AGENT_STACK_VERSION: "8.16.0-SNAPSHOT"
+          TEST_INTEG_AUTH_GCP_DATACENTER: "us-central1-a"
+          GOFLAGS: "-buildvcs=false"
+          TEST_INTEG_CLEAN_ON_EXIT: true
+          TEST_PLATFORMS: "linux/amd64"
+          SNAPSHOT: true
         command: |
           cd x-pack/agentbeat
-          mage -v testWithSpec
+          mage serverlessTest metricbeat
+        artifact_paths:
+          - x-pack/agentbeat/build/TEST-**
+          - x-pack/agentbeat/build/diagnostics/*
+        plugins:
+          - test-collector#v1.10.2:
+              files: "x-pack/agentbeat/build/TEST-*.xml"
+              format: "junit"
+              branches: "main"
+              debug: true
+        retry:
+          automatic:
+            - limit: 1
+        timeout_in_minutes: 60
+        concurrency_group: elastic-agent-extended-testing/serverless-integration
+        concurrency: 8
         agents:
           provider: "gcp"
           image: "${IMAGE_UBUNTU_X86_64}"
-          machineType: "${GCP_HI_PERF_MACHINE_TYPE}"
+          machineType: "${GCP_STANDARD_MACHINE_TYPE}"
           disk_size: 100
           disk_type: "pd-ssd"
        notify:
          - github_commit_status:
-            context: "agentbeat: Ubuntu x86_64 Spec tests"
-
-      - label: ":ubuntu: x-pack/agentbeat: Ubuntu arm64 Spec tests"
-        key: "agentbeat-it-linux-arm64"
-        env:
-          PLATFORM: "linux/arm64"
-        depends_on:
-          - agentbeat-package-linux
-        command: |
-          cd x-pack/agentbeat
-          mage -v testWithSpec
-        agents:
-          provider: "aws"
-          imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}"
-          instanceType: "${AWS_ARM_INSTANCE_TYPE}"
-        notify:
-          - github_commit_status:
-              context: "agentbeat: 
Ubuntu arm64 Spec tests" - - - label: ":windows: x-pack/agentbeat: Windows x86_64 Spec tests" - key: "agentbeat-it-windows" - env: - PLATFORM: "windows/amd64" - depends_on: - - agentbeat-package-linux - command: | - Set-Location -Path x-pack/agentbeat - mage -v testWithSpec - agents: - provider: "gcp" - image: "${IMAGE_WIN_2022}" - machine_type: "${GCP_WIN_MACHINE_TYPE}" - disk_size: 200 - disk_type: "pd-ssd" - notify: - - github_commit_status: - context: "agentbeat: Windows x86_64 Spec tests" - - - label: ":macos: x-pack/agentbeat: macOS x86_64 Spec tests" - key: "agentbeat-it-macos-x86-64" - depends_on: - - agentbeat-package-linux - env: - PLATFORM: "darwin/amd64" - command: | - set -euo pipefail - source .buildkite/scripts/install_macos_tools.sh - cd x-pack/agentbeat - mage -v testWithSpec - agents: - provider: "orka" - imagePrefix: "${IMAGE_MACOS_X86_64}" - notify: - - github_commit_status: - context: "agentbeat: macOS x86_64 Spec tests" - - - label: ":macos: x-pack/agentbeat: macOS arm64 Spec tests" - key: "agentbeat-it-macos-arm64" - depends_on: - - agentbeat-package-linux - env: - PLATFORM: "darwin/arm64" - command: | - set -euo pipefail - source .buildkite/scripts/install_macos_tools.sh - cd x-pack/agentbeat - mage -v testWithSpec - agents: - provider: "orka" - imagePrefix: "${IMAGE_MACOS_ARM}" - notify: - - github_commit_status: - context: "agentbeat: macOS arm64 Spec tests" + context: "agentbeat: Serverless tests" diff --git a/dev-tools/mage/agentbeat-serverless.go b/dev-tools/mage/agentbeat-serverless.go new file mode 100644 index 00000000000..3b369f42c32 --- /dev/null +++ b/dev-tools/mage/agentbeat-serverless.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package mage
+
+import (
+	"fmt"
+	"log"
+	"os"
+)
+
+// TestBeatServerless prepares the environment for running the serverless
+// integration tests against a single beat binary.
+func TestBeatServerless(beat string) {
+	if beat == "" {
+		log.Fatal("Beat is not defined")
+	}
+
+	if os.Getenv("AGENT_BUILD_DIR") == "" {
+		log.Fatal("AGENT_BUILD_DIR is not defined")
+	}
+
+	setStackProvisioner()
+	setTestBinaryName(beat)
+}
+
+func setStackProvisioner() {
+	stackProvisioner := os.Getenv("STACK_PROVISIONER")
+	if stackProvisioner == "" {
+		if err := os.Setenv("STACK_PROVISIONER", "serverless"); err != nil {
+			log.Fatalf("error setting serverless stack var: %v", err)
+		}
+	} else if stackProvisioner == "stateful" {
+		fmt.Println("--- Warning: running TestBeatServerless as stateful")
+	}
+}
+
+func setTestBinaryName(beat string) {
+	if err := os.Setenv("TEST_BINARY_NAME", beat); err != nil {
+		log.Fatalf("error setting binary name: %v", err)
+	}
+}
diff --git a/dev-tools/mage/gotest.go b/dev-tools/mage/gotest.go
index ecc8f277b94..efed67fdea6 100644
--- a/dev-tools/mage/gotest.go
+++ b/dev-tools/mage/gotest.go
@@ -428,3 +428,28 @@ func BuildSystemTestGoBinary(binArgs TestBinaryArgs) error {
 	}()
 	return sh.RunV("go", args...)
 }
+
+// GoTestBuild compiles the tests of the given packages into a binary
+// (go test -c) without running them.
+func GoTestBuild(ctx context.Context, params GoTestArgs) error {
+	if params.OutputFile == "" {
+		return fmt.Errorf("missing output file")
+	}
+
+	fmt.Println(">> go test:", params.TestName, "Building Test Binary")
+
+	args := []string{"test", "-c", "-o", params.OutputFile}
+
+	if len(params.Tags) > 0 {
+		tags := strings.Join(params.Tags, " ")
+		if tags != "" {
+			args = append(args, "-tags", tags)
+		}
+	}
+
+	args = append(args, params.Packages...)
+
+	goTestBuild := makeCommand(ctx, params.Env, "go", args...)
+
+	return goTestBuild.Run()
+}
diff --git a/dev-tools/mage/spec.go b/dev-tools/mage/spec.go
deleted file mode 100644
index 03c733f1dd6..00000000000
--- a/dev-tools/mage/spec.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Licensed to Elasticsearch B.V. under one or more contributor
-// license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright
-// ownership. Elasticsearch B.V. licenses this file to you under
-// the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License. 
- -package mage - -import ( - "gopkg.in/yaml.v2" - - "log" - "os" - "strings" -) - -type spec struct { - Inputs []input -} - -type input struct { - Name string - Description string - Platforms []string - Command command -} - -func (i *input) GetCommand() string { - return strings.Join(i.Command.Args, " ") -} - -type command struct { - Name string - Args []string -} - -// SpecCommands parses agent.beat.spec.yml and collects commands for tests -func SpecCommands(specPath string, platform string) []string { - spec, _ := parseToObj(specPath) - - filteredInputs := filter(spec.Inputs, func(input input) bool { - return contains(input.Platforms, platform) - }) - - commands := make(map[string]interface{}) - for _, i := range filteredInputs { - commands[i.GetCommand()] = nil - } - keys := make([]string, 0, len(commands)) - for k := range commands { - keys = append(keys, k) - } - - return keys -} - -func parseToObj(path string) (spec, error) { - specFile, err := os.ReadFile(path) - if err != nil { - log.Fatalf("Error opening agentbeat.spec.yml: %v", err) - return spec{}, err - } - var spec spec - err = yaml.Unmarshal(specFile, &spec) - if err != nil { - log.Fatalf("Error parsing agentbeat.spec.yml: %v", err) - return spec, err - } - return spec, nil -} - -func filter[T any](slice []T, condition func(T) bool) []T { - var result []T - for _, v := range slice { - if condition(v) { - result = append(result, v) - } - } - return result -} - -func contains(slice []string, item string) bool { - for _, v := range slice { - if v == item { - return true - } - } - return false -} diff --git a/dev-tools/mage/target/srvrlesstest/component/platforms.go b/dev-tools/mage/target/srvrlesstest/component/platforms.go new file mode 100644 index 00000000000..7825b3e5fdc --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/component/platforms.go @@ -0,0 +1,175 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package component + +import ( + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/utils" + goruntime "runtime" + "strings" + + "github.com/elastic/go-sysinfo" +) + +const ( + // Container represents running inside a container + Container = "container" + // Darwin represents running on Mac OSX + Darwin = "darwin" + // Linux represents running on Linux + Linux = "linux" + // Windows represents running on Windows + Windows = "windows" +) + +const ( + // AMD64 represents the amd64 architecture + AMD64 = "amd64" + // ARM64 represents the arm64 architecture + ARM64 = "arm64" +) + +// Platform defines the platform that a component can support +type Platform struct { + OS string + Arch string + GOOS string +} + +// Platforms is an array of platforms. 
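+// A platform is identified by an "os/arch" string such as "linux/amd64"
+// (see Platform.String and Platforms.Exists below).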
+type Platforms []Platform
+
+// GlobalPlatforms defines the platforms that a component can support
+var GlobalPlatforms = Platforms{
+	{
+		OS:   Container,
+		Arch: AMD64,
+		GOOS: Linux,
+	},
+	{
+		OS:   Container,
+		Arch: ARM64,
+		GOOS: Linux,
+	},
+	{
+		OS:   Darwin,
+		Arch: AMD64,
+		GOOS: Darwin,
+	},
+	{
+		OS:   Darwin,
+		Arch: ARM64,
+		GOOS: Darwin,
+	},
+	{
+		OS:   Linux,
+		Arch: AMD64,
+		GOOS: Linux,
+	},
+	{
+		OS:   Linux,
+		Arch: ARM64,
+		GOOS: Linux,
+	},
+	{
+		OS:   Windows,
+		Arch: AMD64,
+		GOOS: Windows,
+	},
+}
+
+// String returns the platform string identifier.
+func (p *Platform) String() string {
+	return fmt.Sprintf("%s/%s", p.OS, p.Arch)
+}
+
+// Exists returns true if the given "os/arch" platform string is in the set.
+func (p Platforms) Exists(platform string) bool {
+	pieces := strings.SplitN(platform, "/", 2)
+	if len(pieces) != 2 {
+		return false
+	}
+	for _, platform := range p {
+		if platform.OS == pieces[0] && platform.Arch == pieces[1] {
+			return true
+		}
+	}
+	return false
+}
+
+// UserDetail provides user specific information on the running platform.
+type UserDetail struct {
+	Root bool
+}
+
+// PlatformDetail is a platform with more detailed information about the running platform.
+type PlatformDetail struct {
+	Platform
+
+	NativeArch string
+	Family     string
+	Major      int
+	Minor      int
+
+	User UserDetail
+}
+
+// PlatformModifier can modify the platform details before the runtime specifications are loaded.
+type PlatformModifier func(detail PlatformDetail) PlatformDetail
+
+// LoadPlatformDetail loads the platform details for the current system.
+func LoadPlatformDetail(modifiers ...PlatformModifier) (PlatformDetail, error) {
+	hasRoot, err := utils.HasRoot()
+	if err != nil {
+		return PlatformDetail{}, err
+	}
+	info, err := sysinfo.Host()
+	if err != nil {
+		return PlatformDetail{}, err
+	}
+	os := info.Info().OS
+	nativeArch := info.Info().NativeArchitecture
+	if nativeArch == "x86_64" {
+		// go-sysinfo Architecture and NativeArchitecture prefer x86_64
+		// but GOARCH prefers amd64
+		nativeArch = "amd64"
+	}
+	if nativeArch == "aarch64" {
+		// go-sysinfo Architecture and NativeArchitecture prefer aarch64
+		// but GOARCH prefers arm64
+		nativeArch = "arm64"
+	}
+	detail := PlatformDetail{
+		Platform: Platform{
+			OS:   goruntime.GOOS,
+			Arch: goruntime.GOARCH,
+			GOOS: goruntime.GOOS,
+		},
+		NativeArch: nativeArch,
+		Family:     os.Family,
+		Major:      os.Major,
+		Minor:      os.Minor,
+		User: UserDetail{
+			Root: hasRoot,
+		},
+	}
+	for _, modifier := range modifiers {
+		detail = modifier(detail)
+	}
+	return detail, nil
+}
diff --git a/dev-tools/mage/target/srvrlesstest/core/process/cmd.go b/dev-tools/mage/target/srvrlesstest/core/process/cmd.go
new file mode 100644
index 00000000000..fc5f262e662
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/core/process/cmd.go
@@ -0,0 +1,49 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build !linux && !darwin + +package process + +import ( + "context" + "os" + "os/exec" + "path/filepath" +) + +func getCmd(ctx context.Context, path string, env []string, uid, gid int, arg ...string) (*exec.Cmd, error) { + var cmd *exec.Cmd + if ctx == nil { + cmd = exec.Command(path, arg...) + } else { + cmd = exec.CommandContext(ctx, path, arg...) + } + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) + cmd.Dir = filepath.Dir(path) + + return cmd, nil +} + +func killCmd(proc *os.Process) error { + return proc.Kill() +} + +func terminateCmd(proc *os.Process) error { + return proc.Kill() +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/cmd_darwin.go b/dev-tools/mage/target/srvrlesstest/core/process/cmd_darwin.go new file mode 100644 index 00000000000..9a5be3e1beb --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/cmd_darwin.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build darwin + +package process + +import ( + "context" + "fmt" + "math" + "os" + "os/exec" + "path/filepath" + "syscall" +) + +func getCmd(ctx context.Context, path string, env []string, uid, gid int, arg ...string) (*exec.Cmd, error) { + var cmd *exec.Cmd + if ctx == nil { + cmd = exec.Command(path, arg...) + } else { + cmd = exec.CommandContext(ctx, path, arg...) + } + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) + cmd.Dir = filepath.Dir(path) + if isInt32(uid) && isInt32(gid) { + cmd.SysProcAttr = &syscall.SysProcAttr{ + Credential: &syscall.Credential{ + Uid: uint32(uid), + Gid: uint32(gid), + NoSetGroups: true, + }, + } + } else { + return nil, fmt.Errorf("invalid uid: '%d' or gid: '%d'", uid, gid) + } + + return cmd, nil +} + +func isInt32(val int) bool { + return val >= 0 && val <= math.MaxInt32 +} + +func killCmd(proc *os.Process) error { + return proc.Kill() +} + +func terminateCmd(proc *os.Process) error { + return proc.Signal(syscall.SIGTERM) +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/cmd_linux.go b/dev-tools/mage/target/srvrlesstest/core/process/cmd_linux.go new file mode 100644 index 00000000000..3a480128ffb --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/cmd_linux.go @@ -0,0 +1,70 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux + +package process + +import ( + "context" + "fmt" + "math" + "os" + "os/exec" + "path/filepath" + "syscall" +) + +func getCmd(ctx context.Context, path string, env []string, uid, gid int, arg ...string) (*exec.Cmd, error) { + var cmd *exec.Cmd + if ctx == nil { + cmd = exec.Command(path, arg...) + } else { + cmd = exec.CommandContext(ctx, path, arg...) + } + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) + cmd.Dir = filepath.Dir(path) + if isInt32(uid) && isInt32(gid) { + cmd.SysProcAttr = &syscall.SysProcAttr{ + // on shutdown all sub-processes are sent SIGTERM, in the case that the Agent dies or is -9 killed + // then also kill the children (only supported on linux) + Pdeathsig: syscall.SIGKILL, + Credential: &syscall.Credential{ + Uid: uint32(uid), + Gid: uint32(gid), + NoSetGroups: true, + }, + } + } else { + return nil, fmt.Errorf("invalid uid: '%d' or gid: '%d'", uid, gid) + } + + return cmd, nil +} + +func isInt32(val int) bool { + return val >= 0 && val <= math.MaxInt32 +} + +func killCmd(proc *os.Process) error { + return proc.Kill() +} + +func terminateCmd(proc *os.Process) error { + return proc.Signal(syscall.SIGTERM) +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/external_unix.go b/dev-tools/mage/target/srvrlesstest/core/process/external_unix.go new file mode 100644 index 00000000000..6563556b3ee --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/external_unix.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build !windows + +package process + +import ( + "os" + "syscall" + "time" +) + +// externalProcess is a watch mechanism used in cases where OS requires a process to be a child +// for waiting for process. We need to be able to await any process. +func externalProcess(proc *os.Process) { + if proc == nil { + return + } + + for { + <-time.After(1 * time.Second) + if proc.Signal(syscall.Signal(0)) != nil { + // failed to contact process, return + return + } + } +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/external_windows.go b/dev-tools/mage/target/srvrlesstest/core/process/external_windows.go new file mode 100644 index 00000000000..8d3aa616964 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/external_windows.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build windows + +package process + +import ( + "os" + "syscall" + "time" +) + +const ( + // exitCodeStillActive according to docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getexitcodeprocess + exitCodeStillActive = 259 +) + +// externalProcess is a watch mechanism used in cases where OS requires a process to be a child +// for waiting for process. We need to be able to await any process. +func externalProcess(proc *os.Process) { + if proc == nil { + return + } + + for { + <-time.After(1 * time.Second) + if isWindowsProcessExited(proc.Pid) { + return + } + } +} + +func isWindowsProcessExited(pid int) bool { + const desiredAccess = syscall.STANDARD_RIGHTS_READ | syscall.PROCESS_QUERY_INFORMATION | syscall.SYNCHRONIZE + h, err := syscall.OpenProcess(desiredAccess, false, uint32(pid)) + if err != nil { + // failed to open handle, report exited + return true + } + + // get exit code, this returns immediately in case it is still running + // it returns exitCodeStillActive + var ec uint32 + if err := syscall.GetExitCodeProcess(h, &ec); err != nil { + // failed to contact, report exited + return true + } + + return ec != exitCodeStillActive +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/job_unix.go b/dev-tools/mage/target/srvrlesstest/core/process/job_unix.go new file mode 100644 index 00000000000..37a030e4159 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/job_unix.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build !windows + +package process + +import ( + "os" +) + +// Job is noop on unix +type Job int + +var ( + // JobObject is public global JobObject, 0 value on linux + JobObject Job +) + +// CreateJobObject returns a job object. 
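+// On non-Windows systems the returned Job is the zero value and all of its
+// methods are noops, so callers can share one code path across platforms.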
+func CreateJobObject() (pj Job, err error) { + return pj, err +} + +// NewJob is noop on unix +func NewJob() (Job, error) { + return 0, nil +} + +// Close is noop on unix +func (job Job) Close() error { + return nil +} + +// Assign is noop on unix +func (job Job) Assign(p *os.Process) error { + return nil +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/job_windows.go b/dev-tools/mage/target/srvrlesstest/core/process/job_windows.go new file mode 100644 index 00000000000..332f3ed4f27 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/job_windows.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build windows + +package process + +import ( + "os" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Job is wrapper for windows JobObject +// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects +// This helper guarantees a clean process tree kill on job handler close +type Job windows.Handle + +var ( + // Public global JobObject should be initialized once in main + JobObject Job +) + +// CreateJobObject creates JobObject on Windows, global per process +// Should only be initialized once in main function +func CreateJobObject() (pj Job, err error) { + if pj, err = NewJob(); err != nil { + return pj, err + } + JobObject = pj + return pj, nil +} + +// NewJob creates a instance of the JobObject +func NewJob() (Job, error) { + h, err := windows.CreateJobObject(nil, nil) + if err != nil { + return 0, err + } + + // From https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects + // ... if the job has the JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE flag specified, + // closing the last job object handle terminates all associated processes + // and then destroys the job object itself. + // If a nested job has the JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE flag specified, + // closing the last job object handle terminates all processes associated + // with the job and its child jobs in the hierarchy. 
+ info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{ + BasicLimitInformation: windows.JOBOBJECT_BASIC_LIMIT_INFORMATION{ + LimitFlags: windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE, + }, + } + if _, err := windows.SetInformationJobObject( + h, + windows.JobObjectExtendedLimitInformation, + uintptr(unsafe.Pointer(&info)), + uint32(unsafe.Sizeof(info))); err != nil { + return 0, err + } + + return Job(h), nil +} + +// Close closes job handler +func (job Job) Close() error { + if job == 0 { + return nil + } + return windows.CloseHandle(windows.Handle(job)) +} + +// Assign assigns the process to the JobObject +func (job Job) Assign(p *os.Process) error { + if job == 0 || p == nil { + return nil + } + return windows.AssignProcessToJobObject( + windows.Handle(job), + windows.Handle((*process)(unsafe.Pointer(p)).Handle)) +} + +type process struct { + Pid int + Handle uintptr +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/process.go b/dev-tools/mage/target/srvrlesstest/core/process/process.go new file mode 100644 index 00000000000..e2d60efa1c7 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/process.go @@ -0,0 +1,197 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package process + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" +) + +// Info groups information about fresh new process +type Info struct { + PID int + Process *os.Process + Stdin io.WriteCloser + Stderr io.ReadCloser +} + +// CmdOption is an option func to change the underlying command +type CmdOption func(c *exec.Cmd) error + +// StartConfig configuration for the process start set by the StartOption functions +type StartConfig struct { + ctx context.Context + uid, gid int + args, env []string + cmdOpts []CmdOption +} + +// StartOption start options function +type StartOption func(cfg *StartConfig) + +// Start starts a new process +func Start(path string, opts ...StartOption) (proc *Info, err error) { + // Apply options + c := StartConfig{ + uid: os.Geteuid(), + gid: os.Getegid(), + } + + for _, opt := range opts { + opt(&c) + } + + return startContext(c.ctx, path, c.uid, c.gid, c.args, c.env, c.cmdOpts...) 
+} + +// WithContext sets an optional context +func WithContext(ctx context.Context) StartOption { + return func(cfg *StartConfig) { + cfg.ctx = ctx + } +} + +// WithArgs sets arguments +func WithArgs(args []string) StartOption { + return func(cfg *StartConfig) { + cfg.args = args + } +} + +// WithEnv sets the environment variables +func WithEnv(env []string) StartOption { + return func(cfg *StartConfig) { + cfg.env = env + } +} + +// WithUID sets UID +func WithUID(uid int) StartOption { + return func(cfg *StartConfig) { + cfg.uid = uid + } +} + +// WithGID sets GID +func WithGID(gid int) StartOption { + return func(cfg *StartConfig) { + cfg.gid = gid + } +} + +// WithCmdOptions sets the exec.Cmd options +func WithCmdOptions(cmdOpts ...CmdOption) StartOption { + return func(cfg *StartConfig) { + cfg.cmdOpts = cmdOpts + } +} + +// WithWorkDir sets the cmd working directory +func WithWorkDir(wd string) CmdOption { + return func(c *exec.Cmd) error { + c.Dir = wd + return nil + } +} + +// Kill kills the process. +func (i *Info) Kill() error { + return killCmd(i.Process) +} + +// Stop stops the process cleanly. +func (i *Info) Stop() error { + return terminateCmd(i.Process) +} + +// StopWait stops the process and waits for it to exit. +func (i *Info) StopWait() error { + err := i.Stop() + if err != nil { + return err + } + _, err = i.Process.Wait() + return err +} + +// Wait returns a channel that will send process state once it exits. Each +// call to Wait() creates a goroutine. Failure to read from the returned +// channel will leak this goroutine. +func (i *Info) Wait() <-chan *os.ProcessState { + ch := make(chan *os.ProcessState) + + go func() { + procState, err := i.Process.Wait() + if err != nil { + // process is not a child - some OSs requires process to be child + externalProcess(i.Process) + } + ch <- procState + }() + + return ch +} + +// startContext starts a new process with context. The context is optional and can be nil. +func startContext(ctx context.Context, path string, uid, gid int, args []string, env []string, opts ...CmdOption) (*Info, error) { + cmd, err := getCmd(ctx, path, env, uid, gid, args...) + if err != nil { + return nil, fmt.Errorf("failed to create command for %q: %w", path, err) + } + for _, o := range opts { + if err := o(cmd); err != nil { + return nil, fmt.Errorf("failed to set option command for %q: %w", path, err) + } + } + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, fmt.Errorf("failed to create stdin for %q: %w", path, err) + } + + var stderr io.ReadCloser + if cmd.Stderr == nil { + stderr, err = cmd.StderrPipe() + if err != nil { + return nil, fmt.Errorf("failed to create stderr for %q: %w", path, err) + } + } + + // start process + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("failed to start %q: %w", path, err) + } + + // Hook to JobObject on windows, noop on other platforms. + // This ties the application processes lifespan to the agent's. + // Fixes the orphaned beats processes left behind situation + // after the agent process gets killed. 
+ if err := JobObject.Assign(cmd.Process); err != nil { + _ = killCmd(cmd.Process) + return nil, fmt.Errorf("failed job assignment %q: %w", path, err) + } + + return &Info{ + PID: cmd.Process.Pid, + Process: cmd.Process, + Stdin: stdin, + Stderr: stderr, + }, err +} diff --git a/dev-tools/mage/target/srvrlesstest/define/batch.go b/dev-tools/mage/target/srvrlesstest/define/batch.go new file mode 100644 index 00000000000..c8b6ac65a0e --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/define/batch.go @@ -0,0 +1,320 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package define + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +// defaultOS is the set of OS that are used in the case that a requirement doesn't define any +var defaultOS = []OS{ + { + Type: Darwin, + Arch: AMD64, + }, + { + Type: Darwin, + Arch: ARM64, + }, + { + Type: Linux, + Arch: AMD64, + }, + { + Type: Linux, + Arch: ARM64, + }, + { + Type: Windows, + Arch: AMD64, + }, +} + +// Batch is a grouping of tests that all have the same requirements. +type Batch struct { + // Group must be set on each test to define which group the tests belongs. + // Tests that are in the same group are executed on the same runner. + Group string `json:"group"` + + // OS defines the operating systems this test batch needs. + OS OS `json:"os"` + + // Stack defines the stack required for this batch. + Stack *Stack `json:"stack,omitempty"` + + // Tests define the set of packages and tests that do not require sudo + // privileges to be performed. + Tests []BatchPackageTests `json:"tests"` + + // SudoTests define the set of packages and tests that do require sudo + // privileges to be performed. + SudoTests []BatchPackageTests `json:"sudo_tests"` +} + +// BatchPackageTests is a package and its tests that belong to a batch. +type BatchPackageTests struct { + // Name is the package name. + Name string `json:"name"` + // Tests is the set of tests in the package. + Tests []BatchPackageTest `json:"tests"` +} + +// BatchPackageTest is a specific test in a package. +type BatchPackageTest struct { + // Name of the test. + Name string `json:"name"` + // Stack needed for test. + Stack bool `json:"stack"` +} + +// DetermineBatches parses the package directory with the possible extra build +// tags to determine the set of batches for the package. +func DetermineBatches(dir string, testFlags string, buildTags ...string) ([]Batch, error) { + const ( + defineMatcher = "define skip; requirements: " + ) + + // the 'define' build tag is added so that the `define.Require` skips and + // logs the requirements for each test. 
+ buildTags = append(buildTags, "define") + + // 'go test' wants a directory path to either be absolute or start with + // './' so it knows it's a directory and not package. + if !filepath.IsAbs(dir) && !strings.HasPrefix(dir, "./") { + dir = "./" + dir + } + + // run 'go test' and collect the JSON output to be parsed + // #nosec G204 -- test function code, it will be okay + cmdArgs := []string{"test", "-v", "--tags", strings.Join(buildTags, ","), "-json"} + if testFlags != "" { + flags := strings.Split(testFlags, " ") + cmdArgs = append(cmdArgs, flags...) + } + + cmdArgs = append(cmdArgs, dir) + testCmd := exec.Command("go", cmdArgs...) + output, err := testCmd.Output() + if err != nil { + // format cmdArgs to make the error message more coherent + cmdArgs = append([]string{"go"}, cmdArgs...) + + var errExit *exec.ExitError + if errors.As(err, &errExit) { + b := bytes.NewBuffer(errExit.Stderr) + b.Write(output) + output = b.Bytes() + } + return nil, fmt.Errorf( + "error running go test: (%w), got:\n\n%s\ntried to run: %v", + err, string(output), cmdArgs) + } + + // parses each test and determine the batches that each test belongs in + var batches []Batch + sc := bufio.NewScanner(bytes.NewReader(output)) + for sc.Scan() { + var tar testActionResult + err := json.Unmarshal([]byte(sc.Text()), &tar) + if err != nil { + return nil, err + } + if tar.Action == "output" && strings.Contains(tar.Output, defineMatcher) { + reqRaw := tar.Output[strings.Index(tar.Output, defineMatcher)+len(defineMatcher) : strings.LastIndex(tar.Output, "\n")] + var req Requirements + err := json.Unmarshal([]byte(reqRaw), &req) + if err != nil { + return nil, fmt.Errorf("failed to parse requirements JSON from test %s/%s: %w", tar.Package, tar.Test, err) + } + err = req.Validate() + if err != nil { + return nil, fmt.Errorf("parsed requirements are invalid JSON from test %s/%s: %w", tar.Package, tar.Test, err) + } + batches = appendTest(batches, tar, req) + } + } + return batches, nil +} + +func appendTest(batches []Batch, tar testActionResult, req Requirements) []Batch { + var set []OS + for _, o := range req.OS { + if o.Arch == "" { + set = append(set, OS{ + Type: o.Type, + Arch: AMD64, + Version: o.Version, + Distro: o.Distro, + DockerVariant: o.DockerVariant, + }) + if o.Type != Windows { + set = append(set, OS{ + Type: o.Type, + Arch: ARM64, + Version: o.Version, + Distro: o.Distro, + DockerVariant: o.DockerVariant, + }) + } + } else { + set = append(set, OS{ + Type: o.Type, + Arch: o.Arch, + Version: o.Version, + Distro: o.Distro, + DockerVariant: o.DockerVariant, + }) + } + } + if len(set) == 0 { + // no os define; means the test supports all + set = defaultOS + } + for _, o := range set { + var batch Batch + batchIdx := findBatchIdx(batches, req.Group, o, req.Stack) + if batchIdx == -1 { + // new batch required + batch = Batch{ + Group: req.Group, + OS: o, + Tests: nil, + SudoTests: nil, + } + batches = append(batches, batch) + batchIdx = len(batches) - 1 + } + batch = batches[batchIdx] + if o.Distro != "" { + batch.OS.Distro = o.Distro + } + if o.Version != "" { + batch.OS.Version = o.Version + } + if o.DockerVariant != "" { + batch.OS.DockerVariant = o.DockerVariant + } + if req.Stack != nil && batch.Stack == nil { + // assign the stack to this batch + batch.Stack = copyStack(req.Stack) + } + if req.Sudo { + batch.SudoTests = appendPackageTest(batch.SudoTests, tar.Package, tar.Test, req.Stack != nil) + } else { + batch.Tests = appendPackageTest(batch.Tests, tar.Package, tar.Test, req.Stack != nil) + } + 
batches[batchIdx] = batch
+	}
+	return batches
+}
+
+func appendPackageTest(tests []BatchPackageTests, pkg string, name string, stack bool) []BatchPackageTests {
+	for i, pt := range tests {
+		if pt.Name == pkg {
+			for _, testName := range pt.Tests {
+				if testName.Name == name {
+					// we already selected this test for this package for this batch,
+					// we can return immediately
+					return tests
+				}
+			}
+			pt.Tests = append(pt.Tests, BatchPackageTest{
+				Name:  name,
+				Stack: stack,
+			})
+			tests[i] = pt
+			return tests
+		}
+	}
+	var pt BatchPackageTests
+	pt.Name = pkg
+	pt.Tests = append(pt.Tests, BatchPackageTest{
+		Name:  name,
+		Stack: stack,
+	})
+	tests = append(tests, pt)
+	return tests
+}
+
+func findBatchIdx(batches []Batch, group string, os OS, stack *Stack) int {
+	for i, b := range batches {
+		if b.Group != group {
+			// must be in the same group
+			continue
+		}
+		if b.OS.Type != os.Type || b.OS.Arch != os.Arch {
+			// must be the same type and arch; both are always defined at this point
+			continue
+		}
+		if os.Distro != "" {
+			// must have the same distro
+			if b.OS.Distro != "" && b.OS.Distro != os.Distro {
+				continue
+			}
+		}
+		if os.Version != "" {
+			// must have the same version
+			if b.OS.Version != "" && b.OS.Version != os.Version {
+				continue
+			}
+		}
+		if os.DockerVariant != "" {
+			// must be the same docker image
+			if b.OS.DockerVariant != "" && b.OS.DockerVariant != os.DockerVariant {
+				continue
+			}
+		}
+		if stack == nil {
+			// don't care if the batch has a cloud or not
+			return i
+		}
+		if b.Stack == nil {
+			// needs a cloud, but this batch doesn't have one yet; calling code can set it
+			return i
+		}
+		if b.Stack.Version == stack.Version {
+			// same cloud version; compatible
+			return i
+		}
+	}
+	return -1
+}
+
+func copyStack(stack *Stack) *Stack {
+	var s Stack
+	if stack != nil {
+		s = *stack
+		return &s
+	}
+	return nil
+}
+
+type testActionResult struct {
+	Time    string `json:"Time"`
+	Action  string `json:"Action"`
+	Package string `json:"Package"`
+	Test    string `json:"Test"`
+	Output  string `json:"Output"`
+}
diff --git a/dev-tools/mage/target/srvrlesstest/define/requirements.go b/dev-tools/mage/target/srvrlesstest/define/requirements.go
new file mode 100644
index 00000000000..793c80e1023
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/define/requirements.go
@@ -0,0 +1,179 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package define
+
+import (
+	"errors"
+	"fmt"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/component"
+)
+
+const (
+	// Default constant can be used as the default group for tests. 
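+	// Tests that share a group are executed on the same runner (see
+	// Requirements.Group below).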
+ Default = "default" +) + +const ( + // Darwin is macOS platform + Darwin = component.Darwin + // Linux is Linux platform + Linux = component.Linux + // Windows is Windows platform + Windows = component.Windows + // Kubernetes is Kubernetes platform + Kubernetes = "kubernetes" +) + +const ( + // AMD64 is amd64 architecture + AMD64 = component.AMD64 + // ARM64 is arm64 architecture + ARM64 = component.ARM64 +) + +// OS defines an operating system, architecture, version and distribution combination. +type OS struct { + // Type is the operating system type (darwin, linux or windows). + // + // This is always required to be defined on the OS structure. + // If it is not defined the test runner will error. + Type string `json:"type"` + // Arch is the architecture type (amd64 or arm64). + // + // In the case that it's not provided the test will run on every + // architecture that is supported. + Arch string `json:"arch"` + // Version is a specific version of the OS type to run this test on + // + // When defined the test runs on this specific version only. When not + // defined the test is run on a selected version for this operating system. + Version string `json:"version"` + // Distro allows in the Linux case for a specific distribution to be + // selected for running on. Example would be "ubuntu". In the Kubernetes case + // for a specific distribution of kubernetes. Example would be "kind". + Distro string `json:"distro"` + // DockerVariant allows in the Kubernetes case for a specific variant to + // be selected for running with. Example would be "wolfi". + DockerVariant string `json:"docker_variant"` +} + +// Validate returns an error if not valid. +func (o OS) Validate() error { + if o.Type == "" { + return errors.New("type must be defined") + } + if o.Type != Darwin && o.Type != Linux && o.Type != Windows && o.Type != Kubernetes { + return errors.New("type must be either darwin, linux, windows, or kubernetes") + } + if o.Arch != "" { + if o.Arch != AMD64 && o.Arch != ARM64 { + return errors.New("arch must be either amd64 or arm64") + } + if o.Type == Windows && o.Arch == ARM64 { + return errors.New("windows on arm64 not supported") + } + } + if o.Distro != "" && (o.Type != Linux && o.Type != Kubernetes) { + return errors.New("distro can only be set when type is linux or kubernetes") + } + if o.DockerVariant != "" && o.Type != Kubernetes { + return errors.New("docker variant can only be set when type is kubernetes") + } + return nil +} + +// Stack defines the stack required for the test. +type Stack struct { + // Version defines a specific stack version to create for this test. + // + // In the case that no version is provided the same version being used for + // the current test execution is used. + Version string `json:"version"` +} + +// Requirements defines the testing requirements for the test to run. +type Requirements struct { + // Group must be set on each test to define which group the tests belongs to. + // Tests that are in the same group are executed on the same runner. + // + // Useful when tests take a long time to complete and sharding them across multiple + // hosts can improve the total amount of time to complete all the tests. + Group string `json:"group"` + + // OS defines the operating systems this test can run on. In the case + // multiple are provided the test is ran multiple times one time on each + // combination. + OS []OS `json:"os,omitempty"` + + // Stack defines the stack required for the test. 
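+	//
+	// A nil Stack means the test does not require a stack.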
+ Stack *Stack `json:"stack,omitempty"` + + // Local defines if this test can safely be performed on a local development machine. + // If not set then the test will not be performed when local only testing is performed. + // + // This doesn't mean this test can only run locally. It will still run on defined OS's + // when a full test run is performed. + Local bool `json:"local"` + + // Sudo defines that this test must run under superuser permissions. On Mac and Linux the + // test gets executed under sudo and on Windows it gets run under Administrator. + Sudo bool `json:"sudo"` +} + +// Validate returns an error if not valid. +func (r Requirements) Validate() error { + if r.Group == "" { + return errors.New("group is required") + } + for i, o := range r.OS { + if err := o.Validate(); err != nil { + return fmt.Errorf("invalid os %d: %w", i, err) + } + } + return nil +} + +// runtimeAllowed returns true if the runtime matches a valid OS. +func (r Requirements) runtimeAllowed(os string, arch string, version string, distro string) bool { + if len(r.OS) == 0 { + // all allowed + return true + } + for _, o := range r.OS { + if o.Type != Kubernetes && o.Type != os { + // not valid on this runtime + continue + } + if o.Arch != "" && o.Arch != arch { + // not allowed on specific architecture + continue + } + if o.Version != "" && o.Version != version { + // not allowed on specific version + continue + } + if o.Distro != "" && o.Distro != distro { + // not allowed on specific distro + continue + } + // allowed + return true + } + // made it this far, not allowed + return false +} diff --git a/dev-tools/mage/target/srvrlesstest/srvrlesstest.go b/dev-tools/mage/target/srvrlesstest/srvrlesstest.go new file mode 100644 index 00000000000..21bfe2767c2 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/srvrlesstest.go @@ -0,0 +1,428 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
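+
+// This file drives the serverless/stateful integration-test flow: it batches
+// tests with define.DetermineBatches, provisions instances and a stack via
+// the configured provisioners, runs the batches, and writes JUnit/JSON
+// results under build/.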
+ +package srvrlesstest + +import ( + "context" + "fmt" + tcommon "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ess" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/kubernetes/kind" + multipass "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/multipas" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ogc" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/runner" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/elastic/beats/v7/dev-tools/mage" + "github.com/magefile/mage/mg" +) + +type ProvisionerType uint32 + +var ( + goIntegTestTimeout = 2 * time.Hour + goProvisionAndTestTimeout = goIntegTestTimeout + 30*time.Minute +) + +const ( + snapshotEnv = "SNAPSHOT" +) + +// Integration namespace contains tasks related to operating and running integration tests. +type Integration mg.Namespace + +func IntegRunner(ctx context.Context, matrix bool, singleTest string) error { + if _, ok := ctx.Deadline(); !ok { + // If the context doesn't have a timeout (usually via the mage -t option), give it one. + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, goProvisionAndTestTimeout) + defer cancel() + } + + for { + failedCount, err := integRunnerOnce(ctx, matrix, singleTest) + if err != nil { + return err + } + if failedCount > 0 { + if hasCleanOnExit() { + mg.Deps(Integration.Clean) + } + os.Exit(1) + } + if !hasRunUntilFailure() { + if hasCleanOnExit() { + mg.Deps(Integration.Clean) + } + return nil + } + } +} + +func hasCleanOnExit() bool { + clean := os.Getenv("TEST_INTEG_CLEAN_ON_EXIT") + b, _ := strconv.ParseBool(clean) + return b +} + +func hasRunUntilFailure() bool { + runUntil := os.Getenv("TEST_RUN_UNTIL_FAILURE") + b, _ := strconv.ParseBool(runUntil) + return b +} + +func integRunnerOnce(ctx context.Context, matrix bool, singleTest string) (int, error) { + goTestFlags := os.Getenv("GOTEST_FLAGS") + + batches, err := define.DetermineBatches("testing/integration", goTestFlags, "integration") + if err != nil { + return 0, fmt.Errorf("failed to determine batches: %w", err) + } + r, err := createTestRunner(matrix, singleTest, goTestFlags, batches...) 
+ if err != nil { + return 0, fmt.Errorf("error creating test runner: %w", err) + } + results, err := r.Run(ctx) + if err != nil { + return 0, fmt.Errorf("error running test: %w", err) + } + _ = os.Remove("build/TEST-go-integration.out") + _ = os.Remove("build/TEST-go-integration.out.json") + _ = os.Remove("build/TEST-go-integration.xml") + err = writeFile("build/TEST-go-integration.out", results.Output, 0644) + if err != nil { + return 0, fmt.Errorf("error writing test out file: %w", err) + } + err = writeFile("build/TEST-go-integration.out.json", results.JSONOutput, 0644) + if err != nil { + return 0, fmt.Errorf("error writing test out json file: %w", err) + } + err = writeFile("build/TEST-go-integration.xml", results.XMLOutput, 0644) + if err != nil { + return 0, fmt.Errorf("error writing test out xml file: %w", err) + } + if results.Failures > 0 { + r.Logger().Logf("Testing completed (%d failures, %d successful)", results.Failures, results.Tests-results.Failures) + } else { + r.Logger().Logf("Testing completed (%d successful)", results.Tests) + } + r.Logger().Logf("Console output written here: build/TEST-go-integration.out") + r.Logger().Logf("Console JSON output written here: build/TEST-go-integration.out.json") + r.Logger().Logf("JUnit XML written here: build/TEST-go-integration.xml") + r.Logger().Logf("Diagnostic output (if present) here: build/diagnostics") + return results.Failures, nil +} + +// Clean cleans up the integration testing leftovers +func (Integration) Clean() error { + fmt.Println("--- Clean mage artifacts") + _ = os.RemoveAll(".agent-testing") + + // Clean out .integration-cache/.ogc-cache always + defer os.RemoveAll(".integration-cache") + defer os.RemoveAll(".ogc-cache") + + _, err := os.Stat(".integration-cache") + if err == nil { + // .integration-cache exists; need to run `Clean` from the runner + r, err := createTestRunner(false, "", "") + if err != nil { + return fmt.Errorf("error creating test runner: %w", err) + } + err = r.Clean() + if err != nil { + return fmt.Errorf("error running clean: %w", err) + } + } + + return nil +} + +func createTestRunner(matrix bool, singleTest string, goTestFlags string, batches ...define.Batch) (*runner.Runner, error) { + goVersion, err := mage.DefaultBeatBuildVariableSources.GetGoVersion() + if err != nil { + return nil, err + } + + agentVersion, agentStackVersion, err := getTestRunnerVersions() + if err != nil { + return nil, err + } + + agentBuildDir := os.Getenv("AGENT_BUILD_DIR") + if agentBuildDir == "" { + agentBuildDir = filepath.Join("build", "distributions") + } + essToken, ok, err := ess.GetESSAPIKey() + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("ESS api key missing; run 'mage integration:auth'") + } + + // Possible to change the region for deployment, default is gcp-us-west2 which is + // the CFT region. 
+ essRegion := os.Getenv("TEST_INTEG_AUTH_ESS_REGION")
+ if essRegion == "" {
+ essRegion = "gcp-us-west2"
+ }
+
+ serviceTokenPath, ok, err := getGCEServiceTokenPath()
+ if err != nil {
+ return nil, err
+ }
+ if !ok {
+ return nil, fmt.Errorf("GCE service token missing; run 'mage integration:auth'")
+ }
+ datacenter := os.Getenv("TEST_INTEG_AUTH_GCP_DATACENTER")
+ if datacenter == "" {
+ // us-central1-a is used because T2A instances required for ARM64 testing are only
+ // available in the central regions
+ datacenter = "us-central1-a"
+ }
+
+ ogcCfg := ogc.Config{
+ ServiceTokenPath: serviceTokenPath,
+ Datacenter: datacenter,
+ }
+
+ var instanceProvisioner tcommon.InstanceProvisioner
+ instanceProvisionerMode := os.Getenv("INSTANCE_PROVISIONER")
+ switch instanceProvisionerMode {
+ case "", ogc.Name:
+ instanceProvisionerMode = ogc.Name
+ instanceProvisioner, err = ogc.NewProvisioner(ogcCfg)
+ if err != nil {
+ return nil, err
+ }
+ case multipass.Name:
+ instanceProvisioner = multipass.NewProvisioner()
+ case kind.Name:
+ instanceProvisioner = kind.NewProvisioner()
+ default:
+ return nil, fmt.Errorf("INSTANCE_PROVISIONER environment variable must be one of 'ogc', 'multipass' or 'kind', not %s", instanceProvisionerMode)
+ }
+
+ email, err := ogcCfg.ClientEmail()
+ if err != nil {
+ return nil, err
+ }
+
+ provisionCfg := ess.ProvisionerConfig{
+ Identifier: fmt.Sprintf("at-%s", strings.Replace(strings.Split(email, "@")[0], ".", "-", -1)),
+ APIKey: essToken,
+ Region: essRegion,
+ }
+
+ var stackProvisioner tcommon.StackProvisioner
+ stackProvisionerMode := os.Getenv("STACK_PROVISIONER")
+ switch stackProvisionerMode {
+ case "", ess.ProvisionerStateful:
+ stackProvisionerMode = ess.ProvisionerStateful
+ stackProvisioner, err = ess.NewProvisioner(provisionCfg)
+ if err != nil {
+ return nil, err
+ }
+ case ess.ProvisionerServerless:
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+ defer cancel()
+ stackProvisioner, err = ess.NewServerlessProvisioner(ctx, provisionCfg)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("STACK_PROVISIONER environment variable must be one of %q or %q, not %s",
+ ess.ProvisionerStateful,
+ ess.ProvisionerServerless,
+ stackProvisionerMode)
+ }
+
+ timestamp := timestampEnabled()
+
+ extraEnv := map[string]string{}
+ if agentCollectDiag := os.Getenv("AGENT_COLLECT_DIAG"); agentCollectDiag != "" {
+ extraEnv["AGENT_COLLECT_DIAG"] = agentCollectDiag
+ }
+ if agentKeepInstalled := os.Getenv("AGENT_KEEP_INSTALLED"); agentKeepInstalled != "" {
+ extraEnv["AGENT_KEEP_INSTALLED"] = agentKeepInstalled
+ }
+
+ extraEnv["TEST_LONG_RUNNING"] = os.Getenv("TEST_LONG_RUNNING")
+ extraEnv["LONG_TEST_RUNTIME"] = os.Getenv("LONG_TEST_RUNTIME")
+
+ // the following two env vars are currently not used by anything, but can be used in the future to test beats or
+ // other binaries, see https://github.com/elastic/elastic-agent/pull/3258
+ binaryName := os.Getenv("TEST_BINARY_NAME")
+ if binaryName == "" {
+ binaryName = "elastic-agent"
+ }
+
+ repoDir := os.Getenv("TEST_INTEG_REPO_PATH")
+ if repoDir == "" {
+ repoDir = "."
+ } + + diagDir := filepath.Join("build", "diagnostics") + _ = os.MkdirAll(diagDir, 0755) + + cfg := tcommon.Config{ + AgentVersion: agentVersion, + StackVersion: agentStackVersion, + BuildDir: agentBuildDir, + GOVersion: goVersion, + RepoDir: repoDir, + DiagnosticsDir: diagDir, + StateDir: ".integration-cache", + Platforms: testPlatforms(), + Packages: testPackages(), + Groups: testGroups(), + Matrix: matrix, + SingleTest: singleTest, + VerboseMode: mg.Verbose(), + Timestamp: timestamp, + TestFlags: goTestFlags, + ExtraEnv: extraEnv, + BinaryName: binaryName, + } + + r, err := runner.NewRunner(cfg, instanceProvisioner, stackProvisioner, batches...) + if err != nil { + return nil, fmt.Errorf("failed to create runner: %w", err) + } + return r, nil +} + +func writeFile(name string, data []byte, perm os.FileMode) error { + err := os.WriteFile(name, data, perm) + if err != nil { + return fmt.Errorf("failed to write file %s: %w", name, err) + } + return nil +} + +func getTestRunnerVersions() (string, string, error) { + var err error + agentStackVersion := os.Getenv("AGENT_STACK_VERSION") + agentVersion := os.Getenv("AGENT_VERSION") + if agentVersion == "" { + agentVersion, err = mage.DefaultBeatBuildVariableSources.GetBeatVersion() + if err != nil { + return "", "", err + } + if agentStackVersion == "" { + // always use snapshot for stack version + agentStackVersion = fmt.Sprintf("%s-SNAPSHOT", agentVersion) + } + if hasSnapshotEnv() { + // in the case that SNAPSHOT=true is set in the environment the + // default version of the agent is used, but as a snapshot build + agentVersion = fmt.Sprintf("%s-SNAPSHOT", agentVersion) + } + } + + if agentStackVersion == "" { + agentStackVersion = agentVersion + } + + return agentVersion, agentStackVersion, nil +} + +func hasSnapshotEnv() bool { + snapshot := os.Getenv(snapshotEnv) + if snapshot == "" { + return false + } + b, _ := strconv.ParseBool(snapshot) + + return b +} + +func getGCEServiceTokenPath() (string, bool, error) { + serviceTokenPath := os.Getenv("TEST_INTEG_AUTH_GCP_SERVICE_TOKEN_FILE") + if serviceTokenPath == "" { + homeDir, err := os.UserHomeDir() + if err != nil { + return "", false, fmt.Errorf("unable to determine user's home directory: %w", err) + } + serviceTokenPath = filepath.Join(homeDir, ".config", "gcloud", "agent-testing-service-token.json") + } + _, err := os.Stat(serviceTokenPath) + if os.IsNotExist(err) { + return serviceTokenPath, false, nil + } else if err != nil { + return serviceTokenPath, false, fmt.Errorf("unable to check for service account key file at %s: %w", serviceTokenPath, err) + } + return serviceTokenPath, true, nil +} + +func timestampEnabled() bool { + timestamp := os.Getenv("TEST_INTEG_TIMESTAMP") + if timestamp == "" { + return false + } + b, _ := strconv.ParseBool(timestamp) + return b +} + +func testPlatforms() []string { + platformsStr := os.Getenv("TEST_PLATFORMS") + if platformsStr == "" { + return nil + } + var platforms []string + for _, p := range strings.Split(platformsStr, " ") { + if p != "" { + platforms = append(platforms, p) + } + } + return platforms +} + +func testPackages() []string { + packagesStr, defined := os.LookupEnv("TEST_PACKAGES") + if !defined { + return nil + } + + var packages []string + for _, p := range strings.Split(packagesStr, ",") { + if p == "tar.gz" { + p = "targz" + } + packages = append(packages, p) + } + + return packages +} + +func testGroups() []string { + groupsStr := os.Getenv("TEST_GROUPS") + if groupsStr == "" { + return nil + } + var groups []string + for _, g 
:= range strings.Split(groupsStr, " ") {
+ if g != "" {
+ groups = append(groups, g)
+ }
+ }
+ return groups
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/batch.go b/dev-tools/mage/target/srvrlesstest/testing/common/batch.go
new file mode 100644
index 00000000000..54a9929b548
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/common/batch.go
@@ -0,0 +1,32 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define"
+
+// OSBatch defines the mapping between a SupportedOS and a define.Batch.
+type OSBatch struct {
+ // ID is the unique ID for the batch.
+ ID string
+ // OS provides all the OS information to create an instance.
+ OS SupportedOS
+ // Batch defines the batch of tests to run on this layout.
+ Batch define.Batch
+ // Skip defines if this batch will be skipped because no supported layout exists yet.
+ Skip bool
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/build.go b/dev-tools/mage/target/srvrlesstest/testing/common/build.go
new file mode 100644
index 00000000000..e994a0e08bc
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/common/build.go
@@ -0,0 +1,32 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package common
+
+// Build describes a build and its paths.
+type Build struct {
+ // Version of the Elastic Agent build.
+ Version string
+ // Type of OS this build is for.
+ Type string
+ // Arch is the architecture this build is for.
+ Arch string
+ // Path is the path to the build.
+ Path string
+ // SHA512 is the path to the SHA512 file.
+ SHA512Path string
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/config.go b/dev-tools/mage/target/srvrlesstest/testing/common/config.go
new file mode 100644
index 00000000000..1eb81d05c79
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/common/config.go
@@ -0,0 +1,147 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import (
+ "errors"
+ "fmt"
+ "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define"
+ "strings"
+)
+
+// Config provides the configuration for running the runner.
+type Config struct {
+ AgentVersion string
+ StateDir string
+ ReleaseVersion string
+ StackVersion string
+ BuildDir string
+ GOVersion string
+ RepoDir string
+ DiagnosticsDir string
+
+ // Platforms filters the tests to only run on the provided list
+ // of platforms even if the tests support more than what is
+ // defined in this list.
+ Platforms []string
+
+ // Packages filters the tests to only run with the provided list
+ // of packages even if the tests support more than what is
+ // defined in this list.
+ Packages []string
+
+ // BinaryName is the name of the binary package under test, e.g., elastic-agent, metricbeat, etc.
+ // It is used to copy the .tar.gz to the remote host.
+ BinaryName string
+
+ // Groups filters the tests to only run tests that are part of
+ // the groups defined in this list.
+ Groups []string
+
+ // Matrix enables matrix testing. This explodes each test to
+ // run on every platform the runner supports.
+ Matrix bool
+
+ // SingleTest only has the runner run that specific test.
+ SingleTest string
+
+ // VerboseMode passes along a verbose mode flag to tests.
+ VerboseMode bool
+
+ // Timestamp enables timestamps on the console output.
+ Timestamp bool
+
+ // TestFlags contains extra go test flags to be set when running tests.
+ TestFlags string
+
+ // ExtraEnv contains extra environment flags to pass to the runner.
+ ExtraEnv map[string]string
+}
+
+// Validate returns an error if the information is invalid.
+func (c *Config) Validate() error {
+ if c.AgentVersion == "" {
+ return errors.New("field AgentVersion must be set")
+ }
+ if c.StackVersion == "" {
+ return errors.New("field StackVersion must be set")
+ }
+ if c.BuildDir == "" {
+ return errors.New("field BuildDir must be set")
+ }
+ if c.GOVersion == "" {
+ return errors.New("field GOVersion must be set")
+ }
+ if c.RepoDir == "" {
+ return errors.New("field RepoDir must be set")
+ }
+ if c.StateDir == "" {
+ return errors.New("field StateDir must be set")
+ }
+ _, err := c.GetPlatforms()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// GetPlatforms returns the defined platforms for the configuration.
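+// A sketch of the accepted TEST_PLATFORMS entries, per parsePlatform below
+// (the concrete versions and distros here are illustrative only):
+//
+//	TEST_PLATFORMS="linux/amd64 linux/arm64/ubuntu/22.04 kubernetes/arm64/1.30/wolfi"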
+func (c *Config) GetPlatforms() ([]define.OS, error) {
+ var each []define.OS
+ for _, platform := range c.Platforms {
+ o, err := parsePlatform(platform)
+ if err != nil {
+ return nil, err
+ }
+ each = append(each, o)
+ }
+ return each, nil
+}
+
+func parsePlatform(platform string) (define.OS, error) {
+ separated := strings.Split(platform, "/")
+ var os define.OS
+ switch len(separated) {
+ case 0:
+ return define.OS{}, fmt.Errorf("failed to parse platform string %q: empty string", platform)
+ case 1:
+ os = define.OS{Type: separated[0]}
+ case 2:
+ os = define.OS{Type: separated[0], Arch: separated[1]}
+ case 3:
+ if separated[0] == define.Linux {
+ os = define.OS{Type: separated[0], Arch: separated[1], Distro: separated[2]}
+ } else {
+ os = define.OS{Type: separated[0], Arch: separated[1], Version: separated[2]}
+ }
+ case 4:
+ if separated[0] == define.Linux {
+ os = define.OS{Type: separated[0], Arch: separated[1], Distro: separated[2], Version: separated[3]}
+ } else if separated[0] == define.Kubernetes {
+ os = define.OS{Type: separated[0], Arch: separated[1], Version: separated[2], DockerVariant: separated[3]}
+ } else {
+ return define.OS{}, fmt.Errorf("failed to parse platform string %q: more than 2 separators", platform)
+ }
+ default:
+ return define.OS{}, fmt.Errorf("failed to parse platform string %q: more than 3 separators", platform)
+ }
+ if err := os.Validate(); err != nil {
+ return define.OS{}, fmt.Errorf("failed to parse platform string %q: %w", platform, err)
+ }
+ return os, nil
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/instance.go b/dev-tools/mage/target/srvrlesstest/testing/common/instance.go
new file mode 100644
index 00000000000..c87bd4c10e4
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/common/instance.go
@@ -0,0 +1,79 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import (
+ "context"
+ "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define"
+)
+
+type ProvisionerType uint32
+
+const (
+ ProvisionerTypeVM ProvisionerType = iota
+ ProvisionerTypeK8SCluster
+)
+
+// Instance represents a provisioned instance.
+type Instance struct {
+ // Provider is the instance provider for the instance.
+ // See INSTANCE_PROVISIONER environment variable for the supported providers.
+ Provider string `yaml:"provider"`
+ // ID is the identifier of the instance.
+ //
+ // This must be the same ID as the OSBatch.
+ ID string `yaml:"id"`
+ // Name is the nice-name of the instance.
+ Name string `yaml:"name"`
+ // Provisioner is the provisioner used to create the instance.
+ // See INSTANCE_PROVISIONER environment variable for the supported provisioners.
+ Provisioner string `yaml:"provisioner"`
+ // IP is the IP address of the instance.
+ IP string `yaml:"ip"`
+ // Username is the username used to SSH to the instance.
+ Username string `yaml:"username"`
+ // RemotePath is the base path used for performing work on the instance.
+ RemotePath string `yaml:"remote_path"`
+ // Internal holds internal information used by the provisioner.
+ // Best to not touch the contents of this, and leave it be for
+ // the provisioner.
+ Internal map[string]interface{} `yaml:"internal"`
+}
+
+// InstanceProvisioner performs the provisioning of instances.
+type InstanceProvisioner interface {
+ // Name returns the name of the instance provisioner.
+ Name() string
+
+ // Type returns the type of the provisioner.
+ Type() ProvisionerType
+
+ // SetLogger sets the logger for it to use.
+ SetLogger(l Logger)
+
+ // Supported returns true if the provisioner supports the given batch.
+ Supported(batch define.OS) bool
+
+ // Provision brings up the machines.
+ //
+ // Provision should re-use already prepared instances when possible.
+ Provision(ctx context.Context, cfg Config, batches []OSBatch) ([]Instance, error)
+
+ // Clean cleans up all provisioned resources.
+ Clean(ctx context.Context, cfg Config, instances []Instance) error
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/logger.go b/dev-tools/mage/target/srvrlesstest/testing/common/logger.go
new file mode 100644
index 00000000000..3dd641c0307
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/common/logger.go
@@ -0,0 +1,24 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package common
+
+// Logger is a simple logging interface used by each runner type.
+type Logger interface {
+ // Logf logs the message for this runner.
+ Logf(format string, args ...any)
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/prefix_output.go b/dev-tools/mage/target/srvrlesstest/testing/common/prefix_output.go
new file mode 100644
index 00000000000..56b0250a8d4
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/common/prefix_output.go
@@ -0,0 +1,74 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import (
+ "bytes"
+ "strings"
+)
+
+// prefixOutput is an `io.Writer` that prefixes each written line with the provided prefix text
+type prefixOutput struct {
+ logger Logger
+ prefix string
+ remainder []byte
+}
+
+// NewPrefixOutput creates a prefix output `io.Writer`.
+func NewPrefixOutput(logger Logger, prefix string) *prefixOutput {
+ return &prefixOutput{
+ logger: logger,
+ prefix: prefix,
+ }
+}
+
+func (r *prefixOutput) Write(p []byte) (int, error) {
+ if len(p) == 0 {
+ // nothing to do
+ return 0, nil
+ }
+ offset := 0
+ for {
+ idx := bytes.IndexByte(p[offset:], '\n')
+ if idx < 0 {
+ // no newline found; buffer what is left as the remainder for the next call
+ r.remainder = append(r.remainder, p[offset:]...)
+ return len(p), nil
+ }
+
+ // prepend any buffered remainder to the current line (append to a nil
+ // slice is valid, so both cases are handled identically)
+ line := r.remainder
+ r.remainder = nil
+ line = append(line, p[offset:offset+idx]...)
+ offset += idx + 1
+ // drop '\r' from line (needed for Windows)
+ if len(line) > 0 && line[len(line)-1] == '\r' {
+ line = line[0 : len(line)-1]
+ }
+ if len(line) == 0 {
+ // empty line
+ continue
+ }
+ str := strings.TrimSpace(string(line))
+ r.logger.Logf("%s%s", r.prefix, str)
+ }
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/runner.go b/dev-tools/mage/target/srvrlesstest/testing/common/runner.go
new file mode 100644
index 00000000000..6ebc48c92b7
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/common/runner.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import (
+ "context"
+ "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define"
+ "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh"
+)
+
+// OSRunnerPackageResult is the result for each package.
+type OSRunnerPackageResult struct {
+ // Name is the package name.
+ Name string
+ // Output is the raw test output.
+ Output []byte
+ // XMLOutput is the XML JUnit output.
+ XMLOutput []byte
+ // JSONOutput is the JSON output.
+ JSONOutput []byte
+}
+
+// OSRunnerResult is the result of the test run provided by an OSRunner.
+type OSRunnerResult struct {
+ // Packages is the results for each package.
+ Packages []OSRunnerPackageResult
+
+ // SudoPackages is the results for each package that needs to run as sudo.
+ SudoPackages []OSRunnerPackageResult
+}
+
+// OSRunner provides an interface to run the tests on the OS.
+type OSRunner interface {
+ // Prepare prepares the runner to actually run on the host.
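+ // (Presumably this includes installing the Go toolchain matching goVersion;
+ // the exact preparation steps are implementation-specific and not shown here.)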
+ Prepare(ctx context.Context, sshClient ssh.SSHClient, logger Logger, arch string, goVersion string) error
+ // Copy places the required files on the host.
+ Copy(ctx context.Context, sshClient ssh.SSHClient, logger Logger, repoArchive string, builds []Build) error
+ // Run runs the actual tests and provides the result.
+ Run(ctx context.Context, verbose bool, sshClient ssh.SSHClient, logger Logger, agentVersion string, prefix string, batch define.Batch, env map[string]string) (OSRunnerResult, error)
+ // Diagnostics gathers any diagnostics from the host.
+ Diagnostics(ctx context.Context, sshClient ssh.SSHClient, logger Logger, destination string) error
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/stack.go b/dev-tools/mage/target/srvrlesstest/testing/common/stack.go
new file mode 100644
index 00000000000..913435bf947
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/common/stack.go
@@ -0,0 +1,89 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import "context"
+
+// Stack is a created stack.
+type Stack struct {
+ // ID is the identifier of the stack.
+ //
+ // This must be the same ID used for requesting a stack.
+ ID string `yaml:"id"`
+
+ // Provisioner is the stack provisioner. See STACK_PROVISIONER environment
+ // variable for the supported provisioners.
+ Provisioner string `yaml:"provisioner"`
+
+ // Version is the version of the stack.
+ Version string `yaml:"version"`
+
+ // Ready determines if the stack is ready to be used.
+ Ready bool `yaml:"ready"`
+
+ // Elasticsearch is the URL to communicate with Elasticsearch.
+ Elasticsearch string `yaml:"elasticsearch"`
+
+ // Kibana is the URL to communicate with Kibana.
+ Kibana string `yaml:"kibana"`
+
+ // Username is the username.
+ Username string `yaml:"username"`
+
+ // Password is the password.
+ Password string `yaml:"password"`
+
+ // Internal holds internal information used by the provisioner.
+ // Best to not touch the contents of this, and leave it be for
+ // the provisioner.
+ Internal map[string]interface{} `yaml:"internal"`
+}
+
+// Same returns true if other is the same stack as this one.
+// Two stacks are considered the same if their provisioner and ID are the same.
+func (s Stack) Same(other Stack) bool {
+ return s.Provisioner == other.Provisioner &&
+ s.ID == other.ID
+}
+
+// StackRequest is a request for a new stack.
+type StackRequest struct {
+ // ID is the unique ID for the stack.
+ ID string `yaml:"id"`
+
+ // Version is the version of the stack.
+ Version string `yaml:"version"`
+}
+
+// StackProvisioner performs the provisioning of stacks.
+type StackProvisioner interface {
+ // Name returns the name of the stack provisioner.
+ Name() string
+
+ // SetLogger sets the logger for it to use.
+ SetLogger(l Logger)
+
+ // Create creates a stack.
+ Create(ctx context.Context, request StackRequest) (Stack, error)
+
+ // WaitForReady should block until the stack is ready or the context is cancelled.
+ WaitForReady(ctx context.Context, stack Stack) (Stack, error)
+
+ // Delete deletes the stack.
+ Delete(ctx context.Context, stack Stack) error
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/supported.go b/dev-tools/mage/target/srvrlesstest/testing/common/supported.go
new file mode 100644
index 00000000000..130ddef9ec7
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/common/supported.go
@@ -0,0 +1,28 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define"
+
+// SupportedOS maps an OS definition to an OSRunner.
+type SupportedOS struct {
+ define.OS
+
+ // Runner is the runner to use for the OS.
+ Runner OSRunner
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/define/batch.go b/dev-tools/mage/target/srvrlesstest/testing/define/batch.go
new file mode 100644
index 00000000000..c8b6ac65a0e
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/define/batch.go
@@ -0,0 +1,320 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package define
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+// defaultOS is the set of operating systems used when a requirement doesn't define any
+var defaultOS = []OS{
+ {
+ Type: Darwin,
+ Arch: AMD64,
+ },
+ {
+ Type: Darwin,
+ Arch: ARM64,
+ },
+ {
+ Type: Linux,
+ Arch: AMD64,
+ },
+ {
+ Type: Linux,
+ Arch: ARM64,
+ },
+ {
+ Type: Windows,
+ Arch: AMD64,
+ },
+}
+
+// Batch is a grouping of tests that all have the same requirements.
+type Batch struct {
+ // Group must be set on each test to define which group the test belongs to.
+ // Tests that are in the same group are executed on the same runner.
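+ // For example, a test declaring (roughly) define.Require(t, define.Requirements{Group: "default"})
+ // is batched together with every other "default"-group test that targets the same OS.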
+ Group string `json:"group"`
+
+ // OS defines the operating systems this test batch needs.
+ OS OS `json:"os"`
+
+ // Stack defines the stack required for this batch.
+ Stack *Stack `json:"stack,omitempty"`
+
+ // Tests defines the set of packages and tests that do not require sudo
+ // privileges to run.
+ Tests []BatchPackageTests `json:"tests"`
+
+ // SudoTests defines the set of packages and tests that do require sudo
+ // privileges to run.
+ SudoTests []BatchPackageTests `json:"sudo_tests"`
+}
+
+// BatchPackageTests is a package and its tests that belong to a batch.
+type BatchPackageTests struct {
+ // Name is the package name.
+ Name string `json:"name"`
+ // Tests is the set of tests in the package.
+ Tests []BatchPackageTest `json:"tests"`
+}
+
+// BatchPackageTest is a specific test in a package.
+type BatchPackageTest struct {
+ // Name of the test.
+ Name string `json:"name"`
+ // Stack needed for test.
+ Stack bool `json:"stack"`
+}
+
+// DetermineBatches parses the package directory with the possible extra build
+// tags to determine the set of batches for the package.
+func DetermineBatches(dir string, testFlags string, buildTags ...string) ([]Batch, error) {
+ const (
+ defineMatcher = "define skip; requirements: "
+ )
+
+ // the 'define' build tag is added so that the `define.Require` skips and
+ // logs the requirements for each test.
+ buildTags = append(buildTags, "define")
+
+ // 'go test' wants a directory path to either be absolute or start with
+ // './' so it knows it's a directory and not a package.
+ if !filepath.IsAbs(dir) && !strings.HasPrefix(dir, "./") {
+ dir = "./" + dir
+ }
+
+ // run 'go test' and collect the JSON output to be parsed
+ // #nosec G204 -- test function code, it will be okay
+ cmdArgs := []string{"test", "-v", "--tags", strings.Join(buildTags, ","), "-json"}
+ if testFlags != "" {
+ flags := strings.Split(testFlags, " ")
+ cmdArgs = append(cmdArgs, flags...)
+ }
+
+ cmdArgs = append(cmdArgs, dir)
+ testCmd := exec.Command("go", cmdArgs...)
+ output, err := testCmd.Output()
+ if err != nil {
+ // format cmdArgs to make the error message more coherent
+ cmdArgs = append([]string{"go"}, cmdArgs...)
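+ // cmdArgs now mirrors the exact "go test ..." command line; it is only
+ // used to make the error message below more informative.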
+
+ var errExit *exec.ExitError
+ if errors.As(err, &errExit) {
+ b := bytes.NewBuffer(errExit.Stderr)
+ b.Write(output)
+ output = b.Bytes()
+ }
+ return nil, fmt.Errorf(
+ "error running go test: (%w), got:\n\n%s\ntried to run: %v",
+ err, string(output), cmdArgs)
+ }
+
+ // parse each test result and determine the batches that each test belongs in
+ var batches []Batch
+ sc := bufio.NewScanner(bytes.NewReader(output))
+ for sc.Scan() {
+ var tar testActionResult
+ err := json.Unmarshal([]byte(sc.Text()), &tar)
+ if err != nil {
+ return nil, err
+ }
+ if tar.Action == "output" && strings.Contains(tar.Output, defineMatcher) {
+ reqRaw := tar.Output[strings.Index(tar.Output, defineMatcher)+len(defineMatcher) : strings.LastIndex(tar.Output, "\n")]
+ var req Requirements
+ err := json.Unmarshal([]byte(reqRaw), &req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse requirements JSON from test %s/%s: %w", tar.Package, tar.Test, err)
+ }
+ err = req.Validate()
+ if err != nil {
+ return nil, fmt.Errorf("requirements parsed from test %s/%s are invalid: %w", tar.Package, tar.Test, err)
+ }
+ batches = appendTest(batches, tar, req)
+ }
+ }
+ return batches, nil
+}
+
+func appendTest(batches []Batch, tar testActionResult, req Requirements) []Batch {
+ var set []OS
+ for _, o := range req.OS {
+ if o.Arch == "" {
+ set = append(set, OS{
+ Type: o.Type,
+ Arch: AMD64,
+ Version: o.Version,
+ Distro: o.Distro,
+ DockerVariant: o.DockerVariant,
+ })
+ if o.Type != Windows {
+ set = append(set, OS{
+ Type: o.Type,
+ Arch: ARM64,
+ Version: o.Version,
+ Distro: o.Distro,
+ DockerVariant: o.DockerVariant,
+ })
+ }
+ } else {
+ set = append(set, OS{
+ Type: o.Type,
+ Arch: o.Arch,
+ Version: o.Version,
+ Distro: o.Distro,
+ DockerVariant: o.DockerVariant,
+ })
+ }
+ }
+ if len(set) == 0 {
+ // no OS defined; means the test supports all
+ set = defaultOS
+ }
+ for _, o := range set {
+ var batch Batch
+ batchIdx := findBatchIdx(batches, req.Group, o, req.Stack)
+ if batchIdx == -1 {
+ // new batch required
+ batch = Batch{
+ Group: req.Group,
+ OS: o,
+ Tests: nil,
+ SudoTests: nil,
+ }
+ batches = append(batches, batch)
+ batchIdx = len(batches) - 1
+ }
+ batch = batches[batchIdx]
+ if o.Distro != "" {
+ batch.OS.Distro = o.Distro
+ }
+ if o.Version != "" {
+ batch.OS.Version = o.Version
+ }
+ if o.DockerVariant != "" {
+ batch.OS.DockerVariant = o.DockerVariant
+ }
+ if req.Stack != nil && batch.Stack == nil {
+ // assign the stack to this batch
+ batch.Stack = copyStack(req.Stack)
+ }
+ if req.Sudo {
+ batch.SudoTests = appendPackageTest(batch.SudoTests, tar.Package, tar.Test, req.Stack != nil)
+ } else {
+ batch.Tests = appendPackageTest(batch.Tests, tar.Package, tar.Test, req.Stack != nil)
+ }
+ batches[batchIdx] = batch
+ }
+ return batches
+}
+
+func appendPackageTest(tests []BatchPackageTests, pkg string, name string, stack bool) []BatchPackageTests {
+ for i, pt := range tests {
+ if pt.Name == pkg {
+ for _, testName := range pt.Tests {
+ if testName.Name == name {
+ // we already selected this test for this package for this batch,
+ // we can return immediately
+ return tests
+ }
+ }
+ pt.Tests = append(pt.Tests, BatchPackageTest{
+ Name: name,
+ Stack: stack,
+ })
+ tests[i] = pt
+ return tests
+ }
+ }
+ var pt BatchPackageTests
+ pt.Name = pkg
+ pt.Tests = append(pt.Tests, BatchPackageTest{
+ Name: name,
+ Stack: stack,
+ })
+ tests = append(tests, pt)
+ return tests
+}
+
+func findBatchIdx(batches []Batch, group string, os OS, stack *Stack) int {
+ for i, b := range batches {
+ if b.Group != group {
+ // must be in the same group
+ continue
+ }
+ if b.OS.Type != os.Type || b.OS.Arch != os.Arch {
+ // must be the same type and arch; both are always defined at this point
+ continue
+ }
+ if os.Distro != "" {
+ // must have the same distro
+ if b.OS.Distro != "" && b.OS.Distro != os.Distro {
+ continue
+ }
+ }
+ if os.Version != "" {
+ // must have the same version
+ if b.OS.Version != "" && b.OS.Version != os.Version {
+ continue
+ }
+ }
+ if os.DockerVariant != "" {
+ // must be the same docker image
+ if b.OS.DockerVariant != "" && b.OS.DockerVariant != os.DockerVariant {
+ continue
+ }
+ }
+ if stack == nil {
+ // don't care if the batch has a cloud or not
+ return i
+ }
+ if b.Stack == nil {
+ // need cloud, but batch doesn't have cloud; calling code can set it
+ return i
+ }
+ if b.Stack.Version == stack.Version {
+ // same cloud version; compatible
+ return i
+ }
+ }
+ return -1
+}
+
+func copyStack(stack *Stack) *Stack {
+ var s Stack
+ if stack != nil {
+ s = *stack
+ return &s
+ }
+ return nil
+}
+
+type testActionResult struct {
+ Time string `json:"Time"`
+ Action string `json:"Action"`
+ Package string `json:"Package"`
+ Test string `json:"Test"`
+ Output string `json:"Output"`
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/define/requirements.go b/dev-tools/mage/target/srvrlesstest/testing/define/requirements.go
new file mode 100644
index 00000000000..a9fac1ed467
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/define/requirements.go
@@ -0,0 +1,180 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package define
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/component"
+)
+
+const (
+ // Default constant can be used as the default group for tests.
+ Default = "default"
+)
+
+const (
+ // Darwin is macOS platform
+ Darwin = component.Darwin
+ // Linux is Linux platform
+ Linux = component.Linux
+ // Windows is Windows platform
+ Windows = component.Windows
+ // Kubernetes is Kubernetes platform
+ Kubernetes = "kubernetes"
+)
+
+const (
+ // AMD64 is amd64 architecture
+ AMD64 = component.AMD64
+ // ARM64 is arm64 architecture
+ ARM64 = component.ARM64
+)
+
+// OS defines an operating system, architecture, version and distribution combination.
+type OS struct {
+ // Type is the operating system type (darwin, linux, windows, or kubernetes).
+ //
+ // This is always required to be defined on the OS structure.
+ // If it is not defined the test runner will error.
+ Type string `json:"type"`
+ // Arch is the architecture type (amd64 or arm64).
+ //
+ // In the case that it's not provided the test will run on every
+ // architecture that is supported.
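+ // (Per appendTest in batch.go, an empty Arch expands to amd64 plus arm64,
+ // with the windows/arm64 combination skipped.)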
+ Arch string `json:"arch"`
+ // Version is a specific version of the OS type to run this test on.
+ //
+ // When defined the test runs on this specific version only. When not
+ // defined the test is run on a selected version for this operating system.
+ Version string `json:"version"`
+ // Distro allows, in the Linux case, a specific distribution to be
+ // selected to run on, for example "ubuntu". In the Kubernetes case it
+ // selects a specific Kubernetes distribution, for example "kind".
+ Distro string `json:"distro"`
+ // DockerVariant allows, in the Kubernetes case, a specific Docker image
+ // variant to be selected to run with, for example "wolfi".
+ DockerVariant string `json:"docker_variant"`
+}
+
+// Validate returns an error if not valid.
+func (o OS) Validate() error {
+ if o.Type == "" {
+ return errors.New("type must be defined")
+ }
+ if o.Type != Darwin && o.Type != Linux && o.Type != Windows && o.Type != Kubernetes {
+ return errors.New("type must be either darwin, linux, windows, or kubernetes")
+ }
+ if o.Arch != "" {
+ if o.Arch != AMD64 && o.Arch != ARM64 {
+ return errors.New("arch must be either amd64 or arm64")
+ }
+ if o.Type == Windows && o.Arch == ARM64 {
+ return errors.New("windows on arm64 not supported")
+ }
+ }
+ if o.Distro != "" && (o.Type != Linux && o.Type != Kubernetes) {
+ return errors.New("distro can only be set when type is linux or kubernetes")
+ }
+ if o.DockerVariant != "" && o.Type != Kubernetes {
+ return errors.New("docker variant can only be set when type is kubernetes")
+ }
+ return nil
+}
+
+// Stack defines the stack required for the test.
+type Stack struct {
+ // Version defines a specific stack version to create for this test.
+ //
+ // In the case that no version is provided the same version being used for
+ // the current test execution is used.
+ Version string `json:"version"`
+}
+
+// Requirements defines the testing requirements for the test to run.
+type Requirements struct {
+ // Group must be set on each test to define which group the test belongs to.
+ // Tests that are in the same group are executed on the same runner.
+ //
+ // Useful when tests take a long time to complete and sharding them across multiple
+ // hosts can improve the total amount of time to complete all the tests.
+ Group string `json:"group"`
+
+ // OS defines the operating systems this test can run on. In the case
+ // multiple are provided the test is run multiple times, once for each
+ // combination.
+ OS []OS `json:"os,omitempty"`
+
+ // Stack defines the stack required for the test.
+ Stack *Stack `json:"stack,omitempty"`
+
+ // Local defines if this test can safely be performed on a local development machine.
+ // If not set then the test will not be performed when local-only testing is performed.
+ //
+ // This doesn't mean this test can only run locally. It will still run on defined OSes
+ // when a full test run is performed.
+ Local bool `json:"local"`
+
+ // Sudo defines that this test must run under superuser permissions. On Mac and Linux the
+ // test gets executed under sudo and on Windows it gets run under Administrator.
+ Sudo bool `json:"sudo"`
+}
+
+// Validate returns an error if not valid.
+func (r Requirements) Validate() error {
+ if r.Group == "" {
+ return errors.New("group is required")
+ }
+ for i, o := range r.OS {
+ if err := o.Validate(); err != nil {
+ return fmt.Errorf("invalid os %d: %w", i, err)
+ }
+ }
+ return nil
+}
+
+// runtimeAllowed returns true if the runtime matches a valid OS.
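+// For example, a requirement of OS{Type: Linux, Distro: "ubuntu"} allows the
+// runtime ("linux", "arm64", "20.04", "ubuntu"), since the unset Arch and
+// Version are unconstrained, but rejects any distro other than "ubuntu".
+// (The concrete values here are illustrative only.)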
+func (r Requirements) runtimeAllowed(os string, arch string, version string, distro string) bool { + if len(r.OS) == 0 { + // all allowed + return true + } + for _, o := range r.OS { + if o.Type != Kubernetes && o.Type != os { + // not valid on this runtime + continue + } + if o.Arch != "" && o.Arch != arch { + // not allowed on specific architecture + continue + } + if o.Version != "" && o.Version != version { + // not allowed on specific version + continue + } + if o.Distro != "" && o.Distro != distro { + // not allowed on specific distro + continue + } + // allowed + return true + } + // made it this far, not allowed + return false +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/client.go b/dev-tools/mage/target/srvrlesstest/testing/ess/client.go new file mode 100644 index 00000000000..6ff144c889b --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ess/client.go @@ -0,0 +1,79 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package ess + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" +) + +type Client struct { + config *Config + client *http.Client +} + +func NewClient(config Config) *Client { + cfg := defaultConfig() + cfg.Merge(config) + + c := new(Client) + c.client = http.DefaultClient + c.config = cfg + + return c +} + +func (c *Client) doGet(ctx context.Context, relativeUrl string) (*http.Response, error) { + u, err := url.JoinPath(c.config.BaseUrl, relativeUrl) + if err != nil { + return nil, fmt.Errorf("unable to create API URL: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + return nil, fmt.Errorf("unable to create GET request: %w", err) + } + + req.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", c.config.ApiKey)) + + return c.client.Do(req) +} + +func (c *Client) doPost(ctx context.Context, relativeUrl, contentType string, body io.Reader) (*http.Response, error) { + u, err := url.JoinPath(c.config.BaseUrl, relativeUrl) + if err != nil { + return nil, fmt.Errorf("unable to create API URL: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, body) + if err != nil { + return nil, fmt.Errorf("unable to create POST request: %w", err) + } + + req.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", c.config.ApiKey)) + req.Header.Set("Content-Type", contentType) + + return c.client.Do(req) +} + +func (c *Client) BaseURL() string { + return c.config.BaseUrl +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/config.go b/dev-tools/mage/target/srvrlesstest/testing/ess/config.go new file mode 100644 index 00000000000..6f886022e7c --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ess/config.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ess
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+type Config struct {
+ BaseUrl string `json:"base_url" yaml:"base_url"`
+ ApiKey string `json:"api_key" yaml:"api_key"`
+}
+
+func defaultConfig() *Config {
+ baseURL := os.Getenv("TEST_INTEG_AUTH_ESS_URL")
+ if baseURL == "" {
+ baseURL = "https://cloud.elastic.co"
+ }
+ url := strings.TrimRight(baseURL, "/") + "/api/v1"
+ return &Config{
+ BaseUrl: url,
+ }
+}
+
+// Merge overlays the provided configuration on top of
+// this configuration.
+func (c *Config) Merge(anotherConfig Config) {
+ if anotherConfig.BaseUrl != "" {
+ c.BaseUrl = anotherConfig.BaseUrl
+ }
+
+ if anotherConfig.ApiKey != "" {
+ c.ApiKey = anotherConfig.ApiKey
+ }
+}
+
+// GetESSAPIKey returns the ESS API key, if it exists
+func GetESSAPIKey() (string, bool, error) {
+ essAPIKeyFile, err := GetESSAPIKeyFilePath()
+ if err != nil {
+ return "", false, err
+ }
+ _, err = os.Stat(essAPIKeyFile)
+ if os.IsNotExist(err) {
+ return "", false, nil
+ } else if err != nil {
+ return "", false, fmt.Errorf("unable to check if ESS API key file exists: %w", err)
+ }
+ data, err := os.ReadFile(essAPIKeyFile)
+ if err != nil {
+ return "", true, fmt.Errorf("unable to read ESS API key: %w", err)
+ }
+ essAPIKey := strings.TrimSpace(string(data))
+ return essAPIKey, true, nil
+}
+
+// GetESSAPIKeyFilePath returns the path to the ESS API key file
+func GetESSAPIKeyFilePath() (string, error) {
+ essAPIKeyFile := os.Getenv("TEST_INTEG_AUTH_ESS_APIKEY_FILE")
+ if essAPIKeyFile == "" {
+ homeDir, err := os.UserHomeDir()
+ if err != nil {
+ return "", fmt.Errorf("unable to determine user's home directory: %w", err)
+ }
+ essAPIKeyFile = filepath.Join(homeDir, ".config", "ess", "api_key.txt")
+ }
+ return essAPIKeyFile, nil
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_csp_configuration.yaml b/dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_csp_configuration.yaml
new file mode 100644
index 00000000000..199f664a65a
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_csp_configuration.yaml
@@ -0,0 +1,15 @@
+gcp:
+ integrations_server_conf_id: "gcp.integrationsserver.n2.68x32x45.2"
+ elasticsearch_conf_id: "gcp.es.datahot.n2.68x10x45"
+ elasticsearch_deployment_template_id: "gcp-storage-optimized-v5"
+ kibana_instance_configuration_id: "gcp.kibana.n2.68x32x45"
+azure:
+ integrations_server_conf_id: "azure.integrationsserver.fsv2.2"
+ elasticsearch_conf_id: "azure.es.datahot.edsv4"
+ elasticsearch_deployment_template_id: "azure-storage-optimized-v2"
+ kibana_instance_configuration_id: "azure.kibana.fsv2"
+aws:
+ integrations_server_conf_id: "aws.integrationsserver.c5d.2.1"
+ elasticsearch_conf_id: "aws.es.datahot.i3.1.1"
+
elasticsearch_deployment_template_id: "aws-storage-optimized-v5" + kibana_instance_configuration_id: "aws.kibana.c5d.1.1" \ No newline at end of file diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_request.tmpl.json b/dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_request.tmpl.json new file mode 100644 index 00000000000..3ef93868708 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_request.tmpl.json @@ -0,0 +1,102 @@ +{ + "resources": { + "integrations_server": [ + { + "elasticsearch_cluster_ref_id": "main-elasticsearch", + "region": "{{ .request.Region }}", + "plan": { + "cluster_topology": [ + { + "instance_configuration_id": "{{ .integrations_server_conf_id }}", + "zone_count": 1, + "size": { + "resource": "memory", + "value": 1024 + } + } + ], + "integrations_server": { + "version": "{{ .request.Version }}" + } + }, + "ref_id": "main-integrations_server" + } + ], + "elasticsearch": [ + { + "region": "{{ .request.Region }}", + "settings": { + "dedicated_masters_threshold": 6 + }, + "plan": { + "cluster_topology": [ + { + "zone_count": 1, + "elasticsearch": { + "node_attributes": { + "data": "hot" + } + }, + "instance_configuration_id": "{{.elasticsearch_conf_id}}", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "id": "hot_content", + "size": { + "resource": "memory", + "value": 8192 + } + } + ], + "elasticsearch": { + "version": "{{ .request.Version }}", + "enabled_built_in_plugins": [] + }, + "deployment_template": { + "id": "{{ .elasticsearch_deployment_template_id }}" + } + }, + "ref_id": "main-elasticsearch" + } + ], + "enterprise_search": [], + "kibana": [ + { + "elasticsearch_cluster_ref_id": "main-elasticsearch", + "region": "{{ .request.Region }}", + "plan": { + "cluster_topology": [ + { + "instance_configuration_id": "{{.kibana_instance_configuration_id}}", + "zone_count": 1, + "size": { + "resource": "memory", + "value": 1024 + } + } + ], + "kibana": { + "version": "{{ .request.Version }}", + "user_settings_json": { + "xpack.fleet.enableExperimental": ["agentTamperProtectionEnabled"] + } + } + }, + "ref_id": "main-kibana" + } + ] + }, + "settings": { + "autoscaling_enabled": false + }, + "name": "{{ .request.Name }}", + "metadata": { + "system_owned": false, + "tags": {{ json .request.Tags }} + } +} \ No newline at end of file diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/deployment.go b/dev-tools/mage/target/srvrlesstest/testing/ess/deployment.go new file mode 100644 index 00000000000..9170e9d3dcb --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ess/deployment.go @@ -0,0 +1,401 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
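+
+// This file implements the ESS deployment lifecycle, presumably consumed by the
+// stateful stack provisioner (not shown in this hunk): CreateDeployment, then
+// DeploymentIsReady polling, and finally ShutdownDeployment on cleanup.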
+ +package ess + +import ( + "bytes" + "context" + _ "embed" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "text/template" + "time" + + "gopkg.in/yaml.v2" +) + +type Tag struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type CreateDeploymentRequest struct { + Name string `json:"name"` + Region string `json:"region"` + Version string `json:"version"` + Tags []Tag `json:"tags"` +} + +type CreateDeploymentResponse struct { + ID string `json:"id"` + + ElasticsearchEndpoint string + KibanaEndpoint string + + Username string + Password string +} + +type GetDeploymentResponse struct { + Elasticsearch struct { + Status DeploymentStatus + ServiceUrl string + } + Kibana struct { + Status DeploymentStatus + ServiceUrl string + } + IntegrationsServer struct { + Status DeploymentStatus + ServiceUrl string + } +} + +type DeploymentStatus string + +func (d *DeploymentStatus) UnmarshalJSON(data []byte) error { + var status string + if err := json.Unmarshal(data, &status); err != nil { + return err + } + + switch status { + case string(DeploymentStatusInitializing), string(DeploymentStatusReconfiguring), string(DeploymentStatusStarted): + *d = DeploymentStatus(status) + default: + return fmt.Errorf("unknown status: [%s]", status) + } + + return nil +} + +func (d *DeploymentStatus) String() string { + return string(*d) +} + +const ( + DeploymentStatusInitializing DeploymentStatus = "initializing" + DeploymentStatusReconfiguring DeploymentStatus = "reconfiguring" + DeploymentStatusStarted DeploymentStatus = "started" +) + +type DeploymentStatusResponse struct { + Overall DeploymentStatus + + Elasticsearch DeploymentStatus + Kibana DeploymentStatus + IntegrationsServer DeploymentStatus +} + +// CreateDeployment creates the deployment with the specified configuration. 
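+// A hypothetical call (field values are illustrative only):
+//
+//	resp, err := client.CreateDeployment(ctx, CreateDeploymentRequest{
+//		Name:    "agentbeat-it",
+//		Region:  "gcp-us-west2",
+//		Version: "8.16.0-SNAPSHOT",
+//		Tags:    []Tag{{Key: "purpose", Value: "integration-testing"}},
+//	})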
+func (c *Client) CreateDeployment(ctx context.Context, req CreateDeploymentRequest) (*CreateDeploymentResponse, error) { + reqBodyBytes, err := generateCreateDeploymentRequestBody(req) + if err != nil { + return nil, err + } + + createResp, err := c.doPost( + ctx, + "deployments", + "application/json", + bytes.NewReader(reqBodyBytes), + ) + if err != nil { + return nil, fmt.Errorf("error calling deployment creation API: %w", err) + } + defer createResp.Body.Close() + + var createRespBody struct { + ID string `json:"id"` + Resources []struct { + Kind string `json:"kind"` + Credentials struct { + Username string `json:"username"` + Password string `json:"password"` + } `json:"credentials"` + } `json:"resources"` + Errors []struct { + Code string `json:"code"` + Message string `json:"message"` + } `json:"errors"` + } + + if err := json.NewDecoder(createResp.Body).Decode(&createRespBody); err != nil { + return nil, fmt.Errorf("error parsing deployment creation API response: %w", err) + } + + if len(createRespBody.Errors) > 0 { + return nil, fmt.Errorf("failed to create: (%s) %s", createRespBody.Errors[0].Code, createRespBody.Errors[0].Message) + } + + r := CreateDeploymentResponse{ + ID: createRespBody.ID, + } + + for _, resource := range createRespBody.Resources { + if resource.Kind == "elasticsearch" { + r.Username = resource.Credentials.Username + r.Password = resource.Credentials.Password + break + } + } + + // Get Elasticsearch and Kibana endpoint URLs + getResp, err := c.getDeployment(ctx, r.ID) + if err != nil { + return nil, fmt.Errorf("error calling deployment retrieval API: %w", err) + } + defer getResp.Body.Close() + + var getRespBody struct { + Resources struct { + Elasticsearch []struct { + Info struct { + Metadata struct { + ServiceUrl string `json:"service_url"` + } `json:"metadata"` + } `json:"info"` + } `json:"elasticsearch"` + Kibana []struct { + Info struct { + Metadata struct { + ServiceUrl string `json:"service_url"` + } `json:"metadata"` + } `json:"info"` + } `json:"kibana"` + } `json:"resources"` + } + + if err := json.NewDecoder(getResp.Body).Decode(&getRespBody); err != nil { + return nil, fmt.Errorf("error parsing deployment retrieval API response: %w", err) + } + + r.ElasticsearchEndpoint = getRespBody.Resources.Elasticsearch[0].Info.Metadata.ServiceUrl + r.KibanaEndpoint = getRespBody.Resources.Kibana[0].Info.Metadata.ServiceUrl + + return &r, nil +} + +// ShutdownDeployment attempts to shut down the ESS deployment with the specified ID. +func (c *Client) ShutdownDeployment(ctx context.Context, deploymentID string) error { + u, err := url.JoinPath("deployments", deploymentID, "_shutdown") + if err != nil { + return fmt.Errorf("unable to create deployment shutdown API URL: %w", err) + } + + res, err := c.doPost(ctx, u, "", nil) + if err != nil { + return fmt.Errorf("error calling deployment shutdown API: %w", err) + } + defer res.Body.Close() + + if res.StatusCode != 200 { + resBytes, _ := io.ReadAll(res.Body) + return fmt.Errorf("got unexpected response code [%d] from deployment shutdown API: %s", res.StatusCode, string(resBytes)) + } + + return nil +} + +// DeploymentStatus returns the overall status of the deployment as well as statuses of every component. 
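+// The Overall field is DeploymentStatusStarted only once Elasticsearch, Kibana,
+// and the Integrations Server all report started (see overallStatus below).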
+func (c *Client) DeploymentStatus(ctx context.Context, deploymentID string) (*DeploymentStatusResponse, error) {
+	getResp, err := c.getDeployment(ctx, deploymentID)
+	if err != nil {
+		return nil, fmt.Errorf("error calling deployment retrieval API: %w", err)
+	}
+	defer getResp.Body.Close()
+
+	var getRespBody struct {
+		Resources struct {
+			Elasticsearch []struct {
+				Info struct {
+					Status DeploymentStatus `json:"status"`
+				} `json:"info"`
+			} `json:"elasticsearch"`
+			Kibana []struct {
+				Info struct {
+					Status DeploymentStatus `json:"status"`
+				} `json:"info"`
+			} `json:"kibana"`
+			IntegrationsServer []struct {
+				Info struct {
+					Status DeploymentStatus `json:"status"`
+				} `json:"info"`
+			} `json:"integrations_server"`
+		} `json:"resources"`
+	}
+
+	if err := json.NewDecoder(getResp.Body).Decode(&getRespBody); err != nil {
+		return nil, fmt.Errorf("error parsing deployment retrieval API response: %w", err)
+	}
+
+	if len(getRespBody.Resources.Elasticsearch) == 0 || len(getRespBody.Resources.Kibana) == 0 || len(getRespBody.Resources.IntegrationsServer) == 0 {
+		return nil, fmt.Errorf("deployment retrieval API response is missing expected resources for deployment [%s]", deploymentID)
+	}
+
+	s := DeploymentStatusResponse{
+		Elasticsearch:      getRespBody.Resources.Elasticsearch[0].Info.Status,
+		Kibana:             getRespBody.Resources.Kibana[0].Info.Status,
+		IntegrationsServer: getRespBody.Resources.IntegrationsServer[0].Info.Status,
+	}
+	s.Overall = overallStatus(s.Elasticsearch, s.Kibana, s.IntegrationsServer)
+
+	return &s, nil
+}
+
+// DeploymentIsReady returns true when the deployment is ready, checking its status
+// every `tick` until the context is cancelled.
+func (c *Client) DeploymentIsReady(ctx context.Context, deploymentID string, tick time.Duration) (bool, error) {
+	ticker := time.NewTicker(tick)
+	defer ticker.Stop()
+
+	// poll synchronously; accumulating errors from a goroutine here would
+	// race with the reads below
+	var errs error
+	for {
+		select {
+		case <-ctx.Done():
+			return false, errors.Join(errs, ctx.Err())
+		case <-ticker.C:
+			statusCtx, statusCancel := context.WithTimeout(ctx, tick)
+			status, err := c.DeploymentStatus(statusCtx, deploymentID)
+			statusCancel()
+			if err != nil {
+				errs = errors.Join(errs, err)
+				continue
+			}
+			if status.Overall == DeploymentStatusStarted {
+				return true, nil
+			}
+		}
+	}
+}
+
+func (c *Client) getDeployment(ctx context.Context, deploymentID string) (*http.Response, error) {
+	u, err := url.JoinPath("deployments", deploymentID)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create deployment retrieval API URL: %w", err)
+	}
+
+	return c.doGet(ctx, u)
+}
+
+func overallStatus(statuses ...DeploymentStatus) DeploymentStatus {
+	// The overall status is started if every component's status is started. Otherwise,
+	// we take the non-started components' statuses and pick the first one as the overall
+	// status.
+	statusMap := map[DeploymentStatus]struct{}{}
+	for _, status := range statuses {
+		statusMap[status] = struct{}{}
+	}
+
+	if len(statusMap) == 1 {
+		if _, allStarted := statusMap[DeploymentStatusStarted]; allStarted {
+			return DeploymentStatusStarted
+		}
+	}
+
+	var overallStatus DeploymentStatus
+	for _, status := range statuses {
+		if status != DeploymentStatusStarted {
+			overallStatus = status
+			break
+		}
+	}
+
+	return overallStatus
+}
+
+//go:embed create_deployment_request.tmpl.json
+var createDeploymentRequestTemplate string
+
+//go:embed create_deployment_csp_configuration.yaml
+var cloudProviderSpecificValues []byte
+
+func generateCreateDeploymentRequestBody(req CreateDeploymentRequest) ([]byte, error) {
+	var csp string
+	// Special case: AWS us-east-1 region is just called
+	// us-east-1 (instead of aws-us-east-1)!
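+	// For any other region the CSP is taken to be the first dash-separated
+	// token, e.g. "gcp-us-central1" -> "gcp", "azure-eastus" -> "azure".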
+ if req.Region == "us-east-1" { + csp = "aws" + } else { + regionParts := strings.Split(req.Region, "-") + if len(regionParts) < 2 { + return nil, fmt.Errorf("unable to parse CSP out of region [%s]", req.Region) + } + + csp = regionParts[0] + } + templateContext, err := createDeploymentTemplateContext(csp, req) + if err != nil { + return nil, fmt.Errorf("creating request template context: %w", err) + } + + tpl, err := template.New("create_deployment_request"). + Funcs(template.FuncMap{"json": jsonMarshal}). + Parse(createDeploymentRequestTemplate) + if err != nil { + return nil, fmt.Errorf("unable to parse deployment creation template: %w", err) + } + + var bBuf bytes.Buffer + err = tpl.Execute(&bBuf, templateContext) + if err != nil { + return nil, fmt.Errorf("rendering create deployment request template with context %v : %w", templateContext, err) + } + return bBuf.Bytes(), nil +} + +func jsonMarshal(in any) (string, error) { + jsonBytes, err := json.Marshal(in) + if err != nil { + return "", err + } + + return string(jsonBytes), nil +} + +func createDeploymentTemplateContext(csp string, req CreateDeploymentRequest) (map[string]any, error) { + cspSpecificContext, err := loadCspValues(csp) + if err != nil { + return nil, fmt.Errorf("loading csp-specific values for %q: %w", csp, err) + } + + cspSpecificContext["request"] = req + + return cspSpecificContext, nil +} + +func loadCspValues(csp string) (map[string]any, error) { + var cspValues map[string]map[string]any + + err := yaml.Unmarshal(cloudProviderSpecificValues, &cspValues) + if err != nil { + return nil, fmt.Errorf("unmarshalling error: %w", err) + } + values, supportedCSP := cspValues[csp] + if !supportedCSP { + return nil, fmt.Errorf("csp %s not supported", csp) + } + + return values, nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/serverless.go b/dev-tools/mage/target/srvrlesstest/testing/ess/serverless.go new file mode 100644 index 00000000000..96245a39fc7 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ess/serverless.go @@ -0,0 +1,331 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package ess + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "io" + "net/http" + "strings" + "time" +) + +var serverlessURL = "https://cloud.elastic.co" + +// ServerlessClient is the handler the serverless ES instance +type ServerlessClient struct { + region string + projectType string + api string + proj Project + log common.Logger +} + +// ServerlessRequest contains the data needed for a new serverless instance +type ServerlessRequest struct { + Name string `json:"name"` + RegionID string `json:"region_id"` +} + +// Project represents a serverless project +type Project struct { + Name string `json:"name"` + ID string `json:"id"` + Type string `json:"type"` + Region string `json:"region_id"` + + Credentials struct { + Username string `json:"username"` + Password string `json:"password"` + } `json:"credentials"` + + Endpoints struct { + Elasticsearch string `json:"elasticsearch"` + Kibana string `json:"kibana"` + Fleet string `json:"fleet,omitempty"` + APM string `json:"apm,omitempty"` + } `json:"endpoints"` +} + +// CredResetResponse contains the new auth details for a +// stack credential reset +type CredResetResponse struct { + Password string `json:"password"` + Username string `json:"username"` +} + +// NewServerlessClient creates a new instance of the serverless client +func NewServerlessClient(region, projectType, api string, logger common.Logger) *ServerlessClient { + return &ServerlessClient{ + region: region, + api: api, + projectType: projectType, + log: logger, + } +} + +// DeployStack creates a new serverless elastic stack +func (srv *ServerlessClient) DeployStack(ctx context.Context, req ServerlessRequest) (Project, error) { + reqBody, err := json.Marshal(req) + if err != nil { + return Project{}, fmt.Errorf("error marshaling JSON request %w", err) + } + urlPath := fmt.Sprintf("%s/api/v1/serverless/projects/%s", serverlessURL, srv.projectType) + + httpHandler, err := http.NewRequestWithContext(ctx, "POST", urlPath, bytes.NewReader(reqBody)) + if err != nil { + return Project{}, fmt.Errorf("error creating new httpRequest: %w", err) + } + + httpHandler.Header.Set("Content-Type", "application/json") + httpHandler.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", srv.api)) + + resp, err := http.DefaultClient.Do(httpHandler) + if err != nil { + return Project{}, fmt.Errorf("error performing HTTP request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusCreated { + p, _ := io.ReadAll(resp.Body) + return Project{}, fmt.Errorf("Non-201 status code returned by server: %d, body: %s", resp.StatusCode, string(p)) + } + + serverlessHandle := Project{} + err = json.NewDecoder(resp.Body).Decode(&serverlessHandle) + if err != nil { + return Project{}, fmt.Errorf("error decoding JSON response: %w", err) + } + srv.proj = serverlessHandle + + // as of 8/8-ish, the serverless ESS cloud no longer provides credentials on the first POST request, we must send an additional POST + // to reset the credentials + updated, err := srv.ResetCredentials(ctx) + if err != nil { + return serverlessHandle, fmt.Errorf("error resetting credentials: %w", err) + } + srv.proj.Credentials.Username = updated.Username + srv.proj.Credentials.Password = updated.Password + + return serverlessHandle, nil +} + +// DeploymentIsReady returns true when the serverless deployment is healthy and ready +func (srv *ServerlessClient) DeploymentIsReady(ctx context.Context) (bool, error) { + err := 
srv.WaitForEndpoints(ctx)
+	if err != nil {
+		return false, fmt.Errorf("error waiting for endpoints to become available: %w", err)
+	}
+	srv.log.Logf("Endpoints available: ES: %s Fleet: %s Kibana: %s", srv.proj.Endpoints.Elasticsearch, srv.proj.Endpoints.Fleet, srv.proj.Endpoints.Kibana)
+	err = srv.WaitForElasticsearch(ctx)
+	if err != nil {
+		return false, fmt.Errorf("error waiting for ES to become available: %w", err)
+	}
+	srv.log.Logf("Elasticsearch healthy...")
+	err = srv.WaitForKibana(ctx)
+	if err != nil {
+		return false, fmt.Errorf("error waiting for Kibana to become available: %w", err)
+	}
+	srv.log.Logf("Kibana healthy...")
+
+	return true, nil
+}
+
+// DeleteDeployment deletes the deployment
+func (srv *ServerlessClient) DeleteDeployment(ctx context.Context) error {
+	endpoint := fmt.Sprintf("%s/api/v1/serverless/projects/%s/%s", serverlessURL, srv.proj.Type, srv.proj.ID)
+	req, err := http.NewRequestWithContext(ctx, "DELETE", endpoint, nil)
+	if err != nil {
+		return fmt.Errorf("error creating HTTP request: %w", err)
+	}
+	req.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", srv.api))
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return fmt.Errorf("error performing delete request: %w", err)
+	}
+	// close the body only after the error path below has had a chance to read it
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		errBody, _ := io.ReadAll(resp.Body)
+		return fmt.Errorf("unexpected status code %d from %s: %s", resp.StatusCode, req.URL, errBody)
+	}
+	return nil
+}
+
+// WaitForEndpoints polls the API and waits until fleet/ES endpoints are available
+func (srv *ServerlessClient) WaitForEndpoints(ctx context.Context) error {
+	reqURL := fmt.Sprintf("%s/api/v1/serverless/projects/%s/%s", serverlessURL, srv.proj.Type, srv.proj.ID)
+	httpHandler, err := http.NewRequestWithContext(ctx, "GET", reqURL, nil)
+	if err != nil {
+		return fmt.Errorf("error creating http request: %w", err)
+	}
+
+	httpHandler.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", srv.api))
+
+	readyFunc := func(resp *http.Response) bool {
+		project := &Project{}
+		err := json.NewDecoder(resp.Body).Decode(project)
+		resp.Body.Close()
+		if err != nil {
+			srv.log.Logf("response decoding error: %v", err)
+			return false
+		}
+		if project.Endpoints.Elasticsearch != "" {
+			// the API does not always return a Fleet URL; derive one from the ES URL
+			if project.Endpoints.Fleet == "" {
+				project.Endpoints.Fleet = strings.Replace(project.Endpoints.Elasticsearch, "es.eks", "fleet.eks", 1)
+			}
+
+			srv.proj.Endpoints = project.Endpoints
+			return true
+		}
+		return false
+	}
+
+	err = srv.waitForRemoteState(ctx, httpHandler, time.Second*5, readyFunc)
+	if err != nil {
+		return fmt.Errorf("error waiting for remote instance to start: %w", err)
+	}
+
+	return nil
+}
+
+// WaitForElasticsearch waits until the ES endpoint is healthy
+func (srv *ServerlessClient) WaitForElasticsearch(ctx context.Context) error {
+	req, err := http.NewRequestWithContext(ctx, "GET", srv.proj.Endpoints.Elasticsearch, nil)
+	if err != nil {
+		return fmt.Errorf("error creating HTTP request: %w", err)
+	}
+	req.SetBasicAuth(srv.proj.Credentials.Username, srv.proj.Credentials.Password)
+
+	// _cluster/health no longer works on serverless, just check response code
+	readyFunc := func(resp *http.Response) bool {
+		return resp.StatusCode == 200
+	}
+
+	err = srv.waitForRemoteState(ctx, req, time.Second*5, readyFunc)
+	if err != nil {
+		return fmt.Errorf("error waiting for ES to become healthy: %w", err)
+	}
+	return nil
+}
+
+// WaitForKibana waits until the Kibana endpoint is healthy
+func (srv *ServerlessClient) 
WaitForKibana(ctx context.Context) error {
+	req, err := http.NewRequestWithContext(ctx, "GET", srv.proj.Endpoints.Kibana+"/api/status", nil)
+	if err != nil {
+		return fmt.Errorf("error creating HTTP request: %w", err)
+	}
+	req.SetBasicAuth(srv.proj.Credentials.Username, srv.proj.Credentials.Password)
+
+	readyFunc := func(resp *http.Response) bool {
+		var status struct {
+			Status struct {
+				Overall struct {
+					Level string `json:"level"`
+				} `json:"overall"`
+			} `json:"status"`
+		}
+		err := json.NewDecoder(resp.Body).Decode(&status)
+		resp.Body.Close()
+		if err != nil {
+			srv.log.Logf("response decoding error: %v", err)
+			return false
+		}
+		return status.Status.Overall.Level == "available"
+	}
+
+	err = srv.waitForRemoteState(ctx, req, time.Second*5, readyFunc)
+	if err != nil {
+		return fmt.Errorf("error waiting for Kibana to become healthy: %w", err)
+	}
+	return nil
+}
+
+// ResetCredentials resets the credentials for the given ESS instance
+func (srv *ServerlessClient) ResetCredentials(ctx context.Context) (CredResetResponse, error) {
+	resetURL := fmt.Sprintf("%s/api/v1/serverless/projects/%s/%s/_reset-internal-credentials", serverlessURL, srv.projectType, srv.proj.ID)
+
+	resetHandler, err := http.NewRequestWithContext(ctx, "POST", resetURL, nil)
+	if err != nil {
+		return CredResetResponse{}, fmt.Errorf("error creating new httpRequest: %w", err)
+	}
+
+	resetHandler.Header.Set("Content-Type", "application/json")
+	resetHandler.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", srv.api))
+
+	resp, err := http.DefaultClient.Do(resetHandler)
+	if err != nil {
+		return CredResetResponse{}, fmt.Errorf("error performing HTTP request: %w", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		p, _ := io.ReadAll(resp.Body)
+		return CredResetResponse{}, fmt.Errorf("non-200 status code returned by server: %d, body: %s", resp.StatusCode, string(p))
+	}
+
+	updated := CredResetResponse{}
+	err = json.NewDecoder(resp.Body).Decode(&updated)
+	if err != nil {
+		return CredResetResponse{}, fmt.Errorf("error decoding JSON response: %w", err)
+	}
+
+	return updated, nil
+}
+
+func (srv *ServerlessClient) waitForRemoteState(ctx context.Context, httpHandler *http.Request, tick time.Duration, isReady func(*http.Response) bool) error {
+	timer := time.NewTimer(time.Millisecond)
+	defer timer.Stop()
+	// in cases where we get a timeout, also return the last error returned via HTTP
+	var lastErr error
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("got context done; last HTTP error: %w", lastErr)
+		case <-timer.C:
+		}
+
+		resp, err := http.DefaultClient.Do(httpHandler)
+		if err != nil {
+			errMsg := fmt.Errorf("request error: %w", err)
+			// Logger interface doesn't have a debug level and we don't want to auto-log these,
+			// as most of the time it's just spam.
+			//srv.log.Logf(errMsg.Error())
+			lastErr = errMsg
+			timer.Reset(time.Second * 5)
+			continue
+		}
+		if resp.StatusCode != http.StatusOK {
+			errBody, _ := io.ReadAll(resp.Body)
+			errMsg := fmt.Errorf("unexpected status code %d in request to %s, body: %s", resp.StatusCode, httpHandler.URL.String(), string(errBody))
+			//srv.log.Logf(errMsg.Error())
+			lastErr = errMsg
+			resp.Body.Close()
+			timer.Reset(time.Second * 5)
+			continue
+		}
+
+		if isReady(resp) {
+			return nil
+		}
+		timer.Reset(tick)
+	}
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/serverless_provisioner.go b/dev-tools/mage/target/srvrlesstest/testing/ess/serverless_provisioner.go
new file mode 100644
index 00000000000..0ae3280016d
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/ess/serverless_provisioner.go
@@ -0,0 +1,275 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ess
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/elastic/elastic-agent-libs/logp"
+)
+
+const ProvisionerServerless = "serverless"
+
+// ServerlessProvisioner is a stack provisioner for serverless Elastic Cloud projects.
+type ServerlessProvisioner struct {
+	cfg ProvisionerConfig
+	log common.Logger
+}
+
+type defaultLogger struct {
+	wrapped *logp.Logger
+}
+
+// Logf implements the runner.Logger interface
+func (log *defaultLogger) Logf(format string, args ...any) {
+	log.wrapped.Infof(format, args...)
+}
+
+// ServerlessRegions is the JSON response from the serverless regions API endpoint
+type ServerlessRegions struct {
+	CSP       string `json:"csp"`
+	CSPRegion string `json:"csp_region"`
+	ID        string `json:"id"`
+	Name      string `json:"name"`
+}
+
+// NewServerlessProvisioner creates a new StackProvisioner instance for serverless
+func NewServerlessProvisioner(ctx context.Context, cfg ProvisionerConfig) (common.StackProvisioner, error) {
+	prov := &ServerlessProvisioner{
+		cfg: cfg,
+		log: &defaultLogger{wrapped: logp.L()},
+	}
+	err := prov.CheckCloudRegion(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("error checking region setting: %w", err)
+	}
+	return prov, nil
+}
+
+func (prov *ServerlessProvisioner) Name() string {
+	return ProvisionerServerless
+}
+
+// SetLogger sets the logger for the provisioner.
+func (prov *ServerlessProvisioner) SetLogger(l common.Logger) {
+	prov.log = l
+}
+
+// Create creates a stack.
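+// It deploys a new serverless project through the ESS serverless API and
+// waits for the project endpoints to become available; the returned stack
+// is not yet marked Ready (see WaitForReady).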
+func (prov *ServerlessProvisioner) Create(ctx context.Context, request common.StackRequest) (common.Stack, error) { + // allow up to 4 minutes for requests + createCtx, createCancel := context.WithTimeout(ctx, 4*time.Minute) + defer createCancel() + + client := NewServerlessClient(prov.cfg.Region, "observability", prov.cfg.APIKey, prov.log) + srvReq := ServerlessRequest{Name: request.ID, RegionID: prov.cfg.Region} + + prov.log.Logf("Creating serverless stack %s [stack_id: %s]", request.Version, request.ID) + proj, err := client.DeployStack(createCtx, srvReq) + if err != nil { + return common.Stack{}, fmt.Errorf("error deploying stack for request %s: %w", request.ID, err) + } + err = client.WaitForEndpoints(createCtx) + if err != nil { + return common.Stack{}, fmt.Errorf("error waiting for endpoints to become available for serverless stack %s [stack_id: %s, deployment_id: %s]: %w", request.Version, request.ID, proj.ID, err) + } + stack := common.Stack{ + ID: request.ID, + Provisioner: prov.Name(), + Version: request.Version, + Elasticsearch: client.proj.Endpoints.Elasticsearch, + Kibana: client.proj.Endpoints.Kibana, + Username: client.proj.Credentials.Username, + Password: client.proj.Credentials.Password, + Internal: map[string]interface{}{ + "deployment_id": proj.ID, + "deployment_type": proj.Type, + }, + Ready: false, + } + prov.log.Logf("Created serverless stack %s [stack_id: %s, deployment_id: %s]", request.Version, request.ID, proj.ID) + return stack, nil +} + +// WaitForReady should block until the stack is ready or the context is cancelled. +func (prov *ServerlessProvisioner) WaitForReady(ctx context.Context, stack common.Stack) (common.Stack, error) { + deploymentID, deploymentType, err := prov.getDeploymentInfo(stack) + if err != nil { + return stack, fmt.Errorf("failed to get deployment info from the stack: %w", err) + } + + ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) + defer cancel() + + client := NewServerlessClient(prov.cfg.Region, "observability", prov.cfg.APIKey, prov.log) + client.proj.ID = deploymentID + client.proj.Type = deploymentType + client.proj.Region = prov.cfg.Region + client.proj.Endpoints.Elasticsearch = stack.Elasticsearch + client.proj.Endpoints.Kibana = stack.Kibana + client.proj.Credentials.Username = stack.Username + client.proj.Credentials.Password = stack.Password + + prov.log.Logf("Waiting for serverless stack %s to be ready [stack_id: %s, deployment_id: %s]", stack.Version, stack.ID, deploymentID) + + errCh := make(chan error) + var lastErr error + + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + if lastErr == nil { + lastErr = ctx.Err() + } + return stack, fmt.Errorf("serverless stack %s [stack_id: %s, deployment_id: %s] never became ready: %w", stack.Version, stack.ID, deploymentID, lastErr) + case <-ticker.C: + go func() { + statusCtx, statusCancel := context.WithTimeout(ctx, 30*time.Second) + defer statusCancel() + ready, err := client.DeploymentIsReady(statusCtx) + if err != nil { + errCh <- err + } else if !ready { + errCh <- fmt.Errorf("serverless stack %s [stack_id: %s, deployment_id: %s] never became ready", stack.Version, stack.ID, deploymentID) + } else { + errCh <- nil + } + }() + case err := <-errCh: + if err == nil { + stack.Ready = true + return stack, nil + } + lastErr = err + } + } +} + +// Delete deletes a stack. 
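+// It rebuilds a serverless client from the deployment info stored in the
+// stack's Internal map and issues a delete against the projects API.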
+func (prov *ServerlessProvisioner) Delete(ctx context.Context, stack common.Stack) error {
+	deploymentID, deploymentType, err := prov.getDeploymentInfo(stack)
+	if err != nil {
+		return fmt.Errorf("failed to get deployment info from the stack: %w", err)
+	}
+
+	client := NewServerlessClient(prov.cfg.Region, "observability", prov.cfg.APIKey, prov.log)
+	client.proj.ID = deploymentID
+	client.proj.Type = deploymentType
+	client.proj.Region = prov.cfg.Region
+	client.proj.Endpoints.Elasticsearch = stack.Elasticsearch
+	client.proj.Endpoints.Kibana = stack.Kibana
+	client.proj.Credentials.Username = stack.Username
+	client.proj.Credentials.Password = stack.Password
+
+	prov.log.Logf("Destroying serverless stack %s [stack_id: %s, deployment_id: %s]", stack.Version, stack.ID, deploymentID)
+	err = client.DeleteDeployment(ctx)
+	if err != nil {
+		return fmt.Errorf("error removing serverless stack %s [stack_id: %s, deployment_id: %s]: %w", stack.Version, stack.ID, deploymentID, err)
+	}
+	return nil
+}
+
+// CheckCloudRegion checks whether the provided region is valid for serverless;
+// if the region is invalid, it is overwritten with a valid one.
+// The "normal" and serverless ESS APIs have different regions, hence the need for this check.
+func (prov *ServerlessProvisioner) CheckCloudRegion(ctx context.Context) error {
+	urlPath := fmt.Sprintf("%s/api/v1/serverless/regions", serverlessURL)
+
+	httpHandler, err := http.NewRequestWithContext(ctx, "GET", urlPath, nil)
+	if err != nil {
+		return fmt.Errorf("error creating new httpRequest: %w", err)
+	}
+
+	httpHandler.Header.Set("Content-Type", "application/json")
+	httpHandler.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", prov.cfg.APIKey))
+
+	resp, err := http.DefaultClient.Do(httpHandler)
+	if err != nil {
+		return fmt.Errorf("error performing HTTP request: %w", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		p, _ := io.ReadAll(resp.Body)
+		return fmt.Errorf("non-200 status code returned by server: %d, body: %s", resp.StatusCode, string(p))
+	}
+	regions := []ServerlessRegions{}
+
+	err = json.NewDecoder(resp.Body).Decode(&regions)
+	if err != nil {
+		return fmt.Errorf("error unpacking regions from list: %w", err)
+	}
+
+	found := false
+	for _, region := range regions {
+		if region.ID == prov.cfg.Region {
+			found = true
+			break
+		}
+	}
+	if !found {
+		if len(regions) == 0 {
+			return fmt.Errorf("no regions found for serverless API")
+		}
+		newRegion := regions[0].ID
+		prov.log.Logf("WARNING: Region %s is not available for serverless, selecting %s. Other regions are:", prov.cfg.Region, newRegion)
+		for _, avail := range regions {
+			prov.log.Logf(" %s - %s", avail.ID, avail.Name)
+		}
+		prov.cfg.Region = newRegion
+	}
+
+	return nil
+}
+
+func (prov *ServerlessProvisioner) getDeploymentInfo(stack common.Stack) (string, string, error) {
+	if stack.Internal == nil {
+		return "", "", fmt.Errorf("missing internal information")
+	}
+	deploymentIDRaw, ok := stack.Internal["deployment_id"]
+	if !ok {
+		return "", "", fmt.Errorf("missing internal deployment_id")
+	}
+	deploymentID, ok := deploymentIDRaw.(string)
+	if !ok {
+		return "", "", fmt.Errorf("internal deployment_id not a string")
+	}
+	deploymentTypeRaw, ok := stack.Internal["deployment_type"]
+	if !ok {
+		return "", "", fmt.Errorf("missing internal deployment_type")
+	}
+	deploymentType, ok := deploymentTypeRaw.(string)
+	if !ok {
+		return "", "", fmt.Errorf("internal deployment_type is not a string")
+	}
+	return deploymentID, deploymentType, nil
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/statful_provisioner.go b/dev-tools/mage/target/srvrlesstest/testing/ess/statful_provisioner.go
new file mode 100644
index 00000000000..0ab6bf8db7d
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/ess/statful_provisioner.go
@@ -0,0 +1,201 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ess
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common"
+	"os"
+	"strings"
+	"time"
+)
+
+const ProvisionerStateful = "stateful"
+
+// ProvisionerConfig is the configuration for the ESS statefulProvisioner.
+type ProvisionerConfig struct {
+	Identifier string
+	APIKey     string
+	Region     string
+}
+
+// Validate returns an error if the information is invalid.
+func (c *ProvisionerConfig) Validate() error {
+	if c.Identifier == "" {
+		return errors.New("field Identifier must be set")
+	}
+	if c.APIKey == "" {
+		return errors.New("field APIKey must be set")
+	}
+	if c.Region == "" {
+		return errors.New("field Region must be set")
+	}
+	return nil
+}
+
+type statefulProvisioner struct {
+	logger common.Logger
+	cfg    ProvisionerConfig
+	client *Client
+}
+
+// NewProvisioner creates the ESS stateful Provisioner
+func NewProvisioner(cfg ProvisionerConfig) (common.StackProvisioner, error) {
+	err := cfg.Validate()
+	if err != nil {
+		return nil, err
+	}
+	essClient := NewClient(Config{
+		ApiKey: cfg.APIKey,
+	})
+	return &statefulProvisioner{
+		cfg:    cfg,
+		client: essClient,
+	}, nil
+}
+
+func (p *statefulProvisioner) Name() string {
+	return ProvisionerStateful
+}
+
+func (p *statefulProvisioner) SetLogger(l common.Logger) {
+	p.logger = l
+}
+
+// Create creates a stack.
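+// The deployment is tagged for ownership and cost attribution; when the CI
+// environment variable is set, Buildkite build metadata is added as extra tags.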
+func (p *statefulProvisioner) Create(ctx context.Context, request common.StackRequest) (common.Stack, error) { + // allow up to 2 minutes for request + createCtx, createCancel := context.WithTimeout(ctx, 2*time.Minute) + defer createCancel() + deploymentTags := map[string]string{ + "division": "engineering", + "org": "ingest", + "team": "elastic-agent-control-plane", + "project": "elastic-agent", + "integration-tests": "true", + } + // If the CI env var is set, this mean we are running inside the CI pipeline and some expected env vars are exposed + if _, e := os.LookupEnv("CI"); e { + deploymentTags["buildkite_id"] = os.Getenv("BUILDKITE_BUILD_NUMBER") + deploymentTags["creator"] = os.Getenv("BUILDKITE_BUILD_CREATOR") + deploymentTags["buildkite_url"] = os.Getenv("BUILDKITE_BUILD_URL") + deploymentTags["ci"] = "true" + } + resp, err := p.createDeployment(createCtx, request, deploymentTags) + if err != nil { + return common.Stack{}, err + } + return common.Stack{ + ID: request.ID, + Provisioner: p.Name(), + Version: request.Version, + Elasticsearch: resp.ElasticsearchEndpoint, + Kibana: resp.KibanaEndpoint, + Username: resp.Username, + Password: resp.Password, + Internal: map[string]interface{}{ + "deployment_id": resp.ID, + }, + Ready: false, + }, nil +} + +// WaitForReady should block until the stack is ready or the context is cancelled. +func (p *statefulProvisioner) WaitForReady(ctx context.Context, stack common.Stack) (common.Stack, error) { + deploymentID, err := p.getDeploymentID(stack) + if err != nil { + return stack, fmt.Errorf("failed to get deployment ID from the stack: %w", err) + } + // allow up to 10 minutes for it to become ready + ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) + defer cancel() + p.logger.Logf("Waiting for cloud stack %s to be ready [stack_id: %s, deployment_id: %s]", stack.Version, stack.ID, deploymentID) + ready, err := p.client.DeploymentIsReady(ctx, deploymentID, 30*time.Second) + if err != nil { + return stack, fmt.Errorf("failed to check for cloud %s [stack_id: %s, deployment_id: %s] to be ready: %w", stack.Version, stack.ID, deploymentID, err) + } + if !ready { + return stack, fmt.Errorf("cloud %s [stack_id: %s, deployment_id: %s] never became ready: %w", stack.Version, stack.ID, deploymentID, err) + } + stack.Ready = true + return stack, nil +} + +// Delete deletes a stack. 
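+// It shuts the ESS deployment down through the deployment shutdown API,
+// allowing up to one minute for the request.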
+func (p *statefulProvisioner) Delete(ctx context.Context, stack common.Stack) error { + deploymentID, err := p.getDeploymentID(stack) + if err != nil { + return err + } + + // allow up to 1 minute for request + ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) + defer cancel() + + p.logger.Logf("Destroying cloud stack %s [stack_id: %s, deployment_id: %s]", stack.Version, stack.ID, deploymentID) + return p.client.ShutdownDeployment(ctx, deploymentID) +} + +func (p *statefulProvisioner) createDeployment(ctx context.Context, r common.StackRequest, tags map[string]string) (*CreateDeploymentResponse, error) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) + defer cancel() + + p.logger.Logf("Creating cloud stack %s [stack_id: %s]", r.Version, r.ID) + name := fmt.Sprintf("%s-%s", strings.Replace(p.cfg.Identifier, ".", "-", -1), r.ID) + + // prepare tags + tagArray := make([]Tag, 0, len(tags)) + for k, v := range tags { + tagArray = append(tagArray, Tag{ + Key: k, + Value: v, + }) + } + + createDeploymentRequest := CreateDeploymentRequest{ + Name: name, + Region: p.cfg.Region, + Version: r.Version, + Tags: tagArray, + } + + resp, err := p.client.CreateDeployment(ctx, createDeploymentRequest) + if err != nil { + p.logger.Logf("Failed to create ESS cloud %s: %s", r.Version, err) + return nil, fmt.Errorf("failed to create ESS cloud for version %s: %w", r.Version, err) + } + p.logger.Logf("Created cloud stack %s [stack_id: %s, deployment_id: %s]", r.Version, r.ID, resp.ID) + return resp, nil +} + +func (p *statefulProvisioner) getDeploymentID(stack common.Stack) (string, error) { + if stack.Internal == nil { + return "", fmt.Errorf("missing internal information") + } + deploymentIDRaw, ok := stack.Internal["deployment_id"] + if !ok { + return "", fmt.Errorf("missing internal deployment_id") + } + deploymentID, ok := deploymentIDRaw.(string) + if !ok { + return "", fmt.Errorf("internal deployment_id not a string") + } + return deploymentID, nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/fetcher.go b/dev-tools/mage/target/srvrlesstest/testing/fetcher.go new file mode 100644 index 00000000000..e9e797c1823 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/fetcher.go @@ -0,0 +1,256 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package testing + +import ( + "archive/tar" + "archive/zip" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" +) + +const extAsc = ".asc" +const extHash = ".sha512" + +var ( + // ErrUnsupportedPlatform returned when the operating system and architecture combination is not supported. 
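+// It is returned (wrapped) by GetPackageSuffix when packageArchMap has no
+// entry for the requested OS/architecture/package-format combination.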
+	ErrUnsupportedPlatform = errors.New("platform is not supported")
+)
+
+// packageArchMap provides a mapping for the file-name endings of the builds of Elastic Agent
+// based on the operating system, architecture, and package format.
+var packageArchMap = map[string]string{
+	"linux-amd64-targz":  "linux-x86_64.tar.gz",
+	"linux-amd64-deb":    "amd64.deb",
+	"linux-amd64-rpm":    "x86_64.rpm",
+	"linux-arm64-targz":  "linux-arm64.tar.gz",
+	"linux-arm64-deb":    "arm64.deb",
+	"linux-arm64-rpm":    "aarch64.rpm",
+	"windows-amd64-zip":  "windows-x86_64.zip",
+	"darwin-amd64-targz": "darwin-x86_64.tar.gz",
+	"darwin-arm64-targz": "darwin-aarch64.tar.gz",
+}
+
+// GetPackageSuffix returns the file-name suffix for the builds of Elastic Agent based on the
+// operating system, architecture, and package format.
+func GetPackageSuffix(operatingSystem string, architecture string, packageFormat string) (string, error) {
+	suffix, ok := packageArchMap[fmt.Sprintf("%s-%s-%s", operatingSystem, architecture, packageFormat)]
+	if !ok {
+		return "", fmt.Errorf("%w: %s/%s/%s", ErrUnsupportedPlatform, operatingSystem, architecture, packageFormat)
+	}
+	return suffix, nil
+}
+
+// FetcherResult represents a pending result from the fetcher.
+type FetcherResult interface {
+	// Name is the name of the fetched result.
+	Name() string
+	// Fetch performs the actual fetch into the provided directory.
+	Fetch(ctx context.Context, l Logger, dir string) error
+}
+
+// Fetcher provides a path for fetching the Elastic Agent compressed archive
+// to extract and run for the integration test.
+type Fetcher interface {
+	// Name returns a unique name for the fetcher.
+	//
+	// This name is used as a caching key and if a build has already been fetched for a version then it will not
+	// be fetched again as long as the same fetcher is being used.
+	Name() string
+	// Fetch fetches the Elastic Agent compressed archive to extract and run for the integration test.
+	//
+	// The extraction is handled by the caller. This should only download the file
+	// and place it into the directory.
+	Fetch(ctx context.Context, operatingSystem string, architecture string, version string, packageFormat string) (FetcherResult, error)
+}
+
+// fetchCache is global to all tests, so the needed artifacts only have to be
+// fetched once, at the start of the first test.
+var fetchCache map[string]*fetcherCache
+var fetchCacheMx sync.Mutex
+
+// fetcherCache provides a caching mechanism for only fetching what has not already been fetched.
+type fetcherCache struct {
+	mx  sync.Mutex
+	dir string
+}
+
+// fetch either uses the cache result or performs a new fetch if the content is missing.
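+// The cache key is the result's Name(); if a file with that name already
+// exists in the cache directory it is reused instead of being fetched again.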
+func (c *fetcherCache) fetch(ctx context.Context, l Logger, res FetcherResult) (string, error) { + name := res.Name() + src := filepath.Join(c.dir, name) + _, err := os.Stat(src) + if err == nil || os.IsExist(err) { + l.Logf("Using existing artifact %s", name) + return src, nil + } + err = res.Fetch(ctx, l, c.dir) + if err != nil { + return "", err + } + return src, nil +} + +func splitFileType(name string) (string, string, error) { + if strings.HasSuffix(name, ".tar.gz") { + return strings.TrimSuffix(name, ".tar.gz"), ".tar.gz", nil + } + if strings.HasSuffix(name, ".zip") { + return strings.TrimSuffix(name, ".zip"), ".zip", nil + } + if strings.HasSuffix(name, ".deb") { + return strings.TrimSuffix(name, ".deb"), ".deb", nil + } + if strings.HasSuffix(name, ".rpm") { + return strings.TrimSuffix(name, ".rpm"), ".rpm", nil + } + return "", "", fmt.Errorf("unknown file extension type: %s", filepath.Ext(name)) +} + +// untar takes a .tar.gz and extracts its content +func untar(archivePath string, extractDir string) error { + r, err := os.Open(archivePath) + if err != nil { + return err + } + defer r.Close() + + zr, err := gzip.NewReader(r) + if err != nil { + return err + } + + tr := tar.NewReader(zr) + + for { + f, err := tr.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return err + } + + fi := f.FileInfo() + mode := fi.Mode() + abs := filepath.Join(extractDir, f.Name) //nolint:gosec // used only in tests + switch { + case mode.IsRegular(): + // just to be sure, it should already be created by Dir type + if err := os.MkdirAll(filepath.Dir(abs), 0755); err != nil { + return fmt.Errorf("failed creating directory for file %s: %w", abs, err) + } + + wf, err := os.OpenFile(abs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode.Perm()) + if err != nil { + return fmt.Errorf("failed creating file %s: %w", abs, err) + } + + _, err = io.Copy(wf, tr) //nolint:gosec // used only in tests + if closeErr := wf.Close(); closeErr != nil && err == nil { + err = closeErr + } + if err != nil { + return fmt.Errorf("error writing file %s: %w", abs, err) + } + case mode.IsDir(): + if err := os.MkdirAll(abs, 0755); err != nil { + return fmt.Errorf("failed creating directory %s: %w", abs, err) + } + case mode.Type()&os.ModeSymlink == os.ModeSymlink: + // just to be sure, it should already be created by Dir type + if err := os.MkdirAll(filepath.Dir(abs), 0755); err != nil { + return fmt.Errorf("failed creating directory for symlink %s: %w", abs, err) + } + if err := os.Symlink(f.Linkname, abs); err != nil { + return fmt.Errorf("failed creating symlink %s: %w", abs, err) + } + default: + // skip unknown types + } + } + return nil +} + +// unzip takes a .zip and extracts its content +func unzip(archivePath string, extractDir string) error { + r, err := zip.OpenReader(archivePath) + if err != nil { + return err + } + defer r.Close() + + unpackFile := func(f *zip.File) (err error) { + rc, err := f.Open() + if err != nil { + return err + } + defer func() { + if cerr := rc.Close(); cerr != nil { + err = errors.Join(err, cerr) + } + }() + + fi := f.FileInfo() + mode := fi.Mode() + abs := filepath.Join(extractDir, f.Name) //nolint:gosec // used only in tests + switch { + case mode.IsRegular(): + // just to be sure, it should already be created by Dir type + if err := os.MkdirAll(filepath.Dir(abs), f.Mode()); err != nil { + return fmt.Errorf("failed creating directory for file %s: %w", abs, err) + } + + f, err := os.OpenFile(abs, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) + if err != nil { + return 
fmt.Errorf("failed creating file %s: %w", abs, err) + } + defer func() { + if cerr := f.Close(); cerr != nil { + err = errors.Join(err, cerr) + } + }() + + //nolint:gosec // used only in tests + if _, err = io.Copy(f, rc); err != nil { + return fmt.Errorf("error writing file %s: %w", abs, err) + } + case mode.IsDir(): + if err := os.MkdirAll(abs, f.Mode()); err != nil { + return fmt.Errorf("failed creating directory %s: %w", abs, err) + } + default: + // skip unknown types + } + return nil + } + + for _, f := range r.File { + if err := unpackFile(f); err != nil { + return err + } + } + return nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/kubernetes/image.go b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/image.go new file mode 100644 index 00000000000..f15970402a7 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/image.go @@ -0,0 +1,258 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package kubernetes + +import ( + "archive/tar" + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + devtools "github.com/elastic/beats/v7/dev-tools/mage" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +type DockerConfig struct { + CurrentContext string `json:"currentContext"` +} + +type DockerContext struct { + Name string `json:"Name"` + Metadata map[string]interface{} `json:"Metadata"` + Endpoints map[string]Endpoint `json:"Endpoints"` + Storage map[string]interface{} `json:"Storage"` + TLS bool `json:"TLS"` +} + +type DockerBuildOutput struct { + Stream string `json:"stream"` + Aux struct { + ID string `json:"ID"` + } `json:"aux"` +} + +type Endpoint struct { + Host string `json:"Host"` +} + +// AddK8STestsToImage compiles and adds the k8s-inner-tests binary to the given image +func AddK8STestsToImage(ctx context.Context, logger common.Logger, baseImage string, arch string) (string, error) { + // compile k8s test with tag kubernetes_inner + buildBase, err := filepath.Abs("build") + if err != nil { + return "", err + } + + testBinary := filepath.Join(buildBase, "k8s-inner-tests") + + params := devtools.GoTestArgs{ + TestName: "k8s-inner-tests", + Race: false, + Packages: []string{"./testing/kubernetes_inner/..."}, + Tags: []string{"kubernetes_inner"}, + OutputFile: testBinary, + Env: map[string]string{ + "GOOS": "linux", + "GOARCH": arch, + "CGO_ENABLED": "0", + }, + } + + if err := devtools.GoTestBuild(ctx, params); err != nil { + return "", err + } + + cli, err := getDockerClient() + if err != nil { + return "", err + } + + // dockerfile to just copy the tests binary + dockerfile := fmt.Sprintf(` + FROM %s + COPY testsBinary 
/usr/share/elastic-agent/k8s-inner-tests + `, baseImage) + + // Create a tar archive with the Dockerfile and the binary + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + + // Add Dockerfile to tar + err = tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + if err != nil { + return "", err + } + _, err = tw.Write([]byte(dockerfile)) + if err != nil { + return "", err + } + + // Add binary to tar + binaryFile, err := os.Open(testBinary) + if err != nil { + return "", err + } + defer binaryFile.Close() + + info, err := binaryFile.Stat() + if err != nil { + return "", err + } + + err = tw.WriteHeader(&tar.Header{ + Name: "testsBinary", + Mode: 0777, + Size: info.Size(), + }) + if err != nil { + return "", err + } + _, err = io.Copy(tw, binaryFile) + if err != nil { + return "", err + } + + err = tw.Close() + if err != nil { + return "", err + } + + outputImage := baseImage + "-tests" + + // Build the image + imageBuildResponse, err := cli.ImageBuild(ctx, &buf, types.ImageBuildOptions{ + Tags: []string{outputImage}, + Dockerfile: "Dockerfile", + Remove: true, + }) + if err != nil { + return "", err + } + defer imageBuildResponse.Body.Close() + + scanner := bufio.NewScanner(imageBuildResponse.Body) + for scanner.Scan() { + line := scanner.Text() + var output DockerBuildOutput + if err := json.Unmarshal([]byte(line), &output); err != nil { + return "", fmt.Errorf("error at parsing JSON: %w", err) + } + + if output.Stream != "" { + if out := strings.TrimRight(output.Stream, "\n"); out != "" { + logger.Logf(out) + } + } + } + + if err := scanner.Err(); err != nil { + return "", err + } + + return outputImage, nil +} + +// getDockerClient returns an instance of the Docker client. It first checks +// if there is a current context inside $/.docker/config.json and instantiates +// a client based on it. Otherwise, it fallbacks to a docker client with values +// from environment variables. 
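+// The environment fallback relies on the standard Docker client variables
+// (e.g. DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH, DOCKER_TLS_VERIFY).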
+func getDockerClient() (*client.Client, error) { + + envClient := func() (*client.Client, error) { + return client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + } + + type DockerConfig struct { + CurrentContext string `json:"currentContext"` + } + + configFile := filepath.Join(os.Getenv("HOME"), ".docker", "config.json") + file, err := os.Open(configFile) + if err != nil { + if os.IsNotExist(err) { + return envClient() + } + return nil, err + } + defer file.Close() + + var config DockerConfig + decoder := json.NewDecoder(file) + err = decoder.Decode(&config) + if err != nil { + return nil, err + } + + if config.CurrentContext == "" { + return envClient() + } + + contextDir := filepath.Join(os.Getenv("HOME"), ".docker", "contexts", "meta") + files, err := os.ReadDir(contextDir) + if err != nil { + if os.IsNotExist(err) { + return envClient() + } + return nil, fmt.Errorf("unable to read Docker contexts directory: %w", err) + } + + for _, f := range files { + if f.IsDir() { + metaFile := filepath.Join(contextDir, f.Name(), "meta.json") + if _, err := os.Stat(metaFile); err == nil { + if os.IsNotExist(err) { + return envClient() + } + var dockerContext DockerContext + content, err := os.ReadFile(metaFile) + if err != nil { + return nil, fmt.Errorf("unable to read Docker context meta file: %w", err) + } + if err := json.Unmarshal(content, &dockerContext); err != nil { + return nil, fmt.Errorf("unable to parse Docker context meta file: %w", err) + } + if dockerContext.Name != config.CurrentContext { + continue + } + + endpoint, ok := dockerContext.Endpoints["docker"] + if !ok { + return nil, fmt.Errorf("docker endpoint not found in context") + } + + return client.NewClientWithOpts( + client.WithHost(endpoint.Host), + client.WithAPIVersionNegotiation(), + ) + } + } + } + + return envClient() +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/kubernetes/kind/provisioner.go b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/kind/provisioner.go new file mode 100644 index 00000000000..4769311941d --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/kind/provisioner.go @@ -0,0 +1,298 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package kind + +import ( + "bytes" + "context" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/kubernetes" + "io" + "os" + "os/exec" + "runtime" + "strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/e2e-framework/klient" + "sigs.k8s.io/e2e-framework/klient/k8s" + "sigs.k8s.io/e2e-framework/klient/k8s/resources" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/klient/wait/conditions" +) + +const ( + Name = "kind" +) + +const clusterCfg string = ` +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + kubeadmConfigPatches: + - | + kind: ClusterConfiguration + scheduler: + extraArgs: + bind-address: "0.0.0.0" + secure-port: "10259" + controllerManager: + extraArgs: + bind-address: "0.0.0.0" + secure-port: "10257" +` + +func NewProvisioner() common.InstanceProvisioner { + return &provisioner{} +} + +type provisioner struct { + logger common.Logger +} + +func (p *provisioner) Name() string { + return Name +} + +func (p *provisioner) Type() common.ProvisionerType { + return common.ProvisionerTypeK8SCluster +} + +func (p *provisioner) SetLogger(l common.Logger) { + p.logger = l +} + +func (p *provisioner) Supported(batch define.OS) bool { + if batch.Type != define.Kubernetes || batch.Arch != runtime.GOARCH { + return false + } + if batch.Distro != "" && batch.Distro != Name { + // not kind, don't run + return false + } + return true +} + +func (p *provisioner) Provision(ctx context.Context, cfg common.Config, batches []common.OSBatch) ([]common.Instance, error) { + var instances []common.Instance + for _, batch := range batches { + k8sVersion := fmt.Sprintf("v%s", batch.OS.Version) + instanceName := fmt.Sprintf("%s-%s", k8sVersion, batch.Batch.Group) + + agentImageName, err := kubernetes.VariantToImage(batch.OS.DockerVariant) + if err != nil { + return nil, err + } + agentImageName = fmt.Sprintf("%s:%s", agentImageName, cfg.AgentVersion) + agentImage, err := kubernetes.AddK8STestsToImage(ctx, p.logger, agentImageName, runtime.GOARCH) + if err != nil { + return nil, fmt.Errorf("failed to add k8s tests to image %s: %w", agentImageName, err) + } + + exists, err := p.clusterExists(instanceName) + if err != nil { + return nil, fmt.Errorf("failed to check if cluster exists: %w", err) + } + if !exists { + p.logger.Logf("Provisioning kind cluster %s", instanceName) + nodeImage := fmt.Sprintf("kindest/node:%s", k8sVersion) + clusterConfig := strings.NewReader(clusterCfg) + + ret, err := p.kindCmd(clusterConfig, "create", "cluster", "--name", instanceName, "--image", nodeImage, "--config", "-") + if err != nil { + return nil, fmt.Errorf("kind: failed to create cluster %s: %s", instanceName, ret.stderr) + } + + exists, err = p.clusterExists(instanceName) + if err != nil { + return nil, err + } + + if !exists { + return nil, fmt.Errorf("kind: failed to find cluster %s after successful creation", instanceName) + } + } else { + p.logger.Logf("Kind cluster %s already exists", instanceName) + } + + kConfigPath, err := p.writeKubeconfig(instanceName) + if err != nil { + return nil, err + } + + c, err := klient.NewWithKubeConfigFile(kConfigPath) + if err != nil { + return nil, err + } + + if err := p.WaitForControlPlane(c); err != nil { + return nil, err + } + + if err := p.LoadImage(ctx, instanceName, agentImage); err 
!= nil { + return nil, err + } + + instances = append(instances, common.Instance{ + ID: batch.ID, + Name: instanceName, + Provisioner: Name, + IP: "", + Username: "", + RemotePath: "", + Internal: map[string]interface{}{ + "config": kConfigPath, + "version": k8sVersion, + "agent_image": agentImage, + }, + }) + } + + return instances, nil +} + +func (p *provisioner) LoadImage(ctx context.Context, clusterName string, image string) error { + ret, err := p.kindCmd(nil, "load", "docker-image", "--name", clusterName, image) + if err != nil { + return fmt.Errorf("kind: load docker-image %s failed: %w: %s", image, err, ret.stderr) + } + return nil +} + +func (p *provisioner) WaitForControlPlane(client klient.Client) error { + r, err := resources.New(client.RESTConfig()) + if err != nil { + return err + } + for _, sl := range []metav1.LabelSelectorRequirement{ + {Key: "component", Operator: metav1.LabelSelectorOpIn, Values: []string{"etcd", "kube-apiserver", "kube-controller-manager", "kube-scheduler"}}, + {Key: "k8s-app", Operator: metav1.LabelSelectorOpIn, Values: []string{"kindnet", "kube-dns", "kube-proxy"}}, + } { + selector, err := metav1.LabelSelectorAsSelector( + &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + sl, + }, + }, + ) + if err != nil { + return err + } + err = wait.For(conditions.New(r).ResourceListMatchN(&v1.PodList{}, len(sl.Values), func(object k8s.Object) bool { + pod, ok := object.(*v1.Pod) + if !ok { + return false + } + + for _, cond := range pod.Status.Conditions { + if cond.Type != v1.PodReady { + continue + } + + return cond.Status == v1.ConditionTrue + } + + return false + }, resources.WithLabelSelector(selector.String()))) + if err != nil { + return err + } + } + return nil +} + +func (p *provisioner) Clean(ctx context.Context, cfg common.Config, instances []common.Instance) error { + // doesn't execute in parallel for the same reasons in Provision + // multipass just cannot handle it + for _, instance := range instances { + func(instance common.Instance) { + err := p.deleteCluster(instance.ID) + if err != nil { + // prevent a failure from stopping the other instances and clean + p.logger.Logf("Delete instance %s failed: %s", instance.Name, err) + } + }(instance) + } + + return nil +} + +func (p *provisioner) clusterExists(name string) (bool, error) { + ret, err := p.kindCmd(nil, "get", "clusters") + if err != nil { + return false, err + } + + for _, c := range strings.Split(ret.stdout, "\n") { + if c == name { + return true, nil + } + } + return false, nil +} + +func (p *provisioner) writeKubeconfig(name string) (string, error) { + kubecfg := fmt.Sprintf("%s-kubecfg", name) + + ret, err := p.kindCmd(nil, "get", "kubeconfig", "--name", name) + if err != nil { + return "", fmt.Errorf("kind get kubeconfig: stderr: %s: %w", ret.stderr, err) + } + + file, err := os.CreateTemp("", fmt.Sprintf("kind-cluster-%s", kubecfg)) + if err != nil { + return "", fmt.Errorf("kind kubeconfig file: %w", err) + } + defer file.Close() + + if n, err := io.WriteString(file, ret.stdout); n == 0 || err != nil { + return "", fmt.Errorf("kind kubecfg file: bytes copied: %d: %w]", n, err) + } + + return file.Name(), nil +} + +type cmdResult struct { + stdout string + stderr string +} + +func (p *provisioner) kindCmd(stdIn io.Reader, args ...string) (cmdResult, error) { + + var stdout, stderr bytes.Buffer + cmd := exec.Command("kind", args...) 
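+	// stdout and stderr are captured separately so callers can surface
+	// kind's stderr in their error messages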
+ cmd.Stdout = &stdout + cmd.Stderr = &stderr + if stdIn != nil { + cmd.Stdin = stdIn + } + err := cmd.Run() + return cmdResult{ + stdout: stdout.String(), + stderr: stderr.String(), + }, err +} + +func (p *provisioner) deleteCluster(name string) error { + _, err := p.kindCmd(nil, "delete", "cluster", "--name", name) + return err +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/kubernetes/runner.go b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/runner.go new file mode 100644 index 00000000000..6c5e7641b30 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/runner.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package kubernetes + +import ( + "context" + "errors" + "fmt" + devtools "github.com/elastic/beats/v7/dev-tools/mage" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh" + "os" + "path/filepath" + "strings" + "time" +) + +// Runner is a handler for running tests against a Kubernetes cluster +type Runner struct{} + +// Prepare configures the host for running the test +func (Runner) Prepare(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, arch string, goVersion string) error { + return nil +} + +// Copy places the required files on the host +func (Runner) Copy(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, repoArchive string, builds []common.Build) error { + return nil +} + +// Run the test +func (Runner) Run(ctx context.Context, verbose bool, sshClient ssh.SSHClient, logger common.Logger, agentVersion string, prefix string, batch define.Batch, env map[string]string) (common.OSRunnerResult, error) { + var goTestFlags []string + rawTestFlags := os.Getenv("GOTEST_FLAGS") + if rawTestFlags != "" { + goTestFlags = strings.Split(rawTestFlags, " ") + } + + maxDuration := 2 * time.Hour + var result []common.OSRunnerPackageResult + for _, pkg := range batch.Tests { + packageTestsStrBuilder := strings.Builder{} + packageTestsStrBuilder.WriteString("^(") + for idx, test := range pkg.Tests { + if idx > 0 { + packageTestsStrBuilder.WriteString("|") + } + packageTestsStrBuilder.WriteString(test.Name) + } + packageTestsStrBuilder.WriteString(")$") + + testPrefix := fmt.Sprintf("%s.%s", prefix, filepath.Base(pkg.Name)) + testName := fmt.Sprintf("k8s-%s", testPrefix) + fileName := fmt.Sprintf("build/TEST-go-%s", testName) + extraFlags := make([]string, 0, len(goTestFlags)+6) + if len(goTestFlags) > 0 { + extraFlags = append(extraFlags, goTestFlags...) 
+ } + extraFlags = append(extraFlags, "-test.shuffle", "on", + "-test.timeout", maxDuration.String(), "-test.run", packageTestsStrBuilder.String()) + + env["AGENT_VERSION"] = agentVersion + env["TEST_DEFINE_PREFIX"] = testPrefix + + buildFolderAbsPath, err := filepath.Abs("build") + if err != nil { + return common.OSRunnerResult{}, err + } + + podLogsPath := filepath.Join(buildFolderAbsPath, fmt.Sprintf("k8s-logs-%s", testPrefix)) + err = os.Mkdir(podLogsPath, 0755) + if err != nil && !errors.Is(err, os.ErrExist) { + return common.OSRunnerResult{}, err + } + + env["K8S_TESTS_POD_LOGS_BASE"] = podLogsPath + + params := devtools.GoTestArgs{ + TestName: testName, + OutputFile: fileName + ".out", + JUnitReportFile: fileName + ".xml", + Packages: []string{pkg.Name}, + Tags: []string{"integration", "kubernetes"}, + ExtraFlags: extraFlags, + Env: env, + } + err = devtools.GoTest(ctx, params) + if err != nil { + return common.OSRunnerResult{}, err + } + + var resultPkg common.OSRunnerPackageResult + resultPkg.Name = pkg.Name + outputPath := fmt.Sprintf("build/TEST-go-k8s-%s.%s", prefix, filepath.Base(pkg.Name)) + resultPkg.Output, err = os.ReadFile(outputPath + ".out") + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("failed to fetch test output at %s.out", outputPath) + } + resultPkg.JSONOutput, err = os.ReadFile(outputPath + ".out.json") + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("failed to fetch test output at %s.out.json", outputPath) + } + resultPkg.XMLOutput, err = os.ReadFile(outputPath + ".xml") + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("failed to fetch test output at %s.xml", outputPath) + } + result = append(result, resultPkg) + } + + return common.OSRunnerResult{ + Packages: result, + }, nil +} + +// Diagnostics gathers any diagnostics from the host. +func (Runner) Diagnostics(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, destination string) error { + // does nothing for kubernetes + return nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/kubernetes/supported.go b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/supported.go new file mode 100644 index 00000000000..125e7f92fe7 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/supported.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package kubernetes + +import ( + "errors" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" +) + +// ErrUnknownDockerVariant is the error returned when the variant is unknown.
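+// An illustrative (hypothetical) caller-side check against this sentinel, +// using the DockerVariant field that GetSupported fills in below: +// +//	image, err := VariantToImage(os.DockerVariant) +//	if errors.Is(err, ErrUnknownDockerVariant) { +//		// an unsupported variant was requested; fail early +//	}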
+var ErrUnknownDockerVariant = errors.New("unknown docker variant type") + +// arches defines the list of supported architectures of Kubernetes +var arches = []string{define.AMD64, define.ARM64} + +// versions defines the list of supported version of Kubernetes. +var versions = []define.OS{ + // Kubernetes 1.31 + { + Type: define.Kubernetes, + Version: "1.31.0", + }, + // Kubernetes 1.30 + { + Type: define.Kubernetes, + Version: "1.30.2", + }, + // Kubernetes 1.29 + { + Type: define.Kubernetes, + Version: "1.29.4", + }, + // Kubernetes 1.28 + { + Type: define.Kubernetes, + Version: "1.28.9", + }, +} + +// variants defines the list of variants and the image name for that variant. +// +// Note: This cannot be a simple map as the order matters. We need the +// one that we want to be the default test to be first. +var variants = []struct { + Name string + Image string +}{ + { + Name: "basic", + Image: "docker.elastic.co/beats/elastic-agent", + }, + { + Name: "ubi", + Image: "docker.elastic.co/beats/elastic-agent-ubi", + }, + { + Name: "wolfi", + Image: "docker.elastic.co/beats/elastic-agent-wolfi", + }, + { + Name: "complete", + Image: "docker.elastic.co/beats/elastic-agent-complete", + }, + { + Name: "complete-wolfi", + Image: "docker.elastic.co/beats/elastic-agent-complete-wolfi", + }, + { + Name: "cloud", + Image: "docker.elastic.co/beats-ci/elastic-agent-cloud", + }, + { + Name: "service", + Image: "docker.elastic.co/beats-ci/elastic-agent-service", + }, +} + +// GetSupported returns the list of supported OS types for Kubernetes. +func GetSupported() []define.OS { + supported := make([]define.OS, 0, len(versions)*len(variants)*2) + for _, a := range arches { + for _, v := range versions { + for _, variant := range variants { + c := v + c.Arch = a + c.DockerVariant = variant.Name + supported = append(supported, c) + } + } + } + return supported +} + +// VariantToImage returns the image name from the variant. +func VariantToImage(variant string) (string, error) { + for _, v := range variants { + if v.Name == variant { + return v.Image, nil + } + } + return "", ErrUnknownDockerVariant +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/linux/debian.go b/dev-tools/mage/target/srvrlesstest/testing/linux/debian.go new file mode 100644 index 00000000000..acecdb7969a --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/linux/debian.go @@ -0,0 +1,219 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package linux + +import ( + "context" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh" + "path" + "path/filepath" + "strings" + "time" +) + +// DebianRunner is a handler for running tests on Debian-based Linux hosts +type DebianRunner struct{} + +// Prepare the test +func (DebianRunner) Prepare(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, arch string, goVersion string) error { + // prepare build-essential and unzip + // + // apt-get update and install are flaky enough that we have to place this in a loop: in some cases + // apt-get update says it works, but it actually fails. So we allow 3 tries here + var err error + for i := 0; i < 3; i++ { + err = func() error { + updateCtx, updateCancel := context.WithTimeout(ctx, 3*time.Minute) + defer updateCancel() + logger.Logf("Running apt-get update") + // `-o APT::Update::Error-Mode=any` ensures that any warning is treated as an error, so the retry + // will occur (without this we get random failures) + stdOut, errOut, err := sshClient.ExecWithRetry(updateCtx, "sudo", []string{"apt-get", "update", "-o APT::Update::Error-Mode=any"}, 15*time.Second) + if err != nil { + return fmt.Errorf("failed to run apt-get update: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + return func() error { + // golang is installed below without using the package manager, which ensures that the exact + // version of golang is used for running the tests + installCtx, installCancel := context.WithTimeout(ctx, 1*time.Minute) + defer installCancel() + logger.Logf("Install build-essential and unzip") + stdOut, errOut, err = sshClient.ExecWithRetry(installCtx, "sudo", []string{"apt-get", "install", "-y", "build-essential", "unzip"}, 5*time.Second) + if err != nil { + return fmt.Errorf("failed to install build-essential and unzip: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + return nil + }() + }() + if err == nil { + // installation was successful + break + } + logger.Logf("Failed to install build-essential and unzip; will wait 15 seconds and try again") + <-time.After(15 * time.Second) + } + if err != nil { + // after 3 tries it still failed + return err + } + + // prepare golang + logger.Logf("Install golang %s (%s)", goVersion, arch) + downloadURL := fmt.Sprintf("https://go.dev/dl/go%s.linux-%s.tar.gz", goVersion, arch) + filename := path.Base(downloadURL) + stdOut, errOut, err := sshClient.Exec(ctx, "curl", []string{"-Ls", downloadURL, "--output", filename}, nil) + if err != nil { + return fmt.Errorf("failed to download go from %s with curl: %w (stdout: %s, stderr: %s)", downloadURL, err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(ctx, "sudo", []string{"tar", "-C", "/usr/local", "-xzf", filename}, nil) + if err != nil { + return fmt.Errorf("failed to extract go to /usr/local with tar: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(ctx, "sudo", []string{"ln", "-s", "/usr/local/go/bin/go", "/usr/bin/go"}, nil) + if err != nil { + return fmt.Errorf("failed to symlink /usr/local/go/bin/go to /usr/bin/go: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(ctx, "sudo", []string{"ln", "-s", "/usr/local/go/bin/gofmt", "/usr/bin/gofmt"}, nil) + if err != nil { + return fmt.Errorf("failed to symlink /usr/local/go/bin/gofmt to
/usr/bin/gofmt: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + + return nil +} + +// Copy places the required files on the host. +func (DebianRunner) Copy(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, repoArchive string, builds []common.Build) error { + return linuxCopy(ctx, sshClient, logger, repoArchive, builds) +} + +// Run the test +func (DebianRunner) Run(ctx context.Context, verbose bool, sshClient ssh.SSHClient, logger common.Logger, agentVersion string, prefix string, batch define.Batch, env map[string]string) (common.OSRunnerResult, error) { + var tests []string + for _, pkg := range batch.Tests { + for _, test := range pkg.Tests { + tests = append(tests, fmt.Sprintf("%s:%s", pkg.Name, test.Name)) + } + } + var sudoTests []string + for _, pkg := range batch.SudoTests { + for _, test := range pkg.Tests { + sudoTests = append(sudoTests, fmt.Sprintf("%s:%s", pkg.Name, test.Name)) + } + } + + logArg := "" + if verbose { + logArg = "-v" + } + var result common.OSRunnerResult + if len(tests) > 0 { + vars := fmt.Sprintf(`GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH" AGENT_VERSION="%s" TEST_DEFINE_PREFIX="%s" TEST_DEFINE_TESTS="%s"`, agentVersion, prefix, strings.Join(tests, ",")) + vars = extendVars(vars, env) + + script := fmt.Sprintf(`cd agent && %s ~/go/bin/mage %s integration:testOnRemote`, vars, logArg) + results, err := runTests(ctx, logger, "non-sudo", prefix, script, sshClient, batch.Tests) + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("error running non-sudo tests: %w", err) + } + result.Packages = results + } + + if len(sudoTests) > 0 { + prefix := fmt.Sprintf("%s-sudo", prefix) + vars := fmt.Sprintf(`GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH" AGENT_VERSION="%s" TEST_DEFINE_PREFIX="%s" TEST_DEFINE_TESTS="%s"`, agentVersion, prefix, strings.Join(sudoTests, ",")) + vars = extendVars(vars, env) + script := fmt.Sprintf(`cd agent && sudo %s ~/go/bin/mage %s integration:testOnRemote`, vars, logArg) + + results, err := runTests(ctx, logger, "sudo", prefix, script, sshClient, batch.SudoTests) + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("error running sudo tests: %w", err) + } + result.SudoPackages = results + } + + return result, nil +} + +// Diagnostics gathers any diagnostics from the host. 
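+// It defers to the shared linuxDiagnostics helper, which is also used by the other Linux runners.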
+func (DebianRunner) Diagnostics(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, destination string) error { + return linuxDiagnostics(ctx, sshClient, logger, destination) +} + +func runTests(ctx context.Context, logger common.Logger, name string, prefix string, script string, sshClient ssh.SSHClient, tests []define.BatchPackageTests) ([]common.OSRunnerPackageResult, error) { + execTest := strings.NewReader(script) + + session, err := sshClient.NewSession() + if err != nil { + return nil, fmt.Errorf("failed to start session: %w", err) + } + + session.Stdout = common.NewPrefixOutput(logger, fmt.Sprintf("Test output (%s) (stdout): ", name)) + session.Stderr = common.NewPrefixOutput(logger, fmt.Sprintf("Test output (%s) (stderr): ", name)) + session.Stdin = execTest + + // allowed to fail because tests might fail + logger.Logf("Running %s tests...", name) + err = session.Run("bash") + if err != nil { + logger.Logf("%s tests failed: %s", name, err) + } + // this seems to always return an error + _ = session.Close() + + var result []common.OSRunnerPackageResult + // fetch the contents for each package + for _, pkg := range tests { + resultPkg, err := getRunnerPackageResult(ctx, sshClient, pkg, prefix) + if err != nil { + return nil, err + } + result = append(result, resultPkg) + } + return result, nil +} + +func getRunnerPackageResult(ctx context.Context, sshClient ssh.SSHClient, pkg define.BatchPackageTests, prefix string) (common.OSRunnerPackageResult, error) { + var err error + var resultPkg common.OSRunnerPackageResult + resultPkg.Name = pkg.Name + outputPath := fmt.Sprintf("$HOME/agent/build/TEST-go-remote-%s.%s", prefix, filepath.Base(pkg.Name)) + resultPkg.Output, err = sshClient.GetFileContents(ctx, outputPath+".out") + if err != nil { + return common.OSRunnerPackageResult{}, fmt.Errorf("failed to fetch test output at %s.out", outputPath) + } + resultPkg.JSONOutput, err = sshClient.GetFileContents(ctx, outputPath+".out.json") + if err != nil { + return common.OSRunnerPackageResult{}, fmt.Errorf("failed to fetch test output at %s.out.json", outputPath) + } + resultPkg.XMLOutput, err = sshClient.GetFileContents(ctx, outputPath+".xml") + if err != nil { + return common.OSRunnerPackageResult{}, fmt.Errorf("failed to fetch test output at %s.xml", outputPath) + } + return resultPkg, nil +} + +func extendVars(vars string, env map[string]string) string { + var envStr []string + for k, v := range env { + envStr = append(envStr, fmt.Sprintf(`%s="%s"`, k, v)) + } + return fmt.Sprintf("%s %s", vars, strings.Join(envStr, " ")) +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/linux/linux.go b/dev-tools/mage/target/srvrlesstest/testing/linux/linux.go new file mode 100644 index 00000000000..2aa9564e506 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/linux/linux.go @@ -0,0 +1,169 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "context" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh" + "os" + "path/filepath" + "strings" +) + +func linuxDiagnostics(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, destination string) error { + // take ownership, as sudo tests will create files with root permissions (allowed to fail in the case the directory doesn't exist) + diagnosticDir := "$HOME/agent/build/diagnostics" + _, _, _ = sshClient.Exec(ctx, "sudo", []string{"chown", "-R", "$USER:$USER", diagnosticDir}, nil) + stdOut, _, err := sshClient.Exec(ctx, "ls", []string{"-1", diagnosticDir}, nil) + if err != nil { + //nolint:nilerr // failed to list the directory, probably don't have any diagnostics (do nothing) + return nil + } + eachDiagnostic := strings.Split(string(stdOut), "\n") + for _, filename := range eachDiagnostic { + filename = strings.TrimSpace(filename) + if filename == "" { + continue + } + + // don't use filepath.Join as we need this to work on Windows as well + // this is because if we use `filepath.Join` on a Windows host connected to a Linux host + // it will use a `\` and that will be incorrect for Linux + fp := fmt.Sprintf("%s/%s", diagnosticDir, filename) + // use filepath.Join on this path because it's a path on this specific host platform + dp := filepath.Join(destination, filename) + logger.Logf("Copying diagnostic %s", filename) + out, err := os.Create(dp) + if err != nil { + return fmt.Errorf("failed to create file %s: %w", dp, err) + } + err = sshClient.GetFileContentsOutput(ctx, fp, out) + _ = out.Close() + if err != nil { + return fmt.Errorf("failed to copy file from remote host to %s: %w", dp, err) + } + } + return nil +} + +func linuxCopy(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, repoArchive string, builds []common.Build) error { + // copy the archive and extract it on the host + logger.Logf("Copying repo") + destRepoName := filepath.Base(repoArchive) + err := sshClient.Copy(repoArchive, destRepoName) + if err != nil { + return fmt.Errorf("failed to SCP repo archive %s: %w", repoArchive, err) + } + + // remove build paths, in cases where the build path is different from agent. + for _, build := range builds { + for _, remoteBuildPath := range []string{build.Path, build.SHA512Path} { + relativeAgentDir := filepath.Join("agent", remoteBuildPath) + _, _, err := sshClient.Exec(ctx, "sudo", []string{"rm", "-rf", relativeAgentDir}, nil) + // doesn't need to be a fatal error. + if err != nil { + logger.Logf("error removing build dir %s: %v", relativeAgentDir, err) + } + } + } + + // ensure that the agent directory is removed (possibly it already exists if the instance was already used) + stdout, stderr, err := sshClient.Exec(ctx, + "sudo", []string{"rm", "-rf", "agent"}, nil) + if err != nil { + return fmt.Errorf( + "failed to remove agent directory before unzipping new one: %w.
stdout: %q, stderr: %q", + err, stdout, stderr) + } + + stdOut, errOut, err := sshClient.Exec(ctx, "unzip", []string{destRepoName, "-d", "agent"}, nil) + if err != nil { + return fmt.Errorf("failed to unzip %s to agent directory: %w (stdout: %s, stderr: %s)", destRepoName, err, stdOut, errOut) + } + + // prepare for testing + logger.Logf("Running make mage and prepareOnRemote") + envs := `GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH"` + installMage := strings.NewReader(fmt.Sprintf(`cd agent && %s make mage && %s mage integration:prepareOnRemote`, envs, envs)) + stdOut, errOut, err = sshClient.Exec(ctx, "bash", nil, installMage) + if err != nil { + return fmt.Errorf("failed to perform make mage and prepareOnRemote: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + + // determine if the build needs to be replaced on the host + // if it already exists and the SHA512 are the same contents, then + // there is no reason to waste time uploading the build + for _, build := range builds { + copyBuild := true + localSHA512, err := os.ReadFile(build.SHA512Path) + if err != nil { + return fmt.Errorf("failed to read local SHA52 contents %s: %w", build.SHA512Path, err) + } + hostSHA512Path := filepath.Base(build.SHA512Path) + hostSHA512, err := sshClient.GetFileContents(ctx, hostSHA512Path) + if err == nil { + if string(localSHA512) == string(hostSHA512) { + logger.Logf("Skipping copy agent build %s; already the same", filepath.Base(build.Path)) + copyBuild = false + } + } + + if copyBuild { + // ensure the existing copies are removed first + toRemove := filepath.Base(build.Path) + stdOut, errOut, err = sshClient.Exec(ctx, + "sudo", []string{"rm", "-f", toRemove}, nil) + if err != nil { + return fmt.Errorf("failed to remove %q: %w (stdout: %q, stderr: %q)", + toRemove, err, stdOut, errOut) + } + + toRemove = filepath.Base(build.SHA512Path) + stdOut, errOut, err = sshClient.Exec(ctx, + "sudo", []string{"rm", "-f", toRemove}, nil) + if err != nil { + return fmt.Errorf("failed to remove %q: %w (stdout: %q, stderr: %q)", + toRemove, err, stdOut, errOut) + } + + logger.Logf("Copying agent build %s", filepath.Base(build.Path)) + } + + for _, buildPath := range []string{build.Path, build.SHA512Path} { + if copyBuild { + err = sshClient.Copy(buildPath, filepath.Base(buildPath)) + if err != nil { + return fmt.Errorf("failed to SCP build %s: %w", filepath.Base(buildPath), err) + } + } + insideAgentDir := filepath.Join("agent", buildPath) + stdOut, errOut, err = sshClient.Exec(ctx, "mkdir", []string{"-p", filepath.Dir(insideAgentDir)}, nil) + if err != nil { + return fmt.Errorf("failed to create %s directory: %w (stdout: %s, stderr: %s)", filepath.Dir(insideAgentDir), err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(ctx, "ln", []string{filepath.Base(buildPath), insideAgentDir}, nil) + if err != nil { + return fmt.Errorf("failed to hard link %s to %s: %w (stdout: %s, stderr: %s)", filepath.Base(buildPath), insideAgentDir, err, stdOut, errOut) + } + } + } + + return nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/linux/rhel.go b/dev-tools/mage/target/srvrlesstest/testing/linux/rhel.go new file mode 100644 index 00000000000..f8d0bb1f6d2 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/linux/rhel.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "context" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh" + "path" + "strings" + "time" +) + +// RhelRunner is a handler for running tests on Red Hat Enterprise Linux +type RhelRunner struct{} + +// Prepare configures the host for running the test +func (RhelRunner) Prepare(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, arch string, goVersion string) error { + logger.Logf("Install development tools") + dnfCtx, dnfCancel := context.WithTimeout(ctx, 20*time.Minute) + defer dnfCancel() + stdOut, errOut, err := sshClient.ExecWithRetry(dnfCtx, "sudo", []string{"dnf", "-y", "-v", "group", "install", "\"Development Tools\""}, 15*time.Second) + if err != nil { + return fmt.Errorf("failed to run 'dnf group install \"Development Tools\"': %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + + // install golang + logger.Logf("Install golang %s (%s)", goVersion, arch) + goCtx, goCancel := context.WithTimeout(ctx, 20*time.Minute) + defer goCancel() + downloadURL := fmt.Sprintf("https://go.dev/dl/go%s.linux-%s.tar.gz", goVersion, arch) + filename := path.Base(downloadURL) + stdOut, errOut, err = sshClient.Exec(goCtx, "curl", []string{"-Ls", downloadURL, "--output", filename}, nil) + if err != nil { + return fmt.Errorf("failed to download go from %s with curl: %w (stdout: %s, stderr: %s)", downloadURL, err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(goCtx, "sudo", []string{"tar", "-C", "/usr/local", "-xzf", filename}, nil) + if err != nil { + return fmt.Errorf("failed to extract go to /usr/local with tar: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(goCtx, "sudo", []string{"ln", "-s", "/usr/local/go/bin/go", "/usr/bin/go"}, nil) + if err != nil { + return fmt.Errorf("failed to symlink /usr/local/go/bin/go to /usr/bin/go: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(goCtx, "sudo", []string{"ln", "-s", "/usr/local/go/bin/gofmt", "/usr/bin/gofmt"}, nil) + if err != nil { + return fmt.Errorf("failed to symlink /usr/local/go/bin/gofmt to /usr/bin/gofmt: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + + return nil +} + +// Copy places the required files on the host +func (RhelRunner) Copy(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, repoArchive string, builds []common.Build) error { + return linuxCopy(ctx, sshClient, logger, repoArchive, builds) +} + +// Run the test +func (RhelRunner) Run(ctx context.Context, verbose bool, sshClient ssh.SSHClient, logger common.Logger, agentVersion string, prefix string, batch define.Batch, env map[string]string) (common.OSRunnerResult, error) { + var tests []string + for _, pkg := range batch.Tests {
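+ // flatten each package/test pair into a "pkg:test" token; the joined list reaches the remote mage target through TEST_DEFINE_TESTS below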
+ for _, test := range pkg.Tests { + tests = append(tests, fmt.Sprintf("%s:%s", pkg.Name, test.Name)) + } + } + var sudoTests []string + for _, pkg := range batch.SudoTests { + for _, test := range pkg.Tests { + sudoTests = append(sudoTests, fmt.Sprintf("%s:%s", pkg.Name, test.Name)) + } + } + + logArg := "" + if verbose { + logArg = "-v" + } + var result common.OSRunnerResult + if len(tests) > 0 { + vars := fmt.Sprintf(`GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH" AGENT_VERSION="%s" TEST_DEFINE_PREFIX="%s" TEST_DEFINE_TESTS="%s"`, agentVersion, prefix, strings.Join(tests, ",")) + vars = extendVars(vars, env) + + script := fmt.Sprintf(`cd agent && %s ~/go/bin/mage %s integration:testOnRemote`, vars, logArg) + results, err := runTests(ctx, logger, "non-sudo", prefix, script, sshClient, batch.Tests) + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("error running non-sudo tests: %w", err) + } + result.Packages = results + } + + if len(sudoTests) > 0 { + prefix := fmt.Sprintf("%s-sudo", prefix) + vars := fmt.Sprintf(`GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH:/usr/sbin" AGENT_VERSION="%s" TEST_DEFINE_PREFIX="%s" TEST_DEFINE_TESTS="%s"`, agentVersion, prefix, strings.Join(sudoTests, ",")) + vars = extendVars(vars, env) + script := fmt.Sprintf(`cd agent && sudo %s ~/go/bin/mage %s integration:testOnRemote`, vars, logArg) + + results, err := runTests(ctx, logger, "sudo", prefix, script, sshClient, batch.SudoTests) + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("error running sudo tests: %w", err) + } + result.SudoPackages = results + } + + return result, nil +} + +// Diagnostics gathers any diagnostics from the host. +func (RhelRunner) Diagnostics(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, destination string) error { + return linuxDiagnostics(ctx, sshClient, logger, destination) +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/log.go b/dev-tools/mage/target/srvrlesstest/testing/log.go new file mode 100644 index 00000000000..eeedc57fc88 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/log.go @@ -0,0 +1,157 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package testing + +import ( + "bytes" + "encoding/json" + "errors" + "strings" + + "github.com/elastic/elastic-agent-libs/logp" +) + +// Logger is log interface that matches *testing.T. +type Logger interface { + // Log logs the arguments. + Log(args ...any) + // Logf logs the formatted arguments. + Logf(format string, args ...any) +} + +// logWatcher is an `io.Writer` that processes the log lines outputted from the spawned Elastic Agent. +// +// `Write` handles parsing lines as either ndjson or plain text. 
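+// A minimal wiring sketch (illustrative only, not part of this change; assumes a +// *testing.T, which satisfies the Logger interface): +// +//	watcher := newLogWatcher(t) +//	cmd.Stdout = watcher +//	go func() { +//		err := <-watcher.Watch() +//		t.Errorf("agent logged an error: %v", err) +//	}()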
+type logWatcher struct { + remainder []byte + replicate Logger + alert chan error +} + +func newLogWatcher(replicate Logger) *logWatcher { + return &logWatcher{ + replicate: replicate, + alert: make(chan error), + } +} + +// Watch returns the channel that will get an error when an error is identified from the log. +func (r *logWatcher) Watch() <-chan error { + return r.alert +} + +// Write implements the `io.Writer` interface. +func (r *logWatcher) Write(p []byte) (int, error) { + if len(p) == 0 { + // nothing to do + return 0, nil + } + offset := 0 + for { + idx := bytes.IndexByte(p[offset:], '\n') + if idx < 0 { + // no complete line left; keep the remainder for the next call + r.remainder = append(r.remainder, p[offset:]...) + return len(p), nil + } + + var line []byte + if r.remainder != nil { + line = r.remainder + r.remainder = nil + line = append(line, p[offset:offset+idx]...) + } else { + line = append(line, p[offset:offset+idx]...) + } + offset += idx + 1 + // drop '\r' from line (needed for Windows) + if len(line) > 0 && line[len(line)-1] == '\r' { + line = line[0 : len(line)-1] + } + if len(line) == 0 { + // empty line + continue + } + str := strings.TrimSpace(string(line)) + if str == "" { + // whitespace-only line; guard before indexing str[0] below + continue + } + // try to parse line as JSON + if str[0] == '{' && r.handleJSON(str) { + // handled as JSON + continue + } + // treated as standard text since it's not JSON, just replicate + if r.replicate != nil { + r.replicate.Log(str) + } + } +} + +func (r *logWatcher) handleJSON(line string) bool { + var evt map[string]interface{} + if err := json.Unmarshal([]byte(line), &evt); err != nil { + return false + } + if r.replicate != nil { + r.replicate.Log(line) + } + lvl := getLevel(evt, "log.level") + msg := getMessage(evt, "message") + if lvl == logp.ErrorLevel { + r.alert <- errors.New(msg) + } + return true +} + +func getLevel(evt map[string]interface{}, key string) logp.Level { + lvl := logp.InfoLevel + err := unmarshalLevel(&lvl, getStrVal(evt, key)) + if err == nil { + delete(evt, key) + } + return lvl +} + +func unmarshalLevel(lvl *logp.Level, val string) error { + if val == "" { + return errors.New("empty val") + } else if val == "trace" { + // logp doesn't handle the trace level; we map it to debug + *lvl = logp.DebugLevel + return nil + } + return lvl.Unpack(val) +} + +func getMessage(evt map[string]interface{}, key string) string { + msg := getStrVal(evt, key) + if msg != "" { + delete(evt, key) + } + return msg +} + +func getStrVal(evt map[string]interface{}, key string) string { + raw, ok := evt[key] + if !ok { + return "" + } + str, ok := raw.(string) + if !ok { + return "" + } + return str +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/multipas/provisioner.go b/dev-tools/mage/target/srvrlesstest/testing/multipas/provisioner.go new file mode 100644 index 00000000000..d0718d07833 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/multipas/provisioner.go @@ -0,0 +1,330 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package multipass + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/core/process" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/runner" + "os" + "os/exec" + "path/filepath" + "runtime" + "time" + + "gopkg.in/yaml.v2" +) + +const ( + Ubuntu = "ubuntu" + Name = "multipass" +) + +type provisioner struct { + logger common.Logger +} + +// NewProvisioner creates the multipass provisioner +func NewProvisioner() common.InstanceProvisioner { + return &provisioner{} +} + +func (p *provisioner) Name() string { + return Name +} + +func (p *provisioner) SetLogger(l common.Logger) { + p.logger = l +} + +func (p *provisioner) Type() common.ProvisionerType { + return common.ProvisionerTypeVM +} + +// Supported returns true if multipass supports this OS. +// +// multipass only supports Ubuntu on the same architecture as the running host. +func (p *provisioner) Supported(os define.OS) bool { + if os.Type != define.Linux { + return false + } + if os.Distro != Ubuntu { + return false + } + if os.Version != "20.04" && os.Version != "22.04" && os.Version != "24.04" { + return false + } + // multipass only supports the same architecture of the host + if os.Arch != runtime.GOARCH { + return false + } + return true +} + +func (p *provisioner) Provision(ctx context.Context, cfg common.Config, batches []common.OSBatch) ([]common.Instance, error) { + // this doesn't provision the instances in parallel on purpose + // multipass cannot handle it, it either results in instances sharing the same IP address + // or some instances stuck in Starting state + for _, batch := range batches { + err := func(batch common.OSBatch) error { + launchCtx, launchCancel := context.WithTimeout(ctx, 5*time.Minute) + defer launchCancel() + err := p.launch(launchCtx, cfg, batch) + if err != nil { + return fmt.Errorf("instance %s failed: %w", batch.ID, err) + } + return nil + }(batch) + if err != nil { + return nil, err + } + } + + var results []common.Instance + instances, err := p.list(ctx) + if err != nil { + return nil, err + } + for _, batch := range batches { + mi, ok := instances[batch.ID] + if !ok { + return nil, fmt.Errorf("failed to find %s in multipass list output", batch.ID) + } + if mi.State != "Running" { + return nil, fmt.Errorf("instance %s is not marked as running", batch.ID) + } + results = append(results, common.Instance{ + ID: batch.ID, + Provisioner: Name, + Name: batch.ID, + IP: mi.IPv4[0], + Username: "ubuntu", + RemotePath: "/home/ubuntu/agent", + Internal: nil, + }) + } + return results, nil +} + +// Clean cleans up all provisioned resources. 
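+// Deletion failures are logged and skipped so that one bad instance cannot block cleanup of the rest.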
+func (p *provisioner) Clean(ctx context.Context, _ common.Config, instances []common.Instance) error { + // doesn't execute in parallel for the same reasons as in Provision; + // multipass just cannot handle it + for _, instance := range instances { + func(instance common.Instance) { + deleteCtx, deleteCancel := context.WithTimeout(ctx, 5*time.Minute) + defer deleteCancel() + err := p.delete(deleteCtx, instance) + if err != nil { + // prevent a failure from stopping the cleanup of the other instances + p.logger.Logf("Delete instance %s failed: %s", instance.Name, err) + } + }(instance) + } + return nil +} + +// launch creates an instance. +func (p *provisioner) launch(ctx context.Context, cfg common.Config, batch common.OSBatch) error { + // check if the instance already exists + err := p.ensureInstanceNotExist(ctx, batch) + if err != nil { + p.logger.Logf( + "could not check whether multipass instance %q already exists, moving on anyway. Err: %v", batch.ID, err) + } + args := []string{ + "launch", + "-c", "2", + "-d", "50G", // need decent size for all the tests + "-m", "4G", + "-n", batch.ID, + "--cloud-init", "-", + batch.OS.Version, + } + + publicKeyPath := filepath.Join(cfg.StateDir, "id_rsa.pub") + publicKey, err := os.ReadFile(publicKeyPath) + if err != nil { + return fmt.Errorf("failed to read SSH key to send to multipass instance at %s: %w", publicKeyPath, err) + } + + var cloudCfg cloudinitConfig + cloudCfg.SSHAuthorizedKeys = []string{string(publicKey)} + cloudCfgData, err := yaml.Marshal(&cloudCfg) + if err != nil { + return fmt.Errorf("failed to marshal cloud-init configuration: %w", err) + } + + var output bytes.Buffer + p.logger.Logf("Launching multipass image %s", batch.ID) + proc, err := process.Start("multipass", process.WithContext(ctx), process.WithArgs(args), process.WithCmdOptions(runner.AttachOut(&output), runner.AttachErr(&output))) + if err != nil { + return fmt.Errorf("failed to run multipass launch: %w", err) + } + _, err = proc.Stdin.Write([]byte(fmt.Sprintf("#cloud-config\n%s", cloudCfgData))) + if err != nil { + _ = proc.Stdin.Close() + _ = proc.Kill() + <-proc.Wait() + // print the output so it's clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to write cloudinit to stdin: %w", err) + } + _ = proc.Stdin.Close() + ps := <-proc.Wait() + if !ps.Success() { + // print the output so it's clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to run multipass launch: exited with code: %d", ps.ExitCode()) + } + return nil +} + +func (p *provisioner) ensureInstanceNotExist(ctx context.Context, batch common.OSBatch) error { + var output bytes.Buffer + var stdErr bytes.Buffer + proc, err := process.Start("multipass", + process.WithContext(ctx), + process.WithArgs([]string{"list", "--format", "json"}), + process.WithCmdOptions( + runner.AttachOut(&output), + runner.AttachErr(&stdErr))) + if err != nil { + return fmt.Errorf("multipass list failed to run: %w", err) + } + + state := <-proc.Wait() + if !state.Success() { + msg := fmt.Sprintf("multipass list exited with non-zero status: %s", + state.String()) + p.logger.Logf("%s", msg) + p.logger.Logf("output: %s", output.String()) + p.logger.Logf("stderr: %s", stdErr.String()) + return errors.New(msg) + } + list := struct { + List []struct { + Ipv4 []string `json:"ipv4"` + Name string `json:"name"` + Release string `json:"release"` + State string `json:"state"` + } `json:"list"` + }{} + err = json.NewDecoder(&output).Decode(&list) + if err != nil { + return
fmt.Errorf("could not decode mutipass list output: %w", err) + } + + for _, i := range list.List { + if i.Name == batch.ID { + p.logger.Logf("multipass trying to delete instance %s", batch.ID) + + output.Reset() + stdErr.Reset() + proc, err = process.Start("multipass", + process.WithContext(ctx), + process.WithArgs([]string{"delete", "--purge", batch.ID}), + process.WithCmdOptions( + runner.AttachOut(&output), + runner.AttachErr(&stdErr))) + if err != nil { + return fmt.Errorf( + "multipass instance %q already exist, state %q. Could not delete it: %w", + batch.ID, i.State, err) + } + state = <-proc.Wait() + if !state.Success() { + msg := fmt.Sprintf("failed to delete and purge multipass instance %s: %s", + batch.ID, + state.String()) + p.logger.Logf(msg) + p.logger.Logf("output: %s", output.String()) + p.logger.Logf("stderr: %s", stdErr.String()) + return errors.New(msg) + } + + break + } + } + + return nil +} + +// delete deletes an instance. +func (p *provisioner) delete(ctx context.Context, instance common.Instance) error { + args := []string{ + "delete", + "-p", + instance.ID, + } + + var output bytes.Buffer + p.logger.Logf("Deleting instance %s", instance.Name) + proc, err := process.Start("multipass", process.WithContext(ctx), process.WithArgs(args), process.WithCmdOptions(runner.AttachOut(&output), runner.AttachErr(&output))) + if err != nil { + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to run multipass delete: %w", err) + } + ps := <-proc.Wait() + if ps.ExitCode() != 0 { + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to run multipass delete: exited with code: %d", ps.ExitCode()) + } + return nil +} + +// list all the instances. +func (p *provisioner) list(ctx context.Context) (map[string]instance, error) { + cmd := exec.CommandContext(ctx, "multipass", "list", "--format", "yaml") + result, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("failed to run multipass list: %w", err) + } + + // yaml output from multipass gives a list of instances for each instance name, + // even though there is only ever 1 entry in the list + var instancesMulti map[string][]instance + err = yaml.Unmarshal(result, &instancesMulti) + if err != nil { + return nil, fmt.Errorf("failed to parse multipass list output: %w", err) + } + instances := map[string]instance{} + for name, multi := range instancesMulti { + instances[name] = multi[0] + } + + return instances, nil +} + +type instance struct { + State string `yaml:"state"` + IPv4 []string `yaml:"ipv4"` + Release string `yaml:"release"` +} + +type cloudinitConfig struct { + SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"` +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ogc/api.go b/dev-tools/mage/target/srvrlesstest/testing/ogc/api.go new file mode 100644 index 00000000000..ec9228118b7 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ogc/api.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package ogc + +import "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + +// Layout definition for `ogc layout import`. +type Layout struct { + Name string `yaml:"name"` + Provider string `yaml:"provider"` + InstanceSize string `yaml:"instance_size"` + RunsOn string `yaml:"runs_on"` + RemotePath string `yaml:"remote_path"` + Scale int `yaml:"scale"` + Username string `yaml:"username"` + SSHPrivateKey string `yaml:"ssh_private_key"` + SSHPublicKey string `yaml:"ssh_public_key"` + Ports []string `yaml:"ports"` + Tags []string `yaml:"tags"` + Labels map[string]string `yaml:"labels"` + Scripts string `yaml:"scripts"` +} + +// Machine definition returned by `ogc up`. +type Machine struct { + ID int `yaml:"id"` + InstanceID string `yaml:"instance_id"` + InstanceName string `yaml:"instance_name"` + InstanceState string `yaml:"instance_state"` + PrivateIP string `yaml:"private_ip"` + PublicIP string `yaml:"public_ip"` + Layout Layout `yaml:"layout"` + Create string `yaml:"created"` +} + +// LayoutOS defines the minimal information for a mapping of an OS to the +// provider, instance size, and runs on for that OS. +type LayoutOS struct { + OS define.OS + Provider string + InstanceSize string + RunsOn string + Username string + RemotePath string +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ogc/config.go b/dev-tools/mage/target/srvrlesstest/testing/ogc/config.go new file mode 100644 index 00000000000..7a65e856c70 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ogc/config.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package ogc + +import ( + "encoding/json" + "errors" + "fmt" + "os" +) + +// Config is the configuration for using OGC. +type Config struct { + ServiceTokenPath string + Datacenter string + + content *serviceTokenContent +} + +// Validate returns an error if the information is invalid. +func (c *Config) Validate() error { + if c.ServiceTokenPath == "" { + return errors.New("field ServiceTokenPath must be set") + } + if c.Datacenter == "" { + return errors.New("field Datacenter must be set") + } + return c.ensureParsed() +} + +// ProjectID returns the project ID from the service token. 
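+// The token file is parsed lazily and cached on first use. An illustrative +// (hypothetical) call site: +// +//	cfg := Config{ServiceTokenPath: "/run/secrets/gcp.json", Datacenter: "us-central1-a"} +//	if err := cfg.Validate(); err != nil { +//		log.Fatal(err) +//	} +//	projectID, err := cfg.ProjectID()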
+func (c *Config) ProjectID() (string, error) { + err := c.ensureParsed() + if err != nil { + return "", err + } + return c.content.ProjectID, nil +} + +// ClientEmail returns the client email from the service token. +func (c *Config) ClientEmail() (string, error) { + err := c.ensureParsed() + if err != nil { + return "", err + } + return c.content.ClientEmail, nil +} + +func (c *Config) ensureParsed() error { + if c.content != nil { + // already parsed + return nil + } + content, err := c.parse() + if err != nil { + return err + } + c.content = content + return nil +} + +func (c *Config) parse() (*serviceTokenContent, error) { + var content serviceTokenContent + raw, err := os.ReadFile(c.ServiceTokenPath) + if err != nil { + return nil, fmt.Errorf("failed to read contents of %s: %w", c.ServiceTokenPath, err) + } + err = json.Unmarshal(raw, &content) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal JSON contents of %s: %w", c.ServiceTokenPath, err) + } + if content.Type != "service_account" { + return nil, fmt.Errorf("not a service account token at %s; type != service_account", c.ServiceTokenPath) + } + return &content, nil +} + +// serviceTokenContent is parsed content from a service token file. +type serviceTokenContent struct { + Type string `json:"type"` + ProjectID string `json:"project_id"` + ClientEmail string `json:"client_email"` + + // more fields exists but we only need the provided information +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ogc/provisioner.go b/dev-tools/mage/target/srvrlesstest/testing/ogc/provisioner.go new file mode 100644 index 00000000000..eb05e9922d9 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ogc/provisioner.go @@ -0,0 +1,354 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package ogc + +import ( + "bytes" + "context" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/core/process" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/runner" + "os" + "path/filepath" + "strings" + "time" + + "gopkg.in/yaml.v2" +) + +const ( + // LayoutIntegrationTag is the tag added to all layouts for the integration testing framework. 
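+ // Both ogcUp and ogcDown operate on this tag, so only machines created by this framework are ever touched.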
+ LayoutIntegrationTag = "agent-integration" + Name = "ogc" +) + +type provisioner struct { + logger common.Logger + cfg Config +} + +// NewProvisioner creates the OGC provisioner +func NewProvisioner(cfg Config) (common.InstanceProvisioner, error) { + err := cfg.Validate() + if err != nil { + return nil, err + } + return &provisioner{ + cfg: cfg, + }, nil +} + +func (p *provisioner) Name() string { + return Name +} + +func (p *provisioner) SetLogger(l common.Logger) { + p.logger = l +} + +func (p *provisioner) Type() common.ProvisionerType { + return common.ProvisionerTypeVM +} + +// Supported returns true when we support this OS for OGC. +func (p *provisioner) Supported(os define.OS) bool { + _, ok := findOSLayout(os) + return ok +} + +func (p *provisioner) Provision(ctx context.Context, cfg common.Config, batches []common.OSBatch) ([]common.Instance, error) { + // ensure the latest version + pullCtx, pullCancel := context.WithTimeout(ctx, 5*time.Minute) + defer pullCancel() + err := p.ogcPull(pullCtx) + if err != nil { + return nil, err + } + + // import the calculated layouts + importCtx, importCancel := context.WithTimeout(ctx, 30*time.Second) + defer importCancel() + err = p.ogcImport(importCtx, cfg, batches) + if err != nil { + return nil, err + } + + // bring up all the instances + upCtx, upCancel := context.WithTimeout(ctx, 30*time.Minute) + defer upCancel() + upOutput, err := p.ogcUp(upCtx) + if err != nil { + return nil, fmt.Errorf("ogc up failed: %w", err) + } + + // fetch the machines and run the batches on the machine + machines, err := p.ogcMachines(ctx) + if err != nil { + return nil, err + } + if len(machines) == 0 { + // Print the output so its clear what went wrong. + // Without this it's unclear where OGC went wrong, it + // doesn't do a great job of reporting a clean error + fmt.Fprintf(os.Stdout, "%s\n", upOutput) + return nil, fmt.Errorf("ogc didn't create any machines") + } + + // map the machines to instances + var instances []common.Instance + for _, b := range batches { + machine, ok := findMachine(machines, b.ID) + if !ok { + // print the output so its clear what went wrong. + // Without this it's unclear where OGC went wrong, it + // doesn't do a great job of reporting a clean error + fmt.Fprintf(os.Stdout, "%s\n", upOutput) + return nil, fmt.Errorf("failed to find machine for batch ID: %s", b.ID) + } + instances = append(instances, common.Instance{ + ID: b.ID, + Provisioner: Name, + Name: machine.InstanceName, + IP: machine.PublicIP, + Username: machine.Layout.Username, + RemotePath: machine.Layout.RemotePath, + Internal: map[string]interface{}{ + "instance_id": machine.InstanceID, + }, + }) + } + return instances, nil +} + +// Clean cleans up all provisioned resources. +func (p *provisioner) Clean(ctx context.Context, cfg common.Config, _ []common.Instance) error { + return p.ogcDown(ctx) +} + +// ogcPull pulls the latest ogc version. 
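+// Note: the image reference below is pinned to 5.0.1, so "latest" here means the most recent build of that pinned tag.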
+func (p *provisioner) ogcPull(ctx context.Context) error { + args := []string{ + "pull", + "docker.elastic.co/observability-ci/ogc:5.0.1", + } + var output bytes.Buffer + p.logger.Logf("Pulling latest ogc image") + proc, err := process.Start("docker", process.WithContext(ctx), process.WithArgs(args), process.WithCmdOptions(runner.AttachOut(&output), runner.AttachErr(&output))) + if err != nil { + return fmt.Errorf("failed to run docker ogcPull: %w", err) + } + ps := <-proc.Wait() + if ps.ExitCode() != 0 { + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to run ogc pull: docker run exited with code: %d", ps.ExitCode()) + } + return nil +} + +// ogcImport imports all the required batches into OGC. +func (p *provisioner) ogcImport(ctx context.Context, cfg common.Config, batches []common.OSBatch) error { + var layouts []Layout + for _, ob := range batches { + layouts = append(layouts, osBatchToOGC(cfg.StateDir, ob)) + } + layoutData, err := yaml.Marshal(struct { + Layouts []Layout `yaml:"layouts"` + }{ + Layouts: layouts, + }) + if err != nil { + return fmt.Errorf("failed to marshal layouts YAML: %w", err) + } + + var output bytes.Buffer + p.logger.Logf("Import layouts into ogc") + proc, err := p.ogcRun(ctx, []string{"layout", "import"}, true, process.WithCmdOptions(runner.AttachOut(&output), runner.AttachErr(&output))) + if err != nil { + return fmt.Errorf("failed to run ogc import: %w", err) + } + _, err = proc.Stdin.Write(layoutData) + if err != nil { + _ = proc.Stdin.Close() + _ = proc.Kill() + <-proc.Wait() + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to write layouts to stdin: %w", err) + } + _ = proc.Stdin.Close() + ps := <-proc.Wait() + if ps.ExitCode() != 0 { + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to run ogc import: docker run exited with code: %d", ps.ExitCode()) + } + return nil +} + +// ogcUp brings up all the instances. +func (p *provisioner) ogcUp(ctx context.Context) ([]byte, error) { + p.logger.Logf("Bring up instances through ogc") + var output bytes.Buffer + proc, err := p.ogcRun(ctx, []string{"up", LayoutIntegrationTag}, false, process.WithCmdOptions(runner.AttachOut(&output), runner.AttachErr(&output))) + if err != nil { + return nil, fmt.Errorf("failed to run ogc up: %w", err) + } + ps := <-proc.Wait() + if ps.ExitCode() != 0 { + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return nil, fmt.Errorf("failed to run ogc up: docker run exited with code: %d", ps.ExitCode()) + } + return output.Bytes(), nil +} + +// ogcDown brings down all the instances. +func (p *provisioner) ogcDown(ctx context.Context) error { + p.logger.Logf("Bring down instances through ogc") + var output bytes.Buffer + proc, err := p.ogcRun(ctx, []string{"down", LayoutIntegrationTag}, false, process.WithCmdOptions(runner.AttachOut(&output), runner.AttachErr(&output))) + if err != nil { + return fmt.Errorf("failed to run ogc down: %w", err) + } + ps := <-proc.Wait() + if ps.ExitCode() != 0 { + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to run ogc down: docker run exited with code: %d", ps.ExitCode()) + } + return nil +} + +// ogcMachines lists all the instances. 
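+// It runs `ogc ls --as-yaml` and decodes the YAML into Machine values; only stdout is attached so the YAML is not mixed with other output.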
+func (p *provisioner) ogcMachines(ctx context.Context) ([]Machine, error) {
+    var out bytes.Buffer
+    proc, err := p.ogcRun(ctx, []string{"ls", "--as-yaml"}, false, process.WithCmdOptions(runner.AttachOut(&out)))
+    if err != nil {
+        return nil, fmt.Errorf("failed to run ogc ls: %w", err)
+    }
+    ps := <-proc.Wait()
+    if ps.ExitCode() != 0 {
+        return nil, fmt.Errorf("failed to run ogc ls: docker run exited with code: %d", ps.ExitCode())
+    }
+    var machines []Machine
+    err = yaml.Unmarshal(out.Bytes(), &machines)
+    if err != nil {
+        return nil, fmt.Errorf("failed to parse ogc ls output: %w", err)
+    }
+    return machines, nil
+}
+
+func (p *provisioner) ogcRun(ctx context.Context, args []string, interactive bool, processOpts ...process.StartOption) (*process.Info, error) {
+    wd, err := runner.WorkDir()
+    if err != nil {
+        return nil, err
+    }
+    tokenName := filepath.Base(p.cfg.ServiceTokenPath)
+    clientEmail, err := p.cfg.ClientEmail()
+    if err != nil {
+        return nil, err
+    }
+    projectID, err := p.cfg.ProjectID()
+    if err != nil {
+        return nil, err
+    }
+    runArgs := []string{"run"}
+    if interactive {
+        runArgs = append(runArgs, "-i")
+    }
+    runArgs = append(runArgs,
+        "--rm",
+        "-e",
+        fmt.Sprintf("GOOGLE_APPLICATION_SERVICE_ACCOUNT=%s", clientEmail),
+        "-e",
+        fmt.Sprintf("GOOGLE_APPLICATION_CREDENTIALS=/root/%s", tokenName),
+        "-e",
+        fmt.Sprintf("GOOGLE_PROJECT=%s", projectID),
+        "-e",
+        fmt.Sprintf("GOOGLE_DATACENTER=%s", p.cfg.Datacenter),
+        "-v",
+        fmt.Sprintf("%s:/root/%s", p.cfg.ServiceTokenPath, tokenName),
+        "-v",
+        fmt.Sprintf("%s:%s", wd, wd),
+        "-w",
+        wd,
+        "docker.elastic.co/observability-ci/ogc:5.0.1",
+        "--",
+        "ogc",
+        "-v",
+    )
+    runArgs = append(runArgs, args...)
+    opts := []process.StartOption{process.WithContext(ctx), process.WithArgs(runArgs)}
+    opts = append(opts, processOpts...)
+    return process.Start("docker", opts...)
+}
+
+func osBatchToOGC(cacheDir string, batch common.OSBatch) Layout {
+    tags := []string{
+        LayoutIntegrationTag,
+        batch.OS.Type,
+        batch.OS.Arch,
+    }
+    if batch.OS.Type == define.Linux {
+        tags = append(tags, strings.ToLower(fmt.Sprintf("%s-%s", batch.OS.Distro, strings.ReplaceAll(batch.OS.Version, ".", "-"))))
+    } else {
+        tags = append(tags, strings.ToLower(fmt.Sprintf("%s-%s", batch.OS.Type, strings.ReplaceAll(batch.OS.Version, ".", "-"))))
+    }
+    los, _ := findOSLayout(batch.OS.OS)
+    return Layout{
+        Name:          batch.ID,
+        Provider:      los.Provider,
+        InstanceSize:  los.InstanceSize,
+        RunsOn:        los.RunsOn,
+        RemotePath:    los.RemotePath,
+        Scale:         1,
+        Username:      los.Username,
+        SSHPrivateKey: cacheDir + "/id_rsa",
+        SSHPublicKey:  cacheDir + "/id_rsa.pub",
+        Ports:         []string{"22:22"},
+        Tags:          tags,
+        Labels: map[string]string{
+            "division": "engineering",
+            "org":      "ingest",
+            "team":     "elastic-agent-control-plane",
+            "project":  "elastic-agent",
+        },
+        Scripts: "path", // not used; but required by OGC
+    }
+}
+
+func findOSLayout(os define.OS) (LayoutOS, bool) {
+    for _, s := range ogcSupported {
+        if s.OS == os {
+            return s, true
+        }
+    }
+    return LayoutOS{}, false
+}
+
+func findMachine(machines []Machine, name string) (Machine, bool) {
+    for _, m := range machines {
+        if m.Layout.Name == name {
+            return m, true
+        }
+    }
+    return Machine{}, false
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/ogc/supported.go b/dev-tools/mage/target/srvrlesstest/testing/ogc/supported.go
new file mode 100644
index 00000000000..b3dcb1e77cf
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/ogc/supported.go
@@ -0,0 +1,202 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ogc
+
+import (
+    "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define"
+    "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/supported"
+)
+
+const (
+    // Google is for the Google Cloud Platform (GCP)
+    Google = "google"
+)
+
+// ogcSupported defines the set of operating systems the OGC provisioner
+// currently supports.
+//
+// If a batch does not specify a version and/or distro, the first matching
+// entry in this list is picked, so the variant we want tested the most
+// should be listed first.
+var ogcSupported = []LayoutOS{
+    {
+        OS: define.OS{
+            Type:    define.Linux,
+            Arch:    define.AMD64,
+            Distro:  supported.Ubuntu,
+            Version: "24.04",
+        },
+        Provider:     Google,
+        InstanceSize: "e2-standard-2", // 2 amd64 cpus, 8 GB RAM
+        RunsOn:       "ubuntu-2404-lts-amd64",
+        Username:     "ubuntu",
+        RemotePath:   "/home/ubuntu/agent",
+    },
+    {
+        OS: define.OS{
+            Type:    define.Linux,
+            Arch:    define.AMD64,
+            Distro:  supported.Ubuntu,
+            Version: "22.04",
+        },
+        Provider:     Google,
+        InstanceSize: "e2-standard-2", // 2 amd64 cpus, 8 GB RAM
+        RunsOn:       "ubuntu-2204-lts",
+        Username:     "ubuntu",
+        RemotePath:   "/home/ubuntu/agent",
+    },
+    {
+        OS: define.OS{
+            Type:    define.Linux,
+            Arch:    define.AMD64,
+            Distro:  supported.Ubuntu,
+            Version: "20.04",
+        },
+        Provider:     Google,
+        InstanceSize: "e2-standard-2", // 2 amd64 cpus, 8 GB RAM
+        RunsOn:       "ubuntu-2004-lts",
+        Username:     "ubuntu",
+        RemotePath:   "/home/ubuntu/agent",
+    },
+    // These instance types are experimental on Google Cloud and very unstable.
+    // We will wait until Google introduces new ARM instance types.
+    // https://cloud.google.com/blog/products/compute/introducing-googles-new-arm-based-cpu
+    // {
+    //     OS: define.OS{
+    //         Type:    define.Linux,
+    //         Arch:    define.ARM64,
+    //         Distro:  runner.Ubuntu,
+    //         Version: "24.04",
+    //     },
+    //     Provider:     Google,
+    //     InstanceSize: "t2a-standard-4", // 4 arm64 cpus, 16 GB RAM
+    //     RunsOn:       "ubuntu-2404-lts-arm64",
+    //     Username:     "ubuntu",
+    //     RemotePath:   "/home/ubuntu/agent",
+    // },
+    // {
+    //     OS: define.OS{
+    //         Type:    define.Linux,
+    //         Arch:    define.ARM64,
+    //         Distro:  runner.Ubuntu,
+    //         Version: "22.04",
+    //     },
+    //     Provider:     Google,
+    //     InstanceSize: "t2a-standard-4", // 4 arm64 cpus, 16 GB RAM
+    //     RunsOn:       "ubuntu-2204-lts-arm64",
+    //     Username:     "ubuntu",
+    //     RemotePath:   "/home/ubuntu/agent",
+    // },
+    // {
+    //     OS: define.OS{
+    //         Type:    define.Linux,
+    //         Arch:    define.ARM64,
+    //         Distro:  runner.Ubuntu,
+    //         Version: "20.04",
+    //     },
+    //     Provider:     Google,
+    //     InstanceSize: "t2a-standard-4", // 4 arm64 cpus, 16 GB RAM
+    //     RunsOn:       "ubuntu-2004-lts-arm64",
+    //     Username:     "ubuntu",
+    //     RemotePath:   "/home/ubuntu/agent",
+    // },
+    {
+        OS: define.OS{
+            Type:    define.Linux,
+            Arch:    define.AMD64,
+            Distro:  supported.Rhel,
+            Version: "8",
+        },
+        Provider:     Google,
+        InstanceSize: "e2-standard-2", // 2 amd64 cpus, 8 GB RAM
+        RunsOn:       "rhel-8",
+        Username:     "rhel",
+        RemotePath:   "/home/rhel/agent",
+    },
+    {
+        OS: define.OS{
+            Type:    define.Windows,
+            Arch:    define.AMD64,
+            Version: "2022",
+        },
+        Provider:     Google,
+        InstanceSize: "e2-standard-4", // 4 amd64 cpus, 16 GB RAM
+        RunsOn:       "windows-2022",
+        Username:     "windows",
+        RemotePath:   "C:\\Users\\windows\\agent",
+    },
+    {
+        OS: define.OS{
+            Type:    define.Windows,
+            Arch:    define.AMD64,
+            Version: "2022-core",
+        },
+        Provider:     Google,
+        InstanceSize: "e2-standard-4", // 4 amd64 cpus, 16 GB RAM
+        RunsOn:       "windows-2022-core",
+        Username:     "windows",
+        RemotePath:   "C:\\Users\\windows\\agent",
+    },
+    {
+        OS: define.OS{
+            Type:    define.Windows,
+            Arch:    define.AMD64,
+            Version: "2019",
+        },
+        Provider:     Google,
+        InstanceSize: "e2-standard-4", // 4 amd64 cpus, 16 GB RAM
+        RunsOn:       "windows-2019",
+        Username:     "windows",
+        RemotePath:   "C:\\Users\\windows\\agent",
+    },
+    {
+        OS: define.OS{
+            Type:    define.Windows,
+            Arch:    define.AMD64,
+            Version: "2019-core",
+        },
+        Provider:     Google,
+        InstanceSize: "e2-standard-4", // 4 amd64 cpus, 16 GB RAM
+        RunsOn:       "windows-2019-core",
+        Username:     "windows",
+        RemotePath:   "C:\\Users\\windows\\agent",
+    },
+    {
+        OS: define.OS{
+            Type:    define.Windows,
+            Arch:    define.AMD64,
+            Version: "2016",
+        },
+        Provider:     Google,
+        InstanceSize: "e2-standard-4", // 4 amd64 cpus, 16 GB RAM
+        RunsOn:       "windows-2016",
+        Username:     "windows",
+        RemotePath:   "C:\\Users\\windows\\agent",
+    },
+    {
+        OS: define.OS{
+            Type:    define.Windows,
+            Arch:    define.AMD64,
+            Version: "2016-core",
+        },
+        Provider:     Google,
+        InstanceSize: "e2-standard-4", // 4 amd64 cpus, 16 GB RAM
+        RunsOn:       "windows-2016-core",
+        Username:     "windows",
+        RemotePath:   "C:\\Users\\windows\\agent",
+    },
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/runner/archiver.go b/dev-tools/mage/target/srvrlesstest/testing/runner/archiver.go
new file mode 100644
index 00000000000..c0b668bed4f
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/runner/archiver.go
@@ -0,0 +1,125 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+    "archive/zip"
+    "bufio"
+    "bytes"
+    "context"
+    "fmt"
+    "io"
+    "os"
+    "os/exec"
+    "path/filepath"
+    "strings"
+)
+
+func createRepoZipArchive(ctx context.Context, dir string, dest string) error {
+    absDir, err := filepath.Abs(dir)
+    if err != nil {
+        return fmt.Errorf("failed to get absolute path to %s: %w", dir, err)
+    }
+
+    projectFilesOutput, err := cmdBufferedOutput(exec.Command("git", "ls-files", "-z"), dir)
+    if err != nil {
+        return err
+    }
+
+    // Also include files that are not yet tracked in git; this prevents the
+    // foot-gun where someone writes code in a new file and tests it before
+    // adding the file to git.
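+    // `git ls-files -o --exclude-standard -z` lists untracked files that are
+    // not ignored, NUL-terminated; that is why the scanner below splits on
+    // '\x00' rather than on newlines.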
+    untrackedOutput, err := cmdBufferedOutput(exec.Command("git", "ls-files", "--exclude-standard", "-o", "-z"), dir)
+    if err != nil {
+        return err
+    }
+
+    _, err = io.Copy(&projectFilesOutput, &untrackedOutput)
+    if err != nil {
+        return fmt.Errorf("failed to read stdout of git ls-files -o: %w", err)
+    }
+
+    archive, err := os.Create(dest)
+    if err != nil {
+        return fmt.Errorf("failed to create file %s: %w", dest, err)
+    }
+    defer archive.Close()
+
+    zw := zip.NewWriter(archive)
+    defer zw.Close()
+
+    s := bufio.NewScanner(&projectFilesOutput)
+    s.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
+        if i := bytes.IndexByte(data, '\x00'); i >= 0 {
+            return i + 1, data[0:i], nil
+        }
+        if !atEOF {
+            return 0, nil, nil
+        }
+        return len(data), data, bufio.ErrFinalToken
+    })
+    for s.Scan() {
+        if ctx.Err() != nil {
+            // incomplete; close and delete
+            _ = archive.Close()
+            _ = os.Remove(dest)
+            return ctx.Err()
+        }
+        err := func(line string) error {
+            if line == "" {
+                return nil
+            }
+            fullPath := filepath.Join(absDir, line)
+            s, err := os.Stat(fullPath)
+            if err != nil {
+                return fmt.Errorf("failed to stat file %s: %w", fullPath, err)
+            }
+            if s.IsDir() {
+                // skip directories
+                return nil
+            }
+            f, err := os.Open(fullPath)
+            if err != nil {
+                return fmt.Errorf("failed to open file %s: %w", fullPath, err)
+            }
+            defer f.Close()
+            w, err := zw.Create(line)
+            if err != nil {
+                return fmt.Errorf("failed to create zip entry %s: %w", line, err)
+            }
+            _, err = io.Copy(w, f)
+            if err != nil {
+                return fmt.Errorf("failed to copy zip entry %s: %w", line, err)
+            }
+            return nil
+        }(s.Text())
+        if err != nil {
+            return fmt.Errorf("error adding files: %w", err)
+        }
+    }
+    return nil
+}
+
+func cmdBufferedOutput(cmd *exec.Cmd, workDir string) (bytes.Buffer, error) {
+    var stdoutBuf bytes.Buffer
+    cmd.Dir = workDir
+    cmd.Stdout = &stdoutBuf
+    err := cmd.Run()
+    if err != nil {
+        return bytes.Buffer{}, fmt.Errorf("failed to run cmd %s: %w", strings.Join(cmd.Args, " "), err)
+    }
+    return stdoutBuf, nil
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/runner/json.go b/dev-tools/mage/target/srvrlesstest/testing/runner/json.go
new file mode 100644
index 00000000000..8e044a0ac0c
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/runner/json.go
@@ -0,0 +1,60 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
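+
+// This file post-processes `go test -json` output. Each line of that output
+// is a single JSON event; for example (illustrative only):
+//
+//    {"Time":"2024-01-01T00:00:00Z","Action":"pass","Package":"pkg/a","Test":"TestX"}
+//
+// suffixJSONResults rewrites the Package field of every event so results from
+// different OS batches do not collide when merged.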
+
+package runner
+
+import (
+    "bufio"
+    "bytes"
+    "encoding/json"
+)
+
+type JSONTestEntry struct {
+    Time    string `json:"Time"`
+    Action  string `json:"Action"`
+    Package string `json:"Package"`
+    Test    string `json:"Test"`
+    Output  string `json:"Output"`
+}
+
+func suffixJSONResults(content []byte, suffix string) ([]byte, error) {
+    var result bytes.Buffer
+    sc := bufio.NewScanner(bytes.NewReader(content))
+    for sc.Scan() {
+        var entry JSONTestEntry
+        err := json.Unmarshal([]byte(sc.Text()), &entry)
+        if err != nil {
+            return nil, err
+        }
+        if entry.Package != "" {
+            entry.Package += suffix
+        }
+        raw, err := json.Marshal(&entry)
+        if err != nil {
+            return nil, err
+        }
+        _, err = result.Write(raw)
+        if err != nil {
+            return nil, err
+        }
+        _, err = result.Write([]byte("\n"))
+        if err != nil {
+            return nil, err
+        }
+    }
+    return result.Bytes(), nil
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/runner/junit.go b/dev-tools/mage/target/srvrlesstest/testing/runner/junit.go
new file mode 100644
index 00000000000..3ecb3721210
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/runner/junit.go
@@ -0,0 +1,99 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+    "encoding/xml"
+    "io"
+)
+
+// JUnitTestSuites is a collection of JUnit test suites.
+type JUnitTestSuites struct {
+    XMLName  xml.Name         `xml:"testsuites"`
+    Name     string           `xml:"name,attr,omitempty"`
+    Tests    int              `xml:"tests,attr,omitempty"`
+    Failures int              `xml:"failures,attr,omitempty"`
+    Errors   int              `xml:"errors,attr,omitempty"`
+    Time     string           `xml:"time,attr,omitempty"`
+    Suites   []JUnitTestSuite `xml:"testsuite"`
+}
+
+// JUnitTestSuite is a single JUnit test suite which may contain many
+// testcases.
+type JUnitTestSuite struct {
+    XMLName    xml.Name        `xml:"testsuite"`
+    Tests      int             `xml:"tests,attr"`
+    Failures   int             `xml:"failures,attr"`
+    Time       string          `xml:"time,attr"`
+    Name       string          `xml:"name,attr"`
+    Properties []JUnitProperty `xml:"properties>property,omitempty"`
+    TestCases  []JUnitTestCase `xml:"testcase"`
+    Timestamp  string          `xml:"timestamp,attr"`
+}
+
+// JUnitTestCase is a single test case with its result.
+type JUnitTestCase struct {
+    XMLName     xml.Name          `xml:"testcase"`
+    Classname   string            `xml:"classname,attr"`
+    Name        string            `xml:"name,attr"`
+    Time        string            `xml:"time,attr"`
+    SkipMessage *JUnitSkipMessage `xml:"skipped,omitempty"`
+    Failure     *JUnitFailure     `xml:"failure,omitempty"`
+}
+
+// JUnitSkipMessage contains the reason why a testcase was skipped.
+type JUnitSkipMessage struct {
+    Message string `xml:"message,attr"`
+}
+
+// JUnitProperty represents a key/value pair used to define properties.
+type JUnitProperty struct {
+    Name  string `xml:"name,attr"`
+    Value string `xml:"value,attr"`
+}
+
+// JUnitFailure contains data related to a failed test.
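+// Message and Type map to the corresponding JUnit XML attributes, and
+// Contents carries the raw failure output as character data.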
+type JUnitFailure struct {
+    Message  string `xml:"message,attr"`
+    Type     string `xml:"type,attr"`
+    Contents string `xml:",chardata"`
+}
+
+// parseJUnit parses contents into a JUnit structure.
+func parseJUnit(contents []byte) (JUnitTestSuites, error) {
+    var suites JUnitTestSuites
+    err := xml.Unmarshal(contents, &suites)
+    if err != nil {
+        return JUnitTestSuites{}, err
+    }
+    return suites, nil
+}
+
+// writeJUnit writes the suites to the out writer.
+func writeJUnit(out io.Writer, suites JUnitTestSuites) error {
+    doc, err := xml.MarshalIndent(suites, "", "\t")
+    if err != nil {
+        return err
+    }
+    _, err = out.Write([]byte(xml.Header))
+    if err != nil {
+        return err
+    }
+    _, err = out.Write(doc)
+    return err
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/runner/runner.go b/dev-tools/mage/target/srvrlesstest/testing/runner/runner.go
new file mode 100644
index 00000000000..832f1b6a217
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/runner/runner.go
@@ -0,0 +1,968 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+    "bytes"
+    "context"
+    "errors"
+    "fmt"
+    "io"
+    "os"
+    "path/filepath"
+    "slices"
+    "strings"
+    "sync"
+    "time"
+
+    "golang.org/x/crypto/ssh"
+    "golang.org/x/sync/errgroup"
+    "gopkg.in/yaml.v2"
+
+    "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing"
+    "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common"
+    "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define"
+    tssh "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh"
+    "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/supported"
+)
+
+// Result is the complete result from the runner.
+type Result struct {
+    // Tests is the number of tests run.
+    Tests int
+    // Failures is the number of tests that failed.
+    Failures int
+    // Output is the raw test output.
+    Output []byte
+    // XMLOutput is the XML JUnit output.
+    XMLOutput []byte
+    // JSONOutput is the JSON output.
+    JSONOutput []byte
+}
+
+// State represents the state storage of what has been provisioned.
+type State struct {
+    // Instances stores provisioned and prepared instances.
+    Instances []StateInstance `yaml:"instances"`
+
+    // Stacks store provisioned stacks.
+    Stacks []common.Stack `yaml:"stacks"`
+}
+
+// StateInstance is an instance stored in the state.
+type StateInstance struct {
+    common.Instance
+
+    // Prepared set to true when the instance is prepared.
+    Prepared bool `yaml:"prepared"`
+}
+
+// Runner runs the tests on remote instances.
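+// It provisions instances and stacks, prepares each instance over SSH, runs
+// the test batches, and merges the per-batch results into a single Result.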
+type Runner struct {
+    cfg    common.Config
+    logger common.Logger
+    ip     common.InstanceProvisioner
+    sp     common.StackProvisioner
+
+    batches []common.OSBatch
+
+    batchToStack   map[string]stackRes
+    batchToStackCh map[string]chan stackRes
+    batchToStackMx sync.Mutex
+
+    stateMx sync.Mutex
+    state   State
+}
+
+// NewRunner creates a new runner based on the provided batches.
+func NewRunner(cfg common.Config, ip common.InstanceProvisioner, sp common.StackProvisioner, batches ...define.Batch) (*Runner, error) {
+    err := cfg.Validate()
+    if err != nil {
+        return nil, err
+    }
+    platforms, err := cfg.GetPlatforms()
+    if err != nil {
+        return nil, err
+    }
+
+    osBatches, err := supported.CreateBatches(batches, platforms, cfg.Groups, cfg.Matrix, cfg.SingleTest)
+    if err != nil {
+        return nil, err
+    }
+    osBatches = filterSupportedOS(osBatches, ip)
+
+    logger := &runnerLogger{
+        writer:    os.Stdout,
+        timestamp: cfg.Timestamp,
+    }
+    ip.SetLogger(logger)
+    sp.SetLogger(logger)
+
+    r := &Runner{
+        cfg:            cfg,
+        logger:         logger,
+        ip:             ip,
+        sp:             sp,
+        batches:        osBatches,
+        batchToStack:   make(map[string]stackRes),
+        batchToStackCh: make(map[string]chan stackRes),
+    }
+
+    err = r.loadState()
+    if err != nil {
+        return nil, err
+    }
+    return r, nil
+}
+
+// Logger returns the logger used by the runner.
+func (r *Runner) Logger() common.Logger {
+    return r.logger
+}
+
+// Run runs all the tests.
+func (r *Runner) Run(ctx context.Context) (Result, error) {
+    // validate tests can even be performed
+    err := r.validate()
+    if err != nil {
+        return Result{}, err
+    }
+
+    // prepare
+    prepareCtx, prepareCancel := context.WithTimeout(ctx, 10*time.Minute)
+    defer prepareCancel()
+    sshAuth, repoArchive, err := r.prepare(prepareCtx)
+    if err != nil {
+        return Result{}, err
+    }
+
+    // start the needed stacks
+    err = r.startStacks(ctx)
+    if err != nil {
+        return Result{}, err
+    }
+
+    // only send to the provisioner the batches that need to be created
+    var instances []StateInstance
+    var batches []common.OSBatch
+    for _, b := range r.batches {
+        if !b.Skip {
+            i, ok := r.findInstance(b.ID)
+            if ok {
+                instances = append(instances, i)
+            } else {
+                batches = append(batches, b)
+            }
+        }
+    }
+    if len(batches) > 0 {
+        provisionedInstances, err := r.ip.Provision(ctx, r.cfg, batches)
+        if err != nil {
+            return Result{}, err
+        }
+        for _, i := range provisionedInstances {
+            instances = append(instances, StateInstance{
+                Instance: i,
+                Prepared: false,
+            })
+        }
+    }
+
+    var results map[string]common.OSRunnerResult
+    switch r.ip.Type() {
+    case common.ProvisionerTypeVM:
+        // use SSH to perform all the required work on the instances
+        results, err = r.runInstances(ctx, sshAuth, repoArchive, instances)
+        if err != nil {
+            return Result{}, err
+        }
+    case common.ProvisionerTypeK8SCluster:
+        results, err = r.runK8sInstances(ctx, instances)
+        if err != nil {
+            return Result{}, err
+        }
+
+    default:
+        return Result{}, fmt.Errorf("invalid provisioner type %d", r.ip.Type())
+    }
+
+    // merge the results
+    return r.mergeResults(results)
+}
+
+// Clean performs a cleanup to ensure anything that could have been left running is removed.
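+// Instances and stacks are deleted concurrently, each with its own 10-minute
+// timeout, and the persisted state is cleared first so a crash mid-cleanup
+// does not leave stale entries behind.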
+func (r *Runner) Clean() error {
+    r.stateMx.Lock()
+    defer r.stateMx.Unlock()
+
+    var instances []common.Instance
+    for _, i := range r.state.Instances {
+        instances = append(instances, i.Instance)
+    }
+    r.state.Instances = nil
+    stacks := make([]common.Stack, len(r.state.Stacks))
+    copy(stacks, r.state.Stacks)
+    r.state.Stacks = nil
+    err := r.writeState()
+    if err != nil {
+        return err
+    }
+
+    var g errgroup.Group
+    g.Go(func() error {
+        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+        defer cancel()
+        return r.ip.Clean(ctx, r.cfg, instances)
+    })
+    for _, stack := range stacks {
+        g.Go(func(stack common.Stack) func() error {
+            return func() error {
+                ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+                defer cancel()
+                return r.sp.Delete(ctx, stack)
+            }
+        }(stack))
+    }
+    return g.Wait()
+}
+
+func (r *Runner) runK8sInstances(ctx context.Context, instances []StateInstance) (map[string]common.OSRunnerResult, error) {
+    results := make(map[string]common.OSRunnerResult)
+    var resultsMx sync.Mutex
+    var err error
+    for _, instance := range instances {
+        batch, ok := findBatchByID(instance.ID, r.batches)
+        if !ok {
+            err = fmt.Errorf("unable to find batch with ID: %s", instance.ID)
+            continue
+        }
+
+        logger := &batchLogger{wrapped: r.logger, prefix: instance.ID}
+        // apply ExtraEnv first so the environment variables set explicitly
+        // below cannot be overridden by it
+        env := map[string]string{}
+        for k, v := range r.cfg.ExtraEnv {
+            env[k] = v
+        }
+
+        // ensure that we have all the requirements for the stack if required
+        if batch.Batch.Stack != nil {
+            // wait for the stack to be ready before continuing
+            logger.Logf("Waiting for stack to be ready...")
+            stack, stackErr := r.getStackForBatchID(batch.ID)
+            if stackErr != nil {
+                err = stackErr
+                continue
+            }
+            env["ELASTICSEARCH_HOST"] = stack.Elasticsearch
+            env["ELASTICSEARCH_USERNAME"] = stack.Username
+            env["ELASTICSEARCH_PASSWORD"] = stack.Password
+            env["KIBANA_HOST"] = stack.Kibana
+            env["KIBANA_USERNAME"] = stack.Username
+            env["KIBANA_PASSWORD"] = stack.Password
+            logger.Logf("Using Stack with Kibana host %s, credentials available under .integration-cache", stack.Kibana)
+        }
+
+        // set the go test flags
+        env["GOTEST_FLAGS"] = r.cfg.TestFlags
+        env["KUBECONFIG"] = instance.Instance.Internal["config"].(string)
+        env["TEST_BINARY_NAME"] = r.cfg.BinaryName
+        env["K8S_VERSION"] = instance.Instance.Internal["version"].(string)
+        env["AGENT_IMAGE"] = instance.Instance.Internal["agent_image"].(string)
+
+        prefix := fmt.Sprintf("%s-%s", instance.Instance.Internal["version"].(string), batch.ID)
+
+        // run the actual tests on the host
+        result, runErr := batch.OS.Runner.Run(ctx, r.cfg.VerboseMode, nil, logger, r.cfg.AgentVersion, prefix, batch.Batch, env)
+        if runErr != nil {
+            logger.Logf("Failed to execute tests on instance: %s", runErr)
+            err = fmt.Errorf("failed to execute tests on instance %s: %w", instance.Name, runErr)
+        }
+        resultsMx.Lock()
+        results[batch.ID] = result
+        resultsMx.Unlock()
+    }
+    if err != nil {
+        return nil, err
+    }
+    return results, nil
+}
+
+// runInstances runs the batch on each instance in parallel.
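+// Failures are logged per instance; the shared results map is guarded by a
+// mutex since the errgroup goroutines write to it concurrently.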
+func (r *Runner) runInstances(ctx context.Context, sshAuth ssh.AuthMethod, repoArchive string, instances []StateInstance) (map[string]common.OSRunnerResult, error) {
+    g, ctx := errgroup.WithContext(ctx)
+    results := make(map[string]common.OSRunnerResult)
+    var resultsMx sync.Mutex
+    for _, i := range instances {
+        func(i StateInstance) {
+            g.Go(func() error {
+                batch, ok := findBatchByID(i.ID, r.batches)
+                if !ok {
+                    return fmt.Errorf("unable to find batch with ID: %s", i.ID)
+                }
+                logger := &batchLogger{wrapped: r.logger, prefix: i.ID}
+                result, err := r.runInstance(ctx, sshAuth, logger, repoArchive, batch, i)
+                if err != nil {
+                    logger.Logf("Failed for instance %s (@ %s): %s\n", i.ID, i.IP, err)
+                    return err
+                }
+                resultsMx.Lock()
+                results[batch.ID] = result
+                resultsMx.Unlock()
+                return nil
+            })
+        }(i)
+    }
+    err := g.Wait()
+    if err != nil {
+        return nil, err
+    }
+    return results, nil
+}
+
+// runInstance runs the batch on the machine.
+func (r *Runner) runInstance(ctx context.Context, sshAuth ssh.AuthMethod, logger common.Logger, repoArchive string, batch common.OSBatch, instance StateInstance) (common.OSRunnerResult, error) {
+    sshPrivateKeyPath, err := filepath.Abs(filepath.Join(r.cfg.StateDir, "id_rsa"))
+    if err != nil {
+        return common.OSRunnerResult{}, fmt.Errorf("failed to determine OGC SSH private key path: %w", err)
+    }
+
+    logger.Logf("Starting SSH; connect with `ssh -i %s %s@%s`", sshPrivateKeyPath, instance.Username, instance.IP)
+    client := tssh.NewClient(instance.IP, instance.Username, sshAuth, logger)
+    connectCtx, connectCancel := context.WithTimeout(ctx, 10*time.Minute)
+    defer connectCancel()
+    err = client.Connect(connectCtx)
+    if err != nil {
+        logger.Logf("Failed to connect to instance %s: %s", instance.IP, err)
+        return common.OSRunnerResult{}, fmt.Errorf("failed to connect to instance %s: %w", instance.Name, err)
+    }
+    defer client.Close()
+    logger.Logf("Connected over SSH")
+
+    if !instance.Prepared {
+        // prepare the host to run the tests
+        logger.Logf("Preparing instance")
+        err = batch.OS.Runner.Prepare(ctx, client, logger, batch.OS.Arch, r.cfg.GOVersion)
+        if err != nil {
+            logger.Logf("Failed to prepare instance: %s", err)
+            return common.OSRunnerResult{}, fmt.Errorf("failed to prepare instance %s: %w", instance.Name, err)
+        }
+
+        // now it's prepared, add to state
+        instance.Prepared = true
+        err = r.addOrUpdateInstance(instance)
+        if err != nil {
+            return common.OSRunnerResult{}, fmt.Errorf("failed to save instance state %s: %w", instance.Name, err)
+        }
+    }
+
+    // copy the required files (done every run)
+    err = batch.OS.Runner.Copy(ctx, client, logger, repoArchive, r.getBuilds(batch))
+    if err != nil {
+        logger.Logf("Failed to copy files to instance: %s", err)
+        return common.OSRunnerResult{}, fmt.Errorf("failed to copy files to instance %s: %w", instance.Name, err)
+    }
+    // apply ExtraEnv first so the environment variables set explicitly below
+    // cannot be overridden by it
+    env := map[string]string{}
+    for k, v := range r.cfg.ExtraEnv {
+        env[k] = v
+    }
+
+    // ensure that we have all the requirements for the stack if required
+    if batch.Batch.Stack != nil {
+        // wait for the stack to be ready before continuing
+        logger.Logf("Waiting for stack to be ready...")
+        stack, err := r.getStackForBatchID(batch.ID)
+        if err != nil {
+            return common.OSRunnerResult{}, err
+        }
+        env["ELASTICSEARCH_HOST"] = stack.Elasticsearch
+        env["ELASTICSEARCH_USERNAME"] = stack.Username
+        env["ELASTICSEARCH_PASSWORD"] = stack.Password
+        env["KIBANA_HOST"] = stack.Kibana
+        env["KIBANA_USERNAME"] = stack.Username
+        env["KIBANA_PASSWORD"] = stack.Password
+        logger.Logf("Using Stack with Kibana host %s, credentials available under .integration-cache", stack.Kibana)
+    }
+
+    // set the go test flags
+    env["GOTEST_FLAGS"] = r.cfg.TestFlags
+    env["TEST_BINARY_NAME"] = r.cfg.BinaryName
+
+    // run the actual tests on the host
+    result, err := batch.OS.Runner.Run(ctx, r.cfg.VerboseMode, client, logger, r.cfg.AgentVersion, batch.ID, batch.Batch, env)
+    if err != nil {
+        logger.Logf("Failed to execute tests on instance: %s", err)
+        return common.OSRunnerResult{}, fmt.Errorf("failed to execute tests on instance %s: %w", instance.Name, err)
+    }
+
+    // fetch any diagnostics
+    if r.cfg.DiagnosticsDir != "" {
+        err = batch.OS.Runner.Diagnostics(ctx, client, logger, r.cfg.DiagnosticsDir)
+        if err != nil {
+            logger.Logf("Failed to fetch diagnostics: %s", err)
+        }
+    } else {
+        logger.Logf("Skipping diagnostics fetch as DiagnosticsDir was not set")
+    }
+
+    return result, nil
+}
+
+// validate ensures that required builds of Elastic Agent exist
+func (r *Runner) validate() error {
+    var requiredFiles []string
+    for _, b := range r.batches {
+        if !b.Skip {
+            for _, build := range r.getBuilds(b) {
+                if !slices.Contains(requiredFiles, build.Path) {
+                    requiredFiles = append(requiredFiles, build.Path)
+                }
+                if !slices.Contains(requiredFiles, build.SHA512Path) {
+                    requiredFiles = append(requiredFiles, build.SHA512Path)
+                }
+            }
+        }
+    }
+    var missingFiles []string
+    for _, file := range requiredFiles {
+        _, err := os.Stat(file)
+        if os.IsNotExist(err) {
+            missingFiles = append(missingFiles, file)
+        } else if err != nil {
+            return err
+        }
+    }
+    if len(missingFiles) > 0 {
+        return fmt.Errorf("missing required Elastic Agent package builds for integration runner to execute: %s", strings.Join(missingFiles, ", "))
+    }
+    return nil
+}
+
+// getBuilds returns the builds for the batch.
+func (r *Runner) getBuilds(b common.OSBatch) []common.Build {
+    var builds []common.Build
+    formats := []string{"targz", "zip", "rpm", "deb"}
+    binaryName := "elastic-agent"
+
+    var packages []string
+    for _, p := range r.cfg.Packages {
+        if slices.Contains(formats, p) {
+            packages = append(packages, p)
+        }
+    }
+    if len(packages) == 0 {
+        packages = formats
+    }
+
+    // this is for testing beats in a serverless environment
+    if strings.HasSuffix(r.cfg.BinaryName, "beat") {
+        var serverlessPackages []string
+        for _, p := range packages {
+            if slices.Contains([]string{"targz", "zip"}, p) {
+                serverlessPackages = append(serverlessPackages, p)
+            }
+        }
+        packages = serverlessPackages
+    }
+
+    if r.cfg.BinaryName != "" {
+        binaryName = r.cfg.BinaryName
+    }
+
+    for _, f := range packages {
+        arch := b.OS.Arch
+        if arch == define.AMD64 {
+            arch = "x86_64"
+        }
+        suffix, err := testing.GetPackageSuffix(b.OS.Type, b.OS.Arch, f)
+        if err != nil {
+            // means that the OS type & arch don't support that package format
+            continue
+        }
+        packageName := filepath.Join(r.cfg.BuildDir, fmt.Sprintf("%s-%s-%s", binaryName, r.cfg.AgentVersion, suffix))
+        build := common.Build{
+            Version:    r.cfg.ReleaseVersion,
+            Type:       b.OS.Type,
+            Arch:       arch,
+            Path:       packageName,
+            SHA512Path: packageName + ".sha512",
+        }
+
+        builds = append(builds, build)
+    }
+    return builds
+}
+
+// prepare prepares for the runner to run.
+//
+// Creates the SSH keys to use and creates the archive of the repo to send to
+// the remote hosts.
+func (r *Runner) prepare(ctx context.Context) (ssh.AuthMethod, string, error) {
+    wd, err := WorkDir()
+    if err != nil {
+        return nil, "", err
+    }
+    cacheDir := filepath.Join(wd, r.cfg.StateDir)
+    _, err = os.Stat(cacheDir)
+    if errors.Is(err, os.ErrNotExist) {
+        err = os.Mkdir(cacheDir, 0755)
+        if err != nil {
+            return nil, "", fmt.Errorf("failed to create %q: %w", cacheDir, err)
+        }
+    } else if err != nil {
+        // unknown error
+        return nil, "", err
+    }
+
+    var auth ssh.AuthMethod
+    var repoArchive string
+    g, gCtx := errgroup.WithContext(ctx)
+    g.Go(func() error {
+        a, err := r.createSSHKey(cacheDir)
+        if err != nil {
+            return err
+        }
+        auth = a
+        return nil
+    })
+    g.Go(func() error {
+        repo, err := r.createRepoArchive(gCtx, r.cfg.RepoDir, cacheDir)
+        if err != nil {
+            return err
+        }
+        repoArchive = repo
+        return nil
+    })
+    err = g.Wait()
+    if err != nil {
+        return nil, "", err
+    }
+    return auth, repoArchive, err
+}
+
+// createSSHKey creates the required SSH keys
+func (r *Runner) createSSHKey(dir string) (ssh.AuthMethod, error) {
+    privateKey := filepath.Join(dir, "id_rsa")
+    _, priErr := os.Stat(privateKey)
+    publicKey := filepath.Join(dir, "id_rsa.pub")
+    _, pubErr := os.Stat(publicKey)
+    var signer ssh.Signer
+    if errors.Is(priErr, os.ErrNotExist) || errors.Is(pubErr, os.ErrNotExist) {
+        // either is missing (re-create)
+        r.logger.Logf("Create SSH keys to use for SSH")
+        _ = os.Remove(privateKey)
+        _ = os.Remove(publicKey)
+        pri, err := tssh.NewPrivateKey()
+        if err != nil {
+            return nil, fmt.Errorf("failed to create ssh private key: %w", err)
+        }
+        pubBytes, err := tssh.NewPublicKey(&pri.PublicKey)
+        if err != nil {
+            return nil, fmt.Errorf("failed to create ssh public key: %w", err)
+        }
+        priBytes := tssh.EncodeToPEM(pri)
+        err = os.WriteFile(privateKey, priBytes, 0600)
+        if err != nil {
+            return nil, fmt.Errorf("failed to write ssh private key: %w", err)
+        }
+        err = os.WriteFile(publicKey, pubBytes, 0644)
+        if err != nil {
+            return nil, fmt.Errorf("failed to write ssh public key: %w", err)
+        }
+        signer, err = ssh.ParsePrivateKey(priBytes)
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse ssh private key: %w", err)
+        }
+    } else if priErr != nil {
+        // unknown error
+        return nil, priErr
+    } else if pubErr != nil {
+        // unknown error
+        return nil, pubErr
+    } else {
+        // read from existing private key
+        priBytes, err := os.ReadFile(privateKey)
+        if err != nil {
+            return nil, fmt.Errorf("failed to read ssh private key %s: %w", privateKey, err)
+        }
+        signer, err = ssh.ParsePrivateKey(priBytes)
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse ssh private key: %w", err)
+        }
+    }
+    return ssh.PublicKeys(signer), nil
+}
+
+func (r *Runner) createRepoArchive(ctx context.Context, repoDir string, dir string) (string, error) {
+    zipPath := filepath.Join(dir, "agent-repo.zip")
+    _ = os.Remove(zipPath) // start fresh
+    r.logger.Logf("Creating zip archive of repo to send to remote hosts")
+    err := createRepoZipArchive(ctx, repoDir, zipPath)
+    if err != nil {
+        return "", fmt.Errorf("failed to create zip archive of repo: %w", err)
+    }
+    return zipPath, nil
+}
+
+// startStacks starts the stacks required for the tests to run
+func (r *Runner) startStacks(ctx context.Context) error {
+    var versions []string
+    batchToVersion := make(map[string]string)
+    for _, lb := range r.batches {
+        if !lb.Skip && lb.Batch.Stack != nil {
+            if lb.Batch.Stack.Version == "" {
+                // no version defined on the stack; set it to the defined stack version
+                lb.Batch.Stack.Version = r.cfg.StackVersion
+            }
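+            // record each distinct version only once; batchToVersion tracks
+            // which stack version every batch needs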
+            if !slices.Contains(versions, lb.Batch.Stack.Version) {
+                versions = append(versions, lb.Batch.Stack.Version)
+            }
+            batchToVersion[lb.ID] = lb.Batch.Stack.Version
+        }
+    }
+
+    var requests []stackReq
+    for _, version := range versions {
+        id := strings.ReplaceAll(version, ".", "")
+        requests = append(requests, stackReq{
+            request: common.StackRequest{ID: id, Version: version},
+            stack:   r.findStack(id),
+        })
+    }
+
+    reportResult := func(version string, stack common.Stack, err error) {
+        r.batchToStackMx.Lock()
+        defer r.batchToStackMx.Unlock()
+        res := stackRes{
+            stack: stack,
+            err:   err,
+        }
+        for batchID, batchVersion := range batchToVersion {
+            if batchVersion == version {
+                r.batchToStack[batchID] = res
+                ch, ok := r.batchToStackCh[batchID]
+                if ok {
+                    ch <- res
+                }
+            }
+        }
+    }
+
+    // start goroutines to provision the needed stacks
+    for _, request := range requests {
+        go func(ctx context.Context, req stackReq) {
+            var err error
+            var stack common.Stack
+            if req.stack != nil {
+                stack = *req.stack
+            } else {
+                stack, err = r.sp.Create(ctx, req.request)
+                if err != nil {
+                    reportResult(req.request.Version, stack, err)
+                    return
+                }
+                err = r.addOrUpdateStack(stack)
+                if err != nil {
+                    reportResult(stack.Version, stack, err)
+                    return
+                }
+            }
+
+            if stack.Ready {
+                reportResult(stack.Version, stack, nil)
+                return
+            }
+
+            stack, err = r.sp.WaitForReady(ctx, stack)
+            if err != nil {
+                reportResult(stack.Version, stack, err)
+                return
+            }
+
+            err = r.addOrUpdateStack(stack)
+            if err != nil {
+                reportResult(stack.Version, stack, err)
+                return
+            }
+
+            reportResult(stack.Version, stack, nil)
+        }(ctx, request)
+    }
+
+    return nil
+}
+
+func (r *Runner) getStackForBatchID(id string) (common.Stack, error) {
+    r.batchToStackMx.Lock()
+    res, ok := r.batchToStack[id]
+    if ok {
+        r.batchToStackMx.Unlock()
+        return res.stack, res.err
+    }
+    _, ok = r.batchToStackCh[id]
+    if ok {
+        r.batchToStackMx.Unlock()
+        return common.Stack{}, fmt.Errorf("getStackForBatchID called twice; this is not allowed")
+    }
+    ch := make(chan stackRes, 1)
+    r.batchToStackCh[id] = ch
+    r.batchToStackMx.Unlock()
+
+    // the stack should either be ready or have returned an error within 10
+    // minutes; the 12-minute timer only exists so that a bug in that code
+    // cannot block here forever
+    t := time.NewTimer(12 * time.Minute)
+    defer t.Stop()
+    select {
+    case <-t.C:
+        return common.Stack{}, fmt.Errorf("failed waiting for a response after 12 minutes")
+    case res = <-ch:
+        return res.stack, res.err
+    }
+}
+
+func (r *Runner) findInstance(id string) (StateInstance, bool) {
+    r.stateMx.Lock()
+    defer r.stateMx.Unlock()
+    for _, existing := range r.state.Instances {
+        if existing.Same(StateInstance{
+            Instance: common.Instance{ID: id, Provisioner: r.ip.Name()},
+        }) {
+            return existing, true
+        }
+    }
+    return StateInstance{}, false
+}
+
+func (r *Runner) addOrUpdateInstance(instance StateInstance) error {
+    r.stateMx.Lock()
+    defer r.stateMx.Unlock()
+
+    state := r.state
+    found := false
+    for idx, existing := range state.Instances {
+        if existing.Same(instance) {
+            state.Instances[idx] = instance
+            found = true
+            break
+        }
+    }
+    if !found {
+        state.Instances = append(state.Instances, instance)
+    }
+    r.state = state
+    return r.writeState()
+}
+
+func (r *Runner) findStack(id string) *common.Stack {
+    r.stateMx.Lock()
+    defer r.stateMx.Unlock()
+    for _, existing := range r.state.Stacks {
+        if existing.Same(common.Stack{ID: id, Provisioner: r.sp.Name()}) {
+            return &existing
+        }
+    }
+    return nil
+}
+
+func (r *Runner) addOrUpdateStack(stack common.Stack) error {
+    r.stateMx.Lock()
+    defer r.stateMx.Unlock()
+
+    state := r.state
+    found := false
+    for idx, existing := range state.Stacks {
+        if existing.Same(stack) {
+            state.Stacks[idx] = stack
+            found = true
+            break
+        }
+    }
+    if !found {
+        state.Stacks = append(state.Stacks, stack)
+    }
+    r.state = state
+    return r.writeState()
+}
+
+func (r *Runner) loadState() error {
+    data, err := os.ReadFile(r.getStatePath())
+    if err != nil && !errors.Is(err, os.ErrNotExist) {
+        return fmt.Errorf("failed to read state file %s: %w", r.getStatePath(), err)
+    }
+    var state State
+    err = yaml.Unmarshal(data, &state)
+    if err != nil {
+        return fmt.Errorf("failed to unmarshal state file %s: %w", r.getStatePath(), err)
+    }
+    r.state = state
+    return nil
+}
+
+func (r *Runner) writeState() error {
+    data, err := yaml.Marshal(&r.state)
+    if err != nil {
+        return fmt.Errorf("failed to marshal state: %w", err)
+    }
+    err = os.WriteFile(r.getStatePath(), data, 0644)
+    if err != nil {
+        return fmt.Errorf("failed to write state file %s: %w", r.getStatePath(), err)
+    }
+    return nil
+}
+
+func (r *Runner) getStatePath() string {
+    return filepath.Join(r.cfg.StateDir, "state.yml")
+}
+
+func (r *Runner) mergeResults(results map[string]common.OSRunnerResult) (Result, error) {
+    var rawOutput bytes.Buffer
+    var jsonOutput bytes.Buffer
+    var suites JUnitTestSuites
+    for id, res := range results {
+        for _, pkg := range res.Packages {
+            err := mergePackageResult(pkg, id, false, &rawOutput, &jsonOutput, &suites)
+            if err != nil {
+                return Result{}, err
+            }
+        }
+        for _, pkg := range res.SudoPackages {
+            err := mergePackageResult(pkg, id, true, &rawOutput, &jsonOutput, &suites)
+            if err != nil {
+                return Result{}, err
+            }
+        }
+    }
+    var junitBytes bytes.Buffer
+    err := writeJUnit(&junitBytes, suites)
+    if err != nil {
+        return Result{}, fmt.Errorf("failed to marshal junit: %w", err)
+    }
+
+    var complete Result
+    for _, suite := range suites.Suites {
+        complete.Tests += suite.Tests
+        complete.Failures += suite.Failures
+    }
+    complete.Output = rawOutput.Bytes()
+    complete.JSONOutput = jsonOutput.Bytes()
+    complete.XMLOutput = junitBytes.Bytes()
+    return complete, nil
+}
+
+// Same returns true if other is the same instance as this one.
+// Two instances are considered the same if their provisioner and ID are the same.
+func (s StateInstance) Same(other StateInstance) bool {
+    return s.Provisioner == other.Provisioner &&
+        s.ID == other.ID
+}
+
+func mergePackageResult(pkg common.OSRunnerPackageResult, batchName string, sudo bool, rawOutput io.Writer, jsonOutput io.Writer, suites *JUnitTestSuites) error {
+    suffix := ""
+    sudoStr := "false"
+    if sudo {
+        suffix = "(sudo)"
+        sudoStr = "true"
+    }
+    if pkg.Output != nil {
+        rawLogger := &runnerLogger{writer: rawOutput, timestamp: false}
+        pkgWriter := common.NewPrefixOutput(rawLogger, fmt.Sprintf("%s(%s)%s: ", pkg.Name, batchName, suffix))
+        _, err := pkgWriter.Write(pkg.Output)
+        if err != nil {
+            return fmt.Errorf("failed to write raw output from %s %s: %w", batchName, pkg.Name, err)
+        }
+    }
+    if pkg.JSONOutput != nil {
+        jsonSuffix, err := suffixJSONResults(pkg.JSONOutput, fmt.Sprintf("(%s)%s", batchName, suffix))
+        if err != nil {
+            return fmt.Errorf("failed to suffix json output from %s %s: %w", batchName, pkg.Name, err)
+        }
+        _, err = jsonOutput.Write(jsonSuffix)
+        if err != nil {
+            return fmt.Errorf("failed to write json output from %s %s: %w", batchName, pkg.Name, err)
+        }
+    }
+    if pkg.XMLOutput != nil {
+        pkgSuites, err := parseJUnit(pkg.XMLOutput)
+        if err != nil {
+            return fmt.Errorf("failed to parse junit from %s %s: %w", batchName, pkg.Name, err)
+        }
+        for _, pkgSuite := range pkgSuites.Suites {
+            // append the batch information to the suite name
+            pkgSuite.Name = fmt.Sprintf("%s(%s)%s", pkgSuite.Name, batchName, suffix)
+            pkgSuite.Properties = append(pkgSuite.Properties, JUnitProperty{
+                Name:  "batch",
+                Value: batchName,
+            }, JUnitProperty{
+                Name:  "sudo",
+                Value: sudoStr,
+            })
+            suites.Suites = append(suites.Suites, pkgSuite)
+        }
+    }
+    return nil
+}
+
+func findBatchByID(id string, batches []common.OSBatch) (common.OSBatch, bool) {
+    for _, batch := range batches {
+        if batch.ID == id {
+            return batch, true
+        }
+    }
+    return common.OSBatch{}, false
+}
+
+type runnerLogger struct {
+    writer    io.Writer
+    timestamp bool
+}
+
+func (l *runnerLogger) Logf(format string, args ...any) {
+    if l.timestamp {
+        _, _ = fmt.Fprintf(l.writer, "[%s] >>> %s\n", time.Now().Format(time.StampMilli), fmt.Sprintf(format, args...))
+    } else {
+        _, _ = fmt.Fprintf(l.writer, ">>> %s\n", fmt.Sprintf(format, args...))
+    }
+}
+
+type batchLogger struct {
+    wrapped common.Logger
+    prefix  string
+}
+
+func filterSupportedOS(batches []common.OSBatch, provisioner common.InstanceProvisioner) []common.OSBatch {
+    var filtered []common.OSBatch
+    for _, batch := range batches {
+        if ok := provisioner.Supported(batch.OS.OS); ok {
+            filtered = append(filtered, batch)
+        }
+    }
+    return filtered
+}
+
+func (b *batchLogger) Logf(format string, args ...any) {
+    b.wrapped.Logf("(%s) %s", b.prefix, fmt.Sprintf(format, args...))
+}
+
+type stackRes struct {
+    stack common.Stack
+    err   error
+}
+
+type stackReq struct {
+    request common.StackRequest
+    stack   *common.Stack
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/runner/utils.go b/dev-tools/mage/target/srvrlesstest/testing/runner/utils.go
new file mode 100644
index 00000000000..26a1c0bd7bf
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/runner/utils.go
@@ -0,0 +1,54 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+    "fmt"
+    "io"
+    "os"
+    "os/exec"
+    "path/filepath"
+
+    "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/core/process"
+)
+
+// WorkDir returns the current absolute working directory.
+func WorkDir() (string, error) {
+    wd, err := os.Getwd()
+    if err != nil {
+        return "", fmt.Errorf("failed to get work directory: %w", err)
+    }
+    wd, err = filepath.Abs(wd)
+    if err != nil {
+        return "", fmt.Errorf("failed to get absolute path to work directory: %w", err)
+    }
+    return wd, nil
+}
+
+// AttachOut returns a CmdOption that attaches the command's stdout to w.
+func AttachOut(w io.Writer) process.CmdOption {
+    return func(c *exec.Cmd) error {
+        c.Stdout = w
+        return nil
+    }
+}
+
+// AttachErr returns a CmdOption that attaches the command's stderr to w.
+func AttachErr(w io.Writer) process.CmdOption {
+    return func(c *exec.Cmd) error {
+        c.Stderr = w
+        return nil
+    }
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/ssh/client.go b/dev-tools/mage/target/srvrlesstest/testing/ssh/client.go
new file mode 100644
index 00000000000..fac1ad05195
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/ssh/client.go
@@ -0,0 +1,301 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ssh
+
+import (
+    "bytes"
+    "context"
+    "fmt"
+    "io"
+    "net"
+    "os"
+    "strings"
+    "time"
+
+    "golang.org/x/crypto/ssh"
+)
+
+type logger interface {
+    // Logf logs the message for this runner.
+    Logf(format string, args ...any)
+}
+
+type sshClient struct {
+    ip       string
+    username string
+    auth     ssh.AuthMethod
+    logger   logger
+    c        *ssh.Client
+}
+
+// NewClient creates a new SSH client for the host; Connect must be called to
+// establish the connection.
+func NewClient(ip string, username string, sshAuth ssh.AuthMethod, logger logger) SSHClient {
+    return &sshClient{
+        ip:       ip,
+        username: username,
+        auth:     sshAuth,
+        logger:   logger,
+    }
+}
+
+// Connect connects to the host.
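+// It retries the dial in a loop with exponential backoff (starting at 1s)
+// until the connection succeeds or ctx is cancelled, returning the last
+// dial error on cancellation.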
+func (s *sshClient) Connect(ctx context.Context) error {
+    var lastErr error
+    config := &ssh.ClientConfig{
+        User:            s.username,
+        HostKeyCallback: ssh.InsecureIgnoreHostKey(), //nolint:gosec // this is the test framework; host key checks are not needed
+        Auth:            []ssh.AuthMethod{s.auth},
+        Timeout:         30 * time.Second,
+    }
+    addr := net.JoinHostPort(s.ip, "22")
+
+    tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
+    if err != nil {
+        return fmt.Errorf("unable to resolve ssh address %q: %w", addr, err)
+    }
+    delay := 1 * time.Second
+    for {
+        if ctx.Err() != nil {
+            if lastErr == nil {
+                return ctx.Err()
+            }
+            return lastErr
+        }
+        if lastErr != nil {
+            s.logger.Logf("ssh connect error: %q, will try again in %s", lastErr, delay)
+            time.Sleep(delay)
+            delay = 2 * delay
+        }
+        conn, err := net.DialTCP("tcp", nil, tcpAddr)
+        if err != nil {
+            lastErr = fmt.Errorf("error dialing tcp address %q: %w", addr, err)
+            continue
+        }
+        err = conn.SetKeepAlive(true)
+        if err != nil {
+            _ = conn.Close()
+            lastErr = fmt.Errorf("error setting TCP keepalive for ssh to %q: %w", addr, err)
+            continue
+        }
+        err = conn.SetKeepAlivePeriod(config.Timeout)
+        if err != nil {
+            _ = conn.Close()
+            lastErr = fmt.Errorf("error setting TCP keepalive period for ssh to %q: %w", addr, err)
+            continue
+        }
+        sshConn, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
+        if err != nil {
+            _ = conn.Close()
+            lastErr = fmt.Errorf("error NewClientConn for ssh to %q: %w", addr, err)
+            continue
+        }
+        s.c = ssh.NewClient(sshConn, chans, reqs)
+        return nil
+    }
+}
+
+// ConnectWithTimeout connects to the host with a timeout.
+func (s *sshClient) ConnectWithTimeout(ctx context.Context, timeout time.Duration) error {
+    ctx, cancel := context.WithTimeout(ctx, timeout)
+    defer cancel()
+    return s.Connect(ctx)
+}
+
+// Close closes the client.
+func (s *sshClient) Close() error {
+    if s.c != nil {
+        err := s.c.Close()
+        s.c = nil
+        return err
+    }
+    return nil
+}
+
+// Reconnect disconnects and reconnects to the host.
+func (s *sshClient) Reconnect(ctx context.Context) error {
+    _ = s.Close()
+    return s.Connect(ctx)
+}
+
+// ReconnectWithTimeout disconnects and reconnects to the host with a timeout.
+func (s *sshClient) ReconnectWithTimeout(ctx context.Context, timeout time.Duration) error {
+    ctx, cancel := context.WithTimeout(ctx, timeout)
+    defer cancel()
+    return s.Reconnect(ctx)
+}
+
+// NewSession opens a new Session for this host.
+func (s *sshClient) NewSession() (*ssh.Session, error) {
+    return s.c.NewSession()
+}
+
+// Exec runs a command on the host.
+func (s *sshClient) Exec(ctx context.Context, cmd string, args []string, stdin io.Reader) ([]byte, []byte, error) {
+    if ctx.Err() != nil {
+        return nil, nil, ctx.Err()
+    }
+
+    var session *ssh.Session
+    cmdArgs := []string{cmd}
+    cmdArgs = append(cmdArgs, args...)
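+    // the command and args are joined into a single shell command line without
+    // any quoting, so callers must quote arguments containing spaces themselves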
+    cmdStr := strings.Join(cmdArgs, " ")
+    session, err := s.NewSession()
+    if err != nil {
+        s.logger.Logf("new session failed: %q, trying reconnect", err)
+        lErr := s.Reconnect(ctx)
+        if lErr != nil {
+            return nil, nil, fmt.Errorf("ssh reconnect failed: %w, after new session failed: %w", lErr, err)
+        }
+        session, lErr = s.NewSession()
+        if lErr != nil {
+            return nil, nil, fmt.Errorf("new session failed after reconnect: %w, original new session failure was: %w", lErr, err)
+        }
+    }
+    defer session.Close()
+
+    var stdout bytes.Buffer
+    var stderr bytes.Buffer
+    session.Stdout = &stdout
+    session.Stderr = &stderr
+    if stdin != nil {
+        session.Stdin = stdin
+    }
+    err = session.Run(cmdStr)
+    if err != nil {
+        return stdout.Bytes(), stderr.Bytes(), fmt.Errorf("could not run %q through SSH: %w",
+            cmdStr, err)
+    }
+    return stdout.Bytes(), stderr.Bytes(), err
+}
+
+// ExecWithRetry runs the command in a loop, waiting the interval between attempts.
+func (s *sshClient) ExecWithRetry(ctx context.Context, cmd string, args []string, interval time.Duration) ([]byte, []byte, error) {
+    var lastErr error
+    var lastStdout []byte
+    var lastStderr []byte
+    for {
+        // the length of time for running the command is not blocked on the interval
+        // don't create a new context with the interval as its timeout
+        stdout, stderr, err := s.Exec(ctx, cmd, args, nil)
+        if err == nil {
+            return stdout, stderr, nil
+        }
+        s.logger.Logf("ssh exec error: %q, will try again in %s", err, interval)
+        lastErr = err
+        lastStdout = stdout
+        lastStderr = stderr
+
+        // wait for the interval or ctx to be cancelled
+        select {
+        case <-ctx.Done():
+            if lastErr != nil {
+                return lastStdout, lastStderr, lastErr
+            }
+            return nil, nil, ctx.Err()
+        case <-time.After(interval):
+        }
+    }
+}
+
+// Copy copies the filePath to the host at dest.
+func (s *sshClient) Copy(filePath string, dest string) error {
+    f, err := os.Open(filePath)
+    if err != nil {
+        return err
+    }
+    defer f.Close()
+    fs, err := f.Stat()
+    if err != nil {
+        return err
+    }
+
+    session, err := s.NewSession()
+    if err != nil {
+        return err
+    }
+    defer session.Close()
+
+    w, err := session.StdinPipe()
+    if err != nil {
+        return err
+    }
+
+    cmd := fmt.Sprintf("scp -t %s", dest)
+    if err := session.Start(cmd); err != nil {
+        _ = w.Close()
+        return err
+    }
+
+    errCh := make(chan error)
+    go func() {
+        errCh <- session.Wait()
+    }()
+
+    _, err = fmt.Fprintf(w, "C%#o %d %s\n", fs.Mode().Perm(), fs.Size(), dest)
+    if err != nil {
+        _ = w.Close()
+        <-errCh
+        return err
+    }
+    _, err = io.Copy(w, f)
+    if err != nil {
+        _ = w.Close()
+        <-errCh
+        return err
+    }
+    _, _ = fmt.Fprint(w, "\x00")
+    _ = w.Close()
+    return <-errCh
+}
+
+// GetFileContents returns the file content.
+func (s *sshClient) GetFileContents(ctx context.Context, filename string, opts ...FileContentsOpt) ([]byte, error) {
+    var stdout bytes.Buffer
+    err := s.GetFileContentsOutput(ctx, filename, &stdout, opts...)
+    if err != nil {
+        return nil, err
+    }
+    return stdout.Bytes(), nil
+}
+
+// GetFileContentsOutput returns the file content, writing it into output.
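+// The fetch command defaults to `cat` and can be overridden; for example, a
+// hypothetical call using WithContentFetchCommand("sudo cat") would fetch
+// files readable only by root.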
+func (s *sshClient) GetFileContentsOutput(ctx context.Context, filename string, output io.Writer, opts ...FileContentsOpt) error { + if ctx.Err() != nil { + return ctx.Err() + } + + var fco fileContentsOpts + fco.command = "cat" + for _, opt := range opts { + opt(&fco) + } + + session, err := s.NewSession() + if err != nil { + return err + } + defer session.Close() + + session.Stdout = output + err = session.Run(fmt.Sprintf("%s %s", fco.command, filename)) + if err != nil { + return err + } + return nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ssh/file.go b/dev-tools/mage/target/srvrlesstest/testing/ssh/file.go new file mode 100644 index 00000000000..1fd6ffd9e33 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ssh/file.go @@ -0,0 +1,32 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package ssh + +type fileContentsOpts struct { + command string +} + +// FileContentsOpt provides an option to modify how fetching files from the remote host work. +type FileContentsOpt func(opts *fileContentsOpts) + +// WithContentFetchCommand changes the command to use for fetching the file contents. +func WithContentFetchCommand(command string) FileContentsOpt { + return func(opts *fileContentsOpts) { + opts.command = command + } +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ssh/interface.go b/dev-tools/mage/target/srvrlesstest/testing/ssh/interface.go new file mode 100644 index 00000000000..487f19a5c06 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ssh/interface.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package ssh + +import ( + "context" + "io" + "time" + + "golang.org/x/crypto/ssh" +) + +// SSHClient is a *ssh.Client that provides a nice interface to work with. +type SSHClient interface { + // Connect connects to the host. + Connect(ctx context.Context) error + + // ConnectWithTimeout connects to the host with a timeout. + ConnectWithTimeout(ctx context.Context, timeout time.Duration) error + + // Close closes the client. 
+	// Close closes the client.
+	Close() error
+
+	// Reconnect disconnects and reconnects to the host.
+	Reconnect(ctx context.Context) error
+
+	// ReconnectWithTimeout disconnects and reconnects to the host with a timeout.
+	ReconnectWithTimeout(ctx context.Context, timeout time.Duration) error
+
+	// NewSession opens a new Session for this host.
+	NewSession() (*ssh.Session, error)
+
+	// Exec runs a command on the host.
+	Exec(ctx context.Context, cmd string, args []string, stdin io.Reader) ([]byte, []byte, error)
+
+	// ExecWithRetry runs the command in a loop, waiting the given interval between calls.
+	ExecWithRetry(ctx context.Context, cmd string, args []string, interval time.Duration) ([]byte, []byte, error)
+
+	// Copy copies the filePath to the host at dest.
+	Copy(filePath string, dest string) error
+
+	// GetFileContents returns the file content.
+	GetFileContents(ctx context.Context, filename string, opts ...FileContentsOpt) ([]byte, error)
+
+	// GetFileContentsOutput returns the file content, writing it to output.
+	GetFileContentsOutput(ctx context.Context, filename string, output io.Writer, opts ...FileContentsOpt) error
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/ssh/keys.go b/dev-tools/mage/target/srvrlesstest/testing/ssh/keys.go
new file mode 100644
index 00000000000..6d40a31fea1
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/ssh/keys.go
@@ -0,0 +1,60 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ssh
+
+import (
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+
+	"golang.org/x/crypto/ssh"
+)
+
+// NewPrivateKey creates a new RSA private key.
+func NewPrivateKey() (*rsa.PrivateKey, error) {
+	pk, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return nil, err
+	}
+	err = pk.Validate()
+	if err != nil {
+		return nil, err
+	}
+	return pk, nil
+}
+
+// EncodeToPEM encodes the private key to PEM format.
+func EncodeToPEM(privateKey *rsa.PrivateKey) []byte {
+	der := x509.MarshalPKCS1PrivateKey(privateKey)
+	privBlock := pem.Block{
+		Type:    "RSA PRIVATE KEY",
+		Headers: nil,
+		Bytes:   der,
+	}
+	return pem.EncodeToMemory(&privBlock)
+}
+
+// NewPublicKey returns the bytes for writing to a .pub file.
+func NewPublicKey(pk *rsa.PublicKey) ([]byte, error) {
+	pub, err := ssh.NewPublicKey(pk)
+	if err != nil {
+		return nil, err
+	}
+	return ssh.MarshalAuthorizedKey(pub), nil
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/supported/batch.go b/dev-tools/mage/target/srvrlesstest/testing/supported/batch.go
new file mode 100644
index 00000000000..5138bbaf225
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/supported/batch.go
@@ -0,0 +1,195 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package supported
+
+import (
+	"crypto/sha512"
+	"errors"
+	"fmt"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define"
+	"strings"
+	"unicode/utf8"
+)
+
+// CreateBatches creates the OSBatch set based on the defined supported OS's.
+func CreateBatches(batches []define.Batch, platforms []define.OS, groups []string, matrix bool, singleTest string) ([]common.OSBatch, error) {
+	var err error
+	var osBatches []common.OSBatch
+	for _, b := range batches {
+		lbs, err := createBatchesFromBatch(b, platforms, groups, matrix)
+		if err != nil {
+			return nil, err
+		}
+		if lbs != nil {
+			osBatches = append(osBatches, lbs...)
+		}
+	}
+	if singleTest != "" {
+		osBatches, err = filterSingleTest(osBatches, singleTest)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return osBatches, nil
+}
+
+func createBatchesFromBatch(batch define.Batch, platforms []define.OS, groups []string, matrix bool) ([]common.OSBatch, error) {
+	var batches []common.OSBatch
+	if len(groups) > 0 && !batchInGroups(batch, groups) {
+		return nil, nil
+	}
+	specifics, err := getSupported(batch.OS, platforms)
+	if errors.Is(err, ErrOSNotSupported) {
+		var s common.SupportedOS
+		s.OS.Type = batch.OS.Type
+		s.OS.Arch = batch.OS.Arch
+		s.OS.Distro = batch.OS.Distro
+		if s.OS.Distro == "" {
+			s.OS.Distro = "unknown"
+		}
+		if s.OS.Version == "" {
+			s.OS.Version = "unknown"
+		}
+		b := common.OSBatch{
+			OS:    s,
+			Batch: batch,
+			Skip:  true,
+		}
+		b.ID = createBatchID(b)
+		batches = append(batches, b)
+		return batches, nil
+	} else if err != nil {
+		return nil, err
+	}
+	if matrix {
+		for _, s := range specifics {
+			b := common.OSBatch{
+				OS:    s,
+				Batch: batch,
+				Skip:  false,
+			}
+			b.ID = createBatchID(b)
+			batches = append(batches, b)
+		}
+	} else {
+		b := common.OSBatch{
+			OS:    specifics[0],
+			Batch: batch,
+			Skip:  false,
+		}
+		b.ID = createBatchID(b)
+		batches = append(batches, b)
+	}
+	return batches, nil
+}
+
+func batchInGroups(batch define.Batch, groups []string) bool {
+	for _, g := range groups {
+		if batch.Group == g {
+			return true
+		}
+	}
+	return false
+}
+
+func filterSingleTest(batches []common.OSBatch, singleTest string) ([]common.OSBatch, error) {
+	var filtered []common.OSBatch
+	for _, batch := range batches {
+		batch, ok := filterSingleTestBatch(batch, singleTest)
+		if ok {
+			filtered = append(filtered, batch)
+		}
+	}
+	if len(filtered) == 0 {
+		return nil, fmt.Errorf("test not found: %s", singleTest)
+	}
+	return filtered, nil
+}
+
+func filterSingleTestBatch(batch common.OSBatch, testName string) (common.OSBatch, bool) {
+	for _, pt := range batch.Batch.Tests {
+		for _, t := range pt.Tests {
+			if t.Name == testName {
+				// filter batch to only run one test
+				batch.Batch.Tests = []define.BatchPackageTests{
+					{
+						Name:  pt.Name,
+						Tests: []define.BatchPackageTest{t},
+					},
+				}
+				batch.Batch.SudoTests = nil
+				// remove stack requirement when the test doesn't need a stack
+				if !t.Stack {
+					batch.Batch.Stack = nil
+				}
+				return batch, true
+			}
+		}
+	}
+	for _, pt := range batch.Batch.SudoTests {
+		for _, t := range pt.Tests {
+			if t.Name == testName {
+				// filter batch to only run one test
+				batch.Batch.SudoTests = []define.BatchPackageTests{
+					{
+						Name:  pt.Name,
+						Tests: []define.BatchPackageTest{t},
+					},
+				}
+				batch.Batch.Tests = nil
+				// remove stack requirement when the test doesn't need a stack
+				if !t.Stack {
+					batch.Batch.Stack = nil
+				}
+				return batch, true
+			}
+		}
+	}
+	return batch, false
+}
+
+// createBatchID creates a consistent/unique ID for the batch.
+//
+// The ID needs to be consistent so each execution of the runner always
+// selects the same ID for each batch.
+func createBatchID(batch common.OSBatch) string {
+	id := batch.OS.Type + "-" + batch.OS.Arch
+	if batch.OS.Type == define.Linux {
+		id += "-" + batch.OS.Distro
+	}
+	if batch.OS.Version != "" {
+		id += "-" + strings.Replace(batch.OS.Version, ".", "", -1)
+	}
+	if batch.OS.Type == define.Kubernetes && batch.OS.DockerVariant != "" {
+		id += "-" + batch.OS.DockerVariant
+	}
+	id += "-" + strings.Replace(batch.Batch.Group, ".", "", -1)
+
+	// The batch ID needs to be at most 63 characters long otherwise
+	// OGC will fail to instantiate the VM.
+	maxIDLen := 63
+	if len(id) > maxIDLen {
+		hash := fmt.Sprintf("%x", sha512.Sum384([]byte(id)))[:12]
+		hashLen := utf8.RuneCountInString(hash)
+		id = id[:maxIDLen-hashLen-1] + "-" + hash
+	}
+
+	return strings.ToLower(id)
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/supported/supported.go b/dev-tools/mage/target/srvrlesstest/testing/supported/supported.go
new file mode 100644
index 00000000000..c9a0a15eab8
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/supported/supported.go
@@ -0,0 +1,287 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package supported
+
+import (
+	"errors"
+	"fmt"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/kubernetes"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/linux"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/windows"
+)
+
+const (
+	Rhel = "rhel"
+	// Ubuntu is a Linux distro.
+	Ubuntu = "ubuntu"
+)
+
+var (
+	// ErrOSNotSupported is returned when it's an unsupported OS.
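+	//
+	// Callers detect it with errors.Is; createBatchesFromBatch, for example,
+	// builds an OSBatch with Skip set to true instead of failing:
+	//
+	//	if errors.Is(err, ErrOSNotSupported) {
+	//		// mark the batch as skipped
+	//	}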
+	ErrOSNotSupported = errors.New("os/arch not currently supported")
+)
+
+var (
+	// UbuntuAMD64_2404 - Ubuntu (amd64) 24.04
+	UbuntuAMD64_2404 = common.SupportedOS{
+		OS: define.OS{
+			Type:    define.Linux,
+			Arch:    define.AMD64,
+			Distro:  Ubuntu,
+			Version: "24.04",
+		},
+		Runner: linux.DebianRunner{},
+	}
+	// UbuntuAMD64_2204 - Ubuntu (amd64) 22.04
+	UbuntuAMD64_2204 = common.SupportedOS{
+		OS: define.OS{
+			Type:    define.Linux,
+			Arch:    define.AMD64,
+			Distro:  Ubuntu,
+			Version: "22.04",
+		},
+		Runner: linux.DebianRunner{},
+	}
+	// UbuntuAMD64_2004 - Ubuntu (amd64) 20.04
+	UbuntuAMD64_2004 = common.SupportedOS{
+		OS: define.OS{
+			Type:    define.Linux,
+			Arch:    define.AMD64,
+			Distro:  Ubuntu,
+			Version: "20.04",
+		},
+		Runner: linux.DebianRunner{},
+	}
+	// UbuntuARM64_2404 - Ubuntu (arm64) 24.04
+	UbuntuARM64_2404 = common.SupportedOS{
+		OS: define.OS{
+			Type:    define.Linux,
+			Arch:    define.ARM64,
+			Distro:  Ubuntu,
+			Version: "24.04",
+		},
+		Runner: linux.DebianRunner{},
+	}
+	// UbuntuARM64_2204 - Ubuntu (arm64) 22.04
+	UbuntuARM64_2204 = common.SupportedOS{
+		OS: define.OS{
+			Type:    define.Linux,
+			Arch:    define.ARM64,
+			Distro:  Ubuntu,
+			Version: "22.04",
+		},
+		Runner: linux.DebianRunner{},
+	}
+	// UbuntuARM64_2004 - Ubuntu (arm64) 20.04
+	UbuntuARM64_2004 = common.SupportedOS{
+		OS: define.OS{
+			Type:    define.Linux,
+			Arch:    define.ARM64,
+			Distro:  Ubuntu,
+			Version: "20.04",
+		},
+		Runner: linux.DebianRunner{},
+	}
+	// RhelAMD64_8 - RedHat Enterprise Linux (amd64) 8
+	RhelAMD64_8 = common.SupportedOS{
+		OS: define.OS{
+			Type:    define.Linux,
+			Arch:    define.AMD64,
+			Distro:  Rhel,
+			Version: "8",
+		},
+		Runner: linux.RhelRunner{},
+	}
+	// WindowsAMD64_2022 - Windows (amd64) Server 2022
+	WindowsAMD64_2022 = common.SupportedOS{
+		OS: define.OS{
+			Type:    define.Windows,
+			Arch:    define.AMD64,
+			Version: "2022",
+		},
+		Runner: windows.WindowsRunner{},
+	}
+	// WindowsAMD64_2022_Core - Windows (amd64) Server 2022 Core
+	WindowsAMD64_2022_Core = common.SupportedOS{
+		OS: define.OS{
+			Type:    define.Windows,
+			Arch:    define.AMD64,
+			Version: "2022-core",
+		},
+		Runner: windows.WindowsRunner{},
+	}
+	// WindowsAMD64_2019 - Windows (amd64) Server 2019
+	WindowsAMD64_2019 = common.SupportedOS{
+		OS: define.OS{
+			Type:    define.Windows,
+			Arch:    define.AMD64,
+			Version: "2019",
+		},
+		Runner: windows.WindowsRunner{},
+	}
+	// WindowsAMD64_2019_Core - Windows (amd64) Server 2019 Core
+	WindowsAMD64_2019_Core = common.SupportedOS{
+		OS: define.OS{
+			Type:    define.Windows,
+			Arch:    define.AMD64,
+			Version: "2019-core",
+		},
+		Runner: windows.WindowsRunner{},
+	}
+	// WindowsAMD64_2016 - Windows (amd64) Server 2016
+	WindowsAMD64_2016 = common.SupportedOS{
+		OS: define.OS{
+			Type:    define.Windows,
+			Arch:    define.AMD64,
+			Version: "2016",
+		},
+		Runner: windows.WindowsRunner{},
+	}
+	// WindowsAMD64_2016_Core - Windows (amd64) Server 2016 Core
+	WindowsAMD64_2016_Core = common.SupportedOS{
+		OS: define.OS{
+			Type:    define.Windows,
+			Arch:    define.AMD64,
+			Version: "2016-core",
+		},
+		Runner: windows.WindowsRunner{},
+	}
+)
+
+// supported defines the set of supported OS's.
+//
+// A provisioner might support a lesser number of these OS's, but the following
+// are known to be supported by our OS runner logic.
+//
+// In the case that a batch is not specific on the version and/or distro, the first
+// one in this list will be picked. So it's best to place the one that we want
+// tested the most at the top.
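+//
+// For example, a batch that only specifies define.OS{Type: define.Linux,
+// Arch: define.AMD64} resolves to UbuntuAMD64_2404 (the first match below)
+// when matrix mode is off, and to every matching entry (the three Ubuntu
+// amd64 versions plus RHEL 8) when it is on.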
+var supported = []common.SupportedOS{
+	UbuntuAMD64_2404,
+	UbuntuAMD64_2204,
+	UbuntuAMD64_2004,
+	UbuntuARM64_2404,
+	UbuntuARM64_2204,
+	UbuntuARM64_2004,
+	RhelAMD64_8,
+	WindowsAMD64_2022,
+	WindowsAMD64_2022_Core,
+	WindowsAMD64_2019,
+	WindowsAMD64_2019_Core,
+	// https://github.com/elastic/ingest-dev/issues/3484
+	// WindowsAMD64_2016,
+	// WindowsAMD64_2016_Core,
+}
+
+// init injects the kubernetes support list into the support list above.
+func init() {
+	for _, k8sSupport := range kubernetes.GetSupported() {
+		supported = append(supported, common.SupportedOS{
+			OS:     k8sSupport,
+			Runner: kubernetes.Runner{},
+		})
+	}
+}
+
+// osMatch returns true when the specific OS is a match for a non-specific OS.
+func osMatch(specific define.OS, notSpecific define.OS) bool {
+	if specific.Type != notSpecific.Type || specific.Arch != notSpecific.Arch {
+		return false
+	}
+	if notSpecific.Distro != "" && specific.Distro != notSpecific.Distro {
+		return false
+	}
+	if notSpecific.Version != "" && specific.Version != notSpecific.Version {
+		return false
+	}
+	if notSpecific.DockerVariant != "" && specific.DockerVariant != notSpecific.DockerVariant {
+		return false
+	}
+	return true
+}
+
+// getSupported returns all the supported OS's based on the provided OS profile
+// while using the provided platforms as a filter.
+func getSupported(os define.OS, platforms []define.OS) ([]common.SupportedOS, error) {
+	var match []common.SupportedOS
+	for _, s := range supported {
+		if osMatch(s.OS, os) && allowedByPlatforms(s.OS, platforms) {
+			match = append(match, s)
+		}
+	}
+	if len(match) > 0 {
+		return match, nil
+	}
+	return nil, fmt.Errorf("%w: %s/%s", ErrOSNotSupported, os.Type, os.Arch)
+}
+
+// allowedByPlatforms determines if the os is in the allowed list of platforms.
+func allowedByPlatforms(os define.OS, platforms []define.OS) bool {
+	if len(platforms) == 0 {
+		return true
+	}
+	for _, platform := range platforms {
+		if ok := allowedByPlatform(os, platform); ok {
+			return true
+		}
+	}
+	return false
+}
+
+// allowedByPlatform determines if the platform allows this os.
+func allowedByPlatform(os define.OS, platform define.OS) bool {
+	if os.Type != platform.Type {
+		return false
+	}
+	if platform.Arch == "" {
+		// not specific on arch
+		return true
+	}
+	if os.Arch != platform.Arch {
+		return false
+	}
+	if platform.Type == define.Linux {
+		// on Linux the distro is also matched
+		if platform.Distro == "" {
+			// not specific on distro
+			return true
+		}
+		if os.Distro != platform.Distro {
+			return false
+		}
+	}
+	if platform.Version == "" {
+		// not specific on version
+		return true
+	}
+	if os.Version != platform.Version {
+		return false
+	}
+	if platform.Type == define.Kubernetes {
+		// on Kubernetes the Docker variant is also matched
+		if platform.DockerVariant == "" {
+			return true
+		}
+		if os.DockerVariant != platform.DockerVariant {
+			return false
+		}
+	}
+	return true
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/windows/windows.go b/dev-tools/mage/target/srvrlesstest/testing/windows/windows.go
new file mode 100644
index 00000000000..b554507f36f
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/windows/windows.go
@@ -0,0 +1,342 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package windows
+
+import (
+	"context"
+	"fmt"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+// WindowsRunner is a handler for running tests on Windows.
+type WindowsRunner struct{}
+
+// Prepare prepares the test host.
+func (WindowsRunner) Prepare(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, arch string, goVersion string) error {
+	// install chocolatey
+	logger.Logf("Installing chocolatey")
+	chocoInstall := `"[System.Net.ServicePointManager]::SecurityProtocol = 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))"`
+	updateCtx, updateCancel := context.WithTimeout(ctx, 3*time.Minute)
+	defer updateCancel()
+	stdOut, errOut, err := sshRunPowershell(updateCtx, sshClient, chocoInstall)
+	if err != nil {
+		return fmt.Errorf("failed to install chocolatey: %w (stdout: %s, stderr: %s)", err, stdOut, errOut)
+	}
+	// reconnect to get updated environment variables (1 minute as it should be quick to reconnect)
+	err = sshClient.ReconnectWithTimeout(ctx, 1*time.Minute)
+	if err != nil {
+		return fmt.Errorf("failed to reconnect: %w (stdout: %s, stderr: %s)", err, stdOut, errOut)
+	}
+
+	// install curl
+	logger.Logf("Installing curl")
+	stdOut, errOut, err = sshClient.Exec(ctx, "choco", []string{"install", "-y", "curl"}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to install curl: %w (stdout: %s, stderr: %s)", err, stdOut, errOut)
+	}
+	// install make
+	logger.Logf("Installing make")
+	stdOut, errOut, err = sshClient.Exec(ctx, "choco", []string{"install", "-y", "make"}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to install make: %w (stdout: %s, stderr: %s)", err, stdOut, errOut)
+	}
+
+	// install golang (doesn't use choco, because sometimes it doesn't have the required version)
+	logger.Logf("Installing golang %s (%s)", goVersion, arch)
+	downloadURL := fmt.Sprintf("https://go.dev/dl/go%s.windows-%s.msi", goVersion, arch)
+	filename := path.Base(downloadURL)
+	stdOut, errOut, err = sshClient.Exec(ctx, "curl", []string{"-Ls", downloadURL, "--output", filename}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to download go from %s with curl: %w (stdout: %s, stderr: %s)", downloadURL, err, stdOut, errOut)
+	}
+	stdOut, errOut, err = sshClient.Exec(ctx, "msiexec", []string{"/i", filename, "/qn"}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to install go: %w (stdout: %s, stderr: %s)", err, stdOut, errOut)
+	}
+	// reconnect to get updated environment variables (1 minute as it should be quick to reconnect)
+	err = sshClient.ReconnectWithTimeout(ctx, 1*time.Minute)
+	if err != nil {
+		return fmt.Errorf("failed to reconnect: %w (stdout: %s, stderr: %s)", err, stdOut, errOut)
+	}
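+
+	// At this point chocolatey, curl, make, and Go have been installed and
+	// the session has been reconnected, so the updated PATH is in effect.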
+	return nil
+}
+
+// Copy places the required files on the host.
+func (WindowsRunner) Copy(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, repoArchive string, builds []common.Build) error {
+	// copy the archive and extract it on the host (tar exists and can extract zip on windows)
+	logger.Logf("Copying repo")
+	destRepoName := filepath.Base(repoArchive)
+	err := sshClient.Copy(repoArchive, destRepoName)
+	if err != nil {
+		return fmt.Errorf("failed to SCP repo archive %s: %w", repoArchive, err)
+	}
+
+	// ensure that the agent directory is removed (possible it already exists if the instance was already used);
+	// Windows errors if the directory doesn't exist, so any error here is ignored
+	_, _, _ = sshClient.Exec(ctx, "rmdir", []string{"agent", "/s", "/q"}, nil)
+
+	stdOut, errOut, err := sshClient.Exec(ctx, "mkdir", []string{"agent"}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to mkdir agent: %w (stdout: %s, stderr: %s)", err, stdOut, errOut)
+	}
+	stdOut, errOut, err = sshClient.Exec(ctx, "tar", []string{"-xf", destRepoName, "-C", "agent"}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to unzip %s to the agent directory: %w (stdout: %s, stderr: %s)", destRepoName, err, stdOut, errOut)
+	}
+
+	// install mage and prepare for testing
+	logger.Logf("Running make mage and prepareOnRemote")
+	stdOut, errOut, err = sshClient.Exec(ctx, "cd", []string{"agent", "&&", "make", "mage", "&&", "mage", "integration:prepareOnRemote"}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to perform make mage and prepareOnRemote: %w (stdout: %s, stderr: %s)", err, stdOut, errOut)
+	}
+
+	// determine if the build needs to be replaced on the host;
+	// if it already exists and the SHA512 contents are the same,
+	// there is no reason to waste time uploading the build
+	for _, build := range builds {
+		copyBuild := true
+		localSHA512, err := os.ReadFile(build.SHA512Path)
+		if err != nil {
+			return fmt.Errorf("failed to read local SHA512 contents %s: %w", build.SHA512Path, err)
+		}
+		hostSHA512Path := filepath.Base(build.SHA512Path)
+		hostSHA512, err := sshClient.GetFileContents(ctx, hostSHA512Path, ssh.WithContentFetchCommand("type"))
+		if err == nil {
+			if string(localSHA512) == string(hostSHA512) {
+				logger.Logf("Skipping copy agent build %s; already the same", filepath.Base(build.Path))
+				copyBuild = false
+			}
+		}
+
+		if copyBuild {
+			// ensure the existing copies are removed first
+			toRemove := filepath.Base(build.Path)
+			stdOut, errOut, err = sshClient.Exec(ctx,
+				"del", []string{toRemove, "/f", "/q"}, nil)
+			if err != nil {
+				return fmt.Errorf("failed to remove %q: %w (stdout: %q, stderr: %q)",
+					toRemove, err, stdOut, errOut)
+			}
+
+			toRemove = filepath.Base(build.SHA512Path)
+			stdOut, errOut, err = sshClient.Exec(ctx,
+				"del", []string{toRemove, "/f", "/q"}, nil)
+			if err != nil {
+				return fmt.Errorf("failed to remove %q: %w (stdout: %q, stderr: %q)",
+					toRemove, err, stdOut, errOut)
+			}
+
+			logger.Logf("Copying agent build %s", filepath.Base(build.Path))
+		}
+
+		for _, buildPath := range []string{build.Path, build.SHA512Path} {
+			if copyBuild {
+				err = sshClient.Copy(buildPath, filepath.Base(buildPath))
+				if err != nil {
+					return fmt.Errorf("failed to SCP build %s: %w", filepath.Base(buildPath), err)
+				}
+			}
+			insideAgentDir := filepath.Join("agent", buildPath)
+			// possible the build path already exists; 'mkdir' on windows fails if it does,
+			// so the error from this call is ignored
+			_, _, _ = sshClient.Exec(ctx, "mkdir", []string{toWindowsPath(filepath.Dir(insideAgentDir))}, nil)
+			stdOut, errOut, err = sshClient.Exec(ctx, "mklink", []string{"/h", toWindowsPath(insideAgentDir), filepath.Base(buildPath)}, nil)
+			if err != nil {
+				return fmt.Errorf("failed to hard link %s to %s: %w (stdout: %s, stderr: %s)", filepath.Base(buildPath), toWindowsPath(insideAgentDir), err, stdOut, errOut)
+			}
+		}
+	}
+
+	return nil
+}
+
+// Run runs the tests on the host.
+func (WindowsRunner) Run(ctx context.Context, verbose bool, c ssh.SSHClient, logger common.Logger, agentVersion string, prefix string, batch define.Batch, env map[string]string) (common.OSRunnerResult, error) {
+	var tests []string
+	for _, pkg := range batch.Tests {
+		for _, test := range pkg.Tests {
+			tests = append(tests, fmt.Sprintf("%s:%s", pkg.Name, test.Name))
+		}
+	}
+	var sudoTests []string
+	for _, pkg := range batch.SudoTests {
+		for _, test := range pkg.Tests {
+			sudoTests = append(sudoTests, fmt.Sprintf("%s:%s", pkg.Name, test.Name))
+		}
+	}
+
+	var result common.OSRunnerResult
+	if len(tests) > 0 {
+		script := toPowershellScript(agentVersion, prefix, verbose, tests, env)
+
+		results, err := runTestsOnWindows(ctx, logger, "non-sudo", prefix, script, c, batch.Tests)
+		if err != nil {
+			return common.OSRunnerResult{}, fmt.Errorf("error running non-sudo tests: %w", err)
+		}
+		result.Packages = results
+	}
+
+	if len(sudoTests) > 0 {
+		prefix := fmt.Sprintf("%s-sudo", prefix)
+		script := toPowershellScript(agentVersion, prefix, verbose, sudoTests, env)
+
+		results, err := runTestsOnWindows(ctx, logger, "sudo", prefix, script, c, batch.SudoTests)
+		if err != nil {
+			return common.OSRunnerResult{}, fmt.Errorf("error running sudo tests: %w", err)
+		}
+		result.SudoPackages = results
+
+	}
+	return result, nil
+}
+
+// Diagnostics gathers any diagnostics from the host.
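+// It lists agent\build\diagnostics with "dir /b" and copies each file to the
+// destination directory by streaming it through the Windows "type" command.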
+func (WindowsRunner) Diagnostics(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, destination string) error {
+	diagnosticDir := "agent\\build\\diagnostics"
+	stdOut, _, err := sshClient.Exec(ctx, "dir", []string{diagnosticDir, "/b"}, nil)
+	if err != nil {
+		//nolint:nilerr // failed to list the directory, probably don't have any diagnostics (do nothing)
+		return nil
+	}
+	eachDiagnostic := strings.Split(string(stdOut), "\n")
+	for _, filename := range eachDiagnostic {
+		filename = strings.TrimSpace(filename)
+		if filename == "" {
+			continue
+		}
+
+		// don't use filepath.Join as we need this to work on Linux/Darwin as well;
+		// if we used `filepath.Join` on a Linux/Darwin host connected to a Windows host
+		// it would use a `/` and that would be incorrect for Windows
+		fp := fmt.Sprintf("%s\\%s", diagnosticDir, filename)
+		// use filepath.Join on this path because it's a path on this specific host platform
+		dp := filepath.Join(destination, filename)
+		logger.Logf("Copying diagnostic %s", filename)
+		out, err := os.Create(dp)
+		if err != nil {
+			return fmt.Errorf("failed to create file %s: %w", dp, err)
+		}
+		err = sshClient.GetFileContentsOutput(ctx, fp, out, ssh.WithContentFetchCommand("type"))
+		_ = out.Close()
+		if err != nil {
+			return fmt.Errorf("failed to copy file from remote host to %s: %w", dp, err)
+		}
+	}
+	return nil
+}
+
+func sshRunPowershell(ctx context.Context, sshClient ssh.SSHClient, cmd string) ([]byte, []byte, error) {
+	return sshClient.ExecWithRetry(ctx, "powershell", []string{
+		"-NoProfile",
+		"-InputFormat", "None",
+		"-ExecutionPolicy", "Bypass",
+		"-Command", cmd,
+	}, 15*time.Second)
+}
+
+func toPowershellScript(agentVersion string, prefix string, verbose bool, tests []string, env map[string]string) string {
+	var sb strings.Builder
+	for k, v := range env {
+		sb.WriteString("$env:")
+		sb.WriteString(k)
+		sb.WriteString("=\"")
+		sb.WriteString(v)
+		sb.WriteString("\"\n")
+	}
+	sb.WriteString("$env:AGENT_VERSION=\"")
+	sb.WriteString(agentVersion)
+	sb.WriteString("\"\n")
+	sb.WriteString("$env:TEST_DEFINE_PREFIX=\"")
+	sb.WriteString(prefix)
+	sb.WriteString("\"\n")
+	sb.WriteString("$env:TEST_DEFINE_TESTS=\"")
+	sb.WriteString(strings.Join(tests, ","))
+	sb.WriteString("\"\n")
+	sb.WriteString("cd agent\n")
+	sb.WriteString("mage ")
+	if verbose {
+		sb.WriteString("-v ")
+	}
+	sb.WriteString("integration:testOnRemote\n")
+	return sb.String()
+}
+
+func runTestsOnWindows(ctx context.Context, logger common.Logger, name string, prefix string, script string, sshClient ssh.SSHClient, tests []define.BatchPackageTests) ([]common.OSRunnerPackageResult, error) {
+	execTest := strings.NewReader(script)
+
+	session, err := sshClient.NewSession()
+	if err != nil {
+		return nil, fmt.Errorf("failed to start session: %w", err)
+	}
+
+	session.Stdout = common.NewPrefixOutput(logger, fmt.Sprintf("Test output (%s) (stdout): ", name))
+	session.Stderr = common.NewPrefixOutput(logger, fmt.Sprintf("Test output (%s) (stderr): ", name))
+	session.Stdin = execTest
+	// allowed to fail because tests might fail
+	logger.Logf("Running %s tests...", name)
+	err = session.Run("powershell -noprofile -noninteractive -")
+	if err != nil {
+		logger.Logf("%s tests failed: %s", name, err)
+	}
+	// this seems to always return an error
+	_ = session.Close()
+
+	var result []common.OSRunnerPackageResult
+	// fetch the contents for each package
+	for _, pkg := range tests {
+		resultPkg, err := getWindowsRunnerPackageResult(ctx, sshClient, pkg, prefix)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, resultPkg)
+	}
+	return result, nil
+}
+
+func toWindowsPath(path string) string {
+	return strings.ReplaceAll(path, "/", "\\")
+}
+
+func getWindowsRunnerPackageResult(ctx context.Context, sshClient ssh.SSHClient, pkg define.BatchPackageTests, prefix string) (common.OSRunnerPackageResult, error) {
+	var err error
+	var resultPkg common.OSRunnerPackageResult
+	resultPkg.Name = pkg.Name
+	outputPath := fmt.Sprintf("%%home%%\\agent\\build\\TEST-go-remote-%s.%s", prefix, filepath.Base(pkg.Name))
+	resultPkg.Output, err = sshClient.GetFileContents(ctx, outputPath+".out", ssh.WithContentFetchCommand("type"))
+	if err != nil {
+		return common.OSRunnerPackageResult{}, fmt.Errorf("failed to fetch test output at %s.out", outputPath)
+	}
+	resultPkg.JSONOutput, err = sshClient.GetFileContents(ctx, outputPath+".out.json", ssh.WithContentFetchCommand("type"))
+	if err != nil {
+		return common.OSRunnerPackageResult{}, fmt.Errorf("failed to fetch test output at %s.out.json", outputPath)
+	}
+	resultPkg.XMLOutput, err = sshClient.GetFileContents(ctx, outputPath+".xml", ssh.WithContentFetchCommand("type"))
+	if err != nil {
+		return common.OSRunnerPackageResult{}, fmt.Errorf("failed to fetch test output at %s.xml", outputPath)
+	}
+	return resultPkg, nil
+}
diff --git a/dev-tools/mage/target/srvrlesstest/utils/root_unix.go b/dev-tools/mage/target/srvrlesstest/utils/root_unix.go
new file mode 100644
index 00000000000..d410c5de16f
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/utils/root_unix.go
@@ -0,0 +1,33 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build !windows
+
+package utils
+
+import "os"
+
+const (
+	// PermissionUser is the permission level the user needs to be.
+	PermissionUser = "root"
+)
+
+// HasRoot returns true if the user has root permissions. The extra `nil`
+// matches the Windows implementation of HasRoot, which can return an error.
+func HasRoot() (bool, error) {
+	return os.Geteuid() == 0, nil
+}
diff --git a/dev-tools/mage/target/srvrlesstest/utils/root_windows.go b/dev-tools/mage/target/srvrlesstest/utils/root_windows.go
new file mode 100644
index 00000000000..d3a83e32005
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/utils/root_windows.go
@@ -0,0 +1,59 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build windows
+
+package utils
+
+import (
+	"fmt"
+
+	"golang.org/x/sys/windows"
+)
+
+const (
+	// PermissionUser is the permission level the user needs to be.
+	PermissionUser = "Administrator"
+)
+
+// HasRoot returns true if the user has Administrator/SYSTEM permissions.
+func HasRoot() (bool, error) {
+	var sid *windows.SID
+	// See https://docs.microsoft.com/en-us/windows/desktop/api/securitybaseapi/nf-securitybaseapi-checktokenmembership for more on the api
+	err := windows.AllocateAndInitializeSid(
+		&windows.SECURITY_NT_AUTHORITY,
+		2,
+		windows.SECURITY_BUILTIN_DOMAIN_RID,
+		windows.DOMAIN_ALIAS_RID_ADMINS,
+		0, 0, 0, 0, 0, 0,
+		&sid)
+	if err != nil {
+		return false, fmt.Errorf("allocate sid error: %w", err)
+	}
+	defer func() {
+		_ = windows.FreeSid(sid)
+	}()
+
+	token := windows.Token(0)
+
+	member, err := token.IsMember(sid)
+	if err != nil {
+		return false, fmt.Errorf("token membership error: %w", err)
+	}
+
+	return member, nil
+}
diff --git a/dev-tools/mage/target/srvrlesstest/utils/root_windows_test.go b/dev-tools/mage/target/srvrlesstest/utils/root_windows_test.go
new file mode 100644
index 00000000000..b8cd3080ea8
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/utils/root_windows_test.go
@@ -0,0 +1,33 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build windows
+
+package utils
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestHasRoot(t *testing.T) {
+	t.Run("check if user is admin", func(t *testing.T) {
+		_, err := HasRoot()
+		assert.NoError(t, err)
+	})
+}
diff --git a/go.mod b/go.mod
index 3e2fe304b67..a496d32f417 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,8 @@
 module github.com/elastic/beats/v7
 
-go 1.22.0
+go 1.22.3
+
+toolchain go1.22.7
 
 require (
 	cloud.google.com/go/bigquery v1.62.0
@@ -155,9 +157,9 @@ require (
 	gopkg.in/yaml.v2 v2.4.0
 	gotest.tools/gotestsum v1.7.0
 	howett.net/plist v1.0.1
-	k8s.io/api v0.29.5
-	k8s.io/apimachinery v0.29.5
-	k8s.io/client-go v0.29.5
+	k8s.io/api v0.31.1
+	k8s.io/apimachinery v0.31.1
+	k8s.io/client-go v0.31.1
 	kernel.org/pub/linux/libs/security/libcap/cap v1.2.57
 )
 
@@ -204,7 +206,7 @@ require (
 	github.com/go-ldap/ldap/v3 v3.4.6
 	github.com/gofrs/uuid/v5 v5.2.0
 	github.com/golang-jwt/jwt/v5 v5.2.1
-	github.com/google/cel-go v0.19.0
+	github.com/google/cel-go v0.20.1
 	github.com/googleapis/gax-go/v2 v2.13.0
 	github.com/gorilla/handlers v1.5.1
 	github.com/gorilla/mux v1.8.0
@@ -230,6 +232,7 @@ require (
 	golang.org/x/term v0.24.0
 	google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
+	sigs.k8s.io/e2e-framework v0.5.0
 )
 
 require (
@@ -289,9 +292,10 @@ require (
 	github.com/elazarl/goproxy v0.0.0-20240909085733-6741dbfc16a1 // indirect
 	github.com/elazarl/goproxy/ext v0.0.0-20240909085733-6741dbfc16a1 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
-	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
+	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
 	github.com/fearful-symmetry/gomsr v0.0.1 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect
 	github.com/go-logfmt/logfmt v0.6.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
@@ -349,7 +353,7 @@ require (
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/iochan v1.0.0 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
-	github.com/moby/spdystream v0.2.0 // indirect
+	github.com/moby/spdystream v0.4.0 // indirect
 	github.com/moby/sys/userns v0.1.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -370,6 +374,7 @@ require (
 	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/tklauser/numcpus v0.4.0 // indirect
 	github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
 	github.com/xdg-go/stringprep v1.0.4 // indirect
 	github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect
@@ -389,15 +394,17 @@ require (
 	golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 // indirect
 	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
 	kernel.org/pub/linux/libs/security/libcap/psx v1.2.57 // indirect
 	mvdan.cc/garble v0.12.1 // indirect
+	sigs.k8s.io/controller-runtime v0.19.0 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
-	sigs.k8s.io/yaml v1.3.0 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
 )
 
 require (
diff --git a/go.sum b/go.sum
index ba2722f5baa..81243a2d0d8 100644
--- a/go.sum
+++ b/go.sum
@@ -242,6 +242,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 h1:oMCHnXa6CCCafdPDbMh/lWRhRByN0VFLvv+g+ayx1SI=
 github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/bluekeyes/go-gitdiff v0.7.1 h1:graP4ElLRshr8ecu0UtqfNTCHrtSyZd3DABQm/DWesQ=
 github.com/bluekeyes/go-gitdiff v0.7.1/go.mod h1:QpfYYO1E0fTVHVZAZKiRjtSGY9823iCdvGXBcEzHGbM=
 github.com/bsm/sarama-cluster v2.1.14-0.20180625083203-7e67d87a6b3f+incompatible h1:4g18+HnTDwEtO0n7K8B1Kjq+04MEKJRkhJNQ/hb9d5A=
@@ -407,8 +409,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
-github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
+github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
 github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
 github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
@@ -427,6 +429,8 @@ github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp
 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA=
 github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
 github.com/go-faker/faker/v4 v4.2.0 h1:dGebOupKwssrODV51E0zbMrv5e2gO9VWSLNC1WDCpWg=
@@ -447,6 +451,8 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
+github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk=
 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
 github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
@@ -468,7 +474,8 @@ github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfC
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/gocarina/gocsv v0.0.0-20170324095351-ffef3ffc77be h1:zXHeEEJ231bTf/IXqvCfeaqjLpXsq42ybLoT4ROSR6Y=
 github.com/gocarina/gocsv v0.0.0-20170324095351-ffef3ffc77be/go.mod h1:/oj50ZdPq/cUjA02lMZhijk5kR31SEydKyqah1OgBuo=
 github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
@@ -522,8 +529,8 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/gomodule/redigo v1.8.3 h1:HR0kYDX2RJZvAup8CsiJwxB4dTCSC0AaUq6S4SiLwUc=
 github.com/gomodule/redigo v1.8.3/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
-github.com/google/cel-go v0.19.0 h1:vVgaZoHPBDd1lXCYGQOh5A06L4EtuIfmqQ/qnSXSKiU=
-github.com/google/cel-go v0.19.0/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
+github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
+github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
 github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg=
 github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
@@ -729,8 +736,8 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
 github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
-github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
-github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8=
+github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
 github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
 github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
 github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
@@ -760,11 +767,11 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N
 github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
 github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
-github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE=
-github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -890,8 +897,12 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17
 github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
 github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
 github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vladimirvivien/gexe v0.3.0 h1:4xwiOwGrDob5OMR6E92B9olDXYDglXdHhzR1ggYtWJM=
+github.com/vladimirvivien/gexe v0.3.0/go.mod h1:fp7cy60ON1xjhtEI/+bfSEIXX35qgmI+iRYlGOqbBFM=
 github.com/vmware/govmomi v0.39.0 h1:soLZ08Q2zvjRSinNup8xVlw0KDDCJPPA1rIDmBhi7As=
 github.com/vmware/govmomi v0.39.0/go.mod h1:oHzAQ1r6152zYDGcUqeK+EO8LhKo5wjtvWZBGHws2Hc=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
 github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
 github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
@@ -1225,6 +1236,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY=
@@ -1269,18 +1282,22 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM=
 howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
-k8s.io/api v0.29.5 h1:levS+umUigHCfI3riD36pMY1vQEbrzh4r1ivVWAhHaI=
-k8s.io/api v0.29.5/go.mod h1:7b18TtPcJzdjk7w5zWyIHgoAtpGeRvGGASxlS7UZXdQ=
-k8s.io/apimachinery v0.29.5 h1:Hofa2BmPfpoT+IyDTlcPdCHSnHtEQMoJYGVoQpRTfv4=
-k8s.io/apimachinery v0.29.5/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y=
-k8s.io/client-go v0.29.5 h1:nlASXmPQy190qTteaVP31g3c/wi2kycznkTP7Sv1zPc=
-k8s.io/client-go v0.29.5/go.mod h1:aY5CnqUUvXYccJhm47XHoPcRyX6vouHdIBHaKZGTbK4=
+k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
+k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
+k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk=
+k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk=
+k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
+k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
+k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
+k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8=
+k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 kernel.org/pub/linux/libs/security/libcap/cap v1.2.57 h1:2nmqI+aw7EQZuelYktkQHBE4jESD2tOR+lOJEnv/Apo=
 kernel.org/pub/linux/libs/security/libcap/cap v1.2.57/go.mod h1:uI99C3r4SXvJeuqoEtx/eWt7UbmfqqZ80H8q+9t/A7I=
 kernel.org/pub/linux/libs/security/libcap/psx v1.2.57 h1:NOFATXSf5z/cMR3HIwQ3Xrd3nwnWl5xThmNr5U/F0pI=
@@ -1289,9 +1306,13 @@ mvdan.cc/garble v0.12.1 h1:GyKeyqr4FKhWz12ZD9kKT9VnDqFILVYxgmAE8RKd3x8=
 mvdan.cc/garble v0.12.1/go.mod h1:rJ4GvtUEuVCRAYQkpd1iG6bolz9NEnkk0iu6gdTwWqA=
 nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0=
 nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c=
+sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q=
+sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
+sigs.k8s.io/e2e-framework v0.5.0 h1:YLhk8R7EHuTFQAe6Fxy5eBzn5Vb+yamR5u8MH1Rq3cE=
+sigs.k8s.io/e2e-framework v0.5.0/go.mod h1:jJSH8u2RNmruekUZgHAtmRjb5Wj67GErli9UjLSY7Zc=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
-sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
-sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/x-pack/agentbeat/magefile.go b/x-pack/agentbeat/magefile.go
index bd72a558ba3..52f7581ba69 100644
--- a/x-pack/agentbeat/magefile.go
+++ b/x-pack/agentbeat/magefile.go
@@ -9,11 +9,9 @@ package main
 import (
 	"context"
 	"fmt"
-	"log"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest"
 	"os"
-	"os/exec"
 	"path/filepath"
-	"strings"
 	"time"
 
 	"github.com/magefile/mage/sh"
@@ -217,78 +215,8 @@ func PythonIntegTest(ctx context.Context) error {
 	return devtools.PythonIntegTestFromHost(devtools.DefaultPythonTestIntegrationFromHostArgs())
 }
 
-// TestWithSpec executes unique commands from agentbeat.spec.yml and validates that app haven't exited with non-zero
-func TestWithSpec(ctx context.Context) {
-	specPath := os.Getenv("AGENTBEAT_SPEC")
-	if specPath == "" {
-		log.Fatal("AGENTBEAT_SPEC is not defined\n")
-	}
-
-	platform := os.Getenv("PLATFORM")
-	if platform == "" {
-		log.Fatal("PLATFORM is not defined\n")
-	}
-
-	var commands = devtools.SpecCommands(specPath, platform)
-
-	agentbeatPath := os.Getenv("AGENTBEAT_PATH")
-
-	cmdResults := make(map[string]bool)
-
-	for _, command := range commands {
-		cmdResults[command] = runCmd(agentbeatPath, strings.Split(command, " "))
-	}
-
-	hasFailures := false
-	for cmd, res := range cmdResults {
-		if res {
-			fmt.Printf("--- :large_green_circle: Succeeded: [%s.10s...]\n", cmd)
-		} else {
-			fmt.Printf("--- :bangbang: Failed: [%s.10s...]\n", cmd)
-			hasFailures = true
-		}
-	}
-
-	if hasFailures {
-		fmt.Printf("Some inputs failed. Exiting with error\n")
-		os.Exit(1)
-	}
-}
-
-func runCmd(agentbeatPath string, command []string) bool {
-	cmd := exec.Command(agentbeatPath, command...)
-	fmt.Printf("Executing: %s\n", cmd.String())
-
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-	cmd.Stdin = os.Stdin
-
-	if err := cmd.Start(); err != nil {
-		fmt.Printf("failed to start command: %v\n", err)
-	}
-
-	defer func() {
-		if err := cmd.Process.Kill(); err != nil {
-			fmt.Printf("failed to kill process: %v\n", err)
-		} else {
-			fmt.Print("command process killed\n")
-		}
-	}()
-
-	done := make(chan error, 1)
-	go func() {
-		done <- cmd.Wait()
-	}()
-	timeout := 2 * time.Second
-	deadline := time.After(timeout)
-
-	select {
-	case err := <-done:
-		fmt.Printf("command exited before %s: %v\n", timeout.String(), err)
-		return false
-
-	case <-deadline:
-		fmt.Printf("%s\n", cmd.Stdout)
-		return true
-	}
+// ServerlessTest runs the serverless integration tests.
+func ServerlessTest(ctx context.Context) error {
+	mg.Deps(devtools.TestBeatServerless)
+	return srvrlesstest.IntegRunner(ctx, false, "TestBeatsServerless")
 }
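
Usage sketch for the new ssh.FileContentsOpt option (illustrative only, not part of the diff): `client` stands for an already-connected ssh.SSHClient and readRemoteWindowsFile is a hypothetical helper. It shows the pattern windows.go uses throughout, overriding the default "cat" fetch command with the Windows built-in "type":

	// readRemoteWindowsFile fetches a remote file from a Windows host by
	// running `type <path>` instead of the default `cat <path>`.
	func readRemoteWindowsFile(ctx context.Context, client ssh.SSHClient, path string) ([]byte, error) {
		return client.GetFileContents(ctx, path, ssh.WithContentFetchCommand("type"))
	}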