From ffa5b6bef3f2e1550516c3e99d0983bd81a114c0 Mon Sep 17 00:00:00 2001
From: Nate Sweet
Date: Thu, 12 Oct 2023 17:19:58 -0500
Subject: [PATCH] connectivity: Add Control Plane Node Connectivity Tests

The control plane nodes create unique policy selection contexts that
allow us to test that label selection of the host and kube-apiserver
entities is correct. This change adds control plane client pods on every
control plane node when the hidden test flag `--k8s-localhost-test`
is enabled. Control plane components, as well as the control plane host,
are queried with control plane policy selections in place.

Signed-off-by: Nate Sweet
---
 connectivity/check/check.go                    |   3 +
 connectivity/check/context.go                  | 104 ++++++++++++++----
 connectivity/check/deployment.go               |  52 ++++++++-
 connectivity/check/features.go                 |   8 ++
 connectivity/check/peer.go                     |  16 ++-
 .../client-egress-to-cidr-cp-host-knp.yaml     |  15 +++
 .../manifests/client-egress-to-cidr-k8s.yaml   |  19 ++++
 .../client-egress-to-entities-host.yaml        |  11 ++
 .../client-egress-to-entities-k8s.yaml         |  19 ++++
 connectivity/suite.go                          |  56 +++++++++-
 connectivity/tests/host.go                     |  43 +++++++-
 connectivity/tests/k8s.go                      |  42 +++++++
 internal/cli/cmd/connectivity.go               |   2 +
 13 files changed, 356 insertions(+), 34 deletions(-)
 create mode 100644 connectivity/manifests/client-egress-to-cidr-cp-host-knp.yaml
 create mode 100644 connectivity/manifests/client-egress-to-cidr-k8s.yaml
 create mode 100644 connectivity/manifests/client-egress-to-entities-host.yaml
 create mode 100644 connectivity/manifests/client-egress-to-entities-k8s.yaml
 create mode 100644 connectivity/tests/k8s.go

diff --git a/connectivity/check/check.go b/connectivity/check/check.go
index a2ae564f44..3b8ff2d178 100644
--- a/connectivity/check/check.go
+++ b/connectivity/check/check.go
@@ -28,6 +28,7 @@ type Parameters struct {
 	ForceDeploy      bool
 	Hubble           bool
 	HubbleServer     string
+	K8sLocalHostTest bool
 	MultiCluster     string
 	RunTests         []*regexp.Regexp
 	SkipTests        []*regexp.Regexp
@@ -61,6 +62,8 @@ type Parameters struct {
 	ExternalOtherIP       string
 	PodCIDRs              []podCIDRs
 	NodeCIDRs             []string
+	ControlPlaneCIDRs     []string
+	K8sCIDR               string
 	NodesWithoutCiliumIPs []nodesWithoutCiliumIP
 	JunitFile             string
 	JunitProperties       map[string]string
diff --git a/connectivity/check/context.go b/connectivity/check/context.go
index 62a77ef5e4..75238bed54 100644
--- a/connectivity/check/context.go
+++ b/connectivity/check/context.go
@@ -61,11 +61,13 @@ type ConnectivityTest struct {
 	echoPods          map[string]Pod
 	echoExternalPods  map[string]Pod
 	clientPods        map[string]Pod
+	clientCPPods      map[string]Pod
 	perfClientPods    map[string]Pod
 	perfServerPod     map[string]Pod
 	PerfResults       map[PerfTests]PerfResult
 	echoServices      map[string]Service
 	ingressService    map[string]Service
+	k8sService        Service
 	externalWorkloads map[string]ExternalWorkload
 
 	hostNetNSPodsByNode map[string]Pod
@@ -78,6 +80,7 @@ type ConnectivityTest struct {
 	lastFlowTimestamps map[string]time.Time
 
 	nodes              map[string]*corev1.Node
+	controlPlaneNodes  map[string]*corev1.Node
 	nodesWithoutCilium map[string]struct{}
 
 	manifests map[string]string
@@ -98,6 +101,29 @@ type PerfResult struct {
 	Avg float64
 }
 
+func netIPToCIDRs(netIPs []netip.Addr) (netCIDRs []netip.Prefix) {
+	for _, ip := range netIPs {
+		found := false
+		for _, cidr := range netCIDRs {
+			if cidr.Addr().Is4() == ip.Is4() && cidr.Contains(ip) {
+				found = true
+				break
+			}
+		}
+		if found {
+			continue
+		}
+
+		// Generate a /24 or /64 accordingly
+		bits := 24
+		if ip.Is6() {
+			bits = 64
+		}
+		netCIDRs = append(netCIDRs, netip.PrefixFrom(ip, bits).Masked())
+	}
+
+	return
+}
+
 // verbose returns the value of the user-provided verbosity flag.
 func (ct *ConnectivityTest) verbose() bool {
 	return ct.params.Verbose
@@ -192,6 +218,7 @@ func NewConnectivityTest(client *k8s.Client, p Parameters, version string) (*Con
 		echoPods:          make(map[string]Pod),
 		echoExternalPods:  make(map[string]Pod),
 		clientPods:        make(map[string]Pod),
+		clientCPPods:      make(map[string]Pod),
 		perfClientPods:    make(map[string]Pod),
 		perfServerPod:     make(map[string]Pod),
 		PerfResults:       make(map[PerfTests]PerfResult),
@@ -336,6 +363,11 @@ func (ct *ConnectivityTest) SetupAndValidate(ctx context.Context, setupAndValida
 			return fmt.Errorf("unable to detect node CIDRs: %w", err)
 		}
 	}
+	if ct.params.K8sLocalHostTest {
+		if err := ct.detectK8sCIDR(ctx); err != nil {
+			return fmt.Errorf("unable to detect K8s CIDR: %w", err)
+		}
+	}
 	return nil
 }
 
@@ -639,8 +671,9 @@ func (ct *ConnectivityTest) detectNodeCIDRs(ctx context.Context) error {
 	}
 
 	nodeIPs := make([]netip.Addr, 0, len(nodes.Items))
+	cPIPs := make([]netip.Addr, 0, 1)
 
-	for _, node := range nodes.Items {
+	for i, node := range nodes.Items {
 		for _, addr := range node.Status.Addresses {
 			if addr.Type != "InternalIP" {
 				continue
@@ -651,6 +684,9 @@ func (ct *ConnectivityTest) detectNodeCIDRs(ctx context.Context) error {
 				continue
 			}
 			nodeIPs = append(nodeIPs, ip)
+			if isControlPlane(&nodes.Items[i]) {
+				cPIPs = append(cPIPs, ip)
+			}
 		}
 	}
 
@@ -659,36 +695,44 @@ func (ct *ConnectivityTest) detectNodeCIDRs(ctx context.Context) error {
 	}
 
 	// collapse set of IPs in to CIDRs
-	nodeCIDRs := []netip.Prefix{}
-
-	for _, ip := range nodeIPs {
-		found := false
-		for _, cidr := range nodeCIDRs {
-			if cidr.Addr().Is4() == ip.Is4() && cidr.Contains(ip) {
-				found = true
-				break
-			}
-		}
-		if found {
-			continue
-		}
-
-		// Generate a /24 or /64 accordingly
-		bits := 24
-		if ip.Is6() {
-			bits = 64
-		}
-		nodeCIDRs = append(nodeCIDRs, netip.PrefixFrom(ip, bits).Masked())
-	}
+	nodeCIDRs := netIPToCIDRs(nodeIPs)
+	cPCIDRs := netIPToCIDRs(cPIPs)
 
 	ct.params.NodeCIDRs = make([]string, 0, len(nodeCIDRs))
 	for _, cidr := range nodeCIDRs {
 		ct.params.NodeCIDRs = append(ct.params.NodeCIDRs, cidr.String())
 	}
+	ct.params.ControlPlaneCIDRs = make([]string, 0, len(cPCIDRs))
+	for _, cidr := range cPCIDRs {
+		ct.params.ControlPlaneCIDRs = append(ct.params.ControlPlaneCIDRs, cidr.String())
+	}
 	ct.Debugf("Detected NodeCIDRs: %v", ct.params.NodeCIDRs)
 	return nil
 }
+
+// detectK8sCIDR produces one CIDR that covers the kube-apiserver address.
+// An IPv4 address is collapsed into a /24, and an IPv6 address into a /64.
+func (ct *ConnectivityTest) detectK8sCIDR(ctx context.Context) error {
+	service, err := ct.client.GetService(ctx, "default", "kubernetes", metav1.GetOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to get \"kubernetes.default\" service: %w", err)
+	}
+	addr, err := netip.ParseAddr(service.Spec.ClusterIP)
+	if err != nil {
+		return fmt.Errorf("failed to parse \"kubernetes.default\" service Cluster IP: %w", err)
+	}
+
+	// Generate a /24 or /64 accordingly
+	bits := 24
+	if addr.Is6() {
+		bits = 64
+	}
+	ct.params.K8sCIDR = netip.PrefixFrom(addr, bits).Masked().String()
+	ct.k8sService = Service{Service: service, URLPath: "/healthz"}
+	ct.Debugf("Detected K8sCIDR: %q", ct.params.K8sCIDR)
+	return nil
+}
+
 func (ct *ConnectivityTest) detectNodesWithoutCiliumIPs() error {
 	for n := range ct.nodesWithoutCilium {
 		pod := ct.hostNetNSPodsByNode[n]
@@ -812,6 +856,7 @@ func (ct *ConnectivityTest) initCiliumPods(ctx context.Context) error {
 
 func (ct *ConnectivityTest) getNodes(ctx context.Context) error {
 	ct.nodes = make(map[string]*corev1.Node)
+	ct.controlPlaneNodes = make(map[string]*corev1.Node)
 	ct.nodesWithoutCilium = make(map[string]struct{})
 	nodeList, err := ct.client.ListNodes(ctx, metav1.ListOptions{})
 	if err != nil {
@@ -821,6 +866,9 @@ func (ct *ConnectivityTest) getNodes(ctx context.Context) error {
 	for _, node := range nodeList.Items {
 		node := node
 		if canNodeRunCilium(&node) {
+			if isControlPlane(&node) {
+				ct.controlPlaneNodes[node.ObjectMeta.Name] = node.DeepCopy()
+			}
 			ct.nodes[node.ObjectMeta.Name] = node.DeepCopy()
 		} else {
 			ct.nodesWithoutCilium[node.ObjectMeta.Name] = struct{}{}
@@ -960,10 +1008,18 @@ func (ct *ConnectivityTest) Nodes() map[string]*corev1.Node {
 	return ct.nodes
 }
 
+func (ct *ConnectivityTest) ControlPlaneNodes() map[string]*corev1.Node {
+	return ct.controlPlaneNodes
+}
+
 func (ct *ConnectivityTest) ClientPods() map[string]Pod {
 	return ct.clientPods
 }
 
+func (ct *ConnectivityTest) ControlPlaneClientPods() map[string]Pod {
+	return ct.clientCPPods
+}
+
 func (ct *ConnectivityTest) HostNetNSPodsByNode() map[string]Pod {
 	return ct.hostNetNSPodsByNode
 }
@@ -1000,6 +1056,10 @@ func (ct *ConnectivityTest) IngressService() map[string]Service {
 	return ct.ingressService
 }
 
+func (ct *ConnectivityTest) K8sService() Service {
+	return ct.k8sService
+}
+
 func (ct *ConnectivityTest) ExternalWorkloads() map[string]ExternalWorkload {
 	return ct.externalWorkloads
 }
diff --git a/connectivity/check/deployment.go b/connectivity/check/deployment.go
index 6f9c426569..763b14e0c3 100644
--- a/connectivity/check/deployment.go
+++ b/connectivity/check/deployment.go
@@ -15,6 +15,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
@@ -33,6 +34,7 @@ const (
 
 	clientDeploymentName  = "client"
 	client2DeploymentName = "client2"
+	clientCPDeployment    = "client-cp"
 
 	DNSTestServerContainerName = "dns-test-server"
 
@@ -852,6 +854,36 @@ func (ct *ConnectivityTest) deploy(ctx context.Context) error {
 		}
 	}
 
+	// 3rd client scheduled on the control plane
+	if ct.params.K8sLocalHostTest {
+		ct.Logf("✨ [%s] Deploying %s deployment...", ct.clients.src.ClusterName(), clientCPDeployment)
+		clientDeployment := newDeployment(deploymentParameters{
+			Name:      clientCPDeployment,
+			Kind:      kindClientName,
+			NamedPort: "http-8080",
+			Port:      8080,
+			Image:     ct.params.CurlImage,
+			Command:   []string{"/bin/ash", "-c", "sleep 10000000"},
+			Labels:    map[string]string{"other": "client"},
+			Annotations: ct.params.DeploymentAnnotations.Match(client2DeploymentName),
+			NodeSelector: map[string]string{
+				"node-role.kubernetes.io/control-plane": "",
+			},
+			Replicas: len(ct.ControlPlaneNodes()),
+			Tolerations: []corev1.Toleration{
+				{Key: "node-role.kubernetes.io/control-plane"},
+			},
+		})
+		_, err = ct.clients.src.CreateServiceAccount(ctx, ct.params.TestNamespace, k8s.NewServiceAccount(clientCPDeployment), metav1.CreateOptions{})
+		if err != nil && !errors.IsAlreadyExists(err) {
+			return fmt.Errorf("unable to create service account %s: %s", clientCPDeployment, err)
+		}
+		_, err = ct.clients.src.CreateDeployment(ctx, ct.params.TestNamespace, clientDeployment, metav1.CreateOptions{})
+		if err != nil && !errors.IsAlreadyExists(err) {
+			return fmt.Errorf("unable to create deployment %s: %s", clientCPDeployment, err)
+		}
+	}
+
 	if !ct.params.SingleNode || ct.params.MultiCluster != "" {
 		_, err = ct.clients.dst.GetService(ctx, ct.params.TestNamespace, echoOtherNodeDeploymentName, metav1.GetOptions{})
 		if err != nil {
@@ -1158,9 +1190,17 @@ func (ct *ConnectivityTest) validateDeployment(ctx context.Context) error {
 			return err
 		}
 
-		ct.clientPods[pod.Name] = Pod{
-			K8sClient: ct.client,
-			Pod:       pod.DeepCopy(),
+		if strings.Contains(pod.Name, clientCPDeployment) {
+			ct.clientCPPods[pod.Name] = Pod{
+				K8sClient: ct.client,
+				Pod:       pod.DeepCopy(),
+			}
+		} else {
+			ct.clientPods[pod.Name] = Pod{
+				K8sClient: ct.client,
+				Pod:       pod.DeepCopy(),
+			}
 		}
 	}
 
@@ -1222,7 +1262,11 @@ func (ct *ConnectivityTest) validateDeployment(ctx context.Context) error {
 			return err
 		}
 	}
-
+	for _, cpp := range ct.clientCPPods {
+		if err := WaitForCoreDNS(ctx, ct, cpp); err != nil {
+			return err
+		}
+	}
 	for _, client := range ct.clients.clients() {
 		echoPods, err := client.ListPods(ctx, ct.params.TestNamespace, metav1.ListOptions{LabelSelector: "kind=" + kindEchoName})
 		if err != nil {
diff --git a/connectivity/check/features.go b/connectivity/check/features.go
index 678793a03c..c07a62d1e5 100644
--- a/connectivity/check/features.go
+++ b/connectivity/check/features.go
@@ -347,3 +347,11 @@ func canNodeRunCilium(node *corev1.Node) bool {
 	val, ok := node.ObjectMeta.Labels["cilium.io/no-schedule"]
 	return !ok || val == "false"
 }
+
+func isControlPlane(node *corev1.Node) bool {
+	if node != nil {
+		_, ok := node.Labels["node-role.kubernetes.io/control-plane"]
+		return ok
+	}
+	return false
+}
diff --git a/connectivity/check/peer.go b/connectivity/check/peer.go
index b7e43da3a0..60590fdcf2 100644
--- a/connectivity/check/peer.go
+++ b/connectivity/check/peer.go
@@ -4,6 +4,7 @@
 package check
 
 import (
+	"fmt"
 	"net"
 	"net/url"
 	"strconv"
@@ -162,6 +163,8 @@ func (p Pod) FlowFilters() []*flow.FlowFilter {
 type Service struct {
 	// Service is the Kubernetes service resource
 	Service *corev1.Service
+
+	URLPath string
 }
 
 // Name returns the absolute name of the service.
@@ -174,23 +177,28 @@ func (s Service) NameWithoutNamespace() string {
 	return s.Service.Name
 }
 
-// Scheme returns the string 'http'.
+// Scheme returns the string 'https' if the port is 443 or 6443, otherwise
+// it returns 'http'.
 func (s Service) Scheme() string {
-	// We only have http services for now.
+	switch s.Port() {
+	case 443, 6443:
+		return "https"
+	}
 	return "http"
+
 }
 
 // Path returns the string '/'.
 func (s Service) Path() string {
 	// No support for paths yet.
- return "" + return s.URLPath } // Address returns the network address of the Service. func (s Service) Address(family features.IPFamily) string { // If the cluster IP is empty (headless service case) or the IP family is set to any, return the service name if s.Service.Spec.ClusterIP == "" || family == features.IPFamilyAny { - return s.Service.Name + return fmt.Sprintf("%s.%s", s.Service.Name, s.Service.Namespace) } getClusterIPForIPFamily := func(family v1.IPFamily) string { diff --git a/connectivity/manifests/client-egress-to-cidr-cp-host-knp.yaml b/connectivity/manifests/client-egress-to-cidr-cp-host-knp.yaml new file mode 100644 index 0000000000..859dcdbd90 --- /dev/null +++ b/connectivity/manifests/client-egress-to-cidr-cp-host-knp.yaml @@ -0,0 +1,15 @@ +# This policy allows packets to all node IPs +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: client-egress-to-cidr-cp-host +spec: + podSelector: + matchLabels: + kind: client + egress: + - to: +{{- range .ControlPlaneCIDRs }} + - ipBlock: + cidr: {{.}} +{{- end }} diff --git a/connectivity/manifests/client-egress-to-cidr-k8s.yaml b/connectivity/manifests/client-egress-to-cidr-k8s.yaml new file mode 100644 index 0000000000..eb9f9080cc --- /dev/null +++ b/connectivity/manifests/client-egress-to-cidr-k8s.yaml @@ -0,0 +1,19 @@ +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: client-egress-to-cidr-k8s +spec: + endpointSelector: + matchLabels: + kind: client + egress: + - toCIDR: + - {{ .K8sCIDR }} + - toEndpoints: + - matchExpressions: + - { key: 'k8s-app', operator: In, values: [ "kube-dns", "coredns", "node-local-dns", "nodelocaldns" ] } + - { key: 'io.kubernetes.pod.namespace', operator: In, values: [ "kube-system" ] } + toPorts: + - ports: + - port: "53" + protocol: ANY diff --git a/connectivity/manifests/client-egress-to-entities-host.yaml b/connectivity/manifests/client-egress-to-entities-host.yaml new file mode 100644 index 0000000000..a303efd698 --- /dev/null +++ b/connectivity/manifests/client-egress-to-entities-host.yaml @@ -0,0 +1,11 @@ +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: client-egress-to-entities-host +spec: + endpointSelector: + matchLabels: + kind: client + egress: + - toEntities: + - host diff --git a/connectivity/manifests/client-egress-to-entities-k8s.yaml b/connectivity/manifests/client-egress-to-entities-k8s.yaml new file mode 100644 index 0000000000..5802cdcfdb --- /dev/null +++ b/connectivity/manifests/client-egress-to-entities-k8s.yaml @@ -0,0 +1,19 @@ +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: client-egress-to-entities-k8s +spec: + endpointSelector: + matchLabels: + kind: client + egress: + - toEntities: + - kube-apiserver + - toEndpoints: + - matchExpressions: + - { key: 'k8s-app', operator: In, values: [ "kube-dns", "coredns", "node-local-dns", "nodelocaldns" ] } + - { key: 'io.kubernetes.pod.namespace', operator: In, values: [ "kube-system" ] } + toPorts: + - ports: + - port: "53" + protocol: ANY diff --git a/connectivity/suite.go b/connectivity/suite.go index 743bec4980..0690e2f10d 100644 --- a/connectivity/suite.go +++ b/connectivity/suite.go @@ -107,15 +107,27 @@ var ( //go:embed manifests/echo-ingress-from-other-client-deny.yaml echoIngressFromOtherClientDenyPolicyYAML string + //go:embed manifests/client-egress-to-entities-host.yaml + clientEgressToEntitiesHostPolicyYAML string + + //go:embed manifests/client-egress-to-entities-k8s.yaml + clientEgressToEntitiesK8sPolicyYAML string + //go:embed 
 	clientEgressToEntitiesWorldPolicyYAML string
 
+	//go:embed manifests/client-egress-to-cidr-cp-host-knp.yaml
+	clientEgressToCIDRCPHostPolicyYAML string
+
 	//go:embed manifests/client-egress-to-cidr-external.yaml
 	clientEgressToCIDRExternalPolicyYAML string
 
 	//go:embed manifests/client-egress-to-cidr-external-knp.yaml
 	clientEgressToCIDRExternalPolicyKNPYAML string
 
+	//go:embed manifests/client-egress-to-cidr-k8s.yaml
+	clientEgressToCIDRK8sPolicyYAML string
+
 	//go:embed manifests/client-egress-to-cidr-node-knp.yaml
 	clientEgressToCIDRNodeKNPYAML string
 
@@ -181,8 +193,7 @@ func Run(ctx context.Context, ct *check.ConnectivityTest, addExtraTests func(*ch
 
 	renderedTemplates := map[string]string{}
 
-	// render templates, if any problems fail early
-	for key, temp := range map[string]string{
+	templates := map[string]string{
 		"clientEgressToCIDRExternalPolicyYAML":    clientEgressToCIDRExternalPolicyYAML,
 		"clientEgressToCIDRExternalPolicyKNPYAML": clientEgressToCIDRExternalPolicyKNPYAML,
 		"clientEgressToCIDRNodeKNPYAML":           clientEgressToCIDRNodeKNPYAML,
@@ -193,7 +204,15 @@ func Run(ctx context.Context, ct *check.ConnectivityTest, addExtraTests func(*ch
 		"clientEgressL7TLSPolicyYAML":             clientEgressL7TLSPolicyYAML,
 		"clientEgressL7HTTPMatchheaderSecretYAML": clientEgressL7HTTPMatchheaderSecretYAML,
 		"echoIngressFromCIDRYAML":                 echoIngressFromCIDRYAML,
-	} {
+	}
+
+	if ct.Params().K8sLocalHostTest {
+		templates["clientEgressToCIDRCPHostPolicyYAML"] = clientEgressToCIDRCPHostPolicyYAML
+		templates["clientEgressToCIDRK8sPolicyKNPYAML"] = clientEgressToCIDRK8sPolicyYAML
+	}
+
+	// render templates, if any problems fail early
+	for key, temp := range templates {
 		val, err := template.Render(temp, ct.Params())
 		if err != nil {
 			return err
@@ -1078,6 +1097,37 @@ func Run(ctx context.Context, ct *check.ConnectivityTest, addExtraTests func(*ch
 			return check.ResultDNSOKDropCurlTimeout, check.ResultNone
 		})
 
+	if ct.Params().K8sLocalHostTest {
+		ct.NewTest("pod-to-controlplane-host").
+			WithCiliumPolicy(clientEgressToEntitiesHostPolicyYAML).
+			WithScenarios(
+				tests.PodToControlPlaneHost(),
+			)
+
+		ct.NewTest("pod-to-k8s-on-controlplane").
+			WithCiliumPolicy(clientEgressToEntitiesK8sPolicyYAML).
+			WithScenarios(
+				tests.PodToK8sLocal(),
+			)
+		// Check that pods can access the control plane host when referencing it
+		// by CIDR selectors (when this feature is enabled).
+		ct.NewTest("pod-to-controlplane-host-cidr").
+			WithFeatureRequirements(
+				features.RequireEnabled(features.CIDRMatchNodes)).
+			WithK8SPolicy(renderedTemplates["clientEgressToCIDRCPHostPolicyYAML"]).
+			WithScenarios(
+				tests.PodToControlPlaneHost(),
+			)
+
+		ct.NewTest("pod-to-k8s-on-controlplane-cidr").
+			WithFeatureRequirements(
+				features.RequireEnabled(features.CIDRMatchNodes)).
+			WithCiliumPolicy(renderedTemplates["clientEgressToCIDRK8sPolicyKNPYAML"]).
+			WithScenarios(
+				tests.PodToK8sLocal(),
+			)
+	}
+
 	// Tests with DNS redirects to the proxy (e.g., client-egress-l7, dns-only,
 	// and to-fqdns) should always be executed last. See #367 for details.
diff --git a/connectivity/tests/host.go b/connectivity/tests/host.go
index f243313894..d6d7831fa8 100644
--- a/connectivity/tests/host.go
+++ b/connectivity/tests/host.go
@@ -34,7 +34,7 @@ func (s *podToHost) Run(ctx context.Context, t *check.Test) {
 		pod := pod // copy to avoid memory aliasing when using reference
 
 		for _, node := range ct.Nodes() {
-			node := node
+			node := node // copy to avoid memory aliasing when using reference
 
 			t.ForEachIPFamily(func(ipFam features.IPFamily) {
 				for _, addr := range node.Status.Addresses {
@@ -62,6 +62,47 @@ func (s *podToHost) Run(ctx context.Context, t *check.Test) {
 	}
 }
 
+// PodToControlPlaneHost sends an ICMP ping from each control plane client Pod
+// to all control plane nodes in the test context.
+func PodToControlPlaneHost() check.Scenario {
+	return &podToControlPlaneHost{}
+}
+
+// podToControlPlaneHost implements a Scenario.
+type podToControlPlaneHost struct{}
+
+func (s *podToControlPlaneHost) Name() string {
+	return "pod-to-controlplane-host"
+}
+
+func (s *podToControlPlaneHost) Run(ctx context.Context, t *check.Test) {
+	ct := t.Context()
+	for _, pod := range ct.ControlPlaneClientPods() {
+		pod := pod
+		for _, node := range ct.ControlPlaneNodes() {
+			t.ForEachIPFamily(func(ipFam features.IPFamily) {
+				for _, addr := range node.Status.Addresses {
+					if features.GetIPFamily(addr.Address) != ipFam {
+						continue
+					}
+					dst := check.ICMPEndpoint("", addr.Address)
+					ipFam := features.GetIPFamily(addr.Address)
+
+					t.NewAction(s, fmt.Sprintf("ping-%s-node-%s-from-pod-%s", ipFam, node.Name, pod.Name()), &pod, dst, ipFam).Run(func(a *check.Action) {
+						a.ExecInPod(ctx, ct.PingCommand(dst, ipFam))
+
+						a.ValidateFlows(ctx, pod, a.GetEgressRequirements(check.FlowParameters{
+							Protocol: check.ICMP,
+						}))
+
+						a.ValidateMetrics(ctx, pod, a.GetEgressMetricsRequirements())
+					})
+				}
+			})
+		}
+	}
+}
+
 // PodToHostPort sends an HTTP request from all client Pods
 // to all echo Services' HostPorts.
 func PodToHostPort() check.Scenario {
diff --git a/connectivity/tests/k8s.go b/connectivity/tests/k8s.go
new file mode 100644
index 0000000000..ad52e4f376
--- /dev/null
+++ b/connectivity/tests/k8s.go
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package tests
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/cilium/cilium-cli/connectivity/check"
+	"github.com/cilium/cilium-cli/utils/features"
+)
+
+// PodToK8sLocal sends a curl from all control plane client Pods
+// to the kube-apiserver service.
+func PodToK8sLocal() check.Scenario {
+	return &podToK8sLocal{}
+}
+
+// podToK8sLocal implements a Scenario.
+type podToK8sLocal struct{}
+
+func (s *podToK8sLocal) Name() string {
+	return "pod-to-k8s-local"
+}
+
+func (s *podToK8sLocal) Run(ctx context.Context, t *check.Test) {
+	ct := t.Context()
+	k8sSvc := ct.K8sService()
+	for _, pod := range ct.ControlPlaneClientPods() {
+		pod := pod // copy to avoid memory aliasing when using reference
+		t.NewAction(s, fmt.Sprintf("curl-k8s-from-pod-%s", pod.Name()), &pod, k8sSvc, features.IPFamilyAny).Run(func(a *check.Action) {
+			a.ExecInPod(ctx, ct.CurlCommand(k8sSvc, features.IPFamilyAny))
+			a.ValidateFlows(ctx, pod, a.GetEgressRequirements(check.FlowParameters{
+				DNSRequired: true,
+				AltDstPort:  k8sSvc.Port(),
+			}))
+
+			a.ValidateMetrics(ctx, pod, a.GetEgressMetricsRequirements())
+		})
+	}
+}
diff --git a/internal/cli/cmd/connectivity.go b/internal/cli/cmd/connectivity.go
index bb5ba9b8ca..760cdee873 100644
--- a/internal/cli/cmd/connectivity.go
+++ b/internal/cli/cmd/connectivity.go
@@ -146,6 +146,8 @@ func newCmdConnectivityTest(hooks Hooks) *cobra.Command {
 	cmd.Flags().MarkHidden("skip-ip-cache-check")
 	cmd.Flags().BoolVar(&params.IncludeUnsafeTests, "include-unsafe-tests", false, "Include tests which can modify cluster nodes state")
 	cmd.Flags().MarkHidden("include-unsafe-tests")
+	cmd.Flags().BoolVar(&params.K8sLocalHostTest, "k8s-localhost-test", false, "Include tests which test for policy enforcement for the k8s entity on its own host")
+	cmd.Flags().MarkHidden("k8s-localhost-test")
 	cmd.Flags().StringVar(&params.K8sVersion, "k8s-version", "", "Kubernetes server version in case auto-detection fails")
 	cmd.Flags().StringVar(&params.HelmChartDirectory, "chart-directory", "", "Helm chart directory")
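
Usage sketch for reviewers: a minimal way to exercise the new scenarios, assuming a
locally built `cilium` binary on a cluster with labeled, schedulable control plane
nodes; `--test` is the CLI's existing regex filter for selecting tests. The new tests
are gated behind the hidden flag added above, so it has to be passed explicitly:

  cilium connectivity test \
    --k8s-localhost-test \
    --test 'pod-to-controlplane-host|pod-to-k8s-on-controlplane'

The two CIDR-based variants (pod-to-controlplane-host-cidr and
pod-to-k8s-on-controlplane-cidr) additionally require the CIDRMatchNodes feature to
be enabled in the cluster, per the feature requirements declared in suite.go.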