Merge pull request #3482 from jiaqiluo/support-127
jiaqiluo authored Jan 23, 2024
2 parents b407d18 + 049b583 commit f329e90
Showing 19 changed files with 3,651 additions and 3,010 deletions.
20 changes: 20 additions & 0 deletions .golangci.json
@@ -41,6 +41,26 @@
{
"linters": "revive",
"text": "should be of the form"
},
+ {
+ "linters": "revive",
+ "text": "unused-parameter"
+ },
+ {
+ "linters": "revive",
+ "text": "redefines-builtin-id"
+ },
+ {
+ "linters": "revive",
+ "text": "superfluous-else"
+ },
+ {
+ "linters": "revive",
+ "text": "empty-block"
+ },
+ {
+ "linters": "revive",
+ "text": "if-return: redundant if"
+ }
]
}
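
Note: these five exclusions match revive checks that the golangci-lint bump in Dockerfile.dapper below (v1.46.2 to v1.52.2) starts reporting, presumably so the upgraded linter does not fail on pre-existing patterns in the code base. As a rough illustration (hypothetical code, not from this repository), three of the newly excluded rules flag patterns like these:

package lintexamples

import "context"

// unused-parameter: ctx is accepted but never used in the body.
func greet(ctx context.Context, name string) string {
    return "hello " + name
}

// redefines-builtin-id: the local variable "len" shadows the builtin len().
func count(items []string) int {
    len := 0
    for range items {
        len++
    }
    return len
}

// superfluous-else: the else branch is redundant because the if branch ends with continue.
func nonEmpty(items []string) []string {
    var out []string
    for _, it := range items {
        if it == "" {
            continue
        } else {
            out = append(out, it)
        }
    }
    return out
}
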
4 changes: 2 additions & 2 deletions Dockerfile.dapper
@@ -11,9 +11,9 @@ RUN apt-get update && \
ENV GOLANG_ARCH_amd64=amd64 GOLANG_ARCH_arm=armv6l GOLANG_ARCH_arm64=arm64 GOLANG_ARCH=GOLANG_ARCH_${ARCH} \
GOPATH=/go PATH=/go/bin:/usr/local/go/bin:${PATH} SHELL=/bin/bash

- RUN wget -O - https://storage.googleapis.com/golang/go1.19.3.linux-${!GOLANG_ARCH}.tar.gz | tar -xzf - -C /usr/local
+ RUN wget -O - https://storage.googleapis.com/golang/go1.20.4.linux-${!GOLANG_ARCH}.tar.gz | tar -xzf - -C /usr/local

- RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.46.2
+ RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.52.2

ENV DOCKER_URL_amd64=https://get.docker.com/builds/Linux/x86_64/docker-1.10.3 \
DOCKER_URL_arm=https://github.com/rancher/docker/releases/download/v1.10.3-ros1/docker-1.10.3_arm \
4 changes: 2 additions & 2 deletions cluster/addons.go
@@ -492,7 +492,7 @@ func (c *Cluster) doAddonDeploy(ctx context.Context, addonYaml, resourceName str
if err != nil {
return &addonError{fmt.Sprintf("%v", err), isCritical}
}
- node, err := k8s.GetNode(k8sClient, c.ControlPlaneHosts[0].HostnameOverride)
+ node, err := k8s.GetNode(k8sClient, c.ControlPlaneHosts[0].HostnameOverride, c.ControlPlaneHosts[0].InternalAddress, c.CloudProvider.Name)
if err != nil {
return &addonError{fmt.Sprintf("Failed to get Node [%s]: %v", c.ControlPlaneHosts[0].HostnameOverride, err), isCritical}
}
@@ -513,7 +513,7 @@ func (c *Cluster) doAddonDelete(ctx context.Context, resourceName string, isCrit
if err != nil {
return &addonError{fmt.Sprintf("%v", err), isCritical}
}
- node, err := k8s.GetNode(k8sClient, c.ControlPlaneHosts[0].HostnameOverride)
+ node, err := k8s.GetNode(k8sClient, c.ControlPlaneHosts[0].HostnameOverride, c.ControlPlaneHosts[0].InternalAddress, c.CloudProvider.Name)
if err != nil {
return &addonError{fmt.Sprintf("Failed to get Node [%s]: %v", c.ControlPlaneHosts[0].HostnameOverride, err), isCritical}
}
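
Note: both doAddonDeploy and doAddonDelete now pass the control plane host's internal address and the configured cloud provider name to k8s.GetNode; cluster/remove.go below does the same for k8s.DeleteNode. The updated helper itself is not part of this excerpt. The following is only a plausible sketch of the extended lookup, assuming the constant value "external-aws" and a fallback to matching the node's InternalIP when the external AWS provider sets spec.nodeName from EC2 instance metadata instead of RKE's hostname override:

package k8s

import (
    "context"
    "strings"

    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/kubernetes"
)

const ExternalAWSCloudProviderName = "external-aws" // assumed constant value

// GetNode looks a node up by RKE's hostname override and, for the external AWS
// cloud provider, falls back to matching the host's internal IP address, because
// that provider derives spec.nodeName from EC2 instance metadata.
func GetNode(c *kubernetes.Clientset, nodeName, nodeAddress, cloudProviderName string) (*v1.Node, error) {
    nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        return nil, err
    }
    for i := range nodes.Items {
        node := &nodes.Items[i]
        if strings.EqualFold(node.Name, nodeName) {
            return node, nil
        }
        if cloudProviderName == ExternalAWSCloudProviderName {
            for _, addr := range node.Status.Addresses {
                if addr.Type == v1.NodeInternalIP && addr.Address == nodeAddress {
                    return node, nil
                }
            }
        }
    }
    return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "nodes"}, nodeName)
}
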
21 changes: 11 additions & 10 deletions cluster/cluster.go
@@ -189,7 +189,7 @@ func (c *Cluster) UpgradeControlPlane(ctx context.Context, kubeClient *kubernete
continue
}
// find existing nodes that are in NotReady state
- if err := services.CheckNodeReady(kubeClient, host, services.ControlRole); err != nil {
+ if err := services.CheckNodeReady(kubeClient, host, services.ControlRole, c.CloudProvider.Name); err != nil {
logrus.Debugf("Found node %v in NotReady state", host.HostnameOverride)
notReadyHosts = append(notReadyHosts, host)
notReadyHostNames = append(notReadyHostNames, host.HostnameOverride)
@@ -223,7 +223,7 @@ func (c *Cluster) UpgradeControlPlane(ctx context.Context, kubeClient *kubernete
}
// Calling CheckNodeReady will give some time for nodes to get in Ready state
for _, host := range notReadyHosts {
- err = services.CheckNodeReady(kubeClient, host, services.ControlRole)
+ err = services.CheckNodeReady(kubeClient, host, services.ControlRole, c.CloudProvider.Name)
if err != nil {
logrus.Errorf("Host %v failed to report Ready status with error: %v", host.HostnameOverride, err)
}
@@ -236,7 +236,8 @@ func (c *Cluster) UpgradeControlPlane(ctx context.Context, kubeClient *kubernete
cpNodePlanMap,
c.UpdateWorkersOnly,
c.SystemImages.Alpine,
- c.Certificates, c.UpgradeStrategy, c.NewHosts, inactiveHosts, c.MaxUnavailableForControlNodes, c.Version)
+ c.Certificates, c.UpgradeStrategy, c.NewHosts, inactiveHosts, c.MaxUnavailableForControlNodes,
+ c.Version, c.CloudProvider.Name)
if err != nil {
return "", fmt.Errorf("[controlPlane] Failed to upgrade Control Plane: %v", err)
}
@@ -310,7 +311,7 @@ func (c *Cluster) UpgradeWorkerPlane(ctx context.Context, kubeClient *kubernetes
continue
}
// find existing nodes that are in NotReady state
- if err := services.CheckNodeReady(kubeClient, host, services.WorkerRole); err != nil {
+ if err := services.CheckNodeReady(kubeClient, host, services.WorkerRole, c.CloudProvider.Name); err != nil {
logrus.Debugf("Found node %v in NotReady state", host.HostnameOverride)
notReadyHosts = append(notReadyHosts, host)
notReadyHostNames = append(notReadyHostNames, host.HostnameOverride)
@@ -332,7 +333,7 @@
}
// Calling CheckNodeReady will give some time for nodes to get in Ready state
for _, host := range notReadyHosts {
- err = services.CheckNodeReady(kubeClient, host, services.WorkerRole)
+ err = services.CheckNodeReady(kubeClient, host, services.WorkerRole, c.CloudProvider.Name)
if err != nil {
logrus.Errorf("Host %v failed to report Ready status with error: %v", host.HostnameOverride, err)
}
@@ -349,7 +350,8 @@ func (c *Cluster) UpgradeWorkerPlane(ctx context.Context, kubeClient *kubernetes
c.UpgradeStrategy,
c.NewHosts,
c.MaxUnavailableForWorkerNodes,
- c.Version)
+ c.Version,
+ c.CloudProvider.Name)
if err != nil {
return "", fmt.Errorf("[workerPlane] Failed to upgrade Worker Plane: %v", err)
}
@@ -994,7 +996,7 @@ func (c *Cluster) SyncLabelsAndTaints(ctx context.Context, currentCluster *Clust
var errs []error
for host := range hostQueue {
logrus.Debugf("worker [%d] starting sync for node [%s]", w, host.HostnameOverride)
- if err := setNodeAnnotationsLabelsTaints(k8sClient, host); err != nil {
+ if err := setNodeAnnotationsLabelsTaints(k8sClient, host, c.CloudProvider.Name); err != nil {
errs = append(errs, err)
}
}
@@ -1012,17 +1014,16 @@
return nil
}

- func setNodeAnnotationsLabelsTaints(k8sClient *kubernetes.Clientset, host *hosts.Host) error {
+ func setNodeAnnotationsLabelsTaints(k8sClient *kubernetes.Clientset, host *hosts.Host, cloudProviderName string) error {
node := &v1.Node{}
var err error
for retries := 0; retries <= 5; retries++ {
- node, err = k8s.GetNode(k8sClient, host.HostnameOverride)
+ node, err = k8s.GetNode(k8sClient, host.HostnameOverride, host.InternalAddress, cloudProviderName)
if err != nil {
logrus.Debugf("[hosts] Can't find node by name [%s], error: %v", host.HostnameOverride, err)
time.Sleep(2 * time.Second)
continue
}

oldNode := node.DeepCopy()
k8s.SetNodeAddressesAnnotations(node, host.InternalAddress, host.Address)
k8s.SyncNodeLabels(node, host.ToAddLabels, host.ToDelLabels)
19 changes: 17 additions & 2 deletions cluster/defaults.go
@@ -9,6 +9,7 @@ import (

"github.com/blang/semver"
"github.com/rancher/rke/cloudprovider"
"github.com/rancher/rke/cloudprovider/aws"
"github.com/rancher/rke/docker"
"github.com/rancher/rke/k8s"
"github.com/rancher/rke/log"
@@ -1061,11 +1062,25 @@ func (c *Cluster) setCloudProvider() error {
if p != nil {
c.CloudConfigFile, err = p.GenerateCloudConfigFile()
if err != nil {
return fmt.Errorf("Failed to parse cloud config file: %v", err)
return fmt.Errorf("failed to parse cloud config file: %v", err)
}
c.CloudProvider.Name = p.GetName()
if c.CloudProvider.Name == "" {
return fmt.Errorf("Name of the cloud provider is not defined for custom provider")
return fmt.Errorf("name of the cloud provider is not defined for custom provider")
}
+ if c.CloudProvider.Name == aws.AWSCloudProviderName {
+ clusterVersion, err := getClusterVersion(c.Version)
+ if err != nil {
+ return fmt.Errorf("failed to get cluster version for checking cloud provider: %v", err)
+ }
+ // cloud provider must be external or external-aws for >=1.27
+ defaultExternalAwsRange, err := semver.ParseRange(">=1.27.0-rancher0")
+ if err != nil {
+ return fmt.Errorf("failed to parse semver range for checking cloud provider %v", err)
+ }
+ if defaultExternalAwsRange(clusterVersion) {
+ return fmt.Errorf(fmt.Sprintf("Cloud provider %s is invalid for [%s]", aws.AWSCloudProviderName, c.Version))
+ }
+ }
}
return nil
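
Note: with this check, configuring the in-tree aws cloud provider against a v1.27 or newer cluster version fails early, in line with the removal of the in-tree AWS provider from upstream Kubernetes 1.27; such clusters have to use the out-of-tree (external) provider instead. A standalone sketch of the same version gate, assuming rancher-style version strings and that getClusterVersion simply strips the leading "v":

package main

import (
    "fmt"
    "strings"

    "github.com/blang/semver"
)

// inTreeAWSAllowed reports whether the in-tree aws provider is still accepted
// for the given cluster version, mirroring the range check above.
func inTreeAWSAllowed(clusterVersion string) (bool, error) {
    parsed, err := semver.Parse(strings.TrimPrefix(clusterVersion, "v"))
    if err != nil {
        return false, err
    }
    externalOnly := semver.MustParseRange(">=1.27.0-rancher0")
    return !externalOnly(parsed), nil
}

func main() {
    for _, v := range []string{"v1.26.9-rancher1-1", "v1.27.6-rancher1-1"} {
        ok, err := inTreeAWSAllowed(v)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s: in-tree aws allowed = %v\n", v, ok)
    }
}
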
29 changes: 23 additions & 6 deletions cluster/plan.go
@@ -13,6 +13,7 @@ import (

"github.com/blang/semver"
"github.com/docker/docker/api/types"
"github.com/rancher/rke/cloudprovider/aws"
"github.com/rancher/rke/docker"
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/k8s"
@@ -69,6 +70,7 @@
parsedRangeAtLeast123 = semver.MustParseRange(">= 1.23.0-rancher0")
parsedRangeAtLeast124 = semver.MustParseRange(">= 1.24.0-rancher0")
parsedRangeAtLeast125 = semver.MustParseRange(">= 1.25.0-rancher0")
+ parsedRangeBelow127 = semver.MustParseRange("< 1.27.0-rancher0")
parsedRange123 = semver.MustParseRange(">=1.23.0-rancher0 <=1.23.99-rancher-0")
parsedRange124 = semver.MustParseRange(">=1.24.0-rancher0 <=1.24.99-rancher-0")
)
@@ -179,7 +181,7 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, serviceOptions v3.Kubern
CommandArgs := map[string]string{
"admission-control-config-file": DefaultKubeAPIArgAdmissionControlConfigFileValue,
"client-ca-file": pki.GetCertPath(pki.CACertName),
"cloud-provider": c.CloudProvider.Name,
"cloud-provider": getCloudProviderName(c.CloudProvider.Name),
"etcd-cafile": etcdCAClientCert,
"etcd-certfile": etcdClientCert,
"etcd-keyfile": etcdClientKey,
@@ -344,7 +346,7 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, serviceOptions v3.Kubern
func (c *Cluster) BuildKubeControllerProcess(host *hosts.Host, serviceOptions v3.KubernetesServicesOptions) v3.Process {
Command := c.getRKEToolsEntryPoint(host.OS(), "kube-controller-manager")
CommandArgs := map[string]string{
"cloud-provider": c.CloudProvider.Name,
"cloud-provider": getCloudProviderName(c.CloudProvider.Name),
"cluster-cidr": c.ClusterCIDR,
"kubeconfig": pki.GetConfigPath(pki.KubeControllerCertName),
"root-ca-file": pki.GetCertPath(pki.CACertName),
@@ -463,7 +465,7 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host, serviceOptions v3.Kubern
Command := c.getRKEToolsEntryPoint(host.OS(), "kubelet")
CommandArgs := map[string]string{
"client-ca-file": pki.GetCertPath(pki.CACertName),
"cloud-provider": c.CloudProvider.Name,
"cloud-provider": getCloudProviderName(c.CloudProvider.Name),
"cluster-dns": c.ClusterDNSServer,
"cluster-domain": c.ClusterDomain,
"fail-swap-on": strconv.FormatBool(kubelet.FailSwapOn),
@@ -495,6 +497,11 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host, serviceOptions v3.Kubern
if host.IsWindows() { // compatible with Windows
CommandArgs["cloud-config"] = path.Join(host.PrefixPath, cloudConfigFileName)
}

+ if c.CloudProvider.Name == k8s.ExternalAWSCloudProviderName && c.CloudProvider.UseInstanceMetadataHostname != nil && *c.CloudProvider.UseInstanceMetadataHostname {
+ // rke-tools will inject hostname-override from ec2 instance metadata to match with the spec.nodeName set by cloud provider https://github.com/rancher/rke-tools/blob/3eab4f07aa97a8aeeaaef55b1b7bbc82e2a3374a/entrypoint.sh#L17
+ delete(CommandArgs, "hostname-override")
+ }
}

if c.IsKubeletGenerateServingCertificateEnabled() {
@@ -505,12 +512,14 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host, serviceOptions v3.Kubern
var Binds []string

if c.IsCRIDockerdEnabled() {
CommandArgs["container-runtime"] = "remote"
CommandArgs["container-runtime-endpoint"] = "/var/run/dockershim.sock"
parsedVersion, err := getClusterVersion(c.Version)
if err != nil {
logrus.Debugf("Error while parsing cluster version: %s", err)
}
+ if parsedRangeBelow127(parsedVersion) {
+ CommandArgs["container-runtime"] = "remote" // This flag has been removed from v1.27 https://v1-26.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/
+ }
+ CommandArgs["container-runtime-endpoint"] = "/var/run/dockershim.sock"
// cri-dockerd must be enabled if the cluster version is 1.24 and higher
if parsedRangeAtLeast124(parsedVersion) {
CommandArgs["container-runtime-endpoint"] = "unix:///var/run/cri-dockerd.sock"
@@ -692,7 +701,8 @@ func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, serviceOptions v3.Kube
} else {
CommandArgs["bind-address"] = host.Address
}
- if c.CloudProvider.Name == k8s.AWSCloudProvider && c.CloudProvider.UseInstanceMetadataHostname != nil && *c.CloudProvider.UseInstanceMetadataHostname {
+ if (c.CloudProvider.Name == k8s.ExternalAWSCloudProviderName || c.CloudProvider.Name == aws.AWSCloudProviderName) &&
+ c.CloudProvider.UseInstanceMetadataHostname != nil && *c.CloudProvider.UseInstanceMetadataHostname {
// rke-tools will inject hostname-override from ec2 instance metadata to match with the spec.nodeName set by cloud provider https://github.com/rancher/rke-tools/blob/3eab4f07aa97a8aeeaaef55b1b7bbc82e2a3374a/entrypoint.sh#L17
delete(CommandArgs, "hostname-override")
}
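
Note: the kubelet (above) and kube-proxy now drop --hostname-override when the AWS cloud provider (in-tree or external) is configured with UseInstanceMetadataHostname enabled, because rke-tools derives the hostname from EC2 instance metadata so that it matches the spec.nodeName chosen by the cloud provider. A minimal sketch of the configuration shape that triggers this path, using only the field names visible in this diff (the cluster.yml key spelling is an assumption):

package main

import (
    "fmt"

    v3 "github.com/rancher/rke/types"
)

func main() {
    useMetadataHostname := true
    cp := v3.CloudProvider{
        Name:                        "external-aws", // assumed value of k8s.ExternalAWSCloudProviderName
        UseInstanceMetadataHostname: &useMetadataHostname,
    }
    // Mirrors the guard used above before the hostname-override argument is deleted.
    if cp.Name == "external-aws" && cp.UseInstanceMetadataHostname != nil && *cp.UseInstanceMetadataHostname {
        fmt.Println("kubelet and kube-proxy omit --hostname-override; rke-tools injects it from EC2 metadata")
    }
}
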
@@ -1286,3 +1296,10 @@ func (c *Cluster) IsCRIDockerdEnabled() bool {
}
return false
}

+ func getCloudProviderName(name string) string {
+ if name == k8s.ExternalAWSCloudProviderName {
+ return "external"
+ }
+ return name
+ }
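
Note: getCloudProviderName leaves every provider name untouched except the out-of-tree AWS one, which is mapped to "external" — the value kube-apiserver, kube-controller-manager, and kubelet expect for an external provider — while the cluster state keeps the user-facing name. A small standalone illustration; the constant values are restated here so the example runs on its own, and "external-aws" is assumed to be k8s.ExternalAWSCloudProviderName:

package main

import "fmt"

// Restated locally for illustration; in RKE these live in the k8s and cloudprovider/aws packages.
const (
    externalAWSCloudProviderName = "external-aws"
    awsCloudProviderName         = "aws"
)

func getCloudProviderName(name string) string {
    if name == externalAWSCloudProviderName {
        return "external"
    }
    return name
}

func main() {
    for _, name := range []string{awsCloudProviderName, externalAWSCloudProviderName, "azure"} {
        fmt.Printf("cloud_provider name %q -> --cloud-provider=%s\n", name, getCloudProviderName(name))
    }
}
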
9 changes: 8 additions & 1 deletion cluster/remove.go
@@ -11,6 +11,7 @@ import (
v3 "github.com/rancher/rke/types"
"github.com/rancher/rke/util"
"golang.org/x/sync/errgroup"
v1 "k8s.io/api/core/v1"
)

func (c *Cluster) ClusterRemove(ctx context.Context) error {
@@ -92,7 +93,13 @@ func (c *Cluster) RemoveOldNodes(ctx context.Context) error {
host := &hosts.Host{}
host.HostnameOverride = node.Name
if !hosts.IsNodeInList(host, uniqueHosts) {
- if err := k8s.DeleteNode(kubeClient, node.Name, c.CloudProvider.Name); err != nil {
+ nodeAddress := ""
+ for _, addr := range node.Status.Addresses {
+ if addr.Type == v1.NodeInternalIP {
+ nodeAddress = addr.Address
+ }
+ }
+ if err := k8s.DeleteNode(kubeClient, node.Name, nodeAddress, c.CloudProvider.Name); err != nil {
log.Warnf(ctx, "Failed to delete old node [%s] from kubernetes", node.Name)
}
}
23 changes: 23 additions & 0 deletions cluster/validation.go
@@ -217,6 +217,13 @@ func validateNetworkOptions(c *Cluster) error {
if c.Network.Plugin == FlannelNetworkPlugin && c.Network.MTU != 0 {
return fmt.Errorf("Network plugin [%s] does not support configuring MTU", FlannelNetworkPlugin)
}

+ if c.Network.Plugin == WeaveNetworkPlugin {
+ if err := warnWeaveDeprecation(c.Version); err != nil {
+ return fmt.Errorf("Error while printing Weave deprecation message: %w", err)
+ }
+ }

dualStack := false
serviceClusterRanges := strings.Split(c.Services.KubeAPI.ServiceClusterIPRange, ",")
if len(serviceClusterRanges) > 1 {
@@ -731,3 +738,19 @@ func getClusterVersion(version string) (semver.Version, error) {
}
return parsedVersion, nil
}

+ // warnWeaveDeprecation prints a deprecation warning if the cluster version is 1.27 or higher
+ func warnWeaveDeprecation(k8sVersion string) error {
+ version, err := util.StrToSemVer(k8sVersion)
+ if err != nil {
+ return fmt.Errorf("error while parsing cluster version [%s]: %w", k8sVersion, err)
+ }
+ version127, err := util.StrToSemVer("v1.27.0")
+ if err != nil {
+ return fmt.Errorf("failed to translate v1.27.0 to semver notation: %w", err)
+ }
+ if !version.LessThan(*version127) {
+ logrus.Warn("Weave CNI plugin is deprecated starting with Kubernetes v1.27 and will be removed in Kubernetes v1.30")
+ }
+ return nil
+ }
2 changes: 1 addition & 1 deletion codegen/codegen.go
@@ -10,7 +10,7 @@ import (
)

const (
defaultURL = "https://releases.rancher.com/kontainer-driver-metadata/release-v2.7/data.json"
defaultURL = "https://releases.rancher.com/kontainer-driver-metadata/dev-v2.7/data.json"
dataFile = "data/data.json"
)

4 changes: 2 additions & 2 deletions data/bindata.go

Large diffs are not rendered by default.

(Diffs for the remaining changed files are not rendered on this page.)
