diff --git a/cmd/vclusterctl/cmd/platform/destroy.go b/cmd/vclusterctl/cmd/platform/destroy.go
new file mode 100644
index 000000000..27eded82d
--- /dev/null
+++ b/cmd/vclusterctl/cmd/platform/destroy.go
@@ -0,0 +1,91 @@
+package platform
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/loft-sh/log"
+	"github.com/loft-sh/vcluster/pkg/cli/flags"
+	"github.com/loft-sh/vcluster/pkg/cli/start"
+	"github.com/loft-sh/vcluster/pkg/platform/clihelper"
+	"github.com/spf13/cobra"
+)
+
+// DestroyCmd holds the cli flags for the destroy command.
+type DestroyCmd struct {
+	start.DeleteOptions
+}
+
+// NewDestroyCmd creates the "vcluster platform destroy" cobra command.
+func NewDestroyCmd(globalFlags *flags.GlobalFlags) *cobra.Command {
+	cmd := &DestroyCmd{
+		DeleteOptions: start.DeleteOptions{
+			Options: start.Options{
+				GlobalFlags: globalFlags,
+				Log:         log.GetInstance(),
+				CommandName: "destroy",
+			},
+		},
+	}
+
+	startCmd := &cobra.Command{
+		Use:   "destroy",
+		Short: "Destroy a vCluster platform instance",
+		Long: `########################################################
+############# vcluster platform destroy ##################
+########################################################
+
+Destroys a vCluster platform instance in your Kubernetes cluster.
+
+Please make sure you meet the following requirements
+before running this command:
+
+1. Current kube-context has admin access to the cluster
+2. Helm v3 must be installed
+
+
+VirtualClusterInstances managed with driver helm will be deleted, but the underlying virtual cluster will not be uninstalled
+
+########################################################
+	`,
+		Args: cobra.NoArgs,
+		RunE: func(cobraCmd *cobra.Command, _ []string) error {
+			return cmd.Run(cobraCmd.Context())
+		},
+	}
+
+	startCmd.Flags().StringVar(&cmd.Context, "context", "", "The kube context to use for installation")
+	startCmd.Flags().StringVar(&cmd.Namespace, "namespace", "", "The namespace vCluster platform is installed in")
+	startCmd.Flags().BoolVar(&cmd.DeleteNamespace, "delete-namespace", true, "Whether to delete the namespace or not")
+
+	return startCmd
+}
+
+// Run resolves the installation namespace, verifies the platform is actually
+// installed, and then delegates the teardown to start.Destroy.
+func (cmd *DestroyCmd) Run(ctx context.Context) error {
+	// initialise clients, verify binaries exist, sanity-check context
+	err := cmd.Options.Prepare()
+	if err != nil {
+		return fmt.Errorf("failed to prepare clients: %w", err)
+	}
+
+	// auto-detect the namespace when the user did not pass --namespace
+	if cmd.Namespace == "" {
+		namespace, err := clihelper.VClusterPlatformInstallationNamespace(ctx)
+		if err != nil {
+			return fmt.Errorf("vCluster platform may not be installed: %w", err)
+		}
+		cmd.Log.Infof("found platform installation in namespace %q", namespace)
+		cmd.Namespace = namespace
+	}
+
+	found, err := clihelper.IsLoftAlreadyInstalled(ctx, cmd.KubeClient, cmd.Namespace)
+	if err != nil {
+		return fmt.Errorf("vCluster platform may not be installed: %w", err)
+	}
+	if !found {
+		return fmt.Errorf("platform not installed in namespace %q", cmd.Namespace)
+	}
+
+	err = start.Destroy(ctx, cmd.DeleteOptions)
+	if err != nil {
+		return fmt.Errorf("failed to destroy platform: %w", err)
+	}
+	return nil
+}
diff --git a/cmd/vclusterctl/cmd/platform/platform.go b/cmd/vclusterctl/cmd/platform/platform.go
index ea2d09b2f..b2801b2b7 100644
--- a/cmd/vclusterctl/cmd/platform/platform.go
+++ b/cmd/vclusterctl/cmd/platform/platform.go
@@ -57,10 +57,12 @@ func NewPlatformCmd(globalFlags *flags.GlobalFlags) (*cobra.Command, error) {
 	}
 
 	startCmd := NewStartCmd(globalFlags)
+	destroyCmd := NewDestroyCmd(globalFlags)
 	loginCmd := NewCobraLoginCmd(globalFlags)
 	logoutCmd := NewLogoutCobraCmd(globalFlags)
 
 	platformCmd.AddCommand(startCmd)
+	platformCmd.AddCommand(destroyCmd)
 	platformCmd.AddCommand(NewResetCmd(globalFlags))
 	platformCmd.AddCommand(add.NewAddCmd(globalFlags))
 	platformCmd.AddCommand(NewAccessKeyCmd(globalFlags))
diff --git a/cmd/vclusterctl/cmd/platform/start.go b/cmd/vclusterctl/cmd/platform/start.go
index f748fd8d7..ee4b7d4c6 100644
--- a/cmd/vclusterctl/cmd/platform/start.go
+++ b/cmd/vclusterctl/cmd/platform/start.go
@@ -21,19 +21,23 @@ import (
 )
 
 type StartCmd struct {
-	start.Options
+	start.StartOptions
 }
 
 func NewStartCmd(globalFlags *flags.GlobalFlags) *cobra.Command {
+	name := "start"
 	cmd := &StartCmd{
-		Options: start.Options{
-			GlobalFlags: globalFlags,
-			Log:         log.GetInstance(),
+		StartOptions: start.StartOptions{
+			Options: start.Options{
+				CommandName: name,
+				GlobalFlags: globalFlags,
+				Log:         log.GetInstance(),
+			},
 		},
 	}
 
 	startCmd := &cobra.Command{
-		Use:   "start",
+		Use:   name,
 		Short: "Start a vCluster platform instance and connect via port-forwarding",
 		Long: `########################################################
 ############# vcluster platform start ##################
@@ -146,7 +150,7 @@ func (cmd *StartCmd) Run(ctx context.Context) error {
 		}
 	}
 
-	return start.NewLoftStarter(cmd.Options).Start(ctx)
+	return start.NewLoftStarter(cmd.StartOptions).Start(ctx)
 }
 
 func (cmd *StartCmd) ensureEmailWithDisclaimer() error {
diff --git a/pkg/cli/start/destroy.go b/pkg/cli/start/destroy.go
new file mode 100644
index 000000000..ab90b3151
--- /dev/null
+++ b/pkg/cli/start/destroy.go
@@ -0,0 +1,248 @@
+package start
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/loft-sh/log"
+	"github.com/loft-sh/vcluster/pkg/platform/clihelper"
+	apiextensionsv1clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/dynamic"
+)
+
+// define the order of resource deletion
+var resourceOrder = []string{
+	// instances
+	"virtualclusterinstances",
+	"virtualclusters",
+	"devpodworkspaceinstances",
+	"spaceinstances",
+
+	// templates
+	"virtualclustertemplates",
+	"devpodenvironmenttemplates",
+	"devpodworkspacetemplates",
+	"clusterroletemplates",
+	"spacetemplates",
+	"apps",
+	"spaceconstraints",
+
+	// infra
+	"tasks",
+	"clusterquotas",
+	"projects",
+	"runners",
+	"clusters",
+	"clusteraccesses",
+	"networkpeers",
+
+	// access
+	"teams",
+	"users",
+	"sharedsecrets",
+	"accesskeys",
+	"localclusteraccesses",
+	"localteams",
+	"localusers",
+}
+
+// resources that only exist on older platform installations; their absence
+// from discovery is expected and only logged at debug level
+var legacyResourceOrder = []string{
+	"virtualclusters",
+	"spaceconstraints",
+	"localclusteraccesses",
+	"localteams",
+	"localusers",
+}
+
+// DeleteOptions holds cli options for the delete command
+type DeleteOptions struct {
+	Options
+	DeleteNamespace bool
+}
+
+// Destroy removes a platform installation: custom resources in a safe order,
+// the helm release, RBAC objects, CRDs, and optionally the namespace.
+func Destroy(ctx context.Context, opts DeleteOptions) error {
+	// create a dynamic client
+	dynamicClient, err := dynamic.NewForConfig(opts.RestConfig)
+	if err != nil {
+		return err
+	}
+
+	// create a discovery client
+	discoveryClient, err := discovery.NewDiscoveryClientForConfig(opts.RestConfig)
+	if err != nil {
+		return err
+	}
+
+	apiextensionclientset, err := apiextensionsv1clientset.NewForConfig(opts.RestConfig)
+	if err != nil {
+		return err
+	}
+
+	// to compare resources advertised by server vs ones explicitly handled by us
+	clusterResourceSet := sets.New[string]()
+	handledResourceSet := sets.New(resourceOrder...)
+	legacyHandledResourceSet := sets.New(legacyResourceOrder...)
+
+	// get all custom resource definitions in storage.loft.sh
+	resourceList, err := discoveryClient.ServerResourcesForGroupVersion("storage.loft.sh/v1")
+	if err != nil {
+		return err
+	}
+
+	// populate the set
+	for _, resource := range resourceList.APIResources {
+		// don't insert subresources
+		if strings.Contains(resource.Name, "/") {
+			continue
+		}
+		clusterResourceSet.Insert(resource.Name)
+	}
+
+	// abort instead of partially destroying an installation this CLI version
+	// does not fully understand
+	unhandledResourceSet := clusterResourceSet.Difference(handledResourceSet)
+	if unhandledResourceSet.Len() != 0 {
+		// BUGFIX: previously logged and then returned `err`, which is nil
+		// here, so the caller silently continued — return a real error
+		return fmt.Errorf("some storage.loft.sh resources are unhandled: %v. Try a newer cli version", unhandledResourceSet.UnsortedList())
+	}
+
+	for _, resourceName := range resourceOrder {
+		if !clusterResourceSet.Has(resourceName) {
+			// only debug output if legacy resource
+			if legacyHandledResourceSet.Has(resourceName) {
+				opts.Log.Debugf("legacy resource %q not found in discovery, skipping", resourceName)
+			} else {
+				opts.Log.Infof("resource %q not found in discovery, skipping", resourceName)
+			}
+			continue
+		}
+		// list and delete all resources
+		err = deleteAllResourcesAndWait(ctx, dynamicClient, opts.Log, "storage.loft.sh", "v1", resourceName)
+		if err != nil {
+			return err
+		}
+	}
+
+	// helm uninstall and others
+	err = clihelper.UninstallLoft(ctx, opts.KubeClient, opts.RestConfig, opts.Context, opts.Namespace, opts.Log)
+	if err != nil {
+		return err
+	}
+
+	for _, name := range clihelper.DefaultClusterRoles {
+		opts.Log.Infof("deleting clusterrole %q", name)
+		err := opts.KubeClient.RbacV1().ClusterRoles().Delete(ctx, name, metav1.DeleteOptions{})
+		if err != nil && !kerrors.IsNotFound(err) {
+			return fmt.Errorf("failed to delete clusterrole: %w", err)
+		}
+	}
+	for _, name := range clihelper.DefaultClusterRoles {
+		// BUGFIX: name already carries the "-binding" suffix after this line;
+		// the previous code appended it a second time in the Delete call
+		// ("X-binding-binding"), so the real binding was never removed
+		name := name + "-binding"
+		opts.Log.Infof("deleting clusterrolebinding %q", name)
+		err := opts.KubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, name, metav1.DeleteOptions{})
+		if err != nil && !kerrors.IsNotFound(err) {
+			return fmt.Errorf("failed to delete clusterrolebinding: %w", err)
+		}
+	}
+
+	// delete our CRDs and wait until they are gone
+	err = wait.PollUntilContextTimeout(ctx, 2*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
+		list, err := apiextensionclientset.ApiextensionsV1().CustomResourceDefinitions().List(ctx, metav1.ListOptions{})
+		if err != nil {
+			return false, err
+		}
+		// BUGFIX: success used to require len(list.Items) == 0, i.e. a
+		// cluster with zero CRDs of any kind — on clusters with unrelated
+		// CRDs the poll could never succeed and always hit the timeout.
+		// Instead we are done once no storage.loft.sh CRD remains.
+		crdSuffix := ".storage.loft.sh"
+		remaining := false
+		for _, object := range list.Items {
+			if !strings.HasSuffix(object.Name, crdSuffix) {
+				continue
+			}
+			remaining = true
+			expectedResourceName := strings.TrimSuffix(object.Name, crdSuffix)
+			if !handledResourceSet.Has(expectedResourceName) {
+				opts.Log.Errorf("unhandled CRD: %q", object.Name)
+				continue
+			}
+			if !object.GetDeletionTimestamp().IsZero() {
+				opts.Log.Infof("deleted CRD still found: %q", object.GetName())
+				continue
+			}
+			opts.Log.Infof("deleting customresourcedefinition %v", object.GetName())
+			err := apiextensionclientset.ApiextensionsV1().CustomResourceDefinitions().Delete(ctx, object.Name, metav1.DeleteOptions{})
+			if err != nil {
+				return false, err
+			}
+		}
+		return !remaining, nil
+	})
+	if err != nil {
+		return fmt.Errorf("failed to delete CRDs: %w", err)
+	}
+
+	if opts.DeleteNamespace {
+		opts.Log.Infof("deleting namespace %q", opts.Namespace)
+		err = wait.PollUntilContextTimeout(ctx, 2*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
+			ns, err := opts.KubeClient.CoreV1().Namespaces().Get(ctx, opts.Namespace, metav1.GetOptions{})
+			if kerrors.IsNotFound(err) {
+				return true, nil
+			} else if err != nil {
+				return false, err
+			}
+
+			// only issue the delete once; afterwards just wait for the
+			// namespace to disappear
+			if ns.GetDeletionTimestamp().IsZero() {
+				err = opts.KubeClient.CoreV1().Namespaces().Delete(ctx, opts.Namespace, metav1.DeleteOptions{})
+				if err != nil {
+					return false, err
+				}
+			}
+			return false, nil
+		})
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// deleteAllResourcesAndWait deletes every object of the given
+// group/version/resource and polls until the collection is empty or the
+// two-minute timeout is reached.
+func deleteAllResourcesAndWait(ctx context.Context, dynamicClient dynamic.Interface, log log.Logger, group, version, resource string) error {
+	gvr := schema.GroupVersionResource{Group: group, Version: version, Resource: resource}
+	return wait.PollUntilContextTimeout(ctx, 2*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
+		log.Debugf("checking all %q", resource)
+
+		resourceClient := dynamicClient.Resource(gvr)
+		list, err := resourceClient.List(ctx, metav1.ListOptions{})
+		if err != nil {
+			return false, err
+		}
+		if len(list.Items) == 0 {
+			return true, nil
+		}
+		for _, object := range list.Items {
+			// something is already terminating — wait for it instead of
+			// issuing more deletes this round
+			if !object.GetDeletionTimestamp().IsZero() {
+				return false, nil
+			}
+			if object.GetNamespace() == "" {
+				log.Infof("deleting %v %v", resource, object.GetName())
+			} else {
+				log.Infof("deleting %v %v/%v", resource, object.GetNamespace(), object.GetName())
+			}
+			err := resourceClient.Namespace(object.GetNamespace()).Delete(ctx, object.GetName(), metav1.DeleteOptions{})
+			if err != nil && !kerrors.IsNotFound(err) {
+				return false, err
+			}
+		}
+		return false, nil
+	})
+}
diff --git a/pkg/cli/start/start.go b/pkg/cli/start/start.go
index 17c17bb64..0684d7609 100644
--- a/pkg/cli/start/start.go
+++ b/pkg/cli/start/start.go
@@ -6,6 +6,7 @@ import (
 	"os"
 	"os/exec"
 
+	storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1"
 	"github.com/loft-sh/api/v4/pkg/product"
 	"github.com/loft-sh/log"
 	"github.com/loft-sh/log/survey"
@@ -15,25 +16,43 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/kubectl/pkg/util/term"
 )
 
-// Options holds the cmd flags
+var (
+	scheme = runtime.NewScheme()
+
+	_ = clientgoscheme.AddToScheme(scheme)
+	_ = storagev1.AddToScheme(scheme)
+)
+
 type Options struct {
 	*flags.GlobalFlags
 
+	// for logging
+	CommandName string
+	Log         log.Logger
+
 	// Will be filled later
-	KubeClient kubernetes.Interface
-	Log        log.Logger
-	RestConfig *rest.Config
-	Context    string
+	KubeClient kubernetes.Interface
+	RestConfig *rest.Config
+
+	// cli options common to both start and destroy
+	Context   string
+	Namespace string
+}
+
+// StartOptions holds the cmd flags
+type StartOptions struct { //nolint:revive // linter suggests renaming to options which already exists
+	Options
+
+	// cli options
 	Values      string
 	LocalPort   string
 	Version     string
 	DockerImage string
-	Namespace   string
 	Password    string
 	Host        string
 	Email       string
@@ -52,14 +71,14 @@ type Options struct {
 	Docker bool
 }
 
-func NewLoftStarter(options Options) *LoftStarter {
+func NewLoftStarter(options StartOptions) *LoftStarter {
 	return &LoftStarter{
-		Options: options,
+		StartOptions: options,
 	}
 }
 
 type LoftStarter struct {
-	Options
+	StartOptions
 }
 
 // Start executes the functionality "loft start"
@@ -74,7 +93,7 @@ func (l *LoftStarter) Start(ctx context.Context) error {
 		l.LocalPort = "9898"
 	}
 
-	err := l.prepare()
+	err := l.Prepare()
 	if err != nil {
 		return err
 	}
@@ -121,7 +140,8 @@ func (l *LoftStarter) Start(ctx context.Context) error {
 	return l.success(ctx)
 }
 
-func (l *LoftStarter) prepare() error {
+// Prepare initializes clients, verifies the existence of binaries, and ensures we are starting with the right kube context
+func (l *Options) Prepare() error {
 	platformClient := platform.NewClientFromConfig(l.LoadedConfig(l.Log))
 	platformConfig := platformClient.Config().Platform
 
@@ -141,7 +161,7 @@ func (l *LoftStarter) prepare() error {
 		contextToLoad = l.Context
 	} else if platformConfig.LastInstallContext != "" && platformConfig.LastInstallContext != contextToLoad {
 		contextToLoad, err = l.Log.Question(&survey.QuestionOptions{
-			Question:     product.Replace("Seems like you try to use 'loft start' with a different kubernetes context than before. Please choose which kubernetes context you want to use"),
+			Question:     product.Replace(fmt.Sprintf("Seems like you try to use 'loft %s' with a different kubernetes context than before. Please choose which kubernetes context you want to use", l.CommandName)),
 			DefaultValue: contextToLoad,
 			Options:      []string{contextToLoad, platformConfig.LastInstallContext},
 		})
diff --git a/pkg/platform/clihelper/clihelper.go b/pkg/platform/clihelper/clihelper.go
index dcabf61e0..deed52d57 100644
--- a/pkg/platform/clihelper/clihelper.go
+++ b/pkg/platform/clihelper/clihelper.go
@@ -65,6 +65,14 @@ const timeoutEnvVariable = "LOFT_TIMEOUT"
 
 var defaultDeploymentName = "loft"
 
+// DefaultClusterRoles are the cluster-scoped RBAC objects the platform
+// installs; destroy deletes these plus their "-binding" counterparts.
+var DefaultClusterRoles = []string{
+	"loft-agent-cluster",
+	// NOTE(review): "runnner" (triple n) looks like a typo — confirm against
+	// the ClusterRole names the Helm chart actually creates before changing
+	"loft-runnner-cluster",
+	"loft-vcluster-cluster",
+}
+
 func Timeout() time.Duration {
 	if timeout := os.Getenv(timeoutEnvVariable); timeout != "" {
 		if parsedTimeout, err := time.ParseDuration(timeout); err == nil {
@@ -444,7 +452,16 @@ func IsLoftAlreadyInstalled(ctx context.Context, kubeClient kubernetes.Interface
 		}
 	}
 
-	_, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{})
+	// the namespace may be gone entirely (e.g. after a destroy) — treat that
+	// as "not installed" instead of an error
+	_, err := kubeClient.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
+	if kerrors.IsNotFound(err) {
+		return false, nil
+	} else if err != nil {
+		// BUGFIX: wrap the underlying error instead of discarding it
+		return false, fmt.Errorf("failed to get namespace %q: %w", namespace, err)
+	}
+
+	_, err = kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{})
 	if err != nil {
 		if kerrors.IsNotFound(err) {
 			return false, nil