StoragePolicy should select datastore based on connected cluster
rikatz committed Aug 24, 2023
1 parent de5ee99 commit 32390fa
Showing 1 changed file with 29 additions and 32 deletions.
pkg/services/govmomi/vcenter/clone.go
@@ -25,7 +25,6 @@ import (
 	"github.com/vmware/govmomi/object"
 	"github.com/vmware/govmomi/pbm"
 	pbmTypes "github.com/vmware/govmomi/pbm/types"
-	"github.com/vmware/govmomi/view"
 	"github.com/vmware/govmomi/vim25/mo"
 	"github.com/vmware/govmomi/vim25/types"
 	"k8s.io/utils/pointer"
@@ -229,27 +228,31 @@ func Clone(ctx *context.VMContext, bootstrapData []byte, format bootstrapv1.Form
 		return errors.Wrapf(err, "unable to get storageProfileID from name %s for %q", ctx.VSphereVM.Spec.StoragePolicyName, ctx)
 	}
 
-		kind := []string{"Datastore"}
-		m := view.NewManager(ctx.Session.Client.Client)
-
-		v, err := m.CreateContainerView(ctx, ctx.Session.Client.Client.ServiceContent.RootFolder, kind, true)
-		if err != nil {
-			return errors.Wrapf(err, "unable to create container view for Datastore for %q", ctx)
-		}
-
-		var content []types.ObjectContent
-		err = v.Retrieve(ctx, kind, []string{"name"}, &content)
-		_ = v.Destroy(ctx)
-		if err != nil {
-			return errors.Wrapf(err, "unable to retrieve container view for Datastore for %q", ctx)
-		}
 
 		var hubs []pbmTypes.PbmPlacementHub
-		for _, ds := range content {
+
+		// If there's a Datastore configured, it should be the only one for which we check if it matches the requirements of the Storage Policy
+		if datastoreRef != nil {
 			hubs = append(hubs, pbmTypes.PbmPlacementHub{
-				HubType: ds.Obj.Type,
-				HubId:   ds.Obj.Value,
+				HubType: datastoreRef.Type,
+				HubId:   datastoreRef.Value,
 			})
+		} else {
+			// Otherwise we should get just the Datastores connected to our pool
+			cluster, err := pool.Owner(ctx)
+			if err != nil {
+				return errors.Wrapf(err, "failed to get owning cluster of resourcepool %q to calculate datastore based on storage policy", pool)
+			}
+			dsGetter := object.NewComputeResource(ctx.Session.Client.Client, cluster.Reference())
+			datastores, err := dsGetter.Datastores(ctx)
+			if err != nil {
+				return errors.Wrapf(err, "unable to list datastores from owning cluster of requested resourcepool")
+			}
+			for _, ds := range datastores {
+				hubs = append(hubs, pbmTypes.PbmPlacementHub{
+					HubType: ds.Reference().Type,
+					HubId:   ds.Reference().Value,
+				})
+			}
+		}
 
 		var constraints []pbmTypes.BasePbmPlacementRequirement
@@ -263,25 +266,18 @@ func Clone(ctx *context.VMContext, bootstrapData []byte, format bootstrapv1.Form
 			return fmt.Errorf("no compatible datastores found for storage policy: %s", ctx.VSphereVM.Spec.StoragePolicyName)
 		}
 
-		if datastoreRef != nil {
-			ctx.Logger.Info("datastore and storagepolicy defined; searching for datastore in storage policy compatible datastores")
-			found := false
-			for _, ds := range result.CompatibleDatastores() {
-				compatibleRef := types.ManagedObjectReference{Type: ds.HubType, Value: ds.HubId}
-				if compatibleRef.String() == datastoreRef.String() {
-					found = true
-				}
-			}
-			if !found {
-				return fmt.Errorf("couldn't find specified datastore: %s in compatible list of datastores for storage policy", ctx.VSphereVM.Spec.Datastore)
-			}
-		} else {
+		// If datastoreRef is nil here it means that the user didn't specify a Datastore. So we should
+		// select one of the datastores of the owning cluster of the resource pool that matched the
+		// requirements of the storage policy.
+		if datastoreRef == nil {
 			r := rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec
 			ds := result.CompatibleDatastores()[r.Intn(len(result.CompatibleDatastores()))]
 			datastoreRef = &types.ManagedObjectReference{Type: ds.HubType, Value: ds.HubId}
 		}
 	}
 
-	// if datastoreRef is nil here, means that user didn't specified a datastore NOR a
-	// storagepolicy, so we should select the default
 	if datastoreRef == nil {
+		// if no datastore defined through VM spec or storage policy, use default
 		datastore, err := ctx.Session.Finder.DefaultDatastore(ctx)
@@ -294,6 +290,7 @@ func Clone(ctx *context.VMContext, bootstrapData []byte, format bootstrapv1.Form
 	disks := devices.SelectByType((*types.VirtualDisk)(nil))
 	isLinkedClone := snapshotRef != nil
 	spec.Location.Disk = getDiskLocators(disks, *datastoreRef, isLinkedClone)
+	spec.Location.Datastore = datastoreRef
 
 	ctx.Logger.Info("cloning machine", "namespace", ctx.VSphereVM.Namespace, "name", ctx.VSphereVM.Name, "cloneType", ctx.VSphereVM.Status.CloneMode)
 	task, err := tpl.Clone(ctx, folder, ctx.VSphereVM.Name, spec)
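Read in isolation, the new flow is: build a candidate list of PBM placement hubs (only the user-configured datastore if one is set, otherwise every datastore attached to the resource pool's owning cluster), ask PBM which candidates satisfy the storage policy, and pick one survivor at random. Below is a minimal, self-contained sketch of that flow against govmomi's object, pbm, and vim25 packages; the package and function names (placement, pickCompatibleDatastore) and the flat signature are hypothetical stand-ins for CAPV's session and VM-context plumbing, which is elided here.

package placement

import (
	"context"
	"fmt"
	"math/rand"
	"time"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/pbm"
	pbmTypes "github.com/vmware/govmomi/pbm/types"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/types"
)

// pickCompatibleDatastore mirrors the commit's selection logic: gather
// candidate hubs, filter them through PBM against the named storage policy,
// and return one compatible datastore reference.
func pickCompatibleDatastore(ctx context.Context, c *vim25.Client, pool *object.ResourcePool, policyName string, configured *types.ManagedObjectReference) (*types.ManagedObjectReference, error) {
	pbmClient, err := pbm.NewClient(ctx, c)
	if err != nil {
		return nil, err
	}
	profileID, err := pbmClient.ProfileIDByName(ctx, policyName)
	if err != nil {
		return nil, err
	}

	var hubs []pbmTypes.PbmPlacementHub
	if configured != nil {
		// A user-supplied datastore is the only candidate to validate.
		hubs = append(hubs, pbmTypes.PbmPlacementHub{HubType: configured.Type, HubId: configured.Value})
	} else {
		// Otherwise only datastores attached to the pool's owning cluster qualify.
		owner, err := pool.Owner(ctx)
		if err != nil {
			return nil, err
		}
		datastores, err := object.NewComputeResource(c, owner.Reference()).Datastores(ctx)
		if err != nil {
			return nil, err
		}
		for _, ds := range datastores {
			hubs = append(hubs, pbmTypes.PbmPlacementHub{HubType: ds.Reference().Type, HubId: ds.Reference().Value})
		}
	}

	// Ask PBM which hubs satisfy the policy's capability profile.
	constraints := []pbmTypes.BasePbmPlacementRequirement{
		&pbmTypes.PbmPlacementCapabilityProfileRequirement{ProfileId: pbmTypes.PbmProfileId{UniqueId: profileID}},
	}
	result, err := pbmClient.CheckRequirements(ctx, hubs, nil, constraints)
	if err != nil {
		return nil, err
	}
	compatible := result.CompatibleDatastores()
	if len(compatible) == 0 {
		return nil, fmt.Errorf("no compatible datastores found for storage policy %s", policyName)
	}

	// As in the commit, pick one compatible datastore at random.
	r := rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec
	ds := compatible[r.Intn(len(compatible))]
	return &types.ManagedObjectReference{Type: ds.HubType, Value: ds.HubId}, nil
}

The random pick among CompatibleDatastores() is a deliberately simple spreading strategy: every survivor already satisfies the policy, so the commit avoids pinning all clones to the first match.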

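The single added line in the last hunk, spec.Location.Datastore = datastoreRef, is worth calling out: in vSphere's relocate semantics, Location.Datastore places the VM home (the VMX and other non-disk files), while the per-disk locators from getDiskLocators only place individual virtual disks. A hypothetical sketch of the two fields, with datastoreRef and diskLocators standing in for the values Clone computes:

import "github.com/vmware/govmomi/vim25/types"

// buildCloneSpec illustrates the two placement fields the final hunk touches.
func buildCloneSpec(datastoreRef *types.ManagedObjectReference, diskLocators []types.VirtualMachineRelocateSpecDiskLocator) types.VirtualMachineCloneSpec {
	return types.VirtualMachineCloneSpec{
		Location: types.VirtualMachineRelocateSpec{
			Datastore: datastoreRef, // new in this commit: places the VM home directory
			Disk:      diskLocators, // pre-existing: per-virtual-disk placement
		},
	}
}

Before this change, only the disk locators carried the policy-selected datastore, so the VM home could land elsewhere; setting it on the relocate spec keeps all of the clone's files on the datastore the storage policy chose.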