From 8941470b3ea1b7ce7aa57b214ef0a7f11cb9731a Mon Sep 17 00:00:00 2001
From: Sean McGinnis
Date: Tue, 14 May 2024 17:38:03 -0500
Subject: [PATCH] Update Swordfish objects to v1.2.6 bundle release

Signed-off-by: Sean McGinnis
---
 swordfish/consistencygroup.go              | 365 +++++++++++
 swordfish/datastoragelineofservice.go      |  24 +
 swordfish/featuresregistry.go              | 120 ++++
 swordfish/fileshare.go                     |   5 +
 swordfish/filesystem.go                    |  12 +
 swordfish/filesystemmetrics.go             |  92 +++
 swordfish/ioconnectivitylineofservice.go   |  49 ++
 swordfish/ioconnectivityloscapabilities.go |   3 +
 swordfish/ioperformancelineofservice.go    |  50 ++
 swordfish/ioperformanceloscapabilities.go  |   3 +
 swordfish/lineofservice.go                 |  90 +++
 swordfish/nvmedomain.go                    | 230 +++++++
 swordfish/nvmefirmwareimage.go             | 110 ++++
 swordfish/storagegroup.go                  |  43 +-
 swordfish/storagepool.go                   | 292 ++++++++-
 swordfish/storagepoolmetrics.go            | 110 ++++
 swordfish/storagereplicainfo.go            |  19 +
 swordfish/storageservice.go                |  90 ++-
 swordfish/storageservicemetrics.go         |  93 +++
 swordfish/volume.go                        | 672 +++++++++++++++++----
 swordfish/volumemetrics.go                 | 111 ++++
 21 files changed, 2455 insertions(+), 128 deletions(-)
 create mode 100644 swordfish/consistencygroup.go
 create mode 100644 swordfish/featuresregistry.go
 create mode 100644 swordfish/filesystemmetrics.go
 create mode 100644 swordfish/lineofservice.go
 create mode 100644 swordfish/nvmedomain.go
 create mode 100644 swordfish/nvmefirmwareimage.go
 create mode 100644 swordfish/storagepoolmetrics.go
 create mode 100644 swordfish/storageservicemetrics.go
 create mode 100644 swordfish/volumemetrics.go

diff --git a/swordfish/consistencygroup.go b/swordfish/consistencygroup.go
new file mode 100644
index 00000000..bbc4226d
--- /dev/null
+++ b/swordfish/consistencygroup.go
@@ -0,0 +1,365 @@
+//
+// SPDX-License-Identifier: BSD-3-Clause
+//
+
+package swordfish
+
+import (
+	"encoding/json"
+	"errors"
+	"reflect"
+
+	"github.com/stmcginnis/gofish/common"
+)
+
+type ApplicationConsistencyMethod string
+
+const (
+	// HotStandbyApplicationConsistencyMethod supports a consistency method commonly orchestrated using application-specific code.
+	HotStandbyApplicationConsistencyMethod ApplicationConsistencyMethod = "HotStandby"
+	// OtherApplicationConsistencyMethod supports a consistency method orchestrated using vendor-specific code.
+	OtherApplicationConsistencyMethod ApplicationConsistencyMethod = "Other"
+	// VASAApplicationConsistencyMethod supports VMware consistency requirements, such as for VASA and VVOLs.
+	VASAApplicationConsistencyMethod ApplicationConsistencyMethod = "VASA"
+	// VDIApplicationConsistencyMethod supports the Microsoft virtual backup device interface (VDI).
+	VDIApplicationConsistencyMethod ApplicationConsistencyMethod = "VDI"
+	// VSSApplicationConsistencyMethod supports Microsoft VSS.
+	VSSApplicationConsistencyMethod ApplicationConsistencyMethod = "VSS"
+)
+
+// ConsistencyGroup is a collection of volumes grouped together to ensure write order consistency across all those
+// volumes. A management operation on a consistency group, such as configuring replication properties, applies to
+// all the volumes within the consistency group.
+type ConsistencyGroup struct {
+	common.Entity
+	// ODataContext is the odata context.
+	ODataContext string `json:"@odata.context"`
+	// ODataEtag is the odata etag.
+	ODataEtag string `json:"@odata.etag"`
+	// ODataType is the odata type.
+	ODataType string `json:"@odata.type"`
+	// ConsistencyMethod shall set the consistency method used by this group.
+	ConsistencyMethod ApplicationConsistencyMethod
+	// ConsistencyType shall set the consistency type used by this group.
+	ConsistencyType ConsistencyType
+	// Description provides a description of this resource.
+	Description string
+	// IsConsistent shall be set to true when the consistency group is in a consistent state.
+	IsConsistent bool
+	// Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the
+	// Redfish Specification-described requirements.
+	OEM json.RawMessage `json:"Oem"`
+	// RemoteReplicaTargets shall reference the URIs to the remote target replicas that are sourced by this replica.
+	// Remote indicates that the replica is managed by a separate Swordfish service instance.
+	RemoteReplicaTargets []string
+	// ReplicaInfo shall describe the replication relationship between this consistency group and a corresponding
+	// source consistency group.
+	ReplicaInfo ReplicaInfo
+	// ReplicaTargets shall reference the target replicas that are sourced by this replica.
+	ReplicaTargets []string
+	// ReplicaTargetsCount is the number of replica targets.
+	ReplicaTargetsCount int `json:"ReplicaTargets@odata.count"`
+	// Status shall contain the status of the ConsistencyGroup.
+	Status common.Status
+	// Volumes is an array of references to volumes managed by this consistency group.
+	volumes []string
+	// VolumesCount is the number of volumes.
+	VolumesCount int `json:"Volumes@odata.count"`
+	// rawData holds the original serialized JSON so we can compare updates.
+	rawData []byte
+
+	assignReplicaTargetTarget            string
+	createReplicaTargetTarget            string
+	removeReplicaRelationshipTarget      string
+	resumeReplicationTarget              string
+	reverseReplicationRelationshipTarget string
+	splitReplicationTarget               string
+	suspendReplicationTarget             string
+}
+
+// UnmarshalJSON unmarshals a ConsistencyGroup object from the raw JSON.
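+// The unexported action target URIs advertised under the resource's Actions
+// property are captured during unmarshaling so the action methods below
+// (AssignReplicaTarget, SplitReplication, and so on) know where to POST their
+// request payloads.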
+func (consistencygroup *ConsistencyGroup) UnmarshalJSON(b []byte) error {
+	type temp ConsistencyGroup
+	var t struct {
+		temp
+		Actions struct {
+			AssignReplicaTarget            common.ActionTarget `json:"#ConsistencyGroup.AssignReplicaTarget"`
+			CreateReplicaTarget            common.ActionTarget `json:"#ConsistencyGroup.CreateReplicaTarget"`
+			RemoveReplicaRelationship      common.ActionTarget `json:"#ConsistencyGroup.RemoveReplicaRelationship"`
+			ResumeReplication              common.ActionTarget `json:"#ConsistencyGroup.ResumeReplication"`
+			ReverseReplicationRelationship common.ActionTarget `json:"#ConsistencyGroup.ReverseReplicationRelationship"`
+			SplitReplication               common.ActionTarget `json:"#ConsistencyGroup.SplitReplication"`
+			SuspendReplication             common.ActionTarget `json:"#ConsistencyGroup.SuspendReplication"`
+		}
+		Volumes common.Links
+	}
+
+	err := json.Unmarshal(b, &t)
+	if err != nil {
+		return err
+	}
+
+	*consistencygroup = ConsistencyGroup(t.temp)
+
+	// Extract the links to other entities for later
+	consistencygroup.assignReplicaTargetTarget = t.Actions.AssignReplicaTarget.Target
+	consistencygroup.createReplicaTargetTarget = t.Actions.CreateReplicaTarget.Target
+	consistencygroup.removeReplicaRelationshipTarget = t.Actions.RemoveReplicaRelationship.Target
+	consistencygroup.resumeReplicationTarget = t.Actions.ResumeReplication.Target
+	consistencygroup.reverseReplicationRelationshipTarget = t.Actions.ReverseReplicationRelationship.Target
+	consistencygroup.splitReplicationTarget = t.Actions.SplitReplication.Target
+	consistencygroup.suspendReplicationTarget = t.Actions.SuspendReplication.Target
+
+	consistencygroup.volumes = t.Volumes.ToStrings()
+
+	// This is a read/write object, so we need to save the raw object data for later
+	consistencygroup.rawData = b
+
+	return nil
+}
+
+// AssignReplicaTarget will establish a replication relationship by assigning an existing consistency group
+// to serve as a target replica for an existing source consistency group.
+//
+// `replicaType` is the type of replica relationship to be created (e.g., Clone, Mirror, Snap).
+// `updateMode` is the replica update mode (synchronous vs asynchronous).
+// `targetGroupURI` is the URI of the existing consistency group.
+func (consistencygroup *ConsistencyGroup) AssignReplicaTarget(replicaType ReplicaType, updateMode ReplicaUpdateMode, targetGroupURI string) error {
+	if consistencygroup.assignReplicaTargetTarget == "" {
+		return errors.New("method not supported by this service")
+	}
+
+	payload := struct {
+		ReplicateType          string
+		ReplicaUpdateMode      string
+		TargetConsistencyGroup string
+	}{
+		ReplicateType:          string(replicaType),
+		ReplicaUpdateMode:      string(updateMode),
+		TargetConsistencyGroup: targetGroupURI,
+	}
+
+	return consistencygroup.Post(consistencygroup.assignReplicaTargetTarget, payload)
+}
+
+// CreateReplicaTarget will create a new consistency group resource to provide expanded data protection
+// through a replica relationship with the specified source consistency group.
+//
+// `groupName` is the name for the target consistency group.
+// `replicaType` is the type of replica relationship to be created (e.g., Clone, Mirror, Snap).
+// `updateMode` is the replica update mode (synchronous vs asynchronous).
+// `targetGroupURI` is the URI of the existing consistency group.
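+//
+// A minimal usage sketch (the client, service URIs, and group name here are
+// hypothetical; error handling elided):
+//
+//	cg, _ := swordfish.GetConsistencyGroup(c, "/redfish/v1/StorageServices/1/ConsistencyGroups/CG1")
+//	_ = cg.CreateReplicaTarget("CG1-replica", swordfish.MirrorReplicaType,
+//		swordfish.AsynchronousReplicaUpdateMode, "/redfish/v1/StorageServices/1/ConsistencyGroups")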
+func (consistencygroup *ConsistencyGroup) CreateReplicaTarget(groupName string, replicaType ReplicaType, updateMode ReplicaUpdateMode, targetGroupURI string) error {
+	if consistencygroup.createReplicaTargetTarget == "" {
+		return errors.New("method not supported by this service")
+	}
+
+	payload := struct {
+		ConsistencyGroupName   string
+		ReplicateType          string
+		ReplicaUpdateMode      string
+		TargetConsistencyGroup string
+	}{
+		ConsistencyGroupName:   groupName,
+		ReplicateType:          string(replicaType),
+		ReplicaUpdateMode:      string(updateMode),
+		TargetConsistencyGroup: targetGroupURI,
+	}
+
+	return consistencygroup.Post(consistencygroup.createReplicaTargetTarget, payload)
+}
+
+// RemoveReplicaRelationship will disable data synchronization between a source and target consistency group,
+// remove the replication relationship, and optionally delete the target consistency group.
+//
+// `deleteTarget` indicates whether or not to delete the target consistency group as part of the operation.
+// `targetGroupURI` is the URI of the existing consistency group.
+func (consistencygroup *ConsistencyGroup) RemoveReplicaRelationship(deleteTarget bool, targetGroupURI string) error {
+	if consistencygroup.removeReplicaRelationshipTarget == "" {
+		return errors.New("method not supported by this service")
+	}
+
+	payload := struct {
+		DeleteTargetConsistencyGroup bool
+		TargetConsistencyGroup       string
+	}{
+		DeleteTargetConsistencyGroup: deleteTarget,
+		TargetConsistencyGroup:       targetGroupURI,
+	}
+
+	return consistencygroup.Post(consistencygroup.removeReplicaRelationshipTarget, payload)
+}
+
+// ResumeReplication will resume the active data synchronization between a source and target
+// consistency group, without otherwise altering the replication relationship.
+//
+// `targetGroupURI` is the URI of the existing consistency group.
+func (consistencygroup *ConsistencyGroup) ResumeReplication(targetGroupURI string) error {
+	if consistencygroup.resumeReplicationTarget == "" {
+		return errors.New("method not supported by this service")
+	}
+
+	payload := struct {
+		TargetConsistencyGroup string
+	}{
+		TargetConsistencyGroup: targetGroupURI,
+	}
+
+	return consistencygroup.Post(consistencygroup.resumeReplicationTarget, payload)
+}
+
+// ReverseReplicationRelationship will reverse the replication relationship between a source and target
+// consistency group, swapping the roles of source and target.
+//
+// `targetGroupURI` is the URI of the existing consistency group.
+func (consistencygroup *ConsistencyGroup) ReverseReplicationRelationship(targetGroupURI string) error {
+	if consistencygroup.reverseReplicationRelationshipTarget == "" {
+		return errors.New("method not supported by this service")
+	}
+
+	payload := struct {
+		TargetConsistencyGroup string
+	}{
+		TargetConsistencyGroup: targetGroupURI,
+	}
+
+	return consistencygroup.Post(consistencygroup.reverseReplicationRelationshipTarget, payload)
+}
+
+// SplitReplication will split the replication relationship and suspend data synchronization
+// between a source and target consistency group.
+//
+// `targetGroupURI` is the URI of the existing consistency group.
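+//
+// A minimal usage sketch (hypothetical URIs; error handling elided). A split is
+// commonly followed later by ResumeReplication against the same target:
+//
+//	src, _ := swordfish.GetConsistencyGroup(c, "/redfish/v1/StorageServices/1/ConsistencyGroups/CG1")
+//	_ = src.SplitReplication("/redfish/v1/StorageServices/1/ConsistencyGroups/CG1-replica")
+//	// ... back up or validate the split target here ...
+//	_ = src.ResumeReplication("/redfish/v1/StorageServices/1/ConsistencyGroups/CG1-replica")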
+func (consistencygroup *ConsistencyGroup) SplitReplication(targetGroupURI string) error {
+	if consistencygroup.splitReplicationTarget == "" {
+		return errors.New("method not supported by this service")
+	}
+
+	payload := struct {
+		TargetConsistencyGroup string
+	}{
+		TargetConsistencyGroup: targetGroupURI,
+	}
+
+	return consistencygroup.Post(consistencygroup.splitReplicationTarget, payload)
+}
+
+// SuspendReplication will suspend active data synchronization between a source and target
+// consistency group, without otherwise altering the replication relationship.
+//
+// `targetGroupURI` is the URI of the existing consistency group.
+func (consistencygroup *ConsistencyGroup) SuspendReplication(targetGroupURI string) error {
+	if consistencygroup.suspendReplicationTarget == "" {
+		return errors.New("method not supported by this service")
+	}
+
+	payload := struct {
+		TargetConsistencyGroup string
+	}{
+		TargetConsistencyGroup: targetGroupURI,
+	}
+
+	return consistencygroup.Post(consistencygroup.suspendReplicationTarget, payload)
+}
+
+// Volumes gets the volumes in this consistency group.
+func (consistencygroup *ConsistencyGroup) Volumes() ([]*Volume, error) {
+	var result []*Volume
+
+	collectionError := common.NewCollectionError()
+	for _, uri := range consistencygroup.volumes {
+		sc, err := GetVolume(consistencygroup.GetClient(), uri)
+		if err != nil {
+			collectionError.Failures[uri] = err
+		} else {
+			result = append(result, sc)
+		}
+	}
+
+	if collectionError.Empty() {
+		return result, nil
+	}
+
+	return result, collectionError
+}
+
+// Update commits updates to this object's properties to the running system.
+func (consistencygroup *ConsistencyGroup) Update() error {
+	// Get a representation of the object's original state so we can find what
+	// to update.
+	original := new(ConsistencyGroup)
+	original.UnmarshalJSON(consistencygroup.rawData)
+
+	readWriteFields := []string{
+		"ConsistencyMethod",
+		"ConsistencyType",
+		"Volumes",
+	}
+
+	originalElement := reflect.ValueOf(original).Elem()
+	currentElement := reflect.ValueOf(consistencygroup).Elem()
+
+	return consistencygroup.Entity.Update(originalElement, currentElement, readWriteFields)
+}
+
+// GetConsistencyGroup will get a ConsistencyGroup instance from the service.
+func GetConsistencyGroup(c common.Client, uri string) (*ConsistencyGroup, error) {
+	resp, err := c.Get(uri)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var consistencygroup ConsistencyGroup
+	err = json.NewDecoder(resp.Body).Decode(&consistencygroup)
+	if err != nil {
+		return nil, err
+	}
+
+	consistencygroup.SetClient(c)
+	return &consistencygroup, nil
+}
+
+// ListReferencedConsistencyGroups gets the collection of ConsistencyGroup from
+// a provided reference.
+func ListReferencedConsistencyGroups(c common.Client, link string) ([]*ConsistencyGroup, error) { + var result []*ConsistencyGroup + if link == "" { + return result, nil + } + + type GetResult struct { + Item *ConsistencyGroup + Link string + Error error + } + + ch := make(chan GetResult) + collectionError := common.NewCollectionError() + get := func(link string) { + consistencygroup, err := GetConsistencyGroup(c, link) + ch <- GetResult{Item: consistencygroup, Link: link, Error: err} + } + + go func() { + err := common.CollectList(get, c, link) + if err != nil { + collectionError.Failures[link] = err + } + close(ch) + }() + + for r := range ch { + if r.Error != nil { + collectionError.Failures[r.Link] = r.Error + } else { + result = append(result, r.Item) + } + } + + if collectionError.Empty() { + return result, nil + } + + return result, collectionError +} diff --git a/swordfish/datastoragelineofservice.go b/swordfish/datastoragelineofservice.go index 0c2068d4..9c10d156 100644 --- a/swordfish/datastoragelineofservice.go +++ b/swordfish/datastoragelineofservice.go @@ -6,6 +6,7 @@ package swordfish import ( "encoding/json" + "reflect" "github.com/stmcginnis/gofish/common" ) @@ -47,6 +48,8 @@ type DataStorageLineOfService struct { // 'offline'. The expectation is that the services required to implement // this capability are part of the advertising system. RecoveryTimeObjectives RecoveryAccessScope + // rawData holds the original serialized JSON so we can compare updates. + rawData []byte } // UnmarshalJSON unmarshals a DataStorageLineOfService object from the raw JSON. @@ -68,6 +71,27 @@ func (datastoragelineofservice *DataStorageLineOfService) UnmarshalJSON(b []byte return nil } +// Update commits updates to this object's properties to the running system. +func (datastoragelineofservice *DataStorageLineOfService) Update() error { + // Get a representation of the object's original state so we can find what + // to update. + original := new(DataStorageLineOfService) + original.UnmarshalJSON(datastoragelineofservice.rawData) + + readWriteFields := []string{ + "AccessCapabilities", + "IsSpaceEfficient", + "ProvisioningPolicy", + "RecoverableCapacitySourceCount", + "RecoveryTimeObjectives", + } + + originalElement := reflect.ValueOf(original).Elem() + currentElement := reflect.ValueOf(datastoragelineofservice).Elem() + + return datastoragelineofservice.Entity.Update(originalElement, currentElement, readWriteFields) +} + // GetDataStorageLineOfService will get a DataStorageLineOfService instance from the service. func GetDataStorageLineOfService(c common.Client, uri string) (*DataStorageLineOfService, error) { var dataStorageLineOfService DataStorageLineOfService diff --git a/swordfish/featuresregistry.go b/swordfish/featuresregistry.go new file mode 100644 index 00000000..e711dd75 --- /dev/null +++ b/swordfish/featuresregistry.go @@ -0,0 +1,120 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// + +package swordfish + +import ( + "encoding/json" + + "github.com/stmcginnis/gofish/common" +) + +// FeaturesRegistry shall be used to represent a Feature registry for a Redfish implementation. +type FeaturesRegistry struct { + common.Entity + // ODataContext is the odata context. + ODataContext string `json:"@odata.context"` + // ODataEtag is the odata etag. + ODataEtag string `json:"@odata.etag"` + // ODataType is the odata type. + ODataType string `json:"@odata.type"` + // Description provides a description of this resource. 
+ Description string + // Features shall represent the suffix to be used in the FeatureId and shall be unique within this message + // registry. + Features []SupportedFeature + // Language shall be a string consisting of an RFC 5646 language code. + Language string + // Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the + // Redfish Specification-described requirements. + OEM json.RawMessage `json:"Oem"` + // OwningEntity shall be a string that represents the publisher of this registry. + OwningEntity string + // RegistryPrefix shall be the prefix used in IDs which uniquely identifies all of the Features in this registry as + // belonging to this registry. + RegistryPrefix string + // RegistryVersion shall be the version of this message registry. The format of this string shall be of the format + // majorversion.minorversion.errata. + RegistryVersion string +} + +// GetFeaturesRegistry will get a FeaturesRegistry instance from the service. +func GetFeaturesRegistry(c common.Client, uri string) (*FeaturesRegistry, error) { + resp, err := c.Get(uri) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var featuresregistry FeaturesRegistry + err = json.NewDecoder(resp.Body).Decode(&featuresregistry) + if err != nil { + return nil, err + } + + featuresregistry.SetClient(c) + return &featuresregistry, nil +} + +// ListReferencedFeaturesRegistrys gets the collection of FeaturesRegistry from +// a provided reference. +func ListReferencedFeaturesRegistrys(c common.Client, link string) ([]*FeaturesRegistry, error) { + var result []*FeaturesRegistry + if link == "" { + return result, nil + } + + type GetResult struct { + Item *FeaturesRegistry + Link string + Error error + } + + ch := make(chan GetResult) + collectionError := common.NewCollectionError() + get := func(link string) { + featuresregistry, err := GetFeaturesRegistry(c, link) + ch <- GetResult{Item: featuresregistry, Link: link, Error: err} + } + + go func() { + err := common.CollectList(get, c, link) + if err != nil { + collectionError.Failures[link] = err + } + close(ch) + }() + + for r := range ch { + if r.Error != nil { + collectionError.Failures[r.Link] = r.Error + } else { + result = append(result, r.Item) + } + } + + if collectionError.Empty() { + return result, nil + } + + return result, collectionError +} + +// FeaturesRegistryProperty shall represent the suffix to be used in the Feature and shall be unique within this +// registry. +type FeaturesRegistryProperty struct { +} + +// SupportedFeature shall name a feature. +type SupportedFeature struct { + // CorrespondingProfileDefinition shall define a profile definition that contains the named profile declaration. + CorrespondingProfileDefinition string + // Description provides a description of this resource. + Description string + // FeatureName shall be the unique name of the feature prefixed by the defining organization separated by a period + // (e.g. 'vendor.feature'). + FeatureName string + // Version shall uniquely identify the version of the feature, using the major.minor.errata format. + Version string +} diff --git a/swordfish/fileshare.go b/swordfish/fileshare.go index fb60e913..937343bf 100644 --- a/swordfish/fileshare.go +++ b/swordfish/fileshare.go @@ -85,6 +85,10 @@ type FileShare struct { // {[(SUM(AllocatedBytes) - SUM(ConsumedBytes)]/SUM(AllocatedBytes)}*100 // represented as an integer value. 
RemainingCapacityPercent int + // ReplicationEnabled shall indicate whether or not replication is enabled + // on the file share. This property shall be consistent with the state + // reflected at the storage pool level. + ReplicationEnabled bool // RootAccess shall indicate whether Root // access is allowed by the file share. The default value for this // property is false. @@ -148,6 +152,7 @@ func (fileshare *FileShare) Update() error { "FileShareQuotaType", "FileShareTotalQuotaBytes", "LowSpaceWarningThresholdPercents", + "ReplicationEnabled", } originalElement := reflect.ValueOf(original).Elem() diff --git a/swordfish/filesystem.go b/swordfish/filesystem.go index 242dd199..930ca5d3 100644 --- a/swordfish/filesystem.go +++ b/swordfish/filesystem.go @@ -150,6 +150,7 @@ type FileSystem struct { // MaxFileNameLengthBytes shall specify the maximum length of a file name // within the file system. MaxFileNameLengthBytes int64 + metrics string // RecoverableCapacitySourceCount is the number of available capacity source // resources currently available in the event that an equivalent capacity // source resource fails. @@ -205,6 +206,7 @@ func (filesystem *FileSystem) UnmarshalJSON(b []byte) error { ExportedShares common.Link ReplicaTargets common.Links Links links + Metrics common.Link } err := json.Unmarshal(b, &t) @@ -228,6 +230,15 @@ func (filesystem *FileSystem) UnmarshalJSON(b []byte) error { return nil } +// Metrics gets the filesystem metrics. +func (filesystem *FileSystem) Metrics() (*FileSystemMetrics, error) { + if filesystem.metrics == "" { + return nil, nil + } + + return GetFileSystemMetrics(filesystem.GetClient(), filesystem.metrics) +} + // Update commits updates to this object's properties to the running system. func (filesystem *FileSystem) Update() error { // Get a representation of the object's original state so we can find what @@ -249,6 +260,7 @@ func (filesystem *FileSystem) Update() error { "LowSpaceWarningThresholdPercents", "MaxFileNameLengthBytes", "RecoverableCapacitySourceCount", + "ReplicationEnabled", } originalElement := reflect.ValueOf(original).Elem() diff --git a/swordfish/filesystemmetrics.go b/swordfish/filesystemmetrics.go new file mode 100644 index 00000000..976824dd --- /dev/null +++ b/swordfish/filesystemmetrics.go @@ -0,0 +1,92 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// + +//nolint:dupl +package swordfish + +import ( + "encoding/json" + + "github.com/stmcginnis/gofish/common" +) + +// FileSystemMetrics shall contain the usage and health statistics for a file system in a Redfish implementation. +type FileSystemMetrics struct { + common.Entity + // ODataContext is the odata context. + ODataContext string `json:"@odata.context"` + // ODataEtag is the odata etag. + ODataEtag string `json:"@odata.etag"` + // ODataType is the odata type. + ODataType string `json:"@odata.type"` + // Description provides a description of this resource. + Description string + // IOStatistics shall represent IO statistics for this file system. + IOStatistics IOStatistics + // Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the + // Redfish Specification-described requirements. + OEM json.RawMessage `json:"Oem"` +} + +// GetFileSystemMetrics will get a FileSystemMetrics instance from the service. 
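+//
+// Metrics are normally reached through the owning FileSystem's Metrics()
+// accessor rather than by building this URI directly. A sketch (hypothetical
+// URI; error handling elided):
+//
+//	fs, _ := swordfish.GetFileSystem(c, "/redfish/v1/StorageServices/1/FileSystems/FS1")
+//	if m, _ := fs.Metrics(); m != nil {
+//		fmt.Printf("read requests: %d\n", m.IOStatistics.ReadIORequests)
+//	}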
+func GetFileSystemMetrics(c common.Client, uri string) (*FileSystemMetrics, error) { + resp, err := c.Get(uri) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var filesystemmetrics FileSystemMetrics + err = json.NewDecoder(resp.Body).Decode(&filesystemmetrics) + if err != nil { + return nil, err + } + + filesystemmetrics.SetClient(c) + return &filesystemmetrics, nil +} + +// ListReferencedFileSystemMetricss gets the collection of FileSystemMetrics from +// a provided reference. +func ListReferencedFileSystemMetricss(c common.Client, link string) ([]*FileSystemMetrics, error) { + var result []*FileSystemMetrics + if link == "" { + return result, nil + } + + type GetResult struct { + Item *FileSystemMetrics + Link string + Error error + } + + ch := make(chan GetResult) + collectionError := common.NewCollectionError() + get := func(link string) { + filesystemmetrics, err := GetFileSystemMetrics(c, link) + ch <- GetResult{Item: filesystemmetrics, Link: link, Error: err} + } + + go func() { + err := common.CollectList(get, c, link) + if err != nil { + collectionError.Failures[link] = err + } + close(ch) + }() + + for r := range ch { + if r.Error != nil { + collectionError.Failures[r.Link] = r.Error + } else { + result = append(result, r.Item) + } + } + + if collectionError.Empty() { + return result, nil + } + + return result, collectionError +} diff --git a/swordfish/ioconnectivitylineofservice.go b/swordfish/ioconnectivitylineofservice.go index 0660e0d4..8faaaeac 100644 --- a/swordfish/ioconnectivitylineofservice.go +++ b/swordfish/ioconnectivitylineofservice.go @@ -5,6 +5,9 @@ package swordfish import ( + "encoding/json" + "reflect" + "github.com/stmcginnis/gofish/common" ) @@ -31,6 +34,52 @@ type IOConnectivityLineOfService struct { // MaxIOPS shall be the maximum IOs per second that the connection shall // allow for the selected access protocol. MaxIOPS int + // Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the + // Redfish Specification-described requirements. + OEM json.RawMessage `json:"Oem"` + // rawData holds the original serialized JSON so we can compare updates. + rawData []byte +} + +// UnmarshalJSON unmarshals a IOConnectivityLineOfService object from the raw JSON. +func (ioconnectivitylineofservice *IOConnectivityLineOfService) UnmarshalJSON(b []byte) error { + type temp IOConnectivityLineOfService + var t struct { + temp + } + + err := json.Unmarshal(b, &t) + if err != nil { + return err + } + + *ioconnectivitylineofservice = IOConnectivityLineOfService(t.temp) + + // Extract the links to other entities for later + + // This is a read/write object, so we need to save the raw object data for later + ioconnectivitylineofservice.rawData = b + + return nil +} + +// Update commits updates to this object's properties to the running system. +func (ioconnectivitylineofservice *IOConnectivityLineOfService) Update() error { + // Get a representation of the object's original state so we can find what + // to update. 
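+	// Entity.Update (called below) diffs each field named in readWriteFields
+	// between this original copy and the current struct, and PATCHes only the
+	// properties that actually changed.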
+ original := new(IOConnectivityLineOfService) + original.UnmarshalJSON(ioconnectivitylineofservice.rawData) + + readWriteFields := []string{ + "AccessProtocols", + "MaxBytesPerSecond", + "MaxIOPS", + } + + originalElement := reflect.ValueOf(original).Elem() + currentElement := reflect.ValueOf(ioconnectivitylineofservice).Elem() + + return ioconnectivitylineofservice.Entity.Update(originalElement, currentElement, readWriteFields) } // GetIOConnectivityLineOfService will get a IOConnectivityLineOfService instance from the service. diff --git a/swordfish/ioconnectivityloscapabilities.go b/swordfish/ioconnectivityloscapabilities.go index a8181799..6ae68539 100644 --- a/swordfish/ioconnectivityloscapabilities.go +++ b/swordfish/ioconnectivityloscapabilities.go @@ -29,6 +29,9 @@ type IOConnectivityLoSCapabilities struct { MaxSupportedBytesPerSecond int64 // MaxSupportedIOPS shall be the maximum IOPS that a connection can support. MaxSupportedIOPS int + // Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the + // Redfish Specification-described requirements. + OEM json.RawMessage `json:"Oem"` // SupportedAccessProtocols is Access protocols supported by this service // option. NOTE: SMB+NFS* requires that SMB and at least one of NFSv3 or // NFXv4 are also selected, (i.e. {'SMB', 'NFSv4', 'SMB+NFS*'}). diff --git a/swordfish/ioperformancelineofservice.go b/swordfish/ioperformancelineofservice.go index 3d99d99a..eca4f564 100644 --- a/swordfish/ioperformancelineofservice.go +++ b/swordfish/ioperformancelineofservice.go @@ -5,6 +5,9 @@ package swordfish import ( + "encoding/json" + "reflect" + "github.com/stmcginnis/gofish/common" ) @@ -39,9 +42,56 @@ type IOPerformanceLineOfService struct { // capacity. Cost is a function of this value and the // AverageIOOperationLatencyMicroseconds. MaxIOOperationsPerSecondPerTerabyte int + // Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the + // Redfish Specification-described requirements. + OEM json.RawMessage `json:"Oem"` // SamplePeriod shall be an ISO 8601 duration specifying the // sampling period over which average values are calculated. SamplePeriod string + // rawData holds the original serialized JSON so we can compare updates. + rawData []byte +} + +// UnmarshalJSON unmarshals a IOPerformanceLineOfService object from the raw JSON. +func (ioperformancelineofservice *IOPerformanceLineOfService) UnmarshalJSON(b []byte) error { + type temp IOPerformanceLineOfService + var t struct { + temp + } + + err := json.Unmarshal(b, &t) + if err != nil { + return err + } + + *ioperformancelineofservice = IOPerformanceLineOfService(t.temp) + + // Extract the links to other entities for later + + // This is a read/write object, so we need to save the raw object data for later + ioperformancelineofservice.rawData = b + + return nil +} + +// Update commits updates to this object's properties to the running system. +func (ioperformancelineofservice *IOPerformanceLineOfService) Update() error { + // Get a representation of the object's original state so we can find what + // to update. 
+ original := new(IOPerformanceLineOfService) + original.UnmarshalJSON(ioperformancelineofservice.rawData) + + readWriteFields := []string{ + "AverageIOOperationLatencyMicroseconds", + "IOOperationsPerSecondIsLimited", + "MaxIOOperationsPerSecondPerTerabyte", + "SamplePeriod", + } + + originalElement := reflect.ValueOf(original).Elem() + currentElement := reflect.ValueOf(ioperformancelineofservice).Elem() + + return ioperformancelineofservice.Entity.Update(originalElement, currentElement, readWriteFields) } // GetIOPerformanceLineOfService will get a IOPerformanceLineOfService instance from the service. diff --git a/swordfish/ioperformanceloscapabilities.go b/swordfish/ioperformanceloscapabilities.go index 5c8e6e5a..7ea64b50 100644 --- a/swordfish/ioperformanceloscapabilities.go +++ b/swordfish/ioperformanceloscapabilities.go @@ -59,6 +59,9 @@ type IOPerformanceLoSCapabilities struct { // MinSupportedIoOperationLatencyMicroseconds shall be the minimum supported // average IO latency in microseconds calculated over the SamplePeriod MinSupportedIoOperationLatencyMicroseconds int + // Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the + // Redfish Specification-described requirements. + OEM json.RawMessage `json:"Oem"` // SupportedIOWorkloads shall be a collection of supported workloads. SupportedIOWorkloads []IOWorkload // SupportedLinesOfService shall be a collection of supported IO performance diff --git a/swordfish/lineofservice.go b/swordfish/lineofservice.go new file mode 100644 index 00000000..d1a7a9b3 --- /dev/null +++ b/swordfish/lineofservice.go @@ -0,0 +1,90 @@ +// +// SPDX-License-Identifier: BSD-3-Clause +// + +package swordfish + +import ( + "encoding/json" + + "github.com/stmcginnis/gofish/common" +) + +// LineOfService This service option is the abstract base class for other ClassOfService and concrete lines of +// service. +type LineOfService struct { + common.Entity + // ODataContext is the odata context. + ODataContext string `json:"@odata.context"` + // ODataEtag is the odata etag. + ODataEtag string `json:"@odata.etag"` + // ODataType is the odata type. + ODataType string `json:"@odata.type"` + // Description provides a description of this resource. + Description string + // Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the + // Redfish Specification-described requirements. + OEM json.RawMessage `json:"Oem"` +} + +// GetLineOfService will get a LineOfService instance from the service. +func GetLineOfService(c common.Client, uri string) (*LineOfService, error) { + resp, err := c.Get(uri) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var lineofservice LineOfService + err = json.NewDecoder(resp.Body).Decode(&lineofservice) + if err != nil { + return nil, err + } + + lineofservice.SetClient(c) + return &lineofservice, nil +} + +// ListReferencedLineOfServices gets the collection of LineOfService from +// a provided reference. 
+func ListReferencedLineOfServices(c common.Client, link string) ([]*LineOfService, error) {
+	var result []*LineOfService
+	if link == "" {
+		return result, nil
+	}
+
+	type GetResult struct {
+		Item  *LineOfService
+		Link  string
+		Error error
+	}
+
+	ch := make(chan GetResult)
+	collectionError := common.NewCollectionError()
+	get := func(link string) {
+		lineofservice, err := GetLineOfService(c, link)
+		ch <- GetResult{Item: lineofservice, Link: link, Error: err}
+	}
+
+	go func() {
+		err := common.CollectList(get, c, link)
+		if err != nil {
+			collectionError.Failures[link] = err
+		}
+		close(ch)
+	}()
+
+	for r := range ch {
+		if r.Error != nil {
+			collectionError.Failures[r.Link] = r.Error
+		} else {
+			result = append(result, r.Item)
+		}
+	}
+
+	if collectionError.Empty() {
+		return result, nil
+	}
+
+	return result, collectionError
+}
diff --git a/swordfish/nvmedomain.go b/swordfish/nvmedomain.go
new file mode 100644
index 00000000..1fdede39
--- /dev/null
+++ b/swordfish/nvmedomain.go
@@ -0,0 +1,230 @@
+//
+// SPDX-License-Identifier: BSD-3-Clause
+//
+
+package swordfish
+
+import (
+	"encoding/json"
+	"reflect"
+
+	"github.com/stmcginnis/gofish/common"
+)
+
+// DomainContents shall contain properties that define the contents of the domain.
+type DomainContents struct {
+	// Controllers contains the current controllers that are part of this domain. These can be IO, Admin, or discovery
+	// controllers.
+	controllers []string
+	// ControllersCount is the number of controllers.
+	ControllersCount int `json:"Controllers@odata.count"`
+	// Namespaces contains the current namespaces that are part of this domain.
+	Namespaces []Volume
+	// NamespacesCount is the number of namespaces.
+	NamespacesCount int `json:"Namespaces@odata.count"`
+}
+
+// UnmarshalJSON unmarshals a DomainContents object from the raw JSON.
+func (domaincontents *DomainContents) UnmarshalJSON(b []byte) error {
+	type temp DomainContents
+	var t struct {
+		temp
+		Controllers common.Links
+	}
+
+	err := json.Unmarshal(b, &t)
+	if err != nil {
+		return err
+	}
+
+	*domaincontents = DomainContents(t.temp)
+
+	// Extract the links to other entities for later
+	domaincontents.controllers = t.Controllers.ToStrings()
+
+	return nil
+}
+
+// NVMeDomain contains properties for the NVMe Domain.
+type NVMeDomain struct {
+	common.Entity
+	// ODataContext is the odata context.
+	ODataContext string `json:"@odata.context"`
+	// ODataEtag is the odata etag.
+	ODataEtag string `json:"@odata.etag"`
+	// ODataType is the odata type.
+	ODataType string `json:"@odata.type"`
+	// ANAGroupID shall contain the ANA group ID which applies to all namespaces within the domain. This corresponds to
+	// the value in the ANAGroupID field in Volume.
+	ANAGroupID float64
+	// AvailableFirmwareImages is a collection of available firmware images.
+	AvailableFirmwareImages []NVMeFirmwareImage
+	// AvailableFirmwareImagesCount is the number of available firmware images.
+	AvailableFirmwareImagesCount int `json:"AvailableFirmwareImages@odata.count"`
+	// Description provides a description of this resource.
+	Description string
+	// DomainContents shall contain the members of the domain.
+	DomainContents DomainContents
+	// DomainMembers are the members of the domain.
+	domainMembers []string
+	// DomainMembersCount is the number of domain members.
+	DomainMembersCount int `json:"DomainMembers@odata.count"`
+	// FirmwareImages shall contain an array of pointers to available firmware images.
+ firmwareImages []string + // FirmwareImagesCount is the number of firmware images. + FirmwareImagesCount int `json:"FirmwareImages@odata.count"` + // MaxNamespacesSupportedPerController shall contain the maximum number of namespace attachments supported in this + // NVMe Domain. If there are no limits imposed, this property should not be implemented. + MaxNamespacesSupportedPerController float64 + // MaximumCapacityPerEnduranceGroupBytes shall contain the maximum capacity per endurance group in bytes of this + // NVMe Domain. + MaximumCapacityPerEnduranceGroupBytes int + // Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the + // Redfish Specification-described requirements. + OEM json.RawMessage `json:"Oem"` + // Status shall contain any status or health properties of the resource. + Status common.Status + // TotalDomainCapacityBytes shall contain the total capacity in bytes of this NVMe Domain. + TotalDomainCapacityBytes int64 + // UnallocatedDomainCapacityBytes shall contain the total unallocated capacity in bytes of this NVMe Domain. + UnallocatedDomainCapacityBytes int64 + // rawData holds the original serialized JSON so we can compare updates. + rawData []byte + + associatedDomains []string + // AssociatedDomainsCount is the number of associated domains. + AssociatedDomainsCount int +} + +// UnmarshalJSON unmarshals a NVMeDomain object from the raw JSON. +func (nvmedomain *NVMeDomain) UnmarshalJSON(b []byte) error { + type temp NVMeDomain + var t struct { + temp + Links struct { + AssociatedDomains common.Links + AssociatedDomainsCount int `json:"AssociatedDomains@odata.count"` + } + DomainMembers common.Links + FirmwareImages common.Links + } + + err := json.Unmarshal(b, &t) + if err != nil { + return err + } + + *nvmedomain = NVMeDomain(t.temp) + + // Extract the links to other entities for later + nvmedomain.domainMembers = t.DomainMembers.ToStrings() + nvmedomain.firmwareImages = t.FirmwareImages.ToStrings() + nvmedomain.associatedDomains = t.Links.AssociatedDomains.ToStrings() + nvmedomain.AssociatedDomainsCount = t.Links.AssociatedDomainsCount + + // This is a read/write object, so we need to save the raw object data for later + nvmedomain.rawData = b + + return nil +} + +// AssociatedDomains gets the NVMeDomains associated with this domain. +func (nvmedomain *NVMeDomain) AssociatedDomains() ([]*NVMeDomain, error) { + var result []*NVMeDomain + + collectionError := common.NewCollectionError() + for _, uri := range nvmedomain.associatedDomains { + sc, err := GetNVMeDomain(nvmedomain.GetClient(), uri) + if err != nil { + collectionError.Failures[uri] = err + } else { + result = append(result, sc) + } + } + + if collectionError.Empty() { + return result, nil + } + + return result, collectionError +} + +// Update commits updates to this object's properties to the running system. +func (nvmedomain *NVMeDomain) Update() error { + // Get a representation of the object's original state so we can find what + // to update. + original := new(NVMeDomain) + original.UnmarshalJSON(nvmedomain.rawData) + + readWriteFields := []string{ + "DomainMembers", + } + + originalElement := reflect.ValueOf(original).Elem() + currentElement := reflect.ValueOf(nvmedomain).Elem() + + return nvmedomain.Entity.Update(originalElement, currentElement, readWriteFields) +} + +// GetNVMeDomain will get a NVMeDomain instance from the service. 
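+//
+// A minimal usage sketch (hypothetical URI; error handling elided):
+//
+//	domain, _ := swordfish.GetNVMeDomain(c, "/redfish/v1/Storage/NVMe1/NVMeDomains/1")
+//	fmt.Printf("domain capacity: %d bytes total, %d bytes unallocated\n",
+//		domain.TotalDomainCapacityBytes, domain.UnallocatedDomainCapacityBytes)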
+func GetNVMeDomain(c common.Client, uri string) (*NVMeDomain, error) {
+	resp, err := c.Get(uri)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var nvmedomain NVMeDomain
+	err = json.NewDecoder(resp.Body).Decode(&nvmedomain)
+	if err != nil {
+		return nil, err
+	}
+
+	nvmedomain.SetClient(c)
+	return &nvmedomain, nil
+}
+
+// ListReferencedNVMeDomains gets the collection of NVMeDomain from
+// a provided reference.
+func ListReferencedNVMeDomains(c common.Client, link string) ([]*NVMeDomain, error) {
+	var result []*NVMeDomain
+	if link == "" {
+		return result, nil
+	}
+
+	type GetResult struct {
+		Item  *NVMeDomain
+		Link  string
+		Error error
+	}
+
+	ch := make(chan GetResult)
+	collectionError := common.NewCollectionError()
+	get := func(link string) {
+		nvmedomain, err := GetNVMeDomain(c, link)
+		ch <- GetResult{Item: nvmedomain, Link: link, Error: err}
+	}
+
+	go func() {
+		err := common.CollectList(get, c, link)
+		if err != nil {
+			collectionError.Failures[link] = err
+		}
+		close(ch)
+	}()
+
+	for r := range ch {
+		if r.Error != nil {
+			collectionError.Failures[r.Link] = r.Error
+		} else {
+			result = append(result, r.Item)
+		}
+	}
+
+	if collectionError.Empty() {
+		return result, nil
+	}
+
+	return result, collectionError
+}
diff --git a/swordfish/nvmefirmwareimage.go b/swordfish/nvmefirmwareimage.go
new file mode 100644
index 00000000..17b4f481
--- /dev/null
+++ b/swordfish/nvmefirmwareimage.go
@@ -0,0 +1,110 @@
+//
+// SPDX-License-Identifier: BSD-3-Clause
+//
+
+package swordfish
+
+import (
+	"encoding/json"
+
+	"github.com/stmcginnis/gofish/common"
+)
+
+// NVMeDeviceType is the type of NVMe device.
+type NVMeDeviceType string
+
+const (
+	// DriveNVMeDeviceType specifies a device type of Drive, indicating an NVMe device that presents as an NVMe SSD device.
+	DriveNVMeDeviceType NVMeDeviceType = "Drive"
+	// FabricAttachArrayNVMeDeviceType specifies an NVMe device type of FabricAttachArray,
+	// indicating an NVMe device that presents an NVMe front-end that abstracts the back end
+	// storage, typically with multiple options for availability and protection.
+	FabricAttachArrayNVMeDeviceType NVMeDeviceType = "FabricAttachArray"
+	// JBOFNVMeDeviceType specifies a device type of JBOF, indicating an NVMe device that
+	// presents as an NVMe smart enclosure for NVMe devices, typically NVMe Drives.
+	JBOFNVMeDeviceType NVMeDeviceType = "JBOF"
+)
+
+// NVMeFirmwareImage contains NVMe Domain firmware image information.
+type NVMeFirmwareImage struct {
+	common.Entity
+	// ODataContext is the odata context.
+	ODataContext string `json:"@odata.context"`
+	// ODataEtag is the odata etag.
+	ODataEtag string `json:"@odata.etag"`
+	// ODataType is the odata type.
+	ODataType string `json:"@odata.type"`
+	// Description provides a description of this resource.
+	Description string
+	// FirmwareVersion shall contain the firmware version of the available NVMe firmware image.
+	FirmwareVersion string
+	// NVMeDeviceType shall specify the type of NVMe device for this NVMe firmware image.
+	NVMeDeviceType NVMeDeviceType
+	// Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the
+	// Redfish Specification-described requirements.
+	OEM json.RawMessage `json:"Oem"`
+	// Vendor shall include the name of the manufacturer or vendor associated with this NVMe firmware image.
+	Vendor string
+}
+
+// GetNVMeFirmwareImage will get an NVMeFirmwareImage instance from the service.
+func GetNVMeFirmwareImage(c common.Client, uri string) (*NVMeFirmwareImage, error) { + resp, err := c.Get(uri) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var nvmefirmwareimage NVMeFirmwareImage + err = json.NewDecoder(resp.Body).Decode(&nvmefirmwareimage) + if err != nil { + return nil, err + } + + nvmefirmwareimage.SetClient(c) + return &nvmefirmwareimage, nil +} + +// ListReferencedNVMeFirmwareImages gets the collection of NVMeFirmwareImage from +// a provided reference. +func ListReferencedNVMeFirmwareImages(c common.Client, link string) ([]*NVMeFirmwareImage, error) { + var result []*NVMeFirmwareImage + if link == "" { + return result, nil + } + + type GetResult struct { + Item *NVMeFirmwareImage + Link string + Error error + } + + ch := make(chan GetResult) + collectionError := common.NewCollectionError() + get := func(link string) { + nvmefirmwareimage, err := GetNVMeFirmwareImage(c, link) + ch <- GetResult{Item: nvmefirmwareimage, Link: link, Error: err} + } + + go func() { + err := common.CollectList(get, c, link) + if err != nil { + collectionError.Failures[link] = err + } + close(ch) + }() + + for r := range ch { + if r.Error != nil { + collectionError.Failures[r.Link] = r.Error + } else { + result = append(result, r.Item) + } + } + + if collectionError.Empty() { + return result, nil + } + + return result, collectionError +} diff --git a/swordfish/storagegroup.go b/swordfish/storagegroup.go index 59106aaf..7f7fba68 100644 --- a/swordfish/storagegroup.go +++ b/swordfish/storagegroup.go @@ -32,21 +32,31 @@ const ( // CHAPInformation is used for CHAP auth. type CHAPInformation struct { - // InitiatorCHAPPassword shall be the - // shared secret for CHAP authentication. + // CHAPPassword shall be the password when CHAP authentication is specified. + CHAPPassword string + // CHAPUser shall be the username when CHAP authentication is specified. + CHAPUser string + // InitiatorCHAPPassword shall be the shared secret for Mutual (2-way) CHAP + // authentication. InitiatorCHAPPassword string - // InitiatorCHAPUser is If present, this property is the initiator CHAP - // username for authentication. For example, with an iSCSI scenario, use - // the initiator iQN. + // InitiatorCHAPUser If present, this property is the initiator CHAP username + // for Mutual (2-way) authentication. For example, with an iSCSI scenario, + // use the initiator iQN. InitiatorCHAPUser string - // TargetCHAPUser shall be the CHAP - // Username for 2-way CHAP authentication. For example, with an iSCSI - // scenario, use the target iQN. In a FC with DHCHAP, this value will be - // a FC WWN. + // TargetCHAPPassword shall be the CHAP Secret for 2-way CHAP authentication. + TargetCHAPPassword string + // TargetCHAPUser shall be the Target CHAP Username for Mutual (2-way) CHAP + // authentication. For example, with an iSCSI scenario, use the target iQN. TargetCHAPUser string - // TargetPassword shall be the CHAP Secret - // for 2-way CHAP authentication. - TargetPassword string +} + +// DHCHAPInformation User name and password values for target and initiator +// endpoints when CHAP authentication is used. +type DHCHAPInformation struct { + // LocalDHCHAPAuthSecret shall be the local DHCHAP auth secret for DHCHAP authentication. + LocalDHCHAPAuthSecret string + // PeerDHCHAPAuthSecret shall be the peer DHCHAP auth secret for DHCHAP authentication. + PeerDHCHAPAuthSecret string } // StorageGroup is a set of related storage entities (volumes, file systems...) 
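+// As a usage sketch for the authentication structures above (all field values
+// are illustrative only), one-way CHAP needs only the target credentials, while
+// mutual (2-way) CHAP adds the initiator credentials:
+//
+//	auth := swordfish.CHAPInformation{
+//		CHAPUser:              "target-chap-user",
+//		CHAPPassword:          "target-secret",
+//		InitiatorCHAPUser:     "iqn.1993-08.org.debian:01:host1",
+//		InitiatorCHAPPassword: "initiator-secret",
+//	}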
@@ -90,6 +100,7 @@ type StorageGroup struct {
 	MappedVolumes []MappedVolume
 	// MembersAreConsistent shall be set to true if all members are in a
 	// consistent state. The default value for this property is false.
+	// Deprecated in favor of using the ConsistencyGroup for Consistency set management.
 	MembersAreConsistent bool
 	// ReplicaInfo shall describe the replication relationship between this
 	// storage group and a corresponding source storage group.
@@ -110,11 +121,15 @@ type StorageGroup struct {
 	// Status is the status of this group.
 	Status common.Status
 	// VolumesCount is the number of volumes.
+	// These references are replaced by the MappedVolumes array in StorageGroup.
 	VolumesCount int `json:"Volumes@odata.count"`
 	// VolumesAreExposed shall be set to true if storage volumes are exposed to
 	// the paths defined by the client and server endpoints. The default value
 	// for this property is false.
 	VolumesAreExposed bool
+	// rawData holds the original serialized JSON so we can compare updates.
+	rawData []byte
+
 	// ChildStorageGroups is an array of references to StorageGroups that are
 	// incorporated into this StorageGroup
 	childStorageGroups []string
@@ -128,12 +143,11 @@ type StorageGroup struct {
 	parentStorageGroups []string
 	// ParentStorageGroupsCount is the number of parent storage groups.
 	ParentStorageGroupsCount int
+
 	// exposeVolumesTarget is the URL for the ExposeVolumes action.
 	exposeVolumesTarget string
 	// hideVolumesTarget is the URL for the HideVolumes action.
 	hideVolumesTarget string
-	// rawData holds the original serialized JSON so we can compare updates.
-	rawData []byte
 }
 
 // UnmarshalJSON unmarshals a StorageGroup object from the raw JSON.
@@ -169,6 +183,7 @@ func (storagegroup *StorageGroup) UnmarshalJSON(b []byte) error {
 	storagegroup.classOfService = t.Links.ClassOfService.String()
 	storagegroup.parentStorageGroups = t.Links.ParentStorageGroups.ToStrings()
 	storagegroup.ParentStorageGroupsCount = t.Links.ParentStorageGroupsCount
+
 	storagegroup.exposeVolumesTarget = t.Actions.ExposeVolumes.Target
 	storagegroup.hideVolumesTarget = t.Actions.HideVolumes.Target
 
diff --git a/swordfish/storagepool.go b/swordfish/storagepool.go
index 5081690a..e67c96cb 100644
--- a/swordfish/storagepool.go
+++ b/swordfish/storagepool.go
@@ -6,12 +6,113 @@ package swordfish
 
 import (
 	"encoding/json"
+	"errors"
 	"reflect"
 
 	"github.com/stmcginnis/gofish/common"
 	"github.com/stmcginnis/gofish/redfish"
 )
 
+// EndGrpLifetime contains properties for the Endurance Group Lifetime attributes.
+type EndGrpLifetime struct {
+	// DataUnitsRead shall contain the total number of data units read from this endurance group. This value does not
+	// include controller reads due to internal operations such as garbage collection. The value is reported in
+	// billions, where a value of 1 corresponds to 1 billion bytes read, and is rounded up. A value of zero
+	// indicates the property is unsupported.
+	DataUnitsRead int
+	// DataUnitsWritten shall contain the total number of data units written from this endurance group. This value does
+	// not include controller writes due to internal operations such as garbage collection. The value is reported in
+	// billions, where a value of 1 corresponds to 1 billion bytes written, and is rounded up. A value of zero
+	// indicates the property is unsupported.
+	DataUnitsWritten int
+	// EnduranceEstimate shall contain an estimate of the total number of data bytes that may be written to the
+	// Endurance Group over the lifetime of the Endurance Group assuming a write amplification of 1. The value is
+	// reported in billions, where a value of 1 corresponds to 1 billion bytes written, and is rounded up. A value of
+	// zero indicates endurance estimates are unsupported.
+	EnduranceEstimate int
+	// ErrorInformationLogEntryCount shall contain the number of error information log entries over the life of the
+	// controller for the endurance group.
+	ErrorInformationLogEntryCount int
+	// HostReadCommandCount shall contain the number of read commands completed by all controllers in the NVM subsystem
+	// for the Endurance Group. For the NVM command set, this is the number of compare commands and read commands.
+	HostReadCommandCount int
+	// HostWriteCommandCount shall contain the number of write commands completed by all controllers in the NVM
+	// subsystem for the Endurance Group. For the NVM command set, this is the number of compare commands and write
+	// commands.
+	HostWriteCommandCount int
+	// MediaAndDataIntegrityErrorCount shall contain the number of occurrences where the controller detected an
+	// unrecovered data integrity error for the Endurance Group. Errors such as uncorrectable ECC, CRC checksum
+	// failure, or LBA tag mismatch are included in this field.
+	MediaAndDataIntegrityErrorCount int
+	// MediaUnitsWritten shall contain the total number of data units written from this endurance group. This value
+	// includes host and controller writes due to internal operations such as garbage collection. The value is reported
+	// in billions, where a value of 1 corresponds to 1 billion bytes written, and is rounded up. A value of zero
+	// indicates the property is unsupported.
+	MediaUnitsWritten int
+	// PercentUsed shall contain a vendor-specific estimate of the percent life used for the endurance group based on
+	// the actual usage and the manufacturer prediction of NVM life. A value of 100 indicates that the estimated
+	// endurance of the NVM in the Endurance Group has been consumed, but may not indicate an NVM failure. According to
+	// the NVMe and JEDEC specs, the value is allowed to exceed 100. Percentages greater than 254 shall be represented
+	// as 255.
+	PercentUsed int
+}
+
+// NVMeEnduranceGroupProperties contains properties to use when StoragePool is used to describe an NVMe
+// Endurance Group.
+type NVMeEnduranceGroupProperties struct {
+	// EndGrpLifetime shall contain any Endurance Group Lifetime properties.
+	EndGrpLifetime EndGrpLifetime
+	// PredictedMediaLifeLeftPercent shall contain an indicator of the percentage of life remaining in the drive's
+	// media.
+	PredictedMediaLifeLeftPercent float64
+}
+
+type NVMePoolType string
+
+const (
+	// EnduranceGroupNVMePoolType is of type EnduranceGroup, used by NVMe devices.
+	EnduranceGroupNVMePoolType NVMePoolType = "EnduranceGroup"
+	// NVMSetNVMePoolType is of type NVMSet, used by NVMe devices.
+	NVMSetNVMePoolType NVMePoolType = "NVMSet"
+)
+
+// NVMeProperties contains properties to use when StoragePool is used to describe an NVMe construct.
+type NVMeProperties struct {
+	// NVMePoolType shall indicate whether the StoragePool is used as an EnduranceGroup or an NVMSet.
+	NVMePoolType NVMePoolType
+}
+
+// NVMeSetProperties contains properties to use when StoragePool is used to describe an NVMe Set.
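+//
+// Whether these properties are meaningful depends on how the pool is typed, so
+// a client would typically check NVMeProperties first (a sketch; error handling
+// elided):
+//
+//	if pool.NVMeProperties.NVMePoolType == swordfish.NVMSetNVMePoolType {
+//		fmt.Printf("optimal write size: %d bytes\n", pool.NVMeSetProperties.OptimalWriteSizeBytes)
+//	}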
+type NVMeSetProperties struct {
+	// EnduranceGroupIdentifier shall contain a 16-bit hex value that contains the endurance group identifier. The
+	// endurance group identifier is unique within a subsystem. Reserved values include 0.
+	EnduranceGroupIdentifier string
+	// OptimalWriteSizeBytes shall contain the Optimal Write Size in Bytes for this NVMe Set.
+	OptimalWriteSizeBytes int
+	// Random4kReadTypicalNanoSeconds shall contain the typical time to complete a 4k read in 100 nano-second units
+	// when the NVM Set is in a Predictable Latency Mode Deterministic Window and there is 1 outstanding command per
+	// NVM Set.
+	Random4kReadTypicalNanoSeconds int
+	// SetIdentifier shall contain a 16-bit hex value that contains the NVMe Set group identifier. The NVM Set
+	// identifier is unique within a subsystem. Reserved values include 0.
+	SetIdentifier string
+	// UnallocatedNVMNamespaceCapacityBytes shall contain the unallocated capacity of the NVMe Set in bytes.
+	UnallocatedNVMNamespaceCapacityBytes int
+}
+
+type PoolType string
+
+const (
+	// BlockPoolType is of type block.
+	BlockPoolType PoolType = "Block"
+	// FilePoolType is of type file.
+	FilePoolType PoolType = "File"
+	// ObjectPoolType is of type object.
+	ObjectPoolType PoolType = "Object"
+	// PoolPoolType is of type pool, indicating a hierarchy.
+	PoolPoolType PoolType = "Pool"
+)
+
 // StoragePool is a container of data storage capable of providing
 // capacity conforming to one of its supported classes of service. The
 // storage pool does not support IO to its data storage.
@@ -29,8 +130,8 @@ type StoragePool struct {
 	// reference to the collection of volumes allocated from this storage
 	// pool.
 	allocatedVolumes string
-	// Capacity shall provide an information
-	// about the actual utilization of the capacity within this storage pool.
+	// Capacity shall provide information about the actual utilization of the
+	// capacity within this storage pool.
 	Capacity Capacity
 	// CapacitySources is fully or partially consumed storage from a source
 	// resource. Each entry shall provide capacity allocation data from a
@@ -44,16 +145,38 @@ type StoragePool struct {
 	classesOfService string
 	// Compressed shall contain a boolean indicator if the StoragePool is
 	// currently utilizing compression or not.
+	// This property has been deprecated in favor of the CompressionEnabled and
+	// DefaultCompressionBehavior properties.
 	Compressed bool
+	// CompressionEnabled shall indicate whether or not compression is enabled on the storage pool.
+	CompressionEnabled bool
 	// Deduplicated shall contain a boolean indicator if the StoragePool is
 	// currently utilizing deduplication or not.
+	// This property has been deprecated in favor of the DeduplicationEnabled and
+	// DefaultDeduplicationBehavior properties.
 	Deduplicated bool
+	// DeduplicationEnabled shall indicate whether or not deduplication is enabled on the storage pool.
+	DeduplicationEnabled bool
 	// defaultClassOfService is a link to the default class of service for this pool.
 	defaultClassOfService string
+	// DefaultCompressionBehavior shall indicate the default compression behavior applied to the child resource (e.g.,
+	// volume or storage pool) created out of the storage pool if the 'Compressed' property is not set on the create
+	// request.
+	DefaultCompressionBehavior bool
+	// DefaultDeduplicationBehavior shall indicate the default deduplication behavior applied to the child resource
+	// (e.g., volume or storage pool) created out of the storage pool if the 'Deduplicated' property is not set on the
+	// create request.
+
 // StoragePool is a container of data storage capable of providing
 // capacity conforming to one of its supported classes of service. The
 // storage pool does not support IO to its data storage.
@@ -29,8 +130,8 @@ type StoragePool struct {
 	// reference to the collection of volumes allocated from this storage
 	// pool.
 	allocatedVolumes string
-	// Capacity shall provide an information
-	// about the actual utilization of the capacity within this storage pool.
+	// Capacity shall provide information about the actual utilization of the
+	// capacity within this storage pool.
 	Capacity Capacity
 	// CapacitySources is fully or partially consumed storage from a source
 	// resource. Each entry shall provide capacity allocation data from a
@@ -44,16 +145,38 @@ type StoragePool struct {
 	classesOfService string
 	// Compressed shall contain a boolean indicator if the StoragePool is
 	// currently utilizing compression or not.
+	// This property has been deprecated in favor of the CompressionEnabled and
+	// DefaultCompressionBehavior properties.
 	Compressed bool
+	// CompressionEnabled shall indicate whether or not compression is enabled on the storage pool.
+	CompressionEnabled bool
 	// Deduplicated shall contain a boolean indicator if the StoragePool is
 	// currently utilizing deduplication or not.
+	// This property has been deprecated in favor of the DeduplicationEnabled and
+	// DefaultDeduplicationBehavior properties.
 	Deduplicated bool
+	// DeduplicationEnabled shall indicate whether or not deduplication is enabled on the storage pool.
+	DeduplicationEnabled bool
 	// DefaultClassOfService is used.
 	defaultClassOfService string
+	// DefaultCompressionBehavior shall indicate the default compression behavior applied to the child resource (e.g.,
+	// volume or storage pool) created out of the storage pool if the 'Compressed' property is not set on the create
+	// request.
+	DefaultCompressionBehavior bool
+	// DefaultDeduplicationBehavior shall indicate the default deduplication behavior applied to the child resource
+	// (e.g., volume or storage pool) created out of the storage pool if the 'Deduplicated' property is not set on the
+	// create request.
+	DefaultDeduplicationBehavior bool
+	// DefaultEncryptionBehavior shall indicate the default encryption behavior applied to the child resource (e.g., volume
+	// or storage pool) created out of the storage pool if the 'Encrypted' property is not set on the create request.
+	DefaultEncryptionBehavior bool
 	// Description provides a description of this resource.
 	Description string
+	// EncryptionEnabled shall indicate whether or not encryption is enabled on the storage pool.
+	EncryptionEnabled bool
 	// Encrypted shall contain a boolean indicator if the
 	// StoragePool is currently utilizing encryption or not.
+	// This property has been deprecated in favor of the EncryptionEnabled and DefaultEncryptionBehavior properties.
 	Encrypted bool
 	// IOStatistics shall represent IO statistics for this
 	// StoragePool.
@@ -71,6 +194,19 @@ type StoragePool struct {
 	// concept is not valid (for example, with Memory), this property shall
 	// be NULL.
 	MaxBlockSizeBytes int64
+	// Metrics shall contain a link to a resource of type StoragePoolMetrics that specifies the metrics for this
+	// storage pool. IO metrics are reported in the IOStatistics property.
+	metrics string
+	// NVMeEnduranceGroupProperties shall contain properties to use when StoragePool is used to describe an NVMe
+	// Endurance Group.
+	NVMeEnduranceGroupProperties NVMeEnduranceGroupProperties
+	// NVMeProperties shall indicate the type of storage pool.
+	NVMeProperties NVMeProperties
+	// NVMeSetProperties shall contain properties to use when StoragePool is used to describe an NVMe Set.
+	NVMeSetProperties NVMeSetProperties
+	// Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the
+	// Redfish Specification-described requirements.
+	OEM json.RawMessage `json:"Oem"`
 	// RecoverableCapacitySourceCount is the value of the number of available
 	// capacity source resources currently available in the event that an
 	// equivalent capacity source resource fails.
@@ -79,8 +215,20 @@ type StoragePool struct {
 	// {[(SUM(AllocatedBytes) - SUM(ConsumedBytes)]/SUM(AllocatedBytes)}*100
 	// represented as an integer value.
 	RemainingCapacityPercent int
+	// ReplicationEnabled shall indicate whether or not replication is enabled on the storage pool. If enabled for the
+	// pool, replication can still be disabled on individual resources (e.g., volumes) within the pool.
+	ReplicationEnabled bool
 	// Status is the storage pool status.
 	Status common.Status
+	// SupportedPoolTypes shall contain all the PoolType values supported by the storage pool.
+	SupportedPoolTypes []PoolType
+	// SupportedProvisioningPolicies shall specify all supported storage allocation policies for the Storage Pool.
+	SupportedProvisioningPolicies []ProvisioningPolicy
+	// SupportedRAIDTypes shall contain all the RAIDType values supported by the storage pool.
+	SupportedRAIDTypes []RAIDType
+	// rawData holds the original serialized JSON so we can compare updates.
+	rawData []byte
+
 	// DedicatedSpareDrives shall be a reference to the resources that this
 	// StoragePool is associated with and shall reference resources of type
 	// Drive. This property shall only contain references to Drive entities
@@ -89,13 +237,19 @@ type StoragePool struct {
 	dedicatedSpareDrives []string
 	// DedicatedSpareDrivesCount is the number of drives.
 	DedicatedSpareDrivesCount int
+	// OwningStorageResource shall be a pointer to the Storage resource that owns or contains this StoragePool.
+ owningStorageResource string // SpareResourceSets shall contain resources that may be utilized to replace // the capacity provided by a failed resource having a compatible type. spareResourceSets []string // SpareResourceSetsCount is the number of spare resource sets. SpareResourceSetsCount int - // rawData holds the original serialized JSON so we can compare updates. - rawData []byte + + addDrivesTarget string + removeDrivesTarget string + setCompressionStateTarget string + setDeduplicationStateTarget string + setEncryptionStateTarget string } // UnmarshalJSON unmarshals a StoragePool object from the raw JSON. @@ -104,6 +258,7 @@ func (storagepool *StoragePool) UnmarshalJSON(b []byte) error { type links struct { DedicatedSpareDrives common.Links DedicatedSpareDrivesCount int `json:"DedicatedSpareDrives@odata.count"` + OwningStorageResource common.Link SpareResourceSets common.Links SpareResourceSetsCount int `json:"SpareResourceSets@odata.count"` } @@ -115,6 +270,14 @@ func (storagepool *StoragePool) UnmarshalJSON(b []byte) error { CapacitySource common.Links ClassesOfService common.Link DefaultClassOfService common.Link + Metrics common.Link + Actions struct { + AddDrives common.ActionTarget `json:"#StoragePool.AddDrives"` + RemoveDrives common.ActionTarget `json:"#StoragePool.RemoveDrives"` + SetCompressionState common.ActionTarget `json:"#StoragePool.SetCompressionState"` + SetDeduplicationState common.ActionTarget `json:"#StoragePool.SetDeduplicationState"` + SetEncryptionState common.ActionTarget `json:"#StoragePool.SetEncryptionState"` + } } err := json.Unmarshal(b, &t) @@ -127,13 +290,22 @@ func (storagepool *StoragePool) UnmarshalJSON(b []byte) error { // Extract the links to other entities for later storagepool.dedicatedSpareDrives = t.Links.DedicatedSpareDrives.ToStrings() storagepool.DedicatedSpareDrivesCount = t.Links.DedicatedSpareDrivesCount + storagepool.owningStorageResource = t.Links.OwningStorageResource.String() storagepool.spareResourceSets = t.Links.SpareResourceSets.ToStrings() storagepool.SpareResourceSetsCount = t.Links.SpareResourceSetsCount + storagepool.allocatedPools = t.AllocatedPools.String() storagepool.allocatedVolumes = t.AllocatedVolumes.String() storagepool.capacitySources = t.CapacitySource.ToStrings() storagepool.classesOfService = t.ClassesOfService.String() storagepool.defaultClassOfService = t.DefaultClassOfService.String() + storagepool.metrics = t.Metrics.String() + + storagepool.addDrivesTarget = t.Actions.AddDrives.Target + storagepool.removeDrivesTarget = t.Actions.RemoveDrives.Target + storagepool.setCompressionStateTarget = t.Actions.SetCompressionState.Target + storagepool.setDeduplicationStateTarget = t.Actions.SetDeduplicationState.Target + storagepool.setEncryptionStateTarget = t.Actions.SetEncryptionState.Target // This is a read/write object, so we need to save the raw object data for later storagepool.rawData = b @@ -308,3 +480,115 @@ func (storagepool *StoragePool) DefaultClassOfService() (*ClassOfService, error) } return GetClassOfService(storagepool.GetClient(), storagepool.defaultClassOfService) } + +// OwningStorageResource gets the Storage resource that owns or contains this StoragePool. +func (storagepool *StoragePool) OwningStorageResource() (*redfish.Storage, error) { + if storagepool.owningStorageResource == "" { + return nil, nil + } + + return redfish.GetStorage(storagepool.GetClient(), storagepool.owningStorageResource) +} + +// Metrics gets the metrics for this storage pool. 
+func (storagepool *StoragePool) Metrics() (*StoragePoolMetrics, error) {
+	if storagepool.metrics == "" {
+		return nil, nil
+	}
+	return GetStoragePoolMetrics(storagepool.GetClient(), storagepool.metrics)
+}
+
+// AddDrives will add an additional drive, or set of drives, to a capacity source for the storage pool.
+//
+// `capacitySource` is the target capacity source for the drive(s). This property does not need to be
+// specified if the storage pool only contains one capacity source, or if the implementation is
+// capable of automatically selecting the appropriate capacity source.
+// `drives` is the existing drive or drives to be added to a capacity source of the storage pool. The
+// implementation may impose restrictions on the number of drives added simultaneously.
+func (storagepool *StoragePool) AddDrives(capacitySource *CapacitySource, drives []*redfish.Drive) error {
+	if storagepool.addDrivesTarget == "" {
+		return errors.New("action not supported by this service")
+	}
+
+	payload := struct {
+		CapacitySource string
+		Drives         []string
+	}{}
+
+	if capacitySource != nil {
+		payload.CapacitySource = capacitySource.ODataID
+	}
+
+	for _, drive := range drives {
+		payload.Drives = append(payload.Drives, drive.ODataID)
+	}
+
+	return storagepool.Post(storagepool.addDrivesTarget, payload)
+}
+
+// RemoveDrives will remove drive(s) from the capacity source for the StoragePool.
+//
+// `drives` is the drive or drives to be removed from the underlying capacity source.
+func (storagepool *StoragePool) RemoveDrives(drives []*redfish.Drive) error {
+	if storagepool.removeDrivesTarget == "" {
+		return errors.New("action not supported by this service")
+	}
+
+	payload := struct {
+		Drives []string
+	}{}
+
+	for _, drive := range drives {
+		payload.Drives = append(payload.Drives, drive.ODataID)
+	}
+
+	return storagepool.Post(storagepool.removeDrivesTarget, payload)
+}
+
+// SetCompressionState will set the compression state of the storage pool.
+// This may be both a highly impactful and a long-running operation.
+//
+// `enable` indicates the desired compression state of the storage pool.
+func (storagepool *StoragePool) SetCompressionState(enable bool) error {
+	if storagepool.setCompressionStateTarget == "" {
+		return errors.New("action not supported by this service")
+	}
+
+	payload := struct {
+		Enable bool
+	}{Enable: enable}
+
+	return storagepool.Post(storagepool.setCompressionStateTarget, payload)
+}
+
+// SetDeduplicationState will set the deduplication state of the storage pool.
+// This may be both a highly impactful and a long-running operation.
+//
+// `enable` indicates the desired deduplication state of the storage pool.
+func (storagepool *StoragePool) SetDeduplicationState(enable bool) error {
+	if storagepool.setDeduplicationStateTarget == "" {
+		return errors.New("action not supported by this service")
+	}
+
+	payload := struct {
+		Enable bool
+	}{Enable: enable}
+
+	return storagepool.Post(storagepool.setDeduplicationStateTarget, payload)
+}
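For orientation, an end-to-end sketch of invoking one of these actions (not part of this patch); the endpoint and credentials are placeholders, and pools that do not advertise the action simply report the "action not supported" error returned above:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/stmcginnis/gofish"
    )

    func main() {
    	c, err := gofish.Connect(gofish.ClientConfig{
    		Endpoint: "https://bmc.example.com",
    		Username: "admin",
    		Password: "password",
    		Insecure: true,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer c.Logout()

    	services, err := c.Service.StorageServices()
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, service := range services {
    		pools, err := service.StoragePools()
    		if err != nil {
    			log.Fatal(err)
    		}
    		for _, pool := range pools {
    			// Enable compression where the action is advertised.
    			if err := pool.SetCompressionState(true); err != nil {
    				fmt.Printf("%s: %v\n", pool.Name, err)
    			}
    		}
    	}
    }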
+
+// SetEncryptionState sets the encryption state of the storage pool.
+// This may be both a highly impactful and a long-running operation.
+//
+// `enable` indicates the desired encryption state of the storage pool.
+func (storagepool *StoragePool) SetEncryptionState(enable bool) error {
+	if storagepool.setEncryptionStateTarget == "" {
+		return errors.New("action not supported by this service")
+	}
+
+	payload := struct {
+		Enable bool
+	}{Enable: enable}
+
+	return storagepool.Post(storagepool.setEncryptionStateTarget, payload)
+}
diff --git a/swordfish/storagepoolmetrics.go b/swordfish/storagepoolmetrics.go
new file mode 100644
index 00000000..3a5deabb
--- /dev/null
+++ b/swordfish/storagepoolmetrics.go
@@ -0,0 +1,110 @@
+//
+// SPDX-License-Identifier: BSD-3-Clause
+//
+
+package swordfish
+
+import (
+	"encoding/json"
+
+	"github.com/stmcginnis/gofish/common"
+)
+
+// StoragePoolMetrics shall contain the usage and health statistics for a storage pool in a Redfish implementation.
+type StoragePoolMetrics struct {
+	common.Entity
+	// ODataContext is the odata context.
+	ODataContext string `json:"@odata.context"`
+	// ODataEtag is the odata etag.
+	ODataEtag string `json:"@odata.etag"`
+	// ODataType is the odata type.
+	ODataType string `json:"@odata.type"`
+	// ConsistencyCheckErrorCount shall contain the number of consistency check errors over the lifetime of the storage
+	// pool.
+	ConsistencyCheckErrorCount int
+	// CorrectableIOReadErrorCount shall contain the number of the correctable read errors for the lifetime of the
+	// storage pool.
+	CorrectableIOReadErrorCount int
+	// CorrectableIOWriteErrorCount shall contain the number of the correctable write errors for the lifetime of the
+	// storage pool.
+	CorrectableIOWriteErrorCount int
+	// Description provides a description of this resource.
+	Description string
+	// IOStatistics shall represent IO statistics for this storage pool.
+	IOStatistics IOStatistics
+	// Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the
+	// Redfish Specification-described requirements.
+	OEM json.RawMessage `json:"Oem"`
+	// RebuildErrorCount shall contain the number of rebuild errors over the lifetime of the storage pool.
+	RebuildErrorCount int
+	// StateChangeCount shall contain the number of state changes (changes in Status.State) for this storage pool.
+	StateChangeCount int
+	// UncorrectableIOReadErrorCount shall contain the number of the uncorrectable read errors for the lifetime of the
+	// storage pool.
+	UncorrectableIOReadErrorCount int
+	// UncorrectableIOWriteErrorCount shall contain the number of the uncorrectable write errors for the lifetime of
+	// the storage pool.
+	UncorrectableIOWriteErrorCount int
+}
+
+// GetStoragePoolMetrics will get a StoragePoolMetrics instance from the service.
+func GetStoragePoolMetrics(c common.Client, uri string) (*StoragePoolMetrics, error) {
+	resp, err := c.Get(uri)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var storagepoolmetrics StoragePoolMetrics
+	err = json.NewDecoder(resp.Body).Decode(&storagepoolmetrics)
+	if err != nil {
+		return nil, err
+	}
+
+	storagepoolmetrics.SetClient(c)
+	return &storagepoolmetrics, nil
+}
+
+// ListReferencedStoragePoolMetricss gets the collection of StoragePoolMetrics from
+// a provided reference.
+func ListReferencedStoragePoolMetricss(c common.Client, link string) ([]*StoragePoolMetrics, error) {
+	var result []*StoragePoolMetrics
+	if link == "" {
+		return result, nil
+	}
+
+	type GetResult struct {
+		Item  *StoragePoolMetrics
+		Link  string
+		Error error
+	}
+
+	ch := make(chan GetResult)
+	collectionError := common.NewCollectionError()
+	get := func(link string) {
+		storagepoolmetrics, err := GetStoragePoolMetrics(c, link)
+		ch <- GetResult{Item: storagepoolmetrics, Link: link, Error: err}
+	}
+
+	go func() {
+		err := common.CollectList(get, c, link)
+		if err != nil {
+			collectionError.Failures[link] = err
+		}
+		close(ch)
+	}()
+
+	for r := range ch {
+		if r.Error != nil {
+			collectionError.Failures[r.Link] = r.Error
+		} else {
+			result = append(result, r.Item)
+		}
+	}
+
+	if collectionError.Empty() {
+		return result, nil
+	}
+
+	return result, collectionError
+}
diff --git a/swordfish/storagereplicainfo.go b/swordfish/storagereplicainfo.go
index 6d756cbd..7a5b54b7 100644
--- a/swordfish/storagereplicainfo.go
+++ b/swordfish/storagereplicainfo.go
@@ -51,6 +51,15 @@ const (
 	SequentiallyConsistentConsistencyType ConsistencyType = "SequentiallyConsistent"
 )
 
+type ReplicaFaultDomain string
+
+const (
+	// LocalReplicaFaultDomain indicates that the source and target replicas are contained within a single fault domain.
+	LocalReplicaFaultDomain ReplicaFaultDomain = "Local"
+	// RemoteReplicaFaultDomain indicates that the source and target replicas are in separate fault domains.
+	RemoteReplicaFaultDomain ReplicaFaultDomain = "Remote"
+)
+
 // ReplicaPriority is used to specify the priority of background copy
 // engine I/O relative to host I/O operations during a sequential
 // background copy operation.
@@ -335,8 +344,12 @@ type ReplicaInfo struct {
 	// PercentSynced shall be an average of the PercentSynced across all
 	// members of the group.
 	PercentSynced int
+	// RemoteSourceReplica shall contain the URI to the remote source replica when located on a different Swordfish service instance.
+	RemoteSourceReplica string
 	// Replica shall reference the resource that is the source of this replica.
 	replica string
+	// ReplicaFaultDomain shall describe the fault domain (local or remote) of the replica relationship.
+	ReplicaFaultDomain ReplicaFaultDomain
 	// ReplicaPriority shall specify the priority
 	// of background copy engine I/O to be managed relative to host I/O
 	// operations during a sequential background copy operation.
@@ -370,6 +383,9 @@ type ReplicaInfo struct {
 	// represented by ReplicaState. When RequestedState reaches the requested
 	// state, this property shall be null.
 	RequestedReplicaState ReplicaState
+	// SourceReplica shall contain the URI to the source replica when located on a different Swordfish service
+	// instance.
+	SourceReplica string
 	// SyncMaintained shall be true if synchronization is maintained. The
 	// default value for this property is false.
 	SyncMaintained bool
@@ -441,6 +457,9 @@ type StorageReplicaInfo struct {
 	ODataType string `json:"@odata.type"`
 	// Description provides a description of this resource.
 	Description string
+	// Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the
+	// Redfish Specification-described requirements.
+	OEM json.RawMessage `json:"Oem"`
 }
 
 // GetStorageReplicaInfo will get a StorageReplicaInfo instance from the service.
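A short sketch (not part of this patch) of how the new fault-domain information might be consumed; it assumes a *swordfish.Volume with a populated ReplicaInfo, and ReplicaState is taken from the existing ReplicaInfo definition:

    package example

    import (
    	"fmt"

    	"github.com/stmcginnis/gofish/swordfish"
    )

    // replicaSummary reports where the peer replica of a volume lives.
    func replicaSummary(volume *swordfish.Volume) {
    	info := volume.ReplicaInfo
    	fmt.Printf("replica state: %s (%d%% synced)\n",
    		info.ReplicaState, info.PercentSynced)

    	if info.ReplicaFaultDomain == swordfish.RemoteReplicaFaultDomain {
    		// A remote fault domain means the peer replica is managed by a
    		// separate Swordfish service instance, referenced by URI.
    		fmt.Printf("remote source replica: %s\n", info.SourceReplica)
    	}
    }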
diff --git a/swordfish/storageservice.go b/swordfish/storageservice.go index 9215e1c5..023df1fb 100644 --- a/swordfish/storageservice.go +++ b/swordfish/storageservice.go @@ -6,6 +6,7 @@ package swordfish import ( "encoding/json" + "reflect" "github.com/stmcginnis/gofish/common" "github.com/stmcginnis/gofish/redfish" @@ -62,9 +63,6 @@ type StorageService struct { // fileSystems is an array of references to FileSystems managed by this // storage service. fileSystems string - // hostingSystem shall reference the ComputerSystem or - // StorageController that hosts this service. - hostingSystem string // ioConnectivityLoSCapabilities shall reference the IO connectivity // capabilities of this service. ioConnectivityLoSCapabilities string @@ -73,6 +71,16 @@ type StorageService struct { ioPerformanceLoSCapabilities string // IOStatistics shall represent IO statistics for this StorageService. IOStatistics IOStatistics + // LinesOfService shall reference a LineOfService collection defined for this service. + linesOfService []string + // LinesOfServiceCount is the number of lines of service. + LinesOfServiceCount int `json:"LinesOfService@odata.count"` + // Metrics shall contain a link to a resource of type StorageServiceMetrics that specifies the metrics for this + // storage service. IO metrics are reported in the IOStatistics property. + metrics string + // Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the + // Redfish Specification-described requirements. + OEM json.RawMessage `json:"Oem"` // spareResourceSets shall contain resources that may be utilized to // replace the capacity provided by a failed resource having a compatible type. spareResourceSets []string @@ -90,8 +98,15 @@ type StorageService struct { // Volumes is an array of references to Volumes managed by this storage // service. volumes string + // rawData holds the original serialized JSON so we can compare updates. + rawData []byte + // setEncryptionKeyTarget is the URL to send SetEncryptionKey requests. setEncryptionKeyTarget string + + // hostingSystem shall reference the ComputerSystem or + // StorageController that hosts this service. + hostingSystem string } // UnmarshalJSON unmarshals a StorageService object from the raw JSON. 
@@ -119,6 +134,8 @@ func (storageservice *StorageService) UnmarshalJSON(b []byte) error { IOConnectivityLoSCapabilities common.Link IOPerformanceLoSCapabilities common.Link Redundancy common.Links + LinesOfService common.Links + Metrics common.Link SpareResourceSets common.Links StorageGroups common.Link StoragePools common.Link @@ -146,18 +163,54 @@ func (storageservice *StorageService) UnmarshalJSON(b []byte) error { storageservice.fileSystems = t.FileSystems.String() storageservice.ioConnectivityLoSCapabilities = t.IOConnectivityLoSCapabilities.String() storageservice.ioPerformanceLoSCapabilities = t.IOPerformanceLoSCapabilities.String() + storageservice.metrics = t.Metrics.String() storageservice.redundancy = t.Redundancy.ToStrings() + storageservice.linesOfService = t.LinesOfService.ToStrings() storageservice.spareResourceSets = t.SpareResourceSets.ToStrings() storageservice.storageGroups = t.StorageGroups.String() storageservice.storagePools = t.StoragePools.String() storageservice.storageSubsystems = t.StorageSubsystems.String() - storageservice.hostingSystem = t.Links.HostingSystem.String() storageservice.volumes = t.Volumes.String() + storageservice.setEncryptionKeyTarget = t.Actions.SetEncryptionKey.Target + storageservice.hostingSystem = t.Links.HostingSystem.String() + + // This is a read/write object, so we need to save the raw object data for later + storageservice.rawData = b + return nil } +// Update commits updates to this object's properties to the running system. +func (storageservice *StorageService) Update() error { + // Get a representation of the object's original state so we can find what + // to update. + original := new(StorageService) + original.UnmarshalJSON(storageservice.rawData) + + readWriteFields := []string{ + "ClassesOfService", + "ConsistencyGroups", + "DataProtectionLoSCapabilities", + "DataSecurityLoSCapabilities", + "DataStorageLoSCapabilities", + "DefaultClassOfService", + "EndpointGroups", + "FileSystems", + "IOConnectivityLoSCapabilities", + "IOPerformanceLoSCapabilities", + "LinesOfService", + "SpareResourceSets", + "Volumes", + } + + originalElement := reflect.ValueOf(original).Elem() + currentElement := reflect.ValueOf(storageservice).Elem() + + return storageservice.Entity.Update(originalElement, currentElement, readWriteFields) +} + // GetStorageService will get a StorageService instance from the service. func GetStorageService(c common.Client, uri string) (*StorageService, error) { var storageService StorageService @@ -305,6 +358,27 @@ func (storageservice *StorageService) Redundancy() ([]*redfish.Redundancy, error return result, collectionError } +// LinesOfService gets lines of service for this service. +func (storageservice *StorageService) LinesOfService() ([]*LineOfService, error) { + var result []*LineOfService + + collectionError := common.NewCollectionError() + for _, uri := range storageservice.linesOfService { + item, err := GetLineOfService(storageservice.GetClient(), uri) + if err != nil { + collectionError.Failures[uri] = err + } else { + result = append(result, item) + } + } + + if collectionError.Empty() { + return result, nil + } + + return result, collectionError +} + // SpareResourceSets gets resources that may be utilized to replace the capacity // provided by a failed resource having a compatible type. 
 func (storageservice *StorageService) SpareResourceSets() ([]*SpareResourceSet, error) {
@@ -361,3 +435,11 @@ func (storageservice *StorageService) SetEncryptionKey(key string) error {
 
 	return storageservice.Post(storageservice.setEncryptionKeyTarget, t)
 }
+
+// Metrics gets the metrics for this storage service.
+func (storageservice *StorageService) Metrics() (*StorageServiceMetrics, error) {
+	if storageservice.metrics == "" {
+		return nil, nil
+	}
+	return GetStorageServiceMetrics(storageservice.GetClient(), storageservice.metrics)
+}
diff --git a/swordfish/storageservicemetrics.go b/swordfish/storageservicemetrics.go
new file mode 100644
index 00000000..fa7b4fa9
--- /dev/null
+++ b/swordfish/storageservicemetrics.go
@@ -0,0 +1,93 @@
+//
+// SPDX-License-Identifier: BSD-3-Clause
+//
+
+//nolint:dupl
+package swordfish
+
+import (
+	"encoding/json"
+
+	"github.com/stmcginnis/gofish/common"
+)
+
+// StorageServiceMetrics shall contain the usage and health statistics for a storage service in a Redfish
+// implementation.
+type StorageServiceMetrics struct {
+	common.Entity
+	// ODataContext is the odata context.
+	ODataContext string `json:"@odata.context"`
+	// ODataEtag is the odata etag.
+	ODataEtag string `json:"@odata.etag"`
+	// ODataType is the odata type.
+	ODataType string `json:"@odata.type"`
+	// Description provides a description of this resource.
+	Description string
+	// IOStatistics shall represent IO statistics for this storage service.
+	IOStatistics IOStatistics
+	// Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the
+	// Redfish Specification-described requirements.
+	OEM json.RawMessage `json:"Oem"`
+}
+
+// GetStorageServiceMetrics will get a StorageServiceMetrics instance from the service.
+func GetStorageServiceMetrics(c common.Client, uri string) (*StorageServiceMetrics, error) {
+	resp, err := c.Get(uri)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var storageservicemetrics StorageServiceMetrics
+	err = json.NewDecoder(resp.Body).Decode(&storageservicemetrics)
+	if err != nil {
+		return nil, err
+	}
+
+	storageservicemetrics.SetClient(c)
+	return &storageservicemetrics, nil
+}
+
+// ListReferencedStorageServiceMetricss gets the collection of StorageServiceMetrics from
+// a provided reference.
+func ListReferencedStorageServiceMetricss(c common.Client, link string) ([]*StorageServiceMetrics, error) {
+	var result []*StorageServiceMetrics
+	if link == "" {
+		return result, nil
+	}
+
+	type GetResult struct {
+		Item  *StorageServiceMetrics
+		Link  string
+		Error error
+	}
+
+	ch := make(chan GetResult)
+	collectionError := common.NewCollectionError()
+	get := func(link string) {
+		storageservicemetrics, err := GetStorageServiceMetrics(c, link)
+		ch <- GetResult{Item: storageservicemetrics, Link: link, Error: err}
+	}
+
+	go func() {
+		err := common.CollectList(get, c, link)
+		if err != nil {
+			collectionError.Failures[link] = err
+		}
+		close(ch)
+	}()
+
+	for r := range ch {
+		if r.Error != nil {
+			collectionError.Failures[r.Link] = r.Error
+		} else {
+			result = append(result, r.Item)
+		}
+	}
+
+	if collectionError.Empty() {
+		return result, nil
+	}
+
+	return result, collectionError
+}
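Note that the new accessor returns nil without an error when no metrics resource is linked, so callers should check both results; a minimal sketch (not part of this patch):

    package example

    import (
    	"fmt"

    	"github.com/stmcginnis/gofish/swordfish"
    )

    // reportServiceMetrics fetches and prints the metrics resource, if any,
    // for an already-retrieved storage service.
    func reportServiceMetrics(service *swordfish.StorageService) error {
    	metrics, err := service.Metrics()
    	if err != nil {
    		return err
    	}
    	if metrics == nil {
    		// The service does not link a StorageServiceMetrics resource.
    		return nil
    	}
    	fmt.Printf("%s IO statistics: %+v\n", service.Name, metrics.IOStatistics)
    	return nil
    }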
diff --git a/swordfish/volume.go b/swordfish/volume.go
index 7744db15..e567523e 100644
--- a/swordfish/volume.go
+++ b/swordfish/volume.go
@@ -13,6 +13,74 @@ import (
 	"github.com/stmcginnis/gofish/redfish"
 )
 
+type ALUA struct {
+	// ANAGroupID shall contain the ANA group id for this volume.
+	ANAGroupID int64
+}
+
+type LBAFormat struct {
+	// LBADataSizeBytes shall be the LBA data size reported in bytes.
+	LBADataSizeBytes int
+	// LBAFormatType shall be the LBA format type. This property is intended for capabilities instrumentation.
+	LBAFormatType LBAFormatType
+	// LBAMetadataSizeBytes shall be the LBA metadata size reported in bytes.
+	LBAMetadataSizeBytes int
+	// RelativePerformance shall be the LBA Relative Performance type. This field indicates the relative performance of
+	// the LBA format indicated relative to other LBA formats supported by the controller. This property is intended
+	// for capabilities instrumentation.
+	RelativePerformance LBARelativePerformanceType
+}
+
+type LBAFormatType string
+
+const (
+	// LBAFormat0LBAFormatType indicates the LBA data size supported.
+	LBAFormat0LBAFormatType LBAFormatType = "LBAFormat0"
+	// LBAFormat1LBAFormatType indicates the LBA data size supported.
+	LBAFormat1LBAFormatType LBAFormatType = "LBAFormat1"
+	// LBAFormat10LBAFormatType indicates the LBA data size supported.
+	LBAFormat10LBAFormatType LBAFormatType = "LBAFormat10"
+	// LBAFormat11LBAFormatType indicates the LBA data size supported.
+	LBAFormat11LBAFormatType LBAFormatType = "LBAFormat11"
+	// LBAFormat12LBAFormatType indicates the LBA data size supported.
+	LBAFormat12LBAFormatType LBAFormatType = "LBAFormat12"
+	// LBAFormat13LBAFormatType indicates the LBA data size supported.
+	LBAFormat13LBAFormatType LBAFormatType = "LBAFormat13"
+	// LBAFormat14LBAFormatType indicates the LBA data size supported.
+	LBAFormat14LBAFormatType LBAFormatType = "LBAFormat14"
+	// LBAFormat15LBAFormatType indicates the LBA data size supported.
+	LBAFormat15LBAFormatType LBAFormatType = "LBAFormat15"
+	// LBAFormat2LBAFormatType indicates the LBA data size supported.
+	LBAFormat2LBAFormatType LBAFormatType = "LBAFormat2"
+	// LBAFormat3LBAFormatType indicates the LBA data size supported.
+	LBAFormat3LBAFormatType LBAFormatType = "LBAFormat3"
+	// LBAFormat4LBAFormatType indicates the LBA data size supported.
+	LBAFormat4LBAFormatType LBAFormatType = "LBAFormat4"
+	// LBAFormat5LBAFormatType indicates the LBA data size supported.
+	LBAFormat5LBAFormatType LBAFormatType = "LBAFormat5"
+	// LBAFormat6LBAFormatType indicates the LBA data size supported.
+	LBAFormat6LBAFormatType LBAFormatType = "LBAFormat6"
+	// LBAFormat7LBAFormatType indicates the LBA data size supported.
+	LBAFormat7LBAFormatType LBAFormatType = "LBAFormat7"
+	// LBAFormat8LBAFormatType indicates the LBA data size supported.
+	LBAFormat8LBAFormatType LBAFormatType = "LBAFormat8"
+	// LBAFormat9LBAFormatType indicates the LBA data size supported.
+	LBAFormat9LBAFormatType LBAFormatType = "LBAFormat9"
+)
+
+type LBARelativePerformanceType string
+
+const (
+	// BestLBARelativePerformanceType indicates the best performance.
+	BestLBARelativePerformanceType LBARelativePerformanceType = "Best"
+	// BetterLBARelativePerformanceType indicates better performance.
+	BetterLBARelativePerformanceType LBARelativePerformanceType = "Better"
+	// DegradedLBARelativePerformanceType indicates degraded performance.
+	DegradedLBARelativePerformanceType LBARelativePerformanceType = "Degraded"
+	// GoodLBARelativePerformanceType indicates good performance.
+	GoodLBARelativePerformanceType LBARelativePerformanceType = "Good"
+)
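As an illustration (not part of this patch) of how a client might use this relative-performance ranking, the following sketch filters a namespace's advertised formats; NVMeNamespaceProperties is defined later in this file:

    package example

    import "github.com/stmcginnis/gofish/swordfish"

    // bestLBAFormats returns the advertised LBA formats flagged with the best
    // relative performance.
    func bestLBAFormats(ns swordfish.NVMeNamespaceProperties) []swordfish.LBAFormat {
    	var best []swordfish.LBAFormat
    	for _, f := range ns.LBAFormats {
    		if f.RelativePerformance == swordfish.BestLBARelativePerformanceType {
    			best = append(best, f)
    		}
    	}
    	return best
    }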
+
 // InitializeType is the type of initialization to perform.
 type InitializeType string
 
@@ -148,6 +216,17 @@ const (
 	OffReadCachePolicyType ReadCachePolicyType = "Off"
 )
 
+type InitializeMethod string
+
+const (
+	// BackgroundInitializeMethod means the volume will be available for use immediately, with data erasure and preparation to happen as background tasks.
+	BackgroundInitializeMethod InitializeMethod = "Background"
+	// ForegroundInitializeMethod means data erasure and preparation tasks will complete before the volume is presented as available for use.
+	ForegroundInitializeMethod InitializeMethod = "Foreground"
+	// SkipInitializeMethod means the volume will be available for use immediately, with no preparation.
+	SkipInitializeMethod InitializeMethod = "Skip"
+)
+
 // VolumeUsageType is the type of volume usage.
 type VolumeUsageType string
 
@@ -234,6 +313,126 @@ const (
 	OEMWriteHoleProtectionPolicyType WriteHoleProtectionPolicyType = "Oem"
 )
 
+type NamespaceType string
+
+const (
+	// BlockNamespaceType indicates the namespace is configured for use with a block storage interface.
+	BlockNamespaceType NamespaceType = "Block"
+	// ComputationalNamespaceType indicates the namespace is configured for use with a computational storage interface.
+	ComputationalNamespaceType NamespaceType = "Computational"
+	// KeyValueNamespaceType indicates the namespace is configured for use with a KeyValue interface.
+	KeyValueNamespaceType NamespaceType = "KeyValue"
+	// ZNSNamespaceType indicates the namespace is configured for use with a zoned storage interface.
+	ZNSNamespaceType NamespaceType = "ZNS"
+)
+
+// NVMeNamespaceProperties contains properties to use when Volume is used to describe an NVMe Namespace.
+type NVMeNamespaceProperties struct {
+	// FormattedLBASize shall contain the LBA data size and metadata size combination that the namespace has been
+	// formatted with. This is a 4-bit data structure.
+	FormattedLBASize string
+	// IsShareable shall indicate whether the namespace is shareable.
+	IsShareable bool
+	// LBAFormat shall describe the current LBA format ID and corresponding detailed properties, such as the LBA data
+	// size and metadata size. Use the LBAFormats property to describe namespace capabilities in a collection
+	// capabilities annotation.
+	LBAFormat LBAFormat
+	// LBAFormats shall describe the LBA format IDs and corresponding detailed properties, such as the LBA data size
+	// and metadata size. This property is intended for use in a collection capabilities annotation. Use the LBAFormat
+	// property on an instance of a namespace.
+	LBAFormats []LBAFormat
+	// LBAFormatsSupported shall be a list of the LBA formats supported for the namespace, or potential namespaces.
+	LBAFormatsSupported []LBAFormatType
+	// MetadataTransferredAtEndOfDataLBA shall indicate whether or not the metadata is transferred at the end of the
+	// LBA creating an extended data LBA.
+	MetadataTransferredAtEndOfDataLBA bool
+	// NVMeVersion shall contain the version of the NVMe Base Specification supported.
+	NVMeVersion string
+	// NamespaceFeatures shall contain a set of Namespace Features.
+	NamespaceFeatures NamespaceFeatures
+	// NamespaceID shall contain the NVMe Namespace Identifier for this namespace. This property shall be a hex value.
+	// Namespace identifiers are not durable and do not have meaning outside the scope of the NVMe subsystem. NSID 0x0,
+	// 0xFFFFFFFF, 0xFFFFFFFE are special purpose values.
+	NamespaceID string
+	// NamespaceType shall identify the type of namespace.
+	NamespaceType NamespaceType
+	// NumberLBAFormats shall contain the number of LBA data size and metadata size combinations supported by this
+	// namespace. The value of this property is between 0 and 16. LBA formats with an index set beyond this value will
+	// not be supported.
+	NumberLBAFormats int
+	// SupportsIOPerformanceHints shall indicate whether the namespace supports IO performance hints.
+	SupportsIOPerformanceHints bool
+	// SupportsMultipleNamespaceAttachments shall indicate whether the namespace may be attached to two or more
+	// controllers.
+	SupportsMultipleNamespaceAttachments bool
+	// Type shall identify the type of namespace.
+	Type NamespaceType
+}
+
+// NamespaceFeatures describes a set of NVMe namespace features.
+type NamespaceFeatures struct {
+	// SupportsAtomicTransactionSize shall indicate that the Namespace Atomic Write Unit Normal (NAWUN), Namespace
+	// Atomic Write Unit Power Fail (NAWUPF), and Namespace Atomic Compare and Write Unit (NACWU) fields are defined
+	// for this namespace and should be used by the host for this namespace instead of the controller-level properties
+	// AWUN, AWUPF, and ACWU.
+	SupportsAtomicTransactionSize bool
+	// SupportsDeallocatedOrUnwrittenLBError shall indicate that the controller supports deallocated or unwritten
+	// logical block error for this namespace.
+	SupportsDeallocatedOrUnwrittenLBError bool
+	// SupportsIOPerformanceHints shall indicate whether or not the NVM fields for Namespace preferred write
+	// granularity (NPWG), write alignment (NPWA), deallocate granularity (NPDG), deallocate alignment (NPDA) and
+	// optimal write size (NOWS) are defined for this namespace and should be used by the host for I/O optimization.
+	SupportsIOPerformanceHints bool
+	// SupportsNGUIDReuse shall indicate that the namespace supports the use of an NGUID (namespace globally unique
+	// identifier) value.
+	SupportsNGUIDReuse bool
+	// SupportsThinProvisioning shall indicate whether or not the NVMe Namespace supports thin provisioning.
+	// Specifically, the namespace capacity reported may be less than the namespace size.
+	SupportsThinProvisioning bool
+}
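A brief sketch (not part of this patch) of reading these namespace properties back from a volume that models an NVMe namespace:

    package example

    import (
    	"fmt"

    	"github.com/stmcginnis/gofish/swordfish"
    )

    // describeNamespace prints the identity and a few capabilities of an
    // NVMe namespace volume.
    func describeNamespace(v *swordfish.Volume) {
    	ns := v.NVMeNamespaceProperties
    	fmt.Printf("NSID %s (%s namespace), shareable=%t\n",
    		ns.NamespaceID, ns.NamespaceType, ns.IsShareable)

    	if ns.NamespaceFeatures.SupportsThinProvisioning {
    		// Reported capacity may be smaller than the namespace size.
    		fmt.Println("namespace supports thin provisioning")
    	}
    }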
+
+type Operation struct {
+	// AssociatedFeaturesRegistry is a reference to the features registry item associated with the operation, if any.
+	AssociatedFeaturesRegistry string
+	// Operation shall contain the type of the operation.
+	Operation OperationType
+	// PercentageComplete is the percentage of the operation that has been completed.
+	PercentageComplete int
+}
+
+type OperationType string
+
+const (
+	// ChangeRAIDTypeOperationType indicates a ChangeRAIDType operation is being performed.
+	ChangeRAIDTypeOperationType OperationType = "ChangeRAIDType"
+	// ChangeStripSizeOperationType indicates a ChangeStripSize operation is being performed.
+	ChangeStripSizeOperationType OperationType = "ChangeStripSize"
+	// CheckConsistencyOperationType indicates a CheckConsistency operation is being performed.
+	CheckConsistencyOperationType OperationType = "CheckConsistency"
+	// CompressOperationType indicates a Compress operation is being performed.
+	CompressOperationType OperationType = "Compress"
+	// DecryptOperationType indicates a Decrypt operation is being performed.
+	DecryptOperationType OperationType = "Decrypt"
+	// DeduplicateOperationType indicates a Deduplicate operation is being performed.
+	DeduplicateOperationType OperationType = "Deduplicate"
+	// DeleteOperationType indicates a Delete operation is being performed.
+	DeleteOperationType OperationType = "Delete"
+	// EncryptOperationType indicates an Encrypt operation is being performed.
+	EncryptOperationType OperationType = "Encrypt"
+	// FormatOperationType indicates a Format operation is being performed.
+	FormatOperationType OperationType = "Format"
+	// InitializeOperationType indicates an Initialize operation is being performed.
+	InitializeOperationType OperationType = "Initialize"
+	// RebuildOperationType indicates a Rebuild operation is being performed.
+	RebuildOperationType OperationType = "Rebuild"
+	// ReplicateOperationType indicates a Replicate operation is being performed.
+	ReplicateOperationType OperationType = "Replicate"
+	// ResizeOperationType indicates a Resize operation is being performed.
+	ResizeOperationType OperationType = "Resize"
+	// SanitizeOperationType indicates a Sanitize operation is being performed.
+	SanitizeOperationType OperationType = "Sanitize"
+)
+
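A small sketch (not part of this patch) of consuming the Operation type above; how the slice is obtained, for example from an OEM extension, is left to the caller:

    package example

    import (
    	"fmt"

    	"github.com/stmcginnis/gofish/swordfish"
    )

    // reportOperations prints progress for in-flight operations, calling out
    // the long-running rebuild and replicate cases.
    func reportOperations(ops []swordfish.Operation) {
    	for _, op := range ops {
    		switch op.Operation {
    		case swordfish.RebuildOperationType, swordfish.ReplicateOperationType:
    			fmt.Printf("long-running %s: %d%% complete\n",
    				op.Operation, op.PercentageComplete)
    		default:
    			fmt.Printf("%s: %d%% complete\n", op.Operation, op.PercentageComplete)
    		}
    	}
    }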
 // Volume is used to represent a volume, virtual disk, logical disk, LUN,
 // or other logical storage for a Redfish implementation.
 type Volume struct {
@@ -243,8 +442,13 @@ type Volume struct {
 	ODataContext string `json:"@odata.context"`
 	// ODataType is the odata type.
 	ODataType string `json:"@odata.type"`
+	// ALUA shall identify the ALUA properties for this volume.
+	ALUA ALUA
 	// AccessCapabilities shall specify a current storage access capability.
 	AccessCapabilities []StorageAccessCapability
+	// allocatedPools shall contain references to all storage pools allocated
+	// from this volume.
+	allocatedPools []string
 	// BlockSizeBytes shall contain size of the smallest
 	// addressable unit of the associated volume.
 	BlockSizeBytes int
@@ -253,35 +457,46 @@ type Volume struct {
 	Capacity Capacity
 	// CapacityBytes shall contain the size in bytes of the
 	// associated volume.
-	CapacityBytes int
-	// CapacitySources is Fully or partially consumed storage from a source
+	CapacityBytes int64
+	// CapacitySources is fully or partially consumed storage from a source
 	// resource. Each entry provides capacity allocation information from a
 	// named source resource.
-	CapacitySources []CapacitySource
-	// CapacitySources@odata.count is
+	capacitySources []string
+	// CapacitySourcesCount is the number of capacity sources.
 	CapacitySourcesCount int `json:"CapacitySources@odata.count"`
 	// Compressed shall contain a boolean indicator if the Volume is currently
 	// utilizing compression or not.
 	Compressed bool
+	// Connections shall contain references to all Connections that include this volume.
+	connections []string
+	// ConnectionsCount is the number of connections.
+	ConnectionsCount int `json:"Connections@odata.count"`
 	// Deduplicated shall contain a boolean indicator if the Volume is currently
 	// utilizing deduplication or not.
 	Deduplicated bool
 	// Description provides a description of this resource.
 	Description string
+	// DisplayName shall contain a user-configurable string to name the volume.
+	DisplayName string
 	// Encrypted shall contain a boolean indicator if the
 	// Volume is currently utilizing encryption or not.
 	Encrypted bool
-	// EncryptionTypes is used by this Volume.
+	// EncryptionTypes is the type of encryption used by this Volume.
 	EncryptionTypes []redfish.EncryptionTypes
-	// IOStatistics shall represent IO statistics for this volume.
-	// IOStatistics IOStatistics
+	// IOPerfModeEnabled shall indicate whether IO performance mode is enabled for the volume.
+	IOPerfModeEnabled bool
 	// Identifiers shall contain a list of all known durable
 	// names for the associated volume.
 	Identifiers []common.Identifier
-	// Links is The Links property, as described by the Redfish
-	// Specification, shall contain references to resources that are related
-	// to, but not contained by (subordinate to), this resource.
-	Links string
+	// InitializeMethod shall indicate the initialization method used for this volume. If InitializeMethod is not
+	// specified, the InitializeMethod should be Foreground. This value reflects the most recently used Initialization
+	// Method, and may be changed using the Initialize Action.
+	InitializeMethod InitializeMethod
+	// IsBootCapable shall indicate whether or not the Volume contains a boot image and is capable of booting. This
+	// property may be settable by an admin or client with visibility into the contents of the volume. This property
+	// should only be set to true when VolumeUsage is either not specified, or when VolumeUsage is set to Data or
+	// SystemData.
+	IsBootCapable bool
 	// LogicalUnitNumber shall contain host-visible LogicalUnitNumber assigned
 	// to this Volume. This property shall only be used when in a single connect
 	// configuration and no StorageGroup configuration is used.
@@ -301,10 +516,18 @@ type Volume struct {
 	// MediaSpanCount shall indicate the number of media elements used per span
 	// in the secondary RAID for a hierarchical RAID type.
 	MediaSpanCount int
+	// Metrics shall contain a link to a resource of type VolumeMetrics that specifies the metrics for this volume. IO
+	// metrics are reported in the IOStatistics property.
+	metrics string
 	// Model is assigned by the manufacturer and shall represent a specific
 	// storage volume implementation.
 	Model string
-	// Operations shall contain a list of all currently
+	// NVMeNamespaceProperties shall contain properties to use when Volume is used to describe an NVMe Namespace.
+	NVMeNamespaceProperties NVMeNamespaceProperties
+	// Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the
+	// Redfish Specification-described requirements.
+	OEM json.RawMessage `json:"Oem"`
+	// Operations shall contain a list of all operations currently
 	// running on the Volume.
 	Operations []common.Operations
 	// OptimumIOSizeBytes shall contain the optimum IO size
@@ -328,15 +551,20 @@ type Volume struct {
 	// {[(SUM(AllocatedBytes) - SUM(ConsumedBytes)]/SUM(AllocatedBytes)}*100
 	// represented as an integer value.
 	RemainingCapacityPercent int
-	// ReplicaInfo shall describe the replica relationship
-	// between this storage volume and a corresponding source volume.
-	// ReplicaInfo redfish.ReplicaInfo
-	// ReplicaTargets shall reference the target replicas that
-	// are sourced by this replica.
-	ReplicaTargets []string
-	// ReplicaTargets@odata.count is
+	// RemoteReplicaTargets shall reference the URIs to the remote target replicas that are sourced by this replica.
+	// Remote indicates that the replica is managed by a separate Swordfish service instance.
+	RemoteReplicaTargets []string
+	// ReplicaInfo shall describe the replica relationship between this storage volume and a corresponding source
+	// volume.
+	ReplicaInfo ReplicaInfo
+	// ReplicaTargets shall reference the target replicas that are sourced by this replica.
+	replicaTargets []string
+	// ReplicaTargetsCount is the number of replica targets.
 	ReplicaTargetsCount int `json:"ReplicaTargets@odata.count"`
-	// Status is
+	// ReplicationEnabled shall indicate whether or not replication is enabled on the volume. This property shall be
+	// consistent with the state reflected at the storage pool level.
+	ReplicationEnabled bool
+	// Status shall contain the status of the Volume.
 	Status common.Status
 	// StripSizeBytes is the number of consecutively addressed virtual disk
 	// blocks (bytes) mapped to consecutively addressed blocks on a single
@@ -355,38 +583,45 @@ type Volume struct {
 	// the write hole issue on the RAID volume. If no policy is enabled at the
 	// moment, this property shall be set to 'Off'.
 	WriteHoleProtectionPolicy WriteHoleProtectionPolicyType
+	// rawData holds the original serialized JSON so we can compare updates.
+	rawData []byte
+
+	cacheDataVolumes []string
+	// CacheDataVolumesCount is the number of cache data volumes.
+	CacheDataVolumesCount int
+	cacheVolumeSource     string
 	// classOfService shall contain a reference to the
 	// ClassOfService that this storage volume conforms to.
-	classOfService string
+	classOfService  string
+	clientEndpoints []string
	// ClientEndpointsCount is the number of client endpoints.
+	ClientEndpointsCount int
+	consistencyGroups    []string
+	// ConsistencyGroupsCount is the number of consistency groups associated with this volume.
+	ConsistencyGroupsCount int
+	controllers            []string
+	// ControllersCount is the number of storage controllers associated with this volume.
+	ControllersCount     int
+	dedicatedSpareDrives []string
 	// DedicatedSpareDrivesCount is the number of dedicated spare drives
 	DedicatedSpareDrivesCount int
+	drives                    []string
 	// DrivesCount is the number of associated drives.
-	DrivesCount int
+	DrivesCount           int
+	journalingMedia       string
+	owningStorageResource string
+	owningStorageService  string
+	providingStoragePool  string
+	serverEndpoints       []string
+	// ServerEndpointsCount is the number of server endpoints this volume is associated with.
+	ServerEndpointsCount int
+	spareResourceSets    []string
 	// SpareResourceSetsCount is the number of spare resource sets.
 	SpareResourceSetsCount int
-	// dedicatedSpareDrives shall be a reference to the resources that this
-	// volume is associated with and shall reference resources of type Drive.
-	// This property shall only contain references to Drive entities which are
-	// currently assigned as a dedicated spare and are able to support this Volume.
-	dedicatedSpareDrives []string
-	// DisplayName shall contain a user-configurable string to name the volume.
-	DisplayName string
-	// drives shall be a reference to the resources that this volume is
-	// associated with and shall reference resources of type Drive. This
-	// property shall only contain references to Drive entities which are
-	// currently members of the Volume, not hot spare Drives which are not
-	// currently a member of the volume.
-	drives []string
-	// SpareResourceSets referenced SpareResourceSet shall contain
-	// resources that may be utilized to replace the capacity provided by a
-	// failed resource having a compatible type.
-	spareResourceSets []string
-	// allocatedPools shall contain references to all storage pools allocated
-	// from this volume.
-	allocatedPools []string
-	// storageGroups shall contain references to all storage groups that include
-	// this volume.
-	storageGroups []string
+	storageGroups          []string
+	// StorageGroupsCount is the number of storage groups associated with this volume.
+ StorageGroupsCount int + // assignReplicaTargetTarget is the URL to send AssignReplicaTarget requests. assignReplicaTargetTarget string // checkConsistencyTarget is the URL to send CheckConsistency requests. @@ -406,60 +641,61 @@ type Volume struct { splitReplicationTarget string // suspendReplicationTarget is the URL to send SuspendReplication requests. suspendReplicationTarget string - // rawData holds the original serialized JSON so we can compare updates. - rawData []byte +} + +type volumeLinks struct { + CacheDataVolumes common.Links + CacheDataVolumesCount int `json:"CacheDataVolumes@odata.count"` + CacheVolumeSource common.Link + ClassOfService common.Link + ClientEndpoints common.Links + ClientEndpointsCount int `json:"ClientEndpoints@odata.count"` + ConsistencyGroups common.Links + ConsistencyGroupsCount int `json:"ConsistencyGroups@odata.count"` + Controllers common.Links + ControllersCount int `json:"Controllers@odata.count"` + DedicatedSpareDrives common.Links + DedicatedSpareDrivesCount int `json:"DedicatedSpareDrives@odata.count"` + Drives common.Links + DrivesCount int `json:"Drives@odata.count"` + JournalingMedia common.Link + OwningStorageResource common.Link + OwningStorageService common.Link + ProvidingStoragePool common.Link + ServerEndpoints common.Links + ServerEndpointsCount int `json:"ServerEndpoints@odata.count"` + SpareResourceSets common.Links + SpareResourceSetsCount int `json:"SpareResourceSets@odata.count"` + StorageGroups common.Links + StorageGroupsCount int `json:"StorageGroups@odata.count"` +} + +type volumeActions struct { + AssignReplicaTarget common.ActionTarget `json:"#Volume.AssignReplicaTarget"` + CheckConsistency common.ActionTarget `json:"#Volume.CheckConsistency"` + CreateReplicaTarget common.ActionTarget `json:"#Volume.CreateReplicaTarget"` + Initialize common.ActionTarget `json:"#Volume.Initialize"` + RemoveReplicaRelationship common.ActionTarget `json:"#Volume.RemoveReplicaRelationship"` + ResumeReplication common.ActionTarget `json:"#Volume.ResumeReplication"` + ReverseReplicationRelationship common.ActionTarget `json:"#Volume.ReverseReplicationRelationship"` + SplitReplication common.ActionTarget `json:"#Volume.SplitReplication"` + SuspendReplication common.ActionTarget `json:"#Volume.SuspendReplication"` } // UnmarshalJSON unmarshals a Volume object from the raw JSON. func (volume *Volume) UnmarshalJSON(b []byte) error { type temp Volume - type links struct { - // ClassOfService shall contain a reference to the - // ClassOfService that this storage volume conforms to. - ClassOfService common.Link - // DedicatedSpareDrives shall be a - // reference to the resources that this volume is associated with and - // shall reference resources of type Drive. This property shall only - // contain references to Drive entities which are currently assigned as a - // dedicated spare and are able to support this Volume. - DedicatedSpareDrives common.Links - // DedicatedSpareDrives@odata.count is - DedicatedSpareDrivesCount int `json:"DedicatedSpareDrives@odata.count"` - // Drives shall be a reference to the - // resources that this volume is associated with and shall reference - // resources of type Drive. This property shall only contain references - // to Drive entities which are currently members of the Volume, not hot - // spare Drives which are not currently a member of the volume. 
- Drives common.Links - // Drives@odata.count is - DrivesCount int `json:"Drives@odata.count"` - // SpareResourceSets is Each referenced SpareResourceSet shall contain - // resources that may be utilized to replace the capacity provided by a - // failed resource having a compatible type. - SpareResourceSets common.Links - // SpareResourceSets@odata.count is - SpareResourceSetsCount int `json:"SpareResourceSets@odata.count"` - } - - type actions struct { - AssignReplicaTarget common.ActionTarget `json:"#Volume.AssignReplicaTarget"` - CheckConsistency common.ActionTarget `json:"#Volume.CheckConsistency"` - CreateReplicaTarget common.ActionTarget `json:"#Volume.CreateReplicaTarget"` - Initialize common.ActionTarget `json:"#Volume.Initialize"` - RemoveReplicaRelationship common.ActionTarget `json:"#Volume.RemoveReplicaRelationship"` - ResumeReplication common.ActionTarget `json:"#Volume.ResumeReplication"` - ReverseReplicationRelationship common.ActionTarget `json:"#Volume.ReverseReplicationRelationship"` - SplitReplication common.ActionTarget `json:"#Volume.SplitReplication"` - SuspendReplication common.ActionTarget `json:"#Volume.SuspendReplication"` - } - var t struct { temp - AllocatedPools common.Links - StorageGroups common.Links - Links links - Actions actions + AllocatedPools common.Links + CapacitySources common.Links + Connections common.Links + Metrics common.Link + ReplicaTargets common.Links + StorageGroups common.Links + Links volumeLinks + Actions volumeActions } err := json.Unmarshal(b, &t) @@ -470,14 +706,40 @@ func (volume *Volume) UnmarshalJSON(b []byte) error { // Extract the links to other entities for later *volume = Volume(t.temp) volume.allocatedPools = t.AllocatedPools.ToStrings() + volume.capacitySources = t.CapacitySources.ToStrings() + volume.connections = t.Connections.ToStrings() + volume.metrics = t.Metrics.String() + volume.replicaTargets = t.ReplicaTargets.ToStrings() volume.storageGroups = t.StorageGroups.ToStrings() + + volume.cacheDataVolumes = t.Links.CacheDataVolumes.ToStrings() + volume.CacheDataVolumesCount = t.Links.CacheDataVolumesCount + volume.cacheVolumeSource = t.Links.CacheVolumeSource.String() volume.classOfService = t.Links.ClassOfService.String() + volume.clientEndpoints = t.Links.ClientEndpoints.ToStrings() + volume.ClientEndpointsCount = t.Links.ClientEndpointsCount + volume.consistencyGroups = t.Links.ConsistencyGroups.ToStrings() + volume.ConsistencyGroupsCount = t.Links.ConsistencyGroupsCount + volume.controllers = t.Links.Controllers.ToStrings() + volume.ControllersCount = t.Links.ControllersCount volume.dedicatedSpareDrives = t.Links.DedicatedSpareDrives.ToStrings() - volume.drives = t.Links.Drives.ToStrings() - volume.spareResourceSets = t.Links.SpareResourceSets.ToStrings() volume.DedicatedSpareDrivesCount = t.Links.DedicatedSpareDrivesCount + volume.drives = t.Links.Drives.ToStrings() volume.DrivesCount = t.Links.DrivesCount + volume.journalingMedia = t.Links.JournalingMedia.String() + volume.owningStorageResource = t.Links.OwningStorageResource.String() + volume.owningStorageService = t.Links.OwningStorageService.String() + volume.providingStoragePool = t.Links.ProvidingStoragePool.String() + volume.serverEndpoints = t.Links.ServerEndpoints.ToStrings() + volume.ServerEndpointsCount = t.Links.ServerEndpointsCount + volume.spareResourceSets = t.Links.SpareResourceSets.ToStrings() volume.SpareResourceSetsCount = t.Links.SpareResourceSetsCount + + if len(volume.storageGroups) == 0 { + volume.storageGroups = 
t.Links.StorageGroups.ToStrings()
+		volume.StorageGroupsCount = t.Links.StorageGroupsCount
+	}
+
 	volume.assignReplicaTargetTarget = t.Actions.AssignReplicaTarget.Target
 	volume.checkConsistencyTarget = t.Actions.CheckConsistency.Target
 	volume.createReplicaTargetTarget = t.Actions.CreateReplicaTarget.Target
@@ -513,10 +775,13 @@ func (volume *Volume) Update() error {
 		"DisplayName",
 		"Encrypted",
 		"EncryptionTypes",
+		"IOPerfModeEnabled",
+		"IsBootCapable",
 		"LowSpaceWarningThresholdPercents",
 		"ProvisioningPolicy",
 		"ReadCachePolicy",
 		"RecoverableCapacitySourceCount",
+		"ReplicationEnabled",
 		"StripSizeBytes",
 		"WriteCachePolicy",
 		"WriteHoleProtectionPolicy",
@@ -577,6 +842,36 @@ func ListReferencedVolumes(c common.Client, link string) ([]*Volume, error) {
 	return result, collectionError
 }
 
+// CacheDataVolumes gets the data volumes this volume serves as a cache volume for.
+func (volume *Volume) CacheDataVolumes() ([]*Volume, error) {
+	var result []*Volume
+
+	collectionError := common.NewCollectionError()
+	for _, uri := range volume.cacheDataVolumes {
+		item, err := GetVolume(volume.GetClient(), uri)
+		if err != nil {
+			collectionError.Failures[uri] = err
+		} else {
+			result = append(result, item)
+		}
+	}
+
+	if collectionError.Empty() {
+		return result, nil
+	}
+
+	return result, collectionError
+}
+
+// CacheVolumeSource gets the cache volume source for this volume.
+func (volume *Volume) CacheVolumeSource() (*Volume, error) {
+	if volume.cacheVolumeSource == "" {
+		return nil, nil
+	}
+
+	return GetVolume(volume.GetClient(), volume.cacheVolumeSource)
+}
+
 // ClassOfService gets the class of service that this storage volume conforms to.
 func (volume *Volume) ClassOfService() (*ClassOfService, error) {
 	if volume.classOfService == "" {
@@ -586,15 +881,80 @@ func (volume *Volume) ClassOfService() (*ClassOfService, error) {
 	return GetClassOfService(volume.GetClient(), volume.classOfService)
 }
 
+// ClientEndpoints gets the client Endpoints associated with this volume.
+func (volume *Volume) ClientEndpoints() ([]*redfish.Endpoint, error) {
+	var result []*redfish.Endpoint
+
+	collectionError := common.NewCollectionError()
+	for _, uri := range volume.clientEndpoints {
+		item, err := redfish.GetEndpoint(volume.GetClient(), uri)
+		if err != nil {
+			collectionError.Failures[uri] = err
+		} else {
+			result = append(result, item)
+		}
+	}
+
+	if collectionError.Empty() {
+		return result, nil
+	}
+
+	return result, collectionError
+}
+
+// ConsistencyGroups gets the ConsistencyGroups associated with this volume.
+func (volume *Volume) ConsistencyGroups() ([]*ConsistencyGroup, error) {
+	var result []*ConsistencyGroup
+
+	collectionError := common.NewCollectionError()
+	for _, uri := range volume.consistencyGroups {
+		item, err := GetConsistencyGroup(volume.GetClient(), uri)
+		if err != nil {
+			collectionError.Failures[uri] = err
+		} else {
+			result = append(result, item)
+		}
+	}
+
+	if collectionError.Empty() {
+		return result, nil
+	}
+
+	return result, collectionError
+}
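The read/write list in Update above now covers the new DisplayName and ReplicationEnabled properties; a minimal client-side sketch (not part of this patch):

    package example

    import "github.com/stmcginnis/gofish/swordfish"

    // renameAndEnableReplication renames a volume and turns replication on.
    // Update diffs the modified struct against the volume's original JSON and
    // PATCHes only the changed, writable properties.
    func renameAndEnableReplication(v *swordfish.Volume, name string) error {
    	v.DisplayName = name
    	v.ReplicationEnabled = true
    	return v.Update()
    }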
+
+// Controllers gets the controllers (of type StorageController) associated with
+// this volume. When the volume is of type NVMe, these may be both the physical
+// and logical controller representations.
+func (volume *Volume) Controllers() ([]*redfish.StorageController, error) {
+	var result []*redfish.StorageController
+
+	collectionError := common.NewCollectionError()
+	for _, uri := range volume.controllers {
+		item, err := redfish.GetStorageController(volume.GetClient(), uri)
+		if err != nil {
+			collectionError.Failures[uri] = err
+		} else {
+			result = append(result, item)
+		}
+	}
+
+	if collectionError.Empty() {
+		return result, nil
+	}
+
+	return result, collectionError
+}
+
 // getDrives gets a set of referenced drives.
 func (volume *Volume) getDrives(links []string) ([]*redfish.Drive, error) {
 	var result []*redfish.Drive
 
 	collectionError := common.NewCollectionError()
-	for _, driveLink := range links {
-		drive, err := redfish.GetDrive(volume.GetClient(), driveLink)
+	for _, uri := range links {
+		drive, err := redfish.GetDrive(volume.GetClient(), uri)
 		if err != nil {
-			collectionError.Failures[driveLink] = err
+			collectionError.Failures[uri] = err
 		} else {
 			result = append(result, drive)
 		}
@@ -618,6 +978,54 @@ func (volume *Volume) Drives() ([]*redfish.Drive, error) {
 	return volume.getDrives(volume.drives)
 }
 
+// OwningStorageResource gets the Storage resource that owns or contains this volume.
+func (volume *Volume) OwningStorageResource() (*redfish.Storage, error) {
+	if volume.owningStorageResource == "" {
+		return nil, nil
+	}
+
+	return redfish.GetStorage(volume.GetClient(), volume.owningStorageResource)
+}
+
+// OwningStorageService gets the StorageService that owns or contains this volume.
+func (volume *Volume) OwningStorageService() (*StorageService, error) {
+	if volume.owningStorageService == "" {
+		return nil, nil
+	}
+
+	return GetStorageService(volume.GetClient(), volume.owningStorageService)
+}
+
+// ProvidingStoragePool gets the StoragePool resource that provides this volume resource.
+func (volume *Volume) ProvidingStoragePool() (*StoragePool, error) {
+	if volume.providingStoragePool == "" {
+		return nil, nil
+	}
+
+	return GetStoragePool(volume.GetClient(), volume.providingStoragePool)
+}
+
+// ServerEndpoints gets the server Endpoints associated with this volume.
+func (volume *Volume) ServerEndpoints() ([]*redfish.Endpoint, error) {
+	var result []*redfish.Endpoint
+
+	collectionError := common.NewCollectionError()
+	for _, uri := range volume.serverEndpoints {
+		item, err := redfish.GetEndpoint(volume.GetClient(), uri)
+		if err != nil {
+			collectionError.Failures[uri] = err
+		} else {
+			result = append(result, item)
+		}
+	}
+
+	if collectionError.Empty() {
+		return result, nil
+	}
+
+	return result, collectionError
+}
+
 // SpareResourceSets gets the spare resources that can be used for this volume.
 func (volume *Volume) SpareResourceSets() ([]*SpareResourceSet, error) {
 	var result []*SpareResourceSet
@@ -640,16 +1048,17 @@ func (volume *Volume) SpareResourceSets() ([]*SpareResourceSet, error) {
 }
 
 // StorageGroups gets the storage groups that are associated with this volume.
+// This property is deprecated in favor of the Connections property.
 // StorageGroups gets the storage groups that are associated with this volume.
+// This property is deprecated in favor of the Connections property.
 func (volume *Volume) StorageGroups() ([]*StorageGroup, error) {
 	var result []*StorageGroup
 
 	collectionError := common.NewCollectionError()
-	for _, sgLink := range volume.storageGroups {
-		sg, err := GetStorageGroup(volume.GetClient(), sgLink)
+	for _, uri := range volume.storageGroups {
+		item, err := GetStorageGroup(volume.GetClient(), uri)
 		if err != nil {
-			collectionError.Failures[sgLink] = err
+			collectionError.Failures[uri] = err
 		} else {
-			result = append(result, sg)
+			result = append(result, item)
 		}
 	}
 
@@ -660,17 +1069,38 @@
 	return result, collectionError
 }
 
-// StoragePools gets the storage pools associated with this volume.
-func (volume *Volume) StoragePools() ([]*StoragePool, error) {
+// AllocatedPools gets the storage pools associated with this volume.
+func (volume *Volume) AllocatedPools() ([]*StoragePool, error) {
 	var result []*StoragePool
 
 	collectionError := common.NewCollectionError()
-	for _, sgLink := range volume.allocatedPools {
-		sg, err := GetStoragePool(volume.GetClient(), sgLink)
+	for _, uri := range volume.allocatedPools {
+		item, err := GetStoragePool(volume.GetClient(), uri)
+		if err != nil {
+			collectionError.Failures[uri] = err
+		} else {
+			result = append(result, item)
+		}
+	}
+
+	if collectionError.Empty() {
+		return result, nil
+	}
+
+	return result, collectionError
+}
+
+// CapacitySources gets the space allocations to this volume.
+func (volume *Volume) CapacitySources() ([]*CapacitySource, error) {
+	var result []*CapacitySource
+
+	collectionError := common.NewCollectionError()
+	for _, uri := range volume.capacitySources {
+		item, err := GetCapacitySource(volume.GetClient(), uri)
 		if err != nil {
-			collectionError.Failures[sgLink] = err
+			collectionError.Failures[uri] = err
 		} else {
-			result = append(result, sg)
+			result = append(result, item)
 		}
 	}
 
@@ -681,6 +1111,36 @@
 	return result, collectionError
 }
 
+// Connections gets the connections that include this volume.
+func (volume *Volume) Connections() ([]*redfish.Connection, error) {
+	var result []*redfish.Connection
+
+	collectionError := common.NewCollectionError()
+	for _, uri := range volume.connections {
+		item, err := redfish.GetConnection(volume.GetClient(), uri)
+		if err != nil {
+			collectionError.Failures[uri] = err
+		} else {
+			result = append(result, item)
+		}
+	}
+
+	if collectionError.Empty() {
+		return result, nil
+	}
+
+	return result, collectionError
+}
+
+// Metrics gets the metrics for this volume. IO metrics are reported in the IOStatistics property.
+func (volume *Volume) Metrics() (*VolumeMetrics, error) {
+	if volume.metrics == "" {
+		return nil, nil
+	}
+
+	return GetVolumeMetrics(volume.GetClient(), volume.metrics)
+}
+
 // AssignReplicaTarget is used to establish a replication relationship by
 // assigning an existing volume to serve as a target replica for an existing
 // source volume.
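The StoragePools-to-AllocatedPools rename above is a breaking change for
callers, and StorageGroups is now documented as deprecated in favor of
Connections. A migration sketch, assuming caller code written against the
previous release and the same package and imports as the earlier examples:

    // poolsAndConnections shows the replacement calls side by side.
    func poolsAndConnections(vol *swordfish.Volume) {
    	// Previously: pools, err := vol.StoragePools()
    	pools, err := vol.AllocatedPools()
    	if err != nil {
    		log.Printf("some pools failed to load: %v", err)
    	}
    	for _, pool := range pools {
    		fmt.Printf("allocated from pool: %s\n", pool.Name)
    	}

    	// Prefer Connections over the deprecated StorageGroups traversal.
    	conns, err := vol.Connections()
    	if err != nil {
    		log.Printf("some connections failed to load: %v", err)
    	}
    	for _, conn := range conns {
    		fmt.Printf("exposed via connection: %s\n", conn.Name)
    	}
    }
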
diff --git a/swordfish/volumemetrics.go b/swordfish/volumemetrics.go
new file mode 100644
index 00000000..80f84387
--- /dev/null
+++ b/swordfish/volumemetrics.go
@@ -0,0 +1,111 @@
+//
+// SPDX-License-Identifier: BSD-3-Clause
+//
+
+package swordfish
+
+import (
+	"encoding/json"
+
+	"github.com/stmcginnis/gofish/common"
+)
+
+// VolumeMetrics shall contain the usage and health statistics for a volume in a Redfish implementation.
+type VolumeMetrics struct {
+	common.Entity
+	// ODataContext is the odata context.
+	ODataContext string `json:"@odata.context"`
+	// ODataEtag is the odata etag.
+	ODataEtag string `json:"@odata.etag"`
+	// ODataType is the odata type.
+	ODataType string `json:"@odata.type"`
+	// ConsistencyCheckCount shall contain the number of consistency checks completed over the lifetime of the volume.
+	ConsistencyCheckCount int64
+	// ConsistencyCheckErrorCount shall contain the number of consistency check errors over the lifetime of the volume.
+	ConsistencyCheckErrorCount int64
+	// CorrectableIOReadErrorCount shall contain the number of the correctable read errors for the lifetime of the
+	// volume.
+	CorrectableIOReadErrorCount int64
+	// CorrectableIOWriteErrorCount shall contain the number of the correctable write errors for the lifetime of the
+	// volume.
+	CorrectableIOWriteErrorCount int64
+	// Description provides a description of this resource.
+	Description string
+	// IOStatistics shall represent IO statistics for this volume.
+	IOStatistics IOStatistics
+	// Oem shall contain the OEM extensions. All values for properties that this object contains shall conform to the
+	// Redfish Specification-described requirements.
+	OEM json.RawMessage `json:"Oem"`
+	// RebuildErrorCount shall contain the number of rebuild errors over the lifetime of the volume.
+	RebuildErrorCount int64
+	// StateChangeCount shall contain the number of state changes (changes in Status.State) for this volume.
+	StateChangeCount int64
+	// UncorrectableIOReadErrorCount shall contain the number of the uncorrectable read errors for the lifetime of the
+	// volume.
+	UncorrectableIOReadErrorCount int64
+	// UncorrectableIOWriteErrorCount shall contain the number of the uncorrectable write errors for the lifetime of
+	// the volume.
+	UncorrectableIOWriteErrorCount int64
+}
+
+// GetVolumeMetrics will get a VolumeMetrics instance from the service.
+func GetVolumeMetrics(c common.Client, uri string) (*VolumeMetrics, error) {
+	resp, err := c.Get(uri)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var volumemetrics VolumeMetrics
+	err = json.NewDecoder(resp.Body).Decode(&volumemetrics)
+	if err != nil {
+		return nil, err
+	}
+
+	volumemetrics.SetClient(c)
+	return &volumemetrics, nil
+}
+
+// ListReferencedVolumeMetricss gets the collection of VolumeMetrics from
+// a provided reference.
+func ListReferencedVolumeMetricss(c common.Client, link string) ([]*VolumeMetrics, error) {
+	var result []*VolumeMetrics
+	if link == "" {
+		return result, nil
+	}
+
+	type GetResult struct {
+		Item  *VolumeMetrics
+		Link  string
+		Error error
+	}
+
+	ch := make(chan GetResult)
+	collectionError := common.NewCollectionError()
+	get := func(link string) {
+		volumemetrics, err := GetVolumeMetrics(c, link)
+		ch <- GetResult{Item: volumemetrics, Link: link, Error: err}
+	}
+
+	go func() {
+		err := common.CollectList(get, c, link)
+		if err != nil {
+			collectionError.Failures[link] = err
+		}
+		close(ch)
+	}()
+
+	for r := range ch {
+		if r.Error != nil {
+			collectionError.Failures[r.Link] = r.Error
+		} else {
+			result = append(result, r.Item)
+		}
+	}
+
+	if collectionError.Empty() {
+		return result, nil
+	}
+
+	return result, collectionError
+}
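A closing usage sketch for the new VolumeMetrics resource, assuming the
same package and imports as the earlier examples: Volume.Metrics returns
(nil, nil) when the service exposes no metrics link, and the counters
below are lifetime totals per the property descriptions above.

    // reportVolumeHealth prints a few lifetime error counters for a volume.
    func reportVolumeHealth(vol *swordfish.Volume) error {
    	metrics, err := vol.Metrics()
    	if err != nil {
    		return err
    	}
    	if metrics == nil {
    		fmt.Println("no metrics reported for this volume")
    		return nil
    	}
    	fmt.Printf("uncorrectable read errors:  %d\n", metrics.UncorrectableIOReadErrorCount)
    	fmt.Printf("uncorrectable write errors: %d\n", metrics.UncorrectableIOWriteErrorCount)
    	fmt.Printf("rebuild errors:             %d\n", metrics.RebuildErrorCount)
    	return nil
    }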