diff --git a/go.mod b/go.mod index d3e08a20eae..159bfc3845a 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/metal3-io/baremetal-operator/pkg/hardwareutils v0.0.0 github.com/mitchellh/cli v1.1.2 github.com/openshift-metal3/terraform-provider-ironic v0.2.7 - github.com/openshift/api v0.0.0-20220124143425-d74727069f6f + github.com/openshift/api v0.0.0-20220203140920-bfe251c51d2d github.com/openshift/client-go v0.0.0-20211025111749-96ca2abfc56c github.com/openshift/cloud-credential-operator v0.0.0-20200316201045-d10080b52c9e github.com/openshift/cluster-api-provider-baremetal v0.0.0-20210924143856-c2d3ece4da38 @@ -77,6 +77,7 @@ require ( github.com/terraform-providers/terraform-provider-azurestack v0.10.0 github.com/terraform-providers/terraform-provider-ignition/v2 v2.1.0 github.com/terraform-providers/terraform-provider-local v1.4.0 + github.com/terraform-providers/terraform-provider-nutanix v1.1.0 github.com/terraform-providers/terraform-provider-random v1.3.2-0.20190925210718-83518d96ae4f github.com/ulikunitz/xz v0.5.8 github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50 @@ -103,6 +104,8 @@ require ( sigs.k8s.io/controller-tools v0.7.0 ) +require github.com/kdomanski/iso9660 v0.2.1 + require ( cloud.google.com/go/bigtable v1.5.0 // indirect cloud.google.com/go/storage v1.11.0 // indirect @@ -376,6 +379,7 @@ replace ( github.com/terraform-providers/terraform-provider-ignition/v2 => github.com/community-terraform-providers/terraform-provider-ignition/v2 v2.1.0 k8s.io/client-go => k8s.io/client-go v0.23.0 k8s.io/kubectl => k8s.io/kubectl v0.23.0 + github.com/terraform-providers/terraform-provider-nutanix => github.com/nutanix/terraform-provider-nutanix v1.2.2-0.20211029075448-e21f85ac2cf7 sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20210121023454-5ffc5f422a80 sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure 
v0.1.0-alpha.3.0.20210626224711-5d94c794092f // Indirect dependency through MAO from cluster API providers sigs.k8s.io/cluster-api-provider-openstack => github.com/openshift/cluster-api-provider-openstack v0.0.0-20211111204942-611d320170af diff --git a/go.sum b/go.sum index 69922528556..c1e3ba1bbb1 100644 --- a/go.sum +++ b/go.sum @@ -1208,6 +1208,8 @@ github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0Lh github.com/katbyte/terrafmt v0.2.1-0.20200303174203-e6a3e82cb21b/go.mod h1:WRq5tDmK04tcYbEr400zAUWtOK0jix54e8YeHP3IoQg= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kdomanski/iso9660 v0.2.1 h1:IepyfCeEqx77rZeOM4XZgWB4XJWEF7Jp+1ehMTrSElg= +github.com/kdomanski/iso9660 v0.2.1/go.mod h1:LY50s7BlG+ES6V99oxYGd0ub9giLrKdHZb3LLOweBj0= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= @@ -1439,6 +1441,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/nishanths/exhaustive v0.0.0-20200708172631-8866003e3856/go.mod h1:wBEpHwM2OdmeNpdCvRPUlkEbBuaFmcK4Wv8Q7FuGW3c= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= +github.com/nutanix/terraform-provider-nutanix v1.2.2-0.20211029075448-e21f85ac2cf7 h1:XSW7lfLeXiwu1wT8qchccK76lyco9MdSuch5RYjeaZA= +github.com/nutanix/terraform-provider-nutanix 
v1.2.2-0.20211029075448-e21f85ac2cf7/go.mod h1:XNd4Ph1C07UCzVdGq9IJ98nsRLq4gQwmiZ2fJo6Vhlg= github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= @@ -1518,8 +1522,8 @@ github.com/openshift/api v0.0.0-20210816181336-8ff39b776da3/go.mod h1:x81TFA31x1 github.com/openshift/api v0.0.0-20211025104849-a11323ccb6ea/go.mod h1:RsQCVJu4qhUawxxDP7pGlwU3IA4F01wYm3qKEu29Su8= github.com/openshift/api v0.0.0-20211108165917-be1be0e89115/go.mod h1:RsQCVJu4qhUawxxDP7pGlwU3IA4F01wYm3qKEu29Su8= github.com/openshift/api v0.0.0-20211209135129-c58d9f695577/go.mod h1:DoslCwtqUpr3d/gsbq4ZlkaMEdYqKxuypsDjorcHhME= -github.com/openshift/api v0.0.0-20220124143425-d74727069f6f h1:iOTv1WudhVm2UsoST+L+ZrA5A9w57h9vmQsdlBuqG6g= -github.com/openshift/api v0.0.0-20220124143425-d74727069f6f/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4= +github.com/openshift/api v0.0.0-20220203140920-bfe251c51d2d h1:WuD14VS4SFKKH5hKeYiHTswlEByICzMNvaZrDXUjZiY= +github.com/openshift/api v0.0.0-20220203140920-bfe251c51d2d/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4= github.com/openshift/baremetal-operator v0.0.0-20211201170610-92ffa60c683d h1:4KkYItiWENIs2bV6lKOcIMdWUnZJA+GWT9Us7z0P1UA= github.com/openshift/baremetal-operator v0.0.0-20211201170610-92ffa60c683d/go.mod h1:p32F1DBUxfgd0JjM4rCuhJomFJokEoWR1Z/LZNL2LM8= github.com/openshift/baremetal-operator/apis v0.0.0-20211201170610-92ffa60c683d h1:DHGXCvXWsPExutf3tgQYD4TVDSAOviLXO7Vnc42oXhw= diff --git a/pkg/types/clustermetadata.go b/pkg/types/clustermetadata.go index e0ab9fa696e..e6f5c920e42 100644 --- a/pkg/types/clustermetadata.go +++ b/pkg/types/clustermetadata.go @@ -8,6 +8,7 @@ import ( "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/ibmcloud" "github.com/openshift/installer/pkg/types/libvirt" + 
"github.com/openshift/installer/pkg/types/nutanix" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" "github.com/openshift/installer/pkg/types/vsphere" @@ -37,6 +38,7 @@ type ClusterPlatformMetadata struct { BareMetal *baremetal.Metadata `json:"baremetal,omitempty"` Ovirt *ovirt.Metadata `json:"ovirt,omitempty"` VSphere *vsphere.Metadata `json:"vsphere,omitempty"` + Nutanix *nutanix.Metadata `json:"nutanix,omitempty"` } // Platform returns a string representation of the platform @@ -76,5 +78,8 @@ func (cpm *ClusterPlatformMetadata) Platform() string { if cpm.VSphere != nil { return vsphere.Name } + if cpm.Nutanix != nil { + return nutanix.Name + } return "" } diff --git a/pkg/types/defaults/installconfig.go b/pkg/types/defaults/installconfig.go index ace0565aa01..f72ea85d869 100644 --- a/pkg/types/defaults/installconfig.go +++ b/pkg/types/defaults/installconfig.go @@ -12,6 +12,7 @@ import ( ibmclouddefaults "github.com/openshift/installer/pkg/types/ibmcloud/defaults" libvirtdefaults "github.com/openshift/installer/pkg/types/libvirt/defaults" nonedefaults "github.com/openshift/installer/pkg/types/none/defaults" + nutanixdefaults "github.com/openshift/installer/pkg/types/nutanix/defaults" openstackdefaults "github.com/openshift/installer/pkg/types/openstack/defaults" ovirtdefaults "github.com/openshift/installer/pkg/types/ovirt/defaults" vspheredefaults "github.com/openshift/installer/pkg/types/vsphere/defaults" @@ -107,5 +108,8 @@ func SetInstallConfigDefaults(c *types.InstallConfig) { } case c.Platform.None != nil: nonedefaults.SetPlatformDefaults(c.Platform.None) + case c.Platform.Nutanix != nil: + nutanixdefaults.SetPlatformDefaults(c.Platform.Nutanix, c) } + } diff --git a/pkg/types/installconfig.go b/pkg/types/installconfig.go index a3ee2651029..d25e58d51d5 100644 --- a/pkg/types/installconfig.go +++ b/pkg/types/installconfig.go @@ -13,6 +13,7 @@ import ( "github.com/openshift/installer/pkg/types/ibmcloud" 
"github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" + "github.com/openshift/installer/pkg/types/nutanix" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" "github.com/openshift/installer/pkg/types/vsphere" @@ -37,6 +38,7 @@ var ( azure.Name, gcp.Name, ibmcloud.Name, + nutanix.Name, openstack.Name, ovirt.Name, vsphere.Name, @@ -212,6 +214,10 @@ type Platform struct { // Ovirt is the configuration used when installing on oVirt. // +optional Ovirt *ovirt.Platform `json:"ovirt,omitempty"` + + // Nutanix is the configuration used when installing on Nutanix. + // +optional + Nutanix *nutanix.Platform `json:"nutanix,omitempty"` } // Name returns a string representation of the platform (e.g. "aws" if @@ -243,6 +249,8 @@ func (p *Platform) Name() string { return vsphere.Name case p.Ovirt != nil: return ovirt.Name + case p.Nutanix != nil: + return nutanix.Name default: return "" } diff --git a/pkg/types/machinepools.go b/pkg/types/machinepools.go index e37ee8eba4f..5fa6e893b18 100644 --- a/pkg/types/machinepools.go +++ b/pkg/types/machinepools.go @@ -8,6 +8,7 @@ import ( "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/ibmcloud" "github.com/openshift/installer/pkg/types/libvirt" + "github.com/openshift/installer/pkg/types/nutanix" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" "github.com/openshift/installer/pkg/types/vsphere" @@ -100,6 +101,9 @@ type MachinePoolPlatform struct { // Ovirt is the configuration used when installing on oVirt. Ovirt *ovirt.MachinePool `json:"ovirt,omitempty"` + + // Nutanix is the configuration used when installing on Nutanix. + Nutanix *nutanix.MachinePool `json:"nutanix,omitempty"` } // Name returns a string representation of the platform (e.g. 
"aws" if @@ -129,6 +133,8 @@ func (p *MachinePoolPlatform) Name() string { return vsphere.Name case p.Ovirt != nil: return ovirt.Name + case p.Nutanix != nil: + return nutanix.Name default: return "" } diff --git a/pkg/types/nutanix/OWNERS b/pkg/types/nutanix/OWNERS new file mode 100644 index 00000000000..591f07c1073 --- /dev/null +++ b/pkg/types/nutanix/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. + +approvers: + - nutanix-approvers diff --git a/pkg/types/nutanix/client.go b/pkg/types/nutanix/client.go new file mode 100644 index 00000000000..6fff4a5f785 --- /dev/null +++ b/pkg/types/nutanix/client.go @@ -0,0 +1,35 @@ +package nutanix + +import ( + "context" + "fmt" + "time" + + nutanixClient "github.com/terraform-providers/terraform-provider-nutanix/client" + nutanixClientV3 "github.com/terraform-providers/terraform-provider-nutanix/client/v3" + "k8s.io/klog" +) + + +func CreateNutanixClient(ctx context.Context, prismCentral, port, username, password string, insecure bool) (*nutanixClientV3.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 60*time.Second) + defer cancel() + + cred := nutanixClient.Credentials{ + URL: fmt.Sprintf("%s:%s", prismCentral, port), + Username: username, + Password: password, + Port: port, + Endpoint: prismCentral, + Insecure: insecure, + } + + cli, err := nutanixClientV3.NewV3Client(cred) + if err != nil { + klog.Errorf("Failed to create the nutanix client. 
error: %v", err) + return nil, err + } + + return cli, nil + +} diff --git a/pkg/types/nutanix/defaults/platform.go b/pkg/types/nutanix/defaults/platform.go new file mode 100644 index 00000000000..6922750317b --- /dev/null +++ b/pkg/types/nutanix/defaults/platform.go @@ -0,0 +1,10 @@ +package defaults + +import ( + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/nutanix" +) + + +// SetPlatformDefaults sets the defaults for the platform. +func SetPlatformDefaults(p *nutanix.Platform, installConfig *types.InstallConfig) {} diff --git a/pkg/types/nutanix/defaults/platform_test.go b/pkg/types/nutanix/defaults/platform_test.go new file mode 100644 index 00000000000..8be85a0225d --- /dev/null +++ b/pkg/types/nutanix/defaults/platform_test.go @@ -0,0 +1,42 @@ +package defaults + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/nutanix" +) + +const testClusterName = "test-cluster" + +func defaultPlatform() *nutanix.Platform { + return &nutanix.Platform{} +} + +func TestSetPlatformDefaults(t *testing.T) { + cases := []struct { + name string + platform *nutanix.Platform + expected *nutanix.Platform + }{ + { + name: "empty", + platform: &nutanix.Platform{}, + expected: defaultPlatform(), + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + ic := &types.InstallConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: testClusterName, + }, + } + SetPlatformDefaults(tc.platform, ic) + assert.Equal(t, tc.expected, tc.platform, "unexpected platform") + }) + } +} diff --git a/pkg/types/nutanix/doc.go b/pkg/types/nutanix/doc.go new file mode 100644 index 00000000000..12e285fccd8 --- /dev/null +++ b/pkg/types/nutanix/doc.go @@ -0,0 +1,5 @@ +// Package nutanix contains Nutanix-specific structures for installer +// configuration and management. 
+package nutanix + +const Name = "nutanix" diff --git a/pkg/types/nutanix/helpers.go b/pkg/types/nutanix/helpers.go new file mode 100644 index 00000000000..debc52a794d --- /dev/null +++ b/pkg/types/nutanix/helpers.go @@ -0,0 +1,137 @@ +package nutanix + +import ( + "encoding/json" + "fmt" + "os" + "strings" + "time" + + "github.com/google/uuid" + "github.com/kdomanski/iso9660" + "github.com/pkg/errors" + nutanixClientV3 "github.com/terraform-providers/terraform-provider-nutanix/client/v3" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +const ( + diskLabel = "config-2" + isoFile = "bootstrap-ign.iso" + metadataFilePath = "openstack/latest/meta_data.json" + userDataFilePath = "openstack/latest/user_data" + sleepTime = 10 * time.Second + timeout = 5 * time.Minute +) + +type MetadataCloudInit struct { + UUID string `json:"uuid"` +} + +func bootISOImageName(infraID string) string { + return fmt.Sprintf("%s-%s", infraID, isoFile) +} + +func createBootstrapISO(infraID, userData string) (string, error) { + id := uuid.New() + metaObj := &MetadataCloudInit{ + UUID: id.String(), + } + fullISOFile := bootISOImageName(infraID) + metadata, err := json.Marshal(metaObj) + if err != nil { + return "", errors.Wrap(err, fmt.Sprintf("failed marshal metadata struct to json")) + } + writer, err := iso9660.NewWriter() + if err != nil { + return "", errors.Wrap(err, fmt.Sprintf("failed to create writer: %s", err)) + } + defer writer.Cleanup() + + userDataReader := strings.NewReader(userData) + err = writer.AddFile(userDataReader, userDataFilePath) + if err != nil { + return "", errors.Wrap(err, fmt.Sprintf("failed to add file: %s", err)) + } + + metadataReader := strings.NewReader(string(metadata)) + err = writer.AddFile(metadataReader, metadataFilePath) + if err != nil { + return "", errors.Wrap(err, fmt.Sprintf("failed to add file: %s", err)) + } + + outputFile, err := os.OpenFile(fullISOFile, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644) + if err != nil { + return 
"", errors.Wrap(err, fmt.Sprintf("failed to create file: %s", err)) + } + + err = writer.WriteTo(outputFile, diskLabel) + if err != nil { + return "", errors.Wrap(err, fmt.Sprintf("failed to write ISO image: %s", err)) + } + + err = outputFile.Close() + if err != nil { + return "", errors.Wrap(err, fmt.Sprintf("failed to close output file: %s", err)) + } + return fullISOFile, nil +} + +func waitForTasks(clientV3 nutanixClientV3.Service, taskUUIDs []string) error { + for _, t := range taskUUIDs { + err := waitForTask(clientV3, t) + if err != nil { + return err + } + } + return nil +} + +func waitForTask(clientV3 nutanixClientV3.Service, taskUUID string) error { + finished := false + var err error + for start := time.Now(); time.Since(start) < timeout; { + finished, err = isTaskFinished(clientV3, taskUUID) + if err != nil { + return err + } + if finished { + break + } + time.Sleep(sleepTime) + } + if !finished { + return errors.Errorf("timeout while waiting for task UUID: %s", taskUUID) + } + + return nil +} + +func isTaskFinished(clientV3 nutanixClientV3.Service, taskUUID string) (bool, error) { + isFinished := map[string]bool{ + "QUEUED": false, + "RUNNING": false, + "SUCCEEDED": true, + } + status, err := getTaskStatus(clientV3, taskUUID) + if err != nil { + return false, err + } + if val, ok := isFinished[status]; ok { + return val, nil + } + return false, errors.Errorf("Retrieved unexpected task status: %s", status) + +} + +func getTaskStatus(clientV3 nutanixClientV3.Service, taskUUID string) (string, error) { + v, err := clientV3.GetTask(taskUUID) + + if err != nil { + return "", err + } + + if *v.Status == "INVALID_UUID" || *v.Status == "FAILED" { + return *v.Status, errors.Errorf("error_detail: %s, progress_message: %s", utils.StringValue(v.ErrorDetail), utils.StringValue(v.ProgressMessage)) + } + return *v.Status, nil +} diff --git a/pkg/types/nutanix/machinepool.go b/pkg/types/nutanix/machinepool.go new file mode 100644 index 00000000000..b87cc07f889 --- 
/dev/null +++ b/pkg/types/nutanix/machinepool.go @@ -0,0 +1,58 @@ +package nutanix + +// MachinePool stores the configuration for a machine pool installed +// on Nutanix. +type MachinePool struct { + // NumCPUs is the total number of virtual processor cores to assign a vm. + // + // +optional + NumCPUs int64 `json:"cpus"` + + // NumCoresPerSocket is the number of cores per socket in a vm. The number + // of vCPUs on the vm will be NumCPUs/NumCoresPerSocket. + // + // +optional + NumCoresPerSocket int64 `json:"coresPerSocket"` + + // Memory is the size of a VM's memory in MB. + // + // +optional + MemoryMiB int64 `json:"memoryMB"` + + // OSDisk defines the storage for instance. + // + // +optional + OSDisk `json:"osDisk"` +} + + +// OSDisk defines the disk for a virtual machine. +type OSDisk struct { + // DiskSizeMib defines the size of disk in MiB. + // + // +optional + DiskSizeMib int64 `json:"diskSizeMib"` +} + +// Set sets the values from `required` to `p`. +func (p *MachinePool) Set(required *MachinePool) { + if required == nil || p == nil { + return + } + + if required.NumCPUs != 0 { + p.NumCPUs = required.NumCPUs + } + + if required.NumCoresPerSocket != 0 { + p.NumCoresPerSocket = required.NumCoresPerSocket + } + + if required.MemoryMiB != 0 { + p.MemoryMiB = required.MemoryMiB + } + + if required.OSDisk.DiskSizeMib != 0 { + p.OSDisk.DiskSizeMib = required.OSDisk.DiskSizeMib + } +} diff --git a/pkg/types/nutanix/metadata.go b/pkg/types/nutanix/metadata.go new file mode 100644 index 00000000000..6b82f3d2aa7 --- /dev/null +++ b/pkg/types/nutanix/metadata.go @@ -0,0 +1,15 @@ +package nutanix + +// Metadata contains Nutanix metadata (e.g. for uninstalling the cluster). +type Metadata struct { + // PrismCentral is the domain name or IP address of the Prism Central. + PrismCentral string `json:"prismCentral"` + // Username is the name of the user to use to connect to the Prism Central. 
+ Username string `json:"username"` + // Password is the password for the user to use to connect to the Prism Central. + Password string `json:"password"` + // Port is the port used to connect to the Prism Central. + Port string `json:"port"` + // Disable certificate checking when connecting to Prism Central. + Insecure bool `json:"insecure"` +} diff --git a/pkg/types/nutanix/platform.go b/pkg/types/nutanix/platform.go new file mode 100644 index 00000000000..2ef6065d5f0 --- /dev/null +++ b/pkg/types/nutanix/platform.go @@ -0,0 +1,50 @@ +package nutanix + +// Platform stores any global configuration used for Nutanix platforms. +type Platform struct { + // PrismCentral is the domain name or IP address of the Prism Central. + PrismCentral string `json:"prismCentral"` + + // Insecure disables certificate checking when connecting to Prism Central. + Insecure bool `json:"insecure"` + + // Port is the port to use to connect to the Prism Central. + Port string `json:"port"` + + // Username is the name of the user to use to connect to the Prism Central. + Username string `json:"username"` + + // Password is the password for the user to use to connect to the Prism Central. + Password string `json:"password"` + + // PrismElement is the name of the Prism Element cluster to use in the Prism Central. + PrismElementUUID string `json:"prismElementUuid"` + + // DefaultStorageContainer is the default datastore to use for provisioning volumes. 
+ // DefaultStorageContainer string `json:"defaultStorageContainer"` + + // ClusterOSImage overrides the url provided in rhcos.json to download the RHCOS Image + ClusterOSImage string `json:"clusterOSImage,omitempty"` + + // APIVIP is the virtual IP address for the api endpoint + // + // +kubebuilder:validation:format=ip + // +optional + APIVIP string `json:"apiVIP,omitempty"` + + // IngressVIP is the virtual IP address for ingress + // + // +kubebuilder:validation:format=ip + // +optional + IngressVIP string `json:"ingressVIP,omitempty"` + + // DefaultMachinePlatform is the default configuration used when + // installing on Nutanix for machine pools which do not define their own + // platform configuration. + // +optional + DefaultMachinePlatform *MachinePool `json:"defaultMachinePlatform,omitempty"` + + // Network specifies the name of the network to be used by the cluster. + SubnetUUID string `json:"subnetUuid,omitempty"` +} + diff --git a/pkg/types/nutanix/validation/machinepool.go b/pkg/types/nutanix/validation/machinepool.go new file mode 100644 index 00000000000..eebcd8c07f3 --- /dev/null +++ b/pkg/types/nutanix/validation/machinepool.go @@ -0,0 +1,28 @@ +package validation + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/openshift/installer/pkg/types/nutanix" +) + +// ValidateMachinePool checks that the specified machine pool is valid. 
+func ValidateMachinePool(p *nutanix.MachinePool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if p.DiskSizeMib < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("diskSizeGB"), p.DiskSizeMib, "storage disk size must be positive")) + } + if p.MemoryMiB < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("memoryMB"), p.MemoryMiB, "memory size must be positive")) + } + if p.NumCPUs < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("cpus"), p.NumCPUs, "number of CPUs must be positive")) + } + if p.NumCoresPerSocket < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("coresPerSocket"), p.NumCoresPerSocket, "cores per socket must be positive")) + } + if p.NumCoresPerSocket >= 0 && p.NumCPUs >= 0 && p.NumCoresPerSocket > p.NumCPUs { + allErrs = append(allErrs, field.Invalid(fldPath.Child("coresPerSocket"), p.NumCoresPerSocket, "cores per socket must be less than number of CPUs")) + } + return allErrs +} diff --git a/pkg/types/nutanix/validation/machinepool_test.go b/pkg/types/nutanix/validation/machinepool_test.go new file mode 100644 index 00000000000..bf1a73908c5 --- /dev/null +++ b/pkg/types/nutanix/validation/machinepool_test.go @@ -0,0 +1,67 @@ +package validation + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/openshift/installer/pkg/types/nutanix" +) + +func TestValidateMachinePool(t *testing.T) { + cases := []struct { + name string + pool *nutanix.MachinePool + expectedErrMsg string + }{ + { + name: "empty", + pool: &nutanix.MachinePool{}, + expectedErrMsg: "", + }, { + name: "negative disk size", + pool: &nutanix.MachinePool{ + OSDisk: nutanix.OSDisk{ + DiskSizeGB: -1, + }, + }, + expectedErrMsg: `^test-path\.diskSizeGB: Invalid value: -1: storage disk size must be positive$`, + }, { + name: "negative CPUs", + pool: &nutanix.MachinePool{ + NumCPUs: -1, + }, + expectedErrMsg: `^test-path\.cpus: Invalid value: -1: 
number of CPUs must be positive$`, + }, { + name: "negative cores", + pool: &nutanix.MachinePool{ + NumCoresPerSocket: -1, + }, + expectedErrMsg: `^test-path\.coresPerSocket: Invalid value: -1: cores per socket must be positive$`, + }, { + name: "negative memory", + pool: &nutanix.MachinePool{ + MemoryMiB: -1, + }, + expectedErrMsg: `^test-path\.memoryMB: Invalid value: -1: memory size must be positive$`, + }, { + name: "less CPUs than cores per socket", + pool: &nutanix.MachinePool{ + NumCPUs: 1, + NumCoresPerSocket: 8, + }, + expectedErrMsg: `^test-path\.coresPerSocket: Invalid value: 8: cores per socket must be less than number of CPUs$`, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := ValidateMachinePool(tc.pool, field.NewPath("test-path")).ToAggregate() + if tc.expectedErrMsg == "" { + assert.NoError(t, err) + } else { + assert.Regexp(t, tc.expectedErrMsg, err) + } + }) + } +} diff --git a/pkg/types/nutanix/validation/platform.go b/pkg/types/nutanix/validation/platform.go new file mode 100644 index 00000000000..e19b04401bb --- /dev/null +++ b/pkg/types/nutanix/validation/platform.go @@ -0,0 +1,85 @@ +package validation + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/openshift/installer/pkg/types/nutanix" + "github.com/openshift/installer/pkg/validate" +) + +// ValidatePlatform checks that the specified platform is valid. 
+// TODO(nutanix): Revisit for further expanding the validation logic +func ValidatePlatform(p *nutanix.Platform, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(p.PrismCentral) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("prismCentral"), "must specify the Prism Central")) + } + if len(p.PrismElementUUID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("prismElement"), "must specify the Prism Element")) + } + if len(p.Username) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("username"), "must specify the username")) + } + if len(p.Password) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("password"), "must specify the password")) + } + if len(p.Port) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("port"), "must specify the port")) + } + if len(p.SubnetUUID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("subnet"), "must specify the subnet")) + } + if len(p.PrismCentral) != 0 { + if err := validate.Host(p.PrismCentral); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("prismCentral"), p.PrismCentral, "must be the domain name or IP address of the Prism Central")) + } + } + + // If all VIPs are empty, skip IP validation. All VIPs are required to be defined together. + if strings.Join([]string{p.APIVIP, p.IngressVIP}, "") != "" { + allErrs = append(allErrs, validateVIPs(p, fldPath)...) + } + + return allErrs +} + +// ValidateForProvisioning checks that the specified platform is valid. 
+func ValidateForProvisioning(p *nutanix.Platform, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(p.PrismElementUUID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("prismElements"), "must specify the cluster")) + } + + if len(p.SubnetUUID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("subnet"), "must specify the subnet")) + } + + allErrs = append(allErrs, validateVIPs(p, fldPath)...) + return allErrs +} + +// validateVIPs checks that all required VIPs are provided and are valid IP addresses. +func validateVIPs(p *nutanix.Platform, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(p.APIVIP) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("apiVIP"), "must specify a VIP for the API")) + } else if err := validate.IP(p.APIVIP); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVIP"), p.APIVIP, err.Error())) + } + + if len(p.IngressVIP) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("ingressVIP"), "must specify a VIP for Ingress")) + } else if err := validate.IP(p.IngressVIP); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ingressVIP"), p.IngressVIP, err.Error())) + } + + if len(p.APIVIP) != 0 && len(p.IngressVIP) != 0 && p.APIVIP == p.IngressVIP { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVIP"), p.APIVIP, "IPs for both API and Ingress should not be the same")) + } + + return allErrs +} diff --git a/pkg/types/nutanix/validation/platform_test.go b/pkg/types/nutanix/validation/platform_test.go new file mode 100644 index 00000000000..e3c441984a2 --- /dev/null +++ b/pkg/types/nutanix/validation/platform_test.go @@ -0,0 +1,157 @@ +package validation + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/openshift/installer/pkg/types/nutanix" +) + +func validPlatform() *nutanix.Platform { + return 
&nutanix.Platform{ + PrismCentral: "test-pc", + PrismElement: "test-pe", + Username: "test-username", + Password: "test-password", + Subnet: "test-subnet", + Port: "8080", + } +} + +func TestValidatePlatform(t *testing.T) { + cases := []struct { + name string + platform *nutanix.Platform + expectedError string + }{ + { + name: "minimal", + platform: validPlatform(), + }, + { + name: "missing Prism Central name", + platform: func() *nutanix.Platform { + p := validPlatform() + p.PrismCentral = "" + return p + }(), + expectedError: `^test-path\.prismCentral: Required value: must specify the Prism Central$`, + }, + { + name: "missing username", + platform: func() *nutanix.Platform { + p := validPlatform() + p.Username = "" + return p + }(), + expectedError: `^test-path\.username: Required value: must specify the username$`, + }, + { + name: "missing password", + platform: func() *nutanix.Platform { + p := validPlatform() + p.Password = "" + return p + }(), + expectedError: `^test-path\.password: Required value: must specify the password$`, + }, + { + name: "missing prism element", + platform: func() *nutanix.Platform { + p := validPlatform() + p.PrismElement = "" + return p + }(), + expectedError: `^test-path\.prismElement: Required value: must specify the Prism Element$`, + }, + { + name: "valid VIPs", + platform: func() *nutanix.Platform { + p := validPlatform() + p.APIVIP = "192.168.111.2" + p.IngressVIP = "192.168.111.3" + return p + }(), + }, + { + name: "missing API VIP", + platform: func() *nutanix.Platform { + p := validPlatform() + p.APIVIP = "" + p.IngressVIP = "192.168.111.3" + return p + }(), + expectedError: `^test-path\.apiVIP: Required value: must specify a VIP for the API$`, + }, + { + name: "missing Ingress VIP", + platform: func() *nutanix.Platform { + p := validPlatform() + p.APIVIP = "192.168.111.2" + p.IngressVIP = "" + return p + }(), + expectedError: `^test-path\.ingressVIP: Required value: must specify a VIP for Ingress$`, + }, + { + name: 
"Invalid API VIP", + platform: func() *nutanix.Platform { + p := validPlatform() + p.APIVIP = "192.168.111" + p.IngressVIP = "192.168.111.2" + return p + }(), + expectedError: `^test-path\.apiVIP: Invalid value: "192.168.111": "192.168.111" is not a valid IP$`, + }, + { + name: "Invalid Ingress VIP", + platform: func() *nutanix.Platform { + p := validPlatform() + p.APIVIP = "192.168.111.1" + p.IngressVIP = "192.168.111" + return p + }(), + expectedError: `^test-path\.ingressVIP: Invalid value: "192.168.111": "192.168.111" is not a valid IP$`, + }, + { + name: "Same API and Ingress VIP", + platform: func() *nutanix.Platform { + p := validPlatform() + p.APIVIP = "192.168.111.1" + p.IngressVIP = "192.168.111.1" + return p + }(), + expectedError: `^test-path\.apiVIP: Invalid value: "192.168.111.1": IPs for both API and Ingress should not be the same$`, + }, + { + name: "Capital letters in Prism Central", + platform: func() *nutanix.Platform { + p := validPlatform() + p.PrismCentral = "tEsT-PrismCentral" + return p + }(), + expectedError: `^test-path\.prismCentral: Invalid value: "tEsT-PrismCentral": must be the domain name or IP address of the Prism Central$`, + }, + { + name: "URL as Prism Central", + platform: func() *nutanix.Platform { + p := validPlatform() + p.PrismCentral = "https://test-pc" + return p + }(), + expectedError: `^test-path\.prismCentral: Invalid value: "https://test-pc": must be the domain name or IP address of the Prism Central$`, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := ValidatePlatform(tc.platform, field.NewPath("test-path")).ToAggregate() + if tc.expectedError == "" { + assert.NoError(t, err) + } else { + assert.Regexp(t, tc.expectedError, err) + } + }) + } +} diff --git a/pkg/types/validation/installconfig.go b/pkg/types/validation/installconfig.go index 3ebabed8611..4f8bc39db7d 100644 --- a/pkg/types/validation/installconfig.go +++ b/pkg/types/validation/installconfig.go @@ -35,6 +35,8 @@ import ( 
ibmcloudvalidation "github.com/openshift/installer/pkg/types/ibmcloud/validation" "github.com/openshift/installer/pkg/types/libvirt" libvirtvalidation "github.com/openshift/installer/pkg/types/libvirt/validation" + "github.com/openshift/installer/pkg/types/nutanix" + nutanixvalidation "github.com/openshift/installer/pkg/types/nutanix/validation" "github.com/openshift/installer/pkg/types/openstack" openstackvalidation "github.com/openshift/installer/pkg/types/openstack/validation" "github.com/openshift/installer/pkg/types/ovirt" @@ -500,6 +502,11 @@ func validatePlatform(platform *types.Platform, fldPath *field.Path, network *ty return ovirtvalidation.ValidatePlatform(platform.Ovirt, f) }) } + if platform.Nutanix != nil { + validate(nutanix.Name, platform.Nutanix, func(f *field.Path) field.ErrorList { + return nutanixvalidation.ValidatePlatform(platform.Nutanix, f) + }) + } return allErrs } diff --git a/pkg/types/validation/installconfig_test.go b/pkg/types/validation/installconfig_test.go index 848982f402d..f3ac8093a4f 100644 --- a/pkg/types/validation/installconfig_test.go +++ b/pkg/types/validation/installconfig_test.go @@ -20,6 +20,7 @@ import ( "github.com/openshift/installer/pkg/types/ibmcloud" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" + "github.com/openshift/installer/pkg/types/nutanix" "github.com/openshift/installer/pkg/types/openstack" "github.com/openshift/installer/pkg/types/ovirt" "github.com/openshift/installer/pkg/types/vsphere" @@ -154,6 +155,17 @@ func validOpenStackPlatform() *openstack.Platform { } } +func validNutanixPlatform() *nutanix.Platform { + return &nutanix.Platform{ + PrismCentral: "test-pc", + PrismElement: "test-pe", + Username: "test-username", + Password: "test-password", + Subnet: "test-subnet", + Port: "8080", + } +} + func validIPv4NetworkingConfig() *types.Networking { return &types.Networking{ NetworkType: "OpenShiftSDN", @@ -524,7 +536,7 @@ func 
TestValidateInstallConfig(t *testing.T) { c.Platform = types.Platform{} return c }(), - expectedError: `^platform: Invalid value: "": must specify one of the platforms \(alibabacloud, aws, azure, baremetal, gcp, ibmcloud, none, openstack, ovirt, vsphere\)$`, + expectedError: `^platform: Invalid value: "": must specify one of the platforms \(alibabacloud, aws, azure, baremetal, gcp, ibmcloud, none, nutanix, openstack, ovirt, vsphere\)$`, }, { name: "multiple platforms", @@ -555,7 +567,7 @@ func TestValidateInstallConfig(t *testing.T) { } return c }(), - expectedError: `^platform: Invalid value: "libvirt": must specify one of the platforms \(alibabacloud, aws, azure, baremetal, gcp, ibmcloud, none, openstack, ovirt, vsphere\)$`, + expectedError: `^platform: Invalid value: "libvirt": must specify one of the platforms \(alibabacloud, aws, azure, baremetal, gcp, ibmcloud, none, nutanix, openstack, ovirt, vsphere\)$`, }, { name: "invalid libvirt platform", @@ -567,7 +579,7 @@ func TestValidateInstallConfig(t *testing.T) { c.Platform.Libvirt.URI = "" return c }(), - expectedError: `^\[platform: Invalid value: "libvirt": must specify one of the platforms \(alibabacloud, aws, azure, baremetal, gcp, ibmcloud, none, openstack, ovirt, vsphere\), platform\.libvirt\.uri: Invalid value: "": invalid URI "" \(no scheme\)]$`, + expectedError: `^\[platform: Invalid value: "libvirt": must specify one of the platforms \(alibabacloud, aws, azure, baremetal, gcp, ibmcloud, none, nutanix, openstack, ovirt, vsphere\), platform\.libvirt\.uri: Invalid value: "": invalid URI "" \(no scheme\)]$`, }, { name: "valid none platform", @@ -1398,6 +1410,26 @@ func TestValidateInstallConfig(t *testing.T) { c.Publish = types.InternalPublishingStrategy return c }(), + }, { + name: "valid nutanix platform", + installConfig: func() *types.InstallConfig { + c := validInstallConfig() + c.Platform = types.Platform{ + Nutanix: validNutanixPlatform(), + } + return c + }(), + }, { + name: "invalid nutanix 
platform", + installConfig: func() *types.InstallConfig { + c := validInstallConfig() + c.Platform = types.Platform{ + Nutanix: validNutanixPlatform(), + } + c.Platform.Nutanix.PrismCentral = "" + return c + }(), + expectedError: `^platform\.nutanix\.prismCentral: Required value: must specify the Prism Central$`, }, } for _, tc := range cases { diff --git a/vendor/github.com/kdomanski/iso9660/LICENSE b/vendor/github.com/kdomanski/iso9660/LICENSE new file mode 100644 index 00000000000..221d965e586 --- /dev/null +++ b/vendor/github.com/kdomanski/iso9660/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2019-2020, Kamil DomaƄski and contributors + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/kdomanski/iso9660/README.md b/vendor/github.com/kdomanski/iso9660/README.md new file mode 100644 index 00000000000..1ee7c719244 --- /dev/null +++ b/vendor/github.com/kdomanski/iso9660/README.md @@ -0,0 +1,79 @@ +## iso9660 +[![GoDoc](https://godoc.org/github.com/kdomanski/iso9660?status.svg)](https://godoc.org/github.com/kdomanski/iso9660) + +A package for reading and creating ISO9660 + +Joliet and Rock Ridge extensions are not supported. + +## Examples + +### Extracting an ISO + +```go +package main + +import ( + "log" + + "github.com/kdomanski/iso9660/util" +) + +func main() { + f, err := os.Open("/home/user/myImage.iso") + if err != nil { + log.Fatalf("failed to open file: %s", err) + } + defer f.Close() + + if err = util.ExtractImageToDirectory(f, "/home/user/target_dir"); err != nil { + log.Fatalf("failed to extract image: %s", err) + } +} +``` + +### Creating an ISO + +```go +package main + +import ( + "log" + "os" + + "github.com/kdomanski/iso9660" +) + +func main() { + writer, err := iso9660.NewWriter() + if err != nil { + log.Fatalf("failed to create writer: %s", err) + } + defer writer.Cleanup() + + f, err := os.Open("/home/user/myFile.txt") + if err != nil { + log.Fatalf("failed to open file: %s", err) + } + defer f.Close() + + err = writer.AddFile(f, "folder/MYFILE.TXT") + if err != nil { + log.Fatalf("failed to add file: %s", err) + } + + outputFile, err := os.OpenFile("/home/user/output.iso", os.O_WRONLY | os.O_TRUNC | os.O_CREATE, 0644) + if err != nil { + log.Fatalf("failed to create file: %s", err) + } + + err = writer.WriteTo(outputFile, "testvol") + if err != nil { + log.Fatalf("failed to write ISO image: %s", err) + } + + err = outputFile.Close() + if err != nil { + log.Fatalf("failed to close output file: %s", err) + } +} +``` diff --git a/vendor/github.com/kdomanski/iso9660/image_reader.go b/vendor/github.com/kdomanski/iso9660/image_reader.go new file mode 100644 index 00000000000..51f4b212b8f --- /dev/null +++ 
b/vendor/github.com/kdomanski/iso9660/image_reader.go @@ -0,0 +1,186 @@ +package iso9660 + +import ( + "fmt" + "io" + "os" + "strings" + "time" +) + +// Image is a wrapper around an image file that allows reading its ISO9660 data +type Image struct { + ra io.ReaderAt + volumeDescriptors []volumeDescriptor +} + +// OpenImage returns an Image reader reating from a given file +func OpenImage(ra io.ReaderAt) (*Image, error) { + i := &Image{ra: ra} + + if err := i.readVolumes(); err != nil { + return nil, err + } + + return i, nil +} + +func (i *Image) readVolumes() error { + buffer := make([]byte, sectorSize) + // skip the 16 sectors of system area + for sector := 16; ; sector++ { + if _, err := i.ra.ReadAt(buffer, int64(sector)*int64(sectorSize)); err != nil { + return err + } + + var vd volumeDescriptor + if err := vd.UnmarshalBinary(buffer); err != nil { + return err + } + + i.volumeDescriptors = append(i.volumeDescriptors, vd) + if vd.Header.Type == volumeTypeTerminator { + break + } + } + + return nil +} + +// RootDir returns the File structure corresponding to the root directory +// of the first primary volume +func (i *Image) RootDir() (*File, error) { + for _, vd := range i.volumeDescriptors { + if vd.Type() == volumeTypePrimary { + return &File{de: vd.Primary.RootDirectoryEntry, ra: i.ra, children: nil}, nil + } + } + return nil, fmt.Errorf("no primary volumes found") +} + +// File is a os.FileInfo-compatible wrapper around an ISO9660 directory entry +type File struct { + ra io.ReaderAt + de *DirectoryEntry + children []*File +} + +var _ os.FileInfo = &File{} + +// IsDir returns true if the entry is a directory or false otherwise +func (f *File) IsDir() bool { + return f.de.FileFlags&dirFlagDir != 0 +} + +// ModTime returns the entry's recording time +func (f *File) ModTime() time.Time { + return time.Time(f.de.RecordingDateTime) +} + +// Mode returns os.FileMode flag set with the os.ModeDir flag enabled in case of directories +func (f *File) Mode() 
os.FileMode { + var mode os.FileMode + if f.IsDir() { + mode |= os.ModeDir + } + return mode +} + +// Name returns the base name of the given entry +func (f *File) Name() string { + if f.IsDir() { + return f.de.Identifier + } + + // drop the version part + // assume only one ';' + fileIdentifier := strings.Split(f.de.Identifier, ";")[0] + + // split into filename and extension + // assume only only one '.' + splitFileIdentifier := strings.Split(fileIdentifier, ".") + + // there's no dot in the name, thus no extension + if len(splitFileIdentifier) == 1 { + return splitFileIdentifier[0] + } + + // extension is empty, return just the name without a dot + if len(splitFileIdentifier[1]) == 0 { + return splitFileIdentifier[0] + } + + // return file with extension + return fileIdentifier +} + +// Size returns the size in bytes of the extent occupied by the file or directory +func (f *File) Size() int64 { + return int64(f.de.ExtentLength) +} + +// Sys returns nil +func (f *File) Sys() interface{} { + return nil +} + +// GetChildren returns the chilren entries in case of a directory +// or an error in case of a file +func (f *File) GetChildren() ([]*File, error) { + if !f.IsDir() { + return nil, fmt.Errorf("%s is not a directory", f.Name()) + } + + if f.children != nil { + return f.children, nil + } + + baseOffset := uint32(f.de.ExtentLocation) * sectorSize + + buffer := make([]byte, sectorSize) + for bytesProcessed := uint32(0); bytesProcessed < uint32(f.de.ExtentLength); bytesProcessed += sectorSize { + if _, err := f.ra.ReadAt(buffer, int64(baseOffset+bytesProcessed)); err != nil { + return nil, nil + } + + for i := uint32(0); i < sectorSize; { + entryLength := uint32(buffer[i]) + if entryLength == 0 { + break + } + + if i+entryLength > sectorSize { + return nil, fmt.Errorf("reading directory entries: DE outside of sector boundries") + } + + newDE := &DirectoryEntry{} + if err := newDE.UnmarshalBinary(buffer[i : i+entryLength]); err != nil { + return nil, err + } + i += 
entryLength + if newDE.Identifier == string([]byte{0}) || newDE.Identifier == string([]byte{1}) { + continue + } + + newFile := &File{ra: f.ra, + de: newDE, + children: nil, + } + + f.children = append(f.children, newFile) + } + } + + return f.children, nil +} + +// Reader returns a reader that allows to read the file's data. +// If File is a directory, it returns nil. +func (f *File) Reader() io.Reader { + if f.IsDir() { + return nil + } + + baseOffset := int64(f.de.ExtentLocation) * int64(sectorSize) + return io.NewSectionReader(f.ra, baseOffset, int64(f.de.ExtentLength)) +} diff --git a/vendor/github.com/kdomanski/iso9660/image_writer.go b/vendor/github.com/kdomanski/iso9660/image_writer.go new file mode 100644 index 00000000000..a9cd0f1681e --- /dev/null +++ b/vendor/github.com/kdomanski/iso9660/image_writer.go @@ -0,0 +1,589 @@ +package iso9660 + +import ( + "bytes" + "container/list" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "path" + "runtime" + "strings" + "sync/atomic" + "time" +) + +const ( + primaryVolumeDirectoryIdentifierMaxLength = 31 // ECMA-119 7.6.3 + primaryVolumeFileIdentifierMaxLength = 30 // ECMA-119 7.5 +) + +var ( + // ErrFileTooLarge is returned when trying to process a file of size greater + // than 4GB, which due to the 32-bit address limitation is not possible + // except with ISO 9660-Level 3 + ErrFileTooLarge = errors.New("file is exceeding the maximum file size of 4GB") +) + +// ImageWriter is responsible for staging an image's contents +// and writing them to an image. +type ImageWriter struct { + stagingDir string +} + +// NewWriter creates a new ImageWrite and initializes its temporary staging dir. +// Cleanup should be called after the ImageWriter is no longer needed. +func NewWriter() (*ImageWriter, error) { + tmp, err := ioutil.TempDir("", "") + if err != nil { + return nil, err + } + + return &ImageWriter{stagingDir: tmp}, nil +} + +// Cleanup deletes the underlying temporary staging directory of an ImageWriter. 
+// It can be called multiple times without issues. +func (iw *ImageWriter) Cleanup() error { + if iw.stagingDir == "" { + return nil + } + + if err := os.RemoveAll(iw.stagingDir); err != nil { + return err + } + + iw.stagingDir = "" + return nil +} + +// AddFile adds a file to the ImageWriter's staging area. +// All path components are mangled to match basic ISO9660 filename requirements. +func (iw *ImageWriter) AddFile(data io.Reader, filePath string) error { + directoryPath, fileName := manglePath(filePath) + + if err := os.MkdirAll(path.Join(iw.stagingDir, directoryPath), 0755); err != nil { + return err + } + + f, err := os.OpenFile(path.Join(iw.stagingDir, directoryPath, fileName), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer f.Close() + + _, err = io.Copy(f, data) + return err +} + +// AddLocalFile adds a file identified by its path to the ImageWriter's staging area. +func (iw *ImageWriter) AddLocalFile(origin, target string) error { + directoryPath, fileName := manglePath(target) + + if err := os.MkdirAll(path.Join(iw.stagingDir, directoryPath), 0755); err != nil { + return err + } + + // try to hardlink file to staging area before copying. 
+ stagedFile := path.Join(iw.stagingDir, directoryPath, fileName) + if err := os.Remove(stagedFile); err != nil && !os.IsNotExist(err) { + return err + } + + if err := os.Link(origin, stagedFile); err == nil { + return nil + } + + f, err := os.Open(origin) + if err != nil { + return err + } + + defer f.Close() + + return iw.AddFile(f, target) +} + +func manglePath(input string) (string, string) { + nonEmptySegments := splitPath(input) + + dirSegments := nonEmptySegments[:len(nonEmptySegments)-1] + name := nonEmptySegments[len(nonEmptySegments)-1] + + for i := 0; i < len(dirSegments); i++ { + dirSegments[i] = mangleDirectoryName(dirSegments[i]) + } + name = mangleFileName(name) + + return path.Join(dirSegments...), name +} + +func splitPath(input string) []string { + rawSegments := strings.Split(input, "/") + var nonEmptySegments []string + for _, s := range rawSegments { + if len(s) > 0 { + nonEmptySegments = append(nonEmptySegments, s) + } + } + return nonEmptySegments +} + +// See ECMA-119 7.5 +func mangleFileName(input string) string { + // https://github.com/torvalds/linux/blob/v5.6/fs/isofs/dir.c#L29 + input = strings.ToLower(input) + split := strings.Split(input, ".") + + version := "1" + var filename, extension string + if len(split) == 1 { + filename = split[0] + } else { + filename = strings.Join(split[:len(split)-1], "_") + extension = split[len(split)-1] + } + + // enough characters for the `.ignition` extension + extension = mangleD1String(extension, 8) + + maxRemainingFilenameLength := primaryVolumeFileIdentifierMaxLength - (1 + len(version)) + if len(extension) > 0 { + maxRemainingFilenameLength -= (1 + len(extension)) + } + + filename = mangleD1String(filename, maxRemainingFilenameLength) + + if len(extension) > 0 { + return filename + "." 
+ extension + ";" + version + } + + return filename + ";" + version +} + +// See ECMA-119 7.6 +func mangleDirectoryName(input string) string { + return mangleD1String(input, primaryVolumeDirectoryIdentifierMaxLength) +} + +func mangleD1String(input string, maxCharacters int) string { + // https://github.com/torvalds/linux/blob/v5.6/fs/isofs/dir.c#L29 + input = strings.ToLower(input) + + var mangledString string + for i := 0; i < len(input) && i < maxCharacters; i++ { + r := rune(input[i]) + if strings.ContainsRune(d1Characters, r) { + mangledString += string(r) + } else { + mangledString += "_" + } + } + + return mangledString +} + +// calculateDirChildrenSectors calculates the total mashalled size of all DirectoryEntries +// within a directory. The size of each entry depends of the length of the filename. +func calculateDirChildrenSectors(path string) (uint32, error) { + contents, err := ioutil.ReadDir(path) + if err != nil { + return 0, err + } + + var sectors uint32 + var currentSectorOccupied uint32 = 68 // the 0x00 and 0x01 entries + + for _, c := range contents { + identifierLen := len(c.Name()) + idPaddingLen := (identifierLen + 1) % 2 + entryLength := uint32(33 + identifierLen + idPaddingLen) + + if currentSectorOccupied+entryLength > sectorSize { + sectors++ + currentSectorOccupied = entryLength + } else { + currentSectorOccupied += entryLength + } + } + + if currentSectorOccupied > 0 { + sectors++ + } + + return sectors, nil +} + +func fileLengthToSectors(l uint32) uint32 { + if (l % sectorSize) == 0 { + return l / sectorSize + } + + return (l / sectorSize) + 1 +} + +type writeContext struct { + stagingDir string + timestamp RecordingTimestamp + freeSectorPointer uint32 +} + +func (wc *writeContext) allocateSectors(n uint32) uint32 { + return atomic.AddUint32(&wc.freeSectorPointer, n) - n +} + +func (wc *writeContext) createDEForRoot() (*DirectoryEntry, error) { + extentLengthInSectors, err := calculateDirChildrenSectors(wc.stagingDir) + if err != nil { + 
return nil, err + } + + extentLocation := wc.allocateSectors(extentLengthInSectors) + de := &DirectoryEntry{ + ExtendedAtributeRecordLength: 0, + ExtentLocation: int32(extentLocation), + ExtentLength: int32(extentLengthInSectors * sectorSize), + RecordingDateTime: wc.timestamp, + FileFlags: dirFlagDir, + FileUnitSize: 0, // 0 for non-interleaved write + InterleaveGap: 0, // not interleaved + VolumeSequenceNumber: 1, // we only have one volume + Identifier: string([]byte{0}), + SystemUse: []byte{}, + } + return de, nil +} + +type itemToWrite struct { + isDirectory bool + dirPath string + ownEntry *DirectoryEntry + parentEntery *DirectoryEntry + childrenEntries []*DirectoryEntry + targetSector uint32 +} + +// scanDirectory reads the directory's contents and adds them to the queue, as well as stores all their DirectoryEntries in the item, +// because we'll need them to write this item's descriptor. +func (wc *writeContext) scanDirectory(item *itemToWrite, dirPath string, ownEntry *DirectoryEntry, parentEntery *DirectoryEntry, targetSector uint32) (*list.List, error) { + contents, err := ioutil.ReadDir(dirPath) + if err != nil { + return nil, err + } + + itemsToWrite := list.New() + + for _, c := range contents { + var ( + fileFlags byte + extentLengthInSectors uint32 + extentLength uint32 + ) + if c.IsDir() { + extentLengthInSectors, err = calculateDirChildrenSectors(path.Join(dirPath, c.Name())) + if err != nil { + return nil, err + } + fileFlags = dirFlagDir + extentLength = extentLengthInSectors * sectorSize + } else { + if c.Size() > int64(math.MaxUint32) { + return nil, ErrFileTooLarge + } + extentLength = uint32(c.Size()) + extentLengthInSectors = fileLengthToSectors(extentLength) + + fileFlags = 0 + } + + extentLocation := wc.allocateSectors(extentLengthInSectors) + de := &DirectoryEntry{ + ExtendedAtributeRecordLength: 0, + ExtentLocation: int32(extentLocation), + ExtentLength: int32(extentLength), + RecordingDateTime: wc.timestamp, + FileFlags: fileFlags, + 
FileUnitSize: 0, // 0 for non-interleaved write + InterleaveGap: 0, // not interleaved + VolumeSequenceNumber: 1, // we only have one volume + Identifier: c.Name(), + SystemUse: []byte{}, + } + + // Add this child's descriptor to the currently scanned directory's list of children, + // so that later we can use it for writing the current item. + if item.childrenEntries == nil { + item.childrenEntries = []*DirectoryEntry{de} + } else { + item.childrenEntries = append(item.childrenEntries, de) + } + + // queue this child for processing + itemsToWrite.PushBack(itemToWrite{ + isDirectory: c.IsDir(), + dirPath: path.Join(dirPath, c.Name()), + ownEntry: de, + parentEntery: ownEntry, + targetSector: uint32(de.ExtentLocation), + }) + } + + return itemsToWrite, nil +} + +// processDirectory writes a given directory item to the destination sectors +func processDirectory(w io.Writer, children []*DirectoryEntry, ownEntry *DirectoryEntry, parentEntry *DirectoryEntry) error { + var currentOffset uint32 + + currentDE := ownEntry.Clone() + currentDE.Identifier = string([]byte{0}) + parentDE := parentEntry.Clone() + parentDE.Identifier = string([]byte{1}) + + currentDEData, err := currentDE.MarshalBinary() + if err != nil { + return err + } + parentDEData, err := parentDE.MarshalBinary() + if err != nil { + return err + } + + n, err := w.Write(currentDEData) + if err != nil { + return err + } + currentOffset += uint32(n) + n, err = w.Write(parentDEData) + if err != nil { + return err + } + currentOffset += uint32(n) + + for _, childDescriptor := range children { + data, err := childDescriptor.MarshalBinary() + if err != nil { + return err + } + + remainingSectorSpace := sectorSize - (currentOffset % sectorSize) + if remainingSectorSpace < uint32(len(data)) { + // ECMA-119 6.8.1.1 If the body of the next descriptor won't fit into the sector, + // we fill the rest of space with zeros and skip to the next sector. 
+ zeros := bytes.Repeat([]byte{0}, int(remainingSectorSpace)) + _, err = w.Write(zeros) + if err != nil { + return err + } + + // skip to the next sector + currentOffset = 0 + } + + n, err = w.Write(data) + if err != nil { + return err + } + currentOffset += uint32(n) + } + + // fill with zeros to the end of the sector + remainingSectorSpace := sectorSize - (currentOffset % sectorSize) + if remainingSectorSpace != 0 { + zeros := bytes.Repeat([]byte{0}, int(remainingSectorSpace)) + _, err = w.Write(zeros) + if err != nil { + return err + } + } + + return nil +} + +func processFile(w io.Writer, dirPath string) error { + f, err := os.Open(dirPath) + if err != nil { + return err + } + defer f.Close() + + fileinfo, err := f.Stat() + if err != nil { + return err + } + + if fileinfo.Size() > int64(math.MaxUint32) { + return ErrFileTooLarge + } + + buffer := make([]byte, sectorSize) + + for bytesLeft := uint32(fileinfo.Size()); bytesLeft > 0; { + var toRead uint32 + if bytesLeft < sectorSize { + toRead = bytesLeft + } else { + toRead = sectorSize + } + + if _, err = io.ReadAtLeast(f, buffer, int(toRead)); err != nil { + return err + } + + if _, err = w.Write(buffer); err != nil { + return err + } + + bytesLeft -= toRead + } + // We already write a whole sector-sized buffer, so there's need to fill with zeroes. 
+ + return nil +} + +// traverseStagingDir creates a new queue of items to write by traversing the staging directory +func (wc *writeContext) traverseStagingDir(rootItem itemToWrite) (*list.List, error) { + itemsToWrite := list.New() + itemsToWrite.PushBack(rootItem) + + for item := itemsToWrite.Front(); item != nil; item = item.Next() { + it := item.Value.(itemToWrite) + + if it.isDirectory { + newItems, err := wc.scanDirectory(&it, it.dirPath, it.ownEntry, it.parentEntery, it.targetSector) + if err != nil { + relativePath := it.dirPath[len(wc.stagingDir):] + return nil, fmt.Errorf("processing %s: %s", relativePath, err) + } + itemsToWrite.PushBackList(newItems) + } + + item.Value = it + } + + return itemsToWrite, nil +} + +func writeAll(w io.Writer, itemsToWrite *list.List) error { + for item := itemsToWrite.Front(); item != nil; item = item.Next() { + it := item.Value.(itemToWrite) + var err error + if it.isDirectory { + err = processDirectory(w, it.childrenEntries, it.ownEntry, it.parentEntery) + } else { + err = processFile(w, it.dirPath) + } + + if err != nil { + return err + } + } + + return nil +} + +// WriteTo writes the image to the given WriterAt +func (iw *ImageWriter) WriteTo(w io.Writer, volumeIdentifier string) error { + buffer := make([]byte, sectorSize) + var err error + + now := time.Now() + + wc := writeContext{ + stagingDir: iw.stagingDir, + timestamp: RecordingTimestamp{}, + freeSectorPointer: 18, // system area (16) + 2 volume descriptors + } + + rootDE, err := wc.createDEForRoot() + if err != nil { + return fmt.Errorf("creating root directory descriptor: %s", err) + } + + rootItem := itemToWrite{ + isDirectory: true, + dirPath: wc.stagingDir, + ownEntry: rootDE, + parentEntery: rootDE, + targetSector: uint32(rootDE.ExtentLocation), + } + + itemsToWrite, err := wc.traverseStagingDir(rootItem) + if err != nil { + return fmt.Errorf("tranversing staging directory: %s", err) + } + + pvd := volumeDescriptor{ + Header: volumeDescriptorHeader{ + 
Type: volumeTypePrimary, + Identifier: standardIdentifierBytes, + Version: 1, + }, + Primary: &PrimaryVolumeDescriptorBody{ + SystemIdentifier: runtime.GOOS, + VolumeIdentifier: volumeIdentifier, + VolumeSpaceSize: int32(wc.freeSectorPointer), + VolumeSetSize: 1, + VolumeSequenceNumber: 1, + LogicalBlockSize: int16(sectorSize), + PathTableSize: 0, + TypeLPathTableLoc: 0, + OptTypeLPathTableLoc: 0, + TypeMPathTableLoc: 0, + OptTypeMPathTableLoc: 0, + RootDirectoryEntry: rootDE, + VolumeSetIdentifier: "", + PublisherIdentifier: "", + DataPreparerIdentifier: "", + ApplicationIdentifier: "github.com/kdomanski/iso9660", + CopyrightFileIdentifier: "", + AbstractFileIdentifier: "", + BibliographicFileIdentifier: "", + VolumeCreationDateAndTime: VolumeDescriptorTimestampFromTime(now), + VolumeModificationDateAndTime: VolumeDescriptorTimestampFromTime(now), + VolumeExpirationDateAndTime: VolumeDescriptorTimestamp{}, + VolumeEffectiveDateAndTime: VolumeDescriptorTimestampFromTime(now), + FileStructureVersion: 1, + ApplicationUsed: [512]byte{}, + }, + } + + terminator := volumeDescriptor{ + Header: volumeDescriptorHeader{ + Type: volumeTypeTerminator, + Identifier: standardIdentifierBytes, + Version: 1, + }, + } + + // write 16 sectors of zeroes + zeroSector := bytes.Repeat([]byte{0}, int(sectorSize)) + for i := uint32(0); i < 16; i++ { + if _, err = w.Write(zeroSector); err != nil { + return err + } + } + + if buffer, err = pvd.MarshalBinary(); err != nil { + return err + } + if _, err = w.Write(buffer); err != nil { + return err + } + + if buffer, err = terminator.MarshalBinary(); err != nil { + return err + } + if _, err = w.Write(buffer); err != nil { + return err + } + + if err = writeAll(w, itemsToWrite); err != nil { + return fmt.Errorf("writing files: %s", err) + } + + return nil +} diff --git a/vendor/github.com/kdomanski/iso9660/iso9660.go b/vendor/github.com/kdomanski/iso9660/iso9660.go new file mode 100644 index 00000000000..850b5e11cdf --- /dev/null +++ 
b/vendor/github.com/kdomanski/iso9660/iso9660.go @@ -0,0 +1,596 @@ +// Package iso9660 implements reading and creating basic ISO9660 images. +package iso9660 + +import ( + "encoding" + "encoding/binary" + "errors" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// ISO 9660 Overview +// https://archive.fo/xs9ac + +const ( + sectorSize uint32 = 2048 + systemAreaSize = sectorSize * 16 + standardIdentifier = "CD001" + + volumeTypeBoot byte = 0 + volumeTypePrimary byte = 1 + volumeTypeSupplementary byte = 2 + volumeTypePartition byte = 3 + volumeTypeTerminator byte = 255 + + volumeDescriptorBodySize = sectorSize - 7 + + aCharacters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_!\"%&'()*+,-./:;<=>?" + dCharacters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_" + // ECMA-119 7.4.2.2 defines d1-characters as + // "subject to agreement between the originator and the recipient of the volume". + d1Characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_!\"%&'()*+,-./:;<=>?" +) + +const ( + dirFlagHidden = 1 << iota + dirFlagDir + dirFlagAssociated + dirFlagRecord + dirFlagProtection + _ + _ + dirFlagMultiExtent +) + +var standardIdentifierBytes = [5]byte{'C', 'D', '0', '0', '1'} + +// volumeDescriptorHeader represents the data in bytes 0-6 +// of a Volume Descriptor as defined in ECMA-119 8.1 +type volumeDescriptorHeader struct { + Type byte + Identifier [5]byte + Version byte +} + +var _ encoding.BinaryUnmarshaler = &volumeDescriptorHeader{} +var _ encoding.BinaryMarshaler = &volumeDescriptorHeader{} + +// UnmarshalBinary decodes a volumeDescriptorHeader from binary form +func (vdh *volumeDescriptorHeader) UnmarshalBinary(data []byte) error { + if len(data) < 7 { + return io.ErrUnexpectedEOF + } + + vdh.Type = data[0] + copy(vdh.Identifier[:], data[1:6]) + vdh.Version = data[6] + return nil +} + +func (vdh volumeDescriptorHeader) MarshalBinary() ([]byte, error) { + data := make([]byte, 7) + data[0] = vdh.Type + data[6] = vdh.Version + copy(data[1:6], 
vdh.Identifier[:]) + return data, nil +} + +// BootVolumeDescriptorBody represents the data in bytes 7-2047 +// of a Boot Record as defined in ECMA-119 8.2 +type BootVolumeDescriptorBody struct { + BootSystemIdentifier string + BootIdentifier string + BootSystemUse [1977]byte +} + +var _ encoding.BinaryUnmarshaler = &BootVolumeDescriptorBody{} + +// PrimaryVolumeDescriptorBody represents the data in bytes 7-2047 +// of a Primary Volume Descriptor as defined in ECMA-119 8.4 +type PrimaryVolumeDescriptorBody struct { + SystemIdentifier string + VolumeIdentifier string + VolumeSpaceSize int32 + VolumeSetSize int16 + VolumeSequenceNumber int16 + LogicalBlockSize int16 + PathTableSize int32 + TypeLPathTableLoc int32 + OptTypeLPathTableLoc int32 + TypeMPathTableLoc int32 + OptTypeMPathTableLoc int32 + RootDirectoryEntry *DirectoryEntry + VolumeSetIdentifier string + PublisherIdentifier string + DataPreparerIdentifier string + ApplicationIdentifier string + CopyrightFileIdentifier string + AbstractFileIdentifier string + BibliographicFileIdentifier string + VolumeCreationDateAndTime VolumeDescriptorTimestamp + VolumeModificationDateAndTime VolumeDescriptorTimestamp + VolumeExpirationDateAndTime VolumeDescriptorTimestamp + VolumeEffectiveDateAndTime VolumeDescriptorTimestamp + FileStructureVersion byte + ApplicationUsed [512]byte +} + +var _ encoding.BinaryUnmarshaler = &PrimaryVolumeDescriptorBody{} +var _ encoding.BinaryMarshaler = PrimaryVolumeDescriptorBody{} + +// DirectoryEntry contains data from a Directory Descriptor +// as described by ECMA-119 9.1 +type DirectoryEntry struct { + ExtendedAtributeRecordLength byte + ExtentLocation int32 + ExtentLength int32 + RecordingDateTime RecordingTimestamp + FileFlags byte + FileUnitSize byte + InterleaveGap byte + VolumeSequenceNumber int16 + Identifier string + SystemUse []byte +} + +var _ encoding.BinaryUnmarshaler = &DirectoryEntry{} +var _ encoding.BinaryMarshaler = &DirectoryEntry{} + +// UnmarshalBinary decodes a 
// a padding byte follows the identifier if its length is even
+ newDE := DirectoryEntry{ + ExtendedAtributeRecordLength: de.ExtendedAtributeRecordLength, + ExtentLocation: de.ExtentLocation, + ExtentLength: de.ExtentLength, + RecordingDateTime: de.RecordingDateTime, + FileFlags: de.FileFlags, + FileUnitSize: de.FileUnitSize, + InterleaveGap: de.InterleaveGap, + VolumeSequenceNumber: de.VolumeSequenceNumber, + Identifier: de.Identifier, + SystemUse: make([]byte, len(de.SystemUse)), + } + copy(newDE.SystemUse, de.SystemUse) + return newDE +} + +// UnmarshalBinary decodes a PrimaryVolumeDescriptorBody from binary form as defined in ECMA-119 8.4 +func (pvd *PrimaryVolumeDescriptorBody) UnmarshalBinary(data []byte) error { + if len(data) < 2048 { + return io.ErrUnexpectedEOF + } + + var err error + + pvd.SystemIdentifier = strings.TrimRight(string(data[8:40]), " ") + pvd.VolumeIdentifier = strings.TrimRight(string(data[40:72]), " ") + + if pvd.VolumeSpaceSize, err = UnmarshalInt32LSBMSB(data[80:88]); err != nil { + return err + } + + if pvd.VolumeSetSize, err = UnmarshalInt16LSBMSB(data[120:124]); err != nil { + return err + } + + if pvd.VolumeSequenceNumber, err = UnmarshalInt16LSBMSB(data[124:128]); err != nil { + return err + } + + if pvd.LogicalBlockSize, err = UnmarshalInt16LSBMSB(data[128:132]); err != nil { + return err + } + + if pvd.PathTableSize, err = UnmarshalInt32LSBMSB(data[132:140]); err != nil { + return err + } + + pvd.TypeLPathTableLoc = int32(binary.LittleEndian.Uint32(data[140:144])) + pvd.OptTypeLPathTableLoc = int32(binary.LittleEndian.Uint32(data[144:148])) + pvd.TypeMPathTableLoc = int32(binary.BigEndian.Uint32(data[148:152])) + pvd.OptTypeMPathTableLoc = int32(binary.BigEndian.Uint32(data[152:156])) + + if pvd.RootDirectoryEntry == nil { + pvd.RootDirectoryEntry = &DirectoryEntry{} + } + if err = pvd.RootDirectoryEntry.UnmarshalBinary(data[156:190]); err != nil { + return err + } + + pvd.VolumeSetIdentifier = strings.TrimRight(string(data[190:318]), " ") + pvd.PublisherIdentifier = 
strings.TrimRight(string(data[318:446]), " ") + pvd.DataPreparerIdentifier = strings.TrimRight(string(data[446:574]), " ") + pvd.ApplicationIdentifier = strings.TrimRight(string(data[574:702]), " ") + pvd.CopyrightFileIdentifier = strings.TrimRight(string(data[702:740]), " ") + pvd.AbstractFileIdentifier = strings.TrimRight(string(data[740:776]), " ") + pvd.BibliographicFileIdentifier = strings.TrimRight(string(data[776:813]), " ") + + if pvd.VolumeCreationDateAndTime.UnmarshalBinary(data[813:830]) != nil { + return err + } + + if pvd.VolumeModificationDateAndTime.UnmarshalBinary(data[830:847]) != nil { + return err + } + + if pvd.VolumeExpirationDateAndTime.UnmarshalBinary(data[847:864]) != nil { + return err + } + + if pvd.VolumeEffectiveDateAndTime.UnmarshalBinary(data[864:881]) != nil { + return err + } + + pvd.FileStructureVersion = data[881] + copy(pvd.ApplicationUsed[:], data[883:1395]) + + return nil +} + +// MarshalBinary encodes the PrimaryVolumeDescriptorBody to its binary form +func (pvd PrimaryVolumeDescriptorBody) MarshalBinary() ([]byte, error) { + output := make([]byte, sectorSize) + + d := MarshalString(pvd.SystemIdentifier, 32) + copy(output[8:40], d) + + d = MarshalString(pvd.VolumeIdentifier, 32) + copy(output[40:72], d) + + WriteInt32LSBMSB(output[80:88], pvd.VolumeSpaceSize) + WriteInt16LSBMSB(output[120:124], pvd.VolumeSetSize) + WriteInt16LSBMSB(output[124:128], pvd.VolumeSequenceNumber) + WriteInt16LSBMSB(output[128:132], pvd.LogicalBlockSize) + WriteInt32LSBMSB(output[132:140], pvd.PathTableSize) + + binary.LittleEndian.PutUint32(output[140:144], uint32(pvd.TypeLPathTableLoc)) + binary.LittleEndian.PutUint32(output[144:148], uint32(pvd.OptTypeLPathTableLoc)) + binary.BigEndian.PutUint32(output[148:152], uint32(pvd.TypeMPathTableLoc)) + binary.BigEndian.PutUint32(output[152:156], uint32(pvd.OptTypeMPathTableLoc)) + + binaryRDE, err := pvd.RootDirectoryEntry.MarshalBinary() + if err != nil { + return nil, err + } + copy(output[156:190], 
binaryRDE) + + copy(output[190:318], MarshalString(pvd.VolumeSetIdentifier, 128)) + copy(output[318:446], MarshalString(pvd.PublisherIdentifier, 128)) + copy(output[446:574], MarshalString(pvd.DataPreparerIdentifier, 128)) + copy(output[574:702], MarshalString(pvd.ApplicationIdentifier, 128)) + copy(output[702:740], MarshalString(pvd.CopyrightFileIdentifier, 38)) + copy(output[740:776], MarshalString(pvd.AbstractFileIdentifier, 36)) + copy(output[776:813], MarshalString(pvd.BibliographicFileIdentifier, 37)) + + d, err = pvd.VolumeCreationDateAndTime.MarshalBinary() + if err != nil { + return nil, err + } + copy(output[813:830], d) + + d, err = pvd.VolumeModificationDateAndTime.MarshalBinary() + if err != nil { + return nil, err + } + copy(output[830:847], d) + + d, err = pvd.VolumeExpirationDateAndTime.MarshalBinary() + if err != nil { + return nil, err + } + copy(output[847:864], d) + + d, err = pvd.VolumeEffectiveDateAndTime.MarshalBinary() + if err != nil { + return nil, err + } + copy(output[864:881], d) + + output[881] = pvd.FileStructureVersion + output[882] = 0 + copy(output[883:1395], pvd.ApplicationUsed[:]) + for i := 1395; i < 2048; i++ { + output[i] = 0 + } + + return output, nil +} + +// UnmarshalBinary decodes a BootVolumeDescriptorBody from binary form +func (bvd *BootVolumeDescriptorBody) UnmarshalBinary(data []byte) error { + bvd.BootSystemIdentifier = strings.TrimRight(string(data[7:39]), " ") + bvd.BootIdentifier = strings.TrimRight(string(data[39:71]), " ") + if n := copy(bvd.BootSystemUse[:], data[71:2048]); n != 1977 { + return fmt.Errorf("BootVolumeDescriptorBody.UnmarshalBinary: copied %d bytes", n) + } + return nil +} + +type volumeDescriptor struct { + Header volumeDescriptorHeader + Boot *BootVolumeDescriptorBody + Primary *PrimaryVolumeDescriptorBody +} + +var _ encoding.BinaryUnmarshaler = &volumeDescriptor{} +var _ encoding.BinaryMarshaler = &volumeDescriptor{} + +func (vd volumeDescriptor) Type() byte { + return vd.Header.Type +} + +// 
// MarshalBinary encodes a volumeDescriptor into binary form
&VolumeDescriptorTimestamp{} + +// MarshalBinary encodes the timestamp into a binary form +func (ts *VolumeDescriptorTimestamp) MarshalBinary() ([]byte, error) { + formatted := fmt.Sprintf("%04d%02d%02d%02d%02d%02d%02d", ts.Year, ts.Month, ts.Day, ts.Hour, ts.Minute, ts.Second, ts.Hundredth) + formattedBytes := append([]byte(formatted), byte(ts.Offset)) + if len(formattedBytes) != 17 { + return nil, fmt.Errorf("VolumeDescriptorTimestamp.MarshalBinary: the formatted timestamp is %d bytes long", len(formatted)) + } + return formattedBytes, nil +} + +// UnmarshalBinary decodes a VolumeDescriptorTimestamp from binary form +func (ts *VolumeDescriptorTimestamp) UnmarshalBinary(data []byte) error { + if len(data) < 17 { + return io.ErrUnexpectedEOF + } + + year, err := strconv.Atoi(strings.TrimSpace(string(data[0:4]))) + if err != nil { + return err + } + + month, err := strconv.Atoi(strings.TrimSpace(string(data[4:6]))) + if err != nil { + return err + } + + day, err := strconv.Atoi(strings.TrimSpace(string(data[6:8]))) + if err != nil { + return err + } + + hour, err := strconv.Atoi(strings.TrimSpace(string(data[8:10]))) + if err != nil { + return err + } + + min, err := strconv.Atoi(strings.TrimSpace(string(data[10:12]))) + if err != nil { + return err + } + + sec, err := strconv.Atoi(strings.TrimSpace(string(data[12:14]))) + if err != nil { + return err + } + + hundredth, err := strconv.Atoi(strings.TrimSpace(string(data[14:16]))) + if err != nil { + return err + } + + *ts = VolumeDescriptorTimestamp{ + Year: year, + Month: month, + Day: day, + Hour: hour, + Minute: min, + Second: sec, + Hundredth: hundredth, + Offset: int(data[16]), + } + + return nil +} + +// RecordingTimestamp represents a time and date format +// that can be encoded according to ECMA-119 9.1.5 +type RecordingTimestamp time.Time + +var _ encoding.BinaryUnmarshaler = &RecordingTimestamp{} + +// UnmarshalBinary decodes a RecordingTimestamp from binary form +func (ts *RecordingTimestamp) 
UnmarshalBinary(data []byte) error { + if len(data) < 7 { + return io.ErrUnexpectedEOF + } + + year := 1900 + int(data[0]) + month := int(data[1]) + day := int(data[2]) + hour := int(data[3]) + min := int(data[4]) + sec := int(data[5]) + tzOffset := int(data[6]) + secondsInAQuarter := 60 * 15 + + tz := time.FixedZone("", tzOffset*secondsInAQuarter) + *ts = RecordingTimestamp(time.Date(year, time.Month(month), day, hour, min, sec, 0, tz)) + return nil +} + +// MarshalBinary encodes the RecordingTimestamp in its binary form to a buffer +// of the length of 7 or more bytes +func (ts RecordingTimestamp) MarshalBinary(dst []byte) { + _ = dst[6] // early bounds check to guarantee safety of writes below + t := time.Time(ts) + year, month, day := t.Date() + hour, min, sec := t.Clock() + _, secOffset := t.Zone() + secondsInAQuarter := 60 * 15 + offsetInQuarters := secOffset / secondsInAQuarter + dst[0] = byte(year - 1900) + dst[1] = byte(month) + dst[2] = byte(day) + dst[3] = byte(hour) + dst[4] = byte(min) + dst[5] = byte(sec) + dst[6] = byte(offsetInQuarters) +} + +// VolumeDescriptorTimestampFromTime converts time.Time to VolumeDescriptorTimestamp +func VolumeDescriptorTimestampFromTime(t time.Time) VolumeDescriptorTimestamp { + t = t.UTC() + year, month, day := t.Date() + hour, minute, second := t.Clock() + hundredth := t.Nanosecond() / 10000000 + return VolumeDescriptorTimestamp{ + Year: year, + Month: int(month), + Day: day, + Hour: hour, + Minute: minute, + Second: second, + Hundredth: hundredth, + Offset: 0, // we converted to UTC + } +} diff --git a/vendor/github.com/kdomanski/iso9660/iso9660_datatypes.go b/vendor/github.com/kdomanski/iso9660/iso9660_datatypes.go new file mode 100644 index 00000000000..f3a7e5da551 --- /dev/null +++ b/vendor/github.com/kdomanski/iso9660/iso9660_datatypes.go @@ -0,0 +1,64 @@ +package iso9660 + +import ( + "encoding/binary" + "fmt" + "io" + "strings" +) + +// MarshalString encodes the given string as a byte array padded to the given 
length +func MarshalString(s string, padToLength int) []byte { + if len(s) > padToLength { + s = s[:padToLength] + } + missingPadding := padToLength - len(s) + s = s + strings.Repeat(" ", missingPadding) + return []byte(s) +} + +// UnmarshalInt32LSBMSB decodes a 32-bit integer in both byte orders, as defined in ECMA-119 7.3.3 +func UnmarshalInt32LSBMSB(data []byte) (int32, error) { + if len(data) < 8 { + return 0, io.ErrUnexpectedEOF + } + + lsb := int32(binary.LittleEndian.Uint32(data[0:4])) + msb := int32(binary.BigEndian.Uint32(data[4:8])) + + if lsb != msb { + return 0, fmt.Errorf("little-endian and big-endian value mismatch: %d != %d", lsb, msb) + } + + return lsb, nil +} + +// UnmarshalInt16LSBMSB decodes a 16-bit integer in both byte orders, as defined in ECMA-119 7.3.3 +func UnmarshalInt16LSBMSB(data []byte) (int16, error) { + if len(data) < 4 { + return 0, io.ErrUnexpectedEOF + } + + lsb := int16(binary.LittleEndian.Uint16(data[0:2])) + msb := int16(binary.BigEndian.Uint16(data[2:4])) + + if lsb != msb { + return 0, fmt.Errorf("little-endian and big-endian value mismatch: %d != %d", lsb, msb) + } + + return lsb, nil +} + +// WriteInt32LSBMSB writes a 32-bit integer in both byte orders, as defined in ECMA-119 7.3.3 +func WriteInt32LSBMSB(dst []byte, value int32) { + _ = dst[7] // early bounds check to guarantee safety of writes below + binary.LittleEndian.PutUint32(dst[0:4], uint32(value)) + binary.BigEndian.PutUint32(dst[4:8], uint32(value)) +} + +// WriteInt16LSBMSB writes a 16-bit integer in both byte orders, as defined in ECMA-119 7.2.3 +func WriteInt16LSBMSB(dst []byte, value int16) { + _ = dst[3] // early bounds check to guarantee safety of writes below + binary.LittleEndian.PutUint16(dst[0:2], uint16(value)) + binary.BigEndian.PutUint16(dst[2:4], uint16(value)) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-nutanix/LICENSE b/vendor/github.com/terraform-providers/terraform-provider-nutanix/LICENSE new file mode 100644 index 
00000000000..a612ad9813b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-nutanix/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. 
Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. 
Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. 
* +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. 
+Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. 
If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/terraform-providers/terraform-provider-nutanix/client/client.go b/vendor/github.com/terraform-providers/terraform-provider-nutanix/client/client.go new file mode 100644 index 00000000000..a2a89e9ad2b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-nutanix/client/client.go @@ -0,0 +1,354 @@ +package client + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + + "github.com/hashicorp/terraform-plugin-sdk/helper/logging" +) + +const ( + // libraryVersion = "v3" + defaultBaseURL = "https://%s/" + // absolutePath = "api/nutanix/" + libraryVersion + // userAgent = "nutanix/" + libraryVersion + mediaType = "application/json" +) + +// Client Config Configuration of the client +type Client struct { + Credentials *Credentials + + // HTTP client used to communicate with the Nutanix API. + client *http.Client + + // Base URL for API requests. + BaseURL *url.URL + + // User agent for client + UserAgent string + + Cookies []*http.Cookie + + // Optional function called after every successful request made. 
+ onRequestCompleted RequestCompletionCallback + + // absolutePath: for example api/nutanix/v3 + AbsolutePath string +} + +// RequestCompletionCallback defines the type of the request callback function +type RequestCompletionCallback func(*http.Request, *http.Response, interface{}) + +// Credentials needed username and password +type Credentials struct { + URL string + Username string + Password string + Endpoint string + Port string + Insecure bool + SessionAuth bool + ProxyURL string +} + +// NewClient returns a new Nutanix API client. +func NewClient(credentials *Credentials, userAgent string, absolutePath string) (*Client, error) { + if userAgent == "" { + return nil, fmt.Errorf("userAgent argument must be passed") + } + if absolutePath == "" { + return nil, fmt.Errorf("absolutePath argument must be passed") + } + + transCfg := &http.Transport{ + // nolint:gas + TLSClientConfig: &tls.Config{InsecureSkipVerify: credentials.Insecure}, // ignore expired SSL certificates + } + + if credentials.ProxyURL != "" { + log.Printf("[DEBUG] Using proxy: %s\n", credentials.ProxyURL) + proxy, err := url.Parse(credentials.ProxyURL) + if err != nil { + return nil, fmt.Errorf("error parsing proxy url: %s", err) + } + + transCfg.Proxy = http.ProxyURL(proxy) + } + + httpClient := http.DefaultClient + + httpClient.Transport = logging.NewTransport("Nutanix", transCfg) + + baseURL, err := url.Parse(fmt.Sprintf(defaultBaseURL, credentials.URL)) + + if err != nil { + return nil, err + } + + c := &Client{credentials, httpClient, baseURL, userAgent, nil, nil, absolutePath} + + if credentials.SessionAuth { + log.Printf("[DEBUG] Using session_auth\n") + + ctx := context.TODO() + req, err := c.NewRequest(ctx, http.MethodGet, "/users/me", nil) + if err != nil { + return c, err + } + + resp, err := c.client.Do(req) + + if err != nil { + return c, err + } + defer func() { + if rerr := resp.Body.Close(); err == nil { + err = rerr + } + }() + + err = CheckResponse(resp) + + c.Cookies = 
resp.Cookies() + } + + return c, nil +} + +// NewRequest creates a request +func (c *Client) NewRequest(ctx context.Context, method, urlStr string, body interface{}) (*http.Request, error) { + rel, errp := url.Parse(c.AbsolutePath + urlStr) + if errp != nil { + return nil, errp + } + + u := c.BaseURL.ResolveReference(rel) + + buf := new(bytes.Buffer) + + if body != nil { + err := json.NewEncoder(buf).Encode(body) + + if err != nil { + return nil, err + } + } + req, err := http.NewRequest(method, u.String(), buf) + + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", mediaType) + req.Header.Add("Accept", mediaType) + req.Header.Add("User-Agent", c.UserAgent) + if c.Cookies != nil { + for _, i := range c.Cookies { + req.AddCookie(i) + } + } else { + req.Header.Add("Authorization", "Basic "+ + base64.StdEncoding.EncodeToString([]byte(c.Credentials.Username+":"+c.Credentials.Password))) + } + + return req, nil +} + +// NewUploadRequest Handles image uploads for image service +func (c *Client) NewUploadRequest(ctx context.Context, method, urlStr string, body []byte) (*http.Request, error) { + rel, errp := url.Parse(c.AbsolutePath + urlStr) + if errp != nil { + return nil, errp + } + + u := c.BaseURL.ResolveReference(rel) + + buf := bytes.NewBuffer(body) + + req, err := http.NewRequest(method, u.String(), buf) + + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", "application/octet-stream") + req.Header.Add("Accept", "application/octet-stream") + req.Header.Add("User-Agent", c.UserAgent) + req.Header.Add("Authorization", "Basic "+ + base64.StdEncoding.EncodeToString([]byte(c.Credentials.Username+":"+c.Credentials.Password))) + + return req, nil +} + +// OnRequestCompleted sets the DO API request completion callback +func (c *Client) OnRequestCompleted(rc RequestCompletionCallback) { + c.onRequestCompleted = rc +} + +// Do performs request passed +func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) error 
{ + req = req.WithContext(ctx) + resp, err := c.client.Do(req) + + if err != nil { + return err + } + + defer func() { + if rerr := resp.Body.Close(); err == nil { + err = rerr + } + }() + + err = CheckResponse(resp) + + if err != nil { + return err + } + + if v != nil { + if w, ok := v.(io.Writer); ok { + _, err = io.Copy(w, resp.Body) + if err != nil { + fmt.Printf("Error io.Copy %s", err) + + return err + } + } else { + err = json.NewDecoder(resp.Body).Decode(v) + if err != nil { + return fmt.Errorf("error unmarshalling json: %s", err) + } + } + } + + if c.onRequestCompleted != nil { + c.onRequestCompleted(req, resp, v) + } + + return err +} + +// CheckResponse checks errors if exist errors in request +func CheckResponse(r *http.Response) error { + c := r.StatusCode + + if c >= 200 && c <= 299 { + return nil + } + + // Nutanix returns non-json response with code 401 when + // invalid credentials are used + if c == http.StatusUnauthorized { + return fmt.Errorf("invalid Nutanix Credentials") + } + + buf, err := ioutil.ReadAll(r.Body) + + if err != nil { + return err + } + + rdr2 := ioutil.NopCloser(bytes.NewBuffer(buf)) + + r.Body = rdr2 + // if has entities -> return nil + // if has message_list -> check_error["state"] + // if has status -> check_error["status.state"] + if len(buf) == 0 { + return nil + } + + var res map[string]interface{} + err = json.Unmarshal(buf, &res) + if err != nil { + return fmt.Errorf("unmarshalling error response %s for response body %s", err, string(buf)) + } + log.Print("[DEBUG] after json.Unmarshal") + + errRes := &ErrorResponse{} + if status, ok := res["status"]; ok { + _, sok := status.(string) + if sok { + return nil + } + + err = fillStruct(status.(map[string]interface{}), errRes) + } else if _, ok := res["state"]; ok { + err = fillStruct(res, errRes) + } else if _, ok := res["entities"]; ok { + return nil + } + + log.Print("[DEBUG] after bunch of switch cases") + if err != nil { + return err + } + log.Print("[DEBUG] first nil 
check") + + // karbon error check + if messageInfo, ok := res["message_info"]; ok { + return fmt.Errorf("error: %s", messageInfo) + } + if message, ok := res["message"]; ok { + log.Print(message) + return fmt.Errorf("error: %s", message) + } + if errRes.State != "ERROR" { + return nil + } + + log.Print("[DEBUG] after errRes.State") + pretty, _ := json.MarshalIndent(errRes, "", " ") + return fmt.Errorf("error: %s", string(pretty)) +} + +// ErrorResponse ... +type ErrorResponse struct { + APIVersion string `json:"api_version,omitempty"` + Code int64 `json:"code,omitempty"` + Kind string `json:"kind,omitempty"` + MessageList []MessageResource `json:"message_list"` + State string `json:"state"` +} + +// MessageResource ... +type MessageResource struct { + + // Custom key-value details relevant to the status. + Details map[string]interface{} `json:"details,omitempty"` + + // If state is ERROR, a message describing the error. + Message string `json:"message"` + + // If state is ERROR, a machine-readable snake-cased *string. 
+ Reason string `json:"reason"` +} + +func (r *ErrorResponse) Error() string { + err := "" + for key, value := range r.MessageList { + err = fmt.Sprintf("%d: {message:%s, reason:%s }", key, value.Message, value.Reason) + } + + return err +} + +func fillStruct(data map[string]interface{}, result interface{}) error { + j, err := json.Marshal(data) + if err != nil { + return err + } + + return json.Unmarshal(j, result) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-nutanix/client/v3/v3.go b/vendor/github.com/terraform-providers/terraform-provider-nutanix/client/v3/v3.go new file mode 100644 index 00000000000..96465224fab --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-nutanix/client/v3/v3.go @@ -0,0 +1,42 @@ +package v3 + +import ( + "github.com/terraform-providers/terraform-provider-nutanix/client" +) + +const ( + libraryVersion = "v3" + absolutePath = "api/nutanix/" + libraryVersion + userAgent = "nutanix/" + libraryVersion +) + +// Client manages the V3 API +type Client struct { + client *client.Client + V3 Service +} + +// NewV3Client return a client to operate V3 resources +func NewV3Client(credentials client.Credentials) (*Client, error) { + c, err := client.NewClient(&credentials, userAgent, absolutePath) + + if err != nil { + return nil, err + } + + f := &Client{ + client: c, + V3: Operations{ + client: c, + }, + } + + // f.client.OnRequestCompleted(func(req *http.Request, resp *http.Response, v interface{}) { + // if v != nil { + // utils.PrintToJSON(v, "[Debug] FINISHED REQUEST") + // // TBD: How to print responses before all requests. 
+ // } + // }) + + return f, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-nutanix/client/v3/v3_service.go b/vendor/github.com/terraform-providers/terraform-provider-nutanix/client/v3/v3_service.go new file mode 100644 index 00000000000..52c373c69b1 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-nutanix/client/v3/v3_service.go @@ -0,0 +1,2205 @@ +package v3 + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + + "github.com/terraform-providers/terraform-provider-nutanix/client" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +// Operations ... +type Operations struct { + client *client.Client +} + +// Service ... +type Service interface { + CreateVM(createRequest *VMIntentInput) (*VMIntentResponse, error) + DeleteVM(uuid string) (*DeleteResponse, error) + GetVM(uuid string) (*VMIntentResponse, error) + ListVM(getEntitiesRequest *DSMetadata) (*VMListIntentResponse, error) + UpdateVM(uuid string, body *VMIntentInput) (*VMIntentResponse, error) + CreateSubnet(createRequest *SubnetIntentInput) (*SubnetIntentResponse, error) + DeleteSubnet(uuid string) (*DeleteResponse, error) + GetSubnet(uuid string) (*SubnetIntentResponse, error) + ListSubnet(getEntitiesRequest *DSMetadata) (*SubnetListIntentResponse, error) + UpdateSubnet(uuid string, body *SubnetIntentInput) (*SubnetIntentResponse, error) + CreateImage(createRequest *ImageIntentInput) (*ImageIntentResponse, error) + DeleteImage(uuid string) (*DeleteResponse, error) + GetImage(uuid string) (*ImageIntentResponse, error) + ListImage(getEntitiesRequest *DSMetadata) (*ImageListIntentResponse, error) + UpdateImage(uuid string, body *ImageIntentInput) (*ImageIntentResponse, error) + UploadImage(uuid, filepath string) error + CreateOrUpdateCategoryKey(body *CategoryKey) (*CategoryKeyStatus, error) + ListCategories(getEntitiesRequest *CategoryListMetadata) (*CategoryKeyListResponse, error) + DeleteCategoryKey(name 
string) error + GetCategoryKey(name string) (*CategoryKeyStatus, error) + ListCategoryValues(name string, getEntitiesRequest *CategoryListMetadata) (*CategoryValueListResponse, error) + CreateOrUpdateCategoryValue(name string, body *CategoryValue) (*CategoryValueStatus, error) + GetCategoryValue(name string, value string) (*CategoryValueStatus, error) + DeleteCategoryValue(name string, value string) error + GetCategoryQuery(query *CategoryQueryInput) (*CategoryQueryResponse, error) + UpdateNetworkSecurityRule(uuid string, body *NetworkSecurityRuleIntentInput) (*NetworkSecurityRuleIntentResponse, error) + ListNetworkSecurityRule(getEntitiesRequest *DSMetadata) (*NetworkSecurityRuleListIntentResponse, error) + GetNetworkSecurityRule(uuid string) (*NetworkSecurityRuleIntentResponse, error) + DeleteNetworkSecurityRule(uuid string) (*DeleteResponse, error) + CreateNetworkSecurityRule(request *NetworkSecurityRuleIntentInput) (*NetworkSecurityRuleIntentResponse, error) + ListCluster(getEntitiesRequest *DSMetadata) (*ClusterListIntentResponse, error) + GetCluster(uuid string) (*ClusterIntentResponse, error) + UpdateVolumeGroup(uuid string, body *VolumeGroupInput) (*VolumeGroupResponse, error) + ListVolumeGroup(getEntitiesRequest *DSMetadata) (*VolumeGroupListResponse, error) + GetVolumeGroup(uuid string) (*VolumeGroupResponse, error) + DeleteVolumeGroup(uuid string) error + CreateVolumeGroup(request *VolumeGroupInput) (*VolumeGroupResponse, error) + ListAllVM(filter string) (*VMListIntentResponse, error) + ListAllSubnet(filter string) (*SubnetListIntentResponse, error) + ListAllNetworkSecurityRule(filter string) (*NetworkSecurityRuleListIntentResponse, error) + ListAllImage(filter string) (*ImageListIntentResponse, error) + ListAllCluster(filter string) (*ClusterListIntentResponse, error) + ListAllCategoryValues(categoryName, filter string) (*CategoryValueListResponse, error) + GetTask(taskUUID string) (*TasksResponse, error) + GetHost(taskUUID string) (*HostResponse, 
error) + ListHost(getEntitiesRequest *DSMetadata) (*HostListResponse, error) + ListAllHost() (*HostListResponse, error) + CreateProject(request *Project) (*Project, error) + GetProject(projectUUID string) (*Project, error) + ListProject(getEntitiesRequest *DSMetadata) (*ProjectListResponse, error) + ListAllProject(filter string) (*ProjectListResponse, error) + UpdateProject(uuid string, body *Project) (*Project, error) + DeleteProject(uuid string) (*DeleteResponse, error) + CreateAccessControlPolicy(request *AccessControlPolicy) (*AccessControlPolicy, error) + GetAccessControlPolicy(accessControlPolicyUUID string) (*AccessControlPolicy, error) + ListAccessControlPolicy(getEntitiesRequest *DSMetadata) (*AccessControlPolicyListResponse, error) + ListAllAccessControlPolicy(filter string) (*AccessControlPolicyListResponse, error) + UpdateAccessControlPolicy(uuid string, body *AccessControlPolicy) (*AccessControlPolicy, error) + DeleteAccessControlPolicy(uuid string) (*DeleteResponse, error) + CreateRole(request *Role) (*Role, error) + GetRole(uuid string) (*Role, error) + ListRole(getEntitiesRequest *DSMetadata) (*RoleListResponse, error) + ListAllRole(filter string) (*RoleListResponse, error) + UpdateRole(uuid string, body *Role) (*Role, error) + DeleteRole(uuid string) (*DeleteResponse, error) + CreateUser(request *UserIntentInput) (*UserIntentResponse, error) + GetUser(userUUID string) (*UserIntentResponse, error) + UpdateUser(uuid string, body *UserIntentInput) (*UserIntentResponse, error) + DeleteUser(uuid string) (*DeleteResponse, error) + ListUser(getEntitiesRequest *DSMetadata) (*UserListResponse, error) + ListAllUser(filter string) (*UserListResponse, error) + GetUserGroup(userUUID string) (*UserGroupIntentResponse, error) + ListUserGroup(getEntitiesRequest *DSMetadata) (*UserGroupListResponse, error) + ListAllUserGroup(filter string) (*UserGroupListResponse, error) + GetPermission(permissionUUID string) (*PermissionIntentResponse, error) + 
ListPermission(getEntitiesRequest *DSMetadata) (*PermissionListResponse, error) + ListAllPermission(filter string) (*PermissionListResponse, error) + GetProtectionRule(uuid string) (*ProtectionRuleResponse, error) + ListProtectionRules(getEntitiesRequest *DSMetadata) (*ProtectionRulesListResponse, error) + ListAllProtectionRules(filter string) (*ProtectionRulesListResponse, error) + CreateProtectionRule(request *ProtectionRuleInput) (*ProtectionRuleResponse, error) + UpdateProtectionRule(uuid string, body *ProtectionRuleInput) (*ProtectionRuleResponse, error) + DeleteProtectionRule(uuid string) (*DeleteResponse, error) + GetRecoveryPlan(uuid string) (*RecoveryPlanResponse, error) + ListRecoveryPlans(getEntitiesRequest *DSMetadata) (*RecoveryPlanListResponse, error) + ListAllRecoveryPlans(filter string) (*RecoveryPlanListResponse, error) + CreateRecoveryPlan(request *RecoveryPlanInput) (*RecoveryPlanResponse, error) + UpdateRecoveryPlan(uuid string, body *RecoveryPlanInput) (*RecoveryPlanResponse, error) + DeleteRecoveryPlan(uuid string) (*DeleteResponse, error) +} + +/*CreateVM Creates a VM + * This operation submits a request to create a VM based on the input parameters. + * + * @param body + * @return *VMIntentResponse + */ +func (op Operations) CreateVM(createRequest *VMIntentInput) (*VMIntentResponse, error) { + ctx := context.TODO() + + req, err := op.client.NewRequest(ctx, http.MethodPost, "/vms", createRequest) + vmIntentResponse := new(VMIntentResponse) + + if err != nil { + return nil, err + } + + return vmIntentResponse, op.client.Do(ctx, req, vmIntentResponse) +} + +/*DeleteVM Deletes a VM + * This operation submits a request to delete a op. + * + * @param uuid The uuid of the entity. 
+ * @return error + */ +func (op Operations) DeleteVM(uuid string) (*DeleteResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/vms/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodDelete, path, nil) + deleteResponse := new(DeleteResponse) + + if err != nil { + return nil, err + } + + return deleteResponse, op.client.Do(ctx, req, deleteResponse) +} + +/*GetVM Gets a VM + * This operation gets a op. + * + * @param uuid The uuid of the entity. + * @return *VMIntentResponse + */ +func (op Operations) GetVM(uuid string) (*VMIntentResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/vms/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + vmIntentResponse := new(VMIntentResponse) + + if err != nil { + return nil, err + } + + return vmIntentResponse, op.client.Do(ctx, req, vmIntentResponse) +} + +/*ListVM Get a list of VMs This operation gets a list of VMs, allowing for sorting and pagination. Note: Entities that have not been created + * successfully are not listed. + * + * @param getEntitiesRequest @return *VmListIntentResponse + */ +func (op Operations) ListVM(getEntitiesRequest *DSMetadata) (*VMListIntentResponse, error) { + ctx := context.TODO() + path := "/vms/list" + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + vmListIntentResponse := new(VMListIntentResponse) + + if err != nil { + return nil, err + } + + return vmListIntentResponse, op.client.Do(ctx, req, vmListIntentResponse) +} + +/*UpdateVM Updates a VM + * This operation submits a request to update a VM based on the input parameters. + * + * @param uuid The uuid of the entity. 
+ * @param body + * @return *VMIntentResponse + */ +func (op Operations) UpdateVM(uuid string, body *VMIntentInput) (*VMIntentResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/vms/%s", uuid) + req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) + vmIntentResponse := new(VMIntentResponse) + + if err != nil { + return nil, err + } + + return vmIntentResponse, op.client.Do(ctx, req, vmIntentResponse) +} + +/*CreateSubnet Creates a subnet + * This operation submits a request to create a subnet based on the input parameters. A subnet is a block of IP addresses. + * + * @param body + * @return *SubnetIntentResponse + */ +func (op Operations) CreateSubnet(createRequest *SubnetIntentInput) (*SubnetIntentResponse, error) { + ctx := context.TODO() + + req, err := op.client.NewRequest(ctx, http.MethodPost, "/subnets", createRequest) + subnetIntentResponse := new(SubnetIntentResponse) + + if err != nil { + return nil, err + } + + return subnetIntentResponse, op.client.Do(ctx, req, subnetIntentResponse) +} + +/*DeleteSubnet Deletes a subnet + * This operation submits a request to delete a subnet. + * + * @param uuid The uuid of the entity. + * @return error if exist error + */ +func (op Operations) DeleteSubnet(uuid string) (*DeleteResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/subnets/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodDelete, path, nil) + deleteResponse := new(DeleteResponse) + + if err != nil { + return nil, err + } + + return deleteResponse, op.client.Do(ctx, req, deleteResponse) +} + +/*GetSubnet Gets a subnet entity + * This operation gets a subnet. + * + * @param uuid The uuid of the entity. 
+ * @return *SubnetIntentResponse + */ +func (op Operations) GetSubnet(uuid string) (*SubnetIntentResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/subnets/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + subnetIntentResponse := new(SubnetIntentResponse) + + if err != nil { + return nil, err + } + + // Recheck subnet already exist error + // if *subnetIntentResponse.Status.State == "ERROR" { + // pretty, _ := json.MarshalIndent(subnetIntentResponse.Status.MessageList, "", " ") + // return nil, fmt.Errorf("error: %s", string(pretty)) + // } + + return subnetIntentResponse, op.client.Do(ctx, req, subnetIntentResponse) +} + +/*ListSubnet Gets a list of subnets This operation gets a list of subnets, allowing for sorting and pagination. Note: Entities that have not + * been created successfully are not listed. + * + * @param getEntitiesRequest @return *SubnetListIntentResponse + */ +func (op Operations) ListSubnet(getEntitiesRequest *DSMetadata) (*SubnetListIntentResponse, error) { + ctx := context.TODO() + path := "/subnets/list" + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + subnetListIntentResponse := new(SubnetListIntentResponse) + + if err != nil { + return nil, err + } + + return subnetListIntentResponse, op.client.Do(ctx, req, subnetListIntentResponse) +} + +/*UpdateSubnet Updates a subnet + * This operation submits a request to update a subnet based on the input parameters. + * + * @param uuid The uuid of the entity. 
+ * @param body + * @return *SubnetIntentResponse + */ +func (op Operations) UpdateSubnet(uuid string, body *SubnetIntentInput) (*SubnetIntentResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/subnets/%s", uuid) + req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) + subnetIntentResponse := new(SubnetIntentResponse) + + if err != nil { + return nil, err + } + + return subnetIntentResponse, op.client.Do(ctx, req, subnetIntentResponse) +} + +/*CreateImage Creates a IMAGE Images are raw ISO, QCOW2, or VMDK files that are uploaded by a user can be attached to a op. An ISO image is + * attached as a virtual CD-ROM drive, and QCOW2 and VMDK files are attached as SCSI disks. An image has to be explicitly added to the + * self-service catalog before users can create VMs from it. + * + * @param body @return *ImageIntentResponse + */ +func (op Operations) CreateImage(body *ImageIntentInput) (*ImageIntentResponse, error) { + ctx := context.TODO() + + req, err := op.client.NewRequest(ctx, http.MethodPost, "/images", body) + imageIntentResponse := new(ImageIntentResponse) + + if err != nil { + return nil, err + } + + return imageIntentResponse, op.client.Do(ctx, req, imageIntentResponse) +} + +/*UploadImage Uplloads a Image Binary file Images are raw ISO, QCOW2, or VMDK files that are uploaded by a user can be attached to a op. An + * ISO image is attached as a virtual CD-ROM drive, and QCOW2 and VMDK files are attached as SCSI disks. An image has to be explicitly added + * to the self-service catalog before users can create VMs from it. 
+ * + * @param uuid @param filepath + */ +func (op Operations) UploadImage(uuid, filepath string) error { + ctx := context.Background() + + path := fmt.Sprintf("/images/%s/file", uuid) + + file, err := os.Open(filepath) + if err != nil { + return fmt.Errorf("error: cannot open file: %s", err) + } + defer file.Close() + + fileContents, err := ioutil.ReadAll(file) + if err != nil { + return fmt.Errorf("error: Cannot read file %s", err) + } + + req, err := op.client.NewUploadRequest(ctx, http.MethodPut, path, fileContents) + + if err != nil { + return fmt.Errorf("error: Creating request %s", err) + } + + err = op.client.Do(ctx, req, nil) + + return err +} + +/*DeleteImage deletes a IMAGE + * This operation submits a request to delete a IMAGE. + * + * @param uuid The uuid of the entity. + * @return error if error exists + */ +func (op Operations) DeleteImage(uuid string) (*DeleteResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/images/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodDelete, path, nil) + deleteResponse := new(DeleteResponse) + + if err != nil { + return nil, err + } + + return deleteResponse, op.client.Do(ctx, req, deleteResponse) +} + +/*GetImage gets a IMAGE + * This operation gets a IMAGE. + * + * @param uuid The uuid of the entity. + * @return *ImageIntentResponse + */ +func (op Operations) GetImage(uuid string) (*ImageIntentResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/images/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + imageIntentResponse := new(ImageIntentResponse) + + if err != nil { + return nil, err + } + + return imageIntentResponse, op.client.Do(ctx, req, imageIntentResponse) +} + +/*ListImage gets a list of IMAGEs This operation gets a list of IMAGEs, allowing for sorting and pagination. Note: Entities that have not + * been created successfully are not listed. 
+ * + * @param getEntitiesRequest @return *ImageListIntentResponse + */ +func (op Operations) ListImage(getEntitiesRequest *DSMetadata) (*ImageListIntentResponse, error) { + ctx := context.TODO() + path := "/images/list" + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + imageListIntentResponse := new(ImageListIntentResponse) + + if err != nil { + return nil, err + } + + return imageListIntentResponse, op.client.Do(ctx, req, imageListIntentResponse) +} + +/*UpdateImage updates a IMAGE + * This operation submits a request to update a IMAGE based on the input parameters. + * + * @param uuid The uuid of the entity. + * @param body + * @return *ImageIntentResponse + */ +func (op Operations) UpdateImage(uuid string, body *ImageIntentInput) (*ImageIntentResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/images/%s", uuid) + req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) + imageIntentResponse := new(ImageIntentResponse) + + if err != nil { + return nil, err + } + + return imageIntentResponse, op.client.Do(ctx, req, imageIntentResponse) +} + +/*GetCluster gets a CLUSTER + * This operation gets a CLUSTER. + * + * @param uuid The uuid of the entity. + * @return *ImageIntentResponse + */ +func (op Operations) GetCluster(uuid string) (*ClusterIntentResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/clusters/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + clusterIntentResponse := new(ClusterIntentResponse) + + if err != nil { + return nil, err + } + + return clusterIntentResponse, op.client.Do(ctx, req, clusterIntentResponse) +} + +/*ListCluster gets a list of CLUSTERS This operation gets a list of CLUSTERS, allowing for sorting and pagination. Note: Entities that have + * not been created successfully are not listed. 
+ * + * @param getEntitiesRequest @return *ClusterListIntentResponse + */ +func (op Operations) ListCluster(getEntitiesRequest *DSMetadata) (*ClusterListIntentResponse, error) { + ctx := context.TODO() + path := "/clusters/list" + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + clusterList := new(ClusterListIntentResponse) + + if err != nil { + return nil, err + } + + return clusterList, op.client.Do(ctx, req, clusterList) +} + +/*UpdateImage updates a CLUSTER + * This operation submits a request to update a CLUSTER based on the input parameters. + * + * @param uuid The uuid of the entity. + * @param body + * @return *ImageIntentResponse + */ +// func (op Operations) UpdateImage(uuid string, body *ImageIntentInput) (*ImageIntentResponse, error) { +// ctx := context.TODO() + +// path := fmt.Sprintf("/images/%s", uuid) + +// req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) +// if err != nil { +// return nil, err +// } + +// imageIntentResponse := new(ImageIntentResponse) + +// err = op.client.Do(ctx, req, imageIntentResponse) +// if err != nil { +// return nil, err +// } + +// return imageIntentResponse, nil +// } + +// CreateOrUpdateCategoryKey ... +func (op Operations) CreateOrUpdateCategoryKey(body *CategoryKey) (*CategoryKeyStatus, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/categories/%s", utils.StringValue(body.Name)) + req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) + categoryKeyResponse := new(CategoryKeyStatus) + + if err != nil { + return nil, err + } + + return categoryKeyResponse, op.client.Do(ctx, req, categoryKeyResponse) +} + +/*ListCategories gets a list of Categories This operation gets a list of Categories, allowing for sorting and pagination. Note: Entities + * that have not been created successfully are not listed. 
+ * + * @param getEntitiesRequest @return *ImageListIntentResponse + */ +func (op Operations) ListCategories(getEntitiesRequest *CategoryListMetadata) (*CategoryKeyListResponse, error) { + ctx := context.TODO() + path := "/categories/list" + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + categoryKeyListResponse := new(CategoryKeyListResponse) + + if err != nil { + return nil, err + } + + return categoryKeyListResponse, op.client.Do(ctx, req, categoryKeyListResponse) +} + +/*DeleteCategoryKey Deletes a Category + * This operation submits a request to delete a op. + * + * @param name The name of the entity. + * @return error + */ +func (op Operations) DeleteCategoryKey(name string) error { + ctx := context.TODO() + + path := fmt.Sprintf("/categories/%s", name) + + req, err := op.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return err + } + + return op.client.Do(ctx, req, nil) +} + +/*GetCategoryKey gets a Category + * This operation gets a Category. + * + * @param name The name of the entity. + * @return *CategoryKeyStatus + */ +func (op Operations) GetCategoryKey(name string) (*CategoryKeyStatus, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/categories/%s", name) + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + categoryKeyStatusResponse := new(CategoryKeyStatus) + + if err != nil { + return nil, err + } + + return categoryKeyStatusResponse, op.client.Do(ctx, req, categoryKeyStatusResponse) +} + +/*ListCategoryValues gets a list of Category values for a specific key This operation gets a list of Categories, allowing for sorting and + * pagination. Note: Entities that have not been created successfully are not listed. 
+ * + * @param name @param getEntitiesRequest @return *CategoryValueListResponse + */ +func (op Operations) ListCategoryValues(name string, getEntitiesRequest *CategoryListMetadata) (*CategoryValueListResponse, error) { + ctx := context.TODO() + path := fmt.Sprintf("/categories/%s/list", name) + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + categoryValueListResponse := new(CategoryValueListResponse) + + if err != nil { + return nil, err + } + + return categoryValueListResponse, op.client.Do(ctx, req, categoryValueListResponse) +} + +// CreateOrUpdateCategoryValue ... +func (op Operations) CreateOrUpdateCategoryValue(name string, body *CategoryValue) (*CategoryValueStatus, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/categories/%s/%s", name, utils.StringValue(body.Value)) + req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) + categoryValueResponse := new(CategoryValueStatus) + + if err != nil { + return nil, err + } + + return categoryValueResponse, op.client.Do(ctx, req, categoryValueResponse) +} + +/*GetCategoryValue gets a Category Value + * This operation gets a Category Value. + * + * @param name The name of the entity. + * @params value the value of entity that belongs to category key + * @return *CategoryValueStatus + */ +func (op Operations) GetCategoryValue(name string, value string) (*CategoryValueStatus, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/categories/%s/%s", name, value) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + categoryValueStatusResponse := new(CategoryValueStatus) + + if err != nil { + return nil, err + } + + return categoryValueStatusResponse, op.client.Do(ctx, req, categoryValueStatusResponse) +} + +/*DeleteCategoryValue Deletes a Category Value + * This operation submits a request to delete a op. + * + * @param name The name of the entity. 
+ * @params value the value of entity that belongs to category key + * @return error + */ +func (op Operations) DeleteCategoryValue(name string, value string) error { + ctx := context.TODO() + + path := fmt.Sprintf("/categories/%s/%s", name, value) + + req, err := op.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return err + } + + return op.client.Do(ctx, req, nil) +} + +/*GetCategoryQuery gets list of entities attached to categories or policies in which categories are used as defined by the filter criteria. + * + * @param query Categories query input object. + * @return *CategoryQueryResponse + */ +func (op Operations) GetCategoryQuery(query *CategoryQueryInput) (*CategoryQueryResponse, error) { + ctx := context.TODO() + + path := "/categories/query" + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, query) + categoryQueryResponse := new(CategoryQueryResponse) + + if err != nil { + return nil, err + } + + return categoryQueryResponse, op.client.Do(ctx, req, categoryQueryResponse) +} + +/*CreateNetworkSecurityRule Creates a Network security rule + * This operation submits a request to create a Network security rule based on the input parameters. + * + * @param request + * @return *NetworkSecurityRuleIntentResponse + */ +func (op Operations) CreateNetworkSecurityRule(request *NetworkSecurityRuleIntentInput) (*NetworkSecurityRuleIntentResponse, error) { + ctx := context.TODO() + + networkSecurityRuleIntentResponse := new(NetworkSecurityRuleIntentResponse) + req, err := op.client.NewRequest(ctx, http.MethodPost, "/network_security_rules", request) + + if err != nil { + return nil, err + } + + return networkSecurityRuleIntentResponse, op.client.Do(ctx, req, networkSecurityRuleIntentResponse) +} + +/*DeleteNetworkSecurityRule Deletes a Network security rule + * This operation submits a request to delete a Network security rule. + * + * @param uuid The uuid of the entity. 
+ * @return void + */ +func (op Operations) DeleteNetworkSecurityRule(uuid string) (*DeleteResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/network_security_rules/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodDelete, path, nil) + deleteResponse := new(DeleteResponse) + + if err != nil { + return nil, err + } + + return deleteResponse, op.client.Do(ctx, req, deleteResponse) +} + +/*GetNetworkSecurityRule Gets a Network security rule + * This operation gets a Network security rule. + * + * @param uuid The uuid of the entity. + * @return *NetworkSecurityRuleIntentResponse + */ +func (op Operations) GetNetworkSecurityRule(uuid string) (*NetworkSecurityRuleIntentResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/network_security_rules/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + networkSecurityRuleIntentResponse := new(NetworkSecurityRuleIntentResponse) + + if err != nil { + return nil, err + } + + return networkSecurityRuleIntentResponse, op.client.Do(ctx, req, networkSecurityRuleIntentResponse) +} + +/*ListNetworkSecurityRule Gets all network security rules This operation gets a list of Network security rules, allowing for sorting and + * pagination. Note: Entities that have not been created successfully are not listed. 
+ * + * @param getEntitiesRequest @return *NetworkSecurityRuleListIntentResponse + */ +func (op Operations) ListNetworkSecurityRule(getEntitiesRequest *DSMetadata) (*NetworkSecurityRuleListIntentResponse, error) { + ctx := context.TODO() + path := "/network_security_rules/list" + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + networkSecurityRuleListIntentResponse := new(NetworkSecurityRuleListIntentResponse) + + if err != nil { + return nil, err + } + + return networkSecurityRuleListIntentResponse, op.client.Do(ctx, req, networkSecurityRuleListIntentResponse) +} + +/*UpdateNetworkSecurityRule Updates a Network security rule + * This operation submits a request to update a Network security rule based on the input parameters. + * + * @param uuid The uuid of the entity. + * @param body + * @return void + */ +func (op Operations) UpdateNetworkSecurityRule( + uuid string, + body *NetworkSecurityRuleIntentInput) (*NetworkSecurityRuleIntentResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/network_security_rules/%s", uuid) + req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) + networkSecurityRuleIntentResponse := new(NetworkSecurityRuleIntentResponse) + + if err != nil { + return nil, err + } + + return networkSecurityRuleIntentResponse, op.client.Do(ctx, req, networkSecurityRuleIntentResponse) +} + +/*CreateVolumeGroup Creates a Volume group + * This operation submits a request to create a Volume group based on the input parameters. 
+ * + * @param request + * @return *VolumeGroupResponse + */ +func (op Operations) CreateVolumeGroup(request *VolumeGroupInput) (*VolumeGroupResponse, error) { + ctx := context.TODO() + + req, err := op.client.NewRequest(ctx, http.MethodPost, "/volume_groups", request) + networkSecurityRuleResponse := new(VolumeGroupResponse) + + if err != nil { + return nil, err + } + + return networkSecurityRuleResponse, op.client.Do(ctx, req, networkSecurityRuleResponse) +} + +/*DeleteVolumeGroup Deletes a Volume group + * This operation submits a request to delete a Volume group. + * + * @param uuid The uuid of the entity. + * @return void + */ +func (op Operations) DeleteVolumeGroup(uuid string) error { + ctx := context.TODO() + + path := fmt.Sprintf("/volume_groups/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return err + } + + return op.client.Do(ctx, req, nil) +} + +/*GetVolumeGroup Gets a Volume group + * This operation gets a Volume group. + * + * @param uuid The uuid of the entity. + * @return *VolumeGroupResponse + */ +func (op Operations) GetVolumeGroup(uuid string) (*VolumeGroupResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/volume_groups/%s", uuid) + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + networkSecurityRuleResponse := new(VolumeGroupResponse) + + if err != nil { + return nil, err + } + + return networkSecurityRuleResponse, op.client.Do(ctx, req, networkSecurityRuleResponse) +} + +/*ListVolumeGroup Gets all network security rules This operation gets a list of Volume groups, allowing for sorting and pagination. Note: + * Entities that have not been created successfully are not listed. 
+ * + * @param getEntitiesRequest @return *VolumeGroupListResponse + */ +func (op Operations) ListVolumeGroup(getEntitiesRequest *DSMetadata) (*VolumeGroupListResponse, error) { + ctx := context.TODO() + path := "/volume_groups/list" + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + networkSecurityRuleListResponse := new(VolumeGroupListResponse) + + if err != nil { + return nil, err + } + + return networkSecurityRuleListResponse, op.client.Do(ctx, req, networkSecurityRuleListResponse) +} + +/*UpdateVolumeGroup Updates a Volume group + * This operation submits a request to update a Volume group based on the input parameters. + * + * @param uuid The uuid of the entity. + * @param body + * @return void + */ +func (op Operations) UpdateVolumeGroup(uuid string, body *VolumeGroupInput) (*VolumeGroupResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/volume_groups/%s", uuid) + req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) + networkSecurityRuleResponse := new(VolumeGroupResponse) + + if err != nil { + return nil, err + } + + return networkSecurityRuleResponse, op.client.Do(ctx, req, networkSecurityRuleResponse) +} + +const itemsPerPage int64 = 100 + +func hasNext(ri *int64) bool { + *ri -= itemsPerPage + return *ri >= (0 - itemsPerPage) +} + +// ListAllVM ... 
+func (op Operations) ListAllVM(filter string) (*VMListIntentResponse, error) { + entities := make([]*VMIntentResource, 0) + + resp, err := op.ListVM(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("vm"), + Length: utils.Int64Ptr(itemsPerPage), + }) + + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListVM(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("vm"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) + + offset += itemsPerPage + } + + resp.Entities = entities + } + + return resp, nil +} + +// ListAllSubnet ... +func (op Operations) ListAllSubnet(filter string) (*SubnetListIntentResponse, error) { + entities := make([]*SubnetIntentResponse, 0) + + resp, err := op.ListSubnet(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("subnet"), + Length: utils.Int64Ptr(itemsPerPage), + }) + + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListSubnet(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("subnet"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) + + offset += itemsPerPage + log.Printf("[Debug] total=%d, remaining=%d, offset=%d len(entities)=%d\n", totalEntities, remaining, offset, len(entities)) + } + + resp.Entities = entities + } + + return resp, nil +} + +// ListAllNetworkSecurityRule ... 
+func (op Operations) ListAllNetworkSecurityRule(filter string) (*NetworkSecurityRuleListIntentResponse, error) { + entities := make([]*NetworkSecurityRuleIntentResource, 0) + + resp, err := op.ListNetworkSecurityRule(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("network_security_rule"), + Length: utils.Int64Ptr(itemsPerPage), + }) + + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListNetworkSecurityRule(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("network_security_rule"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) + + offset += itemsPerPage + log.Printf("[Debug] total=%d, remaining=%d, offset=%d len(entities)=%d\n", totalEntities, remaining, offset, len(entities)) + } + + resp.Entities = entities + } + + return resp, nil +} + +// ListAllImage ... +func (op Operations) ListAllImage(filter string) (*ImageListIntentResponse, error) { + entities := make([]*ImageIntentResponse, 0) + + resp, err := op.ListImage(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("image"), + Length: utils.Int64Ptr(itemsPerPage), + }) + + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListImage(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("image"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) 
+ + offset += itemsPerPage + log.Printf("[Debug] total=%d, remaining=%d, offset=%d len(entities)=%d\n", totalEntities, remaining, offset, len(entities)) + } + + resp.Entities = entities + } + + return resp, nil +} + +// ListAllCluster ... +func (op Operations) ListAllCluster(filter string) (*ClusterListIntentResponse, error) { + entities := make([]*ClusterIntentResponse, 0) + + resp, err := op.ListCluster(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("cluster"), + Length: utils.Int64Ptr(itemsPerPage), + }) + + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListCluster(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("cluster"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) + + offset += itemsPerPage + log.Printf("[Debug] total=%d, remaining=%d, offset=%d len(entities)=%d\n", totalEntities, remaining, offset, len(entities)) + } + + resp.Entities = entities + } + + return resp, nil +} + +// ListAllCluster ... 
+func (op Operations) ListAllCategoryValues(categoryKeyName, filter string) (*CategoryValueListResponse, error) { + entities := make([]*CategoryValueStatus, 0) + + resp, err := op.ListCategoryValues(categoryKeyName, &CategoryListMetadata{ + Filter: &filter, + Kind: utils.StringPtr("category"), + Length: utils.Int64Ptr(itemsPerPage), + }) + + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListCategoryValues(categoryKeyName, &CategoryListMetadata{ + Filter: &filter, + Kind: utils.StringPtr("category"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) + + offset += itemsPerPage + log.Printf("[Debug] total=%d, remaining=%d, offset=%d len(entities)=%d\n", totalEntities, remaining, offset, len(entities)) + } + + resp.Entities = entities + } + + return resp, nil +} + +// GetTask ... +func (op Operations) GetTask(taskUUID string) (*TasksResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/tasks/%s", taskUUID) + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + tasksTesponse := new(TasksResponse) + + if err != nil { + return nil, err + } + + return tasksTesponse, op.client.Do(ctx, req, tasksTesponse) +} + +// GetHost ... +func (op Operations) GetHost(hostUUID string) (*HostResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/hosts/%s", hostUUID) + host := new(HostResponse) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + return host, op.client.Do(ctx, req, host) +} + +// ListHost ... 
+func (op Operations) ListHost(getEntitiesRequest *DSMetadata) (*HostListResponse, error) { + ctx := context.TODO() + path := "/hosts/list" + + hostList := new(HostListResponse) + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + if err != nil { + return nil, err + } + + return hostList, op.client.Do(ctx, req, hostList) +} + +// ListAllHost ... +func (op Operations) ListAllHost() (*HostListResponse, error) { + entities := make([]*HostResponse, 0) + + resp, err := op.ListHost(&DSMetadata{ + Kind: utils.StringPtr("host"), + Length: utils.Int64Ptr(itemsPerPage), + }) + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListHost(&DSMetadata{ + Kind: utils.StringPtr("cluster"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) + + offset += itemsPerPage + log.Printf("[Debug] total=%d, remaining=%d, offset=%d len(entities)=%d\n", totalEntities, remaining, offset, len(entities)) + } + + resp.Entities = entities + } + + return resp, nil +} + +/*CreateProject creates a project + * This operation submits a request to create a project based on the input parameters. + * + * @param request *Project + * @return *Project + */ +func (op Operations) CreateProject(request *Project) (*Project, error) { + ctx := context.TODO() + + req, err := op.client.NewRequest(ctx, http.MethodPost, "/projects", request) + if err != nil { + return nil, err + } + + projectResponse := new(Project) + + return projectResponse, op.client.Do(ctx, req, projectResponse) +} + +/*GetProject This operation gets a project. + * + * @param uuid The prject uuid - string. 
+ * @return *Project + */ +func (op Operations) GetProject(projectUUID string) (*Project, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/projects/%s", projectUUID) + project := new(Project) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + return project, op.client.Do(ctx, req, project) +} + +/*ListProject gets a list of projects. + * + * @param metadata allows create filters to get specific data - *DSMetadata. + * @return *ProjectListResponse + */ +func (op Operations) ListProject(getEntitiesRequest *DSMetadata) (*ProjectListResponse, error) { + ctx := context.TODO() + path := "/projects/list" + + projectList := new(ProjectListResponse) + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + if err != nil { + return nil, err + } + + return projectList, op.client.Do(ctx, req, projectList) +} + +/*ListAllProject gets a list of projects + * This operation gets a list of Projects, allowing for sorting and pagination. + * Note: Entities that have not been created successfully are not listed. + * @return *ProjectListResponse + */ +func (op Operations) ListAllProject(filter string) (*ProjectListResponse, error) { + entities := make([]*Project, 0) + + resp, err := op.ListProject(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("project"), + Length: utils.Int64Ptr(itemsPerPage), + }) + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListProject(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("project"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) 
+ + offset += itemsPerPage + log.Printf("[Debug] total=%d, remaining=%d, offset=%d len(entities)=%d\n", totalEntities, remaining, offset, len(entities)) + } + + resp.Entities = entities + } + + return resp, nil +} + +/*UpdateProject Updates a project + * This operation submits a request to update a existing Project based on the input parameters + * @param uuid The uuid of the entity - string. + * @param body - *Project + * @return *Project, error + */ +func (op Operations) UpdateProject(uuid string, body *Project) (*Project, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/projects/%s", uuid) + projectInput := new(Project) + + req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) + if err != nil { + return nil, err + } + + return projectInput, op.client.Do(ctx, req, projectInput) +} + +/*DeleteProject Deletes a project + * This operation submits a request to delete a existing Project. + * + * @param uuid The uuid of the entity. + * @return void + */ +func (op Operations) DeleteProject(uuid string) (*DeleteResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/projects/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodDelete, path, nil) + deleteResponse := new(DeleteResponse) + if err != nil { + return nil, err + } + + return deleteResponse, op.client.Do(ctx, req, deleteResponse) +} + +/*CreateAccessControlPolicy creates a access policy + * This operation submits a request to create a access policy based on the input parameters. 
+ * + * @param request *Access Policy + * @return *Access Policy + */ +func (op Operations) CreateAccessControlPolicy(request *AccessControlPolicy) (*AccessControlPolicy, error) { + ctx := context.TODO() + + req, err := op.client.NewRequest(ctx, http.MethodPost, "/access_control_policies", request) + if err != nil { + return nil, err + } + + AccessControlPolicyResponse := new(AccessControlPolicy) + + return AccessControlPolicyResponse, op.client.Do(ctx, req, AccessControlPolicyResponse) +} + +/*GetAccessControlPolicy This operation gets a AccessControlPolicy. + * + * @param uuid The access policy uuid - string. + * @return *AccessControlPolicy + */ +func (op Operations) GetAccessControlPolicy(accessControlPolicyUUID string) (*AccessControlPolicy, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/access_control_policies/%s", accessControlPolicyUUID) + AccessControlPolicy := new(AccessControlPolicy) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + return AccessControlPolicy, op.client.Do(ctx, req, AccessControlPolicy) +} + +/*ListAccessControlPolicy gets a list of AccessControlPolicys. + * + * @param metadata allows create filters to get specific data - *DSMetadata. + * @return *AccessControlPolicyListResponse + */ +func (op Operations) ListAccessControlPolicy(getEntitiesRequest *DSMetadata) (*AccessControlPolicyListResponse, error) { + ctx := context.TODO() + path := "/access_control_policies/list" + + AccessControlPolicyList := new(AccessControlPolicyListResponse) + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + if err != nil { + return nil, err + } + + return AccessControlPolicyList, op.client.Do(ctx, req, AccessControlPolicyList) +} + +/*ListAllAccessControlPolicy gets a list of AccessControlPolicys + * This operation gets a list of AccessControlPolicys, allowing for sorting and pagination. 
+ * Note: Entities that have not been created successfully are not listed. + * @return *AccessControlPolicyListResponse + */ +func (op Operations) ListAllAccessControlPolicy(filter string) (*AccessControlPolicyListResponse, error) { + entities := make([]*AccessControlPolicy, 0) + + resp, err := op.ListAccessControlPolicy(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("access_control_policy"), + Length: utils.Int64Ptr(itemsPerPage), + }) + + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListAccessControlPolicy(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("access_control_policy"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) + + offset += itemsPerPage + log.Printf("[Debug] total=%d, remaining=%d, offset=%d len(entities)=%d\n", totalEntities, remaining, offset, len(entities)) + } + + resp.Entities = entities + } + + return resp, nil +} + +/*UpdateAccessControlPolicy Updates a AccessControlPolicy + * This operation submits a request to update a existing AccessControlPolicy based on the input parameters + * @param uuid The uuid of the entity - string. 
+ * @param body - *AccessControlPolicy + * @return *AccessControlPolicy, error + */ +func (op Operations) UpdateAccessControlPolicy(uuid string, body *AccessControlPolicy) (*AccessControlPolicy, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/access_control_policies/%s", uuid) + AccessControlPolicyInput := new(AccessControlPolicy) + + req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) + if err != nil { + return nil, err + } + + return AccessControlPolicyInput, op.client.Do(ctx, req, AccessControlPolicyInput) +} + +/*DeleteAccessControlPolicy Deletes a AccessControlPolicy + * This operation submits a request to delete a existing AccessControlPolicy. + * + * @param uuid The uuid of the entity. + * @return void + */ +func (op Operations) DeleteAccessControlPolicy(uuid string) (*DeleteResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/access_control_policies/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodDelete, path, nil) + deleteResponse := new(DeleteResponse) + + if err != nil { + return nil, err + } + + return deleteResponse, op.client.Do(ctx, req, deleteResponse) +} + +/*CreateRole creates a role + * This operation submits a request to create a role based on the input parameters. + * + * @param request *Role + * @return *Role + */ +func (op Operations) CreateRole(request *Role) (*Role, error) { + ctx := context.TODO() + + req, err := op.client.NewRequest(ctx, http.MethodPost, "/roles", request) + if err != nil { + return nil, err + } + + RoleResponse := new(Role) + + return RoleResponse, op.client.Do(ctx, req, RoleResponse) +} + +/*GetRole This operation gets a role. + * + * @param uuid The role uuid - string. 
+ * @return *Role + */ +func (op Operations) GetRole(roleUUID string) (*Role, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/roles/%s", roleUUID) + Role := new(Role) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + return Role, op.client.Do(ctx, req, Role) +} + +/*ListRole gets a list of roles. + * + * @param metadata allows create filters to get specific data - *DSMetadata. + * @return *RoleListResponse + */ +func (op Operations) ListRole(getEntitiesRequest *DSMetadata) (*RoleListResponse, error) { + ctx := context.TODO() + path := "/roles/list" + + RoleList := new(RoleListResponse) + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + if err != nil { + return nil, err + } + + return RoleList, op.client.Do(ctx, req, RoleList) +} + +/*ListAllRole gets a list of Roles + * This operation gets a list of Roles, allowing for sorting and pagination. + * Note: Entities that have not been created successfully are not listed. + * @return *RoleListResponse + */ +func (op Operations) ListAllRole(filter string) (*RoleListResponse, error) { + entities := make([]*Role, 0) + + resp, err := op.ListRole(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("role"), + Length: utils.Int64Ptr(itemsPerPage), + }) + + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListRole(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("role"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) 
+ + offset += itemsPerPage + log.Printf("[Debug] total=%d, remaining=%d, offset=%d len(entities)=%d\n", totalEntities, remaining, offset, len(entities)) + } + + resp.Entities = entities + } + + return resp, nil +} + +/*UpdateRole Updates a role + * This operation submits a request to update a existing role based on the input parameters + * @param uuid The uuid of the entity - string. + * @param body - *Role + * @return *Role, error + */ +func (op Operations) UpdateRole(uuid string, body *Role) (*Role, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/roles/%s", uuid) + RoleInput := new(Role) + + req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) + if err != nil { + return nil, err + } + + return RoleInput, op.client.Do(ctx, req, RoleInput) +} + +/*DeleteRole Deletes a role + * This operation submits a request to delete a existing role. + * + * @param uuid The uuid of the entity. + * @return void + */ +func (op Operations) DeleteRole(uuid string) (*DeleteResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/roles/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodDelete, path, nil) + deleteResponse := new(DeleteResponse) + + if err != nil { + return nil, err + } + + return deleteResponse, op.client.Do(ctx, req, deleteResponse) +} + +/*CreateUser creates a User + * This operation submits a request to create a userbased on the input parameters. + * + * @param request *VMIntentInput + * @return *UserIntentResponse + */ +func (op Operations) CreateUser(request *UserIntentInput) (*UserIntentResponse, error) { + ctx := context.TODO() + + req, err := op.client.NewRequest(ctx, http.MethodPost, "/users", request) + if err != nil { + return nil, err + } + + UserIntentResponse := new(UserIntentResponse) + + return UserIntentResponse, op.client.Do(ctx, req, UserIntentResponse) +} + +/*GetUser This operation gets a User. + * + * @param uuid The user uuid - string. 
+ * @return *User + */ +func (op Operations) GetUser(userUUID string) (*UserIntentResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/users/%s", userUUID) + User := new(UserIntentResponse) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + return User, op.client.Do(ctx, req, User) +} + +/*UpdateUser Updates a User + * This operation submits a request to update a existing User based on the input parameters + * @param uuid The uuid of the entity - string. + * @param body - *User + * @return *User, error + */ +func (op Operations) UpdateUser(uuid string, body *UserIntentInput) (*UserIntentResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/users/%s", uuid) + UserInput := new(UserIntentResponse) + + req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) + if err != nil { + return nil, err + } + + return UserInput, op.client.Do(ctx, req, UserInput) +} + +/*DeleteUser Deletes a User + * This operation submits a request to delete a existing User. + * + * @param uuid The uuid of the entity. + * @return void + */ +func (op Operations) DeleteUser(uuid string) (*DeleteResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/users/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodDelete, path, nil) + deleteResponse := new(DeleteResponse) + + if err != nil { + return nil, err + } + + return deleteResponse, op.client.Do(ctx, req, deleteResponse) +} + +/*ListUser gets a list of Users. + * + * @param metadata allows create filters to get specific data - *DSMetadata. 
+ * @return *UserListResponse + */ +func (op Operations) ListUser(getEntitiesRequest *DSMetadata) (*UserListResponse, error) { + ctx := context.TODO() + path := "/users/list" + + UserList := new(UserListResponse) + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + if err != nil { + return nil, err + } + + return UserList, op.client.Do(ctx, req, UserList) +} + +// ListAllUser ... +func (op Operations) ListAllUser(filter string) (*UserListResponse, error) { + entities := make([]*UserIntentResponse, 0) + + resp, err := op.ListUser(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("user"), + Length: utils.Int64Ptr(itemsPerPage), + }) + + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListUser(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("user"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) + + offset += itemsPerPage + } + + resp.Entities = entities + } + + return resp, nil +} + +/*GetUserGroup This operation gets a User. + * + * @param uuid The user uuid - string. + * @return *User + */ +func (op Operations) GetUserGroup(userGroupUUID string) (*UserGroupIntentResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/user_groups/%s", userGroupUUID) + User := new(UserGroupIntentResponse) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + return User, op.client.Do(ctx, req, User) +} + +/*ListUserGroup gets a list of UserGroups. + * + * @param metadata allows create filters to get specific data - *DSMetadata. 
+ * @return *UserGroupListResponse + */ +func (op Operations) ListUserGroup(getEntitiesRequest *DSMetadata) (*UserGroupListResponse, error) { + ctx := context.TODO() + path := "/user_groups/list" + + UserGroupList := new(UserGroupListResponse) + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + if err != nil { + return nil, err + } + + return UserGroupList, op.client.Do(ctx, req, UserGroupList) +} + +// ListAllUserGroup ... +func (op Operations) ListAllUserGroup(filter string) (*UserGroupListResponse, error) { + entities := make([]*UserGroupIntentResponse, 0) + + resp, err := op.ListUserGroup(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("user_group"), + Length: utils.Int64Ptr(itemsPerPage), + }) + + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListUserGroup(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("user"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) + + offset += itemsPerPage + } + + resp.Entities = entities + } + + return resp, nil +} + +/*GetPermission This operation gets a Permission. + * + * @param uuid The permission uuid - string. + * @return *PermissionIntentResponse + */ +func (op Operations) GetPermission(permissionUUID string) (*PermissionIntentResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/permissions/%s", permissionUUID) + permission := new(PermissionIntentResponse) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + return permission, op.client.Do(ctx, req, permission) +} + +/*ListPermission gets a list of Permissions. 
+ * + * @param metadata allows create filters to get specific data - *DSMetadata. + * @return *PermissionListResponse + */ +func (op Operations) ListPermission(getEntitiesRequest *DSMetadata) (*PermissionListResponse, error) { + ctx := context.TODO() + path := "/permissions/list" + + PermissionList := new(PermissionListResponse) + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + if err != nil { + return nil, err + } + + return PermissionList, op.client.Do(ctx, req, PermissionList) +} + +// ListAllPermission ... +func (op Operations) ListAllPermission(filter string) (*PermissionListResponse, error) { + entities := make([]*PermissionIntentResponse, 0) + + resp, err := op.ListPermission(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("permission"), + Length: utils.Int64Ptr(itemsPerPage), + }) + + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListPermission(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("permission"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) + + offset += itemsPerPage + } + + resp.Entities = entities + } + + return resp, nil +} + +//GetProtectionRule ... +func (op Operations) GetProtectionRule(uuid string) (*ProtectionRuleResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/protection_rules/%s", uuid) + protectionRule := new(ProtectionRuleResponse) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + return protectionRule, op.client.Do(ctx, req, protectionRule) +} + +//ListProtectionRules ... 
+func (op Operations) ListProtectionRules(getEntitiesRequest *DSMetadata) (*ProtectionRulesListResponse, error) { + ctx := context.TODO() + path := "/protection_rules/list" + + list := new(ProtectionRulesListResponse) + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + if err != nil { + return nil, err + } + + return list, op.client.Do(ctx, req, list) +} + +// ListAllProtectionRules ... +func (op Operations) ListAllProtectionRules(filter string) (*ProtectionRulesListResponse, error) { + entities := make([]*ProtectionRuleResponse, 0) + + resp, err := op.ListProtectionRules(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("protection_rule"), + Length: utils.Int64Ptr(itemsPerPage), + }) + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListProtectionRules(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("protection_rule"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) + + offset += itemsPerPage + log.Printf("[Debug] total=%d, remaining=%d, offset=%d len(entities)=%d\n", totalEntities, remaining, offset, len(entities)) + } + + resp.Entities = entities + } + + return resp, nil +} + +//CreateProtectionRule ... +func (op Operations) CreateProtectionRule(createRequest *ProtectionRuleInput) (*ProtectionRuleResponse, error) { + ctx := context.TODO() + + req, err := op.client.NewRequest(ctx, http.MethodPost, "/protection_rules", createRequest) + protectionRuleResponse := new(ProtectionRuleResponse) + + if err != nil { + return nil, err + } + + return protectionRuleResponse, op.client.Do(ctx, req, protectionRuleResponse) +} + +//UpdateProtectionRule ... 
+func (op Operations) UpdateProtectionRule(uuid string, body *ProtectionRuleInput) (*ProtectionRuleResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/protection_rules/%s", uuid) + req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) + protectionRuleResponse := new(ProtectionRuleResponse) + + if err != nil { + return nil, err + } + + return protectionRuleResponse, op.client.Do(ctx, req, protectionRuleResponse) +} + +//DeleteProtectionRule ... +func (op Operations) DeleteProtectionRule(uuid string) (*DeleteResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/protection_rules/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodDelete, path, nil) + deleteResponse := new(DeleteResponse) + + if err != nil { + return nil, err + } + + return deleteResponse, op.client.Do(ctx, req, deleteResponse) +} + +//GetRecoveryPlan ... +func (op Operations) GetRecoveryPlan(uuid string) (*RecoveryPlanResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/recovery_plans/%s", uuid) + RecoveryPlan := new(RecoveryPlanResponse) + + req, err := op.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + return RecoveryPlan, op.client.Do(ctx, req, RecoveryPlan) +} + +//ListRecoveryPlans ... +func (op Operations) ListRecoveryPlans(getEntitiesRequest *DSMetadata) (*RecoveryPlanListResponse, error) { + ctx := context.TODO() + path := "/recovery_plans/list" + + list := new(RecoveryPlanListResponse) + + req, err := op.client.NewRequest(ctx, http.MethodPost, path, getEntitiesRequest) + if err != nil { + return nil, err + } + + return list, op.client.Do(ctx, req, list) +} + +// ListAllRecoveryPlans ... 
+func (op Operations) ListAllRecoveryPlans(filter string) (*RecoveryPlanListResponse, error) { + entities := make([]*RecoveryPlanResponse, 0) + + resp, err := op.ListRecoveryPlans(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("recovery_plan"), + Length: utils.Int64Ptr(itemsPerPage), + }) + if err != nil { + return nil, err + } + + totalEntities := utils.Int64Value(resp.Metadata.TotalMatches) + remaining := totalEntities + offset := utils.Int64Value(resp.Metadata.Offset) + + if totalEntities > itemsPerPage { + for hasNext(&remaining) { + resp, err = op.ListRecoveryPlans(&DSMetadata{ + Filter: &filter, + Kind: utils.StringPtr("recovery_plan"), + Length: utils.Int64Ptr(itemsPerPage), + Offset: utils.Int64Ptr(offset), + }) + + if err != nil { + return nil, err + } + + entities = append(entities, resp.Entities...) + + offset += itemsPerPage + log.Printf("[Debug] total=%d, remaining=%d, offset=%d len(entities)=%d\n", totalEntities, remaining, offset, len(entities)) + } + + resp.Entities = entities + } + + return resp, nil +} + +//CreateRecoveryPlan ... +func (op Operations) CreateRecoveryPlan(createRequest *RecoveryPlanInput) (*RecoveryPlanResponse, error) { + ctx := context.TODO() + + req, err := op.client.NewRequest(ctx, http.MethodPost, "/recovery_plans", createRequest) + RecoveryPlanResponse := new(RecoveryPlanResponse) + + if err != nil { + return nil, err + } + + return RecoveryPlanResponse, op.client.Do(ctx, req, RecoveryPlanResponse) +} + +//UpdateRecoveryPlan ... +func (op Operations) UpdateRecoveryPlan(uuid string, body *RecoveryPlanInput) (*RecoveryPlanResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/recovery_plans/%s", uuid) + req, err := op.client.NewRequest(ctx, http.MethodPut, path, body) + RecoveryPlanResponse := new(RecoveryPlanResponse) + + if err != nil { + return nil, err + } + + return RecoveryPlanResponse, op.client.Do(ctx, req, RecoveryPlanResponse) +} + +//DeleteRecoveryPlan ... 
+func (op Operations) DeleteRecoveryPlan(uuid string) (*DeleteResponse, error) { + ctx := context.TODO() + + path := fmt.Sprintf("/recovery_plans/%s", uuid) + + req, err := op.client.NewRequest(ctx, http.MethodDelete, path, nil) + deleteResponse := new(DeleteResponse) + + if err != nil { + return nil, err + } + + return deleteResponse, op.client.Do(ctx, req, deleteResponse) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-nutanix/client/v3/v3_structs.go b/vendor/github.com/terraform-providers/terraform-provider-nutanix/client/v3/v3_structs.go new file mode 100644 index 00000000000..f543d3d742f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-nutanix/client/v3/v3_structs.go @@ -0,0 +1,2519 @@ +package v3 + +import ( + "time" +) + +// Reference ... +type Reference struct { + Kind *string `json:"kind" mapstructure:"kind"` + Name *string `json:"name,omitempty" mapstructure:"name,omitempty"` + UUID *string `json:"uuid" mapstructure:"uuid"` +} + +// VMVnumaConfig Indicates how VM vNUMA should be configured +type VMVnumaConfig struct { + + // Number of vNUMA nodes. 0 means vNUMA is disabled. + NumVnumaNodes *int64 `json:"num_vnuma_nodes,omitempty" mapstructure:"num_vnuma_nodes,omitempty"` +} + +type VMSerialPort struct { + Index *int64 `json:"index,omitempty" mapstructure:"index,omitempty"` + IsConnected *bool `json:"is_connected,omitempty" mapstructure:"is_connected,omitempty"` +} + +// IPAddress An IP address. +type IPAddress struct { + + // Address *string. + IP *string `json:"ip,omitempty" mapstructure:"ip,omitempty"` + + // Address type. It can only be \"ASSIGNED\" in the spec. If no type is specified in the spec, the default type is + // set to \"ASSIGNED\". + Type *string `json:"type,omitempty" mapstructure:"type,omitempty"` +} + +// VMNic Virtual Machine NIC. +type VMNic struct { + + // IP endpoints for the adapter. Currently, IPv4 addresses are supported. 
+ IPEndpointList []*IPAddress `json:"ip_endpoint_list,omitempty" mapstructure:"ip_endpoint_list,omitempty"` + + // The MAC address for the adapter. + MacAddress *string `json:"mac_address,omitempty" mapstructure:"mac_address,omitempty"` + + // The model of this NIC. + Model *string `json:"model,omitempty" mapstructure:"model,omitempty"` + + NetworkFunctionChainReference *Reference `json:"network_function_chain_reference,omitempty" mapstructure:"network_function_chain_reference,omitempty"` + + // The type of this Network function NIC. Defaults to INGRESS. + NetworkFunctionNicType *string `json:"network_function_nic_type,omitempty" mapstructure:"network_function_nic_type,omitempty"` + + // The type of this NIC. Defaults to NORMAL_NIC. + NicType *string `json:"nic_type,omitempty" mapstructure:"nic_type,omitempty"` + + SubnetReference *Reference `json:"subnet_reference,omitempty" mapstructure:"subnet_reference,omitempty"` + + // The NIC's UUID, which is used to uniquely identify this particular NIC. This UUID may be used to refer to the NIC + // outside the context of the particular VM it is attached to. + UUID *string `json:"uuid,omitempty" mapstructure:"uuid,omitempty"` + + IsConnected *bool `json:"is_connected,omitempty" mapstructure:"is_connected,omitempty"` +} + +// DiskAddress Disk Address. +type DiskAddress struct { + AdapterType *string `json:"adapter_type,omitempty" mapstructure:"adapter_type,omitempty"` + DeviceIndex *int64 `json:"device_index,omitempty" mapstructure:"device_index,omitempty"` +} + +// VMBootDevice Indicates which device a VM should boot from. One of disk_address or mac_address should be provided. +type VMBootDevice struct { + + // Address of disk to boot from. + DiskAddress *DiskAddress `json:"disk_address,omitempty" mapstructure:"disk_address,omitempty"` + + // MAC address of nic to boot from. 
+ MacAddress *string `json:"mac_address,omitempty" mapstructure:"mac_address,omitempty"` +} + +// VMBootConfig Indicates which device a VM should boot from. +type VMBootConfig struct { + + // Indicates which device a VM should boot from. Boot device takes precedence over boot device order. If both are + // given then specified boot device will be primary boot device and remaining devices will be assigned boot order + // according to boot device order field. + BootDevice *VMBootDevice `json:"boot_device,omitempty" mapstructure:"boot_device,omitempty"` + BootType *string `json:"boot_type,omitempty" mapstructure:"boot_type,omitempty"` + + // Indicates the order of device types in which VM should try to boot from. If boot device order is not provided the + // system will decide appropriate boot device order. + BootDeviceOrderList []*string `json:"boot_device_order_list,omitempty" mapstructure:"boot_device_order_list,omitempty"` +} + +// NutanixGuestToolsSpec Information regarding Nutanix Guest Tools. +type NutanixGuestToolsSpec struct { + State *string `json:"state,omitempty" mapstructure:"state,omitempty"` // Nutanix Guest Tools is enabled or not. + Version *string `json:"version,omitempty" mapstructure:"version,omitempty"` // Version of Nutanix Guest Tools installed on the VM. + NgtState *string `json:"ngt_state,omitempty" mapstructure:"ngt_state,omitempty"` // Nutanix Guest Tools installed or not. + Credentials map[string]string `json:"credentials,omitempty" mapstructure:"credentials,omitempty"` // Credentials to login server + IsoMountState *string `json:"iso_mount_state,omitempty" mapstructure:"iso_mount_state,omitempty"` // Desired mount state of Nutanix Guest Tools ISO. + EnabledCapabilityList []*string `json:"enabled_capability_list,omitempty" mapstructure:"enabled_capability_list,omitempty"` // Application names that are enabled. +} + +// GuestToolsSpec Information regarding guest tools. 
+type GuestToolsSpec struct { + + // Nutanix Guest Tools information + NutanixGuestTools *NutanixGuestToolsSpec `json:"nutanix_guest_tools,omitempty" mapstructure:"nutanix_guest_tools,omitempty"` +} + +// VMGpu Graphics resource information for the Virtual Machine. +type VMGpu struct { + + // The device ID of the GPU. + DeviceID *int64 `json:"device_id,omitempty" mapstructure:"device_id,omitempty"` + + // The mode of this GPU. + Mode *string `json:"mode,omitempty" mapstructure:"mode,omitempty"` + + // The vendor of the GPU. + Vendor *string `json:"vendor,omitempty" mapstructure:"vendor,omitempty"` +} + +// GuestCustomizationCloudInit If this field is set, the guest will be customized using cloud-init. Either user_data or +// custom_key_values should be provided. If custom_key_values are provided then the user data will be generated using these +// key-value pairs. +type GuestCustomizationCloudInit struct { + + // Generic key value pair used for custom attributes + CustomKeyValues map[string]string `json:"custom_key_values,omitempty" mapstructure:"custom_key_values,omitempty"` + + // The contents of the meta_data configuration for cloud-init. This can be formatted as YAML or JSON. The value must + // be base64 encoded. + MetaData *string `json:"meta_data,omitempty" mapstructure:"meta_data,omitempty"` + + // The contents of the user_data configuration for cloud-init. This can be formatted as YAML, JSON, or could be a + // shell script. The value must be base64 encoded. + UserData *string `json:"user_data,omitempty" mapstructure:"user_data,omitempty"` +} + +// GuestCustomizationSysprep If this field is set, the guest will be customized using Sysprep. Either unattend_xml or +// custom_key_values should be provided. If custom_key_values are provided then the unattended answer file will be +// generated using these key-value pairs. 
+type GuestCustomizationSysprep struct { + + // Generic key value pair used for custom attributes + CustomKeyValues map[string]string `json:"custom_key_values,omitempty" mapstructure:"custom_key_values,omitempty"` + + // Whether the guest will be freshly installed using this unattend configuration, or whether this unattend + // configuration will be applied to a pre-prepared image. Default is \"PREPARED\". + InstallType *string `json:"install_type,omitempty" mapstructure:"install_type,omitempty"` + + // This field contains a Sysprep unattend xml definition, as a *string. The value must be base64 encoded. + UnattendXML *string `json:"unattend_xml,omitempty" mapstructure:"unattend_xml,omitempty"` +} + +// GuestCustomization VM guests may be customized at boot time using one of several different methods. Currently, +// cloud-init w/ ConfigDriveV2 (for Linux VMs) and Sysprep (for Windows VMs) are supported. Only ONE OF sysprep or +// cloud_init should be provided. Note that guest customization can currently only be set during VM creation. Attempting +// to change it after creation will result in an error. Additional properties can be specified. For example - in the +// context of VM template creation if \"override_script\" is set to \"True\" then the deployer can upload their own +// custom script. +type GuestCustomization struct { + CloudInit *GuestCustomizationCloudInit `json:"cloud_init,omitempty" mapstructure:"cloud_init,omitempty"` + + // Flag to allow override of customization by deployer. + IsOverridable *bool `json:"is_overridable,omitempty" mapstructure:"is_overridable,omitempty"` + + Sysprep *GuestCustomizationSysprep `json:"sysprep,omitempty" mapstructure:"sysprep,omitempty"` +} + +// VMGuestPowerStateTransitionConfig Extra configs related to power state transition. +type VMGuestPowerStateTransitionConfig struct { + + // Indicates whether to execute set script before ngt shutdown/reboot. 
+ EnableScriptExec *bool `json:"enable_script_exec,omitempty" mapstructure:"enable_script_exec,omitempty"` + + // Indicates whether to abort ngt shutdown/reboot if script fails. + ShouldFailOnScriptFailure *bool `json:"should_fail_on_script_failure,omitempty" mapstructure:"should_fail_on_script_failure,omitempty"` +} + +// VMPowerStateMechanism Indicates the mechanism guiding the VM power state transition. Currently used for the transition +// to \"OFF\" state. +type VMPowerStateMechanism struct { + GuestTransitionConfig *VMGuestPowerStateTransitionConfig `json:"guest_transition_config,omitempty" mapstructure:"guest_transition_config,omitempty"` + + // Power state mechanism (ACPI/GUEST/HARD). + Mechanism *string `json:"mechanism,omitempty" mapstructure:"mechanism,omitempty"` +} + +// VMDiskDeviceProperties ... +type VMDiskDeviceProperties struct { + DeviceType *string `json:"device_type,omitempty" mapstructure:"device_type,omitempty"` + DiskAddress *DiskAddress `json:"disk_address,omitempty" mapstructure:"disk_address,omitempty"` +} + +// StorageContainerReference references to a kind. Either one of (kind, uuid) or url needs to be specified. +type StorageContainerReference struct { + URL string `json:"url,omitempty"` + Kind string `json:"kind,omitempty"` + UUID string `json:"uuid,omitempty"` + Name string `json:"name,omitempty"` +} + +// VMStorageConfig specifies the storage configuration parameters for VM disks. +type VMStorageConfig struct { + FlashMode string `json:"flash_mode,omitempty"` + StorageContainerReference *StorageContainerReference `json:"storage_container_reference,omitempty"` +} + +// VMDisk VirtualMachine Disk (VM Disk). +type VMDisk struct { + DataSourceReference *Reference `json:"data_source_reference,omitempty" mapstructure:"data_source_reference,omitempty"` + + DeviceProperties *VMDiskDeviceProperties `json:"device_properties,omitempty" mapstructure:"device_properties,omitempty"` + + // Size of the disk in Bytes. 
+ DiskSizeBytes *int64 `json:"disk_size_bytes,omitempty" mapstructure:"disk_size_bytes,omitempty"` + + // Size of the disk in MiB. Must match the size specified in 'disk_size_bytes' - rounded up to the nearest MiB - + // when that field is present. + DiskSizeMib *int64 `json:"disk_size_mib,omitempty" mapstructure:"disk_size_mib,omitempty"` + + // The device ID which is used to uniquely identify this particular disk. + UUID *string `json:"uuid,omitempty" mapstructure:"uuid,omitempty"` + + VolumeGroupReference *Reference `json:"volume_group_reference,omitempty" mapstructure:"volume_group_reference,omitempty"` + + // This preference specifies the storage configuration parameters for VM disks. + StorageConfig *VMStorageConfig `json:"storage_config,omitempty" mapstructure:"storage_config,omitempty"` +} + +// VMResources VM Resources Definition. +type VMResources struct { + + // Indicates which device the VM should boot from. + BootConfig *VMBootConfig `json:"boot_config,omitempty" mapstructure:"boot_config,omitempty"` + + // Disks attached to the VM. + DiskList []*VMDisk `json:"disk_list,omitempty" mapstructure:"disk_list,omitempty"` + + // GPUs attached to the VM. + GpuList []*VMGpu `json:"gpu_list,omitempty" mapstructure:"gpu_list,omitempty"` + + GuestCustomization *GuestCustomization `json:"guest_customization,omitempty" mapstructure:"guest_customization,omitempty"` + + // Guest OS Identifier. For ESX, refer to VMware documentation link + // https://www.vmware.com/support/orchestrator/doc/vro-vsphere65-api/html/VcVirtualMachineGuestOsIdentifier.html + // for the list of guest OS identifiers. + GuestOsID *string `json:"guest_os_id,omitempty" mapstructure:"guest_os_id,omitempty"` + + // Information regarding guest tools. + GuestTools *GuestToolsSpec `json:"guest_tools,omitempty" mapstructure:"guest_tools,omitempty"` + + // VM's hardware clock timezone in IANA TZDB format (America/Los_Angeles). 
+ HardwareClockTimezone *string `json:"hardware_clock_timezone,omitempty" mapstructure:"hardware_clock_timezone,omitempty"` + + // Memory size in MiB. + MemorySizeMib *int64 `json:"memory_size_mib,omitempty" mapstructure:"memory_size_mib,omitempty"` + + // NICs attached to the VM. + NicList []*VMNic `json:"nic_list,omitempty" mapstructure:"nic_list,omitempty"` + + // Number of threads per core + NumThreads *int64 `json:"num_threads_per_core,omitempty" mapstructure:"num_threads_per_core,omitempty"` + + // Number of vCPU sockets. + NumSockets *int64 `json:"num_sockets,omitempty" mapstructure:"num_sockets,omitempty"` + + // Number of vCPUs per socket. + NumVcpusPerSocket *int64 `json:"num_vcpus_per_socket,omitempty" mapstructure:"num_vcpus_per_socket,omitempty"` + + // *Reference to an entity that the VM should be cloned from. + ParentReference *Reference `json:"parent_reference,omitempty" mapstructure:"parent_reference,omitempty"` + + // The current or desired power state of the VM. + PowerState *string `json:"power_state,omitempty" mapstructure:"power_state,omitempty"` + + PowerStateMechanism *VMPowerStateMechanism `json:"power_state_mechanism,omitempty" mapstructure:"power_state_mechanism,omitempty"` + + // Indicates whether VGA console should be enabled or not. + VgaConsoleEnabled *bool `json:"vga_console_enabled,omitempty" mapstructure:"vga_console_enabled,omitempty"` + + // Indicates whether to passthrough the host’s CPU features to the guest. Enabling this will disable live migration of the VM. + EnableCPUPassthrough *bool `json:"enable_cpu_passthrough,omitempty" mapstructure:"enable_cpu_passthrough,omitempty"` + + // Information regarding vNUMA configuration. 
+ VMVnumaConfig *VMVnumaConfig `json:"vnuma_config,omitempty" mapstructure:"vnuma_config,omitempty"` + + SerialPortList []*VMSerialPort `json:"serial_port_list,omitempty" mapstructure:"serial_port_list,omitempty"` + + MachineType *string `json:"machine_type,omitempty" mapstructure:"machine_type,omitempty"` +} + +// VM An intentful representation of a vm spec +type VM struct { + AvailabilityZoneReference *Reference `json:"availability_zone_reference,omitempty" mapstructure:"availability_zone_reference,omitempty"` + + ClusterReference *Reference `json:"cluster_reference,omitempty" mapstructure:"cluster_reference,omitempty"` + + // A description for vm. + Description *string `json:"description,omitempty" mapstructure:"description,omitempty"` + + // vm Name. + Name *string `json:"name" mapstructure:"name"` + + Resources *VMResources `json:"resources,omitempty" mapstructure:"resources,omitempty"` +} + +// VMIntentInput ... +type VMIntentInput struct { + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + + Metadata *Metadata `json:"metadata" mapstructure:"metadata"` + + Spec *VM `json:"spec" mapstructure:"spec"` +} + +// MessageResource ... +type MessageResource struct { + + // Custom key-value details relevant to the status. + Details map[string]string `json:"details,omitempty" mapstructure:"details,omitempty"` + + // If state is ERROR, a message describing the error. + Message *string `json:"message" mapstructure:"message"` + + // If state is ERROR, a machine-readable snake-cased *string. + Reason *string `json:"reason" mapstructure:"reason"` +} + +// VMStatus The status of a REST API call. Only used when there is a failure to report. +type VMStatus struct { + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + + // The HTTP error code. 
+ Code *int64 `json:"code,omitempty" mapstructure:"code,omitempty"` + + // The kind name + Kind *string `json:"kind,omitempty" mapstructure:"kind,omitempty"` + + MessageList []*MessageResource `json:"message_list,omitempty" mapstructure:"message_list,omitempty"` + + State *string `json:"state,omitempty" mapstructure:"state,omitempty"` +} + +// VMNicOutputStatus Virtual Machine NIC Status. +type VMNicOutputStatus struct { + + // The Floating IP associated with the vnic. + FloatingIP *string `json:"floating_ip,omitempty" mapstructure:"floating_ip,omitempty"` + + // IP endpoints for the adapter. Currently, IPv4 addresses are supported. + IPEndpointList []*IPAddress `json:"ip_endpoint_list,omitempty" mapstructure:"ip_endpoint_list,omitempty"` + + // The MAC address for the adapter. + MacAddress *string `json:"mac_address,omitempty" mapstructure:"mac_address,omitempty"` + + // The model of this NIC. + Model *string `json:"model,omitempty" mapstructure:"model,omitempty"` + + NetworkFunctionChainReference *Reference `json:"network_function_chain_reference,omitempty" mapstructure:"network_function_chain_reference,omitempty"` + + // The type of this Network function NIC. Defaults to INGRESS. + NetworkFunctionNicType *string `json:"network_function_nic_type,omitempty" mapstructure:"network_function_nic_type,omitempty"` + + // The type of this NIC. Defaults to NORMAL_NIC. + NicType *string `json:"nic_type,omitempty" mapstructure:"nic_type,omitempty"` + + SubnetReference *Reference `json:"subnet_reference,omitempty" mapstructure:"subnet_reference,omitempty"` + + // The NIC's UUID, which is used to uniquely identify this particular NIC. This UUID may be used to refer to the NIC + // outside the context of the particular VM it is attached to. + UUID *string `json:"uuid,omitempty" mapstructure:"uuid,omitempty"` + + IsConnected *bool `json:"is_connected,omitempty" mapstructure:"is_connected,omitempty"` +} + +// NutanixGuestToolsStatus Information regarding Nutanix Guest Tools. 
// NutanixGuestToolsStatus describes the state of Nutanix Guest Tools (NGT) on a VM.
type NutanixGuestToolsStatus struct {
	// Version of Nutanix Guest Tools available on the cluster.
	AvailableVersion *string `json:"available_version,omitempty" mapstructure:"available_version,omitempty"`
	// Nutanix Guest Tools installed or not.
	NgtState *string `json:"ngt_state,omitempty" mapstructure:"ngt_state,omitempty"`
	// Desired mount state of Nutanix Guest Tools ISO.
	IsoMountState *string `json:"iso_mount_state,omitempty" mapstructure:"iso_mount_state,omitempty"`
	// Nutanix Guest Tools is enabled or not.
	State *string `json:"state,omitempty" mapstructure:"state,omitempty"`
	// Version of Nutanix Guest Tools installed on the VM.
	Version *string `json:"version,omitempty" mapstructure:"version,omitempty"`
	// Application names that are enabled.
	EnabledCapabilityList []*string `json:"enabled_capability_list,omitempty" mapstructure:"enabled_capability_list,omitempty"`
	// Credentials to login server.
	Credentials map[string]string `json:"credentials,omitempty" mapstructure:"credentials,omitempty"`
	// Version of the operating system on the VM.
	GuestOsVersion *string `json:"guest_os_version,omitempty" mapstructure:"guest_os_version,omitempty"`
	// Whether the VM is configured to take VSS snapshots through NGT.
	VSSSnapshotCapable *bool `json:"vss_snapshot_capable,omitempty" mapstructure:"vss_snapshot_capable,omitempty"`
	// Communication from VM to CVM is active or not.
	IsReachable *bool `json:"is_reachable,omitempty" mapstructure:"is_reachable,omitempty"`
	// Whether VM mobility drivers are installed in the VM.
	VMMobilityDriversInstalled *bool `json:"vm_mobility_drivers_installed,omitempty" mapstructure:"vm_mobility_drivers_installed,omitempty"`
}

// GuestToolsStatus Information regarding guest tools.
type GuestToolsStatus struct {

	// Nutanix Guest Tools information
	NutanixGuestTools *NutanixGuestToolsStatus `json:"nutanix_guest_tools,omitempty" mapstructure:"nutanix_guest_tools,omitempty"`
}

// VMGpuOutputStatus Graphics resource status information for the Virtual Machine.
type VMGpuOutputStatus struct {

	// The device ID of the GPU.
	DeviceID *int64 `json:"device_id,omitempty" mapstructure:"device_id,omitempty"`

	// Fraction of the physical GPU assigned.
	Fraction *int64 `json:"fraction,omitempty" mapstructure:"fraction,omitempty"`

	// GPU frame buffer size in MiB.
	FrameBufferSizeMib *int64 `json:"frame_buffer_size_mib,omitempty" mapstructure:"frame_buffer_size_mib,omitempty"`

	// Last determined guest driver version.
	GuestDriverVersion *string `json:"guest_driver_version,omitempty" mapstructure:"guest_driver_version,omitempty"`

	// The mode of this GPU.
	Mode *string `json:"mode,omitempty" mapstructure:"mode,omitempty"`

	// Name of the GPU resource.
	Name *string `json:"name,omitempty" mapstructure:"name,omitempty"`

	// Number of supported virtual display heads.
	NumVirtualDisplayHeads *int64 `json:"num_virtual_display_heads,omitempty" mapstructure:"num_virtual_display_heads,omitempty"`

	// GPU {segment:bus:device:function} (sbdf) address if assigned.
	PCIAddress *string `json:"pci_address,omitempty" mapstructure:"pci_address,omitempty"`

	// UUID of the GPU.
	UUID *string `json:"uuid,omitempty" mapstructure:"uuid,omitempty"`

	// The vendor of the GPU.
	Vendor *string `json:"vendor,omitempty" mapstructure:"vendor,omitempty"`
}

// GuestCustomizationStatus VM guests may be customized at boot time using one of several different methods. Currently,
// cloud-init w/ ConfigDriveV2 (for Linux VMs) and Sysprep (for Windows VMs) are supported. Only ONE OF sysprep or
// cloud_init should be provided. Note that guest customization can currently only be set during VM creation. Attempting
// to change it after creation will result in an error. Additional properties can be specified. For example - in the
// context of VM template creation if "override_script" is set to "True" then the deployer can upload their own
// custom script.
type GuestCustomizationStatus struct {
	CloudInit *GuestCustomizationCloudInit `json:"cloud_init,omitempty" mapstructure:"cloud_init,omitempty"`

	// Flag to allow override of customization by deployer.
	IsOverridable *bool `json:"is_overridable,omitempty" mapstructure:"is_overridable,omitempty"`

	Sysprep *GuestCustomizationSysprep `json:"sysprep,omitempty" mapstructure:"sysprep,omitempty"`
}

// VMResourcesDefStatus VM Resources Status Definition.
type VMResourcesDefStatus struct {

	// Indicates which device the VM should boot from.
	BootConfig *VMBootConfig `json:"boot_config,omitempty" mapstructure:"boot_config,omitempty"`

	// Disks attached to the VM.
	DiskList []*VMDisk `json:"disk_list,omitempty" mapstructure:"disk_list,omitempty"`

	// GPUs attached to the VM.
	GpuList []*VMGpuOutputStatus `json:"gpu_list,omitempty" mapstructure:"gpu_list,omitempty"`

	GuestCustomization *GuestCustomizationStatus `json:"guest_customization,omitempty" mapstructure:"guest_customization,omitempty"`

	// Guest OS Identifier. For ESX, refer to VMware documentation link
	// https://www.vmware.com/support/orchestrator/doc/vro-vsphere65-api/html/VcVirtualMachineGuestOsIdentifier.html
	// for the list of guest OS identifiers.
	GuestOsID *string `json:"guest_os_id,omitempty" mapstructure:"guest_os_id,omitempty"`

	// Information regarding guest tools.
	GuestTools *GuestToolsStatus `json:"guest_tools,omitempty" mapstructure:"guest_tools,omitempty"`

	// VM's hardware clock timezone in IANA TZDB format (America/Los_Angeles).
	HardwareClockTimezone *string `json:"hardware_clock_timezone,omitempty" mapstructure:"hardware_clock_timezone,omitempty"`

	HostReference *Reference `json:"host_reference,omitempty" mapstructure:"host_reference,omitempty"`

	// The hypervisor type for the hypervisor the VM is hosted on.
	HypervisorType *string `json:"hypervisor_type,omitempty" mapstructure:"hypervisor_type,omitempty"`

	// Memory size in MiB.
	MemorySizeMib *int64 `json:"memory_size_mib,omitempty" mapstructure:"memory_size_mib,omitempty"`

	// NICs attached to the VM.
	NicList []*VMNicOutputStatus `json:"nic_list,omitempty" mapstructure:"nic_list,omitempty"`

	// Number of vCPU sockets.
	NumSockets *int64 `json:"num_sockets,omitempty" mapstructure:"num_sockets,omitempty"`

	// Number of vCPUs per socket.
	NumVcpusPerSocket *int64 `json:"num_vcpus_per_socket,omitempty" mapstructure:"num_vcpus_per_socket,omitempty"`

	// Reference to the entity that the VM was cloned from.
	ParentReference *Reference `json:"parent_reference,omitempty" mapstructure:"parent_reference,omitempty"`

	// Current power state of the VM.
	PowerState *string `json:"power_state,omitempty" mapstructure:"power_state,omitempty"`

	PowerStateMechanism *VMPowerStateMechanism `json:"power_state_mechanism,omitempty" mapstructure:"power_state_mechanism,omitempty"`

	// Indicates whether VGA console has been enabled or not.
	VgaConsoleEnabled *bool `json:"vga_console_enabled,omitempty" mapstructure:"vga_console_enabled,omitempty"`

	// Indicates whether to passthrough the host's CPU features to the guest. Enabling this will disable live migration of the VM.
	EnableCPUPassthrough *bool `json:"enable_cpu_passthrough,omitempty" mapstructure:"enable_cpu_passthrough,omitempty"`

	// Information regarding vNUMA configuration.
	VnumaConfig *VMVnumaConfig `json:"vnuma_config,omitempty" mapstructure:"vnuma_config,omitempty"`

	SerialPortList []*VMSerialPort `json:"serial_port_list,omitempty" mapstructure:"serial_port_list,omitempty"`

	MachineType *string `json:"machine_type,omitempty" mapstructure:"machine_type,omitempty"`
}

// VMDefStatus An intentful representation of a vm status
type VMDefStatus struct {
	AvailabilityZoneReference *Reference `json:"availability_zone_reference,omitempty" mapstructure:"availability_zone_reference,omitempty"`

	ClusterReference *Reference `json:"cluster_reference,omitempty" mapstructure:"cluster_reference,omitempty"`

	// A description for vm.
	Description *string `json:"description,omitempty" mapstructure:"description,omitempty"`

	// Any error messages for the vm, if in an error state.
	MessageList []*MessageResource `json:"message_list,omitempty" mapstructure:"message_list,omitempty"`

	// vm Name.
	Name *string `json:"name,omitempty" mapstructure:"name,omitempty"`

	Resources *VMResourcesDefStatus `json:"resources,omitempty" mapstructure:"resources,omitempty"`

	// The state of the vm.
	State *string `json:"state,omitempty" mapstructure:"state,omitempty"`

	ExecutionContext *ExecutionContext `json:"execution_context,omitempty" mapstructure:"execution_context,omitempty"`
}

// ExecutionContext identifies the server-side task(s) spawned by an intentful operation.
type ExecutionContext struct {
	// NOTE(review): typed interface{} — task_uuid appears to be either a single UUID
	// string or a list of them depending on the API response; confirm against callers.
	TaskUUID interface{} `json:"task_uuid,omitempty" mapstructure:"task_uuid,omitempty"`
}

// VMIntentResponse Response object for intentful operations on a vm
type VMIntentResponse struct {
	APIVersion *string `json:"api_version" mapstructure:"api_version"`

	Metadata *Metadata `json:"metadata,omitempty" mapstructure:"metadata,omitempty"`

	Spec *VM `json:"spec,omitempty" mapstructure:"spec,omitempty"`

	Status *VMDefStatus `json:"status,omitempty" mapstructure:"status,omitempty"`
}

// DSMetadata All api calls that return a list will have this metadata block as input
type DSMetadata struct {

	// The filter in FIQL syntax used for the results.
	Filter *string `json:"filter,omitempty" mapstructure:"filter,omitempty"`

	// The kind name
	Kind *string `json:"kind,omitempty" mapstructure:"kind,omitempty"`

	// The number of records to retrieve relative to the offset
	Length *int64 `json:"length,omitempty" mapstructure:"length,omitempty"`

	// Offset from the start of the entity list
	Offset *int64 `json:"offset,omitempty" mapstructure:"offset,omitempty"`

	// The attribute to perform sort on
	SortAttribute *string `json:"sort_attribute,omitempty" mapstructure:"sort_attribute,omitempty"`

	// The sort order in which results are returned
	SortOrder *string `json:"sort_order,omitempty" mapstructure:"sort_order,omitempty"`
}

// VMIntentResource Response object for intentful operations on a vm
type VMIntentResource struct {
	APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"`

	Metadata *Metadata `json:"metadata" mapstructure:"metadata"`

	Spec *VM `json:"spec,omitempty" mapstructure:"spec,omitempty"`

	Status *VMDefStatus `json:"status,omitempty" mapstructure:"status,omitempty"`
}

// VMListIntentResponse Response object for intentful operation of vms
type VMListIntentResponse struct {
	APIVersion *string `json:"api_version" mapstructure:"api_version"`

	Entities []*VMIntentResource `json:"entities,omitempty" mapstructure:"entities,omitempty"`

	Metadata *ListMetadataOutput `json:"metadata" mapstructure:"metadata"`
}

// SubnetMetadata The subnet kind metadata
type SubnetMetadata struct {

	// Categories for the subnet
	Categories map[string]string `json:"categories,omitempty" mapstructure:"categories,omitempty"`

	// UTC date and time in RFC-3339 format when subnet was created
	CreationTime *time.Time `json:"creation_time,omitempty" mapstructure:"creation_time,omitempty"`

	// The kind name
	Kind *string `json:"kind" mapstructure:"kind"`

	// UTC date and time in RFC-3339 format when subnet was last updated
	LastUpdateTime *time.Time `json:"last_update_time,omitempty" mapstructure:"last_update_time,omitempty"`

	// subnet name
	Name *string `json:"name,omitempty" mapstructure:"name,omitempty"`

	OwnerReference *Reference `json:"owner_reference,omitempty" mapstructure:"owner_reference,omitempty"`

	// project reference
	ProjectReference *Reference `json:"project_reference,omitempty" mapstructure:"project_reference,omitempty"`

	// Hash of the spec. This will be returned from server.
	SpecHash *string `json:"spec_hash,omitempty" mapstructure:"spec_hash,omitempty"`

	// Version number of the latest spec.
	SpecVersion *int64 `json:"spec_version,omitempty" mapstructure:"spec_version,omitempty"`

	// subnet uuid
	UUID *string `json:"uuid,omitempty" mapstructure:"uuid,omitempty"`
}

// Address represents the Host address.
type Address struct {

	// Fully qualified domain name.
	FQDN *string `json:"fqdn,omitempty" mapstructure:"fqdn,omitempty"`

	// IPV4 address.
	IP *string `json:"ip,omitempty" mapstructure:"ip,omitempty"`

	// IPV6 address.
	IPV6 *string `json:"ipv6,omitempty" mapstructure:"ipv6,omitempty"`

	// Port Number
	Port *int64 `json:"port,omitempty" mapstructure:"port,omitempty"`
}

// IPPool represents IP pool.
type IPPool struct {

	// Range of IPs (example: 10.0.0.9 10.0.0.19).
	Range *string `json:"range,omitempty" mapstructure:"range,omitempty"`
}

// DHCPOptions Spec for defining DHCP options.
type DHCPOptions struct {
	BootFileName *string `json:"boot_file_name,omitempty" mapstructure:"boot_file_name,omitempty"`

	DomainName *string `json:"domain_name,omitempty" mapstructure:"domain_name,omitempty"`

	DomainNameServerList []*string `json:"domain_name_server_list,omitempty" mapstructure:"domain_name_server_list,omitempty"`

	DomainSearchList []*string `json:"domain_search_list,omitempty" mapstructure:"domain_search_list,omitempty"`

	TFTPServerName *string `json:"tftp_server_name,omitempty" mapstructure:"tftp_server_name,omitempty"`
}

// IPConfig represents the configuration of IP.
type IPConfig struct {

	// Default gateway IP address.
	DefaultGatewayIP *string `json:"default_gateway_ip,omitempty" mapstructure:"default_gateway_ip,omitempty"`

	DHCPOptions *DHCPOptions `json:"dhcp_options,omitempty" mapstructure:"dhcp_options,omitempty"`

	DHCPServerAddress *Address `json:"dhcp_server_address,omitempty" mapstructure:"dhcp_server_address,omitempty"`

	PoolList []*IPPool `json:"pool_list,omitempty" mapstructure:"pool_list,omitempty"`

	PrefixLength *int64 `json:"prefix_length,omitempty" mapstructure:"prefix_length,omitempty"`

	// Subnet IP address.
	SubnetIP *string `json:"subnet_ip,omitempty" mapstructure:"subnet_ip,omitempty"`
}

// SubnetResources represents Subnet creation/modification spec.
type SubnetResources struct {
	IPConfig *IPConfig `json:"ip_config,omitempty" mapstructure:"ip_config,omitempty"`

	NetworkFunctionChainReference *Reference `json:"network_function_chain_reference,omitempty" mapstructure:"network_function_chain_reference,omitempty"`

	SubnetType *string `json:"subnet_type" mapstructure:"subnet_type"`

	VlanID *int64 `json:"vlan_id,omitempty" mapstructure:"vlan_id,omitempty"`

	VswitchName *string `json:"vswitch_name,omitempty" mapstructure:"vswitch_name,omitempty"`
}

// Subnet An intentful representation of a subnet spec
type Subnet struct {
	AvailabilityZoneReference *Reference `json:"availability_zone_reference,omitempty" mapstructure:"availability_zone_reference,omitempty"`

	ClusterReference *Reference `json:"cluster_reference,omitempty" mapstructure:"cluster_reference,omitempty"`

	// A description for subnet.
	Description *string `json:"description,omitempty" mapstructure:"description,omitempty"`

	// subnet Name.
	Name *string `json:"name" mapstructure:"name"`

	Resources *SubnetResources `json:"resources,omitempty" mapstructure:"resources,omitempty"`
}

// SubnetIntentInput An intentful representation of a subnet
type SubnetIntentInput struct {
	APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"`

	Metadata *Metadata `json:"metadata" mapstructure:"metadata"`

	Spec *Subnet `json:"spec" mapstructure:"spec"`
}

// SubnetStatus represents The status of a REST API call. Only used when there is a failure to report.
type SubnetStatus struct {
	APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"`

	// The HTTP error code.
	Code *int64 `json:"code,omitempty" mapstructure:"code,omitempty"`

	// The kind name
	Kind *string `json:"kind,omitempty" mapstructure:"kind,omitempty"`

	MessageList []*MessageResource `json:"message_list,omitempty" mapstructure:"message_list,omitempty"`

	State *string `json:"state,omitempty" mapstructure:"state,omitempty"`
}

// SubnetResourcesDefStatus represents a Subnet creation/modification status.
type SubnetResourcesDefStatus struct {
	IPConfig *IPConfig `json:"ip_config,omitempty" mapstructure:"ip_config,omitempty"`

	NetworkFunctionChainReference *Reference `json:"network_function_chain_reference,omitempty" mapstructure:"network_function_chain_reference,omitempty"`

	SubnetType *string `json:"subnet_type" mapstructure:"subnet_type"`

	VlanID *int64 `json:"vlan_id,omitempty" mapstructure:"vlan_id,omitempty"`

	VswitchName *string `json:"vswitch_name,omitempty" mapstructure:"vswitch_name,omitempty"`
}

// SubnetDefStatus An intentful representation of a subnet status
type SubnetDefStatus struct {
	AvailabilityZoneReference *Reference `json:"availability_zone_reference,omitempty" mapstructure:"availability_zone_reference,omitempty"`

	ClusterReference *Reference `json:"cluster_reference,omitempty" mapstructure:"cluster_reference,omitempty"`

	// A description for subnet.
	Description *string `json:"description" mapstructure:"description"`

	// Any error messages for the subnet, if in an error state.
	MessageList []*MessageResource `json:"message_list,omitempty" mapstructure:"message_list,omitempty"`

	// subnet Name.
	Name *string `json:"name" mapstructure:"name"`

	Resources *SubnetResourcesDefStatus `json:"resources,omitempty" mapstructure:"resources,omitempty"`

	// The state of the subnet.
	State *string `json:"state,omitempty" mapstructure:"state,omitempty"`

	ExecutionContext *ExecutionContext `json:"execution_context,omitempty" mapstructure:"execution_context,omitempty"`
}

// SubnetIntentResponse represents the response object for intentful operations on a subnet
type SubnetIntentResponse struct {
	APIVersion *string `json:"api_version" mapstructure:"api_version"`

	Metadata *Metadata `json:"metadata,omitempty" mapstructure:"metadata,omitempty"`

	Spec *Subnet `json:"spec,omitempty" mapstructure:"spec,omitempty"`

	Status *SubnetDefStatus `json:"status,omitempty" mapstructure:"status,omitempty"`
}

// SubnetIntentResource represents Response object for intentful operations on a subnet
type SubnetIntentResource struct {
	APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"`

	Metadata *Metadata `json:"metadata" mapstructure:"metadata"`

	Spec *Subnet `json:"spec,omitempty" mapstructure:"spec,omitempty"`

	Status *SubnetDefStatus `json:"status,omitempty" mapstructure:"status,omitempty"`
}

// SubnetListIntentResponse represents the response object for intentful operation of subnets
type SubnetListIntentResponse struct {
	APIVersion *string `json:"api_version" mapstructure:"api_version"`

	Entities []*SubnetIntentResponse `json:"entities,omitempty" mapstructure:"entities,omitempty"`

	Metadata *ListMetadataOutput `json:"metadata" mapstructure:"metadata"`
}

// SubnetListMetadata is the metadata block for subnet list requests.
type SubnetListMetadata struct {

	// The filter in FIQL syntax used for the results.
	Filter *string `json:"filter,omitempty" mapstructure:"filter,omitempty"`

	// The kind name
	Kind *string `json:"kind,omitempty" mapstructure:"kind,omitempty"`

	// The number of records to retrieve relative to the offset
	Length *int64 `json:"length,omitempty" mapstructure:"length,omitempty"`

	// Offset from the start of the entity list
	Offset *int64 `json:"offset,omitempty" mapstructure:"offset,omitempty"`

	// The attribute to perform sort on
	SortAttribute *string `json:"sort_attribute,omitempty" mapstructure:"sort_attribute,omitempty"`

	// The sort order in which results are returned
	SortOrder *string `json:"sort_order,omitempty" mapstructure:"sort_order,omitempty"`
}

// Checksum represents the image checksum
type Checksum struct {
	ChecksumAlgorithm *string `json:"checksum_algorithm" mapstructure:"checksum_algorithm"`
	ChecksumValue     *string `json:"checksum_value" mapstructure:"checksum_value"`
}

// ImageVersionResources The image version, which is composed of a product name and product version.
type ImageVersionResources struct {

	// Name of the producer/distribution of the image. For example windows or red hat.
	ProductName *string `json:"product_name" mapstructure:"product_name"`

	// Version string for the disk image.
	ProductVersion *string `json:"product_version" mapstructure:"product_version"`
}

// ImageResources describes the image spec resources object.
type ImageResources struct {

	// The supported CPU architecture for a disk image.
	Architecture *string `json:"architecture,omitempty" mapstructure:"architecture,omitempty"`

	// Checksum of the image. The checksum is used for image validation if the image has a source specified. For images
	// that do not have their source specified the checksum is generated by the image service.
	Checksum *Checksum `json:"checksum,omitempty" mapstructure:"checksum,omitempty"`

	// The type of image.
	ImageType *string `json:"image_type,omitempty" mapstructure:"image_type,omitempty"`

	// The source URI points at the location of the source image which is used to create/update image.
	SourceURI *string `json:"source_uri,omitempty" mapstructure:"source_uri,omitempty"`

	// The image version
	Version *ImageVersionResources `json:"version,omitempty" mapstructure:"version,omitempty"`

	// Reference to the source image such as 'vm_disk'.
	DataSourceReference *Reference `json:"data_source_reference,omitempty" mapstructure:"data_source_reference,omitempty"`
}

// Image An intentful representation of a image spec
type Image struct {

	// A description for image.
	Description *string `json:"description,omitempty" mapstructure:"description,omitempty"`

	// image Name.
	Name *string `json:"name,omitempty" mapstructure:"name,omitempty"`

	Resources *ImageResources `json:"resources" mapstructure:"resources"`
}

// ImageMetadata The image kind metadata
type ImageMetadata struct {

	// Categories for the image
	Categories map[string]string `json:"categories,omitempty" mapstructure:"categories,omitempty"`

	// UTC date and time in RFC-3339 format when vm was created
	CreationTime *time.Time `json:"creation_time,omitempty" mapstructure:"creation_time,omitempty"`

	// The kind name
	Kind *string `json:"kind" mapstructure:"kind"`

	// UTC date and time in RFC-3339 format when image was last updated
	LastUpdateTime *time.Time `json:"last_update_time,omitempty" mapstructure:"last_update_time,omitempty"`

	// image name
	Name *string `json:"name,omitempty" mapstructure:"name,omitempty"`

	// project reference
	ProjectReference *Reference `json:"project_reference,omitempty" mapstructure:"project_reference,omitempty"`

	OwnerReference *Reference `json:"owner_reference,omitempty" mapstructure:"owner_reference,omitempty"`

	// Hash of the spec. This will be returned from server.
	SpecHash *string `json:"spec_hash,omitempty" mapstructure:"spec_hash,omitempty"`

	// Version number of the latest spec.
	SpecVersion *int64 `json:"spec_version,omitempty" mapstructure:"spec_version,omitempty"`

	// image uuid
	UUID *string `json:"uuid,omitempty" mapstructure:"uuid,omitempty"`
}

// ImageIntentInput An intentful representation of a image
type ImageIntentInput struct {
	APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"`

	Metadata *Metadata `json:"metadata,omitempty" mapstructure:"metadata,omitempty"`

	Spec *Image `json:"spec,omitempty" mapstructure:"spec,omitempty"`
}

// ImageStatus represents the status of a REST API call. Only used when there is a failure to report.
type ImageStatus struct {
	APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"`

	// The HTTP error code.
	Code *int64 `json:"code,omitempty" mapstructure:"code,omitempty"`

	// The kind name
	Kind *string `json:"kind,omitempty" mapstructure:"kind,omitempty"`

	MessageList []*MessageResource `json:"message_list,omitempty" mapstructure:"message_list,omitempty"`

	State *string `json:"state,omitempty" mapstructure:"state,omitempty"`
}

// ImageVersionStatus represents the image version, which is composed of a product name and product version.
type ImageVersionStatus struct {

	// Name of the producer/distribution of the image. For example windows or red hat.
	ProductName *string `json:"product_name" mapstructure:"product_name"`

	// Version string for the disk image.
	ProductVersion *string `json:"product_version" mapstructure:"product_version"`
}

// ImageResourcesDefStatus describes the image status resources object.
type ImageResourcesDefStatus struct {

	// The supported CPU architecture for a disk image.
	Architecture *string `json:"architecture,omitempty" mapstructure:"architecture,omitempty"`

	// Checksum of the image. The checksum is used for image validation if the image has a source specified. For images
	// that do not have their source specified the checksum is generated by the image service.
	Checksum *Checksum `json:"checksum,omitempty" mapstructure:"checksum,omitempty"`

	// The type of image.
	ImageType *string `json:"image_type,omitempty" mapstructure:"image_type,omitempty"`

	// List of URIs where the raw image data can be accessed.
	RetrievalURIList []*string `json:"retrieval_uri_list,omitempty" mapstructure:"retrieval_uri_list,omitempty"`

	// The size of the image in bytes.
	SizeBytes *int64 `json:"size_bytes,omitempty" mapstructure:"size_bytes,omitempty"`

	// The source URI points at the location of the source image which is used to create/update image.
	SourceURI *string `json:"source_uri,omitempty" mapstructure:"source_uri,omitempty"`

	// The image version
	Version *ImageVersionStatus `json:"version,omitempty" mapstructure:"version,omitempty"`
}

// ImageDefStatus represents an intentful representation of a image status
type ImageDefStatus struct {
	AvailabilityZoneReference *Reference `json:"availability_zone_reference,omitempty" mapstructure:"availability_zone_reference,omitempty"`

	ClusterReference *Reference `json:"cluster_reference,omitempty" mapstructure:"cluster_reference,omitempty"`

	// A description for image.
	Description *string `json:"description,omitempty" mapstructure:"description,omitempty"`

	// Any error messages for the image, if in an error state.
	MessageList []*MessageResource `json:"message_list,omitempty" mapstructure:"message_list,omitempty"`

	// image Name.
	Name *string `json:"name" mapstructure:"name"`

	// NOTE(review): value type here (not *ImageResourcesDefStatus), unlike the other
	// DefStatus types in this file — confirm whether that asymmetry is intentional.
	Resources ImageResourcesDefStatus `json:"resources" mapstructure:"resources"`

	// The state of the image.
	State *string `json:"state,omitempty" mapstructure:"state,omitempty"`

	ExecutionContext *ExecutionContext `json:"execution_context,omitempty" mapstructure:"execution_context,omitempty"`
}

// ImageIntentResponse represents the response object for intentful operations on a image
type ImageIntentResponse struct {
	APIVersion *string `json:"api_version" mapstructure:"api_version"`

	Metadata *Metadata `json:"metadata" mapstructure:"metadata"`

	Spec *Image `json:"spec,omitempty" mapstructure:"spec,omitempty"`

	Status *ImageDefStatus `json:"status,omitempty" mapstructure:"status,omitempty"`
}

// ImageListMetadata represents metadata input
type ImageListMetadata struct {

	// The filter in FIQL syntax used for the results.
	Filter *string `json:"filter,omitempty" mapstructure:"filter,omitempty"`

	// The kind name
	Kind *string `json:"kind,omitempty" mapstructure:"kind,omitempty"`

	// The number of records to retrieve relative to the offset
	Length *int64 `json:"length,omitempty" mapstructure:"length,omitempty"`

	// Offset from the start of the entity list
	Offset *int64 `json:"offset,omitempty" mapstructure:"offset,omitempty"`

	// The attribute to perform sort on
	SortAttribute *string `json:"sort_attribute,omitempty" mapstructure:"sort_attribute,omitempty"`

	// The sort order in which results are returned
	SortOrder *string `json:"sort_order,omitempty" mapstructure:"sort_order,omitempty"`
}

// ImageIntentResource represents the response object for intentful operations on a image
type ImageIntentResource struct {
	APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"`

	Metadata *Metadata `json:"metadata" mapstructure:"metadata"`

	Spec *Image `json:"spec,omitempty" mapstructure:"spec,omitempty"`

	Status *ImageDefStatus `json:"status,omitempty" mapstructure:"status,omitempty"`
}

// ImageListIntentResponse represents the response object for intentful operation of images
type ImageListIntentResponse struct {
	APIVersion *string `json:"api_version" mapstructure:"api_version"`

	Entities []*ImageIntentResponse `json:"entities,omitempty" mapstructure:"entities,omitempty"`

	Metadata *ListMetadataOutput `json:"metadata" mapstructure:"metadata"`
}

// ClusterListIntentResponse is the response object for intentful operation of clusters.
type ClusterListIntentResponse struct {
	APIVersion *string                  `json:"api_version" mapstructure:"api_version"`
	Entities   []*ClusterIntentResponse `json:"entities,omitempty" mapstructure:"entities,omitempty"`
	Metadata   *ListMetadataOutput      `json:"metadata" mapstructure:"metadata"`
}

// ClusterIntentResponse is the response object for intentful operations on a cluster.
type ClusterIntentResponse struct {
	APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"`

	Metadata *Metadata `json:"metadata" mapstructure:"metadata"`

	Spec *Cluster `json:"spec,omitempty" mapstructure:"spec,omitempty"`

	Status *ClusterDefStatus `json:"status,omitempty" mapstructure:"status,omitempty"`
}

// Cluster is an intentful representation of a cluster spec.
type Cluster struct {
	Name      *string          `json:"name,omitempty" mapstructure:"name,omitempty"`
	Resources *ClusterResource `json:"resources,omitempty" mapstructure:"resources,omitempty"`
}

// ClusterDefStatus is an intentful representation of a cluster status.
type ClusterDefStatus struct {
	State       *string            `json:"state,omitempty" mapstructure:"state,omitempty"`
	MessageList []*MessageResource `json:"message_list,omitempty" mapstructure:"message_list,omitempty"`
	Name        *string            `json:"name,omitempty" mapstructure:"name,omitempty"`
	Resources   *ClusterObj        `json:"resources,omitempty" mapstructure:"resources,omitempty"`
}

// ClusterObj holds the resources reported in a cluster status: nodes, config,
// network, analysis data, and runtime status flags.
type ClusterObj struct {
	Nodes             *ClusterNodes    `json:"nodes,omitempty" mapstructure:"nodes,omitempty"`
	Config            *ClusterConfig   `json:"config,omitempty" mapstructure:"config,omitempty"`
	Network           *ClusterNetwork  `json:"network,omitempty" mapstructure:"network,omitempty"`
	Analysis          *ClusterAnalysis `json:"analysis,omitempty" mapstructure:"analysis,omitempty"`
	RuntimeStatusList []*string        `json:"runtime_status_list,omitempty" mapstructure:"runtime_status_list,omitempty"`
}

// ClusterNodes lists the hypervisor servers that make up the cluster.
type ClusterNodes struct {
	HypervisorServerList []*HypervisorServer `json:"hypervisor_server_list,omitempty" mapstructure:"hypervisor_server_list,omitempty"`
}

// SoftwareMapValues describes a single software entry (type, status, version) in a SoftwareMap.
type SoftwareMapValues struct {
	SoftwareType *string `json:"software_type,omitempty" mapstructure:"software_type,omitempty"`
	Status       *string `json:"status,omitempty" mapstructure:"status,omitempty"`
	Version      *string `json:"version,omitempty" mapstructure:"version,omitempty"`
}

// SoftwareMap reports the NCC and NOS software installed on the cluster.
type SoftwareMap struct {
	NCC *SoftwareMapValues `json:"ncc,omitempty" mapstructure:"ncc,omitempty"`
	NOS *SoftwareMapValues `json:"nos,omitempty" mapstructure:"nos,omitempty"`
}

// ClusterConfig describes the configuration of a cluster as reported in its status.
type ClusterConfig struct {
	GpuDriverVersion              *string                   `json:"gpu_driver_version,omitempty" mapstructure:"gpu_driver_version,omitempty"`
	ClientAuth                    *ClientAuth               `json:"client_auth,omitempty" mapstructure:"client_auth,omitempty"`
	AuthorizedPublicKeyList       []*PublicKey              `json:"authorized_public_key_list,omitempty" mapstructure:"authorized_public_key_list,omitempty"`
	SoftwareMap                   *SoftwareMap              `json:"software_map,omitempty" mapstructure:"software_map,omitempty"`
	EncryptionStatus              *string                   `json:"encryption_status,omitempty" mapstructure:"encryption_status,omitempty"`
	SslKey                        *SslKey                   `json:"ssl_key,omitempty" mapstructure:"ssl_key,omitempty"`
	ServiceList                   []*string                 `json:"service_list,omitempty" mapstructure:"service_list,omitempty"`
	SupportedInformationVerbosity *string                   `json:"supported_information_verbosity,omitempty" mapstructure:"supported_information_verbosity,omitempty"`
	CertificationSigningInfo      *CertificationSigningInfo `json:"certification_signing_info,omitempty" mapstructure:"certification_signing_info,omitempty"`
	RedundancyFactor              *int64                    `json:"redundancy_factor,omitempty" mapstructure:"redundancy_factor,omitempty"`
	ExternalConfigurations        *ExternalConfigurations   `json:"external_configurations,omitempty" mapstructure:"external_configurations,omitempty"`
	OperationMode                 *string                   `json:"operation_mode,omitempty" mapstructure:"operation_mode,omitempty"`
	CaCertificateList             []*CaCert                 `json:"ca_certificate_list,omitempty" mapstructure:"ca_certificate_list,omitempty"`
	EnabledFeatureList            []*string                 `json:"enabled_feature_list,omitempty" mapstructure:"enabled_feature_list,omitempty"`
	IsAvailable                   *bool                     `json:"is_available,omitempty" mapstructure:"is_available,omitempty"`
	Build                         *BuildInfo                `json:"build,omitempty" mapstructure:"build,omitempty"`
	Timezone                      *string                   `json:"timezone,omitempty" mapstructure:"timezone,omitempty"`
	ClusterArch                   *string                   `json:"cluster_arch,omitempty" mapstructure:"cluster_arch,omitempty"`
	ManagementServerList          []*ClusterManagementServer `json:"management_server_list,omitempty" mapstructure:"management_server_list,omitempty"`
}

// ClusterManagementServer describes a management server registered with the cluster.
type ClusterManagementServer struct {
	IP         *string   `json:"ip,omitempty" mapstructure:"ip,omitempty"`
	DrsEnabled *bool     `json:"drs_enabled,omitempty" mapstructure:"drs_enabled,omitempty"`
	StatusList []*string `json:"status_list,omitempty" mapstructure:"status_list,omitempty"`
	Type       *string   `json:"type,omitempty" mapstructure:"type,omitempty"`
}

// BuildInfo describes the software build running on the cluster.
type BuildInfo struct {
	CommitID      *string `json:"commit_id,omitempty" mapstructure:"commit_id,omitempty"`
	FullVersion   *string `json:"full_version,omitempty" mapstructure:"full_version,omitempty"`
	CommitDate    *string `json:"commit_date,omitempty" mapstructure:"commit_date,omitempty"`
	Version       *string `json:"version,omitempty" mapstructure:"version,omitempty"`
	ShortCommitID *string `json:"short_commit_id,omitempty" mapstructure:"short_commit_id,omitempty"`
	BuildType     *string `json:"build_type,omitempty" mapstructure:"build_type,omitempty"`
}

// CaCert is a named CA certificate installed on the cluster.
type CaCert struct {
	CaName      *string `json:"ca_name,omitempty" mapstructure:"ca_name,omitempty"`
	Certificate *string `json:"certificate,omitempty" mapstructure:"certificate,omitempty"`
}

// ExternalConfigurations holds configuration for external integrations (currently Citrix).
type ExternalConfigurations struct {
	CitrixConnectorConfig *CitrixConnectorConfigDetails `json:"citrix_connector_config,omitempty" mapstructure:"citrix_connector_config,omitempty"`
}

// CitrixConnectorConfigDetails holds Citrix connector credentials and resource location.
type CitrixConnectorConfigDetails struct {
	CitrixVMReferenceList *[]Reference            `json:"citrix_vm_reference_list,omitempty" mapstructure:"citrix_vm_reference_list,omitempty"`
	ClientSecret          *string                 `json:"client_secret,omitempty" mapstructure:"client_secret,omitempty"`
	CustomerID            *string                 `json:"customer_id,omitempty" mapstructure:"customer_id,omitempty"`
	ClientID              *string                 `json:"client_id,omitempty" mapstructure:"client_id,omitempty"`
	ResourceLocation      *CitrixResourceLocation `json:"resource_location,omitempty" mapstructure:"resource_location,omitempty"`
}

// CitrixResourceLocation identifies a Citrix resource location by id and name.
type CitrixResourceLocation struct {
	ID   *string `json:"id,omitempty" mapstructure:"id,omitempty"`
	Name *string `json:"name,omitempty" mapstructure:"name,omitempty"`
}

// SslKey describes the cluster's SSL key and its signing information.
type SslKey struct {
	KeyType        *string                   `json:"key_type,omitempty" mapstructure:"key_type,omitempty"`
	KeyName        *string                   `json:"key_name,omitempty" mapstructure:"key_name,omitempty"`
	SigningInfo    *CertificationSigningInfo `json:"signing_info,omitempty" mapstructure:"signing_info,omitempty"`
	ExpireDatetime *string                   `json:"expire_datetime,omitempty" mapstructure:"expire_datetime,omitempty"`
}

// CertificationSigningInfo holds the subject fields used when signing a certificate.
type CertificationSigningInfo struct {
	City             *string `json:"city,omitempty" mapstructure:"city,omitempty"`
	CommonNameSuffix *string `json:"common_name_suffix,omitempty" mapstructure:"common_name_suffix,omitempty"`
	State            *string `json:"state,omitempty" mapstructure:"state,omitempty"`
	CountryCode      *string `json:"country_code,omitempty" mapstructure:"country_code,omitempty"`
	CommonName       *string `json:"common_name,omitempty" mapstructure:"common_name,omitempty"`
	Organization     *string `json:"organization,omitempty" mapstructure:"organization,omitempty"`
	EmailAddress     *string `json:"email_address,omitempty" mapstructure:"email_address,omitempty"`
}

// PublicKey is a named public key.
type PublicKey struct {
	Key  *string `json:"key,omitempty" mapstructure:"key,omitempty"`
	Name *string `json:"name,omitempty" mapstructure:"name,omitempty"`
}

// ClientAuth describes client-authentication settings (status, CA chain, name).
type ClientAuth struct {
	Status  *string `json:"status,omitempty" mapstructure:"status,omitempty"`
	CaChain *string `json:"ca_chain,omitempty" mapstructure:"ca_chain,omitempty"`
	Name    *string `json:"name,omitempty" mapstructure:"name,omitempty"`
}

// HypervisorServer identifies a hypervisor host by IP, version, and type.
type HypervisorServer struct {
	IP      *string `json:"ip,omitempty" mapstructure:"ip,omitempty"`
	Version *string `json:"version,omitempty" mapstructure:"version,omitempty"`
	Type    *string `json:"type,omitempty" mapstructure:"type,omitempty"`
}

// ClusterResource holds the resources of a cluster spec (config, network, runtime status).
type ClusterResource struct {
	Config            *ConfigClusterSpec `json:"config,omitempty" mapstructure:"config,omitempty"`
	Network           *ClusterNetwork    `json:"network,omitempty" mapstructure:"network,omitempty"`
	RunTimeStatusList []*string          `json:"runtime_status_list,omitempty" mapstructure:"runtime_status_list,omitempty"`
}

// ConfigClusterSpec describes cluster configuration in a cluster spec (input side).
type ConfigClusterSpec struct {
	GpuDriverVersion        *string      `json:"gpu_driver_version,omitempty" mapstructure:"gpu_driver_version,omitempty"`
	ClientAuth              *ClientAuth  `json:"client_auth,omitempty" mapstructure:"client_auth,omitempty"`
	AuthorizedPublicKeyList []*PublicKey `json:"authorized_public_key_list,omitempty" mapstructure:"authorized_public_key_list,omitempty"`
	// NOTE(review): free-form map here, vs the typed *SoftwareMap in ClusterConfig —
	// confirm the asymmetry is intentional on the spec side.
	SoftwareMap map[string]interface{} `json:"software_map,omitempty" mapstructure:"software_map,omitempty"`
	// NOTE(review): plain string (not *string) unlike sibling fields — zero value "" is
	// dropped by omitempty, so behavior matches, but the inconsistency is worth confirming.
	EncryptionStatus              string                      `json:"encryption_status,omitempty" mapstructure:"encryption_status,omitempty"`
	RedundancyFactor              *int64                      `json:"redundancy_factor,omitempty" mapstructure:"redundancy_factor,omitempty"`
	CertificationSigningInfo      *CertificationSigningInfo   `json:"certification_signing_info,omitempty" mapstructure:"certification_signing_info,omitempty"`
	SupportedInformationVerbosity *string                     `json:"supported_information_verbosity,omitempty" mapstructure:"supported_information_verbosity,omitempty"`
	ExternalConfigurations        *ExternalConfigurationsSpec `json:"external_configurations,omitempty" mapstructure:"external_configurations,omitempty"`
	EnabledFeatureList            []*string                   `json:"enabled_feature_list,omitempty" mapstructure:"enabled_feature_list,omitempty"`
	Timezone                      *string                     `json:"timezone,omitempty" mapstructure:"timezone,omitempty"`
	OperationMode                 *string                     `json:"operation_mode,omitempty" mapstructure:"operation_mode,omitempty"`
}

// ExternalConfigurationsSpec holds external integration configuration in a cluster spec.
type ExternalConfigurationsSpec struct {
	CitrixConnectorConfig *CitrixConnectorConfigDetailsSpec `json:"citrix_connector_config,omitempty" mapstructure:"citrix_connector_config,omitempty"`
}

// CitrixConnectorConfigDetailsSpec ...
+type CitrixConnectorConfigDetailsSpec struct { + CitrixVMReferenceList []*Reference `json:"citrix_connector_config,omitempty" mapstructure:"citrix_connector_config,omitempty"` + ClientSecret *string `json:"client_secret,omitempty" mapstructure:"client_secret,omitempty"` + CustomerID *string `json:"customer_id,omitempty" mapstructure:"customer_id,omitempty"` + ClientID *string `json:"client_id,omitempty" mapstructure:"client_id,omitempty"` + ResourceLocation *CitrixResourceLocationSpec `json:"resource_location,omitempty" mapstructure:"resource_location,omitempty"` +} + +// CitrixResourceLocationSpec ... +type CitrixResourceLocationSpec struct { + ID *string `json:"id,omitempty" mapstructure:"id,omitempty"` + Name *string `json:"name,omitempty" mapstructure:"name,omitempty"` +} + +// ClusterNetwork ... +type ClusterNetwork struct { + MasqueradingPort *int64 `json:"masquerading_port,omitempty" mapstructure:"masquerading_port,omitempty"` + MasqueradingIP *string `json:"masquerading_ip,omitempty" mapstructure:"masquerading_ip,omitempty"` + ExternalIP *string `json:"external_ip,omitempty" mapstructure:"external_ip,omitempty"` + HTTPProxyList []*ClusterNetworkEntity `json:"http_proxy_list,omitempty" mapstructure:"http_proxy_list,omitempty"` + SMTPServer *SMTPServer `json:"smtp_server,omitempty" mapstructure:"smtp_server,omitempty"` + NTPServerIPList []*string `json:"ntp_server_ip_list,omitempty" mapstructure:"ntp_server_ip_list,omitempty"` + ExternalSubnet *string `json:"external_subnet,omitempty" mapstructure:"external_subnet,omitempty"` + NFSSubnetWhitelist []*string `json:"nfs_subnet_whitelist,omitempty" mapstructure:"nfs_subnet_whitelist,omitempty"` + ExternalDataServicesIP *string `json:"external_data_services_ip,omitempty" mapstructure:"external_data_services_ip,omitempty"` + DomainServer *ClusterDomainServer `json:"domain_server,omitempty" mapstructure:"domain_server,omitempty"` + NameServerIPList []*string `json:"name_server_ip_list,omitempty" 
mapstructure:"name_server_ip_list,omitempty"` + HTTPProxyWhitelist []*HTTPProxyWhitelist `json:"http_proxy_whitelist,omitempty" mapstructure:"http_proxy_whitelist,omitempty"` + InternalSubnet *string `json:"internal_subnet,omitempty" mapstructure:"internal_subnet,omitempty"` +} + +// HTTPProxyWhitelist ... +type HTTPProxyWhitelist struct { + Target *string `json:"target,omitempty" mapstructure:"target,omitempty"` + TargetType *string `json:"target_type,omitempty" mapstructure:"target_type,omitempty"` +} + +// ClusterDomainServer ... +type ClusterDomainServer struct { + Nameserver *string `json:"nameserver,omitempty" mapstructure:"nameserver,omitempty"` + Name *string `json:"name,omitempty" mapstructure:"name,omitempty"` + DomainCredentials *Credentials `json:"external_data_services_ip,omitempty" mapstructure:"external_data_services_ip,omitempty"` +} + +// SMTPServer ... +type SMTPServer struct { + Type *string `json:"type,omitempty" mapstructure:"type,omitempty"` + EmailAddress *string `json:"email_address,omitempty" mapstructure:"email_address,omitempty"` + Server *ClusterNetworkEntity `json:"server,omitempty" mapstructure:"server,omitempty"` +} + +// ClusterNetworkEntity ... +type ClusterNetworkEntity struct { + Credentials *Credentials `json:"credentials,omitempty" mapstructure:"credentials,omitempty"` + ProxyTypeList []*string `json:"proxy_type_list,omitempty" mapstructure:"proxy_type_list,omitempty"` + Address *Address `json:"address,omitempty" mapstructure:"address,omitempty"` +} + +// Credentials ... +type Credentials struct { + Username *string `json:"username,omitempty" mapstructure:"username,omitempty"` + Password *string `json:"password,omitempty" mapstructure:"password,omitempty"` +} + +// VMEfficiencyMap ... 
+type VMEfficiencyMap struct { + BullyVMNum *string `json:"bully_vm_num,omitempty" mapstructure:"bully_vm_num,omitempty"` + ConstrainedVMNum *string `json:"constrained_vm_num,omitempty" mapstructure:"constrained_vm_num,omitempty"` + DeadVMNum *string `json:"dead_vm_num,omitempty" mapstructure:"dead_vm_num,omitempty"` + InefficientVMNum *string `json:"inefficient_vm_num,omitempty" mapstructure:"inefficient_vm_num,omitempty"` + OverprovisionedVMNum *string `json:"overprovisioned_vm_num,omitempty" mapstructure:"overprovisioned_vm_num,omitempty"` +} + +// ClusterAnalysis ... +type ClusterAnalysis struct { + VMEfficiencyMap *VMEfficiencyMap `json:"vm_efficiency_map,omitempty" mapstructure:"vm_efficiency_map,omitempty"` +} + +// CategoryListMetadata All api calls that return a list will have this metadata block as input +type CategoryListMetadata struct { + + // The filter in FIQL syntax used for the results. + Filter *string `json:"filter,omitempty" mapstructure:"filter,omitempty"` + + // The kind name + Kind *string `json:"kind,omitempty" mapstructure:"kind,omitempty"` + + // The number of records to retrieve relative to the offset + Length *int64 `json:"length,omitempty" mapstructure:"length,omitempty"` + + // Offset from the start of the entity list + Offset *int64 `json:"offset,omitempty" mapstructure:"offset,omitempty"` + + // The attribute to perform sort on + SortAttribute *string `json:"sort_attribute,omitempty" mapstructure:"sort_attribute,omitempty"` + + // The sort order in which results are returned + SortOrder *string `json:"sort_order,omitempty" mapstructure:"sort_order,omitempty"` + + // Total number of matched results. + TotalMatches *int64 `json:"total_matches,omitempty" mapstructure:"total_matches,omitempty"` +} + +// CategoryKeyStatus represents Category Key Definition. +type CategoryKeyStatus struct { + + // API version. + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + + // Description of the category. 
+ Description *string `json:"description,omitempty" mapstructure:"description,omitempty"` + + // Name of the category. + Name *string `json:"name" mapstructure:"name"` + + // Specifying whether its a system defined category. + SystemDefined *bool `json:"system_defined,omitempty" mapstructure:"system_defined,omitempty"` +} + +// CategoryKeyListResponse represents the category key list response. +type CategoryKeyListResponse struct { + + // API Version. + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + + Entities []*CategoryKeyStatus `json:"entities,omitempty" mapstructure:"entities,omitempty"` + + Metadata *CategoryListMetadata `json:"metadata,omitempty" mapstructure:"metadata,omitempty"` +} + +// CategoryKey represents category key definition. +type CategoryKey struct { + + // API version. + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + + // Description of the category. + Description *string `json:"description,omitempty" mapstructure:"description,omitempty"` + + // Name of the category. + Name *string `json:"name" mapstructure:"name"` +} + +// CategoryStatus represents The status of a REST API call. Only used when there is a failure to report. +type CategoryStatus struct { + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + + // The HTTP error code. + Code *int64 `json:"code,omitempty" mapstructure:"code,omitempty"` + + // The kind name + Kind *string `json:"kind,omitempty" mapstructure:"kind,omitempty"` + + MessageList []*MessageResource `json:"message_list,omitempty" mapstructure:"message_list,omitempty"` + + State *string `json:"state,omitempty" mapstructure:"state,omitempty"` +} + +// CategoryValueListResponse represents Category Value list response. 
+type CategoryValueListResponse struct { + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + + Entities []*CategoryValueStatus `json:"entities,omitempty" mapstructure:"entities,omitempty"` + + Metadata *CategoryListMetadata `json:"metadata,omitempty" mapstructure:"metadata,omitempty"` +} + +// CategoryValueStatus represents Category value definition. +type CategoryValueStatus struct { + + // API version. + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + + // Description of the category value. + Description *string `json:"description,omitempty" mapstructure:"description,omitempty"` + + // The name of the category. + Name *string `json:"name,omitempty" mapstructure:"name,omitempty"` + + // Specifying whether its a system defined category. + SystemDefined *bool `json:"system_defined,omitempty" mapstructure:"system_defined,omitempty"` + + // The value of the category. + Value *string `json:"value,omitempty" mapstructure:"value,omitempty"` +} + +// CategoryFilter represents A category filter. +type CategoryFilter struct { + + // List of kinds associated with this filter. + KindList []*string `json:"kind_list,omitempty" mapstructure:"kind_list,omitempty"` + + // A list of category key and list of values. + Params map[string][]string `json:"params,omitempty" mapstructure:"params,omitempty"` + + // The type of the filter being used. + Type *string `json:"type,omitempty" mapstructure:"type,omitempty"` +} + +// CategoryQueryInput represents Categories query input object. +type CategoryQueryInput struct { + + // API version. + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + + CategoryFilter *CategoryFilter `json:"category_filter,omitempty" mapstructure:"category_filter,omitempty"` + + // The maximum number of members to return per group. 
+ GroupMemberCount *int64 `json:"group_member_count,omitempty" mapstructure:"group_member_count,omitempty"` + + // The offset into the total member set to return per group. + GroupMemberOffset *int64 `json:"group_member_offset,omitempty" mapstructure:"group_member_offset,omitempty"` + + // TBD: USED_IN - to get policies in which specified categories are used. APPLIED_TO - to get entities attached to + // specified categories. + UsageType *string `json:"usage_type,omitempty" mapstructure:"usage_type,omitempty"` +} + +// CategoryQueryResponseMetadata represents Response metadata. +type CategoryQueryResponseMetadata struct { + + // The maximum number of records to return per group. + GroupMemberCount *int64 `json:"group_member_count,omitempty" mapstructure:"group_member_count,omitempty"` + + // The offset into the total records set to return per group. + GroupMemberOffset *int64 `json:"group_member_offset,omitempty" mapstructure:"group_member_offset,omitempty"` + + // Total number of matched results. + TotalMatches *int64 `json:"total_matches,omitempty" mapstructure:"total_matches,omitempty"` + + // TBD: USED_IN - to get policies in which specified categories are used. APPLIED_TO - to get entities attached to specified categories. + UsageType *string `json:"usage_type,omitempty" mapstructure:"usage_type,omitempty"` +} + +// EntityReference Reference to an entity. +type EntityReference struct { + + // Categories for the entity. + Categories map[string]string `json:"categories,omitempty" mapstructure:"categories,omitempty"` + + // Kind of the reference. + Kind *string `json:"kind,omitempty" mapstructure:"kind,omitempty"` + + // Name of the entity. + Name *string `json:"name,omitempty" mapstructure:"name,omitempty"` + + // The type of filter being used. (Options : CATEGORIES_MATCH_ALL , CATEGORIES_MATCH_ANY) + Type *string `json:"type,omitempty" mapstructure:"type,omitempty"` + + // UUID of the entity. 
+ UUID *string `json:"uuid,omitempty" mapstructure:"uuid,omitempty"` +} + +// CategoryQueryResponseResults ... +type CategoryQueryResponseResults struct { + + // List of entity references. + EntityAnyReferenceList []*EntityReference `json:"entity_any_reference_list,omitempty" mapstructure:"entity_any_reference_list,omitempty"` + + // Total number of filtered results. + FilteredEntityCount *int64 `json:"filtered_entity_count,omitempty" mapstructure:"filtered_entity_count,omitempty"` + + // The entity kind. + Kind *string `json:"kind,omitempty" mapstructure:"kind,omitempty"` + + // Total number of the matched results. + TotalEntityCount *int64 `json:"total_entity_count,omitempty" mapstructure:"total_entity_count,omitempty"` +} + +// CategoryQueryResponse represents Categories query response object. +type CategoryQueryResponse struct { + + // API version. + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + + Metadata *CategoryQueryResponseMetadata `json:"metadata,omitempty" mapstructure:"metadata,omitempty"` + + Results []*CategoryQueryResponseResults `json:"results,omitempty" mapstructure:"results,omitempty"` +} + +// CategoryValue represents Category value definition. +type CategoryValue struct { + + // API version. + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + + // Description of the category value. + Description *string `json:"description,omitempty" ` + + // Value for the category. + Value *string `json:"value,omitempty" mapstructure:"value,omitempty"` +} + +// PortRange represents Range of TCP/UDP ports. +type PortRange struct { + EndPort *int64 `json:"end_port,omitempty" mapstructure:"end_port,omitempty"` + + StartPort *int64 `json:"start_port,omitempty" mapstructure:"start_port,omitempty"` +} + +// IPSubnet IP subnet provided as an address and prefix length. +type IPSubnet struct { + + // IPV4 address. 
+ IP *string `json:"ip,omitempty" mapstructure:"ip,omitempty"` + + PrefixLength *int64 `json:"prefix_length,omitempty" mapstructure:"prefix_length,omitempty"` +} + +// NetworkRuleIcmpTypeCodeList .. +type NetworkRuleIcmpTypeCodeList struct { + Code *int64 `json:"code,omitempty" mapstructure:"code,omitempty"` + + Type *int64 `json:"type,omitempty" mapstructure:"type,omitempty"` +} + +// NetworkRule ... +type NetworkRule struct { + ExpirationTime *string `json:"expiration_time,omitempty" mapstructure:"expiration_time,omitempty"` + Filter *CategoryFilter `json:"filter,omitempty" mapstructure:"filter,omitempty"` + IcmpTypeCodeList []*NetworkRuleIcmpTypeCodeList `json:"icmp_type_code_list,omitempty" mapstructure:"icmp_type_code_list,omitempty"` + IPSubnet *IPSubnet `json:"ip_subnet,omitempty" mapstructure:"ip_subnet,omitempty"` + NetworkFunctionChainReference *Reference `json:"network_function_chain_reference,omitempty" mapstructure:"network_function_chain_reference,omitempty"` + PeerSpecificationType *string `json:"peer_specification_type,omitempty" mapstructure:"peer_specification_type,omitempty"` + Protocol *string `json:"protocol,omitempty" mapstructure:"protocol,omitempty"` + TCPPortRangeList []*PortRange `json:"tcp_port_range_list,omitempty" mapstructure:"tcp_port_range_list,omitempty"` + UDPPortRangeList []*PortRange `json:"udp_port_range_list,omitempty" mapstructure:"udp_port_range_list,omitempty"` + AddressGroupInclusionList []*Reference `json:"address_group_inclusion_list,omitempty" mapstructure:"address_group_inclusion_list,omitempty"` + Description *string `json:"description,omitempty" mapstructure:"description,omitempty"` + ServiceGroupList []*Reference `json:"service_group_list,omitempty" mapstructure:"service_group_list,omitempty"` +} + +// TargetGroup ... +type TargetGroup struct { + + // Default policy for communication within target group. 
+ DefaultInternalPolicy *string `json:"default_internal_policy,omitempty" mapstructure:"default_internal_policy,omitempty"` + + // The set of categories that matching VMs need to have. + Filter *CategoryFilter `json:"filter,omitempty" mapstructure:"filter,omitempty"` + + // Way to identify the object for which rule is applied. + PeerSpecificationType *string `json:"peer_specification_type,omitempty" mapstructure:"peer_specification_type,omitempty"` +} + +// NetworkSecurityRuleResourcesRule These rules are used for quarantining suspected VMs. Target group is a required +// attribute. Empty inbound_allow_list will not allow anything into target group. Empty outbound_allow_list will allow +// everything from target group. +type NetworkSecurityRuleResourcesRule struct { + Action *string `json:"action,omitempty" mapstructure:"action,omitempty"` // Type of action. + InboundAllowList []*NetworkRule `json:"inbound_allow_list,omitempty" mapstructure:"inbound_allow_list,omitempty"` // + OutboundAllowList []*NetworkRule `json:"outbound_allow_list,omitempty" mapstructure:"outbound_allow_list,omitempty"` + TargetGroup *TargetGroup `json:"target_group,omitempty" mapstructure:"target_group,omitempty"` +} + +// NetworkSecurityRuleIsolationRule These rules are used for environmental isolation. +type NetworkSecurityRuleIsolationRule struct { + Action *string `json:"action,omitempty" mapstructure:"action,omitempty"` // Type of action. + FirstEntityFilter *CategoryFilter `json:"first_entity_filter,omitempty" mapstructure:"first_entity_filter,omitempty"` // The set of categories that matching VMs need to have. + SecondEntityFilter *CategoryFilter `json:"second_entity_filter,omitempty" mapstructure:"second_entity_filter,omitempty"` // The set of categories that matching VMs need to have. +} + +// NetworkSecurityRuleResources ... 
+type NetworkSecurityRuleResources struct { + AllowIpv6Traffic *bool `json:"allow_ipv6_traffic,omitempty" mapstructure:"allow_ipv6_traffic,omitempty"` + IsPolicyHitlogEnabled *bool `json:"is_policy_hitlog_enabled,omitempty" mapstructure:"is_policy_hitlog_enabled,omitempty"` + AdRule *NetworkSecurityRuleResourcesRule `json:"ad_rule,omitempty" mapstructure:"ad_rule,omitempty"` + AppRule *NetworkSecurityRuleResourcesRule `json:"app_rule,omitempty" mapstructure:"app_rule,omitempty"` + IsolationRule *NetworkSecurityRuleIsolationRule `json:"isolation_rule,omitempty" mapstructure:"isolation_rule,omitempty"` + QuarantineRule *NetworkSecurityRuleResourcesRule `json:"quarantine_rule,omitempty" mapstructure:"quarantine_rule,omitempty"` +} + +// NetworkSecurityRule ... +type NetworkSecurityRule struct { + Description *string `json:"description" mapstructure:"description"` + Name *string `json:"name,omitempty" mapstructure:"name,omitempty"` + Resources *NetworkSecurityRuleResources `json:"resources,omitempty" ` +} + +// Metadata Metadata The kind metadata +type Metadata struct { + LastUpdateTime *time.Time `json:"last_update_time,omitempty" mapstructure:"last_update_time,omitempty"` // + Kind *string `json:"kind" mapstructure:"kind"` // + UUID *string `json:"uuid,omitempty" mapstructure:"uuid,omitempty"` // + ProjectReference *Reference `json:"project_reference,omitempty" mapstructure:"project_reference,omitempty"` // project reference + CreationTime *time.Time `json:"creation_time,omitempty" mapstructure:"creation_time,omitempty"` + SpecVersion *int64 `json:"spec_version,omitempty" mapstructure:"spec_version,omitempty"` + SpecHash *string `json:"spec_hash,omitempty" mapstructure:"spec_hash,omitempty"` + OwnerReference *Reference `json:"owner_reference,omitempty" mapstructure:"owner_reference,omitempty"` + Categories map[string]string `json:"categories,omitempty" mapstructure:"categories,omitempty"` + Name *string `json:"name,omitempty" mapstructure:"name,omitempty"` + + // 
Applied on Prism Central only. Indicate whether force to translate the spec of the fanout request to fit the target cluster API schema. + ShouldForceTranslate *bool `json:"should_force_translate,omitempty" mapstructure:"should_force_translate,omitempty"` + + //TODO: add if necessary + //CategoriesMapping map[string][]string `json:"categories_mapping,omitempty" mapstructure:"categories_mapping,omitempty"` + //EntityVersion *string `json:"entity_version,omitempty" mapstructure:"entity_version,omitempty"` + //UseCategoriesMapping *bool `json:"use_categories_mapping,omitempty" mapstructure:"use_categories_mapping,omitempty"` + +} + +// NetworkSecurityRuleIntentInput An intentful representation of a network_security_rule +type NetworkSecurityRuleIntentInput struct { + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + Metadata *Metadata `json:"metadata" mapstructure:"metadata"` + Spec *NetworkSecurityRule `json:"spec" mapstructure:"spec"` +} + +// NetworkSecurityRuleDefStatus ... 
Network security rule status +type NetworkSecurityRuleDefStatus struct { + Resources *NetworkSecurityRuleResources `json:"resources,omitempty" mapstructure:"resources,omitempty"` + State *string `json:"state,omitempty" mapstructure:"state,omitempty"` + ExecutionContext *ExecutionContext `json:"execution_context,omitempty" mapstructure:"execution_context,omitempty"` + Name *string `json:"name,omitempty" mapstructure:"name,omitempty"` + Description *string `json:"description,omitempty" mapstructure:"description,omitempty"` +} + +// NetworkSecurityRuleIntentResponse Response object for intentful operations on a network_security_rule +type NetworkSecurityRuleIntentResponse struct { + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + Metadata *Metadata `json:"metadata" mapstructure:"metadata"` + Spec *NetworkSecurityRule `json:"spec,omitempty" mapstructure:"spec,omitempty"` + Status *NetworkSecurityRuleDefStatus `json:"status,omitempty" mapstructure:"status,omitempty"` +} + +// NetworkSecurityRuleStatus The status of a REST API call. Only used when there is a failure to report. +type NetworkSecurityRuleStatus struct { + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` // + Code *int64 `json:"code,omitempty" mapstructure:"code,omitempty"` // The HTTP error code. + Kind *string `json:"kind,omitempty" mapstructure:"kind,omitempty"` // The kind name + MessageList []*MessageResource `json:"message_list,omitempty" mapstructure:"message_list,omitempty"` + State *string `json:"state,omitempty" mapstructure:"state,omitempty"` +} + +// ListMetadata All api calls that return a list will have this metadata block as input +type ListMetadata struct { + Filter *string `json:"filter,omitempty" mapstructure:"filter,omitempty"` // The filter in FIQL syntax used for the results. 
+ Kind *string `json:"kind,omitempty" mapstructure:"kind,omitempty"` // The kind name + Length *int64 `json:"length,omitempty" mapstructure:"length,omitempty"` // The number of records to retrieve relative to the offset + Offset *int64 `json:"offset,omitempty" mapstructure:"offset,omitempty"` // Offset from the start of the entity list + SortAttribute *string `json:"sort_attribute,omitempty" mapstructure:"sort_attribute,omitempty"` // The attribute to perform sort on + SortOrder *string `json:"sort_order,omitempty" mapstructure:"sort_order,omitempty"` // The sort order in which results are returned +} + +// ListMetadataOutput All api calls that return a list will have this metadata block +type ListMetadataOutput struct { + Filter *string `json:"filter,omitempty" mapstructure:"filter,omitempty"` // The filter used for the results + Kind *string `json:"kind,omitempty" mapstructure:"kind,omitempty"` // The kind name + Length *int64 `json:"length,omitempty" mapstructure:"length,omitempty"` // The number of records retrieved relative to the offset + Offset *int64 `json:"offset,omitempty" mapstructure:"offset,omitempty"` // Offset from the start of the entity list + SortAttribute *string `json:"sort_attribute,omitempty" mapstructure:"sort_attribute,omitempty"` // The attribute to perform sort on + SortOrder *string `json:"sort_order,omitempty" mapstructure:"sort_order,omitempty"` // The sort order in which results are returned + TotalMatches *int64 `json:"total_matches,omitempty" mapstructure:"total_matches,omitempty"` // Total matches found +} + +// NetworkSecurityRuleIntentResource ... 
Response object for intentful operations on a network_security_rule +type NetworkSecurityRuleIntentResource struct { + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + Metadata *Metadata `json:"metadata,omitempty" mapstructure:"metadata,omitempty"` + Spec *NetworkSecurityRule `json:"spec,omitempty" mapstructure:"spec,omitempty"` + Status *NetworkSecurityRuleDefStatus `json:"status,omitempty" mapstructure:"status,omitempty"` +} + +// NetworkSecurityRuleListIntentResponse Response object for intentful operation of network_security_rules +type NetworkSecurityRuleListIntentResponse struct { + APIVersion string `json:"api_version" mapstructure:"api_version"` + Entities []*NetworkSecurityRuleIntentResource `json:"entities,omitempty" bson:"entities,omitempty" mapstructure:"entities,omitempty"` + Metadata *ListMetadataOutput `json:"metadata" mapstructure:"metadata"` +} + +// VolumeGroupInput Represents the request body for create volume_grop request +type VolumeGroupInput struct { + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` // default 3.1.0 + Metadata *Metadata `json:"metadata,omitempty" mapstructure:"metadata,omitempty"` // The volume_group kind metadata. + Spec *VolumeGroup `json:"spec,omitempty" mapstructure:"spec,omitempty"` // Volume group input spec. +} + +// VolumeGroup Represents volume group input spec. +type VolumeGroup struct { + Name *string `json:"name" mapstructure:"name"` // Volume Group name (required) + Description *string `json:"description,omitempty" mapstructure:"description,omitempty"` // Volume Group description. + Resources *VolumeGroupResources `json:"resources" mapstructure:"resources"` // Volume Group resources. 
+} + +// VolumeGroupResources Represents the volume group resources +type VolumeGroupResources struct { + FlashMode *string `json:"flash_mode,omitempty" mapstructure:"flash_mode,omitempty"` // Flash Mode, if enabled all disks of the VG are pinned to SSD + FileSystemType *string `json:"file_system_type,omitempty" mapstructure:"file_system_type,omitempty"` // File system to be used for volume + SharingStatus *string `json:"sharing_status,omitempty" mapstructure:"sharing_status,omitempty"` // Whether the VG can be shared across multiple iSCSI initiators + AttachmentList []*VMAttachment `json:"attachment_list,omitempty" mapstructure:"attachment_list,omitempty"` // VMs attached to volume group. + DiskList []*VGDisk `json:"disk_list,omitempty" mapstructure:"disk_list,omitempty"` // VGDisk Volume group disk specification. + IscsiTargetPrefix *string `json:"iscsi_target_prefix,omitempty" mapstructure:"iscsi_target_prefix,omitempty"` // iSCSI target prefix-name. +} + +// VMAttachment VMs attached to volume group. +type VMAttachment struct { + VMReference *Reference `json:"vm_reference" mapstructure:"vm_reference"` // Reference to a kind + IscsiInitiatorName *string `json:"iscsi_initiator_name" mapstructure:"iscsi_initiator_name"` // Name of the iSCSI initiator of the workload outside Nutanix cluster. +} + +// VGDisk Volume group disk specification. +type VGDisk struct { + VmdiskUUID *string `json:"vmdisk_uuid" mapstructure:"vmdisk_uuid"` // The UUID of this volume disk + Index *int64 `json:"index" mapstructure:"index"` // Index of the volume disk in the group. + DataSourceReference *Reference `json:"data_source_reference" mapstructure:"data_source_reference"` // Reference to a kind + DiskSizeMib *int64 `json:"disk_size_mib" mapstructure:"disk_size_mib"` // Size of the disk in MiB. + StorageContainerUUID *string `json:"storage_container_uuid" mapstructure:"storage_container_uuid"` // Container UUID on which to create the disk. 
+} + +// VolumeGroupResponse Response object for intentful operations on a volume_group +type VolumeGroupResponse struct { + APIVersion *string `json:"api_version" mapstructure:"api_version"` // + Metadata *Metadata `json:"metadata" mapstructure:"metadata"` // The volume_group kind metadata + Spec *VolumeGroup `json:"spec,omitempty" mapstructure:"spec,omitempty"` // Volume group input spec. + Status *VolumeGroupDefStatus `json:"status,omitempty" mapstructure:"status,omitempty"` // Volume group configuration. +} + +// VolumeGroupDefStatus Volume group configuration. +type VolumeGroupDefStatus struct { + State *string `json:"state" mapstructure:"state"` // The state of the volume group entity. + MessageList []*MessageResource `json:"message_list" mapstructure:"message_list"` // Volume group message list. + Name *string `json:"name" mapstructure:"name"` // Volume group name. + Resources *VolumeGroupResources `json:"resources" mapstructure:"resources"` // Volume group resources. + Description *string `json:"description" mapstructure:"description"` // Volume group description. +} + +// VolumeGroupListResponse Response object for intentful operation of volume_groups +type VolumeGroupListResponse struct { + APIVersion *string `json:"api_version" mapstructure:"api_version"` + Entities []*VolumeGroupResponse `json:"entities,omitempty" mapstructure:"entities,omitempty"` + Metadata *ListMetadataOutput `json:"metadata" mapstructure:"metadata"` +} + +// TasksResponse ... 
+type TasksResponse struct { + Status *string `json:"status,omitempty" mapstructure:"status,omitempty"` + LastUpdateTime *time.Time `json:"last_update_time,omitempty" mapstructure:"last_update_time,omitempty"` + LogicalTimestamp *int64 `json:"logical_timestamp,omitempty" mapstructure:"logical_timestamp,omitempty"` + EntityReferenceList []*Reference `json:"entity_reference_list,omitempty" mapstructure:"entity_reference_list,omitempty"` + StartTime *time.Time `json:"start_time,omitempty" mapstructure:"start_time,omitempty"` + CreationTime *time.Time `json:"creation_time,omitempty" mapstructure:"creation_time,omitempty"` + ClusterReference *Reference `json:"cluster_reference,omitempty" mapstructure:"cluster_reference,omitempty"` + SubtaskReferenceList []*Reference `json:"subtask_reference_list,omitempty" mapstructure:"subtask_reference_list,omitempty"` + CompletionTime *time.Time `json:"completion_timev" mapstructure:"completion_timev"` + ProgressMessage *string `json:"progress_message,omitempty" mapstructure:"progress_message,omitempty"` + OperationType *string `json:"operation_type,omitempty" mapstructure:"operation_type,omitempty"` + PercentageComplete *int64 `json:"percentage_complete,omitempty" mapstructure:"percentage_complete,omitempty"` + APIVersion *string `json:"api_version,omitempty" mapstructure:"api_version,omitempty"` + UUID *string `json:"uuid,omitempty" mapstructure:"uuid,omitempty"` + ErrorDetail *string `json:"error_detail,omitempty" mapstructure:"error_detail,omitempty"` +} + +// DeleteResponse ... +type DeleteResponse struct { + Status *DeleteStatus `json:"status" mapstructure:"status"` + Spec string `json:"spec" mapstructure:"spec"` + APIVersion string `json:"api_version" mapstructure:"api_version"` + Metadata *Metadata `json:"metadata" mapstructure:"metadata"` +} + +// DeleteStatus ... 
+type DeleteStatus struct { + State string `json:"state" mapstructure:"state"` + ExecutionContext *ExecutionContext `json:"execution_context" mapstructure:"execution_context"` +} + +/* Host Resource */ + +// DomainCredencial represents the way to login server +type DomainCredencial struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` +} + +// WindowsDomain means Hyper-V node domain +type WindowsDomain struct { + Name string `json:"name,omitempty"` + NameServerIP string `json:"name_server_ip,omitempty"` + OrganizationUnitPath string `json:"organization_unit_path,omitempty"` + NamePrefix string `json:"name_prefix,omitempty"` + DomainName string `json:"domain_name,omitempty"` + DomainCredencial *DomainCredencial `json:"domain_credencial,omitempty"` +} + +// OplogUsage represents oplog disk usage +type OplogUsage struct { + OplogDiskPct *float64 `json:"oplog_disk_pct,omitempty"` + OplogDiskSize *int64 `json:"oplog_disk_size,omitempty"` +} + +// ControllerVM means Hyper-V node domain +type ControllerVM struct { + IP string `json:"ip,omitempty"` + NatIP string `json:"nat_ip,omitempty"` + NatPort *int64 `json:"nat_port,omitempty"` + OplogUsage *OplogUsage `json:"oplog_usage,omitempty"` +} + +// FailoverCluster means Hiper-V failover cluster +type FailoverCluster struct { + IP string `json:"ip,omitempty"` + Name string `json:"name,omitempty"` + DomainCredencial *DomainCredencial `json:"domain_credencial,omitempty"` +} + +// IPMI means Host IPMI Information +type IPMI struct { + IP string `json:"ip,omitempty"` +} + +// ReferenceValues references to a kind +type ReferenceValues struct { + Kind string `json:"kind,omitempty"` + UUID string `json:"uuid,omitempty"` + Name string `json:"name,omitempty"` +} + +// GPU represnts list of GPUs on the host +type GPU struct { + Status string `json:"status,omitempty"` + Vendor string `json:"vendor,omitempty"` + NumVirtualDisplayHeads *int64 `json:"num_virtual_display_heads,omitempty"` + 
Assignable bool `json:"assignable,omitempty"` + LicenseList []*string `json:"license_list,omitempty"` + NumVgpusAllocated *int64 `json:"num_vgpus_allocated,omitempty"` + PciAddress string `json:"pci_address,omitempty"` + Name string `json:"name,omitempty"` + FrameBufferSizeMib *int64 `json:"frame_buffer_size_mib,omitempty"` + Index *int64 `json:"index,omitempty"` + UUID string `json:"uuid,omitempty"` + NumaNode *int64 `json:"numa_node,omitempty"` + MaxResoution string `json:"max_resolution,omitempty"` + ConsumerReference *ReferenceValues `json:"consumer_reference,omitempty"` + Mode string `json:"mode,omitempty"` + Fraction *int64 `json:"fraction,omitempty"` + GuestDriverVersion string `json:"guest_driver_version,omitempty"` + DeviceID *int64 `json:"device_id,omitempty"` +} + +// Hypervisor Full name of hypervisor running on Host +type Hypervisor struct { + NumVms *int64 `json:"num_vms,omitempty"` + IP string `json:"ip,omitempty"` + HypervisorFullName string `json:"hypervisor_full_name,omitempty"` +} + +// Block represents Host block config info. 
+type Block struct { + BlockSerialNumber string `json:"block_serial_number,omitempty"` + BlockModel string `json:"block_model,omitempty"` +} + +// HostResources represents the host resources +type HostResources struct { + GPUDriverVersion string `json:"gpu_driver_version,omitempty"` + FailoverCluster *FailoverCluster `json:"failover_cluster,omitempty"` + IPMI *IPMI `json:"ipmi,omitempty"` + CPUModel string `json:"cpu_model,omitempty"` + HostNicsIDList []*string `json:"host_nics_id_list,omitempty"` + NumCPUSockets *int64 `json:"num_cpu_sockets,omitempty"` + WindowsDomain *WindowsDomain `json:"windows_domain,omitempty"` + GPUList []*GPU `json:"gpu_list,omitempty"` + SerialNumber string `json:"serial_number,omitempty"` + CPUCapacityHZ *int64 `json:"cpu_capacity_hz,omitempty"` + MemoryVapacityMib *int64 `json:"memory_capacity_mib,omitempty"` + HostDisksReferenceList []*ReferenceValues `json:"host_disks_reference_list,omitempty"` + MonitoringState string `json:"monitoring_state,omitempty"` + Hypervisor *Hypervisor `json:"hypervisor,omitempty"` + HostType string `json:"host_type,omitempty"` + NumCPUCores *int64 `json:"num_cpu_cores,omitempty"` + RackableUnitReference *ReferenceValues `json:"rackable_unit_reference,omitempty"` + ControllerVM *ControllerVM `json:"controller_vm,omitempty"` + Block *Block `json:"block,omitempty"` +} + +// HostSpec Represents volume group input spec. +type HostSpec struct { + Name string `json:"name,omitempty"` + Resources *HostResources `json:"resources,omitempty"` +} + +// HostStatus Volume group configuration. 
+type HostStatus struct { + State string `json:"state,omitempty"` + MessageList []*MessageResource `json:"message_list,omitempty"` + Name string `json:"name,omitempty"` + Resources *HostResources `json:"resources,omitempty"` + ClusterReference *ReferenceValues `json:"cluster_reference,omitempty"` +} + +// HostResponse Response object for intentful operations on a Host +type HostResponse struct { + APIVersion string `json:"api_version,omitempty"` + Metadata *Metadata `json:"metadata,omitempty"` + Spec *HostSpec `json:"spec,omitempty"` + Status *HostStatus `json:"status,omitempty"` +} + +// HostListResponse Response object for intentful operation of Host +type HostListResponse struct { + APIVersion string `json:"api_version,omitempty"` + Entities []*HostResponse `json:"entities,omitempty"` + Metadata *ListMetadataOutput `json:"metadata,omitempty"` +} + +/* Project Resource */ + +// Resources represents the utilization limits for resource types +type Resources struct { + Units string `json:"units,omitempty"` + Limit *int64 `json:"limit,omitempty"` + ResourceType string `json:"resource_type,omitempty"` + Value *int64 `json:"value,omitempty"` +} + +// ResourceDomain specification (limits) +type ResourceDomain struct { + Resources []*Resources `json:"resources,omitempty"` +} + +// ProjectResources ... 
+type ProjectResources struct { + ResourceDomain *ResourceDomain `json:"resource_domain,omitempty"` + AccountReferenceList []*ReferenceValues `json:"account_reference_list,omitempty"` + EnvironmentReferenceList []*ReferenceValues `json:"environment_reference_list,omitempty"` + DefaultSubnetReference *ReferenceValues `json:"default_subnet_reference,omitempty"` + UserReferenceList []*ReferenceValues `json:"user_reference_list,omitempty"` + IsDefault bool `json:"is_default,omitempty"` + ExternalUserGroupReferenceList []*ReferenceValues `json:"external_user_group_reference_list,omitempty"` + SubnetReferenceList []*ReferenceValues `json:"subnet_reference_list,omitempty"` + ExternalNetworkList []*ReferenceValues `json:"external_network_list,omitempty"` +} + +// ProjectStatus ... +type ProjectStatus struct { + State string `json:"state,omitempty"` + MessageList []*MessageResource `json:"message_list,omitempty"` + Name string `json:"name,omitempty"` + Resources *ProjectResources `json:"resources,omitempty"` + Descripion string `json:"description,omitempty"` + ExecutionContext *ExecutionContext `json:"execution_context,omitempty"` +} + +// ProjectSpec ... +type ProjectSpec struct { + Name string `json:"name,omitempty"` + Resources *ProjectResources `json:"resources,omitempty"` + Descripion string `json:"description,omitempty"` +} + +// Project Response object for intentful operations on a Host +type Project struct { + Status *ProjectStatus `json:"status,omitempty"` + Spec *ProjectSpec `json:"spec,omitempty"` + APIVersion string `json:"api_version,omitempty"` + Metadata *Metadata `json:"metadata,omitempty"` +} + +// ProjectListResponse Response object for intentful operation of Host +type ProjectListResponse struct { + APIVersion string `json:"api_version,omitempty"` + Entities []*Project `json:"entities,omitempty"` + Metadata *ListMetadataOutput `json:"metadata,omitempty"` +} + +// AccessControlPolicyResources ... 
+type AccessControlPolicyResources struct { + UserReferenceList []*Reference `json:"user_reference_list,omitempty"` + UserGroupReferenceList []*Reference `json:"user_group_reference_list,omitempty"` + RoleReference *Reference `json:"role_reference,omitempty"` + FilterList *FilterList `json:"filter_list,omitempty"` +} + +// FilterList ... +type FilterList struct { + ContextList []*ContextList `json:"context_list,omitempty"` +} + +// ContextList ... +type ContextList struct { + ScopeFilterExpressionList []*ScopeFilterExpressionList `json:"scope_filter_expression_list,omitempty"` + EntityFilterExpressionList []EntityFilterExpressionList `json:"entity_filter_expression_list,omitempty"` +} + +// ScopeFilterExpressionList ... +type ScopeFilterExpressionList struct { + LeftHandSide string `json:"left_hand_side,omitempty"` + Operator string `json:"operator,omitempty"` + RightHandSide RightHandSide `json:"right_hand_side,omitempty"` +} + +// EntityFilterExpressionList ... +type EntityFilterExpressionList struct { + LeftHandSide LeftHandSide `json:"left_hand_side,omitempty"` + Operator string `json:"operator,omitempty"` + RightHandSide RightHandSide `json:"right_hand_side,omitempty"` +} + +// LeftHandSide ... +type LeftHandSide struct { + EntityType *string `json:"entity_type,omitempty"` +} + +// RightHandSide ... +type RightHandSide struct { + Collection *string `json:"collection,omitempty"` + Categories map[string][]string `json:"categories,omitempty"` + UUIDList []string `json:"uuid_list,omitempty"` +} + +// AccessControlPolicyStatus ... +type AccessControlPolicyStatus struct { + State *string `json:"state,omitempty"` + MessageList []*MessageResource `json:"message_list,omitempty"` + Name *string `json:"name,omitempty"` + Resources *AccessControlPolicyResources `json:"resources,omitempty"` + Description *string `json:"description,omitempty"` + ExecutionContext *ExecutionContext `json:"execution_context,omitempty"` +} + +// AccessControlPolicySpec ... 
+type AccessControlPolicySpec struct { + Name *string `json:"name,omitempty"` + Resources *AccessControlPolicyResources `json:"resources,omitempty"` + Description *string `json:"description,omitempty"` +} + +// AccessControlPolicy Response object for intentful operations on a access policy +type AccessControlPolicy struct { + Status *AccessControlPolicyStatus `json:"status,omitempty"` + Spec *AccessControlPolicySpec `json:"spec,omitempty"` + APIVersion string `json:"api_version,omitempty"` + Metadata *Metadata `json:"metadata,omitempty"` +} + +// AccessControlPolicyListResponse Response object for intentful operation of access policy +type AccessControlPolicyListResponse struct { + APIVersion string `json:"api_version,omitempty"` + Entities []*AccessControlPolicy `json:"entities,omitempty"` + Metadata *ListMetadataOutput `json:"metadata,omitempty"` +} + +// RoleResources ... +type RoleResources struct { + PermissionReferenceList []*Reference `json:"permission_reference_list,omitempty"` +} + +// RoleStatus ... +type RoleStatus struct { + State *string `json:"state,omitempty"` + MessageList []*MessageResource `json:"message_list,omitempty"` + Name *string `json:"name,omitempty"` + Resources *RoleResources `json:"resources,omitempty"` + Description *string `json:"description,omitempty"` + ExecutionContext *ExecutionContext `json:"execution_context,omitempty"` +} + +// RoleSpec ... 
+type RoleSpec struct { + Name *string `json:"name,omitempty"` + Resources *RoleResources `json:"resources,omitempty"` + Description *string `json:"description,omitempty"` +} + +// Role Response object for intentful operations on a access policy +type Role struct { + Status *RoleStatus `json:"status,omitempty"` + Spec *RoleSpec `json:"spec,omitempty"` + APIVersion string `json:"api_version,omitempty"` + Metadata *Metadata `json:"metadata,omitempty"` +} + +// RoleListResponse Response object for intentful operation of access policy +type RoleListResponse struct { + APIVersion string `json:"api_version,omitempty"` + Entities []*Role `json:"entities,omitempty"` + Metadata *ListMetadataOutput `json:"metadata,omitempty"` +} + +type ResourceUsageSummary struct { + ResourceDomain *ResourceDomainStatus `json:"resource_domain"` // The status for a resource domain (limits and values) +} + +type ResourceDomainStatus struct { + Resources []ResourceUtilizationStatus `json:"resources,omitempty"` // The utilization/limit for resource types +} + +type ResourceUtilizationStatus struct { + Limit *int64 `json:"limit,omitempty"` // The resource consumption limit (unspecified is unlimited) + ResourceType *string `json:"resource_type,omitempty"` // The type of resource (for example storage, CPUs) + Units *string `json:"units,omitempty"` // The units of the resource type + Value *int64 `json:"value,omitempty"` // The amount of resource consumed +} + +// An intentful representation of a user +type UserIntentInput struct { + APIVersion *string `json:"api_version,omitempty"` // API Version of the Nutanix v3 API framework. + Metadata *Metadata `json:"metadata,omitempty"` // The user kind metadata + Spec *UserSpec `json:"spec,omitempty"` // User Input Definition. +} + +// Response object for intentful operations on a user +type UserIntentResponse struct { + APIVersion *string `json:"api_version,omitempty"` // API Version of the Nutanix v3 API framework. 
+ Metadata *Metadata `json:"metadata,omitempty"` // The user kind metadata + Spec *UserSpec `json:"spec,omitempty"` // User Input Definition. + Status *UserStatus `json:"status,omitempty"` // User status definition. +} + +// User Input Definition. +type UserSpec struct { + Resources *UserResources `json:"resources,omitempty"` // User Resource Definition. +} + +// User Resource Definition. +type UserResources struct { + DirectoryServiceUser *DirectoryServiceUser `json:"directory_service_user,omitempty"` // A Directory Service user. + IdentityProviderUser *IdentityProvider `json:"identity_provider_user,omitempty"` // An Identity Provider user. +} + +// A Directory Service user. +type DirectoryServiceUser struct { + DefaultUserPrincipalName *string `json:"default_user_principal_name,omitempty"` // The Default UserPrincipalName of the user from the directory service. + DirectoryServiceReference *Reference `json:"directory_service_reference,omitempty"` // The reference to a directory_service + UserPrincipalName *string `json:"user_principal_name,omitempty"` // The UserPrincipalName of the user from the directory service. +} + +// An Identity Provider user. +type IdentityProvider struct { + IdentityProviderReference *Reference `json:"identity_provider_reference,omitempty"` // The reference to a identity_provider + Username *string `json:"username,omitempty"` // The username from the identity provider. Name Id for SAML Identity Provider. +} + +// User status definition. +type UserStatus struct { + MessageList []MessageResource `json:"message_list,omitempty"` + Name *string `json:"name,omitempty"` // Name of the User. + Resources *UserStatusResources `json:"resources,omitempty"` // User Resource Definition. + State *string `json:"state,omitempty"` // The state of the entity. + ExecutionContext *ExecutionContext `json:"execution_context,omitempty"` +} + +// User Resource Definition. 
+type UserStatusResources struct { + AccessControlPolicyReferenceList []*Reference `json:"access_control_policy_reference_list,omitempty"` // List of ACP references. + DirectoryServiceUser *DirectoryServiceUser `json:"directory_service_user,omitempty"` // A Directory Service user. + DisplayName *string `json:"display_name,omitempty"` // The display name of the user (common name) provided by the directory service. + IdentityProviderUser *IdentityProvider `json:"identity_provider_user,omitempty"` // An Identity Provider user. + ProjectsReferenceList []*Reference `json:"projects_reference_list,omitempty"` // A list of projects the user is part of. + ResourceUsageSummary *ResourceUsageSummary `json:"resource_usage_summary,omitempty"` + UserType *string `json:"user_type,omitempty"` +} + +type UserListResponse struct { + APIVersion *string `json:"api_version,omitempty"` // API Version of the Nutanix v3 API framework. + Entities []*UserIntentResponse `json:"entities,omitempty"` + Metadata *ListMetadataOutput `json:"metadata,omitempty"` // All api calls that return a list will have this metadata block +} + +// Response object for intentful operations on a user_group +type UserGroupIntentResponse struct { + APIVersion *string `json:"api_version,omitempty"` // API Version of the Nutanix v3 API framework. + Metadata *Metadata `json:"metadata,omitempty"` // The user_group kind metadata + Spec *UserGroupSpec `json:"spec,omitempty"` // User Group Input Definition. + Status *UserGroupStatus `json:"status,omitempty"` // User group status definition. +} + +// User Group Input Definition. +type UserGroupSpec struct { + Resources *UserGroupResources `json:"resources,omitempty"` // User Group Resource Definition +} + +// User Group Resource Definition +type UserGroupResources struct { + AccessControlPolicyReferenceList []*Reference `json:"access_control_policy_reference_list,omitempty"` // List of ACP references. 
+ DirectoryServiceUserGroup *DirectoryServiceUserGroup `json:"directory_service_user_group,omitempty"` // A Directory Service user group. + DisplayName *string `json:"display_name,omitempty"` // The display name for the user group. + ProjectsReferenceList []*Reference `json:"projects_reference_list,omitempty"` // A list of projects the user group is part of. + UserGroupType *string `json:"user_group_type,omitempty"` +} + +// User group status definition. +type UserGroupStatus struct { + MessageList []MessageResource `json:"message_list,omitempty"` + Resources *UserGroupResources `json:"resources,omitempty"` // User Group Resource Definition. + State *string `json:"state,omitempty"` // The state of the entity. +} + +// A Directory Service user group. +type DirectoryServiceUserGroup struct { + DirectoryServiceReference *Reference `json:"directory_service_reference,omitempty"` // The reference to a directory_service + DistinguishedName *string `json:"distinguished_name,omitempty"` // The Distinguished name for the user group. +} + +// Response object for intentful operation of user_groups +type UserGroupListResponse struct { + APIVersion *string `json:"api_version,omitempty"` // API Version of the Nutanix v3 API framework. + Entities []*UserGroupIntentResponse `json:"entities,omitempty"` + Metadata *ListMetadataOutput `json:"metadata,omitempty"` // All api calls that return a list will have this metadata block +} + +// Response object for intentful operations on a user_group +type PermissionIntentResponse struct { + APIVersion *string `json:"api_version,omitempty"` // API Version of the Nutanix v3 API framework. + Metadata *Metadata `json:"metadata,omitempty"` // The user_group kind metadata + Spec *PermissionSpec `json:"spec,omitempty"` // Permission Input Definition. + Status *PermissionStatus `json:"status,omitempty"` // User group status definition. +} + +// Permission Input Definition. 
+type PermissionSpec struct { + Name *string `json:"name,omitempty"` // The name for the permission. + Description *string `json:"description,omitempty"` // The display name for the permission. + Resources *PermissionResources `json:"resources,omitempty"` // Permission Resource Definition +} + +// Permission Resource Definition +type PermissionResources struct { + Operation *string `json:"operation,omitempty"` + Kind *string `json:"kind,omitempty"` + Fields *FieldsPermission `json:"fields,omitempty"` +} + +type FieldsPermission struct { + FieldMode *string `json:"field_mode,omitempty"` + FieldNameList []*string `json:"field_name_list,omitempty"` +} + +// Permission status definition. +type PermissionStatus struct { + Name *string `json:"name,omitempty"` // The name for the permission. + Description *string `json:"description,omitempty"` // The display name for the permission. + Resources *PermissionResources `json:"resources,omitempty"` // Permission Resource Definition + MessageList []MessageResource `json:"message_list,omitempty"` + State *string `json:"state,omitempty"` // The state of the entity. +} + +// Response object for intentful operation of Permissions +type PermissionListResponse struct { + APIVersion *string `json:"api_version,omitempty"` // API Version of the Nutanix v3 API framework. 
+ Entities []*PermissionIntentResponse `json:"entities,omitempty"` + Metadata *ListMetadataOutput `json:"metadata,omitempty"` // All api calls that return a list will have this metadata block +} + +//ProtectionRuleResources represents the resources of protection rules +type ProtectionRuleResources struct { + StartTime string `json:"start_time,omitempty"` + AvailabilityZoneConnectivityList []*AvailabilityZoneConnectivityList `json:"availability_zone_connectivity_list,omitempty"` + OrderedAvailabilityZoneList []*OrderedAvailabilityZoneList `json:"ordered_availability_zone_list,omitempty"` + CategoryFilter *CategoryFilter `json:"category_filter,omitempty"` +} + +//AvailabilityZoneConnectivityList represents a object for resource of protection rule +type AvailabilityZoneConnectivityList struct { + DestinationAvailabilityZoneIndex *int64 `json:"destination_availability_zone_index,omitempty"` + SourceAvailabilityZoneIndex *int64 `json:"source_availability_zone_index,omitempty"` + SnapshotScheduleList []*SnapshotScheduleList `json:"snapshot_schedule_list,omitempty"` +} + +//SnapshotScheduleList represents a object for resource of protection rule +type SnapshotScheduleList struct { + RecoveryPointObjectiveSecs *int64 `json:"recovery_point_objective_secs,omitempty"` + LocalSnapshotRetentionPolicy *SnapshotRetentionPolicy `json:"local_snapshot_retention_policy,omitempty"` + AutoSuspendTimeoutSecs *int64 `json:"auto_suspend_timeout_secs,omitempty"` + SnapshotType string `json:"snapshot_type,omitempty"` + RemoteSnapshotRetentionPolicy *SnapshotRetentionPolicy `json:"remote_snapshot_retention_policy,omitempty"` +} + +//SnapshotRetentionPolicy represents a object for resource of protection rule +type SnapshotRetentionPolicy struct { + NumSnapshots *int64 `json:"num_snapshots,omitempty"` + RollupRetentionPolicy *RollupRetentionPolicy `json:"rollup_retention_policy,omitempty"` +} + +//RollupRetentionPolicy represents a object for resource of protection rule +type 
RollupRetentionPolicy struct { + Multiple *int64 `json:"multiple,omitempty"` + SnapshotIntervalType string `json:"snapshot_interval_type,omitempty"` +} + +//OrderedAvailabilityZoneList represents a object for resource of protection rule +type OrderedAvailabilityZoneList struct { + ClusterUUID string `json:"cluster_uuid,omitempty"` + AvailabilityZoneURL string `json:"availability_zone_url,omitempty"` +} + +//ProtectionRuleStatus represents a status of a protection rule +type ProtectionRuleStatus struct { + State string `json:"state,omitempty"` + MessageList []*MessageResource `json:"message_list,omitempty"` + Name string `json:"name,omitempty"` + Resources *ProtectionRuleResources `json:"resources,omitempty"` + ExecutionContext *ExecutionContext `json:"execution_context,omitempty"` +} + +//ProtectionRuleSpec represents a spec of protection rules +type ProtectionRuleSpec struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Resources *ProtectionRuleResources `json:"resources,omitempty"` +} + +//ProtectionRuleResponse represents a response object of a protection rule +type ProtectionRuleResponse struct { + APIVersion string `json:"api_version,omitempty"` + Metadata *Metadata `json:"metadata,omitempty"` + Spec *ProtectionRuleSpec `json:"spec,omitempty"` + Status *ProtectionRuleStatus `json:"status,omitempty"` +} + +//ProtectionRulesListResponse represents the response of a list of protection rules +type ProtectionRulesListResponse struct { + APIVersion string `json:"api_version,omitempty"` + Entities []*ProtectionRuleResponse `json:"entities,omitempty"` + Metadata *ListMetadataOutput `json:"metadata,omitempty"` +} + +//ProtectionRuleInput Represents the request of create protection rule +type ProtectionRuleInput struct { + APIVersion string `json:"api_version,omitempty"` + Metadata *Metadata `json:"metadata,omitempty"` + Spec *ProtectionRuleSpec `json:"spec,omitempty"` +} + +//RecoveryPlanResources represents the resources 
of recovery plan +type RecoveryPlanResources struct { + StageList []*StageList `json:"stage_list,omitempty"` + Parameters *Parameters `json:"parameters,omitempty"` +} + +//Parameters represents a object for resource of recovery plan +type Parameters struct { + FloatingIPAssignmentList []*FloatingIPAssignmentList `json:"floating_ip_assignment_list,omitempty"` + NetworkMappingList []*NetworkMappingList `json:"network_mapping_list,omitempty"` +} + +//FloatingIPAssignmentList represents a object for resource of recovery plan +type FloatingIPAssignmentList struct { + AvailabilityZoneURL string `json:"availability_zone_url,omitempty"` + VMIPAssignmentList []*VMIPAssignmentList `json:"vm_ip_assignment_list,omitempty"` +} + +//VMIPAssignmentList represents a object for resource of recovery plan +type VMIPAssignmentList struct { + TestFloatingIPConfig *FloatingIPConfig `json:"test_floating_ip_config,omitempty"` + RecoveryFloatingIPConfig *FloatingIPConfig `json:"recovery_floating_ip_config,omitempty"` + VMReference *Reference `json:"vm_reference,omitempty"` + VMNICInformation *VMNICInformation `json:"vm_nic_information,omitempty"` +} + +//FloatingIPConfig represents a object for resource of recovery plan +type FloatingIPConfig struct { + IP string `json:"ip,omitempty"` + ShouldAllocateDynamically *bool `json:"should_allocate_dynamically,omitempty"` +} + +//VMNICInformation represents a object for resource of recovery plan +type VMNICInformation struct { + IP string `json:"ip,omitempty"` + UUID string `json:"uuid,omitempty"` +} + +// represents a object for resource of recovery plan +type NetworkMappingList struct { + AvailabilityZoneNetworkMappingList []*AvailabilityZoneNetworkMappingList `json:"availability_zone_network_mapping_list,omitempty"` + AreNetworksStretched *bool `json:"are_networks_stretched,omitempty"` +} + +//AvailabilityZoneNetworkMappingList represents a object for resource of recovery plan +type AvailabilityZoneNetworkMappingList struct { + RecoveryNetwork 
*Network `json:"recovery_network,omitempty"` + AvailabilityZoneURL string `json:"availability_zone_url,omitempty"` + TestNetwork *Network `json:"test_network,omitempty"` + RecoveryIPAssignmentList []*IPAssignmentList `json:"recovery_ip_assignment_list,omitempty"` + TestIPAssignmentList []*IPAssignmentList `json:"test_ip_assignment_list,omitempty"` + ClusterReferenceList []*Reference `json:"cluster_reference_list,omitempty"` +} + +type IPAssignmentList struct { + VMReference *Reference `json:"vm_reference,omitempty"` + IPConfigList []*IPConfigList `json:"ip_config_list,omitempty"` +} + +type IPConfigList struct { + IPAddress string `json:"ip_address,omitempty"` +} + +//Network represents a object for resource of recovery plan +type Network struct { + VirtualNetworkReference *Reference `json:"virtual_network_reference,omitempty"` + SubnetList []*SubnetList `json:"subnet_list,omitempty"` + Name string `json:"name,omitempty"` + VPCReference *Reference `json:"vpc_reference,omitempty"` + UseVPCReference *bool `json:"use_vpc_reference,omitempty"` +} + +//SubnetList represents a object for resource of recovery plan +type SubnetList struct { + GatewayIP string `json:"gateway_ip,omitempty"` + ExternalConnectivityState string `json:"external_connectivity_state,omitempty"` + PrefixLength *int64 `json:"prefix_length,omitempty"` +} + +//StageList represents a object for resource of recovery plan +type StageList struct { + StageWork *StageWork `json:"stage_work,omitempty"` + StageUUID string `json:"stage_uuid,omitempty"` + DelayTimeSecs *int64 `json:"delay_time_secs,omitempty"` +} + +//StageWork represents a object for resource of recovery plan +type StageWork struct { + RecoverEntities *RecoverEntities `json:"recover_entities,omitempty"` +} + +//RecoverEntities represents a object for resource of recovery plan +type RecoverEntities struct { + EntityInfoList []*EntityInfoList `json:"entity_info_list,omitempty"` +} + +//EntityInfoList represents a object for resource of recovery 
plan +type EntityInfoList struct { + AnyEntityReference *Reference `json:"any_entity_reference,omitempty"` + Categories map[string]string `json:"categories,omitempty"` + ScriptList []*ScriptList `json:"script_list,omitempty"` +} + +type ScriptList struct { + EnableScriptExec *bool `json:"enable_script_exec,omitempty"` + Timeout *int64 `json:"timeout,omitempty"` +} + +//RecoveryPlanStatus represents a status of a recovery plan +type RecoveryPlanStatus struct { + State string `json:"state,omitempty"` + MessageList []*MessageResource `json:"message_list,omitempty"` + Name string `json:"name,omitempty"` + Resources *RecoveryPlanResources `json:"resources,omitempty"` + ExecutionContext *ExecutionContext `json:"execution_context,omitempty"` +} + +//RecoveryPlanSpec represents a spec of recovery plans +type RecoveryPlanSpec struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Resources *RecoveryPlanResources `json:"resources,omitempty"` +} + +//RecoveryPlanResponse represents a response object of a recovery plan +type RecoveryPlanResponse struct { + APIVersion string `json:"api_version,omitempty"` + Metadata *Metadata `json:"metadata,omitempty"` + Spec *RecoveryPlanSpec `json:"spec,omitempty"` + Status *RecoveryPlanStatus `json:"status,omitempty"` +} + +//RecoveryPlanListResponse represents the response of a list of recovery plans +type RecoveryPlanListResponse struct { + APIVersion string `json:"api_version,omitempty"` + Entities []*RecoveryPlanResponse `json:"entities,omitempty"` + Metadata *ListMetadataOutput `json:"metadata,omitempty"` +} + +//RecoveryPlanInput Represents the request of create recovery plan +type RecoveryPlanInput struct { + APIVersion string `json:"api_version,omitempty"` + Metadata *Metadata `json:"metadata,omitempty"` + Spec *RecoveryPlanSpec `json:"spec,omitempty"` +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-nutanix/utils/flatmap.go 
package utils

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// UnknownVariableValue is the sentinel string Terraform core places in
// flatmapped state for a value that is not yet known ("computed").
const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"

// Expand takes a map and a key (prefix) and expands that value into
// a more complex structure. This is the reverse of the Flatten operation.
//
// Dispatch order: an exact key hit is a scalar (with "true"/"false"
// coerced to bool); a "<key>.#" entry marks a list/set; any other key
// sharing the "<key>." prefix marks a nested map; otherwise nil.
func Expand(m map[string]string, key string) interface{} {
	// If the key is exactly a key in the map, just return it
	if v, ok := m[key]; ok {
		if v == "true" {
			return true
		} else if v == "false" {
			return false
		}

		return v
	}

	// Check if the key is an array, and if so, expand the array
	if v, ok := m[key+".#"]; ok {
		// If the count of the key is unknown, then just put the unknown
		// value in the value itself. This will be detected by Terraform
		// core later.
		if v == UnknownVariableValue {
			return v
		}

		return expandArray(m, key)
	}

	// Check if this is a prefix in the map
	prefix := key + "."
	for k := range m {
		if strings.HasPrefix(k, prefix) {
			return expandMap(m, prefix)
		}
	}

	return nil
}

// expandArray expands the flatmap entries under prefix into a slice,
// recursing through Expand for each element. It panics on a malformed
// count or element key — state this code reads is expected to be
// machine-generated, so malformation is treated as a programmer error.
func expandArray(m map[string]string, prefix string) []interface{} {
	num, err := strconv.ParseInt(m[prefix+".#"], 0, 0)
	if err != nil {
		panic(err)
	}

	// If the number of elements in this array is 0, then return an
	// empty slice as there is nothing to expand. Trying to expand it
	// anyway could lead to crashes as any child maps, arrays or sets
	// that no longer exist are still shown as empty with a count of 0.
	if num == 0 {
		return []interface{}{}
	}

	// NOTE: "num" is not necessarily accurate, e.g. if a user tampers
	// with state, so the following code should not crash when given a
	// number of items more or less than what's given in num. The
	// num key is mainly just a hint that this is a list or set.

	// The Schema "Set" type stores its values in an array format, but
	// using numeric hash values instead of ordinal keys. Take the set
	// of keys regardless of value, and expand them in numeric order.
	// See GH-11042 for more details.
	keySet := map[int]bool{}
	computed := map[string]bool{}
	for k := range m {
		if !strings.HasPrefix(k, prefix+".") {
			continue
		}

		key := k[len(prefix)+1:]
		idx := strings.Index(key, ".")
		if idx != -1 {
			key = key[:idx]
		}

		// skip the count value
		if key == "#" {
			continue
		}

		// strip the computed flag if there is one
		if strings.HasPrefix(key, "~") {
			key = key[1:]
			computed[key] = true
		}

		k, err := strconv.Atoi(key)
		if err != nil {
			panic(err)
		}
		keySet[k] = true
	}

	keysList := make([]int, 0, num)
	for key := range keySet {
		keysList = append(keysList, key)
	}
	sort.Ints(keysList)

	result := make([]interface{}, len(keysList))
	for i, key := range keysList {
		keyString := strconv.Itoa(key)
		// re-attach the computed marker so the recursive Expand sees
		// the original "~N" key
		if computed[keyString] {
			keyString = "~" + keyString
		}
		result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString))
	}

	return result
}

// expandMap expands the flatmap entries under prefix into a
// map[string]interface{}, recursing through Expand per sub-key.
func expandMap(m map[string]string, prefix string) map[string]interface{} {
	// Submaps may not have a '%' key, so we can't count on this value being
	// here. If we don't have a count, just proceed as if we have a map.
	if count, ok := m[prefix+"%"]; ok && count == "0" {
		return map[string]interface{}{}
	}

	result := make(map[string]interface{})
	for k := range m {
		if !strings.HasPrefix(k, prefix) {
			continue
		}

		key := k[len(prefix):]
		idx := strings.Index(key, ".")
		if idx != -1 {
			key = key[:idx]
		}
		// each sub-key is expanded only once, on first sight
		if _, ok := result[key]; ok {
			continue
		}

		// skip the map count value
		if key == "%" {
			continue
		}

		result[key] = Expand(m, k[:len(prefix)+len(key)])
	}

	return result
}

// ---- vendor/.../utils/pointers.go (aws-sdk-go-style pointer helpers) ----

// StringPtr returns a pointer to the string value passed in.
func StringPtr(v string) *string {
	return &v
}
// StringValue dereferences v, or returns "" when v is nil.
func StringValue(v *string) string {
	if v == nil {
		return ""
	}
	return *v
}

// StringSlice converts a slice of string values into a slice of
// pointers to the corresponding elements of the input slice.
func StringSlice(src []string) []*string {
	dst := make([]*string, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// StringValueSlice converts a slice of string pointers into a slice of
// string values; nil entries become "".
func StringValueSlice(src []*string) []string {
	dst := make([]string, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// StringMap converts a string map of string values into a string map
// of string pointers.
func StringMap(src map[string]string) map[string]*string {
	dst := make(map[string]*string, len(src))
	for k, val := range src {
		v := val // copy so each entry points at its own storage
		dst[k] = &v
	}
	return dst
}

// StringValueMap converts a string map of string pointers into a string
// map of string values, skipping nil entries.
func StringValueMap(src map[string]*string) map[string]string {
	dst := make(map[string]string, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// BoolPtr returns a pointer to the bool value passed in.
func BoolPtr(v bool) *bool {
	return &v
}
// BoolValue dereferences v, or returns false when v is nil.
func BoolValue(v *bool) bool {
	if v == nil {
		return false
	}
	return *v
}

// BoolSlice converts a slice of bool values into a slice of pointers
// to the corresponding elements of the input slice.
func BoolSlice(src []bool) []*bool {
	dst := make([]*bool, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// BoolValueSlice converts a slice of bool pointers into a slice of
// bool values; nil entries become false.
func BoolValueSlice(src []*bool) []bool {
	dst := make([]bool, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// BoolMap converts a string map of bool values into a string map of
// bool pointers.
func BoolMap(src map[string]bool) map[string]*bool {
	dst := make(map[string]*bool, len(src))
	for k, val := range src {
		v := val // copy so each entry points at its own storage
		dst[k] = &v
	}
	return dst
}

// BoolValueMap converts a string map of bool pointers into a string
// map of bool values, skipping nil entries.
func BoolValueMap(src map[string]*bool) map[string]bool {
	dst := make(map[string]bool, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// IntPtr returns a pointer to the int value passed in.
func IntPtr(v int) *int {
	return &v
}
// IntValue dereferences v, or returns 0 when v is nil.
func IntValue(v *int) int {
	if v == nil {
		return 0
	}
	return *v
}

// IntSlice converts a slice of int values into a slice of pointers
// to the corresponding elements of the input slice.
func IntSlice(src []int) []*int {
	dst := make([]*int, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// IntValueSlice converts a slice of int pointers into a slice of
// int values; nil entries become 0.
func IntValueSlice(src []*int) []int {
	dst := make([]int, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// IntMap converts a string map of int values into a string map of
// int pointers.
func IntMap(src map[string]int) map[string]*int {
	dst := make(map[string]*int, len(src))
	for k, val := range src {
		v := val // copy so each entry points at its own storage
		dst[k] = &v
	}
	return dst
}

// IntValueMap converts a string map of int pointers into a string
// map of int values, skipping nil entries.
func IntValueMap(src map[string]*int) map[string]int {
	dst := make(map[string]int, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Int64Ptr returns a pointer to the int64 value passed in.
func Int64Ptr(v int64) *int64 {
	return &v
}
// Int64Value dereferences v, or returns 0 when v is nil.
func Int64Value(v *int64) int64 {
	if v == nil {
		return 0
	}
	return *v
}

// Int64Slice converts a slice of int64 values into a slice of pointers
// to the corresponding elements of the input slice.
func Int64Slice(src []int64) []*int64 {
	dst := make([]*int64, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Int64ValueSlice converts a slice of int64 pointers into a slice of
// int64 values; nil entries become 0.
func Int64ValueSlice(src []*int64) []int64 {
	dst := make([]int64, len(src))
	for i, p := range src {
		if p != nil {
			dst[i] = *p
		}
	}
	return dst
}

// Int64Map converts a string map of int64 values into a string map of
// int64 pointers.
func Int64Map(src map[string]int64) map[string]*int64 {
	dst := make(map[string]*int64, len(src))
	for k, val := range src {
		v := val // copy so each entry points at its own storage
		dst[k] = &v
	}
	return dst
}

// Int64ValueMap converts a string map of int64 pointers into a string
// map of int64 values, skipping nil entries.
func Int64ValueMap(src map[string]*int64) map[string]int64 {
	dst := make(map[string]int64, len(src))
	for k, p := range src {
		if p != nil {
			dst[k] = *p
		}
	}
	return dst
}

// Float64Ptr returns a pointer to the float64 value passed in.
func Float64Ptr(v float64) *float64 {
	return &v
}
+func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + + for k, val := range src { + v := val + dst[k] = &v + } + + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + + return dst +} + +// Time returns a pointer to the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. +func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + + return time.Time{} +} + +// SecondsTimeValue converts an int64 pointer to a time.Time value +// representing seconds since Epoch or time.Time{} if the pointer is nil. +func SecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix((*v / 1000), 0) + } + + return time.Time{} +} + +// MillisecondsTimeValue converts an int64 pointer to a time.Time value +// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil. 
+func MillisecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix(0, (*v * 1000000)) + } + + return time.Time{} +} + +// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". +// The result is undefined if the Unix time cannot be represented by an int64. +// Which includes calling TimeUnixMilli on a zero Time is undefined. +// +// This utility is useful for service API's such as CloudWatch Logs which require +// their unix time values to be in milliseconds. +// +// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. +func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + + for k, val := range src { + v := val + dst[k] = &v + } + + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + + return dst +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-nutanix/utils/utils.go 
b/vendor/github.com/terraform-providers/terraform-provider-nutanix/utils/utils.go new file mode 100644 index 00000000000..9570fa311cc --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-nutanix/utils/utils.go @@ -0,0 +1,73 @@ +package utils + +import ( + "crypto/rand" + "encoding/json" + "fmt" + "log" + "net/http" + "net/http/httputil" + "strings" +) + +// PrintToJSON method helper to debug responses +func PrintToJSON(v interface{}, msg string) { + pretty, _ := json.MarshalIndent(v, "", " ") + log.Print("\n", msg, string(pretty)) + fmt.Print("\n", msg, string(pretty)) +} + +func ToJSONString(v interface{}) string { + pretty, _ := json.MarshalIndent(v, "", " ") + + return string(pretty) +} + +// DebugRequest ... +func DebugRequest(req *http.Request) { + requestDump, err := httputil.DumpRequest(req, true) + if err != nil { + log.Printf("[WARN] Error getting request's dump: %s\n", err) + } + + log.Printf("[DEBUG] %s\n", string(requestDump)) +} + +// DebugResponse ... 
// DebugResponse logs a full dump of res (status line, headers and body)
// at DEBUG level. If dumping fails, a warning is logged and the (empty)
// dump is still written, matching DebugRequest's behavior.
func DebugResponse(res *http.Response) {
	requestDump, err := httputil.DumpResponse(res, true)
	if err != nil {
		log.Printf("[WARN] Error getting response's dump: %s\n", err)
	}

	log.Printf("[DEBUG] %s\n", string(requestDump))
}

// ConvertMapString converts a map of interface{} values into a map of
// their string renderings.
//
// Fix: the previous implementation used fmt.Sprintf(v.(string)), which
// treated the data itself as a format string — corrupting any value
// containing '%' (e.g. "50%" became "50%!(NOVERB)") — and panicked on
// non-string values. Formatting with the "%v" verb preserves string
// values byte-for-byte and renders other types instead of panicking.
func ConvertMapString(o map[string]interface{}) map[string]string {
	converted := make(map[string]string, len(o))
	for k, v := range o {
		converted[k] = fmt.Sprintf("%v", v)
	}

	return converted
}

// StringLowerCaseValidateFunc is a Terraform schema validation function
// that rejects values containing any uppercase characters. val must be
// a string (the schema layer guarantees this; non-strings panic).
func StringLowerCaseValidateFunc(val interface{}, key string) (warns []string, errs []error) {
	v := val.(string)
	if strings.ToLower(v) != v {
		errs = append(errs, fmt.Errorf("%q must be in lowercase, got: %s", key, v))
	}
	return
}

// GenUUID returns a random, UUID-shaped string (8-4-4-4-12 hex groups)
// built from crypto/rand.
//
// NOTE(review): the RFC 4122 version/variant bits are not set, so this
// is random-looking but not a strictly valid v4 UUID; preserved as-is
// for compatibility with existing callers. A crypto/rand read failure
// is unrecoverable here and aborts via log.Fatal.
func GenUUID() string {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		log.Fatal(err)
	}

	return fmt.Sprintf("%x-%x-%x-%x-%x",
		b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
}
github.com/terraform-providers/terraform-provider-local v1.4.0 ## explicit github.com/terraform-providers/terraform-provider-local/local +# github.com/terraform-providers/terraform-provider-nutanix v1.1.0 => github.com/nutanix/terraform-provider-nutanix v1.2.2-0.20211029075448-e21f85ac2cf7 +## explicit; go 1.13 +github.com/terraform-providers/terraform-provider-nutanix/client +github.com/terraform-providers/terraform-provider-nutanix/client/v3 +github.com/terraform-providers/terraform-provider-nutanix/utils # github.com/terraform-providers/terraform-provider-random v1.3.2-0.20190925210718-83518d96ae4f ## explicit github.com/terraform-providers/terraform-provider-random/random @@ -3158,6 +3166,7 @@ sigs.k8s.io/yaml # github.com/terraform-providers/terraform-provider-ignition/v2 => github.com/community-terraform-providers/terraform-provider-ignition/v2 v2.1.0 # k8s.io/client-go => k8s.io/client-go v0.23.0 # k8s.io/kubectl => k8s.io/kubectl v0.23.0 +# github.com/terraform-providers/terraform-provider-nutanix => github.com/nutanix/terraform-provider-nutanix v1.2.2-0.20211029075448-e21f85ac2cf7 # sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20210121023454-5ffc5f422a80 # sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210626224711-5d94c794092f # sigs.k8s.io/cluster-api-provider-openstack => github.com/openshift/cluster-api-provider-openstack v0.0.0-20211111204942-611d320170af