diff --git a/pkg/api/error.go b/pkg/api/error.go
index cad5a8dc921..dbe8552426f 100644
--- a/pkg/api/error.go
+++ b/pkg/api/error.go
@@ -80,6 +80,7 @@ var (
 	CloudErrorCodeUnsupportedMediaType = "UnsupportedMediaType"
 	CloudErrorCodeInvalidLinkedVNet = "InvalidLinkedVNet"
 	CloudErrorCodeInvalidLinkedRouteTable = "InvalidLinkedRouteTable"
+	CloudErrorCodeInvalidLinkedDiskEncryptionSet = "InvalidLinkedDiskEncryptionSet"
 	CloudErrorCodeNotFound = "NotFound"
 	CloudErrorCodeForbidden = "Forbidden"
 	CloudErrorCodeInvalidSubscriptionState = "InvalidSubscriptionState"
diff --git a/pkg/api/v20210131preview/openshiftcluster_validatestatic.go b/pkg/api/v20210131preview/openshiftcluster_validatestatic.go
index 5059c5ed62c..0506454b559 100644
--- a/pkg/api/v20210131preview/openshiftcluster_validatestatic.go
+++ b/pkg/api/v20210131preview/openshiftcluster_validatestatic.go
@@ -214,7 +214,6 @@ func (sv *openShiftClusterStaticValidator) validateNetworkProfile(path string, n
 }
 
 func (sv *openShiftClusterStaticValidator) validateMasterProfile(path string, mp *MasterProfile) error {
-
 	if !validate.VMSizeIsValid(api.VMSize(mp.VMSize), sv.requireD2sV3Workers, true) {
 		return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", "The provided master VM size '%s' is invalid.", mp.VMSize)
 	}
@@ -228,6 +227,18 @@ func (sv *openShiftClusterStaticValidator) validateMasterProfile(path string, mp
 	if sr.SubscriptionID != sv.r.SubscriptionID {
 		return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", "The provided master VM subnet '%s' is invalid: must be in same subscription as cluster.", mp.SubnetID)
 	}
+	if mp.DiskEncryptionSetID != "" {
+		if !validate.RxDiskEncryptionSetID.MatchString(mp.DiskEncryptionSetID) {
+			return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskEncryptionSetId", "The provided master disk encryption set '%s' is invalid.", mp.DiskEncryptionSetID)
+		}
+		desr, err := azure.ParseResourceID(mp.DiskEncryptionSetID)
+		if err != nil {
+			return err
+		}
+		if desr.SubscriptionID != sv.r.SubscriptionID {
+			return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskEncryptionSetId", "The provided master disk encryption set '%s' is invalid: must be in same subscription as cluster.", mp.DiskEncryptionSetID)
+		}
+	}
 
 	return nil
 }
@@ -262,6 +273,9 @@ func (sv *openShiftClusterStaticValidator) validateWorkerProfile(path string, wp
 	if wp.Count < 2 || wp.Count > 50 {
 		return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".count", "The provided worker count '%d' is invalid.", wp.Count)
 	}
+	if !strings.EqualFold(mp.DiskEncryptionSetID, wp.DiskEncryptionSetID) {
+		return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskEncryptionSetId", "The provided worker disk encryption set '%s' is invalid: must be the same as master disk encryption set '%s'.", wp.DiskEncryptionSetID, mp.DiskEncryptionSetID)
+	}
 
 	return nil
 }
diff --git a/pkg/api/v20210131preview/openshiftcluster_validatestatic_test.go b/pkg/api/v20210131preview/openshiftcluster_validatestatic_test.go
index 347788cfd84..d10b8497ea9 100644
--- a/pkg/api/v20210131preview/openshiftcluster_validatestatic_test.go
+++ b/pkg/api/v20210131preview/openshiftcluster_validatestatic_test.go
@@ -463,7 +463,7 @@ func TestOpenShiftClusterStaticValidateNetworkProfile(t *testing.T) {
 }
 
 func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) {
-	tests := []*validateTest{
+	commonTests := []*validateTest{
 		{
 			name: "valid",
 		},
@@ -488,10 +488,37 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) {
 			},
 			wantErr: "400: InvalidParameter: properties.masterProfile.subnetId: The provided master VM subnet '/subscriptions/7a3036d1-60a1-4605-8a41-44955e050804/resourcegroups/test-vnet/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/master' is invalid: must be in same subscription as cluster.",
 		},
+		{
+			name: "disk encryption set is invalid",
+			modify: func(oc *OpenShiftCluster) {
+				oc.Properties.MasterProfile.DiskEncryptionSetID = "invalid"
+				oc.Properties.WorkerProfiles[0].DiskEncryptionSetID = "invalid"
+			},
+			wantErr: "400: InvalidParameter: properties.masterProfile.diskEncryptionSetId: The provided master disk encryption set 'invalid' is invalid.",
+		},
+		{
+			name: "disk encryption set not matching cluster subscriptionId",
+			modify: func(oc *OpenShiftCluster) {
+				oc.Properties.MasterProfile.DiskEncryptionSetID = "/subscriptions/7a3036d1-60a1-4605-8a41-44955e050804/resourceGroups/fakeRG/providers/Microsoft.Compute/diskEncryptionSets/fakeDES1"
+			},
+			wantErr: "400: InvalidParameter: properties.masterProfile.diskEncryptionSetId: The provided master disk encryption set '/subscriptions/7a3036d1-60a1-4605-8a41-44955e050804/resourceGroups/fakeRG/providers/Microsoft.Compute/diskEncryptionSets/fakeDES1' is invalid: must be in same subscription as cluster.",
+		},
 	}
 
-	runTests(t, testModeCreate, tests)
-	runTests(t, testModeUpdate, tests)
+	createTests := []*validateTest{
+		{
+			name: "disk encryption set is valid",
+			modify: func(oc *OpenShiftCluster) {
+				desID := fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster/providers/Microsoft.Compute/diskEncryptionSets/test-disk-encryption-set", subscriptionID)
+				oc.Properties.MasterProfile.DiskEncryptionSetID = desID
+				oc.Properties.WorkerProfiles[0].DiskEncryptionSetID = desID
+			},
+		},
+	}
+
+	runTests(t, testModeCreate, createTests)
+	runTests(t, testModeCreate, commonTests)
+	runTests(t, testModeUpdate, commonTests)
 }
 
 func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) {
@@ -570,6 +597,14 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) {
 			},
 			wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].count: The provided worker count '51' is invalid.",
 		},
+		{
+			name: "disk encryption set not matching master disk encryption set",
+			modify: func(oc *OpenShiftCluster) {
+				oc.Properties.MasterProfile.DiskEncryptionSetID = fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster/providers/Microsoft.Compute/diskEncryptionSets/test-disk-encryption-set", subscriptionID)
+				oc.Properties.WorkerProfiles[0].DiskEncryptionSetID = "/subscriptions/7a3036d1-60a1-4605-8a41-44955e050804/resourceGroups/fakeRG/providers/Microsoft.Compute/diskEncryptionSets/fakeDES1"
+			},
+			wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].diskEncryptionSetId: The provided worker disk encryption set '/subscriptions/7a3036d1-60a1-4605-8a41-44955e050804/resourceGroups/fakeRG/providers/Microsoft.Compute/diskEncryptionSets/fakeDES1' is invalid: must be the same as master disk encryption set '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-cluster/providers/Microsoft.Compute/diskEncryptionSets/test-disk-encryption-set'.",
+		},
 	}
 
 	// We do not perform this validation on update
diff --git a/pkg/api/validate/dynamic/diskencryptionset.go b/pkg/api/validate/dynamic/diskencryptionset.go
new file mode 100644
index 00000000000..359f77acee8
--- /dev/null
+++ b/pkg/api/validate/dynamic/diskencryptionset.go
@@ -0,0 +1,108 @@
+package dynamic
+
+// Copyright (c) Microsoft Corporation.
+// Licensed under the Apache License 2.0.
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"k8s.io/apimachinery/pkg/util/wait"
+
+	"github.com/Azure/ARO-RP/pkg/api"
+)
+
+func (dv *dynamic) ValidateDiskEncryptionSets(ctx context.Context, oc *api.OpenShiftCluster) error {
+	dv.log.Print("ValidateDiskEncryptionSet")
+
+	// It is very likely that master and worker profiles use the same
+	// disk encryption set, so to optimise we only validate unique ones.
+	// We maintain the slice of ids separately to the map to have stable
+	// validation order because iteration order for maps is not stable.
+	uniqueIds := map[string]struct{}{}
+	ids := []string{}
+	paths := []string{}
+	if oc.Properties.MasterProfile.DiskEncryptionSetID != "" {
+		uniqueIds[strings.ToLower(oc.Properties.MasterProfile.DiskEncryptionSetID)] = struct{}{}
+		ids = append(ids, oc.Properties.MasterProfile.DiskEncryptionSetID)
+		paths = append(paths, "properties.masterProfile.diskEncryptionSetId")
+	}
+	for i, wp := range oc.Properties.WorkerProfiles {
+		if wp.DiskEncryptionSetID != "" {
+			lowercasedId := strings.ToLower(wp.DiskEncryptionSetID)
+			if _, ok := uniqueIds[lowercasedId]; ok {
+				continue
+			}
+
+			uniqueIds[lowercasedId] = struct{}{}
+			ids = append(ids, wp.DiskEncryptionSetID)
+			paths = append(paths, fmt.Sprintf("properties.workerProfiles[%d].diskEncryptionSetId", i))
+		}
+	}
+
+	for i, id := range ids {
+		r, err := azure.ParseResourceID(id)
+		if err != nil {
+			return err
+		}
+
+		err = dv.validateDiskEncryptionSetPermissions(ctx, &r, paths[i])
+		if err != nil {
+			return err
+		}
+
+		err = dv.validateDiskEncryptionSetLocation(ctx, &r, oc.Location, paths[i])
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (dv *dynamic) validateDiskEncryptionSetPermissions(ctx context.Context, desr *azure.Resource, path string) error {
+	dv.log.Print("validateDiskEncryptionSetPermissions")
+
+	errCode := api.CloudErrorCodeInvalidResourceProviderPermissions
+	if dv.authorizerType == AuthorizerClusterServicePrincipal {
+		errCode = api.CloudErrorCodeInvalidServicePrincipalPermissions
+	}
+
+	err := dv.validateActions(ctx, desr, []string{
+		"Microsoft.Compute/diskEncryptionSets/read",
+	})
+
+	if err == wait.ErrWaitTimeout {
+		return api.NewCloudError(http.StatusBadRequest, errCode, path, "The %s service principal does not have Reader permission on disk encryption set '%s'.", dv.authorizerType, desr.String())
+	}
+	if detailedErr, ok := err.(autorest.DetailedError); ok &&
+		detailedErr.StatusCode == http.StatusNotFound {
+		return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidLinkedDiskEncryptionSet, path, "The disk encryption set '%s' could not be found.", desr.String())
+	}
+
+	return err
+}
+
+func (dv *dynamic) validateDiskEncryptionSetLocation(ctx context.Context, desr *azure.Resource, location, path string) error {
+	dv.log.Print("validateDiskEncryptionSetLocation")
+
+	des, err := dv.diskEncryptionSets.Get(ctx, desr.ResourceGroup, desr.ResourceName)
+	if err != nil {
+		if detailedErr, ok := err.(autorest.DetailedError); ok &&
+			detailedErr.StatusCode == http.StatusNotFound {
+			return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidLinkedDiskEncryptionSet, path, "The disk encryption set '%s' could not be found.", desr.String())
+		}
+		return err
+	}
+
+	if !strings.EqualFold(*des.Location, location) {
+		return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidLinkedDiskEncryptionSet, "", "The disk encryption set location '%s' must match the cluster location '%s'.", *des.Location, location)
+	}
+
+	return nil
+}
diff --git a/pkg/api/validate/dynamic/diskencryptionset_test.go b/pkg/api/validate/dynamic/diskencryptionset_test.go
new file mode 100644
index 00000000000..01e575fd792
--- /dev/null
+++ b/pkg/api/validate/dynamic/diskencryptionset_test.go
@@ -0,0 +1,295 @@
+package dynamic
+
+// Copyright (c) Microsoft Corporation.
+// Licensed under the Apache License 2.0.
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"testing"
+
+	mgmtcompute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute"
+	mgmtauthorization "github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/to"
+	"github.com/golang/mock/gomock"
+	"github.com/sirupsen/logrus"
+
+	"github.com/Azure/ARO-RP/pkg/api"
+	mock_authorization "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/mgmt/authorization"
+	mock_compute "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/mgmt/compute"
+)
+
+func TestValidateDiskEncryptionSets(t *testing.T) {
+	fakeDesID1 := "/subscriptions/0000000-0000-0000-0000-000000000000/resourceGroups/fakeRG/providers/Microsoft.Compute/diskEncryptionSets/fakeDES1"
+	fakeDesR1, err := azure.ParseResourceID(fakeDesID1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	fakeDesID2 := "/subscriptions/0000000-0000-0000-0000-000000000000/resourceGroups/fakeRG/providers/Microsoft.Compute/diskEncryptionSets/fakeDES2"
+	fakeDesR2, err := azure.ParseResourceID(fakeDesID2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, authorizerType := range []AuthorizerType{AuthorizerClusterServicePrincipal, AuthorizerFirstParty} {
+		wantErrCode := api.CloudErrorCodeInvalidResourceProviderPermissions
+		if authorizerType == AuthorizerClusterServicePrincipal {
+			wantErrCode = api.CloudErrorCodeInvalidServicePrincipalPermissions
+		}
+
+		t.Run(string(authorizerType), func(t *testing.T) {
+			for _, tt := range []struct {
+				name    string
+				oc      *api.OpenShiftCluster
+				mocks   func(permissions *mock_authorization.MockPermissionsClient, diskEncryptionSets *mock_compute.MockDiskEncryptionSetsClient, cancel context.CancelFunc)
+				wantErr string
+			}{
+				{
+					name: "no disk encryption set provided",
+					oc:   &api.OpenShiftCluster{},
+				},
+				{
+					name: "valid disk encryption set",
+					oc: &api.OpenShiftCluster{
+						Location: "eastus",
+						Properties: api.OpenShiftClusterProperties{
+							MasterProfile: api.MasterProfile{
+								DiskEncryptionSetID: fakeDesID1,
+							},
+							WorkerProfiles: []api.WorkerProfile{{
+								DiskEncryptionSetID: fakeDesID1,
+							}},
+						},
+					},
+					mocks: func(permissions *mock_authorization.MockPermissionsClient, diskEncryptionSets *mock_compute.MockDiskEncryptionSetsClient, cancel context.CancelFunc) {
+						permissions.EXPECT().
+							ListForResource(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.Provider, "", fakeDesR1.ResourceType, fakeDesR1.ResourceName).
+							Return([]mgmtauthorization.Permission{{
+								Actions:    &[]string{"Microsoft.Compute/diskEncryptionSets/read"},
+								NotActions: &[]string{},
+							}}, nil)
+						diskEncryptionSets.EXPECT().
+							Get(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.ResourceName).
+							Return(mgmtcompute.DiskEncryptionSet{Location: to.StringPtr("eastus")}, nil)
+					},
+				},
+				{
+					name: "valid permissions multiple disk encryption sets",
+					oc: &api.OpenShiftCluster{
+						Location: "eastus",
+						Properties: api.OpenShiftClusterProperties{
+							MasterProfile: api.MasterProfile{
+								DiskEncryptionSetID: fakeDesID1,
+							},
+							WorkerProfiles: []api.WorkerProfile{{
+								DiskEncryptionSetID: fakeDesID2,
+							}},
+						},
+					},
+					mocks: func(permissions *mock_authorization.MockPermissionsClient, diskEncryptionSets *mock_compute.MockDiskEncryptionSetsClient, cancel context.CancelFunc) {
+						permissions.EXPECT().
+							ListForResource(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.Provider, "", fakeDesR1.ResourceType, fakeDesR1.ResourceName).
+							Return([]mgmtauthorization.Permission{{
+								Actions:    &[]string{"Microsoft.Compute/diskEncryptionSets/read"},
+								NotActions: &[]string{},
+							}}, nil)
+						permissions.EXPECT().
+							ListForResource(gomock.Any(), fakeDesR2.ResourceGroup, fakeDesR2.Provider, "", fakeDesR2.ResourceType, fakeDesR2.ResourceName).
+							Return([]mgmtauthorization.Permission{{
+								Actions:    &[]string{"Microsoft.Compute/diskEncryptionSets/read"},
+								NotActions: &[]string{},
+							}}, nil)
+						diskEncryptionSets.EXPECT().
+							Get(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.ResourceName).
+							Return(mgmtcompute.DiskEncryptionSet{Location: to.StringPtr("eastus")}, nil)
+						diskEncryptionSets.EXPECT().
+							Get(gomock.Any(), fakeDesR2.ResourceGroup, fakeDesR2.ResourceName).
+							Return(mgmtcompute.DiskEncryptionSet{Location: to.StringPtr("eastus")}, nil)
+					},
+				},
+				{
+					name: "disk encryption set not found",
+					oc: &api.OpenShiftCluster{
+						Location: "eastus",
+						Properties: api.OpenShiftClusterProperties{
+							MasterProfile: api.MasterProfile{
+								DiskEncryptionSetID: fakeDesID1,
+							},
+							WorkerProfiles: []api.WorkerProfile{{
+								DiskEncryptionSetID: fakeDesID1,
+							}},
+						},
+					},
+					mocks: func(permissions *mock_authorization.MockPermissionsClient, diskEncryptionSets *mock_compute.MockDiskEncryptionSetsClient, cancel context.CancelFunc) {
+						permissions.EXPECT().
+							ListForResource(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.Provider, "", fakeDesR1.ResourceType, fakeDesR1.ResourceName).
+							Return([]mgmtauthorization.Permission{{
+								Actions:    &[]string{"Microsoft.Compute/diskEncryptionSets/read"},
+								NotActions: &[]string{},
+							}}, nil)
+						diskEncryptionSets.EXPECT().
+							Get(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.ResourceName).
+							Return(mgmtcompute.DiskEncryptionSet{}, autorest.DetailedError{StatusCode: http.StatusNotFound})
+					},
+					wantErr: fmt.Sprintf("400: InvalidLinkedDiskEncryptionSet: properties.masterProfile.diskEncryptionSetId: The disk encryption set '%s' could not be found.", fakeDesID1),
+				},
+				{
+					name: "disk encryption set unhandled permissions error",
+					oc: &api.OpenShiftCluster{
+						Location: "eastus",
+						Properties: api.OpenShiftClusterProperties{
+							MasterProfile: api.MasterProfile{
+								DiskEncryptionSetID: fakeDesID1,
+							},
+							WorkerProfiles: []api.WorkerProfile{{
+								DiskEncryptionSetID: fakeDesID1,
+							}},
+						},
+					},
+					mocks: func(permissions *mock_authorization.MockPermissionsClient, diskEncryptionSets *mock_compute.MockDiskEncryptionSetsClient, cancel context.CancelFunc) {
+						permissions.EXPECT().
+							ListForResource(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.Provider, "", fakeDesR1.ResourceType, fakeDesR1.ResourceName).
+							Return(nil, errors.New("fakeerr"))
+					},
+					wantErr: "fakeerr",
+				},
+				{
+					name: "disk encryption set unhandled get error",
+					oc: &api.OpenShiftCluster{
+						Location: "eastus",
+						Properties: api.OpenShiftClusterProperties{
+							MasterProfile: api.MasterProfile{
+								DiskEncryptionSetID: fakeDesID1,
+							},
+							WorkerProfiles: []api.WorkerProfile{{
+								DiskEncryptionSetID: fakeDesID1,
+							}},
+						},
+					},
+					mocks: func(permissions *mock_authorization.MockPermissionsClient, diskEncryptionSets *mock_compute.MockDiskEncryptionSetsClient, cancel context.CancelFunc) {
+						permissions.EXPECT().
+							ListForResource(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.Provider, "", fakeDesR1.ResourceType, fakeDesR1.ResourceName).
+							Return([]mgmtauthorization.Permission{{
+								Actions:    &[]string{"Microsoft.Compute/diskEncryptionSets/read"},
+								NotActions: &[]string{},
+							}}, nil)
+						diskEncryptionSets.EXPECT().
+							Get(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.ResourceName).
+							Return(mgmtcompute.DiskEncryptionSet{}, errors.New("fakeerr"))
+					},
+					wantErr: "fakeerr",
+				},
+				{
+					name: "invalid permissions",
+					oc: &api.OpenShiftCluster{
+						Location: "eastus",
+						Properties: api.OpenShiftClusterProperties{
+							MasterProfile: api.MasterProfile{
+								DiskEncryptionSetID: fakeDesID1,
+							},
+							WorkerProfiles: []api.WorkerProfile{{
+								DiskEncryptionSetID: fakeDesID2,
+							}},
+						},
+					},
+					mocks: func(permissions *mock_authorization.MockPermissionsClient, diskEncryptionSets *mock_compute.MockDiskEncryptionSetsClient, cancel context.CancelFunc) {
+						permissions.EXPECT().
+							ListForResource(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.Provider, "", fakeDesR1.ResourceType, fakeDesR1.ResourceName).
+							Do(func(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) {
+								cancel()
+							})
+					},
+					wantErr: fmt.Sprintf("400: %s: properties.masterProfile.diskEncryptionSetId: The %s service principal does not have Reader permission on disk encryption set '%s'.", wantErrCode, authorizerType, fakeDesID1),
+				},
+				{
+					name: "one of the disk encryption set permissions not found",
+					oc: &api.OpenShiftCluster{
+						Location: "eastus",
+						Properties: api.OpenShiftClusterProperties{
+							MasterProfile: api.MasterProfile{
+								DiskEncryptionSetID: fakeDesID1,
+							},
+							WorkerProfiles: []api.WorkerProfile{{
+								DiskEncryptionSetID: fakeDesID2,
+							}},
+						},
+					},
+					mocks: func(permissions *mock_authorization.MockPermissionsClient, diskEncryptionSets *mock_compute.MockDiskEncryptionSetsClient, cancel context.CancelFunc) {
+						permissions.EXPECT().
+							ListForResource(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.Provider, "", fakeDesR1.ResourceType, fakeDesR1.ResourceName).
+							Return([]mgmtauthorization.Permission{{
+								Actions:    &[]string{"Microsoft.Compute/diskEncryptionSets/read"},
+								NotActions: &[]string{},
+							}}, nil)
+						permissions.EXPECT().
+							ListForResource(gomock.Any(), fakeDesR2.ResourceGroup, fakeDesR2.Provider, "", fakeDesR2.ResourceType, fakeDesR2.ResourceName).
+							Return(nil, autorest.DetailedError{StatusCode: http.StatusNotFound})
+						diskEncryptionSets.EXPECT().
+							Get(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.ResourceName).
+							Return(mgmtcompute.DiskEncryptionSet{Location: to.StringPtr("eastus")}, nil)
+					},
+					wantErr: fmt.Sprintf("400: InvalidLinkedDiskEncryptionSet: properties.workerProfiles[0].diskEncryptionSetId: The disk encryption set '%s' could not be found.", fakeDesID2),
+				},
+				{
+					name: "disk encryption set invalid location",
+					oc: &api.OpenShiftCluster{
+						Location: "eastus",
+						Properties: api.OpenShiftClusterProperties{
+							MasterProfile: api.MasterProfile{
+								DiskEncryptionSetID: fakeDesID1,
+							},
+							WorkerProfiles: []api.WorkerProfile{{
+								DiskEncryptionSetID: fakeDesID1,
+							}},
+						},
+					},
+					mocks: func(permissions *mock_authorization.MockPermissionsClient, diskEncryptionSets *mock_compute.MockDiskEncryptionSetsClient, cancel context.CancelFunc) {
+						permissions.EXPECT().
+							ListForResource(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.Provider, "", fakeDesR1.ResourceType, fakeDesR1.ResourceName).
+							Return([]mgmtauthorization.Permission{{
+								Actions:    &[]string{"Microsoft.Compute/diskEncryptionSets/read"},
+								NotActions: &[]string{},
+							}}, nil)
+						diskEncryptionSets.EXPECT().
+							Get(gomock.Any(), fakeDesR1.ResourceGroup, fakeDesR1.ResourceName).
+							Return(mgmtcompute.DiskEncryptionSet{Location: to.StringPtr("westeurope")}, nil)
+					},
+					wantErr: "400: InvalidLinkedDiskEncryptionSet: : The disk encryption set location 'westeurope' must match the cluster location 'eastus'.",
+				},
+			} {
+				t.Run(tt.name, func(t *testing.T) {
+					ctx, cancel := context.WithCancel(context.Background())
+					defer cancel()
+
+					controller := gomock.NewController(t)
+					defer controller.Finish()
+
+					permissionsClient := mock_authorization.NewMockPermissionsClient(controller)
+					diskEncryptionSetsClient := mock_compute.NewMockDiskEncryptionSetsClient(controller)
+
+					if tt.mocks != nil {
+						tt.mocks(permissionsClient, diskEncryptionSetsClient, cancel)
+					}
+
+					dv := &dynamic{
+						authorizerType:     authorizerType,
+						log:                logrus.NewEntry(logrus.StandardLogger()),
+						permissions:        permissionsClient,
+						diskEncryptionSets: diskEncryptionSetsClient,
+					}
+
+					err := dv.ValidateDiskEncryptionSets(ctx, tt.oc)
+					if err != nil && err.Error() != tt.wantErr ||
+						err == nil && tt.wantErr != "" {
+						t.Error(err)
+					}
+				})
+			}
+		})
+	}
+
+}
diff --git a/pkg/api/validate/dynamic/dynamic.go b/pkg/api/validate/dynamic/dynamic.go
index 90a3eda4978..1c3d603ac98 100644
--- a/pkg/api/validate/dynamic/dynamic.go
+++ b/pkg/api/validate/dynamic/dynamic.go
@@ -19,6 +19,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 
 	"github.com/Azure/ARO-RP/pkg/api"
+	"github.com/Azure/ARO-RP/pkg/env"
 	"github.com/Azure/ARO-RP/pkg/util/azureclient"
 	"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/authorization"
 	"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/compute"
@@ -31,7 +32,10 @@ import (
 )
 
 type Subnet struct {
-	ID string
+	// ID is a resource id of the subnet
+	ID string
+
+	// Path is a path in the cluster document. For example, properties.workerProfiles[0].subnetId
 	Path string
 }
 
@@ -41,20 +45,23 @@ type Dynamic interface {
 	ValidateSubnets(ctx context.Context, oc *api.OpenShiftCluster, subnets []Subnet) error
 	ValidateProviders(ctx context.Context) error
 	ValidateServicePrincipal(ctx context.Context, clientID, clientSecret, tenantID string) error
-	ValidateQuota(ctx context.Context, oc *api.OpenShiftCluster) error
+	ValidateDiskEncryptionSets(ctx context.Context, oc *api.OpenShiftCluster) error
+	ValidateEncryptionAtHost(ctx context.Context, oc *api.OpenShiftCluster) error
 }
 
 type dynamic struct {
 	log            *logrus.Entry
 	authorizerType AuthorizerType
+	env            env.Interface
 	azEnv          *azureclient.AROEnvironment
 
-	permissions     authorization.PermissionsClient
-	providers       features.ProvidersClient
-	virtualNetworks virtualNetworksGetClient
-	spComputeUsage  compute.UsageClient
-	spNetworkUsage  network.UsageClient
+	permissions        authorization.PermissionsClient
+	providers          features.ProvidersClient
+	virtualNetworks    virtualNetworksGetClient
+	diskEncryptionSets compute.DiskEncryptionSetsClient
+	spComputeUsage     compute.UsageClient
+	spNetworkUsage     network.UsageClient
 }
 
 type AuthorizerType string
@@ -62,17 +69,19 @@ type AuthorizerType string
 const AuthorizerFirstParty AuthorizerType = "resource provider"
 const AuthorizerClusterServicePrincipal AuthorizerType = "cluster"
 
-func NewValidator(log *logrus.Entry, azEnv *azureclient.AROEnvironment, subscriptionID string, authorizer refreshable.Authorizer, authorizerType AuthorizerType) (*dynamic, error) {
+func NewValidator(log *logrus.Entry, env env.Interface, azEnv *azureclient.AROEnvironment, subscriptionID string, authorizer refreshable.Authorizer, authorizerType AuthorizerType) (Dynamic, error) {
 	return &dynamic{
 		log:            log,
 		authorizerType: authorizerType,
+		env:            env,
 		azEnv:          azEnv,
 
-		providers:       features.NewProvidersClient(azEnv, subscriptionID, authorizer),
-		spComputeUsage:  compute.NewUsageClient(azEnv, subscriptionID, authorizer),
-		spNetworkUsage:  network.NewUsageClient(azEnv, subscriptionID, authorizer),
-		permissions:     authorization.NewPermissionsClient(azEnv, subscriptionID, authorizer),
-		virtualNetworks: newVirtualNetworksCache(network.NewVirtualNetworksClient(azEnv, subscriptionID, authorizer)),
+		providers:          features.NewProvidersClient(azEnv, subscriptionID, authorizer),
+		spComputeUsage:     compute.NewUsageClient(azEnv, subscriptionID, authorizer),
+		spNetworkUsage:     network.NewUsageClient(azEnv, subscriptionID, authorizer),
+		permissions:        authorization.NewPermissionsClient(azEnv, subscriptionID, authorizer),
+		virtualNetworks:    newVirtualNetworksCache(network.NewVirtualNetworksClient(azEnv, subscriptionID, authorizer)),
+		diskEncryptionSets: compute.NewDiskEncryptionSetsClient(azEnv, subscriptionID, authorizer),
 	}, nil
 }
 
@@ -109,7 +118,7 @@ func (dv *dynamic) ValidateVnet(ctx context.Context, location string, subnets []
 		return err
 	}
 
-	err = dv.validateLocation(ctx, vnet, location)
+	err = dv.validateVnetLocation(ctx, vnet, location)
 	if err != nil {
 		return err
 	}
@@ -288,8 +297,8 @@ func (dv *dynamic) validateCIDRRanges(ctx context.Context, subnets []Subnet, add
 	return nil
 }
 
-func (dv *dynamic) validateLocation(ctx context.Context, vnetr azure.Resource, location string) error {
-	dv.log.Print("validateLocation")
+func (dv *dynamic) validateVnetLocation(ctx context.Context, vnetr azure.Resource, location string) error {
+	dv.log.Print("validateVnetLocation")
 
 	vnet, err := dv.virtualNetworks.Get(ctx, vnetr.ResourceGroup, vnetr.ResourceName, "")
 	if err != nil {
diff --git a/pkg/api/validate/dynamic/dynamic_test.go b/pkg/api/validate/dynamic/dynamic_test.go
index c483142a9af..e78eab56f59 100644
--- a/pkg/api/validate/dynamic/dynamic_test.go
+++ b/pkg/api/validate/dynamic/dynamic_test.go
@@ -527,7 +527,7 @@ func TestValidateVnetLocation(t *testing.T) {
 				t.Fatal(err)
 			}
 
-			err = dv.validateLocation(ctx, vnetr, "eastus")
+			err = dv.validateVnetLocation(ctx, vnetr, "eastus")
 			if err != nil && err.Error() != tt.wantErr ||
 				err == nil && tt.wantErr != "" {
 				t.Error(err)
diff --git a/pkg/api/validate/dynamic/encryptionathost.go b/pkg/api/validate/dynamic/encryptionathost.go
new file mode 100644
index 00000000000..e83a313d166
--- /dev/null
+++ b/pkg/api/validate/dynamic/encryptionathost.go
@@ -0,0 +1,48 @@
+package dynamic
+
+// Copyright (c) Microsoft Corporation.
+// Licensed under the Apache License 2.0.
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/Azure/ARO-RP/pkg/api"
+	"github.com/Azure/ARO-RP/pkg/util/computeskus"
+)
+
+func (dv *dynamic) ValidateEncryptionAtHost(ctx context.Context, oc *api.OpenShiftCluster) error {
+	dv.log.Print("ValidateEncryptionAtHost")
+
+	if oc.Properties.MasterProfile.EncryptionAtHost {
+		err := dv.validateEncryptionAtHostSupport(oc.Properties.MasterProfile.VMSize, "properties.masterProfile.encryptionAtHost")
+		if err != nil {
+			return err
+		}
+	}
+
+	for i, wp := range oc.Properties.WorkerProfiles {
+		if wp.EncryptionAtHost {
+			err := dv.validateEncryptionAtHostSupport(wp.VMSize, fmt.Sprintf("properties.workerProfiles[%d].encryptionAtHost", i))
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (dv *dynamic) validateEncryptionAtHostSupport(VMSize api.VMSize, path string) error {
+	sku, err := dv.env.VMSku(string(VMSize))
+	if err != nil {
+		return err
+	}
+
+	if !computeskus.HasCapability(sku, "EncryptionAtHostSupported") {
+		return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path, "VM SKU '%s' does not support encryption at host.", VMSize)
+	}
+
+	return nil
+}
diff --git a/pkg/api/validate/dynamic/encryptionathost_test.go b/pkg/api/validate/dynamic/encryptionathost_test.go
new file mode 100644
index 00000000000..20aa01219ea
--- /dev/null
+++ b/pkg/api/validate/dynamic/encryptionathost_test.go
@@ -0,0 +1,160 @@
+package dynamic
+
+// Copyright (c) Microsoft Corporation.
+// Licensed under the Apache License 2.0.
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	mgmtcompute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute"
+	"github.com/Azure/go-autorest/autorest/to"
+	"github.com/golang/mock/gomock"
+	"github.com/sirupsen/logrus"
+
+	"github.com/Azure/ARO-RP/pkg/api"
+	mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
+)
+
+func TestValidateEncryptionAtHost(t *testing.T) {
+	for _, tt := range []struct {
+		name    string
+		oc      *api.OpenShiftCluster
+		mocks   func(env *mock_env.MockInterface)
+		wantErr string
+	}{
+		{
+			name: "encryption at host disabled",
+			oc:   &api.OpenShiftCluster{},
+		},
+		{
+			name: "encryption at host enabled with valid VM SKU",
+			oc: &api.OpenShiftCluster{
+				Properties: api.OpenShiftClusterProperties{
+					MasterProfile: api.MasterProfile{
+						EncryptionAtHost: true,
+						VMSize:           api.VMSizeStandardD8sV3,
+					},
+					WorkerProfiles: []api.WorkerProfile{{
+						EncryptionAtHost: true,
+						VMSize:           api.VMSizeStandardD4asV4,
+					}},
+				},
+			},
+			mocks: func(env *mock_env.MockInterface) {
+				env.EXPECT().VMSku(string(api.VMSizeStandardD8sV3)).
+					Return(&mgmtcompute.ResourceSku{
+						Capabilities: &([]mgmtcompute.ResourceSkuCapabilities{
+							{Name: to.StringPtr("EncryptionAtHostSupported"), Value: to.StringPtr("True")},
+						}),
+					}, nil)
+				env.EXPECT().VMSku(string(api.VMSizeStandardD4asV4)).
+					Return(&mgmtcompute.ResourceSku{
+						Capabilities: &([]mgmtcompute.ResourceSkuCapabilities{
+							{Name: to.StringPtr("EncryptionAtHostSupported"), Value: to.StringPtr("True")},
+						}),
+					}, nil)
+			},
+		},
+		{
+			name: "encryption at host enabled with unsupported master VM SKU",
+			oc: &api.OpenShiftCluster{
+				Properties: api.OpenShiftClusterProperties{
+					MasterProfile: api.MasterProfile{
+						EncryptionAtHost: true,
+						VMSize:           api.VMSizeStandardG5,
+					},
+					WorkerProfiles: []api.WorkerProfile{{
+						EncryptionAtHost: true,
+						VMSize:           api.VMSizeStandardD4asV4,
+					}},
+				},
+			},
+			mocks: func(env *mock_env.MockInterface) {
+				env.EXPECT().VMSku(string(api.VMSizeStandardG5)).
+					Return(&mgmtcompute.ResourceSku{
+						Capabilities: &([]mgmtcompute.ResourceSkuCapabilities{
+							{Name: to.StringPtr("EncryptionAtHostSupported"), Value: to.StringPtr("False")},
+						}),
+					}, nil)
+			},
+			wantErr: "400: InvalidParameter: properties.masterProfile.encryptionAtHost: VM SKU 'Standard_G5' does not support encryption at host.",
+		},
+		{
+			name: "encryption at host enabled with unsupported worker VM SKU",
+			oc: &api.OpenShiftCluster{
+				Properties: api.OpenShiftClusterProperties{
+					MasterProfile: api.MasterProfile{
+						EncryptionAtHost: true,
+						VMSize:           api.VMSizeStandardD8sV3,
+					},
+					WorkerProfiles: []api.WorkerProfile{{
+						EncryptionAtHost: true,
+						VMSize:           api.VMSizeStandardG5,
+					}},
+				},
+			},
+			mocks: func(env *mock_env.MockInterface) {
+				env.EXPECT().VMSku(string(api.VMSizeStandardD8sV3)).
+					Return(&mgmtcompute.ResourceSku{
+						Capabilities: &([]mgmtcompute.ResourceSkuCapabilities{
+							{Name: to.StringPtr("EncryptionAtHostSupported"), Value: to.StringPtr("True")},
+						}),
+					}, nil)
+				env.EXPECT().VMSku(string(api.VMSizeStandardG5)).
+					Return(&mgmtcompute.ResourceSku{
+						Capabilities: &([]mgmtcompute.ResourceSkuCapabilities{
+							{Name: to.StringPtr("EncryptionAtHostSupported"), Value: to.StringPtr("False")},
+						}),
+					}, nil)
+			},
+			wantErr: "400: InvalidParameter: properties.workerProfiles[0].encryptionAtHost: VM SKU 'Standard_G5' does not support encryption at host.",
+		},
+		{
+			name: "encryption at host enabled with unknown VM SKU",
+			oc: &api.OpenShiftCluster{
+				Properties: api.OpenShiftClusterProperties{
+					MasterProfile: api.MasterProfile{
+						EncryptionAtHost: true,
+						VMSize:           "invalid",
+					},
+					WorkerProfiles: []api.WorkerProfile{{
+						EncryptionAtHost: true,
+						VMSize:           api.VMSizeStandardG5,
+					}},
+				},
+			},
+			mocks: func(env *mock_env.MockInterface) {
+				env.EXPECT().VMSku("invalid").
+					Return(nil, errors.New("fake error"))
+			},
+			wantErr: "fake error",
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			ctx := context.Background()
+
+			controller := gomock.NewController(t)
+			defer controller.Finish()
+
+			_env := mock_env.NewMockInterface(controller)
+
+			if tt.mocks != nil {
+				tt.mocks(_env)
+			}
+
+			dv := &dynamic{
+				env:            _env,
+				authorizerType: AuthorizerClusterServicePrincipal,
+				log:            logrus.NewEntry(logrus.StandardLogger()),
+			}
+
+			err := dv.ValidateEncryptionAtHost(ctx, tt.oc)
+			if err != nil && err.Error() != tt.wantErr ||
+				err == nil && tt.wantErr != "" {
+				t.Error(err)
+			}
+		})
+	}
+}
diff --git a/pkg/api/validate/format_regexps.go b/pkg/api/validate/format_regexps.go
index 291f27e3436..f48e840ec38 100644
--- a/pkg/api/validate/format_regexps.go
+++ b/pkg/api/validate/format_regexps.go
@@ -9,10 +9,11 @@ import (
 
 // Regular expressions used to validate the format of resource names and IDs acceptable by API.
 var (
-	RxClusterID       = regexp.MustCompile(`(?i)^/subscriptions/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/resourceGroups/[-a-z0-9_().]{0,89}[-a-z0-9_()]/providers/Microsoft\.RedHatOpenShift/openShiftClusters/[-a-z0-9_().]{0,89}[-a-z0-9_()]$`)
-	RxResourceGroupID = regexp.MustCompile(`(?i)^/subscriptions/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/resourceGroups/[-a-z0-9_().]{0,89}[-a-z0-9_()]$`)
-	RxSubnetID        = regexp.MustCompile(`(?i)^/subscriptions/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/resourceGroups/[-a-z0-9_().]{0,89}[-a-z0-9_()]/providers/Microsoft\.Network/virtualNetworks/[-a-z0-9_.]{2,64}/subnets/[-a-z0-9_.]{2,80}$`)
-	RxDomainName      = regexp.MustCompile(`^` +
+	RxClusterID           = regexp.MustCompile(`(?i)^/subscriptions/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/resourceGroups/[-a-z0-9_().]{0,89}[-a-z0-9_()]/providers/Microsoft\.RedHatOpenShift/openShiftClusters/[-a-z0-9_().]{0,89}[-a-z0-9_()]$`)
+	RxResourceGroupID     = regexp.MustCompile(`(?i)^/subscriptions/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/resourceGroups/[-a-z0-9_().]{0,89}[-a-z0-9_()]$`)
+	RxSubnetID            = regexp.MustCompile(`(?i)^/subscriptions/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/resourceGroups/[-a-z0-9_().]{0,89}[-a-z0-9_()]/providers/Microsoft\.Network/virtualNetworks/[-a-z0-9_.]{2,64}/subnets/[-a-z0-9_.]{2,80}$`)
+	RxDiskEncryptionSetID = regexp.MustCompile(`(?i)^/subscriptions/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/resourceGroups/[-a-z0-9_().]{0,89}[-a-z0-9_()]/providers/Microsoft\.Compute/diskEncryptionSets/[-a-z0-9_]{1,80}$`)
+	RxDomainName          = regexp.MustCompile(`^` +
 		`([a-z][-a-z0-9]{0,61}[a-z0-9])` +
 		`(\.([a-z0-9]|[a-z0-9][-a-z0-9]{0,61}[a-z0-9]))*` +
 		`$`)
diff --git a/pkg/api/validate/openshiftcluster_validatedynamic.go b/pkg/api/validate/openshiftcluster_validatedynamic.go
index 4a3661406cb..0f7cabc017d 100644
--- a/pkg/api/validate/openshiftcluster_validatedynamic.go
+++ b/pkg/api/validate/openshiftcluster_validatedynamic.go
@@ -22,7 +22,7 @@ type OpenShiftClusterDynamicValidator interface {
 }
 
 // NewOpenShiftClusterDynamicValidator creates a new OpenShiftClusterDynamicValidator
-func NewOpenShiftClusterDynamicValidator(log *logrus.Entry, env env.Core, oc *api.OpenShiftCluster, subscriptionDoc *api.SubscriptionDocument, fpAuthorizer refreshable.Authorizer) OpenShiftClusterDynamicValidator {
+func NewOpenShiftClusterDynamicValidator(log *logrus.Entry, env env.Interface, oc *api.OpenShiftCluster, subscriptionDoc *api.SubscriptionDocument, fpAuthorizer refreshable.Authorizer) OpenShiftClusterDynamicValidator {
 	return &openShiftClusterDynamicValidator{
 		log: log,
 		env: env,
@@ -35,7 +35,7 @@ func NewOpenShiftClusterDynamicValidator(log *logrus.Entry, env env.Core, oc *ap
 
 type openShiftClusterDynamicValidator struct {
 	log *logrus.Entry
-	env env.Core
+	env env.Interface
 
 	oc              *api.OpenShiftCluster
 	subscriptionDoc *api.SubscriptionDocument
@@ -45,21 +45,19 @@ type openShiftClusterDynamicValidator struct {
 // Dynamic validates an OpenShift cluster
 func (dv *openShiftClusterDynamicValidator) Dynamic(ctx context.Context) error {
 	// Get all subnets
-	var subnets []dynamic.Subnet
-	subnets = append(subnets, dynamic.Subnet{
+	subnets := []dynamic.Subnet{{
 		ID:   dv.oc.Properties.MasterProfile.SubnetID,
 		Path: "properties.masterProfile.subnetId",
-	})
-
-	for i, s := range dv.oc.Properties.WorkerProfiles {
+	}}
+	for i, wp := range dv.oc.Properties.WorkerProfiles {
 		subnets = append(subnets, dynamic.Subnet{
-			ID:   s.SubnetID,
+			ID:   wp.SubnetID,
 			Path: fmt.Sprintf("properties.workerProfiles[%d].subnetId", i),
 		})
 	}
 
 	// FP validation
-	fpDynamic, err := dynamic.NewValidator(dv.log, dv.env.Environment(), dv.subscriptionDoc.ID, dv.fpAuthorizer, dynamic.AuthorizerFirstParty)
+	fpDynamic, err := dynamic.NewValidator(dv.log, dv.env, dv.env.Environment(), dv.subscriptionDoc.ID, dv.fpAuthorizer, dynamic.AuthorizerFirstParty)
 	if err != nil {
 		return err
 	}
@@ -69,6 +67,11 @@ func (dv *openShiftClusterDynamicValidator) Dynamic(ctx context.Context) error {
 		return err
 	}
 
+	err = fpDynamic.ValidateDiskEncryptionSets(ctx, dv.oc)
+	if err != nil {
+		return err
+	}
+
 	spp := dv.oc.Properties.ServicePrincipalProfile
 	token, err := aad.GetToken(ctx, dv.log, spp.ClientID, string(spp.ClientSecret), dv.subscriptionDoc.Subscription.Properties.TenantID, dv.env.Environment().ActiveDirectoryEndpoint, dv.env.Environment().ResourceManagerEndpoint)
 	if err != nil {
@@ -77,7 +80,7 @@ func (dv *openShiftClusterDynamicValidator) Dynamic(ctx context.Context) error {
 
 	spAuthorizer := refreshable.NewAuthorizer(token)
 
-	spDynamic, err := dynamic.NewValidator(dv.log, dv.env.Environment(), dv.subscriptionDoc.ID, spAuthorizer, dynamic.AuthorizerClusterServicePrincipal)
+	spDynamic, err := dynamic.NewValidator(dv.log, dv.env, dv.env.Environment(), dv.subscriptionDoc.ID, spAuthorizer, dynamic.AuthorizerClusterServicePrincipal)
 	if err != nil {
 		return err
 	}
@@ -108,5 +111,15 @@ func (dv *openShiftClusterDynamicValidator) Dynamic(ctx context.Context) error {
 		return err
 	}
 
+	err = spDynamic.ValidateDiskEncryptionSets(ctx, dv.oc)
+	if err != nil {
+		return err
+	}
+
+	err = spDynamic.ValidateEncryptionAtHost(ctx, dv.oc)
+	if err != nil {
+		return err
+	}
+
 	return nil
 }
diff --git a/pkg/operator/controllers/checker/serviceprincipalchecker.go b/pkg/operator/controllers/checker/serviceprincipalchecker.go
index fd031180c52..d015fc246dd 100644
--- a/pkg/operator/controllers/checker/serviceprincipalchecker.go
+++ b/pkg/operator/controllers/checker/serviceprincipalchecker.go
@@ -79,7 +79,7 @@ func (r *ServicePrincipalChecker) Check(ctx context.Context) error {
 		updateFailedCondition(cond, err)
 	}
 
-	spDynamic, err := dynamic.NewValidator(r.log, &azEnv, resource.SubscriptionID, nil, dynamic.AuthorizerClusterServicePrincipal)
+	spDynamic, err := dynamic.NewValidator(r.log, nil, &azEnv, resource.SubscriptionID, nil, dynamic.AuthorizerClusterServicePrincipal)
 	if err != nil {
 		return err
 	}
diff --git a/pkg/util/azureclient/mgmt/compute/diskencryptionsets.go b/pkg/util/azureclient/mgmt/compute/diskencryptionsets.go
new file mode 100644
index 00000000000..f356836ae07
--- /dev/null
+++ b/pkg/util/azureclient/mgmt/compute/diskencryptionsets.go
@@ -0,0 +1,34 @@
+package compute
+
+// Copyright (c) Microsoft Corporation.
+// Licensed under the Apache License 2.0.
+
+import (
+	"context"
+
+	mgmtcompute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute"
+	"github.com/Azure/go-autorest/autorest"
+
+	"github.com/Azure/ARO-RP/pkg/util/azureclient"
+)
+
+// DiskEncryptionSetsClient is a minimal interface for azure DiskEncryptionSetsClient
+type DiskEncryptionSetsClient interface {
+	Get(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (result mgmtcompute.DiskEncryptionSet, err error)
+}
+
+type diskEncryptionSetsClient struct {
+	mgmtcompute.DiskEncryptionSetsClient
+}
+
+var _ DiskEncryptionSetsClient = &diskEncryptionSetsClient{}
+
+// NewDiskEncryptionSetsClient creates a new DiskEncryptionSetsClient
+func NewDiskEncryptionSetsClient(environment *azureclient.AROEnvironment, subscriptionID string, authorizer autorest.Authorizer) DiskEncryptionSetsClient {
+	client := mgmtcompute.NewDiskEncryptionSetsClientWithBaseURI(environment.ResourceManagerEndpoint, subscriptionID)
+	client.Authorizer = authorizer
+
+	return &diskEncryptionSetsClient{
+		DiskEncryptionSetsClient: client,
+	}
+}
diff --git a/pkg/util/azureclient/mgmt/compute/generate.go b/pkg/util/azureclient/mgmt/compute/generate.go
index b2a7aad2e14..bb429a7da01 100644
--- a/pkg/util/azureclient/mgmt/compute/generate.go
+++ b/pkg/util/azureclient/mgmt/compute/generate.go
@@ -4,5 +4,5 @@ package compute
 // Licensed under the Apache License 2.0.
 
 //go:generate rm -rf ../../../../util/mocks/$GOPACKAGE
-//go:generate go run ../../../../../vendor/github.com/golang/mock/mockgen -destination=../../../../util/mocks/azureclient/mgmt/$GOPACKAGE/$GOPACKAGE.go github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/$GOPACKAGE DisksClient,ResourceSkusClient,VirtualMachinesClient,UsageClient,VirtualMachineScaleSetVMsClient,VirtualMachineScaleSetsClient
+//go:generate go run ../../../../../vendor/github.com/golang/mock/mockgen -destination=../../../../util/mocks/azureclient/mgmt/$GOPACKAGE/$GOPACKAGE.go github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/$GOPACKAGE DisksClient,ResourceSkusClient,VirtualMachinesClient,UsageClient,VirtualMachineScaleSetVMsClient,VirtualMachineScaleSetsClient,DiskEncryptionSetsClient
 //go:generate go run ../../../../../vendor/golang.org/x/tools/cmd/goimports -local=github.com/Azure/ARO-RP -e -w ../../../../util/mocks/azureclient/mgmt/$GOPACKAGE/$GOPACKAGE.go
diff --git a/pkg/util/mocks/azureclient/mgmt/compute/compute.go b/pkg/util/mocks/azureclient/mgmt/compute/compute.go
index 93a08101456..3f2be02958b 100644
--- a/pkg/util/mocks/azureclient/mgmt/compute/compute.go
+++ b/pkg/util/mocks/azureclient/mgmt/compute/compute.go
@@ -1,5 +1,5 @@
 // Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/compute (interfaces: DisksClient,ResourceSkusClient,VirtualMachinesClient,UsageClient,VirtualMachineScaleSetVMsClient,VirtualMachineScaleSetsClient)
+// Source: github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/compute (interfaces: DisksClient,ResourceSkusClient,VirtualMachinesClient,UsageClient,VirtualMachineScaleSetVMsClient,VirtualMachineScaleSetsClient,DiskEncryptionSetsClient)
 
 // Package mock_compute is a generated GoMock package.
 package mock_compute
@@ -352,3 +352,41 @@ func (mr *MockVirtualMachineScaleSetsClientMockRecorder) List(arg0, arg1 interfa
 	mr.mock.ctrl.T.Helper()
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockVirtualMachineScaleSetsClient)(nil).List), arg0, arg1)
 }
+
+// MockDiskEncryptionSetsClient is a mock of DiskEncryptionSetsClient interface.
+type MockDiskEncryptionSetsClient struct {
+	ctrl     *gomock.Controller
+	recorder *MockDiskEncryptionSetsClientMockRecorder
+}
+
+// MockDiskEncryptionSetsClientMockRecorder is the mock recorder for MockDiskEncryptionSetsClient.
+type MockDiskEncryptionSetsClientMockRecorder struct {
+	mock *MockDiskEncryptionSetsClient
+}
+
+// NewMockDiskEncryptionSetsClient creates a new mock instance.
+func NewMockDiskEncryptionSetsClient(ctrl *gomock.Controller) *MockDiskEncryptionSetsClient {
+	mock := &MockDiskEncryptionSetsClient{ctrl: ctrl}
+	mock.recorder = &MockDiskEncryptionSetsClientMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockDiskEncryptionSetsClient) EXPECT() *MockDiskEncryptionSetsClientMockRecorder {
+	return m.recorder
+}
+
+// Get mocks base method.
+func (m *MockDiskEncryptionSetsClient) Get(arg0 context.Context, arg1, arg2 string) (compute.DiskEncryptionSet, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2)
+	ret0, _ := ret[0].(compute.DiskEncryptionSet)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// Get indicates an expected call of Get.
+func (mr *MockDiskEncryptionSetsClientMockRecorder) Get(arg0, arg1, arg2 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDiskEncryptionSetsClient)(nil).Get), arg0, arg1, arg2)
+}