diff --git a/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go b/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go index a7ceaaaccd79..22b597c02d51 100644 --- a/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go +++ b/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go @@ -583,11 +583,17 @@ func init() { Resources: sets.NewString("events"), }, // PersistentVolumeBinder.findProvisionablePlugin() + // Glusterfs provisioner { APIGroups: []string{storage.GroupName}, - Verbs: sets.NewString("list", "watch"), + Verbs: sets.NewString("list", "watch", "get"), Resources: sets.NewString("storageclasses"), }, + // Gluster provisioner + { + Verbs: sets.NewString("get", "create", "delete"), + Resources: sets.NewString("services", "endpoints"), + }, }, }, ) diff --git a/pkg/cmd/server/kubernetes/master.go b/pkg/cmd/server/kubernetes/master.go index c0d3fc1243ee..bdd0a15aeb7d 100644 --- a/pkg/cmd/server/kubernetes/master.go +++ b/pkg/cmd/server/kubernetes/master.go @@ -66,8 +66,10 @@ import ( "k8s.io/kubernetes/pkg/volume/cinder" "k8s.io/kubernetes/pkg/volume/flexvolume" "k8s.io/kubernetes/pkg/volume/gce_pd" + "k8s.io/kubernetes/pkg/volume/glusterfs" "k8s.io/kubernetes/pkg/volume/host_path" "k8s.io/kubernetes/pkg/volume/nfs" + "k8s.io/kubernetes/pkg/volume/rbd" "k8s.io/kubernetes/pkg/volume/vsphere_volume" "k8s.io/kubernetes/plugin/pkg/scheduler" @@ -262,6 +264,8 @@ func probeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration, na allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...) allPlugins = append(allPlugins, flexvolume.ProbeVolumePlugins(config.FlexVolumePluginDir)...) allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...) return allPlugins } diff --git a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml index e35f624af357..1fb1fdd7b23e 100644 --- a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml +++ b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml @@ -3036,6 +3036,8 @@ items: verbs: - list - watch + - get + - apiVersion: v1 kind: ClusterRole metadata: @@ -3090,6 +3092,16 @@ items: - persistentvolumeclaims/status verbs: - update + - apiGroups: + - "" + attributeRestrictions: null + resources: + - services + - endpoints + verbs: + - create + - delete + - get - apiVersion: v1 kind: ClusterRole metadata: diff --git a/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/README.md b/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/README.md index 27dfde7de469..fede998c38db 100644 --- a/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/README.md +++ b/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/README.md @@ -55,9 +55,8 @@ parameters: * `zone`: GCE zone. If not specified, a random zone in the same region as controller-manager will be chosen. 
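Before the GlusterFS StorageClass documentation below, note how the two new provisioners are wired in: the `master.go` hunk above appends `glusterfs.ProbeVolumePlugins()` and `rbd.ProbeVolumePlugins()` to the controller-manager's plugin list, which is what makes `kubernetes.io/glusterfs` and `kubernetes.io/rbd` usable for dynamic provisioning. A minimal standalone sketch (not part of the patch) of what that registration amounts to:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/glusterfs"
	"k8s.io/kubernetes/pkg/volume/rbd"
)

func main() {
	// Mirror the appends added to probeRecyclableVolumePlugins() above.
	var allPlugins []volume.VolumePlugin
	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
	for _, p := range allPlugins {
		// Prints kubernetes.io/glusterfs and kubernetes.io/rbd.
		fmt.Println(p.GetPluginName())
	}
}
```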
#### GLUSTERFS - ```yaml -apiVersion: extensions/v1beta1 +apiVersion: storage.k8s.io/v1beta1 kind: StorageClass metadata: name: slow @@ -65,16 +64,50 @@ provisioner: kubernetes.io/glusterfs parameters: endpoint: "glusterfs-cluster" resturl: "http://127.0.0.1:8081" - restauthenabled: "true" restuser: "admin" - restuserkey: "password" + secretNamespace: "default" + secretName: "heketi-secret" ``` -* `endpoint`: `glusterfs-cluster` is the endpoint/service name which includes GlusterFS trusted pool IP addresses and this parameter is mandatory. -* `resturl` : Gluster REST service url which provision gluster volumes on demand. The format should be `IPaddress:Port` and this is a mandatory parameter for GlusterFS dynamic provisioner. -* `restauthenabled` : Gluster REST service authentication boolean is required if the authentication is enabled on the REST server. If this value is 'true', 'restuser' and 'restuserkey' have to be filled. -* `restuser` : Gluster REST service user who has access to create volumes in the Gluster Trusted Pool. -* `restuserkey` : Gluster REST service user's password which will be used for authentication to the REST server. +* `endpoint`: `glusterfs-cluster` is the endpoint name which includes GlusterFS trusted pool IP addresses. This parameter is mandatory. We need to also create a service for this endpoint, so that the endpoint will be persisted. This service can be without a selector to tell Kubernetes we want to add its endpoints manually. Please note that, glusterfs plugin looks for the endpoint in the pod namespace, so it is mandatory that the endpoint and service have to be created in Pod's namespace for successful mount of gluster volumes in the pod. +* `resturl` : Gluster REST service/Heketi service url which provision gluster volumes on demand. The general format should be `IPaddress:Port` and this is a mandatory parameter for GlusterFS dynamic provisioner. If Heketi service is exposed as a routable service in openshift/kubernetes setup, this can have a format similar to +`http://heketi-storage-project.cloudapps.mystorage.com` where the fqdn is a resolvable heketi service url. +* `restauthenabled` : Gluster REST service authentication boolean that enables authentication to the REST server. If this value is 'true', `restuser` and `restuserkey` or `secretNamespace` + `secretName` have to be filled. This option is deprecated, authentication is enabled when any of `restuser`, `restuserkey`, `secretName` or `secretNamespace` is specified. +* `restuser` : Gluster REST service/Heketi user who has access to create volumes in the Gluster Trusted Pool. +* `restuserkey` : Gluster REST service/Heketi user's password which will be used for authentication to the REST server. This parameter is deprecated in favor of `secretNamespace` + `secretName`. +* `secretNamespace` + `secretName` : Identification of Secret instance that containes user password to use when talking to Gluster REST service. These parameters are optional, empty password will be used when both `secretNamespace` and `secretName` are omitted. + +When both `restuserkey` and `secretNamespace` + `secretName` is specified, the secret will be used. + +Example of a secret can be found in [glusterfs-provisioning-secret.yaml](glusterfs-provisioning-secret.yaml). 
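The secret referenced by `secretNamespace`/`secretName` is resolved by the `parseClassParameters`/`parseSecret` helpers added later in this patch: the provisioner takes the value stored under the `key` entry as the Heketi password, falling back to the last entry when no `key` entry exists. A minimal sketch of creating and reading such a secret against a fake clientset, reusing the example names above (`heketi-secret` in `default`; the password value is a placeholder):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
)

func main() {
	// The glusterfs provisioner looks the password up under the "key" entry
	// (secretKeyName in glusterfs.go). "opensesame" is a placeholder value.
	secret := &api.Secret{
		ObjectMeta: api.ObjectMeta{Namespace: "default", Name: "heketi-secret"},
		Data:       map[string][]byte{"key": []byte("opensesame")},
	}
	client := fake.NewSimpleClientset(secret)
	s, err := client.Core().Secrets("default").Get("heketi-secret")
	if err != nil {
		panic(err)
	}
	fmt.Printf("resolved Heketi password: %s\n", s.Data["key"])
}
```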
+ +Reference : ([How to configure Heketi](https://github.com/heketi/heketi/wiki/Setting-up-the-topology)) + +Create endpoints + +As in example [glusterfs-endpoints.json](../../volumes/glusterfs/glusterfs-endpoints.json) file, the "IP" field should be filled with the address of a node in the Glusterfs server cluster. It is fine to give any valid value (from 1 to 65535) to the "port" field. + +Create the endpoints, + +```sh +$ kubectl create -f examples/volumes/glusterfs/glusterfs-endpoints.json +``` + +You can verify that the endpoints are successfully created by running + +```sh +$ kubectl get endpoints +NAME ENDPOINTS +glusterfs-cluster 10.240.106.152:1,10.240.79.157:1 +``` + +We need also create a service for this endpoints, so that the endpoints will be persisted. It is possible to create `service` without a selector to tell Kubernetes we want to add its endpoints manually. For an example service file refer [glusterfs-service.json](../../volumes/glusterfs/glusterfs-service.json). + +Use this command to create the service: + +```sh +$ kubectl create -f examples/volumes/glusterfs/glusterfs-service.json +``` #### OpenStack Cinder diff --git a/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/glusterfs-dp.yaml b/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/glusterfs-dp.yaml index 9072066559cd..26d413de53d0 100644 --- a/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/glusterfs-dp.yaml +++ b/vendor/k8s.io/kubernetes/examples/experimental/persistent-volume-provisioning/glusterfs-dp.yaml @@ -4,7 +4,6 @@ metadata: name: slow provisioner: kubernetes.io/glusterfs parameters: - endpoint: "glusterfs-cluster" resturl: "http://127.0.0.1:8081" restauthenabled: "true" restuser: "admin" diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/framework_test.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/framework_test.go index 54ba1c0175cc..740b0c657933 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/framework_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/framework_test.go @@ -1149,15 +1149,17 @@ func (plugin *mockVolumePlugin) Provision() (*api.PersistentVolume, error) { } if call.ret == nil { // Create a fake PV with known GCE volume (to match expected volume) + capacity := plugin.provisionOptions.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)] + accessModes := plugin.provisionOptions.PVC.Spec.AccessModes pv = &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: plugin.provisionOptions.PVName, }, Spec: api.PersistentVolumeSpec{ Capacity: api.ResourceList{ - api.ResourceName(api.ResourceStorage): plugin.provisionOptions.Capacity, + api.ResourceName(api.ResourceStorage): capacity, }, - AccessModes: plugin.provisionOptions.AccessModes, + AccessModes: accessModes, PersistentVolumeReclaimPolicy: plugin.provisionOptions.PersistentVolumeReclaimPolicy, PersistentVolumeSource: api.PersistentVolumeSource{ GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go index 63c9d5a8e0f5..2d21fb6e6318 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go @@ -1256,15 +1256,12 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa tags[cloudVolumeCreatedForVolumeNameTag] = pvName options := vol.VolumeOptions{ - Capacity: claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)], - AccessModes: claim.Spec.AccessModes, PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, CloudTags: &tags, ClusterName: ctrl.clusterName, PVName: pvName, - PVCName: claim.Name, + PVC: claim, Parameters: storageClass.Parameters, - Selector: claim.Spec.Selector, } // Provision the volume diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_base.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_base.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go index 93fe8326f573..ef1d38d80ba0 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_base.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go @@ -42,7 +42,7 @@ import ( // This file contains the controller base functionality, i.e. framework to // process PV/PVC added/updated/deleted events. The real binding, provisioning, -// recycling and deleting is done in controller.go +// recycling and deleting is done in pv_controller.go // NewPersistentVolumeController creates a new PersistentVolumeController func NewPersistentVolumeController( diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_test.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_test.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/controller_test.go rename to vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_test.go diff --git a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go index e60a7c867502..8cf704334be3 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go @@ -160,9 +160,6 @@ func (plugin *awsElasticBlockStorePlugin) newDeleterInternal(spec *volume.Spec, } func (plugin *awsElasticBlockStorePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { - if len(options.AccessModes) == 0 { - options.AccessModes = plugin.GetAccessModes() - } return plugin.newProvisionerInternal(options, &AWSDiskUtil{}) } @@ -429,7 +426,7 @@ func (c *awsElasticBlockStoreProvisioner) Provision() (*api.PersistentVolume, er }, Spec: api.PersistentVolumeSpec{ PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy, - AccessModes: c.options.AccessModes, + AccessModes: c.options.PVC.Spec.AccessModes, Capacity: api.ResourceList{ api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), }, @@ -444,6 +441,10 @@ func (c *awsElasticBlockStoreProvisioner) Provision() (*api.PersistentVolume, er }, } + if len(c.options.PVC.Spec.AccessModes) == 0 { + pv.Spec.AccessModes = c.plugin.GetAccessModes() + } + if len(labels) != 0 { if pv.Labels == nil { pv.Labels = make(map[string]string) diff --git 
a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_test.go b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_test.go index 212d8504d1da..4173eef97312 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_test.go @@ -23,7 +23,6 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/mount" @@ -180,12 +179,8 @@ func TestPlugin(t *testing.T) { } // Test Provisioner - cap := resource.MustParse("100Mi") options := volume.VolumeOptions{ - Capacity: cap, - AccessModes: []api.PersistentVolumeAccessMode{ - api.ReadWriteOnce, - }, + PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}), PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, } provisioner, err := plug.(*awsElasticBlockStorePlugin).newProvisionerInternal(options, &fakePDManager{}) @@ -197,7 +192,7 @@ func TestPlugin(t *testing.T) { if persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID != "test-aws-volume-name" { t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID) } - cap = persistentSpec.Spec.Capacity[api.ResourceStorage] + cap := persistentSpec.Spec.Capacity[api.ResourceStorage] size := cap.Value() if size != 100*1024*1024*1024 { t.Errorf("Provision() returned unexpected volume size: %v", size) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go index 4b19ece41024..8010809fd6e8 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go @@ -23,6 +23,7 @@ import ( "time" "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" "k8s.io/kubernetes/pkg/volume" @@ -77,13 +78,14 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (strin } tags["Name"] = volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters - requestBytes := c.options.Capacity.Value() + capacity := c.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)] + requestBytes := capacity.Value() // AWS works with gigabytes, convert to GiB with rounding up requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024)) volumeOptions := &aws.VolumeOptions{ CapacityGB: requestGB, Tags: tags, - PVCName: c.options.PVCName, + PVCName: c.options.PVC.Name, } // Apply Parameters (case-insensitive). We leave validation of // the values to the cloud provider. 
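This hunk shows the pattern that repeats through the rest of the patch (Cinder, GCE PD, hostPath, GlusterFS and RBD): provisioners no longer read the removed `VolumeOptions.Capacity`/`AccessModes` fields but derive both from the claim carried in `VolumeOptions.PVC`. A condensed sketch of that shared pattern; the helper names are illustrative, not part of the patch:

```go
package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/volume"
)

// requestedGiB reads the storage request from the claim and rounds it up to
// whole GiB, exactly as the CreateVolume() implementations above now do.
func requestedGiB(options volume.VolumeOptions) int {
	capacity := options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
	return int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024))
}

// accessModesOrDefault mirrors the new defaulting in Provision(): use the
// claim's access modes when set, otherwise fall back to what the plugin supports.
func accessModesOrDefault(options volume.VolumeOptions, plugin volume.PersistentVolumePlugin) []api.PersistentVolumeAccessMode {
	if len(options.PVC.Spec.AccessModes) != 0 {
		return options.PVC.Spec.AccessModes
	}
	return plugin.GetAccessModes()
}
```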
@@ -110,8 +112,8 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (strin } } - // TODO: implement c.options.ProvisionerSelector parsing - if c.options.Selector != nil { + // TODO: implement PVC.Selector parsing + if c.options.PVC.Spec.Selector != nil { return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on AWS") } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go index 9e4419297068..3aef51fc63e1 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go @@ -161,9 +161,6 @@ func (plugin *cinderPlugin) newDeleterInternal(spec *volume.Spec, manager cdMana } func (plugin *cinderPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { - if len(options.AccessModes) == 0 { - options.AccessModes = plugin.GetAccessModes() - } return plugin.newProvisionerInternal(options, &CinderDiskUtil{}) } @@ -474,7 +471,7 @@ func (c *cinderVolumeProvisioner) Provision() (*api.PersistentVolume, error) { }, Spec: api.PersistentVolumeSpec{ PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy, - AccessModes: c.options.AccessModes, + AccessModes: c.options.PVC.Spec.AccessModes, Capacity: api.ResourceList{ api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), }, @@ -487,6 +484,10 @@ func (c *cinderVolumeProvisioner) Provision() (*api.PersistentVolume, error) { }, }, } + if len(c.options.PVC.Spec.AccessModes) == 0 { + pv.Spec.AccessModes = c.plugin.GetAccessModes() + } + return pv, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_test.go b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_test.go index 66217ac20594..dc06d54abd5c 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_test.go @@ -24,7 +24,6 @@ import ( "time" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/mount" utiltesting "k8s.io/kubernetes/pkg/util/testing" @@ -199,12 +198,8 @@ func TestPlugin(t *testing.T) { } // Test Provisioner - cap := resource.MustParse("100Mi") options := volume.VolumeOptions{ - Capacity: cap, - AccessModes: []api.PersistentVolumeAccessMode{ - api.ReadWriteOnce, - }, + PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}), PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, } provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0}) @@ -216,7 +211,7 @@ func TestPlugin(t *testing.T) { if persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID != "test-volume-name" { t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID) } - cap = persistentSpec.Spec.Capacity[api.ResourceStorage] + cap := persistentSpec.Spec.Capacity[api.ResourceStorage] size := cap.Value() if size != 1024*1024*1024 { t.Errorf("Provision() returned unexpected volume size: %v", size) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go index 69d83eba7eef..2333d40af270 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go @@ -24,6 +24,7 @@ import ( "time" "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" 
"k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/volume" ) @@ -137,7 +138,8 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s return "", 0, err } - volSizeBytes := c.options.Capacity.Value() + capacity := c.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)] + volSizeBytes := capacity.Value() // Cinder works with gigabytes, convert to GiB with rounding up volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024)) name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters @@ -155,8 +157,8 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s return "", 0, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName()) } } - // TODO: implement c.options.ProvisionerSelector parsing - if c.options.Selector != nil { + // TODO: implement PVC.Selector parsing + if c.options.PVC.Spec.Selector != nil { return "", 0, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Cinder") } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go index cc0431e1c7e1..5c51d231bd90 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go @@ -166,9 +166,6 @@ func (plugin *gcePersistentDiskPlugin) newDeleterInternal(spec *volume.Spec, man } func (plugin *gcePersistentDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { - if len(options.AccessModes) == 0 { - options.AccessModes = plugin.GetAccessModes() - } return plugin.newProvisionerInternal(options, &GCEDiskUtil{}) } @@ -393,7 +390,7 @@ func (c *gcePersistentDiskProvisioner) Provision() (*api.PersistentVolume, error }, Spec: api.PersistentVolumeSpec{ PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy, - AccessModes: c.options.AccessModes, + AccessModes: c.options.PVC.Spec.AccessModes, Capacity: api.ResourceList{ api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), }, @@ -406,6 +403,9 @@ func (c *gcePersistentDiskProvisioner) Provision() (*api.PersistentVolume, error }, }, } + if len(c.options.PVC.Spec.AccessModes) == 0 { + pv.Spec.AccessModes = c.plugin.GetAccessModes() + } if len(labels) != 0 { if pv.Labels == nil { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_test.go b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_test.go index e5ca63ff4f57..6acc5e630071 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_test.go @@ -23,7 +23,6 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/mount" @@ -174,12 +173,8 @@ func TestPlugin(t *testing.T) { } // Test Provisioner - cap := resource.MustParse("100Mi") options := volume.VolumeOptions{ - Capacity: cap, - AccessModes: []api.PersistentVolumeAccessMode{ - api.ReadWriteOnce, - }, + PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}), PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, } provisioner, err := plug.(*gcePersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{}) @@ -191,7 +186,7 @@ func TestPlugin(t *testing.T) { if 
persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName != "test-gce-volume-name" { t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName) } - cap = persistentSpec.Spec.Capacity[api.ResourceStorage] + cap := persistentSpec.Spec.Capacity[api.ResourceStorage] size := cap.Value() if size != 100*1024*1024*1024 { t.Errorf("Provision() returned unexpected volume size: %v", size) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go index fa7b85d25d17..163028a39596 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go @@ -24,6 +24,7 @@ import ( "time" "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/cloudprovider" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/util/exec" @@ -75,7 +76,8 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin } name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters - requestBytes := c.options.Capacity.Value() + capacity := c.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)] + requestBytes := capacity.Value() // GCE works with gigabytes, convert to GiB with rounding up requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024) @@ -94,8 +96,8 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin } } - // TODO: implement c.options.ProvisionerSelector parsing - if c.options.Selector != nil { + // TODO: implement PVC.Selector parsing + if c.options.PVC.Spec.Selector != nil { return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on GCE") } @@ -107,7 +109,7 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin glog.V(2).Infof("error getting zone information from GCE: %v", err) return "", 0, nil, err } - zone = volume.ChooseZoneForVolume(zones, c.options.PVCName) + zone = volume.ChooseZoneForVolume(zones, c.options.PVC.Name) } err = cloud.CreateDisk(name, diskType, zone, int64(requestGB), *c.options.CloudTags) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go index 124cb874dc6e..41bae67b1b12 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go @@ -18,31 +18,33 @@ package glusterfs import ( "fmt" + "os" + "path" + dstrings "strings" + "github.com/golang/glog" gcli "github.com/heketi/heketi/client/api/go-client" gapi "github.com/heketi/heketi/pkg/glusterfs/api" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/resource" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" - "os" - "path" - "strconv" - dstrings "strings" + volutil "k8s.io/kubernetes/pkg/volume/util" ) // This is the primary entrypoint for volume plugins. 
func ProbeVolumePlugins() []volume.VolumePlugin { - return []volume.VolumePlugin{&glusterfsPlugin{nil, exec.New(), new(glusterfsClusterConf)}} + return []volume.VolumePlugin{&glusterfsPlugin{nil, exec.New()}} } type glusterfsPlugin struct { - host volume.VolumeHost - exe exec.Interface - clusterconf *glusterfsClusterConf + host volume.VolumeHost + exe exec.Interface } var _ volume.VolumePlugin = &glusterfsPlugin{} @@ -53,10 +55,17 @@ var _ volume.Provisioner = &glusterfsVolumeProvisioner{} var _ volume.Deleter = &glusterfsVolumeDeleter{} const ( - glusterfsPluginName = "kubernetes.io/glusterfs" - volprefix = "vol_" - replicacount = 3 - durabilitytype = "replicate" + glusterfsPluginName = "kubernetes.io/glusterfs" + volPrefix = "vol_" + dynamicEpSvcPrefix = "gluster-dynamic-" + replicaCount = 3 + durabilityType = "replicate" + secretKeyName = "key" // key name used in secret + annGlusterURL = "glusterfs.kubernetes.io/url" + annGlusterSecretName = "glusterfs.kubernetes.io/secretname" + annGlusterSecretNamespace = "glusterfs.kubernetes.io/secretnamespace" + annGlusterUserKey = "glusterfs.kubernetes.io/userkey" + annGlusterUser = "glusterfs.kubernetes.io/userid" ) func (plugin *glusterfsPlugin) Init(host volume.VolumeHost) error { @@ -104,6 +113,7 @@ func (plugin *glusterfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { source, _ := plugin.getGlusterVolumeSource(spec) ep_name := source.EndpointsName + // PVC/POD is in same ns. ns := pod.Namespace ep, err := plugin.host.GetKubeClient().Core().Endpoints(ns).Get(ep_name) if err != nil { @@ -283,30 +293,39 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { // adding log-level ERROR to remove noise // and more specific log path so each pod has - // it's own log based on PV + Pod + // its own log based on PV + Pod log := path.Join(p, b.pod.Name+"-glusterfs.log") options = append(options, "log-level=ERROR") options = append(options, "log-file="+log) - addr := make(map[string]struct{}) - for _, s := range b.hosts.Subsets { - for _, a := range s.Addresses { - addr[a.IP] = struct{}{} + var addrlist []string + if b.hosts == nil { + return fmt.Errorf("glusterfs: endpoint is nil") + } else { + addr := make(map[string]struct{}) + if b.hosts.Subsets != nil { + for _, s := range b.hosts.Subsets { + for _, a := range s.Addresses { + addr[a.IP] = struct{}{} + addrlist = append(addrlist, a.IP) + } + } + } - } - // Avoid mount storm, pick a host randomly. - // Iterate all hosts until mount succeeds. - for hostIP := range addr { - errs = b.mounter.Mount(hostIP+":"+b.path, dir, "glusterfs", options) - if errs == nil { - glog.Infof("glusterfs: successfully mounted %s", dir) - return nil + // Avoid mount storm, pick a host randomly. + // Iterate all hosts until mount succeeds. + for _, ip := range addrlist { + errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", options) + if errs == nil { + glog.Infof("glusterfs: successfully mounted %s", dir) + return nil + } } } // Failed mount scenario. 
- // Since gluster does not return eror text + // Since gluster does not return error text // it all goes in a log file, we will read the log file logerror := readGlusterLog(log, b.pod.Name) if logerror != nil { @@ -329,9 +348,6 @@ func getVolumeSource( } func (plugin *glusterfsPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { - if len(options.AccessModes) == 0 { - options.AccessModes = plugin.GetAccessModes() - } return plugin.newProvisionerInternal(options) } @@ -346,18 +362,18 @@ func (plugin *glusterfsPlugin) newProvisionerInternal(options volume.VolumeOptio }, nil } -type glusterfsClusterConf struct { - glusterep string - glusterRestvolpath string - glusterRestUrl string - glusterRestAuth bool - glusterRestUser string - glusterRestUserKey string +type provisioningConfig struct { + url string + user string + userKey string + secretNamespace string + secretName string + secretValue string } type glusterfsVolumeProvisioner struct { *glusterfsMounter - *glusterfsClusterConf + provisioningConfig options volume.VolumeOptions } @@ -376,12 +392,15 @@ func (plugin *glusterfsPlugin) newDeleterInternal(spec *volume.Spec) (volume.Del plugin: plugin, }, path: spec.PersistentVolume.Spec.Glusterfs.Path, - }}, nil + }, + spec: spec.PersistentVolume, + }, nil } type glusterfsVolumeDeleter struct { *glusterfsMounter - *glusterfsClusterConf + provisioningConfig + spec *api.PersistentVolume } func (d *glusterfsVolumeDeleter) GetPath() string { @@ -391,65 +410,86 @@ func (d *glusterfsVolumeDeleter) GetPath() string { func (d *glusterfsVolumeDeleter) Delete() error { var err error - glog.V(2).Infof("glusterfs: delete volume :%s ", d.glusterfsMounter.path) - volumetodel := d.glusterfsMounter.path - d.glusterfsClusterConf = d.plugin.clusterconf - newvolumetodel := dstrings.TrimPrefix(volumetodel, volprefix) - cli := gcli.NewClient(d.glusterRestUrl, d.glusterRestUser, d.glusterRestUserKey) + glog.V(2).Infof("glusterfs: delete volume: %s ", d.glusterfsMounter.path) + volumeName := d.glusterfsMounter.path + volumeId := dstrings.TrimPrefix(volumeName, volPrefix) + class, err := volutil.GetClassForVolume(d.plugin.host.GetKubeClient(), d.spec) + if err != nil { + return err + } + + cfg, err := parseClassParameters(class.Parameters, d.plugin.host.GetKubeClient()) + if err != nil { + return err + } + d.provisioningConfig = *cfg + + glog.V(4).Infof("glusterfs: deleting volume %q with configuration %+v", volumeId, d.provisioningConfig) + + cli := gcli.NewClient(d.url, d.user, d.secretValue) if cli == nil { glog.Errorf("glusterfs: failed to create gluster rest client") return fmt.Errorf("glusterfs: failed to create gluster rest client, REST server authentication failed") } - err = cli.VolumeDelete(newvolumetodel) + err = cli.VolumeDelete(volumeId) if err != nil { - glog.V(4).Infof("glusterfs: error when deleting the volume :%s", err) + glog.Errorf("glusterfs: error when deleting the volume :%v", err) return err } - glog.V(2).Infof("glusterfs: volume %s deleted successfully", volumetodel) - return nil + glog.V(2).Infof("glusterfs: volume %s deleted successfully", volumeName) + //Deleter takes endpoint and endpointnamespace from pv spec. 
+ pvSpec := d.spec.Spec + var dynamicEndpoint, dynamicNamespace string + if pvSpec.ClaimRef == nil { + glog.Errorf("glusterfs: ClaimRef is nil") + return fmt.Errorf("glusterfs: ClaimRef is nil") + } + if pvSpec.ClaimRef.Namespace == "" { + glog.Errorf("glusterfs: namespace is nil") + return fmt.Errorf("glusterfs: namespace is nil") + } + dynamicNamespace = pvSpec.ClaimRef.Namespace + if pvSpec.Glusterfs.EndpointsName != "" { + dynamicEndpoint = pvSpec.Glusterfs.EndpointsName + } + glog.V(3).Infof("glusterfs: dynamic namespace and endpoint : [%v/%v]", dynamicNamespace, dynamicEndpoint) + err = d.deleteEndpointService(dynamicNamespace, dynamicEndpoint) + if err != nil { + glog.Errorf("glusterfs: error when deleting endpoint/service :%v", err) + } else { + glog.V(1).Infof("glusterfs: [%v/%v] deleted successfully ", dynamicNamespace, dynamicEndpoint) + } + return nil } func (r *glusterfsVolumeProvisioner) Provision() (*api.PersistentVolume, error) { var err error - if r.options.Selector != nil { + if r.options.PVC.Spec.Selector != nil { glog.V(4).Infof("glusterfs: not able to parse your claim Selector") return nil, fmt.Errorf("glusterfs: not able to parse your claim Selector") } glog.V(4).Infof("glusterfs: Provison VolumeOptions %v", r.options) - for k, v := range r.options.Parameters { - switch dstrings.ToLower(k) { - case "endpoint": - r.plugin.clusterconf.glusterep = v - case "path": - r.plugin.clusterconf.glusterRestvolpath = v - case "resturl": - r.plugin.clusterconf.glusterRestUrl = v - case "restauthenabled": - r.plugin.clusterconf.glusterRestAuth, err = strconv.ParseBool(v) - case "restuser": - r.plugin.clusterconf.glusterRestUser = v - case "restuserkey": - r.plugin.clusterconf.glusterRestUserKey = v - default: - return nil, fmt.Errorf("glusterfs: invalid option %q for volume plugin %s", k, r.plugin.GetPluginName()) - } - } - glog.V(4).Infof("glusterfs: storage class parameters in plugin clusterconf %v", r.plugin.clusterconf) - if !r.plugin.clusterconf.glusterRestAuth { - r.plugin.clusterconf.glusterRestUser = "" - r.plugin.clusterconf.glusterRestUserKey = "" + + cfg, err := parseClassParameters(r.options.Parameters, r.plugin.host.GetKubeClient()) + if err != nil { + return nil, err } - r.glusterfsClusterConf = r.plugin.clusterconf + r.provisioningConfig = *cfg + + glog.V(4).Infof("glusterfs: creating volume with configuration %+v", r.provisioningConfig) glusterfs, sizeGB, err := r.CreateVolume() if err != nil { - glog.Errorf("glusterfs: create volume err: %s.", err) - return nil, fmt.Errorf("glusterfs: create volume err: %s.", err) + glog.Errorf("glusterfs: create volume err: %v.", err) + return nil, fmt.Errorf("glusterfs: create volume err: %v.", err) } pv := new(api.PersistentVolume) pv.Spec.PersistentVolumeSource.Glusterfs = glusterfs pv.Spec.PersistentVolumeReclaimPolicy = r.options.PersistentVolumeReclaimPolicy - pv.Spec.AccessModes = r.options.AccessModes + pv.Spec.AccessModes = r.options.PVC.Spec.AccessModes + if len(pv.Spec.AccessModes) == 0 { + pv.Spec.AccessModes = r.plugin.GetAccessModes() + } pv.Spec.Capacity = api.ResourceList{ api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), } @@ -457,28 +497,200 @@ func (r *glusterfsVolumeProvisioner) Provision() (*api.PersistentVolume, error) } func (p *glusterfsVolumeProvisioner) CreateVolume() (r *api.GlusterfsVolumeSource, size int, err error) { - volSizeBytes := p.options.Capacity.Value() + capacity := p.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)] + volSizeBytes 
:= capacity.Value() sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024)) - glog.V(2).Infof("glusterfs: create volume of size:%d bytes", volSizeBytes) - if p.glusterfsClusterConf.glusterRestUrl == "" { + glog.V(2).Infof("glusterfs: create volume of size: %d bytes and configuration %+v", volSizeBytes, p.provisioningConfig) + if p.url == "" { glog.Errorf("glusterfs : rest server endpoint is empty") return nil, 0, fmt.Errorf("failed to create gluster REST client, REST URL is empty") } - cli := gcli.NewClient(p.glusterRestUrl, p.glusterRestUser, p.glusterRestUserKey) + cli := gcli.NewClient(p.url, p.user, p.secretValue) if cli == nil { glog.Errorf("glusterfs: failed to create gluster rest client") return nil, 0, fmt.Errorf("failed to create gluster REST client, REST server authentication failed") } - volumeReq := &gapi.VolumeCreateRequest{Size: sz, Durability: gapi.VolumeDurabilityInfo{Type: durabilitytype, Replicate: gapi.ReplicaDurability{Replica: replicacount}}} + volumeReq := &gapi.VolumeCreateRequest{Size: sz, Durability: gapi.VolumeDurabilityInfo{Type: durabilityType, Replicate: gapi.ReplicaDurability{Replica: replicaCount}}} volume, err := cli.VolumeCreate(volumeReq) if err != nil { - glog.Errorf("glusterfs: error creating volume %s ", err) + glog.Errorf("glusterfs: error creating volume %v ", err) return nil, 0, fmt.Errorf("error creating volume %v", err) } - glog.V(1).Infof("glusterfs: volume with size :%d and name:%s created", volume.Size, volume.Name) + glog.V(1).Infof("glusterfs: volume with size: %d and name: %s created", volume.Size, volume.Name) + clusterinfo, err := cli.ClusterInfo(volume.Cluster) + if err != nil { + glog.Errorf("glusterfs: failed to get cluster details") + return nil, 0, fmt.Errorf("failed to get cluster details %v", err) + } + // For the above dynamically provisioned volume, we gather the list of node IPs + // of the cluster on which provisioned volume belongs to, as there can be multiple + // clusters. + var dynamicHostIps []string + for _, node := range clusterinfo.Nodes { + nodei, err := cli.NodeInfo(string(node)) + if err != nil { + glog.Errorf("glusterfs: failed to get hostip %v ", err) + return nil, 0, fmt.Errorf("failed to get hostip %v", err) + } + ipaddr := dstrings.Join(nodei.NodeAddRequest.Hostnames.Storage, "") + dynamicHostIps = append(dynamicHostIps, ipaddr) + } + glog.V(3).Infof("glusterfs: hostlist :%v", dynamicHostIps) + if len(dynamicHostIps) == 0 { + glog.Errorf("glusterfs: no hosts found %v ", err) + return nil, 0, fmt.Errorf("no hosts found %v", err) + } + + // The 'endpointname' is created in form of 'gluster-dynamic-'. + // createEndpointService() checks for this 'endpoint' existence in PVC's namespace and + // If not found, it create an endpoint and svc using the IPs we dynamically picked at time + // of volume creation. 
+ epServiceName := dynamicEpSvcPrefix + p.options.PVC.Name + epNamespace := p.options.PVC.Namespace + endpoint, service, err := p.createEndpointService(epNamespace, epServiceName, dynamicHostIps, p.options.PVC.Name) + if err != nil { + glog.Errorf("glusterfs: failed to create endpoint/service") + return nil, 0, fmt.Errorf("failed to create endpoint/service %v", err) + } + glog.V(3).Infof("glusterfs: dynamic ep %v and svc : %v ", endpoint, service) return &api.GlusterfsVolumeSource{ - EndpointsName: p.glusterfsClusterConf.glusterep, + EndpointsName: endpoint.Name, Path: volume.Name, ReadOnly: false, }, sz, nil } + +func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epServiceName string, hostips []string, pvcname string) (endpoint *api.Endpoints, service *api.Service, err error) { + + addrlist := make([]api.EndpointAddress, len(hostips)) + for i, v := range hostips { + addrlist[i].IP = v + } + endpoint = &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Namespace: namespace, + Name: epServiceName, + Labels: map[string]string{ + "gluster.kubernetes.io/provisioned-for-pvc": pvcname, + }, + }, + Subsets: []api.EndpointSubset{{ + Addresses: addrlist, + Ports: []api.EndpointPort{{Port: 1, Protocol: "TCP"}}, + }}, + } + _, err = p.plugin.host.GetKubeClient().Core().Endpoints(namespace).Create(endpoint) + if err != nil && errors.IsAlreadyExists(err) { + glog.V(1).Infof("glusterfs: endpoint [%s] already exist in namespace [%s]", endpoint, namespace) + err = nil + } + if err != nil { + glog.Errorf("glusterfs: failed to create endpoint %v", err) + return nil, nil, fmt.Errorf("error creating endpoint %v", err) + } + service = &api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: epServiceName, + Namespace: namespace, + Labels: map[string]string{ + "gluster.kubernetes.io/provisioned-for-pvc": pvcname, + }, + }, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Protocol: "TCP", Port: 1}}}} + _, err = p.plugin.host.GetKubeClient().Core().Services(namespace).Create(service) + if err != nil && errors.IsAlreadyExists(err) { + glog.V(1).Infof("glusterfs: service [%s] already exist in namespace [%s]", service, namespace) + err = nil + } + if err != nil { + glog.Errorf("glusterfs: failed to create service %v", err) + return nil, nil, fmt.Errorf("error creating service %v", err) + } + return endpoint, service, nil +} + +func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServiceName string) (err error) { + err = d.plugin.host.GetKubeClient().Core().Services(namespace).Delete(epServiceName, nil) + if err != nil { + glog.Errorf("glusterfs: failed to delete service %s in namespace %s error %v", epServiceName, namespace, err) + return fmt.Errorf("error deleting service %v in namespace [%s]", err, namespace) + } + glog.V(1).Infof("glusterfs: service/endpoint [%s] in namespace [%s] deleted successfully", epServiceName, namespace) + return nil +} + +// parseSecret finds a given Secret instance and reads user password from it. 
+func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) { + secretMap, err := volutil.GetSecret(namespace, secretName, kubeClient) + if err != nil { + glog.Errorf("failed to get secret from [%q/%q]", namespace, secretName) + return "", fmt.Errorf("failed to get secret from [%q/%q]", namespace, secretName) + } + if len(secretMap) == 0 { + return "", fmt.Errorf("empty secret map") + } + secret := "" + for k, v := range secretMap { + if k == secretKeyName { + return v, nil + } + secret = v + } + // If not found, the last secret in the map wins as done before + return secret, nil +} + +// parseClassParameters parses StorageClass.Parameters +func parseClassParameters(params map[string]string, kubeClient clientset.Interface) (*provisioningConfig, error) { + var cfg provisioningConfig + var err error + + authEnabled := true + for k, v := range params { + switch dstrings.ToLower(k) { + case "resturl": + cfg.url = v + case "restuser": + cfg.user = v + case "restuserkey": + cfg.userKey = v + case "secretname": + cfg.secretName = v + case "secretnamespace": + cfg.secretNamespace = v + case "restauthenabled": + authEnabled = dstrings.ToLower(v) == "true" + default: + return nil, fmt.Errorf("glusterfs: invalid option %q for volume plugin %s", k, glusterfsPluginName) + } + } + + if len(cfg.url) == 0 { + return nil, fmt.Errorf("StorageClass for provisioner %s must contain 'resturl' parameter", glusterfsPluginName) + } + + if !authEnabled { + cfg.user = "" + cfg.secretName = "" + cfg.secretNamespace = "" + cfg.userKey = "" + cfg.secretValue = "" + } + + if len(cfg.secretName) != 0 || len(cfg.secretNamespace) != 0 { + // secretName + Namespace has precedence over userKey + if len(cfg.secretName) != 0 && len(cfg.secretNamespace) != 0 { + cfg.secretValue, err = parseSecret(cfg.secretNamespace, cfg.secretName, kubeClient) + if err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("StorageClass for provisioner %q must have secretNamespace and secretName either both set or both empty", glusterfsPluginName) + } + } else { + cfg.secretValue = cfg.userKey + } + return &cfg, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_test.go b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_test.go index 992e827f3d00..c720799f4334 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_test.go @@ -19,10 +19,13 @@ package glusterfs import ( "fmt" "os" + "reflect" "testing" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/mount" @@ -236,3 +239,134 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) { t.Errorf("Expected true for mounter.IsReadOnly") } } + +func TestParseClassParameters(t *testing.T) { + secret := api.Secret{ + Data: map[string][]byte{ + "data": []byte("mypassword"), + }, + } + + tests := []struct { + name string + parameters map[string]string + secret *api.Secret + expectError bool + expectConfig *provisioningConfig + }{ + { + "password", + map[string]string{ + "resturl": "https://localhost:8080", + "restuser": "admin", + "restuserkey": "password", + }, + nil, // secret + false, // expect error + &provisioningConfig{ + url: "https://localhost:8080", + user: "admin", + userKey: "password", + secretValue: "password", + 
}, + }, + { + "secret", + map[string]string{ + "resturl": "https://localhost:8080", + "restuser": "admin", + "secretname": "mysecret", + "secretnamespace": "default", + }, + &secret, + false, // expect error + &provisioningConfig{ + url: "https://localhost:8080", + user: "admin", + secretName: "mysecret", + secretNamespace: "default", + secretValue: "mypassword", + }, + }, + { + "no authentication", + map[string]string{ + "resturl": "https://localhost:8080", + "restauthenabled": "false", + }, + &secret, + false, // expect error + &provisioningConfig{ + url: "https://localhost:8080", + }, + }, + { + "missing secret", + map[string]string{ + "resturl": "https://localhost:8080", + "secretname": "mysecret", + "secretnamespace": "default", + }, + nil, // secret + true, // expect error + nil, + }, + { + "secret with no namespace", + map[string]string{ + "resturl": "https://localhost:8080", + "secretname": "mysecret", + }, + &secret, + true, // expect error + nil, + }, + { + "missing url", + map[string]string{ + "restuser": "admin", + "restuserkey": "password", + }, + nil, // secret + true, // expect error + nil, + }, + { + "unknown parameter", + map[string]string{ + "unknown": "yes", + "resturl": "https://localhost:8080", + "restuser": "admin", + "restuserkey": "password", + }, + nil, // secret + true, // expect error + nil, + }, + } + + for _, test := range tests { + + client := &fake.Clientset{} + client.AddReactor("get", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + if test.secret != nil { + return true, test.secret, nil + } + return true, nil, fmt.Errorf("Test %s did not set a secret", test.name) + }) + + cfg, err := parseClassParameters(test.parameters, client) + + if err != nil && !test.expectError { + t.Errorf("Test %s got unexpected error %v", test.name, err) + } + if err == nil && test.expectError { + t.Errorf("test %s expected error and got none", test.name) + } + if test.expectConfig != nil { + if !reflect.DeepEqual(cfg, test.expectConfig) { + t.Errorf("Test %s returned unexpected data, expected: %+v, got: %+v", test.name, test.expectConfig, cfg) + } + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go b/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go index 40a109a08a66..ed62b035c614 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go @@ -48,7 +48,7 @@ type hostPathPlugin struct { // decouple creating Recyclers/Deleters/Provisioners by deferring to a function. Allows for easier testing. 
newRecyclerFunc func(pvName string, spec *volume.Spec, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error) newDeleterFunc func(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error) - newProvisionerFunc func(options volume.VolumeOptions, host volume.VolumeHost) (volume.Provisioner, error) + newProvisionerFunc func(options volume.VolumeOptions, host volume.VolumeHost, plugin *hostPathPlugin) (volume.Provisioner, error) config volume.VolumeConfig } @@ -124,10 +124,7 @@ func (plugin *hostPathPlugin) NewProvisioner(options volume.VolumeOptions) (volu if !plugin.config.ProvisioningEnabled { return nil, fmt.Errorf("Provisioning in volume plugin %q is disabled", plugin.GetPluginName()) } - if len(options.AccessModes) == 0 { - options.AccessModes = plugin.GetAccessModes() - } - return plugin.newProvisionerFunc(options, plugin.host) + return plugin.newProvisionerFunc(options, plugin.host, plugin) } func (plugin *hostPathPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { @@ -165,8 +162,8 @@ func newDeleter(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, erro return &hostPathDeleter{name: spec.Name(), path: path, host: host}, nil } -func newProvisioner(options volume.VolumeOptions, host volume.VolumeHost) (volume.Provisioner, error) { - return &hostPathProvisioner{options: options, host: host}, nil +func newProvisioner(options volume.VolumeOptions, host volume.VolumeHost, plugin *hostPathPlugin) (volume.Provisioner, error) { + return &hostPathProvisioner{options: options, host: host, plugin: plugin}, nil } // HostPath volumes represent a bare host file or directory mount. @@ -261,6 +258,7 @@ func (r *hostPathRecycler) Recycle() error { type hostPathProvisioner struct { host volume.VolumeHost options volume.VolumeOptions + plugin *hostPathPlugin } // Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume. 
@@ -268,6 +266,7 @@ type hostPathProvisioner struct { func (r *hostPathProvisioner) Provision() (*api.PersistentVolume, error) { fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID()) + capacity := r.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)] pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: r.options.PVName, @@ -277,9 +276,9 @@ func (r *hostPathProvisioner) Provision() (*api.PersistentVolume, error) { }, Spec: api.PersistentVolumeSpec{ PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy, - AccessModes: r.options.AccessModes, + AccessModes: r.options.PVC.Spec.AccessModes, Capacity: api.ResourceList{ - api.ResourceName(api.ResourceStorage): r.options.Capacity, + api.ResourceName(api.ResourceStorage): capacity, }, PersistentVolumeSource: api.PersistentVolumeSource{ HostPath: &api.HostPathVolumeSource{ @@ -288,6 +287,9 @@ func (r *hostPathProvisioner) Provision() (*api.PersistentVolume, error) { }, }, } + if len(r.options.PVC.Spec.AccessModes) == 0 { + pv.Spec.AccessModes = r.plugin.GetAccessModes() + } return pv, os.MkdirAll(pv.Spec.HostPath.Path, 0750) } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_test.go b/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_test.go index acf07bc750d7..02973e2a119d 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_test.go @@ -161,7 +161,11 @@ func TestProvisioner(t *testing.T) { if err != nil { t.Errorf("Can't find the plugin by name") } - creater, err := plug.NewProvisioner(volume.VolumeOptions{Capacity: resource.MustParse("1Gi"), PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete}) + options := volume.VolumeOptions{ + PVC: volumetest.CreateTestPVC("1Gi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}), + PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, + } + creater, err := plug.NewProvisioner(options) if err != nil { t.Errorf("Failed to make a new Provisioner: %v", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index 8dd1b6a2e231..bd2fd6d5bb6e 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -24,8 +24,6 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/types" @@ -41,25 +39,22 @@ type VolumeOptions struct { // TODO: refactor all of this out of volumes when an admin can configure // many kinds of provisioners. - // Capacity is the size of a volume. - Capacity resource.Quantity - // AccessModes of a volume - AccessModes []api.PersistentVolumeAccessMode // Reclamation policy for a persistent volume PersistentVolumeReclaimPolicy api.PersistentVolumeReclaimPolicy // PV.Name of the appropriate PersistentVolume. Used to generate cloud // volume name. PVName string - // PVC.Name of the PersistentVolumeClaim; only set during dynamic provisioning. - PVCName string + // PVC is reference to the claim that lead to provisioning of a new PV. + // Provisioners *must* create a PV that would be matched by this PVC, + // i.e. with required capacity, accessMode, labels matching PVC.Selector and + // so on. + PVC *api.PersistentVolumeClaim // Unique name of Kubernetes cluster. 
ClusterName string // Tags to attach to the real volume in the cloud provider - e.g. AWS EBS CloudTags *map[string]string // Volume provisioning parameters from StorageClass Parameters map[string]string - // Volume selector from PersistentVolumeClaim - Selector *unversioned.LabelSelector } // VolumePlugin is an interface to volume plugins that can be used on a diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go b/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go index 15d1e8ef073a..065049bcffb6 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go @@ -26,6 +26,7 @@ import ( "os" "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" ) @@ -37,6 +38,10 @@ type diskManager interface { AttachDisk(disk rbdMounter) error // Detaches the disk from the kubelet's host machine. DetachDisk(disk rbdUnmounter, mntPath string) error + // Creates a rbd image + CreateImage(provisioner *rbdVolumeProvisioner) (r *api.RBDVolumeSource, volumeSizeGB int, err error) + // Deletes a rbd image + DeleteImage(deleter *rbdVolumeDeleter) error } // utility to mount a disk based filesystem diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go index bfc852b17ef7..1c603988db82 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go @@ -18,14 +18,19 @@ package rbd import ( "fmt" + dstrings "strings" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" + "k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/volume" + volutil "k8s.io/kubernetes/pkg/volume/util" ) // This is the primary entrypoint for volume plugins. @@ -40,9 +45,12 @@ type rbdPlugin struct { var _ volume.VolumePlugin = &rbdPlugin{} var _ volume.PersistentVolumePlugin = &rbdPlugin{} +var _ volume.DeletableVolumePlugin = &rbdPlugin{} +var _ volume.ProvisionableVolumePlugin = &rbdPlugin{} const ( rbdPluginName = "kubernetes.io/rbd" + secretKeyName = "key" // key name used in secret ) func (plugin *rbdPlugin) Init(host volume.VolumeHost) error { @@ -86,26 +94,17 @@ func (plugin *rbdPlugin) GetAccessModes() []api.PersistentVolumeAccessMode { } func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { - secret := "" + var secret string + var err error source, _ := plugin.getRBDVolumeSource(spec) if source.SecretRef != nil { - kubeClient := plugin.host.GetKubeClient() - if kubeClient == nil { - return nil, fmt.Errorf("Cannot get kube client") - } - - secretName, err := kubeClient.Core().Secrets(pod.Namespace).Get(source.SecretRef.Name) - if err != nil { - glog.Errorf("Couldn't get secret %v/%v", pod.Namespace, source.SecretRef) + if secret, err = parseSecret(pod.Namespace, source.SecretRef.Name, plugin.host.GetKubeClient()); err != nil { + glog.Errorf("Couldn't get secret from %v/%v", pod.Namespace, source.SecretRef) return nil, err } - for name, data := range secretName.Data { - secret = string(data) - glog.V(1).Infof("ceph secret info: %s/%s", name, secret) - } - } + // Inject real implementations here, test through the internal function. 
return plugin.newMounterInternal(spec, pod.UID, &RBDUtil{}, plugin.host.GetMounter(), secret) } @@ -177,6 +176,170 @@ func (plugin *rbdPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*vol return volume.NewSpecFromVolume(rbdVolume), nil } +func (plugin *rbdPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { + if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == nil { + return nil, fmt.Errorf("spec.PersistentVolumeSource.Spec.RBD is nil") + } + class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume) + if err != nil { + return nil, err + } + adminSecretName := "" + adminSecretNamespace := "default" + admin := "" + + for k, v := range class.Parameters { + switch dstrings.ToLower(k) { + case "adminid": + admin = v + case "adminsecretname": + adminSecretName = v + case "adminsecretnamespace": + adminSecretNamespace = v + } + } + + secret, err := parseSecret(adminSecretNamespace, adminSecretName, plugin.host.GetKubeClient()) + if err != nil { + // log error but don't return yet + glog.Errorf("failed to get admin secret from [%q/%q]: %v", adminSecretNamespace, adminSecretName, err) + } + return plugin.newDeleterInternal(spec, admin, secret, &RBDUtil{}) +} + +func (plugin *rbdPlugin) newDeleterInternal(spec *volume.Spec, admin, secret string, manager diskManager) (volume.Deleter, error) { + return &rbdVolumeDeleter{ + rbdMounter: &rbdMounter{ + rbd: &rbd{ + volName: spec.Name(), + Image: spec.PersistentVolume.Spec.RBD.RBDImage, + Pool: spec.PersistentVolume.Spec.RBD.RBDPool, + manager: manager, + plugin: plugin, + }, + Mon: spec.PersistentVolume.Spec.RBD.CephMonitors, + adminId: admin, + adminSecret: secret, + }}, nil +} + +func (plugin *rbdPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { + return plugin.newProvisionerInternal(options, &RBDUtil{}) +} + +func (plugin *rbdPlugin) newProvisionerInternal(options volume.VolumeOptions, manager diskManager) (volume.Provisioner, error) { + return &rbdVolumeProvisioner{ + rbdMounter: &rbdMounter{ + rbd: &rbd{ + manager: manager, + plugin: plugin, + }, + }, + options: options, + }, nil +} + +type rbdVolumeProvisioner struct { + *rbdMounter + options volume.VolumeOptions +} + +func (r *rbdVolumeProvisioner) Provision() (*api.PersistentVolume, error) { + if r.options.PVC.Spec.Selector != nil { + return nil, fmt.Errorf("claim Selector is not supported") + } + var err error + adminSecretName := "" + adminSecretNamespace := "default" + secretName := "" + secret := "" + + for k, v := range r.options.Parameters { + switch dstrings.ToLower(k) { + case "monitors": + arr := dstrings.Split(v, ",") + for _, m := range arr { + r.Mon = append(r.Mon, m) + } + case "adminid": + r.adminId = v + case "adminsecretname": + adminSecretName = v + case "adminsecretnamespace": + adminSecretNamespace = v + case "userid": + r.Id = v + case "pool": + r.Pool = v + case "usersecretname": + secretName = v + default: + return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, r.plugin.GetPluginName()) + } + } + // sanity check + if adminSecretName == "" { + return nil, fmt.Errorf("missing Ceph admin secret name") + } + if secret, err = parseSecret(adminSecretNamespace, adminSecretName, r.plugin.host.GetKubeClient()); err != nil { + // log error but don't return yet + glog.Errorf("failed to get admin secret from [%q/%q]", adminSecretNamespace, adminSecretName) + } + r.adminSecret = secret + if len(r.Mon) < 1 { + return nil, fmt.Errorf("missing Ceph monitors") + } + if 
secretName == "" { + return nil, fmt.Errorf("missing user secret name") + } + if r.adminId == "" { + r.adminId = "admin" + } + if r.Pool == "" { + r.Pool = "rbd" + } + if r.Id == "" { + r.Id = r.adminId + } + + // create random image name + image := fmt.Sprintf("kubernetes-dynamic-pvc-%s", uuid.NewUUID()) + r.rbdMounter.Image = image + rbd, sizeMB, err := r.manager.CreateImage(r) + if err != nil { + glog.Errorf("rbd: create volume failed, err: %v", err) + return nil, fmt.Errorf("rbd: create volume failed, err: %v", err) + } + glog.Infof("successfully created rbd image %q", image) + pv := new(api.PersistentVolume) + rbd.SecretRef = new(api.LocalObjectReference) + rbd.SecretRef.Name = secretName + rbd.RadosUser = r.Id + pv.Spec.PersistentVolumeSource.RBD = rbd + pv.Spec.PersistentVolumeReclaimPolicy = r.options.PersistentVolumeReclaimPolicy + pv.Spec.AccessModes = r.options.PVC.Spec.AccessModes + if len(pv.Spec.AccessModes) == 0 { + pv.Spec.AccessModes = r.plugin.GetAccessModes() + } + pv.Spec.Capacity = api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", sizeMB)), + } + return pv, nil +} + +type rbdVolumeDeleter struct { + *rbdMounter +} + +func (r *rbdVolumeDeleter) GetPath() string { + name := rbdPluginName + return r.plugin.host.GetPodVolumeDir(r.podUID, strings.EscapeQualifiedNameForDisk(name), r.volName) +} + +func (r *rbdVolumeDeleter) Delete() error { + return r.manager.DeleteImage(r) +} + type rbd struct { volName string podUID types.UID @@ -199,11 +362,13 @@ func (rbd *rbd) GetPath() string { type rbdMounter struct { *rbd // capitalized so they can be exported in persistRBD() - Mon []string - Id string - Keyring string - Secret string - fsType string + Mon []string + Id string + Keyring string + Secret string + fsType string + adminSecret string + adminId string } var _ volume.Mounter = &rbdMounter{} @@ -262,3 +427,24 @@ func getVolumeSource( return nil, false, fmt.Errorf("Spec does not reference a RBD volume type") } + +// parseSecretMap locates the secret by key name. 
+func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) { + secretMap, err := volutil.GetSecret(namespace, secretName, kubeClient) + if err != nil { + glog.Errorf("failed to get secret from [%q/%q]", namespace, secretName) + return "", fmt.Errorf("failed to get secret from [%q/%q]", namespace, secretName) + } + if len(secretMap) == 0 { + return "", fmt.Errorf("empty secret map") + } + secret := "" + for k, v := range secretMap { + if k == secretKeyName { + return v, nil + } + secret = v + } + // If not found, the last secret in the map wins as done before + return secret, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_test.go b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_test.go index a1bbfea4a3f7..b7f28b545a59 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_test.go @@ -87,6 +87,14 @@ func (fake *fakeDiskManager) DetachDisk(c rbdUnmounter, mntPath string) error { return nil } +func (fake *fakeDiskManager) CreateImage(provisioner *rbdVolumeProvisioner) (r *api.RBDVolumeSource, volumeSizeGB int, err error) { + return nil, 0, fmt.Errorf("not implemented") +} + +func (fake *fakeDiskManager) DeleteImage(deleter *rbdVolumeDeleter) error { + return fmt.Errorf("not implemented") +} + func doTestPlugin(t *testing.T, spec *volume.Spec) { tmpDir, err := utiltesting.MkTmpdir("rbd_test") if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go index 1cfc42b5d719..5eca4c3621cf 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go @@ -34,12 +34,17 @@ import ( "time" "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/volume" ) +const ( + imageWatcherStr = "watcher=" +) + // search /sys/bus for rbd device that matches given pool and image func getDevFromImageAndPool(pool, image string) (string, bool) { // /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool @@ -307,3 +312,103 @@ func (util *RBDUtil) DetachDisk(c rbdUnmounter, mntPath string) error { } return nil } + +func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *api.RBDVolumeSource, size int, err error) { + capacity := p.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)] + volSizeBytes := capacity.Value() + // convert to MB that rbd defaults on + sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024)) + volSz := fmt.Sprintf("%d", sz) + // rbd create + l := len(p.rbdMounter.Mon) + // pick a mon randomly + start := rand.Int() % l + // iterate all monitors until create succeeds. 
+ for i := start; i < start+l; i++ { + mon := p.Mon[i%l] + glog.V(4).Infof("rbd: create %s size %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) + var output []byte + output, err = p.rbdMounter.plugin.execCommand("rbd", + []string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", "1"}) + if err == nil { + break + } else { + glog.Warningf("failed to create rbd image, output %v", string(output)) + } + } + + if err != nil { + glog.Errorf("rbd: Error creating rbd image: %v", err) + return nil, 0, err + } + + return &api.RBDVolumeSource{ + CephMonitors: p.rbdMounter.Mon, + RBDImage: p.rbdMounter.Image, + RBDPool: p.rbdMounter.Pool, + }, sz, nil +} + +func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error { + var output []byte + found, err := util.rbdStatus(p.rbdMounter) + if err != nil { + return err + } + if found { + glog.Info("rbd is still being used ", p.rbdMounter.Image) + return fmt.Errorf("rbd %s is still being used", p.rbdMounter.Image) + } + // rbd rm + l := len(p.rbdMounter.Mon) + // pick a mon randomly + start := rand.Int() % l + // iterate all monitors until rm succeeds. + for i := start; i < start+l; i++ { + mon := p.rbdMounter.Mon[i%l] + glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) + output, err = p.plugin.execCommand("rbd", + []string{"rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret}) + if err == nil { + return nil + } else { + glog.Errorf("failed to delete rbd image, error %v output %v", err, string(output)) + } + } + return err +} + +// run rbd status command to check if there is watcher on the image +func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, error) { + var err error + var output string + var cmd []byte + + l := len(b.Mon) + start := rand.Int() % l + // iterate all hosts until mount succeeds. 
+ for i := start; i < start+l; i++ { + mon := b.Mon[i%l] + // cmd "rbd status" list the rbd client watch with the following output: + // Watchers: + // watcher=10.16.153.105:0/710245699 client.14163 cookie=1 + glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, b.adminId, b.adminSecret) + cmd, err = b.plugin.execCommand("rbd", + []string{"status", b.Image, "--pool", b.Pool, "-m", mon, "--id", b.adminId, "--key=" + b.adminSecret}) + output = string(cmd) + + if err != nil { + // ignore error code, just checkout output for watcher string + glog.Warningf("failed to execute rbd status on mon %s", mon) + } + + if strings.Contains(output, imageWatcherStr) { + glog.V(4).Infof("rbd: watchers on %s: %s", b.Image, output) + return true, nil + } else { + glog.Warningf("rbd: no watchers on %s", b.Image) + return false, nil + } + } + return false, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/testing/testing.go b/vendor/k8s.io/kubernetes/pkg/volume/testing/testing.go index 375c1a0bee3a..9f28b7972c13 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/testing/testing.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/testing/testing.go @@ -497,9 +497,9 @@ func (fc *FakeProvisioner) Provision() (*api.PersistentVolume, error) { }, Spec: api.PersistentVolumeSpec{ PersistentVolumeReclaimPolicy: fc.Options.PersistentVolumeReclaimPolicy, - AccessModes: fc.Options.AccessModes, + AccessModes: fc.Options.PVC.Spec.AccessModes, Capacity: api.ResourceList{ - api.ResourceName(api.ResourceStorage): fc.Options.Capacity, + api.ResourceName(api.ResourceStorage): fc.Options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)], }, PersistentVolumeSource: api.PersistentVolumeSource{ HostPath: &api.HostPathVolumeSource{ @@ -746,3 +746,22 @@ func GetTestVolumePluginMgr( return &v.pluginMgr, plugins[0].(*FakeVolumePlugin) } + +// CreateTestPVC returns a provisionable PVC for tests +func CreateTestPVC(capacity string, accessModes []api.PersistentVolumeAccessMode) *api.PersistentVolumeClaim { + claim := api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "dummy", + Namespace: "default", + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: accessModes, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse(capacity), + }, + }, + }, + } + return &claim +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index 9859506a14a0..95aa4aa6edfe 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -22,6 +22,9 @@ import ( "path" "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/storage" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/util/mount" ) @@ -108,3 +111,64 @@ func PathExists(path string) (bool, error) { return false, err } } + +// GetSecret locates secret by name and namespace and returns secret map +func GetSecret(namespace, secretName string, kubeClient clientset.Interface) (map[string]string, error) { + secret := make(map[string]string) + if kubeClient == nil { + return secret, fmt.Errorf("Cannot get kube client") + } + + secrets, err := kubeClient.Core().Secrets(namespace).Get(secretName) + if err != nil { + return secret, err + } + for name, data := range secrets.Data { + secret[name] = string(data) + } + return secret, nil +} + +// AddVolumeAnnotations adds a 
map of annotations to a PersistentVolume
+func AddVolumeAnnotations(pv *api.PersistentVolume, annotations map[string]string) {
+ if pv.Annotations == nil {
+ pv.Annotations = map[string]string{}
+ }
+
+ for k, v := range annotations {
+ pv.Annotations[k] = v
+ }
+}
+
+// ParseVolumeAnnotations reads the requested annotations from a PersistentVolume
+func ParseVolumeAnnotations(pv *api.PersistentVolume, parseAnnotations []string) (map[string]string, error) {
+ result := map[string]string{}
+
+ if pv.Annotations == nil {
+ return result, fmt.Errorf("cannot parse volume annotations: no annotations found")
+ }
+
+ for _, annotation := range parseAnnotations {
+ if val, ok := pv.Annotations[annotation]; ok {
+ result[annotation] = val
+ } else {
+ return result, fmt.Errorf("cannot parse volume annotations: annotation %s not found", annotation)
+ }
+ }
+
+ return result, nil
+}
+
+func GetClassForVolume(kubeClient clientset.Interface, pv *api.PersistentVolume) (*storage.StorageClass, error) {
+ // TODO: replace with a real attribute after beta
+ className, found := pv.Annotations["volume.beta.kubernetes.io/storage-class"]
+ if !found {
+ return nil, fmt.Errorf("Volume has no class annotation")
+ }
+
+ class, err := kubeClient.Storage().StorageClasses().Get(className)
+ if err != nil {
+ return nil, err
+ }
+ return class, nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go
index e800eae03990..13c49c088217 100644
--- a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go
+++ b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go
@@ -321,9 +321,6 @@ type vsphereVolumeProvisioner struct {
 var _ volume.Provisioner = &vsphereVolumeProvisioner{}
 func (plugin *vsphereVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
- if len(options.AccessModes) == 0 {
- options.AccessModes = plugin.GetAccessModes()
- }
 return plugin.newProvisionerInternal(options, &VsphereDiskUtil{})
 }
@@ -353,7 +350,7 @@ func (v *vsphereVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 },
 Spec: api.PersistentVolumeSpec{
 PersistentVolumeReclaimPolicy: v.options.PersistentVolumeReclaimPolicy,
- AccessModes: v.options.AccessModes,
+ AccessModes: v.options.PVC.Spec.AccessModes,
 Capacity: api.ResourceList{
 api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", sizeKB)),
 },
@@ -365,6 +362,10 @@ func (v *vsphereVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 },
 },
 }
+ if len(v.options.PVC.Spec.AccessModes) == 0 {
+ pv.Spec.AccessModes = v.plugin.GetAccessModes()
+ }
+
 return pv, nil
 }
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_test.go b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_test.go
index aa7e888ae148..b51803685168 100644
--- a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_test.go
+++ b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_test.go
@@ -23,7 +23,6 @@ import (
 "testing"
 "k8s.io/kubernetes/pkg/api"
- "k8s.io/kubernetes/pkg/api/resource"
 "k8s.io/kubernetes/pkg/types"
 "k8s.io/kubernetes/pkg/util/mount"
 utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -142,12 +141,8 @@ func TestPlugin(t *testing.T) {
 }
 // Test Provisioner
- cap := resource.MustParse("100Mi")
 options := volume.VolumeOptions{
- Capacity: cap,
- AccessModes: []api.PersistentVolumeAccessMode{
- api.ReadWriteOnce,
- },
+ PVC: 
volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}), PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, } provisioner, err := plug.(*vsphereVolumePlugin).newProvisionerInternal(options, &fakePDManager{}) @@ -160,7 +155,7 @@ func TestPlugin(t *testing.T) { t.Errorf("Provision() returned unexpected path %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath) } - cap = persistentSpec.Spec.Capacity[api.ResourceStorage] + cap := persistentSpec.Spec.Capacity[api.ResourceStorage] size := cap.Value() if size != 100*1024 { t.Errorf("Provision() returned unexpected volume size: %v", size) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go index 6f4947e0991a..bf3ef1481001 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go @@ -23,6 +23,7 @@ import ( "time" "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/pkg/volume" @@ -57,7 +58,8 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPa return "", 0, err } - volSizeBytes := v.options.Capacity.Value() + capacity := v.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)] + volSizeBytes := capacity.Value() // vSphere works with kilobytes, convert to KiB with rounding up volSizeKB := int(volume.RoundUpSize(volSizeBytes, 1024)) name := volume.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255)
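
For reference, a StorageClass that exercises the new `kubernetes.io/rbd` provisioner could look like the sketch below. Only the parameter keys (`monitors`, `adminId`, `adminSecretName`, `adminSecretNamespace`, `pool`, `userId`, `userSecretName`) are taken from the switch in `rbdVolumeProvisioner.Provision()` above; the monitor addresses, pool, and secret names are placeholder values.

```yaml
# Sketch only: parameter keys mirror rbdVolumeProvisioner.Provision();
# all values below are placeholders.
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: fast
provisioner: kubernetes.io/rbd
parameters:
  monitors: "10.16.153.105:6789,10.16.153.106:6789"
  adminId: "admin"
  adminSecretName: "ceph-admin-secret"
  adminSecretNamespace: "kube-system"
  pool: "rbd"
  userId: "kube"
  userSecretName: "ceph-user-secret"
```

Per the sanity checks in `Provision()`, `monitors`, `adminSecretName`, and `userSecretName` are required; `adminId`, `pool`, and `userId` fall back to `admin`, `rbd`, and the admin id respectively, and `adminSecretNamespace` defaults to `default`. The referenced secrets are resolved through `parseSecret`, which prefers the value stored under the `key` entry.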