diff --git a/.gitmodules b/.gitmodules
index a710084cf7..5da914a220 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -66,10 +66,6 @@
 	path = src/code.cloudfoundry.org/cfdot
 	url = https://github.com/cloudfoundry/cfdot
 	branch = main
-[submodule "src/code.cloudfoundry.org/volman"]
-	path = src/code.cloudfoundry.org/volman
-	url = https://github.com/cloudfoundry/volman
-	branch = main
 [submodule "src/code.cloudfoundry.org/localdriver"]
 	path = src/code.cloudfoundry.org/localdriver
 	url = https://github.com/cloudfoundry/localdriver
diff --git a/src/code.cloudfoundry.org/volman b/src/code.cloudfoundry.org/volman
deleted file mode 160000
index 3458c241e8..0000000000
--- a/src/code.cloudfoundry.org/volman
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 3458c241e8fe92a374aaff09ae3b0d696f75721c
diff --git a/src/code.cloudfoundry.org/volman/.gitignore b/src/code.cloudfoundry.org/volman/.gitignore
new file mode 100755
index 0000000000..ab5bf37ffd
--- /dev/null
+++ b/src/code.cloudfoundry.org/volman/.gitignore
@@ -0,0 +1,4 @@
+pkg
+bin
+*coverprofile
+.DS_Store
diff --git a/src/code.cloudfoundry.org/volman/TROUBLESHOOTING.md b/src/code.cloudfoundry.org/volman/TROUBLESHOOTING.md
new file mode 100644
index 0000000000..301a33614b
--- /dev/null
+++ b/src/code.cloudfoundry.org/volman/TROUBLESHOOTING.md
@@ -0,0 +1,63 @@
+# Troubleshooting Problems with Cloud Foundry Volume Services
+
+## When the application does not start
+
+- If you have pushed an app with `--no-start` and then bound the app to a service, the cf cli will tell you to `cf restage` to start the app with the new binding. This is incorrect. You must use `cf start` on an app that is not running. If the app is already running, then `cf restage` is OK.
+- If your application still won't start, try unbinding it from the volume service and see if it starts when it is *not* bound. (Most of our test applications like [pora](https://github.com/cloudfoundry-incubator/persi-acceptance-tests/tree/master/assets/pora) and [kitty](https://github.com/EMC-Dojo/Kitty) will start up even when no volume is available.)
+  If the application starts up, that indicates the problem is volume services related. If you still see the same error regardless, the problem is elsewhere, and you should go through the [Troubleshooting Application Deployment and Health](https://docs.cloudfoundry.org/devguide/deploy-apps/troubleshoot-app-health.html) steps.
+- Simple errors may turn up in the application logs. Try `cf logs <app-name>`. Even if there are no errors in evidence, make a note of the application guid in this log--it could be useful later.
+- If you're specifying a `mount` in your bind configuration that overlaps with the application directory, you may encounter errors or odd behaviors at application startup, because the mounts are established before the application droplet is unpacked into the container. As a result, Diego will attempt to unpack the app droplet into your mounted directory, and will fail if it cannot. If you require the mounted directory to be available within your application directory tree, the preferred approach is to mount it elsewhere and then create a symlink to it in your application's start script.
+- More detailed logging is available by restarting your app with `CF_TRACE`. To do this, type
+  ```bash
+  CF_TRACE=true cf restart <app-name>
+  ```
+- If you see mount errors in the cf application logs when using the NFS volume service, it is possible that your NFS share is not opened to the Diego cells, or that the network access between the cell and the NFS server is not open. To test this, you will need to SSH onto the cell. See the steps below about failing broker/driver deployment for information about how to bosh ssh into the cell. Once you are ssh'd into the cell, type the following commands to test NFS access:
+  ```bash
+  mkdir foo
+  sudo mount -t nfs -o vers=3 <nfs-server>:<share-path> foo
+  sudo umount foo
+  sudo mount -t nfs -o vers=4 <nfs-server>:<share-path> foo
+  sudo umount foo
+  ```
+  If the network is open, one or both of these commands should successfully mount. If neither works, check to make sure that your share is opened to the Diego cell IPs. If only one of the mount commands succeeds, that suggests that the share is only open to NFSv3 or NFSv4 connections, and you will need to specify that version in your service configuration.
+  You can employ a similar set of steps to diagnose failures with SMB mounts, which may also need to be opened to Diego cell IPs.
+- If you don't find any mount errors but your mount takes longer than 10s, it is possible that volman canceled the connection to a volume driver that could not mount your NFS share within 10s. Check the network latency between your NFS share and the Diego cells, and check DNS resolution speed.
+- If you get this far, then you will need to consult the BOSH logs while restaging your application to see if you can find an error there (assuming that you have bosh access). See the steps below about failing broker/driver deployment for information about how to bosh ssh into the cell. Once you are ssh'd into the cell, check the driver stderr/stdout logs. It is also useful to look at the `rep` logs, as some errors in the volume services infrastructure will end up there.
+- If you don't see any errors on the Diego cell, it is likely that your error occurred in the cloud controller, before the app could be placed on a cell. To find the cloud controller logs related to your application, you can `bosh ssh` into the `api` vm in your cloudfoundry deployment. `grep` for your application guid in the `cloud_controller_ng` logs. Sometimes it is helpful to pipe the results of that `grep` to also grep for `error`:
+  ```bash
+  grep <app-guid> cloud_controller_ng.log | grep error
+  ```
+
+## When the application starts, but data is missing
+
+If your application starts up, but it cannot find the data you expected in your share, it is possible that there is an issue with volume services--the volume will be mounted onto the Diego cell, and then bind-mounted from the Diego cell into the application container by Garden. Failures in either of those mounts that go undetected by the infrastructure could theoretically leave an empty directory in place of the volume mount, which could result in the appearance of an empty mount.
+However, it's a good idea to take a look at your application container to make sure that your volume mount is really placed where you expected it:
+- `cf ssh <app-name>` to enter the application container
+- `echo $VCAP_SERVICES` to dump out the environment passed into the container by cloudfoundry. In that data block you should see an entry called either `container_path` or `container_dir` (depending on your cloudfoundry version). That will contain the path where your volume is mounted.
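+  For example, a quick way to pull that value out from inside the container (a rough sketch; the exact key, `container_dir` vs `container_path`, depends on your cloudfoundry version):
+  ```bash
+  echo $VCAP_SERVICES | grep -o '"container_dir": *"[^"]*"'
+  ```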
+- `cd` to the path above, and validate that it contains the data you expected and/or that you can create files in that location.
+- To double check that volume services are really working, you can bind a second app to the same service and `cf ssh` into that application. If volume services are operational, data written in one application container will be visible in the share when you ssh into the other.
+
+If your application requires data to be mounted in a specific location, you can normally alter the mount path when you bind your application to the volume by using the `-c` flag as follows:
+  ```bash
+  cf bind-service <app-name> <service-name> -c '{"mount":"/path/in/container"}'
+  ```
+This mount configuration is supported by all of the volume service brokers in the `cloudfoundry-incubator` and `cloudfoundry` github orgs.
+
+## When BOSH deployment fails
+
+### Broker deployment (for bosh deployed brokers)
+
+When broker deployment fails, assuming that BOSH has successfully parsed the manifest and created a vm for your broker, you will normally find any errors that occurred during startup by looking in the bosh logs.
+Although you can gather the logs from your bosh vm using the `bosh logs` command, that command creates a big zip file with all the logs in it that must be unpacked, so it is usually easier and faster to `bosh ssh` onto the vm and look at the logs in a shell.
+Instructions for bosh ssh are [here](https://bosh.io/docs/sysadmin-commands.html#ssh).
+
+Once you are ssh'd into the vm, switch to root with `sudo su` and then type `monit summary` to make sure that your broker job is really not running.
+Assuming that the broker is not showing as running, you should see some type of error in one of three places:
+- `/var/vcap/sys/log/monit/` contains monit script output for the various bosh jobs. Errors that occur in outer monit scripts will appear here.
+- `/var/vcap/sys/log/packages/` contains package installation logs for your broker source. Some packaging errors end up here.
+- `/var/vcap/sys/log/jobs/` contains logs for your actual broker process. Any errors from the running executable or pre-start script will appear in this directory.
+
+### Driver deployment
+
+Diagnosing failures in driver deployment is quite similar to the bosh-deployed broker diagnosis described above. The principal difference is that the driver is deployed alongside Diego, so you must ssh into the diego-cell VM to find the driver job.
+In a multi-cell deployment, it is sometimes necessary to try different cell vms to find the failed one, but most of the time, if the configuration is not right, all cells will fail in the same way.
diff --git a/src/code.cloudfoundry.org/volman/client.go b/src/code.cloudfoundry.org/volman/client.go
new file mode 100644
index 0000000000..683ac3eeae
--- /dev/null
+++ b/src/code.cloudfoundry.org/volman/client.go
@@ -0,0 +1,11 @@
+package volman
+
+import "code.cloudfoundry.org/lager/v3"
+
+//go:generate counterfeiter -o volmanfakes/fake_manager_client.go . Manager
+
+type Manager interface {
+	ListDrivers(logger lager.Logger) (ListDriversResponse, error)
+	Mount(logger lager.Logger, driverId string, volumeId string, containerId string, config map[string]interface{}) (MountResponse, error)
+	Unmount(logger lager.Logger, driverId string, volumeId string, containerId string) error
+}
diff --git a/src/code.cloudfoundry.org/volman/docs/01-overview.md b/src/code.cloudfoundry.org/volman/docs/01-overview.md
new file mode 100644
index 0000000000..9895f7e87c
--- /dev/null
+++ b/src/code.cloudfoundry.org/volman/docs/01-overview.md
@@ -0,0 +1,79 @@
+---
+title: Overview
+expires_at: never
+tags: [volman,diego-release,volume-services]
+---
+
+# Overview
+
+Volume services as a product is shipped as part of OSS Cloud Foundry.
+
+OSS Cloud Foundry: users can choose to install volume services on their deployment by enabling deployment of the driver using the ops file, and running the errand to install the broker. The two volume services brokers shipped as part of cf-deployment are the nfs broker and the smb broker.
+
+Application developers consume volume services via the cf services interface.
+
+# Architecture
+
+![volume-services-arch.png](./assets/volume-services-arch.png "Volume Services Architecture")
+
+## Key concepts
+
+- Service Broker: an OSBAPI-compliant broker which enables application developers to list and provision services themselves.
+- Volume Mounts: when a Service Broker binds an application to a service, it can include an array of volume_mounts, which describe where and how to mount the filesystem in the container.
+- Volume Drivers: 3rd party plugins which manage the lifecycle of creating, mounting, unmounting, and destroying a Volume Service instance. They are deployed on the Diego cell. More details are in the Volume Drivers section below.
+
+Each volume services deployment contains one service broker component deployed as a CF app, and one driver component deployed as a BOSH release with the Diego cell.
+
+## High level workflow
+
+```mermaid
+sequenceDiagram
+    participant CLI as cf-cli
+    participant CC as Cloud Controller
+    participant B as Volume Service Broker
+    participant D as Diego Cell
+    participant A as Application
+    CLI->>CC: cf create-service / cf bind-service
+    CC->>B: OSBAPI create service / OSBAPI bind service
+    B->>CC: volume_mounts
+    CLI->>CC: cf restart app
+    CC->>D: deploy app with volume_mounts config
+    D->>A: Mount storage
+    D->>A: Start App
+```
+
+When a Service Broker binds an application to a service, it can include an array of volume_mounts, which describe where and how to mount the filesystem in the container.
+
+Before a container starts, the storage device is attached and mounted on the Cell by a trusted component – the Volume Driver – and then bind-mounted into the untrusted container by Garden. This allows storage setup to occur without needing any privilege escalation inside of the container.
+
+## Diego Cell Workflow
+This section documents what happens internally when the cloud controller asks Diego to start an app with a volume attached to it.
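+For orientation, these are the user-facing commands that typically trigger this flow (illustrative only; the `nfs` service and `Existing` plan names follow the nfs-volume-release examples, and the app and instance names are placeholders):
+
+```bash
+# create a service instance pointing at an existing NFS export
+cf create-service nfs Existing myVolume -c '{"share": "nfs-server.example.com/export/vol1"}'
+# bind it, optionally choosing the in-container mount path and uid/gid
+cf bind-service my-app myVolume -c '{"uid": "1000", "gid": "1000", "mount": "/var/volume1"}'
+# restart so the app is re-placed with the new volume_mounts
+cf restart my-app
+```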
+
+```mermaid
+sequenceDiagram
+    participant bbs as Diego-BBS
+    participant rep as Diego-Rep
+    participant vm as VolMan
+    participant nfs as NFS Driver
+    participant garden as Garden
+    bbs->>rep: Run app on the cell
+    Note right of rep: Detect app has a binding with a volume
+    rep->>vm: provision volume from binding
+    vm->>nfs: provision volume
+    nfs->>nfs: mounts the remote share
+    nfs->>vm: local mount point
+    vm->>rep: local mount point
+    rep->>garden: Run app container with the directory mounted
+```
+
+## Volume Drivers
+
+Volume Drivers are 3rd party plugins which manage the lifecycle of creating, mounting, unmounting, and destroying a Volume Service instance. Currently, Volume Drivers written against the Docker v1.12 Volume Plugin specification are supported.
+
+Docker Volume Plugin Docs: [https://docs.docker.com/engine/extend/plugins_volume/](https://docs.docker.com/engine/extend/plugins_volume/)
+
+Drivers are deployed onto Diego Cells in one of two ways. Either they are deployed as colocated jobs, or they are deployed as job-specific add-ons.
+
+Known implementations:
+- [NFS](https://github.com/cloudfoundry/nfs-volume-release): Support for pre-existing NFS shares is available through the nfs-volume-release. This release allows application developers to provision new service instances corresponding to existing nfs shares, provided that the NFS server is configured to allow connections from the Diego cell VMs in the Cloud Foundry deployment. During application bind, the application developer can specify a UID and GID to be used when connecting to the NFS server.
+- [SMB Volume](https://github.com/cloudfoundry/smb-volume-release): A driver and broker that mounts SMB shares.
diff --git a/src/code.cloudfoundry.org/volman/docs/assets/volume-services-arch.png b/src/code.cloudfoundry.org/volman/docs/assets/volume-services-arch.png
new file mode 100644
index 0000000000..da0cae5452
Binary files /dev/null and b/src/code.cloudfoundry.org/volman/docs/assets/volume-services-arch.png differ
diff --git a/src/code.cloudfoundry.org/volman/resources.go b/src/code.cloudfoundry.org/volman/resources.go
new file mode 100644
index 0000000000..4de9685eee
--- /dev/null
+++ b/src/code.cloudfoundry.org/volman/resources.go
@@ -0,0 +1,71 @@
+package volman
+
+import (
+	"code.cloudfoundry.org/lager/v3"
+)
+
+//go:generate counterfeiter -o volmanfakes/fake_plugin.go . Plugin
+type Plugin interface {
+	ListVolumes(logger lager.Logger) ([]string, error)
+	Mount(logger lager.Logger, volumeId string, config map[string]interface{}) (MountResponse, error)
+	Unmount(logger lager.Logger, volumeId string) error
+	Matches(lager.Logger, PluginSpec) bool
+	GetPluginSpec() PluginSpec
+}
+
+//go:generate counterfeiter -o volmanfakes/fake_discoverer.go .
Discoverer +type Discoverer interface { + Discover(logger lager.Logger) (map[string]Plugin, error) +} + +type ListDriversResponse struct { + Drivers []InfoResponse `json:"drivers"` +} + +type MountRequest struct { + DriverId string `json:"driverId"` + VolumeId string `json:"volumeId"` + Config map[string]interface{} `json:"config"` +} + +type MountResponse struct { + Path string `json:"path"` +} + +type InfoResponse struct { + Name string `json:"name"` +} + +type UnmountRequest struct { + DriverId string `json:"driverId"` + VolumeId string `json:"volumeId"` +} + +type PluginSpec struct { + Name string `json:"Name"` + Address string `json:"Addr"` + TLSConfig *TLSConfig `json:"TLSConfig"` + UniqueVolumeIds bool +} + +type TLSConfig struct { + InsecureSkipVerify bool `json:"InsecureSkipVerify"` + CAFile string `json:"CAFile"` + CertFile string `json:"CertFile"` + KeyFile string `json:"KeyFile"` +} + +type PluginRegistry interface { + Plugin(id string) (Plugin, bool) + Plugins() map[string]Plugin + Set(plugins map[string]Plugin) + Keys() []string +} + +type SafeError struct { + SafeDescription string `json:"SafeDescription"` +} + +func (s SafeError) Error() string { + return s.SafeDescription +} diff --git a/src/code.cloudfoundry.org/volman/voldiscoverers/docker_driver_discoverer.go b/src/code.cloudfoundry.org/volman/voldiscoverers/docker_driver_discoverer.go new file mode 100644 index 0000000000..66d772cb88 --- /dev/null +++ b/src/code.cloudfoundry.org/volman/voldiscoverers/docker_driver_discoverer.go @@ -0,0 +1,247 @@ +package voldiscoverers + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "regexp" + + "code.cloudfoundry.org/dockerdriver" + "code.cloudfoundry.org/dockerdriver/driverhttp" + "code.cloudfoundry.org/lager/v3" + "code.cloudfoundry.org/volman" + "code.cloudfoundry.org/volman/voldocker" +) + +type dockerDriverDiscoverer struct { + logger lager.Logger + driverFactory DockerDriverFactory + + driverRegistry volman.PluginRegistry + driverPaths []string +} + +func NewDockerDriverDiscoverer(logger lager.Logger, driverRegistry volman.PluginRegistry, driverPaths []string) volman.Discoverer { + return &dockerDriverDiscoverer{ + logger: logger, + driverFactory: NewDockerDriverFactory(), + + driverRegistry: driverRegistry, + driverPaths: driverPaths, + } +} + +func NewDockerDriverDiscovererWithDriverFactory(logger lager.Logger, driverRegistry volman.PluginRegistry, driverPaths []string, factory DockerDriverFactory) volman.Discoverer { + return &dockerDriverDiscoverer{ + logger: logger, + driverFactory: factory, + + driverRegistry: driverRegistry, + driverPaths: driverPaths, + } +} + +func (r *dockerDriverDiscoverer) Discover(logger lager.Logger) (map[string]volman.Plugin, error) { + logger = logger.Session("discover") + logger.Debug("start") + logger.Info("discovering-drivers", lager.Data{"driver-paths": r.driverPaths}) + defer logger.Debug("end") + + endpoints := make(map[string]volman.Plugin) + + for _, driverPath := range r.driverPaths { + specTypes := [3]string{"json", "spec", "sock"} + for _, specType := range specTypes { + matchingDriverSpecs, err := r.getMatchingDriverSpecs(logger, driverPath, specType) + + if err != nil { + // untestable on linux, does glob work differently on windows??? 
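+				// note: filepath.Glob only ever returns ErrBadPattern, so this branch is reachable only if the configured driver path produces a malformed glob pattern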
+ return map[string]volman.Plugin{}, fmt.Errorf("Volman configured with an invalid driver path '%s', error occured list files (%s)", driverPath, err.Error()) + } + if len(matchingDriverSpecs) > 0 { + logger.Debug("driver-specs", lager.Data{"drivers": matchingDriverSpecs}) + var existing map[string]volman.Plugin + if r.driverRegistry != nil { + existing = r.driverRegistry.Plugins() + logger.Debug("existing-drivers", lager.Data{"len": len(existing)}) + } + + endpoints = r.findAllPlugins(logger, endpoints, driverPath, matchingDriverSpecs, existing) + endpoints = r.activatePlugins(logger, endpoints, driverPath, matchingDriverSpecs) + } + } + } + return endpoints, nil +} + +func (r *dockerDriverDiscoverer) findAllPlugins(logger lager.Logger, newPlugins map[string]volman.Plugin, driverPath string, specs []string, existingPlugins map[string]volman.Plugin) map[string]volman.Plugin { + logger = logger.Session("insert-if-not-found") + logger.Debug("start") + defer logger.Debug("end") + var plugin volman.Plugin + + for _, spec := range specs { + validSpecName, specName, specFile := specName(logger, spec) + if !validSpecName { + continue + } + + _, newPluginFound := newPlugins[specName] + if !newPluginFound { + pluginSpec, err := r.getPluginSpec(logger, specName, driverPath, specFile) + if err != nil { + continue + } + + var existingPluginFound bool + plugin, existingPluginFound = existingPlugins[specName] + if !existingPluginFound || r.pluginDoesNotMatch(logger, plugin, pluginSpec) { + plugin, err = r.createPlugin(logger, specName, driverPath, specFile, pluginSpec) + if err != nil { + continue + } + } + + logger.Info("new-plugin", lager.Data{"name": specName}) + newPlugins[specName] = plugin + } + } + return newPlugins +} + +func (r *dockerDriverDiscoverer) activatePlugins(logger lager.Logger, plugins map[string]volman.Plugin, driverPath string, specs []string) map[string]volman.Plugin { + + activatedPlugins := map[string]volman.Plugin{} + + for k, plugin := range plugins { + dockerPlugin := plugin.(*voldocker.DockerDriverPlugin) + dockerDriver := dockerPlugin.DockerDriver.(dockerdriver.Driver) + env := driverhttp.NewHttpDriverEnv(logger, context.Background()) + resp := dockerDriver.Activate(env) + if resp.Err == "" { + if implementVolumeDriver(resp) { + activatedPlugins[k] = dockerPlugin + } else { + logger.Error("driver-invalid", fmt.Errorf("driver-implements: %#v, expecting: VolumeDriver", resp.Implements)) + } + } else { + logger.Error("existing-driver-unreachable", errors.New(resp.Err), lager.Data{"spec-name": dockerPlugin.GetPluginSpec().Name, "address": dockerPlugin.GetPluginSpec().Address, "tls": dockerPlugin.GetPluginSpec().TLSConfig}) + + foundSpecFile, specFile := r.findDockerSpecFileByName(logger, dockerPlugin.GetPluginSpec().Name, driverPath, specs) + if !foundSpecFile { + logger.Info("error-spec-file-not-found", lager.Data{"spec-name": dockerPlugin.GetPluginSpec().Name}) + continue + } + + logger.Info("updating-driver", lager.Data{"spec-name": plugin.GetPluginSpec().Name, "driver-path": driverPath}) + driver, err := r.driverFactory.DockerDriver(logger, plugin.GetPluginSpec().Name, driverPath, specFile) + if err != nil { + logger.Error("error-creating-driver", err) + } + env := driverhttp.NewHttpDriverEnv(logger, context.TODO()) + resp := driver.Activate(env) + if resp.Err == "" { + if implementVolumeDriver(resp) { + activatedPlugins[k] = dockerPlugin + } else { + logger.Error("driver-invalid", fmt.Errorf("driver-implements: %#v, expecting: VolumeDriver", resp.Implements)) + } + } else { 
+ logger.Info("updated-driver-unreachable", lager.Data{"spec-name": dockerPlugin.GetPluginSpec().Name, "address": dockerPlugin.GetPluginSpec().Address, "tls": dockerPlugin.GetPluginSpec().TLSConfig}) + } + } + + } + + return activatedPlugins +} + +func (r *dockerDriverDiscoverer) getMatchingDriverSpecs(logger lager.Logger, path string, pattern string) ([]string, error) { + logger.Debug("binaries", lager.Data{"path": path, "pattern": pattern}) + matchingDriverSpecs, err := filepath.Glob(path + string(os.PathSeparator) + "*." + pattern) + if err != nil { // untestable on linux, does glob work differently on windows??? + return nil, fmt.Errorf("Volman configured with an invalid driver path '%s', error occured list files (%s)", path, err.Error()) + } + return matchingDriverSpecs, nil + +} + +func (r *dockerDriverDiscoverer) pluginDoesNotMatch(logger lager.Logger, plugin volman.Plugin, pluginSpec volman.PluginSpec) bool { + if plugin == nil { + return true + } + doesNotMatch := !plugin.Matches(logger, pluginSpec) + if doesNotMatch { + logger.Info("existing-plugin-mismatch", lager.Data{"specName": plugin.GetPluginSpec().Name, "existing-address": plugin.GetPluginSpec().Address, "new-adddress": pluginSpec.Address}) + } + return doesNotMatch +} + +func (r *dockerDriverDiscoverer) getPluginSpec(logger lager.Logger, specName string, driverPath string, specFile string) (volman.PluginSpec, error) { + driverSpec, err := dockerdriver.ReadDriverSpec(logger, specName, driverPath, specFile) + if err != nil { + logger.Error("error-reading-driver-spec", err) + return volman.PluginSpec{}, errors.New("error-reading-driver-spec") + } + + pluginSpec := mapDriverSpecToPluginSpec(driverSpec) + return pluginSpec, err +} + +func (r *dockerDriverDiscoverer) findDockerSpecFileByName(logger lager.Logger, nameToFind string, driverPath string, specs []string) (bool, string) { + for _, spec := range specs { + found, specName, specFile := specName(logger, spec) + + if found && specName == nameToFind { + return true, specFile + } + } + return false, "'" +} + +func (r *dockerDriverDiscoverer) createPlugin(logger lager.Logger, specName string, driverPath string, specFile string, pluginSpec volman.PluginSpec) (volman.Plugin, error) { + logger.Info("creating-driver", lager.Data{"specName": specName, "driver-path": driverPath, "specFile": specFile}) + driver, err := r.driverFactory.DockerDriver(logger, specName, driverPath, specFile) + if err != nil { + logger.Error("error-creating-driver", err) + return nil, err + } + + return voldocker.NewVolmanPluginWithDockerDriver(driver, pluginSpec), nil +} + +func specName(logger lager.Logger, spec string) (bool, string, string) { + re := regexp.MustCompile(`([^/]*/)?([^/]*)\.(sock|spec|json)$`) + + segs2 := re.FindAllStringSubmatch(spec, 1) + if len(segs2) <= 0 { + return false, "", "" + } + specName := segs2[0][2] + specFile := segs2[0][2] + "." 
+ segs2[0][3] + logger.Debug("insert-unique-spec", lager.Data{"specname": specName}) + return true, specName, specFile +} + +func implementVolumeDriver(resp dockerdriver.ActivateResponse) bool { + return len(resp.Implements) > 0 && driverImplements("VolumeDriver", resp.Implements) +} + +func mapDriverSpecToPluginSpec(driverSpec *dockerdriver.DriverSpec) volman.PluginSpec { + pluginSpec := volman.PluginSpec{ + Name: driverSpec.Name, + Address: driverSpec.Address, + UniqueVolumeIds: driverSpec.UniqueVolumeIds, + } + if driverSpec.TLSConfig != nil { + pluginSpec.TLSConfig = &volman.TLSConfig{ + InsecureSkipVerify: driverSpec.TLSConfig.InsecureSkipVerify, + CAFile: driverSpec.TLSConfig.CAFile, + CertFile: driverSpec.TLSConfig.CertFile, + KeyFile: driverSpec.TLSConfig.KeyFile, + } + } + return pluginSpec +} diff --git a/src/code.cloudfoundry.org/volman/voldiscoverers/docker_driver_discoverer_test.go b/src/code.cloudfoundry.org/volman/voldiscoverers/docker_driver_discoverer_test.go new file mode 100644 index 0000000000..6086528bb9 --- /dev/null +++ b/src/code.cloudfoundry.org/volman/voldiscoverers/docker_driver_discoverer_test.go @@ -0,0 +1,371 @@ +package voldiscoverers_test + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "code.cloudfoundry.org/lager/v3/lagertest" + + "code.cloudfoundry.org/dockerdriver" + "code.cloudfoundry.org/dockerdriver/dockerdriverfakes" + "code.cloudfoundry.org/volman" + "code.cloudfoundry.org/volman/voldiscoverers" + "code.cloudfoundry.org/volman/vollocal" + "code.cloudfoundry.org/volman/volmanfakes" +) + +var _ = Describe("Docker Driver Discoverer", func() { + var ( + logger *lagertest.TestLogger + + fakeDriverFactory *volmanfakes.FakeDockerDriverFactory + + registry volman.PluginRegistry + discoverer volman.Discoverer + + fakeDriver *dockerdriverfakes.FakeMatchableDriver + + driverName string + ) + + BeforeEach(func() { + logger = lagertest.NewTestLogger("driver-discovery-test") + + fakeDriverFactory = new(volmanfakes.FakeDockerDriverFactory) + + registry = vollocal.NewPluginRegistry() + discoverer = voldiscoverers.NewDockerDriverDiscovererWithDriverFactory(logger, registry, []string{defaultPluginsDirectory}, fakeDriverFactory) + + fakeDriver = new(dockerdriverfakes.FakeMatchableDriver) + fakeDriver.ActivateReturns(dockerdriver.ActivateResponse{ + Implements: []string{"VolumeDriver"}, + }) + + fakeDriverFactory.DockerDriverReturns(fakeDriver, nil) + + driverName = fmt.Sprintf("fakedriver-%d", GinkgoParallelProcess()) + }) + + Describe("#Discover", func() { + Context("when given driverspath with no drivers", func() { + It("no drivers are found", func() { + drivers, err := discoverer.Discover(logger) + Expect(err).ToNot(HaveOccurred()) + Expect(len(drivers)).To(Equal(0)) + }) + }) + + Context("with a single driver", func() { + var ( + drivers map[string]volman.Plugin + err error + driverSpecContents []byte + driverSpecExtension string + ) + + BeforeEach(func() { + driverSpecContents = []byte("http://0.0.0.0:8080") + driverSpecExtension = "spec" + }) + + JustBeforeEach(func() { + err := dockerdriver.WriteDriverSpec(logger, defaultPluginsDirectory, driverName, driverSpecExtension, driverSpecContents) + Expect(err).NotTo(HaveOccurred()) + + drivers, err = discoverer.Discover(logger) + Expect(err).ToNot(HaveOccurred()) + }) + + Context("when activate returns an error", func() { + BeforeEach(func() { + fakeDriver.ActivateReturns(dockerdriver.ActivateResponse{Err: "Error"}) + }) + It("should not find drivers that are unresponsive", 
func() { + Expect(err).ToNot(HaveOccurred()) + Expect(len(drivers)).To(Equal(0)) + }) + }) + + Context("when activation with the old plugin spec returns an error", func() { + BeforeEach(func() { + fakeDriver.ActivateReturnsOnCall(0, dockerdriver.ActivateResponse{Err: "Error"}) + fakeDriver.ActivateReturnsOnCall(1, dockerdriver.ActivateResponse{Implements: []string{"VolumeDriver"}, Err: ""}) + }) + + It("should activate with the new plugin spec", func() { + Expect(err).NotTo(HaveOccurred()) + Expect(fakeDriver.ActivateCallCount()).To(BeNumerically(">", 1)) + }) + }) + + It("should find drivers", func() { + Expect(err).ToNot(HaveOccurred()) + Expect(len(drivers)).To(Equal(1)) + Expect(fakeDriverFactory.DockerDriverCallCount()).To(Equal(1)) + }) + + Context("when discover is running with the same config", func() { + BeforeEach(func() { + fakeDriver.MatchesReturns(true) + }) + + JustBeforeEach(func() { + Expect(len(drivers)).To(Equal(1)) + Expect(fakeDriverFactory.DockerDriverCallCount()).To(Equal(1)) + + drivers, err = discoverer.Discover(logger) + }) + + It("should not replace the driver in the registry", func() { + // Expect SetDrivers not to be called + Expect(len(drivers)).To(Equal(1)) + Expect(fakeDriver.ActivateCallCount()).To(Equal(2)) + }) + Context("when the existing driver connection is broken", func() { + BeforeEach(func() { + fakeDriver.ActivateReturnsOnCall(1, dockerdriver.ActivateResponse{Err: "badness"}) + }) + It("should replace the driver in the registry", func() { + Expect(len(drivers)).To(Equal(1)) + Expect(fakeDriver.ActivateCallCount()).To(Equal(3)) + }) + }) + }) + + Context("with different config", func() { + BeforeEach(func() { + fakeDriver.MatchesReturns(false) + }) + + JustBeforeEach(func() { + Expect(len(drivers)).To(Equal(1)) + Expect(fakeDriverFactory.DockerDriverCallCount()).To(Equal(1)) + + drivers, err = discoverer.Discover(logger) + registry.Set(drivers) + }) + + It("should replace the driver in the registry", func() { + // Expect SetDrivers to be called + Expect(len(drivers)).To(Equal(1)) + Expect(fakeDriverFactory.DockerDriverCallCount()).To(Equal(2)) + Expect(fakeDriver.ActivateCallCount()).To(Equal(2)) + }) + }) + + Context("when the driver opts in to unique volume IDs", func() { + BeforeEach(func() { + driverSpecContents = []byte("{\"Addr\":\"http://0.0.0.0:8080\",\"UniqueVolumeIds\": true}") + driverSpecExtension = "json" + }) + + It("discovers the driver and sets the flag on the plugin state", func() { + Expect(err).ToNot(HaveOccurred()) + + Expect(len(drivers)).To(Equal(1)) + plugin := drivers[driverName] + Expect(plugin.GetPluginSpec().UniqueVolumeIds).To(BeTrue()) + + Expect(fakeDriverFactory.DockerDriverCallCount()).To(Equal(1)) + }) + }) + }) + + Context("with multiple driver specs", func() { + var ( + drivers map[string]volman.Plugin + err error + ) + + DescribeTable("should discover drivers in the order: json -> spec -> sock", func(expectedNumberOfDrivers int, specTuple ...specTuple) { + for _, value := range specTuple { + err := dockerdriver.WriteDriverSpec(logger, defaultPluginsDirectory, value.DriverName, value.Spec, []byte(value.SpecFileContents)) + Expect(err).NotTo(HaveOccurred()) + } + + drivers, err = discoverer.Discover(logger) + + Expect(err).ToNot(HaveOccurred()) + Expect(len(drivers)).To(Equal(expectedNumberOfDrivers)) + Expect(fakeDriverFactory.DockerDriverCallCount()).To(Equal(expectedNumberOfDrivers)) + + }, Entry("when there are two unique drivers with different driver formats", 2, + jsonSpec(), + specSpec(), + ), Entry("when 
there are three unique drivers with different driver formats", 3, + jsonSpec(), + specSpec(), + sockSpec(), + ), Entry("when there is 1 unique driver, represented by 2 different driver formats (json and spec)", 1, + specTuple{DriverName: "driver1", Spec: "json", SpecFileContents: `{}`}, + specTuple{DriverName: "driver1", Spec: "spec", SpecFileContents: ``}, + ), Entry("when there is 1 unique driver, represented by 2 different driver formats (spec and sock)", 1, + specTuple{DriverName: "driver2", Spec: "spec", SpecFileContents: ``}, + specTuple{DriverName: "driver2", Spec: "sock", SpecFileContents: ``}, + ), + ) + }) + + Context("when given a compound driverspath", func() { + BeforeEach(func() { + discoverer = voldiscoverers.NewDockerDriverDiscovererWithDriverFactory(logger, registry, []string{defaultPluginsDirectory, secondPluginsDirectory}, fakeDriverFactory) + }) + + Context("with a single driver", func() { + BeforeEach(func() { + err := dockerdriver.WriteDriverSpec(logger, secondPluginsDirectory, driverName, "spec", []byte("http://0.0.0.0:8080")) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should find drivers", func() { + drivers, err := discoverer.Discover(logger) + Expect(err).ToNot(HaveOccurred()) + Expect(len(drivers)).To(Equal(1)) + Expect(fakeDriverFactory.DockerDriverCallCount()).To(Equal(1)) + }) + + }) + + Context("with multiple drivers in multiple directories", func() { + BeforeEach(func() { + err := dockerdriver.WriteDriverSpec(logger, defaultPluginsDirectory, driverName, "json", []byte("{\"Addr\":\"http://0.0.0.0:8080\"}")) + Expect(err).NotTo(HaveOccurred()) + err = dockerdriver.WriteDriverSpec(logger, secondPluginsDirectory, "some-other-driver-name", "json", []byte("{\"Addr\":\"http://0.0.0.0:9090\"}")) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should find both drivers", func() { + drivers, err := discoverer.Discover(logger) + Expect(err).ToNot(HaveOccurred()) + Expect(len(drivers)).To(Equal(2)) + }) + }) + + Context("with the same driver but in multiple directories", func() { + BeforeEach(func() { + err := dockerdriver.WriteDriverSpec(logger, defaultPluginsDirectory, driverName, "json", []byte("{\"Addr\":\"http://0.0.0.0:8080\"}")) + Expect(err).NotTo(HaveOccurred()) + err = dockerdriver.WriteDriverSpec(logger, secondPluginsDirectory, driverName, "spec", []byte("http://0.0.0.0:9090")) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should preferentially select the driver in the first directory", func() { + _, err := discoverer.Discover(logger) + Expect(err).ToNot(HaveOccurred()) + _, _, _, specFileName := fakeDriverFactory.DockerDriverArgsForCall(0) + Expect(specFileName).To(Equal(driverName + ".json")) + }) + }) + }) + + Context("when given a driver spec not in canonical form", func() { + var ( + fakeRemoteClientFactory *dockerdriverfakes.FakeRemoteClientFactory + driverFactory voldiscoverers.DockerDriverFactory + fakeDriver *dockerdriverfakes.FakeDriver + driverDiscoverer volman.Discoverer + ) + + JustBeforeEach(func() { + fakeRemoteClientFactory = new(dockerdriverfakes.FakeRemoteClientFactory) + driverFactory = voldiscoverers.NewDockerDriverFactoryWithRemoteClientFactory(fakeRemoteClientFactory) + driverDiscoverer = voldiscoverers.NewDockerDriverDiscovererWithDriverFactory(logger, nil, []string{defaultPluginsDirectory}, driverFactory) + }) + + TestCanonicalization := func(context, actual, it, expected string) { + Context(context, func() { + BeforeEach(func() { + err := dockerdriver.WriteDriverSpec(logger, defaultPluginsDirectory, driverName, "spec", 
[]byte(actual)) + Expect(err).NotTo(HaveOccurred()) + }) + + JustBeforeEach(func() { + fakeDriver = new(dockerdriverfakes.FakeDriver) + fakeDriver.ActivateReturns(dockerdriver.ActivateResponse{ + Implements: []string{"VolumeDriver"}, + }) + + fakeRemoteClientFactory.NewRemoteClientReturns(fakeDriver, nil) + }) + + It(it, func() { + drivers, err := driverDiscoverer.Discover(logger) + Expect(err).ToNot(HaveOccurred()) + Expect(len(drivers)).To(Equal(1)) + Expect(fakeRemoteClientFactory.NewRemoteClientCallCount()).To(Equal(1)) + Expect(fakeRemoteClientFactory.NewRemoteClientArgsForCall(0)).To(Equal(expected)) + }) + }) + } + + TestCanonicalization("with an ip (and no port)", "127.0.0.1", "should return a canonicalized address", "http://127.0.0.1") + TestCanonicalization("with a tcp protocol uri with port", "tcp://127.0.0.1:8080", "should return a canonicalized address", "http://127.0.0.1:8080") + TestCanonicalization("with a tcp protocol uri without port", "tcp://127.0.0.1", "should return a canonicalized address", "http://127.0.0.1") + TestCanonicalization("with a unix address including protocol", "unix:///other.sock", "should return a canonicalized address", "unix:///other.sock") + TestCanonicalization("with a unix address missing its protocol", "/other.sock", "should return a canonicalized address", "/other.sock") + + Context("with an invalid url", func() { + BeforeEach(func() { + err := dockerdriver.WriteDriverSpec(logger, defaultPluginsDirectory, driverName+"2", "spec", []byte("127.0.0.1:8080")) + Expect(err).ToNot(HaveOccurred()) + err = dockerdriver.WriteDriverSpec(logger, defaultPluginsDirectory, driverName, "spec", []byte("htt%p:\\\\")) + Expect(err).NotTo(HaveOccurred()) + }) + + It("doesn't make a driver", func() { + _, err := driverDiscoverer.Discover(logger) + Expect(err).NotTo(HaveOccurred()) + Expect(fakeRemoteClientFactory.NewRemoteClientCallCount()).To(Equal(0)) + }) + }) + }) + + Context("when given a driver spec with a bad driver", func() { + BeforeEach(func() { + err := dockerdriver.WriteDriverSpec(logger, defaultPluginsDirectory, driverName, "spec", []byte("127.0.0.1:8080")) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return no drivers if the driver doesn't implement VolumeDriver", func() { + fakeDriver.ActivateReturns(dockerdriver.ActivateResponse{ + Implements: []string{"something-else"}, + }) + + drivers, err := discoverer.Discover(logger) + Expect(err).ToNot(HaveOccurred()) + Expect(len(drivers)).To(Equal(0)) + }) + + It("should return no drivers if the driver doesn't respond", func() { + fakeDriver.ActivateReturns(dockerdriver.ActivateResponse{ + Err: "some-error", + }) + + drivers, err := discoverer.Discover(logger) + Expect(err).ToNot(HaveOccurred()) + Expect(len(drivers)).To(Equal(0)) + }) + }) + }) +}) + +func sockSpec() specTuple { + return specTuple{DriverName: "driver3", Spec: "sock", SpecFileContents: ``} +} + +func specSpec() specTuple { + return specTuple{DriverName: "driver2", Spec: "spec", SpecFileContents: `http://0.0.0.0:8080`} +} + +func jsonSpec() specTuple { + return specTuple{DriverName: "driver1", Spec: "json", SpecFileContents: `{}`} +} + +type specTuple struct { + Spec string + SpecFileContents string + DriverName string + ExpectedNumberOfDrivers int +} diff --git a/src/code.cloudfoundry.org/volman/voldiscoverers/docker_driver_factory.go b/src/code.cloudfoundry.org/volman/voldiscoverers/docker_driver_factory.go new file mode 100644 index 0000000000..c5f4180422 --- /dev/null +++ 
b/src/code.cloudfoundry.org/volman/voldiscoverers/docker_driver_factory.go @@ -0,0 +1,144 @@ +package voldiscoverers + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + + "net/url" + + "code.cloudfoundry.org/dockerdriver" + "code.cloudfoundry.org/dockerdriver/driverhttp" + "code.cloudfoundry.org/goshims/osshim" + "code.cloudfoundry.org/lager/v3" +) + +//go:generate counterfeiter -o ../volmanfakes/fake_docker_driver_factory.go . DockerDriverFactory + +// DriverFactories are responsible for instantiating remote client implementations of the dockerdriver.Driver interface. +type DockerDriverFactory interface { + // Given a driver id, path and config filename returns a remote client implementation of the dockerdriver.Driver interface + DockerDriver(logger lager.Logger, driverId string, driverPath, driverFileName string) (dockerdriver.Driver, error) +} + +type dockerDriverFactory struct { + Factory driverhttp.RemoteClientFactory + useOs osshim.Os +} + +func NewDockerDriverFactory() DockerDriverFactory { + remoteClientFactory := driverhttp.NewRemoteClientFactory() + return NewDockerDriverFactoryWithRemoteClientFactory(remoteClientFactory) +} + +func NewDockerDriverFactoryWithRemoteClientFactory(remoteClientFactory driverhttp.RemoteClientFactory) DockerDriverFactory { + return &dockerDriverFactory{remoteClientFactory, &osshim.OsShim{}} +} + +func NewDockerDriverFactoryWithOs(useOs osshim.Os) DockerDriverFactory { + remoteClientFactory := driverhttp.NewRemoteClientFactory() + return &dockerDriverFactory{remoteClientFactory, useOs} +} + +func (r *dockerDriverFactory) DockerDriver(logger lager.Logger, driverId string, driverPath string, driverFileName string) (dockerdriver.Driver, error) { + logger = logger.Session("driver", lager.Data{"driverId": driverId, "driverFileName": driverFileName}) + logger.Info("start") + defer logger.Info("end") + + var address string + var tls *dockerdriver.TLSConfig + if strings.Contains(driverFileName, ".") { + extension := strings.Split(driverFileName, ".")[1] + switch extension { + case "sock": + address = path.Join(driverPath, driverFileName) + case "spec": + configFile, err := r.useOs.Open(path.Join(driverPath, driverFileName)) + if err != nil { + logger.Error("error-opening-config", err, lager.Data{"DriverFileName": driverFileName}) + return nil, err + } + reader := bufio.NewReader(configFile) + addressBytes, _, err := reader.ReadLine() + if err != nil { // no real value in faking this as bigger problems exist when this fails + logger.Error("error-reading-driver-file", err, lager.Data{"DriverFileName": driverFileName}) + return nil, err + } + address = string(addressBytes) + case "json": + // extract url from json file + var driverJsonSpec dockerdriver.DriverSpec + configFile, err := r.useOs.Open(path.Join(driverPath, driverFileName)) + if err != nil { + logger.Error("error-opening-config", err, lager.Data{"DriverFileName": driverFileName}) + return nil, err + } + jsonParser := json.NewDecoder(configFile) + if err = jsonParser.Decode(&driverJsonSpec); err != nil { + logger.Error("parsing-config-file-error", err) + return nil, err + } + address = driverJsonSpec.Address + tls = driverJsonSpec.TLSConfig + default: + err := fmt.Errorf("unknown-driver-extension: %s", extension) + logger.Error("driver", err) + return nil, err + + } + var err error + + address, err = r.canonicalize(logger, address) + if err != nil { + logger.Error("invalid-address", err, lager.Data{"address": address}) + return nil, err + } + + logger.Info("getting-driver", 
lager.Data{"address": address}) + driver, err := r.Factory.NewRemoteClient(address, tls) + if err != nil { + logger.Error("error-building-driver", err, lager.Data{"address": address}) + return nil, err + } + + return driver, nil + } + + return nil, fmt.Errorf("Driver '%s' not found in list of known drivers", driverId) +} + +func (r *dockerDriverFactory) canonicalize(logger lager.Logger, address string) (string, error) { + logger = logger.Session("canonicalize", lager.Data{"address": address}) + logger.Debug("start") + defer logger.Debug("end") + + url, err := url.Parse(address) + if err != nil { + return address, err + } + + switch url.Scheme { + case "http", "https": + return address, nil + case "tcp": + return fmt.Sprintf("http://%s%s", url.Host, url.Path), nil + case "unix": + return address, nil + default: + if strings.HasSuffix(url.Path, ".sock") { + return fmt.Sprintf("%s%s", url.Host, url.Path), nil + } + } + return fmt.Sprintf("http://%s", address), nil +} + +func driverImplements(protocol string, activateResponseProtocols []string) bool { + for _, nextProtocol := range activateResponseProtocols { + if protocol == nextProtocol { + return true + } + } + return false +} diff --git a/src/code.cloudfoundry.org/volman/voldiscoverers/docker_driver_factory_test.go b/src/code.cloudfoundry.org/volman/voldiscoverers/docker_driver_factory_test.go new file mode 100644 index 0000000000..8451634fbe --- /dev/null +++ b/src/code.cloudfoundry.org/volman/voldiscoverers/docker_driver_factory_test.go @@ -0,0 +1,147 @@ +package voldiscoverers_test + +import ( + "fmt" + "os" + "path" + "path/filepath" + "runtime" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "code.cloudfoundry.org/dockerdriver" + "code.cloudfoundry.org/dockerdriver/dockerdriverfakes" + "code.cloudfoundry.org/goshims/osshim/os_fake" + "code.cloudfoundry.org/lager/v3" + "code.cloudfoundry.org/lager/v3/lagertest" + "code.cloudfoundry.org/volman/voldiscoverers" +) + +var _ = Describe("DriverFactory", func() { + var ( + testLogger lager.Logger + driverName string + ) + BeforeEach(func() { + testLogger = lagertest.NewTestLogger("ClientTest") + }) + + Context("when a valid driver spec is discovered", func() { + var ( + fakeRemoteClientFactory *dockerdriverfakes.FakeRemoteClientFactory + localDriver *dockerdriverfakes.FakeDriver + driver dockerdriver.Driver + driverFactory voldiscoverers.DockerDriverFactory + ) + BeforeEach(func() { + driverName = "some-driver-name" + fakeRemoteClientFactory = new(dockerdriverfakes.FakeRemoteClientFactory) + localDriver = new(dockerdriverfakes.FakeDriver) + fakeRemoteClientFactory.NewRemoteClientReturns(localDriver, nil) + driverFactory = voldiscoverers.NewDockerDriverFactoryWithRemoteClientFactory(fakeRemoteClientFactory) + + }) + + Context("when a json driver spec is discovered", func() { + BeforeEach(func() { + err := dockerdriver.WriteDriverSpec(testLogger, defaultPluginsDirectory, driverName, "json", []byte("{\"Addr\":\"http://0.0.0.0:8080\"}")) + Expect(err).NotTo(HaveOccurred()) + driver, err = driverFactory.DockerDriver(testLogger, driverName, defaultPluginsDirectory, driverName+".json") + Expect(err).ToNot(HaveOccurred()) + }) + It("should return the correct driver", func() { + Expect(driver).To(Equal(localDriver)) + Expect(fakeRemoteClientFactory.NewRemoteClientArgsForCall(0)).To(Equal("http://0.0.0.0:8080")) + }) + It("should fail if unable to open file", func() { + fakeOs := new(os_fake.FakeOs) + driverFactory := voldiscoverers.NewDockerDriverFactoryWithOs(fakeOs) + 
fakeOs.OpenReturns(nil, fmt.Errorf("error opening file")) + _, err := driverFactory.DockerDriver(testLogger, driverName, defaultPluginsDirectory, driverName+".json") + Expect(err).To(HaveOccurred()) + }) + }) + + Context("when an invalid json spec is discovered", func() { + BeforeEach(func() { + err := dockerdriver.WriteDriverSpec(testLogger, defaultPluginsDirectory, driverName, "json", []byte("{\"invalid\"}")) + Expect(err).NotTo(HaveOccurred()) + }) + It("should error", func() { + _, err := driverFactory.DockerDriver(testLogger, driverName, defaultPluginsDirectory, driverName+".json") + Expect(err).To(HaveOccurred()) + }) + }) + + Context("when a spec driver spec is discovered", func() { + BeforeEach(func() { + err := dockerdriver.WriteDriverSpec(testLogger, defaultPluginsDirectory, driverName, "spec", []byte("http://0.0.0.0:8080")) + Expect(err).NotTo(HaveOccurred()) + driver, err = driverFactory.DockerDriver(testLogger, driverName, defaultPluginsDirectory, driverName+".spec") + Expect(err).ToNot(HaveOccurred()) + }) + It("should return the correct driver", func() { + Expect(driver).To(Equal(localDriver)) + Expect(fakeRemoteClientFactory.NewRemoteClientArgsForCall(0)).To(Equal("http://0.0.0.0:8080")) + }) + It("should fail if unable to open file", func() { + fakeOs := new(os_fake.FakeOs) + driverFactory := voldiscoverers.NewDockerDriverFactoryWithOs(fakeOs) + fakeOs.OpenReturns(nil, fmt.Errorf("error opening file")) + _, err := driverFactory.DockerDriver(testLogger, driverName, defaultPluginsDirectory, driverName+".spec") + Expect(err).To(HaveOccurred()) + }) + + It("should error if driver id doesn't match found driver", func() { + fakeRemoteClientFactory := new(dockerdriverfakes.FakeRemoteClientFactory) + driverFactory := voldiscoverers.NewDockerDriverFactoryWithRemoteClientFactory(fakeRemoteClientFactory) + _, err := driverFactory.DockerDriver(testLogger, "garbage", defaultPluginsDirectory, "garbage.garbage") + Expect(err).To(HaveOccurred()) + }) + }) + + if runtime.GOOS != "windows" { + Context("when a sock driver spec is discovered", func() { + BeforeEach(func() { + f, err := os.Create(filepath.Join(defaultPluginsDirectory, driverName+".sock")) + Expect(err).ToNot(HaveOccurred()) + defer f.Close() + }) + It("should return the correct driver", func() { + driver, err := driverFactory.DockerDriver(testLogger, driverName, defaultPluginsDirectory, driverName+".sock") + Expect(err).ToNot(HaveOccurred()) + Expect(driver).To(Equal(localDriver)) + address := path.Join(defaultPluginsDirectory, driverName+".sock") + Expect(fakeRemoteClientFactory.NewRemoteClientArgsForCall(0)).To(Equal(address)) + }) + It("should error for invalid sock endpoint address", func() { + fakeRemoteClientFactory.NewRemoteClientReturns(nil, fmt.Errorf("invalid address")) + _, err := driverFactory.DockerDriver(testLogger, driverName, defaultPluginsDirectory, driverName+".sock") + Expect(err).To(HaveOccurred()) + }) + }) + } + }) + + Context("when valid driver spec is not discovered", func() { + var ( + fakeRemoteClientFactory *dockerdriverfakes.FakeRemoteClientFactory + fakeDriver *dockerdriverfakes.FakeDriver + driverFactory voldiscoverers.DockerDriverFactory + ) + BeforeEach(func() { + driverName = "some-driver-name" + fakeRemoteClientFactory = new(dockerdriverfakes.FakeRemoteClientFactory) + fakeDriver = new(dockerdriverfakes.FakeDriver) + fakeRemoteClientFactory.NewRemoteClientReturns(fakeDriver, nil) + driverFactory = voldiscoverers.NewDockerDriverFactoryWithRemoteClientFactory(fakeRemoteClientFactory) + + }) + 
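+		// no driver spec file was written for driverName in this context, so constructing the driver should fail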
It("should error", func() { + _, err := driverFactory.DockerDriver(testLogger, driverName, defaultPluginsDirectory, driverName+".spec") + Expect(err).To(HaveOccurred()) + }) + }) + +}) diff --git a/src/code.cloudfoundry.org/volman/voldiscoverers/voldiscoverers_suite_test.go b/src/code.cloudfoundry.org/volman/voldiscoverers/voldiscoverers_suite_test.go new file mode 100644 index 0000000000..53aad5c8ca --- /dev/null +++ b/src/code.cloudfoundry.org/volman/voldiscoverers/voldiscoverers_suite_test.go @@ -0,0 +1,28 @@ +package voldiscoverers_test + +import ( + "os" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "testing" +) + +var defaultPluginsDirectory string +var secondPluginsDirectory string + +var _ = BeforeEach(func() { + var err error + + defaultPluginsDirectory, err = os.MkdirTemp(os.TempDir(), "clienttest") + Expect(err).ShouldNot(HaveOccurred()) + + secondPluginsDirectory, err = os.MkdirTemp(os.TempDir(), "clienttest2") + Expect(err).ShouldNot(HaveOccurred()) +}) + +func TestVoldiscoverers(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Voldiscoverers Suite") +} diff --git a/src/code.cloudfoundry.org/volman/voldocker/docker_driver_plugin.go b/src/code.cloudfoundry.org/volman/voldocker/docker_driver_plugin.go new file mode 100644 index 0000000000..a646ef06e7 --- /dev/null +++ b/src/code.cloudfoundry.org/volman/voldocker/docker_driver_plugin.go @@ -0,0 +1,132 @@ +package voldocker + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + + "code.cloudfoundry.org/dockerdriver" + "code.cloudfoundry.org/dockerdriver/driverhttp" + "code.cloudfoundry.org/lager/v3" + "code.cloudfoundry.org/volman" +) + +type DockerDriverPlugin struct { + DockerDriver interface{} + PluginSpec volman.PluginSpec +} + +func NewVolmanPluginWithDockerDriver(driver dockerdriver.Driver, pluginSpec volman.PluginSpec) volman.Plugin { + return &DockerDriverPlugin{ + DockerDriver: driver, + PluginSpec: pluginSpec, + } +} + +func (dw *DockerDriverPlugin) Matches(logger lager.Logger, pluginSpec volman.PluginSpec) bool { + logger = logger.Session("matches") + logger.Info("start") + defer logger.Info("end") + + var matches bool + matchableDriver, ok := dw.DockerDriver.(dockerdriver.MatchableDriver) + logger.Info("matches", lager.Data{"is-matchable": ok}) + if ok { + var tlsConfig *dockerdriver.TLSConfig + if pluginSpec.TLSConfig != nil { + tlsConfig = &dockerdriver.TLSConfig{ + InsecureSkipVerify: pluginSpec.TLSConfig.InsecureSkipVerify, + CAFile: pluginSpec.TLSConfig.CAFile, + CertFile: pluginSpec.TLSConfig.CertFile, + KeyFile: pluginSpec.TLSConfig.KeyFile, + } + } + matches = matchableDriver.Matches(logger, pluginSpec.Address, tlsConfig) + } + logger.Info("matches", lager.Data{"matches": matches}) + return matches +} + +func (d *DockerDriverPlugin) ListVolumes(logger lager.Logger) ([]string, error) { + logger = logger.Session("list-volumes") + logger.Info("start") + defer logger.Info("end") + + volumes := []string{} + env := driverhttp.NewHttpDriverEnv(logger, context.TODO()) + + response := d.DockerDriver.(dockerdriver.Driver).List(env) + if response.Err != "" { + return volumes, errors.New(response.Err) + } + + for _, volumeInfo := range response.Volumes { + volumes = append(volumes, volumeInfo.Name) + } + + return volumes, nil +} + +func (d *DockerDriverPlugin) Mount(logger lager.Logger, volumeId string, opts map[string]interface{}) (volman.MountResponse, error) { + logger = logger.Session("mount") + logger.Info("start") + defer logger.Info("end") + + env := 
driverhttp.NewHttpDriverEnv(logger, context.TODO()) + + logger.Debug("creating-volume", lager.Data{"volumeId": volumeId}) + response := d.DockerDriver.(dockerdriver.Driver).Create(env, dockerdriver.CreateRequest{Name: volumeId, Opts: opts}) + if response.Err != "" { + return volman.MountResponse{}, errors.New(response.Err) + } + + mountRequest := dockerdriver.MountRequest{Name: volumeId} + logger.Debug("calling-docker-driver-with-mount-request", lager.Data{"mountRequest": mountRequest}) + mountResponse := d.DockerDriver.(dockerdriver.Driver).Mount(env, mountRequest) + logger.Debug("response-from-docker-driver", lager.Data{"response": mountResponse}) + + if !strings.HasPrefix(mountResponse.Mountpoint, "/var/vcap/data") { + logger.Info("invalid-mountpath", lager.Data{"detail": fmt.Sprintf("Invalid or dangerous mountpath %s outside of /var/vcap/data", mountResponse.Mountpoint)}) + } + + if mountResponse.Err != "" { + safeError := dockerdriver.SafeError{} + err := json.Unmarshal([]byte(mountResponse.Err), &safeError) + if err == nil { + return volman.MountResponse{}, safeError + } else { + return volman.MountResponse{}, errors.New(mountResponse.Err) + } + } + + return volman.MountResponse{Path: mountResponse.Mountpoint}, nil +} + +func (d *DockerDriverPlugin) Unmount(logger lager.Logger, volumeId string) error { + logger = logger.Session("unmount") + logger.Info("start") + defer logger.Info("end") + + env := driverhttp.NewHttpDriverEnv(logger, context.TODO()) + + if response := d.DockerDriver.(dockerdriver.Driver).Unmount(env, dockerdriver.UnmountRequest{Name: volumeId}); response.Err != "" { + + safeError := dockerdriver.SafeError{} + err := json.Unmarshal([]byte(response.Err), &safeError) + if err == nil { + err = safeError + } else { + err = errors.New(response.Err) + } + + logger.Error("unmount-failed", err) + return err + } + return nil +} + +func (d *DockerDriverPlugin) GetPluginSpec() volman.PluginSpec { + return d.PluginSpec +} diff --git a/src/code.cloudfoundry.org/volman/voldocker/docker_driver_plugin_test.go b/src/code.cloudfoundry.org/volman/voldocker/docker_driver_plugin_test.go new file mode 100644 index 0000000000..dd412b6518 --- /dev/null +++ b/src/code.cloudfoundry.org/volman/voldocker/docker_driver_plugin_test.go @@ -0,0 +1,250 @@ +package voldocker_test + +import ( + "encoding/json" + "errors" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "code.cloudfoundry.org/dockerdriver" + "code.cloudfoundry.org/dockerdriver/dockerdriverfakes" + "code.cloudfoundry.org/volman/voldocker" + + "code.cloudfoundry.org/lager/v3/lagertest" + "code.cloudfoundry.org/volman" + "github.com/onsi/gomega/gbytes" +) + +var _ = Describe("DockerDriverMounter", func() { + var ( + volumeId string + logger *lagertest.TestLogger + dockerPlugin volman.Plugin + fakeDockerDriver *dockerdriverfakes.FakeDriver + ) + + BeforeEach(func() { + volumeId = "fake-volume" + logger = lagertest.NewTestLogger("docker-mounter-test") + fakeDockerDriver = &dockerdriverfakes.FakeDriver{} + dockerPlugin = voldocker.NewVolmanPluginWithDockerDriver(fakeDockerDriver, volman.PluginSpec{}) + }) + + Describe("Mount", func() { + Context("when given a driver", func() { + + Context("mount", func() { + + BeforeEach(func() { + mountResponse := dockerdriver.MountResponse{Mountpoint: "/var/vcap/data/mounts/" + volumeId} + fakeDockerDriver.MountReturns(mountResponse) + }) + + It("should be able to mount without warning", func() { + mountPath, err := dockerPlugin.Mount(logger, volumeId, map[string]interface{}{"volume_id": volumeId}) + Expect(err).NotTo(HaveOccurred()) + Expect(mountPath).NotTo(Equal("")) + Expect(logger.Buffer()).NotTo(gbytes.Say("Invalid or dangerous mountpath")) + }) + + It("should not be able to mount if mount fails", func() { + mountResponse := dockerdriver.MountResponse{Err: "an error"} + fakeDockerDriver.MountReturns(mountResponse) + _, err := dockerPlugin.Mount(logger, volumeId, map[string]interface{}{"volume_id": volumeId}) + Expect(err).To(HaveOccurred()) + }) + + Context("with bad mount path", func() { + var err error + BeforeEach(func() { + mountResponse := dockerdriver.MountResponse{Mountpoint: "/var/tmp"} + fakeDockerDriver.MountReturns(mountResponse) + }) + + JustBeforeEach(func() { + _, err = dockerPlugin.Mount(logger, volumeId, map[string]interface{}{"volume_id": volumeId}) + }) + + It("should return a warning in the log", func() { + Expect(err).NotTo(HaveOccurred()) + Expect(logger.Buffer()).To(gbytes.Say("Invalid or dangerous mountpath")) + }) + }) + + Context("with safe error", func() { + var ( + err error + safeError dockerdriver.SafeError + unsafeError error + errString string + ) + + JustBeforeEach(func() { + + mountResponse := dockerdriver.MountResponse{Err: errString} + fakeDockerDriver.MountReturns(mountResponse) + _, err = dockerPlugin.Mount(logger, volumeId, map[string]interface{}{"volume_id": volumeId}) + }) + + Context("with safe error msg", func() { + BeforeEach(func() { + safeError = dockerdriver.SafeError{SafeDescription: "safe-badness"} + errBytes, err := json.Marshal(safeError) + Expect(err).NotTo(HaveOccurred()) + errString = string(errBytes[:]) + }) + + It("should return a safe error", func() { + Expect(err).To(HaveOccurred()) + _, ok := err.(dockerdriver.SafeError) + Expect(ok).To(Equal(true)) + Expect(err.Error()).To(Equal("safe-badness")) + }) + }) + + Context("with unsafe error msg", func() { + BeforeEach(func() { + unsafeError = errors.New("unsafe-badness") + errString = unsafeError.Error() + }) + + It("should return regular error", func() { + Expect(err).To(HaveOccurred()) + _, ok := err.(dockerdriver.SafeError) + Expect(ok).To(Equal(false)) + Expect(err.Error()).To(Equal("unsafe-badness")) + }) + + }) + + Context("with a really unsafe error msg", func() { + BeforeEach(func() { + errString = "{ badness" + }) + + It("should return regular error", func() { + Expect(err).To(HaveOccurred()) + _, ok := 
err.(dockerdriver.SafeError) + Expect(ok).To(Equal(false)) + Expect(err.Error()).To(Equal("{ badness")) + }) + }) + }) + + }) + }) + }) + + Describe("Unmount", func() { + It("should be able to unmount", func() { + err := dockerPlugin.Unmount(logger, volumeId) + Expect(err).NotTo(HaveOccurred()) + Expect(fakeDockerDriver.UnmountCallCount()).To(Equal(1)) + Expect(fakeDockerDriver.RemoveCallCount()).To(Equal(0)) + }) + + It("should not be able to unmount when driver unmount fails", func() { + fakeDockerDriver.UnmountReturns(dockerdriver.ErrorResponse{Err: "unmount failure"}) + err := dockerPlugin.Unmount(logger, volumeId) + Expect(err).To(HaveOccurred()) + }) + + Context("with safe error", func() { + var ( + err error + safeError dockerdriver.SafeError + unsafeError error + errString string + ) + + JustBeforeEach(func() { + fakeDockerDriver.UnmountReturns(dockerdriver.ErrorResponse{Err: errString}) + err = dockerPlugin.Unmount(logger, volumeId) + }) + + Context("with safe error msg", func() { + BeforeEach(func() { + safeError = dockerdriver.SafeError{SafeDescription: "safe-badness"} + errBytes, err := json.Marshal(safeError) + Expect(err).NotTo(HaveOccurred()) + errString = string(errBytes[:]) + }) + + It("should return a safe error", func() { + Expect(err).To(HaveOccurred()) + _, ok := err.(dockerdriver.SafeError) + Expect(ok).To(Equal(true)) + Expect(err.Error()).To(Equal("safe-badness")) + }) + }) + + Context("with unsafe error msg", func() { + BeforeEach(func() { + unsafeError = errors.New("unsafe-badness") + errString = unsafeError.Error() + }) + + It("should return regular error", func() { + Expect(err).To(HaveOccurred()) + _, ok := err.(dockerdriver.SafeError) + Expect(ok).To(Equal(false)) + Expect(err.Error()).To(Equal("unsafe-badness")) + }) + + }) + + Context("with a really unsafe error msg", func() { + BeforeEach(func() { + errString = "{ badness" + }) + + It("should return regular error", func() { + Expect(err).To(HaveOccurred()) + _, ok := err.(dockerdriver.SafeError) + Expect(ok).To(Equal(false)) + Expect(err.Error()).To(Equal("{ badness")) + }) + }) + }) + + }) + + Describe("ListVolumes", func() { + var ( + volumes []string + err error + ) + BeforeEach(func() { + listResponse := dockerdriver.ListResponse{Volumes: []dockerdriver.VolumeInfo{ + {Name: "fake_volume_1"}, + {Name: "fake_volume_2"}, + }} + fakeDockerDriver.ListReturns(listResponse) + }) + + JustBeforeEach(func() { + volumes, err = dockerPlugin.ListVolumes(logger) + }) + + It("should be able list volumes", func() { + Expect(err).NotTo(HaveOccurred()) + Expect(volumes).To(ContainElement("fake_volume_1")) + Expect(volumes).To(ContainElement("fake_volume_2")) + }) + + Context("when the driver returns an err response", func() { + BeforeEach(func() { + listResponse := dockerdriver.ListResponse{Volumes: []dockerdriver.VolumeInfo{ + {Name: "fake_volume_1"}, + {Name: "fake_volume_2"}, + }, Err: "badness"} + fakeDockerDriver.ListReturns(listResponse) + }) + It("should return an error", func() { + Expect(err).To(HaveOccurred()) + }) + }) + }) + +}) diff --git a/src/code.cloudfoundry.org/volman/voldocker/voldocker_suite_test.go b/src/code.cloudfoundry.org/volman/voldocker/voldocker_suite_test.go new file mode 100644 index 0000000000..8379d05e38 --- /dev/null +++ b/src/code.cloudfoundry.org/volman/voldocker/voldocker_suite_test.go @@ -0,0 +1,13 @@ +package voldocker_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "testing" +) + +func TestVoldocker(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Voldocker Suite") +} diff --git a/src/code.cloudfoundry.org/volman/vollocal/client_local.go b/src/code.cloudfoundry.org/volman/vollocal/client_local.go new file mode 100644 index 0000000000..0088fd8d9f --- /dev/null +++ b/src/code.cloudfoundry.org/volman/vollocal/client_local.go @@ -0,0 +1,213 @@ +package vollocal + +import ( + "errors" + "time" + + "github.com/tedsuo/ifrit" + + "os" + + "code.cloudfoundry.org/clock" + loggingclient "code.cloudfoundry.org/diego-logging-client" + "code.cloudfoundry.org/dockerdriver" + dockerdriverutils "code.cloudfoundry.org/dockerdriver/utils" + "code.cloudfoundry.org/lager/v3" + "code.cloudfoundry.org/volman" + "code.cloudfoundry.org/volman/voldiscoverers" + "github.com/tedsuo/ifrit/grouper" +) + +const ( + volmanMountErrorsCounter = "VolmanMountErrors" + volmanMountDuration = "VolmanMountDuration" + volmanUnmountErrorsCounter = "VolmanUnmountErrors" + volmanUnmountDuration = "VolmanUnmountDuration" +) + +var ( + pluginMountDurations = map[string]string{} + pluginUnmountDurations = map[string]string{} +) + +type DriverConfig struct { + DriverPaths []string + SyncInterval time.Duration +} + +func NewDriverConfig() DriverConfig { + return DriverConfig{ + SyncInterval: time.Second * 30, + } +} + +type localClient struct { + pluginRegistry volman.PluginRegistry + metronClient loggingclient.IngressClient + clock clock.Clock +} + +func NewServer(logger lager.Logger, metronClient loggingclient.IngressClient, config DriverConfig) (volman.Manager, ifrit.Runner) { + clock := clock.NewClock() + registry := NewPluginRegistry() + + dockerDiscoverer := voldiscoverers.NewDockerDriverDiscoverer(logger, registry, config.DriverPaths) + + syncer := NewSyncer(logger, registry, []volman.Discoverer{dockerDiscoverer}, config.SyncInterval, clock) + purger := NewMountPurger(logger, registry) + + grouper := grouper.NewOrdered(os.Kill, grouper.Members{grouper.Member{Name: "volman-syncer", Runner: syncer.Runner()}, grouper.Member{Name: "volman-purger", Runner: purger.Runner()}}) + + return NewLocalClient(logger, registry, metronClient, clock), grouper +} + +func NewLocalClient(logger lager.Logger, registry volman.PluginRegistry, metronClient loggingclient.IngressClient, clock clock.Clock) volman.Manager { + return &localClient{ + pluginRegistry: registry, + metronClient: metronClient, + clock: clock, + } +} + +func (client *localClient) ListDrivers(logger lager.Logger) (volman.ListDriversResponse, error) { + logger = logger.Session("list-drivers") + logger.Info("start") + defer logger.Info("end") + + var infoResponses []volman.InfoResponse + plugins := client.pluginRegistry.Plugins() + + for name := range plugins { + infoResponses = append(infoResponses, volman.InfoResponse{Name: name}) + } + + logger.Debug("listing-drivers", lager.Data{"drivers": infoResponses}) + return volman.ListDriversResponse{Drivers: infoResponses}, nil +} + +func (client *localClient) Mount(logger lager.Logger, pluginId string, volumeId string, containerId string, config map[string]interface{}) (volman.MountResponse, error) { + logger = logger.Session("mount") + logger.Info("start") + defer logger.Info("end") + + mountStart := client.clock.Now() + + defer func() { + sendMountDurationMetrics(logger, client.metronClient, time.Since(mountStart), pluginId) + }() + + logger.Debug("plugin-mounting-volume", lager.Data{"pluginId": pluginId, "volumeId": volumeId, "containerId": containerId}) + + 
plugin, found := client.pluginRegistry.Plugin(pluginId) + if !found { + err := errors.New("Plugin '" + pluginId + "' not found in list of known plugins") + logger.Error("mount-plugin-lookup-error", err) + metricErr := client.metronClient.IncrementCounter(volmanMountErrorsCounter) + if metricErr != nil { + logger.Debug("failed-emitting-mount-plugin-lookup-error-metric", lager.Data{"error": err}) + } + + return volman.MountResponse{}, err + } + + if plugin.GetPluginSpec().UniqueVolumeIds { + logger.Debug("generating-unique-volume-id") + uniqueVolId := dockerdriverutils.NewVolumeId(volumeId, containerId) + volumeId = uniqueVolId.GetUniqueId() + } + + mountResponse, err := plugin.Mount(logger, volumeId, config) + + if err != nil { + metricErr := client.metronClient.IncrementCounter(volmanMountErrorsCounter) + if metricErr != nil { + logger.Debug("failed-emitting-mount-error-metric", lager.Data{"error": err}) + } + if dockerdriverSafeErr, ok := err.(dockerdriver.SafeError); ok { + return volman.MountResponse{}, volman.SafeError{SafeDescription: dockerdriverSafeErr.SafeDescription} + } + return volman.MountResponse{}, err + } + + return mountResponse, nil +} + +func sendMountDurationMetrics(logger lager.Logger, metronClient loggingclient.IngressClient, duration time.Duration, pluginId string) { + err := metronClient.SendDuration(volmanMountDuration, duration) + if err != nil { + logger.Error("failed-to-send-volman-mount-duration-metric", err) + } + + m, ok := pluginMountDurations[pluginId] + if !ok { + m = "VolmanMountDurationFor" + pluginId + pluginMountDurations[pluginId] = m + } + err = metronClient.SendDuration(m, duration) + if err != nil { + logger.Error("failed-to-send-volman-mount-duration-metric", err) + } +} + +func sendUnmountDurationMetrics(logger lager.Logger, metronClient loggingclient.IngressClient, duration time.Duration, pluginId string) { + err := metronClient.SendDuration(volmanUnmountDuration, duration) + if err != nil { + logger.Error("failed-to-send-volman-unmount-duration-metric", err) + } + + m, ok := pluginUnmountDurations[pluginId] + if !ok { + m = "VolmanUnmountDurationFor" + pluginId + pluginUnmountDurations[pluginId] = m + } + err = metronClient.SendDuration(m, duration) + if err != nil { + logger.Error("failed-to-send-volman-unmount-duration-metric", err) + } +} + +func (client *localClient) Unmount(logger lager.Logger, pluginId string, volumeId string, containerId string) error { + logger = logger.Session("unmount") + logger.Info("start") + defer logger.Info("end") + logger.Debug("unmounting-volume", lager.Data{"volumeName": volumeId}) + + unmountStart := client.clock.Now() + + defer func() { + sendUnmountDurationMetrics(logger, client.metronClient, time.Since(unmountStart), pluginId) + }() + + plugin, found := client.pluginRegistry.Plugin(pluginId) + if !found { + err := errors.New("Plugin '" + pluginId + "' not found in list of known plugins") + logger.Error("mount-plugin-lookup-error", err) + metricErr := client.metronClient.IncrementCounter(volmanUnmountErrorsCounter) + if metricErr != nil { + logger.Debug("failed-emitting-mount-plugin-lookup-error-metric", lager.Data{"error": err}) + } + return err + } + + if plugin.GetPluginSpec().UniqueVolumeIds { + logger.Debug("generating-unique-volume-id") + uniqueVolId := dockerdriverutils.NewVolumeId(volumeId, containerId) + volumeId = uniqueVolId.GetUniqueId() + } + + err := plugin.Unmount(logger, volumeId) + if err != nil { + metricErr := client.metronClient.IncrementCounter(volmanUnmountErrorsCounter) + if metricErr 
!= nil { + logger.Debug("failed-emitting-unmount-error-metric", lager.Data{"error": err}) + } + logger.Error("unmount-failed", err) + + if dockerdriverSafeErr, ok := err.(dockerdriver.SafeError); ok { + return volman.SafeError{SafeDescription: dockerdriverSafeErr.SafeDescription} + } + return err + } + + return nil +} diff --git a/src/code.cloudfoundry.org/volman/vollocal/client_local_suite_test.go b/src/code.cloudfoundry.org/volman/vollocal/client_local_suite_test.go new file mode 100644 index 0000000000..25a7e77dc0 --- /dev/null +++ b/src/code.cloudfoundry.org/volman/vollocal/client_local_suite_test.go @@ -0,0 +1,82 @@ +package vollocal_test + +import ( + "fmt" + "os" + "os/exec" + "strings" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "code.cloudfoundry.org/volman" + "github.com/onsi/gomega/gexec" + "github.com/tedsuo/ifrit" + ginkgomon "github.com/tedsuo/ifrit/ginkgomon_v2" +) + +var client volman.Manager + +var defaultPluginsDirectory string +var secondPluginsDirectory string + +var localDriverPath string +var localDriverServerPort int +var debugServerAddress string +var localDriverProcess ifrit.Process +var localDriverRunner *ginkgomon.Runner + +var tmpDriversPath string + +func TestDriver(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Volman Local Client Suite") +} + +var _ = SynchronizedBeforeSuite(func() []byte { + var err error + + localDriverPath, err = gexec.Build("code.cloudfoundry.org/localdriver/cmd/localdriver", "-race") + Expect(err).NotTo(HaveOccurred()) + return []byte(localDriverPath) +}, func(pathsByte []byte) { + path := string(pathsByte) + localDriverPath = strings.Split(path, ",")[0] +}) + +var _ = BeforeEach(func() { + var err error + tmpDriversPath, err = os.MkdirTemp("", "driversPath") + Expect(err).NotTo(HaveOccurred()) + + defaultPluginsDirectory, err = os.MkdirTemp(os.TempDir(), "clienttest") + Expect(err).ShouldNot(HaveOccurred()) + + secondPluginsDirectory, err = os.MkdirTemp(os.TempDir(), "clienttest2") + Expect(err).ShouldNot(HaveOccurred()) + + localDriverServerPort = 9750 + GinkgoParallelProcess() + + debugServerAddress = fmt.Sprintf("0.0.0.0:%d", 9850+GinkgoParallelProcess()) + localDriverRunner = ginkgomon.New(ginkgomon.Config{ + Name: "localdriver", + Command: exec.Command( + localDriverPath, + "-listenAddr", fmt.Sprintf("0.0.0.0:%d", localDriverServerPort), + "-debugAddr", debugServerAddress, + "-driversPath", defaultPluginsDirectory, + ), + StartCheck: "localdriver-server.started", + }) +}) + +var _ = AfterEach(func() { + ginkgomon.Kill(localDriverProcess) +}) + +var _ = SynchronizedAfterSuite(func() { + +}, func() { + gexec.CleanupBuildArtifacts() +}) diff --git a/src/code.cloudfoundry.org/volman/vollocal/client_local_test.go b/src/code.cloudfoundry.org/volman/vollocal/client_local_test.go new file mode 100644 index 0000000000..8fc10bca5e --- /dev/null +++ b/src/code.cloudfoundry.org/volman/vollocal/client_local_test.go @@ -0,0 +1,466 @@ +package vollocal_test + +import ( + "encoding/json" + "sync" + "time" + + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + mfakes "code.cloudfoundry.org/diego-logging-client/testhelpers" + "code.cloudfoundry.org/dockerdriver" + loggregator "code.cloudfoundry.org/go-loggregator/v9" + "code.cloudfoundry.org/volman/voldiscoverers" + "code.cloudfoundry.org/volman/vollocal" + "code.cloudfoundry.org/volman/volmanfakes" + + "code.cloudfoundry.org/clock/fakeclock" + "code.cloudfoundry.org/dockerdriver/dockerdriverfakes" + dockerdriverutils "code.cloudfoundry.org/dockerdriver/utils" + "code.cloudfoundry.org/lager/v3" + "code.cloudfoundry.org/lager/v3/lagertest" + "code.cloudfoundry.org/volman" + "github.com/onsi/gomega/gbytes" + "github.com/tedsuo/ifrit" + ginkgomon "github.com/tedsuo/ifrit/ginkgomon_v2" +) + +var fakeDriverId = 0 +var mutex = &sync.Mutex{} + +func GetNextFakeDriverId() string { + mutex.Lock() + defer mutex.Unlock() + + fakeDriverId = fakeDriverId + 1 + return fmt.Sprintf("fakedriver-%d", fakeDriverId) +} + +var _ = Describe("Volman", func() { + var ( + logger *lagertest.TestLogger + + fakeDriverFactory *volmanfakes.FakeDockerDriverFactory + fakeDriver *dockerdriverfakes.FakeDriver + fakeClock *fakeclock.FakeClock + fakeMetronClient *mfakes.FakeIngressClient + + scanInterval time.Duration + + driverRegistry volman.PluginRegistry + dockerDriverDiscoverer volman.Discoverer + durationMetricMap map[string]time.Duration + counterMetricMap map[string]int + + process ifrit.Process + + fakeDriverId string + ) + + BeforeEach(func() { + logger = lagertest.NewTestLogger("client-test") + + fakeDriverFactory = new(volmanfakes.FakeDockerDriverFactory) + fakeClock = fakeclock.NewFakeClock(time.Unix(123, 456)) + + scanInterval = 1 * time.Second + + driverRegistry = vollocal.NewPluginRegistry() + durationMetricMap = make(map[string]time.Duration) + counterMetricMap = make(map[string]int) + + fakeMetronClient = new(mfakes.FakeIngressClient) + fakeMetronClient.SendDurationStub = func(name string, value time.Duration, opts ...loggregator.EmitGaugeOption) error { + durationMetricMap[name] = value + return nil + } + fakeMetronClient.IncrementCounterStub = func(name string) error { + value, ok := counterMetricMap[name] + if ok { + counterMetricMap[name] = value + 1 + } else { + counterMetricMap[name] = 1 + } + return nil + } + + fakeDriverId = GetNextFakeDriverId() + }) + + Describe("ListDrivers", func() { + BeforeEach(func() { + dockerDriverDiscoverer = voldiscoverers.NewDockerDriverDiscovererWithDriverFactory(logger, driverRegistry, []string{"/somePath"}, fakeDriverFactory) + client = vollocal.NewLocalClient(logger, driverRegistry, fakeMetronClient, fakeClock) + + syncer := vollocal.NewSyncer(logger, driverRegistry, []volman.Discoverer{dockerDriverDiscoverer}, scanInterval, fakeClock) + process = ginkgomon.Invoke(syncer.Runner()) + }) + + It("should report empty list of drivers", func() { + drivers, err := client.ListDrivers(logger) + Expect(err).NotTo(HaveOccurred()) + Expect(len(drivers.Drivers)).To(Equal(0)) + }) + + Context("has no drivers in location", func() { + + BeforeEach(func() { + fakeDriverFactory = new(volmanfakes.FakeDockerDriverFactory) + }) + + It("should report empty list of drivers", func() { + drivers, err := client.ListDrivers(logger) + Expect(err).NotTo(HaveOccurred()) + Expect(len(drivers.Drivers)).To(Equal(0)) + }) + + AfterEach(func() { + ginkgomon.Kill(process) + }) + + }) + + Context("has driver in location", func() { + BeforeEach(func() { + err := dockerdriver.WriteDriverSpec(logger, defaultPluginsDirectory, fakeDriverId, "spec", []byte("http://0.0.0.0:8080")) + 
Expect(err).NotTo(HaveOccurred()) + + dockerDriverDiscoverer = voldiscoverers.NewDockerDriverDiscovererWithDriverFactory(logger, driverRegistry, []string{defaultPluginsDirectory}, fakeDriverFactory) + client = vollocal.NewLocalClient(logger, driverRegistry, fakeMetronClient, fakeClock) + + fakeDriver := new(dockerdriverfakes.FakeDriver) + fakeDriverFactory.DockerDriverReturns(fakeDriver, nil) + + fakeDriver.ActivateReturns(dockerdriver.ActivateResponse{Implements: []string{"VolumeDriver"}}) + }) + + It("should report empty list of drivers", func() { + drivers, err := client.ListDrivers(logger) + Expect(err).NotTo(HaveOccurred()) + Expect(len(drivers.Drivers)).To(Equal(0)) + }) + + Context("after running drivers discovery", func() { + BeforeEach(func() { + syncer := vollocal.NewSyncer(logger, driverRegistry, []volman.Discoverer{dockerDriverDiscoverer}, scanInterval, fakeClock) + process = ginkgomon.Invoke(syncer.Runner()) + }) + + AfterEach(func() { + ginkgomon.Kill(process) + }) + + It("should report fakedriver", func() { + drivers, err := client.ListDrivers(logger) + Expect(err).NotTo(HaveOccurred()) + Expect(len(drivers.Drivers)).ToNot(Equal(0)) + Expect(drivers.Drivers[0].Name).To(Equal(fakeDriverId)) + }) + + }) + }) + }) + + Describe("Mount and Unmount", func() { + var ( + volumeId string + ) + BeforeEach(func() { + volumeId = "fake-volume" + }) + Context("when given a driver", func() { + var ( + driverSpecExtension string + driverSpecContents []byte + ) + BeforeEach(func() { + fakeDriverFactory = new(volmanfakes.FakeDockerDriverFactory) + fakeDriver = new(dockerdriverfakes.FakeDriver) + fakeDriverFactory.DockerDriverReturns(fakeDriver, nil) + + drivers := make(map[string]dockerdriver.Driver) + drivers[fakeDriverId] = fakeDriver + + driverSpecExtension = "spec" + driverSpecContents = []byte("http://0.0.0.0:8080") + + fakeDriver.ActivateReturns(dockerdriver.ActivateResponse{Implements: []string{"VolumeDriver"}}) + + dockerDriverDiscoverer = voldiscoverers.NewDockerDriverDiscovererWithDriverFactory(logger, driverRegistry, []string{defaultPluginsDirectory}, fakeDriverFactory) + client = vollocal.NewLocalClient(logger, driverRegistry, fakeMetronClient, fakeClock) + + }) + + JustBeforeEach(func() { + err := dockerdriver.WriteDriverSpec(logger, defaultPluginsDirectory, fakeDriverId, driverSpecExtension, driverSpecContents) + Expect(err).NotTo(HaveOccurred()) + + syncer := vollocal.NewSyncer(logger, driverRegistry, []volman.Discoverer{dockerDriverDiscoverer}, scanInterval, fakeClock) + process = ginkgomon.Invoke(syncer.Runner()) + }) + + AfterEach(func() { + ginkgomon.Kill(process) + }) + + Context("mount", func() { + BeforeEach(func() { + mountResponse := dockerdriver.MountResponse{Mountpoint: "/var/vcap/data/mounts/" + volumeId} + fakeDriver.MountReturns(mountResponse) + }) + + It("should be able to mount without warning", func() { + mountPath, err := client.Mount(logger, fakeDriverId, volumeId, "", map[string]interface{}{}) + Expect(err).NotTo(HaveOccurred()) + Expect(mountPath).NotTo(Equal("")) + Expect(logger.Buffer()).NotTo(gbytes.Say("Invalid or dangerous mountpath")) + }) + + It("should not be able to mount if mount fails", func() { + mountResponse := dockerdriver.MountResponse{Err: "an error"} + fakeDriver.MountReturns(mountResponse) + + _, err := client.Mount(logger, fakeDriverId, volumeId, "", map[string]interface{}{}) + Expect(err).To(HaveOccurred()) + _, isVolmanSafeError := err.(volman.SafeError) + Expect(isVolmanSafeError).To(Equal(false)) + + }) + + It("should wrap 
dockerdriver safeError to volman safeError", func() { + dockerdriverSafeError := dockerdriver.SafeError{SafeDescription: "safe-badness"} + safeErrBytes, err := json.Marshal(dockerdriverSafeError) + Expect(err).NotTo(HaveOccurred()) + mountResponse := dockerdriver.MountResponse{Err: string(safeErrBytes[:])} + fakeDriver.MountReturns(mountResponse) + + _, err = client.Mount(logger, fakeDriverId, volumeId, "", map[string]interface{}{}) + Expect(err).To(HaveOccurred()) + _, isVolmanSafeError := err.(volman.SafeError) + Expect(isVolmanSafeError).To(Equal(true)) + }) + + Context("with bad mount path", func() { + var err error + BeforeEach(func() { + mountResponse := dockerdriver.MountResponse{Mountpoint: "/var/tmp"} + fakeDriver.MountReturns(mountResponse) + }) + + JustBeforeEach(func() { + _, err = client.Mount(logger, fakeDriverId, volumeId, "", map[string]interface{}{}) + }) + + It("should return a warning in the log", func() { + Expect(err).NotTo(HaveOccurred()) + Expect(logger.Buffer()).To(gbytes.Say("Invalid or dangerous mountpath")) + }) + }) + + Context("with metrics", func() { + It("should emit mount time on successful mount", func() { + + client.Mount(logger, fakeDriverId, volumeId, "", map[string]interface{}{"volume_id": volumeId}) + + Eventually(durationMetricMap).Should(HaveKeyWithValue("VolmanMountDuration", Not(BeZero()))) + Eventually(durationMetricMap).Should(HaveKeyWithValue(fmt.Sprintf("VolmanMountDurationFor%s", fakeDriverId), Not(BeZero()))) + }) + + It("should increment error count on mount failure", func() { + Expect(counterMetricMap).ShouldNot(HaveKey("VolmanMountErrors")) + mountResponse := dockerdriver.MountResponse{Err: "an error"} + fakeDriver.MountReturns(mountResponse) + + client.Mount(logger, fakeDriverId, volumeId, "", map[string]interface{}{"volume_id": volumeId}) + Expect(counterMetricMap).Should(HaveKeyWithValue("VolmanMountErrors", 1)) + }) + }) + + Context("when UniqueVolumeIds is set", func() { + BeforeEach(func() { + driverSpecExtension = "json" + driverSpecContents = []byte(`{"Addr":"http://0.0.0.0:8080","UniqueVolumeIds": true}`) + }) + + It("should append the container ID to the volume ID passed to the plugin's Mount() call", func() { + mountResponse, err := client.Mount(logger, fakeDriverId, volumeId, "some-container-id", map[string]interface{}{}) + Expect(err).NotTo(HaveOccurred()) + Expect(mountResponse.Path).To(Equal("/var/vcap/data/mounts/" + volumeId)) + + Expect(fakeDriver.MountCallCount()).To(Equal(1)) + _, mountRequest := fakeDriver.MountArgsForCall(0) + uniqueVolId := dockerdriverutils.NewVolumeId(volumeId, "some-container-id") + Expect(mountRequest.Name).To(Equal(uniqueVolId.GetUniqueId())) + }) + }) + }) + + Context("umount", func() { + It("should be able to unmount", func() { + err := client.Unmount(logger, fakeDriverId, volumeId, "") + Expect(err).NotTo(HaveOccurred()) + Expect(fakeDriver.UnmountCallCount()).To(Equal(1)) + Expect(fakeDriver.RemoveCallCount()).To(Equal(0)) + }) + + It("should not be able to unmount when driver unmount fails", func() { + fakeDriver.UnmountReturns(dockerdriver.ErrorResponse{Err: "unmount failure"}) + err := client.Unmount(logger, fakeDriverId, volumeId, "") + Expect(err).To(HaveOccurred()) + + _, isVolmanSafeError := err.(volman.SafeError) + Expect(isVolmanSafeError).To(Equal(false)) + }) + + It("should wrap dockerdriver safeError to volman safeError", func() { + dockerdriverSafeError := dockerdriver.SafeError{SafeDescription: "safe-badness"} + safeErrBytes, err := json.Marshal(dockerdriverSafeError) + 
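					// Driver responses carry errors as plain strings, so the SafeError is marshalled
					// to JSON here to exercise the unwrapping path: the voldocker plugin attempts to
					// json.Unmarshal the error string into a dockerdriver.SafeError and, when that
					// succeeds, vollocal re-wraps it as volman.SafeError{SafeDescription: ...}; any
					// non-JSON error string (see the plain "an error" case above) stays a regular error.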
Expect(err).NotTo(HaveOccurred()) + unmountResponse := dockerdriver.ErrorResponse{Err: string(safeErrBytes[:])} + fakeDriver.UnmountReturns(unmountResponse) + + err = client.Unmount(logger, fakeDriverId, volumeId, "") + Expect(err).To(HaveOccurred()) + _, isVolmanSafeError := err.(volman.SafeError) + Expect(isVolmanSafeError).To(Equal(true)) + }) + + Context("with metrics", func() { + It("should emit unmount time on successful unmount", func() { + client.Unmount(logger, fakeDriverId, volumeId, "") + + Eventually(durationMetricMap).Should(HaveKeyWithValue("VolmanUnmountDuration", Not(BeZero()))) + Eventually(durationMetricMap).Should(HaveKeyWithValue(fmt.Sprintf("VolmanUnmountDurationFor%s", fakeDriverId), Not(BeZero()))) + }) + + It("should increment error count on unmount failure", func() { + fakeDriver.UnmountReturns(dockerdriver.ErrorResponse{Err: "unmount failure"}) + + client.Unmount(logger, fakeDriverId, volumeId, "") + Expect(counterMetricMap).Should(HaveKeyWithValue("VolmanUnmountErrors", 1)) + }) + }) + + Context("when UniqueVolumeIds is set", func() { + BeforeEach(func() { + driverSpecExtension = "json" + driverSpecContents = []byte(`{"Addr":"http://0.0.0.0:8080","UniqueVolumeIds": true}`) + }) + + It("should append the container ID to the volume ID passed to the plugin's Unmount() call", func() { + err := client.Unmount(logger, fakeDriverId, volumeId, "some-container-id") + Expect(err).NotTo(HaveOccurred()) + + Expect(fakeDriver.UnmountCallCount()).To(Equal(1)) + _, unmountRequest := fakeDriver.UnmountArgsForCall(0) + uniqueVolId := dockerdriverutils.NewVolumeId(volumeId, "some-container-id") + Expect(unmountRequest.Name).To(Equal(uniqueVolId.GetUniqueId())) + }) + }) + }) + + Context("when driver is not found", func() { + BeforeEach(func() { + fakeDriverFactory.DockerDriverReturns(nil, fmt.Errorf("driver not found")) + }) + + It("should not be able to mount", func() { + _, err := client.Mount(logger, fakeDriverId, "fake-volume", "", map[string]interface{}{}) + Expect(err).To(HaveOccurred()) + }) + + It("should not be able to unmount", func() { + err := client.Unmount(logger, fakeDriverId, "fake-volume", "") + Expect(err).To(HaveOccurred()) + }) + }) + + Context("when driver does not implement VolumeDriver", func() { + BeforeEach(func() { + fakeDriver.ActivateReturns(dockerdriver.ActivateResponse{Implements: []string{"nada"}}) + }) + + It("should not be able to mount", func() { + _, err := client.Mount(logger, fakeDriverId, "fake-volume", "", map[string]interface{}{}) + Expect(err).To(HaveOccurred()) + }) + + It("should not be able to unmount", func() { + err := client.Unmount(logger, fakeDriverId, "fake-volume", "") + Expect(err).To(HaveOccurred()) + }) + }) + }) + + Context("after creating successfully driver is not found", func() { + BeforeEach(func() { + + fakeDriverFactory = new(volmanfakes.FakeDockerDriverFactory) + fakeDriver = new(dockerdriverfakes.FakeDriver) + mountReturn := dockerdriver.MountResponse{Err: "driver not found", + Mountpoint: "", + } + fakeDriver.MountReturns(mountReturn) + fakeDriverFactory.DockerDriverReturns(fakeDriver, nil) + + driverRegistry := vollocal.NewPluginRegistry() + dockerDriverDiscoverer = voldiscoverers.NewDockerDriverDiscovererWithDriverFactory(logger, driverRegistry, []string{"/somePath"}, fakeDriverFactory) + client = vollocal.NewLocalClient(logger, driverRegistry, fakeMetronClient, fakeClock) + + syncer := vollocal.NewSyncer(logger, driverRegistry, []volman.Discoverer{dockerDriverDiscoverer}, scanInterval, fakeClock) + process = 
ginkgomon.Invoke(syncer.Runner()) + + calls := 0 + fakeDriverFactory.DockerDriverStub = func(lager.Logger, string, string, string) (dockerdriver.Driver, error) { + calls++ + if calls > 1 { + return nil, fmt.Errorf("driver not found") + } + return fakeDriver, nil + } + }) + + AfterEach(func() { + ginkgomon.Kill(process) + }) + + It("should not be able to mount", func() { + _, err := client.Mount(logger, fakeDriverId, "fake-volume", "", map[string]interface{}{}) + Expect(err).To(HaveOccurred()) + }) + + }) + + Context("after unsuccessfully creating", func() { + BeforeEach(func() { + localDriverProcess = ginkgomon.Invoke(localDriverRunner) + fakeDriver = new(dockerdriverfakes.FakeDriver) + + fakeDriverFactory = new(volmanfakes.FakeDockerDriverFactory) + fakeDriverFactory.DockerDriverReturns(fakeDriver, nil) + + fakeDriver.CreateReturns(dockerdriver.ErrorResponse{Err: "create fails"}) + + driverRegistry := vollocal.NewPluginRegistry() + dockerDriverDiscoverer = voldiscoverers.NewDockerDriverDiscovererWithDriverFactory(logger, driverRegistry, []string{"/somePath"}, fakeDriverFactory) + client = vollocal.NewLocalClient(logger, driverRegistry, fakeMetronClient, fakeClock) + + syncer := vollocal.NewSyncer(logger, driverRegistry, []volman.Discoverer{dockerDriverDiscoverer}, scanInterval, fakeClock) + process = ginkgomon.Invoke(syncer.Runner()) + }) + + AfterEach(func() { + ginkgomon.Kill(process) + }) + + It("should not be able to mount", func() { + _, err := client.Mount(logger, fakeDriverId, "fake-volume", "", map[string]interface{}{}) + Expect(err).To(HaveOccurred()) + }) + + }) + }) +}) diff --git a/src/code.cloudfoundry.org/volman/vollocal/mount_purger.go b/src/code.cloudfoundry.org/volman/vollocal/mount_purger.go new file mode 100644 index 0000000000..2d63a8779f --- /dev/null +++ b/src/code.cloudfoundry.org/volman/vollocal/mount_purger.go @@ -0,0 +1,66 @@ +package vollocal + +import ( + "fmt" + "os" + + "code.cloudfoundry.org/lager/v3" + "code.cloudfoundry.org/volman" + "github.com/tedsuo/ifrit" +) + +type MountPurger interface { + Runner() ifrit.Runner + PurgeMounts(logger lager.Logger) error +} + +type mountPurger struct { + logger lager.Logger + registry volman.PluginRegistry +} + +func NewMountPurger(logger lager.Logger, registry volman.PluginRegistry) MountPurger { + return &mountPurger{ + logger, + registry, + } +} + +func (p *mountPurger) Runner() ifrit.Runner { + return p +} + +func (p *mountPurger) Run(signals <-chan os.Signal, ready chan<- struct{}) error { + + if err := p.PurgeMounts(p.logger); err != nil { + return err + } + + close(ready) + <-signals + return nil +} + +func (p *mountPurger) PurgeMounts(logger lager.Logger) error { + logger = logger.Session("purge-mounts") + logger.Info("start") + defer logger.Info("end") + + plugins := p.registry.Plugins() + + for _, plugin := range plugins { + volumes, err := plugin.ListVolumes(logger) + if err != nil { + logger.Error("failed-listing-volume-mount", err) + continue + } + + for _, volume := range volumes { + err = plugin.Unmount(logger, volume) + if err != nil { + logger.Error(fmt.Sprintf("failed-unmounting-volume-mount %s", volume), err) + } + } + } + return nil +} diff --git a/src/code.cloudfoundry.org/volman/vollocal/mount_purger_test.go b/src/code.cloudfoundry.org/volman/vollocal/mount_purger_test.go new file mode 100644 index 0000000000..b9a9bd9ac9 --- /dev/null +++ b/src/code.cloudfoundry.org/volman/vollocal/mount_purger_test.go @@ -0,0 +1,127 @@ +package vollocal_test + +import ( + 
"code.cloudfoundry.org/volman/voldiscoverers" + "code.cloudfoundry.org/volman/vollocal" + + "time" + + "code.cloudfoundry.org/clock" + "code.cloudfoundry.org/clock/fakeclock" + "code.cloudfoundry.org/dockerdriver" + "code.cloudfoundry.org/dockerdriver/dockerdriverfakes" + "code.cloudfoundry.org/lager/v3/lagertest" + "code.cloudfoundry.org/volman" + "code.cloudfoundry.org/volman/volmanfakes" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/tedsuo/ifrit" + ginkgomon "github.com/tedsuo/ifrit/ginkgomon_v2" +) + +var _ = Describe("MountPurger", func() { + + var ( + logger *lagertest.TestLogger + + driverRegistry volman.PluginRegistry + dockerDriverDiscoverer volman.Discoverer + purger vollocal.MountPurger + + fakeDriverFactory *volmanfakes.FakeDockerDriverFactory + fakeDriver *dockerdriverfakes.FakeDriver + fakeClock clock.Clock + + scanInterval time.Duration + + process ifrit.Process + + err error + ) + + BeforeEach(func() { + logger = lagertest.NewTestLogger("mount-purger") + + driverRegistry = vollocal.NewPluginRegistry() + }) + + JustBeforeEach(func() { + purger = vollocal.NewMountPurger(logger, driverRegistry) + err = purger.PurgeMounts(logger) + }) + + It("should succeed when there are no drivers", func() { + //err := purger.PurgeMounts(logger) + Expect(err).NotTo(HaveOccurred()) + }) + + Context("when there is a non-dockerdriver plugin", func() { + BeforeEach(func() { + driverRegistry.Set(map[string]volman.Plugin{"not-a-dockerdriver": new(volmanfakes.FakePlugin)}) + }) + + It("should succeed", func() { + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("when there is a driver", func() { + BeforeEach(func() { + err := dockerdriver.WriteDriverSpec(logger, defaultPluginsDirectory, "fakedriver", "spec", []byte("http://0.0.0.0:8080")) + Expect(err).NotTo(HaveOccurred()) + + fakeDriverFactory = new(volmanfakes.FakeDockerDriverFactory) + + fakeClock = fakeclock.NewFakeClock(time.Unix(123, 456)) + + scanInterval = 1 * time.Second + + dockerDriverDiscoverer = voldiscoverers.NewDockerDriverDiscovererWithDriverFactory(logger, driverRegistry, []string{defaultPluginsDirectory}, fakeDriverFactory) + client = vollocal.NewLocalClient(logger, driverRegistry, nil, fakeClock) + syncer := vollocal.NewSyncer(logger, driverRegistry, []volman.Discoverer{dockerDriverDiscoverer}, scanInterval, fakeClock) + fakeDriver = new(dockerdriverfakes.FakeDriver) + fakeDriverFactory.DockerDriverReturns(fakeDriver, nil) + + fakeDriver.ActivateReturns(dockerdriver.ActivateResponse{Implements: []string{"VolumeDriver"}}) + + process = ginkgomon.Invoke(syncer.Runner()) + }) + + AfterEach(func() { + ginkgomon.Kill(process) + }) + + It("should succeed when there are no mounts", func() { + Expect(err).NotTo(HaveOccurred()) + }) + + Context("when there is a mount", func() { + BeforeEach(func() { + fakeDriver.ListReturns(dockerdriver.ListResponse{Volumes: []dockerdriver.VolumeInfo{ + { + Name: "a-volume", + Mountpoint: "foo", + }, + }}) + }) + + It("should unmount the volume", func() { + Expect(err).NotTo(HaveOccurred()) + + Expect(fakeDriver.UnmountCallCount()).To(Equal(1)) + }) + + Context("when the unmount fails", func() { + BeforeEach(func() { + fakeDriver.UnmountReturns(dockerdriver.ErrorResponse{Err: "badness"}) + }) + + It("should log but not fail", func() { + Expect(err).NotTo(HaveOccurred()) + + Expect(logger.TestSink.LogMessages()).To(ContainElement("mount-purger.purge-mounts.failed-unmounting-volume-mount a-volume")) + }) + }) + }) + }) +}) diff --git 
a/src/code.cloudfoundry.org/volman/vollocal/plugin_registry.go b/src/code.cloudfoundry.org/volman/vollocal/plugin_registry.go new file mode 100644 index 0000000000..9501fa786e --- /dev/null +++ b/src/code.cloudfoundry.org/volman/vollocal/plugin_registry.go @@ -0,0 +1,66 @@ +package vollocal + +import ( + "sync" + + "code.cloudfoundry.org/volman" +) + +type pluginRegistry struct { + sync.RWMutex + registryEntries map[string]volman.Plugin +} + +func NewPluginRegistry() volman.PluginRegistry { + return &pluginRegistry{ + registryEntries: map[string]volman.Plugin{}, + } +} + +func NewPluginRegistryWith(initialMap map[string]volman.Plugin) volman.PluginRegistry { + return &pluginRegistry{ + registryEntries: initialMap, + } +} + +func (d *pluginRegistry) Plugin(id string) (volman.Plugin, bool) { + d.RLock() + defer d.RUnlock() + + if !d.containsPlugin(id) { + return nil, false + } + + return d.registryEntries[id], true +} + +func (d *pluginRegistry) Plugins() map[string]volman.Plugin { + d.RLock() + defer d.RUnlock() + + return d.registryEntries +} + +func (d *pluginRegistry) Set(plugins map[string]volman.Plugin) { + d.Lock() + defer d.Unlock() + + d.registryEntries = plugins +} + +func (d *pluginRegistry) Keys() []string { + d.Lock() + defer d.Unlock() + + var keys []string + for k := range d.registryEntries { + keys = append(keys, k) + } + + return keys +} + +func (d *pluginRegistry) containsPlugin(id string) bool { + _, ok := d.registryEntries[id] + return ok +} diff --git a/src/code.cloudfoundry.org/volman/vollocal/plugin_registry_test.go b/src/code.cloudfoundry.org/volman/vollocal/plugin_registry_test.go new file mode 100644 index 0000000000..91b52e8a5a --- /dev/null +++ b/src/code.cloudfoundry.org/volman/vollocal/plugin_registry_test.go @@ -0,0 +1,94 @@ +package vollocal_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "code.cloudfoundry.org/volman/voldocker" + . 
"code.cloudfoundry.org/volman/vollocal" + + "code.cloudfoundry.org/dockerdriver/dockerdriverfakes" + "code.cloudfoundry.org/volman" +) + +var _ = Describe("PluginRegistry", func() { + var ( + emptyRegistry, oneRegistry, manyRegistry volman.PluginRegistry + ) + + BeforeEach(func() { + emptyRegistry = NewPluginRegistry() + + oneRegistry = NewPluginRegistryWith(map[string]volman.Plugin{ + "one": voldocker.NewVolmanPluginWithDockerDriver(new(dockerdriverfakes.FakeDriver), volman.PluginSpec{}), + }) + + manyRegistry = NewPluginRegistryWith(map[string]volman.Plugin{ + "one": voldocker.NewVolmanPluginWithDockerDriver(new(dockerdriverfakes.FakeDriver), volman.PluginSpec{}), + "two": voldocker.NewVolmanPluginWithDockerDriver(new(dockerdriverfakes.FakeDriver), volman.PluginSpec{}), + }) + }) + + Describe("#Plugin", func() { + It("sets the plugin to new value", func() { + onePlugin, exists := oneRegistry.Plugin("one") + Expect(exists).To(BeTrue()) + Expect(onePlugin).NotTo(BeNil()) + }) + + It("returns nil and false if the plugin doesn't exist", func() { + onePlugin, exists := oneRegistry.Plugin("doesnotexist") + Expect(exists).To(BeFalse()) + Expect(onePlugin).To(BeNil()) + }) + }) + + Describe("#Plugins", func() { + It("should return return empty map for emptyRegistry", func() { + plugins := emptyRegistry.Plugins() + Expect(len(plugins)).To(Equal(0)) + }) + + It("should return return one driver for oneRegistry", func() { + plugins := oneRegistry.Plugins() + Expect(len(plugins)).To(Equal(1)) + }) + }) + + Describe("#Set", func() { + It("replaces plugin if it already exists", func() { + newPlugin := map[string]volman.Plugin{ + "one": voldocker.NewVolmanPluginWithDockerDriver(new(dockerdriverfakes.FakeDriver), volman.PluginSpec{}), + } + oneRegistry.Set(newPlugin) + onePlugin, exists := oneRegistry.Plugin("one") + Expect(exists).To(BeTrue()) + Expect(onePlugin).NotTo(BeNil()) + }) + + It("adds plugin that does not exists", func() { + newPlugin := map[string]volman.Plugin{ + "one": voldocker.NewVolmanPluginWithDockerDriver(new(dockerdriverfakes.FakeDriver), volman.PluginSpec{}), + "two": voldocker.NewVolmanPluginWithDockerDriver(new(dockerdriverfakes.FakeDriver), volman.PluginSpec{}), + "three": voldocker.NewVolmanPluginWithDockerDriver(new(dockerdriverfakes.FakeDriver), volman.PluginSpec{}), + } + manyRegistry.Set(newPlugin) + threePlugin, exists := manyRegistry.Plugin("three") + Expect(exists).To(BeTrue()) + Expect(threePlugin).NotTo(BeNil()) + }) + }) + + Describe("#Keys", func() { + It("should return return {'one'} for oneRegistry keys", func() { + keys := emptyRegistry.Keys() + Expect(len(keys)).To(Equal(0)) + }) + + It("should return return {'one'} for oneRegistry keys", func() { + keys := oneRegistry.Keys() + Expect(len(keys)).To(Equal(1)) + Expect(keys[0]).To(Equal("one")) + }) + }) +}) diff --git a/src/code.cloudfoundry.org/volman/vollocal/syncer.go b/src/code.cloudfoundry.org/volman/vollocal/syncer.go new file mode 100644 index 0000000000..b84f931a39 --- /dev/null +++ b/src/code.cloudfoundry.org/volman/vollocal/syncer.go @@ -0,0 +1,97 @@ +package vollocal + +import ( + "fmt" + "os" + "time" + + "code.cloudfoundry.org/clock" + "code.cloudfoundry.org/lager/v3" + "code.cloudfoundry.org/volman" + "github.com/tedsuo/ifrit" +) + +type Syncer struct { + logger lager.Logger + registry volman.PluginRegistry + scanInterval time.Duration + clock clock.Clock + discoverer []volman.Discoverer +} + +func NewSyncer(logger lager.Logger, registry volman.PluginRegistry, discoverer []volman.Discoverer, 
scanInterval time.Duration, clock clock.Clock) *Syncer { + return &Syncer{ + logger: logger, + registry: registry, + scanInterval: scanInterval, + clock: clock, + discoverer: discoverer, + } +} + +func NewSyncerWithShims(logger lager.Logger, registry volman.PluginRegistry, discoverer []volman.Discoverer, scanInterval time.Duration, clock clock.Clock) *Syncer { + return &Syncer{ + logger: logger, + registry: registry, + scanInterval: scanInterval, + clock: clock, + discoverer: discoverer, + } +} + +func (p *Syncer) Runner() ifrit.Runner { + return p +} + +func (p *Syncer) Run(signals <-chan os.Signal, ready chan<- struct{}) error { + logger := p.logger.Session("sync-plugin") + logger.Info("start") + defer logger.Info("end") + + logger.Info("running-discovery") + allPlugins, err := discoverAllplugins(logger, p.discoverer) + if err != nil { + return err + } + + p.registry.Set(allPlugins) + + timer := p.clock.NewTimer(p.scanInterval) + defer timer.Stop() + + close(ready) + + for { + select { + case <-timer.C(): + go func() { + logger.Info("running-re-discovery") + allPlugins, err := discoverAllplugins(logger, p.discoverer) + if err != nil { + logger.Error("failed-discover", err) + } + p.registry.Set(allPlugins) + timer.Reset(p.scanInterval) + }() + case signal := <-signals: + logger.Info("signalled", lager.Data{"signal": signal.String()}) + return nil + } + } +} + +func discoverAllplugins(logger lager.Logger, discoverers []volman.Discoverer) (map[string]volman.Plugin, error) { + allPlugins := map[string]volman.Plugin{} + for _, discoverer := range discoverers { + plugins, err := discoverer.Discover(logger) + logger.Debug(fmt.Sprintf("plugins found: %#v", plugins)) + if err != nil { + logger.Error("failed-discover", err) + return map[string]volman.Plugin{}, err + } + for k, v := range plugins { + allPlugins[k] = v + } + } + return allPlugins, nil +} diff --git a/src/code.cloudfoundry.org/volman/vollocal/syncer_test.go b/src/code.cloudfoundry.org/volman/vollocal/syncer_test.go new file mode 100644 index 0000000000..d719857f3e --- /dev/null +++ b/src/code.cloudfoundry.org/volman/vollocal/syncer_test.go @@ -0,0 +1,149 @@ +package vollocal_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "code.cloudfoundry.org/lager/v3/lagertest" + + "time" + + "code.cloudfoundry.org/clock/fakeclock" + . 
"code.cloudfoundry.org/volman/vollocal" + "code.cloudfoundry.org/volman/volmanfakes" + "github.com/tedsuo/ifrit" + ginkgomon "github.com/tedsuo/ifrit/ginkgomon_v2" + + "code.cloudfoundry.org/volman" +) + +var _ = Describe("Syncer", func() { + var ( + syncer *Syncer + registry volman.PluginRegistry + logger *lagertest.TestLogger + scanInterval time.Duration + fakeClock *fakeclock.FakeClock + fakeDiscoverer1 *volmanfakes.FakeDiscoverer + fakeDiscoverer2 *volmanfakes.FakeDiscoverer + fakeDiscoverer3 *volmanfakes.FakeDiscoverer + process ifrit.Process + ) + + BeforeEach(func() { + scanInterval = 10 * time.Second + fakeClock = fakeclock.NewFakeClock(time.Unix(123, 456)) + + logger = lagertest.NewTestLogger("plugin-syncer-test") + + registry = NewPluginRegistry() + }) + + JustBeforeEach(func() { + syncer = NewSyncer(logger, registry, []volman.Discoverer{fakeDiscoverer1, fakeDiscoverer2, fakeDiscoverer3}, scanInterval, fakeClock) + }) + + Describe("#Runner", func() { + It("has a non-nil runner", func() { + Expect(syncer.Runner()).NotTo(BeNil()) + }) + + It("has a non-nil and empty driver registry", func() { + Expect(registry).NotTo(BeNil()) + Expect(len(registry.Plugins())).To(Equal(0)) + }) + }) + + Describe("#Run", func() { + + JustBeforeEach(func() { + process = ginkgomon.Invoke(syncer.Runner()) + }) + + AfterEach(func() { + ginkgomon.Kill(process) + }) + + Context("given three discoverers", func() { + BeforeEach(func() { + fakeDiscoverer1 = &volmanfakes.FakeDiscoverer{} + fakeDiscoverer2 = &volmanfakes.FakeDiscoverer{} + fakeDiscoverer3 = &volmanfakes.FakeDiscoverer{} + }) + + Context("given each discoverer returns a plugin", func() { + BeforeEach(func() { + fakePluginDiscovered1 := map[string]volman.Plugin{"plugin1": &volmanfakes.FakePlugin{}} + fakeDiscoverer1.DiscoverReturns(fakePluginDiscovered1, nil) + }) + + It("should add both to the registry", func() { + Expect(registry).NotTo(BeNil()) + Expect(len(registry.Plugins())).To(Equal(1)) + }) + + }) + + Context("given plugins are added over time", func() { + It("should discover each new plugin", func() { + Eventually(registry.Plugins).Should(HaveLen(0)) + + fakePluginDiscovered1 := map[string]volman.Plugin{"plugin1": &volmanfakes.FakePlugin{}} + fakeDiscoverer1.DiscoverReturns(fakePluginDiscovered1, nil) + fakeClock.Increment(scanInterval + 1) + Eventually(registry.Plugins).Should(HaveLen(1)) + + fakePluginDiscovered2 := map[string]volman.Plugin{"plugin2": &volmanfakes.FakePlugin{}} + fakeDiscoverer2.DiscoverReturns(fakePluginDiscovered2, nil) + fakeClock.Increment(scanInterval + 1) + Eventually(registry.Plugins).Should(HaveLen(2)) + + fakePluginDiscovered3 := map[string]volman.Plugin{"plugin3": &volmanfakes.FakePlugin{}} + fakeDiscoverer3.DiscoverReturns(fakePluginDiscovered3, nil) + fakeClock.Increment(scanInterval + 1) + Eventually(registry.Plugins).Should(HaveLen(3)) + + Expect(fakeDiscoverer1.DiscoverCallCount()).To(Equal(4)) + Expect(fakeDiscoverer2.DiscoverCallCount()).To(Equal(4)) + Expect(fakeDiscoverer3.DiscoverCallCount()).To(Equal(4)) + }) + }) + + Context("given plugins are removed over time", func() { + BeforeEach(func() { + fakePluginDiscovered1 := map[string]volman.Plugin{"plugin1": &volmanfakes.FakePlugin{}} + fakeDiscoverer1.DiscoverReturns(fakePluginDiscovered1, nil) + + fakePluginDiscovered2 := map[string]volman.Plugin{"plugin2": &volmanfakes.FakePlugin{}} + fakeDiscoverer2.DiscoverReturns(fakePluginDiscovered2, nil) + + fakePluginDiscovered3 := map[string]volman.Plugin{"plugin3": &volmanfakes.FakePlugin{}} + 
fakeDiscoverer3.DiscoverReturns(fakePluginDiscovered3, nil) + }) + + It("should remove the plugins", func() { + Eventually(registry.Plugins).Should(HaveLen(3)) + + fakePluginDiscovered1 := map[string]volman.Plugin{} + fakeDiscoverer1.DiscoverReturns(fakePluginDiscovered1, nil) + fakeClock.Increment(scanInterval + 1) + Eventually(registry.Plugins).Should(HaveLen(2)) + + fakePluginDiscovered2 := map[string]volman.Plugin{} + fakeDiscoverer2.DiscoverReturns(fakePluginDiscovered2, nil) + fakeClock.Increment(scanInterval + 1) + Eventually(registry.Plugins).Should(HaveLen(1)) + + fakePluginDiscovered3 := map[string]volman.Plugin{} + fakeDiscoverer3.DiscoverReturns(fakePluginDiscovered3, nil) + fakeClock.Increment(scanInterval + 1) + Eventually(registry.Plugins).Should(HaveLen(0)) + + Expect(fakeDiscoverer1.DiscoverCallCount()).To(Equal(4)) + Expect(fakeDiscoverer2.DiscoverCallCount()).To(Equal(4)) + Expect(fakeDiscoverer3.DiscoverCallCount()).To(Equal(4)) + }) + }) + }) + }) +}) diff --git a/src/code.cloudfoundry.org/volman/volmanfakes/fake_discoverer.go b/src/code.cloudfoundry.org/volman/volmanfakes/fake_discoverer.go new file mode 100644 index 0000000000..c1ff7a2dc4 --- /dev/null +++ b/src/code.cloudfoundry.org/volman/volmanfakes/fake_discoverer.go @@ -0,0 +1,116 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package volmanfakes + +import ( + sync "sync" + + lager "code.cloudfoundry.org/lager/v3" + volman "code.cloudfoundry.org/volman" +) + +type FakeDiscoverer struct { + DiscoverStub func(lager.Logger) (map[string]volman.Plugin, error) + discoverMutex sync.RWMutex + discoverArgsForCall []struct { + arg1 lager.Logger + } + discoverReturns struct { + result1 map[string]volman.Plugin + result2 error + } + discoverReturnsOnCall map[int]struct { + result1 map[string]volman.Plugin + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeDiscoverer) Discover(arg1 lager.Logger) (map[string]volman.Plugin, error) { + fake.discoverMutex.Lock() + ret, specificReturn := fake.discoverReturnsOnCall[len(fake.discoverArgsForCall)] + fake.discoverArgsForCall = append(fake.discoverArgsForCall, struct { + arg1 lager.Logger + }{arg1}) + fake.recordInvocation("Discover", []interface{}{arg1}) + fake.discoverMutex.Unlock() + if fake.DiscoverStub != nil { + return fake.DiscoverStub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.discoverReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeDiscoverer) DiscoverCallCount() int { + fake.discoverMutex.RLock() + defer fake.discoverMutex.RUnlock() + return len(fake.discoverArgsForCall) +} + +func (fake *FakeDiscoverer) DiscoverCalls(stub func(lager.Logger) (map[string]volman.Plugin, error)) { + fake.discoverMutex.Lock() + defer fake.discoverMutex.Unlock() + fake.DiscoverStub = stub +} + +func (fake *FakeDiscoverer) DiscoverArgsForCall(i int) lager.Logger { + fake.discoverMutex.RLock() + defer fake.discoverMutex.RUnlock() + argsForCall := fake.discoverArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeDiscoverer) DiscoverReturns(result1 map[string]volman.Plugin, result2 error) { + fake.discoverMutex.Lock() + defer fake.discoverMutex.Unlock() + fake.DiscoverStub = nil + fake.discoverReturns = struct { + result1 map[string]volman.Plugin + result2 error + }{result1, result2} +} + +func (fake *FakeDiscoverer) DiscoverReturnsOnCall(i int, result1 map[string]volman.Plugin, result2 error) { + fake.discoverMutex.Lock() + defer 
fake.discoverMutex.Unlock() + fake.DiscoverStub = nil + if fake.discoverReturnsOnCall == nil { + fake.discoverReturnsOnCall = make(map[int]struct { + result1 map[string]volman.Plugin + result2 error + }) + } + fake.discoverReturnsOnCall[i] = struct { + result1 map[string]volman.Plugin + result2 error + }{result1, result2} +} + +func (fake *FakeDiscoverer) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.discoverMutex.RLock() + defer fake.discoverMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeDiscoverer) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ volman.Discoverer = new(FakeDiscoverer) diff --git a/src/code.cloudfoundry.org/volman/volmanfakes/fake_docker_driver_factory.go b/src/code.cloudfoundry.org/volman/volmanfakes/fake_docker_driver_factory.go new file mode 100644 index 0000000000..d88d41b5c4 --- /dev/null +++ b/src/code.cloudfoundry.org/volman/volmanfakes/fake_docker_driver_factory.go @@ -0,0 +1,123 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package volmanfakes + +import ( + sync "sync" + + dockerdriver "code.cloudfoundry.org/dockerdriver" + lager "code.cloudfoundry.org/lager/v3" + voldiscoverers "code.cloudfoundry.org/volman/voldiscoverers" +) + +type FakeDockerDriverFactory struct { + DockerDriverStub func(lager.Logger, string, string, string) (dockerdriver.Driver, error) + dockerDriverMutex sync.RWMutex + dockerDriverArgsForCall []struct { + arg1 lager.Logger + arg2 string + arg3 string + arg4 string + } + dockerDriverReturns struct { + result1 dockerdriver.Driver + result2 error + } + dockerDriverReturnsOnCall map[int]struct { + result1 dockerdriver.Driver + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeDockerDriverFactory) DockerDriver(arg1 lager.Logger, arg2 string, arg3 string, arg4 string) (dockerdriver.Driver, error) { + fake.dockerDriverMutex.Lock() + ret, specificReturn := fake.dockerDriverReturnsOnCall[len(fake.dockerDriverArgsForCall)] + fake.dockerDriverArgsForCall = append(fake.dockerDriverArgsForCall, struct { + arg1 lager.Logger + arg2 string + arg3 string + arg4 string + }{arg1, arg2, arg3, arg4}) + fake.recordInvocation("DockerDriver", []interface{}{arg1, arg2, arg3, arg4}) + fake.dockerDriverMutex.Unlock() + if fake.DockerDriverStub != nil { + return fake.DockerDriverStub(arg1, arg2, arg3, arg4) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.dockerDriverReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeDockerDriverFactory) DockerDriverCallCount() int { + fake.dockerDriverMutex.RLock() + defer fake.dockerDriverMutex.RUnlock() + return len(fake.dockerDriverArgsForCall) +} + +func (fake *FakeDockerDriverFactory) DockerDriverCalls(stub func(lager.Logger, string, string, string) (dockerdriver.Driver, error)) { + fake.dockerDriverMutex.Lock() + defer fake.dockerDriverMutex.Unlock() + fake.DockerDriverStub = stub +} + +func (fake *FakeDockerDriverFactory) DockerDriverArgsForCall(i int) 
(lager.Logger, string, string, string) { + fake.dockerDriverMutex.RLock() + defer fake.dockerDriverMutex.RUnlock() + argsForCall := fake.dockerDriverArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *FakeDockerDriverFactory) DockerDriverReturns(result1 dockerdriver.Driver, result2 error) { + fake.dockerDriverMutex.Lock() + defer fake.dockerDriverMutex.Unlock() + fake.DockerDriverStub = nil + fake.dockerDriverReturns = struct { + result1 dockerdriver.Driver + result2 error + }{result1, result2} +} + +func (fake *FakeDockerDriverFactory) DockerDriverReturnsOnCall(i int, result1 dockerdriver.Driver, result2 error) { + fake.dockerDriverMutex.Lock() + defer fake.dockerDriverMutex.Unlock() + fake.DockerDriverStub = nil + if fake.dockerDriverReturnsOnCall == nil { + fake.dockerDriverReturnsOnCall = make(map[int]struct { + result1 dockerdriver.Driver + result2 error + }) + } + fake.dockerDriverReturnsOnCall[i] = struct { + result1 dockerdriver.Driver + result2 error + }{result1, result2} +} + +func (fake *FakeDockerDriverFactory) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.dockerDriverMutex.RLock() + defer fake.dockerDriverMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeDockerDriverFactory) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ voldiscoverers.DockerDriverFactory = new(FakeDockerDriverFactory) diff --git a/src/code.cloudfoundry.org/volman/volmanfakes/fake_manager_client.go b/src/code.cloudfoundry.org/volman/volmanfakes/fake_manager_client.go new file mode 100644 index 0000000000..d2d72b5aa1 --- /dev/null +++ b/src/code.cloudfoundry.org/volman/volmanfakes/fake_manager_client.go @@ -0,0 +1,281 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
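// FakeManager below is the counterfeiter-generated test double for the volman.Manager
// interface (ListDrivers / Mount / Unmount). A typical wiring in a test looks like, for
// example:
//   fakeManager := new(volmanfakes.FakeManager)
//   fakeManager.MountReturns(volman.MountResponse{Path: "/var/vcap/data/mounts/vol"}, nil)
// (the path is purely illustrative). Regenerate with counterfeiter rather than editing by
// hand, per the header above.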
+package volmanfakes + +import ( + sync "sync" + + lager "code.cloudfoundry.org/lager/v3" + volman "code.cloudfoundry.org/volman" +) + +type FakeManager struct { + ListDriversStub func(lager.Logger) (volman.ListDriversResponse, error) + listDriversMutex sync.RWMutex + listDriversArgsForCall []struct { + arg1 lager.Logger + } + listDriversReturns struct { + result1 volman.ListDriversResponse + result2 error + } + listDriversReturnsOnCall map[int]struct { + result1 volman.ListDriversResponse + result2 error + } + MountStub func(lager.Logger, string, string, string, map[string]interface{}) (volman.MountResponse, error) + mountMutex sync.RWMutex + mountArgsForCall []struct { + arg1 lager.Logger + arg2 string + arg3 string + arg4 string + arg5 map[string]interface{} + } + mountReturns struct { + result1 volman.MountResponse + result2 error + } + mountReturnsOnCall map[int]struct { + result1 volman.MountResponse + result2 error + } + UnmountStub func(lager.Logger, string, string, string) error + unmountMutex sync.RWMutex + unmountArgsForCall []struct { + arg1 lager.Logger + arg2 string + arg3 string + arg4 string + } + unmountReturns struct { + result1 error + } + unmountReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeManager) ListDrivers(arg1 lager.Logger) (volman.ListDriversResponse, error) { + fake.listDriversMutex.Lock() + ret, specificReturn := fake.listDriversReturnsOnCall[len(fake.listDriversArgsForCall)] + fake.listDriversArgsForCall = append(fake.listDriversArgsForCall, struct { + arg1 lager.Logger + }{arg1}) + fake.recordInvocation("ListDrivers", []interface{}{arg1}) + fake.listDriversMutex.Unlock() + if fake.ListDriversStub != nil { + return fake.ListDriversStub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.listDriversReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeManager) ListDriversCallCount() int { + fake.listDriversMutex.RLock() + defer fake.listDriversMutex.RUnlock() + return len(fake.listDriversArgsForCall) +} + +func (fake *FakeManager) ListDriversCalls(stub func(lager.Logger) (volman.ListDriversResponse, error)) { + fake.listDriversMutex.Lock() + defer fake.listDriversMutex.Unlock() + fake.ListDriversStub = stub +} + +func (fake *FakeManager) ListDriversArgsForCall(i int) lager.Logger { + fake.listDriversMutex.RLock() + defer fake.listDriversMutex.RUnlock() + argsForCall := fake.listDriversArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeManager) ListDriversReturns(result1 volman.ListDriversResponse, result2 error) { + fake.listDriversMutex.Lock() + defer fake.listDriversMutex.Unlock() + fake.ListDriversStub = nil + fake.listDriversReturns = struct { + result1 volman.ListDriversResponse + result2 error + }{result1, result2} +} + +func (fake *FakeManager) ListDriversReturnsOnCall(i int, result1 volman.ListDriversResponse, result2 error) { + fake.listDriversMutex.Lock() + defer fake.listDriversMutex.Unlock() + fake.ListDriversStub = nil + if fake.listDriversReturnsOnCall == nil { + fake.listDriversReturnsOnCall = make(map[int]struct { + result1 volman.ListDriversResponse + result2 error + }) + } + fake.listDriversReturnsOnCall[i] = struct { + result1 volman.ListDriversResponse + result2 error + }{result1, result2} +} + +func (fake *FakeManager) Mount(arg1 lager.Logger, arg2 string, arg3 string, arg4 string, arg5 map[string]interface{}) (volman.MountResponse, error) { + fake.mountMutex.Lock() 
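	// Generated fakes follow a uniform pattern: record this call's arguments under the
	// mutex, honour any value registered via MountReturnsOnCall for this call index, and
	// otherwise defer to MountStub (if set via MountCalls or direct assignment) or the
	// default values supplied through MountReturns.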
+	ret, specificReturn := fake.mountReturnsOnCall[len(fake.mountArgsForCall)]
+	fake.mountArgsForCall = append(fake.mountArgsForCall, struct {
+		arg1 lager.Logger
+		arg2 string
+		arg3 string
+		arg4 string
+		arg5 map[string]interface{}
+	}{arg1, arg2, arg3, arg4, arg5})
+	fake.recordInvocation("Mount", []interface{}{arg1, arg2, arg3, arg4, arg5})
+	fake.mountMutex.Unlock()
+	if fake.MountStub != nil {
+		return fake.MountStub(arg1, arg2, arg3, arg4, arg5)
+	}
+	if specificReturn {
+		return ret.result1, ret.result2
+	}
+	fakeReturns := fake.mountReturns
+	return fakeReturns.result1, fakeReturns.result2
+}
+
+func (fake *FakeManager) MountCallCount() int {
+	fake.mountMutex.RLock()
+	defer fake.mountMutex.RUnlock()
+	return len(fake.mountArgsForCall)
+}
+
+func (fake *FakeManager) MountCalls(stub func(lager.Logger, string, string, string, map[string]interface{}) (volman.MountResponse, error)) {
+	fake.mountMutex.Lock()
+	defer fake.mountMutex.Unlock()
+	fake.MountStub = stub
+}
+
+func (fake *FakeManager) MountArgsForCall(i int) (lager.Logger, string, string, string, map[string]interface{}) {
+	fake.mountMutex.RLock()
+	defer fake.mountMutex.RUnlock()
+	argsForCall := fake.mountArgsForCall[i]
+	return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5
+}
+
+func (fake *FakeManager) MountReturns(result1 volman.MountResponse, result2 error) {
+	fake.mountMutex.Lock()
+	defer fake.mountMutex.Unlock()
+	fake.MountStub = nil
+	fake.mountReturns = struct {
+		result1 volman.MountResponse
+		result2 error
+	}{result1, result2}
+}
+
+func (fake *FakeManager) MountReturnsOnCall(i int, result1 volman.MountResponse, result2 error) {
+	fake.mountMutex.Lock()
+	defer fake.mountMutex.Unlock()
+	fake.MountStub = nil
+	if fake.mountReturnsOnCall == nil {
+		fake.mountReturnsOnCall = make(map[int]struct {
+			result1 volman.MountResponse
+			result2 error
+		})
+	}
+	fake.mountReturnsOnCall[i] = struct {
+		result1 volman.MountResponse
+		result2 error
+	}{result1, result2}
+}
+
+func (fake *FakeManager) Unmount(arg1 lager.Logger, arg2 string, arg3 string, arg4 string) error {
+	fake.unmountMutex.Lock()
+	ret, specificReturn := fake.unmountReturnsOnCall[len(fake.unmountArgsForCall)]
+	fake.unmountArgsForCall = append(fake.unmountArgsForCall, struct {
+		arg1 lager.Logger
+		arg2 string
+		arg3 string
+		arg4 string
+	}{arg1, arg2, arg3, arg4})
+	fake.recordInvocation("Unmount", []interface{}{arg1, arg2, arg3, arg4})
+	fake.unmountMutex.Unlock()
+	if fake.UnmountStub != nil {
+		return fake.UnmountStub(arg1, arg2, arg3, arg4)
+	}
+	if specificReturn {
+		return ret.result1
+	}
+	fakeReturns := fake.unmountReturns
+	return fakeReturns.result1
+}
+
+func (fake *FakeManager) UnmountCallCount() int {
+	fake.unmountMutex.RLock()
+	defer fake.unmountMutex.RUnlock()
+	return len(fake.unmountArgsForCall)
+}
+
+func (fake *FakeManager) UnmountCalls(stub func(lager.Logger, string, string, string) error) {
+	fake.unmountMutex.Lock()
+	defer fake.unmountMutex.Unlock()
+	fake.UnmountStub = stub
+}
+
+func (fake *FakeManager) UnmountArgsForCall(i int) (lager.Logger, string, string, string) {
+	fake.unmountMutex.RLock()
+	defer fake.unmountMutex.RUnlock()
+	argsForCall := fake.unmountArgsForCall[i]
+	return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
+}
+
+func (fake *FakeManager) UnmountReturns(result1 error) {
+	fake.unmountMutex.Lock()
+	defer fake.unmountMutex.Unlock()
+	fake.UnmountStub = nil
+	fake.unmountReturns = struct {
+		result1 error
+	}{result1}
+}
+
+func (fake *FakeManager) UnmountReturnsOnCall(i int, result1 error) {
+	fake.unmountMutex.Lock()
+	defer fake.unmountMutex.Unlock()
+	fake.UnmountStub = nil
+	if fake.unmountReturnsOnCall == nil {
+		fake.unmountReturnsOnCall = make(map[int]struct {
+			result1 error
+		})
+	}
+	fake.unmountReturnsOnCall[i] = struct {
+		result1 error
+	}{result1}
+}
+
+func (fake *FakeManager) Invocations() map[string][][]interface{} {
+	fake.invocationsMutex.RLock()
+	defer fake.invocationsMutex.RUnlock()
+	fake.listDriversMutex.RLock()
+	defer fake.listDriversMutex.RUnlock()
+	fake.mountMutex.RLock()
+	defer fake.mountMutex.RUnlock()
+	fake.unmountMutex.RLock()
+	defer fake.unmountMutex.RUnlock()
+	copiedInvocations := map[string][][]interface{}{}
+	for key, value := range fake.invocations {
+		copiedInvocations[key] = value
+	}
+	return copiedInvocations
+}
+
+func (fake *FakeManager) recordInvocation(key string, args []interface{}) {
+	fake.invocationsMutex.Lock()
+	defer fake.invocationsMutex.Unlock()
+	if fake.invocations == nil {
+		fake.invocations = map[string][][]interface{}{}
+	}
+	if fake.invocations[key] == nil {
+		fake.invocations[key] = [][]interface{}{}
+	}
+	fake.invocations[key] = append(fake.invocations[key], args)
+}
+
+var _ volman.Manager = new(FakeManager)
diff --git a/src/code.cloudfoundry.org/volman/volmanfakes/fake_plugin.go b/src/code.cloudfoundry.org/volman/volmanfakes/fake_plugin.go
new file mode 100644
index 0000000000..e6b9af5b40
--- /dev/null
+++ b/src/code.cloudfoundry.org/volman/volmanfakes/fake_plugin.go
@@ -0,0 +1,412 @@
+// Code generated by counterfeiter. DO NOT EDIT.
+package volmanfakes
+
+import (
+	sync "sync"
+
+	lager "code.cloudfoundry.org/lager/v3"
+	volman "code.cloudfoundry.org/volman"
+)
+
+type FakePlugin struct {
+	GetPluginSpecStub        func() volman.PluginSpec
+	getPluginSpecMutex       sync.RWMutex
+	getPluginSpecArgsForCall []struct {
+	}
+	getPluginSpecReturns struct {
+		result1 volman.PluginSpec
+	}
+	getPluginSpecReturnsOnCall map[int]struct {
+		result1 volman.PluginSpec
+	}
+	ListVolumesStub        func(lager.Logger) ([]string, error)
+	listVolumesMutex       sync.RWMutex
+	listVolumesArgsForCall []struct {
+		arg1 lager.Logger
+	}
+	listVolumesReturns struct {
+		result1 []string
+		result2 error
+	}
+	listVolumesReturnsOnCall map[int]struct {
+		result1 []string
+		result2 error
+	}
+	MatchesStub        func(lager.Logger, volman.PluginSpec) bool
+	matchesMutex       sync.RWMutex
+	matchesArgsForCall []struct {
+		arg1 lager.Logger
+		arg2 volman.PluginSpec
+	}
+	matchesReturns struct {
+		result1 bool
+	}
+	matchesReturnsOnCall map[int]struct {
+		result1 bool
+	}
+	MountStub        func(lager.Logger, string, map[string]interface{}) (volman.MountResponse, error)
+	mountMutex       sync.RWMutex
+	mountArgsForCall []struct {
+		arg1 lager.Logger
+		arg2 string
+		arg3 map[string]interface{}
+	}
+	mountReturns struct {
+		result1 volman.MountResponse
+		result2 error
+	}
+	mountReturnsOnCall map[int]struct {
+		result1 volman.MountResponse
+		result2 error
+	}
+	UnmountStub        func(lager.Logger, string) error
+	unmountMutex       sync.RWMutex
+	unmountArgsForCall []struct {
+		arg1 lager.Logger
+		arg2 string
+	}
+	unmountReturns struct {
+		result1 error
+	}
+	unmountReturnsOnCall map[int]struct {
+		result1 error
+	}
+	invocations      map[string][][]interface{}
+	invocationsMutex sync.RWMutex
+}
+
+func (fake *FakePlugin) GetPluginSpec() volman.PluginSpec {
+	fake.getPluginSpecMutex.Lock()
+	ret, specificReturn := fake.getPluginSpecReturnsOnCall[len(fake.getPluginSpecArgsForCall)]
+	fake.getPluginSpecArgsForCall = append(fake.getPluginSpecArgsForCall, struct {
+	}{})
+	fake.recordInvocation("GetPluginSpec", []interface{}{})
+	fake.getPluginSpecMutex.Unlock()
+	if fake.GetPluginSpecStub != nil {
+		return fake.GetPluginSpecStub()
+	}
+	if specificReturn {
+		return ret.result1
+	}
+	fakeReturns := fake.getPluginSpecReturns
+	return fakeReturns.result1
+}
+
+func (fake *FakePlugin) GetPluginSpecCallCount() int {
+	fake.getPluginSpecMutex.RLock()
+	defer fake.getPluginSpecMutex.RUnlock()
+	return len(fake.getPluginSpecArgsForCall)
+}
+
+func (fake *FakePlugin) GetPluginSpecCalls(stub func() volman.PluginSpec) {
+	fake.getPluginSpecMutex.Lock()
+	defer fake.getPluginSpecMutex.Unlock()
+	fake.GetPluginSpecStub = stub
+}
+
+func (fake *FakePlugin) GetPluginSpecReturns(result1 volman.PluginSpec) {
+	fake.getPluginSpecMutex.Lock()
+	defer fake.getPluginSpecMutex.Unlock()
+	fake.GetPluginSpecStub = nil
+	fake.getPluginSpecReturns = struct {
+		result1 volman.PluginSpec
+	}{result1}
+}
+
+func (fake *FakePlugin) GetPluginSpecReturnsOnCall(i int, result1 volman.PluginSpec) {
+	fake.getPluginSpecMutex.Lock()
+	defer fake.getPluginSpecMutex.Unlock()
+	fake.GetPluginSpecStub = nil
+	if fake.getPluginSpecReturnsOnCall == nil {
+		fake.getPluginSpecReturnsOnCall = make(map[int]struct {
+			result1 volman.PluginSpec
+		})
+	}
+	fake.getPluginSpecReturnsOnCall[i] = struct {
+		result1 volman.PluginSpec
+	}{result1}
+}
+
+func (fake *FakePlugin) ListVolumes(arg1 lager.Logger) ([]string, error) {
+	fake.listVolumesMutex.Lock()
+	ret, specificReturn := fake.listVolumesReturnsOnCall[len(fake.listVolumesArgsForCall)]
+	fake.listVolumesArgsForCall = append(fake.listVolumesArgsForCall, struct {
+		arg1 lager.Logger
+	}{arg1})
+	fake.recordInvocation("ListVolumes", []interface{}{arg1})
+	fake.listVolumesMutex.Unlock()
+	if fake.ListVolumesStub != nil {
+		return fake.ListVolumesStub(arg1)
+	}
+	if specificReturn {
+		return ret.result1, ret.result2
+	}
+	fakeReturns := fake.listVolumesReturns
+	return fakeReturns.result1, fakeReturns.result2
+}
+
+func (fake *FakePlugin) ListVolumesCallCount() int {
+	fake.listVolumesMutex.RLock()
+	defer fake.listVolumesMutex.RUnlock()
+	return len(fake.listVolumesArgsForCall)
+}
+
+func (fake *FakePlugin) ListVolumesCalls(stub func(lager.Logger) ([]string, error)) {
+	fake.listVolumesMutex.Lock()
+	defer fake.listVolumesMutex.Unlock()
+	fake.ListVolumesStub = stub
+}
+
+func (fake *FakePlugin) ListVolumesArgsForCall(i int) lager.Logger {
+	fake.listVolumesMutex.RLock()
+	defer fake.listVolumesMutex.RUnlock()
+	argsForCall := fake.listVolumesArgsForCall[i]
+	return argsForCall.arg1
+}
+
+func (fake *FakePlugin) ListVolumesReturns(result1 []string, result2 error) {
+	fake.listVolumesMutex.Lock()
+	defer fake.listVolumesMutex.Unlock()
+	fake.ListVolumesStub = nil
+	fake.listVolumesReturns = struct {
+		result1 []string
+		result2 error
+	}{result1, result2}
+}
+
+func (fake *FakePlugin) ListVolumesReturnsOnCall(i int, result1 []string, result2 error) {
+	fake.listVolumesMutex.Lock()
+	defer fake.listVolumesMutex.Unlock()
+	fake.ListVolumesStub = nil
+	if fake.listVolumesReturnsOnCall == nil {
+		fake.listVolumesReturnsOnCall = make(map[int]struct {
+			result1 []string
+			result2 error
+		})
+	}
+	fake.listVolumesReturnsOnCall[i] = struct {
+		result1 []string
+		result2 error
+	}{result1, result2}
+}
+
+func (fake *FakePlugin) Matches(arg1 lager.Logger, arg2 volman.PluginSpec) bool {
+	fake.matchesMutex.Lock()
+	ret, specificReturn := fake.matchesReturnsOnCall[len(fake.matchesArgsForCall)]
+	fake.matchesArgsForCall = append(fake.matchesArgsForCall, struct {
+		arg1 lager.Logger
+		arg2 volman.PluginSpec
+	}{arg1, arg2})
+	fake.recordInvocation("Matches", []interface{}{arg1, arg2})
+	fake.matchesMutex.Unlock()
+	if fake.MatchesStub != nil {
+		return fake.MatchesStub(arg1, arg2)
+	}
+	if specificReturn {
+		return ret.result1
+	}
+	fakeReturns := fake.matchesReturns
+	return fakeReturns.result1
+}
+
+func (fake *FakePlugin) MatchesCallCount() int {
+	fake.matchesMutex.RLock()
+	defer fake.matchesMutex.RUnlock()
+	return len(fake.matchesArgsForCall)
+}
+
+func (fake *FakePlugin) MatchesCalls(stub func(lager.Logger, volman.PluginSpec) bool) {
+	fake.matchesMutex.Lock()
+	defer fake.matchesMutex.Unlock()
+	fake.MatchesStub = stub
+}
+
+func (fake *FakePlugin) MatchesArgsForCall(i int) (lager.Logger, volman.PluginSpec) {
+	fake.matchesMutex.RLock()
+	defer fake.matchesMutex.RUnlock()
+	argsForCall := fake.matchesArgsForCall[i]
+	return argsForCall.arg1, argsForCall.arg2
+}
+
+func (fake *FakePlugin) MatchesReturns(result1 bool) {
+	fake.matchesMutex.Lock()
+	defer fake.matchesMutex.Unlock()
+	fake.MatchesStub = nil
+	fake.matchesReturns = struct {
+		result1 bool
+	}{result1}
+}
+
+func (fake *FakePlugin) MatchesReturnsOnCall(i int, result1 bool) {
+	fake.matchesMutex.Lock()
+	defer fake.matchesMutex.Unlock()
+	fake.MatchesStub = nil
+	if fake.matchesReturnsOnCall == nil {
+		fake.matchesReturnsOnCall = make(map[int]struct {
+			result1 bool
+		})
+	}
+	fake.matchesReturnsOnCall[i] = struct {
+		result1 bool
+	}{result1}
+}
+
+func (fake *FakePlugin) Mount(arg1 lager.Logger, arg2 string, arg3 map[string]interface{}) (volman.MountResponse, error) {
+	fake.mountMutex.Lock()
+	ret, specificReturn := fake.mountReturnsOnCall[len(fake.mountArgsForCall)]
+	fake.mountArgsForCall = append(fake.mountArgsForCall, struct {
+		arg1 lager.Logger
+		arg2 string
+		arg3 map[string]interface{}
+	}{arg1, arg2, arg3})
+	fake.recordInvocation("Mount", []interface{}{arg1, arg2, arg3})
+	fake.mountMutex.Unlock()
+	if fake.MountStub != nil {
+		return fake.MountStub(arg1, arg2, arg3)
+	}
+	if specificReturn {
+		return ret.result1, ret.result2
+	}
+	fakeReturns := fake.mountReturns
+	return fakeReturns.result1, fakeReturns.result2
+}
+
+func (fake *FakePlugin) MountCallCount() int {
+	fake.mountMutex.RLock()
+	defer fake.mountMutex.RUnlock()
+	return len(fake.mountArgsForCall)
+}
+
+func (fake *FakePlugin) MountCalls(stub func(lager.Logger, string, map[string]interface{}) (volman.MountResponse, error)) {
+	fake.mountMutex.Lock()
+	defer fake.mountMutex.Unlock()
+	fake.MountStub = stub
+}
+
+func (fake *FakePlugin) MountArgsForCall(i int) (lager.Logger, string, map[string]interface{}) {
+	fake.mountMutex.RLock()
+	defer fake.mountMutex.RUnlock()
+	argsForCall := fake.mountArgsForCall[i]
+	return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
+}
+
+func (fake *FakePlugin) MountReturns(result1 volman.MountResponse, result2 error) {
+	fake.mountMutex.Lock()
+	defer fake.mountMutex.Unlock()
+	fake.MountStub = nil
+	fake.mountReturns = struct {
+		result1 volman.MountResponse
+		result2 error
+	}{result1, result2}
+}
+
+func (fake *FakePlugin) MountReturnsOnCall(i int, result1 volman.MountResponse, result2 error) {
+	fake.mountMutex.Lock()
+	defer fake.mountMutex.Unlock()
+	fake.MountStub = nil
+	if fake.mountReturnsOnCall == nil {
+		fake.mountReturnsOnCall = make(map[int]struct {
+			result1 volman.MountResponse
+			result2 error
+		})
+	}
+	fake.mountReturnsOnCall[i] = struct {
+		result1 volman.MountResponse
+		result2 error
+	}{result1, result2}
+}
+
+func (fake *FakePlugin) Unmount(arg1 lager.Logger, arg2 string) error {
+	fake.unmountMutex.Lock()
+	ret, specificReturn := fake.unmountReturnsOnCall[len(fake.unmountArgsForCall)]
+	fake.unmountArgsForCall = append(fake.unmountArgsForCall, struct {
+		arg1 lager.Logger
+		arg2 string
+	}{arg1, arg2})
+	fake.recordInvocation("Unmount", []interface{}{arg1, arg2})
+	fake.unmountMutex.Unlock()
+	if fake.UnmountStub != nil {
+		return fake.UnmountStub(arg1, arg2)
+	}
+	if specificReturn {
+		return ret.result1
+	}
+	fakeReturns := fake.unmountReturns
+	return fakeReturns.result1
+}
+
+func (fake *FakePlugin) UnmountCallCount() int {
+	fake.unmountMutex.RLock()
+	defer fake.unmountMutex.RUnlock()
+	return len(fake.unmountArgsForCall)
+}
+
+func (fake *FakePlugin) UnmountCalls(stub func(lager.Logger, string) error) {
+	fake.unmountMutex.Lock()
+	defer fake.unmountMutex.Unlock()
+	fake.UnmountStub = stub
+}
+
+func (fake *FakePlugin) UnmountArgsForCall(i int) (lager.Logger, string) {
+	fake.unmountMutex.RLock()
+	defer fake.unmountMutex.RUnlock()
+	argsForCall := fake.unmountArgsForCall[i]
+	return argsForCall.arg1, argsForCall.arg2
+}
+
+func (fake *FakePlugin) UnmountReturns(result1 error) {
+	fake.unmountMutex.Lock()
+	defer fake.unmountMutex.Unlock()
+	fake.UnmountStub = nil
+	fake.unmountReturns = struct {
+		result1 error
+	}{result1}
+}
+
+func (fake *FakePlugin) UnmountReturnsOnCall(i int, result1 error) {
+	fake.unmountMutex.Lock()
+	defer fake.unmountMutex.Unlock()
+	fake.UnmountStub = nil
+	if fake.unmountReturnsOnCall == nil {
+		fake.unmountReturnsOnCall = make(map[int]struct {
+			result1 error
+		})
+	}
+	fake.unmountReturnsOnCall[i] = struct {
+		result1 error
+	}{result1}
+}
+
+func (fake *FakePlugin) Invocations() map[string][][]interface{} {
+	fake.invocationsMutex.RLock()
+	defer fake.invocationsMutex.RUnlock()
+	fake.getPluginSpecMutex.RLock()
+	defer fake.getPluginSpecMutex.RUnlock()
+	fake.listVolumesMutex.RLock()
+	defer fake.listVolumesMutex.RUnlock()
+	fake.matchesMutex.RLock()
+	defer fake.matchesMutex.RUnlock()
+	fake.mountMutex.RLock()
+	defer fake.mountMutex.RUnlock()
+	fake.unmountMutex.RLock()
+	defer fake.unmountMutex.RUnlock()
+	copiedInvocations := map[string][][]interface{}{}
+	for key, value := range fake.invocations {
+		copiedInvocations[key] = value
+	}
+	return copiedInvocations
+}
+
+func (fake *FakePlugin) recordInvocation(key string, args []interface{}) {
+	fake.invocationsMutex.Lock()
+	defer fake.invocationsMutex.Unlock()
+	if fake.invocations == nil {
+		fake.invocations = map[string][][]interface{}{}
+	}
+	if fake.invocations[key] == nil {
+		fake.invocations[key] = [][]interface{}{}
+	}
+	fake.invocations[key] = append(fake.invocations[key], args)
+}
+
+var _ volman.Plugin = new(FakePlugin)
diff --git a/src/code.cloudfoundry.org/volman/watchers.xml b/src/code.cloudfoundry.org/volman/watchers.xml
new file mode 100644
index 0000000000..43bbc55cc3
--- /dev/null
+++ b/src/code.cloudfoundry.org/volman/watchers.xml
@@ -0,0 +1,42 @@
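The two fakes above are standard counterfeiter output: every `volman.Manager`/`volman.Plugin` method gets `...Returns`/`...ReturnsOnCall` setters for programming canned results, plus `...CallCount` and `...ArgsForCall` accessors for asserting on recorded invocations, all guarded by per-method mutexes. The sketch below shows how a test might typically drive `FakePlugin`; the test file name, the `lagertest` logger, and the `MountResponse.Path` value are illustrative assumptions and not part of this diff.

```go
// fake_plugin_usage_test.go — illustrative sketch only; not part of this change.
package volmanfakes_test

import (
	"errors"
	"testing"

	"code.cloudfoundry.org/lager/v3/lagertest"
	"code.cloudfoundry.org/volman"
	"code.cloudfoundry.org/volman/volmanfakes"
)

func TestFakePluginMount(t *testing.T) {
	logger := lagertest.NewTestLogger("fake-plugin")
	fakePlugin := new(volmanfakes.FakePlugin)

	// Program a default return and a per-call override for the second invocation.
	fakePlugin.MountReturns(volman.MountResponse{Path: "/var/vcap/data/volumes/vol-1"}, nil)
	fakePlugin.MountReturnsOnCall(1, volman.MountResponse{}, errors.New("mount failed"))

	// First call: receives the default programmed return.
	resp, err := fakePlugin.Mount(logger, "vol-1", map[string]interface{}{"source": "nfs://server/share"})
	if err != nil || resp.Path != "/var/vcap/data/volumes/vol-1" {
		t.Fatalf("unexpected first Mount result: %+v, %v", resp, err)
	}
	// Second call: receives the per-call override.
	if _, err := fakePlugin.Mount(logger, "vol-2", nil); err == nil {
		t.Fatal("expected second Mount call to fail")
	}

	// Assert on what was passed in to the fake.
	if fakePlugin.MountCallCount() != 2 {
		t.Fatalf("expected 2 Mount calls, got %d", fakePlugin.MountCallCount())
	}
	_, volumeID, _ := fakePlugin.MountArgsForCall(0)
	if volumeID != "vol-1" {
		t.Fatalf("expected volume ID vol-1, got %q", volumeID)
	}
}
```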