diff --git a/cmd/minikube/cmd/config/config.go b/cmd/minikube/cmd/config/config.go
index 42fb394f9d7a..554676e5f4fc 100644
--- a/cmd/minikube/cmd/config/config.go
+++ b/cmd/minikube/cmd/config/config.go
@@ -212,6 +212,18 @@ var settings = []Setting{
 		validations: []setFn{IsValidAddon},
 		callbacks:   []setFn{EnableOrDisableAddon},
 	},
+	{
+		name:        "nvidia-driver-installer",
+		set:         SetBool,
+		validations: []setFn{IsValidAddon},
+		callbacks:   []setFn{EnableOrDisableAddon},
+	},
+	{
+		name:        "nvidia-gpu-device-plugin",
+		set:         SetBool,
+		validations: []setFn{IsValidAddon},
+		callbacks:   []setFn{EnableOrDisableAddon},
+	},
 	{
 		name: "hyperv-virtual-switch",
 		set:  SetString,
diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go
index ddddf545d575..99078907ad17 100644
--- a/cmd/minikube/cmd/start.go
+++ b/cmd/minikube/cmd/start.go
@@ -73,6 +73,7 @@ const (
 	uuid       = "uuid"
 	vpnkitSock = "hyperkit-vpnkit-sock"
 	vsockPorts = "hyperkit-vsock-ports"
+	gpu        = "gpu"
 )
 
 var (
@@ -136,6 +137,10 @@ func runStart(cmd *cobra.Command, args []string) {
 		validateK8sVersion(k8sVersion)
 	}
 
+	if viper.GetBool(gpu) && viper.GetString(vmDriver) != "kvm2" {
+		glog.Exitf("--gpu is only supported with --vm-driver=kvm2")
+	}
+
 	config := cfg.MachineConfig{
 		MinikubeISO: viper.GetString(isoURL),
 		Memory:      viper.GetInt(memory),
@@ -157,6 +162,7 @@ func runStart(cmd *cobra.Command, args []string) {
 		Downloader:          pkgutil.DefaultDownloader{},
 		DisableDriverMounts: viper.GetBool(disableDriverMounts),
 		UUID:                viper.GetString(uuid),
+		GPU:                 viper.GetBool(gpu),
 	}
 
 	fmt.Printf("Starting local Kubernetes %s cluster...\n", viper.GetString(kubernetesVersion))
@@ -419,6 +425,7 @@ func init() {
 	startCmd.Flags().String(uuid, "", "Provide VM UUID to restore MAC address (only supported with Hyperkit driver).")
 	startCmd.Flags().String(vpnkitSock, "", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock.")
 	startCmd.Flags().StringSlice(vsockPorts, []string{}, "List of guest VSock ports that should be exposed as sockets on the host (Only supported on with hyperkit now).")
+	startCmd.Flags().Bool(gpu, false, "Enable experimental NVIDIA GPU support in minikube (works only with kvm2 driver on Linux)")
 	viper.BindPFlags(startCmd.Flags())
 	RootCmd.AddCommand(startCmd)
 }
diff --git a/deploy/addons/gpu/nvidia-driver-installer.yaml b/deploy/addons/gpu/nvidia-driver-installer.yaml
new file mode 100644
index 000000000000..ad5f2d0d3a9f
--- /dev/null
+++ b/deploy/addons/gpu/nvidia-driver-installer.yaml
@@ -0,0 +1,76 @@
+# Copyright 2018 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The Dockerfile and other source for this daemonset are in
+# https://github.com/GoogleCloudPlatform/container-engine-accelerators/tree/master/nvidia-driver-installer/minikube
+
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: nvidia-driver-installer
+  namespace: kube-system
+  labels:
+    k8s-app: nvidia-driver-installer
+    kubernetes.io/minikube-addons: nvidia-driver-installer
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  selector:
+    matchLabels:
+      k8s-app: nvidia-driver-installer
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: nvidia-driver-installer
+    spec:
+      tolerations:
+      - key: "nvidia.com/gpu"
+        effect: "NoSchedule"
+        operator: "Exists"
+      volumes:
+      - name: dev
+        hostPath:
+          path: /dev
+      - name: nvidia-install-dir-host
+        hostPath:
+          path: /home/kubernetes/bin/nvidia
+      - name: root-mount
+        hostPath:
+          path: /
+      initContainers:
+      - image: k8s.gcr.io/minikube-nvidia-driver-installer@sha256:85cbeadb8bee62a96079823e81915955af0959063ff522ec01522e4edda28f33
+        name: nvidia-driver-installer
+        resources:
+          requests:
+            cpu: 0.15
+        securityContext:
+          privileged: true
+        env:
+        - name: NVIDIA_INSTALL_DIR_HOST
+          value: /home/kubernetes/bin/nvidia
+        - name: NVIDIA_INSTALL_DIR_CONTAINER
+          value: /usr/local/nvidia
+        - name: ROOT_MOUNT_DIR
+          value: /root
+        volumeMounts:
+        - name: nvidia-install-dir-host
+          mountPath: /usr/local/nvidia
+        - name: dev
+          mountPath: /dev
+        - name: root-mount
+          mountPath: /root
+      containers:
+      - image: "gcr.io/google-containers/pause:2.0"
+        name: pause
diff --git a/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml b/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml
new file mode 100644
index 000000000000..8d403735bc4e
--- /dev/null
+++ b/deploy/addons/gpu/nvidia-gpu-device-plugin.yaml
@@ -0,0 +1,67 @@
+# Copyright 2018 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: nvidia-gpu-device-plugin
+  namespace: kube-system
+  labels:
+    k8s-app: nvidia-gpu-device-plugin
+    kubernetes.io/minikube-addons: nvidia-gpu-device-plugin
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  selector:
+    matchLabels:
+      k8s-app: nvidia-gpu-device-plugin
+  template:
+    metadata:
+      labels:
+        k8s-app: nvidia-gpu-device-plugin
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      priorityClassName: system-node-critical
+      tolerations:
+      - operator: "Exists"
+        effect: "NoExecute"
+      - operator: "Exists"
+        effect: "NoSchedule"
+      volumes:
+      - name: device-plugin
+        hostPath:
+          path: /var/lib/kubelet/device-plugins
+      - name: dev
+        hostPath:
+          path: /dev
+      containers:
+      - image: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
+        command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"]
+        name: nvidia-gpu-device-plugin
+        resources:
+          requests:
+            cpu: 50m
+            memory: 10Mi
+          limits:
+            cpu: 50m
+            memory: 10Mi
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - name: device-plugin
+          mountPath: /device-plugin
+        - name: dev
+          mountPath: /dev
+  updateStrategy:
+    type: RollingUpdate
diff --git a/docs/README.md b/docs/README.md
index 6b36ad3ba3d9..bdfd18a68eb2 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -12,6 +12,8 @@
 
 * **Caching Images** ([cache.md](cache.md)): Caching non-minikube images in minikube
 
+* **GPUs** ([gpu.md](gpu.md)): Using NVIDIA GPUs on minikube
+
 ### Installation and debugging
 
 * **Driver installation** ([drivers.md](drivers.md)): In depth instructions for installing the various hypervisor drivers
diff --git a/docs/addons.md b/docs/addons.md
index cc181e817457..ea38dc9995f1 100644
--- a/docs/addons.md
+++ b/docs/addons.md
@@ -15,6 +15,8 @@ $ minikube addons list
 - ingress: disabled
 - default-storageclass: enabled
 - storage-provisioner: enabled
+- nvidia-driver-installer: disabled
+- nvidia-gpu-device-plugin: disabled
 
 # minikube must be running for these commands to take effect
 $ minikube addons enable heapster
@@ -36,6 +38,8 @@ The currently supported addons include:
 * [CoreDNS](https://github.com/coredns/deployment/tree/master/kubernetes)
 * [Ingress](https://github.com/kubernetes/ingress-nginx)
 * [Freshpod](https://github.com/GoogleCloudPlatform/freshpod)
+* [nvidia-driver-installer](https://github.com/GoogleCloudPlatform/container-engine-accelerators/tree/master/nvidia-driver-installer/minikube)
+* [nvidia-gpu-device-plugin](https://github.com/GoogleCloudPlatform/container-engine-accelerators/tree/master/cmd/nvidia_gpu)
 
 If you would like to have minikube properly start/restart custom addons, place the addon(s) you wish to be launched with minikube in the `.minikube/addons` directory. Addons in this folder will be moved to the minikube VM and launched each time minikube is started/restarted.
diff --git a/docs/gpu.md b/docs/gpu.md
new file mode 100644
index 000000000000..5217a38fdd58
--- /dev/null
+++ b/docs/gpu.md
@@ -0,0 +1,116 @@
+# (Experimental) NVIDIA GPU support in minikube
+
+minikube has experimental support for using NVIDIA GPUs on Linux.
+
+## Using NVIDIA GPUs on minikube on Linux with `--vm-driver=kvm2`
+
+When using NVIDIA GPUs with the kvm2 vm-driver, we pass spare GPUs on the
+host through to the minikube VM. Doing so has a few prerequisites:
+
+- You must install the [kvm2 driver](drivers.md#kvm2-driver).
+  If you already have it installed, make sure that you fetch the latest
+  `docker-machine-driver-kvm2` binary, which has GPU support.
+
+- Your CPU must support IOMMU. Different vendors have different names for this
+  technology: Intel calls it Intel VT-d, and AMD calls it AMD-Vi. Your
+  motherboard must also support IOMMU.
+
+- You must enable IOMMU in the kernel: add `intel_iommu=on` or `amd_iommu=on`
+  (depending on your CPU vendor) to the kernel command line. Also add `iommu=pt`
+  to the kernel command line.
+
+- You must have spare GPUs that are not used on the host and can be passed
+  through to the VM. These GPUs must not be controlled by the nvidia/nouveau
+  driver. You can ensure this by either not loading the nvidia/nouveau driver
+  on the host at all, or by assigning the spare GPU devices to stub kernel
+  modules like `vfio-pci` or `pci-stub` at boot time. You can do that by adding
+  the [vendorId:deviceId](https://pci-ids.ucw.cz/read/PC/10de) of your spare
+  GPU to the kernel command line. For example, for a Quadro M4000, add
+  `pci-stub.ids=10de:13f1` to the kernel command line. Note that you will have
+  to do this for all GPUs you want to pass through to the VM, and for all other
+  devices that are in the IOMMU group of these GPUs.
+
+- Once you reboot the system after doing the above, you should be ready to use
+  GPUs with kvm2. Run the following command to start minikube:
+  ```
+  minikube start --vm-driver kvm2 --gpu
+  ```
+  This command will check that all the above conditions are satisfied and
+  pass the spare GPUs found on the host through to the VM.
+
+  If this succeeds, run the following commands:
+  ```
+  minikube addons enable nvidia-gpu-device-plugin
+  minikube addons enable nvidia-driver-installer
+  ```
+  This will install the NVIDIA driver (which works for GeForce/Quadro cards)
+  on the VM.
+
+- If everything succeeded, you should be able to see `nvidia.com/gpu` in the
+  node's capacity (a sample pod that consumes it is shown below):
+  ```
+  kubectl get nodes -ojson | jq .items[].status.capacity
+  ```
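+
+For example, a pod can then consume a GPU by requesting the `nvidia.com/gpu`
+resource in its limits. The following manifest is a minimal smoke-test sketch;
+the pod name and the CUDA image are illustrative, not something these addons
+provide:
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: gpu-smoke-test
+spec:
+  restartPolicy: Never
+  containers:
+  - name: cuda
+    image: nvidia/cuda:9.0-base   # illustrative; any CUDA-capable image works
+    command: ["nvidia-smi"]
+    resources:
+      limits:
+        nvidia.com/gpu: 1         # schedules only onto nodes exposing a GPU
+```
+
+If the device plugin is healthy, `kubectl logs gpu-smoke-test` should print
+`nvidia-smi` output listing the passed-through GPU.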
+
+### Where can I learn more about GPU passthrough?
+See the excellent documentation at
+https://wiki.archlinux.org/index.php/PCI_passthrough_via_OVMF
+
+### Why are so many manual steps required to use GPUs with kvm2 on minikube?
+These steps require elevated privileges, which minikube doesn't run with, and
+they are disruptive to the host, so we decided not to do them automatically.
+
+
+## Using NVIDIA GPUs on minikube on Linux with `--vm-driver=none`
+
+NOTE: The approach used to expose GPUs here is different from the approach
+used to expose GPUs with `--vm-driver=kvm2`. Please don't mix these
+instructions.
+
+- Install minikube.
+
+- Install the NVIDIA driver and nvidia-docker, and configure Docker with
+  nvidia as the default runtime. See instructions at
+  https://github.com/NVIDIA/nvidia-docker
+
+- Start minikube:
+  ```
+  minikube start --vm-driver=none --apiserver-ips 127.0.0.1 --apiserver-name localhost
+  ```
+
+- Install NVIDIA's device plugin:
+  ```
+  kubectl create -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v1.10/nvidia-device-plugin.yml
+  ```
+
+
+## Why does minikube not support NVIDIA GPUs on macOS?
+The VM drivers supported by minikube on macOS don't support GPU passthrough:
+- [mist64/xhyve#108](https://github.com/mist64/xhyve/issues/108)
+- [moby/hyperkit#159](https://github.com/moby/hyperkit/issues/159)
+- [VirtualBox docs](http://www.virtualbox.org/manual/ch09.html#pcipassthrough)
+
+Also:
+- For quite a while, all Mac hardware (both laptops and desktops) has come
+  with Intel or AMD GPUs (not NVIDIA GPUs). Recently, Apple added [support
+  for eGPUs](https://support.apple.com/en-us/HT208544), but even then all the
+  supported GPUs listed are AMD's.
+
+- nvidia-docker [doesn't support
+  macOS](https://github.com/NVIDIA/nvidia-docker/issues/101) either.
+
+
+## Why does minikube not support NVIDIA GPUs on Windows?
+minikube supports Windows hosts through Hyper-V or VirtualBox.
+
+- VirtualBox doesn't support PCI passthrough for [Windows
+  hosts](http://www.virtualbox.org/manual/ch09.html#pcipassthrough).
+
+- Hyper-V supports DDA (discrete device assignment), but [only for Windows
+  Server 2016](https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/plan/plan-for-deploying-devices-using-discrete-device-assignment).
+
+Since the only possibility of supporting GPUs on minikube on Windows is on a
+server OS, where users don't usually run minikube, we haven't invested time in
+trying to support NVIDIA GPUs on minikube on Windows.
+
+Also, nvidia-docker [doesn't support
+Windows](https://github.com/NVIDIA/nvidia-docker/issues/197) either.
diff --git a/pkg/drivers/kvm/domain.go b/pkg/drivers/kvm/domain.go
index efa3101c06c0..381b1610ccf6 100644
--- a/pkg/drivers/kvm/domain.go
+++ b/pkg/drivers/kvm/domain.go
@@ -74,6 +74,9 @@ const domainTmpl = `
     <rng model='virtio'>
       <backend model='random'>/dev/random</backend>
     </rng>
+    {{if .GPU}}
+    {{.DevicesXML}}
+    {{end}}
   </devices>
 </domain>
 `
diff --git a/pkg/drivers/kvm/gpu.go b/pkg/drivers/kvm/gpu.go
new file mode 100644
index 000000000000..301e0fc3dab5
--- /dev/null
+++ b/pkg/drivers/kvm/gpu.go
@@ -0,0 +1,208 @@
+/*
+Copyright 2018 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kvm
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"text/template"
+
+	"github.com/docker/machine/libmachine/log"
+)
+
+var sysFsPCIDevicesPath = "/sys/bus/pci/devices/"
+var sysKernelIOMMUGroupsPath = "/sys/kernel/iommu_groups/"
+
+const nvidiaVendorId = "0x10de"
+
+const devicesTmpl = `
+{{range .}}
+<hostdev mode='subsystem' type='pci' managed='yes'>
+  <source>
+    <address domain='{{.Domain}}' bus='{{.Bus}}' slot='{{.Slot}}' function='{{.Function}}'/>
+  </source>
+</hostdev>
+{{end}}
+`
+
+type PCIDevice struct {
+	Domain   string
+	Bus      string
+	Slot     string
+	Function string
+}
+
+// getDevicesXML returns the XML that can be added to the libvirt domain XML
+// to pass NVIDIA devices through to the VM.
+func getDevicesXML() (string, error) {
+	unboundNVIDIADevices, err := getPassthroughableNVIDIADevices()
+	if err != nil {
+		return "", fmt.Errorf("couldn't generate devices XML: %v", err)
+	}
+	var pciDevices []PCIDevice
+	for _, device := range unboundNVIDIADevices {
+		// A PCI device address looks like 0000:03:00.1 (domain:bus:slot.function).
+		splits := strings.Split(device, ":")
+		if len(splits) != 3 {
+			log.Infof("Error while parsing PCI device %q. Not splittable into domain:bus:slot.function.", device)
+			continue
+		}
+		parts := strings.Split(splits[2], ".")
+		if len(parts) != 2 {
+			log.Infof("Error while parsing PCI device %q. Not splittable into domain:bus:slot.function.", device)
+			continue
+		}
+		pciDevice := PCIDevice{
+			Domain:   "0x" + splits[0],
+			Bus:      "0x" + splits[1],
+			Slot:     "0x" + parts[0],
+			Function: "0x" + parts[1],
+		}
+		pciDevices = append(pciDevices, pciDevice)
+	}
+	if len(pciDevices) == 0 {
+		return "", fmt.Errorf("couldn't generate devices XML: parsing failed")
+	}
+	tmpl := template.Must(template.New("").Parse(devicesTmpl))
+	var devicesXML bytes.Buffer
+	if err := tmpl.Execute(&devicesXML, pciDevices); err != nil {
+		return "", fmt.Errorf("couldn't generate devices XML: %v", err)
+	}
+	return devicesXML.String(), nil
+}
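+
+// For illustration, a hypothetical isolated device "0000:03:00.0" is parsed
+// into PCIDevice{Domain: "0x0000", Bus: "0x03", Slot: "0x00", Function: "0x0"}
+// and rendered by devicesTmpl as:
+//
+//   <hostdev mode='subsystem' type='pci' managed='yes'>
+//     <source>
+//       <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
+//     </source>
+//   </hostdev>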
+
+// getPassthroughableNVIDIADevices returns a list of NVIDIA devices that can be
+// passed through from the host to a VM. It returns an error if:
+// - the host doesn't support PCI passthrough (IOMMU).
+// - there are no passthroughable NVIDIA devices on the host.
+func getPassthroughableNVIDIADevices() ([]string, error) {
+
+	// Make sure the host supports IOMMU.
+	iommuGroups, err := ioutil.ReadDir(sysKernelIOMMUGroupsPath)
+	if err != nil {
+		return []string{}, fmt.Errorf("error reading %q: %v", sysKernelIOMMUGroupsPath, err)
+	}
+	if len(iommuGroups) == 0 {
+		return []string{}, fmt.Errorf("no IOMMU groups found at %q. Make sure your host supports IOMMU. See instructions at https://github.com/kubernetes/minikube/blob/master/docs/gpu.md", sysKernelIOMMUGroupsPath)
+	}
+
+	// Get the list of PCI devices.
+	devices, err := ioutil.ReadDir(sysFsPCIDevicesPath)
+	if err != nil {
+		return []string{}, fmt.Errorf("error reading %q: %v", sysFsPCIDevicesPath, err)
+	}
+
+	unboundNVIDIADevices := make(map[string]bool)
+	found := false
+	for _, device := range devices {
+		vendorPath := filepath.Join(sysFsPCIDevicesPath, device.Name(), "vendor")
+		content, err := ioutil.ReadFile(vendorPath)
+		if err != nil {
+			log.Infof("Error while reading %q: %v", vendorPath, err)
+			continue
+		}
+
+		// Check if this is an NVIDIA device.
+		if strings.EqualFold(strings.TrimSpace(string(content)), nvidiaVendorId) {
+			log.Infof("Found device %v with NVIDIA's vendorId %v", device.Name(), nvidiaVendorId)
+			found = true
+
+			// Check whether it's unbound. We don't want the device to be bound to nvidia/nouveau etc.
+			if isUnbound(device.Name()) {
+				// Add the unbound device to the map. The value is set to false initially;
+				// it will be set to true later if the device is also isolated.
+				unboundNVIDIADevices[device.Name()] = false
+			}
+		}
+	}
+	if !found {
+		return []string{}, fmt.Errorf("no NVIDIA devices found")
+	}
+	if len(unboundNVIDIADevices) == 0 {
+		return []string{}, fmt.Errorf("some NVIDIA devices were found but none of them were unbound. See instructions at https://github.com/kubernetes/minikube/blob/master/docs/gpu.md")
+	}
+
+	// Make sure all the unbound devices are in IOMMU groups that only contain unbound devices.
+	for device := range unboundNVIDIADevices {
+		unboundNVIDIADevices[device] = isIsolated(device)
+	}
+
+	isolatedNVIDIADevices := make([]string, 0, len(unboundNVIDIADevices))
+	for unboundNVIDIADevice, isIsolated := range unboundNVIDIADevices {
+		if isIsolated {
+			isolatedNVIDIADevices = append(isolatedNVIDIADevices, unboundNVIDIADevice)
+		}
+	}
+	if len(isolatedNVIDIADevices) == 0 {
+		return []string{}, fmt.Errorf("some unbound NVIDIA devices were found but they had other devices in their IOMMU group that were bound. See instructions at https://github.com/kubernetes/minikube/blob/master/docs/gpu.md")
+	}
+
+	return isolatedNVIDIADevices, nil
+}
+
+// isIsolated returns true if the device is in an IOMMU group that only consists of unbound devices.
+// The input device is expected to be a string like 0000:03:00.1 (Domain:Bus:Slot.Function).
+func isIsolated(device string) bool {
+	// Find the other devices in the same IOMMU group as our unbound device.
+	iommuGroupPath := filepath.Join(sysFsPCIDevicesPath, device, "iommu_group", "devices")
+	otherDevices, err := ioutil.ReadDir(iommuGroupPath)
+	if err != nil {
+		log.Infof("Error reading %q: %v", iommuGroupPath, err)
+		return false
+	}
+
+	for _, otherDevice := range otherDevices {
+		// Check if the other device in the IOMMU group is unbound.
+		if isUnbound(otherDevice.Name()) {
+			continue
+		}
+		// If any other device in the IOMMU group is bound to a non-stub driver,
+		// then our device is not isolated and cannot be safely passed through.
+		return false
+	}
+	return true
+}
+
+// isUnbound returns true if the device is not bound to any driver or if it's
+// bound to a stub driver like pci-stub or vfio-pci.
+// The input device is expected to be a string like 0000:03:00.1 (Domain:Bus:Slot.Function).
+func isUnbound(device string) bool {
+	modulePath, err := filepath.EvalSymlinks(filepath.Join(sysFsPCIDevicesPath, device, "driver", "module"))
+	if os.IsNotExist(err) {
+		log.Infof("%v is not bound to any driver", device)
+		return true
+	}
+	module := filepath.Base(modulePath)
+	if module == "pci_stub" || module == "vfio_pci" {
+		log.Infof("%v is bound to a stub module: %v", device, module)
+		return true
+	}
+	log.Infof("%v is bound to a non-stub module: %v", device, module)
+	return false
+}
diff --git a/pkg/drivers/kvm/kvm.go b/pkg/drivers/kvm/kvm.go
index 5bdc6475749e..2e30ef7efe25 100644
--- a/pkg/drivers/kvm/kvm.go
+++ b/pkg/drivers/kvm/kvm.go
@@ -65,6 +65,12 @@ type Driver struct {
 	// The randomly generated MAC Address
 	// If empty, a random MAC will be generated.
 	MAC string
+
+	// Whether to pass through GPU devices from the host to the VM.
+	GPU bool
+
+	// XML that needs to be added to pass through GPU devices.
+	DevicesXML string
 }
 
 const (
@@ -256,6 +262,13 @@ func (d *Driver) Create() error {
 	if err != nil {
 		return errors.Wrap(err, "creating network")
 	}
+	if d.GPU {
+		log.Info("Creating devices...")
+		d.DevicesXML, err = getDevicesXML()
+		if err != nil {
+			return errors.Wrap(err, "creating devices")
+		}
+	}
 
 	log.Info("Setting up minikube home directory...")
 	if err := os.MkdirAll(d.ResolveStorePath("."), 0755); err != nil {
diff --git a/pkg/minikube/assets/addons.go b/pkg/minikube/assets/addons.go
index e0e8fb4c31e1..cf3d7b214361 100644
--- a/pkg/minikube/assets/addons.go
+++ b/pkg/minikube/assets/addons.go
@@ -257,6 +257,20 @@ var Addons = map[string]*Addon{
 			"freshpod-rc.yaml",
 			"0640"),
 	}, false, "freshpod"),
+	"nvidia-driver-installer": NewAddon([]*BinDataAsset{
+		NewBinDataAsset(
+			"deploy/addons/gpu/nvidia-driver-installer.yaml",
+			constants.AddonsPath,
+			"nvidia-driver-installer.yaml",
+			"0640"),
+	}, false, "nvidia-driver-installer"),
+	"nvidia-gpu-device-plugin": NewAddon([]*BinDataAsset{
+		NewBinDataAsset(
+			"deploy/addons/gpu/nvidia-gpu-device-plugin.yaml",
+			constants.AddonsPath,
+			"nvidia-gpu-device-plugin.yaml",
+			"0640"),
+	}, false, "nvidia-gpu-device-plugin"),
 }
 
 func AddMinikubeDirAssets(assets *[]CopyableFile) error {
diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go
index 957454351d3a..332810f7d0d4 100644
--- a/pkg/minikube/config/types.go
+++ b/pkg/minikube/config/types.go
@@ -50,6 +50,7 @@ type MachineConfig struct {
 	NFSShare            []string
 	NFSSharesRoot       string
 	UUID                string // Only used by hyperkit to restore the mac address
+	GPU                 bool   // Only used by kvm2
 }
 
 // KubernetesConfig contains the parameters used to configure the VM Kubernetes.
diff --git a/pkg/minikube/drivers/kvm2/driver.go b/pkg/minikube/drivers/kvm2/driver.go
index 3c101ce60b65..d8fd3d136e7d 100644
--- a/pkg/minikube/drivers/kvm2/driver.go
+++ b/pkg/minikube/drivers/kvm2/driver.go
@@ -49,8 +49,7 @@ type kvmDriver struct {
 	ISO            string
 	Boot2DockerURL string
 	DiskPath       string
-	CacheMode      string
-	IOMode         string
+	GPU            bool
 }
 
 func createKVM2Host(config cfg.MachineConfig) interface{} {
@@ -68,7 +67,6 @@ func createKVM2Host(config cfg.MachineConfig) interface{} {
 		DiskSize:       config.DiskSize,
 		DiskPath:       filepath.Join(constants.GetMinipath(), "machines", cfg.GetMachineName(), fmt.Sprintf("%s.rawdisk", cfg.GetMachineName())),
 		ISO:            filepath.Join(constants.GetMinipath(), "machines", cfg.GetMachineName(), "boot2docker.iso"),
-		CacheMode:      "default",
-		IOMode:         "threads",
+		GPU:            config.GPU,
 	}
 }