From e862aa54854d2fc265d1254a38b70afda71e0591 Mon Sep 17 00:00:00 2001 From: Andrew Burden Date: Wed, 18 Nov 2020 18:31:58 +0100 Subject: [PATCH] This is a pretty crazy test and I'll be surprised as anyone if it worked --- _topic_map.yml | 28 ++++++------- modules/virt-about-block-pvs.adoc | 8 ++-- modules/virt-about-collecting-virt-data.adoc | 2 +- modules/virt-about-conditions-and-events.adoc | 4 +- modules/virt-about-container-disks.adoc | 10 ++--- modules/virt-about-datavolumes.adoc | 8 ++-- modules/virt-about-hostpath-provisioner.adoc | 8 ++-- .../virt-about-liveness-readiness-probes.adoc | 10 ++--- ...about-storage-setting-for-datavolumes.adoc | 18 ++++---- modules/virt-about-upgrading-virt.adoc | 6 +-- modules/virt-about-vm-snapshots.adoc | 2 +- modules/virt-about-vmis.adoc | 2 +- modules/virt-accessing-vmi-ssh.adoc | 4 +- modules/virt-add-boot-order-web.adoc | 2 +- modules/virt-add-disk-to-vm.adoc | 2 +- ...ecret-configmap-service-account-to-vm.adoc | 14 +++---- ...ficates-for-authenticating-dv-imports.adoc | 14 +++---- ...dditional-scc-for-kubevirt-controller.adoc | 6 +-- ...zing-datavolume-conditions-and-events.adoc | 16 ++++---- .../virt-attaching-vm-to-sriov-network.adoc | 6 +-- modules/virt-checking-storage-class.adoc | 4 +- modules/virt-cloning-a-datavolume.adoc | 22 +++++----- ...-cloning-local-volume-to-another-node.adoc | 14 +++---- ...ng-pvc-of-vm-disk-into-new-datavolume.adoc | 30 +++++++------- ...ing-configmap-for-obsolete-cpu-models.adoc | 6 +-- .../virt-configuring-masquerade-mode-cli.adoc | 4 +- ...irt-configuring-selinux-hpp-on-rhcos8.adoc | 6 +-- ...rt-confirming-policy-updates-on-nodes.adoc | 16 ++++---- ...ting-a-service-from-a-virtual-machine.adoc | 2 +- ...creating-a-vm-from-a-default-os-image.adoc | 2 +- modules/virt-creating-an-upload-dv.adoc | 12 +++--- .../virt-creating-blank-disk-datavolumes.adoc | 10 ++--- modules/virt-creating-bridge-nad-cli.adoc | 21 +++++----- modules/virt-creating-bridge-nad-web.adoc | 6 +-- modules/virt-creating-configmap.adoc | 20 ++++----- modules/virt-creating-interface-on-nodes.adoc | 8 ++-- modules/virt-creating-local-block-pv.adoc | 12 +++--- ...m-cloned-pvc-using-datavolumetemplate.adoc | 20 ++++----- ...ating-pvcs-to-store-default-os-images.adoc | 2 +- modules/virt-creating-rbac-cloning-dvs.adoc | 28 ++++++------- modules/virt-creating-storage-class.adoc | 10 ++--- modules/virt-creating-vddk-image.adoc | 4 +- modules/virt-creating-vm-wizard-web.adoc | 2 +- modules/virt-creating-vm.adoc | 2 +- modules/virt-define-http-liveness-probe.adoc | 7 ++-- modules/virt-define-readiness-probe.adoc | 2 +- modules/virt-define-tcp-liveness-probe.adoc | 7 ++-- ...ing-storageclass-in-cdi-configuration.adoc | 8 ++-- modules/virt-delete-vm-web.adoc | 2 +- ...t-deleting-deployment-custom-resource.adoc | 15 +++---- modules/virt-deleting-dvs.adoc | 8 ++-- modules/virt-deleting-virt-cli.adoc | 6 +-- modules/virt-deleting-vms.adoc | 2 +- modules/virt-deploying-operator-cli.adoc | 2 +- modules/virt-disabling-tls-for-registry.adoc | 4 +- modules/virt-edit-boot-order-yaml-web.adoc | 4 +- ...ting-kubevirtstorageclassdefaults-cli.adoc | 14 +++---- ...ting-kubevirtstorageclassdefaults-web.adoc | 8 ++-- ...example-ansible-playbook-creating-vms.adoc | 2 +- modules/virt-example-bond-nncp.adoc | 6 +-- modules/virt-example-bridge-nncp.adoc | 6 +-- ...irt-example-configmap-tls-certificate.adoc | 4 +- modules/virt-example-ethernet-nncp.adoc | 6 +-- ...-example-kubevirtstorageclassdefaults.adoc | 4 +- .../virt-example-nmstate-IP-management.adoc | 2 +- 
...t-example-nmstate-multiple-interfaces.adoc | 4 +- modules/virt-example-vlan-nncp.adoc | 6 +-- modules/virt-importing-vm-cli.adoc | 20 ++++----- modules/virt-importing-vm-datavolume.adoc | 26 +++++------- modules/virt-importing-vm-to-block-pv.adoc | 26 ++++-------- modules/virt-importing-vm-wizard.adoc | 2 +- modules/virt-initiating-vm-migration-cli.adoc | 12 ++---- modules/virt-listing-dvs.adoc | 6 +-- modules/virt-monitoring-upgrade-status.adoc | 2 +- modules/virt-monitoring-vm-migration-cli.adoc | 3 +- modules/virt-networking-glossary.adoc | 23 ++++------- .../virt-networking-wizard-fields-web.adoc | 10 ++--- modules/virt-openshift-client-commands.adoc | 5 +-- ...virt-preparing-container-disk-for-vms.adoc | 2 +- .../virt-pxe-booting-with-mac-address.adoc | 38 ++++++----------- .../virt-removing-interface-from-nodes.adoc | 12 +++--- ...g-secret-configmap-service-account-vm.adoc | 8 ++-- ...-restoring-node-network-configuration.adoc | 2 +- .../virt-setting-node-maintenance-cli.adoc | 2 +- modules/virt-storage-wizard-fields-web.adoc | 10 ++--- .../virt-template-blank-disk-datavolume.adoc | 4 +- modules/virt-template-datavolume-clone.adoc | 4 +- modules/virt-template-datavolume-import.adoc | 4 +- modules/virt-template-datavolume-vm.adoc | 4 +- modules/virt-template-vm-config.adoc | 2 +- modules/virt-template-vmi-pxe-config.adoc | 2 +- ...oubleshooting-incorrect-policy-config.adoc | 41 +++++++++---------- modules/virt-troubleshooting-vm-import.adoc | 32 +++++++-------- .../virt-understanding-live-migration.adoc | 2 +- modules/virt-understanding-logs.adoc | 8 ++-- ...ing-node-labeling-obsolete-cpu-models.adoc | 4 +- .../virt-understanding-node-maintenance.adoc | 2 +- modules/virt-understanding-scratch-space.adoc | 30 +++++--------- modules/virt-understanding-smart-cloning.adoc | 4 +- ...dating-access-mode-for-live-migration.adoc | 4 +- modules/virt-uploading-image-web.adoc | 2 +- .../virt-uploading-local-disk-image-dv.adoc | 16 ++++---- .../virt-uploading-local-disk-image-pvc.adoc | 16 +++----- modules/virt-using-hostpath-provisioner.adoc | 10 ++--- .../virt-viewing-network-state-of-node.adoc | 4 +- modules/virt-viewing-resource-events-cli.adoc | 2 +- ...virt-viewing-virtual-machine-logs-web.adoc | 2 +- modules/virt-virtctl-commands.adoc | 4 +- modules/virt-vm-storage-volume-types.adoc | 4 +- virt/install/uninstalling-virt-cli.adoc | 2 +- virt/install/uninstalling-virt-web.adoc | 2 +- virt/live_migration/virt-live-migration.adoc | 6 +-- ...tavolumes-using-events-and-conditions.adoc | 4 +- ...ing-node-labeling-obsolete-cpu-models.adoc | 2 +- .../virt-troubleshooting-node-network.adoc | 2 +- .../virt-updating-node-network-config.adoc | 8 ++-- virt/upgrading-virt.adoc | 2 +- ...ty-privileges-controller-and-launcher.adoc | 2 +- ...ing-vm-disk-into-new-datavolume-block.adoc | 8 ++-- ...t-cloning-vm-disk-into-new-datavolume.adoc | 8 ++-- ...t-cloning-vm-using-datavolumetemplate.adoc | 8 ++-- ...user-permissions-to-clone-datavolumes.adoc | 8 ++-- ...tual-machine-images-datavolumes-block.adoc | 8 ++-- ...ng-virtual-machine-images-datavolumes.adoc | 12 +++--- .../virt-importing-vmware-vm.adoc | 2 +- .../virt-tls-certificates-for-dv-imports.adoc | 2 +- virt/virtual_machines/virt-create-vms.adoc | 4 +- .../virt-edit-boot-order.adoc | 2 +- virt/virtual_machines/virt-edit-vms.adoc | 4 +- ...g-configmaps-secrets-service-accounts.adoc | 13 +++--- ...ning-a-datavolume-using-smart-cloning.adoc | 6 +-- .../virt-deleting-datavolumes.adoc | 6 +-- ...oving-local-vm-disk-to-different-node.adoc | 2 +- 
.../virt-preparing-cdi-scratch-space.adoc | 2 +- ...virt-storage-defaults-for-datavolumes.adoc | 4 +- ...irt-uploading-local-disk-images-block.adoc | 12 +++--- ...t-uploading-local-disk-images-virtctl.adoc | 4 +- .../virt-uploading-local-disk-images-web.adoc | 2 +- .../virt-using-container-disks-with-vms.adoc | 2 +- .../virt-attaching-vm-multiple-networks.adoc | 10 ++--- ...ing-the-default-pod-network-with-virt.adoc | 6 +-- .../virt-viewing-ip-of-vm-nic.adoc | 2 +- 142 files changed, 538 insertions(+), 607 deletions(-) diff --git a/_topic_map.yml b/_topic_map.yml index 13ec79a6add9..18223e458051 100644 --- a/_topic_map.yml +++ b/_topic_map.yml @@ -2337,7 +2337,7 @@ Topics: File: virt-installing-qemu-guest-agent - Name: Viewing the QEMU guest agent information for virtual machines File: virt-viewing-qemu-guest-agent-web - - Name: Managing ConfigMaps, secrets, and service accounts in virtual machines + - Name: Managing config maps, secrets, and service accounts in virtual machines File: virt-managing-configmaps-secrets-service-accounts - Name: Installing VirtIO driver on an existing Windows virtual machine File: virt-installing-virtio-drivers-on-existing-windows-vm @@ -2363,11 +2363,11 @@ Topics: - Name: Importing virtual machines Dir: importing_vms Topics: - - Name: TLS certificates for DataVolume imports + - Name: TLS certificates for data volume imports File: virt-tls-certificates-for-dv-imports - - Name: Importing virtual machine images with DataVolumes + - Name: Importing virtual machine images with data volumes File: virt-importing-virtual-machine-images-datavolumes - - Name: Importing virtual machine images to block storage with DataVolumes + - Name: Importing virtual machine images to block storage with data volumes File: virt-importing-virtual-machine-images-datavolumes-block - Name: Importing a Red Hat Virtualization virtual machine File: virt-importing-rhv-vm @@ -2377,19 +2377,19 @@ Topics: - Name: Cloning virtual machines Dir: cloning_vms Topics: - - Name: Enabling user permissions to clone DataVolumes across namespaces + - Name: Enabling user permissions to clone data volumes across namespaces File: virt-enabling-user-permissions-to-clone-datavolumes - - Name: Cloning a virtual machine disk into a new DataVolume + - Name: Cloning a virtual machine disk into a new data volume File: virt-cloning-vm-disk-into-new-datavolume - - Name: Cloning a virtual machine by using a DataVolumeTemplate + - Name: Cloning a virtual machine by using a data volume template File: virt-cloning-vm-using-datavolumetemplate - - Name: Cloning a virtual machine disk into a new block storage DataVolume + - Name: Cloning a virtual machine disk into a new block storage data volume File: virt-cloning-vm-disk-into-new-datavolume-block # Virtual machine networking - Name: Virtual machine networking Dir: vm_networking Topics: - - Name: Using the default Pod network with OpenShift Virtualization + - Name: Using the default pod network with OpenShift Virtualization File: virt-using-the-default-pod-network-with-virt - Name: Attaching a virtual machine to multiple networks File: virt-attaching-vm-multiple-networks @@ -2417,7 +2417,7 @@ Topics: File: virt-uploading-local-disk-images-web - Name: Uploading local disk images by using the virtctl tool File: virt-uploading-local-disk-images-virtctl - - Name: Uploading a local disk image to a block storage DataVolume + - Name: Uploading a local disk image to a block storage data volume File: virt-uploading-local-disk-images-block - Name: Managing offline virtual machine 
snapshots File: virt-managing-offline-vm-snapshots @@ -2425,9 +2425,9 @@ Topics: File: virt-moving-local-vm-disk-to-different-node - Name: Expanding virtual storage by adding blank disk images File: virt-expanding-virtual-storage-with-blank-disk-images - - Name: Cloning a DataVolume using smart-cloning + - Name: Cloning a data volume using smart-cloning File: virt-cloning-a-datavolume-using-smart-cloning - - Name: Storage defaults for DataVolumes + - Name: Storage defaults for data volumes File: virt-storage-defaults-for-datavolumes - Name: Creating and using default OS images File: virt-creating-and-using-default-os-images @@ -2437,7 +2437,7 @@ Topics: File: virt-preparing-cdi-scratch-space - Name: Re-using statically provisioned persistent volumes File: virt-reusing-statically-provisioned-persistent-volumes - - Name: Deleting DataVolumes + - Name: Deleting data volumes File: virt-deleting-datavolumes # Templates - Name: Virtual machine templates @@ -2499,7 +2499,7 @@ Topics: File: virt-logs - Name: Viewing events File: virt-events - - Name: Diagnosing DataVolumes using events and conditions + - Name: Diagnosing data volumes using events and conditions File: virt-diagnosing-datavolumes-using-events-and-conditions - Name: Viewing information about virtual machine workloads File: virt-viewing-information-about-vm-workloads diff --git a/modules/virt-about-block-pvs.adoc b/modules/virt-about-block-pvs.adoc index 8062e96b3e78..0a64cb845622 100644 --- a/modules/virt-about-block-pvs.adoc +++ b/modules/virt-about-block-pvs.adoc @@ -5,11 +5,11 @@ // * virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes-block.adoc [id="virt-about-block-pvs_{context}"] -= About block PersistentVolumes += About block persistent volumes -A block PersistentVolume (PV) is a PV that is backed by a raw block device. These volumes -do not have a filesystem and can provide performance benefits for +A block persistent volume (PV) is a PV that is backed by a raw block device. These volumes +do not have a file system and can provide performance benefits for virtual machines by reducing overhead. Raw block volumes are provisioned by specifying `volumeMode: Block` in the -PV and PersistentVolumeClaim (PVC) specification. +PV and persistent volume claim (PVC) specification. 
diff --git a/modules/virt-about-collecting-virt-data.adoc b/modules/virt-about-collecting-virt-data.adoc index 4658e8fb7f3c..1f0e61a29754 100644 --- a/modules/virt-about-collecting-virt-data.adoc +++ b/modules/virt-about-collecting-virt-data.adoc @@ -13,7 +13,7 @@ cluster, including features and objects associated with {VirtProductName}: * The Hyperconverged Cluster Operator namespaces (and child objects) * All namespaces (and their child objects) that belong to any {VirtProductName} resources -* All {VirtProductName} Custom Resource Definitions (CRDs) +* All {VirtProductName} custom resource definitions (CRDs) * All namespaces that contain virtual machines * All virtual machine definitions diff --git a/modules/virt-about-conditions-and-events.adoc b/modules/virt-about-conditions-and-events.adoc index 8324a44b3c91..f5cf3cf1b299 100644 --- a/modules/virt-about-conditions-and-events.adoc +++ b/modules/virt-about-conditions-and-events.adoc @@ -5,7 +5,7 @@ [id="virt-about-conditions-and-events.adoc_{context}"] = About conditions and events -Diagnose DataVolume issues by examining the output of the `Conditions` and `Events` sections +Diagnose data volume issues by examining the output of the `Conditions` and `Events` sections generated by the command: [source,terminal] @@ -29,7 +29,7 @@ The `Events` section provides the following additional information: The output from `oc describe` does not always contains `Events`. An event is generated when either `Status`, `Reason`, or `Message` changes. -Both conditions and events react to changes in the state of the DataVolume. +Both conditions and events react to changes in the state of the data volume. For example, if you misspell the URL during an import operation, the import generates a 404 message. That message change generates an event with a reason. diff --git a/modules/virt-about-container-disks.adoc b/modules/virt-about-container-disks.adoc index 58b3d59e1f18..07f8fb78208a 100644 --- a/modules/virt-about-container-disks.adoc +++ b/modules/virt-about-container-disks.adoc @@ -8,20 +8,20 @@ A container disk is a virtual machine image that is stored as a container image in a container image registry. You can use container disks to deliver the same disk images to multiple virtual machines and to create large numbers of virtual machine clones. -A container disk can either be imported into a persistent volume claim (PVC) by using a DataVolume that is attached to a virtual machine, or attached directly to a virtual machine as an ephemeral `containerDisk` volume. +A container disk can either be imported into a persistent volume claim (PVC) by using a data volume that is attached to a virtual machine, or attached directly to a virtual machine as an ephemeral `containerDisk` volume. -== Importing a container disk into a PVC by using a DataVolume +== Importing a container disk into a PVC by using a data volume -Use the Containerized Data Importer (CDI) to import the container disk into a PVC by using a DataVolume. You can then attach the DataVolume to a virtual machine for persistent storage. +Use the Containerized Data Importer (CDI) to import the container disk into a PVC by using a data volume. You can then attach the data volume to a virtual machine for persistent storage. == Attaching a container disk to a virtual machine as a `containerDisk` volume A `containerDisk` volume is ephemeral. It is discarded when the virtual machine is stopped, restarted, or deleted. 
When a virtual machine with a `containerDisk` volume starts, the container image is pulled from the registry and hosted on the node that is hosting the virtual machine. -Use `containerDisk` volumes for read-only filesystems such as CD-ROMs or for disposable virtual machines. +Use `containerDisk` volumes for read-only file systems such as CD-ROMs or for disposable virtual machines. [IMPORTANT] ==== -Using `containerDisk` volumes for read-write filesystems is not recommended because the data is temporarily written to local storage on the hosting node. This slows live migration of the virtual machine, such as in the case of node maintenance, because the data must be migrated to the destination node. Additionally, all data is lost if the node loses power or otherwise shuts down unexpectedly. +Using `containerDisk` volumes for read-write file systems is not recommended because the data is temporarily written to local storage on the hosting node. This slows live migration of the virtual machine, such as in the case of node maintenance, because the data must be migrated to the destination node. Additionally, all data is lost if the node loses power or otherwise shuts down unexpectedly. ==== diff --git a/modules/virt-about-datavolumes.adoc b/modules/virt-about-datavolumes.adoc index 55155d36f12b..01b8cc17b6fc 100644 --- a/modules/virt-about-datavolumes.adoc +++ b/modules/virt-about-datavolumes.adoc @@ -11,10 +11,10 @@ // * virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes-block.adoc [id="virt-about-datavolumes_{context}"] -= About DataVolumes += About data volumes `DataVolume` objects are custom resources that are provided by the Containerized -Data Importer (CDI) project. DataVolumes orchestrate import, clone, and upload -operations that are associated with an underlying PersistentVolumeClaim (PVC). -DataVolumes are integrated with KubeVirt, and they prevent a virtual machine +Data Importer (CDI) project. Data volumes orchestrate import, clone, and upload +operations that are associated with an underlying persistent volume claim (PVC). +Data volumes are integrated with {VirtProductName}, and they prevent a virtual machine from being started before the PVC has been prepared. diff --git a/modules/virt-about-hostpath-provisioner.adoc b/modules/virt-about-hostpath-provisioner.adoc index dd57df20a6f3..119db362fec4 100644 --- a/modules/virt-about-hostpath-provisioner.adoc +++ b/modules/virt-about-hostpath-provisioner.adoc @@ -13,14 +13,14 @@ When you install the {VirtProductName} Operator, the hostpath provisioner Operat is automatically installed. To use it, you must: * Configure SELinux: -** If you use Red Hat Enterprise Linux CoreOS 8 workers, you must create a MachineConfig +** If you use Red Hat Enterprise Linux CoreOS 8 workers, you must create a `MachineConfig` object on each node. -** Otherwise, apply the SELinux label `container_file_t` to the PersistentVolume (PV) backing +** Otherwise, apply the SELinux label `container_file_t` to the persistent volume (PV) backing directory on each node. -* Create a HostPathProvisioner custom resource. +* Create a `HostPathProvisioner` custom resource. * Create a `StorageClass` object for the hostpath provisioner. The hostpath provisioner Operator deploys the provisioner as a _DaemonSet_ on each node when you create its custom resource. 
In the custom resource file, you specify -the backing directory for the PersistentVolumes that the hostpath provisioner +the backing directory for the persistent volumes that the hostpath provisioner creates. diff --git a/modules/virt-about-liveness-readiness-probes.adoc b/modules/virt-about-liveness-readiness-probes.adoc index d953e3cc6108..472673054e5c 100644 --- a/modules/virt-about-liveness-readiness-probes.adoc +++ b/modules/virt-about-liveness-readiness-probes.adoc @@ -6,11 +6,7 @@ = About liveness and readiness probes -When a VirtualMachineInstance (VMI) fails, _liveness probes_ stop the VMI. -Controllers such as VirtualMachine then spawn other VMIs, restoring virtual -machine responsiveness. +_Liveness probes_ determine if a virtual machine instance (VMI) is still responding. If the probe fails, the VMI is stopped. A replacement VMI is then created to restore virtual machine responsiveness. -_Readiness probes_ tell services and endpoints that the VirtualMachineInstance -is ready to receive traffic from services. If readiness probes fail, -the VirtualMachineInstance is removed from -applicable endpoints until the probe recovers. +_Readiness probes_ tell services and endpoints that the virtual machine instance +is ready to receive traffic from services. If readiness probes fail, the virtual machine instance is removed from applicable endpoints until the probe recovers. diff --git a/modules/virt-about-storage-setting-for-datavolumes.adoc b/modules/virt-about-storage-setting-for-datavolumes.adoc index a46bafeec0d7..626b33bf3c9b 100644 --- a/modules/virt-about-storage-setting-for-datavolumes.adoc +++ b/modules/virt-about-storage-setting-for-datavolumes.adoc @@ -3,25 +3,25 @@ // * virt/virtual_machines/virtual_disks/virt-storage-defaults-for-datavolumes.adoc [id="virt-about-kubevirtstorageclassdefaults_{context}"] -= About storage settings for DataVolumes += About storage settings for data volumes -DataVolumes require a defined _access mode_ and _volume mode_ to be created in the web console. +Data volumes require a defined _access mode_ and _volume mode_ to be created in the web console. These storage settings are configured by default with a `ReadWriteOnce` access mode and `Filesystem` volume mode. -You can modify these settings by editing the `kubevirt-storage-class-defaults` ConfigMap in the `openshift-cnv` namespace. -You can also add settings for other storage classes in order to create DataVolumes in the web console for different storage types. +You can modify these settings by editing the `kubevirt-storage-class-defaults` config map in the `openshift-cnv` namespace. +You can also add settings for other storage classes in order to create data volumes in the web console for different storage types. [NOTE] ==== You must configure storage settings that are supported by the underlying storage. ==== -All DataVolumes that you create in the web console use the default storage settings unless you specify a storage class that is also defined in the ConfigMap. +All data volumes that you create in the web console use the default storage settings unless you specify a storage class that is also defined in the config map. [id="virt-datavolumes-access-modes_{context}"] == Access modes -DataVolumes support the following access modes: +Data volumes support the following access modes: * `ReadWriteOnce`: The volume can be mounted as read-write by a single node. `ReadWriteOnce` has greater versatility and is the default setting. 
* `ReadWriteMany`: The volume can be mounted as read-write by many nodes. `ReadWriteMany` is required for some features, such as live migration of virtual machines between nodes. @@ -31,7 +31,7 @@ DataVolumes support the following access modes: [id="virt-datavolumes-volume-modes_{context}"] == Volume modes -The volume mode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. DataVolumes support the following volume modes: +The volume mode defines if a volume is intended to be used with a formatted file system or to remain in raw block state. Data volumes support the following volume modes: -* `Filesystem`: Creates a filesystem on the DataVolume. This is the default setting. -* `Block`: Creates a block DataVolume. Only use `Block` if the underlying storage supports it. +* `Filesystem`: Creates a file system on the data volume. This is the default setting. +* `Block`: Creates a block data volume. Only use `Block` if the underlying storage supports it. diff --git a/modules/virt-about-upgrading-virt.adoc b/modules/virt-about-upgrading-virt.adoc index dd1b3e16615e..55473211ea05 100644 --- a/modules/virt-about-upgrading-virt.adoc +++ b/modules/virt-about-upgrading-virt.adoc @@ -28,14 +28,14 @@ virtual machine. + [NOTE] ==== -Each virtual machine has a `virt-launcher` Pod that runs the virtual machine -instance. The `virt-launcher` Pod runs an instance of `libvirt`, which is +Each virtual machine has a `virt-launcher` pod that runs the virtual machine +instance. The `virt-launcher` pod runs an instance of `libvirt`, which is used to manage the virtual machine process. ==== * Upgrading does not interrupt network connections. -* DataVolumes and their associated PersistentVolumeClaims are preserved during +* Data volumes and their associated persistent volume claims are preserved during upgrade. + [IMPORTANT] diff --git a/modules/virt-about-vm-snapshots.adoc b/modules/virt-about-vm-snapshots.adoc index ab911ce81633..c642649a2c3b 100644 --- a/modules/virt-about-vm-snapshots.adoc +++ b/modules/virt-about-vm-snapshots.adoc @@ -18,7 +18,7 @@ With the offline VM snapshots feature, cluster administrators and application de * Restore a VM from a snapshot * Delete an existing VM snapshot -== Virtual machine snapshot controller and Custom Resource Definitions (CRDs) +== Virtual machine snapshot controller and custom resource definitions (CRDs) The VM snapshot feature introduces three new API objects defined as CRDs for managing snapshots: diff --git a/modules/virt-about-vmis.adoc b/modules/virt-about-vmis.adoc index 819283f3c3b6..1313b9e7c001 100644 --- a/modules/virt-about-vmis.adoc +++ b/modules/virt-about-vmis.adoc @@ -20,5 +20,5 @@ When you delete a VM, the associated VMI is automatically deleted. You delete a [NOTE] ==== -Before you uninstall {VirtProductName}, list and view the standalone VMIs by using the CLI or the web console. Then, delete any outstanding VMIs. +Before you uninstall {VirtProductName}, list and view the standalone VMIs by using the CLI or the web console. Then, delete any outstanding VMIs. ==== diff --git a/modules/virt-accessing-vmi-ssh.adoc b/modules/virt-accessing-vmi-ssh.adoc index 9c0dc36dcc91..712f215811d6 100644 --- a/modules/virt-accessing-vmi-ssh.adoc +++ b/modules/virt-accessing-vmi-ssh.adoc @@ -15,7 +15,7 @@ machine. .Prerequisites * You must be in the same project as the VMI. * The VMI you want to access must be connected -to the default Pod network by using the `masquerade` binding method. 
+to the default pod network by using the `masquerade` binding method. * The VMI you want to access must be running. * Install the OpenShift CLI (`oc`). @@ -44,7 +44,7 @@ fedora-vm-ssh NodePort 127.0.0.1 22:32551/TCP 6s + In this example, the service acquired the `32551` port. -. Log in to the VMI via SSH. Use the `ipAddress` of any of the cluster +. Log in to the VMI via SSH. Use the `ipAddress` of any of the cluster nodes and the port that you found in the previous step: + [source,terminal] diff --git a/modules/virt-add-boot-order-web.adoc b/modules/virt-add-boot-order-web.adoc index 428e15b96a88..676eb01fa1c6 100644 --- a/modules/virt-add-boot-order-web.adoc +++ b/modules/virt-add-boot-order-web.adoc @@ -20,7 +20,7 @@ Add items to a boot order list by using the web console. . Click the pencil icon that is located on the right side of *Boot Order*. If a YAML configuration does not exist, or if this is the first time that you are creating a boot order list, the following message displays: *No resource selected. VM will attempt to boot from disks by order of appearance in YAML file.* -. Click *Add Source* and select a bootable disk or Network Interface Card (NIC) for the virtual machine. +. Click *Add Source* and select a bootable disk or network interface card (NIC) for the virtual machine. . Add any additional disks or NICs to the boot order list. diff --git a/modules/virt-add-disk-to-vm.adoc b/modules/virt-add-disk-to-vm.adoc index 301b9b359817..f11f48d429ad 100644 --- a/modules/virt-add-disk-to-vm.adoc +++ b/modules/virt-add-disk-to-vm.adoc @@ -32,7 +32,7 @@ Use this procedure to add a virtual disk to a {object}. . Click the *Disks* tab. . Click *Add Disk* to open the *Add Disk* window. . In the *Add Disk* window, specify the *Source*, *Name*, *Size*, *Interface*, *Type*, and *Storage Class*. -.. Optional: In the *Advanced* list, specify the *Volume Mode* and *Access Mode* for the virtual disk. If you do not specify these parameters, the system uses the default values from the `kubevirt-storage-class-defaults` ConfigMap. +.. Optional: In the *Advanced* list, specify the *Volume Mode* and *Access Mode* for the virtual disk. If you do not specify these parameters, the system uses the default values from the `kubevirt-storage-class-defaults` config map. . Click *Add*. ifdef::virt-vm[] diff --git a/modules/virt-adding-secret-configmap-service-account-to-vm.adoc b/modules/virt-adding-secret-configmap-service-account-to-vm.adoc index e0f844792be1..7c8bdec6874e 100644 --- a/modules/virt-adding-secret-configmap-service-account-to-vm.adoc +++ b/modules/virt-adding-secret-configmap-service-account-to-vm.adoc @@ -4,13 +4,13 @@ [id="virt-adding-secret-configmap-service-account-to-vm_{context}"] -= Adding a secret, ConfigMap, or service account to a virtual machine += Adding a secret, config map, or service account to a virtual machine -Add a secret, ConfigMap, or service account to a virtual machine by using the {product-title} web console. +Add a secret, config map, or service account to a virtual machine by using the {product-title} web console. .Prerequisites -* The secret, ConfigMap, or service account that you want to add must exist in the same namespace as the target virtual machine. +* The secret, config map, or service account that you want to add must exist in the same namespace as the target virtual machine. .Procedure @@ -22,7 +22,7 @@ Add a secret, ConfigMap, or service account to a virtual machine by using the {p . Click the *Environment* tab. -. 
Click *Select a resource* and select a secret, ConfigMap, or service account from the list. A six character serial number is automatically +. Click *Select a resource* and select a secret, config map, or service account from the list. A six character serial number is automatically generated for the selected resource. . Click *Save*. @@ -33,7 +33,7 @@ generated for the selected resource. ==== .. You can reset the form to the last saved state by clicking *Reload*. -.. The *Environment* resources are added to the virtual machine as disks. You can mount the secret, ConfigMap, or service account as you would mount any other disk. +.. The *Environment* resources are added to the virtual machine as disks. You can mount the secret, config map, or service account as you would mount any other disk. .. If the virtual machine is running, changes will not take effect until you restart the virtual machine. The newly added resources are marked as pending changes for both the *Environment* and *Disks* tab in the *Pending Changes* banner at the top of the page. @@ -44,7 +44,7 @@ for both the *Environment* and *Disks* tab in the *Pending Changes* banner at th . From the *Virtual Machine Overview* page, click the *Disks* tab. -. Check to ensure that the secret, ConfigMap, or service account is included in the list of disks. +. Check to ensure that the secret, config map, or service account is included in the list of disks. . Optional. Choose the appropriate method to apply your changes: @@ -52,4 +52,4 @@ for both the *Environment* and *Disks* tab in the *Pending Changes* banner at th .. If the virtual machine is stopped, start the virtual machine by clicking *Actions* -> *Start Virtual Machine*. -You can now mount the secret, ConfigMap, or service account as you would mount any other disk. +You can now mount the secret, config map, or service account as you would mount any other disk. diff --git a/modules/virt-adding-tls-certificates-for-authenticating-dv-imports.adoc b/modules/virt-adding-tls-certificates-for-authenticating-dv-imports.adoc index c553fb59e1d7..1803e7a258fb 100644 --- a/modules/virt-adding-tls-certificates-for-authenticating-dv-imports.adoc +++ b/modules/virt-adding-tls-certificates-for-authenticating-dv-imports.adoc @@ -3,17 +3,17 @@ // * virt/virtual_machines/importing_vms/virt-tls-certificates-for-dv-imports.adoc [id="virt-adding-tls-certificates-for-authenticating-dv-imports_{context}"] -= Adding TLS certificates for authenticating DataVolume imports += Adding TLS certificates for authenticating data volume imports -TLS certificates for registry or HTTPS endpoints must be added to a ConfigMap -in order to import data from these sources. This ConfigMap must be present -in the namespace of the destination DataVolume. +TLS certificates for registry or HTTPS endpoints must be added to a config map +in order to import data from these sources. This config map must be present +in the namespace of the destination data volume. -Create the ConfigMap by referencing the relative file path for the TLS certificate. +Create the config map by referencing the relative file path for the TLS certificate. .Procedure -. Ensure you are in the correct namespace. The ConfigMap can only be referenced by DataVolumes if it is in the same namespace. +. Ensure you are in the correct namespace. The config map can only be referenced by data volumes if it is in the same namespace. + [source,terminal] @@ -21,7 +21,7 @@ Create the ConfigMap by referencing the relative file path for the TLS certifica $ oc get ns ---- -. 
Create the ConfigMap: +. Create the config map: + [source,terminal] diff --git a/modules/virt-additional-scc-for-kubevirt-controller.adoc b/modules/virt-additional-scc-for-kubevirt-controller.adoc index 752d1b763d8e..4e088c529b83 100644 --- a/modules/virt-additional-scc-for-kubevirt-controller.adoc +++ b/modules/virt-additional-scc-for-kubevirt-controller.adoc @@ -5,7 +5,7 @@ [id="virt-additional-scc-for-kubevirt-controller_{context}"] = Additional {product-title} security context constraints and Linux capabilities for the `kubevirt-controller` service account -Security context constraints (SCCs) control permissions for pods. These permissions include actions that a pod, a collection of containers, can perform and what resources it can access. You can use SCCs to define a set of conditions that a Pod must run with in order to be accepted into the system. +Security context constraints (SCCs) control permissions for pods. These permissions include actions that a pod, a collection of containers, can perform and what resources it can access. You can use SCCs to define a set of conditions that a pod must run with in order to be accepted into the system. The `kubevirt-controller` is a cluster controller that creates the virt-launcher pods for virtual machines in the cluster. These virt-launcher pods are granted permissions by the `kubevirt-controller` service account. @@ -16,10 +16,10 @@ The `kubevirt-controller` service account is granted additional SCCs and Linux c The `kubevirt-controller` service account is granted the following SCCs: * `scc.AllowHostDirVolumePlugin = true` + -This allows virtual machines to use the hostPath volume plug-in. +This allows virtual machines to use the hostpath volume plug-in. * `scc.AllowPrivilegedContainer = false` + -This ensures the virt-launcher Pod is not run as a privileged container. +This ensures the virt-launcher pod is not run as a privileged container. * `scc.AllowedCapabilities = []corev1.Capability{"NET_ADMIN", "NET_RAW", "SYS_NICE"}` + This provides the following additional Linux capabilities diff --git a/modules/virt-analyzing-datavolume-conditions-and-events.adoc b/modules/virt-analyzing-datavolume-conditions-and-events.adoc index 58185e0216d7..52cba9e95ef4 100644 --- a/modules/virt-analyzing-datavolume-conditions-and-events.adoc +++ b/modules/virt-analyzing-datavolume-conditions-and-events.adoc @@ -3,13 +3,13 @@ // * virt/logging_events_monitoring/virt-analyzing-datavolumes-using-events-and-conditions.adoc [id="virt-analyzing-datavolume-conditions-and-events_{context}"] -= Analyzing DataVolumes using conditions and events += Analyzing data volumes using conditions and events By inspecting the `Conditions` and `Events` sections generated by the `describe` -command, you determine the state of the DataVolume -in relation to PersistentVolumeClaims (PVCs), and whether or +command, you determine the state of the data volume +in relation to persistent volume claims (PVCs), and whether or not an operation is actively running or completed. You might also receive messages -that offer specific details about the status of the DataVolume, and how +that offer specific details about the status of the data volume, and how it came to be in its current state. There are many different combinations of conditions. Each must be evaluated in its unique context. @@ -23,7 +23,7 @@ If the PVC is not bound, the `Status` is `False`. + When the PVC is bound, an event is generated stating that the PVC is bound. In this case, the `Reason` is `Bound` and `Status` is `True`. 
-The `Message` indicates which PVC owns the DataVolume. +The `Message` indicates which PVC owns the data volume. + `Message`, in the `Events` section, provides further details including how long the PVC has been bound (`Age`) and by what resource (`From`), @@ -61,7 +61,7 @@ the `Message` displays an inability to connect due to a `404`, listed in the + From this information, you conclude that an import operation was running, creating contention for other operations that are -attempting to access the DataVolume: +attempting to access the data volume: + .Example output [source,terminal] @@ -83,8 +83,8 @@ Status: ---- -* `Ready` – If `Type` is `Ready` and `Status` is `True`, then the DataVolume is ready -to be used, as in the following example. If the DataVolume is not ready to be +* `Ready` – If `Type` is `Ready` and `Status` is `True`, then the data volume is ready +to be used, as in the following example. If the data volume is not ready to be used, the `Status` is `False`: + .Example output diff --git a/modules/virt-attaching-vm-to-sriov-network.adoc b/modules/virt-attaching-vm-to-sriov-network.adoc index 0804a4774e0b..8e0d1cbebaa4 100644 --- a/modules/virt-attaching-vm-to-sriov-network.adoc +++ b/modules/virt-attaching-vm-to-sriov-network.adoc @@ -31,10 +31,10 @@ spec: networkName: <6> ... ---- -<1> A unique name for the interface that is connected to the Pod network. -<2> The `masquerade` binding to the default Pod network. +<1> A unique name for the interface that is connected to the pod network. +<2> The `masquerade` binding to the default pod network. <3> A unique name for the SR-IOV interface. -<4> The name of the Pod network interface. This must be the same as the `interfaces.name` that you defined earlier. +<4> The name of the pod network interface. This must be the same as the `interfaces.name` that you defined earlier. <5> The name of the SR-IOV interface. This must be the same as the `interfaces.name` that you defined earlier. <6> The name of the SR-IOV network attachment definition. diff --git a/modules/virt-checking-storage-class.adoc b/modules/virt-checking-storage-class.adoc index e875456f4d02..bf0d6c95b39a 100644 --- a/modules/virt-checking-storage-class.adoc +++ b/modules/virt-checking-storage-class.adoc @@ -13,7 +13,7 @@ Cinder, the default storage class, does not support VM import. See link:https:// You can check the default storage class in the {product-title} console. If the default storage class is not NFS, you can change the default storage class so that it is no longer the default and change the NFS storage class so that it is the default. -If more than one default storage class is defined, the VirtualMachineImport CR uses the default storage class that is first in alphabetical order. +If more than one default storage class is defined, the `VirtualMachineImport` CR uses the default storage class that is first in alphabetical order. .Procedure @@ -37,7 +37,7 @@ If more than one default storage class is defined, the VirtualMachineImport CR u You can check the default storage class from the CLI. -If the default storage class is not NFS, you must change the default storage class to NFS and change the existing default storage class so that it is not the default. If more than one default storage class is defined, the VirtualMachineImport CR uses the default storage class that is first in alphabetical order. +If the default storage class is not NFS, you must change the default storage class to NFS and change the existing default storage class so that it is not the default. 
If more than one default storage class is defined, the `VirtualMachineImport` CR uses the default storage class that is first in alphabetical order. .Procedure diff --git a/modules/virt-cloning-a-datavolume.adoc b/modules/virt-cloning-a-datavolume.adoc index d5e777b370a6..ddf0f75388ee 100644 --- a/modules/virt-cloning-a-datavolume.adoc +++ b/modules/virt-cloning-a-datavolume.adoc @@ -3,7 +3,7 @@ // * virt/virtual_machines/virtual_disks/virt-cloning-a-datavolume-using-smart-cloning.adoc [id="virt-cloning-a-datavolume_{context}"] -= Cloning a DataVolume += Cloning a data volume .Prerequisites @@ -12,16 +12,16 @@ For smart-cloning to occur, the following conditions are required. * Your storage provider must support snapshots. * The source and target PVCs must be defined to the same namespace. * The source and target PVCs must be defined to the same storage class. -* The VolumeSnapshotClass object must reference the storage class defined to both the source and target PVCs. +* The `VolumeSnapshotClass` object must reference the storage class defined to both the source and target PVCs. -If any of these prerequisites are not met, host-assisted cloning automatically occurs when you create a DataVolume with a PVC source. +If any of these prerequisites are not met, host-assisted cloning automatically occurs when you create a data volume with a PVC source. .Procedure -To initiate cloning of a DataVolume: +To initiate cloning of a data volume: . Create a YAML file for a `DataVolume` object that specifies the name of the -new DataVolume, the name and namespace of the source PVC, and the size of the new DataVolume. This example clones a source PVC in block mode, so `volumeMode: Block` is used: +new data volume, the name and namespace of the source PVC, and the size of the new data volume. This example clones a source PVC in block mode, so `volumeMode: Block` is used: + [source,yaml] ---- @@ -42,14 +42,14 @@ spec: storage: <2Gi> <4> volumeMode: Block <5> ---- -<1> The name of the new DataVolume. +<1> The name of the new data volume. <2> The namespace where the source PVC exists. <3> The name of the source PVC. -<4> The size of the new DataVolume. You must allocate enough space, or the +<4> The size of the new data volume. You must allocate enough space, or the cloning operation fails. The size must be the same as or larger than the source PVC. -<5> Specifies that the destination is a block PV +<5> Specifies that the destination is a block PV. -. Start cloning the PVC by creating the DataVolume: +. Start cloning the PVC by creating the data volume: + [source,terminal] ---- @@ -58,7 +58,7 @@ $ oc create -f .yaml + [NOTE] ==== -DataVolumes prevent a virtual machine from starting before the PVC is prepared, -so you can create a virtual machine that references the new DataVolume while the +Data volumes prevent a virtual machine from starting before the PVC is prepared, +so you can create a virtual machine that references the new data volume while the PVC clones. ==== diff --git a/modules/virt-cloning-local-volume-to-another-node.adoc b/modules/virt-cloning-local-volume-to-another-node.adoc index c81b379d31ca..dee5e7c91cf7 100644 --- a/modules/virt-cloning-local-volume-to-another-node.adoc +++ b/modules/virt-cloning-local-volume-to-another-node.adoc @@ -5,10 +5,10 @@ [id="virt-cloning-local-volume-to-another-node_{context}"] = Cloning a local volume to another node -You can move a virtual machine disk so that it runs on a specific node by cloning the underlying PersistentVolumeClaim (PVC). 
+You can move a virtual machine disk so that it runs on a specific node by cloning the underlying persistent volume claim (PVC). -To ensure the virtual machine disk is cloned to the correct node, you must either create a new PersistentVolume (PV) or identify one on the correct node. -Apply a unique label to the PV so that it can be referenced by the DataVolume. +To ensure the virtual machine disk is cloned to the correct node, you must either create a new persistent volume (PV) or identify one on the correct node. +Apply a unique label to the PV so that it can be referenced by the data volume. [NOTE] ==== @@ -91,7 +91,7 @@ spec: $ oc label pv node=node01 ---- -. Create a DataVolume manifest that references the following: +. Create a data volume manifest that references the following: * The PVC name and namespace of the virtual machine. * The label you applied to the PV in the previous step. @@ -118,17 +118,17 @@ spec: requests: storage: <10Gi> <5> ---- -<1> The name of the new DataVolume. +<1> The name of the new data volume. <2> The name of the source PVC. If you do not know the PVC name, you can find it in the virtual machine configuration: `spec.volumes.persistentVolumeClaim.claimName`. <3> The namespace where the source PVC exists. <4> The label that you applied to the PV in the previous step. <5> The size of the destination PV. -. Start the cloning operation by applying the DataVolume manifest to your cluster: +. Start the cloning operation by applying the data volume manifest to your cluster: + [source,terminal] ---- $ oc apply -f ---- -The DataVolume clones the PVC of the virtual machine into the PV on the specific node. +The data volume clones the PVC of the virtual machine into the PV on the specific node. diff --git a/modules/virt-cloning-pvc-of-vm-disk-into-new-datavolume.adoc b/modules/virt-cloning-pvc-of-vm-disk-into-new-datavolume.adoc index ae5e73627baf..042ac0af3e39 100644 --- a/modules/virt-cloning-pvc-of-vm-disk-into-new-datavolume.adoc +++ b/modules/virt-cloning-pvc-of-vm-disk-into-new-datavolume.adoc @@ -6,17 +6,17 @@ // `blockstorage` conditionals are used (declared in the "*-block" assembly) to separate content [id="virt-cloning-pvc-of-vm-disk-into-new-datavolume_{context}"] -= Cloning the PersistentVolumeClaim of a virtual machine disk into a new DataVolume += Cloning the persistent volume claim of a virtual machine disk into a new data volume -You can clone a PersistentVolumeClaim (PVC) of an existing virtual machine disk -into a new DataVolume. The new DataVolume can then be used for a new virtual +You can clone a persistent volume claim (PVC) of an existing virtual machine disk +into a new data volume. The new data volume can then be used for a new virtual machine. [NOTE] ==== -When a DataVolume is created independently of a virtual machine, the lifecycle -of the DataVolume is independent of the virtual machine. If the virtual machine -is deleted, neither the DataVolume nor its associated PVC is deleted. +When a data volume is created independently of a virtual machine, the lifecycle +of the data volume is independent of the virtual machine. If the virtual machine +is deleted, neither the data volume nor its associated PVC is deleted. ==== .Prerequisites @@ -25,7 +25,7 @@ is deleted, neither the DataVolume nor its associated PVC is deleted. down the virtual machine that is associated with the PVC before you can clone it. * Install the OpenShift CLI (`oc`). 
ifdef::blockstorage[] -* At least one available block PersistentVolume (PV) that is the same size as or larger than the source PVC. +* At least one available block persistent volume (PV) that is the same size as or larger than the source PVC. endif::[] .Procedure @@ -33,12 +33,12 @@ endif::[] . Examine the virtual machine disk you want to clone to identify the name and namespace of the associated PVC. -. Create a YAML file for a DataVolume object that specifies the name of the -new DataVolume, the name and namespace of the source PVC, +. Create a YAML file for a data volume that specifies the name of the +new data volume, the name and namespace of the source PVC, ifdef::blockstorage[] `volumeMode: Block` so that an available block PV is used, endif::[] -and the size of the new DataVolume. +and the size of the new data volume. + For example: + @@ -63,16 +63,16 @@ ifdef::blockstorage[] volumeMode: Block <5> endif::[] ---- -<1> The name of the new DataVolume. +<1> The name of the new data volume. <2> The namespace where the source PVC exists. <3> The name of the source PVC. -<4> The size of the new DataVolume. You must allocate enough space, or the +<4> The size of the new data volume. You must allocate enough space, or the cloning operation fails. The size must be the same as or larger than the source PVC. ifdef::blockstorage[] <5> Specifies that the destination is a block PV endif::[] -. Start cloning the PVC by creating the DataVolume: +. Start cloning the PVC by creating the data volume: + [source,terminal] ---- @@ -81,7 +81,7 @@ $ oc create -f .yaml + [NOTE] ==== -DataVolumes prevent a virtual machine from starting before the PVC is prepared, -so you can create a virtual machine that references the new DataVolume while the +Data volumes prevent a virtual machine from starting before the PVC is prepared, +so you can create a virtual machine that references the new data volume while the PVC clones. ==== diff --git a/modules/virt-configuring-configmap-for-obsolete-cpu-models.adoc b/modules/virt-configuring-configmap-for-obsolete-cpu-models.adoc index 56253b92ada1..561cdfa9f2c4 100644 --- a/modules/virt-configuring-configmap-for-obsolete-cpu-models.adoc +++ b/modules/virt-configuring-configmap-for-obsolete-cpu-models.adoc @@ -3,9 +3,9 @@ // * virt/node_maintenance/virt-managing-node-labeling-obsolete-cpu-models.adoc [id="virt-configuring-configmap-for-obsolete-cpu-models_{context}"] -= Configuring a ConfigMap for obsolete CPU models += Configuring a config map for obsolete CPU models -Use this procedure to configure a ConfigMap for obsolete CPU models. +Use this procedure to configure a config map for obsolete CPU models. .Procedure @@ -27,7 +27,7 @@ data: <2> - "pentiumpro" minCPU: "Penryn" <4> ---- -<1> Name of the ConfigMap. +<1> Name of the config map. <2> Configuration data. <3> List of obsolete CPU models. <4> Minimum CPU model that is used for basic CPU features. diff --git a/modules/virt-configuring-masquerade-mode-cli.adoc b/modules/virt-configuring-masquerade-mode-cli.adoc index 4fdeb6ed6443..9077733d015d 100644 --- a/modules/virt-configuring-masquerade-mode-cli.adoc +++ b/modules/virt-configuring-masquerade-mode-cli.adoc @@ -6,8 +6,8 @@ = Configuring masquerade mode from the command line You can use masquerade mode to hide a virtual machine's outgoing traffic behind -the Pod IP address. Masquerade mode uses Network Address Translation (NAT) to -connect virtual machines to the Pod network backend through a Linux bridge. +the pod IP address. 
Masquerade mode uses Network Address Translation (NAT) to +connect virtual machines to the pod network backend through a Linux bridge. Enable masquerade mode and allow traffic to enter the virtual machine by editing your virtual machine configuration file. diff --git a/modules/virt-configuring-selinux-hpp-on-rhcos8.adoc b/modules/virt-configuring-selinux-hpp-on-rhcos8.adoc index 31afc16bde91..6e3723d96936 100644 --- a/modules/virt-configuring-selinux-hpp-on-rhcos8.adoc +++ b/modules/virt-configuring-selinux-hpp-on-rhcos8.adoc @@ -5,7 +5,7 @@ [id="virt-configuring-selinux-hpp-on-rhcos8_{context}"] = Configuring SELinux for the hostpath provisioner on Red Hat Enterprise Linux CoreOS 8 -You must configure SELinux before you create the HostPathProvisioner custom +You must configure SELinux before you create the `HostPathProvisioner` custom resource. To configure SELinux on Red Hat Enterprise Linux CoreOS 8 workers, you must create a `MachineConfig` object on each node. @@ -16,13 +16,13 @@ If you do not use Red Hat Enterprise Linux CoreOS workers, skip this procedure. .Prerequisites -* Create a backing directory on each node for the PersistentVolumes (PVs) +* Create a backing directory on each node for the persistent volumes (PVs) that the hostpath provisioner creates. .Procedure -. Create the MachineConfig file. For example: +. Create the `MachineConfig` file. For example: + [source,terminal] diff --git a/modules/virt-confirming-policy-updates-on-nodes.adoc b/modules/virt-confirming-policy-updates-on-nodes.adoc index eeb7b8aff975..0607a7e01eb0 100644 --- a/modules/virt-confirming-policy-updates-on-nodes.adoc +++ b/modules/virt-confirming-policy-updates-on-nodes.adoc @@ -3,39 +3,39 @@ // * virt/node_network/virt-configuring-node-network-policy [id="virt-confirming-policy-updates-on-nodes_{context}"] -= Confirming Policy updates on nodes += Confirming node network policy updates on nodes A `NodeNetworkConfigurationPolicy` manifest describes your requested network configuration for nodes in the cluster. -The Policy object includes your requestd network configuration and the status of execution of the Policy on the cluster as a whole. +The node network policy includes your requested network configuration and the status of execution of the policy on the cluster as a whole. -When you apply a Policy, a `NodeNetworkConfigurationEnactment` is created for every node in the cluster. The Enactment is a read-only object that represents the status of execution of the Policy on that node. -If the Policy fails to be applied on the node, the Enactment for that node includes a traceback for troubleshooting. +When you apply a node network policy, a `NodeNetworkConfigurationEnactment` object is created for every node in the cluster. The node network configuration enactment is a read-only object that represents the status of execution of the policy on that node. +If the policy fails to be applied on the node, the enactment for that node includes a traceback for troubleshooting. .Procedure -. To confirm that a Policy has been applied to the cluster, list the Policies and their status: +. To confirm that a policy has been applied to the cluster, list the policies and their status: + [source,terminal] ---- $ oc get nncp ---- -. (Optional) If a Policy is taking longer than expected to successfully configure, you can inspect the requested state and status conditions of a particular Policy: +. 
(Optional) If a policy is taking longer than expected to successfully configure, you can inspect the requested state and status conditions of a particular policy: + [source,terminal] ---- $ oc get nncp -o yaml ---- -. (Optional) If a policy is taking longer than expected to successfully configure on all nodes, you can list the status of the Enactments on the cluster: +. (Optional) If a policy is taking longer than expected to successfully configure on all nodes, you can list the status of the enactments on the cluster: + [source,terminal] ---- $ oc get nnce ---- -. (Optional) To view the configuration of a particular Enactment, including any error reporting for a failed configuration: +. (Optional) To view the configuration of a particular enactment, including any error reporting for a failed configuration: + [source,terminal] ---- diff --git a/modules/virt-creating-a-service-from-a-virtual-machine.adoc b/modules/virt-creating-a-service-from-a-virtual-machine.adoc index 3db8555a9681..233cc352184b 100644 --- a/modules/virt-creating-a-service-from-a-virtual-machine.adoc +++ b/modules/virt-creating-a-service-from-a-virtual-machine.adoc @@ -70,7 +70,7 @@ spec: [NOTE] ==== Labels on a virtual machine are passed through to the pod. The labels on -the `VirtualMachine`, for example `special: key`, must match the labels in +the `VirtualMachine` configuration, for example `special: key`, must match the labels in the `Service` YAML `selector` attribute, which you create later in this procedure. ==== diff --git a/modules/virt-creating-a-vm-from-a-default-os-image.adoc b/modules/virt-creating-a-vm-from-a-default-os-image.adoc index 65fcaa0d6978..697855334887 100644 --- a/modules/virt-creating-a-vm-from-a-default-os-image.adoc +++ b/modules/virt-creating-a-vm-from-a-default-os-image.adoc @@ -14,7 +14,7 @@ You can create virtual machines and virtual machine templates from default OS im .Prerequisites -* When you created the PVC, you selected the *Attach this disk to a VirtualMachine operating system* check box. +* When you created the PVC, you selected the *Attach this data to a Virtual Machine operating system* check box. .Procedure diff --git a/modules/virt-creating-an-upload-dv.adoc b/modules/virt-creating-an-upload-dv.adoc index 6c0aa27ba365..a4eb59f2f00e 100644 --- a/modules/virt-creating-an-upload-dv.adoc +++ b/modules/virt-creating-an-upload-dv.adoc @@ -3,14 +3,14 @@ // * virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-block.adoc [id="virt-creating-an-upload-dv_{context}"] -= Creating an upload DataVolume += Creating an upload data volume -You can manually create a DataVolume with an `upload` data source to use for uploading +You can manually create a data volume with an `upload` data source to use for uploading local disk images. .Procedure -. Create a DataVolume configuration that specifies `spec: source: upload{}`: +. Create a data volume configuration that specifies `spec: source: upload{}`: + [source,yaml] @@ -29,11 +29,11 @@ spec: requests: storage: <2Gi> <2> ---- -<1> The name of the DataVolume. -<2> The size of the DataVolume. Ensure that this value is greater than or equal +<1> The name of the data volume. +<2> The size of the data volume. Ensure that this value is greater than or equal to the size of the disk that you upload. -. Create the DataVolume by running the following command: +. 
Create the data volume by running the following command: + [source,terminal] diff --git a/modules/virt-creating-blank-disk-datavolumes.adoc b/modules/virt-creating-blank-disk-datavolumes.adoc index 91e66d365b62..befec4f9858c 100644 --- a/modules/virt-creating-blank-disk-datavolumes.adoc +++ b/modules/virt-creating-blank-disk-datavolumes.adoc @@ -3,19 +3,19 @@ // * virt/virtual_machines/virtual_disks/virt-expanding-virtual-storage-with-blank-disk-images.adoc [id="virt-creating-blank-disk-datavolumes_{context}"] -= Creating a blank disk image with DataVolumes += Creating a blank disk image with data volumes -You can create a new blank disk image in a PersistentVolumeClaim by -customizing and deploying a DataVolume configuration file. +You can create a new blank disk image in a persistent volume claim by +customizing and deploying a data volume configuration file. .Prerequisites -* At least one available PersistentVolume. +* At least one available persistent volume. * Install the OpenShift CLI (`oc`). .Procedure -. Edit the DataVolume configuration file: +. Edit the data volume configuration file: + [source,yaml] diff --git a/modules/virt-creating-bridge-nad-cli.adoc b/modules/virt-creating-bridge-nad-cli.adoc index 7038095232d2..d9d609d19ac4 100644 --- a/modules/virt-creating-bridge-nad-cli.adoc +++ b/modules/virt-creating-bridge-nad-cli.adoc @@ -3,19 +3,19 @@ // * virt/virtual_machines/vm_networking/virt-attaching-vm-multiple-networks.adoc [id="virt-creating-bridge-nad-cli_{context}"] -= Creating a Linux bridge NetworkAttachmentDefinition in the CLI += Creating a Linux bridge network attachment definition in the CLI -As a network administrator, you can configure a NetworkAttachmentDefinition +As a network administrator, you can configure a network attachment definition of type `cnv-bridge` to provide Layer-2 networking to pods and virtual machines. [NOTE] ==== -The NetworkAttachmentDefinition must be in the same namespace as the Pod or virtual machine. +The network attachment definition must be in the same namespace as the pod or virtual machine. ==== .Procedure -. Create a new file for the NetworkAttachmentDefinition in any local directory. +. Create a new file for the network attachment definition in any local directory. The file must have the following contents, modified to match your configuration: + @@ -42,11 +42,11 @@ spec: ] }' ---- -<1> If you add this annotation to your NetworkAttachmentDefinition, your virtual machine instances +<1> If you add this annotation to your network attachment definition, your virtual machine instances will only run on nodes that have the `br0` bridge connected. -<2> Required. A name for the configuration. It is recommended to match the configuration name to the `name` value of the NetworkAttachmentDefinition. +<2> Required. A name for the configuration. It is recommended to match the configuration name to the `name` value of the network attachment definition. <3> The actual name of the Container Network Interface (CNI) plug-in that provides -the network for this NetworkAttachmentDefinition. Do not change this field unless +the network for this network attachment definition. Do not change this field unless you want to use a different CNI. <4> You must substitute the actual name of the bridge, if it is not `br0`. <5> Required. This allows the MAC pool manager to assign a unique MAC address to the connection. @@ -84,13 +84,12 @@ spec: ... ---- <1> The `name` value for the bridge interface and network must be the same. 
-<2> You must substitute the actual `name` value from the -NetworkAttachmentDefinition. +<2> You must substitute the actual `name` value from the network attachment definition. + [NOTE] ==== -The virtual machine instance will be connected to both the `default` Pod network and `bridge-net`, which is -defined by a NetworkAttachmentDefinition named `a-bridge-network`. +The virtual machine instance will be connected to both the `default` pod network and `bridge-net`, which is +defined by a network attachment definition named `a-bridge-network`. ==== . Apply the configuration file to the resource: diff --git a/modules/virt-creating-bridge-nad-web.adoc b/modules/virt-creating-bridge-nad-web.adoc index 1a26c29f44a5..ccf12836bac2 100644 --- a/modules/virt-creating-bridge-nad-web.adoc +++ b/modules/virt-creating-bridge-nad-web.adoc @@ -5,12 +5,12 @@ //This file contains UI elements and/or package names that need to be updated. [id="virt-creating-bridge-nad-web_{context}"] -= Creating a Linux bridge NetworkAttachmentDefinition in the web console += Creating a Linux bridge network attachment definition in the web console -The NetworkAttachmentDefinition is a custom resource that exposes layer-2 devices +The network attachment definition is a custom resource that exposes layer-2 devices to a specific namespace in your {VirtProductName} cluster. -Network administrators can create NetworkAttachmentDefinitions +Network administrators can create network attachment definitions to provide existing layer-2 networking to pods and virtual machines. .Procedure diff --git a/modules/virt-creating-configmap.adoc b/modules/virt-creating-configmap.adoc index 942d56e9ed50..ca6d28432cbf 100644 --- a/modules/virt-creating-configmap.adoc +++ b/modules/virt-creating-configmap.adoc @@ -3,11 +3,11 @@ // * virt/virtual_machines/importing_vms/virt-importing-rhv-vm.adoc [id="virt-creating-configmap_{context}"] -= Creating a ConfigMap for importing a VM += Creating a config map for importing a VM -You can create a ConfigMap to map the Red Hat Virtualization (RHV) virtual machine operating system to an {VirtProductName} template if you want to override the default `vm-import-controller` mapping or to add additional mappings. +You can create a config map to map the Red Hat Virtualization (RHV) virtual machine operating system to an {VirtProductName} template if you want to override the default `vm-import-controller` mapping or to add additional mappings. -The default `vm-import-controller` ConfigMap contains the following RHV operating systems and their corresponding common {VirtProductName} templates. +The default `vm-import-controller` config map contains the following RHV operating systems and their corresponding common {VirtProductName} templates. [cols="1,1", options="header"] .Operating system and template mapping @@ -82,7 +82,7 @@ rhel8.2 . If an {VirtProductName} template that matches the RHV VM operating system does not appear in the list of available templates, create a template with the {VirtProductName} web console. -. Create a ConfigMap to map the RHV VM operating system to the {VirtProductName} template: +. Create a config map to map the RHV VM operating system to the {VirtProductName} template: + [source,yaml] ---- @@ -106,7 +106,7 @@ EOF <1> Optional: You can change the value of the `namespace` parameter. <2> Specify the REST API name of the RHV operating system and its corresponding VM template as shown in the following example. 
+ -.ConfigMap example +.Config map example [source,yaml] ---- $ cat < ---- <1> Add `value: os-configmap` to the `name: OS_CONFIGMAP_NAME` parameter. -<2> Optional: You can add this value if you changed the namespace in the ConfigMap. +<2> Optional: You can add this value if you changed the namespace in the config map. . Save the `kubevirt-hyperconverged-operator.v{HCOVersion}.yaml` file. + -Updating the `vm-import-operator` deployment updates the `vm-import-controller` ConfigMap. +Updating the `vm-import-operator` deployment updates the `vm-import-controller` config map. endif::[] ifeval::["{VirtVersion}" >= "2.5"] -. Patch the `vm-import-controller-config` ConfigMap to apply the new ConfigMap: +. Patch the `vm-import-controller-config` config map to apply the new config map: + [source,terminal] ---- @@ -169,7 +169,7 @@ $ oc patch configmap vm-import-controller-config -n openshift-cnv --patch '{ } }' ---- -<1> Update the namespace if you changed it in the ConfigMap. +<1> Update the namespace if you changed it in the config map. endif::[] . Verify that the template appears in the {VirtProductName} web console: diff --git a/modules/virt-creating-interface-on-nodes.adoc b/modules/virt-creating-interface-on-nodes.adoc index 0fdc63e542ec..4875b5cb98db 100644 --- a/modules/virt-creating-interface-on-nodes.adoc +++ b/modules/virt-creating-interface-on-nodes.adoc @@ -38,15 +38,15 @@ spec: port: - name: eth1 ---- -<1> Name of the Policy. -<2> Optional: If you do not include the `nodeSelector`, the Policy applies to all nodes in the cluster. +<1> Name of the policy. +<2> Optional: If you do not include the `nodeSelector` parameter, the policy applies to all nodes in the cluster. <3> This example uses the `node-role.kubernetes.io/worker: ""` node selector to select all worker nodes in the cluster. <4> Optional: Human-readable description for the interface. -. Create the Policy: +. Create the node network policy: + [source,terminal] ---- $ oc apply -f <1> ---- -<1> File name of the Policy manifest. +<1> File name of the node network configuration policy manifest. diff --git a/modules/virt-creating-local-block-pv.adoc b/modules/virt-creating-local-block-pv.adoc index 46472ae4e8db..93f8b92f35f8 100644 --- a/modules/virt-creating-local-block-pv.adoc +++ b/modules/virt-creating-local-block-pv.adoc @@ -5,11 +5,11 @@ // * virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes-block.adoc [id="virt-creating-local-block-pv_{context}"] -= Creating a local block PersistentVolume += Creating a local block persistent volume -Create a local block PersistentVolume (PV) on a node by populating a file and +Create a local block persistent volume (PV) on a node by populating a file and mounting it as a loop device. You can then reference this loop device in a -PV configuration as a `Block` volume and use it as a block device for a +PV manifest as a `Block` volume and use it as a block device for a virtual machine image. .Procedure @@ -34,7 +34,7 @@ $ losetup d3 <1> <2> <1> File path where the loop device is mounted. <2> The file created in the previous step to be mounted as the loop device. -. Create a `PersistentVolume` configuration that references the mounted loop device. +. Create a `PersistentVolume` manifest that references the mounted loop device. + [source,yaml] ---- @@ -64,7 +64,7 @@ spec: ---- <1> The path of the loop device on the node. <2> Specifies it is a block PV. -<3> Optional: Set a StorageClass for the PV. If you omit it, the cluster default is used. 
+<3> Optional: Set a storage class for the PV. If you omit it, the cluster default is used. <4> The node on which the block device was mounted. . Create the block PV. @@ -73,4 +73,4 @@ spec: ---- # oc create -f <1> ---- -<1> The filename of the PersistentVolume created in the previous step. +<1> The file name of the persistent volume created in the previous step. diff --git a/modules/virt-creating-new-vm-from-cloned-pvc-using-datavolumetemplate.adoc b/modules/virt-creating-new-vm-from-cloned-pvc-using-datavolumetemplate.adoc index 272ba0c553a8..1d468e3f7ca6 100644 --- a/modules/virt-creating-new-vm-from-cloned-pvc-using-datavolumetemplate.adoc +++ b/modules/virt-creating-new-vm-from-cloned-pvc-using-datavolumetemplate.adoc @@ -3,19 +3,19 @@ // * virt/virtual_machines/cloning_vms/virt-cloning-vm-using-datavolumetemplate.adoc [id="virt-creating-new-vm-from-cloned-pvc-using-datavolumetemplate_{context}"] -= Creating a new virtual machine from a cloned PersistentVolumeClaim by using a DataVolumeTemplate += Creating a new virtual machine from a cloned persistent volume claim by using a data volume template -You can create a virtual machine that clones the PersistentVolumeClaim (PVC) of -an existing virtual machine into a DataVolume. By referencing a -`dataVolumeTemplate` in the virtual machine `spec`, the `source` PVC is cloned to -a DataVolume, which is then automatically used for the creation of the virtual +You can create a virtual machine that clones the persistent volume claim (PVC) of +an existing virtual machine into a data volume. When you reference a +`dataVolumeTemplate` in the virtual machine manifest, the `source` PVC is cloned to +a data volume, which is then automatically used for the creation of the virtual machine. [NOTE] ==== -When a DataVolume is created as part of the DataVolumeTemplate of a virtual -machine, the lifecycle of the DataVolume is then dependent on the virtual -machine. If the virtual machine is deleted, the DataVolume and associated +When a data volume is created as part of the data volume template of a virtual +machine, the lifecycle of the data volume is then dependent on the virtual +machine. If the virtual machine is deleted, the data volume and associated PVC are also deleted. ==== @@ -32,7 +32,7 @@ namespace of the associated PVC. . Create a YAML file for a `VirtualMachine` object. The following virtual machine example clones `my-favorite-vm-disk`, which is located in the -`source-namespace` namespace. The `2Gi` DataVolume called `favorite-clone` +`source-namespace` namespace. The `2Gi` data volume called `favorite-clone` is created from `my-favorite-vm-disk`. + For example: @@ -82,7 +82,7 @@ spec: ---- <1> The virtual machine to create. -. Create the virtual machine with the PVC-cloned DataVolume: +. Create the virtual machine with the PVC-cloned data volume: + [source,terminal] ---- diff --git a/modules/virt-creating-pvcs-to-store-default-os-images.adoc b/modules/virt-creating-pvcs-to-store-default-os-images.adoc index 2182f6871bcf..b4a0eddfa120 100644 --- a/modules/virt-creating-pvcs-to-store-default-os-images.adoc +++ b/modules/virt-creating-pvcs-to-store-default-os-images.adoc @@ -21,7 +21,7 @@ Follow these steps to create a persistent volume claim (PVC), which you use to u . Complete the *Upload Data to Persistent Volume Claim* form to create a PVC that is used to upload and save default OS images. .. Click *Browse* and locate a bootable image to upload and save as a default OS image. -..
Select the *Attach this disk to a VirtualMachine operating system* check box. +.. Select the *Attach this data to a Virtual Machine operating system* check box. .. Select the OS of the bootable disk that you want to upload from the *Operating System* list. .. Select the storage class you want to use in the *Storage Class* list. .. Enter the size of the PVC you are creating in the *Size* fields. diff --git a/modules/virt-creating-rbac-cloning-dvs.adoc b/modules/virt-creating-rbac-cloning-dvs.adoc index 9e95c5e142d2..351523519778 100644 --- a/modules/virt-creating-rbac-cloning-dvs.adoc +++ b/modules/virt-creating-rbac-cloning-dvs.adoc @@ -3,13 +3,13 @@ // * virt/virtual_machines/cloning_vms/virt-enabling-user-permissions-to-clone-datavolumes.adoc [id="virt-creating-rbac-cloning-dvs_{context}"] -= Creating RBAC resources for cloning DataVolumes += Creating RBAC resources for cloning data volumes -Create a new ClusterRole that enables permissions for all actions for the `datavolumes` resource. +Create a new cluster role that enables permissions for all actions for the `datavolumes` resource. .Procedure -. Create a ClusterRole manifest: +. Create a `ClusterRole` manifest: + [source,yaml] ---- @@ -22,18 +22,18 @@ rules: resources: ["datavolumes/source"] verbs: ["*"] ---- -<1> Unique name for the ClusterRole. +<1> Unique name for the cluster role. -. Create the ClusterRole in the cluster: +. Create the cluster role in the cluster: + [source,terminal] ---- $ oc create -f <1> ---- -<1> The file name of the ClusterRole manifest created in the previous step. +<1> The file name of the `ClusterRole` manifest created in the previous step. -. Create a RoleBinding manifest that applies to both the source and destination namespaces and references -the ClusterRole created in the previous step. +. Create a `RoleBinding` manifest that applies to both the source and destination namespaces and references +the cluster role created in the previous step. + [source,yaml] ---- @@ -51,15 +51,15 @@ roleRef: name: datavolume-cloner <4> apiGroup: rbac.authorization.k8s.io ---- -<1> Unique name for the RoleBinding. -<2> The namespace for the source DataVolume. -<3> The namespace to which the DataVolume is cloned. -<4> The name of the ClusterRole created in the previous step. +<1> Unique name for the role binding. +<2> The namespace for the source data volume. +<3> The namespace to which the data volume is cloned. +<4> The name of the cluster role created in the previous step. -. Create the RoleBinding in the cluster: +. Create the role binding in the cluster: + [source,terminal] ---- $ oc create -f <1> ---- -<1> The file name of the RoleBinding manifest created in the previous step. +<1> The file name of the `RoleBinding` manifest created in the previous step. diff --git a/modules/virt-creating-storage-class.adoc b/modules/virt-creating-storage-class.adoc index 81346240dc55..aff00c51ad36 100644 --- a/modules/virt-creating-storage-class.adoc +++ b/modules/virt-creating-storage-class.adoc @@ -3,10 +3,10 @@ // * virt/virtual_machines/virtual_disks/virt-configuring-local-storage-for-vms.adoc [id="virt-creating-storage-class_{context}"] -= Creating a `StorageClass` object += Creating a storage class -When you create a `StorageClass` object, you set parameters that affect the -dynamic provisioning of PersistentVolumes (PVs) that belong to that storage class. + +When you create a storage class, you set parameters that affect the +dynamic provisioning of persistent volumes (PVs) that belong to that storage class. 
+ [NOTE] ==== You cannot update a `StorageClass` object's parameters after you create it. @@ -38,8 +38,8 @@ volumeBindingMode: WaitForFirstConsumer <3> do not specify a value, the storage class defaults to `Delete`. <3> The `volumeBindingMode` value determines when dynamic provisioning and volume binding occur. Specify `WaitForFirstConsumer` to delay the binding and provisioning -of a PV until after a Pod that uses the PersistentVolumeClaim (PVC) -is created. This ensures that the PV meets the Pod's scheduling requirements. +of a PV until after a pod that uses the persistent volume claim (PVC) +is created. This ensures that the PV meets the pod's scheduling requirements. . Create the `StorageClass` object: + diff --git a/modules/virt-creating-vddk-image.adoc b/modules/virt-creating-vddk-image.adoc index 3ab7fb0cae4d..84e2a6684aef 100644 --- a/modules/virt-creating-vddk-image.adoc +++ b/modules/virt-creating-vddk-image.adoc @@ -5,7 +5,7 @@ [id="virt-creating-vddk-image_{context}"] = Creating and using a VDDK image -You can download the VMware Virtual Disk Development Kit (VDDK), build a VDDK image, and push the VDDK image to your image registry. You then add the VDDK image to the `v2v-vmware` ConfigMap. +You can download the VMware Virtual Disk Development Kit (VDDK), build a VDDK image, and push the VDDK image to your image registry. You then add the VDDK image to the `v2v-vmware` config map. .Prerequisites @@ -62,7 +62,7 @@ $ podman push /vddk: ---- . Ensure that the image is accessible to your {VirtProductName} environment. -. Edit the `v2v-vmware` ConfigMap in the *openshift-cnv* project: +. Edit the `v2v-vmware` config map in the *openshift-cnv* project: + [source,terminal] ---- diff --git a/modules/virt-creating-vm-wizard-web.adoc b/modules/virt-creating-vm-wizard-web.adoc index 3138b37cf130..c1732a620ea6 100644 --- a/modules/virt-creating-vm-wizard-web.adoc +++ b/modules/virt-creating-vm-wizard-web.adoc @@ -7,7 +7,7 @@ The web console features an interactive wizard that guides you through *General*, *Networking*, *Storage*, *Advanced*, and *Review* steps to simplify the process of creating virtual machines. All required fields are marked by a `*`. When the required fields are completed, you can review and create your virtual machine. -Network Interface Cards (NICs) and storage disks can be created and attached to virtual machines after they have been created. +Network interface cards (NICs) and storage disks can be created and attached to virtual machines after they have been created. .*Bootable Disk* diff --git a/modules/virt-creating-vm.adoc b/modules/virt-creating-vm.adoc index 2c05230c4c5b..9d6a36e53d5a 100644 --- a/modules/virt-creating-vm.adoc +++ b/modules/virt-creating-vm.adoc @@ -7,7 +7,7 @@ .Procedure -The `spec` object of the VirtualMachine configuration file references +The `spec` object of the virtual machine configuration file references the virtual machine settings, such as the number of cores and the amount of memory, the disk type, and the volumes to use. diff --git a/modules/virt-define-http-liveness-probe.adoc b/modules/virt-define-http-liveness-probe.adoc index e471ce81974c..bb82cf4ca6a7 100644 --- a/modules/virt-define-http-liveness-probe.adoc +++ b/modules/virt-define-http-liveness-probe.adoc @@ -13,9 +13,8 @@ liveness probes. . Customize a YAML configuration file to create an HTTP liveness probe, using the following code block as an example. 
In this example: -* You configure a probe using `spec.livenessProbe.httpGet`, which queries port `1500` of the -VirtualMachineInstance, after an initial delay of `120` seconds. -* The VirtualMachineInstance installs and runs a minimal HTTP server +* You configure a probe using `spec.livenessProbe.httpGet`, which queries port `1500` of the virtual machine instance, after an initial delay of `120` seconds. +* The virtual machine instance installs and runs a minimal HTTP server on port `1500` using `cloud-init`. + [source,yaml] @@ -62,7 +61,7 @@ spec: name: cloudinitdisk ---- + -. Create the VirtualMachineInstance by running the following command: +. Create the virtual machine instance by running the following command: + [source,terminal] ---- diff --git a/modules/virt-define-readiness-probe.adoc b/modules/virt-define-readiness-probe.adoc index 9d129c3ba194..1d32cbb4ca56 100644 --- a/modules/virt-define-readiness-probe.adoc +++ b/modules/virt-define-readiness-probe.adoc @@ -67,7 +67,7 @@ spec: name: cloudinitdisk ---- + -. Create the VirtualMachineInstance by running the following command: +. Create the virtual machine instance by running the following command: + [source,terminal] ---- diff --git a/modules/virt-define-tcp-liveness-probe.adoc b/modules/virt-define-tcp-liveness-probe.adoc index 7b496afd49bd..fd848d78688a 100644 --- a/modules/virt-define-tcp-liveness-probe.adoc +++ b/modules/virt-define-tcp-liveness-probe.adoc @@ -13,9 +13,8 @@ TCP liveness probes. . Customize a YAML configuration file to create an TCP liveness probe, using this code block as an example. In this example: -* You configure a probe using `spec.livenessProbe.tcpSocket`, which queries port `1500` of the -VirtualMachineInstance, after an initial delay of `120` seconds. -* The VirtualMachineInstance installs and runs a minimal HTTP server +* You configure a probe using `spec.livenessProbe.tcpSocket`, which queries port `1500` of the virtual machine instance, after an initial delay of `120` seconds. +* The virtual machine instance installs and runs a minimal HTTP server on port `1500` using `cloud-init`. + [source,yaml] @@ -62,7 +61,7 @@ spec: name: cloudinitdisk ---- + -. Create the VirtualMachineInstance by running the following command: +. Create the virtual machine instance by running the following command: + [source,terminal] ---- diff --git a/modules/virt-defining-storageclass-in-cdi-configuration.adoc b/modules/virt-defining-storageclass-in-cdi-configuration.adoc index 45c9b6b5d557..71964a484369 100644 --- a/modules/virt-defining-storageclass-in-cdi-configuration.adoc +++ b/modules/virt-defining-storageclass-in-cdi-configuration.adoc @@ -3,15 +3,13 @@ // * virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc [id="virt-defining-storageclass-in-cdi-configuration_{context}"] -= Defining a StorageClass in the CDI configuration += Defining a storage class in the CDI configuration -Define a StorageClass in the CDI configuration to dynamically provision scratch -space for CDI operations. +Define a storage class in the CDI configuration to dynamically provision scratch space for CDI operations. .Procedure -* Use the `oc` client to edit the `cdiconfig/config` and add or edit the -`spec: scratchSpaceStorageClass` to match a StorageClass in the cluster. +* Use the `oc` client to edit the `cdiconfig/config` and add or edit the `spec: scratchSpaceStorageClass` to match a storage class in the cluster. 
+ [source,terminal] ---- diff --git a/modules/virt-delete-vm-web.adoc b/modules/virt-delete-vm-web.adoc index af1fa08ca4f7..d2101cf22f2c 100644 --- a/modules/virt-delete-vm-web.adoc +++ b/modules/virt-delete-vm-web.adoc @@ -9,7 +9,7 @@ Deleting a virtual machine permanently removes it from the cluster. + [NOTE] ==== -When you delete a virtual machine, the DataVolume it uses is automatically deleted. +When you delete a virtual machine, the data volume it uses is automatically deleted. ==== .Procedure diff --git a/modules/virt-deleting-deployment-custom-resource.adoc b/modules/virt-deleting-deployment-custom-resource.adoc index fcfb1d686886..cefb387ce710 100644 --- a/modules/virt-deleting-deployment-custom-resource.adoc +++ b/modules/virt-deleting-deployment-custom-resource.adoc @@ -7,8 +7,7 @@ [id="virt-deleting-deployment-custom-resource_{context}"] = Deleting the {VirtProductName} Operator Deployment custom resource -To uninstall {VirtProductName}, you must first delete the -*{VirtProductName} Operator Deployment* custom resource. +To uninstall {VirtProductName}, you must first delete the *{VirtProductName} Operator Deployment* custom resource. .Prerequisites @@ -16,8 +15,7 @@ To uninstall {VirtProductName}, you must first delete the .Procedure -. From the {product-title} web console, select `openshift-cnv` from -the *Projects* list. +. From the {product-title} web console, select `openshift-cnv` from the *Projects* list. . Navigate to the *Operators* -> *Installed Operators* page. @@ -25,16 +23,13 @@ the *Projects* list. . Click the *{VirtProductName} Operator Deployment* tab. -. Click the Options menu {kebab} in the row containing the *kubevirt-hyperconverged* -custom resource. In the expanded menu, click *Delete HyperConverged Cluster*. +. Click the Options menu {kebab} in the row containing the *kubevirt-hyperconverged* custom resource. In the expanded menu, click *Delete HyperConverged Cluster*. . Click *Delete* in the confirmation window. -. Navigate to the *Workloads* -> *Pods* page to verify that only the Operator -Pods are running. +. Navigate to the *Workloads* -> *Pods* page to verify that only the Operator pods are running. -. Open a terminal window and clean up the remaining resources by running -the following command: +. Open a terminal window and clean up the remaining resources by running the following command: + [source,terminal] ---- diff --git a/modules/virt-deleting-dvs.adoc b/modules/virt-deleting-dvs.adoc index 33c7fd0f1297..b6597425ba4c 100644 --- a/modules/virt-deleting-dvs.adoc +++ b/modules/virt-deleting-dvs.adoc @@ -4,17 +4,17 @@ [id="virt-deleting-dvs_{context}"] -= Deleting a DataVolume += Deleting a data volume -You can delete a DataVolume by using the `oc` command-line interface (CLI). +You can delete a data volume by using the `oc` command-line interface (CLI). .Prerequisites -* Identify the name of the DataVolume that you want to delete. +* Identify the name of the data volume that you want to delete. .Procedure -* Delete the DataVolume by running the following command: +* Delete the data volume by running the following command: + [source,terminal] ---- diff --git a/modules/virt-deleting-virt-cli.adoc b/modules/virt-deleting-virt-cli.adoc index 9a322d3d4d86..45a996db503f 100644 --- a/modules/virt-deleting-virt-cli.adoc +++ b/modules/virt-deleting-virt-cli.adoc @@ -16,12 +16,12 @@ You can delete {VirtProductName} by using the CLI. 
[NOTE] ==== -When you delete the subscription of the {VirtProductName} operator in the OLM by using the CLI, the ClusterServiceVersion (CSV) object is not deleted from the cluster. To completely uninstall {VirtProductName}, you must explicitly delete the CSV. +When you delete the subscription of the {VirtProductName} operator in the OLM by using the CLI, the `ClusterServiceVersion` (CSV) object is not deleted from the cluster. To completely uninstall {VirtProductName}, you must explicitly delete the CSV. ==== .Procedure -. Delete the HyperConverged Custom Resource: +. Delete the `HyperConverged` custom resource: + [source,terminal] ---- @@ -35,7 +35,7 @@ $ oc delete HyperConverged kubevirt-hyperconverged -n openshift-cnv $ oc delete subscription kubevirt-hyperconverged -n openshift-cnv ---- -. Set the ClusterServiceVersion (CSV) name for {VirtProductName} as an environment variable: +. Set the cluster service version (CSV) name for {VirtProductName} as an environment variable: + [source,terminal] ---- diff --git a/modules/virt-deleting-vms.adoc b/modules/virt-deleting-vms.adoc index af72920edaae..176d7e067312 100644 --- a/modules/virt-deleting-vms.adoc +++ b/modules/virt-deleting-vms.adoc @@ -10,7 +10,7 @@ You can delete a virtual machine by using the `oc` command-line interface (CLI). The `oc` client enables you to perform actions on multiple virtual machines. + [NOTE] ==== -When you delete a virtual machine, the DataVolume it uses is automatically deleted. +When you delete a virtual machine, the data volume it uses is automatically deleted. ==== .Prerequisites diff --git a/modules/virt-deploying-operator-cli.adoc b/modules/virt-deploying-operator-cli.adoc index 94df7184c1ac..be1c3cf11726 100644 --- a/modules/virt-deploying-operator-cli.adoc +++ b/modules/virt-deploying-operator-cli.adoc @@ -35,7 +35,7 @@ $ oc apply -f .yaml .Verification steps -* Ensure that {VirtProductName} deployed successfully by watching the `PHASE` of the ClusterServiceVersion (CSV) in the `openshift-cnv` namespace. Run the following command: +* Ensure that {VirtProductName} deployed successfully by watching the `PHASE` of the cluster service version (CSV) in the `openshift-cnv` namespace. Run the following command: + [source,terminal] ---- diff --git a/modules/virt-disabling-tls-for-registry.adoc b/modules/virt-disabling-tls-for-registry.adoc index 7d9774b47560..d1e8bef1e602 100644 --- a/modules/virt-disabling-tls-for-registry.adoc +++ b/modules/virt-disabling-tls-for-registry.adoc @@ -5,7 +5,7 @@ [id="virt-disabling-tls-for-registry_{context}"] = Disabling TLS for a container registry to use as insecure registry -You can disable TLS (transport layer security) for a container registry by adding the registry to the `cdi-insecure-registries` ConfigMap. +You can disable TLS (transport layer security) for a container registry by adding the registry to the `cdi-insecure-registries` config map. .Prerequisites @@ -13,7 +13,7 @@ You can disable TLS (transport layer security) for a container registry by addin .Procedure -* Add the registry to the `cdi-insecure-registries` ConfigMap in the `cdi` namespace. +* Add the registry to the `cdi-insecure-registries` config map in the `cdi` namespace. 
+ [source,terminal] ---- diff --git a/modules/virt-edit-boot-order-yaml-web.adoc b/modules/virt-edit-boot-order-yaml-web.adoc index 787b80e5e79e..0fbe57ae7aa4 100644 --- a/modules/virt-edit-boot-order-yaml-web.adoc +++ b/modules/virt-edit-boot-order-yaml-web.adoc @@ -17,7 +17,7 @@ Edit the boot order list in a YAML configuration file by using the CLI. $ oc edit vm example ---- -. Edit the YAML file and modify the values for the boot order associated with a disk or Network Interface Card (NIC). For example: +. Edit the YAML file and modify the values for the boot order associated with a disk or network interface card (NIC). For example: + [source,yaml] @@ -40,7 +40,7 @@ interfaces: name: default ---- <1> The boot order value specified for the disk. -<2> The boot order value specified for the Network Interface Card. +<2> The boot order value specified for the network interface card. . Save the YAML file. diff --git a/modules/virt-editing-kubevirtstorageclassdefaults-cli.adoc b/modules/virt-editing-kubevirtstorageclassdefaults-cli.adoc index a4d3c9717efe..20777c8665b3 100644 --- a/modules/virt-editing-kubevirtstorageclassdefaults-cli.adoc +++ b/modules/virt-editing-kubevirtstorageclassdefaults-cli.adoc @@ -3,10 +3,10 @@ // * virt/virtual_machines/virtual_disks/virt-storage-defaults-for-datavolumes.adoc [id="virt-editing-kubevirtstorageclassdefaults-cli_{context}"] -= Editing the `kubevirt-storage-class-defaults` ConfigMap in the CLI += Editing the `kubevirt-storage-class-defaults` config map in the CLI -Modify the storage settings for DataVolumes by editing the `kubevirt-storage-class-defaults` ConfigMap in the `openshift-cnv` namespace. -You can also add settings for other storage classes in order to create DataVolumes in the web console for different storage types. +Modify the storage settings for data volumes by editing the `kubevirt-storage-class-defaults` config map in the `openshift-cnv` namespace. +You can also add settings for other storage classes in order to create data volumes in the web console for different storage types. [NOTE] ==== @@ -15,14 +15,14 @@ You must configure storage settings that are supported by the underlying storage .Procedure -. Edit the ConfigMap by running the following command: +. Edit the config map by running the following command: + [source,terminal] ---- $ oc edit configmap kubevirt-storage-class-defaults -n openshift-cnv ---- -. Update the `data` values of the ConfigMap: +. Update the `data` values of the config map: + [source,yaml] ---- @@ -35,8 +35,8 @@ data: ---- <1> The default accessMode is `ReadWriteOnce`. <2> The default volumeMode is `Filesystem`. -<3> If you add an access mode for storage class, replace the `` part of the parameter with the storage class name. +<3> If you add an access mode for a storage class, replace the `` part of the parameter with the storage class name. <4> If you add a volume mode for a storage class, replace the `` part of the parameter with the storage class name. -. Save and exit the editor to update the ConfigMap. +. Save and exit the editor to update the config map. 
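For reference, a minimal sketch of what the edited `kubevirt-storage-class-defaults` config map might look like after you add per-storage-class settings, assuming a hypothetical storage class named `local-block`; the per-class key format follows the callouts above, and you should confirm the access and volume modes against what your underlying storage actually supports:

[source,yaml]
----
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubevirt-storage-class-defaults
  namespace: openshift-cnv
data:
  accessMode: ReadWriteOnce               # default access mode used when no per-class setting exists
  volumeMode: Filesystem                  # default volume mode used when no per-class setting exists
  local-block.accessMode: ReadWriteOnce   # per-storage-class override; "local-block" is a hypothetical class name
  local-block.volumeMode: Block
----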
diff --git a/modules/virt-editing-kubevirtstorageclassdefaults-web.adoc b/modules/virt-editing-kubevirtstorageclassdefaults-web.adoc index ef314a9cff33..182352ec9e97 100644 --- a/modules/virt-editing-kubevirtstorageclassdefaults-web.adoc +++ b/modules/virt-editing-kubevirtstorageclassdefaults-web.adoc @@ -3,10 +3,10 @@ // * virt/virtual_machines/virtual_disks/virt-storage-defaults-for-datavolumes.adoc [id="virt-editing-kubevirtstorageclassdefaults-web_{context}"] -= Editing the `kubevirt-storage-class-defaults` ConfigMap in the web console += Editing the `kubevirt-storage-class-defaults` config map in the web console -Modify the storage settings for DataVolumes by editing the `kubevirt-storage-class-defaults` ConfigMap in the `openshift-cnv` namespace. -You can also add settings for other storage classes in order to create DataVolumes in the web console for different storage types. +Modify the storage settings for data volumes by editing the `kubevirt-storage-class-defaults` config map in the `openshift-cnv` namespace. +You can also add settings for other storage classes in order to create data volumes in the web console for different storage types. [NOTE] ==== @@ -35,7 +35,7 @@ data: <3> If you add an access mode for a storage class, replace the `` part of the parameter with the storage class name. <4> If you add a volume mode for a storage class, replace the `` part of the parameter with the storage class name. -. Click *Save* to update the ConfigMap. +. Click *Save* to update the config map. diff --git a/modules/virt-example-ansible-playbook-creating-vms.adoc b/modules/virt-example-ansible-playbook-creating-vms.adoc index 83f1b53c6779..0dc8b26b127f 100644 --- a/modules/virt-example-ansible-playbook-creating-vms.adoc +++ b/modules/virt-example-ansible-playbook-creating-vms.adoc @@ -37,4 +37,4 @@ playbook. .Additional information * link:https://docs.ansible.com/ansible/latest/user_guide/playbooks.html[Intro to Playbooks] -* link:https://docs.ansible.com/ansible/latest/community/other_tools_and_programs.html#validate-playbook-tools[Tools for Validating Playbooks] \ No newline at end of file +* link:https://docs.ansible.com/ansible/latest/community/other_tools_and_programs.html#validate-playbook-tools[Tools for Validating Playbooks] diff --git a/modules/virt-example-bond-nncp.adoc b/modules/virt-example-bond-nncp.adoc index 1f9ea3e30cf6..6ce7b707baf3 100644 --- a/modules/virt-example-bond-nncp.adoc +++ b/modules/virt-example-bond-nncp.adoc @@ -3,7 +3,7 @@ // * virt/node_network/virt-updating-node-network-config.adoc [id="virt-example-bond-nncp_{context}"] -= Example: Bond interface NodeNetworkConfigurationPolicy += Example: Bond interface node network configuration policy Create a bond interface on nodes in the cluster by applying a `NodeNetworkConfigurationPolicy` manifest to the cluster. @@ -49,8 +49,8 @@ spec: - eth2 mtu: 1450 <13> ---- -<1> Name of the Policy. -<2> Optional: If you do not include the `nodeSelector`, the Policy applies to all nodes in the cluster. +<1> Name of the policy. +<2> Optional: If you do not include the `nodeSelector` parameter, the policy applies to all nodes in the cluster. <3> This example uses a `hostname` node selector. <4> Name of the interface. <5> Optional: Human-readable description of the interface. 
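As a rough sketch of the policy shape that the bond example callouts above describe, the following manifest shows where the policy name, node selector, interface name, description, and MTU fit; the API version, port names (`eth1`, `eth2`), and bond mode are illustrative assumptions rather than the module's full example:

[source,yaml]
----
apiVersion: nmstate.io/v1alpha1            # assumed API version, for illustration only
kind: NodeNetworkConfigurationPolicy
metadata:
  name: bond0-eth1-eth2-policy             # name of the policy
spec:
  nodeSelector:                            # optional; omit it to apply the policy to all nodes
    kubernetes.io/hostname: node01         # hostname node selector
  desiredState:
    interfaces:
    - name: bond0                          # name of the interface
      description: Bond with ports eth1 and eth2   # optional human-readable description
      type: bond
      state: up
      ipv4:
        dhcp: true
        enabled: true
      link-aggregation:
        mode: active-backup                # assumed bond mode
        slaves:
        - eth1
        - eth2
      mtu: 1450
----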
diff --git a/modules/virt-example-bridge-nncp.adoc b/modules/virt-example-bridge-nncp.adoc index d24f17b05d85..189f134dac37 100644 --- a/modules/virt-example-bridge-nncp.adoc +++ b/modules/virt-example-bridge-nncp.adoc @@ -3,7 +3,7 @@ // * virt/node_network/virt-updating-node-network-config.adoc [id="virt-example-bridge-nncp_{context}"] -= Example: Linux bridge interface NodeNetworkConfigurationPolicy += Example: Linux bridge interface node network configuration policy Create a Linux bridge interface on nodes in the cluster by applying a `NodeNetworkConfigurationPolicy` manifest to the cluster. @@ -36,8 +36,8 @@ spec: port: - name: eth1 <11> ---- -<1> Name of the Policy. -<2> Optional: If you do not include the `nodeSelector`, the Policy applies to all nodes in the cluster. +<1> Name of the policy. +<2> Optional: If you do not include the `nodeSelector` parameter, the policy applies to all nodes in the cluster. <3> This example uses a `hostname` node selector. <4> Name of the interface. <5> Optional: Human-readable description of the interface. diff --git a/modules/virt-example-configmap-tls-certificate.adoc b/modules/virt-example-configmap-tls-certificate.adoc index c6434a6c1274..67e93b4124af 100644 --- a/modules/virt-example-configmap-tls-certificate.adoc +++ b/modules/virt-example-configmap-tls-certificate.adoc @@ -3,9 +3,9 @@ // * virt/virtual_machines/importing_vms/virt-tls-certificates-for-dv-imports.adoc [id="virt-example-configmap-tls-certificate_{context}"] -= Example: ConfigMap created from a TLS certificate += Example: Config map created from a TLS certificate -The following example is of a ConfigMap created from `ca.pem` TLS certificate. +The following example is of a config map created from `ca.pem` TLS certificate. [source,yaml] ---- diff --git a/modules/virt-example-ethernet-nncp.adoc b/modules/virt-example-ethernet-nncp.adoc index c0ad6c1899ec..300e88ecbbcf 100644 --- a/modules/virt-example-ethernet-nncp.adoc +++ b/modules/virt-example-ethernet-nncp.adoc @@ -3,7 +3,7 @@ // * virt/node_network/virt-updating-node-network-config.adoc [id="virt-example-ethernet-nncp_{context}"] -= Example: Ethernet interface NodeNetworkConfigurationPolicy += Example: Ethernet interface node network configuration policy Configure an Ethernet interface on nodes in the cluster by applying a `NodeNetworkConfigurationPolicy` manifest to the cluster. @@ -29,8 +29,8 @@ spec: dhcp: true <8> enabled: true <9> ---- -<1> Name of the Policy. -<2> Optional: If you do not include the `nodeSelector`, the Policy applies to all nodes in the cluster. +<1> Name of the policy. +<2> Optional: If you do not include the `nodeSelector` parameter, the policy applies to all nodes in the cluster. <3> This example uses a `hostname` node selector. <4> Name of the interface. <5> Optional: Human-readable description of the interface. diff --git a/modules/virt-example-kubevirtstorageclassdefaults.adoc b/modules/virt-example-kubevirtstorageclassdefaults.adoc index 167f82ff6db3..93401d2f3deb 100644 --- a/modules/virt-example-kubevirtstorageclassdefaults.adoc +++ b/modules/virt-example-kubevirtstorageclassdefaults.adoc @@ -5,9 +5,9 @@ [id="virt-example-kubevirtstorageclassdefaults_{context}"] = Example of multiple storage class defaults -The following YAML file is an example of a `kubevirt-storage-class-defaults` ConfigMap that has storage settings configured for two storage classes, `migration` and `block`. 
+The following YAML file is an example of a `kubevirt-storage-class-defaults` config map that has storage settings configured for two storage classes, `migration` and `block`. -Ensure that all settings are supported by your underlying storage before you update the ConfigMap. +Ensure that all settings are supported by your underlying storage before you update the config map. [source,yaml] ---- diff --git a/modules/virt-example-nmstate-IP-management.adoc b/modules/virt-example-nmstate-IP-management.adoc index aecccc7bf495..db32e71f7dac 100644 --- a/modules/virt-example-nmstate-IP-management.adoc +++ b/modules/virt-example-nmstate-IP-management.adoc @@ -7,7 +7,7 @@ The following example configuration snippets demonstrate different methods of IP management. -These examples use the `ethernet` interface type to simplify the example while showing the related context in the Policy configuration. These IP management examples can be used with the other interface types. +These examples use the `ethernet` interface type to simplify the example while showing the related context in the policy configuration. These IP management examples can be used with the other interface types. [id="virt-example-nmstate-IP-management-static_{context}"] == Static diff --git a/modules/virt-example-nmstate-multiple-interfaces.adoc b/modules/virt-example-nmstate-multiple-interfaces.adoc index d0de1f58e29b..e493ded2ad4e 100644 --- a/modules/virt-example-nmstate-multiple-interfaces.adoc +++ b/modules/virt-example-nmstate-multiple-interfaces.adoc @@ -3,9 +3,9 @@ // * virt/node_network/virt-updating-node-network-config.adoc [id="virt-example-nmstate-multiple-interfaces_{context}"] -= Example: Multiple interfaces in the same Policy += Example: Multiple interfaces in the same node network configuration policy -You can create multiple interfaces in the same Policy. These interfaces can reference each other, allowing you to build and deploy a network configuration by using a single Policy manifest. +You can create multiple interfaces in the same node network configuration policy. These interfaces can reference each other, allowing you to build and deploy a network configuration by using a single policy manifest. The following example snippet creates a bond that is named `bond10` across two NICs and a Linux bridge that is named `br1` that connects to the bond. diff --git a/modules/virt-example-vlan-nncp.adoc b/modules/virt-example-vlan-nncp.adoc index 6e29eff3ca8d..2037b21174c6 100644 --- a/modules/virt-example-vlan-nncp.adoc +++ b/modules/virt-example-vlan-nncp.adoc @@ -3,7 +3,7 @@ // * virt/node_network/virt-updating-node-network-config.adoc [id="virt-example-vlan-nncp_{context}"] -= Example: VLAN interface NodeNetworkConfigurationPolicy += Example: VLAN interface node network configuration policy Create a VLAN interface on nodes in the cluster by applying a `NodeNetworkConfigurationPolicy` manifest to the cluster. @@ -30,8 +30,8 @@ spec: base-iface: eth1 <8> id: 102 <9> ---- -<1> Name of the Policy. -<2> Optional: If you do not include the `nodeSelector`, the Policy applies to all nodes in the cluster. +<1> Name of the policy. +<2> Optional: If you do not include the `nodeSelector` parameter, the policy applies to all nodes in the cluster. <3> This example uses a `hostname` node selector. <4> Name of the interface. <5> Optional: Human-readable description of the interface. 
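To make the multiple-interfaces example described above more concrete, here is a minimal sketch of a single policy that defines a bond named `bond10` and a Linux bridge named `br1` that connects to it; the NIC names (`eth2`, `eth3`), bond mode, and API version are assumptions for illustration only:

[source,yaml]
----
apiVersion: nmstate.io/v1alpha1            # assumed API version, for illustration only
kind: NodeNetworkConfigurationPolicy
metadata:
  name: bond10-br1-policy                  # hypothetical policy name
spec:
  desiredState:
    interfaces:
    - name: bond10                         # bond across two NICs
      type: bond
      state: up
      link-aggregation:
        mode: balance-rr                   # assumed bond mode
        slaves:
        - eth2                             # assumed NIC names
        - eth3
    - name: br1                            # Linux bridge that connects to the bond
      type: linux-bridge
      state: up
      bridge:
        port:
        - name: bond10                     # the bridge references the bond by its interface name
----

A policy like this is applied and confirmed the same way as the single-interface examples, for example with `oc apply -f` followed by `oc get nncp`.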
diff --git a/modules/virt-importing-vm-cli.adoc b/modules/virt-importing-vm-cli.adoc index 360a23b54abe..ee6a5c179c02 100644 --- a/modules/virt-importing-vm-cli.adoc +++ b/modules/virt-importing-vm-cli.adoc @@ -5,9 +5,9 @@ [id="virt-importing-vm-cli_{context}"] = Importing a virtual machine with the CLI -You can import a virtual machine with the CLI by creating the Secret and VirtualMachineImport Custom Resources (CRs). The Secret CR stores the RHV Manager credentials and CA certificate. The VirtualMachineImport CR defines the parameters of the VM import process. +You can import a virtual machine with the CLI by creating the `Secret` and `VirtualMachineImport` custom resources (CRs). The `Secret` CR stores the RHV Manager credentials and CA certificate. The `VirtualMachineImport` CR defines the parameters of the VM import process. -Optional: You can create a ResourceMapping CR that is separate from the VirtualMachineImport CR. A ResourceMapping CR provides greater flexibility, for example, if you import additional RHV VMs. +Optional: You can create a `ResourceMapping` CR that is separate from the `VirtualMachineImport` CR. A `ResourceMapping` CR provides greater flexibility, for example, if you import additional RHV VMs. [IMPORTANT] ==== @@ -16,7 +16,7 @@ The default target storage class must be NFS. Cinder does not support RHV VM imp .Procedure -. Create the Secret CR by running the following command: +. Create the `Secret` CR by running the following command: + [source,yaml] ---- @@ -48,7 +48,7 @@ EOF $ openssl s_client -connect :443 -showcerts < /dev/null ---- -. Optional: Create the ResourceMapping CR if you want to separate the resource mapping from the VirtualMachineImport CR by running the following command: +. Optional: Create the `ResourceMapping` CR if you want to separate the resource mapping from the `VirtualMachineImport` CR by running the following command: + ifeval::["{VirtVersion}" < "2.5"] [source,yaml] @@ -76,7 +76,7 @@ EOF ---- <1> Specify the RHV logical network and vNIC profile. <2> Specify the {VirtProductName} network. -<3> If `storageMappings` are specified in both the ResourceMapping and the VirtualMachineImport CRs, the VirtualMachineImport CR takes precedence. +<3> If `storageMappings` are specified in both the `ResourceMapping` and the `VirtualMachineImport` CRs, the `VirtualMachineImport` CR takes precedence. <4> Specify the RHV storage domain. <5> The default storage class must be `nfs`. endif::[] @@ -107,13 +107,13 @@ EOF ---- <1> Specify the RHV logical network and vNIC profile. <2> Specify the {VirtProductName} network. -<3> If `storageMappings` are specified in both the ResourceMapping and the VirtualMachineImport CRs, the VirtualMachineImport CR takes precedence. +<3> If `storageMappings` are specified in both the `ResourceMapping` and the `VirtualMachineImport` CRs, the `VirtualMachineImport` CR takes precedence. <4> Specify the RHV storage domain. <5> Specify the target storage class as `nfs` or `ocs-storagecluster-ceph-rbd`. <6> If you specified the `ocs-storagecluster-ceph-rbd` storage class, you must specify `Block` as the volume mode. endif::[] -. Create the VirtualMachineImport CR by running the following command: +. Create the `VirtualMachineImport` CR by running the following command: + [source,yaml] ---- @@ -158,15 +158,15 @@ spec: name: <13> EOF ---- -<1> If you create a ResourceMapping CR, uncomment the `resourceMapping` section. +<1> If you create a `ResourceMapping` CR, uncomment the `resourceMapping` section. <2> Specify the target VM name. 
<3> Specify the source VM ID, for example, `80554327-0569-496b-bdeb-fcbbf52b827b`. You can obtain the VM ID by entering `\https:///ovirt-engine/api/vms/` in a web browser on the Manager machine to list all VMs. Locate the VM you want to import and its corresponding VM ID. You do not need to specify a VM name or cluster name. <4> If you specify the source VM name, you must also specify the source cluster. Do not specify the source VM ID. <5> If you specify the source cluster, you must also specify the source VM name. Do not specify the source VM ID. -<6> If you create a ResourceMapping CR, comment out the `mappings` section. +<6> If you create a `ResourceMapping` CR, comment out the `mappings` section. <7> Specify the logical network and vNIC profile of the source VM. <8> Specify the {VirtProductName} network. -<9> If `storageMappings` are specified in both the ResourceMapping and the VirtualMachineImport CRs, the VirtualMachineImport CR takes precedence. +<9> If `storageMappings` are specified in both the `ResourceMapping` and the `VirtualMachineImport` CRs, the `VirtualMachineImport` CR takes precedence. <10> Specify the source storage domain. <11> Specify the target storage class. <12> Specify the source VM disk ID, for example, `8181ecc1-5db8-4193-9c92-3ddab3be7b05`. You can obtain the disk ID by entering `\https:///ovirt-engine/api/vms/` in a web browser on the Manager machine and reviewing the VM details. diff --git a/modules/virt-importing-vm-datavolume.adoc b/modules/virt-importing-vm-datavolume.adoc index a496e8cb81c8..0d00ef95d42d 100644 --- a/modules/virt-importing-vm-datavolume.adoc +++ b/modules/virt-importing-vm-datavolume.adoc @@ -3,9 +3,9 @@ // * virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes.adoc [id="virt-importing-vm-datavolume_{context}"] -= Importing a virtual machine image into a PersistentVolumeClaim by using a DataVolume += Importing a virtual machine image into a persistent volume claim by using a data volume -You can import a virtual machine image into a PersistentVolumeClaim (PVC) by using a DataVolume. +You can import a virtual machine image into a persistent volume claim (PVC) by using a data volume. The virtual machine image can be hosted at an HTTP or HTTPS endpoint, or the image can be built into a container disk and stored in a container registry. @@ -14,7 +14,7 @@ To create a virtual machine from an imported virtual machine image, specify the .Prerequisites * You have installed the OpenShift CLI (`oc`). -* Your cluster has at least one available PersistentVolume. +* Your cluster has at least one available persistent volume. * To import a virtual machine image you must have the following: ** A virtual machine disk image in RAW, ISO, or QCOW2 format, optionally compressed by using `xz` or `gz`. @@ -108,7 +108,7 @@ status: {} <1> The source type to import the image from. This example uses a HTTP endpoint. To import a container disk from a registry, replace `http` with `registry`. <2> The source of the virtual machine image you want to import. This example references a virtual machine image at an HTTP endpoint. An example of a container registry endpoint is `url: "docker://kubevirt/fedora-cloud-container-disk-demo:latest"`. <3> The `secretRef` parameter is optional. -<4> The `certConfigMap` is required for communicating with servers that use self-signed certificates or certificates not signed by the system CA bundle. The referenced ConfigMap must be in the same namespace as the DataVolume. 
+<4> The `certConfigMap` is required for communicating with servers that use self-signed certificates or certificates not signed by the system CA bundle. The referenced config map must be in the same namespace as the data volume. <5> Specify `type: dataVolume` or `type: ""`. If you specify any other value for `type`, such as `persistentVolumeClaim`, a warning is displayed, and the virtual machine does not start. . Create the virtual machine: @@ -120,35 +120,29 @@ $ oc create -f vm--datavolume.yaml + [NOTE] ==== -The `oc create` command creates the DataVolume and the virtual machine. -The CDI controller creates an underlying PVC with the correct annotation, and -the import process begins. When the import completes, the DataVolume status -changes to `Succeeded`, and the virtual machine is allowed to start. +The `oc create` command creates the data volume and the virtual machine. The CDI controller creates an underlying PVC with the correct annotation, and the import process begins. When the import completes, the data volume status changes to `Succeeded`, and the virtual machine is allowed to start. -DataVolume provisioning happens in the background, so there is no need to -monitor it. You can start the virtual machine, and it will not run until the -import is complete. +Data volume provisioning happens in the background, so there is no need to monitor it. You can start the virtual machine, and it will not run until the import is complete. ==== .Verification steps -. The importer Pod downloads the virtual machine image or container disk from the specified URL and stores it on the provisioned PV. View the status of the importer Pod by running the following command: +. The importer pod downloads the virtual machine image or container disk from the specified URL and stores it on the provisioned PV. View the status of the importer pod by running the following command: + [source,terminal] ---- $ oc get pods ---- -. Monitor the DataVolume status until it shows `Succeeded` by running the following command: +. Monitor the data volume status until it shows `Succeeded` by running the following command: + [source,terminal] ---- $ oc describe dv <1> ---- -<1> The name of the DataVolume as specified under `dataVolumeTemplates.metadata.name` in the virtual machine +<1> The name of the data volume as specified under `dataVolumeTemplates.metadata.name` in the virtual machine configuration file. In the example configuration above, this is `fedora-dv`. -. To verify that provisioning is complete and that the VMI has started, try -accessing its serial console by running the following command: +. To verify that provisioning is complete and that the VMI has started, try accessing its serial console by running the following command: + [source,terminal] ---- diff --git a/modules/virt-importing-vm-to-block-pv.adoc b/modules/virt-importing-vm-to-block-pv.adoc index 3b4b3ae0336e..acb3b41bb704 100644 --- a/modules/virt-importing-vm-to-block-pv.adoc +++ b/modules/virt-importing-vm-to-block-pv.adoc @@ -3,25 +3,19 @@ // * virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes-block.adoc [id="virt-importing-vm-to-block-pv_{context}"] -= Importing a virtual machine image to a block PersistentVolume using DataVolumes += Importing a virtual machine image to a block persistent volume using data volumes -You can import an existing virtual machine image into your {product-title} -cluster. 
{VirtProductName} uses DataVolumes to automate the importing data and the -creation of an underlying PersistentVolumeClaim (PVC). -You can then reference the DataVolume in a virtual machine configuration. +You can import an existing virtual machine image into your {product-title} cluster. {VirtProductName} uses data volumes to automate the import of data and the creation of an underlying persistent volume claim (PVC). You can then reference the data volume in a virtual machine manifest. .Prerequisites -* A virtual machine disk image, in RAW, ISO, or QCOW2 format, optionally -compressed by using `xz` or `gz`. -* An `HTTP` or `s3` endpoint where the image is hosted, along with any -authentication credentials needed to access the data source +* A virtual machine disk image, in RAW, ISO, or QCOW2 format, optionally compressed by using `xz` or `gz`. +* An `HTTP` or `s3` endpoint where the image is hosted, along with any authentication credentials needed to access the data source. * At least one available block PV. .Procedure -. If your data source requires authentication credentials, edit the -`endpoint-secret.yaml` file, and apply the updated configuration to the cluster. +. If your data source requires authentication credentials, edit the `endpoint-secret.yaml` file, and apply the updated configuration to the cluster. .. Edit the `endpoint-secret.yaml` file with your preferred text editor: + @@ -48,8 +42,7 @@ data: $ oc apply -f endpoint-secret.yaml ---- -. Create a `DataVolume` configuration that specifies the data source for the image -you want to import and `volumeMode: Block` so that an available block PV is used. +. Create a `DataVolume` manifest that specifies the data source for the image you want to import and `volumeMode: Block` so that an available block PV is used. + [source,yaml] ---- @@ -71,18 +64,17 @@ spec: requests: storage: <2Gi> ---- -<1> The name of the DataVolume. +<1> The name of the data volume. <2> Optional: Set the storage class or omit it to accept the cluster default. <3> The `HTTP` source of the image to import. <4> Only required if the data source requires authentication. <5> Required for importing to a block PV. -. Create the DataVolume to import the virtual machine image by running the following command: +. Create the data volume to import the virtual machine image by running the following command: + [source,terminal] ---- $ oc create -f <1> ---- -<1> The file name of the DataVolume that you created in the previous step. - +<1> The file name of the data volume that you created in the previous step. diff --git a/modules/virt-importing-vm-wizard.adoc b/modules/virt-importing-vm-wizard.adoc index 2d04db4864bc..76755ddfeff7 100644 --- a/modules/virt-importing-vm-wizard.adoc +++ b/modules/virt-importing-vm-wizard.adoc @@ -15,7 +15,7 @@ You can also import a VM template. If you import a VM template, {VirtProductName * You must have admin user privileges. * The VMware Virtual Disk Development Kit (VDDK) image must be in an image registry that is accessible to your {VirtProductName} environment. -* The VDDK image must be added to the `v2v-vmware` ConfigMap. +* The VDDK image must be added to the `v2v-vmware` config map. * The VM must be powered off. * Virtual disks must be connected to IDE or SCSI controllers. If virtual disks are connected to a SATA controller, you can change them to IDE controllers and then migrate the VM. * The {VirtProductName} local and shared persistent storage classes must support VM import.
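For context on the `endpoint-secret.yaml` step in the block persistent volume import procedure above, the following is a minimal sketch of what such a secret might contain; the key names (`accessKeyId`, `secretKey`) and the label are assumptions for illustration, and both values must be base64 encoded:

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: endpoint-secret                    # name you would reference from the data volume manifest if authentication is required
  labels:
    app: containerized-data-importer       # assumed label, for illustration only
type: Opaque
data:
  accessKeyId: ""                          # assumed key name; base64-encoded key or user name
  secretKey: ""                            # assumed key name; base64-encoded secret or password
----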
diff --git a/modules/virt-initiating-vm-migration-cli.adoc b/modules/virt-initiating-vm-migration-cli.adoc index 6f591cfa52e4..d329ec6c2a0b 100644 --- a/modules/virt-initiating-vm-migration-cli.adoc +++ b/modules/virt-initiating-vm-migration-cli.adoc @@ -5,14 +5,11 @@ [id="virt-initiating-vm-migration-cli_{context}"] = Initiating live migration of a virtual machine instance in the CLI -Initiate a live migration of a running virtual machine instance by creating a -`VirtualMachineInstanceMigration` object in the cluster and referencing the name - of the virtual machine instance. +Initiate a live migration of a running virtual machine instance by creating a `VirtualMachineInstanceMigration` object in the cluster and referencing the name of the virtual machine instance. .Procedure -. Create a `VirtualMachineInstanceMigration` configuration file for the -virtual machine instance to migrate. For example, `vmi-migrate.yaml`: +. Create a `VirtualMachineInstanceMigration` configuration file for the virtual machine instance to migrate. For example, `vmi-migrate.yaml`: + [source,yaml] ---- @@ -31,6 +28,5 @@ spec: $ oc create -f vmi-migrate.yaml ---- -The `VirtualMachineInstanceMigration` object triggers a live migration of the -virtual machine instance. This object exists in the cluster for as long as the -virtual machine instance is running, unless manually deleted. +The `VirtualMachineInstanceMigration` object triggers a live migration of the virtual machine instance. +This object exists in the cluster for as long as the virtual machine instance is running, unless manually deleted. diff --git a/modules/virt-listing-dvs.adoc b/modules/virt-listing-dvs.adoc index 359bf088b79d..44e59778156d 100644 --- a/modules/virt-listing-dvs.adoc +++ b/modules/virt-listing-dvs.adoc @@ -4,13 +4,13 @@ [id="virt-listing-dvs_{context}"] -= Listing all DataVolumes += Listing all data volumes -You can list the DataVolumes in your cluster by using the `oc` command-line interface. +You can list the data volumes in your cluster by using the `oc` command-line interface. .Procedure -* List all DataVolumes by running the following command: +* List all data volumes by running the following command: + [source,terminal] ---- diff --git a/modules/virt-monitoring-upgrade-status.adoc b/modules/virt-monitoring-upgrade-status.adoc index 87a393618814..9d728ce2e730 100644 --- a/modules/virt-monitoring-upgrade-status.adoc +++ b/modules/virt-monitoring-upgrade-status.adoc @@ -6,7 +6,7 @@ = Monitoring upgrade status The best way to monitor {VirtProductName} upgrade status is to watch the -ClusterServiceVersion (CSV) `PHASE`. You can also monitor the CSV conditions +cluster service version (CSV) `PHASE`. You can also monitor the CSV conditions in the web console or by running the command provided here. [NOTE] diff --git a/modules/virt-monitoring-vm-migration-cli.adoc b/modules/virt-monitoring-vm-migration-cli.adoc index a7ca14127503..d3f567f73c8b 100644 --- a/modules/virt-monitoring-vm-migration-cli.adoc +++ b/modules/virt-monitoring-vm-migration-cli.adoc @@ -5,8 +5,7 @@ [id="virt-monitoring-vm-migration-cli_{context}"] = Monitoring live migration of a virtual machine instance in the CLI -The status of the virtual machine migration is stored in the `Status` component -of the `VirtualMachineInstance` configuration. +The status of the virtual machine migration is stored in the `Status` component of the `VirtualMachineInstance` configuration. 
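For context on the paragraph above: the migration details typically surface under a `migrationState` stanza in the `VirtualMachineInstance` status. The excerpt below is an illustrative sketch only; field names and values can vary between KubeVirt versions.

[source,yaml]
----
# Illustrative excerpt of `oc get vmi <vmi_name> -o yaml` output; fields vary by version
status:
  migrationState:
    completed: true
    startTimestamp: "2020-11-18T14:00:05Z" # example timestamp
    endTimestamp: "2020-11-18T14:00:36Z" # example timestamp
    sourceNode: worker-0.example.com # placeholder node names
    targetNode: worker-1.example.com
----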
.Procedure diff --git a/modules/virt-networking-glossary.adoc b/modules/virt-networking-glossary.adoc index 1097f85125ad..9bdc3d9f9c7b 100644 --- a/modules/virt-networking-glossary.adoc +++ b/modules/virt-networking-glossary.adoc @@ -6,25 +6,20 @@ [id="virt-networking-glossary_{context}"] = {VirtProductName} networking glossary -{VirtProductName} provides advanced networking functionality by using custom -resources and plug-ins. +{VirtProductName} provides advanced networking functionality by using custom resources and plug-ins. The following terms are used throughout {VirtProductName} documentation: Container Network Interface (CNI):: a link:https://www.cncf.io/[Cloud Native Computing Foundation] -project, focused on container network connectivity. {VirtProductName} uses CNI -plug-ins to build upon the basic Kubernetes networking functionality. +project, focused on container network connectivity. +{VirtProductName} uses CNI plug-ins to build upon the basic Kubernetes networking functionality. -Multus:: a "meta" CNI plug-in that allows multiple CNIs to exist so that a Pod or -virtual machine can use the interfaces it needs. +Multus:: a "meta" CNI plug-in that allows multiple CNIs to exist so that a pod or virtual machine can use the interfaces it needs. -Custom Resource Definition (CRD):: a link:https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/[Kubernetes] -API resource that allows you to define custom resources, or an object defined by -using the CRD API resource. +Custom resource definition (CRD):: a link:https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/[Kubernetes] +API resource that allows you to define custom resources, or an object defined by using the CRD API resource. -NetworkAttachmentDefinition:: a CRD introduced by the Multus project that -allows you to attach pods, virtual machines, and virtual machine instances to one or more networks. +Network attachment definition:: a CRD introduced by the Multus project that allows you to attach pods, virtual machines, and virtual machine instances to one or more networks. -Preboot eXecution Environment (PXE):: an interface that enables an administrator -to boot a client machine from a server over the network. Network booting allows -you to remotely load operating systems and other software onto the client. +Preboot eXecution Environment (PXE):: an interface that enables an administrator to boot a client machine from a server over the network. +Network booting allows you to remotely load operating systems and other software onto the client. diff --git a/modules/virt-networking-wizard-fields-web.adoc b/modules/virt-networking-wizard-fields-web.adoc index eed5c5dbde1b..6d676c13ce53 100644 --- a/modules/virt-networking-wizard-fields-web.adoc +++ b/modules/virt-networking-wizard-fields-web.adoc @@ -13,20 +13,20 @@ |Name | Description |Name -|Name for the Network Interface Card. +|Name for the network interface card. |Model -|Indicates the model of the Network Interface Card. Supported values are *e1000e* and *virtio*. +|Indicates the model of the network interface card. Supported values are *e1000e* and *virtio*. |Network -|List of available NetworkAttachmentDefinition objects. +|List of available network attachment definitions. |Type -|List of available binding methods. For the default Pod network, `masquerade` +|List of available binding methods. For the default pod network, `masquerade` is the only recommended binding method. 
For secondary networks, use the `bridge` binding method. The `masquerade` method is not supported for non-default networks. |MAC Address -|MAC address for the Network Interface Card. If a MAC address is not specified, an ephemeral address is generated for the session. +|MAC address for the network interface card. If a MAC address is not specified, an ephemeral address is generated for the session. |=== diff --git a/modules/virt-openshift-client-commands.adoc index ffc58f921be9..c5f5215d9129 100644 --- a/modules/virt-openshift-client-commands.adoc +++ b/modules/virt-openshift-client-commands.adoc @@ -6,8 +6,7 @@ = {product-title} client commands The {product-title} `oc` client is a command-line utility for managing -{product-title} resources, including the virtual machine (`vm`) and virtual -machine instance (`vmi`) object types. + +{product-title} resources, including the `VirtualMachine` (`vm`) and `VirtualMachineInstance` (`vmi`) object types. + [NOTE] ==== You can use the `-n ` flag to specify a different project. @@ -29,7 +28,7 @@ You can use the `-n ` flag to specify a different project. |Display details of the specific resource in the current project. |`oc create -f ` -|Create a resource in the current project from a filename or from stdin. +|Create a resource in the current project from a file name or from stdin. |`oc edit ` |Edit a resource in the current project. diff --git a/modules/virt-preparing-container-disk-for-vms.adoc index b1073cd091b3..e060dedc699a 100644 --- a/modules/virt-preparing-container-disk-for-vms.adoc +++ b/modules/virt-preparing-container-disk-for-vms.adoc @@ -5,7 +5,7 @@ [id="virt-preparing-container-disk-for-vms_{context}"] = Preparing a container disk for virtual machines -You must build a container disk with a virtual machine image and push it to a container registry before it can used with a virtual machine. You can then either import the container disk into a PVC using a DataVolume and attach it to a virtual machine, or you can attach the container disk directly to a virtual machine as an ephemeral `containerDisk` volume. +You must build a container disk with a virtual machine image and push it to a container registry before it can be used with a virtual machine. You can then either import the container disk into a PVC using a data volume and attach it to a virtual machine, or you can attach the container disk directly to a virtual machine as an ephemeral `containerDisk` volume. .Prerequisites diff --git a/modules/virt-pxe-booting-with-mac-address.adoc index d6e38c562293..014541af3b92 100644 --- a/modules/virt-pxe-booting-with-mac-address.adoc +++ b/modules/virt-pxe-booting-with-mac-address.adoc @@ -5,12 +5,9 @@ [id="virt-pxe-booting-with-mac-address_{context}"] = PXE booting with a specified MAC address -As an administrator, you can boot a client over the network by first creating a -NetworkAttachmentDefinition object for your PXE network. Then, reference -the NetworkAttachmentDefinition in your virtual machine instance configuration -file before you start the virtual machine instance. You can also specify a MAC -address in the virtual machine instance configuration file, if required by the -PXE server. +As an administrator, you can boot a client over the network by first creating a `NetworkAttachmentDefinition` object for your PXE network. 
+Then, reference the network attachment definition in your virtual machine instance configuration file before you start the virtual machine instance. +You can also specify a MAC address in the virtual machine instance configuration file, if required by the PXE server. .Prerequisites @@ -21,7 +18,7 @@ PXE server. . Configure a PXE network on the cluster: -.. Create the NetworkAttachmentDefinition file for PXE network `pxe-net-conf`: +.. Create the network attachment definition file for PXE network `pxe-net-conf`: + [source,yaml] ---- @@ -48,25 +45,21 @@ spec: + [NOTE] ==== -The virtual machine instance will be attached to the bridge `br1` through an -access port with the requested VLAN. +The virtual machine instance will be attached to the bridge `br1` through an access port with the requested VLAN. ==== -. Create the NetworkAttachmentDefinition object by using the file you created -in the previous step: +. Create the network attachment definition by using the file you created in the previous step: + [source,terminal] ---- $ oc create -f pxe-net-conf.yaml ---- -. Edit the virtual machine instance configuration file to include the -details of the interface and network. +. Edit the virtual machine instance configuration file to include the details of the interface and network. .. Specify the network and MAC address, if required by the PXE server. -If the MAC address is not specified, a value is assigned -automatically. However, note that at this time, MAC addresses -assigned automatically are not persistent. +If the MAC address is not specified, a value is assigned automatically. +However, note that at this time, MAC addresses assigned automatically are not persistent. + Ensure that `bootOrder` is set to `1` so that the interface boots first. In this example, the interface is connected to a network called @@ -88,8 +81,7 @@ interfaces: Boot order is global for interfaces and disks. ==== -.. Assign a boot device number to the disk to ensure proper booting -after operating system provisioning. +.. Assign a boot device number to the disk to ensure proper booting after operating system provisioning. + Set the disk `bootOrder` value to `2`: + @@ -103,9 +95,7 @@ devices: bootOrder: 2 ---- -.. Specify that the network is connected to the previously created -NetworkAttachmentDefinition. In this scenario, `` is connected -to the NetworkAttachmentDefinition called ``: +.. Specify that the network is connected to the previously created network attachment definition. In this scenario, `` is connected to the network attachment definition called ``: + [source,yaml] ---- @@ -154,10 +144,8 @@ $ virtctl vnc vmi-pxe-boot $ virtctl console vmi-pxe-boot ---- -. Verify the interfaces and MAC address on the virtual machine and that the interface -connected to the bridge has the specified MAC address. In this -case, we used `eth1` for the PXE boot, without an IP address. The other -interface, `eth0`, got an IP address from {product-title}. +. Verify the interfaces and MAC address on the virtual machine and that the interface connected to the bridge has the specified MAC address. +In this case, we used `eth1` for the PXE boot, without an IP address. The other interface, `eth0`, got an IP address from {product-title}. 
+ [source,terminal] ---- diff --git a/modules/virt-removing-interface-from-nodes.adoc b/modules/virt-removing-interface-from-nodes.adoc index 0704f3f65f81..6fad4b89b6d7 100644 --- a/modules/virt-removing-interface-from-nodes.adoc +++ b/modules/virt-removing-interface-from-nodes.adoc @@ -10,9 +10,9 @@ the `state` of the interface to `absent`. [NOTE] ==== -Deleting the Policy that added an interface does not change the configuration of the network policy on the node. +Deleting the node network policy that added an interface does not change the configuration of the policy on the node. Although a `NodeNetworkConfigurationPolicy` is an object in the cluster, it only represents the requested configuration. + -Similarly, removing an interface does not delete the Policy. +Similarly, removing an interface does not delete the policy. ==== .Procedure @@ -34,15 +34,15 @@ spec: type: linux-bridge state: absent <4> ---- -<1> Name of the Policy. -<2> Optional: If you do not include the `nodeSelector`, the Policy applies to all nodes in the cluster. +<1> Name of the policy. +<2> Optional: If you do not include the `nodeSelector` parameter, the policy applies to all nodes in the cluster. <3> This example uses the `node-role.kubernetes.io/worker: ""` node selector to select all worker nodes in the cluster. <4> Changing the state to `absent` removes the interface. -. Update the Policy on the node and remove the interface: +. Update the policy on the node and remove the interface: + [source,terminal] ---- $ oc apply -f <1> ---- -<1> File name of the Policy manifest. +<1> File name of the policy manifest. diff --git a/modules/virt-removing-secret-configmap-service-account-vm.adoc b/modules/virt-removing-secret-configmap-service-account-vm.adoc index b73c9cffa382..7a9cba3b5229 100644 --- a/modules/virt-removing-secret-configmap-service-account-vm.adoc +++ b/modules/virt-removing-secret-configmap-service-account-vm.adoc @@ -4,13 +4,13 @@ [id="virt-removing-secret-configmap-service-account-vm_{context}"] -= Removing a secret, ConfigMap, or service account from a virtual machine += Removing a secret, config map, or service account from a virtual machine -Remove a secret, ConfigMap, or service account from a virtual machine by using the {product-title} web console. +Remove a secret, config map, or service account from a virtual machine by using the {product-title} web console. .Prerequisites -* You must have at least one secret, ConfigMap, or service account +* You must have at least one secret, config map, or service account that is attached to a virtual machine. .Procedure @@ -37,4 +37,4 @@ You can reset the form to the last saved state by clicking *Reload*. . From the *Virtual Machine Overview* page, click the *Disks* tab. -. Check to ensure that the secret, ConfigMap, or service account that you removed is no longer included in the list of disks. +. Check to ensure that the secret, config map, or service account that you removed is no longer included in the list of disks. diff --git a/modules/virt-restoring-node-network-configuration.adoc b/modules/virt-restoring-node-network-configuration.adoc index fcd48bb471d4..169b4d8f622a 100644 --- a/modules/virt-restoring-node-network-configuration.adoc +++ b/modules/virt-restoring-node-network-configuration.adoc @@ -34,4 +34,4 @@ spec: ---- $ oc apply -f <1> ---- -<1> File name of the Policy manifest. +<1> File name of the policy manifest. 
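The interface-removal hunks above show only fragments of the policy manifest. A minimal sketch of a `NodeNetworkConfigurationPolicy` that removes an interface by setting its state to `absent` might look like the following; the policy name, node selector, and interface name are placeholders, and the `apiVersion` can differ between nmstate releases.

[source,yaml]
----
apiVersion: nmstate.io/v1alpha1 # version can differ between nmstate releases
kind: NodeNetworkConfigurationPolicy
metadata:
  name: br1-removal-policy # placeholder policy name
spec:
  nodeSelector: # optional; omit to apply the policy to all nodes
    node-role.kubernetes.io/worker: ""
  desiredState:
    interfaces:
      - name: br1 # placeholder interface name
        type: linux-bridge
        state: absent # removes the interface
----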
diff --git a/modules/virt-setting-node-maintenance-cli.adoc b/modules/virt-setting-node-maintenance-cli.adoc index 7e4c39fd3d19..4e3358a36a27 100644 --- a/modules/virt-setting-node-maintenance-cli.adoc +++ b/modules/virt-setting-node-maintenance-cli.adoc @@ -5,7 +5,7 @@ [id="virt-setting-node-maintenance-cli_{context}"] = Setting a node to maintenance mode in the CLI -Set a node to maintenance mode by creating a `NodeMaintenance` Custom Resource +Set a node to maintenance mode by creating a `NodeMaintenance` custom resource (CR) object that references the node name and the reason for setting it to maintenance mode. diff --git a/modules/virt-storage-wizard-fields-web.adoc b/modules/virt-storage-wizard-fields-web.adoc index b3fd10301003..a0e18d161401 100644 --- a/modules/virt-storage-wizard-fields-web.adoc +++ b/modules/virt-storage-wizard-fields-web.adoc @@ -11,7 +11,7 @@ |Name | Description |Source -|Select a blank disk for the virtual machine or choose from the options available: *URL*, *Container*, *Attach Cloned Disk*, or *Attach Disk*. To select an existing disk and attach it to the virtual machine, choose *Attach Cloned Disk* or *Attach Disk* from a list of available PersistentVolumeClaims (PVCs). +|Select a blank disk for the virtual machine or choose from the options available: *URL*, *Container*, *Attach Cloned Disk*, or *Attach Disk*. To select an existing disk and attach it to the virtual machine, choose *Attach Cloned Disk* or *Attach Disk* from a list of available persistent volume claims (PVCs). |Name |Name of the disk. The name can contain lowercase letters (`a-z`), numbers (`0-9`), hyphens (`-`), and periods (`.`), up to a maximum of 253 characters. The first and last characters must be alphanumeric. The name must not contain uppercase letters, spaces, or special characters. @@ -23,10 +23,10 @@ |Type of disk device. Supported interfaces are *virtIO*, *SATA*, and *SCSI*. |Storage Class -|The `StorageClass` that is used to create the disk. +|The storage class that is used to create the disk. |Advanced -> Volume Mode -|Defines whether the persistent volume uses a formatted filesystem or raw block state. Default is *Filesystem*. +|Defines whether the persistent volume uses a formatted file system or raw block state. Default is *Filesystem*. |Advanced -> Access Mode |Access mode of the persistent volume. Supported access modes are *ReadWriteOnce*, *ReadOnlyMany*, and *ReadWriteMany*. @@ -37,7 +37,7 @@ [discrete] == Advanced storage settings The following advanced storage settings are available for *Blank*, *URL*, and *Attach Cloned Disk* disks. -These parameters are optional. If you do not specify these parameters, the system uses the default values from the `kubevirt-storage-class-defaults` ConfigMap. +These parameters are optional. If you do not specify these parameters, the system uses the default values from the `kubevirt-storage-class-defaults` config map. [cols="2a,3a,5a"] |=== @@ -45,7 +45,7 @@ These parameters are optional. If you do not specify these parameters, the syste .2+|Volume Mode |Filesystem -|Stores the virtual disk on a filesystem-based volume. +|Stores the virtual disk on a file system-based volume. |Block |Stores the virtual disk directly on the block volume. Only use `Block` if the underlying storage supports it. 
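The node maintenance hunk above references a `NodeMaintenance` custom resource without showing its body. A minimal sketch, assuming the `nodemaintenance.kubevirt.io` API group used by this release, might look like the following; the node name and reason are placeholders.

[source,yaml]
----
apiVersion: nodemaintenance.kubevirt.io/v1beta1 # API group and version assumed; verify for your release
kind: NodeMaintenance
metadata:
  name: node02-maintenance # placeholder name
spec:
  nodeName: node02 # placeholder node to place into maintenance mode
  reason: "Replacing a faulty disk" # placeholder reason
----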
diff --git a/modules/virt-template-blank-disk-datavolume.adoc b/modules/virt-template-blank-disk-datavolume.adoc index ef201b3b3cb1..76d643f328f1 100644 --- a/modules/virt-template-blank-disk-datavolume.adoc +++ b/modules/virt-template-blank-disk-datavolume.adoc @@ -3,7 +3,7 @@ // * virt/virtual_machines/virtual_disks/virt-expanding-virtual-storage-with-blank-disk-images.adoc [id="virt-template-blank-disk-datavolume_{context}"] -= Template: DataVolume configuration file for blank disk images += Template: Data volume configuration file for blank disk images *blank-image-datavolume.yaml* [source,yaml] @@ -23,4 +23,4 @@ spec: resources: requests: storage: 500Mi ----- \ No newline at end of file +---- diff --git a/modules/virt-template-datavolume-clone.adoc b/modules/virt-template-datavolume-clone.adoc index 76a9c8ed4131..0eddcbf661ca 100644 --- a/modules/virt-template-datavolume-clone.adoc +++ b/modules/virt-template-datavolume-clone.adoc @@ -3,7 +3,7 @@ // * virt/virtual_machines/cloning_vms/virt-cloning-vm-disk-into-new-datavolume.adoc [id="virt-template-datavolume-clone_{context}"] -= Template: DataVolume clone configuration file += Template: Data volume clone configuration file *example-clone-dv.yaml* [source,yaml] @@ -23,4 +23,4 @@ spec: resources: requests: storage: "1G" ----- \ No newline at end of file +---- diff --git a/modules/virt-template-datavolume-import.adoc b/modules/virt-template-datavolume-import.adoc index 20dda8b71168..85e32e5ae065 100644 --- a/modules/virt-template-datavolume-import.adoc +++ b/modules/virt-template-datavolume-import.adoc @@ -3,7 +3,7 @@ // * virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes.adoc [id="virt-template-datavolume-import_{context}"] -= Template: DataVolume import configuration file += Template: Data volume import configuration file *example-import-dv.yaml* [source,yaml] @@ -25,4 +25,4 @@ spec: storage: "1G" ---- <1> The `HTTP` source of the image you want to import. -<2> The `secretRef` parameter is optional. \ No newline at end of file +<2> The `secretRef` parameter is optional. diff --git a/modules/virt-template-datavolume-vm.adoc b/modules/virt-template-datavolume-vm.adoc index 8ac427143c36..02e2f5233411 100644 --- a/modules/virt-template-datavolume-vm.adoc +++ b/modules/virt-template-datavolume-vm.adoc @@ -4,7 +4,7 @@ // * virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes.adoc [id="virt-template-datavolume-vm_{context}"] -= Template: DataVolume virtual machine configuration file += Template: Data volume virtual machine configuration file *example-dv-vm.yaml* [source,yaml] @@ -54,4 +54,4 @@ spec: name: example-dv name: example-dv-disk ---- -<1> The `HTTP` source of the image you want to import, if applicable. \ No newline at end of file +<1> The `HTTP` source of the image you want to import, if applicable. 
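The clone template above (`example-clone-dv.yaml`) has its YAML body elided in this hunk. For reference, a sketch of a data volume clone manifest might look like the following; the source namespace and PVC name are placeholders, and the `apiVersion` can differ between CDI releases.

[source,yaml]
----
apiVersion: cdi.kubevirt.io/v1alpha1 # version can differ between CDI releases
kind: DataVolume
metadata:
  name: example-clone-dv
spec:
  source:
    pvc:
      namespace: source-namespace # placeholder: namespace of the source PVC
      name: source-pvc # placeholder: name of the source PVC
  pvc:
    accessModes:
      - ReadWriteOnce
    resources:
      requests:
        storage: "1G"
----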
diff --git a/modules/virt-template-vm-config.adoc b/modules/virt-template-vm-config.adoc index 655f8bf984b0..cdf45fd3032b 100644 --- a/modules/virt-template-vm-config.adoc +++ b/modules/virt-template-vm-config.adoc @@ -3,7 +3,7 @@ // * virt/virtual_machines/vm_networking/virt-using-the-default-pod-network-with-virt.adoc [id="virt-template-vm-config_{context}"] -= Template: virtual machine configuration file += Template: Virtual machine configuration file [source,yaml] ---- diff --git a/modules/virt-template-vmi-pxe-config.adoc b/modules/virt-template-vmi-pxe-config.adoc index 4da3babbab02..adff4effd75a 100644 --- a/modules/virt-template-vmi-pxe-config.adoc +++ b/modules/virt-template-vmi-pxe-config.adoc @@ -3,7 +3,7 @@ // * virt/virtual_machines/advanced_vm_management/virt-configuring-pxe-booting.adoc [id="virt-pxe-vmi-template_{context}"] -= Template: virtual machine instance configuration file for PXE booting += Template: Virtual machine instance configuration file for PXE booting [source,yaml] ---- diff --git a/modules/virt-troubleshooting-incorrect-policy-config.adoc b/modules/virt-troubleshooting-incorrect-policy-config.adoc index ced196116a5a..0d4d73b5a607 100644 --- a/modules/virt-troubleshooting-incorrect-policy-config.adoc +++ b/modules/virt-troubleshooting-incorrect-policy-config.adoc @@ -3,19 +3,18 @@ // * virt/node_network/virt-troubleshooting-node-network.adoc [id="virt-troubleshooting-incorrect-policy-config_{context}"] -= Troubleshooting an incorrect NodeNetworkConfigurationPolicy configuration += Troubleshooting an incorrect node network configuration policy configuration -You can apply changes to the node network configuration across your entire cluster by applying a -NodeNetworkConfigurationPolicy. If you apply an incorrect configuration, you can use the -following example to troubleshoot and correct the failed network Policy. +You can apply changes to the node network configuration across your entire cluster by applying a node network configuration policy. +If you apply an incorrect configuration, you can use the following example to troubleshoot and correct the failed node network policy. -In this example, a Linux bridge Policy is applied to an example cluster that has 3 master nodes and 3 worker nodes. -The Policy fails to be applied because it references an incorrect interface. To find the error, investigate -the available nmstate resources. You can then update the Policy with the correct configuration. +In this example, a Linux bridge policy is applied to an example cluster that has 3 master nodes and 3 worker nodes. +The policy fails to be applied because it references an incorrect interface. +To find the error, investigate the available nmstate resources. You can then update the policy with the correct configuration. .Procedure -. Create a Policy and apply it to your cluster. The following example creates a simple bridge on the `ens01` interface: +. Create a policy and apply it to your cluster. The following example creates a simple bridge on the `ens01` interface: + [source,yaml] ---- @@ -52,14 +51,14 @@ $ oc apply -f ens01-bridge-testfail.yaml nodenetworkconfigurationpolicy.nmstate.io/ens01-bridge-testfail created ---- -. Verify the status of the Policy by running the following command: +. 
Verify the status of the policy by running the following command: + [source,terminal] ---- $ oc get nncp ---- + -The output shows that the Policy failed: +The output shows that the policy failed: + .Example output [source,terminal] @@ -68,16 +67,16 @@ NAME STATUS ens01-bridge-testfail FailedToConfigure ---- + -However the Policy status alone does not indicate if it failed on all nodes or a subset of nodes. +However, the policy status alone does not indicate if it failed on all nodes or a subset of nodes. -. List the Enactments to see if the Policy was successful on any of the nodes. If the Policy failed for only a subset it suggests the problem is with specific node configuration; if the Policy failed on all nodes it suggest the problem is with the Policy. +. List the node network configuration enactments to see if the policy was successful on any of the nodes. If the policy failed for only a subset of nodes, it suggests that the problem is with a specific node configuration. If the policy failed on all nodes, it suggests that the problem is with the policy. + [source,terminal] ---- $ oc get nnce ---- + -The output shows that the Policy failed on all nodes: +The output shows that the policy failed on all nodes: + .Example output [source,terminal] @@ -91,7 +90,7 @@ worker-2.ens01-bridge-testfail FailedToConfigure worker-3.ens01-bridge-testfail FailedToConfigure ---- -. View one of the failed Enactments and look at the traceback. The following command uses the output tool `jsonpath` to filter the output: +. View one of the failed enactments and look at the traceback. The following command uses the output tool `jsonpath` to filter the output: + [source,terminal] ---- @@ -186,15 +185,15 @@ difference line 651, in _assert_interfaces_equal\n current_state.interfaces[ifname],\nlibnmstate.error.NmstateVerificationError: ---- + -The `NmstateVerificationError` lists the `desired` Policy configuration, the `current` configuration of the Policy on the node, and the `difference` highlighting the parameters that do not match. In this example, the `port` is included in the `difference`, which suggests that the problem is the port configuration in the Policy. +The `NmstateVerificationError` lists the `desired` policy configuration, the `current` configuration of the policy on the node, and the `difference` highlighting the parameters that do not match. In this example, the `port` is included in the `difference`, which suggests that the problem is the port configuration in the policy. -. To ensure that the Policy is configured properly, view the network configuration for one or all of the nodes by requesting the `NodeNetworkState`. The following command returns the network configuration for the `master-1` node: +. To ensure that the policy is configured properly, view the network configuration for one or all of the nodes by requesting the `NodeNetworkState` object. The following command returns the network configuration for the `master-1` node: + ---- $ oc get nns master-1 -o yaml ---- + -The output shows that the interface name on the nodes is `ens1` but the failed Policy incorrectly uses `ens01`: +The output shows that the interface name on the nodes is `ens1` but the failed policy incorrectly uses `ens01`: + .Example output [source,yaml] @@ -206,7 +205,7 @@ The output shows that the interface name on the nodes is `ens1` but the failed P type: ethernet ---- -. Correct the error by editing the existing Policy: +. 
Correct the error by editing the existing policy: + [source,terminal] ---- @@ -220,9 +219,9 @@ $ oc edit nncp ens01-bridge-testfail - name: ens1 ---- + -Save the Policy to apply the correction. +Save the policy to apply the correction. -. Check the status of the Policy to ensure it updated successfully: +. Check the status of the policy to ensure it updated successfully: + [source,terminal] ---- @@ -236,4 +235,4 @@ NAME STATUS ens01-bridge-testfail SuccessfullyConfigured ---- -The updated Policy is successfully configured on all nodes in the cluster. +The updated policy is successfully configured on all nodes in the cluster. diff --git a/modules/virt-troubleshooting-vm-import.adoc b/modules/virt-troubleshooting-vm-import.adoc index 48cfbad49984..c4c46659a279 100644 --- a/modules/virt-troubleshooting-vm-import.adoc +++ b/modules/virt-troubleshooting-vm-import.adoc @@ -8,11 +8,11 @@ == Logs ifdef::virt-importing-vmware-vm[] -You can check the V2V Conversion Pod log for errors. +You can check the V2V Conversion pod log for errors. .Procedure -. View the V2V Conversion Pod name by running the following command: +. View the V2V Conversion pod name by running the following command: + [source,terminal] ---- @@ -26,20 +26,20 @@ $ oc get pods -n | grep v2v <1> kubevirt-v2v-conversion-f66f7d-zqkz7 1/1 Running 0 4h49m ---- -. View the V2V Conversion Pod log by running the following command: +. View the V2V Conversion pod log by running the following command: + [source,terminal] ---- $ oc logs -f -n <1> ---- -<1> Specify the VM Conversion Pod name and the namespace. +<1> Specify the VM Conversion pod name and the namespace. endif::[] ifdef::virt-importing-rhv-vm[] -You can check the VM Import Controller Pod log for errors. +You can check the VM Import Controller pod log for errors. .Procedure -. View the VM Import Controller Pod name by running the following command: +. View the VM Import Controller pod name by running the following command: + [source,terminal] ---- @@ -53,13 +53,13 @@ $ oc get pods -n | grep import <1> vm-import-controller-f66f7d-zqkz7 1/1 Running 0 4h49m ---- -. View the VM Import Controller Pod log by running the following command: +. View the VM Import Controller pod log by running the following command: + [source,terminal] ---- $ oc logs -f -n <1> ---- -<1> Specify the VM Import Controller Pod name and the namespace. +<1> Specify the VM Import Controller pod name and the namespace. 
endif::[] [id='error-messages_{context}'] @@ -68,14 +68,14 @@ endif::[] ifdef::virt-importing-rhv-vm[] The following error messages might appear: -* The following error message is displayed in the VM Import Controller Pod log if the target VM name exceeds 63 characters link:https://bugzilla.redhat.com/show_bug.cgi?id=1857165[(*BZ#1857165*)]: +* The following error message is displayed in the VM Import Controller pod log if the target VM name exceeds 63 characters link:https://bugzilla.redhat.com/show_bug.cgi?id=1857165[(*BZ#1857165*)]: + ---- Message: Error while importing disk image Reason: ProcessingFailed ---- -* The following error message is displayed in the VM Import Controller Pod log and the progress bar stops at 10% if the {VirtProductName} storage PV is not suitable: +* The following error message is displayed in the VM Import Controller pod log and the progress bar stops at 10% if the {VirtProductName} storage PV is not suitable: + ---- Failed to bind volumes: provisioning failed for PVC @@ -93,9 +93,9 @@ VMTemplateMatchingFailed: Couldn't find matching template + You can perform the following actions to fix this problem: -** Change the RHV VM operating system to an operating system that exists in the default `vm-import-controller` ConfigMap. -** If you created a custom ConfigMap, check the ConfigMap to verify that the RHV VM operating system is mapped to a matching {VirtProductName} common template. -** If there is no matching {VirtProductName} common template, create an appropriate VM template in the {VirtProductName} console and then create a custom ConfigMap to map the RHV VM operating system to the new template. +** Change the RHV VM operating system to an operating system that exists in the default `vm-import-controller` config map. +** If you created a custom config map, check the config map to verify that the RHV VM operating system is mapped to a matching {VirtProductName} common template. +** If there is no matching {VirtProductName} common template, create an appropriate VM template in the {VirtProductName} console and then create a custom config map to map the RHV VM operating system to the new template. * The migration will hang at the *Starting Red Hat Virtualization (RHV) controller* message in the {VirtProductName} console if a non-admin user tries to import a VM. Only an admin user has permission to import a VM. endif::[] @@ -104,7 +104,7 @@ endif::[] ifdef::virt-importing-vmware-vm[] The following error message might appear: -* If the VMware VM is not shut down before import, the imported virtual machine displays the error message, `Readiness probe failed` in the {product-title} console and the V2V Conversion Pod log displays the following error message: +* If the VMware VM is not shut down before import, the imported virtual machine displays the error message, `Readiness probe failed` in the {product-title} console and the V2V Conversion pod log displays the following error message: + ---- INFO - have error: ('virt-v2v error: internal error: invalid argument: libvirt domain ‘v2v_migration_vm_1’ is running or paused. 
It must be shut down in order to perform virt-v2v conversion',)" @@ -114,7 +114,7 @@ ifeval::["{VirtVersion}" == "2.4"] * When you select the VMware provider, the following warning message is displayed: + ---- -Warning alert:Could not load ConfigMap vmware-to-kubevirt-os in kube-public namespace +Warning alert:Could not load config map vmware-to-kubevirt-os in kube-public namespace Configmaps "vmware-to-kubevirt-os" not found ---- + @@ -125,7 +125,7 @@ endif::[] + [source,terminal] ---- -Could not load ConfigMap vmware-to-kubevirt-os in kube-public namespace +Could not load config map vmware-to-kubevirt-os in kube-public namespace Restricted Access: configmaps "vmware-to-kubevirt-os" is forbidden: User cannot get resource "configmaps" in API group "" in the namespace "kube-public" ---- + diff --git a/modules/virt-understanding-live-migration.adoc b/modules/virt-understanding-live-migration.adoc index 24ac3e40ca0f..ded9d988cf62 100644 --- a/modules/virt-understanding-live-migration.adoc +++ b/modules/virt-understanding-live-migration.adoc @@ -14,6 +14,6 @@ which it is running is placed into maintenance. [IMPORTANT] ==== -Virtual machines must have a PersistentVolumeClaim (PVC) +Virtual machines must have a persistent volume claim (PVC) with a shared ReadWriteMany (RWX) access mode to be live migrated. ==== diff --git a/modules/virt-understanding-logs.adoc b/modules/virt-understanding-logs.adoc index 80f3a59a4a7c..41c87335149e 100644 --- a/modules/virt-understanding-logs.adoc +++ b/modules/virt-understanding-logs.adoc @@ -5,19 +5,19 @@ [id="virt-understanding-logs_{context}"] = Understanding virtual machine logs -Logs are collected for {product-title} Builds, Deployments, and pods. +Logs are collected for {product-title} builds, deployments, and pods. In {VirtProductName}, virtual machine logs can be retrieved from the -virtual machine launcher Pod in either the web console or the CLI. +virtual machine launcher pod in either the web console or the CLI. The `-f` option follows the log output in real time, which is useful for monitoring progress and error checking. -If the launcher Pod is failing to start, use the +If the launcher pod is failing to start, use the `--previous` option to see the logs of the last attempt. [WARNING] ==== `ErrImagePull` and `ImagePullBackOff` errors can be caused by -an incorrect Deployment configuration or problems with the images that are +an incorrect deployment configuration or problems with the images that are referenced. ==== diff --git a/modules/virt-understanding-node-labeling-obsolete-cpu-models.adoc b/modules/virt-understanding-node-labeling-obsolete-cpu-models.adoc index 8d600434a04b..51f285d47595 100644 --- a/modules/virt-understanding-node-labeling-obsolete-cpu-models.adoc +++ b/modules/virt-understanding-node-labeling-obsolete-cpu-models.adoc @@ -4,11 +4,11 @@ [id="virt-understanding-node-labeling-obsolete-cpu-models_{context}"] = Understanding node labeling for obsolete CPU models -To ensure that a node supports only valid CPU models for scheduled VMs, create a ConfigMap with a list of obsolete CPU models. When the `node-labeller` obtains the list of obsolete CPU models, it eliminates those CPU models and creates labels for valid CPU models. +To ensure that a node supports only valid CPU models for scheduled VMs, create a config map with a list of obsolete CPU models. When the `node-labeller` obtains the list of obsolete CPU models, it eliminates those CPU models and creates labels for valid CPU models. 
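The obsolete CPU models config map itself is not shown in these hunks. The sketch below is purely illustrative; the config map name, data key, and list format are assumptions and should be checked against the `node-labeller` documentation for your release.

[source,yaml]
----
apiVersion: v1
kind: ConfigMap
metadata:
  name: cpu-plugin-configmap # assumed name; verify for your release
data:
  cpu-plugin-configmap: | # assumed data key; verify for your release
    obsoleteCPUs:
      - "486"
      - "pentium"
      - "qemu64"
    minCPU: "Penryn"
----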
[NOTE] ==== -If you do not configure a ConfigMap with a list of obsolete CPU models, all CPU models are evaluated for labels, including obsolete CPU models that are not present in your environment. +If you do not configure a config map with a list of obsolete CPU models, all CPU models are evaluated for labels, including obsolete CPU models that are not present in your environment. ==== Through the process of iteration, the list of base CPU features in the minimum CPU model are eliminated from the list of labels generated for the node. For example, an environment might have two supported CPU models: `Penryn` and `Haswell`. diff --git a/modules/virt-understanding-node-maintenance.adoc b/modules/virt-understanding-node-maintenance.adoc index 6c544facc941..6271177c9cf9 100644 --- a/modules/virt-understanding-node-maintenance.adoc +++ b/modules/virt-understanding-node-maintenance.adoc @@ -18,7 +18,7 @@ node and recreated on another node. [IMPORTANT] ==== -Virtual machines must have a PersistentVolumeClaim (PVC) with a shared +Virtual machines must have a persistent volume claim (PVC) with a shared ReadWriteMany (RWX) access mode to be live migrated. ==== diff --git a/modules/virt-understanding-scratch-space.adoc b/modules/virt-understanding-scratch-space.adoc index fffe36f3e0c9..7bfa3309f05a 100644 --- a/modules/virt-understanding-scratch-space.adoc +++ b/modules/virt-understanding-scratch-space.adoc @@ -5,33 +5,23 @@ [id="virt-understanding-scratch-space_{context}"] = Understanding scratch space -The Containerized Data Importer (CDI) requires scratch space (temporary storage) -to complete some operations, such as importing and uploading virtual machine images. -During this process, the CDI provisions a scratch space PVC equal to the size of -the PVC backing the destination DataVolume (DV). The scratch space PVC is deleted -after the operation completes or aborts. +The Containerized Data Importer (CDI) requires scratch space (temporary storage) to complete some operations, such as importing and uploading virtual machine images. +During this process, the CDI provisions a scratch space PVC equal to the size of the PVC backing the destination data volume (DV). +The scratch space PVC is deleted after the operation completes or aborts. -The CDIConfig object allows you to define which StorageClass to use to bind the -scratch space PVC by setting the `scratchSpaceStorageClass` in the `spec:` -section of the CDIConfig object. +The `CDIConfig` object allows you to define which storage class to use to bind the scratch space PVC by setting the `scratchSpaceStorageClass` in the `spec:` section of the `CDIConfig` object. -If the defined StorageClass does not match a StorageClass in the cluster, then -the default StorageClass defined for the cluster is used. If there is no -default StorageClass defined in the cluster, the StorageClass used to provision -the original DV or PVC is used. +If the defined storage class does not match a storage class in the cluster, then the default storage class defined for the cluster is used. +If there is no default storage class defined in the cluster, the storage class used to provision the original DV or PVC is used. [NOTE] ==== -The CDI requires requesting scratch space with a `file` volume mode, regardless -of the PVC backing the origin DataVolume. If the origin PVC is backed by -`block` volume mode, you must define a StorageClass capable of provisioning -`file` volume mode PVCs. 
+The CDI requires requesting scratch space with a `file` volume mode, regardless of the PVC backing the origin data volume. +If the origin PVC is backed by `block` volume mode, you must define a storage class capable of provisioning `file` volume mode PVCs. ==== [discrete] == Manual provisioning -If there are no storage classes, the CDI will use any PVCs in the project that -match the size requirements for the image. If there are no PVCs that match these -requirements, the CDI import pod will remain in a *Pending* state until an -appropriate PVC is made available or until a timeout function kills the pod. +If there are no storage classes, the CDI will use any PVCs in the project that match the size requirements for the image. +If there are no PVCs that match these requirements, the CDI import pod will remain in a *Pending* state until an appropriate PVC is made available or until a timeout function kills the pod. diff --git a/modules/virt-understanding-smart-cloning.adoc b/modules/virt-understanding-smart-cloning.adoc index afcde49337f2..c2109687b3be 100644 --- a/modules/virt-understanding-smart-cloning.adoc +++ b/modules/virt-understanding-smart-cloning.adoc @@ -5,8 +5,8 @@ [id="virt-understanding-smart-cloning_{context}"] = Understanding smart-cloning -When a DataVolume is smart-cloned, the following occurs: +When a data volume is smart-cloned, the following occurs: -. A snapshot of the source PersistentVolumeClaim (PVC) is created. +. A snapshot of the source persistent volume claim (PVC) is created. . A PVC is created from the snapshot. . The snapshot is deleted. diff --git a/modules/virt-updating-access-mode-for-live-migration.adoc b/modules/virt-updating-access-mode-for-live-migration.adoc index 4c44d676c183..1fa36dcdb4b1 100644 --- a/modules/virt-updating-access-mode-for-live-migration.adoc +++ b/modules/virt-updating-access-mode-for-live-migration.adoc @@ -3,9 +3,9 @@ // * virt/live_migration/virt-live-migration.adoc [id="virt-updating-access-mode-for-live-migration_{context}"] -= Updating access mode for LiveMigration += Updating access mode for live migration -For LiveMigration to function properly, you must use the +For live migration to function properly, you must use the ReadWriteMany (RWX) access mode. Use this procedure to update the access mode, if needed. diff --git a/modules/virt-uploading-image-web.adoc b/modules/virt-uploading-image-web.adoc index 57d0cc9bcc7d..00d07e128884 100644 --- a/modules/virt-uploading-image-web.adoc +++ b/modules/virt-uploading-image-web.adoc @@ -5,7 +5,7 @@ [id="virt-uploading-image-web_{context}"] = Uploading an image file using the web console -Use the web console to upload an image file to a new Persistent Volume Claim (PVC). +Use the web console to upload an image file to a new persistent volume claim (PVC). You can later use this PVC to attach the image to new virtual machines. .Prerequisites diff --git a/modules/virt-uploading-local-disk-image-dv.adoc b/modules/virt-uploading-local-disk-image-dv.adoc index 8b1736ad389a..a44e44d03db4 100644 --- a/modules/virt-uploading-local-disk-image-dv.adoc +++ b/modules/virt-uploading-local-disk-image-dv.adoc @@ -3,10 +3,10 @@ // * virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-block.adoc [id="virt-uploading-local-disk-image-dv_{context}"] -= Uploading a local disk image to a DataVolume += Uploading a local disk image to a data volume You can use the `virtctl` CLI utility to upload a local disk image from -a client machine to a DataVolume (DV) in your cluster. 
You can use a DV that +a client machine to a data volume (DV) in your cluster. You can use a DV that already exists in your cluster or create a new DV during this procedure. [NOTE] @@ -40,9 +40,9 @@ certificate. .Procedure . Identify the following items: -* The name of the upload DataVolume that you want to use. If this DataVolume +* The name of the upload data volume that you want to use. If this data volume does not exist, it is created automatically. -* The size of the DataVolume, if you want it to be created during the upload +* The size of the data volume, if you want it to be created during the upload procedure. The size must be greater than or equal to the size of the disk image. * The file location of the virtual machine disk image that you want to upload. @@ -56,13 +56,13 @@ $ virtctl image-upload dv \ <1> --size= \ <2> --image-path= \ <3> ---- -<1> The name of the DataVolume. -<2> The size of the DataVolume. For example: `--size=500Mi`, `--size=1G` +<1> The name of the data volume. +<2> The size of the data volume. For example: `--size=500Mi`, `--size=1G` <3> The file path of the virtual machine disk image. + [NOTE] ==== -* If you do not want to create a new DataVolume, omit the `--size` parameter and +* If you do not want to create a new data volume, omit the `--size` parameter and include the `--no-create` flag. * To allow insecure server connections when using HTTPS, use the `--insecure` @@ -70,7 +70,7 @@ parameter. Be aware that when you use the `--insecure` flag, the authenticity of the upload endpoint is *not* verified. ==== -. Optional. To verify that a DataVolume was created, view all DataVolume objects +. Optional. To verify that a data volume was created, view all data volumes by running the following command: + [source,terminal] diff --git a/modules/virt-uploading-local-disk-image-pvc.adoc b/modules/virt-uploading-local-disk-image-pvc.adoc index 005f329b1dbe..fb87c6fad0f2 100644 --- a/modules/virt-uploading-local-disk-image-pvc.adoc +++ b/modules/virt-uploading-local-disk-image-pvc.adoc @@ -3,11 +3,9 @@ // * virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-virtctl.adoc [id="virt-uploading-local-disk-image-pvc_{context}"] -= Uploading a local disk image to a new PersistentVolumeClaim += Uploading a local disk image to a new persistent volume claim -You can use the `virtctl` CLI utility to upload a virtual machine disk image from -a client machine to your cluster. Uploading the disk image creates a -PersistentVolumeClaim (PVC) that you can associate with a virtual machine. +You can use the `virtctl` CLI utility to upload a virtual machine disk image from a client machine to your cluster. Uploading the disk image creates a persistent volume claim (PVC) that you can associate with a virtual machine. .Prerequisites @@ -29,8 +27,7 @@ link:https://libguestfs.org/virt-sparsify.1.html[virt-sparsify] tool. * The `kubevirt-virtctl` package must be installed on the client machine. -* The client machine must be configured to trust the {product-title} router's -certificate. +* The client machine must be configured to trust the {product-title} router's certificate. .Procedure @@ -38,8 +35,7 @@ certificate. * File location of the VM disk image you want to upload. * Name and size required for the resulting PVC. -. Run the `virtctl image-upload` command to upload your VM image. -You must specify the PVC name, PVC size, and file location. For example: +. Run the `virtctl image-upload` command to upload your VM image. 
You must specify the PVC name, PVC size, and file location. For example: + [source,terminal] ---- @@ -48,9 +44,7 @@ $ virtctl image-upload --pvc-name= --pvc-size=<2Gi> --image-p + [WARNING] ==== -To allow insecure server connections when using HTTPS, use the `--insecure` -parameter. Be aware that when you use the `--insecure` flag, the authenticity of -the upload endpoint is *not* verified. +To allow insecure server connections when using HTTPS, use the `--insecure` parameter. Be aware that when you use the `--insecure` flag, the authenticity of the upload endpoint is *not* verified. ==== . To verify that the PVC was created, view all PVC objects: diff --git a/modules/virt-using-hostpath-provisioner.adoc b/modules/virt-using-hostpath-provisioner.adoc index 9c792e042f45..e5236c46c63b 100644 --- a/modules/virt-using-hostpath-provisioner.adoc +++ b/modules/virt-using-hostpath-provisioner.adoc @@ -6,11 +6,11 @@ = Using the hostpath provisioner to enable local storage To deploy the hostpath provisioner and enable your virtual machines to use local -storage, first create a HostPathProvisioner custom resource. +storage, first create a `HostPathProvisioner` custom resource. .Prerequisites -* Create a backing directory on each node for the PersistentVolumes (PVs) +* Create a backing directory on each node for the persistent volumes (PVs) that the hostpath provisioner creates. * Apply the SELinux context `container_file_t` to the PV @@ -24,12 +24,12 @@ $ sudo chcon -t container_file_t -R [NOTE] ==== If you use Red Hat Enterprise Linux CoreOS 8 workers, you must configure SELinux -by using a MachineConfig manifest instead. +by using a `MachineConfig` manifest instead. ==== .Procedure -. Create the HostPathProvisioner custom resource file. For example: +. Create the `HostPathProvisioner` custom resource file. For example: + [source,terminal] ---- @@ -52,7 +52,7 @@ spec: useNamingPrefix: "false" <2> ---- <1> Specify the backing directory where you want the provisioner to create PVs. -<2> Change this value to `true` if you want to use the name of the PersistentVolumeClaim (PVC) +<2> Change this value to `true` if you want to use the name of the persistent volume claim (PVC) that is bound to the created PV as the prefix of the directory name. + [NOTE] diff --git a/modules/virt-viewing-network-state-of-node.adoc b/modules/virt-viewing-network-state-of-node.adoc index 8f16b45757b1..0f0800c709e9 100644 --- a/modules/virt-viewing-network-state-of-node.adoc +++ b/modules/virt-viewing-network-state-of-node.adoc @@ -16,7 +16,7 @@ A `NodeNetworkState` object exists on every node in the cluster. This object is $ oc get nns ---- -. Inspect a `NodeNetworkState` to view the network on that node. The output in this example has been redacted for clarity: +. Inspect a `NodeNetworkState` object to view the network on that node. The output in this example has been redacted for clarity: + [source,terminal] ---- @@ -42,7 +42,7 @@ status: ... lastSuccessfulUpdateTime: "2020-01-31T12:14:00Z" <3> ---- -<1> The name of the `NodeNetworkState` is taken from the node. +<1> The name of the `NodeNetworkState` object is taken from the node. <2> The `currentState` contains the complete network configuration for the node, including DNS, interfaces, and routes. <3> Timestamp of the last successful update. This is updated periodically as long as the node is reachable and can be used to evalute the freshness of the report. 
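The hostpath provisioner hunks above show only fragments of the `HostPathProvisioner` custom resource. A minimal sketch might look like the following; the backing directory path is a placeholder and the `apiVersion` can differ between releases.

[source,yaml]
----
apiVersion: hostpathprovisioner.kubevirt.io/v1alpha1 # version can differ between releases
kind: HostPathProvisioner
metadata:
  name: hostpath-provisioner
spec:
  imagePullPolicy: IfNotPresent
  pathConfig:
    path: "/var/myvolumes" # placeholder backing directory where PVs are created
    useNamingPrefix: "false" # set to "true" to prefix directories with the bound PVC name
----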
diff --git a/modules/virt-viewing-resource-events-cli.adoc b/modules/virt-viewing-resource-events-cli.adoc index 94514e404d8f..c615ae18cde5 100644 --- a/modules/virt-viewing-resource-events-cli.adoc +++ b/modules/virt-viewing-resource-events-cli.adoc @@ -12,7 +12,7 @@ Events are included in the resource description, which you can get using the * In the namespace, use the `oc describe` command. The following example shows how to get the events for a virtual machine, a virtual machine instance, and the -virt-launcher Pod for a virtual machine: +virt-launcher pod for a virtual machine: + [source,terminal] ---- diff --git a/modules/virt-viewing-virtual-machine-logs-web.adoc b/modules/virt-viewing-virtual-machine-logs-web.adoc index 2717538100ca..10423351f655 100644 --- a/modules/virt-viewing-virtual-machine-logs-web.adoc +++ b/modules/virt-viewing-virtual-machine-logs-web.adoc @@ -12,6 +12,6 @@ Get virtual machine logs from the associated virtual machine launcher pod. . In the {VirtProductName} console, click *Workloads* -> *Virtualization* from the side menu. . Click the *Virtual Machines* tab. . Select a virtual machine to open the *Virtual Machine Overview* screen. -. In the *Details* tab, click the `virt-launcher-` Pod in the *Pod* +. In the *Details* tab, click the `virt-launcher-` pod in the *Pod* section. . Click *Logs*. diff --git a/modules/virt-virtctl-commands.adoc b/modules/virt-virtctl-commands.adoc index f131cd12b42e..b216f2dcb20f 100644 --- a/modules/virt-virtctl-commands.adoc +++ b/modules/virt-virtctl-commands.adoc @@ -56,10 +56,10 @@ the specified port of the node. |Open a VNC connection to a virtual machine instance. |`virtctl image-upload dv --image-path= --no-create` -|Upload a virtual machine image to a DataVolume that already exists. +|Upload a virtual machine image to a data volume that already exists. |`virtctl image-upload dv --size= --image-path=` -|Upload a virtual machine image to a new DataVolume. +|Upload a virtual machine image to a new data volume. |`virtctl version` |Display the client and server version information. diff --git a/modules/virt-vm-storage-volume-types.adoc b/modules/virt-vm-storage-volume-types.adoc index 0c553d9acdbe..54d3fc0f74ef 100644 --- a/modules/virt-vm-storage-volume-types.adoc +++ b/modules/virt-vm-storage-volume-types.adoc @@ -18,7 +18,7 @@ Importing an existing virtual machine disk into a PVC by using CDI and attaching the PVC to a virtual machine instance is the recommended method for importing existing virtual machines into {product-title}. There are some requirements for the disk to be used within a PVC. |dataVolume -|DataVolumes build on the `persistentVolumeClaim` disk type by managing the process of preparing the virtual machine disk via an import, clone, or upload operation. VMs that use this volume type are guaranteed not to start until the volume is ready. +|Data volumes build on the `persistentVolumeClaim` disk type by managing the process of preparing the virtual machine disk via an import, clone, or upload operation. VMs that use this volume type are guaranteed not to start until the volume is ready. Specify `type: dataVolume` or `type: ""`. If you specify any other value for `type`, such as `persistentVolumeClaim`, a warning is displayed, and the virtual machine does not start. @@ -34,7 +34,7 @@ Only RAW and QCOW2 formats are supported disk types for the container image regi [NOTE] ==== -A `containerDisk` volume is ephemeral. It is discarded when the virtual machine is stopped, restarted, or deleted. 
A `containerDisk` volume is useful for read-only filesystems such as CD-ROMs or for disposable virtual machines. +A `containerDisk` volume is ephemeral. It is discarded when the virtual machine is stopped, restarted, or deleted. A `containerDisk` volume is useful for read-only file systems such as CD-ROMs or for disposable virtual machines. ==== |emptyDisk diff --git a/virt/install/uninstalling-virt-cli.adoc b/virt/install/uninstalling-virt-cli.adoc index c772f62235da..933a3cd5b164 100644 --- a/virt/install/uninstalling-virt-cli.adoc +++ b/virt/install/uninstalling-virt-cli.adoc @@ -13,7 +13,7 @@ You can uninstall {VirtProductName} by using the {product-title} * You must delete all xref:../../virt/virtual_machines/virt-delete-vms.adoc#virt-delete-vm-web_virt-delete-vms[virtual machines], xref:../../virt/virtual_machines/virt-manage-vmis.adoc#virt-deleting-vmis-cli_virt-manage-vmis[virtual machine instances], -and xref:../../virt/virtual_machines/virtual_disks/virt-deleting-datavolumes.adoc#virt-deleting-dvs_virt-deleting-datavolumes[DataVolumes]. +and xref:../../virt/virtual_machines/virtual_disks/virt-deleting-datavolumes.adoc#virt-deleting-dvs_virt-deleting-datavolumes[data volumes]. + [IMPORTANT] ==== diff --git a/virt/install/uninstalling-virt-web.adoc b/virt/install/uninstalling-virt-web.adoc index 851d24597e14..d4f08c3705ae 100644 --- a/virt/install/uninstalling-virt-web.adoc +++ b/virt/install/uninstalling-virt-web.adoc @@ -12,7 +12,7 @@ xref:../../web_console/web-console.adoc#web-console-overview_web-console[web con * You must have {VirtProductName} {VirtVersion} installed. * You must delete all xref:../../virt/virtual_machines/virt-delete-vms.adoc#virt-delete-vm-web_virt-delete-vms[virtual machines], xref:../../virt/virtual_machines/virt-manage-vmis.adoc#virt-deleting-vmis-cli_virt-manage-vmis[virtual machine instances], -and xref:../../virt/virtual_machines/virtual_disks/virt-deleting-datavolumes.adoc#virt-deleting-dvs_virt-deleting-datavolumes[DataVolumes]. +and xref:../../virt/virtual_machines/virtual_disks/virt-deleting-datavolumes.adoc#virt-deleting-dvs_virt-deleting-datavolumes[data volumes]. + [IMPORTANT] ==== diff --git a/virt/live_migration/virt-live-migration.adoc b/virt/live_migration/virt-live-migration.adoc index f3321d7e0703..81b54fcac190 100644 --- a/virt/live_migration/virt-live-migration.adoc +++ b/virt/live_migration/virt-live-migration.adoc @@ -5,10 +5,10 @@ include::modules/virt-document-attributes.adoc[] toc::[] == Prerequisites -* Before using LiveMigration, ensure that the storage class used by the -virtual machine has a PersistentVolumeClaim (PVC) with a shared +* Before using live migration, ensure that the storage class used by the +virtual machine has a persistent volume claim (PVC) with a shared ReadWriteMany (RWX) access mode. -See xref:../../virt/virtual_machines/virtual_disks/virt-storage-defaults-for-datavolumes.adoc#virt-storage-defaults-for-datavolumes[Storage defaults for DataVolumes] +See xref:../../virt/virtual_machines/virtual_disks/virt-storage-defaults-for-datavolumes.adoc#virt-storage-defaults-for-datavolumes[Storage defaults for data volumes] to ensure your storage settings are correct. 
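As a concrete illustration of the shared-storage prerequisite above, a persistent volume claim that supports live migration requests the `ReadWriteMany` access mode. The claim name, storage class, and size below are placeholders.

[source,yaml]
----
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: vm-disk-rwx # placeholder claim name
spec:
  accessModes:
    - ReadWriteMany # required for live migration
  storageClassName: shared-storage-class # placeholder; use a class that supports RWX
  resources:
    requests:
      storage: 30Gi # placeholder size
----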
include::modules/virt-understanding-live-migration.adoc[leveloffset=+1] diff --git a/virt/logging_events_monitoring/virt-diagnosing-datavolumes-using-events-and-conditions.adoc b/virt/logging_events_monitoring/virt-diagnosing-datavolumes-using-events-and-conditions.adoc index cb74b31720f2..0783494ce414 100644 --- a/virt/logging_events_monitoring/virt-diagnosing-datavolumes-using-events-and-conditions.adoc +++ b/virt/logging_events_monitoring/virt-diagnosing-datavolumes-using-events-and-conditions.adoc @@ -1,10 +1,10 @@ [id="virt-diagnosing-datavolumes-using-events-and-conditions"] -= Diagnosing DataVolumes using events and conditions += Diagnosing data volumes using events and conditions include::modules/virt-document-attributes.adoc[] :context: virt-diagnosing-datavolumes-using-events-and-conditions toc::[] -Use the `oc describe` command to analyze and help resolve issues with DataVolumes. +Use the `oc describe` command to analyze and help resolve issues with data volumes. include::modules/virt-about-conditions-and-events.adoc[leveloffset=+1] include::modules/virt-analyzing-datavolume-conditions-and-events.adoc[leveloffset=+1] diff --git a/virt/node_maintenance/virt-managing-node-labeling-obsolete-cpu-models.adoc b/virt/node_maintenance/virt-managing-node-labeling-obsolete-cpu-models.adoc index 903edf50768d..9b7345eb821c 100644 --- a/virt/node_maintenance/virt-managing-node-labeling-obsolete-cpu-models.adoc +++ b/virt/node_maintenance/virt-managing-node-labeling-obsolete-cpu-models.adoc @@ -5,7 +5,7 @@ include::modules/common-attributes.adoc[] :context: virt-managing-node-labeling-obsolete-cpu-models toc::[] -You can schedule a virtual machine (VM) on a node where the CPU model and policy attribute of the VM are compatible with the CPU models and policy attributes that the node supports. By specifying a list of obsolete CPU models in a xref:../../builds/builds-configmaps.adoc#builds-configmap-overview_builds-configmaps[ConfigMap], you can exclude them from the list of labels created for CPU models. +You can schedule a virtual machine (VM) on a node where the CPU model and policy attribute of the VM are compatible with the CPU models and policy attributes that the node supports. By specifying a list of obsolete CPU models in a xref:../../builds/builds-configmaps.adoc#builds-configmap-overview_builds-configmaps[config map], you can exclude them from the list of labels created for CPU models. include::modules/virt-understanding-node-labeling-obsolete-cpu-models.adoc[leveloffset=+1] include::modules/virt-configuring-configmap-for-obsolete-cpu-models.adoc[leveloffset=+1] diff --git a/virt/node_network/virt-troubleshooting-node-network.adoc b/virt/node_network/virt-troubleshooting-node-network.adoc index d78482cbf93f..d7528b31509c 100644 --- a/virt/node_network/virt-troubleshooting-node-network.adoc +++ b/virt/node_network/virt-troubleshooting-node-network.adoc @@ -4,7 +4,7 @@ include::modules/virt-document-attributes.adoc[] :context: virt-troubleshooting-node-network toc::[] -If the node network configuration encounters an issue, the Policy is automatically rolled back and the Enactments report failure. +If the node network configuration encounters an issue, the policy is automatically rolled back and the enactments report failure. This includes issues such as: * The configuration fails to be applied on the host. 
diff --git a/virt/node_network/virt-updating-node-network-config.adoc b/virt/node_network/virt-updating-node-network-config.adoc index 542730521e6f..f049bbfca5ea 100644 --- a/virt/node_network/virt-updating-node-network-config.adoc +++ b/virt/node_network/virt-updating-node-network-config.adoc @@ -13,9 +13,9 @@ include::modules/virt-creating-interface-on-nodes.adoc[leveloffset=+1] [discrete] == Additional resources -* xref:virt-nmstate-example-policy-configurations[Example Policy configurations for different interfaces] -* xref:virt-example-nmstate-multiple-interfaces_virt-updating-node-network-config[Example for creating multiple interfaces in the same Policy] -* xref:virt-example-nmstate-IP-management_virt-updating-node-network-config[Examples of different IP management methods in Policies] +* xref:virt-nmstate-example-policy-configurations[Example policy configurations for different interfaces] +* xref:virt-example-nmstate-multiple-interfaces_virt-updating-node-network-config[Example for creating multiple interfaces in the same policy] +* xref:virt-example-nmstate-IP-management_virt-updating-node-network-config[Examples of different IP management methods in policies] include::modules/virt-confirming-policy-updates-on-nodes.adoc[leveloffset=+1] @@ -24,7 +24,7 @@ include::modules/virt-removing-interface-from-nodes.adoc[leveloffset=+1] include::modules/virt-restoring-node-network-configuration.adoc[leveloffset=+1] [id="virt-nmstate-example-policy-configurations"] -== Example Policy configurations for different interfaces +== Example policy configurations for different interfaces include::modules/virt-example-bridge-nncp.adoc[leveloffset=+2] diff --git a/virt/upgrading-virt.adoc b/virt/upgrading-virt.adoc index 6f510da336ee..c66a44e6e4f3 100644 --- a/virt/upgrading-virt.adoc +++ b/virt/upgrading-virt.adoc @@ -15,6 +15,6 @@ include::modules/virt-monitoring-upgrade-status.adoc[leveloffset=+1] == Additional resources -* xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-csv_olm-understanding-olm[ClusterServiceVersions (CSVs)] +* xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-csv_olm-understanding-olm[Cluster service versions (CSVs)] * xref:../virt/live_migration/virt-configuring-vmi-eviction-strategy.adoc#virt-configuring-vmi-eviction-strategy[Configuring virtual machine eviction strategy] diff --git a/virt/virt-additional-security-privileges-controller-and-launcher.adoc b/virt/virt-additional-security-privileges-controller-and-launcher.adoc index 3ec89b8c3c14..50383e8e9dee 100644 --- a/virt/virt-additional-security-privileges-controller-and-launcher.adoc +++ b/virt/virt-additional-security-privileges-controller-and-launcher.adoc @@ -4,7 +4,7 @@ include::modules/virt-document-attributes.adoc[] :context: virt-additional-security-privileges-controller-and-launcher toc::[] -The `kubevirt-controller` and virt-launcher Pods are granted some SELinux policies and Security Context Constraints privileges that are in addition to typical Pod owners. These privileges enable virtual machines to use {VirtProductName} features. +The `kubevirt-controller` and virt-launcher pods are granted some SELinux policies and Security Context Constraints privileges that are in addition to typical pod owners. These privileges enable virtual machines to use {VirtProductName} features. 
include::modules/virt-extended-selinux-policies-for-virt-launcher.adoc[leveloffset=+1] diff --git a/virt/virtual_machines/cloning_vms/virt-cloning-vm-disk-into-new-datavolume-block.adoc b/virt/virtual_machines/cloning_vms/virt-cloning-vm-disk-into-new-datavolume-block.adoc index 6125e9735289..e7e27fce756a 100644 --- a/virt/virtual_machines/cloning_vms/virt-cloning-vm-disk-into-new-datavolume-block.adoc +++ b/virt/virtual_machines/cloning_vms/virt-cloning-vm-disk-into-new-datavolume-block.adoc @@ -1,18 +1,18 @@ [id="virt-cloning-vm-disk-into-new-datavolume-block"] -= Cloning a virtual machine disk into a new block storage DataVolume += Cloning a virtual machine disk into a new block storage data volume include::modules/virt-document-attributes.adoc[] :context: virt-cloning-vm-disk-into-new-datavolume-block toc::[] -You can clone the PersistentVolumeClaim (PVC) of a virtual machine disk into -a new block DataVolume by referencing the source PVC in your DataVolume configuration +You can clone the persistent volume claim (PVC) of a virtual machine disk into +a new block data volume by referencing the source PVC in your data volume configuration file. [WARNING] ==== Cloning operations between different volume modes are not supported. The `volumeMode` values must match in both the source and target specifications. -For example, if you attempt to clone from a PersistentVolume (PV) with `volumeMode: Block` to a PV with `volumeMode: Filesystem`, the operation fails with an error message. +For example, if you attempt to clone from a persistent volume (PV) with `volumeMode: Block` to a PV with `volumeMode: Filesystem`, the operation fails with an error message. ==== == Prerequisites diff --git a/virt/virtual_machines/cloning_vms/virt-cloning-vm-disk-into-new-datavolume.adoc b/virt/virtual_machines/cloning_vms/virt-cloning-vm-disk-into-new-datavolume.adoc index 7bcb5620af0e..4c045018aaaf 100644 --- a/virt/virtual_machines/cloning_vms/virt-cloning-vm-disk-into-new-datavolume.adoc +++ b/virt/virtual_machines/cloning_vms/virt-cloning-vm-disk-into-new-datavolume.adoc @@ -1,18 +1,18 @@ [id="virt-cloning-vm-disk-into-new-datavolume"] -= Cloning a virtual machine disk into a new DataVolume += Cloning a virtual machine disk into a new data volume include::modules/virt-document-attributes.adoc[] :context: virt-cloning-vm-disk-into-new-datavolume toc::[] -You can clone the PersistentVolumeClaim (PVC) of a virtual machine disk into -a new DataVolume by referencing the source PVC in your DataVolume configuration +You can clone the persistent volume claim (PVC) of a virtual machine disk into +a new data volume by referencing the source PVC in your data volume configuration file. [WARNING] ==== Cloning operations between different volume modes are not supported. The `volumeMode` values must match in both the source and target specifications. -For example, if you attempt to clone from a PersistentVolume (PV) with `volumeMode: Block` to a PV with `volumeMode: Filesystem`, the operation fails with an error message. +For example, if you attempt to clone from a persistent volume (PV) with `volumeMode: Block` to a PV with `volumeMode: Filesystem`, the operation fails with an error message. 
==== == Prerequisites diff --git a/virt/virtual_machines/cloning_vms/virt-cloning-vm-using-datavolumetemplate.adoc b/virt/virtual_machines/cloning_vms/virt-cloning-vm-using-datavolumetemplate.adoc index 02abd1c5e488..1306a0d33ff4 100644 --- a/virt/virtual_machines/cloning_vms/virt-cloning-vm-using-datavolumetemplate.adoc +++ b/virt/virtual_machines/cloning_vms/virt-cloning-vm-using-datavolumetemplate.adoc @@ -1,18 +1,18 @@ [id="virt-cloning-vm-using-datavolumetemplate"] -= Cloning a virtual machine by using a DataVolumeTemplate += Cloning a virtual machine by using a data volume template include::modules/virt-document-attributes.adoc[] :context: virt-cloning-vm-using-datavolumetemplate toc::[] -You can create a new virtual machine by cloning the PersistentVolumeClaim (PVC) of +You can create a new virtual machine by cloning the persistent volume claim (PVC) of an existing VM. By including a `dataVolumeTemplate` in your virtual machine -configuration file, you create a new DataVolume from the original PVC. +configuration file, you create a new data volume from the original PVC. [WARNING] ==== Cloning operations between different volume modes are not supported. The `volumeMode` values must match in both the source and target specifications. -For example, if you attempt to clone from a PersistentVolume (PV) with `volumeMode: Block` to a PV with `volumeMode: Filesystem`, the operation fails with an error message. +For example, if you attempt to clone from a persistent volume (PV) with `volumeMode: Block` to a PV with `volumeMode: Filesystem`, the operation fails with an error message. ==== == Prerequisites diff --git a/virt/virtual_machines/cloning_vms/virt-enabling-user-permissions-to-clone-datavolumes.adoc b/virt/virtual_machines/cloning_vms/virt-enabling-user-permissions-to-clone-datavolumes.adoc index 3d7ac6cde8ba..eb5eda321e22 100644 --- a/virt/virtual_machines/cloning_vms/virt-enabling-user-permissions-to-clone-datavolumes.adoc +++ b/virt/virtual_machines/cloning_vms/virt-enabling-user-permissions-to-clone-datavolumes.adoc @@ -1,5 +1,5 @@ [id="virt-enabling-user-permissions-to-clone-datavolumes"] -= Enabling user permissions to clone DataVolumes across namespaces += Enabling user permissions to clone data volumes across namespaces include::modules/virt-document-attributes.adoc[] :context: virt-enabling-user-permissions-to-clone-datavolumes toc::[] @@ -8,14 +8,14 @@ The isolating nature of namespaces means that users cannot by default clone resources between namespaces. To enable a user to clone a virtual machine to another namespace, a -user with the `cluster-admin` role must create a new ClusterRole. Bind -this ClusterRole to a user to enable them to clone virtual machines +user with the `cluster-admin` role must create a new cluster role. Bind +this cluster role to a user to enable them to clone virtual machines to the destination namespace. == Prerequisites * Only a user with the xref:../../../authentication/using-rbac.adoc#default-roles_using-rbac[`cluster-admin`] -role can create ClusterRoles. +role can create cluster roles. 
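+
+As a sketch, a cluster role that grants clone permissions typically allows the `create` verb on the `datavolumes/source` resource in the `cdi.kubevirt.io` API group. The role name below is a placeholder:
+
+[source,yaml]
+----
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: datavolume-cloner              # placeholder name
+rules:
+- apiGroups: ["cdi.kubevirt.io"]
+  resources: ["datavolumes/source"]
+  verbs: ["create"]
+----
+
+A role binding that references this cluster role for a specific user is what ultimately allows that user to clone data volumes into the destination namespace.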
include::modules/virt-about-datavolumes.adoc[leveloffset=+1] diff --git a/virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes-block.adoc b/virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes-block.adoc index 34332fbce6ca..38aed9e245fc 100644 --- a/virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes-block.adoc +++ b/virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes-block.adoc @@ -1,12 +1,12 @@ [id="virt-importing-virtual-machine-images-datavolumes-block"] -= Importing virtual machine images to block storage with DataVolumes += Importing virtual machine images to block storage with data volumes include::modules/virt-document-attributes.adoc[] :context: virt-importing-virtual-machine-images-datavolumes-block toc::[] You can import an existing virtual machine image into your {product-title} -cluster. {VirtProductName} uses DataVolumes to automate the import of data and the -creation of an underlying PersistentVolumeClaim (PVC). +cluster. {VirtProductName} uses data volumes to automate the import of data and the +creation of an underlying persistent volume claim (PVC). [IMPORTANT] ==== @@ -23,7 +23,7 @@ Refer to the operating system documentation for details. * If you require scratch space according to the xref:#virt-cdi-supported-operations-matrix_virt-importing-virtual-machine-images-datavolumes-block[CDI supported operations matrix], you must first -xref:../../../virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc#virt-defining-storageclass-in-cdi-configuration_virt-preparing-cdi-scratch-space[define a StorageClass or prepare CDI scratch space] +xref:../../../virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc#virt-defining-storageclass-in-cdi-configuration_virt-preparing-cdi-scratch-space[define a storage class or prepare CDI scratch space] for this operation to complete successfully. include::modules/virt-about-datavolumes.adoc[leveloffset=+1] diff --git a/virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes.adoc b/virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes.adoc index 4c6ab339a2b6..b4cb11af133e 100644 --- a/virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes.adoc +++ b/virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes.adoc @@ -1,10 +1,10 @@ [id="virt-importing-virtual-machine-images-datavolumes"] -= Importing virtual machine images with DataVolumes += Importing virtual machine images with data volumes include::modules/virt-document-attributes.adoc[] :context: virt-importing-virtual-machine-images-datavolumes toc::[] -Use the Containerized Data Importer (CDI) to import a virtual machine image into a PersistentVolumeClaim (PVC) by using a DataVolume. You can attach a DataVolume to a virtual machine for persistent storage. +Use the Containerized Data Importer (CDI) to import a virtual machine image into a persistent volume claim (PVC) by using a data volume. You can attach a data volume to a virtual machine for persistent storage. The virtual machine image can be hosted at an HTTP or HTTPS endpoint, or built into a container disk and stored in a container registry. 
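+
+For illustration only, a data volume that imports an image from an HTTPS endpoint might look similar to the following sketch. The data volume name, URL, and size are placeholders:
+
+[source,yaml]
+----
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataVolume
+metadata:
+  name: example-import-dv              # placeholder name
+spec:
+  source:
+    http:
+      url: "https://mirror.example.com/images/example.qcow2"   # placeholder endpoint
+  pvc:
+    accessModes:
+    - ReadWriteOnce
+    resources:
+      requests:
+        storage: 10Gi                  # placeholder size
+----
+
+If the endpoint requires a TLS certificate, the data volume can also reference the config map that holds the certificate, typically through a `certConfigMap` field under the `http` source.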
@@ -18,14 +18,14 @@ The resizing procedure varies based on the operating system installed on the vir == Prerequisites * If the endpoint requires a TLS certificate, the certificate must be -xref:../../../virt/virtual_machines/importing_vms/virt-tls-certificates-for-dv-imports.adoc#virt-adding-tls-certificates-for-authenticating-dv-imports_virt-tls-certificates-for-dv-imports[included in a ConfigMap] -in the same namespace as the DataVolume and referenced in the DataVolume configuration. +xref:../../../virt/virtual_machines/importing_vms/virt-tls-certificates-for-dv-imports.adoc#virt-adding-tls-certificates-for-authenticating-dv-imports_virt-tls-certificates-for-dv-imports[included in a config map] +in the same namespace as the data volume and referenced in the data volume configuration. * To import a container disk: ** You might need to xref:../../../virt/virtual_machines/virtual_disks/virt-using-container-disks-with-vms.adoc#virt-preparing-container-disk-for-vms_virt-using-container-disks-with-vms[prepare a container disk from a virtual machine image] and store it in your container registry before importing it. -** If the container registry does not have TLS, you must xref:../../../virt/virtual_machines/virtual_disks/virt-using-container-disks-with-vms.adoc#virt-disabling-tls-for-registry_virt-using-container-disks-with-vms[add the registry to the `cdi-insecure-registries` ConfigMap] before you can import a container disk from it. +** If the container registry does not have TLS, you must xref:../../../virt/virtual_machines/virtual_disks/virt-using-container-disks-with-vms.adoc#virt-disabling-tls-for-registry_virt-using-container-disks-with-vms[add the registry to the `cdi-insecure-registries` config map] before you can import a container disk from it. -* You might need to xref:../../../virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc#virt-defining-storageclass-in-cdi-configuration_virt-preparing-cdi-scratch-space[define a StorageClass or prepare CDI scratch space] +* You might need to xref:../../../virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc#virt-defining-storageclass-in-cdi-configuration_virt-preparing-cdi-scratch-space[define a storage class or prepare CDI scratch space] for this operation to complete successfully. include::modules/virt-cdi-supported-operations-matrix.adoc[leveloffset=+1] diff --git a/virt/virtual_machines/importing_vms/virt-importing-vmware-vm.adoc b/virt/virtual_machines/importing_vms/virt-importing-vmware-vm.adoc index ce23525011b5..3d3877c89bd7 100644 --- a/virt/virtual_machines/importing_vms/virt-importing-vmware-vm.adoc +++ b/virt/virtual_machines/importing_vms/virt-importing-vmware-vm.adoc @@ -16,7 +16,7 @@ include::modules/virt-features-for-storage-matrix.adoc[leveloffset=+1] The import process uses the VMware Virtual Disk Development Kit (VDDK) to copy the VMware virtual disk. -You can download the VDDK SDK, create a VDDK image, upload the image to an image registry, and add it to the `v2v-vmware` ConfigMap. +You can download the VDDK SDK, create a VDDK image, upload the image to an image registry, and add it to the `v2v-vmware` config map. You can configure either an internal {product-title} image registry or a secure external image registry for the VDDK image. The registry must be accessible to your {VirtProductName} environment. 
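+
+As a rough sketch only, the `v2v-vmware` config map typically carries a reference to the VDDK image under a key such as `vddk-init-image`. The key name and image reference below are assumptions for illustration; check the config map in your cluster for the exact key that your release expects:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: v2v-vmware
+  namespace: openshift-cnv             # namespace where {VirtProductName} is installed
+data:
+  vddk-init-image: registry.example.com/kubevirt/vddk:latest   # placeholder image reference
+----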
diff --git a/virt/virtual_machines/importing_vms/virt-tls-certificates-for-dv-imports.adoc b/virt/virtual_machines/importing_vms/virt-tls-certificates-for-dv-imports.adoc index 6f9717f4b355..fea2c1546763 100644 --- a/virt/virtual_machines/importing_vms/virt-tls-certificates-for-dv-imports.adoc +++ b/virt/virtual_machines/importing_vms/virt-tls-certificates-for-dv-imports.adoc @@ -1,5 +1,5 @@ [id="virt-tls-certificates-for-dv-imports"] -= TLS certificates for DataVolume imports += TLS certificates for data volume imports include::modules/virt-document-attributes.adoc[] include::modules/common-attributes.adoc[] :context: virt-tls-certificates-for-dv-imports diff --git a/virt/virtual_machines/virt-create-vms.adoc b/virt/virtual_machines/virt-create-vms.adoc index 836698457261..f5759e202ddd 100644 --- a/virt/virtual_machines/virt-create-vms.adoc +++ b/virt/virtual_machines/virt-create-vms.adoc @@ -30,8 +30,8 @@ include::modules/virt-cdrom-wizard-fields-web.adoc[leveloffset=+2] include::modules/virt-networking-wizard-fields-web.adoc[leveloffset=+2] include::modules/virt-storage-wizard-fields-web.adoc[leveloffset=+2] -For more information on the `kubevirt-storage-class-defaults` ConfigMap, see xref:../../virt/virtual_machines/virtual_disks/virt-storage-defaults-for-datavolumes.adoc#virt-storage-defaults-for-datavolumes[ -Storage defaults for DataVolumes]. +For more information on the `kubevirt-storage-class-defaults` config map, see xref:../../virt/virtual_machines/virtual_disks/virt-storage-defaults-for-datavolumes.adoc#virt-storage-defaults-for-datavolumes[ +Storage defaults for data volumes]. include::modules/virt-creating-vm-yaml-web.adoc[leveloffset=+1] include::modules/virt-creating-vm.adoc[leveloffset=+1] diff --git a/virt/virtual_machines/virt-edit-boot-order.adoc b/virt/virtual_machines/virt-edit-boot-order.adoc index a9899e188bc4..dc16ea695264 100644 --- a/virt/virtual_machines/virt-edit-boot-order.adoc +++ b/virt/virtual_machines/virt-edit-boot-order.adoc @@ -8,7 +8,7 @@ You can update the values for a boot order list by using the web console or the With *Boot Order* in the *Virtual Machine Overview* page, you can: -* Select a disk or Network Interface Card (NIC) and add it to the boot order list. +* Select a disk or network interface card (NIC) and add it to the boot order list. * Edit the order of the disks or NICs in the boot order list. * Remove a disk or NIC from the boot order list, and return it back to the inventory of bootable sources. diff --git a/virt/virtual_machines/virt-edit-vms.adoc b/virt/virtual_machines/virt-edit-vms.adoc index 3106e84ea8b7..95e34499289e 100644 --- a/virt/virtual_machines/virt-edit-vms.adoc +++ b/virt/virtual_machines/virt-edit-vms.adoc @@ -13,8 +13,8 @@ include::modules/virt-editing-vm-yaml-web.adoc[leveloffset=+1] include::modules/virt-editing-vm-cli.adoc[leveloffset=+1] include::modules/virt-add-disk-to-vm.adoc[leveloffset=+1] -For more information on the `kubevirt-storage-class-defaults` ConfigMap, see xref:../../virt/virtual_machines/virtual_disks/virt-storage-defaults-for-datavolumes.adoc#virt-storage-defaults-for-datavolumes[ -Storage defaults for DataVolumes]. +For more information on the `kubevirt-storage-class-defaults` config map, see xref:../../virt/virtual_machines/virtual_disks/virt-storage-defaults-for-datavolumes.adoc#virt-storage-defaults-for-datavolumes[ +Storage defaults for data volumes]. 
include::modules/virt-storage-wizard-fields-web.adoc[leveloffset=+2] include::modules/virt-add-nic-to-vm.adoc[leveloffset=+1] diff --git a/virt/virtual_machines/virt-managing-configmaps-secrets-service-accounts.adoc b/virt/virtual_machines/virt-managing-configmaps-secrets-service-accounts.adoc index fec8301d5ee5..a2246f7651ec 100644 --- a/virt/virtual_machines/virt-managing-configmaps-secrets-service-accounts.adoc +++ b/virt/virtual_machines/virt-managing-configmaps-secrets-service-accounts.adoc @@ -1,19 +1,18 @@ [id="virt-managing-configmaps-secrets-service-accounts"] -= Managing ConfigMaps, secrets, and service accounts in virtual machines += Managing config maps, secrets, and service accounts in virtual machines include::modules/virt-document-attributes.adoc[] :context: virt-managing-configmaps-secrets-service-accounts toc::[] - -You can use secrets, ConfigMaps, and service accounts to pass configuration data to virtual machines. For example, you can: +You can use secrets, config maps, and service accounts to pass configuration data to virtual machines. For example, you can: * Give a virtual machine access to a service that requires credentials by adding a secret to the virtual machine. -* Store non-confidential configuration data in a ConfigMap so that a Pod or another object can consume the data. +* Store non-confidential configuration data in a config map so that a pod or another object can consume the data. * Allow a component to access the API server by associating a service account with that component. [NOTE] ==== -{VirtProductName} exposes secrets, ConfigMaps, and service accounts as virtual machine disks so that you can use them across platforms without additional overhead. +{VirtProductName} exposes secrets, config maps, and service accounts as virtual machine disks so that you can use them across platforms without additional overhead. ==== include::modules/virt-adding-secret-configmap-service-account-to-vm.adoc[leveloffset=+1] @@ -23,8 +22,8 @@ include::modules/virt-removing-secret-configmap-service-account-vm.adoc[leveloff == Additional resources -* xref:../../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets-about[Providing sensitive data to Pods] +* xref:../../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets-about[Providing sensitive data to pods] * xref:../../authentication/understanding-and-creating-service-accounts.adoc#service-accounts-overview[Understanding and creating service accounts] -* xref:../../builds/builds-configmaps.adoc#builds-configmap-overview_builds-configmaps[Understanding ConfigMaps] +* xref:../../builds/builds-configmaps.adoc#builds-configmap-overview_builds-configmaps[Understanding config maps] diff --git a/virt/virtual_machines/virtual_disks/virt-cloning-a-datavolume-using-smart-cloning.adoc b/virt/virtual_machines/virtual_disks/virt-cloning-a-datavolume-using-smart-cloning.adoc index 1929624bd8c3..371ef7086372 100644 --- a/virt/virtual_machines/virtual_disks/virt-cloning-a-datavolume-using-smart-cloning.adoc +++ b/virt/virtual_machines/virtual_disks/virt-cloning-a-datavolume-using-smart-cloning.adoc @@ -1,5 +1,5 @@ [id="virt-cloning-a-datavolume-using-smart-cloning"] -= Cloning a DataVolume using smart-cloning += Cloning a data volume using smart-cloning include::modules/virt-document-attributes.adoc[] :context: virt-cloning-a-datavolume-using-smart-cloning toc::[] @@ -9,7 +9,7 @@ designed to enhance performance of the cloning process. 
Clones created with smar You do not need to perform any action to enable smart-cloning, but you need to ensure your storage environment is compatible with smart-cloning to use this feature. -When you create a DataVolume with a PersistentVolumeClaim (PVC) source, you automatically initiate the cloning process. You always receive a clone of the DataVolume, if your environment supports smart-cloning or not. However, you will only receive the performance benefits of smart cloning if you storage provider supports smart-cloning. +When you create a data volume with a persistent volume claim (PVC) source, you automatically initiate the cloning process. You always receive a clone of the data volume, whether or not your environment supports smart-cloning. However, you will only receive the performance benefits of smart-cloning if your storage provider supports smart-cloning. include::modules/virt-understanding-smart-cloning.adoc[leveloffset=+1] @@ -17,4 +17,4 @@ include::modules/virt-cloning-a-datavolume.adoc[leveloffset=+1] == Additional resources -* xref:../../../virt/virtual_machines/cloning_vms/virt-cloning-vm-disk-into-new-datavolume.adoc#virt-cloning-pvc-of-vm-disk-into-new-datavolume_virt-cloning-vm-disk-into-new-datavolume[Cloning the PersistentVolumeClaim of a virtual machine disk into a new DataVolume] +* xref:../../../virt/virtual_machines/cloning_vms/virt-cloning-vm-disk-into-new-datavolume.adoc#virt-cloning-pvc-of-vm-disk-into-new-datavolume_virt-cloning-vm-disk-into-new-datavolume[Cloning the persistent volume claim of a virtual machine disk into a new data volume] diff --git a/virt/virtual_machines/virtual_disks/virt-deleting-datavolumes.adoc b/virt/virtual_machines/virtual_disks/virt-deleting-datavolumes.adoc index b722a161783b..19fb22caed55 100644 --- a/virt/virtual_machines/virtual_disks/virt-deleting-datavolumes.adoc +++ b/virt/virtual_machines/virtual_disks/virt-deleting-datavolumes.adoc @@ -1,13 +1,13 @@ [id="virt-deleting-datavolumes"] -= Deleting DataVolumes += Deleting data volumes include::modules/virt-document-attributes.adoc[] :context: virt-deleting-datavolumes toc::[] -You can manually delete a DataVolume by using the `oc` command-line interface. + +You can manually delete a data volume by using the `oc` command-line interface. + [NOTE] ==== -When you delete a virtual machine, the DataVolume it uses is automatically deleted. +When you delete a virtual machine, the data volume it uses is automatically deleted. ==== include::modules/virt-about-datavolumes.adoc[leveloffset=+1] diff --git a/virt/virtual_machines/virtual_disks/virt-moving-local-vm-disk-to-different-node.adoc b/virt/virtual_machines/virtual_disks/virt-moving-local-vm-disk-to-different-node.adoc index e8ab93d92239..7b0c4c0c574b 100644 --- a/virt/virtual_machines/virtual_disks/virt-moving-local-vm-disk-to-different-node.adoc +++ b/virt/virtual_machines/virtual_disks/virt-moving-local-vm-disk-to-different-node.adoc @@ -11,7 +11,7 @@ You might want to move the virtual machine to a specific node for the following * The current node has limitations to the local storage configuration. * The new node is better optimized for the workload of that virtual machine. -To move a virtual machine that uses local storage, you must clone the underlying volume by using a DataVolume.
After the cloning operation is complete, you can xref:../../../virt/virtual_machines/virt-edit-vms.adoc#virt-edit-vms[edit the virtual machine configuration] so that it uses the new DataVolume, or xref:../../../virt/virtual_machines/virt-edit-vms.adoc#virt-vm-add-disk_virt-edit-vms[add the new DataVolume to another virtual machine]. +To move a virtual machine that uses local storage, you must clone the underlying volume by using a data volume. After the cloning operation is complete, you can xref:../../../virt/virtual_machines/virt-edit-vms.adoc#virt-edit-vms[edit the virtual machine configuration] so that it uses the new data volume, or xref:../../../virt/virtual_machines/virt-edit-vms.adoc#virt-vm-add-disk_virt-edit-vms[add the new data volume to another virtual machine]. [NOTE] ==== diff --git a/virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc b/virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc index ef287dc40d67..d0ad2f4fbd53 100644 --- a/virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc +++ b/virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc @@ -16,4 +16,4 @@ include::modules/virt-cdi-supported-operations-matrix.adoc[leveloffset=+1] .Additional resources -* See the xref:../../../storage/dynamic-provisioning.adoc#about_dynamic-provisioning[Dynamic provisioning] section for more information on StorageClasses and how these are defined in the cluster. +* See the xref:../../../storage/dynamic-provisioning.adoc#about_dynamic-provisioning[Dynamic provisioning] section for more information on storage classes and how these are defined in the cluster. diff --git a/virt/virtual_machines/virtual_disks/virt-storage-defaults-for-datavolumes.adoc b/virt/virtual_machines/virtual_disks/virt-storage-defaults-for-datavolumes.adoc index cd23f986183f..e0abe54aa279 100644 --- a/virt/virtual_machines/virtual_disks/virt-storage-defaults-for-datavolumes.adoc +++ b/virt/virtual_machines/virtual_disks/virt-storage-defaults-for-datavolumes.adoc @@ -1,10 +1,10 @@ [id="virt-storage-defaults-for-datavolumes"] -= Storage defaults for DataVolumes += Storage defaults for data volumes include::modules/virt-document-attributes.adoc[] :context: virt-storage-defaults-for-datavolumes toc::[] -The `kubevirt-storage-class-defaults` ConfigMap provides _access mode_ and _volume mode_ defaults for DataVolumes. You can edit or add storage class defaults to the ConfigMap in order to create DataVolumes in the web console that better match the underlying storage. +The `kubevirt-storage-class-defaults` config map provides _access mode_ and _volume mode_ defaults for data volumes. You can edit or add storage class defaults to the config map in order to create data volumes in the web console that better match the underlying storage. 
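+
+As an illustrative sketch, cluster-wide defaults are set with the `accessMode` and `volumeMode` keys, and per storage class defaults are set with keys prefixed by the storage class name. The storage class name `example-sc` is a placeholder:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kubevirt-storage-class-defaults
+  namespace: openshift-cnv
+data:
+  accessMode: ReadWriteOnce            # default access mode for data volumes
+  volumeMode: Filesystem               # default volume mode for data volumes
+  example-sc.accessMode: ReadWriteMany # overrides for the placeholder storage class
+  example-sc.volumeMode: Block
+----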
include::modules/virt-about-storage-setting-for-datavolumes.adoc[leveloffset=+1] diff --git a/virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-block.adoc b/virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-block.adoc index fd7b74472304..1dd3967db0d7 100644 --- a/virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-block.adoc +++ b/virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-block.adoc @@ -1,15 +1,15 @@ [id="virt-uploading-local-disk-images-block"] -= Uploading a local disk image to a block storage DataVolume += Uploading a local disk image to a block storage data volume include::modules/virt-document-attributes.adoc[] :context: virt-uploading-local-disk-images-block toc::[] -You can upload a local disk image into a block DataVolume by using the +You can upload a local disk image into a block data volume by using the `virtctl` command-line utility. -In this workflow, you create a local block device to use as a PersistentVolume, -associate this block volume with an `upload` DataVolume, and use `virtctl` -to upload the local disk image into the DataVolume. +In this workflow, you create a local block device to use as a persistent volume, +associate this block volume with an `upload` data volume, and use `virtctl` +to upload the local disk image into the data volume. == Prerequisites @@ -17,7 +17,7 @@ to upload the local disk image into the DataVolume. the `kubevirt-virtctl` package. * If you require scratch space according to the xref:#virt-cdi-supported-operations-matrix_virt-uploading-local-disk-images-block[CDI supported operations matrix], you must first -xref:../../../virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc#virt-defining-storageclass-in-cdi-configuration_virt-preparing-cdi-scratch-space[define a StorageClass or prepare CDI scratch space] +xref:../../../virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc#virt-defining-storageclass-in-cdi-configuration_virt-preparing-cdi-scratch-space[define a storage class or prepare CDI scratch space] for this operation to complete successfully. :blockstorage: diff --git a/virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-virtctl.adoc b/virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-virtctl.adoc index 5e4a0afc5c6c..fef9a9b73eac 100644 --- a/virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-virtctl.adoc +++ b/virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-virtctl.adoc @@ -4,7 +4,7 @@ include::modules/virt-document-attributes.adoc[] :context: virt-uploading-local-disk-images-virtctl toc::[] -You can upload a locally stored disk image to a new or existing DataVolume by using the +You can upload a locally stored disk image to a new or existing data volume by using the `virtctl` command-line utility. == Prerequisites @@ -14,7 +14,7 @@ the `kubevirt-virtctl` package. 
* If you require scratch space according to the xref:#virt-cdi-supported-operations-matrix_virt-cloning-vm-disk-into-new-datavolume-block[CDI supported operations matrix], you must first -xref:../../../virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc#virt-defining-storageclass-in-cdi-configuration_virt-preparing-cdi-scratch-space[define a StorageClass or prepare CDI scratch space] +xref:../../../virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc#virt-defining-storageclass-in-cdi-configuration_virt-preparing-cdi-scratch-space[define a storage class or prepare CDI scratch space] for this operation to complete successfully. include::modules/virt-about-datavolumes.adoc[leveloffset=+1] diff --git a/virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-web.adoc b/virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-web.adoc index 2fe7eba0a883..2412fe824ccc 100644 --- a/virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-web.adoc +++ b/virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-web.adoc @@ -12,7 +12,7 @@ You can upload a locally stored disk image file by using the web console. * If you require scratch space according to the xref:../../../virt/virtual_machines/virtual_disks/virt-uploading-local-disk-images-web.adoc#virt-cdi-supported-operations-matrix_virt-uploading-local-disk-images-web[CDI supported operations matrix], you must first -xref:../../../virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc#virt-defining-storageclass-in-cdi-configuration_virt-preparing-cdi-scratch-space[define a StorageClass or prepare CDI scratch space] +xref:../../../virt/virtual_machines/virtual_disks/virt-preparing-cdi-scratch-space.adoc#virt-defining-storageclass-in-cdi-configuration_virt-preparing-cdi-scratch-space[define a storage class or prepare CDI scratch space] for this operation to complete successfully. include::modules/virt-cdi-supported-operations-matrix.adoc[leveloffset=+1] diff --git a/virt/virtual_machines/virtual_disks/virt-using-container-disks-with-vms.adoc b/virt/virtual_machines/virtual_disks/virt-using-container-disks-with-vms.adoc index d4e116c9b2af..37c2799ee3d5 100644 --- a/virt/virtual_machines/virtual_disks/virt-using-container-disks-with-vms.adoc +++ b/virt/virtual_machines/virtual_disks/virt-using-container-disks-with-vms.adoc @@ -18,4 +18,4 @@ include::modules/virt-disabling-tls-for-registry.adoc[leveloffset=+1] * xref:../../../virt/virtual_machines/importing_vms/virt-importing-virtual-machine-images-datavolumes.adoc#virt-importing-virtual-machine-images-datavolumes[Import the container disk into persistent storage for a virtual machine]. * xref:../../../virt/virtual_machines/virt-create-vms.adoc#virt-create-vms[Create a virtual machine] that uses -a containerDisk volume for ephemeral storage. +a `containerDisk` volume for ephemeral storage. diff --git a/virt/virtual_machines/vm_networking/virt-attaching-vm-multiple-networks.adoc b/virt/virtual_machines/vm_networking/virt-attaching-vm-multiple-networks.adoc index fc0c4d288e74..05178135d910 100644 --- a/virt/virtual_machines/vm_networking/virt-attaching-vm-multiple-networks.adoc +++ b/virt/virtual_machines/vm_networking/virt-attaching-vm-multiple-networks.adoc @@ -9,12 +9,12 @@ virtual machines to multiple networks. You can import virtual machines with existing workloads that depend on access to multiple interfaces. 
You can also configure a PXE network so that you can boot machines over the network. -To get started, a network administrator configures a bridge NetworkAttachmentDefinition -for a namespace in the web console or CLI. Users can then create a NIC to attach Pods and virtual machines in that namespace to the bridge network. +To get started, a network administrator configures a bridge network attachment definition +for a namespace in the web console or CLI. Users can then create a NIC to attach pods and virtual machines in that namespace to the bridge network. include::modules/virt-networking-glossary.adoc[leveloffset=+1] -== Creating a NetworkAttachmentDefinition +== Creating a network attachment definition == Prerequisites @@ -23,7 +23,7 @@ See the xref:../../../virt/node_network/virt-updating-node-network-config.adoc#v [WARNING] ==== -Configuring ipam in a NetworkAttachmentDefinition for virtual machines is not supported. +Configuring ipam in a network attachment definition for virtual machines is not supported. ==== include::modules/virt-creating-bridge-nad-web.adoc[leveloffset=+2] @@ -33,7 +33,7 @@ include::modules/virt-creating-bridge-nad-cli.adoc[leveloffset=+2] [NOTE] ==== When defining the NIC in the next section, ensure that the *NETWORK* value is -the bridge network name from the NetworkAttachmentDefinition you created +the bridge network name from the network attachment definition you created in the previous section. ==== diff --git a/virt/virtual_machines/vm_networking/virt-using-the-default-pod-network-with-virt.adoc b/virt/virtual_machines/vm_networking/virt-using-the-default-pod-network-with-virt.adoc index 19ce939f7f7c..cd396ef8a410 100644 --- a/virt/virtual_machines/vm_networking/virt-using-the-default-pod-network-with-virt.adoc +++ b/virt/virtual_machines/vm_networking/virt-using-the-default-pod-network-with-virt.adoc @@ -1,12 +1,12 @@ [id="virt-using-the-default-pod-network-with-virt"] -= Using the default Pod network for virtual machines += Using the default pod network for virtual machines include::modules/virt-document-attributes.adoc[] :context: virt-using-the-default-pod-network-with-virt toc::[] -You can use the default Pod network with {VirtProductName}. To do so, +You can use the default pod network with {VirtProductName}. To do so, you must use the `masquerade` binding method. It is the only recommended -binding method for use with the default Pod network. Do not use +binding method for use with the default pod network. Do not use `masquerade` mode with non-default networks. [NOTE] diff --git a/virt/virtual_machines/vm_networking/virt-viewing-ip-of-vm-nic.adoc b/virt/virtual_machines/vm_networking/virt-viewing-ip-of-vm-nic.adoc index daaa46b5bb38..191c1b078218 100644 --- a/virt/virtual_machines/vm_networking/virt-viewing-ip-of-vm-nic.adoc +++ b/virt/virtual_machines/vm_networking/virt-viewing-ip-of-vm-nic.adoc @@ -4,7 +4,7 @@ include::modules/virt-document-attributes.adoc[] :context: virt-viewing-ip-of-vm-vnic toc::[] -You can view the IP address for a Network Interface Card (NIC) by using the web console or the `oc` client. The xref:../../../virt/virtual_machines/virt-installing-qemu-guest-agent.adoc#virt-installing-qemu-guest-agent[QEMU guest agent] displays additional information about the virtual machine's secondary networks. +You can view the IP address for a network interface card (NIC) by using the web console or the `oc` client. 
The xref:../../../virt/virtual_machines/virt-installing-qemu-guest-agent.adoc#virt-installing-qemu-guest-agent[QEMU guest agent] displays additional information about the virtual machine's secondary networks. include::modules/virt-viewing-vmi-ip-cli.adoc[leveloffset=+1] include::modules/virt-viewing-vmi-ip-web.adoc[leveloffset=+1]