diff --git a/_topic_map.yml b/_topic_map.yml index 15a7e62ec1f2..a152e2d96308 100644 --- a/_topic_map.yml +++ b/_topic_map.yml @@ -191,10 +191,16 @@ Topics: File: installing-openstack-installer-custom - Name: Installing a cluster on OpenStack with Kuryr File: installing-openstack-installer-kuryr + - Name: Installing a cluster on OpenStack on your own infrastructure + File: installing-openstack-user + - Name: Installing a cluster on OpenStack with Kuryr on your own infrastructure + File: installing-openstack-user-kuryr # - Name: Load balancing deployments on OpenStack # File: installing-openstack-load-balancing - Name: Uninstalling a cluster on OpenStack File: uninstalling-cluster-openstack + - Name: Uninstalling a cluster on OpenStack from your own infrastructure + File: uninstalling-openstack-user - Name: Installing on vSphere Dir: installing_vsphere Topics: diff --git a/installing/installing_openstack/installing-openstack-installer-custom.adoc b/installing/installing_openstack/installing-openstack-installer-custom.adoc index 537fa8f1a3f4..271b287883af 100644 --- a/installing/installing_openstack/installing-openstack-installer-custom.adoc +++ b/installing/installing_openstack/installing-openstack-installer-custom.adoc @@ -13,6 +13,7 @@ In {product-title} version {product-version}, you can install a customized clust * Review details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. +** Verify that {product-title} {product-version} is compatible with your {rh-openstack} version in the _Available platforms_ section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. * Have a storage service installed in {rh-openstack}, like Block Storage (Cinder) or Object Storage (Swift). 
Object storage is the recommended storage technology for {product-title} registry cluster deployment. For more information, see xref:../../scalability_and_performance/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. diff --git a/installing/installing_openstack/installing-openstack-installer-kuryr.adoc b/installing/installing_openstack/installing-openstack-installer-kuryr.adoc index 1f1332803c59..70e9ace073cb 100644 --- a/installing/installing_openstack/installing-openstack-installer-kuryr.adoc +++ b/installing/installing_openstack/installing-openstack-installer-kuryr.adoc @@ -13,6 +13,7 @@ In {product-title} version {product-version}, you can install a customized clust * Review details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. +** Verify that {product-title} {product-version} is compatible with your {rh-openstack} version in the _Available platforms_ section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. * Have a storage service installed in {rh-openstack}, like Block Storage (Cinder) or Object Storage (Swift). Object storage is the recommended storage technology for {product-title} registry cluster deployment. For more information, see xref:../../scalability_and_performance/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. 
diff --git a/installing/installing_openstack/installing-openstack-user-kuryr.adoc b/installing/installing_openstack/installing-openstack-user-kuryr.adoc new file mode 100644 index 000000000000..fd9fac788d36 --- /dev/null +++ b/installing/installing_openstack/installing-openstack-user-kuryr.adoc @@ -0,0 +1,66 @@ +[id="installing-openstack-user-kuryr"] += Installing a cluster on OpenStack with Kuryr on your own infrastructure +include::modules/common-attributes.adoc[] +:context: installing-openstack-user-kuryr + +toc::[] + +In {product-title} version {product-version}, you can install a cluster on +{rh-openstack-first} that runs on user-provisioned infrastructure. + +Using your own infrastructure allows you to integrate your cluster with existing infrastructure and modifications. The process requires more labor on your part than installer-provisioned installations, because you must create all {rh-openstack} resources, like Nova servers, Neutron ports, and security groups. However, Red Hat provides Ansible playbooks to help you in the deployment process. + +.Prerequisites + +* Review details about the +xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. 
+ +* Have an {rh-openstack} account where you want to install {product-title} + +* On the machine from which you run the installation program, have: +** A single directory in which you can keep the files you create during the installation process +** Python 3 + +include::modules/installation-osp-about-kuryr.adoc[leveloffset=+1] +include::modules/installation-osp-default-kuryr-deployment.adoc[leveloffset=+1] +include::modules/installation-osp-kuryr-increase-quota.adoc[leveloffset=+2] +include::modules/installation-osp-kuryr-neutron-configuration.adoc[leveloffset=+2] +include::modules/installation-osp-kuryr-octavia-configuration.adoc[leveloffset=+2] +include::modules/installation-osp-kuryr-known-limitations.adoc[leveloffset=+2] +include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] +include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] +include::modules/cluster-entitlements.adoc[leveloffset=+1] +include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] +include::modules/installation-obtaining-installer.adoc[leveloffset=+1] +include::modules/ssh-agent-using.adoc[leveloffset=+1] +// include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] +include::modules/installation-osp-creating-image.adoc[leveloffset=+1] +include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] +include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] +include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] +include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] +include::modules/installation-user-infra-generate.adoc[leveloffset=+1] +include::modules/installation-initializing.adoc[leveloffset=+1] +include::modules/installation-configuration-parameters.adoc[leveloffset=+1] +include::modules/installation-osp-kuryr-config-yaml.adoc[leveloffset=+2] +include::modules/installation-osp-fixing-subnet.adoc[leveloffset=+2] 
+include::modules/installation-osp-emptying-worker-pools.adoc[leveloffset=+2] +include::modules/installation-osp-modifying-networktype.adoc[leveloffset=+2] +include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] +include::modules/installation-osp-converting-ignition-resources.adoc[leveloffset=+1] +include::modules/installation-osp-creating-control-plane-ignition.adoc[leveloffset=+1] +include::modules/installation-osp-creating-network-resources.adoc[leveloffset=+1] +include::modules/installation-osp-creating-bootstrap-machine.adoc[leveloffset=+1] +include::modules/installation-osp-creating-control-plane.adoc[leveloffset=+1] +include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] +include::modules/installation-osp-deleting-bootstrap-resources.adoc[leveloffset=+1] +include::modules/installation-osp-creating-compute-machines.adoc[leveloffset=+1] +include::modules/installation-approve-csrs.adoc[leveloffset=+1] +include::modules/installation-osp-verifying-installation.adoc[leveloffset=+1] +include::modules/installation-osp-configuring-floating-ip.adoc[leveloffset=+1] + +.Next steps + +* xref:../../installing/install_config/customizations.adoc#customizations[Customize your cluster]. +* If necessary, you can +xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
\ No newline at end of file diff --git a/installing/installing_openstack/installing-openstack-user.adoc b/installing/installing_openstack/installing-openstack-user.adoc new file mode 100644 index 000000000000..e9d727e8b0d9 --- /dev/null +++ b/installing/installing_openstack/installing-openstack-user.adoc @@ -0,0 +1,60 @@ +[id="installing-openstack-user"] += Installing a cluster on OpenStack on your own infrastructure +include::modules/common-attributes.adoc[] +:context: installing-openstack-user + +toc::[] + +In {product-title} version {product-version}, you can install a cluster on +{rh-openstack-first} that runs on user-provisioned infrastructure. + +Using your own infrastructure allows you to integrate your cluster with existing infrastructure and modifications. The process requires more labor on your part than installer-provisioned installations, because you must create all {rh-openstack} resources, like Nova servers, Neutron ports, and security groups. However, Red Hat provides Ansible playbooks to help you in the deployment process. + +.Prerequisites + +* Review details about the +xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. 
+ +* Have an {rh-openstack} account where you want to install {product-title} + +* On the machine from which you run the installation program, have: +** A single directory in which you can keep the files you create during the installation process +** Python 3 + +include::modules/cluster-entitlements.adoc[leveloffset=+1] +include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] +include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] +include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] +include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] +include::modules/installation-obtaining-installer.adoc[leveloffset=+1] +include::modules/ssh-agent-using.adoc[leveloffset=+1] +// include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] +include::modules/installation-osp-creating-image.adoc[leveloffset=+1] +include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] +include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] +include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] +include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] +include::modules/installation-user-infra-generate.adoc[leveloffset=+1] +include::modules/installation-initializing.adoc[leveloffset=+1] +include::modules/installation-configuration-parameters.adoc[leveloffset=+1] +include::modules/installation-osp-config-yaml.adoc[leveloffset=+2] +include::modules/installation-osp-fixing-subnet.adoc[leveloffset=+2] +include::modules/installation-osp-emptying-worker-pools.adoc[leveloffset=+2] +include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] +include::modules/installation-osp-converting-ignition-resources.adoc[leveloffset=+1] +include::modules/installation-osp-creating-control-plane-ignition.adoc[leveloffset=+1] +include::modules/installation-osp-creating-network-resources.adoc[leveloffset=+1] 
+include::modules/installation-osp-creating-bootstrap-machine.adoc[leveloffset=+1] +include::modules/installation-osp-creating-control-plane.adoc[leveloffset=+1] +include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] +include::modules/installation-osp-deleting-bootstrap-resources.adoc[leveloffset=+1] +include::modules/installation-osp-creating-compute-machines.adoc[leveloffset=+1] +include::modules/installation-approve-csrs.adoc[leveloffset=+1] +include::modules/installation-osp-verifying-installation.adoc[leveloffset=+1] +include::modules/installation-osp-configuring-floating-ip.adoc[leveloffset=+1] + +.Next steps + +* xref:../../installing/install_config/customizations.adoc#customizations[Customize your cluster]. +* If necessary, you can +xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. \ No newline at end of file diff --git a/installing/installing_openstack/uninstalling-cluster-openstack.adoc b/installing/installing_openstack/uninstalling-cluster-openstack.adoc index 0c803c4c46b2..545681a97a23 100644 --- a/installing/installing_openstack/uninstalling-cluster-openstack.adoc +++ b/installing/installing_openstack/uninstalling-cluster-openstack.adoc @@ -1,7 +1,7 @@ -[id="uninstalling-cluster-osp"] +[id="uninstalling-cluster-openstack"] = Uninstalling a cluster on OpenStack include::modules/common-attributes.adoc[] -:context: uninstall-cluster-openstack +:context: uninstalling-cluster-openstack toc::[] diff --git a/installing/installing_openstack/uninstalling-openstack-user.adoc b/installing/installing_openstack/uninstalling-openstack-user.adoc new file mode 100644 index 000000000000..3d61e64a6571 --- /dev/null +++ b/installing/installing_openstack/uninstalling-openstack-user.adoc @@ -0,0 +1,18 @@ +[id="uninstalling-openstack-user"] += Uninstalling a cluster on OpenStack from your own infrastructure 
+include::modules/common-attributes.adoc[] +:context: uninstalling-openstack-user + +toc::[] + +You can remove a cluster that you deployed to {rh-openstack-first} on user-provisioned infrastructure. + +.Prerequisites + +* Have on your machine +** A single directory in which you can create files to help you with the removal process +** Python 3 + +// include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] +include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] +include::modules/installation-uninstall-infra.adoc[leveloffset=+1] diff --git a/modules/installation-configuration-parameters.adoc b/modules/installation-configuration-parameters.adoc index a52f05dcf0da..59ec8fa6eae3 100644 --- a/modules/installation-configuration-parameters.adoc +++ b/modules/installation-configuration-parameters.adoc @@ -109,7 +109,7 @@ endif::osp[] |`platform..region` |The region to deploy your cluster in. |A valid region for your cloud, such as `us-east-1` for AWS, `centralus` -for Azure, or `region1` for {rh-openstack-first}. +for Azure. {rh-openstack-first} does not use this parameter. |`pullSecret` |The pull secret that you obtained from the @@ -274,10 +274,6 @@ ifdef::osp[] |For control plane machines, the root volume's type. |String, for example `performance`. -|`platform.openstack.region` -|The region where the {rh-openstack} cluster is created. -|String, for example `region1`. - |`platform.openstack.cloud` |The name of the {rh-openstack} cloud to use from the list of clouds in the `clouds.yaml` file. 
diff --git a/modules/installation-initializing.adoc b/modules/installation-initializing.adoc index 14973bfa2025..41d8258dc9e5 100644 --- a/modules/installation-initializing.adoc +++ b/modules/installation-initializing.adoc @@ -17,6 +17,7 @@ // * installing/installing_gcp/installing-restricted-networks-gcp.adoc // * installing/installing_openstack/installing-openstack-installer-custom.adoc // * installing/installing_openstack/installing-openstack-installer-kuryr.adoc +// * installing/installing_openstack/installing-openstack-installer-user.adoc // Consider also adding the installation-configuration-parameters.adoc module. //YOU MUST SET AN IFEVAL FOR EACH NEW MODULE @@ -62,6 +63,14 @@ endif::[] ifeval::["{context}" == "installing-openstack-installer-kuryr"] :osp: endif::[] +ifeval::["{context}" == "installing-openstack-user"] +:osp: +:osp-user: +endif::[] +ifeval::["{context}" == "installing-openstack-user-kuryr"] +:osp: +:osp-user: +endif::[] [id="installation-initializing_{context}"] = Creating the installation configuration file @@ -149,7 +158,7 @@ endif::gcp[] ifdef::osp[] ... Select *openstack* as the platform to target. ... Specify the {rh-openstack-first} external network name to use for installing the cluster. -... Specify the Floating IP address to use for external access to the OpenShift API. +... Specify the floating IP address to use for external access to the OpenShift API. ... Specify a {rh-openstack} flavor with at least 16 GB RAM to use for control plane and compute nodes. ... Select the base domain to deploy the cluster to. All DNS records will be @@ -208,6 +217,8 @@ The `install-config.yaml` file is consumed during the installation process. If you want to reuse the file, you must back it up now. ==== +ifdef::osp-user[You now have the file `install-config.yaml` in the directory that you specified.] 
+ ifeval::["{context}" == "installing-aws-customizations"] :!aws: endif::[] @@ -247,3 +258,11 @@ endif::[] ifeval::["{context}" == "installing-openstack-installer-kuryr"] :!osp: endif::[] +ifeval::["{context}" == "installing-openstack-user"] +:!osp: +:!osp-user: +endif::[] +ifeval::["{context}" == "installing-openstack-user-kuryr"] +:!osp: +:!osp-user: +endif::[] \ No newline at end of file diff --git a/modules/installation-osp-accessing-api-floating.adoc b/modules/installation-osp-accessing-api-floating.adoc index ad974c672f14..94f4d6a283c4 100644 --- a/modules/installation-osp-accessing-api-floating.adoc +++ b/modules/installation-osp-accessing-api-floating.adoc @@ -3,28 +3,36 @@ // * installing/installing_openstack/installing-openstack-installer.adoc // * installing/installing_openstack/installing-openstack-installer-custom.adoc // * installing/installing_openstack/installing-openstack-installer-kuryr.adoc +// * installing/installing_openstack/installing-openstack-user.adoc [id="installation-osp-accessing-api-floating_{context}"] = Enabling access with floating IP addresses -Make {product-title} API endpoints accessible by attaching two floating IP (FIP) addresses to them: one for the API load balancer (`lb FIP`), and one for {product-title} applications (`apps FIP`). +Create two floating IP (FIP) addresses: one for external access to the {product-title} API, the `API FIP`, and one for {product-title} applications, the `apps FIP`. [IMPORTANT] -The load balancer FIP is also used in the `install-config.yaml` file. +The API FIP is also used in the `install-config.yaml` file. .Procedure -. Using the {rh-openstack-first} CLI, create a new external network: +. Using the {rh-openstack-first} CLI, create the API FIP: + ---- -$ openstack floating ip create +$ openstack floating ip create --description "API ." ---- -. Add a record that follows this pattern to your DNS server: +. 
Using the {rh-openstack-first} CLI, create the apps, or Ingress, FIP: ++ +---- +$ openstack floating ip create --description "Ingress <cluster name>.<base domain>." <external network> +---- + +. To reflect the new FIPs, add records that follow these patterns to your DNS server: + [source,dns] ---- -api.<cluster name>.<base domain> IN A <lb FIP> +api.<cluster name>.<base domain>. IN A <API FIP> +*.apps.<cluster name>.<base domain>. IN A <apps FIP> ---- + [NOTE] diff --git a/modules/installation-osp-accessing-api-no-floating.adoc b/modules/installation-osp-accessing-api-no-floating.adoc index b269b202cebc..231294746d86 100644 --- a/modules/installation-osp-accessing-api-no-floating.adoc +++ b/modules/installation-osp-accessing-api-no-floating.adoc @@ -9,4 +9,4 @@ If you cannot use floating IP addresses, the {product-title} installation might still finish. However, the installation program fails after it times out waiting for API access. -After the installation program times out, the cluster might still initialize. After the bootstrapping processing begins, it must complete. You must edit the cluster's networking configuration after it is deployed, however. +After the installation program times out, the cluster might still initialize. After the bootstrapping processing begins, it must complete. You must edit the cluster's networking configuration after it is deployed. 
\ No newline at end of file diff --git a/modules/installation-osp-accessing-api.adoc b/modules/installation-osp-accessing-api.adoc index aa05d9286f5b..48972c459ddb 100644 --- a/modules/installation-osp-accessing-api.adoc +++ b/modules/installation-osp-accessing-api.adoc @@ -3,10 +3,28 @@ // * installing/installing_openstack/installing-openstack-installer.adoc // * installing/installing_openstack/installing-openstack-installer-custom.adoc // * installing/installing_openstack/installing-openstack-installer-kuryr.adoc +// * installing/installing_openstack/installing-openstack-user.adoc +// * installing/installing_openstack/installing-openstack-user-kuryr.adoc + +ifeval::["{context}" == "installing-openstack-user"] +:osp-user: +endif::[] +ifeval::["{context}" == "installing-openstack-user-kuryr"] +:osp-user: +endif::[] [id="installation-osp-accessing-api_{context}"] = Enabling access to the environment At deployment, all {product-title} machines are created in a {rh-openstack-first}-tenant network. Therefore, they are not accessible directly in most {rh-openstack} deployments. -You can configure the {product-title} API to be accessible either with or without floating IP addresses. \ No newline at end of file +You can configure the {product-title} API and applications that run on the cluster to be accessible +ifdef::osp-user[by using floating IP addresses.] +ifndef::osp-user[with or without floating IP addresses.] 
+ +ifeval::["{context}" == "installing-openstack-user"] +:!osp-user: +endif::[] +ifeval::["{context}" == "installing-openstack-user-kuryr"] +:!osp-user: +endif::[] \ No newline at end of file diff --git a/modules/installation-osp-config-yaml.adoc b/modules/installation-osp-config-yaml.adoc index c2f061003bf8..1b32de5f54fa 100644 --- a/modules/installation-osp-config-yaml.adoc +++ b/modules/installation-osp-config-yaml.adoc @@ -41,7 +41,6 @@ networking: networkType: OpenShiftSDN platform: openstack: - region: region1 cloud: mycloud externalNetwork: external computeFlavor: m1.xlarge diff --git a/modules/installation-osp-control-compute-machines.adoc b/modules/installation-osp-control-compute-machines.adoc index 3b2cc4640880..9d84fd1595f9 100644 --- a/modules/installation-osp-control-compute-machines.adoc +++ b/modules/installation-osp-control-compute-machines.adoc @@ -6,8 +6,8 @@ [id="installation-osp-control-compute-machines_{context}"] = Control plane and compute machines -By default, the {product-title} installation program stands up three control -plane and compute machines. +By default, the {product-title} installation process stands up three control +plane and three compute machines. Each machine requires: diff --git a/modules/installation-osp-converting-ignition-resources.adoc b/modules/installation-osp-converting-ignition-resources.adoc new file mode 100644 index 000000000000..046b6b9d959d --- /dev/null +++ b/modules/installation-osp-converting-ignition-resources.adoc @@ -0,0 +1,173 @@ +// Module included in the following assemblies: +// +// * installing/installing_openstack/installing-openstack-user.adoc + +[id="installation-osp-converting-ignition-resources_{context}"] += Preparing the bootstrap Ignition files + +The {product-title} installation process relies on bootstrap machines that are created from a bootstrap Ignition configuration file. + +Edit the file and upload it. 
Then, create a secondary bootstrap Ignition configuration file that +{rh-openstack} uses to download the primary file. + +.Prerequisites + +* You have the bootstrap Ignition file that the installer program generates, `bootstrap.ign`. +* The infrastructure ID from the installer's metadata file is set as an environment variable (`$INFRA_ID`). +** If the variable is not set, see *Creating the Kubernetes manifest and Ignition config files*. +* You have an HTTP(S)-accessible way to store the bootstrap ignition file. +** The documented procedure uses the OpenStack Image service (Glance), but you can also use the OpenStack Storage service (Swift), Amazon S3, an internal HTTP server, or an ad hoc Nova server. + +.Procedure + +. Run the following Python script. The script modifies the bootstrap Ignition file to set the host name and, if available, CA certificate file when it runs: ++ +[source,python] +---- +import base64 +import json +import os + +with open('bootstrap.ign', 'r') as f: + ignition = json.load(f) + +files = ignition['storage'].get('files', []) + +infra_id = os.environ.get('INFRA_ID', 'openshift').encode() +hostname_b64 = base64.standard_b64encode(infra_id + b'-bootstrap\n').decode().strip() +files.append( +{ + 'path': '/etc/hostname', + 'mode': 420, + 'contents': { + 'source': 'data:text/plain;charset=utf-8;base64,' + hostname_b64, + 'verification': {} + }, + 'filesystem': 'root', +}) + +ca_cert_path = os.environ.get('OS_CACERT', '') +if ca_cert_path: + with open(ca_cert_path, 'r') as f: + ca_cert = f.read().encode() + ca_cert_b64 = base64.standard_b64encode(ca_cert).decode().strip() + + files.append( + { + 'path': '/opt/openshift/tls/cloud-ca-cert.pem', + 'mode': 420, + 'contents': { + 'source': 'data:text/plain;charset=utf-8;base64,' + ca_cert_b64, + 'verification': {} + }, + 'filesystem': 'root', + }) + +ignition['storage']['files'] = files; + +with open('bootstrap.ign', 'w') as f: + json.dump(ignition, f) +---- + +. 
Using the OpenStack CLI, create an image that uses the bootstrap Ignition file: ++ +---- +$ openstack image create --disk-format=raw --container-format=bare --file bootstrap.ign +---- + +. Get the image's details: ++ +---- +$ openstack image show +---- ++ +Make a note of the `file` value; it follows the pattern `v2/images//file`. ++ +[NOTE] +Verify that the image you created is active. + +. Retrieve the Image service's public address: ++ +---- +$ openstack catalog show image +---- + +. Combine the public address with the image `file` value and save the result as the storage location. The location follows the pattern `/v2/images//file`. + +. Generate an auth token and save the token ID: ++ +---- +$ openstack token issue -c id -f value +---- + +. Insert the following content into a file called `$INFRA_ID-bootstrap-ignition.json` and edit the placeholders to match your own values: ++ +[source,json] +---- +{ + "ignition": { + "config": { + "append": [{ + "source": "", <1> + "verification": {}, + "httpHeaders": [{ + "name": "X-Auth-Token", <2> + "value": "" <3> + }] + }] + }, + "security": { + "tls": { + "certificateAuthorities": [{ + "source": "data:text/plain;charset=utf-8;base64,", <4> + "verification": {} + }] + } + }, + "timeouts": {}, + "version": "2.4.0" + }, + "networkd": {}, + "passwd": {}, + "storage": {}, + "systemd": {} +} +---- +<1> Replace the value of `ignition.config.append.source` with the bootstrap Ignition file storage URL. +<2> Set `name` in `httpHeaders` to `"X-Auth-Token"`. +<3> Set `value` in `httpHeaders` to your token's ID. +<4> If the bootstrap Ignition file server uses a self-signed certificate, include the Base64-encoded certificate. + +. Save the secondary Ignition config file. + +The bootstrap Ignition data will be passed to {rh-openstack} during installation. + +[WARNING] +The bootstrap Ignition file contains sensitive information, like `clouds.yaml` credentials. 
Ensure that you store it in a secure place, and delete it after you complete the installation process. + +// . If you are using Swift: +// .. Using the Swift CLI, create a container: +// + +// ---- +// $ swift post +// ---- +// +// .. Upload the bootstrap Ignition file to the container: +// + +// ---- +// $ swift upload bootstrap.ign +// ---- +// +// .. Set the container to be read-accessible: +// + +// ---- +// $ swift post --read-acl ".r:*,.rlistings" +// ---- +// +// .. Retrieve the storage URL: +// + +// ---- +// $ swift stat -v +// ---- +// ** The URL should follow this format: `//bootstrap.ign` +// May need to bring this back. \ No newline at end of file diff --git a/modules/installation-osp-creating-bootstrap-machine.adoc b/modules/installation-osp-creating-bootstrap-machine.adoc new file mode 100644 index 000000000000..f3b212863390 --- /dev/null +++ b/modules/installation-osp-creating-bootstrap-machine.adoc @@ -0,0 +1,77 @@ +// Module included in the following assemblies: +// +// * installing/installing_openstack/installing-openstack-user.adoc + +[id="installation-osp-creating-bootstrap-machine_{context}"] += Creating the bootstrap machine + +Create a bootstrap machine and give it the network access it needs to run on {rh-openstack-first}. Red Hat provides an Ansible playbook that you run to simplify this process. + +.Prerequisites +* The `inventory.yaml` and `common.yaml` Ansible playbooks in a common directory +** If you need these files, copy them from *Creating network resources* +* The `metadata.yaml` file that the installation program created is in the same directory as the Ansible playbooks + +.Procedure + +. On a command line, change the working directory to the location of the `inventory.yaml`and `common.yaml` files. + +. 
Insert the following content into a local file that is called `03_bootstrap.yaml`: ++ +[source,yaml] +---- +# Required Python packages: +# +# ansible +# openstackclient +# openstacksdk +# netaddr + +- import_playbook: common.yaml + +- hosts: all + gather_facts: no + + tasks: + - name: 'Create the bootstrap server port' + os_port: + name: "{{ os_port_bootstrap }}" + network: "{{ os_network }}" + security_groups: + - "{{ os_sg_master }}" + allowed_address_pairs: + - ip_address: "{{ os_subnet_range | next_nth_usable(5) }}" + - ip_address: "{{ os_subnet_range | next_nth_usable(6) }}" + + - name: 'Set bootstrap port tag' + command: + cmd: "openstack port set --tag {{ cluster_id_tag }} {{ os_port_bootstrap }}" + + - name: 'Create the bootstrap server' + os_server: + name: "{{ os_bootstrap_server_name }}" + image: "{{ os_image_rhcos }}" + flavor: "{{ os_flavor_master }}" + userdata: "{{ lookup('file', os_bootstrap_ignition) | string }}" + auto_ip: no + nics: + - port-name: "{{ os_port_bootstrap }}" + + - name: 'Create the bootstrap floating IP' + os_floating_ip: + state: present + network: "{{ os_external_network }}" + server: "{{ os_bootstrap_server_name }}" +---- + +. On a command line, run the playbook: ++ +---- +$ ansible-playbook -i inventory.yaml 03_bootstrap.yaml +---- + +. 
After the bootstrap server is active, view the logs to verify that the Ignition files were received: ++ +---- +$ openstack console log show "$INFRA_ID-bootstrap" +---- \ No newline at end of file diff --git a/modules/installation-osp-creating-compute-machines.adoc b/modules/installation-osp-creating-compute-machines.adoc new file mode 100644 index 000000000000..feccd06c2392 --- /dev/null +++ b/modules/installation-osp-creating-compute-machines.adoc @@ -0,0 +1,87 @@ +// Module included in the following assemblies: +// +// * installing/installing_openstack/installing-openstack-user.adoc + +[id="installation-osp-creating-compute-machines_{context}"] += Creating compute machines + +After standing up the control plane, create compute machines. + +.Prerequisites +* The `inventory.yaml` and `common.yaml` Ansible playbooks in a common directory +** If you need these files, copy them from *Creating network resources* +* The `metadata.yaml` file that the installation program created is in the same directory as the Ansible playbooks +* The control plane is active + +.Procedure + +. On a command line, change the working directory to the location of the `inventory.yaml`and `common.yaml` files. + +. 
Insert the following content into a local file that is called `05_compute-nodes.yaml`: ++ +[source,yaml] +---- +# Required Python packages: +# +# ansible +# openstackclient +# openstacksdk +# netaddr + +- import_playbook: common.yaml + +- hosts: all + gather_facts: no + + tasks: + - name: 'Create the Compute ports' + os_port: + name: "{{ item.1 }}-{{ item.0 }}" + network: "{{ os_network }}" + security_groups: + - "{{ os_sg_worker }}" + allowed_address_pairs: + - ip_address: "{{ os_subnet_range | next_nth_usable(7) }}" + with_indexed_items: "{{ [os_port_worker] * os_compute_nodes_number }}" + register: ports + + - name: 'Set Compute ports tag' + command: + cmd: "openstack port set --tag {{ [cluster_id_tag] }} {{ item.1 }}-{{ item.0 }}" + with_indexed_items: "{{ [os_port_worker] * os_compute_nodes_number }}" + + - name: 'List the Compute Trunks' + command: + cmd: "openstack network trunk list" + when: os_networking_type == "Kuryr" + register: compute_trunks + + - name: 'Create the Compute trunks' + command: + cmd: "openstack network trunk create --parent-port {{ item.1.id }} {{ os_compute_trunk_name }}-{{ item.0 }}" + with_indexed_items: "{{ ports.results }}" + when: + - os_networking_type == "Kuryr" + - "os_compute_trunk_name|string not in compute_trunks.stdout" + + - name: 'Create the Compute servers' + os_server: + name: "{{ item.1 }}-{{ item.0 }}" + image: "{{ os_image_rhcos }}" + flavor: "{{ os_flavor_worker }}" + auto_ip: no + userdata: "{{ lookup('file', 'worker.ign') | string }}" + nics: + - port-name: "{{ os_port_worker }}-{{ item.0 }}" + with_indexed_items: "{{ [os_compute_server_name] * os_compute_nodes_number }}" +---- + +. 
On a command line, run the playbook: ++ +---- +$ ansible-playbook -i inventory.yaml 05_compute-nodes.yaml +---- + +.Next steps + +* Approve the machines' certificate signing requests \ No newline at end of file diff --git a/modules/installation-osp-creating-control-plane-ignition.adoc b/modules/installation-osp-creating-control-plane-ignition.adoc new file mode 100644 index 000000000000..db0fc774e60f --- /dev/null +++ b/modules/installation-osp-creating-control-plane-ignition.adoc @@ -0,0 +1,35 @@ +// Module included in the following assemblies: +// +// * installing/installing_openstack/installing-openstack-user.adoc + +[id="installation-osp-creating-control-plane-ignition_{context}"] += Creating control plane Ignition config files + +Installing {product-title} on {rh-openstack-first} on your own infrastructure requires control plane Ignition config files. You must create multiple config files. + +[NOTE] +As with the bootstrap Ignition configuration, you must explicitly define a host name for each control plane machine. + +.Prerequisites + +* The infrastructure ID from the installation program's metadata file is set as an environment variable (`$INFRA_ID`) +** If the variable is not set, see *Creating the Kubernetes manifest and Ignition config files*. 
+ +.Procedure + +* On a command line, run the following Python script: ++ +---- +$ for index in $(seq 0 2); do + MASTER_HOSTNAME="$INFRA_ID-master-$index\n" + python -c "import base64, json, sys; +ignition = json.load(sys.stdin); +files = ignition['storage'].get('files', []); +files.append({'path': '/etc/hostname', 'mode': 420, 'contents': {'source': 'data:text/plain;charset=utf-8;base64,' + base64.standard_b64encode(b'$MASTER_HOSTNAME').decode().strip(), 'verification': {}}, 'filesystem': 'root'}); +ignition['storage']['files'] = files; +json.dump(ignition, sys.stdout)" <master.ign >"$INFRA_ID-master-$index-ignition.json" +done +---- ++ +You now have three control plane Ignition files: `<INFRA_ID>-master-0-ignition.json`, `<INFRA_ID>-master-1-ignition.json`, +and `<INFRA_ID>-master-2-ignition.json`. \ No newline at end of file diff --git a/modules/installation-osp-creating-control-plane.adoc b/modules/installation-osp-creating-control-plane.adoc new file mode 100644 index 000000000000..80841b2fa71d --- /dev/null +++ b/modules/installation-osp-creating-control-plane.adoc @@ -0,0 +1,107 @@ +// Module included in the following assemblies: +// +// * installing/installing_openstack/installing-openstack-user.adoc + +[id="installation-osp-creating-control-plane_{context}"] += Creating the control plane machines + +Create three control plane machines by using the Ignition config files that you generated. + +.Prerequisites +* The infrastructure ID from the installation program's metadata file is set as an environment variable (`$INFRA_ID`) +* The `inventory.yaml` and `common.yaml` Ansible playbooks in a common directory +** If you need these files, copy them from *Creating network resources* +* The three Ignition files created in *Creating control plane Ignition config files* + +.Procedure + +. On a command line, change the working directory to the location of the `inventory.yaml` and `common.yaml` files. + +. If the control plane Ignition config files aren't already in your working directory, copy them into it. 
+ +. Insert the following content into a local file that is called `04_control-plane.yaml`: ++ +[source,yaml] +---- +# Required Python packages: +# +# ansible +# openstackclient +# openstacksdk +# netaddr + +- import_playbook: common.yaml + +- hosts: all + gather_facts: no + + tasks: + - name: 'Create the Control Plane ports' + os_port: + name: "{{ item.1 }}-{{ item.0 }}" + network: "{{ os_network }}" + security_groups: + - "{{ os_sg_master }}" + allowed_address_pairs: + - ip_address: "{{ os_subnet_range | next_nth_usable(5) }}" + - ip_address: "{{ os_subnet_range | next_nth_usable(6) }}" + - ip_address: "{{ os_subnet_range | next_nth_usable(7) }}" + with_indexed_items: "{{ [os_port_master] * os_cp_nodes_number }}" + register: ports + + - name: 'Set Control Plane ports tag' + command: + cmd: "openstack port set --tag {{ cluster_id_tag }} {{ item.1 }}-{{ item.0 }}" + with_indexed_items: "{{ [os_port_master] * os_cp_nodes_number }}" + + - name: 'List the Control Plane Trunks' + command: + cmd: "openstack network trunk list" + when: os_networking_type == "Kuryr" + register: control_plane_trunks + + - name: 'Create the Control Plane trunks' + command: + cmd: "openstack network trunk create --parent-port {{ item.1.id }} {{ os_cp_trunk_name }}-{{ item.0 }}" + with_indexed_items: "{{ ports.results }}" + when: + - os_networking_type == "Kuryr" + - "os_cp_trunk_name|string not in control_plane_trunks.stdout" + + - name: 'Create the Control Plane servers' + os_server: + name: "{{ item.1 }}-{{ item.0 }}" + image: "{{ os_image_rhcos }}" + flavor: "{{ os_flavor_master }}" + auto_ip: no + # The ignition filename will be concatenated with the Control Plane node + # name and its 0-indexed serial number. 
+ # In this case, the first node will look for this filename: + # "{{ infraID }}-master-0-ignition.json" + userdata: "{{ lookup('file', [item.1, item.0, 'ignition.json'] | join('-')) | string }}" + nics: + - port-name: "{{ os_port_master }}-{{ item.0 }}" + with_indexed_items: "{{ [os_cp_server_name] * os_cp_nodes_number }}" +---- + +. On a command line, run the playbook: ++ +---- +$ ansible-playbook -i inventory.yaml 04_control-plane.yaml +---- + +. Run the following command to monitor the bootstrapping process: ++ +---- +$ openshift-install wait-for bootstrap-complete +---- ++ +You will see messages that confirm that the control plane machines are running and have joined the cluster: ++ +---- +INFO API v1.14.6+f9b5405 up +INFO Waiting up to 30m0s for bootstrapping to complete... +... +INFO It is now safe to remove the bootstrap resources +---- + diff --git a/modules/installation-osp-creating-image.adoc b/modules/installation-osp-creating-image.adoc new file mode 100644 index 000000000000..359de436bd22 --- /dev/null +++ b/modules/installation-osp-creating-image.adoc @@ -0,0 +1,54 @@ +//Module included in the following assemblies: +// +// * installing/installing_openstack/installing-openstack-user.adoc + +[id="installation-osp-creating-image_{context}"] += Creating the {op-system-first} image + +The {product-title} installation program requires that a {op-system-first} image be present in the {rh-openstack-first} cluster. Retrieve the latest {op-system} image, then upload it using the {rh-openstack} CLI. + +.Prerequisites + +* The {rh-openstack} CLI is installed. + +.Procedure + +. Log in to the Red Hat customer portal's https://access.redhat.com/downloads/content/290[Product Downloads page]. + +. Under *Version*, select the most recent release of {product-title} {product-version} for RHEL 8. ++ +[IMPORTANT] +==== +The {op-system} images might not change with every release of {product-title}. 
You must download images with the highest version that is less than or equal to +the {product-title} version that you install. Use the image versions that match +your {product-title} version if they are available. +==== + +. Download the _Red Hat Enterprise Linux CoreOS - OpenStack Image (QCOW)_. + +. Decompress the image. ++ +[NOTE] +==== +You must decompress the OpenStack image before the cluster can use it. The name of the downloaded file might not contain a compression extension, like `.gz` or `.tgz`. To find out if or how the file is compressed, in a command line, enter: + +---- +$ file <name_of_downloaded_file> +---- + +==== + +. From the image that you downloaded, create an image that is named `rhcos` in your cluster by using the {rh-openstack} CLI: ++ +---- +$ openstack image create --container-format=bare --disk-format=qcow2 --file rhcos-${RHCOS_VERSION}-openstack.qcow2 rhcos +---- ++ +[IMPORTANT] +Depending on your {rh-openstack} environment, you might be able to upload the image in either link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/15/html/instances_and_images_guide/index[`.raw` or `.qcow2` formats]. If you use Ceph, you must use the `.raw` format. ++ +[CAUTION] +If the installation program finds multiple images with the same name, it chooses one of them at random. To avoid this behavior, create unique names for resources in {rh-openstack}. + +After you upload the image to {rh-openstack}, it is usable in the installation process. 
\ No newline at end of file diff --git a/modules/installation-osp-creating-network-resources.adoc b/modules/installation-osp-creating-network-resources.adoc new file mode 100644 index 000000000000..8c754d225a02 --- /dev/null +++ b/modules/installation-osp-creating-network-resources.adoc @@ -0,0 +1,686 @@ +// Module included in the following assemblies: +// +// * installing/installing_openstack/installing-openstack-user.adoc + +[id="installation-osp-creating-network-resources_{context}"] += Creating network resources + +Create the network resources that a {product-title} on {rh-openstack-first} installation on your own infrastructure requires. To save time, run supplied Ansible playbooks that generate security groups, networks, subnets, routers, and ports. + +.Procedure + +. Insert the following content into a local file that is called `common.yaml`: ++ +[source,yaml] +---- +- hosts: localhost + gather_facts: no + + vars_files: + - metadata.json + + tasks: + - name: 'Compute resource names' + set_fact: + cluster_id_tag: "openshiftClusterID={{ infraID }}" + os_network: "{{ infraID }}-network" + os_subnet: "{{ infraID }}-nodes" + os_router: "{{ infraID }}-external-router" + # Port names + os_port_api: "{{ infraID }}-api-port" + os_port_ingress: "{{ infraID }}-ingress-port" + os_port_bootstrap: "{{ infraID }}-bootstrap-port" + os_port_master: "{{ infraID }}-master-port" + os_port_worker: "{{ infraID }}-worker-port" + # Security groups names + os_sg_master: "{{ infraID }}-master" + os_sg_worker: "{{ infraID }}-worker" + # Server names + os_bootstrap_server_name: "{{ infraID }}-bootstrap" + os_cp_server_name: "{{ infraID }}-master" + os_compute_server_name: "{{ infraID }}-worker" + # Trunk names + os_cp_trunk_name: "{{ infraID }}-master-trunk" + os_compute_trunk_name: "{{ infraID }}-worker-trunk" + # Subnet pool name + subnet_pool: "{{ infraID }}-kuryr-pod-subnetpool" + # Service network name + os_svc_network: "{{ infraID }}-kuryr-service-network" + # Service subnet name 
+ os_svc_subnet: "{{ infraID }}-kuryr-service-subnet" + # Ignition files + os_bootstrap_ignition: "{{ infraID }}-bootstrap-ignition.json" +---- +. Insert the following content into a local file that is called `inventory.yaml`: ++ +[source,yaml] +---- +all: + hosts: + localhost: + ansible_connection: local + ansible_python_interpreter: "{{ansible_playbook_python}}" + + # User-provided values + os_subnet_range: '10.0.0.0/16' + os_flavor_master: 'm1.xlarge' + os_flavor_worker: 'm1.large' + os_image_rhcos: 'rhcos' + os_external_network: 'external' + # OpenShift API floating IP address + os_api_fip: '203.0.113.23' + # OpenShift Ingress floating IP address + os_ingress_fip: '203.0.113.19' + # Service subnet cidr + svc_subnet_range: '172.30.0.0/16' + os_svc_network_range: '172.30.0.0/15' + # Subnet pool prefixes + cluster_network_cidrs: '10.128.0.0/14' + # Subnet pool prefix length + host_prefix: '23' + # Name of the SDN. + # Possible values are OpenshiftSDN or Kuryr. + os_networking_type: 'OpenshiftSDN' + + # Number of provisioned Control Plane nodes + # 3 is the minimum number for a fully-functional cluster. + os_cp_nodes_number: 3 + + # Number of provisioned Compute nodes. + # 3 is the minimum number for a fully-functional cluster. + os_compute_nodes_number: 3 +---- +. 
Insert the following content into a local file that is called `01_security-groups.yaml` ++ +[source,yaml] +---- +# Required Python packages: +# +# ansible +# openstackclient +# openstacksdk + +- import_playbook: common.yaml + +- hosts: all + gather_facts: no + + tasks: + - name: 'Create the master security group' + os_security_group: + name: "{{ os_sg_master }}" + + - name: 'Set master security group tag' + command: + cmd: "openstack security group set --tag {{ cluster_id_tag }} {{ os_sg_master }} " + + - name: 'Create the worker security group' + os_security_group: + name: "{{ os_sg_worker }}" + + - name: 'Set worker security group tag' + command: + cmd: "openstack security group set --tag {{ cluster_id_tag }} {{ os_sg_worker }} " + + - name: 'Create master-sg rule "ICMP"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: icmp + + - name: 'Create master-sg rule "machine config server"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_ip_prefix: "{{ os_subnet_range }}" + port_range_min: 22623 + port_range_max: 22623 + + - name: 'Create master-sg rule "SSH"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + port_range_min: 22 + port_range_max: 22 + + - name: 'Create master-sg rule "DNS (TCP)"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + remote_ip_prefix: "{{ os_subnet_range }}" + protocol: tcp + port_range_min: 53 + port_range_max: 53 + + - name: 'Create master-sg rule "DNS (UDP)"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + remote_ip_prefix: "{{ os_subnet_range }}" + protocol: udp + port_range_min: 53 + port_range_max: 53 + + - name: 'Create master-sg rule "mDNS"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + remote_ip_prefix: "{{ os_subnet_range }}" + protocol: udp + port_range_min: 5353 + port_range_max: 5353 + + - name: 'Create master-sg rule "OpenShift API"' + os_security_group_rule: + 
security_group: "{{ os_sg_master }}" + protocol: tcp + port_range_min: 6443 + port_range_max: 6443 + + - name: 'Create master-sg rule "VXLAN"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: udp + remote_group: "{{ os_sg_master }}" + port_range_min: 4789 + port_range_max: 4789 + + - name: 'Create master-sg rule "VXLAN from worker"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: udp + remote_group: "{{ os_sg_worker }}" + port_range_min: 4789 + port_range_max: 4789 + + - name: 'Create master-sg rule "Geneve"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: udp + remote_group: "{{ os_sg_master }}" + port_range_min: 6081 + port_range_max: 6081 + + - name: 'Create master-sg rule "Geneve from worker"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: udp + remote_group: "{{ os_sg_worker }}" + port_range_min: 6081 + port_range_max: 6081 + + - name: 'Create master-sg rule "ovndb"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_group: "{{ os_sg_master }}" + port_range_min: 6641 + port_range_max: 6642 + + - name: 'Create master-sg rule "ovndb from worker"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_group: "{{ os_sg_worker }}" + port_range_min: 6641 + port_range_max: 6642 + + - name: 'Create master-sg rule "master ingress internal (TCP)"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_group: "{{ os_sg_master }}" + port_range_min: 9000 + port_range_max: 9999 + + - name: 'Create master-sg rule "master ingress internal from worker (TCP)"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_group: "{{ os_sg_worker }}" + port_range_min: 9000 + port_range_max: 9999 + + - name: 'Create master-sg rule "master ingress internal (UDP)"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + 
protocol: udp + remote_group: "{{ os_sg_master }}" + port_range_min: 9000 + port_range_max: 9999 + + - name: 'Create master-sg rule "master ingress internal from worker (UDP)"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: udp + remote_group: "{{ os_sg_worker }}" + port_range_min: 9000 + port_range_max: 9999 + + - name: 'Create master-sg rule "kube scheduler"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_group: "{{ os_sg_master }}" + port_range_min: 10259 + port_range_max: 10259 + + - name: 'Create master-sg rule "kube scheduler from worker"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_group: "{{ os_sg_worker }}" + port_range_min: 10259 + port_range_max: 10259 + + - name: 'Create master-sg rule "kube controller manager"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_group: "{{ os_sg_master }}" + port_range_min: 10257 + port_range_max: 10257 + + - name: 'Create master-sg rule "kube controller manager from worker"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_group: "{{ os_sg_worker }}" + port_range_min: 10257 + port_range_max: 10257 + + - name: 'Create master-sg rule "master ingress kubelet secure"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_group: "{{ os_sg_master }}" + port_range_min: 10250 + port_range_max: 10250 + + - name: 'Create master-sg rule "master ingress kubelet secure from worker"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_group: "{{ os_sg_worker }}" + port_range_min: 10250 + port_range_max: 10250 + + - name: 'Create master-sg rule "etcd"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_group: "{{ os_sg_master }}" + port_range_min: 2379 + port_range_max: 2380 + + - name: 'Create master-sg rule "master ingress 
services (TCP)"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_group: "{{ os_sg_master }}" + port_range_min: 30000 + port_range_max: 32767 + + - name: 'Create master-sg rule "master ingress services (TCP) from worker"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: tcp + remote_group: "{{ os_sg_worker }}" + port_range_min: 30000 + port_range_max: 32767 + + - name: 'Create master-sg rule "master ingress services (UDP)"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: udp + remote_group: "{{ os_sg_master }}" + port_range_min: 30000 + port_range_max: 32767 + + - name: 'Create master-sg rule "master ingress services (UDP) from worker"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: udp + remote_group: "{{ os_sg_worker }}" + port_range_min: 30000 + port_range_max: 32767 + + - name: 'Create master-sg rule "VRRP"' + os_security_group_rule: + security_group: "{{ os_sg_master }}" + protocol: '112' + remote_ip_prefix: "{{ os_subnet_range }}" + + + - name: 'Create worker-sg rule "ICMP"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: icmp + + - name: 'Create worker-sg rule "SSH"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: tcp + port_range_min: 22 + port_range_max: 22 + + - name: 'Create worker-sg rule "mDNS"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: udp + remote_ip_prefix: "{{ os_subnet_range }}" + port_range_min: 5353 + port_range_max: 5353 + + - name: 'Create worker-sg rule "Ingress HTTP"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: tcp + port_range_min: 80 + port_range_max: 80 + + - name: 'Create worker-sg rule "Ingress HTTPS"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: tcp + port_range_min: 443 + port_range_max: 443 + + - name: 'Create worker-sg rule "router"' + 
os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: tcp + remote_ip_prefix: "{{ os_subnet_range }}" + port_range_min: 1936 + port_range_max: 1936 + + - name: 'Create worker-sg rule "VXLAN"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: udp + remote_group: "{{ os_sg_worker }}" + port_range_min: 4789 + port_range_max: 4789 + + - name: 'Create worker-sg rule "VXLAN from master"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: udp + remote_group: "{{ os_sg_master }}" + port_range_min: 4789 + port_range_max: 4789 + + - name: 'Create worker-sg rule "Geneve"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: udp + remote_group: "{{ os_sg_worker }}" + port_range_min: 6081 + port_range_max: 6081 + + - name: 'Create worker-sg rule "Geneve from master"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: udp + remote_group: "{{ os_sg_master }}" + port_range_min: 6081 + port_range_max: 6081 + + - name: 'Create worker-sg rule "worker ingress internal (TCP)"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: tcp + remote_group: "{{ os_sg_worker }}" + port_range_min: 9000 + port_range_max: 9999 + + - name: 'Create worker-sg rule "worker ingress internal from master (TCP)"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: tcp + remote_group: "{{ os_sg_master }}" + port_range_min: 9000 + port_range_max: 9999 + + - name: 'Create worker-sg rule "worker ingress internal (UDP)"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: udp + remote_group: "{{ os_sg_worker }}" + port_range_min: 9000 + port_range_max: 9999 + + - name: 'Create worker-sg rule "worker ingress internal from master (UDP)"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: udp + remote_group: "{{ os_sg_master }}" + port_range_min: 9000 + port_range_max: 9999 + + - name: 'Create 
worker-sg rule "worker ingress kubelet secure"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: tcp + remote_group: "{{ os_sg_worker }}" + port_range_min: 10250 + port_range_max: 10250 + + - name: 'Create worker-sg rule "worker ingress kubelet secure from master"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: tcp + remote_group: "{{ os_sg_master }}" + port_range_min: 10250 + port_range_max: 10250 + + - name: 'Create worker-sg rule "worker ingress services (TCP)"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: tcp + remote_group: "{{ os_sg_worker }}" + port_range_min: 30000 + port_range_max: 32767 + + - name: 'Create worker-sg rule "worker ingress services (TCP) from master"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: tcp + remote_group: "{{ os_sg_master }}" + port_range_min: 30000 + port_range_max: 32767 + + - name: 'Create worker-sg rule "worker ingress services (UDP)"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: udp + remote_group: "{{ os_sg_worker }}" + port_range_min: 30000 + port_range_max: 32767 + + - name: 'Create worker-sg rule "worker ingress services (UDP) from master"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: udp + remote_group: "{{ os_sg_master }}" + port_range_min: 30000 + port_range_max: 32767 + + - name: 'Create worker-sg rule "VRRP"' + os_security_group_rule: + security_group: "{{ os_sg_worker }}" + protocol: '112' + remote_ip_prefix: "{{ os_subnet_range }}" +---- +. 
Insert the following content into a local file that is called `02_network.yaml` ++ +[source,yaml] +---- +# Required Python packages: +# +# ansible +# openstackclient +# openstacksdk +# netaddr + +- import_playbook: common.yaml + +- hosts: all + gather_facts: no + + tasks: + - name: 'Create the cluster network' + os_network: + name: "{{ os_network }}" + + - name: 'Set the cluster network tag' + command: + cmd: "openstack network set --tag {{ cluster_id_tag }} {{ os_network }}" + + - name: 'Create a subnet' + os_subnet: + name: "{{ os_subnet }}" + network_name: "{{ os_network }}" + cidr: "{{ os_subnet_range }}" + allocation_pool_start: "{{ os_subnet_range | next_nth_usable(10) }}" + allocation_pool_end: "{{ os_subnet_range | ipaddr('last_usable') }}" + + - name: 'Set the cluster subnet tag' + command: + cmd: "openstack subnet set --tag {{ cluster_id_tag }} {{ os_subnet }}" + + - name: 'Create the service network' + os_network: + name: "{{ os_svc_network }}" + when: os_networking_type == "Kuryr" + + - name: 'Set the service network tag' + command: + cmd: "openstack network set --tag {{ cluster_id_tag }} {{ os_svc_network }}" + when: os_networking_type == "Kuryr" + + - name: 'Computing facts for service subnet' + set_fact: + first_ip_svc_subnet_range: "{{ svc_subnet_range | ipv4('network') }}" + last_ip_svc_subnet_range: "{{ svc_subnet_range | ipaddr('last_usable') |ipmath(1) }}" + first_ip_os_svc_network_range: "{{ os_svc_network_range | ipv4('network') }}" + last_ip_os_svc_network_range: "{{ os_svc_network_range | ipaddr('last_usable') |ipmath(1) }}" + allocation_pool: "" + when: os_networking_type == "Kuryr" + + - name: 'Get first part of OpenStack network' + set_fact: + allocation_pool: "{{ allocation_pool + '--allocation-pool start={{ first_ip_os_svc_network_range | ipmath(1) }},end={{ first_ip_svc_subnet_range |ipmath(-1) }}' }}" + when: + - os_networking_type == "Kuryr" + - first_ip_svc_subnet_range != first_ip_os_svc_network_range + + - name: 'Get last part of 
OpenStack network' + set_fact: + allocation_pool: "{{ allocation_pool + ' --allocation-pool start={{ last_ip_svc_subnet_range | ipmath(1) }},end={{ last_ip_os_svc_network_range |ipmath(-1) }}' }}" + when: + - os_networking_type == "Kuryr" + - last_ip_svc_subnet_range != last_ip_os_svc_network_range + + - name: 'Get end of allocation' + set_fact: + gateway_ip: "{{ allocation_pool.split('=')[-1] }}" + when: os_networking_type == "Kuryr" + + - name: 'replace last IP' + set_fact: + allocation_pool: "{{ allocation_pool | replace(gateway_ip, gateway_ip | ipmath(-1))}}" + when: os_networking_type == "Kuryr" + + - name: 'list service subnet' + command: + cmd: "openstack subnet list --name {{ os_svc_subnet }} --tag {{ cluster_id_tag }}" + when: os_networking_type == "Kuryr" + register: svc_subnet + + - name: 'Create the service subnet' + command: + cmd: "openstack subnet create --ip-version 4 --gateway {{ gateway_ip }} --subnet-range {{ os_svc_network_range }} {{ allocation_pool }} --no-dhcp --network {{ os_svc_network }} --tag {{ cluster_id_tag }} {{ os_svc_subnet }}" + when: + - os_networking_type == "Kuryr" + - svc_subnet.stdout == "" + + - name: 'list subnet pool' + command: + cmd: "openstack subnet pool list --name {{ subnet_pool }} --tags {{ cluster_id_tag }}" + when: os_networking_type == "Kuryr" + register: pods_subnet_pool + + - name: 'Create pods subnet pool' + command: + cmd: "openstack subnet pool create --default-prefix-length {{ host_prefix }} --pool-prefix {{ cluster_network_cidrs }} --tag {{ cluster_id_tag }} {{ subnet_pool }}" + when: + - os_networking_type == "Kuryr" + - pods_subnet_pool.stdout == "" + + - name: 'Create external router' + os_router: + name: "{{ os_router }}" + network: "{{ os_external_network }}" + interfaces: + - "{{ os_subnet }}" + + - name: 'Set external router tag' + command: + cmd: "openstack router set --tag {{ cluster_id_tag }} {{ os_router }}" + when: os_networking_type == "Kuryr" + + - name: 'Create the API port' + os_port: + 
name: "{{ os_port_api }}" + network: "{{ os_network }}" + security_groups: + - "{{ os_sg_master }}" + fixed_ips: + - subnet: "{{ os_subnet }}" + ip_address: "{{ os_subnet_range | next_nth_usable(5) }}" + + - name: 'Set API port tag' + command: + cmd: "openstack port set --tag {{ cluster_id_tag }} {{ os_port_api }}" + + - name: 'Create the Ingress port' + os_port: + name: "{{ os_port_ingress }}" + network: "{{ os_network }}" + security_groups: + - "{{ os_sg_worker }}" + fixed_ips: + - subnet: "{{ os_subnet }}" + ip_address: "{{ os_subnet_range | next_nth_usable(7) }}" + + - name: 'Set the Ingress port tag' + command: + cmd: "openstack port set --tag {{ cluster_id_tag }} {{ os_port_ingress }}" + + # NOTE: openstack ansible module doesn't allow attaching Floating IPs to + # ports, let's use the CLI instead + - name: 'Attach the API floating IP to API port' + command: + cmd: "openstack floating ip set --port {{ os_port_api }} {{ os_api_fip }}" + + # NOTE: openstack ansible module doesn't allow attaching Floating IPs to + # ports, let's use the CLI instead + - name: 'Attach the Ingress floating IP to Ingress port' + command: + cmd: "openstack floating ip set --port {{ os_port_ingress }} {{ os_ingress_fip }}" +---- + +. On a command line, create security groups by running the first numbered playbook: ++ +---- +$ ansible-playbook -i inventory.yaml 01_security-groups.yaml +---- + +. On a command line, create a network, subnet, and router by running the second numbered playbook: ++ +---- +$ ansible-playbook -i inventory.yaml 02_network.yaml +---- + +. 
_Optional_: If you want to control the default resolvers that Nova servers use, run the OpenStack CLI command: ++ +---- +$ openstack subnet set --dns-nameserver <server_1> --dns-nameserver <server_2> "$INFRA_ID-nodes" +---- \ No newline at end of file diff --git a/modules/installation-osp-default-deployment.adoc b/modules/installation-osp-default-deployment.adoc index c4034975de83..0a6b5d61af30 100644 --- a/modules/installation-osp-default-deployment.adoc +++ b/modules/installation-osp-default-deployment.adoc @@ -2,21 +2,18 @@ // // * installing/installing_openstack/installing-openstack-installer.adoc // * installing/installing_openstack/installing-openstack-installer-custom.adoc - -ifeval::["{context}" == "installing-openstack-installer-custom"] -:osp-custom: -endif::[] +// * installing/installing_openstack/installing-openstack-user.adoc [id="installation-osp-default-deployment_{context}"] = Resource guidelines for installing {product-title} on OpenStack -Your quota must meet the following requirements to run the {product-title} installation program in {rh-openstack-first}. +To support a {product-title} installation, your {rh-openstack-first} quota must meet the following requirements: .Recommended resources for a default {product-title} cluster on {rh-openstack} [options="header"] -|================================ +|====================================== |Resource | Value -|Floating IP addresses | 2 +|Floating IP addresses | 3 |Ports | 15 |Routers | 1 |Subnets | 1 @@ -26,7 +23,7 @@ Your quota must meet the following requirements to run the {product-title} insta |Instances | 7 |Security groups | 3 |Security group rules | 60 -|================================ +|====================================== A cluster might function with fewer than recommended resources, but its performance is not guaranteed. @@ -38,8 +35,4 @@ If OpenStack Object Storage (Swift) is available and operated by a user account [NOTE] By default, your security group and security group rule quotas might be low. 
If you encounter problems, run `openstack quota set --secgroups 3 --secgroup-rules 60 ` to increase them. -An {product-title} deployment comprises control plane machines, compute machines, and a bootstrap machine. - -ifeval::["{context}" == "installing-openstack-installer-custom"] -:!osp-custom: -endif::[] +An {product-title} deployment comprises control plane machines, compute machines, and a bootstrap machine. \ No newline at end of file diff --git a/modules/installation-osp-default-kuryr-deployment.adoc b/modules/installation-osp-default-kuryr-deployment.adoc index a7127ce4a7fe..a07d8311d175 100644 --- a/modules/installation-osp-default-kuryr-deployment.adoc +++ b/modules/installation-osp-default-kuryr-deployment.adoc @@ -15,7 +15,7 @@ Use the following quota to satisfy a default cluster's minimum requirements: .Recommended resources for a default {product-title} cluster on {rh-openstack} with Kuryr [options="header"] -|================================ +|============================================================================================== |Resource | Value |Floating IP addresses | 3 - plus the expected number of Services of LoadBalancer type |Ports | 1500 - 1 needed per Pod @@ -24,17 +24,32 @@ Use the following quota to satisfy a default cluster's minimum requirements: |Networks | 250 - 1 needed per Namespace/Project |RAM | 112 GB |vCPUs | 28 -|Volume storage | 175 GB +|Volume storage | 275 GB |Instances | 7 |Security groups | 250 - 1 needed per Service and per NetworkPolicy |Security group rules | 1000 -|Swift containers | 2 -|Swift objects | 1 -|Swift available space | 10 MB or more |Load balancers | 100 - 1 needed per Service |Load balancer listeners | 500 - 1 needed per Service-exposed port |Load balancer pools | 500 - 1 needed per Service-exposed port -|================================ +|============================================================================================== + +[IMPORTANT] +==== +If OpenStack Object Storage (Swift) is 
available, it is used as the default backend for the {product-title} image registry. In this case, the volume storage requirement is 175 GB. + +.Recommended Swift resources +[options="header"] +|====================================== +|Resource | Value +|Swift containers | 2 +|Swift objects | 1 +|Swift available space | 10 MB or more +|====================================== + +Swift space requirements vary depending on the size of the bootstrap Ignition file and image registry. + +A cluster might function with fewer than recommended resources, but its performance is not guaranteed. +==== A cluster might function with fewer than recommended resources, but its performance is not guaranteed. diff --git a/modules/installation-osp-deleting-bootstrap-resources.adoc b/modules/installation-osp-deleting-bootstrap-resources.adoc new file mode 100644 index 000000000000..327c54c683ed --- /dev/null +++ b/modules/installation-osp-deleting-bootstrap-resources.adoc @@ -0,0 +1,55 @@ +// Module included in the following assemblies: +// +// * installing/installing_openstack/installing-openstack-user.adoc +// * installing/installing_openstack/installing-openstack-kuryr.adoc + +[id="installation-osp-deleting-bootstrap-resources_{context}"] += Deleting bootstrap resources + +Delete the bootstrap resources that you no longer need. + +.Prerequisites +* The `inventory.yaml` and `common.yaml` Ansible playbooks in a common directory +** If you need these files, copy them from *Creating network resources* +* The control plane machines are running +** If you do not know the machines' status, see *Verifying cluster status* + +.Procedure + +. 
Insert the following content into a local file that is called `down-03_bootstrap.yaml`: ++ +[source,yaml] +---- +# Required Python packages: +# +# ansible +# openstacksdk + +- import_playbook: common.yaml + +- hosts: all + gather_facts: no + + tasks: + - name: 'Remove the bootstrap server' + os_server: + name: "{{ os_bootstrap_server_name }}" + state: absent + delete_fip: yes + + - name: 'Remove the bootstrap server port' + os_port: + name: "{{ os_port_bootstrap }}" + state: absent +---- + +. On a command line, run the playbook: ++ +---- +$ ansible-playbook -i inventory.yaml down-03_bootstrap.yaml +---- + +The bootstrap port, server, and floating IP address are deleted. + +[WARNING] +If you have not disabled the bootstrap Ignition file URL, do so now. \ No newline at end of file diff --git a/modules/installation-osp-describing-cloud-parameters.adoc b/modules/installation-osp-describing-cloud-parameters.adoc index 8a0d761202bc..8b9615817145 100644 --- a/modules/installation-osp-describing-cloud-parameters.adoc +++ b/modules/installation-osp-describing-cloud-parameters.adoc @@ -8,7 +8,7 @@ [id="installation-osp-describing-cloud-parameters_{context}"] = Defining parameters for the installation program -The {product-title} installation program relies on a file called `clouds.yaml`. The file describes {rh-openstack-first} configuration parameters, including the project name, log in information, and authorization service URLs. +The {product-title} installation program relies on a file that is called `clouds.yaml`. The file describes {rh-openstack-first} configuration parameters, including the project name, log in information, and authorization service URLs. 
.Procedure diff --git a/modules/installation-osp-downloading-modules.adoc b/modules/installation-osp-downloading-modules.adoc new file mode 100644 index 000000000000..c9cf26f873d1 --- /dev/null +++ b/modules/installation-osp-downloading-modules.adoc @@ -0,0 +1,92 @@ +// Module included in the following assemblies: +// * installing/installing_openstack/installing-openstack-installer-user.adoc +// * installing/installing_openstack/installing-openstack-installer-user-kuryr.adoc +// * installing/installing_openstack/uninstalling-cluster-openstack.adoc +// +//YOU MUST SET AN IFEVAL FOR EACH NEW MODULE + +ifeval::["{context}" == "installing-openstack-user"] +:osp-user: +endif::[] +ifeval::["{context}" == "installing-openstack-user-kuryr"] +:osp-user: +endif::[] +ifeval::["{context}" == "uninstalling-openstack-user"] +:osp-user-uninstall: +endif::[] + +[id="installation-osp-downloading-modules_{context}"] += Downloading playbook dependencies + +ifdef::osp-user[] +The Ansible playbooks that simplify the installation process on user-provisioned +infrastructure require several Python modules. On the machine where you will run the installer, +add the modules' repositories and then download them. +endif::osp-user[] + +ifdef::osp-user-uninstall[] +The Ansible playbooks that simplify the removal process on user-provisioned +infrastructure require several Python modules. On the machine where you will run the process, +add the modules' repositories and then download them. +endif::osp-user-uninstall[] + +[NOTE] +These instructions assume that you are using Red Hat Enterprise Linux 8. + + +.Prerequisites + +* Python 3 is installed on your machine +// * The following Python modules: +// *** `ansible` version 2.9.2 or compatible +// *** `openstacksdk` version 0.39.0 or compatible +// *** `openstackclient` version 4.0.0 or compatible +// *** `netaddr` version 0.7.19 or compatible + +.Procedure + +. 
On a command line, add the repositories: ++ +---- +$ sudo subscription-manager register # If not done already +$ sudo subscription-manager attach --pool=$YOUR_POOLID # If not done already +$ sudo subscription-manager repos --disable=* # If not done already + +$ sudo subscription-manager repos \ + --enable=rhel-8-for-x86_64-baseos-rpms \ + --enable=openstack-16-tools-for-rhel-8-x86_64-rpms \ + --enable=ansible-2.8-for-rhel-8-x86_64-rpms \ + --enable=rhel-8-for-x86_64-appstream-rpms +---- + +ifdef::osp-user[] +. Install the modules: ++ +---- +$ sudo yum install python3-openstackclient ansible python3-openstacksdk python3-netaddr +---- +endif::osp-user[] + +ifdef::osp-user-uninstall[] +. Install the modules: ++ +---- +$ sudo yum install python3-openstackclient ansible python3-openstacksdk +---- +endif::osp-user-uninstall[] + +. Ensure that the `python` command points to `python3`: ++ +---- +$ sudo alternatives --set python /usr/bin/python3 +---- + +ifeval::["{context}" == "installing-openstack-user"] +:!osp-user: +endif::[] +ifeval::["{context}" == "installing-openstack-user-kuryr"] +:!osp-user: +endif::[] +ifeval::["{context}" == "uninstalling-openstack-user"] +:!osp-user-uninstall: +endif::[] \ No newline at end of file diff --git a/modules/installation-osp-emptying-worker-pools.adoc b/modules/installation-osp-emptying-worker-pools.adoc new file mode 100644 index 000000000000..fe6a1e15adbd --- /dev/null +++ b/modules/installation-osp-emptying-worker-pools.adoc @@ -0,0 +1,33 @@ +// Module included in the following assemblies: +// * installing/installing_openstack/installing-openstack-user.adoc +// * installing/installing_openstack/installing-openstack-user-kuryr.adoc +// +//YOU MUST SET AN IFEVAL FOR EACH NEW MODULE + +[id="installation-osp-emptying-worker-pools_{context}"] += Emptying compute machine pools + +To proceed with an installation that uses your own infrastructure, set the number of compute machines in the installation configuration file to zero. 
Later, you create these machines manually. + +.Prerequisites + +* You have the `install-config.yaml` file that was generated by the {product-title} installation program. + +.Procedure + +. On a command line, browse to the directory that contains `install-config.yaml`. + +. From that directory, either run a script to edit the `install-config.yaml` file or update the file manually: + +** To set the value by using a script, run: ++ +---- +$ python -c ' +import yaml; +path = "install-config.yaml"; +data = yaml.safe_load(open(path)); +data["compute"][0]["replicas"] = 0; +open(path, "w").write(yaml.dump(data, default_flow_style=False))' +---- + +** To set the value manually, open the file and set the value of `compute..replicas` to `0`. \ No newline at end of file diff --git a/modules/installation-osp-fixing-subnet.adoc b/modules/installation-osp-fixing-subnet.adoc new file mode 100644 index 000000000000..6771e79fb9b6 --- /dev/null +++ b/modules/installation-osp-fixing-subnet.adoc @@ -0,0 +1,33 @@ +// Module included in the following assemblies: +// * installing/installing_openstack/installing-openstack-installer-user.adoc +// +//YOU MUST SET AN IFEVAL FOR EACH NEW MODULE + +[id="installation-osp-fixing-subnet_{context}"] += Setting a custom subnet for machines + +The IP range that the installation program uses by default might not match the Neutron subnet that you create when you install {product-title}. If necessary, update the CIDR value for new machines by editing the installation configuration file. + +.Prerequisites + +* You have the `install-config.yaml` file that was generated by the {product-title} installation program. + +.Procedure + +. On a command line, browse to the directory that contains `install-config.yaml`. + +. 
From that directory, either run a script to edit the `install-config.yaml` file or update the file manually: + +** To set the value by using a script, run: ++ +---- +$ python -c ' +import yaml; +path = "install-config.yaml"; +data = yaml.safe_load(open(path)); +data["networking"]["machineNetwork"] = [{"cidr": "192.168.0.0/18"}]; <1> +open(path, "w").write(yaml.dump(data, default_flow_style=False))' +---- +<1> Insert a value that matches your intended Neutron subnet, for example, `192.0.2.0/24`. + +** To set the value manually, open the file and set the value of `networking.machineNetwork` to something that matches your intended Neutron subnet. \ No newline at end of file diff --git a/modules/installation-osp-kuryr-config-yaml.adoc b/modules/installation-osp-kuryr-config-yaml.adoc index 7669c171d6e8..bafcbfb74b36 100644 --- a/modules/installation-osp-kuryr-config-yaml.adoc +++ b/modules/installation-osp-kuryr-config-yaml.adoc @@ -45,7 +45,6 @@ networking: networkType: Kuryr platform: openstack: - region: region1 cloud: mycloud externalNetwork: external computeFlavor: m1.xlarge diff --git a/modules/installation-osp-modifying-networktype.adoc b/modules/installation-osp-modifying-networktype.adoc new file mode 100644 index 000000000000..5f94ef3ba4b5 --- /dev/null +++ b/modules/installation-osp-modifying-networktype.adoc @@ -0,0 +1,32 @@ +// Module included in the following assemblies: +// * installing/installing_openstack/installing-openstack-installer-user-kuryr.adoc +// +//YOU MUST SET AN IFEVAL FOR EACH NEW MODULE + +[id="installation-osp-modifying-networktype_{context}"] += Modifying the network type + +By default, the installation program selects the `OpenShiftSDN` network type. To use Kuryr instead, change the value in the installation configuration file that the program generated. + +.Prerequisites + +* You have the file `install-config.yaml` that was generated by the {product-title} installation program + +.Procedure + +. 
In a command prompt, browse to the directory that contains `install-config.yaml`. + +. From that directory, either run a script to edit the `install-config.yaml` file or update the file manually: + +** To set the value by using a script, run: ++ +---- +$ python -c ' +import yaml; +path = "install-config.yaml"; +data = yaml.safe_load(open(path)); +data["networking"]["networkType"] = "Kuryr"; +open(path, "w").write(yaml.dump(data, default_flow_style=False))' +---- + +** To set the value manually, open the file and set `networking.networkType` to `"Kuryr"`. \ No newline at end of file diff --git a/modules/installation-osp-verifying-external-network.adoc b/modules/installation-osp-verifying-external-network.adoc index 4e322fdc0b74..6fc5766eb525 100644 --- a/modules/installation-osp-verifying-external-network.adoc +++ b/modules/installation-osp-verifying-external-network.adoc @@ -3,6 +3,7 @@ // * installing/installing_openstack/installing-openstack-installer.adoc // * installing/installing_openstack/installing-openstack-installer-custom.adoc // * installing/installing_openstack/installing-openstack-installer-kuryr.adoc +// * installing/installing_openstack/installing-openstack-user.adoc // // DNS resolution KI ifeval::["{context}" == "installing-openstack-installer-custom"] @@ -11,11 +12,14 @@ endif::[] ifeval::["{context}" == "installing-openstack-installer-kuryr"] :osp-kuryr: endif::[] +ifeval::["{context}" == "installing-openstack-user"] +:osp-user: +endif::[] [id="installation-osp-verifying-external-network_{context}"] = Verifying external network access -The {product-title} installer requires external network access. You must provide an external network value to it, or deployment fails. Before you run the installer, verify that a network with the External router type exists in {rh-openstack-first}. +The {product-title} installation process requires external network access. You must provide an external network value to it, or deployment fails. 
Before you begin the process, verify that a network with the External router type exists in {rh-openstack-first}. .Prerequisites * https://docs.openstack.org/neutron/rocky/admin/config-dns-res.html#case-2-dhcp-agents-forward-dns-queries-from-instances[Configure OpenStack's networking service to have DHCP agents forward instances' DNS queries] @@ -39,7 +43,7 @@ A network with an External router type appears in the network list. If at least ifdef::osp-custom,osp-kuryr[] [IMPORTANT] ==== -If the external network's CIDR range overlaps one of the default network ranges, you must change the matching network ranges in the `install-config.yaml` file before you run the installation program. +If the external network's CIDR range overlaps one of the default network ranges, you must change the matching network ranges in the `install-config.yaml` file before you start the installation process. The default network ranges are: [options="header"] @@ -58,8 +62,10 @@ The default network ranges are: ==== endif::osp-custom,osp-kuryr[] +ifdef::osp-custom,osp-kuryr[] [CAUTION] If the installation program finds multiple networks with the same name, it sets one of them at random. To avoid this behavior, create unique names for resources in {rh-openstack}. 
+endif::osp-custom,osp-kuryr[] [NOTE] ==== @@ -72,3 +78,6 @@ endif::[] ifeval::["{context}" == "installing-openstack-installer-kuryr"] :!osp-kuryr: endif::[] +ifeval::["{context}" == "installing-openstack-user"] +:!osp-user: +endif::[] diff --git a/modules/installation-osp-verifying-installation.adoc b/modules/installation-osp-verifying-installation.adoc new file mode 100644 index 000000000000..b00c70a0f16f --- /dev/null +++ b/modules/installation-osp-verifying-installation.adoc @@ -0,0 +1,23 @@ +// Module included in the following assemblies: +// +// * installing/installing_openstack/installing-openstack-user.adoc + +[id="installation-osp-verifying-installation_{context}"] += Verifying a successful installation + +Verify that the {product-title} installation is complete. + +.Prerequisites + +* You have the installation program (`openshift-install`) + + +.Procedure + +* On a command line, enter: ++ +---- +$ openshift-install --log-level debug wait-for install-complete +---- + +The program outputs the console URL, as well as the administrator's login information. \ No newline at end of file diff --git a/modules/installation-overview.adoc b/modules/installation-overview.adoc index b22278dd4b50..f0d89e5c5725 100644 --- a/modules/installation-overview.adoc +++ b/modules/installation-overview.adoc @@ -56,7 +56,8 @@ installer-provisioned infrastructure on the following platforms: * Amazon Web Services (AWS) * Google Cloud Platform (GCP) * Microsoft Azure -* Red Hat OpenStack Platform version 13 and 14 +* {rh-openstack-first} version 13 and 16 +** The latest {product-title} release supports both the latest {rh-openstack} long-life release and intermediate release. For complete {rh-openstack} release compatibility, see the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. 
* Red Hat Virtualization (RHV) For these clusters, all machines, including the computer that you run the installation process on, must have direct internet access to pull images for platform containers and provide telemetry data to Red Hat. diff --git a/modules/installation-uninstall-infra.adoc b/modules/installation-uninstall-infra.adoc new file mode 100644 index 000000000000..bf66efb23671 --- /dev/null +++ b/modules/installation-uninstall-infra.adoc @@ -0,0 +1,416 @@ +// Module included in the following assemblies: +// +// * installing/installing_osp/uninstalling-cluster-osp.adoc + +[id="installation-uninstall-infra_{context}"] += Removing a cluster on OpenStack that uses your own infrastructure + +You can remove an {product-title} cluster on {rh-openstack-first} that uses your own infrastructure. To complete the removal process quickly, create and run several Ansible playbooks. + +.Prerequisites + +* Python 3 is installed on your machine +* You downloaded the modules in "Downloading playbook dependencies" + +.Procedure + +[NOTE] +You may have the `common.yaml` and `inventory.yaml` playbooks left over from when you installed {product-title}. If you do, you can skip the first two steps of the procedure. + +. 
Insert the following content into a local file called `common.yaml`: ++ +[source,yaml] +---- +- hosts: localhost + gather_facts: no + + vars_files: + - metadata.json + + tasks: + - name: 'Compute resource names' + set_fact: + cluster_id_tag: "openshiftClusterID={{ infraID }}" + os_network: "{{ infraID }}-network" + os_subnet: "{{ infraID }}-nodes" + os_router: "{{ infraID }}-external-router" + # Port names + os_port_api: "{{ infraID }}-api-port" + os_port_ingress: "{{ infraID }}-ingress-port" + os_port_bootstrap: "{{ infraID }}-bootstrap-port" + os_port_master: "{{ infraID }}-master-port" + os_port_worker: "{{ infraID }}-worker-port" + # Security groups names + os_sg_master: "{{ infraID }}-master" + os_sg_worker: "{{ infraID }}-worker" + # Server names + os_bootstrap_server_name: "{{ infraID }}-bootstrap" + os_cp_server_name: "{{ infraID }}-master" + os_compute_server_name: "{{ infraID }}-worker" + # Trunk names + os_cp_trunk_name: "{{ infraID }}-master-trunk" + os_compute_trunk_name: "{{ infraID }}-worker-trunk" + # Subnet pool name + subnet_pool: "{{ infraID }}-kuryr-pod-subnetpool" + # Service network name + os_svc_network: "{{ infraID }}-kuryr-service-network" + # Service subnet name + os_svc_subnet: "{{ infraID }}-kuryr-service-subnet" + # Ignition files + os_bootstrap_ignition: "{{ infraID }}-bootstrap-ignition.json" +---- + +. 
Insert the following content into a local file called `inventory.yaml`, and edit the values to match your own: ++ +[source,yaml] +---- +all: + hosts: + localhost: + ansible_connection: local + ansible_python_interpreter: "{{ansible_playbook_python}}" + + # User-provided values + os_subnet_range: '10.0.0.0/16' + os_flavor_master: 'm1.xlarge' + os_flavor_worker: 'm1.large' + os_image_rhcos: 'rhcos' + os_external_network: 'external' + # OpenShift API floating IP address + os_api_fip: '203.0.113.23' + # OpenShift Ingress floating IP address + os_ingress_fip: '203.0.113.19' + # Service subnet cidr + svc_subnet_range: '172.30.0.0/16' + os_svc_network_range: '172.30.0.0/15' + # Subnet pool prefixes + cluster_network_cidrs: '10.128.0.0/14' + # Subnet pool prefix length + host_prefix: '23' + # Name of the SDN. + # Possible values are OpenshiftSDN or Kuryr. + os_networking_type: 'OpenshiftSDN' + + # Number of provisioned Control Plane nodes + # 3 is the minimum number for a fully-functional cluster. + os_cp_nodes_number: 3 + + # Number of provisioned Compute nodes. + # 3 is the minimum number for a fully-functional cluster. + os_compute_nodes_number: 3 +---- + +. 
_Optional_: If your cluster uses Kuryr, insert the following content into a local file called `down-06_load-balancers.yaml`: ++ +[source,yaml] +---- +# Required Python packages: +# +# ansible +# openstacksdk + +- import_playbook: common.yaml + +- hosts: all + gather_facts: no + + tasks: + - name: 'Get a token for creating the server group' + os_auth: + register: cloud + when: os_networking_type == "Kuryr" + + - name: 'List octavia versions' + uri: + method: GET + headers: + X-Auth-Token: "{{ cloud.ansible_facts.auth_token }}" + Content-Type: 'application/json' + url: "{{ cloud.ansible_facts.service_catalog | selectattr('name', 'match', 'octavia') | first | json_query('endpoints') | selectattr('interface', 'match', 'public') | first | json_query('url') }}/" + register: octavia_versions + when: os_networking_type == "Kuryr" + + - set_fact: + versions: "{{ octavia_versions.json.versions | selectattr('id', 'match', 'v2.5') | map(attribute='id') | list }}" + when: os_networking_type == "Kuryr" + + - name: 'List tagged loadbalancers' + uri: + method: GET + headers: + X-Auth-Token: "{{ cloud.ansible_facts.auth_token }}" + url: "{{ cloud.ansible_facts.service_catalog | selectattr('name', 'match', 'octavia') | first | json_query('endpoints') | selectattr('interface', 'match', 'public') | first | json_query('url') }}/v2.0/lbaas/loadbalancers?tags={{cluster_id_tag}}" + when: + - os_networking_type == "Kuryr" + - versions | length > 0 + register: lbs_tagged + + # NOTE: Kuryr creates an Octavia load balancer + # for each service present on the cluster. Let's make + # sure to remove the resources generated. 
+ - name: 'Remove the cluster load balancers' + os_loadbalancer: + name: "{{ item.name }}" + state: absent + wait: no + with_items: "{{ lbs_tagged.json.loadbalancers }}" + when: + - os_networking_type == "Kuryr" + - versions | length > 0 + + - name: 'List loadbalancers tagged on description' + uri: + method: GET + headers: + X-Auth-Token: "{{ cloud.ansible_facts.auth_token }}" + url: "{{ cloud.ansible_facts.service_catalog | selectattr('name', 'match', 'octavia') | first | json_query('endpoints') | selectattr('interface', 'match', 'public') | first | json_query('url') }}/v2.0/lbaas/loadbalancers?description={{cluster_id_tag}}" + when: + - os_networking_type == "Kuryr" + - versions | length == 0 + register: lbs_description + + # NOTE: Kuryr creates an Octavia load balancer + # for each service present on the cluster. Let's make + # sure to remove the resources generated. + - name: 'Remove the cluster load balancers' + os_loadbalancer: + name: "{{ item.name }}" + state: absent + with_items: "{{ lbs_description.json.loadbalancers }}" + when: + - os_networking_type == "Kuryr" + - versions | length == 0 +---- + +. 
Insert the following content into a local file called `down-05_compute-nodes.yaml`: ++ +[source,yaml] +---- +# Required Python packages: +# +# ansible +# openstackclient +# openstacksdk + +- import_playbook: common.yaml + +- hosts: all + gather_facts: no + + tasks: + - name: 'Remove the Compute servers' + os_server: + name: "{{ item.1 }}-{{ item.0 }}" + state: absent + with_indexed_items: "{{ [os_compute_server_name] * os_compute_nodes_number }}" + + - name: 'List the Compute trunks' + command: + cmd: "openstack network trunk list -c Name -f value" + when: os_networking_type == "Kuryr" + register: trunks + + - name: 'Remove the Compute trunks' + command: + cmd: "openstack network trunk delete {{ item.1 }}-{{ item.0 }}" + when: + - os_networking_type == "Kuryr" + - (item.1|string + '-' + item.0|string) in trunks.stdout_lines|list + with_indexed_items: "{{ [os_compute_trunk_name] * os_compute_nodes_number }}" + + - name: 'Remove the Compute ports' + os_port: + name: "{{ item.1 }}-{{ item.0 }}" + state: absent + with_indexed_items: "{{ [os_port_worker] * os_compute_nodes_number }}" +---- + +. 
Insert the following content into a local file called `down-04_control-plane.yaml`: ++ +[source,yaml] +---- +# Required Python packages: +# +# ansible +# openstackclient +# openstacksdk + +- import_playbook: common.yaml + +- hosts: all + gather_facts: no + + tasks: + - name: 'Remove the Control Plane servers' + os_server: + name: "{{ item.1 }}-{{ item.0 }}" + state: absent + with_indexed_items: "{{ [os_cp_server_name] * os_cp_nodes_number }}" + + - name: 'List the Compute trunks' + command: + cmd: "openstack network trunk list -c Name -f value" + when: os_networking_type == "Kuryr" + register: trunks + + - name: 'Remove the Control Plane trunks' + command: + cmd: "openstack network trunk delete {{ item.1 }}-{{ item.0 }}" + when: + - os_networking_type == "Kuryr" + - (item.1|string + '-' + item.0|string) in trunks.stdout_lines|list + with_indexed_items: "{{ [os_cp_trunk_name] * os_cp_nodes_number }}" + + - name: 'Remove the Control Plane ports' + os_port: + name: "{{ item.1 }}-{{ item.0 }}" + state: absent + with_indexed_items: "{{ [os_port_master] * os_cp_nodes_number }}" +---- + +. Insert the following content into a local file called `down-03_bootstrap.yaml`: ++ +[source,yaml] +---- +# Required Python packages: +# +# ansible +# openstacksdk + +- import_playbook: common.yaml + +- hosts: all + gather_facts: no + + tasks: + - name: 'Remove the bootstrap server' + os_server: + name: "{{ os_bootstrap_server_name }}" + state: absent + delete_fip: yes + + - name: 'Remove the bootstrap server port' + os_port: + name: "{{ os_port_bootstrap }}" + state: absent + +---- + +. 
Insert the following content into a local file called `down-02_network.yaml`: ++ +[source,yaml] +---- +# Required Python packages: +# +# ansible +# openstackclient +# openstacksdk + +- import_playbook: common.yaml + +- hosts: all + gather_facts: no + + tasks: + - name: 'List ports attatched to router' + command: + cmd: "openstack port list --device-owner=network:router_interface --tags {{ cluster_id_tag }} -f value -c id" + register: router_ports + + - name: 'Remove the ports from router' + command: + cmd: "openstack router remove port {{ os_router }} {{ item.1}}" + with_indexed_items: "{{ router_ports.stdout_lines }}" + + - name: 'List ha ports attached to router' + command: + cmd: "openstack port list --device-owner=network:ha_router_replicated_interface --tags {{ cluster_id_tag }} -f value -c id" + register: ha_router_ports + + - name: 'Remove the ha ports from router' + command: + cmd: "openstack router remove port {{ os_router }} {{ item.1}}" + with_indexed_items: "{{ ha_router_ports.stdout_lines }}" + + - name: 'List ports' + command: + cmd: "openstack port list --tags {{ cluster_id_tag }} -f value -c id " + register: ports + + - name: 'Remove the cluster ports' + command: + cmd: "openstack port delete {{ item.1}}" + with_indexed_items: "{{ ports.stdout_lines }}" + + - name: 'Remove the cluster router' + os_router: + name: "{{ os_router }}" + state: absent + + - name: 'List cluster networks' + command: + cmd: "openstack network list --tags {{ cluster_id_tag }} -f value -c Name" + register: networks + + - name: 'Remove the cluster networks' + os_network: + name: "{{ item.1}}" + state: absent + with_indexed_items: "{{ networks.stdout_lines }}" + + - name: 'List the cluster subnet pool' + command: + cmd: "openstack subnet pool list --name {{ subnet_pool }}" + when: os_networking_type == "Kuryr" + register: pods_subnet_pool + + - name: 'Remove the cluster subnet pool' + command: + cmd: "openstack subnet pool delete {{ subnet_pool }}" + when: + - 
os_networking_type == "Kuryr" + - pods_subnet_pool.stdout != "" +---- + +. Insert the following content into a local file called `down-01_security-groups.yaml`: ++ +[source,yaml] +---- +# Required Python packages: +# +# ansible +# openstackclient +# openstacksdk + +- import_playbook: common.yaml + +- hosts: all + gather_facts: no + + tasks: + - name: 'List security groups' + command: + cmd: "openstack security group list --tags {{ cluster_id_tag }} -f value -c Name" + register: security_groups + + - name: 'Remove the cluster security groups' + command: + cmd: "openstack security group delete {{ item.1 }}" + with_indexed_items: "{{ security_groups.stdout_lines }}" +---- + +. On a command line, run the playbooks you created: ++ +---- +$ ansible-playbook -i inventory.yaml \ + down-03_bootstrap.yaml \ + down-04_control-plane.yaml \ + down-05_compute-nodes.yaml \ + down-06_load-balancers.yaml \ + down-02_network.yaml \ + down-01_security-groups.yaml +---- + +. Remove any DNS record changes you made for the {product-title} installation. + +{product-title} is removed from your infrastructure. 
\ No newline at end of file diff --git a/modules/installation-user-infra-generate-k8s-manifest-ignition.adoc b/modules/installation-user-infra-generate-k8s-manifest-ignition.adoc index 06a91c359564..c37141fb92e1 100644 --- a/modules/installation-user-infra-generate-k8s-manifest-ignition.adoc +++ b/modules/installation-user-infra-generate-k8s-manifest-ignition.adoc @@ -10,6 +10,7 @@ // * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc // * installing/installing_vsphere/installing-vsphere.adoc // * installing/installing_ibm_z/installing-ibm-z.adoc +// * installing/installing_openstack/installing-openstack-user.adoc ifeval::["{context}" == "installing-aws-user-infra"] @@ -36,6 +37,12 @@ ifeval::["{context}" == "installing-restricted-networks-gcp"] :gcp: :restricted: endif::[] +ifeval::["{context}" == "installing-openstack-user"] +:osp: +endif::[] +ifeval::["{context}" == "installing-openstack-user-kuryr"] +:osp: +endif::[] [id="installation-user-infra-generate-k8s-manifest-ignition_{context}"] = Creating the Kubernetes manifest and Ignition config files @@ -101,6 +108,19 @@ Because you create and manage the worker machines yourself, you do not need to initialize these machines. endif::aws,azure,gcp[] +ifdef::osp[] +. Remove the Kubernetes manifest files that define the control plane machines and compute machineSets: ++ +---- +$ rm -f openshift/99_openshift-cluster-api_master-machines-*.yaml openshift/99_openshift-cluster-api_worker-machineset-*.yaml +---- ++ +Because you create and manage these resources yourself, you do not have +to initialize them. ++ +* You can preserve the MachineSet files to create compute machines by using the machine API, but you must update references to them to match your environment. +endif::osp[] + . 
Modify the `/manifests/cluster-scheduler-02-config.yml` Kubernetes manifest file to prevent Pods from being scheduled on the control plane machines: + -- @@ -173,6 +193,17 @@ The following files are generated in the directory: └── worker.ign ---- +ifdef::osp[] +. Export the metadata file's `infraID` key as an environment variable: ++ +---- +$ export INFRA_ID=$(jq -r .infraID metadata.json) +---- + +[TIP] +Extract the `infraID` key from `metadata.json` and use it as a prefix for all of the {rh-openstack} resources that you create. By doing so, you avoid name conflicts when making multiple deployments in the same project. +endif::osp[] + ifeval::["{context}" == "installing-restricted-networks-aws"] :!aws: :!restricted: @@ -197,3 +228,9 @@ ifeval::["{context}" == "installing-restricted-networks-gcp"] :!gcp: :!restricted: endif::[] +ifeval::["{context}" == "installing-openstack-user"] +:!osp: +endif::[] +ifeval::["{context}" == "installing-openstack-user-kuryr"] +:!osp: +endif::[] \ No newline at end of file diff --git a/modules/installation-user-infra-generate.adoc b/modules/installation-user-infra-generate.adoc index a01e183302ad..8c66cf47beb4 100644 --- a/modules/installation-user-infra-generate.adoc +++ b/modules/installation-user-infra-generate.adoc @@ -5,6 +5,7 @@ // * installing/installing_gcp/installing-gcp-user-infra.adoc // * installing/installing_aws/installing-restricted-networks-aws.adoc // * installing/installing_gcp/installing-restricted-networks-gcp.adoc +// * installing/installing_openstack/installing-openstack-user.adoc ifeval::["{context}" == "installing-restricted-networks-aws"] :restricted: @@ -32,6 +33,14 @@ ifeval::["{context}" == "installing-restricted-networks-gcp"] :cp: GCP :gcp: endif::[] +ifeval::["{context}" == "installing-openstack-user"] +:cp-first: Red Hat OpenStack Platform +:cp: RHOSP +endif::[] +ifeval::["{context}" == "installing-openstack-user-kuryr"] +:cp-first: Red Hat OpenStack Platform +:cp: RHOSP +endif::[] 
[id="installation-user-infra-generate_{context}"] = Creating the installation files for {cp} @@ -77,3 +86,11 @@ ifeval::["{context}" == "installing-restricted-networks-gcp"] :!cp: :!gcp: endif::[] +ifeval::["{context}" == "installing-openstack-user"] +:!cp-first: Red Hat OpenStack Platform +:!cp: RHOSP +endif::[] +ifeval::["{context}" == "installing-openstack-user-kuryr"] +:!cp-first: Red Hat OpenStack Platform +:!cp: RHOSP +endif::[] \ No newline at end of file