diff --git a/_topic_map.yml b/_topic_map.yml index 25b083c7ab50..e982977340a4 100644 --- a/_topic_map.yml +++ b/_topic_map.yml @@ -268,6 +268,14 @@ Topics: File: ipi-install-expanding-the-cluster - Name: Troubleshooting File: ipi-install-troubleshooting +- Name: Deploying installer-provisioned clusters on IBM Cloud + Dir: installing_ibm_cloud + Distros: openshift-origin,openshift-enterprise + Topics: + - Name: Prerequisites + File: install-ibm-cloud-prerequisites + - Name: Installation workflow + File: install-ibm-cloud-installation-workflow - Name: Installing with z/VM on IBM Z and LinuxONE Dir: installing_ibm_z Distros: openshift-enterprise diff --git a/installing/installing_ibm_cloud/images b/installing/installing_ibm_cloud/images new file mode 120000 index 000000000000..847b03ed0541 --- /dev/null +++ b/installing/installing_ibm_cloud/images @@ -0,0 +1 @@ +../../images/ \ No newline at end of file diff --git a/installing/installing_ibm_cloud/install-ibm-cloud-installation-workflow.adoc b/installing/installing_ibm_cloud/install-ibm-cloud-installation-workflow.adoc new file mode 100644 index 000000000000..68060bbfe4a6 --- /dev/null +++ b/installing/installing_ibm_cloud/install-ibm-cloud-installation-workflow.adoc @@ -0,0 +1,26 @@ +[id="install-ibm-cloud-installation-workflow"] += Setting up the environment for an {product-title} installation +include::modules/common-attributes.adoc[] +:context: install-ibm-cloud-installation-workflow + +toc::[] + +include::modules/install-ibm-cloud-preparing-the-provisioner-node.adoc[leveloffset=+1] + +include::modules/install-ibm-cloud-configuring-the-public-subnet.adoc[leveloffset=+1] + +include::modules/ipi-install-retrieving-the-openshift-installer.adoc[leveloffset=+1] + +include::modules/ipi-install-extracting-the-openshift-installer.adoc[leveloffset=+1] + +include::modules/install-ibm-cloud-configuring-the-install-config-file.adoc[leveloffset=+1] + +include::modules/ipi-install-additional-install-config-parameters.adoc[leveloffset=+1] + +include::modules/ipi-install-root-device-hints.adoc[leveloffset=+1] + +include::modules/ipi-install-creating-the-openshift-manifests.adoc[leveloffset=+1] + +include::modules/ipi-install-deploying-the-cluster-via-the-openshift-installer.adoc[leveloffset=+1] + +include::modules/ipi-install-following-the-installation.adoc[leveloffset=+1] diff --git a/installing/installing_ibm_cloud/install-ibm-cloud-prerequisites.adoc b/installing/installing_ibm_cloud/install-ibm-cloud-prerequisites.adoc new file mode 100644 index 000000000000..d7a778c515eb --- /dev/null +++ b/installing/installing_ibm_cloud/install-ibm-cloud-prerequisites.adoc @@ -0,0 +1,24 @@ +[id="install-ibm-cloud-prerequisites"] += Prerequisites +include::modules/common-attributes.adoc[] +:context: install-ibm-cloud + +toc::[] + +You can use installer-provisioned installation to install {product-title} on IBM Cloud® nodes. This document describes the prerequisites and procedures when installing {product-title} on IBM Cloud nodes. + +[IMPORTANT] +==== +Red Hat supports IPMI and PXE on the `provisioning` network only. Red Hat has not tested Red Fish, virtual media, or other complementary technologies such as Secure Boot on IBM Cloud deployments. The `provisioning` network is required. 
+==== + +Installer-provisioned installation of {product-title} requires: + +* One provisioner node with {op-system-first} 8.x installed +* Three control plane nodes +* One routable network +* One network for provisioning nodes + +Before starting an installer-provisioned installation of {product-title} on IBM Cloud, address the following prerequisites and requirements. + +include::modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc[leveloffset=+1] diff --git a/installing/installing_ibm_cloud/modules b/installing/installing_ibm_cloud/modules new file mode 120000 index 000000000000..36719b9de743 --- /dev/null +++ b/installing/installing_ibm_cloud/modules @@ -0,0 +1 @@ +../../modules/ \ No newline at end of file diff --git a/modules/install-ibm-cloud-configuring-the-install-config-file.adoc b/modules/install-ibm-cloud-configuring-the-install-config-file.adoc new file mode 100644 index 000000000000..07830ea4ff41 --- /dev/null +++ b/modules/install-ibm-cloud-configuring-the-install-config-file.adoc @@ -0,0 +1,110 @@ +// This is included in the following assemblies: +// +// installing_ibm_cloud/install-ibm-cloud-installing-on-ibm-cloud.adoc + +[id="configuring-the-install-config-file_{context}"] += Configuring the install-config.yaml file + +The `install-config.yaml` file requires some additional details. Most of the information is teaching the installer and the resulting cluster enough about the available IBM Cloud® hardware so that it is able to fully manage it. The material difference between installing on bare metal and installing on IBM Cloud is that you must explicitly set the privilege level for IPMI in the BMC section of the `install-config.yaml` file. + +.Procedure + +. Configure `install-config.yaml`. Change the appropriate variables to match the environment, including `pullSecret` and `sshKey`. ++ +[source,yaml] +---- + apiVersion: v1 + baseDomain: + metadata: + name: + networking: + machineCIDR: + networkType: OVNKubernetes + compute: + - name: worker + replicas: 2 + controlPlane: + name: master + replicas: 3 + platform: + baremetal: {} + platform: + baremetal: + apiVIP: + ingressVIP: + provisioningNetworkInterface: + provisioningNetworkCIDR: + hosts: + - name: openshift-master-0 + role: master + bmc: + address: ipmi://10.196.130.145?privilegelevel=OPERATOR <1> + username: root + password: + bootMACAddress: 00:e0:ed:6a:ca:b4 <2> + rootDeviceHints: + deviceName: "/dev/sda" + - name: openshift-worker-0 + role: worker + bmc: + address: ipmi://?privilegelevel=OPERATOR <1> + username: + password: + bootMACAddress: <2> + rootDeviceHints: + deviceName: "/dev/sda" + pullSecret: '' + sshKey: '' +---- ++ +<1> The `bmc.address` provides a `privilegelevel` configuration setting with the value set to `OPERATOR`. This is required for IBM Cloud. +<2> Add the MAC address of the private `provisioning` network NIC for the corresponding node. ++ +[NOTE] +==== +You can use the `ibmcloud` command-line utility to retrieve the password. + +[source,terminal] +---- +$ ibmcloud sl hardware detail --output JSON | \ + jq '"(.networkManagementIpAddress) (.remoteManagementAccounts[0].password)"' +---- + +Replace `` with the ID of the node. +==== + +. Create a directory to store the cluster configuration: ++ +[source,terminal] +---- +$ mkdir ~/clusterconfigs +---- + +. Copy the `install-config.yaml` file into the directory: ++ +[source,terminal] +---- +$ cp install-config.yaml ~/clusterconfig +---- + +. 
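Before you power off the cluster nodes in the next step, you can optionally verify that the IPMI management address and credentials for each node respond as expected. For example:
++
+[source,terminal]
+----
+$ ipmitool -I lanplus -U <user> -P <password> -H <management_ip> power status
+----
++
+Replace `<user>`, `<password>`, and `<management_ip>` with the IPMI user name, password, and management IP address of the node.
+
+. 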
Ensure all bare metal nodes are powered off prior to installing the {product-title} cluster: ++ +[source,terminal] +---- +$ ipmitool -I lanplus -U -P -H power off +---- + +. Remove old bootstrap resources if any are left over from a previous deployment attempt: ++ +[source,bash] +---- +for i in $(sudo virsh list | tail -n +3 | grep bootstrap | awk {'print $2'}); +do + sudo virsh destroy $i; + sudo virsh undefine $i; + sudo virsh vol-delete $i --pool $i; + sudo virsh vol-delete $i.ign --pool $i; + sudo virsh pool-destroy $i; + sudo virsh pool-undefine $i; +done +---- diff --git a/modules/install-ibm-cloud-configuring-the-public-subnet.adoc b/modules/install-ibm-cloud-configuring-the-public-subnet.adoc new file mode 100644 index 000000000000..61cc1b60e222 --- /dev/null +++ b/modules/install-ibm-cloud-configuring-the-public-subnet.adoc @@ -0,0 +1,191 @@ +// This is included in the following assemblies: +// +// installing_ibm_cloud/install-ibm-cloud-installing-on-ibm-cloud.adoc + +[id="configuring-the-public-subnet_{context}"] += Configuring the public subnet + +All of the {product-title} cluster nodes must be on the public subnet. IBM Cloud® does not provide a DHCP server on the subnet. Set it up separately on the provisioner node. + +You must reset the BASH variables defined when preparing the provisioner node. Rebooting the provisioner node after preparing it will delete the BASH variables previously set. + +.Procedure + +. Install `dnsmasq`: ++ +[source,terminal] +---- +$ sudo dnf install dnsmasq +---- + +. Open the `dnsmasq` configuration file: ++ +[source,terminal] +---- +$ sudo vi /etc/dnsmasq.conf +---- + +. Add the following configuration to the `dnsmasq` configuration file: ++ +[source,text] +---- +interface=baremetal +except-interface=lo +bind-dynamic +log-dhcp + +dhcp-range=,, <1> +dhcp-option=baremetal,121,0.0.0.0/0,,, <2> + +dhcp-hostsfile=/var/lib/dnsmasq/dnsmasq.hostsfile +---- ++ +<1> Set the DHCP range. Replace both instances of `` with one unused IP address from the public subnet so that the `dhcp-range` for the `baremetal` network begins and ends with the same the IP address. Replace `` with the CIDR of the public subnet. ++ +<2> Set the DHCP option. Replace `` with the IP address of the gateway for the `baremetal` network. Replace `` with the IP address of the provisioner node's private IP address on the `provisioning` network. Replace `` with the IP address of the provisioner node's public IP address on the `baremetal` network. ++ +To retrieve the value for ``, execute: ++ +[source,terminal] +---- +$ ibmcloud sl subnet detail --output JSON | jq .cidr +---- ++ +Replace `` with the ID of the public subnet. ++ +To retrieve the value for ``, execute: ++ +[source,terminal] +---- +$ ibmcloud sl subnet detail --output JSON | jq .gateway -r +---- ++ +Replace `` with the ID of the public subnet. ++ +To retrieve the value for ``, execute: ++ +[source,terminal] +---- +$ ibmcloud sl hardware detail --output JSON | \ + jq .primaryBackendIpAddress -r +---- ++ +Replace `` with the ID of the provisioner node. ++ +To retrieve the value for ``, execute: ++ +[source,terminal] +---- +$ ibmcloud sl hardware detail --output JSON | jq .primaryIpAddress -r +---- ++ +Replace `` with the ID of the provisioner node. + +. Obtain the list of hardware for the cluster: ++ +[source,terminal] +---- +$ ibmcloud sl hardware list +---- + +. 
Obtain the MAC addresses and IP addresses for each node: ++ +[source,terminal] +---- +$ ibmcloud sl hardware detail --output JSON | \ + jq '.networkComponents[] | \ + "\(.primaryIpAddress) \(.macAddress)"' | grep -v null +---- ++ +Replace `` with the ID of the node. ++ +.Example output +[source,terminal] +---- +"10.196.130.144 00:e0:ed:6a:ca:b4" +"141.125.65.215 00:e0:ed:6a:ca:b5" +---- ++ +Make a note of the MAC address and IP address of the public network. Make a separate note of the MAC address of the private network, which you will use later in the `install-config.yaml` file. Repeat this procedure for each node until you have all the public MAC and IP addresses for the public `baremetal` network, and the MAC addresses of the private `provisioning` network. + +. Add the MAC and IP address pair of the public `baremetal` network for each node into the `dnsmasq.hostsfile` file: ++ +[source,terminal] +---- +$ sudo vim /var/lib/dnsmasq/dnsmasq.hostsfile +---- ++ +.Example input +[source,text] +---- +00:e0:ed:6a:ca:b5,141.125.65.215,master-0 +,,master-1 +,,master-2 +,,worker-0 +,,worker-1 +... +---- ++ +Replace `,` with the public MAC address and public IP address of the corresponding node name. + +. Start `dnsmasq`: ++ +[source,terminal] +---- +$ sudo systemctl start dnsmasq +---- + +. Enable `dnsmasq` so that it starts when booting the node: ++ +[source,terminal] +---- +$ sudo systemctl enable dnsmasq +---- + +. Verify `dnsmasq` is running: ++ +[source,terminal] +---- +$ sudo systemctl status dnsmasq +---- ++ +.Example output +[source,terminal] +---- +● dnsmasq.service - DNS caching server. +Loaded: loaded (/usr/lib/systemd/system/dnsmasq.service; enabled; vendor preset: disabled) +Active: active (running) since Tue 2021-10-05 05:04:14 CDT; 49s ago +Main PID: 3101 (dnsmasq) +Tasks: 1 (limit: 204038) +Memory: 732.0K +CGroup: /system.slice/dnsmasq.service +└─3101 /usr/sbin/dnsmasq -k +---- + +. Open ports `53` and `67` with UDP protocol: ++ +[source,terminal] +---- +$ sudo firewall-cmd --add-port 53/udp --permanent +---- ++ +[source,terminal] +---- +$ sudo firewall-cmd --add-port 67/udp --permanent +---- + +. Add `provisioning` to the external zone with masquerade: ++ +[source,terminal] +---- +$ sudo firewall-cmd --change-zone=provisioning --zone=external --permanent +---- ++ +This step ensures network address translation for IPMI calls to the management subnet. + +. Reload the `firewalld` configuration: ++ +[source,terminal] +---- +$ sudo firewall-cmd --reload +---- diff --git a/modules/install-ibm-cloud-preparing-the-provisioner-node.adoc b/modules/install-ibm-cloud-preparing-the-provisioner-node.adoc new file mode 100644 index 000000000000..80eaa56c049b --- /dev/null +++ b/modules/install-ibm-cloud-preparing-the-provisioner-node.adoc @@ -0,0 +1,272 @@ +// Module included in the following assemblies: +// +// * installing/installing_ibm_cloud/install-ibm-cloud-installing-on-ibm-cloud.adoc + +[id="preparing-the-provisioner-node-for-openshift-install-on-ibm-cloud_{context}"] += Preparing the provisioner node for {product-title} installation on IBM Cloud + +Perform the following steps to prepare the provisioner node. + +.Procedure + +. Log in to the provisioner node via `ssh`. + +. 
Create a non-root user (`kni`) and provide that user with `sudo` privileges: ++ +[source,terminal] +---- +# useradd kni +---- ++ +[source,terminal] +---- +# passwd kni +---- ++ +[source,terminal] +---- +# echo "kni ALL=(root) NOPASSWD:ALL" | tee -a /etc/sudoers.d/kni +---- ++ +[source,terminal] +---- +# chmod 0440 /etc/sudoers.d/kni +---- + +. Create an `ssh` key for the new user: ++ +[source,terminal] +---- +# su - kni -c "ssh-keygen -f /home/kni/.ssh/id_rsa -N ''" +---- + +. Log in as the new user on the provisioner node: ++ +[source,terminal] +---- +# su - kni +---- + +. Use Red Hat Subscription Manager to register the provisioner node: ++ +[source,terminal] +---- +$ sudo subscription-manager register --username= --password= --auto-attach +---- ++ +[source,terminal] +---- +$ sudo subscription-manager repos --enable=rhel-8-for-x86_64-appstream-rpms \ + --enable=rhel-8-for-x86_64-baseos-rpms +---- ++ +[NOTE] +==== +For more information about Red Hat Subscription Manager, see link:https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html-single/rhsm/index[Using and Configuring Red Hat Subscription Manager]. +==== + +. Install the following packages: ++ +[source,terminal] +---- +$ sudo dnf install -y libvirt qemu-kvm mkisofs python3-devel jq ipmitool +---- + +. Modify the user to add the `libvirt` group to the newly created user: ++ +[source,terminal] +---- +$ sudo usermod --append --groups libvirt kni +---- + +. Start `firewalld`: ++ +[source,terminal] +---- +$ sudo systemctl start firewalld +---- + +. Enable `firewalld`: ++ +[source,terminal] +---- +$ sudo systemctl enable firewalld +---- + +. Start the `http` service: ++ +[source,terminal] +---- +$ sudo firewall-cmd --zone=public --add-service=http --permanent +---- ++ +[source,terminal] +---- +$ sudo firewall-cmd --reload +---- + +. Start and enable the `libvirtd` service: ++ +[source,terminal] +---- +$ sudo systemctl enable libvirtd --now +---- + +. Set the ID of the provisioner node: ++ +[source,terminal] +---- +$ PRVN_HOST_ID= +---- ++ +You can view the ID with the following `ibmcloud` command: ++ +[source,terminal] +---- +$ ibmcloud sl hardware list +---- + +. Set the ID of the public subnet: ++ +[source,terminal] +---- +$ PUBLICSUBNETID= +---- ++ +You can view the ID with the following `ibmcloud` command: ++ +[source,terminal] +---- +$ ibmcloud sl subnet list +---- + +. Set the ID of the private subnet: ++ +[source,terminal] +---- +$ PRIVSUBNETID= +---- ++ +You can view the ID with the following `ibmcloud` command: ++ +[source,terminal] +---- +$ ibmcloud sl subnet list +---- + +. Set the provisioner node public IP address: ++ +[source,terminal] +---- +$ PRVN_PUB_IP=$(ibmcloud sl hardware detail $PRVN_HOST_ID --output JSON | jq .primaryIpAddress -r) +---- + +. Set the CIDR for the public network: ++ +[source,terminal] +---- +$ PUBLICCIDR=$(ibmcloud sl subnet detail $PUBLICSUBNETID --output JSON | jq .cidr) +---- + +. Set the IP address and CIDR for the public network: ++ +[source,terminal] +---- +$ PUB_IP_CIDR=$PRVN_PUB_IP/$PUBLICCIDR +---- + +. Set the gateway for the public network: ++ +[source,terminal] +---- +$ PUB_GATEWAY=$(ibmcloud sl subnet detail $PUBLICSUBNETID --output JSON | jq .gateway -r) +---- + +. Set the private IP address of the provisioner node: ++ +[source,terminal] +---- +$ PRVN_PRIV_IP=$(ibmcloud sl hardware detail $PRVN_HOST_ID --output JSON | \ + jq .primaryBackendIpAddress -r) +---- + +. 
Set the CIDR for the private network: ++ +[source,terminal] +---- +$ PRIVCIDR=$(ibmcloud sl subnet detail $PRIVSUBNETID --output JSON | jq .cidr) +---- + +. Set the IP address and CIDR for the private network: ++ +[source,terminal] +---- +$ PRIV_IP_CIDR=$PRVN_PRIV_IP/$PRIVCIDR +---- + +. Set the gateway for the private network: ++ +[source,terminal] +---- +$ PRIV_GATEWAY=$(ibmcloud sl subnet detail $PRIVSUBNETID --output JSON | jq .gateway -r) +---- + +. Set up the bridges for the `baremetal` and `provisioning` networks: ++ +[source,terminal] +---- +$ sudo nohup bash -c " + nmcli --get-values UUID con show | xargs -n 1 nmcli con delete + nmcli connection add ifname provisioning type bridge con-name provisioning + nmcli con add type bridge-slave ifname eth1 master provisioning + nmcli connection add ifname baremetal type bridge con-name baremetal + nmcli con add type bridge-slave ifname eth2 master baremetal + nmcli connection modify baremetal ipv4.addresses $PUB_IP_CIDR ipv4.method manual ipv4.gateway $PUB_GATEWAY + nmcli connection modify provisioning ipv4.addresses 172.22.0.1/24,$PRIV_IP_CIDR ipv4.method manual + nmcli connection modify provisioning +ipv4.routes \"10.0.0.0/8 $PRIV_GATEWAY\" + nmcli con down baremetal + nmcli con up baremetal + nmcli con down provisioning + nmcli con up provisioning + init 6 +" +---- ++ +[NOTE] +==== +For `eth1` and `eth2`, substitute the appropriate interface name, as needed. +==== + +. If required, SSH back into the `provisioner` node: ++ +[source,terminal] +---- +# ssh kni@provisioner.. +---- + +. Verify the connection bridges have been properly created: ++ +[source,terminal] +---- +$ sudo nmcli con show +---- ++ +.Example output +[source,terminal] +---- +NAME UUID TYPE DEVICE +baremetal 4d5133a5-8351-4bb9-bfd4-3af264801530 bridge baremetal +provisioning 43942805-017f-4d7d-a2c2-7cb3324482ed bridge provisioning +virbr0 d9bca40f-eee1-410b-8879-a2d4bb0465e7 bridge virbr0 +bridge-slave-eth1 76a8ed50-c7e5-4999-b4f6-6d9014dd0812 ethernet eth1 +bridge-slave-eth2 f31c3353-54b7-48de-893a-02d2b34c4736 ethernet eth2 +---- + +. Create a `pull-secret.txt` file: ++ +[source,terminal] +---- +$ vim pull-secret.txt +---- ++ +In a web browser, navigate to link:https://console.redhat.com/openshift/install/metal/user-provisioned[Install on Bare Metal with user-provisioned infrastructure]. In step 1, click **Download pull secret**. Paste the contents into the `pull-secret.txt` file and save the contents in the `kni` user's home directory. diff --git a/modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc b/modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc new file mode 100644 index 000000000000..57f92cc11a99 --- /dev/null +++ b/modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc @@ -0,0 +1,190 @@ +// This is included in the following assemblies: +// +// installing_ibm_cloud/install-ibm-cloud-installing-on-ibm-cloud.adoc + +[id="setting-up-ibm-cloud-infrastructure_{context}"] += Setting up IBM Cloud infrastructure + +To deploy an {product-title} cluster on IBM Cloud®, you must first provision the IBM Cloud nodes. + +[IMPORTANT] +==== +Red Hat supports IPMI and PXE on the `provisioning` network only. Red Hat has not tested Red Fish, virtual media, or other complementary technologies such as Secure Boot on IBM Cloud deployments. The `provisioning` network is required. +==== + +You can customize IBM Cloud nodes using the IBM Cloud API. When creating IBM Cloud nodes, you must consider the following requirements. 
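+
+If you are unsure which data centers, server sizes, or operating systems you can order, you can list the ordering options with the `ibmcloud` CLI. The following command assumes the classic infrastructure (`sl`) plug-in used throughout this guide and a plug-in version that provides the `create-options` subcommand:
+
+[source,terminal]
+----
+$ ibmcloud sl hardware create-options
+----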
+ +[discrete] +== Use one data center per cluster + +All nodes in the {product-title} cluster must run in the same IBM Cloud data center. + +[discrete] +== Create public and private VLANs + +Create all nodes with a single public VLAN and a single private VLAN. + +[discrete] +== Ensure subnets have sufficient IP addresses + +IBM Cloud public VLAN subnets use a `/28` prefix by default, which provides 16 IP addresses. That is sufficient for a cluster consisting of three control plane nodes, four worker nodes, and two IP addresses for the API VIP and Ingress VIP on the `baremetal` network. For larger clusters, you might need a smaller prefix. + +IBM Cloud private VLAN subnets use a `/26` prefix by default, which provides 64 IP addresses. IBM Cloud will use private network IP addresses to access the Baseboard Management Controller (BMC) of each node. {product-title} creates an additional subnet for the `provisioning` network. Network traffic for the `provisioning` network subnet routes through the private VLAN. For larger clusters, you might need a smaller prefix. + +.IP addresses per prefix +[options="header"] +|==== +|IP addresses |Prefix +|32| `/27` +|64| `/26` +|128| `/25` +|256| `/24` +|==== + +[discrete] +== Configuring NICs + +{product-title} deploys with two networks: + +- `provisioning`: The `provisioning` network is a non-routable network used for provisioning the underlying operating system on each node that is a part of the {product-title} cluster. + +- `baremetal`: The `baremetal` network is a routable network. You can use any NIC order to interface with the `baremetal` network, provided it is not the NIC specified in the `provisioningNetworkInterface` configuration setting or the NIC associated to a node's `bootMACAddress` configuration setting for the `provisioning` network. + +While the cluster nodes can contain more than two NICs, the installation process only focuses on the first two NICs. For example: + +[options="header"] +|=== +|NIC |Network |VLAN +| NIC1 | `provisioning` | +| NIC2 | `baremetal` | +|=== + +In the previous example, NIC1 on all control plane and worker nodes connects to the non-routable network (`provisioning`) that is only used for the installation of the {product-title} cluster. NIC2 on all control plane and worker nodes connects to the routable `baremetal` network. + +[options="header"] +|=== +|PXE |Boot order +| NIC1 PXE-enabled `provisioning` network | 1 +| NIC2 `baremetal` network. | 2 +|=== + +[NOTE] +==== +Ensure PXE is enabled on the NIC used for the `provisioning` network and is disabled on all other NICs. +==== + +[discrete] +== Configuring canonical names + +Clients access the {product-title} cluster nodes over the `baremetal` network. Configure IBM Cloud subdomains or subzones where the canonical name extension is the cluster name. + +---- +. +---- + +For example: + +---- +test-cluster.example.com +---- + +[discrete] +== Creating DNS entries + +You must create DNS `A` record entries resolving to unused IP addresses on the public subnet for the following: + +[width="100%", options="header"] +|===== +| Usage | Host Name | IP +| API | api.. | +| Ingress LB (apps) | *.apps.. | +|===== + +Control plane and worker nodes already have DNS entries after provisioning. + +The following table provides an example of fully qualified domain names. The API and Nameserver addresses begin with canonical name extensions. The host names of the control plane and worker nodes are examples, so you can use any host naming convention you prefer. 
+ +[width="100%", options="header"] +|===== +| Usage | Host Name | IP +| API | api.. | +| Ingress LB (apps) | *.apps.. | +ifeval::[{product-version} <= 4.5] +| Nameserver | ns1.. | +endif::[] +| Provisioner node | provisioner.. | +| Master-0 | openshift-master-0.. | +| Master-1 | openshift-master-1.. | +| Master-2 | openshift-master-2.. | +| Worker-0 | openshift-worker-0.. | +| Worker-1 | openshift-worker-1.. | +| Worker-n | openshift-worker-n.. | +|===== + +{product-title} includes functionality that uses cluster membership information to generate `A` records. This resolves the node names to their IP addresses. After the nodes are registered with the API, the cluster can disperse node information without using CoreDNS-mDNS. This eliminates the network traffic associated with multicast DNS. + +[IMPORTANT] +==== +After provisioning the IBM Cloud nodes, you must create a DNS entry for the `api..` domain name on the external DNS because removing CoreDNS causes the local entry to disappear. Failure to create a DNS record for the `api..` domain name in the external DNS server prevents worker nodes from joining the cluster. +==== + +[discrete] +== Network Time Protocol (NTP) + +Each {product-title} node in the cluster must have access to an NTP server. {product-title} nodes use NTP to synchronize their clocks. For example, cluster nodes use SSL certificates that require validation, which might fail if the date and time between the nodes are not in sync. + +[IMPORTANT] +==== +Define a consistent clock date and time format in each cluster node's BIOS settings, or installation might fail. +==== + +[discrete] +== Configure a DHCP server + +IBM Cloud does not run DHCP on the public or private VLANs. After provisioning IBM Cloud nodes, you must set up a DHCP server for the public VLAN, which corresponds to {product-title}'s `baremetal` network. + +[NOTE] +==== +The IP addresses allocated to each node do not need to match the IP addresses allocated by the IBM Cloud provisioning system. +==== + +See the "Configuring the public subnet" section for details. + +[discrete] +== Ensure BMC access privileges + +The "Remote management" page for each node on the dashboard contains the node's intelligent platform management interface (IPMI) credentials. The default IPMI privileges prevent the user from making certain boot target changes. You must change the privilege level to `OPERATOR` so that Ironic can make those changes. + +In the `install-config.yaml` file, add the `privilegelevel` parameter to the URLs used to configure each BMC. See the "Configuring the install-config.yaml file" section for additional details. For example: + +[source,yaml] +---- +ipmi://:?privilegelevel=OPERATOR +---- + +Alternatively, contact IBM Cloud support and request that they increase the IPMI privileges to `ADMINISTRATOR` for each node. + +[discrete] +== Create bare metal servers + +Create bare metal servers in the link:https://cloud.ibm.com[IBM Cloud dashboard] by navigating to *Create resource* -> *Bare Metal Server*. + +Alternatively, you can create bare metal servers with the `ibmcloud` CLI utility. For example: + +[source,terminal] +---- +$ ibmcloud sl hardware create --hostname \ + --domain \ + --size \ + --os \ + --datacenter \ + --port-speed \ + --billing +---- + +See link:https://cloud.ibm.com/docs/cli?topic=cli-install-ibmcloud-cli[Installing the stand-alone IBM Cloud CLI] for details on installing the IBM Cloud CLI. + +[NOTE] +==== +IBM Cloud servers might take 3-5 hours to become available. 
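+
+While you wait, you can check whether the servers have finished provisioning, for example by confirming that they are listed with public and private IP addresses assigned:
+
+[source,terminal]
+----
+$ ibmcloud sl hardware list
+----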
+==== diff --git a/modules/ipi-install-additional-install-config-parameters.adoc b/modules/ipi-install-additional-install-config-parameters.adoc index e034c0ecde35..4c9a480b2186 100644 --- a/modules/ipi-install-additional-install-config-parameters.adoc +++ b/modules/ipi-install-additional-install-config-parameters.adoc @@ -14,24 +14,24 @@ and the `bmc` parameter for the `install-config.yaml` file. |Parameters |Default |Description -| [[basedomain]] `baseDomain` +| `baseDomain` | | The domain name for the cluster. For example, `example.com`. -| [[bootmode]] `bootMode` +| `bootMode` | `UEFI` | The boot mode for a node. Options are `legacy`, `UEFI`, and `UEFISecureBoot`. If `bootMode` is not set, Ironic sets it while inspecting the node. -| [[sshkey]] `sshKey` +| `sshKey` | | The `sshKey` configuration setting contains the key in the `~/.ssh/id_rsa.pub` file required to access the control plane nodes and worker nodes. Typically, this key is from the `provisioner` node. -| [[pullsecret]] `pullSecret` +| `pullSecret` | | The `pullSecret` configuration setting contains a copy of the pull secret downloaded from the link:https://console.redhat.com/openshift/install/metal/user-provisioned[Install OpenShift on Bare Metal] page when preparing the provisioner node. -a|[[metadataname]] +a| ---- metadata: name: @@ -40,7 +40,7 @@ metadata: |The name to be given to the {product-title} cluster. For example, `openshift`. -a|[[machinecidr]] +a| ---- networking: machineCIDR: @@ -54,7 +54,7 @@ endif::[] endif::[] . -a|[[workername]] +a| ---- compute: - name: worker @@ -63,7 +63,7 @@ compute: |The {product-title} cluster requires a name be provided for worker (or compute) nodes even if there are zero nodes. -a|[[computereplicas]] +a| ---- compute: replicas: 2 @@ -72,7 +72,7 @@ compute: |Replicas sets the number of worker (or compute) nodes in the {product-title} cluster. -a|[[controlplanename]] +a| ---- controlPlane: name: master @@ -81,7 +81,7 @@ controlPlane: |The {product-title} cluster requires a name for control plane (master) nodes. -a|[[controlplanereplicas]] +a| ---- controlPlane: replicas: 3 @@ -90,24 +90,24 @@ controlPlane: |Replicas sets the number of control plane (master) nodes included as part of the {product-title} cluster. ifeval::[{product-version} >= 4.4] -a| [[provisioningNetworkInterface]]`provisioningNetworkInterface` | | The name of the network interface on nodes connected to the `provisioning` network. For {product-title} 4.9 and later releases, use the `bootMACAddress` configuration setting to enable Ironic to identify the IP address of the NIC instead of using the `provisioningNetworkInterface` configuration setting to identify the name of the NIC. +a| `provisioningNetworkInterface` | | The name of the network interface on nodes connected to the `provisioning` network. For {product-title} 4.9 and later releases, use the `bootMACAddress` configuration setting to enable Ironic to identify the IP address of the NIC instead of using the `provisioningNetworkInterface` configuration setting to identify the name of the NIC. endif::[] | `defaultMachinePlatform` | | The default configuration used for machine pools without a platform configuration. -| [[apivip]]`apiVIP` | `api.` | The VIP to use for internal API communication. +| `apiVIP` | `api.` | The VIP to use for internal API communication. This setting must either be provided or pre-configured in the DNS so that the default name resolves correctly. 
| `disableCertificateVerification` | `False` | `redfish` and `redfish-virtualmedia` need this parameter to manage BMC addresses. The value should be `True` when using a self-signed certificate for BMC addresses. -| [[ingressvip]]`ingressVIP` | `test.apps.` | The VIP to use for ingress traffic. +| `ingressVIP` | `test.apps.` | The VIP to use for ingress traffic. ifeval::[{product-version} < 4.5] Provide this setting or pre-configure it in the DNS so that the default name resolves correctly. -|[[dnsVIP]]`dnsVIP` | | The VIP to use for internal DNS communication. +|`dnsVIP` | | The VIP to use for internal DNS communication. This setting has no default and must always be provided. endif::[] @@ -217,7 +217,6 @@ endif::[] |=== -[id="hoststable"] .Hosts The `hosts` parameter is a list of separate bare metal assets used to build the cluster. @@ -226,12 +225,12 @@ The `hosts` parameter is a list of separate bare metal assets used to build the .Hosts |=== |Name |Default |Description -| [[name]]`name` +| `name` | | The name of the `BareMetalHost` resource to associate with the details. For example, `openshift-master-0`. -| [[role]]`role` +| `role` | | The role of the bare metal node. Either `master` or `worker`. @@ -241,12 +240,12 @@ The `hosts` parameter is a list of separate bare metal assets used to build the | Connection details for the baseboard management controller. See the BMC addressing section for additional details. -| [[bootMACAddress]]`bootMACAddress` +| `bootMACAddress` | | The MAC address of the NIC that the host uses for the `provisioning` network. Ironic retrieves the IP address using the `bootMACAddress` configuration setting. Then, it binds to the host. ifeval::[{product-version} < 4.6] -| [[hardwareProfile]]`hardwareProfile` +| `hardwareProfile` | `default` | This parameter exposes the device name that the installer attempts to deploy the {product-title} cluster for the control plane and worker nodes. The value defaults to `default` for control plane nodes and `unknown` for worker nodes. The list of profiles includes: `default`, `libvirt`, `dell`, `dell-raid`, and `openstack`. The `default` parameter attempts to install on `/dev/sda` of the {product-title} cluster nodes. endif::[]