diff --git a/_topic_map.yml b/_topic_map.yml index 7542946ddc6f..862994c6e163 100644 --- a/_topic_map.yml +++ b/_topic_map.yml @@ -308,6 +308,9 @@ Topics: - Name: Gathering data about your cluster File: gathering-cluster-data Distros: openshift-enterprise,openshift-webscale +- Name: Summarizing cluster specifications + File: summarizing-cluster-specifications + Distros: openshift-enterprise,openshift-webscale,openshift-origin - Name: Remote health monitoring with connected clusters Dir: remote_health_monitoring Distros: openshift-enterprise,openshift-webscale,openshift-dedicated @@ -320,6 +323,24 @@ Topics: File: opting-out-of-remote-health-reporting - Name: Using Insights to identify issues with your cluster File: using-insights-to-identify-issues-with-your-cluster +- Name: Troubleshooting + Dir: troubleshooting + Distros: openshift-enterprise,openshift-dedicated,openshift-webscale,openshift-origin + Topics: + - Name: Troubleshooting installations + File: troubleshooting-installations + - Name: Verifying node health + File: verifying-node-health + - Name: Troubleshooting CRI-O container runtime issues + File: troubleshooting-crio-issues + - Name: Troubleshooting Operator issues + File: troubleshooting-operator-issues + - Name: Investigating Pod issues + File: investigating-pod-issues + - Name: Troubleshooting the Source-to-Image process + File: troubleshooting-s2i + - Name: Diagnosing OpenShift CLI (oc) issues + File: diagnosing-oc-issues --- Name: Web console Dir: web_console diff --git a/modules/about-crio.adoc b/modules/about-crio.adoc new file mode 100644 index 000000000000..3f7bd1bafd81 --- /dev/null +++ b/modules/about-crio.adoc @@ -0,0 +1,10 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-crio-issues.adoc + +[id="about-crio_{context}"] += About CRI-O container runtime engine + +CRI-O is a Kubernetes-native container runtime implementation that integrates closely with the operating system to deliver an efficient and optimized Kubernetes experience. CRI-O provides facilities for running, stopping, and restarting containers. + +The CRI-O container runtime engine is managed using a systemd service on each {product-title} cluster node. When container runtime issues occur, verify the status of the `crio` systemd service on each node. Gather CRI-O journald unit logs from nodes that manifest container runtime issues. diff --git a/modules/about-sosreport.adoc b/modules/about-sosreport.adoc new file mode 100644 index 000000000000..d2bce3788aab --- /dev/null +++ b/modules/about-sosreport.adoc @@ -0,0 +1,10 @@ +// Module included in the following assemblies: +// +// * support/gathering-cluster-data.adoc + +[id="about-sosreport_{context}"] += About `sosreport` + +`sosreport` is a tool that collects configuration details, system information, and diagnostic data from {op-system-base-full} and {op-system-first} systems. `sosreport` provides a standardized way to collect diagnostic information relating to a node, which can then be provided to Red Hat Support for issue diagnosis. + +In some support interactions, Red Hat Support may ask you to collect a `sosreport` archive for a specific {product-title} node. For example, it might sometimes be necessary to review system logs or other node-specific data that is not included within the output of `oc adm must-gather`. 
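+
+For reference, the following is a minimal sketch of how a `sosreport` archive is typically generated on a cluster node by using a debug Pod and the `toolbox` support container. The node name is illustrative, and Red Hat Support might request additional `sosreport` plug-in options for your specific case:
+
+----
+$ oc debug node/my-cluster-node <1>
+# chroot /host <2>
+# toolbox <3>
+# sosreport <4>
+----
+<1> Start a debug Pod on the target node.
+<2> Use the host's root file system and executable paths within the debug shell.
+<3> Launch a support tools container that provides the `sosreport` binary.
+<4> Generate the archive. The path to the resulting archive is printed when the command completes.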
diff --git a/modules/accessing-running-pods.adoc b/modules/accessing-running-pods.adoc new file mode 100644 index 000000000000..eaf61126ed2e --- /dev/null +++ b/modules/accessing-running-pods.adoc @@ -0,0 +1,42 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/investigating-pod-issues.adoc + +[id="accessing-running-pods_{context}"] += Accessing running Pods + +You can review running Pods dynamically by opening a shell inside a Pod or by gaining network access through port forwarding. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* Your API service is still functional. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. Switch into the project that contains the Pod you would like to access. This is necessary because the `oc rsh` command does not accept the `-n` namespace option: ++ +---- +$ oc project +---- + +. Start a remote shell into a Pod: ++ +---- +$ oc rsh <1> +---- +<1> If a Pod has multiple containers, `oc rsh` defaults to the first container unless `-c ` is specified. + +. Start a remote shell into a specific container within a Pod: ++ +---- +$ oc rsh -c pod/ +---- + +. Create a port forwarding session to a port on a Pod: ++ +---- +$ oc port-forward : <1> +---- +<1> Enter `Ctrl+C` to cancel the port forwarding session. diff --git a/modules/checking-load-balancer-configuration.adoc b/modules/checking-load-balancer-configuration.adoc new file mode 100644 index 000000000000..06328c0d307a --- /dev/null +++ b/modules/checking-load-balancer-configuration.adoc @@ -0,0 +1,47 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-installations.adoc + +[id="checking-load-balancer-configuration_{context}"] += Checking a load balancer configuration before {product-title} installation + +Check your load balancer configuration prior to starting an {product-title} installation. + +.Prerequisites + +* You have configured an external load balancer of your choosing, in preparation for an {product-title} installation. The following example is based on a {op-system-base-full} host using HAProxy to provide load balancing services to a cluster. +* You have configured DNS in preparation for an {product-title} installation. +* You have SSH access to your load balancer. + +.Procedure + +. Check that the `haproxy` systemd service is active: ++ +---- +$ ssh @ systemctl status haproxy +---- + +. Verify that the load balancer is listening on the required ports. The following example references ports `80`, `443`, `6443`, and `22623`. ++ +* For HAProxy instances running on {op-system-base-full} 6, verify port status by using the `netstat` command: ++ +---- +$ ssh @ netstat -nltupe | grep -E ':80|:443|:6443|:22623' +---- ++ +* For HAProxy instances running on {op-system-base-full} 7 or 8, verify port status by using the `ss` command: ++ +---- +$ ssh @ ss -nltupe | grep -E ':80|:443|:6443|:22623' +---- ++ +[NOTE] +==== +Red Hat recommends the `ss` command instead of `netstat` in {op-system-base-full} 7 or later. `ss` is provided by the iproute package. For more information on the `ss` command, see the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/performance_tuning_guide/sect-red_hat_enterprise_linux-performance_tuning_guide-tool_reference-ss[{op-system-base-full} 7 Performance Tuning Guide]. +==== ++ +. 
Check that the wildcard DNS record resolves to the load balancer: ++ +---- +$ dig @ +---- diff --git a/modules/copying-files-pods-and-containers.adoc b/modules/copying-files-pods-and-containers.adoc new file mode 100644 index 000000000000..c47d25e4b46a --- /dev/null +++ b/modules/copying-files-pods-and-containers.adoc @@ -0,0 +1,35 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/investigating-pod-issues.adoc + +[id="copying-files-pods-and-containers_{context}"] += Copying files to and from Pods and containers + +You can copy files to and from a Pod to test configuration changes or gather diagnostic information. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* Your API service is still functional. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. Copy a file to a Pod: ++ +---- +$ oc cp :/ -c <1> +---- +<1> Note that a Pod's first container will be selected if the `-c` option is not specified. + +. Copy a file from a Pod: ++ +---- +$ oc cp :/ -c <1> +---- +<1> Note that a Pod's first container will be selected if the `-c` option is not specified. ++ +[NOTE] +==== +For `oc cp` to function, the `tar` binary must be available within the container. +==== diff --git a/modules/determining-where-installation-issues-occur.adoc b/modules/determining-where-installation-issues-occur.adoc new file mode 100644 index 000000000000..ba22dd8ae43b --- /dev/null +++ b/modules/determining-where-installation-issues-occur.adoc @@ -0,0 +1,34 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-installations.adoc + +[id="determining-where-installation-issues-occur_{context}"] += Determining where installation issues occur + +When troubleshooting {product-title} installation issues, you can monitor installation logs to determine at which stage issues occur. Then, retrieve diagnostic data relevant to that stage. + +{product-title} installation proceeds through the following stages: + +. Ignition configuration files are created. + +. The bootstrap machine boots and starts hosting the remote resources required for the master machines to boot. + +. The master machines fetch the remote resources from the bootstrap machine and finish booting. + +. The master machines use the bootstrap machine to form an etcd cluster. + +. The bootstrap machine starts a temporary Kubernetes control plane using the new etcd cluster. + +. The temporary control plane schedules the production control plane to the master machines. + +. The temporary control plane shuts down and passes control to the production control plane. + +. The bootstrap machine adds {product-title} components into the production control plane. + +. The installation program shuts down the bootstrap machine. + +. The control plane sets up the worker nodes. + +. The control plane installs additional services in the form of a set of Operators. + +. The cluster downloads and configures remaining components needed for the day-to-day operation, including the creation of worker machines in supported environments. 
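+
+As a rough way to correlate a running installation with these stages, you can instruct the installation program to wait for, and log, the completion of the bootstrap and installation phases, and then query cluster Operator status once the API is reachable. This is only a sketch; the installation directory shown is a placeholder for the directory you used when creating the cluster:
+
+----
+$ ./openshift-install wait-for bootstrap-complete --dir=<installation_directory> --log-level=debug <1>
+$ ./openshift-install wait-for install-complete --dir=<installation_directory> <2>
+$ oc get clusteroperators <3>
+----
+<1> Waits until the bootstrap stages have finished and it is safe to remove the bootstrap machine, logging progress at debug level.
+<2> Waits until the remaining control plane and Operator stages have completed.
+<3> After the API is available, lists the cluster Operators that are still progressing or degraded.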
diff --git a/modules/gathering-application-diagnostic-data.adoc b/modules/gathering-application-diagnostic-data.adoc new file mode 100644 index 000000000000..a8f807f0ed01 --- /dev/null +++ b/modules/gathering-application-diagnostic-data.adoc @@ -0,0 +1,107 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-s2i.adoc + +[id="gathering-application-diagnostic-data_{context}"] += Gathering application diagnostic data to investigate application failures + +Application failures can occur within running application Pods. In these situations, you can retrieve diagnostic information with these strategies: + +* Review events relating to the application Pods. +* Review the logs from the application Pods, including application-specific log files that are not collected by the {product-title} logging framework. +* Test application functionality interactively and run diagnostic tools in an application container. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. List events relating to a specific application Pod. The following example retrieves events for an application Pod named `my-app-1-akdlg`: ++ +---- +$ oc describe pod/my-app-1-akdlg +---- + +. Review logs from an application Pod: ++ +---- +$ oc logs -f pod/my-app-1-akdlg +---- + +. Query specific logs within a running application Pod. Logs that are sent to stdout are collected by the {product-title} logging framework and are included in the output of the preceding command. The following query is only required for logs that are not sent to stdout. ++ +.. If an application log can be accessed without root privileges within a Pod, concatenate the log file as follows: ++ +---- +$ oc exec my-app-1-akdlg -- cat /var/log/my-application.log +---- ++ +.. If root access is required to view an application log, you can start a debug container with root privileges and then view the log file from within the container. Start the debug container from the project's deployment configuration. Pod users typically run with non-root privileges, but running troubleshooting Pods with temporary root privileges can be useful during issue investigation: ++ +---- +$ oc debug dc/my-deployment-configuration --as-root -- cat /var/log/my-application.log +---- ++ +[NOTE] +==== +You can access an interactive shell with root access within the debug Pod if you run `oc debug dc/ --as-root` without appending `-- `. +==== + +. Test application functionality interactively and run diagnostic tools, in an application container with an interactive shell. +.. Start an interactive shell on the application container: ++ +---- +$ oc exec -it my-app-1-akdlg /bin/bash +---- ++ +.. Test application functionality interactively from within the shell. For example, you can run the container's entry point command and observe the results. Then, test changes from the command line directly, before updating the source code and rebuilding the application container through the S2I process. ++ +.. Run diagnostic binaries available within the container. ++ +[NOTE] +==== +Root privileges are required to run some diagnostic binaries. In these situations you can start a debug Pod with root access, based on a problematic Pod's deployment configuration, by running `oc debug dc/ --as-root`. Then, you can run diagnostic binaries as root from within the debug Pod. +==== + +. 
If diagnostic binaries are not available within a container, you can run a host's diagnostic binaries within a container's namespace by using `nsenter`. The following example runs `ip ad` within a container's namespace, using the host's `ip` binary. +.. Enter into a debug session on the target node. This step instantiates a debug Pod called `-debug`: ++ +---- +$ oc debug node/my-cluster-node +---- ++ +.. Set `/host` as the root directory within the debug shell. The debug Pod mounts the host's root file system in `/host` within the Pod. By changing the root directory to `/host`, you can run binaries contained in the host's executable paths: ++ +---- +# chroot /host +---- ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@..` instead. +==== ++ +.. Determine the target container ID: ++ +---- +# crictl ps +---- ++ +.. Determine the container's process ID. In this example, the target container ID is `a7fe32346b120`: ++ +---- +# crictl inspect a7fe32346b120 --output yaml | grep 'pid:' | awk '{print $2}' +---- ++ +.. Run `ip ad` within the container's namespace, using the host's `ip` binary. This example uses `31150` as the container's process ID. The `nsenter` command enters the namespace of a target process and runs a command in its namespace. Because the target process in this example is a container's process ID, the `ip ad` command is run in the container's namespace from the host: ++ +---- +# nsenter -n -t 31150 -- ip ad +---- ++ +[NOTE] +==== +Running a host's diagnostic binaries within a container's namespace is only possible if you are using a privileged container such as a debug node. +==== diff --git a/modules/gathering-bootstrap-diagnostic-data.adoc b/modules/gathering-bootstrap-diagnostic-data.adoc new file mode 100644 index 000000000000..5ed7c965128f --- /dev/null +++ b/modules/gathering-bootstrap-diagnostic-data.adoc @@ -0,0 +1,70 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-installations.adoc + +[id="gathering-bootstrap-diagnostic-data_{context}"] += Gathering bootstrap node diagnostic data + +When experiencing bootstrap-related issues, you can gather `bootkube.service` `journald` unit logs and container logs from the bootstrap node. + +.Prerequisites + +* You have SSH access to your bootstrap node. +* You have the fully qualified domain name of the bootstrap node. +* If you are hosting Ignition configuration files by using an HTTP server, you must have the HTTP server's fully qualified domain name and the port number. You must also have SSH access to the HTTP host. + +.Procedure + +. If you have access to the bootstrap node's console, monitor the console until the node reaches the login prompt. + +. Verify the Ignition file configuration. ++ +* If you are hosting Ignition configuration files by using an HTTP server. ++ +.. Verify the bootstrap node Ignition file URL. Replace `` with the HTTP server's fully qualified domain name: ++ +---- +$ curl -I http://:/bootstrap.ign <1> +---- +<1> The `-I` option returns the header only. If the Ignition file is available on the specified URL, the command returns `200 OK` status. 
If it is not available, the command returns `404 file not found`. ++ +.. To verify that the Ignition file was received by the bootstrap node, query the HTTP server logs on the serving host. For example, if you are using an Apache web server to serve Ignition files, enter the following command: ++ +---- +$ grep -is 'bootstrap.ign' /var/log/httpd/access_log +---- ++ +If the bootstrap Ignition file is received, the associated `HTTP GET` log message will include a `200 OK` success status, indicating that the request succeeded. ++ +.. If the Ignition file was not received, check that the Ignition files exist and that they have the appropriate file and web server permissions on the serving host directly. ++ +* If you are using a cloud provider mechanism to inject Ignition configuration files into hosts as part of their initial deployment. ++ +.. Review the bootstrap node's console to determine if the mechanism is injecting the bootstrap node Ignition file correctly. + +. Verify the availability of the bootstrap node's assigned storage device. + +. Verify that the bootstrap node has been assigned an IP address from the DHCP server. + +. Collect `bootkube.service` journald unit logs from the bootstrap node. Replace `` with the bootstrap node's fully qualified domain name: ++ +---- +$ ssh core@ journalctl -b -f -u bootkube.service +---- ++ +[NOTE] +==== +The `bootkube.service` log on the bootstrap node outputs etcd `connection refused` errors, indicating that the bootstrap server is unable to connect to etcd on master nodes. After etcd has started on each master node and the nodes have joined the cluster, the errors should stop. +==== ++ +. Collect logs from the bootstrap node containers. +.. Collect the logs using `podman` on the bootstrap node. Replace `` with the bootstrap node's fully qualified domain name: ++ +---- +$ ssh core@ 'for pod in $(sudo podman ps -a -q); do sudo podman logs $pod; done' +---- + +. If the bootstrap process fails, verify the following. ++ +* You can resolve `api..` from the installation host. +* The load balancer proxies port 6443 connections to bootstrap and master nodes. Ensure that the proxy configuration meets {product-title} installation requirements. diff --git a/modules/gathering-crio-logs.adoc b/modules/gathering-crio-logs.adoc new file mode 100644 index 000000000000..b989c3af0b5d --- /dev/null +++ b/modules/gathering-crio-logs.adoc @@ -0,0 +1,40 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-crio-issues.adoc + +[id="gathering-crio-logs_{context}"] += Gathering CRI-O journald unit logs + +If you experience CRI-O issues, you can obtain CRI-O journald unit logs from a node. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* Your API service is still functional. +* You have installed the OpenShift CLI (`oc`). +* You have the fully qualified domain names of the control plane, or master machines. + +.Procedure + +. Gather CRI-O journald unit logs. The following example collects logs from all master nodes within the cluster: ++ +---- +$ oc adm node-logs --role=master -u crio +---- + +. Gather CRI-O journald unit logs from a specific node: ++ +---- +$ oc adm node-logs -u crio +---- + +. If the API is not functional, review the logs using SSH instead. Replace `..` with appropriate values: ++ +---- +$ ssh core@.. 
journalctl -b -f -u crio.service +---- ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@..`. +==== diff --git a/modules/gathering-operator-logs.adoc b/modules/gathering-operator-logs.adoc new file mode 100644 index 000000000000..6f143f33226f --- /dev/null +++ b/modules/gathering-operator-logs.adoc @@ -0,0 +1,71 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-operator-issues.adoc + +[id="gathering-operator-logs_{context}"] += Gathering Operator logs + +If you experience Operator issues, you can gather detailed diagnostic information from Operator Pod logs. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* Your API service is still functional. +* You have installed the OpenShift CLI (`oc`). +* You have the fully qualified domain names of the control plane, or master machines. + +.Procedure + +. List the Operator Pods that are running in the Operator's namespace, plus the Pod status, restarts, and age: ++ +---- +$ oc get pods -n +---- + +. Review logs for an Operator Pod: ++ +---- +$ oc logs pod/ -n +---- ++ +If an Operator Pod has multiple containers, the preceding command will produce an error that includes the name of each container. Query logs from an individual container: ++ +---- +$ oc logs pod/ -c -n +---- + +. If the API is not functional, review Operator Pod and container logs on each master node by using SSH instead. Replace `..` with appropriate values. +.. List Pods on each master node: ++ +---- +$ ssh core@.. sudo crictl pods +---- ++ +.. For any Operator Pods not showing a `Ready` status, inspect the Pod's status in detail. Replace `` with the Operator Pod's ID listed in the output of the preceding command: ++ +---- +$ ssh core@.. sudo crictl inspectp +---- ++ +.. List containers related to an Operator Pod: ++ +---- +$ ssh core@.. sudo crictl ps --pod= +---- ++ +.. For any Operator container not showing a `Ready` status, inspect the container's status in detail. Replace `` with a container ID listed in the output of the preceding command: ++ +---- +$ ssh core@.. sudo crictl inspect +---- ++ +.. Review the logs for any Operator containers not showing a `Ready` status. Replace `` with a container ID listed in the output of the preceding command: ++ +---- +$ ssh core@.. sudo crictl logs -f +---- ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. 
In such situations, it is possible to access nodes using `ssh core@..`. +==== diff --git a/modules/gathering-s2i-diagnostic-data.adoc b/modules/gathering-s2i-diagnostic-data.adoc new file mode 100644 index 000000000000..fb8ded952558 --- /dev/null +++ b/modules/gathering-s2i-diagnostic-data.adoc @@ -0,0 +1,53 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-s2i.adoc + +[id="gathering-s2i-diagnostic-data_{context}"] += Gathering Source-to-Image diagnostic data + +The S2I tool runs a build Pod and a deployment Pod in sequence. The deployment Pod is responsible for deploying the application Pods based on the application container image created in the build stage. Watch build, deployment and application Pod status to determine where in the S2I process a failure occurs. Then, focus diagnostic data collection accordingly. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* Your API service is still functional. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. Watch the Pod status throughout the S2I process to determine at which stage a failure occurs: ++ +---- +$ oc get pods -w <1> +---- +<1> Use `-w` to monitor Pods for changes until you quit the command using `Ctrl+C`. + +. Review a failed Pod's logs for errors. ++ +* *If the build Pod fails*, review the build Pod's logs: ++ +---- +$ oc logs -f pod/--build +---- ++ +[NOTE] +==== +Alternatively, you can review the build configuration's logs using `oc logs -f bc/`. The build configuration's logs include the logs from the build Pod. +==== ++ +* *If the deployment Pod fails*, review the deployment Pod's logs: ++ +---- +$ oc logs -f pod/--deploy +---- ++ +[NOTE] +==== +Alternatively, you can review the deployment configuration's logs using `oc logs -f dc/`. This outputs logs from the deployment Pod until the deployment Pod completes successfully. The command outputs logs from the application Pods if you run it after the deployment Pod has completed. After a deployment Pod completes, its logs can still be accessed by running `oc logs -f pod/--deploy`. +==== ++ +* *If an application Pod fails, or if an application is not behaving as expected within a running application Pod*, review the application Pod's logs: ++ +---- +$ oc logs -f pod/-- +---- diff --git a/modules/inspecting-pod-and-container-logs.adoc b/modules/inspecting-pod-and-container-logs.adoc new file mode 100644 index 000000000000..95dd894665f3 --- /dev/null +++ b/modules/inspecting-pod-and-container-logs.adoc @@ -0,0 +1,54 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/investigating-pod-issues.adoc + +[id="inspecting-pod-and-container-logs_{context}"] += Inspecting Pod and container logs + +You can inspect Pod and container logs for warnings and error messages related to explicit Pod failures. Depending on policy and exit code, Pod and container logs remain available after Pods have been terminated. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* Your API service is still functional. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. Query logs for a specific Pod: ++ +---- +$ oc logs +---- + +. Query logs for a specific container within a Pod: ++ +---- +$ oc logs -c +---- ++ +Logs retrieved using the preceding `oc logs` commands are composed of messages sent to stdout within Pods or containers. + +. Inspect logs contained in `/var/log/` within a Pod. +.. 
List log files and subdirectories contained in `/var/log` within a Pod: ++ +---- +$ oc exec ls -alh /var/log +---- ++ +.. Query a specific log file contained in `/var/log` within a Pod: ++ +---- +$ oc exec cat /var/log/ +---- +.. List log files and subdirectories contained in `/var/log` within a specific container: ++ +---- +$ oc exec -c ls /var/log +---- ++ +.. Query a specific log file contained in `/var/log` within a specific container: ++ +---- +$ oc exec -c cat /var/log/ +---- diff --git a/modules/installation-bootstrap-gather.adoc b/modules/installation-bootstrap-gather.adoc index d7cdf9fdde46..deb86ee7751a 100644 --- a/modules/installation-bootstrap-gather.adoc +++ b/modules/installation-bootstrap-gather.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // -// *installing/installing-troubleshooting.adoc +// * installing/installing-troubleshooting.adoc +// * support/troubleshooting/troubleshooting-installations.adoc [id="installation-bootstrap-gather_{context}"] = Gathering logs from a failed installation @@ -17,13 +18,9 @@ running cluster, use the `oc adm must-gather` command. .Prerequisites -* Your {product-title} installation failed before the bootstrap process -finished. The bootstrap node must be running and accessible through SSH. -* The `ssh-agent` process is active on your computer, and you provided both the -`ssh-agent` process and the installation program the same SSH key. -* If you tried to install a cluster on infrastructure that you provisioned, you -must have the fully-qualified domain names of the control plane, or master, -machines. +* Your {product-title} installation failed before the bootstrap process finished. The bootstrap node is running and accessible through SSH. +* The `ssh-agent` process is active on your computer, and you provided the same SSH key to both the `ssh-agent` process and the installation program. +* If you tried to install a cluster on infrastructure that you provisioned, you must have the fully qualified domain names of the bootstrap and master nodes. .Procedure @@ -55,9 +52,9 @@ $ ./openshift-install gather bootstrap --dir= \ <1> ---- <1> For `installation_directory`, specify the same directory you specified when you ran `./openshift-install create cluster`. This directory contains the {product-title} definition files that the installation program creates. -<2> `` is the fully-qualified domain name or IP address of +<2> `` is the fully qualified domain name or IP address of the cluster's bootstrap machine. -<3> For each control plane, or master, machine in your cluster, replace `` with its fully-qualified domain name or IP address. +<3> For each control plane, or master, machine in your cluster, replace `` with its fully qualified domain name or IP address. + [NOTE] ==== diff --git a/modules/investigating-etcd-installation-issues.adoc b/modules/investigating-etcd-installation-issues.adoc new file mode 100644 index 000000000000..c39536f97ff8 --- /dev/null +++ b/modules/investigating-etcd-installation-issues.adoc @@ -0,0 +1,92 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-installations.adoc + +[id="investigating-etcd-installation-issues_{context}"] += Investigating etcd installation issues + +If you experience etcd issues during installation, you can check etcd Pod status and collect etcd Pod logs. You can also verify etcd DNS records and check DNS availability on master nodes. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. 
+* You have installed the OpenShift CLI (`oc`). +* You have SSH access to your hosts. +* You have the fully qualified domain names of the master nodes. + +.Procedure + +. Check the status of etcd Pods. +.. Review the status of Pods in the `openshift-etcd` namespace: ++ +---- +$ oc get pods -n openshift-etcd +---- ++ +.. Review the status of Pods in the `openshift-etcd-operator` namespace: ++ +---- +oc get pods -n openshift-etcd-operator +---- + +. If any of the Pods listed by the previous commands are not showing a `Running` or a `Completed` status, gather diagnostic information for the Pod. +.. Review events for the Pod: ++ +---- +$ oc describe pod/ -n +---- ++ +.. Inspect the Pod's logs: ++ +---- +$ oc logs pod/ -n +---- ++ +.. If the Pod has more than one container, the preceding command will create an error, and the container names will be provided in the error message. Inspect logs for each container: ++ +---- +$ oc logs pod/ -c -n +---- + +. If the API is not functional, review etcd Pod and container logs on each master node by using SSH instead. Replace `..` with appropriate values. +.. List etcd Pods on each master node: ++ +---- +$ ssh core@.. sudo crictl pods --name=etcd- +---- ++ +.. For any Pods not showing `Ready` status, inspect Pod status in detail. Replace `` with the Pod's ID listed in the output of the preceding command: ++ +---- +$ ssh core@.. sudo crictl inspectp +---- ++ +.. List containers related to a Pod: ++ +// TODO: Once https://bugzilla.redhat.com/show_bug.cgi?id=1858239 has been resolved, replace the `grep` command below: +//---- +//$ ssh core@.. sudo crictl ps --pod= +//---- ++ +---- +$ ssh core@.. sudo crictl ps | grep '' +---- ++ +.. For any containers not showing `Ready` status, inspect container status in detail. Replace `` with container IDs listed in the output of the preceding command: ++ +---- +$ ssh core@.. sudo crictl inspect +---- ++ +.. Review the logs for any containers not showing a `Ready` status. Replace `` with the container IDs listed in the output of the preceding command: ++ +---- +$ ssh core@.. sudo crictl logs -f +---- ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@..`. +==== ++ +. Validate primary and secondary DNS server connectivity from master nodes. diff --git a/modules/investigating-kubelet-api-installation-issues.adoc b/modules/investigating-kubelet-api-installation-issues.adoc new file mode 100644 index 000000000000..1ebfc4aa4010 --- /dev/null +++ b/modules/investigating-kubelet-api-installation-issues.adoc @@ -0,0 +1,54 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-installations.adoc + +[id="investigating-kubelet-api-installation-issues_{context}"] += Investigating master node kubelet and API server issues + +To investigate master node kubelet and API server issues during installation, check DNS, DHCP, and load balancer functionality. Also, verify that certificates have not expired. 
+ +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have installed the OpenShift CLI (`oc`). +* You have SSH access to your hosts. +* You have the fully qualified domain names of the master nodes. + +.Procedure + +. Verify that the API server's DNS record directs the kubelet on master nodes to [x-]`https://api-int..:6443`. Ensure that the record references the load balancer. + +. Ensure that the load balancer's port 6443 definition references each master node. + +. Check that unique master node host names have been provided by DHCP. + +. Inspect the `kubelet.service` journald unit logs on each master node. +.. Retrieve the logs using `oc`: ++ +---- +$ oc adm node-logs --role=master -u kubelet +---- ++ +.. If the API is not functional, review the logs using SSH instead. Replace `..` with appropriate values: ++ +---- +$ ssh core@.. journalctl -b -f -u kubelet.service +---- ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@..`. +==== ++ +. Check for certificate expiration messages in the master node kubelet logs. +.. Retrieve the log using `oc`: ++ +---- +$ oc adm node-logs --role=master -u kubelet | grep -is 'x509: certificate has expired' +---- ++ +.. If the API is not functional, review the logs using SSH instead. Replace `..` with appropriate values: ++ +---- +$ ssh core@.. journalctl -b -f -u kubelet.service | grep -is 'x509: certificate has expired' +---- diff --git a/modules/investigating-master-node-installation-issues.adoc b/modules/investigating-master-node-installation-issues.adoc new file mode 100644 index 000000000000..7b0ace2e1996 --- /dev/null +++ b/modules/investigating-master-node-installation-issues.adoc @@ -0,0 +1,220 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-installations.adoc + +[id="investigating-master-node-installation-issues_{context}"] += Investigating master node installation issues + +If you experience master node installation issues, determine the master node, {product-title} software defined network (SDN), and network Operator status. Collect `kubelet.service`, `crio.service` journald unit logs, and master node container logs for visibility into master node agent, CRI-O container runtime, and Pod activity. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have installed the OpenShift CLI (`oc`). +* You have SSH access to your hosts. +* You have the fully qualified domain names of the bootstrap and master nodes. +* If you are hosting Ignition configuration files by using an HTTP server, you must have the HTTP server's fully qualified domain name and the port number. You must also have SSH access to the HTTP host. ++ +[NOTE] +==== +The initial `kubeadmin` password can be found in `/auth/kubeadmin-password` on the installation host. +==== + +.Procedure + +. 
If you have access to the master node's console, monitor the console until the node reaches the login prompt. During the installation, Ignition log messages are output to the console. + +. Verify Ignition file configuration. ++ +* If you are hosting Ignition configuration files by using an HTTP server. ++ +.. Verify the master node Ignition file URL. Replace `` with the HTTP server's fully qualified domain name: ++ +---- +$ curl -I http://:/master.ign <1> +---- +<1> The `-I` option returns the header only. If the Ignition file is available on the specified URL, the command returns `200 OK` status. If it is not available, the command returns `404 file not found`. ++ +.. To verify that the Ignition file was received by the master node, query the HTTP server logs on the serving host. For example, if you are using an Apache web server to serve Ignition files: ++ +---- +$ grep -is 'master.ign' /var/log/httpd/access_log +---- ++ +If the master Ignition file is received, the associated `HTTP GET` log message will include a `200 OK` success status, indicating that the request succeeded. ++ +.. If the Ignition file was not received, check that it exists on the serving host directly. Ensure that the appropriate file and web server permissions are in place. ++ +* If you are using a cloud provider mechanism to inject Ignition configuration files into hosts as part of their initial deployment. ++ +.. Review the master node's console to determine if the mechanism is injecting the master node Ignition file correctly. + +. Check the availability of the master node's assigned storage device. + +. Verify that the master node has been assigned an IP address from the DHCP server. + +. Determine master node status. +.. Query master node status: ++ +---- +$ oc get nodes +---- ++ +.. If one of the master nodes does not reach a `Ready` status, retrieve a detailed node description: ++ +---- +$ oc describe node +---- ++ +[NOTE] +==== +It is not possible to run `oc` commands if an installation issue prevents the {product-title} API from running or if the kubelet is not running yet on each node. +==== ++ +. Determine {product-title} SDN status. ++ +.. Review `sdn-controller`, `sdn`, and `ovs` DaemonSet status in the `openshift-sdn` namespace: ++ +---- +$ oc get daemonsets -n openshift-sdn +---- ++ +.. If those resources are listed as `Not found`, review Pods in the `openshift-sdn` namespace: ++ +---- +$ oc get pods -n openshift-sdn +---- ++ +.. Review logs relating to failed {product-title} SDN Pods in the `openshift-sdn` namespace: ++ +---- +$ oc logs -n openshift-sdn +---- + +. Determine cluster network configuration status. +.. Review whether the cluster's network configuration exists: ++ +---- +$ oc get network.config.openshift.io cluster -o yaml +---- ++ +.. If the installer failed to create the network configuration, generate the Kubernetes manifests again and review message output: ++ +---- +$ ./openshift-install create manifests +---- ++ +.. Review Pod status in the `openshift-network-operator` namespace to determine whether the network Operator is running: ++ +---- +$ oc get pods -n openshift-network-operator +---- ++ +.. Gather network Operator Pod logs from the `openshift-network-operator` namespace: ++ +---- +$ oc logs pod/ -n openshift-network-operator +---- + +. Monitor `kubelet.service` journald unit logs on master nodes, after they have booted. This provides visibility into master node agent activity. +.. Retrieve the logs using `oc`: ++ +---- +$ oc adm node-logs --role=master -u kubelet +---- ++ +.. 
If the API is not functional, review the logs using SSH instead. Replace `..` with appropriate values: ++ +---- +$ ssh core@.. journalctl -b -f -u kubelet.service +---- ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@..`. +==== ++ +. Retrieve `crio.service` journald unit logs on master nodes, after they have booted. This provides visibility into master node CRI-O container runtime activity. +.. Retrieve the logs using `oc`: ++ +---- +$ oc adm node-logs --role=master -u crio +---- ++ +.. If the API is not functional, review the logs using SSH instead: ++ +---- +$ ssh core@.. journalctl -b -f -u crio.service +---- + +. Collect logs from specific subdirectories under `/var/log/` on master nodes. +.. Retrieve a list of logs contained within a `/var/log/` subdirectory. The following example lists files in `/var/log/openshift-apiserver/` on all master nodes: ++ +---- +$ oc adm node-logs --role=master --path=openshift-apiserver +---- ++ +.. Inspect a specific log within a `/var/log/` subdirectory. The following example outputs `/var/log/openshift-apiserver/audit.log` contents from all master nodes: ++ +---- +$ oc adm node-logs --role=master --path=openshift-apiserver/audit.log +---- ++ +.. If the API is not functional, review the logs on each node using SSH instead. The following example tails `/var/log/openshift-apiserver/audit.log`: ++ +---- +$ ssh core@.. sudo tail -f /var/log/openshift-apiserver/audit.log +---- + +. Review master node container logs using SSH. +.. List the containers: ++ +---- +$ ssh core@.. sudo crictl ps -a +---- ++ +.. Retrieve a container's logs using `crictl`: ++ +---- +$ ssh core@.. sudo crictl logs -f +---- + +. If you experience master node configuration issues, verify that the MCO, MCO endpoint, and DNS record are functioning. The Machine Config Operator (MCO) manages operating system configuration during the installation procedure. Also verify system clock accuracy and certificate validity. +.. Test whether the MCO endpoint is available. Replace `` with appropriate values: ++ +---- +$ curl https://api-int.:22623/config/master +---- ++ +.. If the endpoint is unresponsive, verify load balancer configuration. Ensure that the endpoint is configured to run on port 22623. ++ +.. Verify that the MCO endpoint's DNS record is configured and resolves to the load balancer. +... Run a DNS lookup for the defined MCO endpoint name: ++ +---- +$ dig api-int. @ +---- ++ +... Run a reverse lookup to the assigned MCO IP address on the load balancer: ++ +---- +$ dig -x @ +---- ++ +.. Verify that the MCO is functioning from the bootstrap node directly. Replace `` with the bootstrap node's fully qualified domain name: ++ +---- +$ ssh core@ curl https://api-int.:22623/config/master +---- ++ +.. System clock time must be synchronized between bootstrap, master, and worker nodes. Check each node's system clock reference time and time synchronization statistics: ++ +---- +$ ssh core@.. chronyc tracking +---- ++ +.. 
Review certificate validity: ++ +---- +$ openssl s_client -connect api-int.:22623 | openssl x509 -noout -text +---- diff --git a/modules/investigating-worker-node-installation-issues.adoc b/modules/investigating-worker-node-installation-issues.adoc new file mode 100644 index 000000000000..2a3575683c52 --- /dev/null +++ b/modules/investigating-worker-node-installation-issues.adoc @@ -0,0 +1,200 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-installations.adoc + +[id="investigating-worker-node-installation-issues_{context}"] += Investigating worker node installation issues + +If you experience worker node installation issues, you can review the worker node status. Collect `kubelet.service`, `crio.service` journald unit logs and the worker node container logs for visibility into the worker node agent, CRI-O container runtime and Pod activity. Additionally, you can check the Ignition file and Machine API Operator functionality. If worker node post-installation configuration fails, check Machine Config Operator (MCO) and DNS functionality. You can also verify system clock synchronization between the bootstrap, master, and worker nodes, and validate certificates. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have installed the OpenShift CLI (`oc`). +* You have SSH access to your hosts. +* You have the fully qualified domain names of the bootstrap and worker nodes. +* If you are hosting Ignition configuration files by using an HTTP server, you must have the HTTP server's fully qualified domain name and the port number. You must also have SSH access to the HTTP host. ++ +[NOTE] +==== +The initial `kubeadmin` password can be found in `/auth/kubeadmin-password` on the installation host. +==== + +.Procedure + +. If you have access to the worker node's console, monitor the console until the node reaches the login prompt. During the installation, Ignition log messages are output to the console. + +. Verify Ignition file configuration. ++ +* If you are hosting Ignition configuration files by using an HTTP server. ++ +.. Verify the worker node Ignition file URL. Replace `` with HTTP server's fully qualified domain name: ++ +---- +curl -I http://:/worker.ign <1> +---- +<1> The `-I` option returns the header only. If the Ignition file is available on the specified URL, the command returns `200 OK` status. If it is not available, the command returns `404 file not found`. ++ +.. To verify that the ignition file was received by the worker node, query the HTTP server logs on the HTTP host. For example, if you are using an Apache web server to serve Ignition files: ++ +---- +$ grep -is 'worker.ign' /var/log/httpd/access_log +---- ++ +If the worker Ignition file is received, the associated `HTTP GET` log message will include a `200 OK` success status, indicating that the request succeeded. ++ +.. If the Ignition file was not received, check that it exists on the serving host directly. Ensure that the appropriate file and web server permissions are in place. ++ +* If you are using a cloud provider mechanism to inject Ignition configuration files into hosts as part of their initial deployment. ++ +.. Review the worker node's console to determine if the mechanism is injecting the worker node Ignition file correctly. + +. Check the availability of the worker node's assigned storage device. + +. Verify that the worker node has been assigned an IP address from the DHCP server. + +. 
Determine worker node status. +.. Query node status: ++ +---- +$ oc get nodes +---- ++ +.. Retrieve a detailed node description for any worker nodes not showing a `Ready` status: ++ +---- +$ oc describe node +---- ++ +[NOTE] +==== +It is not possible to run `oc` commands if an installation issue prevents the {product-title} API from running or if the kubelet is not running yet on each node. +==== ++ +. Unlike master nodes, worker nodes are deployed and scaled using the Machine API Operator. Check the status of the Machine API Operator. +.. Review Machine API Operator Pod status: ++ +---- +$ oc get pods -n openshift-machine-api +---- ++ +.. If the Machine API Operator Pod does not have a `Ready` status, detail the Pod's events: ++ +---- +$ oc describe pod/ -n openshift-machine-api +---- ++ +.. Inspect `machine-api-operator` container logs. The container runs within the `machine-api-operator` Pod: ++ +---- +$ oc logs pod/ -n openshift-machine-api -c machine-api-operator +---- ++ +.. Also inspect `kube-rbac-proxy` container logs. The container also runs within the `machine-api-operator` Pod: ++ +---- +$ oc logs pod/ -n openshift-machine-api -c kube-rbac-proxy +---- + +. Monitor `kubelet.service` journald unit logs on worker nodes, after they have booted. This provides visibility into worker node agent activity. +.. Retrieve the logs using `oc`: ++ +---- +$ oc adm node-logs --role=worker -u kubelet +---- ++ +.. If the API is not functional, review the logs using SSH instead. Replace `..` with appropriate values: ++ +---- +$ ssh core@.. journalctl -b -f -u kubelet.service +---- ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must-gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@..`. +==== ++ +. Retrieve `crio.service` journald unit logs on worker nodes, after they have booted. This provides visibility into worker node CRI-O container runtime activity. +.. Retrieve the logs using `oc`: ++ +---- +$ oc adm node-logs --role=worker -u crio +---- ++ +.. If the API is not functional, review the logs using SSH instead: ++ +---- +$ ssh core@.. journalctl -b -f -u crio.service +---- + +. Collect logs from specific subdirectories under `/var/log/` on worker nodes. +.. Retrieve a list of logs contained within a `/var/log/` subdirectory. The following example lists files in `/var/log/sssd/` on all worker nodes: ++ +---- +$ oc adm node-logs --role=worker --path=sssd +---- ++ +.. Inspect a specific log within a `/var/log/` subdirectory. The following example outputs `/var/log/sssd/sssd.log` contents from all worker nodes: ++ +---- +$ oc adm node-logs --role=worker --path=sssd/sssd.log +---- ++ +.. If the API is not functional, review the logs on each node using SSH instead. The following example tails `/var/log/sssd/sssd.log`: ++ +---- +$ ssh core@.. sudo tail -f /var/log/sssd/sssd.log +---- + +. Review worker node container logs using SSH. +.. List the containers: ++ +---- +$ ssh core@.. sudo crictl ps -a +---- ++ +.. Retrieve a container's logs using `crictl`: ++ +---- +$ ssh core@.. 
sudo crictl logs -f +---- + +. If you experience worker node configuration issues, verify that the MCO, MCO endpoint, and DNS record are functioning. The Machine Config Operator (MCO) manages operating system configuration during the installation procedure. Also verify system clock accuracy and certificate validity. +.. Test whether the MCO endpoint is available. Replace `` with appropriate values: ++ +---- +$ curl https://api-int.:22623/config/worker +---- ++ +.. If the endpoint is unresponsive, verify load balancer configuration. Ensure that the endpoint is configured to run on port 22623. ++ +.. Verify that the MCO endpoint's DNS record is configured and resolves to the load balancer. +... Run a DNS lookup for the defined MCO endpoint name: ++ +---- +$ dig api-int. @ +---- ++ +... Run a reverse lookup to the assigned MCO IP address on the load balancer: ++ +---- +$ dig -x @ +---- ++ +.. Verify that the MCO is functioning from the bootstrap node directly. Replace `` with the bootstrap node's fully qualified domain name: ++ +---- +$ ssh core@ curl https://api-int.:22623/config/worker +---- ++ +.. System clock time must be synchronized between bootstrap, master, and worker nodes. Check each node's system clock reference time and time synchronization statistics: ++ +---- +$ ssh core@.. chronyc tracking +---- ++ +.. Review certificate validity: ++ +---- +$ openssl s_client -connect api-int.:22623 | openssl x509 -noout -text +---- diff --git a/modules/monitoring-installation-progress.adoc b/modules/monitoring-installation-progress.adoc new file mode 100644 index 000000000000..b8dc125edeff --- /dev/null +++ b/modules/monitoring-installation-progress.adoc @@ -0,0 +1,64 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-installations.adoc + +[id="monitoring-installation-progress_{context}"] += Monitoring installation progress + +You can monitor high-level installation, bootstrap, and control plane logs as an {product-title} installation progresses. This provides greater visibility into how an installation progresses and helps identify the stage at which an installation failure occurs. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have installed the OpenShift CLI (`oc`). +* You have SSH access to your hosts. +* You have the fully qualified domain names of the bootstrap and master nodes. ++ +[NOTE] +==== +The initial `kubeadmin` password can be found in `/auth/kubeadmin-password` on the installation host. +==== + +.Procedure + +. Watch the installation log as the installation progresses: ++ +---- +$ tail -f ~//.openshift_install.log +---- + +. Monitor the `bootkube.service` journald unit log on the bootstrap node, after it has booted. This provides visibility into the bootstrapping of the first control plane. Replace `` with the bootstrap node's fully qualified domain name: ++ +---- +$ ssh core@ journalctl -b -f -u bootkube.service +---- ++ +[NOTE] +==== +The `bootkube.service` log on the bootstrap node outputs etcd `connection refused` errors, indicating that the bootstrap server is unable to connect to etcd on master nodes. After etcd has started on each master node and the nodes have joined the cluster, the errors should stop. +==== ++ +. Monitor `kubelet.service` journald unit logs on master nodes, after they have booted. This provides visibility into master node agent activity. +.. Monitor the logs using `oc`: ++ +---- +$ oc adm node-logs --role=master -u kubelet +---- +.. 
If the API is not functional, review the logs using SSH instead. Replace `..` with appropriate values: ++ +---- +$ ssh core@.. journalctl -b -f -u kubelet.service +---- + +. Monitor `crio.service` journald unit logs on master nodes, after they have booted. This provides visibility into master node CRI-O container runtime activity. +.. Monitor the logs using `oc`: ++ +---- +$ oc adm node-logs --role=master -u crio +---- ++ +.. If the API is not functional, review the logs using SSH instead. Replace `..` with appropriate values: ++ +---- +$ ssh core@master-N.cluster_name.sub_domain.domain journalctl -b -f -u crio.service +---- diff --git a/modules/olm-status-conditions.adoc b/modules/olm-status-conditions.adoc index 5c608ef02dfc..9f7a33896872 100644 --- a/modules/olm-status-conditions.adoc +++ b/modules/olm-status-conditions.adoc @@ -1,9 +1,10 @@ // Module included in the following assemblies: // // * operators/olm-status.adoc +// * support/troubleshooting/troubleshooting-operator-issues.adoc [id="olm-status-conditions_{context}"] -= Condition types += Operator Subscription condition types Subscriptions can report the following condition types: @@ -25,3 +26,8 @@ Subscriptions can report the following condition types: |A Subscription's InstallPlan has failed. |=== + +[NOTE] +==== +Default {product-title} cluster Operators are managed by the Cluster Version Operator (CVO) and they do not have a Subscription object. Application Operators are managed by Operator Lifecycle Manager (OLM) and they have a Subscription object. +==== diff --git a/modules/olm-status-viewing-cli.adoc b/modules/olm-status-viewing-cli.adoc index da703668a778..f6c1bf51b95a 100644 --- a/modules/olm-status-viewing-cli.adoc +++ b/modules/olm-status-viewing-cli.adoc @@ -1,18 +1,30 @@ // Module included in the following assemblies: // // * operators/olm-status.adoc +// * support/troubleshooting/troubleshooting-operator-issues.adoc [id="olm-status-viewing-cli_{context}"] -= Viewing Operator status using the CLI += Viewing Operator Subscription status using the CLI -You can view Operator status using the CLI. +You can view Operator Subscription status using the CLI. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have installed the OpenShift CLI (`oc`). .Procedure -. Use the `oc describe` command to inspect the Subscription resource: +. List Operator Subscriptions: + ---- -$ oc describe sub +$ oc get subs -n +---- + +. Use the `oc describe` command to inspect a Subscription resource: ++ +---- +$ oc describe sub -n ---- . In the command output, find the `Conditions` section: @@ -25,3 +37,8 @@ Conditions: Status: False Type: CatalogSourcesUnhealthy ---- + +[NOTE] +==== +Default {product-title} cluster Operators are managed by the Cluster Version Operator (CVO) and they do not have a Subscription object. Application Operators are managed by Operator Lifecycle Manager (OLM) and they have a Subscription object. 
+==== diff --git a/modules/querying-bootstrap-node-journal-logs.adoc b/modules/querying-bootstrap-node-journal-logs.adoc new file mode 100644 index 000000000000..74c7ec7ef57b --- /dev/null +++ b/modules/querying-bootstrap-node-journal-logs.adoc @@ -0,0 +1,32 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/gathering-cluster-data.adoc + +[id="querying-bootstrap-node-journal-logs_{context}"] += Querying bootstrap node journal logs + +If you experience bootstrap-related issues, you can gather `bootkube.service` `journald` unit logs and container logs from the bootstrap node. + +.Prerequisites + +* You have SSH access to your bootstrap node. +* You have the fully qualified domain name of the bootstrap node. + +.Procedure + +. Query `bootkube.service` `journald` unit logs from a bootstrap node during {product-title} installation. Replace `` with the bootstrap node's fully qualified domain name: ++ +---- +$ ssh core journalctl -b -f -u bootkube.service +---- ++ +[NOTE] +==== +The `bootkube.service` log on the bootstrap node outputs etcd `connection refused` errors, indicating that the bootstrap server is unable to connect to etcd on master nodes. After etcd has started on each master node and the nodes have joined the cluster, the errors should stop. +==== ++ +. Collect logs from the bootstrap node containers using `podman` on the bootstrap node. Replace `` with the bootstrap node's fully qualified domain name: ++ +---- +$ ssh core@ 'for pod in $(sudo podman ps -a -q); do sudo podman logs $pod; done' +---- diff --git a/modules/querying-cluster-node-journal-logs.adoc b/modules/querying-cluster-node-journal-logs.adoc new file mode 100644 index 000000000000..270d5a207cc4 --- /dev/null +++ b/modules/querying-cluster-node-journal-logs.adoc @@ -0,0 +1,49 @@ +// Module included in the following assemblies: +// +// * support/gathering-cluster-data.adoc +// * support/troubleshooting/verifying-node-health.adoc + +[id="querying-cluster-node-journal-logs_{context}"] += Querying cluster node journal logs + +You can gather `journald` unit logs and other logs within `/var/log` on individual cluster nodes. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* Your API service is still functional. +* You have installed the OpenShift CLI (`oc`). +* You have SSH access to your hosts. + +.Procedure + +. Query `kubelet` `journald` unit logs from {product-title} cluster nodes. The following example queries master nodes only: ++ +---- +$ oc adm node-logs --role=master -u kubelet <1> +---- +<1> Replace `kubelet` as appropriate to query other unit logs. + +. Collect logs from specific subdirectories under `/var/log/` on cluster nodes. +.. Retrieve a list of logs contained within a `/var/log/` subdirectory. The following example lists files in `/var/log/openshift-apiserver/` on all master nodes: ++ +---- +$ oc adm node-logs --role=master --path=openshift-apiserver +---- ++ +.. Inspect a specific log within a `/var/log/` subdirectory. The following example outputs `/var/log/openshift-apiserver/audit.log` contents from all master nodes: ++ +---- +$ oc adm node-logs --role=master --path=openshift-apiserver/audit.log +---- ++ +.. If the API is not functional, review the logs on each node using SSH instead. The following example tails `/var/log/openshift-apiserver/audit.log`: ++ +---- +$ ssh core@.. 
sudo tail -f /var/log/openshift-apiserver/audit.log +---- ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@..`. +==== diff --git a/modules/querying-kubelet-status-on-a-node.adoc b/modules/querying-kubelet-status-on-a-node.adoc new file mode 100644 index 000000000000..59adaded1017 --- /dev/null +++ b/modules/querying-kubelet-status-on-a-node.adoc @@ -0,0 +1,46 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/verifying-node-health.adoc + +[id="querying-kubelet-status-on-a-node_{context}"] += Querying the kubelet's status on a node + +You can review cluster node health status, resource consumption statistics, and node logs. Additionally, you can query `kubelet` status on individual nodes. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* Your API service is still functional. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. The kubelet is managed using a systemd service on each node. Review the kubelet's status by querying the `kubelet` systemd service within a debug Pod. +.. Start a debug Pod for a node: ++ +---- +$ oc debug node/my-node +---- ++ +.. Set `/host` as the root directory within the debug shell. The debug Pod mounts the host's root file system in `/host` within the Pod. By changing the root directory to `/host`, you can run binaries contained in the host's executable paths: ++ +---- +# chroot /host +---- ++ +[NOTE] +==== +{product-title} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. However, if the {product-title} API is not available, or `kubelet` is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@..` instead. +==== ++ +.. Check whether the `kubelet` systemd service is active on the node: ++ +---- +# systemctl is-active kubelet +---- ++ +.. Output a more detailed `kubelet.service` status summary: ++ +---- +# systemctl status kubelet +---- diff --git a/modules/querying-operator-pod-status.adoc b/modules/querying-operator-pod-status.adoc new file mode 100644 index 000000000000..b4406b783122 --- /dev/null +++ b/modules/querying-operator-pod-status.adoc @@ -0,0 +1,66 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-operator-issues.adoc + +[id="querying-operator-pod-status_{context}"] += Querying Operator Pod status + +You can list Operator Pods within a cluster and their status. You can also collect a detailed Operator Pod summary. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* Your API service is still functional. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. List Operators running in the cluster. 
The output includes Operator version, availability, and up-time information: ++ +---- +$ oc get clusteroperators +---- + +. List Operator Pods running in the Operator's namespace, plus Pod status, restarts, and age: ++ +---- +$ oc get pod -n +---- + +. Output a detailed Operator Pod summary: ++ +---- +$ oc describe pod -n +---- + +. If an Operator issue is node-specific, query Operator container status on that node. +.. Start a debug Pod for the node: ++ +---- +$ oc debug node/my-node +---- ++ +.. Set `/host` as the root directory within the debug shell. The debug Pod mounts the host's root file system in `/host` within the Pod. By changing the root directory to `/host`, you can run binaries contained in the host's executable paths: ++ +---- +# chroot /host +---- ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@..` instead. +==== ++ +.. List details about the node's containers, including state and associated Pod IDs: ++ +---- +# crictl ps +---- ++ +.. List information about a specific Operator container on the node. The following example lists information about the `network-operator` container: ++ +---- +# crictl ps --name network-operator +---- ++ +.. Exit from the debug shell. diff --git a/modules/querying-operator-status-after-installation.adoc b/modules/querying-operator-status-after-installation.adoc new file mode 100644 index 000000000000..748500e0263e --- /dev/null +++ b/modules/querying-operator-status-after-installation.adoc @@ -0,0 +1,59 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-installations.adoc + +[id="querying-operator-status-after-installation_{context}"] += Querying Operator status after installation + +You can check Operator status at the end of an installation. Retrieve diagnostic data for Operators that do not become available. Review logs for any Operator Pods that are listed as `Pending` or have an error status. Validate base images used by problematic Pods. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. Check that cluster Operators are all available at the end of an installation. ++ +---- +$ oc get clusteroperators +---- + +. If any Operators fail to become available, view Operator events: ++ +---- +$ oc describe clusteroperator +---- ++ + +. Review Operator Pod status within the Operator's namespace: ++ +---- +$ oc get pods -n +---- + +. Obtain a detailed description for Pods that do not have `Running` status: ++ +---- +oc describe pod/ -n +---- + +. Inspect Pod logs: ++ +---- +$ oc logs pod/ -n +---- + +. When experiencing Pod base image related issues, review base image status. +.. Obtain details of the base image used by a problematic Pod: ++ +---- +$ oc get pod -o "jsonpath={range .status.containerStatuses[*]}{.name}{'\t'}{.state}{'\t'}{.image}{'\n'}{end}" -n +---- ++ +.. 
List base image release information: ++ +---- +$ oc adm release info : --commits +---- diff --git a/modules/reviewing-node-status-usage-and-configuration.adoc b/modules/reviewing-node-status-usage-and-configuration.adoc new file mode 100644 index 000000000000..d831b9aa5139 --- /dev/null +++ b/modules/reviewing-node-status-usage-and-configuration.adoc @@ -0,0 +1,33 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/verifying-node-health.adoc + +[id="reviewing-node-status-use-and-configuration_{context}"] += Reviewing node status, resource usage, and configuration + +Review cluster node health status, resource consumption statistics, and node logs. Additionally, query `kubelet` status on individual nodes. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. List the name, status, and role for all nodes in the cluster: ++ +---- +$ oc get nodes +---- + +. Summarize CPU and memory usage for each node within the cluster: ++ +---- +$ oc adm top nodes +---- + +. Summarize CPU and memory usage for a specific node: ++ +---- +$ oc adm top node -l my-node +---- diff --git a/modules/reviewing-pod-status.adoc b/modules/reviewing-pod-status.adoc new file mode 100644 index 000000000000..db593c4c2fa5 --- /dev/null +++ b/modules/reviewing-pod-status.adoc @@ -0,0 +1,60 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/investigating-pod-issues.adoc + +[id="reviewing-pod-status_{context}"] += Reviewing Pod status + +You can query Pod status and error states. You can also query a Pod's associated deployment configuration and review base image availability. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have installed the OpenShift CLI (`oc`). +* `skopeo` is installed. + +.Procedure + +. Switch into a project: ++ +---- +$ oc project +---- + +. List Pods running within the namespace, as well as Pod status, error states, restarts, and age: ++ +---- +$ oc get pods +---- + +. Determine whether the namespace is managed by a deployment configuration: ++ +---- +$ oc status +---- ++ +If the namespace is managed by a deployment configuration, the output includes the deployment configuration name and a base image reference. + +. Inspect the base image referenced in the preceding command's output: ++ +---- +$ skopeo inspect docker:// +---- + +. If the base image reference is not correct, update the reference in the deployment configuration: ++ +---- +$ oc edit deployment/my-deployment +---- + +. When deployment configuration changes on exit, the configuration will automatically redeploy. Watch Pod status as the deployment progresses, to determine whether the issue has been resolved: ++ +---- +$ oc get pods -w +---- + +. Review events within the namespace for diagnostic information relating to Pod failures: ++ +---- +$ oc get events +---- diff --git a/modules/specifying-oc-log-levels.adoc b/modules/specifying-oc-log-levels.adoc new file mode 100644 index 000000000000..f4c6305c0234 --- /dev/null +++ b/modules/specifying-oc-log-levels.adoc @@ -0,0 +1,26 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/diagnosing-oc-issues.adoc + +[id="specifying-oc-log-levels_{context}"] += Specifying OpenShift CLI (`oc`) log levels + +You can investigate OpenShift CLI (`oc`) issues by increasing the command's log level. 
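For example, a simple query run at a higher log level prints the API requests that the command issues to stderr. The resource queried here is only an illustration; any `oc` command accepts the flag:

----
$ oc get clusterversion --loglevel 6
----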
+ +.Prerequisites + +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. Specify the `oc` log level when running an `oc` command: ++ +---- +$ oc --loglevel +---- + +. The {product-title} user's current session token is typically included in logged `curl` requests where required. You can also obtain the current user's session token manually, for use when testing aspects of an `oc` command's underlying process step by step: ++ +---- +$ oc whoami -t +---- diff --git a/modules/specifying-openshift-installer-log-levels.adoc b/modules/specifying-openshift-installer-log-levels.adoc new file mode 100644 index 000000000000..dfc0dd2b9694 --- /dev/null +++ b/modules/specifying-openshift-installer-log-levels.adoc @@ -0,0 +1,21 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-installations.adoc + +[id="specifying-openshift-installer-log-levels_{context}"] += Specifying {product-title} installer log levels + +By default, the {product-title} installer log level is set to `info`. If more detailed logging is required when diagnosing a failed {product-title} installation, you can increase the `openshift-install` log level to `debug` when starting the installation again. + +.Prerequisites + +* You have access to the installation host. + +.Procedure + +* Set the installation log level to `debug` when initiating the installation: ++ +---- +$ ./openshift-install --dir= wait-for bootstrap-complete --log-level=debug <1> +---- +<1> Possible log levels include `info`, `warn`, `error,` and `debug`. diff --git a/modules/starting-debug-pods-with-root-access.adoc b/modules/starting-debug-pods-with-root-access.adoc new file mode 100644 index 000000000000..8ef112c055b8 --- /dev/null +++ b/modules/starting-debug-pods-with-root-access.adoc @@ -0,0 +1,47 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/investigating-pod-issues.adoc + +[id="starting-debug-pods-with-root-access_{context}"] += Starting debug Pods with root access + +You can start a debug Pod with root access, based on a problematic Pod's deployment or deployment configuration. Pod users typically run with non-root privileges, but running troubleshooting Pods with temporary root privileges can be useful during issue investigation. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* Your API service is still functional. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. Start a debug Pod with root access, based on a deployment. +.. Obtain a project's deployment name: ++ +---- +$ oc get deployment -n +---- ++ +.. Start a debug Pod with root privileges, based on the deployment: ++ +---- +$ oc debug deployment/my-deployment --as-root -n +---- + +. Start a debug Pod with root access, based on a deployment configuration. +.. Obtain a project's deployment configuration name: ++ +---- +$ oc get deploymentconfigs -n +---- ++ +.. Start a debug Pod with root privileges, based on the deployment configuration: ++ +---- +$ oc debug deploymentconfig/my-deployment-configuration --as-root -n +---- + +[NOTE] +==== +You can append `-- ` to the preceding `oc debug` commands to run individual commands within a debug Pod, instead of running an interactive shell. 
+==== diff --git a/modules/strategies-for-s2i-troubleshooting.adoc b/modules/strategies-for-s2i-troubleshooting.adoc new file mode 100644 index 000000000000..a8a7a7d622f7 --- /dev/null +++ b/modules/strategies-for-s2i-troubleshooting.adoc @@ -0,0 +1,22 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-s2i.adoc + +[id="strategies-for-s2i-troubleshooting_{context}"] += Strategies for Source-to-Image troubleshooting + +Use Source-to-Image (S2I) to build reproducible, Docker-formatted container images. You can create ready-to-run images by injecting application source code into a container image and assembling a new image. The new image incorporates the base image (the builder) and built source. + +To determine where in the S2I process a failure occurs, you can observe the state of the Pods relating to each of the following S2I stages: + +. *During the build configuration stage*, a build Pod is used to create an application container image from a base image and application source code. + +. *During the deployment configuration stage*, a deployment Pod is used to deploy application Pods from the application container image that was built in the build configuration stage. The deployment Pod also deploys other resources such as services and routes. The deployment configuration begins after the build configuration succeeds. + +. *After the deployment Pod has started the application Pods*, application failures can occur within the running application Pods. For instance, an application might not behave as expected even though the application Pods are in a `Running` state. In this scenario, you can access running application Pods to investigate application failures within a Pod. + +When troubleshooting S2I issues, follow this strategy: + +. Monitor build, deployment, and application Pod status +. Determine the stage of the S2I process where the problem occurred +. Review logs corresponding to the failed stage diff --git a/modules/summarizing-cluster-specifications-through-clusterversion.adoc b/modules/summarizing-cluster-specifications-through-clusterversion.adoc new file mode 100644 index 000000000000..d92f5373f74a --- /dev/null +++ b/modules/summarizing-cluster-specifications-through-clusterversion.adoc @@ -0,0 +1,27 @@ +// Module included in the following assemblies: +// +// * support/summarizing-cluster-specifications.adoc + +[id="summarizing-cluster-specifications-through-clusterversion_{context}"] += Summarizing cluster specifications through `clusterversion` + +You can obtain a summary of {product-title} cluster specifications by querying the `clusterversion` resource. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. Query cluster version, availability, uptime, and general status: ++ +---- +$ oc get clusterversion +---- + +. 
Obtain a detailed summary of cluster specifications, update availability, and update history: ++ +---- +$ oc describe clusterversion +---- diff --git a/modules/support-collecting-network-trace.adoc b/modules/support-collecting-network-trace.adoc new file mode 100644 index 000000000000..7412394e48e8 --- /dev/null +++ b/modules/support-collecting-network-trace.adoc @@ -0,0 +1,115 @@ +// Module included in the following assemblies: +// +// * support/gathering-cluster-data.adoc + +[id="support-collecting-network-trace_{context}"] += Collecting a network trace from an {product-title} node or container + +When investigating potential network-related {product-title} issues, Red Hat Support might request a network packet trace from a specific {product-title} cluster node or from a specific container. The recommended method to capture a network trace in {product-title} is through a debug Pod. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have installed the OpenShift CLI (`oc`). +* You have a Red Hat standard or premium Subscription. +* You have a Red Hat Customer Portal account. +* You have an existing Red Hat Support case ID. +* You have SSH access to your hosts. + +.Procedure + +. Obtain a list of cluster nodes: ++ +---- +$ oc get nodes +---- + +. Enter into a debug session on the target node. This step instantiates a debug Pod called `-debug`: ++ +---- +$ oc debug node/my-cluster-node +---- + +. Set `/host` as the root directory within the debug shell. The debug Pod mounts the host's root file system in `/host` within the Pod. By changing the root directory to `/host`, you can run binaries contained in the host's executable paths: ++ +---- +# chroot /host +---- ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@..` instead. +==== ++ +. From within the `chroot` environment console, obtain the node's interface names: ++ +---- +# ip ad +---- + +. Start a `toolbox` container, which includes the required binaries and plug-ins to run `sosreport`: ++ +---- +# toolbox +---- ++ +[NOTE] +==== +If an existing `toolbox` Pod is already running, the `toolbox` command outputs `'toolbox-' already exists. Trying to start...`. To avoid `tcpdump` issues, remove the running toolbox container with `podman rm toolbox-` and spawn a new toolbox container. +==== ++ +. Initiate a `tcpdump` session on the cluster node and redirect output to a capture file. This example uses `ens5` as the interface name: ++ +---- +tcpdump -nn -s 0 -i ens5 -w /host/var/tmp/my-cluster-node_$(date +%d_%m_%Y-%H_%M_%S-%Z).pcap <1> +---- +<1> The `tcpdump` capture file's path is outside of the `chroot` environment because the toolbox container mounts the host’s root directory at `/host`. + +. If a `tcpdump` capture is required for a specific container on the node, follow these steps. +.. Determine the target container ID. The `chroot host` command precedes the `crictl` command in this step because the toolbox container mounts the host's root directory at `/host`: ++ +---- +# chroot /host crictl ps +---- ++ +.. Determine the container's process ID. 
In this example, the container ID is `a7fe32346b120`:
++
+----
+# chroot /host crictl inspect --output yaml a7fe32346b120 | grep 'pid' | awk '{print $2}'
+----
++
+.. Initiate a `tcpdump` session on the container and redirect output to a capture file. This example uses `49628` as the container's process ID and `ens5` as the interface name. The `nsenter` command enters the namespace of a target process and runs a command in its namespace. Because the target process in this example is a container's process ID, the `tcpdump` command is run in the container's namespace from the host:
++
+----
+# nsenter -n -t 49628 -- tcpdump -nn -i ens5 -w /host/var/tmp/my-cluster-node-my-container_$(date +%d_%m_%Y-%H_%M_%S-%Z).pcap <1>
+----
+<1> The `tcpdump` capture file's path is outside of the `chroot` environment because the toolbox container mounts the host’s root directory at `/host`.
+
+. Provide the `tcpdump` capture file to Red Hat Support for analysis, using one of the following methods.
++
+* Upload the file to an existing Red Hat support case directly from an {product-title} cluster.
+.. From within the toolbox container, run `redhat-support-tool` to attach the file directly to an existing Red Hat Support case. This example uses support case ID `01234567`:
++
+----
+# redhat-support-tool addattachment -c 01234567 /host/var/tmp/my-tcpdump-capture-file.pcap <1>
+----
+<1> The toolbox container mounts the host’s root directory at `/host`. Reference the absolute path from the toolbox container's root directory, including `/host/`, when specifying files to upload through the `redhat-support-tool` command.
++
+* Upload the file to an existing Red Hat support case through the Red Hat Customer Portal.
+.. Concatenate the `tcpdump` capture file by running the `oc debug node/` command and redirect the output to a file. This command assumes you have exited the previous `oc debug` session:
++
+----
+$ oc debug node/my-cluster-node -- bash -c 'cat /host/var/tmp/my-tcpdump-capture-file.pcap' > /tmp/my-tcpdump-capture-file.pcap <1>
+----
+<1> The debug container mounts the host’s root directory at `/host`. Reference the absolute path from the debug container's root directory, including `/host`, when specifying target files for concatenation.
++
+[NOTE]
+====
+{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Transferring a `tcpdump` capture file from a cluster node by using `scp` is not recommended and nodes will be tainted as _accessed_. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to copy a `tcpdump` capture file from a node by running `scp core@..: `.
+====
++
+.. Navigate to an existing support case within link:https://access.redhat.com/support/cases/[https://access.redhat.com/support/cases/].
++
+.. Select *Attach files* and follow the prompts to upload the file.
+
+// TODO - Add details relating to https://github.com/openshift/must-gather/pull/156 within the procedure.
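Before uploading, you can optionally confirm that the capture contains the expected traffic by reading it back with `tcpdump`. This is a minimal sketch that assumes the capture file has already been copied to your workstation as `/tmp/my-tcpdump-capture-file.pcap`, as in the concatenation step above:

----
$ tcpdump -nn -r /tmp/my-tcpdump-capture-file.pcap -c 20 <1>
----
<1> `-r` reads packets from the capture file and `-c 20` limits the output to the first 20 packets.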
diff --git a/modules/support-generating-a-sosreport-archive.adoc b/modules/support-generating-a-sosreport-archive.adoc new file mode 100644 index 000000000000..26e7cc0544b9 --- /dev/null +++ b/modules/support-generating-a-sosreport-archive.adoc @@ -0,0 +1,102 @@ +// Module included in the following assemblies: +// +// * support/gathering-cluster-data.adoc + +[id="support-generating-a-sosreport-archive_{context}"] += Generating a `sosreport` archive for an {product-title} cluster node + +The recommended way to generate a `sosreport` for an {product-title} {product-version} cluster node is through a debug Pod. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have SSH access to your hosts. +* You have installed the OpenShift CLI (`oc`). +* You have a Red Hat standard or premium Subscription. +* You have a Red Hat Customer Portal account. +* You have an existing Red Hat Support case ID. + +.Procedure + +. Obtain a list of cluster nodes: ++ +---- +$ oc get nodes +---- + +. Enter into a debug session on the target node. This step instantiates a debug Pod called `-debug`: ++ +---- +$ oc debug node/my-cluster-node +---- + +. Set `/host` as the root directory within the debug shell. The debug Pod mounts the host's root file system in `/host` within the Pod. By changing the root directory to `/host`, you can run binaries contained in the host's executable paths: ++ +---- +# chroot /host +---- ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@..` instead. +==== ++ +. Start a `toolbox` container, which includes the required binaries and plug-ins to run `sosreport`: ++ +---- +# toolbox +---- ++ +[NOTE] +==== +If an existing `toolbox` Pod is already running, the `toolbox` command outputs `'toolbox-' already exists. Trying to start...`. Remove the running toolbox container with `podman rm toolbox-` and spawn a new toolbox container, to avoid issues with `sosreport` plugins. +==== ++ +. Collect a `sosreport` archive. +.. Run the `sosreport` command and enable the `crio.all` and `crio.logs` CRI-O container engine `sosreport` plug-ins: ++ +---- +# sosreport -k crio.all=on -k crio.logs=on <1> +---- +<1> `-k` enables you to define `sosreport` plug-in parameters outside of the defaults. ++ +.. Press *Enter* when prompted, to continue. ++ +.. Provide the Red Hat Support case ID. `sosreport` adds the ID to the archive's file name. ++ +.. The `sosreport` output provides the archive's location and checksum. The following sample output references support case ID `01234567`: ++ +---- +Your sosreport has been generated and saved in: + /host/var/tmp/sosreport-my-cluster-node-01234567-2020-05-28-eyjknxt.tar.xz <1> + +The checksum is: 382ffc167510fd71b4f12a4f40b97a4e +---- +<1> The `sosreport` archive's file path is outside of the `chroot` environment because the toolbox container mounts the host’s root directory at `/host`. + +. Provide the `sosreport` archive to Red Hat Support for analysis, using one of the following methods. ++ +* Upload the file to an existing Red Hat support case directly from an {product-title} cluster. +.. 
From within the toolbox container, run `redhat-support-tool` to attach the archive directly to an existing Red Hat support case. This example uses support case ID `01234567`: ++ +---- +# redhat-support-tool addattachment -c 01234567 /host/var/tmp/my-sosreport.tar.xz <1> +---- +<1> The toolbox container mounts the host’s root directory at `/host`. Reference the absolute path from the toolbox container's root directory, including `/host/`, when specifying files to upload through the `redhat-support-tool` command. ++ +* Upload the file to an existing Red Hat support case. +.. Concatenate the `sosreport` archive by running the `oc debug node/` command and redirect the output to a file. This command assumes you have exited the previous `oc debug` session: ++ +---- +$ oc debug node/my-cluster-node -- bash -c 'cat /host/var/tmp/sosreport-my-cluster-node-01234567-2020-05-28-eyjknxt.tar.xz' > /tmp/sosreport-my-cluster-node-01234567-2020-05-28-eyjknxt.tar.xz <1> +---- +<1> The debug container mounts the host’s root directory at `/host`. Reference the absolute path from the debug container's root directory, including `/host`, when specifying target files for concatenation. ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Transferring a `sosreport` archive from a cluster node by using `scp` is not recommended and nodes will be tainted as _accessed_. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to copy a `sosreport` archive from a node by running `scp core@..: `. +==== ++ +.. Navigate to an existing support case within link:https://access.redhat.com/support/cases/[https://access.redhat.com/support/cases/]. ++ +.. Select *Attach files* and follow the prompts to upload the file. diff --git a/modules/support-get-cluster-id.adoc b/modules/support-get-cluster-id.adoc index 09d501b3301a..7b2d02ad15ec 100644 --- a/modules/support-get-cluster-id.adoc +++ b/modules/support-get-cluster-id.adoc @@ -16,12 +16,12 @@ When providing information to Red Hat Support, it is helpful to provide the uniq ifdef::openshift-enterprise,openshift-webscale,openshift-dedicated[] * To open a support case and have your cluster ID autofilled using the web console: .. From the toolbar, navigate to *(?) Help* -> *Open Support Case*. -.. The 'Cluster ID' value is autofilled. +.. The *Cluster ID* value is autofilled. endif::[] ifdef::openshift-origin[] * To open a bug and have your cluster ID autofilled using the web console: .. From the toolbar, navigate to *(?) Help* -> *Report Bug*. -.. The 'Cluster ID' value is autofilled after you click `Submit Bug`. +.. The *Cluster ID* value is autofilled after you click `Submit Bug`. endif::[] * To manually obtain your cluster ID using the web console: diff --git a/modules/support-knowledgebase-about.adoc b/modules/support-knowledgebase-about.adoc new file mode 100644 index 000000000000..0379c2529924 --- /dev/null +++ b/modules/support-knowledgebase-about.adoc @@ -0,0 +1,8 @@ +// Module included in the following assemblies: +// +// * support/support-redhat-knowledgebase.adoc + +[id="support-knowledgebase-about_{context}"] += About the Red Hat Knowledgebase + +The link:https://access.redhat.com/knowledgebase[Red Hat Knowledgebase] provides rich content aimed at helping you make the most of Red Hat's products and technologies. 
The Red Hat Knowledgebase consists of articles, product documentation, and videos outlining best practices on installing, configuring, and using Red Hat products. In addition, you can search for solutions to known issues, each providing concise root cause descriptions and remedial steps. diff --git a/modules/support-knowledgebase-search.adoc b/modules/support-knowledgebase-search.adoc new file mode 100644 index 000000000000..f2b54e53ffd8 --- /dev/null +++ b/modules/support-knowledgebase-search.adoc @@ -0,0 +1,28 @@ +// Module included in the following assemblies: +// +// * support/support-redhat-knowledgebase.adoc + +[id="support-knowledgebase-search_{context}"] += Searching the Red Hat Knowledgebase + +In the event of an {product-title} issue, you can perform an initial search to determine if a solution already exists within the Red Hat Knowledgebase. + +.Prerequisites + +* You have a Red Hat Customer Portal account. + +.Procedure + +. Log in to the link:http://access.redhat.com[Red Hat Customer Portal]. + +. In the main Red Hat Customer Portal search field, input keywords and strings relating to the problem, including: ++ +* {product-title} components (such as *etcd*) +* Related procedure (such as *installation*) +* Warnings, error messages, and other outputs related to explicit failures + +. Click *Search*. + +. Select the *{product-title}* product filter. + +. Select the *Knowledgebase* content type filter. diff --git a/modules/support-providing-diagnostic-data-to-red-hat.adoc b/modules/support-providing-diagnostic-data-to-red-hat.adoc new file mode 100644 index 000000000000..44260cd26ba7 --- /dev/null +++ b/modules/support-providing-diagnostic-data-to-red-hat.adoc @@ -0,0 +1,78 @@ +// Module included in the following assemblies: +// +// * support/gathering-cluster-data.adoc + +[id="support-providing-diagnostic-data-to-red-hat_{context}"] += Providing diagnostic data to Red Hat Support + +When investigating {product-title} issues, Red Hat Support might ask you to upload diagnostic data to a support case. Files can be uploaded to a support case through the Red Hat Customer Portal, or from an {product-title} cluster directly by using the `redhat-support-tool` command. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have SSH access to your hosts. +* You have installed the OpenShift CLI (`oc`). +* You have a Red Hat standard or premium Subscription. +* You have a Red Hat Customer Portal account. +* You have an existing Red Hat Support case ID. + +.Procedure + +* Upload diagnostic data to an existing Red Hat support case through the Red Hat Customer Portal. +. Concatenate a diagnostic file contained on an {product-title} node by using the `oc debug node/` command and redirect the output to a file. The following example copies `/host/var/tmp/my-diagnostic-data.tar.gz` from a debug container to `/var/tmp/my-diagnostic-data.tar.gz`: ++ +---- +$ oc debug node/my-cluster-node -- bash -c 'cat /host/var/tmp/my-diagnostic-data.tar.gz' > /var/tmp/my-diagnostic-data.tar.gz <1> +---- +<1> The debug container mounts the host’s root directory at `/host`. Reference the absolute path from the debug container's root directory, including `/host`, when specifying target files for concatenation. ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. 
Transferring files from a cluster node by using `scp` is not recommended and nodes will be tainted as _accessed_. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to copy diagnostic files from a node by running `scp core@..: `. +==== ++ +. Navigate to an existing support case within link:https://access.redhat.com/support/cases/[https://access.redhat.com/support/cases/]. ++ +. Select *Attach files* and follow the prompts to upload the file. + +* Upload diagnostic data to an existing Red Hat support case directly from an {product-title} cluster. +. Obtain a list of cluster nodes: ++ +---- +$ oc get nodes +---- ++ +. Enter into a debug session on the target node. This step instantiates a debug Pod called `-debug`: ++ +---- +$ oc debug node/my-cluster-node +---- ++ +. Set `/host` as the root directory within the debug shell. The debug Pod mounts the host's root file system in `/host` within the Pod. By changing the root directory to `/host`, you can run binaries contained in the host's executable paths: ++ +---- +# chroot /host +---- ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@..` instead. +==== ++ +. Start a `toolbox` container, which includes the required binaries to run `redhat-support-tool`: ++ +---- +# toolbox +---- ++ +[NOTE] +==== +If an existing `toolbox` Pod is already running, the `toolbox` command outputs `'toolbox-' already exists. Trying to start...`. Remove the running toolbox container with `podman rm toolbox-` and spawn a new toolbox container, to avoid issues. +==== ++ +.. Run `redhat-support-tool` to attach a file from the debug Pod directly to an existing Red Hat Support case. This example uses support case ID '01234567' and example file path `/host/var/tmp/my-diagnostic-data.tar.gz`: ++ +---- +# redhat-support-tool addattachment -c 01234567 /host/var/tmp/my-diagnostic-data.tar.gz <1> +---- +<1> The toolbox container mounts the host’s root directory at `/host`. Reference the absolute path from the toolbox container's root directory, including `/host/`, when specifying files to upload through the `redhat-support-tool` command. diff --git a/modules/support-submitting-a-case.adoc b/modules/support-submitting-a-case.adoc new file mode 100644 index 000000000000..2bbe88749d43 --- /dev/null +++ b/modules/support-submitting-a-case.adoc @@ -0,0 +1,55 @@ +// Module included in the following assemblies: +// +// * support/getting-support.adoc + +[id="support-submitting-a-case_{context}"] += Submitting a support case + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have installed the OpenShift CLI (`oc`). +* You have a Red Hat Customer Portal account. +* You have a Red Hat standard or premium Subscription. + +.Procedure + +. Log in to the link:http://access.redhat.com[Red Hat Customer Portal] and select *SUPPORT CASES* -> *Open a case*. + +. 
Select the appropriate category for your issue (such as *Defect / Bug*), product (*{product-title}*), and product version (*{product-version}*, if this is not already autofilled). + +. Review the list of suggested Red Hat Knowledgebase solutions for a potential match against the problem that is being reported. If the suggested articles do not address the issue, click *Continue*. + +. Enter a concise but descriptive problem summary and further details about the symptoms being experienced, as well as your expectations. + +. Review the updated list of suggested Red Hat Knowledgebase solutions for a potential match against the problem that is being reported. The list is refined as you provide more information during the case creation process. If the suggested articles do not address the issue, click *Continue*. + +. Ensure that the account information presented is as expected, and if not, amend accordingly. + +. Check that the autofilled {product-title} Cluster ID is correct. If it is not, manually obtain your cluster ID. ++ +* To manually obtain your cluster ID using the {product-title} web console: +.. Navigate to *Home* -> *Dashboards* -> *Overview*. +.. Find the value in the *Cluster ID* field of the *Details* section. ++ +* Alternatively, it is possible to open a new support case through the {product-title} web console and have your cluster ID autofilled. +.. From the toolbar, navigate to *(?) Help* -> *Open Support Case*. +.. The *Cluster ID* value is autofilled. ++ +* To obtain your cluster ID using the OpenShift CLI (`oc`), run the following command: ++ +---- +$ oc get clusterversion -o jsonpath='{.items[].spec.clusterID}{"\n"}' +---- + +. Complete the following questions where prompted and then click *Continue*: ++ +* Where are you experiencing the behavior? What environment? +* When does the behavior occur? Frequency? Repeatedly? At certain times? +* What information can you provide around time-frames and the business impact? + +. Upload relevant diagnostic data files and click *Continue*. It is recommended to include data gathered using the `oc adm must-gather` command as a starting point, plus any issue specific data that is not collected by that command. + +. Input relevant case management details and click *Continue*. + +. Preview the case details and click *Submit*. diff --git a/modules/support.adoc b/modules/support.adoc index b8b8ea623d53..c370526ae40a 100644 --- a/modules/support.adoc +++ b/modules/support.adoc @@ -5,26 +5,11 @@ [id="support_{context}"] = Getting support -If you experience difficulty with a procedure described in this documentation, -visit the link:http://access.redhat.com[Red Hat Customer Portal]. Through the -Customer Portal, you can: +If you experience difficulty with a procedure described in this documentation, or with {product-title} in general, visit the link:http://access.redhat.com[Red Hat Customer Portal]. From the Customer Portal, you can: -* Search or browse through the Red Hat Knowledgebase of technical support -articles about Red Hat products. +* Search or browse through the Red Hat Knowledgebase of articles and solutions relating to Red Hat products. * Submit a support case to Red Hat Support. -+ -ifdef::openshift-enterprise,openshift-webscale[] - -[NOTE] -==== -When submitting a support case, it is recommended to provide the following information about your cluster to Red Hat Support to aid in troubleshooting: - -* Data gathered using the `oc adm must-gather` command -* The unique cluster ID. Navigate to *(?) 
Help* -> *Open Support Case* to have the cluster ID autofilled when you submit the case. - // TODO: xref -==== -endif::[] * Access other product documentation. // TODO: verify that these settings apply for Service Mesh and OpenShift virtualization, etc. diff --git a/modules/telemetry-showing-data-collected-from-the-cluster.adoc b/modules/telemetry-showing-data-collected-from-the-cluster.adoc index 19da13775a4b..c0c5a0e6ce17 100644 --- a/modules/telemetry-showing-data-collected-from-the-cluster.adoc +++ b/modules/telemetry-showing-data-collected-from-the-cluster.adoc @@ -10,7 +10,7 @@ You can see the cluster and components time series data captured by Telemetry. .Prerequisites * Install the OpenShift Command-line Interface (CLI), commonly known as `oc`. -* You must log in to the cluster with a user that has the `cluster-admin` role. +* You must log in to the cluster with a user that has either the `cluster-admin` role or the `cluster-monitoring-view` role. .Procedure @@ -24,6 +24,8 @@ $ oc get route prometheus-k8s -n openshift-monitoring -o jsonpath="{.spec.host}" . Enter this query in the *Expression* input box and press *Execute*: + - {__name__="up"} or {__name__="cluster_version"} or {__name__="cluster_version_available_updates"} or {__name__="cluster_operator_up"} or {__name__="cluster_operator_conditions"} or {__name__="cluster_version_payload"} or {__name__="cluster_version_payload_errors"} or {__name__="instance:etcd_object_counts:sum"} or {__name__="ALERTS",alertstate="firing"} or {__name__="code:apiserver_request_count:rate:sum"} or {__name__="kube_pod_status_ready:etcd:sum"} or {__name__="kube_pod_status_ready:image_registry:sum"} or {__name__="cluster:capacity_cpu_cores:sum"} or {__name__="cluster:capacity_memory_bytes:sum"} or {__name__="cluster:cpu_usage_cores:sum"} or {__name__="cluster:memory_usage_bytes:sum"} or {__name__="openshift:cpu_usage_cores:sum"} or {__name__="openshift:memory_usage_bytes:sum"} or {__name__="cluster:node_instance_type_count:sum"} +---- +{__name__=~"cluster:usage:.*|count:up0|count:up1|cluster_version|cluster_version_available_updates|cluster_operator_up|cluster_operator_conditions|cluster_version_payload|cluster_installer|cluster_infrastructure_provider|cluster_feature_set|instance:etcd_object_counts:sum|ALERTS|code:apiserver_request_total:rate:sum|cluster:capacity_cpu_cores:sum|cluster:capacity_memory_bytes:sum|cluster:cpu_usage_cores:sum|cluster:memory_usage_bytes:sum|openshift:cpu_usage_cores:sum|openshift:memory_usage_bytes:sum|workload:cpu_usage_cores:sum|workload:memory_usage_bytes:sum|cluster:virt_platform_nodes:sum|cluster:node_instance_type_count:sum|cnv:vmi_status_running:count|node_role_os_version_machine:cpu_capacity_cores:sum|node_role_os_version_machine:cpu_capacity_sockets:sum|subscription_sync_total|csv_succeeded|csv_abnormal|ceph_cluster_total_bytes|ceph_cluster_total_used_raw_bytes|ceph_health_status|job:ceph_osd_metadata:count|job:kube_pv:count|job:ceph_pools_iops:total|job:ceph_pools_iops_bytes:total|job:ceph_versions_running:count|job:noobaa_total_unhealthy_buckets:sum|job:noobaa_bucket_count:sum|job:noobaa_total_object_count:sum|noobaa_accounts_num|noobaa_total_usage|console_url|cluster:network_attachment_definition_instances:max|cluster:network_attachment_definition_enabled_instance_up:max|insightsclient_request_send_total|cam_app_workload_migrations|cluster:apiserver_current_inflight_requests:sum:max_over_time:2m|cluster:telemetry_selected_series:count",alertstate=~"firing|"} +---- + This query replicates the request that 
Telemetry makes against a running {product-title} cluster's Prometheus service and returns the full set of time series captured by Telemetry. diff --git a/modules/troubleshooting-openshift-install-command-issues.adoc b/modules/troubleshooting-openshift-install-command-issues.adoc new file mode 100644 index 000000000000..e5dca28c8ffa --- /dev/null +++ b/modules/troubleshooting-openshift-install-command-issues.adoc @@ -0,0 +1,16 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-installations.adoc + +[id="troubleshooting-openshift-install-command-issues_{context}"] += Troubleshooting `openshift-install` command issues + +If you experience issues running the `openshift-install` command, check the following: + +* The installation has been initiated within 24 hours of Ignition configuration file creation. The Ignition files are created when the following command is run: ++ +---- +$ ./openshift-install create ignition-configs --dir=./install_dir +---- + +* The `install-config.yaml` file is in the same directory as the installer. If an alternative installation path is declared by using the `./openshift-install --dir` option, verify that the `install-config.yaml` file exists within that directory. diff --git a/modules/understanding-oc-log-levels.adoc b/modules/understanding-oc-log-levels.adoc new file mode 100644 index 000000000000..04b1a9f695c3 --- /dev/null +++ b/modules/understanding-oc-log-levels.adoc @@ -0,0 +1,36 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/diagnosing-oc-issues.adoc + +[id="understanding-oc-log-levels_{context}"] += Understanding OpenShift CLI (`oc`) log levels + +With the OpenShift CLI (`oc`), you can create applications and manage {product-title} projects from a terminal. + +If `oc` command-specific issues arise, increase the `oc` log level to output API request, API response, and `curl` request details generated by the command. This provides a granular view of a particular `oc` command's underlying operation, which in turn might provide insight into the nature of a failure. + +`oc` log levels range from 1 to 10. The following table provides a list of `oc` log levels, along with their descriptions. + +.OpenShift CLI (`oc`) log levels +[cols="1,4",options="header"] +|=== +| Log level | Description + +| 1 to 5 +| No additional logging to stderr. + +| 6 +| Log API requests to stderr. + +| 7 +| Log API requests and headers to stderr. + +| 8 +| Log API requests, headers, and body, plus API response headers and body to stderr. + +| 9 +| Log API requests, headers, and body, API response headers and body, plus `curl` requests to stderr. + +| 10 +| Log API requests, headers, and body, API response headers and body, plus `curl` requests to stderr, in verbose detail. +|=== diff --git a/modules/understanding-pod-error-states.adoc b/modules/understanding-pod-error-states.adoc new file mode 100644 index 000000000000..3259470447e3 --- /dev/null +++ b/modules/understanding-pod-error-states.adoc @@ -0,0 +1,67 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/investigating-pod-issues.adoc + +[id="understanding-pod-error-states_{context}"] += Understanding Pod error states + +Pod failures return explicit error states that can be observed in the `status` field in the output of `oc get Pods`. Pod error states cover image, container, and container network related failures. + +The following table provides a list of Pod error states along with their descriptions. 
+ +.Pod error states +[cols="1,4",options="header"] +|=== +| Pod error state | Description + +| `ErrImagePull` +| Generic image retrieval error. + +| `ErrImagePullBackOff` +| Image retrieval failed and is backed off. + +| `ErrInvalidImageName` +| The specified image name was invalid. + +| `ErrImageInspect` +| Image inspection did not succeed. + +| `ErrImageNeverPull` +| `PullPolicy` is set to `NeverPullImage` and the target image is not present locally on the host. + +| `ErrRegistryUnavailable` +| When attempting to retrieve an image from a registry, an HTTP error was encountered. + +| `ErrContainerNotFound` +| The specified container is either not present or not managed by the kubelet, within the declared Pod. + +| `ErrRunInitContainer` +| Container initialization failed. + +| `ErrRunContainer` +| None of the Pod's containers started successfully. + +| `ErrKillContainer` +| None of the Pod's containers were killed successfully. + +| `ErrCrashLoopBackOff` +| A container has terminated. The kubelet will not attempt to restart it. + +| `ErrVerifyNonRoot` +| A container or image attempted to run with root privileges. + +| `ErrCreatePodSandbox` +| Pod sandbox creation did not succeed. + +| `ErrConfigPodSandbox` +| Pod sandbox configuration was not obtained. + +| `ErrKillPodSandbox` +| A Pod's sandbox did not stop successfully. + +| `ErrSetupNetwork` +| Network initialization failed. + +| `ErrTeardownNetwork` +| Network termination failed. +|=== diff --git a/modules/upi-installation-considerations.adoc b/modules/upi-installation-considerations.adoc new file mode 100644 index 000000000000..7cdb8a0376d9 --- /dev/null +++ b/modules/upi-installation-considerations.adoc @@ -0,0 +1,29 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-installations.adoc + +[id="upi-installation-considerations_{context}"] += User-provisioned infrastructure installation considerations + +The default installation method uses installer-provisioned infrastructure. With installer-provisioned infrastructure clusters, {product-title} manages all aspects of the cluster, including the operating system itself. If possible, use this feature to avoid having to provision and maintain the cluster infrastructure. + +You can alternatively install {product-title} {product-version} on infrastructure that you provide. If you use this installation method, follow user-provisioned infrastructure installation documentation carefully. Additionally, review the following considerations before the installation: + +* Check the link:https://access.redhat.com/ecosystem/search/#/ecosystem/Red%20Hat%20Enterprise%20Linux[{op-system-base-full} Ecosystem] to determine the level of {op-system-first} support provided for your chosen server hardware or virtualization technology. + +* Many virtualization and cloud environments require agents to be installed on guest operating systems. Ensure that these agents are installed as a containerized workload deployed through a DaemonSet. + +* Install cloud provider integration if you want to enable features such as dynamic storage, on-demand service routing, node host name to Kubernetes host name resolution, and cluster autoscaling. ++ +[NOTE] +==== +It is not possible to enable cloud provider integration in {product-title} environments that mix resources from different cloud providers, or that span multiple physical or virtual platforms. 
The node life cycle controller will not allow nodes that are external to the existing provider to be added to a cluster, and it is not possible to specify more than one cloud provider integration. +==== + +* A provider-specific Machine API implementation is required if you want to use MachineSets or autoscaling to automatically provision {product-title} cluster nodes. + +* Check whether your chosen cloud provider offers a method to inject Ignition configuration files into hosts as part of their initial deployment. If they do not, you will need to host Ignition configuration files by using an HTTP server. The steps taken to troubleshoot Ignition configuration file issues will differ depending on which of these two methods is deployed. + +* Storage needs to be manually provisioned if you want to leverage optional framework components such as the embedded container registry, ElasticSearch, or Prometheus. Default storage classes are not defined in user-provisioned infrastructure installations unless explicitly configured. + +* A load balancer is required to distribute API requests across all master nodes in highly available {product-title} environments. You can use any TCP-based load balancing solution that meets {product-title} DNS routing and port requirements. diff --git a/modules/verifying-crio-status.adoc b/modules/verifying-crio-status.adoc new file mode 100644 index 000000000000..8bba02cba65b --- /dev/null +++ b/modules/verifying-crio-status.adoc @@ -0,0 +1,45 @@ +// Module included in the following assemblies: +// +// * support/troubleshooting/troubleshooting-crio-issues.adoc + +[id="verifying-crio-status_{context}"] += Verifying CRI-O runtime engine status + +You can verify CRI-O container runtime engine status on each cluster node. + +.Prerequisites + +* You have access to the cluster as a user with the `cluster-admin` role. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. Review CRI-O status by querying the `crio` systemd service on a node, within a debug Pod. +.. Start a debug Pod for a node: ++ +---- +$ oc debug node/my-node +---- ++ +.. Set `/host` as the root directory within the debug shell. The debug Pod mounts the host's root file system in `/host` within the Pod. By changing the root directory to `/host`, you can run binaries contained in the host's executable paths: ++ +---- +# chroot /host +---- ++ +[NOTE] +==== +{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes using SSH is not recommended and nodes will be tainted as _accessed_. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@..` instead. +==== ++ +.. Check whether the `crio` systemd service is active on the node: ++ +---- +# systemctl is-active crio +---- ++ +.. 
Output a more detailed `crio.service` status summary: ++
---- +
# systemctl status crio +
---- diff --git a/support/gathering-cluster-data.adoc b/support/gathering-cluster-data.adoc index a0d804c05948..1b8b5d5969ba 100644 --- a/support/gathering-cluster-data.adoc +++ b/support/gathering-cluster-data.adoc @@ -24,3 +24,21 @@ include::modules/gathering-data-specific-features.adoc[leveloffset=+1] // Obtain your cluster identifier include::modules/support-get-cluster-id.adoc[leveloffset=+1] + +// About `sosreport` +include::modules/about-sosreport.adoc[leveloffset=+1] + +// Generating a `sosreport` archive for an {product-title} cluster node +include::modules/support-generating-a-sosreport-archive.adoc[leveloffset=+1] + +// Querying bootstrap node journal logs +include::modules/querying-bootstrap-node-journal-logs.adoc[leveloffset=+1] + +// Querying cluster node journal logs +include::modules/querying-cluster-node-journal-logs.adoc[leveloffset=+1] + +// Collecting a network trace from an {product-title} node or container +include::modules/support-collecting-network-trace.adoc[leveloffset=+1] + +// Providing diagnostic data to Red Hat Support +include::modules/support-providing-diagnostic-data-to-red-hat.adoc[leveloffset=+1] diff --git a/support/getting-support.adoc b/support/getting-support.adoc index 84b87fbb49cf..b27ba96518df 100644 --- a/support/getting-support.adoc +++ b/support/getting-support.adoc @@ -9,6 +9,9 @@ toc::[] ifdef::openshift-enterprise,openshift-webscale,openshift-dedicated,openshift-origin[] include::modules/support.adoc[leveloffset=+1] +include::modules/support-knowledgebase-about.adoc[leveloffset=+1] +include::modules/support-knowledgebase-search.adoc[leveloffset=+1] +include::modules/support-submitting-a-case.adoc[leveloffset=+1] endif::openshift-enterprise,openshift-webscale,openshift-dedicated,openshift-origin[] diff --git a/support/summarizing-cluster-specifications.adoc b/support/summarizing-cluster-specifications.adoc new file mode 100644 index 000000000000..ac73da5571e2 --- /dev/null +++ b/support/summarizing-cluster-specifications.adoc @@ -0,0 +1,9 @@ +[id="summarizing-cluster-specifications"] += Summarizing cluster specifications +include::modules/common-attributes.adoc[] +:context: summarizing-cluster-specifications + +toc::[] + +// Summarizing cluster specifications through `clusterversion` +include::modules/summarizing-cluster-specifications-through-clusterversion.adoc[leveloffset=+1] diff --git a/support/troubleshooting/diagnosing-oc-issues.adoc b/support/troubleshooting/diagnosing-oc-issues.adoc new file mode 100644 index 000000000000..cacbdd6cb6b3 --- /dev/null +++ b/support/troubleshooting/diagnosing-oc-issues.adoc @@ -0,0 +1,12 @@ +[id="diagnosing-oc-issues"] += Diagnosing OpenShift CLI (`oc`) issues +include::modules/common-attributes.adoc[] +:context: diagnosing-oc-issues + +toc::[] + +// Understanding OpenShift CLI (`oc`) log levels +include::modules/understanding-oc-log-levels.adoc[leveloffset=+1] + +// Specifying OpenShift CLI (`oc`) log levels +include::modules/specifying-oc-log-levels.adoc[leveloffset=+1] diff --git a/support/troubleshooting/investigating-pod-issues.adoc b/support/troubleshooting/investigating-pod-issues.adoc new file mode 100644 index 000000000000..e050bd788082 --- /dev/null +++ b/support/troubleshooting/investigating-pod-issues.adoc @@ -0,0 +1,30 @@ +[id="investigating-pod-issues"] += Investigating Pod issues +include::modules/common-attributes.adoc[] +:context: investigating-pod-issues + +toc::[] + +{product-title} leverages 
the Kubernetes concept of a Pod, which is one or more containers deployed together on one host. A Pod is the smallest compute unit that can be defined, deployed, and managed on {product-title} {product-version}. + +After a Pod is defined, it is assigned to run on a node until its containers exit, or until it is removed. Depending on policy and exit code, Pods are either removed after exiting or retained so that their logs can be accessed. + +The first thing to check when Pod issues arise is the Pod's status. If an explicit Pod failure has occurred, observe the Pod's error state to identify specific image, container, or Pod network issues. Focus diagnostic data collection according to the error state. Review Pod event messages, as well as Pod and container log information. Diagnose issues dynamically by accessing running Pods on the command line, or start a debug Pod with root access based on a problematic Pod's deployment configuration. + +// Understanding Pod error states +include::modules/understanding-pod-error-states.adoc[leveloffset=+1] + +// Reviewing Pod status +include::modules/reviewing-pod-status.adoc[leveloffset=+1] + +// Inspecting Pod and container logs +include::modules/inspecting-pod-and-container-logs.adoc[leveloffset=+1] + +// Accessing running Pods +include::modules/accessing-running-pods.adoc[leveloffset=+1] + +// Starting debug Pods with root access +include::modules/starting-debug-pods-with-root-access.adoc[leveloffset=+1] + +// Copying files to and from Pods and containers +include::modules/copying-files-pods-and-containers.adoc[leveloffset=+1] diff --git a/support/troubleshooting/troubleshooting-crio-issues.adoc b/support/troubleshooting/troubleshooting-crio-issues.adoc new file mode 100644 index 000000000000..6ca794a07d77 --- /dev/null +++ b/support/troubleshooting/troubleshooting-crio-issues.adoc @@ -0,0 +1,15 @@ +[id="troubleshooting-crio-issues"] += Troubleshooting CRI-O container runtime issues +include::modules/common-attributes.adoc[] +:context: troubleshooting-crio-issues + +toc::[] + +// About CRI-O container runtime engine +include::modules/about-crio.adoc[leveloffset=+1] + +// Verifying CRI-O runtime engine status +include::modules/verifying-crio-status.adoc[leveloffset=+1] + +// Gathering CRI-O journald unit logs +include::modules/gathering-crio-logs.adoc[leveloffset=+1] diff --git a/support/troubleshooting/troubleshooting-installations.adoc b/support/troubleshooting/troubleshooting-installations.adoc new file mode 100644 index 000000000000..8844b5807d31 --- /dev/null +++ b/support/troubleshooting/troubleshooting-installations.adoc @@ -0,0 +1,51 @@ +[id="troubleshooting-installations"] += Troubleshooting installations +include::modules/common-attributes.adoc[] +:context: troubleshooting-installations + +toc::[] + +// Determining where installation issues occur +include::modules/determining-where-installation-issues-occur.adoc[leveloffset=+1] + +// User-provisioned infrastructure installation considerations +include::modules/upi-installation-considerations.adoc[leveloffset=+1] + +// Checking load balancer configuration before {product-title} installation +include::modules/checking-load-balancer-configuration.adoc[leveloffset=+1] + +// Specifying {product-title} installer log levels +include::modules/specifying-openshift-installer-log-levels.adoc[leveloffset=+1] + +// Troubleshooting `openshift-install` command issues +include::modules/troubleshooting-openshift-install-command-issues.adoc[leveloffset=+1] + +// Monitoring installation progress 
+include::modules/monitoring-installation-progress.adoc[leveloffset=+1] + +// Gathering bootstrap node diagnostic data +include::modules/gathering-bootstrap-diagnostic-data.adoc[leveloffset=+1] + +// Investigating master node installation issues +include::modules/investigating-master-node-installation-issues.adoc[leveloffset=+1] + +// Investigating etcd installation issues +include::modules/investigating-etcd-installation-issues.adoc[leveloffset=+1] + +// Investigating master node kubelet and API server issues +include::modules/investigating-kubelet-api-installation-issues.adoc[leveloffset=+1] + +// Investigating worker node installation issues +include::modules/investigating-worker-node-installation-issues.adoc[leveloffset=+1] + +// Querying Operator status after installation +include::modules/querying-operator-status-after-installation.adoc[leveloffset=+1] + +// Gathering logs from a failed installation +include::modules/installation-bootstrap-gather.adoc[leveloffset=+1] + +== Additional resources + +* See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more details on {product-title} installation types and process. + +// TODO: xref to UPI recommendations for respective versions, with ifdefs. diff --git a/support/troubleshooting/troubleshooting-operator-issues.adoc b/support/troubleshooting/troubleshooting-operator-issues.adoc new file mode 100644 index 000000000000..04510ddf67fd --- /dev/null +++ b/support/troubleshooting/troubleshooting-operator-issues.adoc @@ -0,0 +1,26 @@ +[id="troubleshooting-operator-issues"] += Troubleshooting Operator issues +include::modules/common-attributes.adoc[] +:context: troubleshooting-operator-issues + +toc::[] + +Operators are a method of packaging, deploying, and managing an {product-title} application. They act like an extension of the software vendor’s engineering team, watching over an {product-title} environment and using its current state to make decisions in real time. Operators are designed to handle upgrades seamlessly, react to failures automatically, and not take shortcuts, like skipping a software backup process to save time. + +{product-title} {product-version} includes a default set of Operators that are required for proper functioning of the cluster. These default Operators are managed by the Cluster Version Operator (CVO). + +As a cluster administrator, you can install application Operators from the OperatorHub using the {product-title} web console or the CLI. You can then subscribe the Operator to one or more namespaces to make it available for developers on your cluster. Application Operators are managed by Operator Lifecycle Manager (OLM). + +If you experience Operator issues, verify Operator Subscription status. Check Operator Pod health across the cluster and gather Operator logs for diagnosis. 
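+
+As a quick first pass, commands like the following can summarize cluster Operator health and Operator Subscription status before you drill into the detailed procedures in this section. The subscription and namespace values are placeholders; substitute names from your own cluster:
+
+----
+$ oc get clusteroperators
+
+$ oc get subscriptions --all-namespaces
+
+$ oc describe subscription <subscription_name> -n <target_namespace>
+----
+
+The modules that follow describe each of these checks in more detail.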
+ +// Operator Subscription condition types +include::modules/olm-status-conditions.adoc[leveloffset=+1] + +// Viewing Operator Subscription status using the CLI +include::modules/olm-status-viewing-cli.adoc[leveloffset=+1] + +// Querying Operator Pod status +include::modules/querying-operator-pod-status.adoc[leveloffset=+1] + +// Gathering Operator logs +include::modules/gathering-operator-logs.adoc[leveloffset=+1] diff --git a/support/troubleshooting/troubleshooting-s2i.adoc b/support/troubleshooting/troubleshooting-s2i.adoc new file mode 100644 index 000000000000..2dce97a3c08b --- /dev/null +++ b/support/troubleshooting/troubleshooting-s2i.adoc @@ -0,0 +1,19 @@ +[id="troubleshooting-s2i"] += Troubleshooting the Source-to-Image process +include::modules/common-attributes.adoc[] +:context: troubleshooting-s2i + +toc::[] + +// Strategies for Source-to-Image troubleshooting +include::modules/strategies-for-s2i-troubleshooting.adoc[leveloffset=+1] + +// Gathering Source-to-Image diagnostic data +include::modules/gathering-s2i-diagnostic-data.adoc[leveloffset=+1] + +// Gathering application diagnostic data to investigate application failures +include::modules/gathering-application-diagnostic-data.adoc[leveloffset=+1] + +== Additional resources + +* See xref:../../builds/build-strategies.adoc#build-strategy-s2i_build-strategies[Source-to-Image (S2I) build] for more details about the S2I build strategy. diff --git a/support/troubleshooting/verifying-node-health.adoc b/support/troubleshooting/verifying-node-health.adoc new file mode 100644 index 000000000000..1df50aea9fd2 --- /dev/null +++ b/support/troubleshooting/verifying-node-health.adoc @@ -0,0 +1,15 @@ +[id="verifying-node-health"] += Verifying node health +include::modules/common-attributes.adoc[] +:context: verifying-node-health + +toc::[] + +// Reviewing node status, resource usage, and configuration +include::modules/reviewing-node-status-usage-and-configuration.adoc[leveloffset=+1] + +// Querying the kubelet's status on a node +include::modules/querying-kubelet-status-on-a-node.adoc[leveloffset=+1] + +// Querying node journal logs +include::modules/querying-cluster-node-journal-logs.adoc[leveloffset=+1]