diff --git a/images/installer/root/usr/local/bin/generate b/images/installer/root/usr/local/bin/generate
index 3db7a3ee8de..e29e7ac6aab 100755
--- a/images/installer/root/usr/local/bin/generate
+++ b/images/installer/root/usr/local/bin/generate
@@ -186,7 +186,7 @@ class Host:
         if self.public_ip_addr:
             info += "openshift_public_ip=" + self.public_ip_addr + " "
         if self.hostname:
-            info += "openshift_hostname=" + self.hostname + " "
+            info += "openshift_kubelet_name_override=" + self.hostname + " "
         if self.public_hostname:
             info += "openshift_public_hostname=" + self.public_hostname
 
diff --git a/inventory/hosts.example b/inventory/hosts.example
index a0831b46fda..7f698648495 100644
--- a/inventory/hosts.example
+++ b/inventory/hosts.example
@@ -819,7 +819,7 @@ debug_level=2
 #logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
 
 # The OpenShift-Ansible installer will fail when it detects that the
-# value of openshift_hostname resolves to an IP address not bound to any local
+# value of openshift_kubelet_name_override resolves to an IP address not bound to any local
 # interfaces. This mis-configuration is problematic for any pod leveraging host
 # networking and liveness or readiness probes.
 # Setting this variable to false will override that check.
diff --git a/inventory/hosts.openstack b/inventory/hosts.openstack
index b11f662f0c9..b9aa9927b71 100644
--- a/inventory/hosts.openstack
+++ b/inventory/hosts.openstack
@@ -24,7 +24,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_pkg_version=-3.0.0.0
 
 [masters]
-jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}"
+jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}"
 
 [etcd]
 jdetiber-etcd.usersys.redhat.com
@@ -33,5 +33,5 @@ jdetiber-etcd.usersys.redhat.com
 #ose3-lb-ansible.test.example.com
 
 [nodes]
-jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_group_name="node-config-master"
-jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_group_name="node-config-compute"
+jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_node_group_name="node-config-master"
+jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_node_group_name="node-config-compute"
diff --git a/playbooks/byo/calico/legacy_upgrade.yml b/playbooks/byo/calico/legacy_upgrade.yml
index cca1550ecfd..ca8d5ff4ad5 100644
--- a/playbooks/byo/calico/legacy_upgrade.yml
+++ b/playbooks/byo/calico/legacy_upgrade.yml
@@ -100,7 +100,7 @@
     - name: Apply node label
       delegate_to: "{{ groups.oo_first_master.0 }}"
      command: >
-        {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig label node {{ openshift.node.nodename | lower }} --overwrite projectcalico.org/ds-ready=true
+        {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig label node {{ l_kubelet_node_name | lower }} --overwrite projectcalico.org/ds-ready=true
     - name: Wait for node running
       uri:
         url: http://localhost:9099/readiness
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index d2200a74b26..f9cc1602bf9 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -56,7 +56,7 @@
   tasks:
   - name: Mark node unschedulable
     oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
+      node: "{{ l_kubelet_node_name | lower }}"
       schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
     retries: 10
@@ -70,7 +70,7 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ l_kubelet_node_name | lower }}
       --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       --force --delete-local-data --ignore-daemonsets
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
@@ -94,7 +94,7 @@
 
   - name: Set node schedulability
     oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
+      node: "{{ l_kubelet_node_name | lower }}"
       schedulable: True
     delegate_to: "{{ groups.oo_first_master.0 }}"
     retries: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index c75db58d213..02dd6e506cd 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -23,7 +23,7 @@
 # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
   - name: Mark node unschedulable
     oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
+      node: "{{ l_kubelet_node_name | lower }}"
       schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
     retries: 10
@@ -33,7 +33,7 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ l_kubelet_node_name | lower }}
       --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       --force --delete-local-data --ignore-daemonsets
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index e259b5d095d..4adb2a3d5dd 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -26,7 +26,7 @@
 
   - name: Mark node unschedulable
     oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
+      node: "{{ l_kubelet_node_name | lower }}"
       schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
     retries: 10
@@ -43,7 +43,7 @@
   tasks:
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ l_kubelet_node_name | lower }}
       --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       --force --delete-local-data --ignore-daemonsets
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
diff --git a/playbooks/init/cluster_facts.yml b/playbooks/init/cluster_facts.yml
index e593523dbff..1b0ae44346a 100644
--- a/playbooks/init/cluster_facts.yml
+++ b/playbooks/init/cluster_facts.yml
@@ -28,7 +28,7 @@
     openshift_facts:
       role: common
       local_facts:
-        hostname: "{{ openshift_hostname | default(None) }}"
+        hostname: "{{ (openshift_kubelet_name_override | default(None)) if l_openshift_upgrade_in_progress else None }}"
         ip: "{{ openshift_ip | default(None) }}"
         public_hostname: "{{ openshift_public_hostname | default(None) }}"
         public_ip: "{{ openshift_public_ip | default(None) }}"
@@ -63,6 +63,9 @@
       local_facts:
         sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
         bootstrapped: "{{ openshift_is_bootstrapped }}"
+  - name: set_fact l_kubelet_node_name
+    set_fact:
+      l_kubelet_node_name: "{{ openshift_kubelet_name_override if (openshift_kubelet_name_override is defined and l_openshift_upgrade_in_progress) else openshift.node.nodename }}"
 
 - name: Initialize etcd host variables
   hosts: oo_masters_to_config
diff --git a/playbooks/init/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml
index b37e6fec4a7..ca280684b3b 100644
--- a/playbooks/init/validate_hostnames.yml
+++ b/playbooks/init/validate_hostnames.yml
@@ -10,19 +10,20 @@
     changed_when: false
     failed_when: false
 
-  - name: Validate openshift_hostname when defined
+  - name: Validate openshift_kubelet_name_override when defined
     fail:
       msg: >
         The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
         doesn't resolve to an IP address owned by this host. Please set
-        openshift_hostname variable to a hostname that when resolved on the host
+        openshift_kubelet_name_override variable to a hostname that when resolved on the host
         in question resolves to an IP address matching an interface on this host.
         This will ensure proper functionality of OpenShift networking features.
-        Inventory setting: openshift_hostname={{ openshift_hostname | default ('undefined') }}
+        Inventory setting: openshift_kubelet_name_override={{ openshift_kubelet_name_override | default ('undefined') }}
         This check can be overridden by setting openshift_hostname_check=false in
         the inventory.
         See https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-host-variables
     when:
+    - openshift_kubelet_name_override is defined
     - lookupip.stdout != '127.0.0.1'
     - lookupip.stdout not in ansible_all_ipv4_addresses
     - openshift_hostname_check | default(true) | bool
diff --git a/playbooks/openshift-glusterfs/private/add_hosts.yml b/playbooks/openshift-glusterfs/private/add_hosts.yml
index b76df3daff1..c8b95fbcb3d 100644
--- a/playbooks/openshift-glusterfs/private/add_hosts.yml
+++ b/playbooks/openshift-glusterfs/private/add_hosts.yml
@@ -2,7 +2,7 @@
 # This play runs when new gluster hosts are part of new_nodes group during
 # master or node scaleup.
 
-# Need to gather facts on glusterfs hosts to ensure we collect openshift.node.nodename
+# Need to gather facts on glusterfs hosts to ensure we collect l_kubelet_node_name
 # for topology file.
 - import_playbook: ../../init/basic_facts.yml
   vars:
diff --git a/playbooks/openshift-node/private/join.yml b/playbooks/openshift-node/private/join.yml
index aed052e88f9..f25fb2b0879 100644
--- a/playbooks/openshift-node/private/join.yml
+++ b/playbooks/openshift-node/private/join.yml
@@ -31,7 +31,7 @@
 
   - name: Find all hostnames for bootstrapping
     set_fact:
-      l_nodes_to_join: "{{ groups['oo_nodes_to_config'] | default([]) | map('extract', hostvars) | map(attribute='openshift.node.nodename') | list }}"
+      l_nodes_to_join: "{{ groups['oo_nodes_to_config'] | default([]) | map('extract', hostvars) | map(attribute='l_kubelet_node_name') | list }}"
 
   - name: Dump the bootstrap hostnames
     debug:
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 411bfb66a4b..1a5c4c1d1c1 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -36,7 +36,7 @@
     oc_obj:
       state: list
       kind: node
-      name: "{{ openshift.node.nodename | lower }}"
+      name: "{{ l_kubelet_node_name | lower }}"
     register: node_output
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_config
diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py
index c7f8bfd9462..fe434b281e8 100755
--- a/playbooks/openstack/inventory.py
+++ b/playbooks/openstack/inventory.py
@@ -97,8 +97,8 @@ def _get_hostvars(server, docker_storage_mountpoints):
     # name at all, so using a hostname here would require an internal
     # DNS which would complicate the setup and potentially introduce
     # performance issues.
-    hostvars['openshift_hostname'] = server.metadata.get(
-        'openshift_hostname', server.private_v4)
+    hostvars['openshift_kubelet_name_override'] = server.metadata.get(
+        'openshift_kubelet_name_override', server.private_v4)
     hostvars['openshift_public_hostname'] = server.name
 
     if server.metadata['host-type'] == 'cns':
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index 7867be71773..43416b9a426 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -35,7 +35,7 @@
 - name: Apply node label
   delegate_to: "{{ groups.oo_first_master.0 }}"
   command: >
-    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig label node {{ openshift.node.nodename | lower }} --overwrite projectcalico.org/ds-ready=true
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig label node {{ l_kubelet_node_name | lower }} --overwrite projectcalico.org/ds-ready=true
 
 - name: Wait for node running
   uri:
diff --git a/roles/lib_utils/action_plugins/sanity_checks.py b/roles/lib_utils/action_plugins/sanity_checks.py
index 43bcf82f6a0..b810dbb5c4f 100644
--- a/roles/lib_utils/action_plugins/sanity_checks.py
+++ b/roles/lib_utils/action_plugins/sanity_checks.py
@@ -108,6 +108,7 @@
     ('openshift_cockpit_deployer_prefix', 'openshift_cockpit_deployer_image'),
     ('openshift_cockpit_deployer_basename', 'openshift_cockpit_deployer_image'),
     ('openshift_cockpit_deployer_version', 'openshift_cockpit_deployer_image'),
+    ('openshift_hostname', 'Removed: See documentation'),
 )
 
 # TODO(michaelgugino): Remove in 3.11
@@ -241,10 +242,10 @@ def network_plugin_check(self, hostvars, host):
         raise errors.AnsibleModuleError(msg)
 
     def check_hostname_vars(self, hostvars, host):
-        """Checks to ensure openshift_hostname
+        """Checks to ensure openshift_kubelet_name_override
         and openshift_public_hostname
         conform to the proper length of 63 characters or less"""
-        for varname in ('openshift_public_hostname', 'openshift_hostname'):
+        for varname in ('openshift_public_hostname', 'openshift_kubelet_name_override'):
             var_value = self.template_var(hostvars, host, varname)
             if var_value and len(var_value) > 63:
                 msg = '{} must be 63 characters or less'.format(varname)
diff --git a/roles/openshift_control_plane/tasks/main.yml b/roles/openshift_control_plane/tasks/main.yml
index 61238d58183..2b2dd8cc0c9 100644
--- a/roles/openshift_control_plane/tasks/main.yml
+++ b/roles/openshift_control_plane/tasks/main.yml
@@ -168,7 +168,7 @@
   oc_obj:
     state: list
     kind: pod
-    name: "master-{{ item }}-{{ openshift.node.nodename | lower }}"
+    name: "master-{{ item }}-{{ l_kubelet_node_name | lower }}"
     namespace: kube-system
   register: control_plane_pods
   until:
@@ -220,7 +220,7 @@
   oc_obj:
     state: list
     kind: pod
-    name: "master-{{ item }}-{{ openshift.node.nodename | lower }}"
+    name: "master-{{ item }}-{{ l_kubelet_node_name | lower }}"
     namespace: kube-system
   register: control_plane_health
   until:
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index ddf6218e07f..0907337e49b 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -200,3 +200,4 @@ openshift_node_group_edits_crio:
   - "10m"
 
 openshift_master_manage_htpasswd: True
+l_openshift_upgrade_in_progress: False
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml
index 9d48f6f1bb1..7c095f03604 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml
@@ -35,7 +35,7 @@
   mount:
     state: mounted
     fstype: glusterfs
-    src: "{% if 'glusterfs_registry' in groups and groups['glusterfs_registry'] | length > 0 %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups and groups['glusterfs'] | length > 0 %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift_hosted_registry_storage_glusterfs_path }}"
+    src: "{% if 'glusterfs_registry' in groups and groups['glusterfs_registry'] | length > 0 %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups and groups['glusterfs'] | length > 0 %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].l_kubelet_node_name }}{% else %}{{ node }}{% endif %}:/{{ openshift_hosted_registry_storage_glusterfs_path }}"
     name: "{{ mktemp.stdout }}"
 
 - name: Set registry volume permissions
diff --git a/roles/openshift_manage_node/tasks/config.yml b/roles/openshift_manage_node/tasks/config.yml
index e5e5b3df901..8c2fda07a92 100644
--- a/roles/openshift_manage_node/tasks/config.yml
+++ b/roles/openshift_manage_node/tasks/config.yml
@@ -1,7 +1,7 @@
 ---
 - name: Set node schedulability
   oc_adm_manage_node:
-    node: "{{ openshift.node.nodename | lower }}"
+    node: "{{ l_kubelet_node_name | lower }}"
     schedulable: "{{ 'true' if openshift_schedulable | default(true) | bool else 'false' }}"
   retries: 10
   delay: 5
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index a5133ee89c3..84ac3092e9a 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -24,7 +24,7 @@
 
 - name: Wait for Node Registration
   oc_obj:
-    name: "{{ openshift.node.nodename }}"
+    name: "{{ l_kubelet_node_name | lower }}"
     kind: node
     state: list
   register: get_node
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index f96c8dfe540..55c61be3698 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -101,7 +101,7 @@
   oc_obj:
     state: list
     kind: node
-    name: "{{ openshift.node.nodename | lower }}"
+    name: "{{ l_kubelet_node_name | lower }}"
   register: node_output
   delegate_to: "{{ groups.oo_first_master.0 }}"
   until:
@@ -122,7 +122,7 @@
     oc_bin: "{{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }}"
     oc_conf: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
     node_list:
-    - "{{ openshift.node.nodename | lower }}"
+    - "{{ l_kubelet_node_name | lower }}"
   delegate_to: "{{ groups.oo_first_master.0 }}"
   register: node_upgrade_oc_csr_approve
   retries: 30
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index dcd7484de6d..e6d5037eae9 100644
--- a/roles/openshift_node/tasks/upgrade/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -51,7 +51,7 @@
     oc_bin: "{{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }}"
     oc_conf: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
     node_list:
-    - "{{ openshift.node.nodename | lower }}"
+    - "{{ l_kubelet_node_name | lower }}"
   delegate_to: "{{ groups.oo_first_master.0 }}"
   register: node_upgrade_oc_csr_approve
   retries: 30
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 470de934748..26d3d7ae02c 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -45,7 +45,7 @@ networkConfig:
 {% if openshift_set_node_ip | bool %}
 nodeIP: {{ openshift.common.ip }}
 {% endif %}
-nodeName: {{ openshift.node.nodename }}
+nodeName: {{ l_kubelet_node_name }}
 podManifestConfig:
 servingInfo:
   bindAddress: 0.0.0.0:10250
diff --git a/roles/openshift_node_group/files/sync.yaml b/roles/openshift_node_group/files/sync.yaml
index 55452b7ff32..d8948534c0c 100644
--- a/roles/openshift_node_group/files/sync.yaml
+++ b/roles/openshift_node_group/files/sync.yaml
@@ -101,18 +101,26 @@ spec:
             ) &
             break
           done
-
+          mkdir -p /etc/origin/node/tmp
           # periodically refresh both node-config.yaml and relabel the node
           while true; do
-            if ! oc extract "configmaps/${name}" -n openshift-node --to=/etc/origin/node --confirm --request-timeout=10s --config /etc/origin/node/node.kubeconfig "--token=$( cat /var/run/secrets/kubernetes.io/serviceaccount/token )" > /dev/null; then
+            if ! oc extract "configmaps/${name}" -n openshift-node --to=/etc/origin/node/tmp --confirm --request-timeout=10s --config /etc/origin/node/node.kubeconfig "--token=$( cat /var/run/secrets/kubernetes.io/serviceaccount/token )" > /dev/null; then
              echo "error: Unable to retrieve latest config for node" 2>&1
              sleep 15 &
              wait $!
              continue
            fi
+
+            KUBELET_HOSTNAME_OVERRIDE=$(cat /etc/sysconfig/KUBELET_HOSTNAME_OVERRIDE) || :
+            if ! [[ -z "$KUBELET_HOSTNAME_OVERRIDE" ]]; then
+              #Patching node-config for hostname override
+              echo "nodeName: $KUBELET_HOSTNAME_OVERRIDE" >> /etc/origin/node/tmp/node-config.yaml
+            fi
+
             # detect whether the node-config.yaml has changed, and if so trigger a restart of the kubelet.
-            md5sum /etc/origin/node/node-config.yaml > /tmp/.new
+            md5sum /etc/origin/node/tmp/node-config.yaml > /tmp/.new
             if [[ "$( cat /tmp/.old )" != "$( cat /tmp/.new )" ]]; then
+              mv /etc/origin/node/tmp/node-config.yaml /etc/origin/node/node-config.yaml
               echo "info: Configuration changed, restarting kubelet" 2>&1
               # TODO: kubelet doesn't relabel nodes, best effort for now
               # https://github.com/kubernetes/kubernetes/issues/59314
diff --git a/roles/openshift_openstack/tasks/node-configuration.yml b/roles/openshift_openstack/tasks/node-configuration.yml
index a21c3e2c06e..e06aeaf1e8a 100644
--- a/roles/openshift_openstack/tasks/node-configuration.yml
+++ b/roles/openshift_openstack/tasks/node-configuration.yml
@@ -1,6 +1,6 @@
 ---
 # NOTE(shadower): we need to do this because some of the install tasks seem to
-# ignore openshift_hostname and rely on the actual system's hostname
+# ignore openshift_kubelet_name_override and rely on the actual system's hostname
 - name: Update hostname to match the OpenStack name
   hostname:
     name: "{{ inventory_hostname }}"
diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
index 7c30ef1470f..706d3c2418e 100644
--- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
@@ -242,7 +242,7 @@ resources:
         sub-host-type: { get_param: subtype }
         openshift_node_group_name: { get_param: openshift_node_group_name }
 {% if openshift_openstack_dns_nameservers %}
-        openshift_hostname: { get_param: name }
+        openshift_kubelet_name_override: { get_param: name }
 {% endif %}
 
     scheduler_hints: { get_param: scheduler_hints }
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 7b2c88a0c80..ce8ea2cad66 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -63,7 +63,7 @@ their configuration as GlusterFS nodes:
 | Name               | Default value             | Description                             |
 |--------------------|---------------------------|-----------------------------------------|
 | glusterfs_cluster  | 1                         | The ID of the cluster this node should belong to. This is useful when a single heketi service is expected to manage multiple distinct clusters. **NOTE:** For natively-hosted clusters, all pods will be in the same OpenShift namespace
-| glusterfs_hostname | openshift.node.nodename   | A hostname (or IP address) that will be used for internal GlusterFS communication
+| glusterfs_hostname | l_kubelet_node_name       | A hostname (or IP address) that will be used for internal GlusterFS communication
 | glusterfs_ip       | openshift.common.ip       | An IP address that will be used by pods to communicate with the GlusterFS node. **NOTE:** Required for external GlusterFS nodes
 | glusterfs_zone     | 1                         | A zone number for the node. Zones are used within the cluster for determining how to distribute the bricks of GlusterFS volumes. heketi will try to spread each volumes' bricks as evenly as possible across all zones
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml
index 84adba07f91..26cca18ced7 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml
@@ -44,7 +44,7 @@
 
 - name: Unlabel any existing GlusterFS nodes
   oc_label:
-    name: "{{ hostvars[item].openshift.node.nodename }}"
+    name: "{{ hostvars[item].l_kubelet_node_name }}"
     kind: node
     state: absent
     labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/label_nodes.yml b/roles/openshift_storage_glusterfs/tasks/label_nodes.yml
index c7e1a873044..61c26a18cc0 100644
--- a/roles/openshift_storage_glusterfs/tasks/label_nodes.yml
+++ b/roles/openshift_storage_glusterfs/tasks/label_nodes.yml
@@ -1,7 +1,7 @@
 ---
 - name: Label GlusterFS nodes
   oc_label:
-    name: "{{ hostvars[item].openshift.node.nodename }}"
+    name: "{{ hostvars[item].l_kubelet_node_name }}"
     kind: node
     state: add
     labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
diff --git a/roles/openshift_storage_glusterfs/templates/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/topology.json.j2
index bdbd857a534..d392a827c19 100644
--- a/roles/openshift_storage_glusterfs/templates/topology.json.j2
+++ b/roles/openshift_storage_glusterfs/templates/topology.json.j2
@@ -20,7 +20,7 @@
             {%- if 'glusterfs_hostname' in hostvars[node] -%}
               "{{ hostvars[node].glusterfs_hostname }}"
             {%- elif 'openshift' in hostvars[node] -%}
-              "{{ hostvars[node].openshift.node.nodename }}"
+              "{{ hostvars[node].l_kubelet_node_name }}"
             {%- else -%}
               "{{ node }}"
             {%- endif -%}
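
Note on the override semantics (not part of the patch itself): the set_fact added in
playbooks/init/cluster_facts.yml and the sanity checks touched above combine so that
openshift_hostname is rejected outright, openshift_kubelet_name_override is length-checked,
and the override only takes effect while an upgrade is in progress. A minimal, self-contained
Python sketch of that combined logic follows; the function names and the standalone structure
are illustrative only and do not appear in the repository.

    # Sketch of the combined behavior, assuming plain dict hostvars.
    REMOVED_VARIABLES = (
        # Mirrors the table entry added to sanity_checks.py above.
        ('openshift_hostname', 'Removed: See documentation'),
    )

    def check_removed_vars(hostvars):
        # Fail fast when an inventory still sets a removed variable.
        for name, msg in REMOVED_VARIABLES:
            if name in hostvars:
                raise ValueError('{}: {}'.format(name, msg))

    def check_hostname_vars(hostvars):
        # Kubernetes node names are DNS labels, hence the 63-character cap
        # enforced by check_hostname_vars() in sanity_checks.py.
        for varname in ('openshift_public_hostname', 'openshift_kubelet_name_override'):
            value = hostvars.get(varname)
            if value and len(value) > 63:
                raise ValueError('{} must be 63 characters or less'.format(varname))

    def kubelet_node_name(hostvars, detected_nodename, upgrade_in_progress=False):
        # Mirrors the l_kubelet_node_name set_fact in cluster_facts.yml:
        # the override only wins during an upgrade; otherwise the detected
        # nodename (openshift.node.nodename) is used.
        override = hostvars.get('openshift_kubelet_name_override')
        if override is not None and upgrade_in_progress:
            return override
        return detected_nodename

    if __name__ == '__main__':
        check_hostname_vars({'openshift_kubelet_name_override': 'node-1.example.com'})
        print(kubelet_node_name({'openshift_kubelet_name_override': 'node-1.example.com'},
                                'node-1', upgrade_in_progress=True))
        try:
            check_removed_vars({'openshift_hostname': 'node-1.example.com'})
        except ValueError as exc:
            print('sanity check failed as expected:', exc)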