-
Notifications
You must be signed in to change notification settings - Fork 192
/
Copy path06_create_cluster.sh
executable file
·133 lines (114 loc) · 4.49 KB
/
06_create_cluster.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
#!/usr/bin/env bash
# 06_create_cluster.sh
# Runs openshift-installer to deploy the bootstrap node and masters, then
# applies post-install configuration: image-registry storage, optional extra
# workers, MetalLB, virtual-media and CAPI e2e setup.
set -x
set -e
# Source order matters: later files may depend on variables and functions
# defined by earlier ones.
source logging.sh
source common.sh
source network.sh
source ocp_install_env.sh
source release_info.sh
source utils.sh
source rhcos.sh
source validation.sh
# Fail fast on a misconfigured environment before attempting a deploy
# (presumably provided by validation.sh — verify).
early_deploy_validation
# When an installer proxy is configured, export the proxy settings for the
# installer process and open the libvirt firewalld policy so VMs on the
# provisioning network can reach services running on this host.
if [[ -n "$INSTALLER_PROXY" ]]; then
  export HTTP_PROXY=${HTTP_PROXY}
  export HTTPS_PROXY=${HTTPS_PROXY}
  export NO_PROXY=${NO_PROXY}

  # Update libvirt firewalld policy to allow the VM to connect to the proxy.
  # Quote the port expansion so an empty/odd value fails loudly in firewall-cmd
  # rather than being word-split by the shell.
  sudo firewall-cmd --policy=libvirt-to-host --add-port="${INSTALLER_PROXY_PORT}/tcp"

  # Allow the bootstrap VM to talk directly to services on the bootstrap host
  sudo firewall-cmd --policy=libvirt-to-host --add-port=8000/tcp # sushy
  sudo firewall-cmd --policy=libvirt-to-host --add-port=6230-6240/udp # vbmc
  sudo firewall-cmd --policy=libvirt-to-host --add-port=123/udp # ntp

  # And NFS if used (persistent image registry is backed by an NFS export)
  if [[ "${PERSISTENT_IMAGEREG}" == true ]]; then
    sudo firewall-cmd --policy=libvirt-to-host --add-port=2049/tcp
  fi
fi
# Call openshift-installer to deploy the bootstrap node and masters
create_cluster "${OCP_DIR}"

# Kill the dnsmasq container on the host since it is performing DHCP and doesn't
# allow our pod in openshift to take over. We don't want to take down all of ironic
# as it makes cleanup "make clean" not work properly.
for name in dnsmasq ironic-inspector; do
  # Only stop a container that is actually running; grep's non-zero status on a
  # miss short-circuits the && list without tripping `set -e`.
  sudo podman ps | grep -w "$name$" && sudo podman stop "$name"
done
# Default to emptyDir for image-reg storage; with PERSISTENT_IMAGEREG=true,
# back the registry with an NFS-exported PersistentVolume instead.
if [[ "${PERSISTENT_IMAGEREG}" != true ]]; then
  oc patch configs.imageregistry.operator.openshift.io \
    cluster --type merge --patch '{"spec":{"storage":{"emptyDir":{}},"managementState":"Managed"}}'
else
  # "noac" disables NFS attribute caching so the two registry replicas see a
  # consistent view of the share.
  oc apply -f - <<EOF
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv1
spec:
  capacity:
    storage: 100Gi
  accessModes:
  - ReadWriteMany
  nfs:
    path: /opt/dev-scripts/nfsshare/1
    server: $LOCAL_REGISTRY_DNS_NAME
    readOnly: false
  mountOptions:
  - noac
EOF
  # Empty claim name lets the operator bind the default image-registry PVC.
  oc patch configs.imageregistry.operator.openshift.io \
    cluster --type merge --patch '{"spec":{"storage":{"pvc":{"claim":""}},"managementState":"Managed","replicas": 2}}'
fi
# Configure tools image registry and cluster samples operator when the local
# image stream is enabled. These are basically to run CI tests that depend on
# the tools image.
if [[ -n "${ENABLE_LOCAL_REGISTRY}" ]]; then
  add_local_certificate_as_trusted
fi
# Marketplace operators could not pull their images via internet
# and stays degraded in disconnected.
# This is the suggested way in
# https://docs.openshift.com/container-platform/4.9/operators/admin/olm-managing-custom-catalogs.html#olm-restricted-networks-operatorhub_olm-managing-custom-catalogs
case "${MIRROR_IMAGES,,}" in
  "" | false)
    # Mirroring disabled (unset, empty, or any casing of "false"):
    # leave the default catalog sources in place.
    ;;
  *)
    oc patch OperatorHub cluster --type json \
      -p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'
    ;;
esac
# Optionally register extra worker hosts and wait until the corresponding
# BareMetalHost objects are visible to the machine-api.
if [[ -n "${APPLY_EXTRA_WORKERS}" ]]; then
  if [[ ${NUM_EXTRA_WORKERS} -ne 0 && -s "${OCP_DIR}/extra_host_manifests.yaml" ]]; then
    oc apply -f "${OCP_DIR}/extra_host_manifests.yaml"
    for h in $(jq -r '.[].name' "${EXTRA_BAREMETALHOSTS_FILE}"); do
      # Poll until the BareMetalHost resource exists; stderr is suppressed so
      # the expected "not found" noise doesn't clutter the log.
      while ! oc get baremetalhost -n openshift-machine-api "$h" 2>/dev/null; do
        echo "Waiting for $h"
        sleep 5
      done
      echo "$h is successfully applied"
    done
  else
    # Misconfiguration diagnostic belongs on stderr
    echo "NUM_EXTRA_WORKERS should be set and extra_host_manifests.yaml should exist" >&2
  fi
fi

# Create a secret containing extraworkers info for the e2e tests
if [[ ${NUM_EXTRA_WORKERS} -ne 0 && -d "${OCP_DIR}/extras" ]]; then
  oc create secret generic extraworkers-secret --from-file="${OCP_DIR}/extras/" -n openshift-machine-api
fi
# Optionally configure MetalLB using images from the release payload.
if [[ -n "${ENABLE_METALLB}" ]]; then
  if [[ -z "${METALLB_IMAGE_BASE}" ]]; then
    # This can use any image in the release, as we are dropping the hash.
    # Assign before exporting so a failure of the command substitution is not
    # masked by `export` and is caught by `set -e`.
    METALLB_IMAGE_BASE=$(image_for cli | sed -e 's/@.*$//g')
    export METALLB_IMAGE_BASE
    export METALLB_IMAGE_TAG="metallb"
    export FRR_IMAGE_TAG="metallb-frr"
  fi
  pushd metallb
  ./configure_metallb.sh
  popd
fi
# Optionally serve Ironic virtual media over the external network.
if [[ -n "${ENABLE_VIRTUAL_MEDIA_VIA_EXTERNAL_NETWORK}" ]]; then
  oc patch provisioning provisioning-configuration --type merge -p "{\"spec\":{\"virtualMediaViaExternalNetwork\":true}}"
fi

# Optionally prepare the cluster-api e2e test environment.
if [[ -n "${ENABLE_CAPI_E2E}" ]]; then
  ./setup_capi_e2e.sh
fi

echo "Cluster up, you can interact with it via oc --kubeconfig ${KUBECONFIG} <command>"
echo "To avoid using the --kubeconfig flag on each command, set KUBECONFIG variable with: export KUBECONFIG=${KUBECONFIG}"