diff --git a/Makefile b/Makefile
index cea19d7c3c9c9..a8b5a08d63cde 100644
--- a/Makefile
+++ b/Makefile
@@ -69,6 +69,8 @@ prow-secrets:
 	oc create secret generic repo-management-token --from-literal=oauth=${REPO_MANAGEMENT_TOKEN} -o yaml --dry-run | oc apply -f -
 	# gce.json is used by jobs operating against GCE
 	oc create secret generic cluster-secrets-gcp --from-file=cluster/test-deploy/gcp/gce.json --from-file=cluster/test-deploy/gcp/ssh-privatekey --from-file=cluster/test-deploy/gcp/ssh-publickey --from-file=cluster/test-deploy/gcp/ops-mirror.pem -o yaml --dry-run | oc apply -f -
+	# .awscred, pull-secret, license, and the ssh keypair are used by jobs operating against AWS
+	oc create secret generic cluster-secrets-aws --from-file=cluster/test-deploy/aws/.awscred --from-file=cluster/test-deploy/aws/pull-secret --from-file=cluster/test-deploy/aws/license --from-file=cluster/test-deploy/aws/ssh-privatekey --from-file=cluster/test-deploy/aws/ssh-publickey -o yaml --dry-run | oc apply -f -
 .PHONY: prow-secrets
 
 prow-builds:
@@ -136,10 +138,12 @@ prow-services:
 .PHONY: prow-services
 
 prow-cluster-jobs:
+	oc create configmap cluster-profile-aws --from-file=cluster/test-deploy/aws/openshift.yaml -o yaml --dry-run | oc apply -f -
 	oc create configmap cluster-profile-gcp --from-file=cluster/test-deploy/gcp/vars.yaml --from-file=cluster/test-deploy/gcp/vars-origin.yaml -o yaml --dry-run | oc apply -f -
 	oc create configmap cluster-profile-gcp-ha --from-file=cluster/test-deploy/gcp/vars.yaml --from-file=cluster/test-deploy/gcp/vars-origin.yaml -o yaml --dry-run | oc apply -f -
 	oc create configmap cluster-profile-gcp-ha-static --from-file=cluster/test-deploy/gcp/vars.yaml --from-file=cluster/test-deploy/gcp/vars-origin.yaml -o yaml --dry-run | oc apply -f -
 	oc create configmap prow-job-cluster-launch-e2e --from-file=cluster/ci/config/prow/jobs/cluster-launch-e2e.yaml -o yaml --dry-run | oc apply -f -
+	oc create configmap prow-job-cluster-launch-installer-e2e --from-file=cluster/ci/config/prow/jobs/cluster-launch-installer-e2e.yaml -o yaml --dry-run | oc apply -f -
 	oc create configmap prow-job-master-sidecar --from-file=cluster/ci/config/prow/jobs/master-sidecar.yaml -o yaml --dry-run | oc apply -f -
 .PHONY: prow-cluster-jobs
 
@@ -168,6 +172,7 @@ projects: gcsweb kube-state-metrics oauth-proxy origin origin-stable origin-rele
 origin:
 	oc create configmap ci-operator-origin --from-file=projects/origin/config.json -o yaml --dry-run | oc apply -f -
+	oc create configmap ci-operator-origin-installer --from-file=projects/openshift-installer/config.json -o yaml --dry-run | oc apply -f -
 	oc create configmap ci-operator-origin-web-console-server --from-file=config.json=projects/origin/web-console-server.config.json -o yaml --dry-run | oc apply -f -
 	$(MAKE) apply WHAT=projects/origin/src-cache-origin.yaml
 .PHONY: origin
diff --git a/cluster/ci/config/prow/config.yaml b/cluster/ci/config/prow/config.yaml
index 4a1cdfaacca15..1960bb342cf22 100644
--- a/cluster/ci/config/prow/config.yaml
+++ b/cluster/ci/config/prow/config.yaml
@@ -112,6 +112,7 @@ tide:
   - openshift/node-problem-detector
   - openshift/kubernetes-autoscaler
   - openshift/openshift-restclient-java
+  - openshift/installer
   labels:
   - lgtm
   missingLabels:
@@ -1570,6 +1571,87 @@ presubmits:
       args:
       - check
 
+  openshift/installer:
+  - name: pull-ci-origin-installer-unit
+    agent: kubernetes
+    context: ci/prow/unit
+    branches:
+    - master
+    rerun_command: "/test unit"
+    always_run: true
+    trigger: "((?m)^/test( all| unit),?(\\s+|$))"
+    decorate: true
+    spec:
+      serviceAccountName: ci-operator
+      containers:
+      - name: test
+        image: ci-operator:latest
+        env:
+        - name: CONFIG_SPEC
+          valueFrom:
+            configMapKeyRef:
+              name: ci-operator-origin-installer
+              key: config.json
+        command:
+        - ci-operator
+        args:
+        - --delete-when-idle=10m
+        - --artifact-dir=$(ARTIFACTS)
+        - --dry-run=false
+        - --target=unit
+
+  - name: pull-ci-origin-installer-e2e-aws
+    agent: kubernetes
+    context: ci/prow/e2e-aws
+    branches:
+    - master
+    rerun_command: "/test e2e-aws"
+    always_run: true
+    trigger: "((?m)^/test( all| e2e-aws),?(\\s+|$))"
+    decorate: true
+    spec:
+      serviceAccountName: ci-operator
+      volumes:
+      - name: job-definition
+        configMap:
+          name: prow-job-cluster-launch-installer-e2e
+      - name: cluster-profile
+        projected:
+          sources:
+          - secret:
+              name: cluster-secrets-aws
+          - configMap:
+              name: cluster-profile-aws
+      containers:
+      - name: test
+        image: ci-operator:latest
+        volumeMounts:
+        - name: job-definition
+          mountPath: /usr/local/e2e-aws
+          subPath: cluster-launch-installer-e2e.yaml
+        - name: cluster-profile
+          mountPath: /usr/local/e2e-aws-cluster-profile
+        env:
+        - name: TEST_FOCUS
+          value: Suite:openshift/conformance/parallel
+        - name: JOB_NAME_SAFE
+          value: e2e-aws
+        - name: CLUSTER_TYPE
+          value: aws
+        - name: CONFIG_SPEC
+          valueFrom:
+            configMapKeyRef:
+              name: ci-operator-origin-installer
+              key: config.json
+        command:
+        - ci-operator
+        - --delete-when-idle=10m
+        - --dry-run=false
+        - --artifact-dir=$(ARTIFACTS)
+        - --secret-dir=/usr/local/e2e-aws-cluster-profile
+        - --template=/usr/local/e2e-aws
+        - --target=e2e-aws
+
 postsubmits:
   openshift/origin:
   - name: ami_build_origin_int_rhel_build
diff --git a/cluster/ci/config/prow/jobs/cluster-launch-installer-e2e.yaml b/cluster/ci/config/prow/jobs/cluster-launch-installer-e2e.yaml
new file mode 100644
index 0000000000000..575a04e97541e
--- /dev/null
+++ b/cluster/ci/config/prow/jobs/cluster-launch-installer-e2e.yaml
@@ -0,0 +1,292 @@
+kind: Template
+apiVersion: template.openshift.io/v1
+
+parameters:
+- name: JOB_NAME_SAFE
+  required: true
+- name: JOB_NAME_HASH
+  required: true
+- name: NAMESPACE
+  required: true
+- name: IMAGE_FORMAT
+  required: true
+- name: IMAGE_INSTALLER
+  required: true
+- name: IMAGE_TESTS
+  required: true
+- name: CLUSTER_TYPE
+  required: true
+- name: TEST_SUITE
+- name: TEST_FOCUS
+- name: TEST_SKIP
+- name: TEST_SUITE_SERIAL
+- name: TEST_FOCUS_SERIAL
+- name: TEST_SKIP_SERIAL
+
+objects:
+
+# We want the cluster to be able to access these images
+- kind: RoleBinding
+  apiVersion: authorization.openshift.io/v1
+  metadata:
+    name: ${JOB_NAME_SAFE}-image-puller
+    namespace: ${NAMESPACE}
+  roleRef:
+    name: system:image-puller
+  subjects:
+  - kind: SystemGroup
+    name: system:unauthenticated
+
+# The e2e pod spins up a cluster, runs e2e tests, and then cleans up the cluster.
+- kind: Pod
+  apiVersion: v1
+  metadata:
+    name: ${JOB_NAME_SAFE}
+    namespace: ${NAMESPACE}
+    annotations:
+      # we want to gather the teardown logs no matter what
+      ci-operator.openshift.io/wait-for-container-artifacts: teardown
+  spec:
+    restartPolicy: Never
+    activeDeadlineSeconds: 7200
+    terminationGracePeriodSeconds: 600
+    volumes:
+    - name: artifacts
+      emptyDir: {}
+    - name: shared-tmp
+      emptyDir: {}
+    - name: cluster-profile
+      secret:
+        secretName: ${JOB_NAME_SAFE}-cluster-profile
+
+    containers:
+
+    # Once admin.kubeconfig exists, executes shared tests
+    - name: test
+      image: ${IMAGE_TESTS}
+      resources:
+        requests:
+          cpu: 1
+          memory: 300Mi
+        limits:
+          cpu: 3
+          memory: 2Gi
+      volumeMounts:
+      - name: shared-tmp
+        mountPath: /tmp/shared
+      - name: cluster-profile
+        mountPath: /tmp/cluster
+      - name: artifacts
+        mountPath: /tmp/artifacts
+      env:
+      - name: HOME
+        value: /tmp/home
+      command:
+      - /bin/bash
+      - -c
+      - |
+        #!/bin/bash
+        set -euo pipefail
+
+        trap 'touch /tmp/shared/exit' EXIT
+        trap 'kill $(jobs -p); exit 0' TERM
+
+        cp "$(which oc)" /tmp/shared/
+
+        mkdir -p "${HOME}"
+
+        # wait until the setup job creates admin.kubeconfig
+        while true; do
+          if [[ -f /tmp/shared/exit ]]; then
+            echo "Another process exited" >&2
+            exit 1
+          fi
+          if [[ ! -f /tmp/shared/admin.kubeconfig ]]; then
+            sleep 15 & wait
+            continue
+          fi
+          break
+        done
+        echo "Found shared kubeconfig"
+
+        # don't let clients impact the global kubeconfig
+        cp /tmp/shared/admin.kubeconfig /tmp/admin.kubeconfig
+        export KUBECONFIG=/tmp/admin.kubeconfig
+
+        PATH=/usr/libexec/origin:$PATH
+
+        # set up cloud provider specific env vars
+        if [[ "${CLUSTER_TYPE}" == "gcp" ]]; then
+          export GOOGLE_APPLICATION_CREDENTIALS="/tmp/cluster/gce.json"
+          export KUBE_SSH_USER=cloud-user
+          mkdir -p ~/.ssh
+          cp /tmp/cluster/ssh-privatekey ~/.ssh/google_compute_engine || true
+          export PROVIDER_ARGS='-provider=gce -gce-zone=us-east1-c -gce-project=openshift-gce-devel-ci'
+        elif [[ "${CLUSTER_TYPE}" == "aws" ]]; then
+          region="$( cat /tmp/shared/cluster/terraform.tfvars | python -c 'import sys, json; print json.load(sys.stdin)["tectonic_aws_region"]' )"
+          export PROVIDER_ARGS="-provider=aws -gce-zone=${region}"
+        fi
+
+        mkdir -p /tmp/output
+        cd /tmp/output
+
+        # TODO: the test binary should really be a more structured command - most of these flags should be
+        # autodetected from the running cluster.
+        # TODO: bump nodes up to 40 again
+        set -x
+        if [[ -n "${TEST_SUITE}" || -n "${TEST_FOCUS}" ]]; then
+          ginkgo -v -noColor -nodes=30 $( which extended.test ) -- \
+            -suite "${TEST_SUITE}" -ginkgo.focus="${TEST_FOCUS}" -ginkgo.skip="${TEST_SKIP}" \
+            -e2e-output-dir /tmp/artifacts -report-dir /tmp/artifacts/junit \
+            -test.timeout=10m ${PROVIDER_ARGS-} || rc=$?
+        fi
+        if [[ -n "${TEST_SUITE_SERIAL}" || -n "${TEST_FOCUS_SERIAL}" ]]; then
+          ginkgo -v -noColor -nodes=1 $( which extended.test ) -- \
+            -suite "${TEST_SUITE_SERIAL}" -ginkgo.focus="${TEST_FOCUS_SERIAL}" -ginkgo.skip="${TEST_SKIP_SERIAL}" \
+            -e2e-output-dir /tmp/artifacts -report-dir /tmp/artifacts/junit/serial \
+            -test.timeout=20m ${PROVIDER_ARGS-} || rc=$?
+        fi
+        exit ${rc:-0}
+
+    # Runs an install
+    - name: setup
+      image: ${IMAGE_INSTALLER}
+      volumeMounts:
+      - name: shared-tmp
+        mountPath: /tmp
+      - name: cluster-profile
+        mountPath: /etc/openshift-installer
+      - name: artifacts
+        mountPath: /tmp/artifacts
+      env:
+      - name: INSTANCE_PREFIX
+        value: ${NAMESPACE}-${JOB_NAME_HASH}
+      - name: TYPE
+        value: ${CLUSTER_TYPE}
+      command:
+      - /bin/bash
+      - -c
+      - |
+        #!/bin/bash
+        set -euo pipefail
+
+        trap 'rc=$?; if [[ $rc -ne 0 ]]; then touch /tmp/exit; fi; exit $rc' EXIT
+        trap 'kill $(jobs -p); exit 0' TERM
+
+        mkdir /tmp/cluster
+        cp /etc/openshift-installer/* /tmp/cluster/
+
+        export AWS_SHARED_CREDENTIALS_FILE=/etc/openshift-installer/.awscred
+        export NAME=${INSTANCE_PREFIX}
+        (
+          export EMAIL=test@ci.openshift.io
+          export PASSWORD=$( date +%s | sha256sum | base64 | head -c 32 ; echo )
+          cat /etc/openshift-installer/openshift.yaml | envsubst > /tmp/cluster/inputs.yaml
+        )
+        set -x
+        echo "Invoking installer ..."
+
+        cd /tmp/cluster
+        tectonic init --config=inputs.yaml
+        mv -f ${NAME}/* /tmp/cluster/
+        mkdir /tmp/artifacts/installer
+        cp inputs.yaml config.yaml internal.yaml terraform.tfvars /tmp/artifacts/installer/
+
+        tectonic install --dir=. --log-level=debug
+
+        export KUBECONFIG=$(pwd)/generated/auth/kubeconfig
+        # wait until oc shows up
+        while true; do
+          if [[ -f /tmp/exit ]]; then
+            echo "Interrupted"
+            exit 1
+          fi
+          if [[ ! -f /tmp/oc ]]; then
+            echo "Waiting for oc binary to show up ..."
+            sleep 15 & wait
+            continue
+          fi
+          if ! /tmp/oc get nodes 2>/dev/null; then
+            echo "Waiting for API at $(/tmp/oc whoami --show-server) to respond ..."
+            sleep 15 & wait
+            continue
+          fi
+          break
+        done
+        cp $KUBECONFIG /tmp/admin.kubeconfig
+        echo "Copied kubeconfig, installation successful"
+
+    # Performs cleanup of all created resources
+    - name: teardown
+      image: ${IMAGE_INSTALLER}
+      volumeMounts:
+      - name: shared-tmp
+        mountPath: /tmp/shared
+      - name: cluster-profile
+        mountPath: /etc/openshift-installer
+      - name: artifacts
+        mountPath: /tmp/artifacts
+      env:
+      - name: INSTANCE_PREFIX
+        value: ${NAMESPACE}-${JOB_NAME_HASH}
+      - name: TYPE
+        value: ${CLUSTER_TYPE}
+      command:
+      - /bin/bash
+      - -c
+      - |
+        #!/bin/bash
+        function teardown() {
+          set +e
+          export PATH=$PATH:/tmp/shared
+          echo "Gathering artifacts ..."
+          export KUBECONFIG=/tmp/shared/admin.kubeconfig
+          mkdir -p /tmp/artifacts/pods /tmp/artifacts/nodes /tmp/artifacts/metrics
+
+          oc get nodes -o jsonpath --template '{range .items[*]}{.metadata.name}{"\n"}{end}' > /tmp/nodes
+          oc get pods --all-namespaces --template '{{ range .items }}{{ $name := .metadata.name }}{{ $ns := .metadata.namespace }}{{ range .spec.containers }}-n {{ $ns }} {{ $name }} -c {{ .name }}{{ "\n" }}{{ end }}{{ range .spec.initContainers }}-n {{ $ns }} {{ $name }} -c {{ .name }}{{ "\n" }}{{ end }}{{ end }}' > /tmp/containers
+          oc get nodes -o json > /tmp/artifacts/nodes.json
+          oc get events --all-namespaces -o json > /tmp/artifacts/events.json
+          oc get pods -l openshift.io/component=api --all-namespaces --template '{{ range .items }}-n {{ .metadata.namespace }} {{ .metadata.name }}{{ "\n" }}{{ end }}' > /tmp/pods-api
+
+          while IFS= read -r i; do
+            file="$( echo "$i" | cut -d ' ' -f 3 | tr -s ' ' '_' )"
+            oc exec $i -- /bin/bash -c 'oc get --raw /debug/pprof/heap --server "https://$( hostname ):8443" --config /etc/origin/master/admin.kubeconfig' > /tmp/artifacts/metrics/${file}-heap.gz
+            oc exec $i -- /bin/bash -c 'oc get --raw /metrics --server "https://$( hostname ):8443" --config /etc/origin/master/admin.kubeconfig' | gzip -c > /tmp/artifacts/metrics/${file}-api.gz
+            oc exec $i -- /bin/bash -c 'oc get --raw /debug/pprof/heap --server "https://$( hostname ):8444" --config /etc/origin/master/admin.kubeconfig' > /tmp/artifacts/metrics/${file}-controllers-heap.gz
+            oc exec $i -- /bin/bash -c 'oc get --raw /metrics --server "https://$( hostname ):8444" --config /etc/origin/master/admin.kubeconfig' | gzip -c > /tmp/artifacts/metrics/${file}-controllers.gz
+          done < /tmp/pods-api
+
+          while IFS= read -r i; do
+            file="$( echo "$i" | cut -d ' ' -f 2,3,5 | tr -s ' ' '_' )"
+            oc logs $i | gzip -c > /tmp/artifacts/pods/${file}.log.gz
+            oc logs -p $i | gzip -c > /tmp/artifacts/pods/${file}_previous.log.gz
+          done < /tmp/containers
+
+          while IFS= read -r i; do
+            mkdir -p /tmp/artifacts/nodes/$i
+            oc get --raw /api/v1/nodes/$i/proxy/metrics | gzip -c > /tmp/artifacts/metrics/node-$i.gz
+            oc get --raw /api/v1/nodes/$i/proxy/debug/pprof/heap > /tmp/artifacts/nodes/$i/heap.gz
+            oc get --raw /api/v1/nodes/$i/proxy/logs/messages | gzip -c > /tmp/artifacts/nodes/$i/messages.gz
+            oc get --raw /api/v1/nodes/$i/proxy/logs/secure | gzip -c > /tmp/artifacts/nodes/$i/secure.gz
+            oc get --raw /api/v1/nodes/$i/proxy/logs/audit | gzip -c > /tmp/artifacts/nodes/$i/audit.gz
+            oc get --raw /api/v1/nodes/$i/proxy/logs/journal | sed -e 's|.*href="\(.*\)".*|\1|;t;d' > /tmp/journals
+            while IFS= read -r j; do
+              oc get --raw /api/v1/nodes/$i/proxy/logs/journal/${j}system.journal | gzip -c > /tmp/artifacts/nodes/$i/journal.gz
+            done < /tmp/journals
+          done < /tmp/nodes
+
+          echo "Deprovisioning cluster ..."
+          export AWS_SHARED_CREDENTIALS_FILE=/etc/openshift-installer/.awscred
+          cd /tmp/shared/cluster
+          set -e
+          tectonic destroy --dir=. --log-level=debug
+        }
+
+        trap 'teardown' EXIT
+        trap 'kill $(jobs -p); exit 0' TERM
+
+        for i in `seq 1 120`; do
+          if [[ -f /tmp/shared/exit ]]; then
+            exit 0
+          fi
+          sleep 60 & wait
+        done
diff --git a/cluster/ci/config/prow/plugins.yaml b/cluster/ci/config/prow/plugins.yaml
index 2a9aac40d9f79..05bdffd080414 100644
--- a/cluster/ci/config/prow/plugins.yaml
+++ b/cluster/ci/config/prow/plugins.yaml
@@ -210,3 +210,6 @@ plugins:
 
   openshift/autoheal:
   - trigger
+
+  openshift/installer:
+  - trigger
diff --git a/cluster/test-deploy/aws/.gitignore b/cluster/test-deploy/aws/.gitignore
new file mode 100644
index 0000000000000..99efbf7fc6df1
--- /dev/null
+++ b/cluster/test-deploy/aws/.gitignore
@@ -0,0 +1,6 @@
+*
+!.type
+!.gitignore
+!vars*.yaml
+!openshift.yaml
+!bootstrap-script.sh
diff --git a/cluster/test-deploy/aws/.type b/cluster/test-deploy/aws/.type
new file mode 100644
index 0000000000000..0eb8bf97e53e0
--- /dev/null
+++ b/cluster/test-deploy/aws/.type
@@ -0,0 +1 @@
+aws
\ No newline at end of file
diff --git a/cluster/test-deploy/aws/openshift.yaml b/cluster/test-deploy/aws/openshift.yaml
new file mode 100644
index 0000000000000..81084d78f44fa
--- /dev/null
+++ b/cluster/test-deploy/aws/openshift.yaml
@@ -0,0 +1,312 @@
+admin:
+  email: "${EMAIL}"
+  password: "${PASSWORD}"
+aws:
+  # (optional) Unique name under which the Amazon S3 bucket will be created. The bucket name must start with a lowercase letter or number and is limited to 63 characters.
+  # The Tectonic Installer uses the bucket to store tectonic assets and kubeconfig.
+  # If the name is not provided, the installer will construct it from "name", the current AWS region, and "baseDomain".
+  # assetsS3BucketName:
+
+  # (optional) Extra AWS tags to be applied to created autoscaling group resources.
+  # This is a list of maps having the keys `key`, `value` and `propagate_at_launch`.
+  #
+  # Example: `[ { key = "foo", value = "bar", propagate_at_launch = true } ]`
+  # autoScalingGroupExtraTags:
+
+  # (optional) AMI override for all nodes. Example: `ami-foobar123`.
+  # ec2AMIOverride:
+
+  etcd:
+    # Instance size for the etcd node(s). Example: `t2.medium`. Read the [etcd recommended hardware](https://coreos.com/etcd/docs/latest/op-guide/hardware.html) guide for best performance.
+    ec2Type: t2.medium
+
+    # (optional) List of additional security group IDs for etcd nodes.
+    #
+    # Example: `["sg-51530134", "sg-b253d7cc"]`
+    # extraSGIDs:
+
+    # (optional) Name of IAM role to use for the instance profiles of etcd nodes.
+    # The name is also the last part of a role's ARN.
+    #
+    # Example:
+    #  * Role ARN  = arn:aws:iam::123456789012:role/tectonic-installer
+    #  * Role Name = tectonic-installer
+    # iamRoleName:
+
+    rootVolume:
+      # The amount of provisioned IOPS for the root block device of etcd nodes.
+      # Ignored if the volume type is not io1.
+      iops: 100
+
+      # The size of the volume in gigabytes for the root block device of etcd nodes.
+      size: 30
+
+      # The type of volume for the root block device of etcd nodes.
+      type: gp2
+
+  external:
+    # (optional) List of subnet IDs within an existing VPC to deploy master nodes into.
+    # Required to use an existing VPC and the list must match the AZ count.
+    #
+    # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]`
+    # masterSubnetIDs:
+
+    # (optional) If set, the given Route53 zone ID will be used as the internal (private) zone.
+    # This zone will be used to create etcd DNS records as well as internal API and internal Ingress records.
+    # If set, no additional private zone will be created.
+    #
+    # Example: `"Z1ILINNUJGTAO1"`
+    # privateZone:
+
+    # (optional) ID of an existing VPC to launch nodes into.
+    # If unset, a new VPC is created.
+    #
+    # Example: `vpc-123456`
+    # vpcID:
+
+    # (optional) List of subnet IDs within an existing VPC to deploy worker nodes into.
+    # Required to use an existing VPC and the list must match the AZ count.
+    #
+    # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]`
+    # workerSubnetIDs:
+
+  # (optional) Extra AWS tags to be applied to created resources.
+  #
+  # Example: `{ "key" = "value", "foo" = "bar" }`
+  # extraTags:
+
+  # (optional) Name of IAM role to use to access AWS in order to deploy the Tectonic Cluster.
+  # The name is also the full role's ARN.
+  #
+  # Example:
+  #  * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer
+  # installerRole:
+
+  master:
+    # (optional) This configures master availability zones and their corresponding subnet CIDRs directly.
+    #
+    # Example:
+    # `{ eu-west-1a = "10.0.0.0/20", eu-west-1b = "10.0.16.0/20" }`
+    # customSubnets:
+
+    # Instance size for the master node(s). Example: `t2.medium`.
+    ec2Type: t2.medium
+
+    # (optional) List of additional security group IDs for master nodes.
+    #
+    # Example: `["sg-51530134", "sg-b253d7cc"]`
+    # extraSGIDs:
+
+    # (optional) Name of IAM role to use for the instance profiles of master nodes.
+    # The name is also the last part of a role's ARN.
+    #
+    # Example:
+    #  * Role ARN  = arn:aws:iam::123456789012:role/tectonic-installer
+    #  * Role Name = tectonic-installer
+    # iamRoleName:
+
+    rootVolume:
+      # The amount of provisioned IOPS for the root block device of master nodes.
+      # Ignored if the volume type is not io1.
+      iops: 100
+
+      # The size of the volume in gigabytes for the root block device of master nodes.
+      size: 30
+
+      # The type of volume for the root block device of master nodes.
+      type: gp2
+
+  # (optional) If set to true, create private-facing ingress resources (ELB, A-records).
+  # If set to false, no private-facing ingress resources will be provisioned and all DNS records will be created in the public Route53 zone.
+  # privateEndpoints: true
+
+  # (optional) This declares the AWS credentials profile to use.
+  # profile: default
+
+  # (optional) If set to true, create public-facing ingress resources (ELB, A-records).
+  # If set to false, no public-facing ingress resources will be created.
+  publicEndpoints: true
+
+  # The target AWS region for the cluster.
+  region: us-east-1
+
+  # Name of an SSH key located within the AWS region. Example: coreos-user.
+  sshKey: libra
+
+  # Block of IP addresses used by the VPC.
+  # This should not overlap with any other networks, such as a private datacenter connected via Direct Connect.
+  vpcCIDRBlock: 10.0.0.0/16
+
+  worker:
+    # (optional) This configures worker availability zones and their corresponding subnet CIDRs directly.
+    #
+    # Example: `{ eu-west-1a = "10.0.64.0/20", eu-west-1b = "10.0.80.0/20" }`
+    # customSubnets:
+
+    # Instance size for the worker node(s). Example: `t2.medium`.
+    ec2Type: t2.medium
+
+    # (optional) List of additional security group IDs for worker nodes.
+    #
+    # Example: `["sg-51530134", "sg-b253d7cc"]`
+    # extraSGIDs:
+
+    # (optional) Name of IAM role to use for the instance profiles of worker nodes.
+    # The name is also the last part of a role's ARN.
+    #
+    # Example:
+    #  * Role ARN  = arn:aws:iam::123456789012:role/tectonic-installer
+    #  * Role Name = tectonic-installer
+    # iamRoleName:
+
+    # (optional) List of ELBs to attach all worker instances to.
+    # This is useful for exposing NodePort services via load-balancers managed separately from the cluster.
+    #
+    # Example:
+    #  * `["ingress-nginx"]`
+    # loadBalancers:
+
+    rootVolume:
+      # The amount of provisioned IOPS for the root block device of worker nodes.
+      # Ignored if the volume type is not io1.
+      iops: 100
+
+      # The size of the volume in gigabytes for the root block device of worker nodes.
+      size: 30
+
+      # The type of volume for the root block device of worker nodes.
+      type: gp2
+
+# The base DNS domain of the cluster. It must NOT contain a trailing period. Some
+# DNS providers will automatically add this if necessary.
+#
+# Example: `openstack.dev.coreos.systems`.
+#
+# Note: This field MUST be set manually prior to creating the cluster.
+# This applies only to cloud platforms.
+#
+# [Azure-specific NOTE]
+# To use Azure-provided DNS, `baseDomain` should be set to `""`.
+# If using DNS records, ensure that `baseDomain` is set to a properly configured external DNS zone.
+# Instructions for configuring delegated domains for Azure DNS can be found here: https://docs.microsoft.com/en-us/azure/dns/dns-delegate-domain-azure-dns
+baseDomain: origin-ci-int-aws.dev.rhcloud.com
+
+ca:
+  # (optional) The content of the PEM-encoded CA certificate, used to generate Tectonic Console's server certificate.
+  # If left blank, a CA certificate will be automatically generated.
+  # cert:
+
+  # (optional) The content of the PEM-encoded CA key, used to generate Tectonic Console's server certificate.
+  # This field is mandatory if `ca_cert` is set.
+  # key:
+
+  # (optional) The algorithm used to generate ca_key.
+  # The default value is currently recommended.
+  # This field is mandatory if `ca_cert` is set.
+  # keyAlg: RSA
+
+containerLinux:
+  # (optional) The Container Linux update channel.
+  #
+  # Examples: `stable`, `beta`, `alpha`
+  # channel: stable
+
+  # The Container Linux version to use. Set to `latest` to select the latest available version for the selected update channel.
+  #
+  # Examples: `latest`, `1465.6.0`
+  version: latest
+
+  # (optional) A list of PEM encoded CA files that will be installed in /etc/ssl/certs on etcd, master, and worker nodes.
+  # customCAPEMList:
+
+etcd:
+  # The name of the node pool(s) to use for etcd nodes
+  nodePools:
+  - etcd
+
+iscsi:
+  # (optional) Start iscsid.service to enable iscsi volume attachment.
+  # enabled: false
+
+# The path to the tectonic license file.
+# You can download the Tectonic license file from your Account overview page at [1].
+#
+# [1] https://account.coreos.com/overview
+licensePath: license
+
+master:
+  # The name of the node pool(s) to use for master nodes
+  nodePools:
+  - master
+
+# The name of the cluster.
+# If used in a cloud environment, this will be prepended to `baseDomain`, resulting in the URL to the Tectonic console.
+#
+# Note: This field MUST be set manually prior to creating the cluster.
+# Warning: Special characters in the name like '.' may cause errors on OpenStack platforms due to resource name constraints.
+name: ${NAME}
+
+networking:
+  # (optional) This declares the MTU used by Calico.
+  # mtu:
+
+  # This declares the IP range to assign Kubernetes pod IPs in CIDR notation.
+  podCIDR: 10.2.0.0/16
+
+  # This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation.
+  # The maximum size of this IP range is /12.
+  serviceCIDR: 10.3.0.0/16
+
+  # (optional) Configures the network to be used in Tectonic. One of the following values can be used:
+  #
+  # - "flannel": enables overlay networking only. This is implemented by flannel using VXLAN.
+  #
+  # - "canal": enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico.
+  #
+  # - "calico-ipip": [ALPHA] enables BGP based networking. Routing and network policy are implemented by Calico. Note: this has been tested on bare-metal installations only.
+  #
+  # - "none": disables the installation of any Pod level networking layer provided by Tectonic. By setting this value, users are expected to deploy their own solution to enable network connectivity for Pods and Services.
+  # type: canal
+
+nodePools:
+  # The number of etcd nodes to be created.
+  # If set to zero, the count of etcd nodes will be determined automatically.
+  #
+  # Note: This is not supported on bare metal.
+- count: 3
+  name: etcd
+
+  # The number of master nodes to be created.
+  # This applies only to cloud platforms.
+- count: 1
+  name: master
+
+  # The number of worker nodes to be created.
+  # This applies only to cloud platforms.
+- count: 3
+  name: worker
+
+# The platform used for deploying.
+platform: AWS
+
+# The path to the pull secret file in JSON format.
+# This is known as a "Docker pull secret" as produced by the docker login [1] command.
+# A sample JSON content is shown in [2].
+# You can download the pull secret from your Account overview page at [3].
+#
+# [1] https://docs.docker.com/engine/reference/commandline/login/
+#
+# [2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup
+#
+# [3] https://account.coreos.com/overview
+pullSecretPath: pull-secret
+
+# Validity period of the self-signed certificates (in hours).
+# Default is 3 years.
+# This setting is ignored if user-provided certificates are used.
+tlsValidityPeriod: 26280
+
+worker:
+  # The name of the node pool(s) to use for workers
+  nodePools:
+  - worker
\ No newline at end of file
diff --git a/projects/kubernetes/autoscaler.config.json b/projects/kubernetes/autoscaler.config.json
index 27dafc4128b84..5d6811a22f78f 100644
--- a/projects/kubernetes/autoscaler.config.json
+++ b/projects/kubernetes/autoscaler.config.json
@@ -38,7 +38,7 @@
   "tests": [
     {
       "as": "unit",
-      "from": "test-bin",
+      "from": "src",
       "commands": "ARTIFACT_DIR=/tmp/artifacts JUNIT_REPORT=1 hack/test-go.sh",
       "artifact_dir": "/tmp/artifacts"
     }
@@ -48,6 +48,18 @@
     "*": {
       "requests": { "cpu": "100m", "memory": "200Mi" },
       "limits": { "cpu": "2", "memory": "4Gi" }
+    },
+    "bin": {
+      "requests": { "cpu": "2", "memory": "4Gi" },
+      "limits": { "cpu": "5", "memory": "5Gi" }
+    },
+    "test-bin": {
+      "requests": { "cpu": "2", "memory": "4Gi" },
+      "limits": { "cpu": "5", "memory": "5Gi" }
+    },
+    "unit": {
+      "requests": { "cpu": "2", "memory": "4Gi" },
+      "limits": { "cpu": "5", "memory": "5Gi" }
     }
   }
 }
\ No newline at end of file
diff --git a/projects/origin/web-console.config.json b/projects/origin/web-console.config.json
index 661f2cf0f8938..c573bdd594ac2 100644
--- a/projects/origin/web-console.config.json
+++ b/projects/origin/web-console.config.json
@@ -6,7 +6,7 @@
     "tag": "",
     "tag_overrides": {}
   },
-  "base_rpm_images": {
+  "base_images": {
     "base": {
       "cluster": "https://api.ci.openshift.org",
       "namespace": "openshift",