From 8cd8d27e9c0c6c359e292ca3496c67918034c47c Mon Sep 17 00:00:00 2001
From: Viren Baraiya
Date: Thu, 28 Sep 2023 15:09:51 -0700
Subject: [PATCH] Docker updates (#3790)

* Add support for JDK17 in the docker containers

* Docker compose for redis, postgres and mysql with Elasticsearch7

co-author: @manan164
---
 docker/README.md                              | 52 ++++++++++++-
 docker/docker-compose-dynomite.yaml           | 31 --------
 ...compose.yaml => docker-compose-mysql.yaml} | 67 ++++++++---------
 docker/docker-compose-postgres.yaml           | 44 ++++-------
 docker/docker-compose-prometheus.yaml         | 20 -----
 docker/docker-compose.yaml                    | 46 ++++--------
 docker/grpc/Makefile                          | 18 -----
 docker/server/Dockerfile                      | 54 +++++++++++---
 docker/server/README.md                       | 13 ----
 docker/server/bin/startup.sh                  | 10 ++-
 docker/server/config/config-local.properties  | 36 ---------
 .../config/config-mysql-grpc.properties       | 39 ----------
 docker/server/config/config-mysql.properties  | 23 +++---
 .../server/config/config-postgres.properties  | 24 +++---
 docker/server/config/config-redis.properties  | 25 +++++++
 docker/server/config/config.properties        | 74 ++++++++-----------
 docker/server/config/log4j.properties         |  1 +
 docker/server/config/redis.conf               |  1 +
 docker/server/nginx/nginx.conf                | 50 +++++++++++++
 docker/serverAndUI/Dockerfile                 | 63 ----------------
 docker/serverAndUI/README.md                  | 10 ---
 docker/serverAndUI/bin/startup.sh             | 36 ---------
 .../config/config-local.properties            | 33 ---------
 docker/serverAndUI/config/config.properties   | 35 ---------
 docker/serverAndUI/nginx/nginx.conf           | 20 -----
 docker/ui/Dockerfile                          | 14 ++--
 26 files changed, 301 insertions(+), 538 deletions(-)
 delete mode 100644 docker/docker-compose-dynomite.yaml
 rename docker/{grpc/docker-compose.yaml => docker-compose-mysql.yaml} (60%)
 delete mode 100644 docker/docker-compose-prometheus.yaml
 delete mode 100644 docker/grpc/Makefile
 delete mode 100644 docker/server/README.md
 delete mode 100755 docker/server/config/config-local.properties
 delete mode 100755 docker/server/config/config-mysql-grpc.properties
 create mode 100755 docker/server/config/config-redis.properties
 create mode 100644 docker/server/config/redis.conf
 create mode 100644 docker/server/nginx/nginx.conf
 delete mode 100644 docker/serverAndUI/Dockerfile
 delete mode 100644 docker/serverAndUI/README.md
 delete mode 100755 docker/serverAndUI/bin/startup.sh
 delete mode 100755 docker/serverAndUI/config/config-local.properties
 delete mode 100755 docker/serverAndUI/config/config.properties
 delete mode 100644 docker/serverAndUI/nginx/nginx.conf

diff --git a/docker/README.md b/docker/README.md
index 538ebae353..b45b2d9bc6 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1 +1,51 @@
-[Docker Instructions](/docs/docs/gettingstarted/docker.md)
\ No newline at end of file
+
+# Conductor Docker Builds
+
+## Pre-built Docker images
+
+Conductor server with support for the following backends:
+1. Redis
+2. Postgres
+3. MySQL
+4. Cassandra
+
+### Dockerfile for Server and UI
+
+[Docker image source for the server with UI](server/Dockerfile)
+
+### Configuration Guide for Conductor Server
+Conductor uses a persistent store for managing state.
+The choice of backend is flexible and is configured at runtime using the `conductor.db.type` property.
+
+Refer to the table below for the supported backends and the configuration required to enable each of them.
+
+> [!IMPORTANT]
+>
+> See [config.properties](server/config/config.properties) for the required properties for each of the backends.
+>
+> | Backend   | Property                           |
+> |-----------|------------------------------------|
+> | postgres  | conductor.db.type=postgres         |
+> | redis     | conductor.db.type=redis_standalone |
+> | mysql     | conductor.db.type=mysql            |
+> | cassandra | conductor.db.type=cassandra        |
+>
+
+Conductor uses Elasticsearch for indexing workflow data.
+Currently, Elasticsearch 6 and 7 are supported.
+
+We welcome community contributions for other indexing backends.
+
+**Note:** The Docker images use Elasticsearch 7.
+
+## Helm Charts
+TODO: Link to the helm charts
+
+## Run Docker Compose Locally
+### Use docker-compose to bring up the local Conductor server
+
+| Docker Compose                                                | Description                |
+|---------------------------------------------------------------|----------------------------|
+| [docker-compose.yaml](docker-compose.yaml)                     | Redis + Elasticsearch 7    |
+| [docker-compose-postgres.yaml](docker-compose-postgres.yaml)   | Postgres + Elasticsearch 7 |
+| [docker-compose-mysql.yaml](docker-compose-mysql.yaml)         | MySQL + Elasticsearch 7    |
diff --git a/docker/docker-compose-dynomite.yaml b/docker/docker-compose-dynomite.yaml
deleted file mode 100644
index 99e15af930..0000000000
--- a/docker/docker-compose-dynomite.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-version: '2.3'
-
-services:
-  conductor-server:
-    environment:
-      - CONFIG_PROP=config.properties
-    links:
-      - dynomite:dyno1
-    depends_on:
-      dynomite:
-        condition: service_healthy
-
-  dynomite:
-    image: v1r3n/dynomite
-    networks:
-      - internal
-    ports:
-      - 8102:8102
-    healthcheck:
-      test: timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/8102'
-      interval: 5s
-      timeout: 5s
-      retries: 12
-    logging:
-      driver: "json-file"
-      options:
-        max-size: "1k"
-        max-file: "3"
-
-networks:
-  internal:
diff --git a/docker/grpc/docker-compose.yaml b/docker/docker-compose-mysql.yaml
similarity index 60%
rename from docker/grpc/docker-compose.yaml
rename to docker/docker-compose-mysql.yaml
index 40a75625b7..5c587e3218 100644
--- a/docker/grpc/docker-compose.yaml
+++ b/docker/docker-compose-mysql.yaml
@@ -4,43 +4,41 @@ services:
 
   conductor-server:
     environment:
-      - CONFIG_PROP=config-mysql-grpc.properties
+      - CONFIG_PROP=config-mysql.properties
     image: conductor:server
+    container_name: conductor-server
     build:
-      context: ../../
+      context: ../
       dockerfile: docker/server/Dockerfile
     networks:
       - internal
     ports:
       - 8080:8080
-      - 8090:8090
+      - 5000:5000
+    healthcheck:
+      test: [ "CMD", "curl","-I" ,"-XGET", "http://localhost:8080/health" ]
+      interval: 60s
+      timeout: 30s
+      retries: 12
     links:
-      - elasticsearch:es
-      - redis:rs
+      - conductor-elasticsearch:es
+      - conductor-mysql:mysql
+      - conductor-redis:rs
     depends_on:
-      elasticsearch:
+      conductor-elasticsearch:
         condition: service_healthy
-      mysql:
+      conductor-mysql:
         condition: service_healthy
-      redis:
+      conductor-redis:
         condition: service_healthy
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "1k"
+        max-file: "3"
 
-  conductor-ui:
-    environment:
-      - WF_SERVER=http://conductor-server:8080
-    image: conductor:ui
-    build:
-      context: ../../
-      dockerfile: docker/ui/Dockerfile
-    networks:
-      - internal
-    ports:
-      - 5000:5000
-    depends_on:
-      - conductor-server
-
-  mysql:
-    image: mysql:5.7
+  conductor-mysql:
+    image: mysql:latest
     environment:
       MYSQL_ROOT_PASSWORD: 12345
       MYSQL_DATABASE: conductor
@@ -60,31 +58,31 @@
       timeout: 5s
       retries: 12
 
-  redis:
+  conductor-redis:
     image: redis:6.2.3-alpine
     volumes:
       - ./redis.conf:/usr/local/etc/redis/redis.conf
     networks:
       - internal
     ports:
-      - 6379:6379
+      - 7379:6379
     healthcheck:
      test: [ "CMD", 
"redis-cli","ping" ] - elasticsearch: - image: elasticsearch:6.8.15 + conductor-elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.11 environment: - "ES_JAVA_OPTS=-Xms512m -Xmx1024m" - - transport.host=0.0.0.0 - - discovery.type=single-node - xpack.security.enabled=false + - discovery.type=single-node + volumes: + - esdata-conductor:/usr/share/elasticsearch/data networks: - internal ports: - - 9200:9200 - - 9300:9300 + - 9201:9200 healthcheck: - test: timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/9300' + test: curl http://localhost:9200/_cluster/health -o /dev/null interval: 5s timeout: 5s retries: 12 @@ -96,6 +94,9 @@ services: volumes: conductor_mysql: + driver: local + esdata-conductor: + driver: local networks: internal: diff --git a/docker/docker-compose-postgres.yaml b/docker/docker-compose-postgres.yaml index 568eed95c7..12a6b70465 100644 --- a/docker/docker-compose-postgres.yaml +++ b/docker/docker-compose-postgres.yaml @@ -13,21 +13,19 @@ services: - internal ports: - 8080:8080 + - 5000:5000 healthcheck: test: [ "CMD", "curl","-I" ,"-XGET", "http://localhost:8080/health" ] interval: 60s timeout: 30s retries: 12 links: - - elasticsearch:es - - redis:rs - - postgres:postgresdb + - conductor-elasticsearch:es + - conductor-postgres:postgresdb depends_on: - elasticsearch: + conductor-elasticsearch: condition: service_healthy - redis: - condition: service_healthy - postgres: + conductor-postgres: condition: service_healthy logging: driver: "json-file" @@ -35,7 +33,7 @@ services: max-size: "1k" max-file: "3" - postgres: + conductor-postgres: image: postgres environment: - POSTGRES_USER=conductor @@ -45,7 +43,7 @@ services: networks: - internal ports: - - 5432:5432 + - 6432:5432 healthcheck: test: timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/5432' interval: 5s @@ -57,32 +55,20 @@ services: max-size: "1k" max-file: "3" - redis: - image: redis:6.2.3-alpine - volumes: - - ./redis.conf:/usr/local/etc/redis/redis.conf - networks: - - internal - ports: - - 6379:6379 - healthcheck: - test: [ "CMD", "redis-cli","ping" ] - - elasticsearch: - image: elasticsearch:6.8.15 - container_name: elasticsearch + conductor-elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.11 environment: - "ES_JAVA_OPTS=-Xms512m -Xmx1024m" - - transport.host=0.0.0.0 - - discovery.type=single-node - xpack.security.enabled=false + - discovery.type=single-node + volumes: + - esdata-conductor:/usr/share/elasticsearch/data networks: - internal ports: - - 9200:9200 - - 9300:9300 + - 9201:9200 healthcheck: - test: wget http://localhost:9200/ -O /dev/null + test: curl http://localhost:9200/_cluster/health -o /dev/null interval: 5s timeout: 5s retries: 12 @@ -95,6 +81,8 @@ services: volumes: pgdata-conductor: driver: local + esdata-conductor: + driver: local networks: internal: diff --git a/docker/docker-compose-prometheus.yaml b/docker/docker-compose-prometheus.yaml deleted file mode 100644 index 10f8d80e40..0000000000 --- a/docker/docker-compose-prometheus.yaml +++ /dev/null @@ -1,20 +0,0 @@ -version: '3' - -services: - - prometheus: - image: prom/prometheus - volumes: - - ./prometheus/:/etc/prometheus/ - command: - - '--config.file=/etc/prometheus/prometheus.yml' - ports: - - 9090:9090 - external_links: - - conductor-server:conductor-server - networks: - - internal - restart: always - -networks: - internal: \ No newline at end of file diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 91cb3d4610..7c32c1437c 100644 --- 
a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -3,7 +3,7 @@ version: '2.3' services: conductor-server: environment: - - CONFIG_PROP=config-local.properties + - CONFIG_PROP=config-redis.properties image: conductor:server container_name: conductor-server build: @@ -13,18 +13,19 @@ services: - internal ports: - 8080:8080 + - 5000:5000 healthcheck: test: ["CMD", "curl","-I" ,"-XGET", "http://localhost:8080/health"] interval: 60s timeout: 30s retries: 12 links: - - elasticsearch:es - - redis:rs + - conductor-elasticsearch:es + - conductor-redis:rs depends_on: - elasticsearch: + conductor-elasticsearch: condition: service_healthy - redis: + conductor-redis: condition: service_healthy logging: driver: "json-file" @@ -32,50 +33,31 @@ services: max-size: "1k" max-file: "3" - conductor-ui: - environment: - - WF_SERVER=http://conductor-server:8080 - image: conductor:ui - container_name: conductor-ui - build: - context: ../ - dockerfile: docker/ui/Dockerfile - networks: - - internal - ports: - - 5000:5000 - links: - - conductor-server - stdin_open: true - - redis: + conductor-redis: image: redis:6.2.3-alpine volumes: - - ./redis.conf:/usr/local/etc/redis/redis.conf + - ../server/config/redis.conf:/usr/local/etc/redis/redis.conf networks: - internal ports: - - 6379:6379 + - 7379:6379 healthcheck: test: [ "CMD", "redis-cli","ping" ] - elasticsearch: - image: elasticsearch:6.8.15 - container_name: elasticsearch + conductor-elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.11 environment: - "ES_JAVA_OPTS=-Xms512m -Xmx1024m" - - transport.host=0.0.0.0 - - discovery.type=single-node - xpack.security.enabled=false + - discovery.type=single-node volumes: - esdata-conductor:/usr/share/elasticsearch/data networks: - internal ports: - - 9200:9200 - - 9300:9300 + - 9201:9200 healthcheck: - test: wget http://localhost:9200/ -O /dev/null + test: curl http://localhost:9200/_cluster/health -o /dev/null interval: 5s timeout: 5s retries: 12 diff --git a/docker/grpc/Makefile b/docker/grpc/Makefile deleted file mode 100644 index e111f0c63c..0000000000 --- a/docker/grpc/Makefile +++ /dev/null @@ -1,18 +0,0 @@ - -clean-db: - docker volume rm grpc_conductor_mysql - -compose-build: - docker-compose build - -dependencies-up: - docker-compose up -d mysql elasticsearch - -dependencies-down: - docker-compose down - -stack-up: - docker-compose up - -stack-down: - docker-compose down diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile index 882d5b6d91..ab6b099b50 100644 --- a/docker/server/Dockerfile +++ b/docker/server/Dockerfile @@ -1,35 +1,71 @@ # -# conductor:server - Netflix conductor server +# conductor:server - Combined Netflix conductor server & UI # - # =========================================================================================================== # 0. Builder stage # =========================================================================================================== -FROM eclipse-temurin:17-jdk-focal AS builder +FROM alpine:3.18 AS builder LABEL maintainer="Netflix OSS " -# Copy the project directly onto the image +# =========================================================================================================== +# 0. Build Conductor Server +# =========================================================================================================== + + +# Install dependencies +RUN apk add openjdk17 +RUN apk add git +RUN apk add --update nodejs npm yarn + COPY . 
/conductor -WORKDIR /conductor +WORKDIR /conductor/ui +RUN yarn install && yarn build +RUN ls -ltr +RUN echo "Done building UI" + +# Checkout the community project +WORKDIR / +RUN mkdir server-build +WORKDIR server-build +RUN ls -ltr + +RUN git clone https://github.com/Netflix/conductor-community.git + +# Copy the project directly onto the image +WORKDIR conductor-community +RUN ls -ltr # Build the server on run RUN ./gradlew build -x test --stacktrace +WORKDIR /server-build +RUN ls -ltr +RUN pwd + # =========================================================================================================== # 1. Bin stage # =========================================================================================================== -FROM eclipse-temurin:17-jre-focal +FROM alpine:3.18 LABEL maintainer="Netflix OSS " +RUN apk add openjdk17 +RUN apk add nginx + # Make app folders RUN mkdir -p /app/config /app/logs /app/libs # Copy the compiled output to new image -COPY --from=builder /conductor/docker/server/bin /app -COPY --from=builder /conductor/docker/server/config /app/config -COPY --from=builder /conductor/server/build/libs/conductor-server-*-boot.jar /app/libs +COPY docker/server/bin /app +COPY docker/server/config /app/config +COPY --from=builder /server-build/conductor-community/community-server/build/libs/*boot*.jar /app/libs/conductor-server.jar + +# Copy compiled UI assets to nginx www directory +WORKDIR /usr/share/nginx/html +RUN rm -rf ./* +COPY --from=builder /conductor/ui/build . +COPY --from=builder /conductor/docker/server/nginx/nginx.conf /etc/nginx/http.d/default.conf # Copy the files for the server into the app folders RUN chmod +x /app/startup.sh diff --git a/docker/server/README.md b/docker/server/README.md deleted file mode 100644 index 8baafbfc05..0000000000 --- a/docker/server/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Docker -## Conductor server -This Dockerfile create the conductor:server image - -## Building the image - -Run the following commands from the project root. 
- -`docker build -f docker/server/Dockerfile -t conductor:server .` - -## Running the conductor server - - Standalone server (interal DB): `docker run -p 8080:8080 -d -t conductor:server` - - Server (external DB required): `docker run -p 8080:8080 -d -t -e "CONFIG_PROP=config.properties" conductor:server` diff --git a/docker/server/bin/startup.sh b/docker/server/bin/startup.sh index 9d1b98cba8..e5bd7f501d 100755 --- a/docker/server/bin/startup.sh +++ b/docker/server/bin/startup.sh @@ -16,6 +16,10 @@ echo "Starting Conductor server" +echo "Running Nginx in background" +# Start nginx as daemon +nginx + # Start the server cd /app/libs echo "Property file: $CONFIG_PROP" @@ -24,8 +28,8 @@ export config_file= if [ -z "$CONFIG_PROP" ]; then - echo "Using an in-memory instance of conductor"; - export config_file=/app/config/config-local.properties + echo "Using default configuration file"; + export config_file=/app/config/config.properties else echo "Using '$CONFIG_PROP'"; export config_file=/app/config/$CONFIG_PROP @@ -33,4 +37,4 @@ fi echo "Using java options config: $JAVA_OPTS" -java ${JAVA_OPTS} -jar -DCONDUCTOR_CONFIG_FILE=$config_file conductor-server-*-boot.jar 2>&1 | tee -a /app/logs/server.log +java ${JAVA_OPTS} -jar -DCONDUCTOR_CONFIG_FILE=$config_file conductor-server.jar 2>&1 | tee -a /app/logs/server.log diff --git a/docker/server/config/config-local.properties b/docker/server/config/config-local.properties deleted file mode 100755 index fa2cd25957..0000000000 --- a/docker/server/config/config-local.properties +++ /dev/null @@ -1,36 +0,0 @@ -# Servers. -conductor.grpc-server.enabled=false - -# Database persistence model. -conductor.db.type=redis_standalone -conductor.queue.type=redis_standalone -# Dynomite Cluster details. -# format is host:port:rack separated by semicolon -conductor.redis.hosts=rs:6379:us-east-1c - -# Namespace for the keys stored in Dynomite/Redis -conductor.redis.workflowNamespacePrefix=conductor - -# Namespace prefix for the dyno queues -conductor.redis.queueNamespacePrefix=conductor_queues - -# No. of threads allocated to dyno-queues (optional) -queues.dynomite.threads=10 - -# By default with dynomite, we want the repairservice enabled -conductor.app.workflowRepairServiceEnabled=true - -# Non-quorum port used to connect to local redis. Used by dyno-queues. -# When using redis directly, set this to the same port as redis server -# For Dynomite, this is 22122 by default or the local redis-server port used by Dynomite. -conductor.redis.queuesNonQuorumPort=22122 - -# Elastic search instance indexing is disabled. -conductor.indexing.enabled=true -conductor.elasticsearch.url=http://es:9200 -conductor.elasticsearch.indexReplicasCount=0 - -# Load sample kitchen sink workflow -loadSample=true - -conductor.elasticsearch.clusterHealthColor=yellow diff --git a/docker/server/config/config-mysql-grpc.properties b/docker/server/config/config-mysql-grpc.properties deleted file mode 100755 index 6ed6563dc3..0000000000 --- a/docker/server/config/config-mysql-grpc.properties +++ /dev/null @@ -1,39 +0,0 @@ -# -# Copyright 2021 Netflix, Inc. -#

-# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -#

-# http://www.apache.org/licenses/LICENSE-2.0 -#

-# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on -# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -# - -# Servers. -conductor.grpc-server.enabled=true - -# Database persistence model. -conductor.db.type=mysql -conductor.queue.type=redis_standalone -conductor.redis.hosts=rs:6379:us-east-1c -spring.datasource.url=jdbc:mysql://mysql:3306/conductor -spring.datasource.username=conductor -spring.datasource.password=conductor - -# Hikari pool sizes are -1 by default and prevent startup -spring.datasource.hikari.maximum-pool-size=10 -spring.datasource.hikari.minimum-idle=2 - -# Elastic search instance indexing is enabled. -conductor.indexing.enabled=true - -# Transport address to elasticsearch -conductor.elasticsearch.url=http://es:9200 - -# Name of the elasticsearch cluster -conductor.elasticsearch.indexName=conductor - -# Load sample kitchen sink workflow -loadSample=true diff --git a/docker/server/config/config-mysql.properties b/docker/server/config/config-mysql.properties index d5effd8d6d..6b618dc2eb 100755 --- a/docker/server/config/config-mysql.properties +++ b/docker/server/config/config-mysql.properties @@ -1,27 +1,24 @@ -# Servers. -conductor.grpc-server.enabled=false - # Database persistence type. conductor.db.type=mysql -conductor.queue.type=redis_standalone -conductor.redis.hosts=rs:6379:us-east-1c +# mysql spring.datasource.url=jdbc:mysql://mysql:3306/conductor spring.datasource.username=conductor spring.datasource.password=conductor -# Hikari pool sizes are -1 by default and prevent startup -spring.datasource.hikari.maximum-pool-size=10 -spring.datasource.hikari.minimum-idle=2 +# Use redis queues +conductor.queue.type=redis_standalone # Elastic search instance indexing is enabled. conductor.indexing.enabled=true - -# Transport address to elasticsearch conductor.elasticsearch.url=http://es:9200 - -# Name of the elasticsearch cluster conductor.elasticsearch.indexName=conductor +conductor.elasticsearch.version=7 +conductor.elasticsearch.clusterHealthColor=yellow + +# Additional modules for metrics collection exposed to Prometheus (optional) +conductor.metrics-prometheus.enabled=true +management.endpoints.web.exposure.include=prometheus -# Load sample kitchen sink workflow +# Load sample kitchen-sink workflow loadSample=true diff --git a/docker/server/config/config-postgres.properties b/docker/server/config/config-postgres.properties index e1b3cc8fde..43aa64a0fb 100755 --- a/docker/server/config/config-postgres.properties +++ b/docker/server/config/config-postgres.properties @@ -1,27 +1,21 @@ -# Servers. -conductor.grpc-server.enabled=false - # Database persistence type. conductor.db.type=postgres -conductor.queue.type=redis_standalone -conductor.redis.hosts=rs:6379:us-east-1c -spring.datasource.url=jdbc:postgresql://postgres:5432/conductor +# postgres +spring.datasource.url=jdbc:postgresql://postgresdb:5432/postgres spring.datasource.username=conductor spring.datasource.password=conductor -# Hikari pool sizes are -1 by default and prevent startup -spring.datasource.hikari.maximum-pool-size=10 -spring.datasource.hikari.minimum-idle=2 - # Elastic search instance indexing is enabled. 
conductor.indexing.enabled=true - -# Transport address to elasticsearch conductor.elasticsearch.url=http://es:9200 - -# Name of the elasticsearch cluster conductor.elasticsearch.indexName=conductor +conductor.elasticsearch.version=7 +conductor.elasticsearch.clusterHealthColor=yellow + +# Additional modules for metrics collection exposed to Prometheus (optional) +conductor.metrics-prometheus.enabled=true +management.endpoints.web.exposure.include=prometheus -# Load sample kitchen sink workflow +# Load sample kitchen-sink workflow loadSample=true diff --git a/docker/server/config/config-redis.properties b/docker/server/config/config-redis.properties new file mode 100755 index 0000000000..5afd337974 --- /dev/null +++ b/docker/server/config/config-redis.properties @@ -0,0 +1,25 @@ +# Database persistence type. +# Below are the properties for redis +conductor.db.type=redis_standalone +conductor.redis.hosts=rs:6379:us-east-1c +conductor.redis-lock.serverAddress=redis://rs:6379 +conductor.redis.taskDefCacheRefreshInterval=1 +conductor.redis.workflowNamespacePrefix=conductor +conductor.redis.queueNamespacePrefix=conductor_queues + +#Use redis queues +conductor.queue.type=redis_standalone + +# Elastic search instance indexing is enabled. +conductor.indexing.enabled=true +conductor.elasticsearch.url=http://es:9200 +conductor.elasticsearch.indexName=conductor +conductor.elasticsearch.version=7 +conductor.elasticsearch.clusterHealthColor=yellow + +# Additional modules for metrics collection exposed to Prometheus (optional) +conductor.metrics-prometheus.enabled=true +management.endpoints.web.exposure.include=prometheus + +# Load sample kitchen sink workflow +loadSample=true diff --git a/docker/server/config/config.properties b/docker/server/config/config.properties index 55124c78c0..74d414520f 100755 --- a/docker/server/config/config.properties +++ b/docker/server/config/config.properties @@ -1,54 +1,42 @@ -# Servers. -conductor.grpc-server.enabled=false +# See README in the docker for configuration guide -# Database persistence type. -conductor.db.type=dynomite +# db.type determines the type of database used +# See various configurations below for the values +conductor.db.type=SET_THIS -# Dynomite Cluster details. -# format is host:port:rack separated by semicolon -conductor.redis.hosts=dyno1:8102:us-east-1c +# =====================================================# +# Redis Configuration Properties +# =====================================================# +#conductor.db.type=redis_standalone -# Dynomite cluster name -conductor.redis.clusterName=dyno1 +# The last part MUST be us-east-1c, it is not used and is kept for backwards compatibility +# conductor.redis.hosts=rs:6379:us-east-1c +# -# Namespace for the keys stored in Dynomite/Redis -conductor.redis.workflowNamespacePrefix=conductor +# conductor.redis-lock.serverAddress=redis://rs:6379 +# conductor.redis.taskDefCacheRefreshInterval=1 +# conductor.redis.workflowNamespacePrefix=conductor +# conductor.redis.queueNamespacePrefix=conductor_queues -# Namespace prefix for the dyno queues -conductor.redis.queueNamespacePrefix=conductor_queues -# No. 
of threads allocated to dyno-queues (optional)
-queues.dynomite.threads=10
+# =====================================================#
+# Postgres Configuration Properties
+# =====================================================#
 
-# By default with dynomite, we want the repairservice enabled
-conductor.app.workflowRepairServiceEnabled=true
+# conductor.db.type=postgres
+# spring.datasource.url=jdbc:postgresql://localhost:5432/postgres
+# spring.datasource.username=postgres
+# spring.datasource.password=postgres
+# Additionally, you can also set the spring.datasource.XXX properties for connection pool size etc.
 
-# Non-quorum port used to connect to local redis. Used by dyno-queues.
-# When using redis directly, set this to the same port as redis server
-# For Dynomite, this is 22122 by default or the local redis-server port used by Dynomite.
-conductor.redis.queuesNonQuorumPort=22122
+# If you want to use Postgres as the indexing store, set the following:
+# conductor.indexing.enabled=true
+# conductor.indexing.type=postgres
 
-# Elastic search instance indexing is enabled.
-conductor.indexing.enabled=true
+# When using Elasticsearch 7 for indexing, set the following:
 
-# Transport address to elasticsearch
-conductor.elasticsearch.url=http://es:9200
+# conductor.indexing.enabled=true
+# conductor.elasticsearch.url=http://es:9200
+# conductor.elasticsearch.version=7
+# conductor.elasticsearch.indexName=conductor
 
-# Name of the elasticsearch cluster
-conductor.elasticsearch.indexName=conductor
-#conductor.event-queues.amqp.queueType=classic
-#conductor.event-queues.amqp.sequentialMsgProcessing=true
-
-# Additional modules for metrics collection exposed via logger (optional)
-# conductor.metrics-logger.enabled=true
-# conductor.metrics-logger.reportPeriodSeconds=15
-
-# Additional modules for metrics collection exposed to Prometheus (optional)
-# conductor.metrics-prometheus.enabled=true
-# management.endpoints.web.exposure.include=prometheus
-
-# To enable Workflow/Task Summary Input/Output JSON Serialization, use the following:
-# conductor.app.summary-input-output-json-serialization.enabled=true
-
-# Load sample kitchen sink workflow
-loadSample=true
diff --git a/docker/server/config/log4j.properties b/docker/server/config/log4j.properties
index bb249b00d0..900fbc3224 100644
--- a/docker/server/config/log4j.properties
+++ b/docker/server/config/log4j.properties
@@ -23,3 +23,4 @@ log4j.appender.A1=org.apache.log4j.ConsoleAppender
 # A1 uses PatternLayout. 
log4j.appender.A1.layout=org.apache.log4j.PatternLayout log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n +logging.logger.com.netflix.dyno.queues.redis.RedisDynoQueue=ERROR \ No newline at end of file diff --git a/docker/server/config/redis.conf b/docker/server/config/redis.conf new file mode 100644 index 0000000000..f43add6148 --- /dev/null +++ b/docker/server/config/redis.conf @@ -0,0 +1 @@ +appendonly yes \ No newline at end of file diff --git a/docker/server/nginx/nginx.conf b/docker/server/nginx/nginx.conf new file mode 100644 index 0000000000..fa8f0871d0 --- /dev/null +++ b/docker/server/nginx/nginx.conf @@ -0,0 +1,50 @@ +server { + listen 5000; + server_name conductor; + server_tokens off; + + location / { + add_header Referrer-Policy "strict-origin"; + add_header X-Frame-Options "SAMEORIGIN"; + add_header X-Content-Type-Options "nosniff"; + add_header Content-Security-Policy "script-src 'self' 'unsafe-inline' 'unsafe-eval' assets.orkes.io *.googletagmanager.com *.pendo.io https://cdn.jsdelivr.net; worker-src 'self' 'unsafe-inline' 'unsafe-eval' data: blob:;"; + add_header Permissions-Policy "accelerometer=(), autoplay=(), camera=(), cross-origin-isolated=(), display-capture=(), document-domain=(), encrypted-media=(), fullscreen=(), geolocation=(), gyroscope=(), keyboard-map=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), publickey-credentials-get=(), screen-wake-lock=(), sync-xhr=(), usb=(), xr-spatial-tracking=(), clipboard-read=(self), clipboard-write=(self), gamepad=(), hid=(), idle-detection=(), serial=(), window-placement=(self)"; + + # This would be the directory where your React app's static files are stored at + root /usr/share/nginx/html; + try_files $uri /index.html; + } + + location /api { + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-NginX-Proxy true; + proxy_pass http://localhost:8080/api; + proxy_ssl_session_reuse off; + proxy_set_header Host $http_host; + proxy_cache_bypass $http_upgrade; + proxy_redirect off; + } + + location /actuator { + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-NginX-Proxy true; + proxy_pass http://localhost:8080/actuator; + proxy_ssl_session_reuse off; + proxy_set_header Host $http_host; + proxy_cache_bypass $http_upgrade; + proxy_redirect off; + } + + location /swagger-ui { + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-NginX-Proxy true; + proxy_pass http://localhost:8080/swagger-ui; + proxy_ssl_session_reuse off; + proxy_set_header Host $http_host; + proxy_cache_bypass $http_upgrade; + proxy_redirect off; + } +} \ No newline at end of file diff --git a/docker/serverAndUI/Dockerfile b/docker/serverAndUI/Dockerfile deleted file mode 100644 index 7840fc5f49..0000000000 --- a/docker/serverAndUI/Dockerfile +++ /dev/null @@ -1,63 +0,0 @@ -# -# conductor:serverAndUI - Combined Netflix conductor server & UI -# -# =========================================================================================================== -# 0. 
Builder stage -# =========================================================================================================== -FROM openjdk:17-jdk AS builder -LABEL maintainer="Netflix OSS " - -# Install Node -SHELL ["/bin/bash", "-o", "pipefail", "-c"] -RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - \ - && curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \ - && echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list \ - && apt-get update -qq \ - && apt-get install -qq --no-install-recommends \ - build-essential \ - nodejs \ - yarn \ - && apt-get upgrade -qq \ - && rm -rf /var/lib/apt/lists/* - -# Copy the project onto the builder image -COPY . /conductor - -# Build the server -WORKDIR /conductor -RUN ./gradlew build -x test - -# Build the client -WORKDIR /conductor/ui -RUN yarn install && yarn build - -# =========================================================================================================== -# 1. Bin stage -# =========================================================================================================== - -FROM nginx:alpine -RUN apk add openjdk17-jre - -LABEL maintainer="Netflix OSS " - -# Make app folders -RUN mkdir -p /app/config /app/logs /app/libs - -# Copy the compiled output to new image -COPY --from=builder /conductor/docker/serverAndUI/bin /app -COPY --from=builder /conductor/docker/serverAndUI/config /app/config -COPY --from=builder /conductor/server/build/libs/conductor-server-*-boot.jar /app/libs - -# Copy compiled UI assets to nginx www directory -WORKDIR /usr/share/nginx/html -RUN rm -rf ./* -COPY --from=builder /conductor/ui/build . -COPY --from=builder /conductor/docker/serverAndUI/nginx/nginx.conf /etc/nginx/conf.d/default.conf - -# Copy the files for the server into the app folders -RUN chmod +x /app/startup.sh - -HEALTHCHECK --interval=60s --timeout=30s --retries=10 CMD curl -I -XGET http://localhost:8080/health || exit 1 - -CMD [ "/app/startup.sh" ] -ENTRYPOINT [ "/bin/sh"] diff --git a/docker/serverAndUI/README.md b/docker/serverAndUI/README.md deleted file mode 100644 index 275d74add6..0000000000 --- a/docker/serverAndUI/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Docker -## Conductor server and UI -This Dockerfile create the conductor:serverAndUI image - -## Building the image -`docker build -t conductor:serverAndUI .` - -## Running the conductor server - - Standalone server (interal DB): `docker run -p 8080:8080 -p 80:5000 -d -t conductor:serverAndUI` - - Server (external DB required): `docker run -p 8080:8080 -p 80:5000 -d -t -e "CONFIG_PROP=config.properties" conductor:serverAndUI` diff --git a/docker/serverAndUI/bin/startup.sh b/docker/serverAndUI/bin/startup.sh deleted file mode 100755 index 0070cd0b9d..0000000000 --- a/docker/serverAndUI/bin/startup.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# -# Copyright 2021 Netflix, Inc. -#

-# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -#

-# http://www.apache.org/licenses/LICENSE-2.0 -#

-# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on -# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -# - -echo "Starting Conductor Server and UI" -echo "Running Nginx in background" -# Start nginx as daemon -nginx - -# Start the server -cd /app/libs -echo "Property file: $CONFIG_PROP" -echo $CONFIG_PROP -export config_file= - -if [ -z "$CONFIG_PROP" ]; - then - echo "Using an in-memory instance of conductor"; - export config_file=/app/config/config-local.properties - else - echo "Using '$CONFIG_PROP'"; - export config_file=/app/config/$CONFIG_PROP -fi - -nohup java -jar -DCONDUCTOR_CONFIG_FILE=$config_file conductor-server-*-boot.jar 1>&2 > /app/logs/server.log diff --git a/docker/serverAndUI/config/config-local.properties b/docker/serverAndUI/config/config-local.properties deleted file mode 100755 index d725130e89..0000000000 --- a/docker/serverAndUI/config/config-local.properties +++ /dev/null @@ -1,33 +0,0 @@ -# Database persistence type. -conductor.db.type=memory - -# Dynomite Cluster details. -# format is host:port:rack separated by semicolon -conductor.redis.hosts=dyno1:8102:us-east-1c - -# Namespace for the keys stored in Dynomite/Redis -conductor.redis.workflowNamespacePrefix=conductor - -# Namespace prefix for the dyno queues -conductor.redis.queueNamespacePrefix=conductor_queues - -# No. of threads allocated to dyno-queues (optional) -queues.dynomite.threads=10 - -# By default with dynomite, we want the repairservice enabled -conductor.app.workflowRepairServiceEnabled=true - - -# Non-quorum port used to connect to local redis. Used by dyno-queues. -# When using redis directly, set this to the same port as redis server -# For Dynomite, this is 22122 by default or the local redis-server port used by Dynomite. -conductor.redis.queuesNonQuorumPort=22122 - -# Transport address to elasticsearch -conductor.elasticsearch.url=localhost:9300 - -# Name of the elasticsearch cluster -conductor.elasticsearch.indexName=conductor - -# Load sample kitchen sink workflow -loadSample=true diff --git a/docker/serverAndUI/config/config.properties b/docker/serverAndUI/config/config.properties deleted file mode 100755 index c596c6f10f..0000000000 --- a/docker/serverAndUI/config/config.properties +++ /dev/null @@ -1,35 +0,0 @@ -# Database persistence model. -conductor.db.type=dynomite - -# Dynomite Cluster details. -# format is host:port:rack separated by semicolon -conductor.redis.hosts=dyno1:8102:us-east-1c - -# Dynomite cluster name -conductor.redis.clusterName=dyno1 - -# Namespace for the keys stored in Dynomite/Redis -conductor.redis.workflowNamespacePrefix=conductor - -# Namespace prefix for the dyno queues -conductor.redis.queueNamespacePrefix=conductor_queues - -# No. of threads allocated to dyno-queues (optional) -queues.dynomite.threads=10 - -# By default with dynomite, we want the repairservice enabled -conductor.app.workflowRepairServiceEnabled=true - -# Non-quorum port used to connect to local redis. Used by dyno-queues. -# When using redis directly, set this to the same port as redis server -# For Dynomite, this is 22122 by default or the local redis-server port used by Dynomite. 
-conductor.redis.queuesNonQuorumPort=22122 - -# Transport address to elasticsearch -conductor.elasticsearch.url=es:9300 - -# Name of the elasticsearch cluster -conductor.elasticsearch.indexName=conductor - -# Load sample kitchen sink workflow -loadSample=true diff --git a/docker/serverAndUI/nginx/nginx.conf b/docker/serverAndUI/nginx/nginx.conf deleted file mode 100644 index 74e0ec2e61..0000000000 --- a/docker/serverAndUI/nginx/nginx.conf +++ /dev/null @@ -1,20 +0,0 @@ -server { - listen 5000; - server_name conductor; - location / { - # This would be the directory where your React app's static files are stored at - root /usr/share/nginx/html; - try_files $uri /index.html; - } - - location /api { - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-NginX-Proxy true; - proxy_pass http://localhost:8080/api; - proxy_ssl_session_reuse off; - proxy_set_header Host $http_host; - proxy_cache_bypass $http_upgrade; - proxy_redirect off; - } -} \ No newline at end of file diff --git a/docker/ui/Dockerfile b/docker/ui/Dockerfile index a79aef3b0b..b7967b9a00 100644 --- a/docker/ui/Dockerfile +++ b/docker/ui/Dockerfile @@ -1,25 +1,25 @@ # # conductor:ui - Netflix Conductor UI # -FROM node:16-alpine +FROM node:20-alpine LABEL maintainer="Netflix OSS " # Install the required packages for the node build # to run on alpine RUN apk update && apk add --no-cache python3 py3-pip make g++ - + # A directory within the virtualized Docker environment # Becomes more relevant when using Docker Compose later WORKDIR /usr/src/app - + # Copies package.json to Docker environment in a separate layer as a performance optimization COPY ./ui/package.json ./ - + # Installs all node packages. Cached unless package.json changes RUN yarn install - + # Copies everything else over to Docker environment -# node_modules excluded in .dockerignore. +# node_modules excluded in .dockerignore. COPY ./ui . - + CMD [ "yarn", "start" ]
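
The compose files above build the combined `conductor:server` image (API on port 8080, UI served by nginx on port 5000) and wire it to Redis and Elasticsearch 7. A minimal usage sketch, assuming the commands are run from the repository's `docker/` directory where these compose files live:

```bash
# Build the server image and bring up the default Redis + Elasticsearch 7 stack
docker-compose -f docker-compose.yaml up --build -d

# Or pick one of the other backends covered by this change:
#   docker-compose -f docker-compose-postgres.yaml up --build -d
#   docker-compose -f docker-compose-mysql.yaml up --build -d
```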
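Once the containers report healthy, the port mappings in the compose files (8080 for the server, 5000 for the UI, 9201 remapped to Elasticsearch's 9200) suggest a quick smoke test from the host; a sketch, assuming the default mappings are unchanged:

```bash
# Same endpoint the compose healthcheck polls
curl -I http://localhost:8080/health

# UI and proxied API served by nginx inside the server container
curl -I http://localhost:5000/

# Elasticsearch 7, exposed on host port 9201
curl http://localhost:9201/_cluster/health
```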
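The updated startup.sh selects the configuration through the `CONFIG_PROP` environment variable and falls back to `config.properties`, so the server image can also be run standalone against externally managed backends. A sketch, assuming the image is built as `conductor:server` and the hosts referenced in the chosen properties file (for example `rs` and `es`) are reachable:

```bash
# Build the combined server + UI image from the repository root
docker build -f docker/server/Dockerfile -t conductor:server .

# Run it with one of the configuration files bundled under /app/config
docker run -d -t -p 8080:8080 -p 5000:5000 \
  -e "CONFIG_PROP=config-redis.properties" conductor:server
```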