8 changes: 5 additions & 3 deletions azure-pipelines.yml
@@ -60,6 +60,7 @@ parameters:
- '!hudi-examples/hudi-examples-flink'
- '!hudi-examples/hudi-examples-java'
- '!hudi-examples/hudi-examples-spark'
- '!hudi-spark-datasource/hudi-spark3.1.x'
- '!hudi-flink-datasource'
- '!hudi-flink-datasource/hudi-flink'
- '!hudi-flink-datasource/hudi-flink1.13.x'
@@ -72,11 +73,11 @@ parameters:
- '!hudi-utilities'

variables:
BUILD_PROFILES: '-Dscala-2.11 -Dspark2 -Dflink1.14'
BUILD_PROFILES: '-Dscala-2.12 -Dspark3.1 -Dflink1.14'
PLUGIN_OPTS: '-Dcheckstyle.skip=true -Drat.skip=true -Djacoco.skip=true'
MVN_OPTS_INSTALL: '-DskipTests $(BUILD_PROFILES) $(PLUGIN_OPTS)'
MVN_OPTS_TEST: '-fae $(BUILD_PROFILES) $(PLUGIN_OPTS)'
SPARK_VERSION: '2.4.4'
SPARK_VERSION: '3.1.3'
HADOOP_VERSION: '2.7'
SPARK_ARCHIVE: spark-$(SPARK_VERSION)-bin-hadoop$(HADOOP_VERSION)
JOB1_MODULES: ${{ join(',',parameters.job1Modules) }}
@@ -211,6 +212,7 @@ stages:
publishJUnitResults: false
jdkVersionOption: '1.8'
- task: Maven@3
Contributor:

Instead of deleting this, could you add a property to disable this task? cc @xushiyan for help.

Collaborator (Author):

I believe that if we add a condition and set it to false, it should disable this section: https://docs.microsoft.com/en-us/azure/devops/pipelines/process/tasks?view=azure-devops&tabs=yaml. Will give it a try.

Contributor:

Then let's comment these lines out without deleting them, as a reminder to re-enable them later.

condition: false
displayName: UT integ-test
inputs:
mavenPomFile: 'pom.xml'
@@ -233,4 +235,4 @@ stages:
- script: |
export SPARK_HOME=$(Pipeline.Workspace)/$(SPARK_ARCHIVE)
mvn $(MVN_OPTS_TEST) -Pintegration-tests verify
displayName: IT
displayName: IT
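
As a local sanity check, the CI install step with the new defaults expands to roughly the following (a sketch, assuming a Hudi checkout with Maven on the PATH; the per-job module lists from the parameters above are omitted):

# Expansion of MVN_OPTS_INSTALL with the updated BUILD_PROFILES
mvn install -DskipTests -Dscala-2.12 -Dspark3.1 -Dflink1.14 -Dcheckstyle.skip=true -Drat.skip=true -Djacoco.skip=true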
309 changes: 309 additions & 0 deletions docker/compose/docker-compose_hadoop284_hive233_spark313.yml
@@ -0,0 +1,309 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

version: "3.3"

services:

namenode:
image: apachehudi/hudi-hadoop_2.8.4-namenode:latest
hostname: namenode
container_name: namenode
environment:
- CLUSTER_NAME=hudi_hadoop284_hive232_spark313
ports:
- "50070:50070"
- "8020:8020"
env_file:
- ./hadoop.env
healthcheck:
test: ["CMD", "curl", "-f", "http://namenode:50070"]
interval: 30s
timeout: 10s
retries: 3

datanode1:
image: apachehudi/hudi-hadoop_2.8.4-datanode:latest
container_name: datanode1
hostname: datanode1
environment:
- CLUSTER_NAME=hudi_hadoop284_hive232_spark313
env_file:
- ./hadoop.env
ports:
- "50075:50075"
- "50010:50010"
links:
- "namenode"
- "historyserver"
healthcheck:
test: ["CMD", "curl", "-f", "http://datanode1:50075"]
interval: 30s
timeout: 10s
retries: 3
depends_on:
- namenode

historyserver:
image: apachehudi/hudi-hadoop_2.8.4-history:latest
hostname: historyserver
container_name: historyserver
environment:
- CLUSTER_NAME=hudi_hadoop284_hive232_spark313
depends_on:
- "namenode"
links:
- "namenode"
ports:
- "58188:8188"
healthcheck:
test: ["CMD", "curl", "-f", "http://historyserver:8188"]
interval: 30s
timeout: 10s
retries: 3
env_file:
- ./hadoop.env
volumes:
- historyserver:/hadoop/yarn/timeline

hive-metastore-postgresql:
image: bde2020/hive-metastore-postgresql:2.3.0
volumes:
- hive-metastore-postgresql:/var/lib/postgresql
hostname: hive-metastore-postgresql
container_name: hive-metastore-postgresql

hivemetastore:
image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3:latest
hostname: hivemetastore
container_name: hivemetastore
links:
- "hive-metastore-postgresql"
- "namenode"
env_file:
- ./hadoop.env
command: /opt/hive/bin/hive --service metastore
environment:
SERVICE_PRECONDITION: "namenode:50070 hive-metastore-postgresql:5432"
ports:
- "9083:9083"
healthcheck:
test: ["CMD", "nc", "-z", "hivemetastore", "9083"]
interval: 30s
timeout: 10s
retries: 3
depends_on:
- "hive-metastore-postgresql"
- "namenode"

hiveserver:
image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3:latest
hostname: hiveserver
container_name: hiveserver
env_file:
- ./hadoop.env
environment:
SERVICE_PRECONDITION: "hivemetastore:9083"
ports:
- "10000:10000"
depends_on:
- "hivemetastore"
links:
- "hivemetastore"
- "hive-metastore-postgresql"
- "namenode"
volumes:
- ${HUDI_WS}:/var/hoodie/ws

sparkmaster:
image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkmaster_3.1.3:latest
hostname: sparkmaster
container_name: sparkmaster
env_file:
- ./hadoop.env
ports:
- "8080:8080"
- "7077:7077"
environment:
- INIT_DAEMON_STEP=setup_spark
links:
- "hivemetastore"
- "hiveserver"
- "hive-metastore-postgresql"
- "namenode"

spark-worker-1:
image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkworker_3.1.3:latest
hostname: spark-worker-1
container_name: spark-worker-1
env_file:
- ./hadoop.env
depends_on:
- sparkmaster
ports:
- "8081:8081"
environment:
- "SPARK_MASTER=spark://sparkmaster:7077"
links:
- "hivemetastore"
- "hiveserver"
- "hive-metastore-postgresql"
- "namenode"

zookeeper:
image: 'bitnami/zookeeper:3.4.12-r68'
hostname: zookeeper
container_name: zookeeper
ports:
- "2181:2181"
environment:
- ALLOW_ANONYMOUS_LOGIN=yes

kafka:
image: 'bitnami/kafka:2.0.0'
hostname: kafkabroker
container_name: kafkabroker
ports:
- "9092:9092"
environment:
- KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
- ALLOW_PLAINTEXT_LISTENER=yes

presto-coordinator-1:
container_name: presto-coordinator-1
hostname: presto-coordinator-1
image: apachehudi/hudi-hadoop_2.8.4-prestobase_0.271:latest
ports:
- "8090:8090"
environment:
- PRESTO_JVM_MAX_HEAP=512M
- PRESTO_QUERY_MAX_MEMORY=1GB
- PRESTO_QUERY_MAX_MEMORY_PER_NODE=256MB
- PRESTO_QUERY_MAX_TOTAL_MEMORY_PER_NODE=384MB
- PRESTO_MEMORY_HEAP_HEADROOM_PER_NODE=100MB
- TERM=xterm
links:
- "hivemetastore"
volumes:
- ${HUDI_WS}:/var/hoodie/ws
command: coordinator

presto-worker-1:
container_name: presto-worker-1
hostname: presto-worker-1
image: apachehudi/hudi-hadoop_2.8.4-prestobase_0.271:latest
depends_on: [ "presto-coordinator-1" ]
environment:
- PRESTO_JVM_MAX_HEAP=512M
- PRESTO_QUERY_MAX_MEMORY=1GB
- PRESTO_QUERY_MAX_MEMORY_PER_NODE=256MB
- PRESTO_QUERY_MAX_TOTAL_MEMORY_PER_NODE=384MB
- PRESTO_MEMORY_HEAP_HEADROOM_PER_NODE=100MB
- TERM=xterm
links:
- "hivemetastore"
- "hiveserver"
- "hive-metastore-postgresql"
- "namenode"
volumes:
- ${HUDI_WS}:/var/hoodie/ws
command: worker

trino-coordinator-1:
container_name: trino-coordinator-1
hostname: trino-coordinator-1
image: apachehudi/hudi-hadoop_2.8.4-trinocoordinator_368:latest
ports:
- "8091:8091"
links:
- "hivemetastore"
volumes:
- ${HUDI_WS}:/var/hoodie/ws
command: http://trino-coordinator-1:8091 trino-coordinator-1

trino-worker-1:
container_name: trino-worker-1
hostname: trino-worker-1
image: apachehudi/hudi-hadoop_2.8.4-trinoworker_368:latest
depends_on: [ "trino-coordinator-1" ]
ports:
- "8092:8092"
links:
- "hivemetastore"
- "hiveserver"
- "hive-metastore-postgresql"
- "namenode"
volumes:
- ${HUDI_WS}:/var/hoodie/ws
command: http://trino-coordinator-1:8091 trino-worker-1

graphite:
container_name: graphite
hostname: graphite
image: graphiteapp/graphite-statsd
ports:
- 80:80
- 2003-2004:2003-2004
- 8126:8126

adhoc-1:
image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkadhoc_3.1.3:latest
hostname: adhoc-1
container_name: adhoc-1
env_file:
- ./hadoop.env
depends_on:
- sparkmaster
ports:
- '4040:4040'
environment:
- "SPARK_MASTER=spark://sparkmaster:7077"
links:
- "hivemetastore"
- "hiveserver"
- "hive-metastore-postgresql"
- "namenode"
- "presto-coordinator-1"
- "trino-coordinator-1"
volumes:
- ${HUDI_WS}:/var/hoodie/ws

adhoc-2:
image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkadhoc_3.1.3:latest
hostname: adhoc-2
container_name: adhoc-2
env_file:
- ./hadoop.env
depends_on:
- sparkmaster
environment:
- "SPARK_MASTER=spark://sparkmaster:7077"
links:
- "hivemetastore"
- "hiveserver"
- "hive-metastore-postgresql"
- "namenode"
- "presto-coordinator-1"
- "trino-coordinator-1"
volumes:
- ${HUDI_WS}:/var/hoodie/ws

volumes:
namenode:
historyserver:
hive-metastore-postgresql:

networks:
default:
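
To bring this stack up locally (a usage sketch; HUDI_WS must point at a Hudi workspace, since the compose file mounts it at /var/hoodie/ws):

# Start the Hadoop 2.8.4 / Hive 2.3.3 / Spark 3.1.3 demo cluster
export HUDI_WS=/path/to/hudi   # hypothetical local checkout path
docker-compose -f docker/compose/docker-compose_hadoop284_hive233_spark313.yml up -d
# The namenode healthcheck should pass once http://localhost:50070 responds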
19 changes: 19 additions & 0 deletions docker/hoodie/hadoop/build_docker_images.sh
@@ -0,0 +1,19 @@
docker build base -t apachehudi/hudi-hadoop_2.8.4-base
Collaborator (Author):

We have to upload the final Docker images to the apachehudi Docker Hub.

docker build namenode -t apachehudi/hudi-hadoop_2.8.4-namenode
docker build datanode -t apachehudi/hudi-hadoop_2.8.4-datanode
docker build historyserver -t apachehudi/hudi-hadoop_2.8.4-history

docker build hive_base -t apachehudi/hudi-hadoop_2.8.4-hive_2.3.3

docker build spark_base -t apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkbase_3.1.3
docker build sparkmaster -t apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkmaster_3.1.3
docker build sparkadhoc -t apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkadhoc_3.1.3
docker build sparkworker -t apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkworker_3.1.3


docker build prestobase -t apachehudi/hudi-hadoop_2.8.4-prestobase_0.271

docker build base_java11 -t apachehudi/hudi-hadoop_2.8.4-base-java11
docker build trinobase -t apachehudi/hudi-hadoop_2.8.4-trinobase_368
docker build trinocoordinator -t apachehudi/hudi-hadoop_2.8.4-trinocoordinator_368
docker build trinoworker -t apachehudi/hudi-hadoop_2.8.4-trinoworker_368
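
Publishing would then be a matter of pushing each tag built above (a sketch, assuming push access to the apachehudi organization on Docker Hub):

# Hypothetical publish step for the images built by this script
docker login
docker push apachehudi/hudi-hadoop_2.8.4-namenode
docker push apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkbase_3.1.3
# ...and so on for the remaining tags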
2 changes: 1 addition & 1 deletion docker/hoodie/hadoop/pom.xml
@@ -54,7 +54,7 @@
<properties>
<skipITs>false</skipITs>
<docker.build.skip>true</docker.build.skip>
<docker.spark.version>2.4.4</docker.spark.version>
<docker.spark.version>3.1.3</docker.spark.version>
<docker.hive.version>2.3.3</docker.hive.version>
<docker.hadoop.version>2.8.4</docker.hadoop.version>
<docker.presto.version>0.271</docker.presto.version>
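
These defaults can also be overridden per invocation without editing the POM (a sketch using standard Maven -D property overrides; that this module's build wires docker.build.skip into the image build is an assumption based on the property names above):

# Enable the Docker image build and pin the Spark version
mvn package -Ddocker.build.skip=false -Ddocker.spark.version=3.1.3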
2 changes: 1 addition & 1 deletion docker/hoodie/hadoop/spark_base/Dockerfile
@@ -23,7 +23,7 @@ ENV ENABLE_INIT_DAEMON true
ENV INIT_DAEMON_BASE_URI http://identifier/init-daemon
ENV INIT_DAEMON_STEP spark_master_init

ARG SPARK_VERSION=2.4.4
ARG SPARK_VERSION=3.1.3
ARG SPARK_HADOOP_VERSION=2.7

ENV SPARK_VERSION ${SPARK_VERSION}
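
Because SPARK_VERSION is a build ARG, images for a different Spark line can still be produced without editing the Dockerfile (a sketch, mirroring the tag scheme in build_docker_images.sh; the same override applies to the dependent sparkmaster/sparkworker/sparkadhoc images):

# Override the ARG defaults at docker build time
docker build --build-arg SPARK_VERSION=3.1.3 --build-arg SPARK_HADOOP_VERSION=2.7 spark_base -t apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkbase_3.1.3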
2 changes: 1 addition & 1 deletion docker/hoodie/hadoop/sparkadhoc/Dockerfile
@@ -17,7 +17,7 @@

ARG HADOOP_VERSION=2.8.4
ARG HIVE_VERSION=2.3.3
ARG SPARK_VERSION=2.4.4
ARG SPARK_VERSION=3.1.3
FROM apachehudi/hudi-hadoop_${HADOOP_VERSION}-hive_${HIVE_VERSION}-sparkbase_${SPARK_VERSION}

ARG PRESTO_VERSION=0.268