From 860058fc8f6c0f07350eabc6e9080f57d5ef955b Mon Sep 17 00:00:00 2001
From: Leonard Lausen
Date: Wed, 22 Jul 2020 22:31:52 +0000
Subject: [PATCH 1/4] Migrate remaining Dockerfiles to docker-compose.yml

- Delete unused Dockerfiles
- Delete unused install/*.sh scripts
- Consolidate ubuntu_gpu_tensorrt and ubuntu_gpu
- Remove deprecated logic in ci/build.py (no longer needed with docker-compose)
- Remove ci/docker_cache.py (no longer needed with docker-compose)
---
 ci/Jenkinsfile_docker_cache                   |   1 -
 ci/build.py                                   | 189 ++----
 ci/dev_menu.py                                |   1 -
 ci/docker/Dockerfile.build.ubuntu             |  25 +-
 ci/docker/Dockerfile.build.ubuntu_cpu_c       |  35 ---
 ci/docker/Dockerfile.build.ubuntu_cpu_julia   |  66 -----
 ci/docker/Dockerfile.build.ubuntu_cpu_r       |  46 ---
 ci/docker/Dockerfile.build.ubuntu_cpu_scala   |  53 ----
 .../Dockerfile.build.ubuntu_gpu_tensorrt      |  47 ---
 ci/docker/Dockerfile.build.ubuntu_rat         |  36 ---
 .../Dockerfile.publish.test.ubuntu1604_cpu    |  39 ---
 .../Dockerfile.publish.test.ubuntu1604_gpu    |  39 ---
 .../Dockerfile.publish.test.ubuntu1804_cpu    |  41 ---
 .../Dockerfile.publish.test.ubuntu1804_gpu    |  41 ---
 ci/docker/Dockerfile.publish.ubuntu1604_cpu   |  44 ---
 ci/docker/Dockerfile.publish.ubuntu1604_gpu   |  44 ---
 ci/docker/docker-compose.yml                  |  31 ++
 ci/docker/install/export_gpg_keys.sh          |  23 --
 ci/docker/install/r.gpg                       | Bin 1519 -> 0 bytes
 ci/docker/install/sbt.gpg                     | Bin 2210 -> 0 bytes
 ci/docker/install/tensorrt.sh                 |  49 ----
 ci/docker/install/ubuntu_base.sh              |  40 ---
 ci/docker/install/ubuntu_clang.sh             |  42 ---
 ci/docker/install/ubuntu_clojure.sh           |  30 --
 ci/docker/install/ubuntu_cudnn.sh             |  62 ----
 ci/docker/install/ubuntu_emscripten.sh        |  41 ---
 ci/docker/install/ubuntu_gcc8.sh              |  23 --
 ci/docker/install/ubuntu_julia.sh             |  43 ---
 ci/docker/install/ubuntu_nightly_tests.sh     |  35 ---
 ci/docker/install/ubuntu_r.sh                 |  50 ----
 ci/docker/install/ubuntu_scala.sh             |  31 --
 ci/docker/runtime_functions.sh                |  11 +-
 ci/docker_cache.py                            | 203 -------------
 ci/docker_cache_requirements                  |  24 --
 ci/jenkins/Jenkins_steps.groovy               |   3 +-
 ci/test_docker_cache.py                       | 272 ------------------
 ci/windows/test_jl07_cpu.ps1                  |  56 ----
 ci/windows/test_jl10_cpu.ps1                  |  56 ----
 38 files changed, 84 insertions(+), 1788 deletions(-)
 delete mode 100644 ci/docker/Dockerfile.build.ubuntu_cpu_c
 delete mode 100644 ci/docker/Dockerfile.build.ubuntu_cpu_julia
 delete mode 100644 ci/docker/Dockerfile.build.ubuntu_cpu_r
 delete mode 100644 ci/docker/Dockerfile.build.ubuntu_cpu_scala
 delete mode 100644 ci/docker/Dockerfile.build.ubuntu_gpu_tensorrt
 delete mode 100644 ci/docker/Dockerfile.build.ubuntu_rat
 delete mode 100644 ci/docker/Dockerfile.publish.test.ubuntu1604_cpu
 delete mode 100644 ci/docker/Dockerfile.publish.test.ubuntu1604_gpu
 delete mode 100644 ci/docker/Dockerfile.publish.test.ubuntu1804_cpu
 delete mode 100644 ci/docker/Dockerfile.publish.test.ubuntu1804_gpu
 delete mode 100644 ci/docker/Dockerfile.publish.ubuntu1604_cpu
 delete mode 100644 ci/docker/Dockerfile.publish.ubuntu1604_gpu
 delete mode 100755 ci/docker/install/export_gpg_keys.sh
 delete mode 100644 ci/docker/install/r.gpg
 delete mode 100644 ci/docker/install/sbt.gpg
 delete mode 100755 ci/docker/install/tensorrt.sh
 delete mode 100755 ci/docker/install/ubuntu_base.sh
 delete mode 100755 ci/docker/install/ubuntu_clang.sh
 delete mode 100755 ci/docker/install/ubuntu_clojure.sh
 delete mode 100755 ci/docker/install/ubuntu_cudnn.sh
 delete mode 100755 ci/docker/install/ubuntu_emscripten.sh
 delete mode 100755 ci/docker/install/ubuntu_gcc8.sh
 delete mode 100755 ci/docker/install/ubuntu_julia.sh
 delete mode
100755 ci/docker/install/ubuntu_nightly_tests.sh delete mode 100755 ci/docker/install/ubuntu_r.sh delete mode 100755 ci/docker/install/ubuntu_scala.sh delete mode 100644 ci/docker_cache.py delete mode 100644 ci/docker_cache_requirements delete mode 100644 ci/test_docker_cache.py delete mode 100644 ci/windows/test_jl07_cpu.ps1 delete mode 100644 ci/windows/test_jl10_cpu.ps1 diff --git a/ci/Jenkinsfile_docker_cache b/ci/Jenkinsfile_docker_cache index 96cf2c7bef86..5f378b5d69eb 100644 --- a/ci/Jenkinsfile_docker_cache +++ b/ci/Jenkinsfile_docker_cache @@ -37,7 +37,6 @@ core_logic: { ws('workspace/docker_cache') { timeout(time: total_timeout, unit: 'MINUTES') { utils.init_git() - sh "python3 ./ci/docker_cache.py --docker-registry ${env.DOCKER_CACHE_REGISTRY}" sh "cd ci && python3 ./docker_login.py --secret-name ${env.DOCKERHUB_SECRET_NAME} && docker-compose -f docker/docker-compose.yml pull && docker-compose -f docker/docker-compose.yml build --parallel && COMPOSE_HTTP_TIMEOUT=600 docker-compose -f docker/docker-compose.yml push && docker logout" } } diff --git a/ci/build.py b/ci/build.py index 70ea8dcee37b..f42be1861b19 100755 --- a/ci/build.py +++ b/ci/build.py @@ -18,23 +18,18 @@ # specific language governing permissions and limitations # under the License. -"""Multi arch dockerized build tool. +"""Multi arch dockerized build tool.""" -""" - -__author__ = 'Marco de Abreu, Kellen Sunderland, Anton Chernov, Pedro Larroy' -__version__ = '0.3' +__author__ = 'Marco de Abreu, Kellen Sunderland, Anton Chernov, Pedro Larroy, Leonard Lausen' +__version__ = '0.4' import argparse -import glob import pprint -import re import os -import shutil import signal import subprocess from itertools import chain -from subprocess import check_call, check_output +from subprocess import check_call from typing import * import yaml @@ -42,49 +37,18 @@ from safe_docker_run import SafeDockerClient from util import * -# NOTE: Temporary whitelist used until all Dockerfiles are refactored for docker compose -DOCKER_COMPOSE_WHITELIST = ('centos7_cpu', 'centos7_gpu_cu92', 'centos7_gpu_cu100', - 'centos7_gpu_cu101', 'centos7_gpu_cu102', 'ubuntu_cpu', - 'ubuntu_build_cuda', 'ubuntu_gpu_cu101', 'publish.test.centos7_cpu', - 'publish.test.centos7_gpu', 'android_armv7', 'android_armv8', - 'armv6', 'armv7', 'armv8', 'test.armv7', 'test.armv8') -# Files for docker compose -DOCKER_COMPOSE_FILES = set(('docker/build.centos7', 'docker/build.ubuntu', 'docker/build.android', - 'docker/build.arm', 'docker/test.arm', 'docker/publish.test.centos7')) - - -def get_dockerfiles_path(): - return "docker" - - -def get_platforms(path: str = get_dockerfiles_path(), legacy_only=False) -> List[str]: - """Get a list of architectures given our dockerfiles""" - dockerfiles = glob.glob(os.path.join(path, "Dockerfile.*")) - dockerfiles = set(filter(lambda x: x[-1] != '~', dockerfiles)) - files = set(map(lambda x: re.sub(r"Dockerfile.(.*)", r"\1", x), dockerfiles)) - if legacy_only: - files = files - DOCKER_COMPOSE_FILES - platforms = list(map(lambda x: os.path.split(x)[1], sorted(files))) - return platforms +def get_platforms() -> List[str]: + """Get a list of architectures declared in docker-compose.yml""" + with open("docker/docker-compose.yml", "r") as f: + compose_config = yaml.load(f.read(), yaml.SafeLoader) + return list(compose_config["services"].keys()) def get_docker_tag(platform: str, registry: str) -> str: """:return: docker tag to be used for the container""" - if platform in DOCKER_COMPOSE_WHITELIST: - with open("docker/docker-compose.yml", 
"r") as f: - compose_config = yaml.load(f.read(), yaml.SafeLoader) - return compose_config["services"][platform]["image"].replace('${DOCKER_CACHE_REGISTRY}', registry) - - platform = platform if any(x in platform for x in ['build.', 'publish.']) else 'build.{}'.format(platform) - if not registry: - registry = "mxnet_local" - return "{0}/{1}".format(registry, platform) - - -def get_dockerfile(platform: str, path=get_dockerfiles_path()) -> str: - platform = platform if any(x in platform for x in ['build.', 'publish.']) else 'build.{}'.format(platform) - return os.path.join(path, "Dockerfile.{0}".format(platform)) - + with open("docker/docker-compose.yml", "r") as f: + compose_config = yaml.load(f.read(), yaml.SafeLoader) + return compose_config["services"][platform]["image"].replace('${DOCKER_CACHE_REGISTRY}', registry) def build_docker(platform: str, registry: str, num_retries: int, no_cache: bool, cache_intermediate: bool = False) -> str: @@ -96,50 +60,18 @@ def build_docker(platform: str, registry: str, num_retries: int, no_cache: bool, :param no_cache: pass no-cache to docker to rebuild the images :return: Id of the top level image """ - tag = get_docker_tag(platform=platform, registry=registry) + logging.info('Building docker container \'%s\' based on ci/docker/docker-compose.yml', platform) + # We add a user with the same group as the executing non-root user so files created in the + # container match permissions of the local user. Same for the group. + cmd = ['docker-compose', '-f', 'docker/docker-compose.yml', 'build', + "--build-arg", "USER_ID={}".format(os.getuid()), + "--build-arg", "GROUP_ID={}".format(os.getgid())] + if cache_intermediate: + cmd.append('--no-rm') + cmd.append(platform) env = os.environ.copy() - - # Case 1: docker-compose - if platform in DOCKER_COMPOSE_WHITELIST: - logging.info('Building docker container tagged \'%s\' based on ci/docker/docker-compose.yml', tag) - # We add a user with the same group as the executing non-root user so files created in the - # container match permissions of the local user. Same for the group. - cmd = ['docker-compose', '-f', 'docker/docker-compose.yml', 'build', - "--build-arg", "USER_ID={}".format(os.getuid()), - "--build-arg", "GROUP_ID={}".format(os.getgid())] - if cache_intermediate: - cmd.append('--no-rm') - cmd.append(platform) - env["DOCKER_CACHE_REGISTRY"] = registry - else: # Case 2: Deprecated way, will be removed - # We add a user with the same group as the executing non-root user so files created in the - # container match permissions of the local user. Same for the group. - # - # These variables are used in the docker files to create user and group with these ids. - # see: docker/install/ubuntu_adduser.sh - # - # cache-from is needed so we use the cached images tagged from the remote via - # docker pull see: docker_cache.load_docker_cache - # - # This also prevents using local layers for caching: https://github.com/moby/moby/issues/33002 - # So to use local caching, we should omit the cache-from by using --no-dockerhub-cache argument to this - # script. - # - # This doesn't work with multi head docker files. 
- logging.info("Building docker container tagged '%s'", tag) - cmd = ["docker", "build", - "-f", get_dockerfile(platform), - "--build-arg", "USER_ID={}".format(os.getuid()), - "--build-arg", "GROUP_ID={}".format(os.getgid())] - if no_cache: - cmd.append("--no-cache") - if cache_intermediate: - cmd.append("--rm=false") - elif registry: - cmd.extend(["--cache-from", tag]) - cmd.extend(["-t", tag, get_dockerfiles_path()]) - + env["DOCKER_CACHE_REGISTRY"] = registry @retry(subprocess.CalledProcessError, tries=num_retries) def run_cmd(env=None): @@ -148,27 +80,6 @@ def run_cmd(env=None): run_cmd(env=env) - # Get image id by reading the tag. It's guaranteed (except race condition) that the tag exists. Otherwise, the - # check_call would have failed - image_id = _get_local_image_id(docker_tag=tag) - if not image_id: - raise FileNotFoundError('Unable to find docker image id matching with {}'.format(tag)) - return image_id - - -def _get_local_image_id(docker_tag): - """ - Get the image id of the local docker layer with the passed tag - :param docker_tag: docker tag - :return: Image id as string or None if tag does not exist - """ - cmd = ["docker", "images", "-q", docker_tag] - image_id_b = check_output(cmd) - image_id = image_id_b.decode('utf-8').strip() - if not image_id: - raise RuntimeError('Unable to find docker image id matching with tag {}'.format(docker_tag)) - return image_id - def buildir() -> str: return os.path.join(get_mxnet_root(), "build") @@ -291,21 +202,11 @@ def list_platforms() -> str: def load_docker_cache(platform, tag, docker_registry) -> None: """Imports tagged container from the given docker registry""" if docker_registry: - if platform in DOCKER_COMPOSE_WHITELIST: - env = os.environ.copy() - env["DOCKER_CACHE_REGISTRY"] = docker_registry - cmd = ['docker-compose', '-f', 'docker/docker-compose.yml', 'pull', platform] - logging.info("Running command: 'DOCKER_CACHE_REGISTRY=%s %s'", docker_registry, ' '.join(cmd)) - check_call(cmd, env=env) - return - - # noinspection PyBroadException - try: - import docker_cache - logging.info('Docker cache download is enabled from registry %s', docker_registry) - docker_cache.load_docker_cache(registry=docker_registry, docker_tag=tag) - except Exception: - logging.exception('Unable to retrieve Docker cache. Continue without...') + env = os.environ.copy() + env["DOCKER_CACHE_REGISTRY"] = docker_registry + cmd = ['docker-compose', '-f', 'docker/docker-compose.yml', 'pull', platform] + logging.info("Running command: 'DOCKER_CACHE_REGISTRY=%s %s'", docker_registry, ' '.join(cmd)) + check_call(cmd, env=env) else: logging.info('Distributed docker cache disabled') @@ -327,9 +228,9 @@ def main() -> int: parser = argparse.ArgumentParser(description="""Utility for building and testing MXNet on docker containers""", epilog="") - parser.add_argument("-p", "--platform", - help="platform", - type=str) + parser.add_argument("-p", "--platform", type=str, help= \ + "Platform. 
See ci/docker/docker-compose.yml for list of supported " \ + "platforms (services).") parser.add_argument("-b", "--build-only", help="Only build the container, don't build the project", @@ -339,10 +240,6 @@ def main() -> int: help="Only run the container, don't rebuild the container", action='store_true') - parser.add_argument("-a", "--all", - help="build for all platforms", - action='store_true') - parser.add_argument("-n", "--nvidiadocker", help="Use nvidia docker", action='store_true') @@ -443,32 +340,6 @@ def main() -> int: logging.critical("Execution of %s failed with status: %d", command, ret) return ret - elif args.all: - platforms = get_platforms() - platforms = [platform for platform in platforms if 'build.' in platform] - logging.info("Building for all architectures: %s", platforms) - logging.info("Artifacts will be produced in the build/ directory.") - for platform in platforms: - tag = get_docker_tag(platform=platform, registry=args.docker_registry) - load_docker_cache(platform=platform, tag=tag, docker_registry=args.docker_registry) - build_docker(platform, registry=args.docker_registry, num_retries=args.docker_build_retries, - no_cache=args.no_cache) - if args.build_only: - continue - shutil.rmtree(buildir(), ignore_errors=True) - build_platform = "build_{}".format(platform) - plat_buildir = os.path.abspath(os.path.join(get_mxnet_root(), '..', - "mxnet_{}".format(build_platform))) - if os.path.exists(plat_buildir): - logging.warning("%s already exists, skipping", plat_buildir) - continue - command = ["/work/mxnet/ci/docker/runtime_functions.sh", build_platform] - container_run( - docker_client=docker_client, platform=platform, nvidia_runtime=args.nvidiadocker, - shared_memory_size=args.shared_memory_size, command=command, docker_registry=args.docker_registry, - local_ccache_dir=args.ccache_dir, environment=environment) - shutil.move(buildir(), plat_buildir) - logging.info("Built files left in: %s", plat_buildir) else: parser.print_help() diff --git a/ci/dev_menu.py b/ci/dev_menu.py index cd2aa8d46e1e..dcc01723d83d 100644 --- a/ci/dev_menu.py +++ b/ci/dev_menu.py @@ -130,7 +130,6 @@ def provision_virtualenv(venv_path=DEFAULT_PYENV): ('[Docker] sanity_check. Check for linting and code formatting and licenses.', [ "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh sanity_check", - "ci/build.py --platform ubuntu_rat /work/runtime_functions.sh nightly_test_rat_check", ]), ('[Docker] Python3 CPU unittests', [ diff --git a/ci/docker/Dockerfile.build.ubuntu b/ci/docker/Dockerfile.build.ubuntu index 415e6ae881ae..26e4f7a5ee06 100644 --- a/ci/docker/Dockerfile.build.ubuntu +++ b/ci/docker/Dockerfile.build.ubuntu @@ -122,14 +122,6 @@ RUN python3 -m pip install cmake==3.16.6 && \ # Only OpenJDK 8 supported at this time.. 
RUN update-java-alternatives -s java-1.8.0-openjdk-amd64 -# julia not available on 18.04 -COPY install/ubuntu_julia.sh /work/ -RUN /work/ubuntu_julia.sh - -# MXNetJS nightly needs emscripten for wasm -COPY install/ubuntu_emscripten.sh /work/ -RUN /work/ubuntu_emscripten.sh - ARG USER_ID=0 COPY install/docker_filepermissions.sh /work/ RUN /work/docker_filepermissions.sh @@ -152,6 +144,23 @@ RUN cd /usr/local && \ cd thrust && \ git checkout 1.9.8 +# Install TensorRT +# We need to redeclare ARG due to +# https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact +ARG BASE_IMAGE +RUN export SHORT_CUDA_VERSION=${CUDA_VERSION%.*} && \ + apt-get update && \ + if [ ${SHORT_CUDA_VERSION} = 10.0 ]; then \ + apt-get install -y "libnvinfer-dev=5.1.5-1+cuda10.0"; \ + elif [ ${SHORT_CUDA_VERSION} = 10.1 ]; then \ + apt-get install -y "libnvinfer-dev=5.1.5-1+cuda10.1"; \ + elif [ ${SHORT_CUDA_VERSION} = 10.2 ]; then \ + apt-get install -y "libnvinfer-dev=6.0.1-1+cuda10.2"; \ + else \ + echo "ERROR: Cuda ${SHORT_CUDA_VERSION} not yet supported in Dockerfile.build.ubuntu"; \ + exit 1; \ + fi && \ + rm -rf /var/lib/apt/lists/* FROM gpu as gpuwithcudaruntimelibs # Special case because the CPP-Package requires the CUDA runtime libs diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_c b/ci/docker/Dockerfile.build.ubuntu_cpu_c deleted file mode 100644 index c7969da1bb1d..000000000000 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_c +++ /dev/null @@ -1,35 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU - -FROM ubuntu:16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_core.sh /work/ -RUN /work/ubuntu_core.sh - -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh - -RUN apt-get update && apt-get install -y doxygen graphviz - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet \ No newline at end of file diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_julia b/ci/docker/Dockerfile.build.ubuntu_cpu_julia deleted file mode 100644 index e100d4df09a8..000000000000 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_julia +++ /dev/null @@ -1,66 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU - -FROM ubuntu:16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_core.sh /work/ -RUN /work/ubuntu_core.sh - -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh - -COPY install/ubuntu_python.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_python.sh - -COPY install/ubuntu_scala.sh /work/ -COPY install/sbt.gpg /work/ -RUN /work/ubuntu_scala.sh - -COPY install/ubuntu_clojure.sh /work/ -RUN /work/ubuntu_clojure.sh - -COPY install/ubuntu_julia.sh /work/ -RUN /work/ubuntu_julia.sh - -COPY install/ubuntu_clang.sh /work/ -RUN /work/ubuntu_clang.sh - -COPY install/ubuntu_gcc8.sh /work/ -RUN /work/ubuntu_gcc8.sh - -COPY install/ubuntu_r.sh /work/ -COPY install/r.gpg /work/ -RUN /work/ubuntu_r.sh - -COPY install/ubuntu_docs.sh /work/ -RUN /work/ubuntu_docs.sh - -# Always last -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_r b/ci/docker/Dockerfile.build.ubuntu_cpu_r deleted file mode 100644 index 2354cb3b66d6..000000000000 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_r +++ /dev/null @@ -1,46 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU - -FROM ubuntu:16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_core.sh /work/ -RUN /work/ubuntu_core.sh - -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh - -COPY install/ubuntu_gcc8.sh /work/ -RUN /work/ubuntu_gcc8.sh - -COPY install/ubuntu_r.sh /work/ -COPY install/r.gpg /work/ -RUN /work/ubuntu_r.sh - -# Always last -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_scala b/ci/docker/Dockerfile.build.ubuntu_cpu_scala deleted file mode 100644 index a36e4426c39c..000000000000 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_scala +++ /dev/null @@ -1,53 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU - -FROM ubuntu:16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_core.sh /work/ -RUN /work/ubuntu_core.sh - -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh - -COPY install/ubuntu_gcc8.sh /work/ -RUN /work/ubuntu_gcc8.sh - -COPY install/ubuntu_python.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_python.sh - -COPY install/ubuntu_scala.sh /work/ -COPY install/sbt.gpg /work/ -RUN /work/ubuntu_scala.sh - -COPY install/ubuntu_clojure.sh /work/ -RUN /work/ubuntu_clojure.sh - -# Always last -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_tensorrt b/ci/docker/Dockerfile.build.ubuntu_gpu_tensorrt deleted file mode 100644 index 90bd772ecb17..000000000000 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_tensorrt +++ /dev/null @@ -1,47 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# -# Dockerfile to run MXNet on Ubuntu 16.04 for CPU - -FROM nvidia/cuda:10.0-devel - -WORKDIR /work/deps - -COPY install/ubuntu_core.sh /work/ -RUN /work/ubuntu_core.sh -COPY install/deb_ubuntu_ccache.sh /work/ -RUN /work/deb_ubuntu_ccache.sh -COPY install/ubuntu_python.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_python.sh -COPY install/tensorrt.sh /work -RUN /work/tensorrt.sh - -ARG USER_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -ENV CUDNN_VERSION=7.5.0.56 -COPY install/ubuntu_cudnn.sh /work/ -RUN /work/ubuntu_cudnn.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib -ENV CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}:/usr/local/cuda-10.0/targets/x86_64-linux/include/ diff --git a/ci/docker/Dockerfile.build.ubuntu_rat b/ci/docker/Dockerfile.build.ubuntu_rat deleted file mode 100644 index 234d2e42e946..000000000000 --- a/ci/docker/Dockerfile.build.ubuntu_rat +++ /dev/null @@ -1,36 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# Dockerfile to run the Apache RAT license check - -FROM ubuntu:16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_rat.sh /work/ -RUN /work/ubuntu_rat.sh - -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib diff --git a/ci/docker/Dockerfile.publish.test.ubuntu1604_cpu b/ci/docker/Dockerfile.publish.test.ubuntu1604_cpu deleted file mode 100644 index bbb7b6a0d7bd..000000000000 --- a/ci/docker/Dockerfile.publish.test.ubuntu1604_cpu +++ /dev/null @@ -1,39 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU - -FROM ubuntu:16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_base.sh /work/ -RUN /work/ubuntu_base.sh - -COPY install/ubuntu_scala.sh /work/ -RUN /work/ubuntu_scala.sh - -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib diff --git a/ci/docker/Dockerfile.publish.test.ubuntu1604_gpu b/ci/docker/Dockerfile.publish.test.ubuntu1604_gpu deleted file mode 100644 index 660461dc0cfa..000000000000 --- a/ci/docker/Dockerfile.publish.test.ubuntu1604_gpu +++ /dev/null @@ -1,39 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# Dockerfile to run MXNet on Ubuntu 16.04 for GPU - -FROM nvidia/cuda:9.2-cudnn7-devel-ubuntu16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_base.sh /work/ -RUN /work/ubuntu_base.sh - -COPY install/ubuntu_scala.sh /work/ -RUN /work/ubuntu_scala.sh - -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib diff --git a/ci/docker/Dockerfile.publish.test.ubuntu1804_cpu b/ci/docker/Dockerfile.publish.test.ubuntu1804_cpu deleted file mode 100644 index e3a8c193f234..000000000000 --- a/ci/docker/Dockerfile.publish.test.ubuntu1804_cpu +++ /dev/null @@ -1,41 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# -# Dockerfile to build and run MXNet on Ubuntu 18.04 for CPU - -FROM ubuntu:18.04 - -WORKDIR /work/deps - -ENV DEBIAN_FRONTEND noninteractive - -COPY install/ubuntu_base.sh /work/ -RUN /work/ubuntu_base.sh - -COPY install/ubuntu_scala.sh /work/ -RUN /work/ubuntu_scala.sh - -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib diff --git a/ci/docker/Dockerfile.publish.test.ubuntu1804_gpu b/ci/docker/Dockerfile.publish.test.ubuntu1804_gpu deleted file mode 100644 index 99f7e0d3eff9..000000000000 --- a/ci/docker/Dockerfile.publish.test.ubuntu1804_gpu +++ /dev/null @@ -1,41 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# Dockerfile to run MXNet on Ubuntu 18.04 for GPU - -FROM nvidia/cuda:9.2-cudnn7-devel-ubuntu18.04 - -WORKDIR /work/deps - -ENV DEBIAN_FRONTEND noninteractive - -COPY install/ubuntu_base.sh /work/ -RUN /work/ubuntu_base.sh - -COPY install/ubuntu_scala.sh /work/ -RUN /work/ubuntu_scala.sh - -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib diff --git a/ci/docker/Dockerfile.publish.ubuntu1604_cpu b/ci/docker/Dockerfile.publish.ubuntu1604_cpu deleted file mode 100644 index e5898b66c161..000000000000 --- a/ci/docker/Dockerfile.publish.ubuntu1604_cpu +++ /dev/null @@ -1,44 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# -# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU - -FROM ubuntu:16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_base.sh /work/ -RUN /work/ubuntu_base.sh - -COPY install/ubuntu_python.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_python.sh - -COPY install/ubuntu_scala.sh /work/ -COPY install/sbt.gpg /work/ -RUN /work/ubuntu_scala.sh - -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib diff --git a/ci/docker/Dockerfile.publish.ubuntu1604_gpu b/ci/docker/Dockerfile.publish.ubuntu1604_gpu deleted file mode 100644 index 0bd8b8259b90..000000000000 --- a/ci/docker/Dockerfile.publish.ubuntu1604_gpu +++ /dev/null @@ -1,44 +0,0 @@ -# -*- mode: dockerfile -*- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# Dockerfile to run MXNet on Ubuntu 16.04 for GPU - -FROM nvidia/cuda:9.2-cudnn7-devel-ubuntu16.04 - -WORKDIR /work/deps - -COPY install/ubuntu_base.sh /work/ -RUN /work/ubuntu_base.sh - -COPY install/ubuntu_python.sh /work/ -COPY install/requirements /work/ -RUN /work/ubuntu_python.sh - -COPY install/ubuntu_scala.sh /work/ -COPY install/sbt.gpg /work/ -RUN /work/ubuntu_scala.sh - -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -COPY runtime_functions.sh /work/ - -WORKDIR /work/mxnet -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib diff --git a/ci/docker/docker-compose.yml b/ci/docker/docker-compose.yml index 73beb232b1ca..cced098d7f11 100644 --- a/ci/docker/docker-compose.yml +++ b/ci/docker/docker-compose.yml @@ -206,3 +206,34 @@ services: BASE_IMAGE: nvidia/cuda:9.2-cudnn7-devel-centos7 cache_from: - ${DOCKER_CACHE_REGISTRY}/publish.test.centos7_gpu:latest + ################################################################################################### + # Miscellaneous containers + ################################################################################################### + jetson: + image: ${DOCKER_CACHE_REGISTRY}/build.jetson:latest + build: + context: . + dockerfile: Dockerfile.build.jetson + cache_from: + - ${DOCKER_CACHE_REGISTRY}/build.jetson:latest + ubuntu_cpu_jekyll: + image: ${DOCKER_CACHE_REGISTRY}/build.ubuntu_cpu_jekyll:latest + build: + context: . + dockerfile: Dockerfile.build.ubuntu_cpu_jekyll + cache_from: + - ${DOCKER_CACHE_REGISTRY}/build.ubuntu_cpu_jekyll:latest + ubuntu_cpu_python: + image: ${DOCKER_CACHE_REGISTRY}/build.ubuntu_cpu_python:latest + build: + context: . 
+ dockerfile: Dockerfile.build.ubuntu_cpu_python + cache_from: + - ${DOCKER_CACHE_REGISTRY}/build.ubuntu_cpu_python:latest + ubuntu_blc: + image: ${DOCKER_CACHE_REGISTRY}/build.ubuntu_blc:latest + build: + context: . + dockerfile: Dockerfile.build.ubuntu_blc + cache_from: + - ${DOCKER_CACHE_REGISTRY}/build.ubuntu_blc:latest diff --git a/ci/docker/install/export_gpg_keys.sh b/ci/docker/install/export_gpg_keys.sh deleted file mode 100755 index 604a27b98143..000000000000 --- a/ci/docker/install/export_gpg_keys.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -gpg --keyserver keyserver.ubuntu.com --recv 2EE0EA64E40A89B84B2DF73499E82A75642AC823 -gpg --output sbt.gpg --export scalasbt@gmail.com -gpg --keyserver keyserver.ubuntu.com --recv E084DAB9 -gpg --output r.gpg --export marutter@gmail.com diff --git a/ci/docker/install/r.gpg b/ci/docker/install/r.gpg deleted file mode 100644 index 77fd6341e9d44dc73c40172d3428ac99413db3df..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1519 zcmZ|OZ9EeQ0LSq?m~EbIp4!es7D+J_5iymNMxH4e!;)+z&q|rhX+w$+VUk5n-7cm) ztVj#Tj%=!V8b^|nBj;hKFju{}>&?CUzy5vx-x8p*9KNN#0|*1Kk3l~sHtw8zttc}n z541squ4z{-YFKdXC0=71mqzE^=E@fGFbegp)L(4EG3Tn;Ea}{u_FTk){Oy6xfNYNf znV$##xpD9$Cvk5EQ%3N_JM02U=K#phb#3QUB(ur{0be-PI7L2eBDf^oh-+9a2eXXL zTeznhH*V=^^AnWZ%#N_TnhDobgv;aN)ol*##3l>*P`lo&?5utZFp5!pYSFA4Ux*<8 z^@&jvpHXuOw){bL-7Ir*=E;ef1H*eqTev;}}*TNju6H zy&dP=%zwkscT}+it8@|utBlw%%1{m54It|C!k4kYsG5{>ch{ zMq$35(%81XHfPKF)1t+s zbz=aHk$x5AEw}6$cBY*o-5r_7?{rE(t*#5?SvYZ-_w~wkLhZzkX$M9)-1S?m6?uT| zUK_7PX=(1n2b_%xe+w%Q`d9q_ufENSHYNW zAn<{q)?o14%sju6wGY09neS_}nnP^p1LoO+V<2-a`bq}{EjPgN>!Tq=MQX*6#fB-Y zaFa`Zi_W2iw3*LHv;uT&s#p}s2D}!h}b^Qdz!;R&b z`bfX8OoX`7lOTnqgs4rm0ZQBjOHdNqV1{&8z0{PLp{h#SiTe4cjmnC+x$?ql73J8| zQc21CN7)!J{L+;N1>DNpOZPh@@i8mZ*G5xY68z^;cWbEE>%X(51C8W#zKT=@p_RVD z8&LiZukFX*n1w=R);P)09K?k1@x3zcU+>OeCOxGSNzHV!XJDmkVL=5!sJc3KBULks z?S3uB^X1K80}j~g5Tu#Q(YJ&dg~A}J+HYoESUe-N#{RlrgUC#B!BTih{^5xW$DRsO zhkOwyKGe$cc?57)cgP-{Hr&auFPAK^_v@AdqYTHAd%Q){p4fxAia0JqeWE|(PVc;F zxgEY{{bHvw6p*K diff --git a/ci/docker/install/sbt.gpg b/ci/docker/install/sbt.gpg deleted file mode 100644 index 664f01b37d017674079d04787790c3bf62ef0c95..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2210 zcmV;T2wnG?0u2OJX!7R)5CGFPNJP~V<0RgxtVW$#A+pUk;)WDKG}^0o z|AE~HWG$?FwOp^+W$^sr-_gqhBm50`5g>7?quS!+tvD4PQ-3~zT{&-tKaTQs$BYAz zA^sIS^d4HvrwYGO0TxYtZ%lu1Ed&=0$R~o%Iek|~dz3>q+vl;}X+MhT;grOVq6wF0 z(JkLecUz4b{cQCHizwwslA7~|m{nRSb?8L-z=v>(^WBTH#T1+U;swuvw{BsWfMG{+ z!bQhDEqt^7s#wShat`(Kx%8s2x}XoUfweObmItVfj#sh8Pctz7Z>@eO)_=EPu7ivg z)FR{*b1=#tRO>@r+YU@WV84~2&bRV^BGiI*{+tYl)-lHoo}bXSSg<~p^`wN6Z3CWZ zQz*fdR|I9WNG22@_~lim{s?fxC1yjqGP005hpPMD7P&qwrU|(ip+QtL6~~42PYo~q 
zB)|C6>fnnZ#JH(So+2S}wD$IZLtJv-!$H_TL6h`JLa?<0_ z0_~AX_N`0dGk*XP0RRECBXeSOAYyfCY-AvGZ*OcMJac1VY+-X^bU0I5^h36D$@QM+n(A2F?$$f zhGE1$L{aJiG{f)UhB=(9dvQ#iuQ2>^MGl~bm(vVAiO!LUzqJcFrv360s$C#j5_K(U z&tDh>L`P>((UziOoXa-7{E|L!0xsMj`ye2_p0%CFg8hIC=eXkew6QT(U-S7R0%VYb7$ZV< z#Kx}-YR3kx#Q;Xdax{nQ!%Q)Nr9kd|9NyC`)JqB}^kB&%i3DXOtk>HwB3tI~6Uq?k z+<(l)L1A0-;1*jmS(w1+sr@S_s3B%U*Ut>Z)=pQp+eEdHZJIkhZ;G}JIZ(m`OnMh{ zgOmnplSZ_v0xshMQ%Mto-Zf^${@J`oIf2aPnxJRYi7beL9rwz2Gb@4WGOTR; zHHUz>u(((QzcqGG2T7dpj6TrmDMhm{wX5=I z4mf8ig7_?%Pe#ButGog#f1R_Y{z;TxL(eFyUXb!e<5SyV(56;`$+RPN+hh9OhUrD7 z=PBPHcIuR^Qv?b}-4O{e`msI?OHyE|stgTWA#|iB)V(p-11a~!8Ob@+m0~C)>mV@` zWJ)4k?!&s^&J5Ox;=-){pfjQR6N9kVPYs?nB2t5z*PoB{v*ZIStNIH z?57kzx1?a{a4Rs8NCn#vM+&Y9&E)&9Pu3#a&-)cqxCkX7nCmr^R6MU6x zgp|lPE?FL^6bJM`>(W10r$YkVQa)oA4ijo;9f7j-TZM~kMmV4u?KrO`Y8FHUJ9#~8 z>-wB^}>_!q-^x<2g$&-Ny$44N_{r`bDI344rg zwRaWHil%XnoA$~g1t~If5fA@v4fQ@pjKl-O${8`pfN4v=Z1S6o;Y=QC>uBBeZUr1XrqmS4KgNf2* zMSWfq3_3-pOmDPaT1%egO}3+>6tdLatv776GrA++lA>)$mLi;EidfJsYq?4!~ptlVxsb5ZwG-~G 07; 1.0 -> 10 - local JLBINARY="julia-$1.tar.gz" - local JULIADIR="/work/julia$suffix" - local JULIA="${JULIADIR}/bin/julia" - - mkdir -p $JULIADIR - # The julia version in Ubuntu repo is too old - # We download the tarball from the official link: - # https://julialang.org/downloads/ - wget -qO $JLBINARY https://julialang-s3.julialang.org/bin/linux/x64/$1/julia-$2-linux-x86_64.tar.gz - tar xzf $JLBINARY -C $JULIADIR --strip 1 - rm $JLBINARY - - $JULIA -e 'using InteractiveUtils; versioninfo()' -} - -install_julia 0.7 0.7.0 -install_julia 1.0 1.0.4 diff --git a/ci/docker/install/ubuntu_nightly_tests.sh b/ci/docker/install/ubuntu_nightly_tests.sh deleted file mode 100755 index c80efed6c377..000000000000 --- a/ci/docker/install/ubuntu_nightly_tests.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -#Install steps for the nightly tests - -set -ex - -# Install for Compilation warning Nightly Test -# Adding ppas frequently fails due to busy gpg servers, retry 5 times with 5 minute delays. -for i in 1 2 3 4 5; do add-apt-repository -y ppa:ubuntu-toolchain-r/test && break || sleep 300; done - -apt-get update || true -apt-get -y install time - -# Install for RAT License Check Nightly Test -apt-get install -y subversion maven -y #>/dev/null - -# Packages needed for the Straight Dope Nightly tests. -pip3 install pandas scikit-image prompt_toolkit diff --git a/ci/docker/install/ubuntu_r.sh b/ci/docker/install/ubuntu_r.sh deleted file mode 100755 index 44ebf7c0799e..000000000000 --- a/ci/docker/install/ubuntu_r.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# build and install are separated so changes to build don't invalidate -# the whole docker cache for the image - -# Important Maintenance Instructions: -# Align changes with installation instructions in /get_started/ubuntu_setup.md -# Align with R install script: /docs/install/install_mxnet_ubuntu_r.sh - -set -ex -cd "$(dirname "$0")" -# install libraries for mxnet's r package on ubuntu -echo "deb http://cran.rstudio.com/bin/linux/ubuntu trusty/" >> /etc/apt/sources.list - -apt-key add r.gpg - -# Installing the latest version (3.3+) that is compatible with MXNet -add-apt-repository 'deb [arch=amd64,i386] https://cran.rstudio.com/bin/linux/ubuntu xenial/' - -apt-get update || true -apt-get install -y --allow-unauthenticated \ - libcairo2-dev \ - libssl-dev \ - libxml2-dev \ - libxt-dev \ - r-base \ - r-base-dev \ - texinfo \ - texlive \ - texlive-fonts-extra - -# Delete cran repository as it requires --allow-unauthenticated -find /etc/apt -name "*.list" | xargs sed -i 's/.*cran\.rstudio.com.*//' diff --git a/ci/docker/install/ubuntu_scala.sh b/ci/docker/install/ubuntu_scala.sh deleted file mode 100755 index 355e978e075c..000000000000 --- a/ci/docker/install/ubuntu_scala.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# build and install are separated so changes to build don't invalidate -# the whole docker cache for the image - -set -ex - -apt-get update || true -apt-get install -y \ - openjdk-8-jdk \ - openjdk-8-jre \ - software-properties-common \ - scala \ - maven diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index 2f0bf777c464..a8efc4951c6a 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -604,17 +604,16 @@ build_ubuntu_gpu_tensorrt() { rm -rf build mkdir -p build cd build - cmake \ - -DCMAKE_CXX_FLAGS=-I/usr/include/python${PYVER}\ - -DBUILD_SHARED_LIBS=ON ..\ - -G Ninja - ninja -j 1 -v onnx/onnx.proto - ninja -j 1 -v + cmake -DBUILD_SHARED_LIBS=ON -GNinja .. 
+ ninja onnx/onnx.proto + ninja export LIBRARY_PATH=`pwd`:`pwd`/onnx/:$LIBRARY_PATH export CPLUS_INCLUDE_PATH=`pwd`:$CPLUS_INCLUDE_PATH popd # Build ONNX-TensorRT + export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib + export CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}:/usr/local/cuda-10.1/targets/x86_64-linux/include/ pushd . cd 3rdparty/onnx-tensorrt/ mkdir -p build diff --git a/ci/docker_cache.py b/ci/docker_cache.py deleted file mode 100644 index 7fb946b53ebc..000000000000 --- a/ci/docker_cache.py +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -""" -Utility to handle distributed docker cache. This is done by keeping the entire image chain of a docker container -on an S3 bucket. This utility allows cache creation and download. After execution, the cache will be in an identical -state as if the container would have been built locally already. -""" - -import argparse -import logging -import os -import subprocess -import sys -from typing import * - -import build as build_util -from docker_login import login_dockerhub, logout_dockerhub -from util import retry - -DOCKER_CACHE_NUM_RETRIES = 3 -DOCKER_CACHE_TIMEOUT_MINS = 45 -PARALLEL_BUILDS = 10 -DOCKER_CACHE_RETRY_SECONDS = 5 - - -def build_save_containers(platforms, registry, load_cache, no_publish) -> int: - """ - Entry point to build and upload all built dockerimages in parallel - :param platforms: List of platforms - :param registry: Docker registry name - :param load_cache: Load cache before building - :return: 1 if error occurred, 0 otherwise - """ - from joblib import Parallel, delayed - if len(platforms) == 0: - return 0 - - platform_results = Parallel(n_jobs=PARALLEL_BUILDS, backend="multiprocessing")( - delayed(_build_save_container)(platform, registry, load_cache, no_publish) - for platform in platforms) - - is_error = False - for platform_result in platform_results: - if platform_result is not None: - logging.error('Failed to generate %s', platform_result) - is_error = True - - return 1 if is_error else 0 - - -def _build_save_container(platform, registry, load_cache, no_publish) -> Optional[str]: - """ - Build image for passed platform and upload the cache to the specified S3 bucket - :param platform: Platform - :param registry: Docker registry name - :param load_cache: Load cache before building - :return: Platform if failed, None otherwise - """ - docker_tag = build_util.get_docker_tag(platform=platform, registry=registry) - # Preload cache - if load_cache: - load_docker_cache(registry=registry, docker_tag=docker_tag) - - # Start building - logging.debug('Building %s as %s', platform, docker_tag) - try: - # Increase the number of retries for building the cache. 
- image_id = build_util.build_docker(platform=platform, registry=registry, num_retries=10, no_cache=False) - logging.info('Built %s as %s', docker_tag, image_id) - - # Push cache to registry - if not no_publish: - _upload_image(registry=registry, docker_tag=docker_tag, image_id=image_id) - return None - except Exception: - logging.exception('Unexpected exception during build of %s', docker_tag) - return platform - # Error handling is done by returning the errorous platform name. This is necessary due to - # Parallel being unable to handle exceptions - - -def _upload_image(registry, docker_tag, image_id) -> None: - """ - Upload the passed image by id, tag it with docker tag and upload to S3 bucket - :param registry: Docker registry name - :param docker_tag: Docker tag - :param image_id: Image id - :return: None - """ - # We don't have to retag the image since it is already in the right format - logging.info('Uploading %s (%s) to %s', docker_tag, image_id, registry) - push_cmd = ['docker', 'push', docker_tag] - subprocess.check_call(push_cmd) - - -@retry(target_exception=subprocess.TimeoutExpired, tries=DOCKER_CACHE_NUM_RETRIES, - delay_s=DOCKER_CACHE_RETRY_SECONDS) -def load_docker_cache(registry, docker_tag) -> None: - """ - Load the precompiled docker cache from the registry - :param registry: Docker registry name - :param docker_tag: Docker tag to load - :return: None - """ - # We don't have to retag the image since it's already in the right format - if not registry: - return - assert docker_tag - - logging.info('Loading Docker cache for %s from %s', docker_tag, registry) - pull_cmd = ['docker', 'pull', docker_tag] - - # Don't throw an error if the image does not exist - subprocess.run(pull_cmd, timeout=DOCKER_CACHE_TIMEOUT_MINS*60) - logging.info('Successfully pulled docker cache') - - -def delete_local_docker_cache(docker_tag): - """ - Delete the local docker cache for the entire docker image chain - :param docker_tag: Docker tag - :return: None - """ - history_cmd = ['docker', 'history', '-q', docker_tag] - - try: - image_ids_b = subprocess.check_output(history_cmd) - image_ids_str = image_ids_b.decode('utf-8').strip() - layer_ids = [id.strip() for id in image_ids_str.split('\n') if id != ''] - - delete_cmd = ['docker', 'image', 'rm', '--force'] - delete_cmd.extend(layer_ids) - subprocess.check_call(delete_cmd) - except subprocess.CalledProcessError as error: - # Could be caused by the image not being present - logging.debug('Error during local cache deletion %s', error) - - -def main() -> int: - """ - Utility to create and publish the Docker cache to Docker Hub - :return: - """ - # We need to be in the same directory than the script so the commands in the dockerfiles work as - # expected. 
But the script can be invoked from a different path - base = os.path.split(os.path.realpath(__file__))[0] - os.chdir(base) - - logging.getLogger().setLevel(logging.DEBUG) - logging.getLogger('botocore').setLevel(logging.INFO) - logging.getLogger('boto3').setLevel(logging.INFO) - logging.getLogger('urllib3').setLevel(logging.INFO) - logging.getLogger('s3transfer').setLevel(logging.INFO) - - def script_name() -> str: - return os.path.split(sys.argv[0])[1] - - logging.basicConfig(format='{}: %(asctime)-15s %(message)s'.format(script_name())) - - parser = argparse.ArgumentParser(description="Utility for preserving and loading Docker cache", epilog="") - parser.add_argument("--docker-registry", - help="Docker hub registry name", - type=str, - required=True) - parser.add_argument("--no-publish", help="Only build but don't publish. Used for testing.", - action='store_true') - - args = parser.parse_args() - - platforms = build_util.get_platforms(legacy_only=True) - - secret_name = os.environ['DOCKERHUB_SECRET_NAME'] - endpoint_url = os.environ['DOCKERHUB_SECRET_ENDPOINT_URL'] - region_name = os.environ['DOCKERHUB_SECRET_ENDPOINT_REGION'] - - try: - if not args.no_publish: - login_dockerhub(secret_name, endpoint_url, region_name) - return build_save_containers(platforms=platforms, registry=args.docker_registry, load_cache=True, no_publish=args.no_publish) - finally: - logout_dockerhub() - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/ci/docker_cache_requirements b/ci/docker_cache_requirements deleted file mode 100644 index d1272cb348c7..000000000000 --- a/ci/docker_cache_requirements +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-boto3==1.7.13 -botocore==1.10.13 -docutils==0.14 -jmespath==0.9.3 -joblib==0.11 -python-dateutil==2.7.2 -s3transfer==0.1.13 -six==1.11.0 diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy index f247ccb54fd6..655ae48ae527 100644 --- a/ci/jenkins/Jenkins_steps.groovy +++ b/ci/jenkins/Jenkins_steps.groovy @@ -278,7 +278,7 @@ def compile_unix_tensorrt_gpu(lib_name) { ws('workspace/build-tensorrt') { timeout(time: max_time, unit: 'MINUTES') { utils.init_git() - utils.docker_run('ubuntu_gpu_tensorrt', 'build_ubuntu_gpu_tensorrt', false) + utils.docker_run('ubuntu_gpu_cu101', 'build_ubuntu_gpu_tensorrt', false) utils.pack_lib(lib_name, mx_tensorrt_lib) } } @@ -1250,7 +1250,6 @@ def misc_test_docker_cache_build() { node(NODE_LINUX_CPU) { ws('workspace/docker_cache') { utils.init_git() - sh "python3 ./ci/docker_cache.py --docker-registry ${env.DOCKER_CACHE_REGISTRY} --no-publish" sh "cd ci && docker-compose -f docker/docker-compose.yml pull && docker-compose -f docker/docker-compose.yml build --parallel" } } diff --git a/ci/test_docker_cache.py b/ci/test_docker_cache.py deleted file mode 100644 index 81b315be4cff..000000000000 --- a/ci/test_docker_cache.py +++ /dev/null @@ -1,272 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -""" -Distributed Docker cache tests -""" - -import unittest.mock -import tempfile -import os -import logging -import subprocess -import sys -from unittest.mock import MagicMock - -sys.path.append(os.path.dirname(__file__)) -import docker_cache -import build as build_util - -DOCKERFILE_DIR = 'docker' -DOCKER_REGISTRY_NAME = 'test_registry' -DOCKER_REGISTRY_PORT = 5000 -DOCKER_REGISTRY_PATH = 'localhost:{}'.format(DOCKER_REGISTRY_PORT) - -class RedirectSubprocessOutput(object): - """ - Redirect the output of all subprocess.call calls to a readable buffer instead of writing it to stdout/stderr. - The output can then be retrieved with get_output. 
- """ - def __enter__(self): - self.buf_output = tempfile.TemporaryFile() - - def trampoline(*popenargs, **kwargs): - self.call(*popenargs, **kwargs) - - self.old_method = subprocess.call - subprocess.call = trampoline - return self - - def __exit__(self, *args): - logging.info('Releasing docker output buffer:\n%s', self.get_output()) - subprocess.call = self.old_method - self.buf_output.close() - - def call(self, *popenargs, **kwargs): - """ - Replace subprocess.call - :param popenargs: - :param timeout: - :param kwargs: - :return: - """ - kwargs['stderr'] = subprocess.STDOUT - kwargs['stdout'] = self.buf_output - return self.old_method(*popenargs, **kwargs) - - def get_output(self): - self.buf_output.seek(0) - return self.buf_output.read().decode('utf-8') - - -class TestDockerCache(unittest.TestCase): - """ - Test utility class - """ - def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) - - # We need to be in the same directory than the script so the commands in the dockerfiles work as - # expected. But the script can be invoked from a different path - base = os.path.split(os.path.realpath(__file__))[0] - os.chdir(base) - - docker_cache.login_dockerhub = MagicMock() # Override login - - # Stop in case previous execution was dirty - try: - self._stop_local_docker_registry() - except Exception: - pass - - # Start up docker registry - self._start_local_docker_registry() - - def tearDown(self): - # Stop docker registry - self._stop_local_docker_registry() - - @classmethod - def _start_local_docker_registry(cls): - # https://docs.docker.com/registry/deploying/#run-a-local-registrys - start_cmd = [ - 'docker', 'run', '-d', '-p', '{}:{}'.format(DOCKER_REGISTRY_PORT, DOCKER_REGISTRY_PORT), - '--name', DOCKER_REGISTRY_NAME, 'registry:2' - ] - subprocess.check_call(start_cmd) - - @classmethod - def _stop_local_docker_registry(cls): - # https://docs.docker.com/registry/deploying/#run-a-local-registry - stop_cmd = ['docker', 'container', 'stop', DOCKER_REGISTRY_NAME] - subprocess.check_call(stop_cmd) - - clean_cmd = ['docker', 'container', 'rm', '-v', DOCKER_REGISTRY_NAME] - subprocess.check_call(clean_cmd) - - def test_full_cache(self): - """ - Test whether it's possible to restore cache entirely - :return: - """ - dockerfile_content = """ - FROM busybox - RUN touch ~/file1 - RUN touch ~/file2 - RUN touch ~/file3 - RUN touch ~/file4 - """ - platform = 'test_full_cache' - docker_tag = build_util.get_docker_tag(platform=platform, registry=DOCKER_REGISTRY_PATH) - dockerfile_path = os.path.join(DOCKERFILE_DIR, 'Dockerfile.build.' 
+ platform) - try: - with open(dockerfile_path, 'w') as dockerfile_handle: - dockerfile_handle.write(dockerfile_content) - - # Warm up - docker_cache.delete_local_docker_cache(docker_tag=docker_tag) - - def warm_up_lambda_func(): - build_util.build_docker( - docker_binary='docker', - platform=platform, - registry=DOCKER_REGISTRY_PATH, - num_retries=3, - no_cache=False - ) - _assert_docker_build(lambda_func=warm_up_lambda_func, expected_cache_hit_count=0, - expected_cache_miss_count=4) - - # Assert local cache is properly primed - def primed_cache_lambda_func(): - build_util.build_docker( - docker_binary='docker', - platform=platform, - registry=DOCKER_REGISTRY_PATH, - num_retries=3, - no_cache=False - ) - _assert_docker_build(lambda_func=primed_cache_lambda_func, expected_cache_hit_count=4, - expected_cache_miss_count=0) - - # Upload and clean local cache - docker_cache.build_save_containers(platforms=[platform], registry=DOCKER_REGISTRY_PATH, load_cache=False) - docker_cache.delete_local_docker_cache(docker_tag=docker_tag) - - # Build with clean local cache and cache loading enabled - def clean_cache_lambda_func(): - docker_cache.build_save_containers( - platforms=[platform], registry=DOCKER_REGISTRY_PATH, load_cache=True) - _assert_docker_build(lambda_func=clean_cache_lambda_func, expected_cache_hit_count=4, - expected_cache_miss_count=0) - finally: - # Delete dockerfile - os.remove(dockerfile_path) - docker_cache.delete_local_docker_cache(docker_tag=docker_tag) - - def test_partial_cache(self): - """ - Test whether it's possible to restore cache and then pit it up partially by using a Dockerfile which shares - some parts - :return: - """ - # These two dockerfiles diverge at the fourth RUN statement. Their common parts (1-3) should be re-used - dockerfile_content_1 = """ - FROM busybox - RUN touch ~/file1 - RUN touch ~/file2 - RUN touch ~/file3 - RUN touch ~/file4 - """ - dockerfile_content_2 = """ - FROM busybox - RUN touch ~/file1 - RUN touch ~/file2 - RUN touch ~/file3 - RUN touch ~/file5 - RUN touch ~/file4 - RUN touch ~/file6 - """ - platform = 'test_partial_cache' - docker_tag = build_util.get_docker_tag(platform=platform, registry=DOCKER_REGISTRY_PATH) - dockerfile_path = os.path.join(DOCKERFILE_DIR, 'Dockerfile.build.' 
+ platform) - try: - # Write initial Dockerfile - with open(dockerfile_path, 'w') as dockerfile_handle: - dockerfile_handle.write(dockerfile_content_1) - - # Warm up - docker_cache.delete_local_docker_cache(docker_tag=docker_tag) - - def warm_up_lambda_func(): - build_util.build_docker( - docker_binary='docker', - platform=platform, - registry=DOCKER_REGISTRY_PATH, - num_retries=3, - no_cache=False - ) - _assert_docker_build(lambda_func=warm_up_lambda_func, expected_cache_hit_count=0, - expected_cache_miss_count=4) - - # Assert local cache is properly primed - def primed_cache_lambda_func(): - build_util.build_docker( - docker_binary='docker', - platform=platform, - registry=DOCKER_REGISTRY_PATH, - num_retries=3, - no_cache=False - ) - _assert_docker_build(lambda_func=primed_cache_lambda_func, expected_cache_hit_count=4, - expected_cache_miss_count=0) - - # Upload and clean local cache - docker_cache.build_save_containers(platforms=[platform], registry=DOCKER_REGISTRY_PATH, load_cache=False) - docker_cache.delete_local_docker_cache(docker_tag=docker_tag) - - # Replace Dockerfile with the second one, resulting in a partial cache hit - with open(dockerfile_path, 'w') as dockerfile_handle: - dockerfile_handle.write(dockerfile_content_2) - - # Test if partial cache is properly hit. It will attempt to load the cache from the first Dockerfile, - # resulting in a partial hit - def partial_cache_lambda_func(): - docker_cache.build_save_containers( - platforms=[platform], registry=DOCKER_REGISTRY_PATH, load_cache=True) - _assert_docker_build(lambda_func=partial_cache_lambda_func, expected_cache_hit_count=3, - expected_cache_miss_count=3) - - finally: - # Delete dockerfile - os.remove(dockerfile_path) - docker_cache.delete_local_docker_cache(docker_tag=docker_tag) - - -def _assert_docker_build(lambda_func, expected_cache_hit_count: int, expected_cache_miss_count: int): - with RedirectSubprocessOutput() as redirected_output: - lambda_func() - output = redirected_output.get_output() - assert output.count('Running in') == expected_cache_miss_count, \ - 'Expected {} "Running in", got {}. Log:{}'.\ - format(expected_cache_miss_count, output.count('Running in'), output) - assert output.count('Using cache') == expected_cache_hit_count, \ - 'Expected {} "Using cache", got {}. Log:{}'.\ - format(expected_cache_hit_count, output.count('Using cache'), output) diff --git a/ci/windows/test_jl07_cpu.ps1 b/ci/windows/test_jl07_cpu.ps1 deleted file mode 100644 index 4924957ddb68..000000000000 --- a/ci/windows/test_jl07_cpu.ps1 +++ /dev/null @@ -1,56 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -7z x -y windows_package.7z - -# set default output encoding to utf8 -$PSDefaultParameterValues['Out-File:Encoding'] = 'utf8' - -$env:MXNET_HOME = [System.IO.Path]::GetFullPath('.\windows_package') -$env:JULIA_URL = "https://julialang-s3.julialang.org/bin/winnt/x64/0.7/julia-0.7.0-win64.exe" -$env:JULIA_DEPOT_PATH = [System.IO.Path]::GetFullPath('.\julia-depot') - -$JULIA_DIR = [System.IO.Path]::GetFullPath('.\julia07') -$JULIA = "$JULIA_DIR\bin\julia" - -# Download most recent Julia Windows binary -[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12 -(New-Object System.Net.WebClient).DownloadFile($env:JULIA_URL, "julia-binary.exe") -if ($LastExitCode -ne 0) { Throw ("Error on downloading Julia Windows binary") } - -# Run installer silently, output to C:\julia07\julia -Start-Process -Wait "julia-binary.exe" -ArgumentList "/S /D=$JULIA_DIR" -if ($LastExitCode -ne 0) { Throw ("Error on installing Julia") } - -& $JULIA -e "using InteractiveUtils; versioninfo()" - -dir - -$src=' - using Pkg - Pkg.activate(".\\julia") - Pkg.build() - Pkg.test() -' - -$src > .\ci-build.jl - -# Redirect all stderr output to stdout, -# since Julia loggers output stuffs to stderr. -# Then, stderr triggers powershell NativeCommandError. -& $JULIA .\ci-build.jl 2>&1 | %{ "$_" } -if ($LastExitCode -eq 1) { Throw ("Error") } diff --git a/ci/windows/test_jl10_cpu.ps1 b/ci/windows/test_jl10_cpu.ps1 deleted file mode 100644 index b54b11a8a8ab..000000000000 --- a/ci/windows/test_jl10_cpu.ps1 +++ /dev/null @@ -1,56 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -7z x -y windows_package.7z - -# set default output encoding to utf8 -$PSDefaultParameterValues['Out-File:Encoding'] = 'utf8' - -$env:MXNET_HOME = [System.IO.Path]::GetFullPath('.\windows_package') -$env:JULIA_URL = "https://julialang-s3.julialang.org/bin/winnt/x64/1.0/julia-1.0.3-win64.exe" -$env:JULIA_DEPOT_PATH = [System.IO.Path]::GetFullPath('.\julia-depot') - -$JULIA_DIR = [System.IO.Path]::GetFullPath('.\julia10') -$JULIA = "$JULIA_DIR\bin\julia" - -# Download most recent Julia Windows binary -[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12 -(New-Object System.Net.WebClient).DownloadFile($env:JULIA_URL, "julia-binary.exe") -if ($LastExitCode -ne 0) { Throw ("Error on downloading Julia Windows binary") } - -# Run installer silently, output to C:\julia10\julia -Start-Process -Wait "julia-binary.exe" -ArgumentList "/S /D=$JULIA_DIR" -if ($LastExitCode -ne 0) { Throw ("Error on installing Julia") } - -& $JULIA -e "using InteractiveUtils; versioninfo()" - -dir - -$src=' - using Pkg - Pkg.activate(".\\julia") - Pkg.build() - Pkg.test() -' - -$src > .\ci-build.jl - -# Redirect all stderr output to stdout, -# since Julia loggers output stuffs to stderr. -# Then, stderr triggers powershell NativeCommandError. -& $JULIA .\ci-build.jl 2>&1 | %{ "$_" } -if ($LastExitCode -eq 1) { Throw ("Error") } From 7c2c8bab2c9fc4ecab9bb8ac1569b952f8494cd6 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Thu, 23 Jul 2020 00:31:49 +0000 Subject: [PATCH 2/4] Fix --- ci/docker/Dockerfile.build.ubuntu | 25 ++++---------- ci/docker/install/ubuntu_rat.sh | 34 ------------------- ci/docker/runtime_functions.sh | 6 ++-- ci/jenkins/Jenkins_steps.groovy | 2 +- .../apache_rat_license_check/README.md | 2 +- 5 files changed, 11 insertions(+), 58 deletions(-) delete mode 100755 ci/docker/install/ubuntu_rat.sh diff --git a/ci/docker/Dockerfile.build.ubuntu b/ci/docker/Dockerfile.build.ubuntu index 26e4f7a5ee06..adaf38003684 100644 --- a/ci/docker/Dockerfile.build.ubuntu +++ b/ci/docker/Dockerfile.build.ubuntu @@ -68,9 +68,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libzmq3-dev \ liblapack-dev \ libopencv-dev \ - # Caffe - caffe-cpu \ - libcaffe-cpu-dev \ # BytePS numactl \ libnuma-dev \ @@ -80,23 +77,11 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-pip \ python3-nose \ python3-nose-timer \ - # Scala - openjdk-8-jdk \ - openjdk-8-jre \ - maven \ - scala \ - # Clojure - clojure \ - leiningen \ - # R - r-base-core \ - r-cran-devtools \ - libcairo2-dev \ - libxml2-dev \ ## Documentation doxygen \ pandoc \ ## Build-dependencies for ccache 3.7.9 + autoconf \ gperf \ libb2-dev \ libzstd-dev && \ @@ -114,14 +99,16 @@ RUN cd /usr/local/src && \ cd /usr/local/src && \ rm -rf ccache +# RAT License Checker tool +RUN cd /usr/local/src && \ + wget https://archive.apache.org/dist/creadur/apache-rat-0.13/apache-rat-0.13-bin.tar.gz && \ + tar xf apache-rat-0.13-bin.tar.gz + # Python & cmake COPY install/requirements /work/ RUN python3 -m pip install cmake==3.16.6 && \ python3 -m pip install -r /work/requirements -# Only OpenJDK 8 supported at this time.. 
-RUN update-java-alternatives -s java-1.8.0-openjdk-amd64 - ARG USER_ID=0 COPY install/docker_filepermissions.sh /work/ RUN /work/docker_filepermissions.sh diff --git a/ci/docker/install/ubuntu_rat.sh b/ci/docker/install/ubuntu_rat.sh deleted file mode 100755 index 2c905fc275c7..000000000000 --- a/ci/docker/install/ubuntu_rat.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -ex - -echo "Install dependencies" -apt-get update || true -apt-get install -y subversion maven openjdk-8-jdk openjdk-8-jre - -echo "download RAT" -#svn co http://svn.apache.org/repos/asf/creadur/rat/trunk/ -svn co http://svn.apache.org/repos/asf/creadur/rat/branches/0.12-release/ - -echo "cd into directory" -cd 0.12-release - -echo "mvn install" -mvn -Dmaven.test.skip=true install diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index a8efc4951c6a..69526ec2d966 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -1054,15 +1054,15 @@ unittest_ubuntu_python3_arm() { # Functions that run the nightly Tests: #Runs Apache RAT Check on MXNet Source for License Headers -nightly_test_rat_check() { +test_rat_check() { set -e pushd . - cd /work/deps/0.12-release/apache-rat/target + cd /usr/local/src/apache-rat-0.13 # Use shell number 5 to duplicate the log output. 
It gets printed and stored in $OUTPUT at the same time https://stackoverflow.com/a/12451419 exec 5>&1 - OUTPUT=$(java -jar apache-rat-0.13-SNAPSHOT.jar -E /work/mxnet/tests/nightly/apache_rat_license_check/rat-excludes -d /work/mxnet|tee >(cat - >&5)) + OUTPUT=$(java -jar apache-rat-0.13.jar -E /work/mxnet/tests/nightly/apache_rat_license_check/rat-excludes -d /work/mxnet|tee >(cat - >&5)) ERROR_MESSAGE="Printing headers for text files without a valid license header" diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy index 655ae48ae527..8079420dd794 100644 --- a/ci/jenkins/Jenkins_steps.groovy +++ b/ci/jenkins/Jenkins_steps.groovy @@ -1228,7 +1228,7 @@ def sanity_rat_license() { node(NODE_LINUX_CPU) { ws('workspace/sanity-rat') { utils.init_git() - utils.docker_run('ubuntu_rat', 'nightly_test_rat_check', false) + utils.docker_run('ubuntu_cpu', 'test_rat_check', false) } } }] diff --git a/tests/nightly/apache_rat_license_check/README.md b/tests/nightly/apache_rat_license_check/README.md index 70ec665fa57f..388aaca3922d 100644 --- a/tests/nightly/apache_rat_license_check/README.md +++ b/tests/nightly/apache_rat_license_check/README.md @@ -31,7 +31,7 @@ The following commands can be used to run an Apache RAT check locally - Docker based 1-click-method: ``` -ci/build.py -p ubuntu_rat nightly_test_rat_check +ci/build.py -p ubuntu_cpu test_rat_check ``` Manual method: From 07a8241c14575d213b11d8bb4f2b39ca0901072b Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Thu, 23 Jul 2020 02:01:37 +0000 Subject: [PATCH 3/4] Fix --- ci/docker/Dockerfile.build.ubuntu | 1 + 1 file changed, 1 insertion(+) diff --git a/ci/docker/Dockerfile.build.ubuntu b/ci/docker/Dockerfile.build.ubuntu index adaf38003684..c9ec3f5a04fc 100644 --- a/ci/docker/Dockerfile.build.ubuntu +++ b/ci/docker/Dockerfile.build.ubuntu @@ -68,6 +68,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libzmq3-dev \ liblapack-dev \ libopencv-dev \ + libxml2-dev \ # BytePS numactl \ libnuma-dev \ From a0440d76c1cd83f3209c3ede13d63a3a4d756220 Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Thu, 23 Jul 2020 05:00:48 +0000 Subject: [PATCH 4/4] Fix ubuntu_cpu_jekyll --- ci/docker/Dockerfile.build.ubuntu_cpu_jekyll | 43 ++++---------------- ci/docker/runtime_functions.sh | 1 - 2 files changed, 9 insertions(+), 35 deletions(-) diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_jekyll b/ci/docker/Dockerfile.build.ubuntu_cpu_jekyll index 52ed2e083c69..6586a4e907d3 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_jekyll +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_jekyll @@ -18,53 +18,28 @@ # # Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU -FROM ubuntu:16.04 +FROM ruby:2.6.5-buster WORKDIR /work/deps -SHELL ["/bin/bash", "-l", "-c" ] - -RUN apt-get update && apt-get install -y \ - build-essential \ - git \ - zlib1g-dev \ - gnupg2 \ - curl \ - wget \ - unzip - -# Always last, except here to prevent conflicts with rvm -ARG USER_ID=0 -ARG GROUP_ID=0 -COPY install/ubuntu_adduser.sh /work/ -RUN /work/ubuntu_adduser.sh - -RUN curl -sSL https://rvm.io/mpapis.asc | gpg2 --import - && \ curl -sSL https://rvm.io/pkuczynski.asc | gpg2 --import - && \ curl -sSL https://get.rvm.io | bash -s stable - -RUN source /etc/profile.d/rvm.sh && \ rvm requirements && \ rvm install 2.6.5 && \ rvm use 2.6.5 --default - ENV BUNDLE_HOME=/work/deps/bundle ENV BUNDLE_APP_CONFIG=/work/deps/bundle ENV BUNDLE_BIN=/work/deps/bundle/bin ENV GEM_BIN=/work/deps/gem/bin ENV GEM_HOME=/work/deps/gem -RUN echo "gem: --no-ri --no-rdoc" > 
~/.gemrc -RUN yes | gem update --system -RUN yes | gem install --force bundler -RUN gem install jekyll +RUN echo "gem: --no-ri --no-rdoc" > ~/.gemrc && \ + yes | gem update --system && \ + yes | gem install --force bundler && \ + gem install jekyll ENV PATH=$BUNDLE_BIN:$GEM_BIN:$PATH COPY runtime_functions.sh /work/ -RUN chown -R jenkins_slave /work/ && \ - chown -R jenkins_slave /usr/local/bin && \ - chown -R jenkins_slave /usr/local/rvm +ARG USER_ID=0 +ARG GROUP_ID=0 +COPY install/ubuntu_adduser.sh /work/ +RUN /work/ubuntu_adduser.sh WORKDIR /work/mxnet diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index 69526ec2d966..3701597fe974 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -1174,7 +1174,6 @@ build_ubuntu_cpu_docs() { build_jekyll_docs() { set -ex - source /etc/profile.d/rvm.sh pushd . build_docs_setup