Merge branch 'mainline' into contributing-update
vicilliar authored Oct 26, 2023
2 parents d39f231 + 4ff9055 commit fd98040
Showing 178 changed files with 14,260 additions and 5,383 deletions.
3 changes: 3 additions & 0 deletions .dockerignore
@@ -1,3 +1,6 @@
#marqo assets
assets/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
7 changes: 4 additions & 3 deletions .github/workflows/arm64_local_os_CI.yml
@@ -1,6 +1,7 @@
name: arm64_local_os_CI
# runs Integration Tests on Local OS on an ARM64 Machine
on:
workflow_call:
workflow_dispatch:
inputs:
image_to_test:
@@ -42,12 +43,12 @@ jobs:

# ARM Runner Image
ec2-image-id: ${{ secrets.ARM_EC2_IMAGE_ID }}
ec2-instance-type: a1.xlarge
ec2-instance-type: t4g.xlarge
subnet-id: ${{ secrets.ARM_SUBNET_ID }}
security-group-id: ${{ secrets.ARM_SECURITY_GROUP_ID }}

Test-Marqo:
name: Run Marqo Test Suite
name: Run arm64 API Tests
needs: Start-Runner # required to start the main job when the runner is ready
runs-on: ${{ needs.start-runner.outputs.label }} # run the job on the newly created runner

@@ -114,4 +115,4 @@ jobs:
mode: stop
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
label: ${{ needs.start-runner.outputs.label }}
ec2-instance-id: ${{ needs.start-runner.outputs.ec2-instance-id }}
ec2-instance-id: ${{ needs.start-runner.outputs.ec2-instance-id }}
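
The `workflow_call:` trigger added here (and to the other CI workflows in this commit) turns the workflow into a reusable workflow, which is what lets the new run_all_tests.yml invoke it as a job. A minimal caller sketch, mirroring what run_all_tests.yml does further down (the job name is illustrative):

jobs:
  run-arm64-tests:
    uses: ./.github/workflows/arm64_local_os_CI.yml
    secrets: inherit  # forward repository secrets (e.g. ARM_EC2_IMAGE_ID) to the called workflow

The same pattern applies to the CUDA, DIND, local-OS, large-model and unit-test workflows below.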
3 changes: 2 additions & 1 deletion .github/workflows/cuda_dind_os_CI.yml
@@ -2,6 +2,7 @@ name: cuda_dind_os_CI
# runs GPU-enabled Integration Tests on DIND OS on AMD64 machine

on:
workflow_call:
workflow_dispatch:
inputs:
image_to_test:
@@ -47,7 +48,7 @@ jobs:
security-group-id: ${{ secrets.CUDA_SECURITY_GROUP_ID }}

Test-Marqo:
name: Run Marqo Test Suite
name: Run CUDA Docker-in-Docker API Tests
needs: Start-Runner # required to start the main job when the runner is ready
runs-on: ${{ needs.start-runner.outputs.label }} # run the job on the newly created runner

3 changes: 2 additions & 1 deletion .github/workflows/dind_os_CI.yml
@@ -2,6 +2,7 @@ name: dind_os_CI
# runs Integration Tests on DIND OS on AMD64 machine

on:
workflow_call:
workflow_dispatch:
inputs:
image_to_test:
@@ -45,7 +46,7 @@ jobs:
security-group-id: ${{ secrets.AMD_SECURITY_GROUP_ID }}

Test-Marqo:
name: Run Marqo Test Suite
name: Run Docker-in-Docker API Tests
needs: Start-Runner # required to start the main job when the runner is ready
runs-on: ${{ needs.start-runner.outputs.label }} # run the job on the newly created runner

76 changes: 30 additions & 46 deletions .github/workflows/largemodel_unit_test_CI.yml
@@ -1,7 +1,9 @@
name: largemodel_unit_test_CI
# runs unit tests on CUDA machine with large model tests.

on: workflow_dispatch
on:
workflow_call:
workflow_dispatch:

permissions:
contents: read
@@ -32,69 +34,51 @@ jobs:
security-group-id: ${{ secrets.CUDA_SECURITY_GROUP_ID }}

Test-Marqo:
name: Run Marqo Test Suite
name: Run Large Model Unit Tests
needs: Start-Runner # required to start the main job when the runner is ready
runs-on: ${{ needs.start-runner.outputs.label }} # run the job on the newly created runner

environment: marqo-test-suite

steps:

- name: Checkout marqo repo
uses: actions/checkout@v3
with:
fetch-depth: 0
path: marqo

- name: Set up Python 3.8
uses: actions/setup-python@v3
with:
python-version: "3.8"
cache: "pip"

- name: Install Dependencies
run: |
#pip install -r requirements.txt
pip install tox==3.26
pip install flake8
# TODO: Figure out how to make linting work on self-hosted runner
# usual error: $HOME not set
#- name: Get Changed Directories
# id: changed-dir
# uses: tj-actions/[email protected]
# with:
# dir_names: true
# run: |
# export HOME=$pwd

#- name: Lint Changed Directories with flake8
# run: |
# for dir in ${{ steps.changed-dir.outputs.all_changed_files }}; do
# echo "$dir was changed"
# stop the build if there are Python syntax errors or undefined names
# flake8 $dir --count --select=E9,F63,F7,F82 --show-source --statistics --filename *.py
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
# flake8 $dir --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics --filename *.py
# done

- name: Checkout marqo-api-tests repo

- name: Checkout marqo-base for requirements
uses: actions/checkout@v3
with:
repository: marqo-ai/marqo-api-tests

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- name: Set up Environment
repository: marqo-ai/marqo-base
path: marqo-base

- name: Install dependencies
run: |
# Set up conf file
echo 'export MARQO_API_TESTS_ROOT="${{ github.workspace }}"' >> conf
- name: Run Unit Tests
pip install -r marqo-base/requirements.txt
# override base requirements with marqo requirements, if needed:
pip install -r marqo/requirements.txt
pip install pytest==7.4.0
- name: start Marqo-os
run: |
export MQ_API_TEST_BRANCH=$(echo "${GITHUB_REF}" | cut -d'/' -f3-)
tox -e py3-local_os_unit_tests_w_requirements -- --largemodel
export LOCAL_OPENSEARCH_URL="https://localhost:9200"
docker run --name marqo-os -d -p 9200:9200 -p 9600:9600 -e "discovery.type=single-node" marqoai/marqo-os:0.0.3 || exit
# wait for marqo-os to start:
timeout 10m bash -c 'until [[ $(curl -v --silent --insecure $LOCAL_OPENSEARCH_URL 2>&1 | grep Unauthorized) ]]; do sleep 0.1; done;' || \
(echo "Marqo-os did not start in time" && exit 1)
- name: Run Large Model Unit Tests
run: |
export LOCAL_OPENSEARCH_URL="https://localhost:9200"
export PYTHONPATH="./marqo/tests:./marqo/src:./marqo"
pytest marqo/tests --largemodel
Stop-Runner:
name: Stop self-hosted EC2 runner
needs:
3 changes: 2 additions & 1 deletion .github/workflows/local_os_CI.yml
@@ -2,6 +2,7 @@ name: local_os_CI
# runs Integration Tests on LOCAL OS on AMD64 machine

on:
workflow_call:
workflow_dispatch:
inputs:
image_to_test:
@@ -45,7 +46,7 @@ jobs:
security-group-id: ${{ secrets.AMD_SECURITY_GROUP_ID }}

Test-Marqo:
name: Run Marqo Test Suite
name: Run Local Marqo-OS API Tests
needs: Start-Runner # required to start the main job when the runner is ready
runs-on: ${{ needs.start-runner.outputs.label }} # run the job on the newly created runner

134 changes: 134 additions & 0 deletions .github/workflows/run_all_tests.yml
@@ -0,0 +1,134 @@
# Runs all Marqo tests.
# - The goal is a single workflow that, if all jobs pass, is sufficient to
# merge.
# - This can be run manually. It is automatically triggered if a PR is
# approved.
# - The automatic trigger on PR approvals is skipped if only markdown files
# have changed.

name: Run all Marqo tests

on:
pull_request_review:
types: [submitted]
branches:
- mainline
workflow_dispatch:

permissions:
contents: read

jobs:
should-tests-run:
name: Determine whether tests should run
runs-on: ubuntu-latest
outputs:
decision: ${{ steps.decision.outputs.result }}
steps:
- name: Checkout repository
uses: actions/checkout@v2

- name: Check if only markdown files have changed
id: filter
uses: dorny/paths-filter@v2
with:
filters: |
proceed:
- '**'
- '!*.md'
- name: Decide whether to run tests
id: decision
run: |
if [[ "${{ github.event_name }}" == "pull_request_review" && \
"${{ github.event.review.state }}" == "approved" && \
"${{ steps.filter.outputs.proceed }}" == "true" ]] \
|| [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
echo "::set-output name=result::true"
else
echo "::set-output name=result::false"
fi
run-arm64-tests:
needs: should-tests-run
if: needs.should-tests-run.outputs.decision == 'true'
uses: ./.github/workflows/arm64_local_os_CI.yml
secrets: inherit

run-cuda-dind-tests:
needs: should-tests-run
if: needs.should-tests-run.outputs.decision == 'true'
uses: ./.github/workflows/cuda_dind_os_CI.yml
secrets: inherit

run-dind-os-tests:
needs: should-tests-run
if: needs.should-tests-run.outputs.decision == 'true'
uses: ./.github/workflows/dind_os_CI.yml
secrets: inherit

run-largemodel-unit-tests:
needs: should-tests-run
if: needs.should-tests-run.outputs.decision == 'true'
uses: ./.github/workflows/largemodel_unit_test_CI.yml
secrets: inherit

run-local-os-tests:
needs: should-tests-run
if: needs.should-tests-run.outputs.decision == 'true'
uses: ./.github/workflows/local_os_CI.yml
secrets: inherit

run-unit-tests:
needs: should-tests-run
if: needs.should-tests-run.outputs.decision == 'true'
uses: ./.github/workflows/unit_test_200gb_CI.yml
secrets: inherit

summary:
if: ${{ always() }}
name: Test Summary
needs: [should-tests-run, run-arm64-tests, run-cuda-dind-tests, run-dind-os-tests, run-largemodel-unit-tests, run-local-os-tests, run-unit-tests]
runs-on: ubuntu-latest
steps:
- name: Check test results
run: |
# If tests weren't intended to run, consider this a success
if [[ "${{ needs.should-tests-run.outputs.decision }}" != "true" ]]; then
echo "Tests were skipped. No further checks required."
exit 0
fi
if [[ "${{ needs.run-arm64-tests.result }}" != "success" && "${{ needs.run-arm64-tests.result }}" != "skipped" ]]; then
echo "Job run-arm64-tests did not succeed."
exit 1
fi

if [[ "${{ needs.run-cuda-dind-tests.result }}" != "success" && "${{ needs.run-cuda-dind-tests.result }}" != "skipped" ]]; then
echo "Job run-cuda-dind-tests did not succeed."
exit 1
fi

if [[ "${{ needs.run-dind-os-tests.result }}" != "success" && "${{ needs.run-dind-os-tests.result }}" != "skipped" ]]; then
echo "Job run-dind-os-tests did not succeed."
exit 1
fi

if [[ "${{ needs.run-largemodel-unit-tests.result }}" != "success" && "${{ needs.run-largemodel-unit-tests.result }}" != "skipped" ]]; then
echo "Job run-largemodel-unit-tests did not succeed."
exit 1
fi

if [[ "${{ needs.run-local-os-tests.result }}" != "success" && "${{ needs.run-local-os-tests.result }}" != "skipped" ]]; then
echo "Job run-local-os-tests did not succeed."
exit 1
fi

if [[ "${{ needs.run-unit-tests.result }}" != "success" && "${{ needs.run-unit-tests.result }}" != "skipped" ]]; then
echo "Job run-unit-tests did not succeed."
exit 1
fi

echo "All tests either passed or were skipped."


