name: Run the README instructions periodically to ensure they work
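# Periodic documentation check: torchchat/utils/scripts/updown.py turns the
# commands embedded in README.md and the docs into shell scripts and runs them
# on a CUDA runner, so the published instructions stay working.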

on:
  schedule:
    - cron: '0 0 * * *'  # Runs daily at midnight UTC
  push:
    tags:
      - ciflow/periodic/*
  workflow_dispatch:
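  # In addition to the nightly cron and manual dispatch, pushing a
  # ciflow/periodic/* tag (e.g. via pytorch's ciflow labels) triggers the same runs.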

jobs:
  test-readme:
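    # id-token: write is presumably required by test-infra's reusable
    # linux_job_v2 workflow for OIDC-based cloud authentication on the runner.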
    permissions:
      id-token: write
      contents: read
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    secrets: inherit
    with:
      runner: linux.g5.4xlarge.nvidia.gpu
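      # secrets-env forwards the HF_TOKEN_PERIODIC secret (available because of
      # `secrets: inherit` above) into the job environment, e.g. for gated
      # Hugging Face downloads referenced in the README.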
      secrets-env: "HF_TOKEN_PERIODIC"
      gpu-arch-type: cuda
      gpu-arch-version: "12.4"
      timeout: 60
      script: |
        echo "::group::Print machine info"
        uname -a
        echo "::endgroup::"

        echo "::group::Create script to run README"
        python3 torchchat/utils/scripts/updown.py --create-sections --file README.md > ./run-readme.sh
        # For good measure: if something went wrong in the updown processor
        # and it did not error out, this trailing exit 1 fails the run.
        echo "exit 1" >> ./run-readme.sh
        echo "::endgroup::"

        echo "::group::Run README"
        echo "*******************************************"
        cat ./run-readme.sh
        echo "*******************************************"
        bash -x ./run-readme.sh
        echo "::endgroup::"
  test-quantization-any:
    permissions:
      id-token: write
      contents: read
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    secrets: inherit
    with:
      runner: linux.g5.4xlarge.nvidia.gpu
      gpu-arch-type: cuda
      gpu-arch-version: "12.4"
      timeout: 60
      script: |
        echo "::group::Print machine info"
        uname -a
        echo "::endgroup::"

        echo "::group::Create script to run quantization"
        python3 torchchat/utils/scripts/updown.py --create-sections --file docs/quantization.md > ./run-quantization.sh
        # For good measure: if something went wrong in the updown processor
        # and it did not error out, this trailing exit 1 fails the run.
        echo "exit 1" >> ./run-quantization.sh
        echo "::endgroup::"

        echo "::group::Run quantization"
        echo "*******************************************"
        cat ./run-quantization.sh
        echo "*******************************************"
        bash -x ./run-quantization.sh
        echo "::endgroup::"

  test-gguf-any:
    permissions:
      id-token: write
      contents: read
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    secrets: inherit
    with:
      runner: linux.g5.4xlarge.nvidia.gpu
      secrets-env: "HF_TOKEN_PERIODIC"
      gpu-arch-type: cuda
      gpu-arch-version: "12.4"
      timeout: 60
      script: |
        echo "::group::Print machine info"
        uname -a
        echo "::endgroup::"

        echo "::group::Create script to run gguf"
        python3 torchchat/utils/scripts/updown.py --file docs/GGUF.md > ./run-gguf.sh
        # For good measure: if something went wrong in the updown processor
        # and it did not error out, this trailing exit 1 fails the run.
        echo "exit 1" >> ./run-gguf.sh
        echo "::endgroup::"

        echo "::group::Run gguf"
        echo "*******************************************"
        cat ./run-gguf.sh
        echo "*******************************************"
        bash -x ./run-gguf.sh
        echo "::endgroup::"

        echo "::group::Completion"
        echo "tests complete"
        echo "*******************************************"
        echo "::endgroup::"