diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml index 8b6c38b9c7ca..d856f536e4b9 100644 --- a/.github/workflows/backend.yml +++ b/.github/workflows/backend.yml @@ -78,6 +78,19 @@ jobs: dockerfile: "./backend/Dockerfile.python" context: "./" ubuntu-version: '2404' + - build-type: '' + cuda-major-version: "" + cuda-minor-version: "" + platforms: 'linux/amd64' + tag-latest: 'auto' + tag-suffix: '-cpu-moonshine' + runs-on: 'ubuntu-latest' + base-image: "ubuntu:24.04" + skip-drivers: 'true' + backend: "moonshine" + dockerfile: "./backend/Dockerfile.python" + context: "./" + ubuntu-version: '2404' # CUDA 12 builds - build-type: 'cublas' cuda-major-version: "12" @@ -222,6 +235,19 @@ jobs: dockerfile: "./backend/Dockerfile.python" context: "./" ubuntu-version: '2404' + - build-type: 'cublas' + cuda-major-version: "12" + cuda-minor-version: "9" + platforms: 'linux/amd64' + tag-latest: 'auto' + tag-suffix: '-gpu-nvidia-cuda-12-moonshine' + runs-on: 'ubuntu-latest' + base-image: "ubuntu:24.04" + skip-drivers: 'false' + backend: "moonshine" + dockerfile: "./backend/Dockerfile.python" + context: "./" + ubuntu-version: '2404' - build-type: 'cublas' cuda-major-version: "12" cuda-minor-version: "9" @@ -444,6 +470,19 @@ jobs: dockerfile: "./backend/Dockerfile.python" context: "./" ubuntu-version: '2404' + - build-type: 'cublas' + cuda-major-version: "13" + cuda-minor-version: "0" + platforms: 'linux/amd64' + tag-latest: 'auto' + tag-suffix: '-gpu-nvidia-cuda-13-moonshine' + runs-on: 'ubuntu-latest' + base-image: "ubuntu:24.04" + skip-drivers: 'false' + backend: "moonshine" + dockerfile: "./backend/Dockerfile.python" + context: "./" + ubuntu-version: '2404' - build-type: 'cublas' cuda-major-version: "13" cuda-minor-version: "0" diff --git a/.github/workflows/test-extra.yml b/.github/workflows/test-extra.yml index 66382e1a7b20..bbefad7e1911 100644 --- a/.github/workflows/test-extra.yml +++ b/.github/workflows/test-extra.yml @@ -247,3 +247,22 @@ jobs: run: | make --jobs=5 --output-sync=target -C backend/python/coqui make --jobs=5 --output-sync=target -C backend/python/coqui test + tests-moonshine: + runs-on: ubuntu-latest + steps: + - name: Clone + uses: actions/checkout@v6 + with: + submodules: true + - name: Dependencies + run: | + sudo apt-get update + sudo apt-get install build-essential ffmpeg + sudo apt-get install -y ca-certificates cmake curl patch python3-pip + # Install UV + curl -LsSf https://astral.sh/uv/install.sh | sh + pip install --user --no-cache-dir grpcio-tools==1.64.1 + - name: Test moonshine + run: | + make --jobs=5 --output-sync=target -C backend/python/moonshine + make --jobs=5 --output-sync=target -C backend/python/moonshine test \ No newline at end of file diff --git a/AGENTS.md b/AGENTS.md index 273f456ae278..bc8b966d15c2 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -15,6 +15,150 @@ Let's say the user wants to build a particular backend for a given platform. For - The user may say they want to build AMD or ROCM instead of hipblas, or Intel instead of SYCL or NVIDIA insted of l4t or cublas. Ask for confirmation if there is ambiguity. - Sometimes the user may need extra parameters to be added to `docker build` (e.g. `--platform` for cross-platform builds or `--progress` to view the full logs), in which case you can generate the `docker build` command directly. +## Adding a New Backend + +When adding a new backend to LocalAI, you need to update several files to ensure the backend is properly built, tested, and registered. 
Here's a step-by-step guide based on the pattern used for adding backends like `moonshine`: + +### 1. Create Backend Directory Structure + +Create the backend directory under the appropriate location: +- **Python backends**: `backend/python/<backend-name>/` +- **Go backends**: `backend/go/<backend-name>/` +- **C++ backends**: `backend/cpp/<backend-name>/` + +For Python backends, you'll typically need: +- `backend.py` - Main gRPC server implementation +- `Makefile` - Build configuration +- `install.sh` - Installation script for dependencies +- `protogen.sh` - Protocol buffer generation script +- `requirements.txt` - Python dependencies +- `run.sh` - Runtime script +- `test.py` / `test.sh` - Test files + +### 2. Add Build Configurations to `.github/workflows/backend.yml` + +Add build matrix entries for each platform/GPU type you want to support. Look at similar backends (e.g., `chatterbox`, `faster-whisper`) for reference. + +**Placement in file:** +- CPU builds: Add after other CPU builds (e.g., after `cpu-chatterbox`) +- CUDA 12 builds: Add after other CUDA 12 builds (e.g., after `gpu-nvidia-cuda-12-chatterbox`) +- CUDA 13 builds: Add after other CUDA 13 builds (e.g., after `gpu-nvidia-cuda-13-chatterbox`) + +**Additional build types you may need:** +- ROCm/HIP: Use `build-type: 'hipblas'` with `base-image: "rocm/dev-ubuntu-24.04:6.4.4"` +- Intel/SYCL: Use `build-type: 'intel'` or `build-type: 'sycl_f16'`/`'sycl_f32'` with `base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"` +- L4T (ARM): Use `build-type: 'l4t'` with `platforms: 'linux/arm64'` and `runs-on: 'ubuntu-24.04-arm'` + +### 3. Add Backend Metadata to `backend/index.yaml` + +**Step 3a: Add Meta Definition** + +Add a YAML anchor definition in the `## metas` section (around line 2-300). Look for similar backends to use as a template, such as `diffusers` or `chatterbox`. + +**Step 3b: Add Image Entries** + +Add image entries at the end of the file, following the pattern of similar backends such as `diffusers` or `chatterbox`. Include both `latest` (production) and `master` (development) tags. + +### 4. Update the Makefile + +The Makefile needs to be updated in several places to support building and testing the new backend: + +**Step 4a: Add to `.NOTPARALLEL`** + +Add `backends/<backend-name>` to the `.NOTPARALLEL` line (around line 2) to prevent parallel execution conflicts: + +```makefile +.NOTPARALLEL: ... backends/<backend-name> +``` + +**Step 4b: Add to `prepare-test-extra`** + +Add the backend to the `prepare-test-extra` target (around line 312) to prepare it for testing: + +```makefile +prepare-test-extra: protogen-python + ... + $(MAKE) -C backend/python/<backend-name> +``` + +**Step 4c: Add to `test-extra`** + +Add the backend to the `test-extra` target (around line 319) to run its tests: + +```makefile +test-extra: prepare-test-extra + ... + $(MAKE) -C backend/python/<backend-name> test +``` + +**Step 4d: Add Backend Definition** + +Add a backend definition variable in the backend definitions section (around line 428-457).
The format depends on the backend type: + +**For Python backends with root context** (like `faster-whisper`, `bark`): +```makefile +BACKEND_<NAME> = <backend-name>|python|.|false|true +``` + +**For Python backends with `./backend` context** (like `chatterbox`, `moonshine`): +```makefile +BACKEND_<NAME> = <backend-name>|python|./backend|false|true +``` + +**For Go backends**: +```makefile +BACKEND_<NAME> = <backend-name>|golang|.|false|true +``` + +**Step 4e: Generate Docker Build Target** + +Add an eval call to generate the docker-build target (around line 480-501): + +```makefile +$(eval $(call generate-docker-build-target,$(BACKEND_<NAME>))) +``` + +**Step 4f: Add to `docker-build-backends`** + +Add `docker-build-<backend-name>` to the `docker-build-backends` target (around line 507): + +```makefile +docker-build-backends: ... docker-build-<backend-name> +``` + +**Determining the Context:** + +- If the backend is in `backend/python/<backend-name>/` and uses `./backend` as context in the workflow file, use `./backend` context +- If the backend is in `backend/python/<backend-name>/` but uses `.` as context in the workflow file, use `.` context +- Check similar backends to determine the correct context + +### 5. Verification Checklist + +After adding a new backend, verify: + +- [ ] Backend directory structure is complete with all necessary files +- [ ] Build configurations added to `.github/workflows/backend.yml` for all desired platforms +- [ ] Meta definition added to `backend/index.yaml` in the `## metas` section +- [ ] Image entries added to `backend/index.yaml` for all build variants (latest + development) +- [ ] Tag suffixes match between workflow file and index.yaml +- [ ] Makefile updated with all 6 required changes (`.NOTPARALLEL`, `prepare-test-extra`, `test-extra`, backend definition, docker-build target eval, `docker-build-backends`) +- [ ] No YAML syntax errors (check with linter) +- [ ] No Makefile syntax errors (check with linter) +- [ ] Follows the same pattern as similar backends (e.g., if it's a transcription backend, follow the `faster-whisper` pattern) + +### 6.
Example: Adding a Python Backend + +For reference, when `moonshine` was added: +- **Files created**: `backend/python/moonshine/{backend.py, Makefile, install.sh, protogen.sh, requirements.txt, run.sh, test.py, test.sh}` +- **Workflow entries**: 3 build configurations (CPU, CUDA 12, CUDA 13) +- **Index entries**: 1 meta definition + 6 image entries (cpu, cuda12, cuda13 × latest/development) +- **Makefile updates**: + - Added to `.NOTPARALLEL` line + - Added to `prepare-test-extra` and `test-extra` targets + - Added `BACKEND_MOONSHINE = moonshine|python|./backend|false|true` + - Added eval for docker-build target generation + - Added `docker-build-moonshine` to `docker-build-backends` + # Coding style - The project has the following .editorconfig diff --git a/Makefile b/Makefile index 9eb59fefb8ba..d703b1e00d0d 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ # Disable parallel execution for backend builds -.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm +.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/moonshine GOCMD=go GOTEST=$(GOCMD) test @@ -315,6 +315,7 @@ prepare-test-extra: protogen-python $(MAKE) -C backend/python/chatterbox $(MAKE) -C backend/python/vllm $(MAKE) -C backend/python/vibevoice + $(MAKE) -C backend/python/moonshine test-extra: prepare-test-extra $(MAKE) -C backend/python/transformers test @@ -322,6 +323,7 @@ test-extra: prepare-test-extra $(MAKE) -C backend/python/chatterbox test $(MAKE) -C backend/python/vllm test $(MAKE) -C backend/python/vibevoice test + $(MAKE) -C backend/python/moonshine test DOCKER_IMAGE?=local-ai DOCKER_AIO_IMAGE?=local-ai-aio @@ -455,6 +457,7 @@ BACKEND_VLLM = vllm|python|./backend|false|true BACKEND_DIFFUSERS = diffusers|python|./backend|--progress=plain|true BACKEND_CHATTERBOX = chatterbox|python|./backend|false|true BACKEND_VIBEVOICE = vibevoice|python|./backend|--progress=plain|true +BACKEND_MOONSHINE = moonshine|python|./backend|false|true # Helper function to build docker image for a backend # Usage: $(call docker-build-backend,BACKEND_NAME,DOCKERFILE_TYPE,BUILD_CONTEXT,PROGRESS_FLAG,NEEDS_BACKEND_ARG) @@ -499,12 +502,13 @@ $(eval $(call generate-docker-build-target,$(BACKEND_VLLM))) $(eval $(call generate-docker-build-target,$(BACKEND_DIFFUSERS))) $(eval $(call generate-docker-build-target,$(BACKEND_CHATTERBOX))) $(eval $(call generate-docker-build-target,$(BACKEND_VIBEVOICE))) +$(eval $(call generate-docker-build-target,$(BACKEND_MOONSHINE))) # Pattern rule for docker-save targets docker-save-%: backend-images docker save local-ai-backend:$* -o backend-images/$*.tar -docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm 
docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2 +docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2 docker-build-moonshine ######################################################## ### END Backends diff --git a/backend/index.yaml b/backend/index.yaml index 1f8c1f7fb4df..41befc625a44 100644 --- a/backend/index.yaml +++ b/backend/index.yaml @@ -275,6 +275,24 @@ amd: "rocm-faster-whisper" nvidia-cuda-13: "cuda13-faster-whisper" nvidia-cuda-12: "cuda12-faster-whisper" +- &moonshine + description: | + Moonshine is a fast, accurate, and efficient speech-to-text transcription model using ONNX Runtime. + It provides real-time transcription capabilities with support for multiple model sizes and GPU acceleration. + urls: + - https://github.com/moonshine-ai/moonshine + tags: + - speech-to-text + - transcription + - ONNX + license: MIT + name: "moonshine" + alias: "moonshine" + capabilities: + nvidia: "cuda12-moonshine" + default: "cpu-moonshine" + nvidia-cuda-13: "cuda13-moonshine" + nvidia-cuda-12: "cuda12-moonshine" - &kokoro icon: https://avatars.githubusercontent.com/u/166769057?v=4 description: | @@ -1315,6 +1333,44 @@ uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-faster-whisper" mirrors: - localai/localai-backends:master-gpu-nvidia-cuda-13-faster-whisper +## moonshine +- !!merge <<: *moonshine + name: "moonshine-development" + capabilities: + nvidia: "cuda12-moonshine-development" + default: "cpu-moonshine-development" + nvidia-cuda-13: "cuda13-moonshine-development" + nvidia-cuda-12: "cuda12-moonshine-development" +- !!merge <<: *moonshine + name: "cpu-moonshine" + uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-moonshine" + mirrors: + - localai/localai-backends:latest-cpu-moonshine +- !!merge <<: *moonshine + name: "cpu-moonshine-development" + uri: "quay.io/go-skynet/local-ai-backends:master-cpu-moonshine" + mirrors: + - localai/localai-backends:master-cpu-moonshine +- !!merge <<: *moonshine + name: "cuda12-moonshine" + uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-moonshine" + mirrors: + - localai/localai-backends:latest-gpu-nvidia-cuda-12-moonshine +- !!merge <<: *moonshine + name: "cuda12-moonshine-development" + uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-moonshine" + mirrors: + - localai/localai-backends:master-gpu-nvidia-cuda-12-moonshine +- !!merge <<: *moonshine + name: "cuda13-moonshine" + uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-moonshine" + mirrors: + - localai/localai-backends:latest-gpu-nvidia-cuda-13-moonshine +- !!merge <<: *moonshine + name: "cuda13-moonshine-development" + uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-moonshine" + mirrors: + - localai/localai-backends:master-gpu-nvidia-cuda-13-moonshine ## coqui - !!merge <<: *coqui diff --git a/backend/python/moonshine/Makefile b/backend/python/moonshine/Makefile new file mode 100644 index 000000000000..71050097c44f --- /dev/null +++ b/backend/python/moonshine/Makefile @@ -0,0 +1,16 @@ +.DEFAULT_GOAL := install + +.PHONY: install +install: + bash install.sh + +.PHONY: protogen-clean +protogen-clean: + $(RM) 
backend_pb2_grpc.py backend_pb2.py + +.PHONY: clean +clean: protogen-clean + rm -rf venv __pycache__ + +test: install + bash test.sh \ No newline at end of file diff --git a/backend/python/moonshine/backend.py b/backend/python/moonshine/backend.py new file mode 100644 index 000000000000..bc9e2965be3f --- /dev/null +++ b/backend/python/moonshine/backend.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 +""" +This is an extra gRPC server of LocalAI for Moonshine transcription +""" +from concurrent import futures +import time +import argparse +import signal +import sys +import os +import backend_pb2 +import backend_pb2_grpc +import moonshine_onnx + +import grpc + + +_ONE_DAY_IN_SECONDS = 60 * 60 * 24 + +# If MAX_WORKERS are specified in the environment use it, otherwise default to 1 +MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1')) + +# Implement the BackendServicer class with the service methods +class BackendServicer(backend_pb2_grpc.BackendServicer): + """ + BackendServicer is the class that implements the gRPC service + """ + def Health(self, request, context): + return backend_pb2.Reply(message=bytes("OK", 'utf-8')) + + def LoadModel(self, request, context): + try: + print("Preparing models, please wait", file=sys.stderr) + # Store the model name for use in transcription + # Model name format: e.g., "moonshine/tiny" + self.model_name = request.Model + print(f"Model name set to: {self.model_name}", file=sys.stderr) + except Exception as err: + return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}") + return backend_pb2.Result(message="Model loaded successfully", success=True) + + def AudioTranscription(self, request, context): + resultSegments = [] + text = "" + try: + # moonshine_onnx.transcribe returns a list of strings + transcriptions = moonshine_onnx.transcribe(request.dst, self.model_name) + + # Combine all transcriptions into a single text + if isinstance(transcriptions, list): + text = " ".join(transcriptions) + # Create segments for each transcription in the list + for id, trans in enumerate(transcriptions): + # Since moonshine doesn't provide timing info, we'll create a single segment + # with id and text, using approximate timing + resultSegments.append(backend_pb2.TranscriptSegment( + id=id, + start=0, + end=0, + text=trans + )) + else: + # Handle case where it's not a list (shouldn't happen, but be safe) + text = str(transcriptions) + resultSegments.append(backend_pb2.TranscriptSegment( + id=0, + start=0, + end=0, + text=text + )) + except Exception as err: + print(f"Unexpected {err=}, {type(err)=}", file=sys.stderr) + return backend_pb2.TranscriptResult(segments=[], text="") + + return backend_pb2.TranscriptResult(segments=resultSegments, text=text) + +def serve(address): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS), + options=[ + ('grpc.max_message_length', 50 * 1024 * 1024), # 50MB + ('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB + ('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB + ]) + backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server) + server.add_insecure_port(address) + server.start() + print("Server started. Listening on: " + address, file=sys.stderr) + + # Define the signal handler function + def signal_handler(sig, frame): + print("Received termination signal. 
Shutting down...") + server.stop(0) + sys.exit(0) + + # Set the signal handlers for SIGINT and SIGTERM + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + try: + while True: + time.sleep(_ONE_DAY_IN_SECONDS) + except KeyboardInterrupt: + server.stop(0) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Run the gRPC server.") + parser.add_argument( + "--addr", default="localhost:50051", help="The address to bind the server to." + ) + args = parser.parse_args() + + serve(args.addr) + diff --git a/backend/python/moonshine/install.sh b/backend/python/moonshine/install.sh new file mode 100755 index 000000000000..4abc9cf583c0 --- /dev/null +++ b/backend/python/moonshine/install.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +backend_dir=$(dirname $0) +if [ -d $backend_dir/common ]; then + source $backend_dir/common/libbackend.sh +else + source $backend_dir/../common/libbackend.sh +fi + +installRequirements + diff --git a/backend/python/moonshine/protogen.sh b/backend/python/moonshine/protogen.sh new file mode 100755 index 000000000000..1dc00c768268 --- /dev/null +++ b/backend/python/moonshine/protogen.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +backend_dir=$(dirname $0) +if [ -d $backend_dir/common ]; then + source $backend_dir/common/libbackend.sh +else + source $backend_dir/../common/libbackend.sh +fi + +python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto + diff --git a/backend/python/moonshine/requirements.txt b/backend/python/moonshine/requirements.txt new file mode 100644 index 000000000000..240f166cf275 --- /dev/null +++ b/backend/python/moonshine/requirements.txt @@ -0,0 +1,4 @@ +grpcio==1.71.0 +protobuf +grpcio-tools +useful-moonshine-onnx@git+https://git@github.com/moonshine-ai/moonshine.git#subdirectory=moonshine-onnx \ No newline at end of file diff --git a/backend/python/moonshine/run.sh b/backend/python/moonshine/run.sh new file mode 100755 index 000000000000..8b3809e4a55b --- /dev/null +++ b/backend/python/moonshine/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash +backend_dir=$(dirname $0) +if [ -d $backend_dir/common ]; then + source $backend_dir/common/libbackend.sh +else + source $backend_dir/../common/libbackend.sh +fi + +startBackend $@ + diff --git a/backend/python/moonshine/test.py b/backend/python/moonshine/test.py new file mode 100644 index 000000000000..d69a7798d9ef --- /dev/null +++ b/backend/python/moonshine/test.py @@ -0,0 +1,139 @@ +""" +A test script to test the gRPC service for Moonshine transcription +""" +import unittest +import subprocess +import time +import os +import tempfile +import shutil +import backend_pb2 +import backend_pb2_grpc + +import grpc + + +class TestBackendServicer(unittest.TestCase): + """ + TestBackendServicer is the class that tests the gRPC service + """ + def setUp(self): + """ + This method sets up the gRPC service by starting the server + """ + self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"]) + time.sleep(10) + + def tearDown(self) -> None: + """ + This method tears down the gRPC service by terminating the server + """ + self.service.terminate() + self.service.wait() + + def test_server_startup(self): + """ + This method tests if the server starts up successfully + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = backend_pb2_grpc.BackendStub(channel) + response = stub.Health(backend_pb2.HealthMessage()) + self.assertEqual(response.message, b'OK') + except 
Exception as err: + print(err) + self.fail("Server failed to start") + finally: + self.tearDown() + + def test_load_model(self): + """ + This method tests if the model is loaded successfully + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = backend_pb2_grpc.BackendStub(channel) + response = stub.LoadModel(backend_pb2.ModelOptions(Model="moonshine/tiny")) + self.assertTrue(response.success) + self.assertEqual(response.message, "Model loaded successfully") + except Exception as err: + print(err) + self.fail("LoadModel service failed") + finally: + self.tearDown() + + def test_audio_transcription(self): + """ + This method tests if audio transcription works successfully + """ + # Create a temporary directory for the audio file + temp_dir = tempfile.mkdtemp() + audio_file = os.path.join(temp_dir, 'audio.wav') + + try: + # Download the audio file to the temporary directory + print(f"Downloading audio file to {audio_file}...") + url = "https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav" + result = subprocess.run( + ["wget", "-q", url, "-O", audio_file], + capture_output=True, + text=True + ) + if result.returncode != 0: + self.fail(f"Failed to download audio file: {result.stderr}") + + # Verify the file was downloaded + if not os.path.exists(audio_file): + self.fail(f"Audio file was not downloaded to {audio_file}") + + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = backend_pb2_grpc.BackendStub(channel) + # Load the model first + load_response = stub.LoadModel(backend_pb2.ModelOptions(Model="moonshine/tiny")) + self.assertTrue(load_response.success) + + # Perform transcription + transcript_request = backend_pb2.TranscriptRequest(dst=audio_file) + transcript_response = stub.AudioTranscription(transcript_request) + + # Print the transcribed text for debugging + print(f"Transcribed text: {transcript_response.text}") + print(f"Number of segments: {len(transcript_response.segments)}") + + # Verify response structure + self.assertIsNotNone(transcript_response) + self.assertIsNotNone(transcript_response.text) + # Protobuf repeated fields return a sequence, not a list + self.assertIsNotNone(transcript_response.segments) + # Check if segments is iterable (has length) + self.assertGreaterEqual(len(transcript_response.segments), 0) + + # Verify the transcription contains the expected text + expected_text = "This is the micro machine man presenting the most midget miniature" + self.assertIn( + expected_text.lower(), + transcript_response.text.lower(), + f"Expected text '{expected_text}' not found in transcription: '{transcript_response.text}'" + ) + + # If we got segments, verify they have the expected structure + if len(transcript_response.segments) > 0: + segment = transcript_response.segments[0] + self.assertIsNotNone(segment.text) + self.assertIsInstance(segment.id, int) + else: + # Even if no segments, we should have text + self.assertIsNotNone(transcript_response.text) + self.assertGreater(len(transcript_response.text), 0) + except Exception as err: + print(err) + self.fail("AudioTranscription service failed") + finally: + self.tearDown() + # Clean up the temporary directory + if os.path.exists(temp_dir): + shutil.rmtree(temp_dir) + diff --git a/backend/python/moonshine/test.sh b/backend/python/moonshine/test.sh new file mode 100755 index 000000000000..f6a66da3e58d --- /dev/null +++ b/backend/python/moonshine/test.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +backend_dir=$(dirname $0) +if [ -d 
$backend_dir/common ]; then + source $backend_dir/common/libbackend.sh +else + source $backend_dir/../common/libbackend.sh +fi + +runUnittests +
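
For a quick manual smoke test of the backend outside CI, a minimal client sketch like the one below can be used. It is illustrative only and not part of the change set; it assumes the generated `backend_pb2`/`backend_pb2_grpc` stubs are importable, the server is already running via `python3 backend.py --addr localhost:50051`, and a local `audio.wav` file exists. The messages and fields (`HealthMessage`, `ModelOptions(Model=...)`, `TranscriptRequest(dst=...)`) mirror those exercised in `test.py` above.

```python
#!/usr/bin/env python3
"""Minimal manual smoke test for the moonshine backend (sketch, not part of the PR)."""
import grpc
import backend_pb2
import backend_pb2_grpc


def main():
    with grpc.insecure_channel("localhost:50051") as channel:
        stub = backend_pb2_grpc.BackendStub(channel)

        # Health check: the servicer replies with b"OK"
        health = stub.Health(backend_pb2.HealthMessage())
        print("health:", health.message)

        # Load a model; the moonshine servicer only records the model name (e.g. "moonshine/tiny")
        load = stub.LoadModel(backend_pb2.ModelOptions(Model="moonshine/tiny"))
        assert load.success, load.message

        # Transcribe a local WAV file; `dst` is the path handed to moonshine_onnx.transcribe
        result = stub.AudioTranscription(backend_pb2.TranscriptRequest(dst="audio.wav"))
        print("text:", result.text)
        for segment in result.segments:
            print(f"segment {segment.id}: {segment.text}")


if __name__ == "__main__":
    main()
```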