2 changes: 2 additions & 0 deletions Makefile
@@ -124,6 +124,7 @@ run: check-env-run
--env VLLM_API_TOKEN=$(ANSIBLE_CHATBOT_VLLM_API_TOKEN) \
--env INFERENCE_MODEL=$(ANSIBLE_CHATBOT_INFERENCE_MODEL) \
--env INFERENCE_MODEL_FILTER=$(ANSIBLE_CHATBOT_INFERENCE_MODEL_FILTER) \
--env GEMINI_API_KEY=$(GEMINI_API_KEY) \
ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION)

run-test:
@@ -162,6 +163,7 @@ run-local-db: check-env-run-local-db
--env VLLM_API_TOKEN=$(ANSIBLE_CHATBOT_VLLM_API_TOKEN) \
--env INFERENCE_MODEL=$(ANSIBLE_CHATBOT_INFERENCE_MODEL) \
--env INFERENCE_MODEL_FILTER=$(ANSIBLE_CHATBOT_INFERENCE_MODEL_FILTER) \
--env GEMINI_API_KEY=$(GEMINI_API_KEY) \
ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION)

clean:
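
A quick usage sketch for the new variable (assuming `make run` is invoked from the repository root; the key value is a placeholder):

```sh
# Export the Gemini key so make can forward it into the container,
# matching the new --env GEMINI_API_KEY=$(GEMINI_API_KEY) flag above.
export GEMINI_API_KEY=your-gemini-api-key
make run
```
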
14 changes: 14 additions & 0 deletions README.md
@@ -154,6 +154,20 @@ Runs basic tests against the local container.
kubectl apply -f my-chatbot-stack-deploy.yaml
```

## Appendix - Google Gemini

* Set the environment variable `GEMINI_API_KEY=<YOUR_API_KEY>`
* Example of a `v1/query` request:
```json
{
"query": "hello",
"system_prompt": "You are a helpful assistant.",
"model": "gemini/gemini-2.5-flash",
"provider": "gemini"
}
```
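
For illustration, a minimal `curl` sketch of the request above (the host and port are assumptions for a typical local deployment; adjust them to your environment):

```sh
# Hypothetical local endpoint; replace host/port with your deployment's.
curl -s -X POST http://localhost:8080/v1/query \
  -H "Content-Type: application/json" \
  -d '{
        "query": "hello",
        "system_prompt": "You are a helpful assistant.",
        "model": "gemini/gemini-2.5-flash",
        "provider": "gemini"
      }'
```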


## Appendix - Host clean-up

If you need to rebuild images, apply the following clean-up steps first:
9 changes: 9 additions & 0 deletions ansible-chatbot-mcp-run.yaml
@@ -18,6 +18,10 @@ providers:
max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
api_token: ${env.VLLM_API_TOKEN:=fake}
tls_verify: ${env.VLLM_TLS_VERIFY:=true}
- provider_id: gemini
provider_type: remote::gemini
config:
api_key: ${env.GEMINI_API_KEY:=fake}
- provider_id: inline_sentence-transformer
provider_type: inline::sentence-transformers
config: {}
@@ -85,6 +89,11 @@ models:
model_id: ${env.EMBEDDINGS_MODEL:=/.llama/data/distributions/ansible-chatbot/embeddings_model}
provider_id: inline_sentence-transformer
model_type: embedding
- metadata: {}
model_id: ${env.GEMINI_INFERENCE_MODEL:=gemini/gemini-2.5-flash}
provider_id: gemini
provider_model_id: gemini/gemini-2.5-flash
model_type: llm
shields: []
vector_dbs:
- metadata: {}
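
The `${env.NAME:=default}` references above are environment substitutions with fallbacks: `GEMINI_API_KEY` defaults to `fake` and `GEMINI_INFERENCE_MODEL` to `gemini/gemini-2.5-flash` when unset. A sketch of supplying both at container start (podman/docker-style flags as in the Makefile; the key and image tag are placeholders). Note that `provider_model_id` stays pinned to `gemini/gemini-2.5-flash` in this config, so overriding `GEMINI_INFERENCE_MODEL` changes the model's served alias rather than the upstream model:

```sh
# Hypothetical invocation; the key and tag are placeholders.
podman run \
  --env GEMINI_API_KEY=your-gemini-api-key \
  --env GEMINI_INFERENCE_MODEL=gemini/gemini-2.5-flash \
  ansible-chatbot-stack:latest
```
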
9 changes: 9 additions & 0 deletions ansible-chatbot-run.yaml
@@ -18,6 +18,10 @@ providers:
max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
api_token: ${env.VLLM_API_TOKEN:=fake}
tls_verify: ${env.VLLM_TLS_VERIFY:=true}
- provider_id: gemini
provider_type: remote::gemini
config:
api_key: ${env.GEMINI_API_KEY:=fake}
- provider_id: inline_sentence-transformer
provider_type: inline::sentence-transformers
config: {}
@@ -85,6 +89,11 @@ models:
model_id: ${env.EMBEDDINGS_MODEL:=/.llama/data/distributions/ansible-chatbot/embeddings_model}
provider_id: inline_sentence-transformer
model_type: embedding
- metadata: {}
model_id: ${env.GEMINI_INFERENCE_MODEL:=gemini/gemini-2.5-flash}
provider_id: gemini
provider_model_id: gemini/gemini-2.5-flash
model_type: llm
shields: []
vector_dbs:
- metadata: {}
1 change: 1 addition & 0 deletions pyproject.toml
@@ -15,6 +15,7 @@ dependencies = [
"opentelemetry-exporter-otlp~=1.34.1",
"sentence-transformers>=5.0.0",
"sqlalchemy~=2.0.41",
"litellm~=1.75.3",
]

[dependency-groups]
3 changes: 2 additions & 1 deletion requirements.txt
@@ -37,6 +37,7 @@ joblib==1.5.1
jsonschema==4.24.0
jsonschema-specifications==2025.4.1
lightspeed-stack-providers==0.1.14
litellm==1.75.5.post1
llama-api-client==0.1.2
llama-stack==0.2.16
llama-stack-client==0.2.16
@@ -62,7 +63,7 @@ nvidia-cusparselt-cu12==0.6.3 ; platform_machine == 'x86_64' and sys_platform ==
nvidia-nccl-cu12==2.26.2 ; platform_machine == 'x86_64' and sys_platform == 'linux'
nvidia-nvjitlink-cu12==12.6.85 ; platform_machine == 'x86_64' and sys_platform == 'linux'
nvidia-nvtx-cu12==12.6.77 ; platform_machine == 'x86_64' and sys_platform == 'linux'
openai==1.91.0
openai==1.99.9
opentelemetry-api==1.34.1
opentelemetry-exporter-otlp==1.34.1
opentelemetry-exporter-otlp-proto-common==1.34.1
30 changes: 27 additions & 3 deletions uv.lock

Some generated files are not rendered by default.