diff --git a/gallery/index.yaml b/gallery/index.yaml
index 6662f78bf8a6..ed1fa27d76fa 100644
--- a/gallery/index.yaml
+++ b/gallery/index.yaml
@@ -1,4 +1,29 @@
 ---
+- name: "liquidai.lfm2-2.6b-transcript"
+  url: "github:mudler/LocalAI/gallery/virtual.yaml@master"
+  urls:
+    - https://huggingface.co/DevQuasar/LiquidAI.LFM2-2.6B-Transcript-GGUF
+  description: |
+    This is a large language model (2.6B parameters) designed for text-generation tasks. It is a quantized version of the original model `LiquidAI/LFM2-2.6B-Transcript`, optimized for efficiency while retaining strong performance. It builds on the base model with additional optimizations for deployment and for use cases such as transcription and language modeling. It is trained on large-scale text data and supports multiple languages.
+  overrides:
+    parameters:
+      model: llama-cpp/models/LiquidAI.LFM2-2.6B-Transcript.Q4_K_M.gguf
+    name: LiquidAI.LFM2-2.6B-Transcript-GGUF
+    backend: llama-cpp
+    template:
+      use_tokenizer_template: true
+    known_usecases:
+      - chat
+    function:
+      grammar:
+        disable: true
+    description: Imported from https://huggingface.co/DevQuasar/LiquidAI.LFM2-2.6B-Transcript-GGUF
+    options:
+      - use_jinja:true
+  files:
+    - filename: llama-cpp/models/LiquidAI.LFM2-2.6B-Transcript.Q4_K_M.gguf
+      sha256: 301a8467531781909dc7a6263318103a3d8673a375afc4641e358d4174bd15d4
+      uri: https://huggingface.co/DevQuasar/LiquidAI.LFM2-2.6B-Transcript-GGUF/resolve/main/LiquidAI.LFM2-2.6B-Transcript.Q4_K_M.gguf
 - name: "lfm2.5-1.2b-nova-function-calling"
   url: "github:mudler/LocalAI/gallery/virtual.yaml@master"
   urls:
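
For reference, once this gallery entry is installed into a running LocalAI instance, the model can be queried through LocalAI's OpenAI-compatible chat endpoint. The snippet below is a minimal sketch, assuming a server on http://localhost:8080 with the entry already applied; the served model name is an assumption and may resolve to either the gallery id `liquidai.lfm2-2.6b-transcript` or the overridden config name `LiquidAI.LFM2-2.6B-Transcript-GGUF`.

```python
# Minimal sketch: query the model via LocalAI's OpenAI-compatible API.
# Assumptions: LocalAI is running on http://localhost:8080 and this gallery
# entry has already been installed; the model name below is assumed and may
# instead need to be "LiquidAI.LFM2-2.6B-Transcript-GGUF".
import json
import urllib.request

payload = {
    "model": "liquidai.lfm2-2.6b-transcript",  # assumed served model name
    "messages": [
        {"role": "user", "content": "Clean up this meeting transcript: ..."}
    ],
}

req = urllib.request.Request(
    "http://localhost:8080/v1/chat/completions",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)

with urllib.request.urlopen(req) as resp:
    reply = json.loads(resp.read())
    print(reply["choices"][0]["message"]["content"])
```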