diff --git a/js/src/lib/interfaces/Libraries.ts b/js/src/lib/interfaces/Libraries.ts index e1ce8d81b..04a263368 100644 --- a/js/src/lib/interfaces/Libraries.ts +++ b/js/src/lib/interfaces/Libraries.ts @@ -9,10 +9,12 @@ export enum ModelLibrary { "allennlp" = "allenNLP", "asteroid" = "Asteroid", "diffusers" = "Diffusers", + "doctr" = "docTR", "espnet" = "ESPnet", "fairseq" = "Fairseq", "flair" = "Flair", "keras" = "Keras", + "k2" = "K2", "nemo" = "NeMo", "paddlenlp" = "PaddleNLP", "pyannote-audio" = "pyannote.audio", @@ -32,7 +34,12 @@ export enum ModelLibrary { "pythae" = "Pythae", } -export const ALL_MODEL_LIBRARY_KEYS = Object.keys(ModelLibrary) as (keyof typeof ModelLibrary)[]; +export type ModelLibraryKey = keyof typeof ModelLibrary; +export const ALL_MODEL_LIBRARY_KEYS = Object.keys(ModelLibrary) as ModelLibraryKey[]; + +const EXCLUDE_THOSE_LIBRARIES_FROM_DISPLAY: ModelLibraryKey[] = ["doctr", "k2"]; + +export const ALL_DISPLAY_MODEL_LIBRARY_KEYS = ALL_MODEL_LIBRARY_KEYS.filter(k => !EXCLUDE_THOSE_LIBRARIES_FROM_DISPLAY.includes(k)); /** @@ -390,7 +397,7 @@ model = AutoModel.load_from_hf_hub("${model.id}")`; -export const MODEL_LIBRARIES_UI_ELEMENTS: { [key in keyof typeof ModelLibrary]?: LibraryUiElement } = { +export const MODEL_LIBRARIES_UI_ELEMENTS: Partial<Record<ModelLibraryKey, LibraryUiElement>> = { // ^^ TODO(remove the optional ? marker when Stanza snippet is available) "adapter-transformers": { btnLabel: "Adapter Transformers", diff --git a/js/src/lib/interfaces/Types.ts b/js/src/lib/interfaces/Types.ts index 8f923bdf3..ce88466f7 100644 --- a/js/src/lib/interfaces/Types.ts +++ b/js/src/lib/interfaces/Types.ts @@ -1,3 +1,5 @@ +import type { ModelLibraryKey } from "./Libraries"; + // Warning: order of modalities here determine how they are listed on the /tasks page export const MODALITIES = [ "cv", @@ -758,88 +760,84 @@ export interface TransformersInfo { * This mapping is generated automatically by "python-api-export-tasks" action in huggingface/api-inference-community repo upon merge. 
* Ref: https://github.com/huggingface/api-inference-community/pull/158 */ -export const LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS: Record<string, Array<PipelineType>> = { - "adapter_transformers": [ +export const LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS: Partial<Record<ModelLibraryKey, PipelineType[]>> = { + "adapter-transformers": [ "question-answering", "text-classification", - "token-classification" + "token-classification", ], "allennlp": [ - "question-answering" + "question-answering", ], "asteroid": [ - "audio-source-separation", - "audio-to-audio" + // "audio-source-separation", + "audio-to-audio", ], "diffusers": [ - "text-to-image" + "text-to-image", ], "doctr": [ - "object-detection" + "object-detection", ], "espnet": [ "text-to-speech", - "automatic-speech-recognition" + "automatic-speech-recognition", ], "fairseq": [ "text-to-speech", - "audio-to-audio" + "audio-to-audio", ], "fastai": [ - "image-classification" + "image-classification", ], "fasttext": [ "feature-extraction", - "text-classification" + "text-classification", ], "flair": [ - "token-classification" + "token-classification", ], - "k2_sherpa": [ - "automatic-speech-recognition" + "k2": [ + "automatic-speech-recognition", ], "keras": [ - "image-classification" + "image-classification", ], "nemo": [ - "automatic-speech-recognition" + "automatic-speech-recognition", ], "paddlenlp": [ "conversational", - "fill-mask" + "fill-mask", ], - "pyannote_audio": [ - "automatic-speech-recognition" + "pyannote-audio": [ + "automatic-speech-recognition", ], - "sentence_transformers": [ + "sentence-transformers": [ "feature-extraction", - "sentence-similarity" + "sentence-similarity", ], "sklearn": [ "tabular-classification", "tabular-regression", - "text-classification" + "text-classification", ], "spacy": [ "token-classification", "text-classification", - "sentence-similarity" + "sentence-similarity", ], "speechbrain": [ "audio-classification", "audio-to-audio", "automatic-speech-recognition", "text-to-speech", - "text2text-generation" + "text2text-generation", ], 
"stanza": [ - "token-classification" - ], - "superb": [ - "automatic-speech-recognition", - "speech-segmentation" + "token-classification", ], "timm": [ - "image-classification" - ] -} \ No newline at end of file + "image-classification", + ], +}; diff --git a/tasks/src/Types.ts b/tasks/src/Types.ts index db8ca498b..0c89973b8 100644 --- a/tasks/src/Types.ts +++ b/tasks/src/Types.ts @@ -1,4 +1,4 @@ -import type { ModelLibrary } from "../../js/src/lib/interfaces/Libraries"; +import type { ModelLibraryKey } from "../../js/src/lib/interfaces/Libraries"; import type { PipelineType } from "../../js/src/lib/interfaces/Types"; export interface ExampleRepo { @@ -46,7 +46,7 @@ export interface TaskData { id: PipelineType; isPlaceholder?: boolean; label: string; - libraries: Array<keyof typeof ModelLibrary>; + libraries: ModelLibraryKey[]; metrics: ExampleRepo[]; models: ExampleRepo[]; summary: string; diff --git a/tasks/src/const.ts b/tasks/src/const.ts index 9fc685641..1c0217844 100644 --- a/tasks/src/const.ts +++ b/tasks/src/const.ts @@ -1,10 +1,10 @@ -import type { ModelLibrary } from "../../js/src/lib/interfaces/Libraries"; +import type { ModelLibraryKey } from "../../js/src/lib/interfaces/Libraries"; import type { PipelineType } from "../../js/src/lib/interfaces/Types"; /* * Model libraries compatible with each ML task */ -export const TASKS_MODEL_LIBRARIES: Record<PipelineType, Array<keyof typeof ModelLibrary>> = { +export const TASKS_MODEL_LIBRARIES: Record<PipelineType, ModelLibraryKey[]> = { "audio-classification": ["speechbrain", "transformers"], "audio-to-audio": ["asteroid", "speechbrain"], "automatic-speech-recognition": ["espnet", "nemo", "speechbrain", "transformers"],