diff --git a/docs/hub/_toctree.yml b/docs/hub/_toctree.yml
index 41d5bbbc0..e4b5e1214 100644
--- a/docs/hub/_toctree.yml
+++ b/docs/hub/_toctree.yml
@@ -89,6 +89,8 @@
       title: TensorBoard
     - local: timm
       title: timm
+    - local: transformers-js
+      title: Transformers.js
     - local: models-widgets
       title: Model Widgets
       sections:
diff --git a/docs/hub/models-libraries.md b/docs/hub/models-libraries.md
index ebbeb55eb..478087edb 100644
--- a/docs/hub/models-libraries.md
+++ b/docs/hub/models-libraries.md
@@ -33,6 +33,7 @@ The table below summarizes the supported libraries and their level of integratio
 | [Stable-Baselines3](https://github.com/DLR-RM/stable-baselines3) | Set of reliable implementations of deep reinforcement learning algorithms in PyTorch | ❌ | ✅ | ✅ | ✅ |
 | [TensorFlowTTS](https://github.com/TensorSpeech/TensorFlowTTS) | Real-time state-of-the-art speech synthesis architectures. | ❌ | ❌ | ✅ | ❌ |
 | [Timm](https://github.com/rwightman/pytorch-image-models) | Collection of image models, scripts, pretrained weights, etc. | ✅ | ✅ | ✅ | ✅ |
+| [Transformers.js](https://github.com/xenova/transformers.js) | State-of-the-art Machine Learning for the web. Run 🤗 Transformers directly in your browser, with no need for a server! | ❌ | ❌ | ✅ | ❌ |
 
 ### How can I add a new library to the Inference API?
 
diff --git a/docs/hub/transformers-js.md b/docs/hub/transformers-js.md
new file mode 100644
index 000000000..d095dbdda
--- /dev/null
+++ b/docs/hub/transformers-js.md
@@ -0,0 +1,74 @@
+# Using `Transformers.js` at Hugging Face
+
+Transformers.js is a JavaScript library for running 🤗 Transformers directly in your browser, with no need for a server! It is designed to be functionally equivalent to the original [Python library](https://github.com/huggingface/transformers), meaning you can run the same pretrained models using a very similar API.
+
+## Exploring `transformers.js` in the Hub
+
+You can find `transformers.js` models by filtering by library on the [models page](https://huggingface.co/models?library=transformers.js).
+
+## Quick tour
+
+It's super simple to translate from existing code! Just like the Python library, we support the `pipeline` API. Pipelines group together a pretrained model with preprocessing of inputs and postprocessing of outputs, making it the easiest way to run models with the library.
+
+<table>
+<tr>
+<th align="center"><b>Python (original)</b></th>
+<th align="center"><b>JavaScript (ours)</b></th>
+</tr>
+<tr>
+<td>
+
+```python
+from transformers import pipeline
+
+# Allocate a pipeline for sentiment-analysis
+pipe = pipeline('sentiment-analysis')
+
+out = pipe('I love transformers!')
+# [{'label': 'POSITIVE', 'score': 0.999806941}]
+```
+
+</td>
+<td>
+
+```javascript
+import { pipeline } from '@xenova/transformers';
+
+// Allocate a pipeline for sentiment-analysis
+let pipe = await pipeline('sentiment-analysis');
+
+let out = await pipe('I love transformers!');
+// [{'label': 'POSITIVE', 'score': 0.999817686}]
+```
+
+</td>
+</tr>
+</table>
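
For reviewers who want to try the comparison above without any build step: a minimal sketch of the no-bundler path mentioned in the library's README, assuming the jsDelivr CDN build (the exact URL and version pinning may differ; in real use, pin a release):

```javascript
// Sketch: Transformers.js from a plain HTML page, no bundler.
// Place this inside a <script type="module"> tag; module scripts
// support top-level await in modern browsers.
import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

// Allocate a pipeline for sentiment-analysis (model files are
// downloaded and cached by the browser on first use)
let pipe = await pipeline('sentiment-analysis');

let out = await pipe('I love transformers!');
console.log(out); // e.g. [{ label: 'POSITIVE', score: 0.99... }]
```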
+
+You can also use a different model by specifying the model id or path as the second argument to the `pipeline` function. For example:
+```javascript
+// Use a different model for sentiment-analysis
+let pipe = await pipeline('sentiment-analysis', 'nlptown/bert-base-multilingual-uncased-sentiment');
+```
+
+Refer to the [documentation](https://huggingface.co/docs/transformers.js) for the full list of supported tasks and models.
+
+## Installation
+
+To install via [NPM](https://www.npmjs.com/package/@xenova/transformers), run:
+```bash
+npm i @xenova/transformers
+```
+
+For more information, including how to use it in vanilla JS (without any bundler) via a CDN or static hosting, refer to the [README](https://github.com/xenova/transformers.js/blob/main/README.md#installation).
+
+## Additional resources
+
+* Transformers.js [repository](https://github.com/xenova/transformers.js)
+* Transformers.js [docs](https://huggingface.co/docs/transformers.js)
+* Transformers.js [demo](https://xenova.github.io/transformers.js/)
diff --git a/js/src/lib/interfaces/Libraries.ts b/js/src/lib/interfaces/Libraries.ts
index 618605a34..78a7f79df 100644
--- a/js/src/lib/interfaces/Libraries.ts
+++ b/js/src/lib/interfaces/Libraries.ts
@@ -31,6 +31,7 @@ export enum ModelLibrary {
 	"timm" = "Timm",
 	"fastai" = "fastai",
 	"transformers" = "Transformers",
+	"transformers.js" = "Transformers.js",
 	"stanza" = "Stanza",
 	"fasttext" = "fastText",
 	"stable-baselines3" = "Stable-Baselines3",
@@ -395,6 +396,20 @@ const transformers = (model: ModelData) => {
 	}
 };
 
+const transformersJS = (model: ModelData) => {
+	if (!model.pipeline_tag) {
+		return `// ⚠️ Unknown pipeline tag`;
+	}
+
+	const libName = '@xenova/transformers';
+
+	return `// npm i ${libName}
+import { pipeline } from '${libName}';
+
+// Allocate pipeline
+const pipe = await pipeline('${model.pipeline_tag}', '${model.id}');`;
+};
+
 const peftTask = (peftTaskType?: string) => {
 	switch (peftTaskType) {
 		case "CAUSAL_LM":
@@ -623,6 +638,12 @@ export const MODEL_LIBRARIES_UI_ELEMENTS: Partial<Record<keyof typeof ModelLibrary, LibraryUiElement>> = {
 		repoUrl:  "https://github.com/huggingface/transformers",
 		snippet:  transformers,
 	},
+	"transformers.js": {
+		btnLabel: "Transformers.js",
+		repoName: "transformers.js",
+		repoUrl:  "https://github.com/xenova/transformers.js",
+		snippet:  transformersJS,
+	},
 	stanza: {
 		btnLabel: "Stanza",
 		repoName: "stanza",
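
To illustrate what the `transformersJS` snippet function above renders on a model page: for a hypothetical model whose `pipeline_tag` is `text-classification` and whose id is `Xenova/my-model` (an illustrative id, not a real repo), substituting into the template literal yields:

```javascript
// npm i @xenova/transformers
import { pipeline } from '@xenova/transformers';

// Allocate pipeline
const pipe = await pipeline('text-classification', 'Xenova/my-model');
```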
"sentence-similarity": ["sentence-transformers", "spacy"], - "summarization": ["transformers"], + "sentence-similarity": ["sentence-transformers", "spacy", "transformers.js"], + "summarization": ["transformers", "transformers.js"], "table-question-answering": ["transformers"], "table-to-text": ["transformers"], "tabular-classification": ["sklearn"], "tabular-regression": ["sklearn"], "tabular-to-text": ["transformers"], - "text-classification": ["adapter-transformers", "spacy", "transformers"], - "text-generation": ["transformers"], + "text-classification": ["adapter-transformers", "spacy", "transformers", "transformers.js"], + "text-generation": ["transformers", "transformers.js"], "text-retrieval": [], "text-to-image": [], "text-to-speech": ["espnet", "tensorflowtts"], "text-to-video": [], - "text2text-generation": ["transformers"], + "text2text-generation": ["transformers", "transformers.js"], "time-series-forecasting": [], - "token-classification": ["adapter-transformers", "flair", "spacy", "span-marker", "stanza", "transformers"], - "translation": ["transformers"], + "token-classification": ["adapter-transformers", "flair", "spacy", "span-marker", "stanza", "transformers", "transformers.js"], + "translation": ["transformers", "transformers.js"], "unconditional-image-generation": [], "visual-question-answering": [], "voice-activity-detection": [], - "zero-shot-classification": ["transformers"], - "zero-shot-image-classification": [], + "zero-shot-classification": ["transformers", "transformers.js"], + "zero-shot-image-classification": ["transformers.js"], };