Skip to content

Commit 202853f

Browse files
hanouticelina and github-actions[bot]
authored and committed
Update API inference documentation (automated)
1 parent 290314b commit 202853f

File tree

5 files changed

+35
-34
lines changed

5 files changed

+35
-34
lines changed

docs/api-inference/tasks/chat-completion.md

+16-16
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,7 @@ curl 'https://api-inference.huggingface.co/models/google/gemma-2-2b-it/v1/chat/c
7979
</curl>
8080

8181
<python>
82-
With huggingface_hub client:
82+
Using `huggingface_hub`:
8383
```py
8484
from huggingface_hub import InferenceClient
8585

@@ -103,7 +103,7 @@ for chunk in stream:
103103
print(chunk.choices[0].delta.content, end="")
104104
```
105105

106-
With openai client:
106+
Using `openai`:
107107
```py
108108
from openai import OpenAI
109109

@@ -134,11 +134,11 @@ To use the Python client, see `huggingface_hub`'s [package reference](https://hu
134134
</python>
135135

136136
<js>
137-
With huggingface_hub client:
137+
Using `huggingface.js`:
138138
```js
139-
import { HfInference } from "@huggingface/inference"
139+
import { HfInference } from "@huggingface/inference";
140140

141-
const client = new HfInference("hf_***")
141+
const client = new HfInference("hf_***");
142142

143143
let out = "";
144144

@@ -162,14 +162,14 @@ for await (const chunk of stream) {
162162
}
163163
```
164164

165-
With openai client:
165+
Using `openai`:
166166
```js
167-
import { OpenAI } from "openai"
167+
import { OpenAI } from "openai";
168168

169169
const client = new OpenAI({
170170
baseURL: "https://api-inference.huggingface.co/v1/",
171171
apiKey: "hf_***"
172-
})
172+
});
173173

174174
let out = "";
175175

@@ -237,7 +237,7 @@ curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Visio
237237
</curl>
238238

239239
<python>
240-
With huggingface_hub client:
240+
Using `huggingface_hub`:
241241
```py
242242
from huggingface_hub import InferenceClient
243243

@@ -272,7 +272,7 @@ for chunk in stream:
272272
print(chunk.choices[0].delta.content, end="")
273273
```
274274

275-
With openai client:
275+
Using `openai`:
276276
```py
277277
from openai import OpenAI
278278

@@ -314,11 +314,11 @@ To use the Python client, see `huggingface_hub`'s [package reference](https://hu
314314
</python>
315315

316316
<js>
317-
With huggingface_hub client:
317+
Using `huggingface.js`:
318318
```js
319-
import { HfInference } from "@huggingface/inference"
319+
import { HfInference } from "@huggingface/inference";
320320

321-
const client = new HfInference("hf_***")
321+
const client = new HfInference("hf_***");
322322

323323
let out = "";
324324

@@ -353,14 +353,14 @@ for await (const chunk of stream) {
353353
}
354354
```
355355

356-
With openai client:
356+
Using `openai`:
357357
```js
358-
import { OpenAI } from "openai"
358+
import { OpenAI } from "openai";
359359

360360
const client = new OpenAI({
361361
baseURL: "https://api-inference.huggingface.co/v1/",
362362
apiKey: "hf_***"
363-
})
363+
});
364364

365365
let out = "";
366366

docs/api-inference/tasks/image-text-to-text.md

+2-12
Original file line numberDiff line numberDiff line change
@@ -45,13 +45,8 @@ curl https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision
4545
</curl>
4646

4747
<python>
48-
With huggingface_hub client:
48+
Using `huggingface_hub`:
4949
```py
50-
import requests
51-
52-
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct"
53-
headers = {"Authorization": "Bearer hf_***"}
54-
5550
from huggingface_hub import InferenceClient
5651

5752
client = InferenceClient(api_key="hf_***")
@@ -69,13 +64,8 @@ for chunk in stream:
6964
print(chunk.choices[0].delta.content, end="")
7065
```
7166

72-
With openai client:
67+
Using `openai`:
7368
```py
74-
import requests
75-
76-
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct"
77-
headers = {"Authorization": "Bearer hf_***"}
78-
7969
from openai import OpenAI
8070

8171
client = OpenAI(

docs/api-inference/tasks/text-to-image.md

+11
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,16 @@ curl https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev \
4545
</curl>
4646

4747
<python>
48+
Using `huggingface_hub`:
49+
```py
50+
from huggingface_hub import InferenceClient
51+
client = InferenceClient("black-forest-labs/FLUX.1-dev", token="hf_***")
52+
53+
# output is a PIL.Image object
54+
image = client.text_to_image("Astronaut riding a horse")
55+
```
56+
57+
Using `requests`:
4858
```py
4959
import requests
5060

@@ -57,6 +67,7 @@ def query(payload):
5767
image_bytes = query({
5868
"inputs": "Astronaut riding a horse",
5969
})
70+
6071
# You can access the image with PIL.Image for example
6172
import io
6273
from PIL import Image

scripts/api-inference/package.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
"author": "",
1515
"license": "ISC",
1616
"dependencies": {
17-
"@huggingface/tasks": "^0.12.15",
17+
"@huggingface/tasks": "^0.13.3",
1818
"@types/node": "^22.5.0",
1919
"handlebars": "^4.7.8",
2020
"node": "^20.17.0",

scripts/api-inference/pnpm-lock.yaml

+5-5
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments (0)