|
3 | 3 | Run `pytest tests/entrypoints/openai/test_embedding_dimensions.py`. |
4 | 4 | """ |
5 | 5 |
|
| 6 | +from typing import Optional |
| 7 | + |
6 | 8 | import openai |
7 | 9 | import pytest |
8 | 10 |
|
9 | 11 | from vllm.entrypoints.openai.protocol import EmbeddingResponse |
10 | 12 |
|
11 | | -from ...models.embedding.utils import EmbedModelInfo |
| 13 | +from ...conftest import HfRunner |
| 14 | +from ...models.embedding.utils import EmbedModelInfo, correctness_test |
12 | 15 | from ...utils import RemoteOpenAIServer |
13 | 16 |
|
14 | 17 | MODELS = [ |
15 | | - EmbedModelInfo(name="BAAI/bge-m3", is_matryoshka=False), |
16 | | - EmbedModelInfo(name="jinaai/jina-embeddings-v3", is_matryoshka=True), |
| 18 | + EmbedModelInfo("intfloat/multilingual-e5-small", is_matryoshka=False), |
| 19 | + EmbedModelInfo("Snowflake/snowflake-arctic-embed-m-v1.5", |
| 20 | + is_matryoshka=True, |
| 21 | + matryoshka_dimensions=[256]), |
17 | 22 | ] |
18 | 23 |
|
19 | 24 | input_texts = [ |
20 | 25 | "The chef prepared a delicious meal.", |
21 | | -] * 3 |
| 26 | +] |
22 | 27 |
|
23 | 28 |
|
24 | | -@pytest.mark.asyncio |
25 | | -@pytest.mark.parametrize("model", MODELS) |
26 | | -async def test_validating_dimensions(model: EmbedModelInfo): |
| 29 | +@pytest.fixture(scope="module", params=MODELS) |
| 30 | +def model_info(request): |
| 31 | + return request.param |
| 32 | + |
| 33 | + |
| 34 | +@pytest.fixture(scope="module", params=["bfloat16"]) |
| 35 | +def dtype(request): |
| 36 | + return request.param |
| 37 | + |
| 38 | + |
| 39 | +@pytest.fixture(scope="module") |
| 40 | +def server(model_info, dtype: str): |
27 | 41 | args = [ |
28 | 42 | "--task", |
29 | 43 | "embed", |
30 | 44 | # use half precision for speed and memory savings in CI environment |
31 | 45 | "--dtype", |
32 | | - "bfloat16", |
| 46 | + dtype, |
33 | 47 | "--enforce-eager", |
34 | 48 | "--max-model-len", |
35 | | - "512", |
36 | | - "--trust_remote_code" |
| 49 | + "512" |
37 | 50 | ] |
38 | | - with RemoteOpenAIServer(model.name, args) as remote_server: |
39 | | - client = remote_server.get_async_client() |
40 | | - |
41 | | - async def make_request(dimensions): |
42 | | - embedding_response = await client.embeddings.create( |
43 | | - model=model.name, |
44 | | - input=input_texts, |
45 | | - dimensions=dimensions, |
46 | | - encoding_format="float", |
47 | | - ) |
48 | | - embeddings = EmbeddingResponse.model_validate( |
49 | | - embedding_response.model_dump(mode="json")) |
50 | | - |
51 | | - assert embeddings.id is not None |
52 | | - assert len(embeddings.data) == 3 |
53 | | - assert len(embeddings.data[0].embedding) > 0 |
54 | | - assert embeddings.usage.completion_tokens == 0 |
55 | | - assert embeddings.usage.prompt_tokens > 0 |
56 | | - assert embeddings.usage.total_tokens > 0 |
57 | | - |
58 | | - if dimensions is not None: |
59 | | - assert len(embeddings.data[0].embedding) == dimensions |
60 | | - |
61 | | - if model.is_matryoshka: |
62 | | - for dimensions in [None, 16]: |
63 | | - await make_request(dimensions) |
64 | 51 |
|
| 52 | + if model_info.name == "Snowflake/snowflake-arctic-embed-m-v1.5": |
| 53 | + # Manually enable Matryoshka Embeddings |
| 54 | + args.extend([ |
| 55 | + "--trust_remote_code", "--hf_overrides", |
| 56 | + '{"matryoshka_dimensions":[256]}' |
| 57 | + ]) |
| 58 | + |
| 59 | + with RemoteOpenAIServer(model_info.name, args) as remote_server: |
| 60 | + yield remote_server |
| 61 | + |
| 62 | + |
| 63 | +@pytest.fixture(scope="module") |
| 64 | +def hf_model(hf_runner, model_info, dtype: str): |
| 65 | + with hf_runner(model_info.name, dtype=dtype, |
| 66 | + is_sentence_transformer=True) as hf_model: |
| 67 | + yield hf_model |
| 68 | + |
| 69 | + |
| 70 | +@pytest.mark.asyncio |
| 71 | +async def test_matryoshka(model_info: EmbedModelInfo, |
| 72 | + server: RemoteOpenAIServer, hf_model: HfRunner): |
| 73 | + client = server.get_async_client() |
| 74 | + |
| 75 | + async def make_request_and_correctness_test(dimensions): |
| 76 | + prompts = input_texts * 3 |
| 77 | + |
| 78 | + embedding_response = await client.embeddings.create( |
| 79 | + model=model_info.name, |
| 80 | + input=prompts, |
| 81 | + dimensions=dimensions, |
| 82 | + encoding_format="float", |
| 83 | + ) |
| 84 | + embeddings = EmbeddingResponse.model_validate( |
| 85 | + embedding_response.model_dump(mode="json")) |
| 86 | + |
| 87 | + assert embeddings.id is not None |
| 88 | + assert len(embeddings.data) == 3 |
| 89 | + assert len(embeddings.data[0].embedding) > 0 |
| 90 | + assert embeddings.usage.completion_tokens == 0 |
| 91 | + assert embeddings.usage.prompt_tokens > 0 |
| 92 | + assert embeddings.usage.total_tokens > 0 |
| 93 | + |
| 94 | + if dimensions is not None: |
| 95 | + assert len(embeddings.data[0].embedding) == dimensions |
| 96 | + |
| 97 | + vllm_outputs = [d.embedding for d in embeddings.data] |
| 98 | + correctness_test(hf_model, prompts, vllm_outputs, dimensions) |
| 99 | + |
| 100 | + if model_info.is_matryoshka: |
| 101 | + valid_dimensions: list[Optional[int]] = [None] |
| 102 | + if model_info.matryoshka_dimensions is not None: |
| 103 | + valid_dimensions += model_info.matryoshka_dimensions[:2] |
| 104 | + |
| 105 | + for dimensions in valid_dimensions: |
| 106 | + await make_request_and_correctness_test(dimensions) |
| 107 | + |
| 108 | + invalid_dimensions: list[Optional[int]] = [-1] |
| 109 | + if model_info.matryoshka_dimensions is not None: |
| 110 | + assert 5 not in model_info.matryoshka_dimensions |
| 111 | + invalid_dimensions.append(5) |
| 112 | + |
| 113 | + for dimensions in invalid_dimensions: |
65 | 114 | with pytest.raises(openai.BadRequestError): |
66 | | - for dimensions in [-1]: |
67 | | - await make_request(dimensions) |
| 115 | + await make_request_and_correctness_test(dimensions) |
68 | 116 |
|
69 | | - else: |
70 | | - for dimensions in [None]: |
71 | | - await make_request(dimensions) |
| 117 | + else: |
| 118 | + for dimensions in [None]: |
| 119 | + await make_request_and_correctness_test(dimensions) |
72 | 120 |
|
| 121 | + for dimensions in [-1, 16]: |
73 | 122 | with pytest.raises(openai.BadRequestError): |
74 | | - for dimensions in [-1, 16]: |
75 | | - await make_request(dimensions) |
| 123 | + await make_request_and_correctness_test(dimensions) |