# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import torch
import torch.nn.functional as F
from transformers import AutoModel

from vllm.platforms import current_platform

from ....conftest import HfRunner


@pytest.fixture(autouse=True)
def v1(run_with_both_engines):
    # Simple autouse wrapper to run both engines for each test
    # This can be promoted up to conftest.py to run for every
    # test in a package
    pass


@pytest.fixture
def math_step_prompts():
    # ruff: noqa: E501
    data = {
        "system":
        "Please reason step by step, and put your final answer within \\boxed{}. ",
        "query":
        "Sue lives in a fun neighborhood. One weekend, the neighbors decided to play a prank on Sue. On Friday morning, the neighbors placed 18 pink plastic flamingos out on Sue's front yard. On Saturday morning, the neighbors took back one third of the flamingos, painted them white, and put these newly painted white flamingos back out on Sue's front yard. Then, on Sunday morning, they added another 18 pink plastic flamingos to the collection. At noon on Sunday, how many more pink plastic flamingos were out than white plastic flamingos?",
        "response": [
            "To find out how many more pink plastic flamingos were out than white plastic flamingos at noon on Sunday, we can break down the problem into steps. First, on Friday, the neighbors start with 18 pink plastic flamingos.",
            "On Saturday, they take back one third of the flamingos. Since there were 18 flamingos, (1/3 \\times 18 = 6) flamingos are taken back. So, they have (18 - 6 = 12) flamingos left in their possession. Then, they paint these 6 flamingos white and put them back out on Sue's front yard. Now, Sue has the original 12 pink flamingos plus the 6 new white ones. Thus, by the end of Saturday, Sue has (12 + 6 = 18) pink flamingos and 6 white flamingos.",
            "On Sunday, the neighbors add another 18 pink plastic flamingos to Sue's front yard. By the end of Sunday morning, Sue has (18 + 18 = 36) pink flamingos and still 6 white flamingos.",
            "To find the difference, subtract the number of white flamingos from the number of pink flamingos: (36 - 6 = 30). Therefore, at noon on Sunday, there were 30 more pink plastic flamingos out than white plastic flamingos. The answer is (\\boxed{30}).",
        ],
    }
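    # Join the reasoning steps with the "<extra_0>" step-separator token;
    # the process reward model scores each separator position.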
    answer = "<extra_0>".join(data['response']) + "<extra_0>"
    prompt = f"<im_start>system\n{data['system']}<im_end>\n<im_start>user\n{data['query']}<im_end>\n<im_start>assistant\n{answer}<im_end><|endoftext|>"
    return [prompt]


def step_reward_patch_hf_model(hf_model: HfRunner):

    # Patch the hf_runner to use the step reward function
    def make_step_rewards(logits: torch.Tensor,
                          token_masks: torch.Tensor) -> list[list[float]]:
        probabilities = F.softmax(logits, dim=-1)
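        # Zero out every position except the "<extra_0>" step separators.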
        probabilities = probabilities * token_masks.unsqueeze(-1)

        all_scores_res: list[list[float]] = []
        for i in range(probabilities.size(0)):
            sample = probabilities[i]  # seq_len, num_labels
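            # Only separator positions remain non-zero; the PRM head has two
            # labels per step, so reshape to (num_steps, 2).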
            positive_probs = sample[sample != 0].view(-1, 2)
            non_zero_elements_list = positive_probs.cpu().tolist()
            all_scores_res.append(non_zero_elements_list)
        return all_scores_res

    def reward(prompts: list[str]) -> list[list[float]]:
        input_ids = hf_model.tokenizer(prompts, return_tensors="pt").input_ids
        input_ids = hf_model.wrap_device(input_ids)
        outputs = hf_model.model(input_ids=input_ids)

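        # Rewards are read off at the "<extra_0>" separator token positions.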
        step_sep_id = hf_model.tokenizer.encode("<extra_0>")[0]
        token_masks = (input_ids == step_sep_id)
        return make_step_rewards(outputs[0], token_masks)

    hf_model.reward = reward  # type: ignore[attr-defined]

    return hf_model


@pytest.mark.parametrize(
    "model",
    [
        pytest.param("Qwen/Qwen2.5-Math-PRM-7B",
                     marks=[pytest.mark.core_model, pytest.mark.cpu_model]),
    ],
)
@pytest.mark.parametrize("dtype", ["half"])
def test_prm_models(
    hf_runner,
    vllm_runner,
    math_step_prompts,
    model: str,
    dtype: str,
    monkeypatch,
) -> None:
    if current_platform.is_rocm():
        # ROCm Triton FA does not currently support sliding window attention,
        # so switch to the ROCm CK FA backend.
        monkeypatch.setenv("VLLM_USE_TRITON_FLASH_ATTN", "False")

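    # vLLM runs the PRM through its pooling path; encode() is expected to
    # return the same per-step score pairs as the HF reference below.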
    with vllm_runner(model, max_model_len=1024, dtype=dtype) as vllm_model:
        vllm_outputs = vllm_model.encode(math_step_prompts)

    with hf_runner(model, dtype=dtype, auto_cls=AutoModel) as hf_model:
        hf_model = step_reward_patch_hf_model(hf_model)
        hf_outputs = hf_model.reward(math_step_prompts)

    # Check that the per-step reward scores from HF and vLLM match.
    for hf_output, vllm_output in zip(hf_outputs, vllm_outputs):
        hf_output = torch.tensor(hf_output)
        vllm_output = torch.tensor(vllm_output)

        assert torch.allclose(hf_output, vllm_output, 1e-2)