
Commit

Fix typos (#3123)
co63oc authored Feb 15, 2025
1 parent 65b1227 commit 3c712c9
Showing 12 changed files with 24 additions and 24 deletions.
2 changes: 1 addition & 1 deletion swift/llm/argument/base_args/quant_args.py
@@ -21,7 +21,7 @@ class QuantizeArguments:
        The compute dtype for bnb 4-bit quantization.
        bnb_4bit_quant_type (Literal['fp4', 'nf4']): The quantization type for bnb 4-bit quantization.
        bnb_4bit_use_double_quant (bool): Whether to use double quantization for bnb 4-bit quantization.
-       bnb_4bit_quant_storage (Optional[str]): This sets the storage type to pack the quanitzed 4-bit prarams.
+       bnb_4bit_quant_storage (Optional[str]): This sets the storage type to pack the quantized 4-bit params.
    """
    # awq, gptq, and aqlm need to be pre-quantized models.
    # It can be detected automatically, without the need to pass in.
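For orientation, these docstring fields mirror the parameters of transformers' BitsAndBytesConfig. A minimal sketch of a typical bnb 4-bit setup (how SWIFT maps its arguments onto this config is not shown in this diff, so treat the wiring as an assumption):

import torch
from transformers import BitsAndBytesConfig

# bnb_4bit_quant_storage is mainly relevant for FSDP + QLoRA, where the
# storage dtype of the packed 4-bit params must match the frozen weights.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_quant_type='nf4',
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_storage=torch.bfloat16,
)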
2 changes: 1 addition & 1 deletion swift/llm/dataset/dataset/mllm.py
@@ -586,7 +586,7 @@ def preprocess_row(row: Dict[str, Any]) -> Dict[str, Any]:
            'Input some text into a web element like <input> or <textbox>',
            'parameter': [{
                'element': 'string, the element in the web page to input to',
-               'content': 'string, what content to input into the textbox elment'
+               'content': 'string, what content to input into the textbox element'
            }]
        }
    }, {
4 changes: 2 additions & 2 deletions swift/llm/infer/infer_engine/infer_client.py
@@ -105,9 +105,9 @@ def infer(
def _prepare_request_data(model: str, infer_request: InferRequest, request_config: RequestConfig) -> Dict[str, Any]:
    res = asdict(ChatCompletionRequest(model, **asdict(infer_request), **asdict(request_config)))
    # ignore empty
-   empty_requset = ChatCompletionRequest('', [])
+   empty_request = ChatCompletionRequest('', [])
    for k in list(res.keys()):
-       if res[k] == getattr(empty_requset, k):
+       if res[k] == getattr(empty_request, k):
            res.pop(k)
    return res

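The corrected empty_request acts as a baseline of defaults: any field of the outgoing request that still equals the value in an empty ChatCompletionRequest is dropped before the payload is sent. A minimal sketch of the same pattern with a hypothetical stand-in dataclass (Req and its fields are made up for illustration):

from dataclasses import asdict, dataclass
from typing import List, Optional

@dataclass
class Req:  # hypothetical stand-in for ChatCompletionRequest
    model: str
    messages: List[dict]
    temperature: Optional[float] = None
    max_tokens: Optional[int] = None

def strip_defaults(req: Req) -> dict:
    res = asdict(req)
    empty = Req('', [])  # baseline instance holding only defaults
    # keep a key only when its value differs from the default
    return {k: v for k, v in res.items() if v != getattr(empty, k)}

print(strip_defaults(Req('m', [{'role': 'user', 'content': 'hi'}], temperature=0.7)))
# -> {'model': 'm', 'messages': [{'role': 'user', 'content': 'hi'}], 'temperature': 0.7}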
2 changes: 1 addition & 1 deletion swift/plugin/tools.py
@@ -77,7 +77,7 @@ def format_toolbench(tool_names, tool_descs):
After the call, you will get the call result, and you are now in a new state.
Then you will analyze your status now, then decide what to do next...
-After many (Thought-call) pairs, you finally perform the task, then you can give your finial answer.
+After many (Thought-call) pairs, you finally perform the task, then you can give your final answer.
Remember:
1.the state change is irreversible, you can't go back to one of the former state, if you want to restart the task, \
say \"I give up and restart\".
8 changes: 4 additions & 4 deletions swift/tuners/base.py
@@ -383,13 +383,13 @@ def _prepare_model(
        assert (hasattr(config, SWIFT_TYPE_KEY))
        from .mapping import SWIFT_MAPPING

-       adatper_cls = SWIFT_MAPPING[config.swift_type][1]
-       if adatper_cls.has_additional_modules() and not getattr(model, 'model_frozen', False):
+       adapter_cls = SWIFT_MAPPING[config.swift_type][1]
+       if adapter_cls.has_additional_modules() and not getattr(model, 'model_frozen', False):
            for _, p in model.named_parameters():
                p.requires_grad = False
            model.model_frozen = True
-       config.has_additional_modules = adatper_cls.has_additional_modules()
-       return adatper_cls.prepare_model(model, config, adapter_name)
+       config.has_additional_modules = adapter_cls.has_additional_modules()
+       return adapter_cls.prepare_model(model, config, adapter_name)

    def create_or_update_model_card(self, output_dir: str):
        """
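For context on the renamed adapter_cls: when the adapter type brings its own modules, the branch above freezes every base-model parameter so that only the adapter weights stay trainable. A minimal sketch of that freeze step, assuming a plain torch.nn module in place of SWIFT's wrapped model:

import torch.nn as nn

model = nn.Linear(4, 4)  # stand-in for the base model
if not getattr(model, 'model_frozen', False):
    for _, p in model.named_parameters():
        p.requires_grad = False  # freeze base weights; adapters added later stay trainable
    model.model_frozen = True
print(all(not p.requires_grad for p in model.parameters()))  # True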
2 changes: 1 addition & 1 deletion swift/ui/base.py
@@ -103,7 +103,7 @@ class BaseUI:
        'local_dir_alert': {
            'value': {
                'zh': '无法识别model_type和template,请手动选择',
-               'en': 'Cannot recognize the model_type and template, please choose manully'
+               'en': 'Cannot recognize the model_type and template, please choose manually'
            }
        },
    }
14 changes: 7 additions & 7 deletions swift/utils/torchacc_utils.py
@@ -53,16 +53,16 @@ def _get_closet_bucket(bucket_sizes, data_length):
"""Select the one from bucket_sizes that is closest in distance to
data_length. This is required for TorchAcc.
"""
cloest_length = sys.maxsize
closest_length = sys.maxsize
for b in bucket_sizes:
if b == data_length or ((b < cloest_length) and (b > data_length)):
cloest_length = b
if b == data_length or ((b < closest_length) and (b > data_length)):
closest_length = b

if cloest_length == sys.maxsize:
if closest_length == sys.maxsize:
bucket_sizes.append(data_length)
cloest_length = data_length
closest_length = data_length

return cloest_length
return closest_length


def pad_and_split_batch(padding_to, input_ids, attention_mask, labels, loss_scale, max_length, tokenizer, rank,
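For context, the function being fixed picks the smallest bucket that is at least as large as the sample, and appends the exact length as a new bucket when none fits; padding to a small fixed set of lengths is what keeps TorchAcc from recompiling for every new sequence length. A standalone illustration with the corrected spelling (the bucket values are made up):

import sys

def get_closest_bucket(bucket_sizes, data_length):
    closest_length = sys.maxsize
    for b in bucket_sizes:
        if b == data_length or ((b < closest_length) and (b > data_length)):
            closest_length = b
    if closest_length == sys.maxsize:  # no bucket is large enough
        bucket_sizes.append(data_length)
        closest_length = data_length
    return closest_length

buckets = [128, 256, 512]
print(get_closest_bucket(buckets, 200))   # 256
print(get_closest_bucket(buckets, 1024))  # 1024, and 1024 is appended to buckets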
@@ -672,7 +672,7 @@ def qwen2_attn_forward(
    key_states = key_states.to(target_dtype)
    value_states = value_states.to(target_dtype)

-   # Reashape to the expected shape for Flash Attention
+   # Reshape to the expected shape for Flash Attention
    query_states = query_states.transpose(1, 2)
    key_states = key_states.transpose(1, 2)
    value_states = value_states.transpose(1, 2)
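The transposes under the fixed comment move the tensors from (batch, num_heads, seq_len, head_dim), the layout standard attention code produces, to (batch, seq_len, num_heads, head_dim), which flash-attention kernels expect. A quick shape check:

import torch

q = torch.randn(2, 8, 16, 64)  # (batch, num_heads, seq_len, head_dim)
q = q.transpose(1, 2)
print(q.shape)  # torch.Size([2, 16, 8, 64]) == (batch, seq_len, num_heads, head_dim)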
4 changes: 2 additions & 2 deletions tests/deploy/test_dataset.py
@@ -43,7 +43,7 @@ def test_pt():
    _test('pt')


-def test_vllm_orgin():
+def test_vllm_origin():
    import subprocess
    import sys
    from modelscope import snapshot_download
@@ -55,7 +55,7 @@ def test_vllm_orgin():


if __name__ == '__main__':
-   # test_vllm_orgin()
+   # test_vllm_origin()
    # test_vllm()
    test_lmdeploy()
    # test_pt()
4 changes: 2 additions & 2 deletions tests/deploy/test_logprobs.py
@@ -94,7 +94,7 @@ def test_pt():
    _test('pt')


-def test_vllm_orgin():
+def test_vllm_origin():
    import os
    import subprocess
    import sys
@@ -107,7 +107,7 @@ def test_vllm_orgin():


if __name__ == '__main__':
-   # test_vllm_orgin()
+   # test_vllm_origin()
    # test_vllm()
    test_vllm_vlm()
    # test_lmdeploy()
2 changes: 1 addition & 1 deletion tests/llm/test_template.py
@@ -85,7 +85,7 @@ def test_tool_message_join(self):
        StdTemplateInputs.messages_join_observation(test_messages, tools_prompt=tool_prompt)

        # multi-round tool calling should be joined that only one assistant message left.
-       assert len(test_messages) == 2, f'Tool prompot {tool_prompt} join failed, {messages}'
+       assert len(test_messages) == 2, f'Tool prompt {tool_prompt} join failed, {messages}'
        assert test_messages[1]['content'] == f"""{obs_word}first_round_result\n{obs_word}second_round_result\n"""


2 changes: 1 addition & 1 deletion tests/run.py
@@ -383,7 +383,7 @@ def get_selected_cases():


def run_in_subprocess(args):
-   # only case args.isolated_cases run in subporcess, all other run in a subprocess
+   # only case args.isolated_cases run in subprocess, all other run in a subprocess
    if not args.no_diff:  # run based on git diff
        try:
            test_suite_files = get_selected_cases()
2 changes: 1 addition & 1 deletion tests/run_config.yaml
@@ -1,5 +1,5 @@
# isolate cases in env, we can install different dependencies in each env.
-isolated: # test cases that may require excessive anmount of GPU memory or run long time, which will be executed in dedicagted process.
+isolated: # test cases that may require excessive amount of GPU memory or run long time, which will be executed in dedicated process.

envs:
  default: # default env, case not in other env will in default, pytorch.
