File tree: 2 files changed, +0 −12 lines changed
@@ -217,14 +217,9 @@ def create_py_executor(
217217 tokenizer : Optional [TokenizerBase ] = None ,
218218 lora_config : Optional [LoraConfig ] = None ,
219219 kv_connector_config : Optional [KvCacheConnectorConfig ] = None ,
220- logits_post_processor_config : Optional [LogitsPostProcessorConfig ] = None ,
221- parallel_config : Optional [ParallelConfig ] = None ,
222220) -> PyExecutor :
223221
224222 executor_config = llm_args .get_executor_config (checkpoint_dir , tokenizer )
225- executor_config .logits_post_processor_config = logits_post_processor_config
226- executor_config .parallel_config = parallel_config
227-
228223 garbage_collection_gen0_threshold = llm_args .garbage_collection_gen0_threshold
229224
230225 _mangle_executor_config (executor_config )
@@ -122,13 +122,6 @@ def _create_py_executor():
122122 args ["tokenizer" ] = tokenizer
123123 args ["lora_config" ] = lora_config
124124 args ["kv_connector_config" ] = kv_connector_config
125- args [
126- "logits_post_processor_config" ] = tllm .LogitsPostProcessorConfig (
127- processor_batched = batched_logits_processor ,
128- replicate = False )
129- comm_ranks , device_ids = _get_comm_ranks_device_id ()
130- args ["parallel_config" ] = tllm .ParallelConfig (
131- participant_ids = comm_ranks , device_ids = device_ids )
132125 elif self .llm_args .backend == "_autodeploy" :
133126 from tensorrt_llm ._torch .auto_deploy .llm_args import \
134127 LlmArgs as ADLlmArgs
You can’t perform that action at this time.
0 commit comments