Skip to content

Commit

Permalink
Merge pull request langchain-ai#3 from benjaminliugang/revert-2-MAX-32464-a
Browse files Browse the repository at this point in the history

Revert "MAX-32464 Add log to check cost not report issue on zinc related instance"
  • Loading branch information
shinxi authored Jan 23, 2024
2 parents 917e87e + f61065d commit 7e82a10
Showing 1 changed file with 0 additions and 4 deletions.
4 changes: 0 additions & 4 deletions libs/community/langchain_community/callbacks/openai_info.py
Original file line number Diff line number Diff line change
Expand Up @@ -189,16 +189,12 @@ def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
completion_tokens = token_usage.get("completion_tokens", 0)
prompt_tokens = token_usage.get("prompt_tokens", 0)
model_name = standardize_model_name(response.llm_output.get("model_name", ""))
# TODO Remove the following test log
print(f"Report metrics cost issue check: {model_name}, {response.llm_output.get('model_name', '')}, {model_name in MODEL_COST_PER_1K_TOKENS}") # noqa: E501
if model_name in MODEL_COST_PER_1K_TOKENS:
completion_cost = get_openai_token_cost_for_model(
model_name, completion_tokens, is_completion=True
)
prompt_cost = get_openai_token_cost_for_model(model_name, prompt_tokens)
self.total_cost += prompt_cost + completion_cost
# TODO Remove the following test log
print(f"Report metrics cost issue check: completion_cost {completion_cost}, prompt_cost {prompt_cost}, total_cost {self.total_cost}") # noqa: E501
self.total_tokens += token_usage.get("total_tokens", 0)
self.prompt_tokens += prompt_tokens
self.completion_tokens += completion_tokens
Expand Down

0 comments on commit 7e82a10

Please sign in to comment.