Merge pull request #5788 from ConnectAI-E/fix-o1-maxtokens
chore: use max_completion_tokens for the o1 models
lloydzhou authored Nov 7, 2024
2 parents fbb9385 + d5bda29 commit 108069a
Showing 1 changed file with 6 additions and 0 deletions.
6 changes: 6 additions & 0 deletions app/client/platforms/openai.ts
@@ -65,6 +65,7 @@ export interface RequestPayload {
   frequency_penalty: number;
   top_p: number;
   max_tokens?: number;
+  max_completion_tokens?: number;
 }
 
 export interface DalleRequestPayload {
@@ -233,6 +234,11 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
 
+    // o1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
+    if (isO1) {
+      requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
+    }
+
     // add max_tokens to vision model
     if (visionModel) {
       requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
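
The diff boils down to a small decision about which token-limit field to send. Below is a minimal, self-contained TypeScript sketch of that behavior after this commit; the helper name buildTokenLimit and the trimmed ModelConfig / RequestPayload interfaces are illustrative assumptions, not the repository's actual API — only the max_tokens / max_completion_tokens fields mirror the real payload.

// Sketch of the token-limit selection introduced by this commit.
// buildTokenLimit and these trimmed interfaces are hypothetical, not the repo's API.
interface RequestPayload {
  model: string;
  max_tokens?: number;
  max_completion_tokens?: number;
}

interface ModelConfig {
  model: string;
  max_tokens: number;
}

function buildTokenLimit(modelConfig: ModelConfig, visionModel: boolean): RequestPayload {
  const payload: RequestPayload = { model: modelConfig.model };

  // Reasoning models (o1-preview, o1-mini) reject max_tokens and expect
  // max_completion_tokens instead, per the OpenAI reasoning guide linked above.
  const isO1 = modelConfig.model.startsWith("o1");
  if (isO1) {
    payload.max_completion_tokens = modelConfig.max_tokens;
  } else if (visionModel) {
    // Vision models keep the existing max_tokens behavior,
    // with the 4000-token floor seen in the diff above.
    payload.max_tokens = Math.max(modelConfig.max_tokens, 4000);
  }
  // For all other models the client intentionally omits max_tokens entirely.
  return payload;
}

For example, buildTokenLimit({ model: "o1-mini", max_tokens: 2048 }, false) yields { model: "o1-mini", max_completion_tokens: 2048 }, while a non-o1, non-vision model gets neither limit field.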
