You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Copy file name to clipboardExpand all lines: configs/base_config.yaml
+4-3
Original file line number
Diff line number
Diff line change
@@ -42,14 +42,14 @@ gpt3: # GPT-3 configuration
42
42
n_votes: 1  # Number of tries to use for GPT-3. Use with temperature > 0
43
43
qa_prompt: ./prompts/gpt3/gpt3_qa.txt
44
44
temperature: 0.  # Temperature for GPT-3. Almost deterministic if 0
45
-
model: text-davinci-003  # Can replace with code-davinci-002 (which is free for now) but will have worse performance as it's meant for code
45
+
model: text-davinci-003  # See openai.Model.list() for available models
46
46
47
47
codex:
48
48
temperature: 0.  # Temperature for Codex. (Almost) deterministic if 0
49
49
best_of: 1  # Number of tries to choose from. Use when temperature > 0
50
50
max_tokens: 512  # Maximum number of tokens to generate for Codex
51
-
prompt: ./prompts/api.prompt # Codex prompt file, which defines the API. If you use a Chat-based model (3.5/4) try ./prompts/chatapi.prompt (doesn't support video for now due to token limits)
52
-
model: code-davinci-002  # Codex model to use. [code-davinci-002, gpt-3.5-turbo, gpt-4]
51
+
prompt: ./prompts/chatapi.prompt  # Codex prompt file, which defines the API. (Doesn't support video for now due to token limits)
52
+
model: gpt-3.5-turbo  # Codex model to use. [gpt-3.5-turbo, gpt-4]. See openai.Model.list()
0 commit comments