refactor: constants
c0sogi committed Jun 2, 2023
1 parent 6f5d5e8 commit ee25304
Showing 14 changed files with 463 additions and 224 deletions.
Empty file added .vscode/redis-xplorer.redis
15 changes: 15 additions & 0 deletions .vscode/settings.json
@@ -18,4 +18,19 @@
     "**/.pytest_cache": true,
   },
   "python.analysis.typeCheckingMode": "basic",
+  "redisXplorer.config": {
+    "profiles": [
+      {
+        "name": "remote",
+        "host": "walabi.store",
+        "accessKey": "faf5209e77b3472f816df1ca025c3e16",
+        "filter": "*",
+        "port": "6379"
+      }
+    ],
+    "scanLimit": 200
+  },
+  "python.linting.flake8Enabled": false,
+  "python.linting.mypyEnabled": true,
+  "python.linting.enabled": false,
 }
163 changes: 86 additions & 77 deletions app/common/constants.py
@@ -1,96 +1,104 @@
 # flake8: noqa
 
+import enum
 from langchain import PromptTemplate
 
 
-LONG_PROMPT = (
-    "This year, the elves invested in a gift-wrapping machine. However, it i"
-    "sn't programmed! An algorithm that aids it in the task must be developed. Many p"
-    "resents are given to the machine. Each present is a string. Each gift must be wr"
-    "apped by the machine and set in a display of other wrapped gifts. To wrap a gift"
-    ", you must place the wrapping paper around the string, which is represented by t"
-    "he * symbol. For instance: const gifts are [“cat,” “game,” and “socks”]. console"
-    ".log const wrapped = wrapping(gifts) (wrapped) / [“ncatn,” “ngamen,” and “nsocks"
-    "n**”] */ As you can see, the thread is wrapped in the wrapping paper. The corner"
-    "s are also wrapped in wrapping paper on the top and bottom to prevent any gaps."
-)
+class QueryTemplates(str, enum.Enum):
+    CONTEXT_QUESTION__DEFAULT = (
+        "Context information is below. \n"
+        "---------------------\n"
+        "{context}"
+        "\n---------------------\n"
+        "answer the question: {question}\n"
+    )
+    CONTEXT_QUESTION__CONTEXT_ONLY = (
+        "Context information is below. \n"
+        "---------------------\n"
+        "{context}"
+        "\n---------------------\n"
+        "Given the context information and not prior knowledge, "
+        "answer the question: {question}\n"
+    )
 
-QUERY_TMPL1 = (
-    "Context information is below. \n"
-    "---------------------\n"
-    "{context}"
-    "\n---------------------\n"
-    "Given the context information and not prior knowledge, "
-    "answer the question: {question}\n"
-)
-
-QUERY_TMPL2 = (
-    "Context information is below. \n"
-    "---------------------\n"
-    "{context}"
-    "\n---------------------\n"
-    "answer the question: {question}\n"
-)
+class DescriptionTemplates(str, enum.Enum):
+    USER_AI__DEFAULT = (
+        "The following is a friendly conversation between a {user} and an {ai}. "
+        "The {ai} is talkative and provides lots of specific details from its context. "
+        "If the {ai} does not know the answer to a question, it truthfully says it does not know.\n\n"
+        "Current conversation:\n\n"
+    )
 
-DESCRIPTION_TMPL1 = (
-    "The following is a friendly conversation between a {user} and an {ai}. "
-    "The {ai} is talkative and provides lots of specific details from its context. "
-    "If the {ai} does not know the answer to a question, it truthfully says it does not know.\n\n"
-    "Current conversation:\n\n"
-)
+    USER_AI__SHORT = (
+        "A chat between a curious human and an artificial intelligence assistant. "
+        "The assistant gives helpful, detailed, and polite answers to the human's questions."
+    )
 
-DESCRIPTION_TMPL2 = (
-    "A chat between a curious human and an artificial intelligence assistant. "
-    "The assistant gives helpful, detailed, and polite answers to the human's questions."
-)
 
-CHAT_TURN_TMPL1 = "### {role}: {content}\n"
+class ChatTurnTemplates(str, enum.Enum):
+    ROLE_CONTENT_1 = "### {role}: {content}\n"
 
 
-MARKUP_SUMMARIZE_TEMPLATE = PromptTemplate(
-    template=(
-        "Write a concise summary of the following text delimited by triple backquotes. "
-        "Return your response in bullet points which covers the key points of the text.\n"
-        "```\n{text}\n```\n\nBULLET POINT SUMMARY:\n"
-    ),
-    input_variables=["text"],
-)
+class SummarizationTemplates(enum.Enum):
+    TEXT__MARKUP = PromptTemplate(
+        template=(
+            "Write a concise summary of the following text delimited by triple backquotes. "
+            "Return your response in bullet points which covers the key points of the text.\n"
+            "```\n{text}\n```\n\nBULLET POINT SUMMARY:\n"
+        ),
+        input_variables=["text"],
+    )
+
+    TEXT__CONVERSATIONAL = PromptTemplate(
+        template=(
+            "Write a summary of the following conversations delimited by triple backquotes.\n"
+            "Organize the key points of each message in the order of the conversation in a format like `ROLE: SUMMARY`.\n"
+            "```\n{text}\n```\n\nCONVERSATION SUMMARY:\n"
+        ),
+        input_variables=["text"],
+    )
 
-CONVERSATION_SUMMARIZE_TEMPLATE = PromptTemplate(
-    template=(
-        "Write a summary of the following conversations delimited by triple backquotes.\n"
-        "Organize the key points of each message in the order of the conversation in a format like `ROLE: SUMMARY`.\n"
-        "```\n{text}\n```\n\nCONVERSATION SUMMARY:\n"
-    ),
-    input_variables=["text"],
-)
 
-CODEX_PROMPT = (
-    'Act as CODEX ("COding DEsign eXpert"), an expert coder with experience in mult'
-    "iple coding languages. Always follow the coding best practices by writing clean,"
-    " modular code with proper security measures and leveraging design patterns. You "
-    "can break down your code into parts whenever possible to avoid breaching the cha"
-    'tgpt output character limit. Write code part by part when I send "continue". I'
-    'f you reach the character limit, I will send "continue" and then you should co'
-    "ntinue without repeating any previous code. Do not assume anything from your sid"
-    "e; please ask me a numbered list of essential questions before starting. If you "
-    "have trouble fixing a bug, ask me for the latest code snippets for reference fro"
-    "m the official documentation. I am using [MacOS], [VSCode] and prefer [brew] pac"
-    'kage manager. Start a conversation as "CODEX: Hi, what are we coding today?'
-)
+class SystemPrompts(str, enum.Enum):
+    CODEX = (
+        'Act as CODEX ("COding DEsign eXpert"), an expert coder with experience in mult'
+        "iple coding languages. Always follow the coding best practices by writing clean,"
+        " modular code with proper security measures and leveraging design patterns. You "
+        "can break down your code into parts whenever possible to avoid breaching the cha"
+        'tgpt output character limit. Write code part by part when I send "continue". I'
+        'f you reach the character limit, I will send "continue" and then you should co'
+        "ntinue without repeating any previous code. Do not assume anything from your sid"
+        "e; please ask me a numbered list of essential questions before starting. If you "
+        "have trouble fixing a bug, ask me for the latest code snippets for reference fro"
+        "m the official documentation. I am using [MacOS], [VSCode] and prefer [brew] pac"
+        'kage manager. Start a conversation as "CODEX: Hi, what are we coding today?'
+    )
 
-REDEX_PROMPT = (
-    "compress the following text in a way that fits in a tweet (ideally) and such tha"
-    "t you (GPT) can reconstruct the intention of the human who wrote text as close a"
-    "s possible to the original intention. This is for yourself. It does not need to "
-    "be human readable or understandable. Abuse of language mixing, abbreviations, sy"
-    "mbols (unicode and emoji), or any other encodings or internal representations is"
-    " all permissible, as long as it, if pasted in a new inference cycle, will yield "
-    "near-identical results as the original text: "
-)
+    REDEX = (
+        "compress the following text in a way that fits in a tweet (ideally) and such tha"
+        "t you (GPT) can reconstruct the intention of the human who wrote text as close a"
+        "s possible to the original intention. This is for yourself. It does not need to "
+        "be human readable or understandable. Abuse of language mixing, abbreviations, sy"
+        "mbols (unicode and emoji), or any other encodings or internal representations is"
+        " all permissible, as long as it, if pasted in a new inference cycle, will yield "
+        "near-identical results as the original text: "
+    )
 
 
+LONG_PROMPT = (
+    "This year, the elves invested in a gift-wrapping machine. However, it i"
+    "sn't programmed! An algorithm that aids it in the task must be developed. Many p"
+    "resents are given to the machine. Each present is a string. Each gift must be wr"
+    "apped by the machine and set in a display of other wrapped gifts. To wrap a gift"
+    ", you must place the wrapping paper around the string, which is represented by t"
+    "he * symbol. For instance: const gifts are [“cat,” “game,” and “socks”]. console"
+    ".log const wrapped = wrapping(gifts) (wrapped) / [“ncatn,” “ngamen,” and “nsocks"
+    "n**”] */ As you can see, the thread is wrapped in the wrapping paper. The corner"
+    "s are also wrapped in wrapping paper on the top and bottom to prevent any gaps."
+)
 
-CONVERSATION_EXAMPLE: list[dict[str, str]] = [
+CONVERSATION_EXAMPLES: list[dict[str, str]] = [
     {
         "role": "user",
         "content": (
@@ -286,8 +294,9 @@
 
 def split_long_text(long_text: str, chars_per_line: int):
     split_strings = [
-        repr(long_text[i : i + chars_per_line]) for i in range(0, len(long_text), chars_per_line)
-    ]  # noqa: E203
+        repr(long_text[i : i + chars_per_line])
+        for i in range(0, len(long_text), chars_per_line)
+    ]
     return "(" + "\n".join(split_strings) + ")"
 
 while True:
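The refactor groups the old module-level prompt constants into enums, so call sites switch from bare names to enum members. Below is a minimal migration sketch with hypothetical call sites (none appear in this diff); members of the str-based enums are themselves strings and format directly, while SummarizationTemplates members hold PromptTemplate objects and need .value:

# Hypothetical call sites, for illustration only; the diff does not show them.
from app.common.constants import (
    ChatTurnTemplates,
    QueryTemplates,
    SummarizationTemplates,
)

# Before: QUERY_TMPL1.format(...)
# After: a (str, enum.Enum) member is a str, so str.format works on it directly.
prompt = QueryTemplates.CONTEXT_QUESTION__CONTEXT_ONLY.format(
    context="Paris is the capital of France.",
    question="What is the capital of France?",
)

# Before: CHAT_TURN_TMPL1.format(...)
turn = ChatTurnTemplates.ROLE_CONTENT_1.format(role="user", content="Hi!")

# SummarizationTemplates does not subclass str, so unwrap with .value:
summary_prompt = SummarizationTemplates.TEXT__MARKUP.value.format(
    text="Some long markup text to summarize."
)

The split_long_text helper in the second hunk appears to be how the 80-character chunked string literals above were generated: it reprs fixed-width slices of a long string and joins them into a parenthesized block that can be pasted straight into the module.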
24 changes: 18 additions & 6 deletions app/models/chat_models.py
@@ -123,9 +123,15 @@ def parse_stringified_json(cls, stred_json: str) -> "UserChatContext":
         return cls(
             user_chat_profile=UserChatProfile(**stored["user_chat_profile"]),
             llm_model=LLMModels._member_map_[stored["llm_model"].replace(".", "_").replace("-", "_")],  # type: ignore
-            user_message_histories=[MessageHistory(**m) for m in stored["user_message_histories"]],
-            ai_message_histories=[MessageHistory(**m) for m in stored["ai_message_histories"]],
-            system_message_histories=[MessageHistory(**m) for m in stored["system_message_histories"]],
+            user_message_histories=[
+                MessageHistory(**m) for m in stored["user_message_histories"]
+            ],
+            ai_message_histories=[
+                MessageHistory(**m) for m in stored["ai_message_histories"]
+            ],
+            system_message_histories=[
+                MessageHistory(**m) for m in stored["system_message_histories"]
+            ],
         )
 
     def json(self) -> dict:
@@ -134,7 +140,9 @@ def json(self) -> dict:
             "llm_model": self.llm_model.name,
             "user_message_histories": [m.__dict__ for m in self.user_message_histories],
             "ai_message_histories": [m.__dict__ for m in self.ai_message_histories],
-            "system_message_histories": [m.__dict__ for m in self.system_message_histories],
+            "system_message_histories": [
+                m.__dict__ for m in self.system_message_histories
+            ],
         }
 
     def to_stringified_json(self) -> str:
@@ -152,12 +160,16 @@ def left_tokens(self) -> int:
             self.llm_model.value.max_total_tokens
             - self.total_tokens
             - self.llm_model.value.token_margin
+            - int(getattr(self.llm_model.value, "description_tokens", 0))
         )
 
     @property
     def total_tokens(self) -> int:
-        return self.user_message_tokens + self.ai_message_tokens + self.system_message_tokens
+        return (
+            self.user_message_tokens
+            + self.ai_message_tokens
+            + self.system_message_tokens
+            + int(getattr(self.llm_model.value, "description_tokens", 0))
+        )
 
     @property
     def token_per_request(self) -> int:
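A worked sketch of the token budget after this change, using made-up numbers (the real values come from the LLMModels definitions). Note that, as written, description_tokens is folded into total_tokens and subtracted again in left_tokens, so that overhead is effectively reserved twice:

# Illustrative numbers only; the field names mirror the diff.
max_total_tokens = 4096    # model context window
token_margin = 512         # safety margin kept free for the response
description_tokens = 200   # overhead attributed to the model description
user_tokens, ai_tokens, system_tokens = 1000, 800, 100

total_tokens = user_tokens + ai_tokens + system_tokens + description_tokens  # 2100
left_tokens = (
    max_total_tokens - total_tokens - token_margin - description_tokens
)  # 4096 - 2100 - 512 - 200 = 1284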
