@@ -1,20 +1,26 @@
 from datetime import datetime
 
-# ruff: noqa: E501
-
 summary_prompt = (
     "Summarize the excerpt below to help answer a question.\n\nExcerpt from"
-    " {citation}\n\n----\n\n{text}\n\n----\n\nQuestion: {question}\n\nDo not directly"
+    " {citation}\n\n------------\n\n{text}\n\n------------"
+    "\n\nQuestion: {question}\n\nDo not directly"
     " answer the question, instead summarize to give evidence to help answer the"
     " question. Stay detailed; report specific numbers, equations, or direct quotes"
     ' (marked with quotation marks). Reply "Not applicable" if the excerpt is'
     " irrelevant. At the end of your response, provide an integer score from 1-10 on a"
     " newline indicating relevance to question. Do not explain your score.\n\nRelevant"
     " Information Summary ({summary_length}):"
 )
+# This prompt template integrates with the `text` variable of the above `summary_prompt`
+text_with_tables_prompt_template = (
+    "{text}\n\n------------\n\nMarkdown tables from {citation}."
+    " If the markdown is garbled, refer to the images"
+    "\n\n------------\n\n{tables}"
+)
 
 summary_json_prompt = (
-    "Excerpt from {citation}\n\n----\n\n{text}\n\n----\n\nQuestion: {question}\n\n"
+    "Excerpt from {citation}\n\n------------\n\n{text}\n\n------------"
+    "\n\nQuestion: {question}\n\n"
 )
 
 # The below "cannot answer" sentinel phrase should:
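
The new `text_with_tables_prompt_template` fills the `text` variable of `summary_prompt`, per its comment. A minimal sketch of that composition (all values below are illustrative, not from this commit):

    text_block = text_with_tables_prompt_template.format(
        text="The base model reaches 27.3 BLEU.",  # raw excerpt text
        citation="Vaswani et al. 2017",
        tables="| Model | BLEU |\n| --- | --- |\n| base | 27.3 |",
    )
    prompt = summary_prompt.format(
        citation="Vaswani et al. 2017",
        text=text_block,  # excerpt plus its rendered markdown tables
        question="What BLEU score does the base Transformer reach?",
        summary_length="about 100 words",
    )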
 | 
@@ -45,7 +51,7 @@
 
 qa_prompt = (
     "Answer the question below with the context.\n\n"
-    "Context:\n\n{context}\n\n----\n\n"
+    "Context:\n\n{context}\n\n------------\n\n"
     "Question: {question}\n\n"
     "Write an answer based on the context. "
     "If the context provides insufficient information reply "
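
One plausible reading of the delimiter widening in this commit (an assumption, not stated in the diff): four dashes collide with markdown horizontal rules that can appear inside `{text}` or `{context}`, while twelve dashes stay unambiguous. A quick check of that assumption:

    excerpt = "Results were strong.\n\n----\n\nSee Table 2."  # contains a markdown rule
    old_style = "Context:\n\n" + excerpt + "\n\n----\n\n"
    new_style = "Context:\n\n" + excerpt + "\n\n------------\n\n"
    assert old_style.count("----") > 1           # the excerpt's own rule also matches
    assert new_style.count("------------") == 1  # only the real delimiter matches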
 | 
@@ -99,15 +105,19 @@
 )
 
 # NOTE: we use double curly braces here so it's not considered an f-string template
-summary_json_system_prompt = """\
-Provide a summary of the relevant information that could help answer the question based on the excerpt. Respond with the following JSON format:
-
-{{
-  "summary": "...",
-  "relevance_score": "..."
-}}
-
-where `summary` is relevant information from the text - {summary_length} words. `relevance_score` is an integer 1-10 for the relevance of `summary` to the question."""
+summary_json_system_prompt = (
+    "Provide a summary of the relevant information"
+    " that could help answer the question based on the excerpt."
+    " Your summary, combined with many others,"
+    " will be given to the model to generate an answer."
+    " Respond with the following JSON format:"
+    '\n\n{{\n  "summary": "...",\n  "relevance_score": "...",\n  "used_images": "..."\n}}'
+    "\n\nwhere `summary` is relevant information from the text - {summary_length} words."
+    " `relevance_score` is an integer 1-10 for the relevance of `summary` to the question."
+    " `used_images` is a boolean flag indicating"
+    " if any images present in a multimodal message were used,"
+    " and if no images were present it should be false."
+)
 
 env_system_prompt = (
     # Matching https://github.com/langchain-ai/langchain/blob/langchain%3D%3D0.2.3/libs/langchain/langchain/agents/openai_functions_agent/base.py#L213-L215
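
The reworked `summary_json_system_prompt` defines the JSON contract the model's reply is parsed against; per the NOTE, the doubled braces render as literal `{`/`}` once `{summary_length}` is formatted. A sketch of filling the template and parsing a conforming reply (the reply text is invented for illustration):

    import json

    system = summary_json_system_prompt.format(summary_length="about 100 words")
    assert '"relevance_score"' in system  # doubled braces became literal JSON braces

    reply = (
        '{"summary": "The base model reaches 27.3 BLEU.",'
        ' "relevance_score": 8, "used_images": false}'
    )
    data = json.loads(reply)
    assert 1 <= data["relevance_score"] <= 10
    assert data["used_images"] is False  # text-only message, so no images used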
 | 