
Commit a699d8e
Refactor
1 parent 1b8b8c7 commit a699d8e

1 file changed: 45 additions, 35 deletions

src/actions/askNotionPage.ts
@@ -73,14 +73,12 @@ const actionDefinition: ActionDefinition = {
 export default actionDefinition;
 
 export async function handler({ input }: ActionContext): Promise<OutputObject> {
-  const { notionPageUrl, notionApiKey, question, openaiApiKey, openaiModel } = input;
-
   try {
     // Extract the page ID from the provided Notion URL
-    const notionPageId = extractPageIdFromUrl(notionPageUrl);
+    const notionPageId = extractPageIdFromUrl(input.notionPageUrl);
 
     // Initialize the Notion client
-    const notion = new Client({ auth: notionApiKey });
+    const notion = new Client({ auth: input.notionApiKey });
 
     // Retrieve all blocks of the Notion page
     const blocks = await retrieveBlockChildren(notion, notionPageId);
@@ -95,55 +93,67 @@ export async function handler({ input }: ActionContext): Promise<OutputObject> {
       );
     }
 
-    // Initialize OpenAI with the provided API key
-    const openai = new OpenAI({ apiKey: openaiApiKey });
+    // Ask OpenAI for an answer
+    const answer = await askOpenAI(input.openaiApiKey, input.openaiModel, pageContent, input.question);
+
+    // Return the model's answer directly
+    return { textResponse: answer };
+  } catch (error: any) {
+    console.error('An error occurred:', (error as Error).message);
+    throw new Error(`Error occurred: ${(error as Error).message}`);
+  }
+}
+
+async function askOpenAI(
+  openaiApiKey: string,
+  openaiModel: string,
+  pageContent: string,
+  question: string,
+): Promise<string> {
+  // Initialize OpenAI with the provided API key
+  const openai = new OpenAI({ apiKey: openaiApiKey });
 
-    // Create the system message with instructions for the model
-    const systemMessage = `You are an FAQ expert. When asked a question or given a request related to a specific topic, you provide an accurate and concise answer based strictly on the content provided.
+  // Create the system message with instructions for the model
+  const systemMessage = `You are an FAQ expert. When asked a question or given a request related to a specific topic, you provide an accurate and concise answer based strictly on the content provided.
 You respond in the same language as the user’s input and adjust your answer to fit the context of the request, whether it’s a direct question or an indirect inquiry.
 You never guess or paraphrase — only answer if the explicit content for that request is available.
 If there are any disclaimers or indications in the content that it should not be shared with clients or is a work in progress, include that information only if it is explicitly mentioned.
 Here is the content you should use to generate your answer:
 ${pageContent}
 `;
 
-    // Set the user's question separately
-    const userQuestion = `Based on this content, please respond to the following request or question with high confidence:
+  // Set the user's question separately
+  const userQuestion = `Based on this content, please respond to the following request or question with high confidence:
 ${question}”.
 If you are not confident that the content fully addresses the request, respond with:
 ‘I don’t have enough information to answer your question.’
 `;
 
-    // Request completion from OpenAI using the specified model
-    const response = await openai.chat.completions.create({
-      model: openaiModel,
-      messages: [
-        { role: 'system', content: systemMessage },
-        { role: 'user', content: userQuestion },
-      ],
-    });
+  // Request completion from OpenAI using the specified model
+  const response = await openai.chat.completions.create({
+    model: openaiModel,
+    messages: [
+      { role: 'system', content: systemMessage },
+      { role: 'user', content: userQuestion },
+    ],
+  });
 
-    // Log and handle the response
-    if (!response.choices || response.choices.length === 0) {
-      console.error('Model did not respond with any choices.');
-      throw new Error('Model did not respond.');
-    }
+  // Log and handle the response
+  if (!response.choices || response.choices.length === 0) {
+    console.error('Model did not respond with any choices.');
+    throw new Error('Model did not respond.');
+  }
 
-    const messageContent = response.choices[0].message.content;
+  const messageContent = response.choices[0].message.content;
 
-    if (messageContent === null || messageContent.trim().length === 0) {
-      console.error("Model's answer length is too short.");
-      throw new Error("Model's answer is too short.");
-    }
+  if (messageContent === null || messageContent.trim().length === 0) {
+    console.error("Model's answer length is too short.");
+    throw new Error("Model's answer is too short.");
+  }
 
-    const answer = messageContent.trim();
+  const answer = messageContent.trim();
 
-    // Return the model's answer directly
-    return { textResponse: answer };
-  } catch (error: any) {
-    console.error('An error occurred:', (error as Error).message);
-    throw new Error(`Error occurred: ${(error as Error).message}`);
-  }
+  return answer;
 }
 
 /**
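
For orientation, a minimal usage sketch of the helper extracted by this commit. It assumes askOpenAI were exported from src/actions/askNotionPage.ts (the commit keeps it module-private) and called from an async context; the key source, model name, page content, and question below are placeholder assumptions, not values from the commit.

// Usage sketch only: askOpenAI is module-private in the commit; the import is hypothetical.
// The key source, model name, page content, and question are placeholders, not part of the commit.
import { askOpenAI } from './askNotionPage';

async function demo(): Promise<void> {
  const answer = await askOpenAI(
    process.env.OPENAI_API_KEY ?? '',                   // placeholder API key source
    'gpt-4o-mini',                                      // placeholder model name
    'Refunds are accepted within 30 days of purchase.', // placeholder Notion page content
    'What is the refund policy?',                       // placeholder question
  );
  // Either a grounded answer or the prompt's "I don't have enough information" fallback.
  console.log(answer);
}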
